file_name large_stringlengths 4 140 | prefix large_stringlengths 0 39k | suffix large_stringlengths 0 36.1k | middle large_stringlengths 0 29.4k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
mod.rs | //! This module is an attempt to provide a friendly, rust-esque interface to Apple's Audio Unit API.
//!
//! Learn more about the Audio Unit API [here](https://developer.apple.com/library/mac/documentation/MusicAudio/Conceptual/AudioUnitProgrammingGuide/Introduction/Introduction.html#//apple_ref/doc/uid/TP40003278-CH1-SW2)
//! and [here](https://developer.apple.com/library/mac/documentation/MusicAudio/Conceptual/AudioUnitProgrammingGuide/TheAudioUnit/TheAudioUnit.html).
//!
//! TODO: The following are `kAudioUnitSubType`s (along with their const u32) generated by
//! rust-bindgen that we could not find any documentation on:
//!
//! - MIDISynth = 1836284270,
//! - RoundTripAAC = 1918984547,
//! - SpatialMixer = 862217581,
//! - SphericalHeadPanner = 1936746610,
//! - VectorPanner = 1986158963,
//! - SoundFieldPanner = 1634558569,
//! - HRTFPanner = 1752331366,
//! - NetReceive = 1852990326,
//!
//! If you can find documentation on these, please feel free to submit an issue or PR with the
//! fixes!
use crate::error::Error;
use std::mem;
use std::os::raw::{c_uint, c_void};
use std::ptr;
use sys;
| EffectType, FormatConverterType, GeneratorType, IOType, MixerType, MusicDeviceType, Type,
};
#[cfg(target_os = "macos")]
pub mod macos_helpers;
pub mod audio_format;
pub mod render_callback;
pub mod sample_format;
pub mod stream_format;
pub mod types;
/// The input and output **Scope**s.
///
/// More info [here](https://developer.apple.com/library/ios/documentation/AudioUnit/Reference/AudioUnitPropertiesReference/index.html#//apple_ref/doc/constant_group/Audio_Unit_Scopes)
/// and [here](https://developer.apple.com/library/mac/documentation/MusicAudio/Conceptual/AudioUnitProgrammingGuide/TheAudioUnit/TheAudioUnit.html).
#[derive(Copy, Clone, Debug)]
pub enum Scope {
Global = 0,
Input = 1,
Output = 2,
Group = 3,
Part = 4,
Note = 5,
Layer = 6,
LayerItem = 7,
}
/// Represents the **Input** and **Output** **Element**s.
///
/// These are used when specifying which **Element** we're setting the properties of.
#[derive(Copy, Clone, Debug)]
pub enum Element {
Output = 0,
Input = 1,
}
/// A rust representation of the sys::AudioUnit, including a pointer to the current rendering callback.
///
/// Find the original Audio Unit Programming Guide [here](https://developer.apple.com/library/mac/documentation/MusicAudio/Conceptual/AudioUnitProgrammingGuide/TheAudioUnit/TheAudioUnit.html).
pub struct AudioUnit {
instance: sys::AudioUnit,
maybe_render_callback: Option<*mut render_callback::InputProcFnWrapper>,
maybe_input_callback: Option<InputCallback>,
}
struct InputCallback {
// The audio buffer list to which input data is rendered.
buffer_list: *mut sys::AudioBufferList,
callback: *mut render_callback::InputProcFnWrapper,
}
macro_rules! try_os_status {
($expr:expr) => {
Error::from_os_status($expr)?
};
}
impl AudioUnit {
/// Construct a new AudioUnit with any type that may be automatically converted into
/// [**Type**](./enum.Type).
///
/// Here is a list of compatible types:
///
/// - [**Type**](./types/enum.Type)
/// - [**IOType**](./types/enum.IOType)
/// - [**MusicDeviceType**](./types/enum.MusicDeviceType)
/// - [**GeneratorType**](./types/enum.GeneratorType)
/// - [**FormatConverterType**](./types/enum.FormatConverterType)
/// - [**EffectType**](./types/enum.EffectType)
/// - [**MixerType**](./types/enum.MixerType)
///
/// To construct the **AudioUnit** with some component flags, see
/// [**AudioUnit::new_with_flags**](./struct.AudioUnit#method.new_with_flags).
///
/// Note: the `AudioUnit` is constructed with the `kAudioUnitManufacturer_Apple` Manufacturer
/// Identifier, as this is the only Audio Unit Manufacturer Identifier documented by Apple in
/// the AudioUnit reference (see [here](https://developer.apple.com/library/prerelease/mac/documentation/AudioUnit/Reference/AUComponentServicesReference/index.html#//apple_ref/doc/constant_group/Audio_Unit_Manufacturer_Identifier)).
pub fn new<T>(ty: T) -> Result<AudioUnit, Error>
where
T: Into<Type>,
{
AudioUnit::new_with_flags(ty, 0, 0)
}
/// The same as [**AudioUnit::new**](./struct.AudioUnit#method.new) but with the given
/// component flags and mask.
pub fn new_with_flags<T>(ty: T, flags: u32, mask: u32) -> Result<AudioUnit, Error>
where
T: Into<Type>,
{
const MANUFACTURER_IDENTIFIER: u32 = sys::kAudioUnitManufacturer_Apple;
let au_type: Type = ty.into();
let sub_type_u32 = match au_type.as_subtype_u32() {
Some(u) => u,
None => return Err(Error::NoKnownSubtype),
};
// A description of the audio unit we desire.
let desc = sys::AudioComponentDescription {
componentType: au_type.as_u32() as c_uint,
componentSubType: sub_type_u32 as c_uint,
componentManufacturer: MANUFACTURER_IDENTIFIER,
componentFlags: flags,
componentFlagsMask: mask,
};
unsafe {
// Find the default audio unit for the description.
//
// From the "Audio Unit Hosting Guide for iOS":
//
// Passing NULL to the first parameter of AudioComponentFindNext tells this function to
// find the first system audio unit matching the description, using a system-defined
// ordering. If you instead pass a previously found audio unit reference in this
// parameter, the function locates the next audio unit matching the description.
let component = sys::AudioComponentFindNext(ptr::null_mut(), &desc as *const _);
if component.is_null() {
return Err(Error::NoMatchingDefaultAudioUnitFound);
}
// Create an instance of the default audio unit using the component.
let mut instance_uninit = mem::MaybeUninit::<sys::AudioUnit>::uninit();
try_os_status!(sys::AudioComponentInstanceNew(
component,
instance_uninit.as_mut_ptr() as *mut sys::AudioUnit
));
let instance: sys::AudioUnit = instance_uninit.assume_init();
// Initialise the audio unit!
try_os_status!(sys::AudioUnitInitialize(instance));
Ok(AudioUnit {
instance,
maybe_render_callback: None,
maybe_input_callback: None,
})
}
}
/// On successful initialization, the audio formats for input and output are valid
/// and the audio unit is ready to render. During initialization, an audio unit
/// allocates memory according to the maximum number of audio frames it can produce
/// in response to a single render call.
///
/// Usually, the state of an audio unit (such as its I/O formats and memory allocations)
/// cannot be changed while an audio unit is initialized.
pub fn initialize(&mut self) -> Result<(), Error> {
unsafe {
try_os_status!(sys::AudioUnitInitialize(self.instance));
}
Ok(())
}
/// Before you change an initialize audio unit’s processing characteristics,
/// such as its input or output audio data format or its sample rate, you must
/// first uninitialize it. Calling this function deallocates the audio unit’s resources.
///
/// After calling this function, you can reconfigure the audio unit and then call
/// AudioUnitInitialize to reinitialize it.
pub fn uninitialize(&mut self) -> Result<(), Error> {
unsafe {
try_os_status!(sys::AudioUnitUninitialize(self.instance));
}
Ok(())
}
/// Sets the value for some property of the **AudioUnit**.
///
/// To clear an audio unit property value, set the data paramater with `None::<()>`.
///
/// Clearing properties only works for those properties that do not have a default value.
///
/// For more on "properties" see [the reference](https://developer.apple.com/library/ios/documentation/AudioUnit/Reference/AudioUnitPropertiesReference/index.html#//apple_ref/doc/uid/TP40007288).
///
/// **Available** in iOS 2.0 and later.
///
/// Parameters
/// ----------
///
/// - **id**: The identifier of the property.
/// - **scope**: The audio unit scope for the property.
/// - **elem**: The audio unit element for the property.
/// - **maybe_data**: The value that you want to apply to the property.
pub fn set_property<T>(
&mut self,
id: u32,
scope: Scope,
elem: Element,
maybe_data: Option<&T>,
) -> Result<(), Error> {
set_property(self.instance, id, scope, elem, maybe_data)
}
/// Gets the value of an **AudioUnit** property.
///
/// **Available** in iOS 2.0 and later.
///
/// Parameters
/// ----------
///
/// - **id**: The identifier of the property.
/// - **scope**: The audio unit scope for the property.
/// - **elem**: The audio unit element for the property.
pub fn get_property<T>(&self, id: u32, scope: Scope, elem: Element) -> Result<T, Error> {
get_property(self.instance, id, scope, elem)
}
/// Starts an I/O **AudioUnit**, which in turn starts the audio unit processing graph that it is
/// connected to.
///
/// **Available** in OS X v10.0 and later.
pub fn start(&mut self) -> Result<(), Error> {
unsafe {
try_os_status!(sys::AudioOutputUnitStart(self.instance));
}
Ok(())
}
/// Stops an I/O **AudioUnit**, which in turn stops the audio unit processing graph that it is
/// connected to.
///
/// **Available** in OS X v10.0 and later.
pub fn stop(&mut self) -> Result<(), Error> {
unsafe {
try_os_status!(sys::AudioOutputUnitStop(self.instance));
}
Ok(())
}
/// Set the **AudioUnit**'s sample rate.
///
/// **Available** in iOS 2.0 and later.
pub fn set_sample_rate(&mut self, sample_rate: f64) -> Result<(), Error> {
let id = sys::kAudioUnitProperty_SampleRate;
self.set_property(id, Scope::Input, Element::Output, Some(&sample_rate))
}
/// Get the **AudioUnit**'s sample rate.
pub fn sample_rate(&self) -> Result<f64, Error> {
let id = sys::kAudioUnitProperty_SampleRate;
self.get_property(id, Scope::Input, Element::Output)
}
/// Sets the current **StreamFormat** for the AudioUnit.
///
/// Core Audio uses slightly different defaults depending on the platform.
///
/// From the Core Audio Overview:
///
/// > The canonical formats in Core Audio are as follows:
/// >
/// > - iOS input and output: Linear PCM with 16-bit integer samples.
/// > - iOS audio units and other audio processing: Noninterleaved linear PCM with 8.24-bit
/// fixed-point samples
/// > - Mac input and output: Linear PCM with 32-bit floating point samples.
/// > - Mac audio units and other audio processing: Noninterleaved linear PCM with 32-bit
/// floating-point
pub fn set_stream_format(
&mut self,
stream_format: StreamFormat,
scope: Scope,
) -> Result<(), Error> {
let id = sys::kAudioUnitProperty_StreamFormat;
let asbd = stream_format.to_asbd();
self.set_property(id, scope, Element::Output, Some(&asbd))
}
/// Return the current Stream Format for the AudioUnit.
pub fn stream_format(&self, scope: Scope) -> Result<StreamFormat, Error> {
let id = sys::kAudioUnitProperty_StreamFormat;
let asbd = self.get_property(id, scope, Element::Output)?;
StreamFormat::from_asbd(asbd)
}
/// Return the current output Stream Format for the AudioUnit.
pub fn output_stream_format(&self) -> Result<StreamFormat, Error> {
self.stream_format(Scope::Output)
}
/// Return the current input Stream Format for the AudioUnit.
pub fn input_stream_format(&self) -> Result<StreamFormat, Error> {
self.stream_format(Scope::Input)
}
}
unsafe impl Send for AudioUnit {}
impl Drop for AudioUnit {
fn drop(&mut self) {
unsafe {
use crate::error;
// We don't want to panic in `drop`, so we'll ignore returned errors.
//
// A user should explicitly terminate the `AudioUnit` if they want to handle errors (we
// still need to provide a way to actually do that).
self.stop().ok();
error::Error::from_os_status(sys::AudioUnitUninitialize(self.instance)).ok();
self.free_render_callback();
self.free_input_callback();
error::Error::from_os_status(sys::AudioComponentInstanceDispose(self.instance)).ok();
}
}
}
/// Sets the value for some property of the **AudioUnit**.
///
/// To clear an audio unit property value, set the data paramater with `None::<()>`.
///
/// Clearing properties only works for those properties that do not have a default value.
///
/// For more on "properties" see [the reference](https://developer.apple.com/library/ios/documentation/AudioUnit/Reference/AudioUnitPropertiesReference/index.html#//apple_ref/doc/uid/TP40007288).
///
/// **Available** in iOS 2.0 and later.
///
/// Parameters
/// ----------
///
/// - **au**: The AudioUnit instance.
/// - **id**: The identifier of the property.
/// - **scope**: The audio unit scope for the property.
/// - **elem**: The audio unit element for the property.
/// - **maybe_data**: The value that you want to apply to the property.
pub fn set_property<T>(
au: sys::AudioUnit,
id: u32,
scope: Scope,
elem: Element,
maybe_data: Option<&T>,
) -> Result<(), Error> {
let (data_ptr, size) = maybe_data
.map(|data| {
let ptr = data as *const _ as *const c_void;
let size = ::std::mem::size_of::<T>() as u32;
(ptr, size)
})
.unwrap_or_else(|| (::std::ptr::null(), 0));
let scope = scope as c_uint;
let elem = elem as c_uint;
unsafe {
try_os_status!(sys::AudioUnitSetProperty(
au, id, scope, elem, data_ptr, size
))
}
Ok(())
}
/// Gets the value of an **AudioUnit** property.
///
/// **Available** in iOS 2.0 and later.
///
/// Parameters
/// ----------
///
/// - **au**: The AudioUnit instance.
/// - **id**: The identifier of the property.
/// - **scope**: The audio unit scope for the property.
/// - **elem**: The audio unit element for the property.
pub fn get_property<T>(
au: sys::AudioUnit,
id: u32,
scope: Scope,
elem: Element,
) -> Result<T, Error> {
let scope = scope as c_uint;
let elem = elem as c_uint;
let mut size = ::std::mem::size_of::<T>() as u32;
unsafe {
let mut data_uninit = ::std::mem::MaybeUninit::<T>::uninit();
let data_ptr = data_uninit.as_mut_ptr() as *mut _ as *mut c_void;
let size_ptr = &mut size as *mut _;
try_os_status!(sys::AudioUnitGetProperty(
au, id, scope, elem, data_ptr, size_ptr
));
let data: T = data_uninit.assume_init();
Ok(data)
}
}
/// Gets the value of a specified audio session property.
///
/// **Available** in iOS 2.0 and later.
///
/// Parameters
/// ----------
///
/// - **id**: The identifier of the property.
#[cfg(target_os = "ios")]
pub fn audio_session_get_property<T>(id: u32) -> Result<T, Error> {
let mut size = ::std::mem::size_of::<T>() as u32;
unsafe {
let mut data_uninit = ::std::mem::MaybeUninit::<T>::uninit();
let data_ptr = data_uninit.as_mut_ptr() as *mut _ as *mut c_void;
let size_ptr = &mut size as *mut _;
try_os_status!(sys::AudioSessionGetProperty(id, size_ptr, data_ptr));
let data: T = data_uninit.assume_init();
Ok(data)
}
} | pub use self::audio_format::AudioFormat;
pub use self::sample_format::{Sample, SampleFormat};
pub use self::stream_format::StreamFormat;
pub use self::types::{ | random_line_split |
mod.rs | //! This module is an attempt to provide a friendly, rust-esque interface to Apple's Audio Unit API.
//!
//! Learn more about the Audio Unit API [here](https://developer.apple.com/library/mac/documentation/MusicAudio/Conceptual/AudioUnitProgrammingGuide/Introduction/Introduction.html#//apple_ref/doc/uid/TP40003278-CH1-SW2)
//! and [here](https://developer.apple.com/library/mac/documentation/MusicAudio/Conceptual/AudioUnitProgrammingGuide/TheAudioUnit/TheAudioUnit.html).
//!
//! TODO: The following are `kAudioUnitSubType`s (along with their const u32) generated by
//! rust-bindgen that we could not find any documentation on:
//!
//! - MIDISynth = 1836284270,
//! - RoundTripAAC = 1918984547,
//! - SpatialMixer = 862217581,
//! - SphericalHeadPanner = 1936746610,
//! - VectorPanner = 1986158963,
//! - SoundFieldPanner = 1634558569,
//! - HRTFPanner = 1752331366,
//! - NetReceive = 1852990326,
//!
//! If you can find documentation on these, please feel free to submit an issue or PR with the
//! fixes!
use crate::error::Error;
use std::mem;
use std::os::raw::{c_uint, c_void};
use std::ptr;
use sys;
pub use self::audio_format::AudioFormat;
pub use self::sample_format::{Sample, SampleFormat};
pub use self::stream_format::StreamFormat;
pub use self::types::{
EffectType, FormatConverterType, GeneratorType, IOType, MixerType, MusicDeviceType, Type,
};
#[cfg(target_os = "macos")]
pub mod macos_helpers;
pub mod audio_format;
pub mod render_callback;
pub mod sample_format;
pub mod stream_format;
pub mod types;
/// The input and output **Scope**s.
///
/// More info [here](https://developer.apple.com/library/ios/documentation/AudioUnit/Reference/AudioUnitPropertiesReference/index.html#//apple_ref/doc/constant_group/Audio_Unit_Scopes)
/// and [here](https://developer.apple.com/library/mac/documentation/MusicAudio/Conceptual/AudioUnitProgrammingGuide/TheAudioUnit/TheAudioUnit.html).
#[derive(Copy, Clone, Debug)]
pub enum Scope {
Global = 0,
Input = 1,
Output = 2,
Group = 3,
Part = 4,
Note = 5,
Layer = 6,
LayerItem = 7,
}
/// Represents the **Input** and **Output** **Element**s.
///
/// These are used when specifying which **Element** we're setting the properties of.
#[derive(Copy, Clone, Debug)]
pub enum Element {
Output = 0,
Input = 1,
}
/// A rust representation of the sys::AudioUnit, including a pointer to the current rendering callback.
///
/// Find the original Audio Unit Programming Guide [here](https://developer.apple.com/library/mac/documentation/MusicAudio/Conceptual/AudioUnitProgrammingGuide/TheAudioUnit/TheAudioUnit.html).
pub struct AudioUnit {
instance: sys::AudioUnit,
maybe_render_callback: Option<*mut render_callback::InputProcFnWrapper>,
maybe_input_callback: Option<InputCallback>,
}
struct | {
// The audio buffer list to which input data is rendered.
buffer_list: *mut sys::AudioBufferList,
callback: *mut render_callback::InputProcFnWrapper,
}
macro_rules! try_os_status {
($expr:expr) => {
Error::from_os_status($expr)?
};
}
impl AudioUnit {
/// Construct a new AudioUnit with any type that may be automatically converted into
/// [**Type**](./enum.Type).
///
/// Here is a list of compatible types:
///
/// - [**Type**](./types/enum.Type)
/// - [**IOType**](./types/enum.IOType)
/// - [**MusicDeviceType**](./types/enum.MusicDeviceType)
/// - [**GeneratorType**](./types/enum.GeneratorType)
/// - [**FormatConverterType**](./types/enum.FormatConverterType)
/// - [**EffectType**](./types/enum.EffectType)
/// - [**MixerType**](./types/enum.MixerType)
///
/// To construct the **AudioUnit** with some component flags, see
/// [**AudioUnit::new_with_flags**](./struct.AudioUnit#method.new_with_flags).
///
/// Note: the `AudioUnit` is constructed with the `kAudioUnitManufacturer_Apple` Manufacturer
/// Identifier, as this is the only Audio Unit Manufacturer Identifier documented by Apple in
/// the AudioUnit reference (see [here](https://developer.apple.com/library/prerelease/mac/documentation/AudioUnit/Reference/AUComponentServicesReference/index.html#//apple_ref/doc/constant_group/Audio_Unit_Manufacturer_Identifier)).
pub fn new<T>(ty: T) -> Result<AudioUnit, Error>
where
T: Into<Type>,
{
AudioUnit::new_with_flags(ty, 0, 0)
}
/// The same as [**AudioUnit::new**](./struct.AudioUnit#method.new) but with the given
/// component flags and mask.
pub fn new_with_flags<T>(ty: T, flags: u32, mask: u32) -> Result<AudioUnit, Error>
where
T: Into<Type>,
{
const MANUFACTURER_IDENTIFIER: u32 = sys::kAudioUnitManufacturer_Apple;
let au_type: Type = ty.into();
let sub_type_u32 = match au_type.as_subtype_u32() {
Some(u) => u,
None => return Err(Error::NoKnownSubtype),
};
// A description of the audio unit we desire.
let desc = sys::AudioComponentDescription {
componentType: au_type.as_u32() as c_uint,
componentSubType: sub_type_u32 as c_uint,
componentManufacturer: MANUFACTURER_IDENTIFIER,
componentFlags: flags,
componentFlagsMask: mask,
};
unsafe {
// Find the default audio unit for the description.
//
// From the "Audio Unit Hosting Guide for iOS":
//
// Passing NULL to the first parameter of AudioComponentFindNext tells this function to
// find the first system audio unit matching the description, using a system-defined
// ordering. If you instead pass a previously found audio unit reference in this
// parameter, the function locates the next audio unit matching the description.
let component = sys::AudioComponentFindNext(ptr::null_mut(), &desc as *const _);
if component.is_null() {
return Err(Error::NoMatchingDefaultAudioUnitFound);
}
// Create an instance of the default audio unit using the component.
let mut instance_uninit = mem::MaybeUninit::<sys::AudioUnit>::uninit();
try_os_status!(sys::AudioComponentInstanceNew(
component,
instance_uninit.as_mut_ptr() as *mut sys::AudioUnit
));
let instance: sys::AudioUnit = instance_uninit.assume_init();
// Initialise the audio unit!
try_os_status!(sys::AudioUnitInitialize(instance));
Ok(AudioUnit {
instance,
maybe_render_callback: None,
maybe_input_callback: None,
})
}
}
/// On successful initialization, the audio formats for input and output are valid
/// and the audio unit is ready to render. During initialization, an audio unit
/// allocates memory according to the maximum number of audio frames it can produce
/// in response to a single render call.
///
/// Usually, the state of an audio unit (such as its I/O formats and memory allocations)
/// cannot be changed while an audio unit is initialized.
pub fn initialize(&mut self) -> Result<(), Error> {
unsafe {
try_os_status!(sys::AudioUnitInitialize(self.instance));
}
Ok(())
}
/// Before you change an initialize audio unit’s processing characteristics,
/// such as its input or output audio data format or its sample rate, you must
/// first uninitialize it. Calling this function deallocates the audio unit’s resources.
///
/// After calling this function, you can reconfigure the audio unit and then call
/// AudioUnitInitialize to reinitialize it.
pub fn uninitialize(&mut self) -> Result<(), Error> {
unsafe {
try_os_status!(sys::AudioUnitUninitialize(self.instance));
}
Ok(())
}
/// Sets the value for some property of the **AudioUnit**.
///
/// To clear an audio unit property value, set the data paramater with `None::<()>`.
///
/// Clearing properties only works for those properties that do not have a default value.
///
/// For more on "properties" see [the reference](https://developer.apple.com/library/ios/documentation/AudioUnit/Reference/AudioUnitPropertiesReference/index.html#//apple_ref/doc/uid/TP40007288).
///
/// **Available** in iOS 2.0 and later.
///
/// Parameters
/// ----------
///
/// - **id**: The identifier of the property.
/// - **scope**: The audio unit scope for the property.
/// - **elem**: The audio unit element for the property.
/// - **maybe_data**: The value that you want to apply to the property.
pub fn set_property<T>(
&mut self,
id: u32,
scope: Scope,
elem: Element,
maybe_data: Option<&T>,
) -> Result<(), Error> {
set_property(self.instance, id, scope, elem, maybe_data)
}
/// Gets the value of an **AudioUnit** property.
///
/// **Available** in iOS 2.0 and later.
///
/// Parameters
/// ----------
///
/// - **id**: The identifier of the property.
/// - **scope**: The audio unit scope for the property.
/// - **elem**: The audio unit element for the property.
pub fn get_property<T>(&self, id: u32, scope: Scope, elem: Element) -> Result<T, Error> {
get_property(self.instance, id, scope, elem)
}
/// Starts an I/O **AudioUnit**, which in turn starts the audio unit processing graph that it is
/// connected to.
///
/// **Available** in OS X v10.0 and later.
pub fn start(&mut self) -> Result<(), Error> {
unsafe {
try_os_status!(sys::AudioOutputUnitStart(self.instance));
}
Ok(())
}
/// Stops an I/O **AudioUnit**, which in turn stops the audio unit processing graph that it is
/// connected to.
///
/// **Available** in OS X v10.0 and later.
pub fn stop(&mut self) -> Result<(), Error> {
unsafe {
try_os_status!(sys::AudioOutputUnitStop(self.instance));
}
Ok(())
}
/// Set the **AudioUnit**'s sample rate.
///
/// **Available** in iOS 2.0 and later.
pub fn set_sample_rate(&mut self, sample_rate: f64) -> Result<(), Error> {
let id = sys::kAudioUnitProperty_SampleRate;
self.set_property(id, Scope::Input, Element::Output, Some(&sample_rate))
}
/// Get the **AudioUnit**'s sample rate.
pub fn sample_rate(&self) -> Result<f64, Error> {
let id = sys::kAudioUnitProperty_SampleRate;
self.get_property(id, Scope::Input, Element::Output)
}
/// Sets the current **StreamFormat** for the AudioUnit.
///
/// Core Audio uses slightly different defaults depending on the platform.
///
/// From the Core Audio Overview:
///
/// > The canonical formats in Core Audio are as follows:
/// >
/// > - iOS input and output: Linear PCM with 16-bit integer samples.
/// > - iOS audio units and other audio processing: Noninterleaved linear PCM with 8.24-bit
/// fixed-point samples
/// > - Mac input and output: Linear PCM with 32-bit floating point samples.
/// > - Mac audio units and other audio processing: Noninterleaved linear PCM with 32-bit
/// floating-point
pub fn set_stream_format(
&mut self,
stream_format: StreamFormat,
scope: Scope,
) -> Result<(), Error> {
let id = sys::kAudioUnitProperty_StreamFormat;
let asbd = stream_format.to_asbd();
self.set_property(id, scope, Element::Output, Some(&asbd))
}
/// Return the current Stream Format for the AudioUnit.
pub fn stream_format(&self, scope: Scope) -> Result<StreamFormat, Error> {
let id = sys::kAudioUnitProperty_StreamFormat;
let asbd = self.get_property(id, scope, Element::Output)?;
StreamFormat::from_asbd(asbd)
}
/// Return the current output Stream Format for the AudioUnit.
pub fn output_stream_format(&self) -> Result<StreamFormat, Error> {
self.stream_format(Scope::Output)
}
/// Return the current input Stream Format for the AudioUnit.
pub fn input_stream_format(&self) -> Result<StreamFormat, Error> {
self.stream_format(Scope::Input)
}
}
unsafe impl Send for AudioUnit {}
impl Drop for AudioUnit {
fn drop(&mut self) {
unsafe {
use crate::error;
// We don't want to panic in `drop`, so we'll ignore returned errors.
//
// A user should explicitly terminate the `AudioUnit` if they want to handle errors (we
// still need to provide a way to actually do that).
self.stop().ok();
error::Error::from_os_status(sys::AudioUnitUninitialize(self.instance)).ok();
self.free_render_callback();
self.free_input_callback();
error::Error::from_os_status(sys::AudioComponentInstanceDispose(self.instance)).ok();
}
}
}
/// Sets the value for some property of the **AudioUnit**.
///
/// To clear an audio unit property value, set the data paramater with `None::<()>`.
///
/// Clearing properties only works for those properties that do not have a default value.
///
/// For more on "properties" see [the reference](https://developer.apple.com/library/ios/documentation/AudioUnit/Reference/AudioUnitPropertiesReference/index.html#//apple_ref/doc/uid/TP40007288).
///
/// **Available** in iOS 2.0 and later.
///
/// Parameters
/// ----------
///
/// - **au**: The AudioUnit instance.
/// - **id**: The identifier of the property.
/// - **scope**: The audio unit scope for the property.
/// - **elem**: The audio unit element for the property.
/// - **maybe_data**: The value that you want to apply to the property.
pub fn set_property<T>(
au: sys::AudioUnit,
id: u32,
scope: Scope,
elem: Element,
maybe_data: Option<&T>,
) -> Result<(), Error> {
let (data_ptr, size) = maybe_data
.map(|data| {
let ptr = data as *const _ as *const c_void;
let size = ::std::mem::size_of::<T>() as u32;
(ptr, size)
})
.unwrap_or_else(|| (::std::ptr::null(), 0));
let scope = scope as c_uint;
let elem = elem as c_uint;
unsafe {
try_os_status!(sys::AudioUnitSetProperty(
au, id, scope, elem, data_ptr, size
))
}
Ok(())
}
/// Gets the value of an **AudioUnit** property.
///
/// **Available** in iOS 2.0 and later.
///
/// Parameters
/// ----------
///
/// - **au**: The AudioUnit instance.
/// - **id**: The identifier of the property.
/// - **scope**: The audio unit scope for the property.
/// - **elem**: The audio unit element for the property.
pub fn get_property<T>(
au: sys::AudioUnit,
id: u32,
scope: Scope,
elem: Element,
) -> Result<T, Error> {
let scope = scope as c_uint;
let elem = elem as c_uint;
let mut size = ::std::mem::size_of::<T>() as u32;
unsafe {
let mut data_uninit = ::std::mem::MaybeUninit::<T>::uninit();
let data_ptr = data_uninit.as_mut_ptr() as *mut _ as *mut c_void;
let size_ptr = &mut size as *mut _;
try_os_status!(sys::AudioUnitGetProperty(
au, id, scope, elem, data_ptr, size_ptr
));
let data: T = data_uninit.assume_init();
Ok(data)
}
}
/// Gets the value of a specified audio session property.
///
/// **Available** in iOS 2.0 and later.
///
/// Parameters
/// ----------
///
/// - **id**: The identifier of the property.
#[cfg(target_os = "ios")]
pub fn audio_session_get_property<T>(id: u32) -> Result<T, Error> {
let mut size = ::std::mem::size_of::<T>() as u32;
unsafe {
let mut data_uninit = ::std::mem::MaybeUninit::<T>::uninit();
let data_ptr = data_uninit.as_mut_ptr() as *mut _ as *mut c_void;
let size_ptr = &mut size as *mut _;
try_os_status!(sys::AudioSessionGetProperty(id, size_ptr, data_ptr));
let data: T = data_uninit.assume_init();
Ok(data)
}
}
| InputCallback | identifier_name |
mod.rs | //! This module is an attempt to provide a friendly, rust-esque interface to Apple's Audio Unit API.
//!
//! Learn more about the Audio Unit API [here](https://developer.apple.com/library/mac/documentation/MusicAudio/Conceptual/AudioUnitProgrammingGuide/Introduction/Introduction.html#//apple_ref/doc/uid/TP40003278-CH1-SW2)
//! and [here](https://developer.apple.com/library/mac/documentation/MusicAudio/Conceptual/AudioUnitProgrammingGuide/TheAudioUnit/TheAudioUnit.html).
//!
//! TODO: The following are `kAudioUnitSubType`s (along with their const u32) generated by
//! rust-bindgen that we could not find any documentation on:
//!
//! - MIDISynth = 1836284270,
//! - RoundTripAAC = 1918984547,
//! - SpatialMixer = 862217581,
//! - SphericalHeadPanner = 1936746610,
//! - VectorPanner = 1986158963,
//! - SoundFieldPanner = 1634558569,
//! - HRTFPanner = 1752331366,
//! - NetReceive = 1852990326,
//!
//! If you can find documentation on these, please feel free to submit an issue or PR with the
//! fixes!
use crate::error::Error;
use std::mem;
use std::os::raw::{c_uint, c_void};
use std::ptr;
use sys;
pub use self::audio_format::AudioFormat;
pub use self::sample_format::{Sample, SampleFormat};
pub use self::stream_format::StreamFormat;
pub use self::types::{
EffectType, FormatConverterType, GeneratorType, IOType, MixerType, MusicDeviceType, Type,
};
#[cfg(target_os = "macos")]
pub mod macos_helpers;
pub mod audio_format;
pub mod render_callback;
pub mod sample_format;
pub mod stream_format;
pub mod types;
/// The input and output **Scope**s.
///
/// More info [here](https://developer.apple.com/library/ios/documentation/AudioUnit/Reference/AudioUnitPropertiesReference/index.html#//apple_ref/doc/constant_group/Audio_Unit_Scopes)
/// and [here](https://developer.apple.com/library/mac/documentation/MusicAudio/Conceptual/AudioUnitProgrammingGuide/TheAudioUnit/TheAudioUnit.html).
#[derive(Copy, Clone, Debug)]
pub enum Scope {
Global = 0,
Input = 1,
Output = 2,
Group = 3,
Part = 4,
Note = 5,
Layer = 6,
LayerItem = 7,
}
/// Represents the **Input** and **Output** **Element**s.
///
/// These are used when specifying which **Element** we're setting the properties of.
#[derive(Copy, Clone, Debug)]
pub enum Element {
Output = 0,
Input = 1,
}
/// A rust representation of the sys::AudioUnit, including a pointer to the current rendering callback.
///
/// Find the original Audio Unit Programming Guide [here](https://developer.apple.com/library/mac/documentation/MusicAudio/Conceptual/AudioUnitProgrammingGuide/TheAudioUnit/TheAudioUnit.html).
pub struct AudioUnit {
instance: sys::AudioUnit,
maybe_render_callback: Option<*mut render_callback::InputProcFnWrapper>,
maybe_input_callback: Option<InputCallback>,
}
struct InputCallback {
// The audio buffer list to which input data is rendered.
buffer_list: *mut sys::AudioBufferList,
callback: *mut render_callback::InputProcFnWrapper,
}
macro_rules! try_os_status {
($expr:expr) => {
Error::from_os_status($expr)?
};
}
impl AudioUnit {
/// Construct a new AudioUnit with any type that may be automatically converted into
/// [**Type**](./enum.Type).
///
/// Here is a list of compatible types:
///
/// - [**Type**](./types/enum.Type)
/// - [**IOType**](./types/enum.IOType)
/// - [**MusicDeviceType**](./types/enum.MusicDeviceType)
/// - [**GeneratorType**](./types/enum.GeneratorType)
/// - [**FormatConverterType**](./types/enum.FormatConverterType)
/// - [**EffectType**](./types/enum.EffectType)
/// - [**MixerType**](./types/enum.MixerType)
///
/// To construct the **AudioUnit** with some component flags, see
/// [**AudioUnit::new_with_flags**](./struct.AudioUnit#method.new_with_flags).
///
/// Note: the `AudioUnit` is constructed with the `kAudioUnitManufacturer_Apple` Manufacturer
/// Identifier, as this is the only Audio Unit Manufacturer Identifier documented by Apple in
/// the AudioUnit reference (see [here](https://developer.apple.com/library/prerelease/mac/documentation/AudioUnit/Reference/AUComponentServicesReference/index.html#//apple_ref/doc/constant_group/Audio_Unit_Manufacturer_Identifier)).
pub fn new<T>(ty: T) -> Result<AudioUnit, Error>
where
T: Into<Type>,
{
AudioUnit::new_with_flags(ty, 0, 0)
}
/// The same as [**AudioUnit::new**](./struct.AudioUnit#method.new) but with the given
/// component flags and mask.
pub fn new_with_flags<T>(ty: T, flags: u32, mask: u32) -> Result<AudioUnit, Error>
where
T: Into<Type>,
{
const MANUFACTURER_IDENTIFIER: u32 = sys::kAudioUnitManufacturer_Apple;
let au_type: Type = ty.into();
let sub_type_u32 = match au_type.as_subtype_u32() {
Some(u) => u,
None => return Err(Error::NoKnownSubtype),
};
// A description of the audio unit we desire.
let desc = sys::AudioComponentDescription {
componentType: au_type.as_u32() as c_uint,
componentSubType: sub_type_u32 as c_uint,
componentManufacturer: MANUFACTURER_IDENTIFIER,
componentFlags: flags,
componentFlagsMask: mask,
};
unsafe {
// Find the default audio unit for the description.
//
// From the "Audio Unit Hosting Guide for iOS":
//
// Passing NULL to the first parameter of AudioComponentFindNext tells this function to
// find the first system audio unit matching the description, using a system-defined
// ordering. If you instead pass a previously found audio unit reference in this
// parameter, the function locates the next audio unit matching the description.
let component = sys::AudioComponentFindNext(ptr::null_mut(), &desc as *const _);
if component.is_null() {
return Err(Error::NoMatchingDefaultAudioUnitFound);
}
// Create an instance of the default audio unit using the component.
let mut instance_uninit = mem::MaybeUninit::<sys::AudioUnit>::uninit();
try_os_status!(sys::AudioComponentInstanceNew(
component,
instance_uninit.as_mut_ptr() as *mut sys::AudioUnit
));
let instance: sys::AudioUnit = instance_uninit.assume_init();
// Initialise the audio unit!
try_os_status!(sys::AudioUnitInitialize(instance));
Ok(AudioUnit {
instance,
maybe_render_callback: None,
maybe_input_callback: None,
})
}
}
/// On successful initialization, the audio formats for input and output are valid
/// and the audio unit is ready to render. During initialization, an audio unit
/// allocates memory according to the maximum number of audio frames it can produce
/// in response to a single render call.
///
/// Usually, the state of an audio unit (such as its I/O formats and memory allocations)
/// cannot be changed while an audio unit is initialized.
pub fn initialize(&mut self) -> Result<(), Error> {
unsafe {
try_os_status!(sys::AudioUnitInitialize(self.instance));
}
Ok(())
}
/// Before you change an initialize audio unit’s processing characteristics,
/// such as its input or output audio data format or its sample rate, you must
/// first uninitialize it. Calling this function deallocates the audio unit’s resources.
///
/// After calling this function, you can reconfigure the audio unit and then call
/// AudioUnitInitialize to reinitialize it.
pub fn uninitialize(&mut self) -> Result<(), Error> {
unsafe {
try_os_status!(sys::AudioUnitUninitialize(self.instance));
}
Ok(())
}
/// Sets the value for some property of the **AudioUnit**.
///
/// To clear an audio unit property value, set the data paramater with `None::<()>`.
///
/// Clearing properties only works for those properties that do not have a default value.
///
/// For more on "properties" see [the reference](https://developer.apple.com/library/ios/documentation/AudioUnit/Reference/AudioUnitPropertiesReference/index.html#//apple_ref/doc/uid/TP40007288).
///
/// **Available** in iOS 2.0 and later.
///
/// Parameters
/// ----------
///
/// - **id**: The identifier of the property.
/// - **scope**: The audio unit scope for the property.
/// - **elem**: The audio unit element for the property.
/// - **maybe_data**: The value that you want to apply to the property.
pub fn set_property<T>(
&mut self,
id: u32,
scope: Scope,
elem: Element,
maybe_data: Option<&T>,
) -> Result<(), Error> {
set_property(self.instance, id, scope, elem, maybe_data)
}
/// Gets the value of an **AudioUnit** property.
///
/// **Available** in iOS 2.0 and later.
///
/// Parameters
/// ----------
///
/// - **id**: The identifier of the property.
/// - **scope**: The audio unit scope for the property.
/// - **elem**: The audio unit element for the property.
pub fn get_property<T>(&self, id: u32, scope: Scope, elem: Element) -> Result<T, Error> {
get_property(self.instance, id, scope, elem)
}
/// Starts an I/O **AudioUnit**, which in turn starts the audio unit processing graph that it is
/// connected to.
///
/// **Available** in OS X v10.0 and later.
pub fn start(&mut self) -> Result<(), Error> {
unsafe {
try_os_status!(sys::AudioOutputUnitStart(self.instance));
}
Ok(())
}
/// Stops an I/O **AudioUnit**, which in turn stops the audio unit processing graph that it is
/// connected to.
///
/// **Available** in OS X v10.0 and later.
pub fn stop(&mut self) -> Result<(), Error> {
unsafe {
try_os_status!(sys::AudioOutputUnitStop(self.instance));
}
Ok(())
}
/// Set the **AudioUnit**'s sample rate.
///
/// **Available** in iOS 2.0 and later.
pub fn set_sample_rate(&mut self, sample_rate: f64) -> Result<(), Error> {
let id = sys::kAudioUnitProperty_SampleRate;
self.set_property(id, Scope::Input, Element::Output, Some(&sample_rate))
}
/// Get the **AudioUnit**'s sample rate.
pub fn sample_rate(&self) -> Result<f64, Error> {
let id = sys::kAudioUnitProperty_SampleRate;
self.get_property(id, Scope::Input, Element::Output)
}
/// Sets the current **StreamFormat** for the AudioUnit.
///
/// Core Audio uses slightly different defaults depending on the platform.
///
/// From the Core Audio Overview:
///
/// > The canonical formats in Core Audio are as follows:
/// >
/// > - iOS input and output: Linear PCM with 16-bit integer samples.
/// > - iOS audio units and other audio processing: Noninterleaved linear PCM with 8.24-bit
/// fixed-point samples
/// > - Mac input and output: Linear PCM with 32-bit floating point samples.
/// > - Mac audio units and other audio processing: Noninterleaved linear PCM with 32-bit
/// floating-point
pub fn set_stream_format(
&mut self,
stream_format: StreamFormat,
scope: Scope,
) -> Result<(), Error> {
let id = sys::kAudioUnitProperty_StreamFormat;
let asbd = stream_format.to_asbd();
self.set_property(id, scope, Element::Output, Some(&asbd))
}
/// Return the current Stream Format for the AudioUnit.
pub fn stream_format(&self, scope: Scope) -> Result<StreamFormat, Error> {
let id = sys::kAudioUnitProperty_StreamFormat;
let asbd = self.get_property(id, scope, Element::Output)?;
StreamFormat::from_asbd(asbd)
}
/// Return the current output Stream Format for the AudioUnit.
pub fn output_stream_format(&self) -> Result<StreamFormat, Error> {
| /// Return the current input Stream Format for the AudioUnit.
pub fn input_stream_format(&self) -> Result<StreamFormat, Error> {
self.stream_format(Scope::Input)
}
}
unsafe impl Send for AudioUnit {}
impl Drop for AudioUnit {
fn drop(&mut self) {
unsafe {
use crate::error;
// We don't want to panic in `drop`, so we'll ignore returned errors.
//
// A user should explicitly terminate the `AudioUnit` if they want to handle errors (we
// still need to provide a way to actually do that).
self.stop().ok();
error::Error::from_os_status(sys::AudioUnitUninitialize(self.instance)).ok();
self.free_render_callback();
self.free_input_callback();
error::Error::from_os_status(sys::AudioComponentInstanceDispose(self.instance)).ok();
}
}
}
/// Sets the value for some property of the **AudioUnit**.
///
/// To clear an audio unit property value, set the data paramater with `None::<()>`.
///
/// Clearing properties only works for those properties that do not have a default value.
///
/// For more on "properties" see [the reference](https://developer.apple.com/library/ios/documentation/AudioUnit/Reference/AudioUnitPropertiesReference/index.html#//apple_ref/doc/uid/TP40007288).
///
/// **Available** in iOS 2.0 and later.
///
/// Parameters
/// ----------
///
/// - **au**: The AudioUnit instance.
/// - **id**: The identifier of the property.
/// - **scope**: The audio unit scope for the property.
/// - **elem**: The audio unit element for the property.
/// - **maybe_data**: The value that you want to apply to the property.
pub fn set_property<T>(
au: sys::AudioUnit,
id: u32,
scope: Scope,
elem: Element,
maybe_data: Option<&T>,
) -> Result<(), Error> {
let (data_ptr, size) = maybe_data
.map(|data| {
let ptr = data as *const _ as *const c_void;
let size = ::std::mem::size_of::<T>() as u32;
(ptr, size)
})
.unwrap_or_else(|| (::std::ptr::null(), 0));
let scope = scope as c_uint;
let elem = elem as c_uint;
unsafe {
try_os_status!(sys::AudioUnitSetProperty(
au, id, scope, elem, data_ptr, size
))
}
Ok(())
}
/// Gets the value of an **AudioUnit** property.
///
/// **Available** in iOS 2.0 and later.
///
/// Parameters
/// ----------
///
/// - **au**: The AudioUnit instance.
/// - **id**: The identifier of the property.
/// - **scope**: The audio unit scope for the property.
/// - **elem**: The audio unit element for the property.
pub fn get_property<T>(
au: sys::AudioUnit,
id: u32,
scope: Scope,
elem: Element,
) -> Result<T, Error> {
let scope = scope as c_uint;
let elem = elem as c_uint;
let mut size = ::std::mem::size_of::<T>() as u32;
unsafe {
let mut data_uninit = ::std::mem::MaybeUninit::<T>::uninit();
let data_ptr = data_uninit.as_mut_ptr() as *mut _ as *mut c_void;
let size_ptr = &mut size as *mut _;
try_os_status!(sys::AudioUnitGetProperty(
au, id, scope, elem, data_ptr, size_ptr
));
let data: T = data_uninit.assume_init();
Ok(data)
}
}
/// Gets the value of a specified audio session property.
///
/// **Available** in iOS 2.0 and later.
///
/// Parameters
/// ----------
///
/// - **id**: The identifier of the property.
#[cfg(target_os = "ios")]
pub fn audio_session_get_property<T>(id: u32) -> Result<T, Error> {
let mut size = ::std::mem::size_of::<T>() as u32;
unsafe {
let mut data_uninit = ::std::mem::MaybeUninit::<T>::uninit();
let data_ptr = data_uninit.as_mut_ptr() as *mut _ as *mut c_void;
let size_ptr = &mut size as *mut _;
try_os_status!(sys::AudioSessionGetProperty(id, size_ptr, data_ptr));
let data: T = data_uninit.assume_init();
Ok(data)
}
}
| self.stream_format(Scope::Output)
}
| identifier_body |
updater.go | package debian
import (
"bufio"
"bytes"
"context"
"fmt"
"io"
"net/http"
"net/textproto"
"net/url"
"path"
"regexp"
"strconv"
"strings"
"github.com/quay/claircore"
"github.com/quay/zlog"
"github.com/quay/claircore/libvuln/driver"
"github.com/quay/claircore/pkg/tmp"
)
//doc:url updater
const (
defaultMirror = `https://deb.debian.org/`
defaultJSON = `https://security-tracker.debian.org/tracker/data/json`
)
var (
_ driver.UpdaterSetFactory = (*Factory)(nil)
_ driver.Configurable = (*Factory)(nil)
_ driver.Updater = (*updater)(nil)
_ driver.Configurable = (*updater)(nil)
)
// Factory creates Updaters for all Debian distributions that exist
// in the mirror, and have entries in the JSON security tracker.
//
// [Configure] must be called before [UpdaterSet].
type Factory struct {
c *http.Client
mirror *url.URL
json *url.URL
}
// NewFactory constructs a Factory.
//
// [Configure] must be called before [UpdaterSet].
func NewFactory(_ context.Context) (*Factory, error) {
f := &Factory{}
return f, nil
}
// Configure implements [driver.Configurable].
func (f *Factory) | (_ context.Context, cf driver.ConfigUnmarshaler, c *http.Client) error {
f.c = c
var cfg FactoryConfig
if err := cf(&cfg); err != nil {
return fmt.Errorf("debian: factory configuration error: %w", err)
}
if cfg.ArchiveURL != "" || cfg.OVALURL != "" {
return fmt.Errorf("debian: neither archive_url nor oval_url should be populated anymore; use json_url and mirror_url instead")
}
u, err := url.Parse(defaultMirror)
if cfg.MirrorURL != "" {
u, err = url.Parse(cfg.MirrorURL)
}
if err != nil {
return fmt.Errorf("debian: bad mirror URL: %w", err)
}
f.mirror, err = u.Parse("debian/")
if err != nil {
return fmt.Errorf("debian: bad mirror URL: %w", err)
}
f.json, err = url.Parse(defaultJSON)
if cfg.JSONURL != "" {
f.json, err = url.Parse(cfg.JSONURL)
}
if err != nil {
return fmt.Errorf("debian: bad JSON URL: %w", err)
}
return nil
}
// FactoryConfig is the configuration honored by the Factory.
//
// The "mirror" URLs expect to find HTML at "dists/" formatted like
// the HTML from the Debian project (that is to say, HTML containing relative links
// to distribution directories).
//
// The "mirror" URL needs a trailing slash.
//
// The "JSON" URL expects to find a JSON array of packages mapped to related vulnerabilities.
type FactoryConfig struct {
// ArchiveURL is a URL to a Debian archive.
//
// Deprecated: Only MirrorURL should be used.
ArchiveURL string `json:"archive_url" yaml:"archive_url"`
MirrorURL string `json:"mirror_url" yaml:"mirror_url"`
// OVALURL is a URL to a collection of OVAL XML documents.
//
// Deprecated: Use JSONURL instead.
OVALURL string `json:"oval_url" yaml:"oval_url"`
// JSONURL is a URL to a JSON vulnerability feed.
JSONURL string `json:"json_url" yaml:"json_url"`
}
var (
// LinkRegexp is a bad regexp to extract link targets.
// This will break if Debian's codenames include a double-quote in the future.
linkRegexp = regexp.MustCompile(`href="([^"]+)"`)
// SkipList is a list of strings that, experimentally, indicate the string
// is not a codename.
skipList = []string{
"-", "Debian", "sid", "stable", "testing", "experimental", "README", "updates", "backports",
}
)
// UpdaterSet implements [driver.UpdaterSetFactory].
func (f *Factory) UpdaterSet(ctx context.Context) (driver.UpdaterSet, error) {
s := driver.NewUpdaterSet()
ds, err := f.findReleases(ctx, f.mirror)
if err != nil {
return s, fmt.Errorf("debian: examining remote: %w", err)
}
// TODO: Consider returning stub if Last-Modified has not updated.
u := &updater{
jsonURL: f.json.String(),
}
for _, d := range ds {
src, err := f.mirror.Parse(path.Join("dists", d.VersionCodeName) + "/")
if err != nil {
return s, fmt.Errorf("debian: unable to construct source URL: %w", err)
}
u.dists = append(u.dists, sourceURL{
distro: d.VersionCodeName,
url: src,
})
}
if err := s.Add(u); err != nil {
return s, fmt.Errorf("debian: unable to add updater: %w", err)
}
return s, nil
}
// FindReleases is split out as a method to make it easier to examine the mirror and the archive.
func (f *Factory) findReleases(ctx context.Context, u *url.URL) ([]*claircore.Distribution, error) {
dir, err := u.Parse("dists/")
if err != nil {
return nil, fmt.Errorf("debian: unable to construct URL: %w", err)
}
req, err := http.NewRequestWithContext(ctx, http.MethodGet, dir.String(), nil)
if err != nil {
return nil, fmt.Errorf("debian: unable to construct request: %w", err)
}
res, err := f.c.Do(req)
if err != nil {
return nil, fmt.Errorf("debian: unable to do request: %w", err)
}
defer res.Body.Close()
switch res.StatusCode {
case http.StatusOK:
default:
return nil, fmt.Errorf("debian: unexpected status fetching %q: %s", dir.String(), res.Status)
}
var buf bytes.Buffer
if _, err := buf.ReadFrom(res.Body); err != nil {
return nil, fmt.Errorf("debian: unable to read dists listing: %w", err)
}
ms := linkRegexp.FindAllStringSubmatch(buf.String(), -1)
var todos []*claircore.Distribution
Listing:
for _, m := range ms {
dist := m[1]
switch {
case dist == "":
continue
case dist[0] == '/', dist[0] == '?':
continue
}
for _, s := range skipList {
if strings.Contains(dist, s) {
continue Listing
}
}
dist = strings.Trim(dist, "/")
rf, err := dir.Parse(path.Join(dist, `Release`))
if err != nil {
zlog.Info(ctx).
Err(err).
Stringer("context", dir).
Str("target", path.Join(dist, `Release`)).
Msg("unable to construct URL")
continue
}
req, err := http.NewRequestWithContext(ctx, http.MethodGet, rf.String(), nil)
if err != nil {
zlog.Info(ctx).
Err(err).
Stringer("url", rf).
Msg("unable to construct request")
continue
}
req.Header.Set("range", "bytes=0-512")
res, err := f.c.Do(req)
if err != nil {
zlog.Info(ctx).
Err(err).
Stringer("url", rf).
Msg("unable to do request")
continue
}
buf.Reset()
buf.ReadFrom(res.Body)
res.Body.Close()
switch res.StatusCode {
case http.StatusPartialContent, http.StatusOK:
case http.StatusNotFound: // Probably extremely old, it's fine.
continue
default:
zlog.Info(ctx).
Str("status", res.Status).
Stringer("url", rf).
Msg("unexpected response")
continue
}
tp := textproto.NewReader(bufio.NewReader(io.MultiReader(&buf, bytes.NewReader([]byte("\r\n\r\n")))))
h, err := tp.ReadMIMEHeader()
if err != nil {
zlog.Info(ctx).Err(err).Msg("unable to read MIME-ish headers")
continue
}
sv := h.Get("Version")
if sv == "" {
zlog.Debug(ctx).Str("dist", dist).Msg("no version assigned, skipping")
continue
}
vs := strings.Split(sv, ".")
if len(vs) == 1 {
zlog.Debug(ctx).Str("dist", dist).Msg("no version assigned, skipping")
continue
}
ver, err := strconv.ParseInt(vs[0], 10, 32)
if err != nil {
zlog.Info(ctx).Err(err).Msg("unable to parse version")
continue
}
todos = append(todos, mkDist(dist, int(ver)))
}
return todos, nil
}
// Updater implements [driver.updater].
type updater struct {
// jsonURL is the URL from which to fetch JSON vulnerability data
jsonURL string
dists []sourceURL
c *http.Client
sm *sourcesMap
}
// UpdaterConfig is the configuration for the updater.
type UpdaterConfig struct {
// Deprecated: Use JSONURL instead.
OVALURL string `json:"url" yaml:"url"`
JSONURL string `json:"json_url" yaml:"json_url"`
// Deprecated: Use DistsURLs instead.
DistsURL string `json:"dists_url" yaml:"dists_url"`
DistsURLs []sourceURL `json:"dists_urls" yaml:"dists_urls"`
}
// Name implements [driver.Updater].
func (u *updater) Name() string {
return "debian/updater"
}
// Configure implements [driver.Configurable].
func (u *updater) Configure(ctx context.Context, f driver.ConfigUnmarshaler, c *http.Client) error {
ctx = zlog.ContextWithValues(ctx, "component", "debian/Updater.Configure")
u.c = c
var cfg UpdaterConfig
if err := f(&cfg); err != nil {
return err
}
if cfg.DistsURL != "" || cfg.OVALURL != "" {
zlog.Error(ctx).Msg("configured with deprecated URLs")
return fmt.Errorf("debian: neither url nor dists_url should be used anymore; use json_url and dists_urls instead")
}
if cfg.JSONURL != "" {
u.jsonURL = cfg.JSONURL
zlog.Info(ctx).
Msg("configured JSON database URL")
}
if len(cfg.DistsURLs) != 0 {
u.dists = cfg.DistsURLs
zlog.Info(ctx).
Msg("configured dists URLs")
}
var srcs []sourceURL
for _, dist := range u.dists {
src, err := url.Parse(dist.url.String())
if err != nil {
return fmt.Errorf("debian: unable to parse dist URL: %w", err)
}
srcs = append(srcs, sourceURL{distro: dist.distro, url: src})
}
u.sm = newSourcesMap(u.c, srcs)
return nil
}
// Fetch implements [driver.Fetcher].
func (u *updater) Fetch(ctx context.Context, fingerprint driver.Fingerprint) (io.ReadCloser, driver.Fingerprint, error) {
ctx = zlog.ContextWithValues(ctx,
"component", "debian/Updater.Fetch",
"database", u.jsonURL)
req, err := http.NewRequestWithContext(ctx, http.MethodGet, u.jsonURL, nil)
if err != nil {
return nil, "", fmt.Errorf("failed to create request")
}
if fingerprint != "" {
req.Header.Set("If-Modified-Since", string(fingerprint))
}
// fetch JSON database
resp, err := u.c.Do(req)
if resp != nil {
defer resp.Body.Close()
}
if err != nil {
return nil, "", fmt.Errorf("failed to retrieve JSON database: %v", err)
}
fp := resp.Header.Get("Last-Modified")
switch resp.StatusCode {
case http.StatusOK:
if fingerprint == "" || fp != string(fingerprint) {
zlog.Info(ctx).Msg("fetching latest JSON database")
break
}
fallthrough
case http.StatusNotModified:
return nil, fingerprint, driver.Unchanged
default:
return nil, "", fmt.Errorf("unexpected response: %v", resp.Status)
}
f, err := tmp.NewFile("", "debian.")
if err != nil {
return nil, "", err
}
var success bool
defer func() {
if !success {
if err := f.Close(); err != nil {
zlog.Warn(ctx).Err(err).Msg("unable to close spool")
}
}
}()
if _, err := io.Copy(f, resp.Body); err != nil {
return nil, "", fmt.Errorf("failed to read http body: %w", err)
}
if _, err := f.Seek(0, io.SeekStart); err != nil {
return nil, "", fmt.Errorf("failed to seek body: %w", err)
}
zlog.Info(ctx).Msg("fetched latest json database successfully")
if err := u.sm.Update(ctx); err != nil {
return nil, "", fmt.Errorf("could not update source to binary map: %w", err)
}
zlog.Info(ctx).Msg("updated the debian source to binary map successfully")
success = true
return f, driver.Fingerprint(fp), err
}
| Configure | identifier_name |
updater.go | package debian
import (
"bufio"
"bytes"
"context"
"fmt"
"io"
"net/http"
"net/textproto"
"net/url"
"path"
"regexp"
"strconv"
"strings"
"github.com/quay/claircore"
"github.com/quay/zlog"
"github.com/quay/claircore/libvuln/driver"
"github.com/quay/claircore/pkg/tmp"
)
//doc:url updater
const (
defaultMirror = `https://deb.debian.org/`
defaultJSON = `https://security-tracker.debian.org/tracker/data/json`
)
var (
_ driver.UpdaterSetFactory = (*Factory)(nil)
_ driver.Configurable = (*Factory)(nil)
_ driver.Updater = (*updater)(nil)
_ driver.Configurable = (*updater)(nil)
)
// Factory creates Updaters for all Debian distributions that exist
// in the mirror, and have entries in the JSON security tracker.
//
// [Configure] must be called before [UpdaterSet].
type Factory struct {
c *http.Client
mirror *url.URL
json *url.URL
}
// NewFactory constructs a Factory.
//
// [Configure] must be called before [UpdaterSet].
func NewFactory(_ context.Context) (*Factory, error) |
// Configure implements [driver.Configurable].
func (f *Factory) Configure(_ context.Context, cf driver.ConfigUnmarshaler, c *http.Client) error {
f.c = c
var cfg FactoryConfig
if err := cf(&cfg); err != nil {
return fmt.Errorf("debian: factory configuration error: %w", err)
}
if cfg.ArchiveURL != "" || cfg.OVALURL != "" {
return fmt.Errorf("debian: neither archive_url nor oval_url should be populated anymore; use json_url and mirror_url instead")
}
u, err := url.Parse(defaultMirror)
if cfg.MirrorURL != "" {
u, err = url.Parse(cfg.MirrorURL)
}
if err != nil {
return fmt.Errorf("debian: bad mirror URL: %w", err)
}
f.mirror, err = u.Parse("debian/")
if err != nil {
return fmt.Errorf("debian: bad mirror URL: %w", err)
}
f.json, err = url.Parse(defaultJSON)
if cfg.JSONURL != "" {
f.json, err = url.Parse(cfg.JSONURL)
}
if err != nil {
return fmt.Errorf("debian: bad JSON URL: %w", err)
}
return nil
}
// FactoryConfig is the configuration honored by the Factory.
//
// The "mirror" URLs expect to find HTML at "dists/" formatted like
// the HTML from the Debian project (that is to say, HTML containing relative links
// to distribution directories).
//
// The "mirror" URL needs a trailing slash.
//
// The "JSON" URL expects to find a JSON array of packages mapped to related vulnerabilities.
type FactoryConfig struct {
// ArchiveURL is a URL to a Debian archive.
//
// Deprecated: Only MirrorURL should be used.
ArchiveURL string `json:"archive_url" yaml:"archive_url"`
MirrorURL string `json:"mirror_url" yaml:"mirror_url"`
// OVALURL is a URL to a collection of OVAL XML documents.
//
// Deprecated: Use JSONURL instead.
OVALURL string `json:"oval_url" yaml:"oval_url"`
// JSONURL is a URL to a JSON vulnerability feed.
JSONURL string `json:"json_url" yaml:"json_url"`
}
var (
// LinkRegexp is a bad regexp to extract link targets.
// This will break if Debian's codenames include a double-quote in the future.
linkRegexp = regexp.MustCompile(`href="([^"]+)"`)
// SkipList is a list of strings that, experimentally, indicate the string
// is not a codename.
skipList = []string{
"-", "Debian", "sid", "stable", "testing", "experimental", "README", "updates", "backports",
}
)
// UpdaterSet implements [driver.UpdaterSetFactory].
func (f *Factory) UpdaterSet(ctx context.Context) (driver.UpdaterSet, error) {
s := driver.NewUpdaterSet()
ds, err := f.findReleases(ctx, f.mirror)
if err != nil {
return s, fmt.Errorf("debian: examining remote: %w", err)
}
// TODO: Consider returning stub if Last-Modified has not updated.
u := &updater{
jsonURL: f.json.String(),
}
for _, d := range ds {
src, err := f.mirror.Parse(path.Join("dists", d.VersionCodeName) + "/")
if err != nil {
return s, fmt.Errorf("debian: unable to construct source URL: %w", err)
}
u.dists = append(u.dists, sourceURL{
distro: d.VersionCodeName,
url: src,
})
}
if err := s.Add(u); err != nil {
return s, fmt.Errorf("debian: unable to add updater: %w", err)
}
return s, nil
}
// FindReleases is split out as a method to make it easier to examine the mirror and the archive.
func (f *Factory) findReleases(ctx context.Context, u *url.URL) ([]*claircore.Distribution, error) {
dir, err := u.Parse("dists/")
if err != nil {
return nil, fmt.Errorf("debian: unable to construct URL: %w", err)
}
req, err := http.NewRequestWithContext(ctx, http.MethodGet, dir.String(), nil)
if err != nil {
return nil, fmt.Errorf("debian: unable to construct request: %w", err)
}
res, err := f.c.Do(req)
if err != nil {
return nil, fmt.Errorf("debian: unable to do request: %w", err)
}
defer res.Body.Close()
switch res.StatusCode {
case http.StatusOK:
default:
return nil, fmt.Errorf("debian: unexpected status fetching %q: %s", dir.String(), res.Status)
}
var buf bytes.Buffer
if _, err := buf.ReadFrom(res.Body); err != nil {
return nil, fmt.Errorf("debian: unable to read dists listing: %w", err)
}
ms := linkRegexp.FindAllStringSubmatch(buf.String(), -1)
var todos []*claircore.Distribution
Listing:
for _, m := range ms {
dist := m[1]
switch {
case dist == "":
continue
case dist[0] == '/', dist[0] == '?':
continue
}
for _, s := range skipList {
if strings.Contains(dist, s) {
continue Listing
}
}
dist = strings.Trim(dist, "/")
rf, err := dir.Parse(path.Join(dist, `Release`))
if err != nil {
zlog.Info(ctx).
Err(err).
Stringer("context", dir).
Str("target", path.Join(dist, `Release`)).
Msg("unable to construct URL")
continue
}
req, err := http.NewRequestWithContext(ctx, http.MethodGet, rf.String(), nil)
if err != nil {
zlog.Info(ctx).
Err(err).
Stringer("url", rf).
Msg("unable to construct request")
continue
}
req.Header.Set("range", "bytes=0-512")
res, err := f.c.Do(req)
if err != nil {
zlog.Info(ctx).
Err(err).
Stringer("url", rf).
Msg("unable to do request")
continue
}
buf.Reset()
buf.ReadFrom(res.Body)
res.Body.Close()
switch res.StatusCode {
case http.StatusPartialContent, http.StatusOK:
case http.StatusNotFound: // Probably extremely old, it's fine.
continue
default:
zlog.Info(ctx).
Str("status", res.Status).
Stringer("url", rf).
Msg("unexpected response")
continue
}
tp := textproto.NewReader(bufio.NewReader(io.MultiReader(&buf, bytes.NewReader([]byte("\r\n\r\n")))))
h, err := tp.ReadMIMEHeader()
if err != nil {
zlog.Info(ctx).Err(err).Msg("unable to read MIME-ish headers")
continue
}
sv := h.Get("Version")
if sv == "" {
zlog.Debug(ctx).Str("dist", dist).Msg("no version assigned, skipping")
continue
}
vs := strings.Split(sv, ".")
if len(vs) == 1 {
zlog.Debug(ctx).Str("dist", dist).Msg("no version assigned, skipping")
continue
}
ver, err := strconv.ParseInt(vs[0], 10, 32)
if err != nil {
zlog.Info(ctx).Err(err).Msg("unable to parse version")
continue
}
todos = append(todos, mkDist(dist, int(ver)))
}
return todos, nil
}
// Updater implements [driver.updater].
type updater struct {
// jsonURL is the URL from which to fetch JSON vulnerability data
jsonURL string
dists []sourceURL
c *http.Client
sm *sourcesMap
}
// UpdaterConfig is the configuration for the updater.
type UpdaterConfig struct {
// Deprecated: Use JSONURL instead.
OVALURL string `json:"url" yaml:"url"`
JSONURL string `json:"json_url" yaml:"json_url"`
// Deprecated: Use DistsURLs instead.
DistsURL string `json:"dists_url" yaml:"dists_url"`
DistsURLs []sourceURL `json:"dists_urls" yaml:"dists_urls"`
}
// Name implements [driver.Updater].
func (u *updater) Name() string {
return "debian/updater"
}
// Configure implements [driver.Configurable].
func (u *updater) Configure(ctx context.Context, f driver.ConfigUnmarshaler, c *http.Client) error {
ctx = zlog.ContextWithValues(ctx, "component", "debian/Updater.Configure")
u.c = c
var cfg UpdaterConfig
if err := f(&cfg); err != nil {
return err
}
if cfg.DistsURL != "" || cfg.OVALURL != "" {
zlog.Error(ctx).Msg("configured with deprecated URLs")
return fmt.Errorf("debian: neither url nor dists_url should be used anymore; use json_url and dists_urls instead")
}
if cfg.JSONURL != "" {
u.jsonURL = cfg.JSONURL
zlog.Info(ctx).
Msg("configured JSON database URL")
}
if len(cfg.DistsURLs) != 0 {
u.dists = cfg.DistsURLs
zlog.Info(ctx).
Msg("configured dists URLs")
}
var srcs []sourceURL
for _, dist := range u.dists {
src, err := url.Parse(dist.url.String())
if err != nil {
return fmt.Errorf("debian: unable to parse dist URL: %w", err)
}
srcs = append(srcs, sourceURL{distro: dist.distro, url: src})
}
u.sm = newSourcesMap(u.c, srcs)
return nil
}
// Fetch implements [driver.Fetcher].
func (u *updater) Fetch(ctx context.Context, fingerprint driver.Fingerprint) (io.ReadCloser, driver.Fingerprint, error) {
ctx = zlog.ContextWithValues(ctx,
"component", "debian/Updater.Fetch",
"database", u.jsonURL)
req, err := http.NewRequestWithContext(ctx, http.MethodGet, u.jsonURL, nil)
if err != nil {
return nil, "", fmt.Errorf("failed to create request")
}
if fingerprint != "" {
req.Header.Set("If-Modified-Since", string(fingerprint))
}
// fetch JSON database
resp, err := u.c.Do(req)
if resp != nil {
defer resp.Body.Close()
}
if err != nil {
return nil, "", fmt.Errorf("failed to retrieve JSON database: %v", err)
}
fp := resp.Header.Get("Last-Modified")
switch resp.StatusCode {
case http.StatusOK:
if fingerprint == "" || fp != string(fingerprint) {
zlog.Info(ctx).Msg("fetching latest JSON database")
break
}
fallthrough
case http.StatusNotModified:
return nil, fingerprint, driver.Unchanged
default:
return nil, "", fmt.Errorf("unexpected response: %v", resp.Status)
}
f, err := tmp.NewFile("", "debian.")
if err != nil {
return nil, "", err
}
var success bool
defer func() {
if !success {
if err := f.Close(); err != nil {
zlog.Warn(ctx).Err(err).Msg("unable to close spool")
}
}
}()
if _, err := io.Copy(f, resp.Body); err != nil {
return nil, "", fmt.Errorf("failed to read http body: %w", err)
}
if _, err := f.Seek(0, io.SeekStart); err != nil {
return nil, "", fmt.Errorf("failed to seek body: %w", err)
}
zlog.Info(ctx).Msg("fetched latest json database successfully")
if err := u.sm.Update(ctx); err != nil {
return nil, "", fmt.Errorf("could not update source to binary map: %w", err)
}
zlog.Info(ctx).Msg("updated the debian source to binary map successfully")
success = true
return f, driver.Fingerprint(fp), err
}
| {
f := &Factory{}
return f, nil
} | identifier_body |
updater.go | package debian
import (
"bufio"
"bytes"
"context"
"fmt"
"io"
"net/http"
"net/textproto"
"net/url"
"path"
"regexp"
"strconv"
"strings"
"github.com/quay/claircore"
"github.com/quay/zlog"
"github.com/quay/claircore/libvuln/driver"
"github.com/quay/claircore/pkg/tmp"
)
//doc:url updater
const (
defaultMirror = `https://deb.debian.org/`
defaultJSON = `https://security-tracker.debian.org/tracker/data/json`
)
var (
_ driver.UpdaterSetFactory = (*Factory)(nil)
_ driver.Configurable = (*Factory)(nil)
_ driver.Updater = (*updater)(nil)
_ driver.Configurable = (*updater)(nil)
)
// Factory creates Updaters for all Debian distributions that exist
// in the mirror, and have entries in the JSON security tracker.
//
// [Configure] must be called before [UpdaterSet].
type Factory struct {
c *http.Client
mirror *url.URL
json *url.URL
}
// NewFactory constructs a Factory.
//
// [Configure] must be called before [UpdaterSet].
func NewFactory(_ context.Context) (*Factory, error) {
f := &Factory{}
return f, nil
}
// Configure implements [driver.Configurable].
func (f *Factory) Configure(_ context.Context, cf driver.ConfigUnmarshaler, c *http.Client) error {
f.c = c
var cfg FactoryConfig
if err := cf(&cfg); err != nil {
return fmt.Errorf("debian: factory configuration error: %w", err)
}
if cfg.ArchiveURL != "" || cfg.OVALURL != "" {
return fmt.Errorf("debian: neither archive_url nor oval_url should be populated anymore; use json_url and mirror_url instead")
}
u, err := url.Parse(defaultMirror)
if cfg.MirrorURL != "" {
u, err = url.Parse(cfg.MirrorURL)
}
if err != nil {
return fmt.Errorf("debian: bad mirror URL: %w", err)
}
f.mirror, err = u.Parse("debian/")
if err != nil {
return fmt.Errorf("debian: bad mirror URL: %w", err)
}
f.json, err = url.Parse(defaultJSON)
if cfg.JSONURL != "" {
f.json, err = url.Parse(cfg.JSONURL)
}
if err != nil {
return fmt.Errorf("debian: bad JSON URL: %w", err)
}
return nil
}
// FactoryConfig is the configuration honored by the Factory.
//
// The "mirror" URLs expect to find HTML at "dists/" formatted like
// the HTML from the Debian project (that is to say, HTML containing relative links
// to distribution directories).
//
// The "mirror" URL needs a trailing slash.
//
// The "JSON" URL expects to find a JSON array of packages mapped to related vulnerabilities.
type FactoryConfig struct {
// ArchiveURL is a URL to a Debian archive.
//
// Deprecated: Only MirrorURL should be used.
ArchiveURL string `json:"archive_url" yaml:"archive_url"`
MirrorURL string `json:"mirror_url" yaml:"mirror_url"`
// OVALURL is a URL to a collection of OVAL XML documents.
//
// Deprecated: Use JSONURL instead.
OVALURL string `json:"oval_url" yaml:"oval_url"`
// JSONURL is a URL to a JSON vulnerability feed.
JSONURL string `json:"json_url" yaml:"json_url"`
}
var (
// LinkRegexp is a bad regexp to extract link targets.
// This will break if Debian's codenames include a double-quote in the future.
linkRegexp = regexp.MustCompile(`href="([^"]+)"`)
// SkipList is a list of strings that, experimentally, indicate the string
// is not a codename.
skipList = []string{
"-", "Debian", "sid", "stable", "testing", "experimental", "README", "updates", "backports",
}
)
// UpdaterSet implements [driver.UpdaterSetFactory].
func (f *Factory) UpdaterSet(ctx context.Context) (driver.UpdaterSet, error) {
s := driver.NewUpdaterSet()
ds, err := f.findReleases(ctx, f.mirror)
if err != nil {
return s, fmt.Errorf("debian: examining remote: %w", err)
}
// TODO: Consider returning stub if Last-Modified has not updated.
u := &updater{
jsonURL: f.json.String(),
}
for _, d := range ds {
src, err := f.mirror.Parse(path.Join("dists", d.VersionCodeName) + "/")
if err != nil {
return s, fmt.Errorf("debian: unable to construct source URL: %w", err)
}
u.dists = append(u.dists, sourceURL{
distro: d.VersionCodeName,
url: src,
})
}
if err := s.Add(u); err != nil {
return s, fmt.Errorf("debian: unable to add updater: %w", err)
}
return s, nil
}
// FindReleases is split out as a method to make it easier to examine the mirror and the archive.
func (f *Factory) findReleases(ctx context.Context, u *url.URL) ([]*claircore.Distribution, error) {
dir, err := u.Parse("dists/")
if err != nil {
return nil, fmt.Errorf("debian: unable to construct URL: %w", err)
}
req, err := http.NewRequestWithContext(ctx, http.MethodGet, dir.String(), nil)
if err != nil {
return nil, fmt.Errorf("debian: unable to construct request: %w", err)
}
res, err := f.c.Do(req)
if err != nil {
return nil, fmt.Errorf("debian: unable to do request: %w", err)
}
defer res.Body.Close()
switch res.StatusCode {
case http.StatusOK:
default:
return nil, fmt.Errorf("debian: unexpected status fetching %q: %s", dir.String(), res.Status)
}
var buf bytes.Buffer
if _, err := buf.ReadFrom(res.Body); err != nil {
return nil, fmt.Errorf("debian: unable to read dists listing: %w", err)
}
ms := linkRegexp.FindAllStringSubmatch(buf.String(), -1)
var todos []*claircore.Distribution
Listing:
for _, m := range ms {
dist := m[1]
switch {
case dist == "":
continue
case dist[0] == '/', dist[0] == '?':
continue
}
for _, s := range skipList {
if strings.Contains(dist, s) {
continue Listing
}
}
dist = strings.Trim(dist, "/")
rf, err := dir.Parse(path.Join(dist, `Release`))
if err != nil {
zlog.Info(ctx).
Err(err).
Stringer("context", dir).
Str("target", path.Join(dist, `Release`)).
Msg("unable to construct URL")
continue
}
req, err := http.NewRequestWithContext(ctx, http.MethodGet, rf.String(), nil)
if err != nil {
zlog.Info(ctx).
Err(err).
Stringer("url", rf).
Msg("unable to construct request")
continue
}
req.Header.Set("range", "bytes=0-512")
res, err := f.c.Do(req)
if err != nil {
zlog.Info(ctx).
Err(err).
Stringer("url", rf).
Msg("unable to do request")
continue
}
buf.Reset()
buf.ReadFrom(res.Body)
res.Body.Close()
switch res.StatusCode {
case http.StatusPartialContent, http.StatusOK:
case http.StatusNotFound: // Probably extremely old, it's fine.
continue
default:
zlog.Info(ctx).
Str("status", res.Status).
Stringer("url", rf).
Msg("unexpected response")
continue
}
tp := textproto.NewReader(bufio.NewReader(io.MultiReader(&buf, bytes.NewReader([]byte("\r\n\r\n")))))
h, err := tp.ReadMIMEHeader()
if err != nil {
zlog.Info(ctx).Err(err).Msg("unable to read MIME-ish headers")
continue
}
sv := h.Get("Version")
if sv == "" {
zlog.Debug(ctx).Str("dist", dist).Msg("no version assigned, skipping")
continue
}
vs := strings.Split(sv, ".")
if len(vs) == 1 {
zlog.Debug(ctx).Str("dist", dist).Msg("no version assigned, skipping")
continue
}
ver, err := strconv.ParseInt(vs[0], 10, 32)
if err != nil {
zlog.Info(ctx).Err(err).Msg("unable to parse version")
continue
}
todos = append(todos, mkDist(dist, int(ver)))
}
return todos, nil
}
// Updater implements [driver.updater].
type updater struct {
// jsonURL is the URL from which to fetch JSON vulnerability data
jsonURL string
dists []sourceURL
c *http.Client
sm *sourcesMap
}
// UpdaterConfig is the configuration for the updater.
type UpdaterConfig struct {
// Deprecated: Use JSONURL instead.
OVALURL string `json:"url" yaml:"url"`
JSONURL string `json:"json_url" yaml:"json_url"`
// Deprecated: Use DistsURLs instead.
DistsURL string `json:"dists_url" yaml:"dists_url"`
DistsURLs []sourceURL `json:"dists_urls" yaml:"dists_urls"`
}
// Name implements [driver.Updater].
func (u *updater) Name() string {
return "debian/updater"
}
// Configure implements [driver.Configurable].
func (u *updater) Configure(ctx context.Context, f driver.ConfigUnmarshaler, c *http.Client) error {
ctx = zlog.ContextWithValues(ctx, "component", "debian/Updater.Configure")
u.c = c
var cfg UpdaterConfig
if err := f(&cfg); err != nil {
return err
}
| if cfg.DistsURL != "" || cfg.OVALURL != "" {
zlog.Error(ctx).Msg("configured with deprecated URLs")
return fmt.Errorf("debian: neither url nor dists_url should be used anymore; use json_url and dists_urls instead")
}
if cfg.JSONURL != "" {
u.jsonURL = cfg.JSONURL
zlog.Info(ctx).
Msg("configured JSON database URL")
}
if len(cfg.DistsURLs) != 0 {
u.dists = cfg.DistsURLs
zlog.Info(ctx).
Msg("configured dists URLs")
}
var srcs []sourceURL
for _, dist := range u.dists {
src, err := url.Parse(dist.url.String())
if err != nil {
return fmt.Errorf("debian: unable to parse dist URL: %w", err)
}
srcs = append(srcs, sourceURL{distro: dist.distro, url: src})
}
u.sm = newSourcesMap(u.c, srcs)
return nil
}
// Fetch implements [driver.Fetcher].
func (u *updater) Fetch(ctx context.Context, fingerprint driver.Fingerprint) (io.ReadCloser, driver.Fingerprint, error) {
ctx = zlog.ContextWithValues(ctx,
"component", "debian/Updater.Fetch",
"database", u.jsonURL)
req, err := http.NewRequestWithContext(ctx, http.MethodGet, u.jsonURL, nil)
if err != nil {
return nil, "", fmt.Errorf("failed to create request")
}
if fingerprint != "" {
req.Header.Set("If-Modified-Since", string(fingerprint))
}
// fetch JSON database
resp, err := u.c.Do(req)
if resp != nil {
defer resp.Body.Close()
}
if err != nil {
return nil, "", fmt.Errorf("failed to retrieve JSON database: %v", err)
}
fp := resp.Header.Get("Last-Modified")
switch resp.StatusCode {
case http.StatusOK:
if fingerprint == "" || fp != string(fingerprint) {
zlog.Info(ctx).Msg("fetching latest JSON database")
break
}
fallthrough
case http.StatusNotModified:
return nil, fingerprint, driver.Unchanged
default:
return nil, "", fmt.Errorf("unexpected response: %v", resp.Status)
}
f, err := tmp.NewFile("", "debian.")
if err != nil {
return nil, "", err
}
var success bool
defer func() {
if !success {
if err := f.Close(); err != nil {
zlog.Warn(ctx).Err(err).Msg("unable to close spool")
}
}
}()
if _, err := io.Copy(f, resp.Body); err != nil {
return nil, "", fmt.Errorf("failed to read http body: %w", err)
}
if _, err := f.Seek(0, io.SeekStart); err != nil {
return nil, "", fmt.Errorf("failed to seek body: %w", err)
}
zlog.Info(ctx).Msg("fetched latest json database successfully")
if err := u.sm.Update(ctx); err != nil {
return nil, "", fmt.Errorf("could not update source to binary map: %w", err)
}
zlog.Info(ctx).Msg("updated the debian source to binary map successfully")
success = true
return f, driver.Fingerprint(fp), err
} | random_line_split | |
updater.go | package debian
import (
"bufio"
"bytes"
"context"
"fmt"
"io"
"net/http"
"net/textproto"
"net/url"
"path"
"regexp"
"strconv"
"strings"
"github.com/quay/claircore"
"github.com/quay/zlog"
"github.com/quay/claircore/libvuln/driver"
"github.com/quay/claircore/pkg/tmp"
)
//doc:url updater
const (
defaultMirror = `https://deb.debian.org/`
defaultJSON = `https://security-tracker.debian.org/tracker/data/json`
)
var (
_ driver.UpdaterSetFactory = (*Factory)(nil)
_ driver.Configurable = (*Factory)(nil)
_ driver.Updater = (*updater)(nil)
_ driver.Configurable = (*updater)(nil)
)
// Factory creates Updaters for all Debian distributions that exist
// in the mirror, and have entries in the JSON security tracker.
//
// [Configure] must be called before [UpdaterSet].
type Factory struct {
c *http.Client
mirror *url.URL
json *url.URL
}
// NewFactory constructs a Factory.
//
// [Configure] must be called before [UpdaterSet].
func NewFactory(_ context.Context) (*Factory, error) {
f := &Factory{}
return f, nil
}
// Configure implements [driver.Configurable].
func (f *Factory) Configure(_ context.Context, cf driver.ConfigUnmarshaler, c *http.Client) error {
f.c = c
var cfg FactoryConfig
if err := cf(&cfg); err != nil {
return fmt.Errorf("debian: factory configuration error: %w", err)
}
if cfg.ArchiveURL != "" || cfg.OVALURL != "" {
return fmt.Errorf("debian: neither archive_url nor oval_url should be populated anymore; use json_url and mirror_url instead")
}
u, err := url.Parse(defaultMirror)
if cfg.MirrorURL != "" {
u, err = url.Parse(cfg.MirrorURL)
}
if err != nil {
return fmt.Errorf("debian: bad mirror URL: %w", err)
}
f.mirror, err = u.Parse("debian/")
if err != nil {
return fmt.Errorf("debian: bad mirror URL: %w", err)
}
f.json, err = url.Parse(defaultJSON)
if cfg.JSONURL != "" {
f.json, err = url.Parse(cfg.JSONURL)
}
if err != nil {
return fmt.Errorf("debian: bad JSON URL: %w", err)
}
return nil
}
// FactoryConfig is the configuration honored by the Factory.
//
// The "mirror" URLs expect to find HTML at "dists/" formatted like
// the HTML from the Debian project (that is to say, HTML containing relative links
// to distribution directories).
//
// The "mirror" URL needs a trailing slash.
//
// The "JSON" URL expects to find a JSON array of packages mapped to related vulnerabilities.
type FactoryConfig struct {
// ArchiveURL is a URL to a Debian archive.
//
// Deprecated: Only MirrorURL should be used.
ArchiveURL string `json:"archive_url" yaml:"archive_url"`
MirrorURL string `json:"mirror_url" yaml:"mirror_url"`
// OVALURL is a URL to a collection of OVAL XML documents.
//
// Deprecated: Use JSONURL instead.
OVALURL string `json:"oval_url" yaml:"oval_url"`
// JSONURL is a URL to a JSON vulnerability feed.
JSONURL string `json:"json_url" yaml:"json_url"`
}
var (
// LinkRegexp is a bad regexp to extract link targets.
// This will break if Debian's codenames include a double-quote in the future.
linkRegexp = regexp.MustCompile(`href="([^"]+)"`)
// SkipList is a list of strings that, experimentally, indicate the string
// is not a codename.
skipList = []string{
"-", "Debian", "sid", "stable", "testing", "experimental", "README", "updates", "backports",
}
)
// UpdaterSet implements [driver.UpdaterSetFactory].
func (f *Factory) UpdaterSet(ctx context.Context) (driver.UpdaterSet, error) {
s := driver.NewUpdaterSet()
ds, err := f.findReleases(ctx, f.mirror)
if err != nil {
return s, fmt.Errorf("debian: examining remote: %w", err)
}
// TODO: Consider returning stub if Last-Modified has not updated.
u := &updater{
jsonURL: f.json.String(),
}
for _, d := range ds {
src, err := f.mirror.Parse(path.Join("dists", d.VersionCodeName) + "/")
if err != nil {
return s, fmt.Errorf("debian: unable to construct source URL: %w", err)
}
u.dists = append(u.dists, sourceURL{
distro: d.VersionCodeName,
url: src,
})
}
if err := s.Add(u); err != nil {
return s, fmt.Errorf("debian: unable to add updater: %w", err)
}
return s, nil
}
// FindReleases is split out as a method to make it easier to examine the mirror and the archive.
func (f *Factory) findReleases(ctx context.Context, u *url.URL) ([]*claircore.Distribution, error) {
dir, err := u.Parse("dists/")
if err != nil {
return nil, fmt.Errorf("debian: unable to construct URL: %w", err)
}
req, err := http.NewRequestWithContext(ctx, http.MethodGet, dir.String(), nil)
if err != nil {
return nil, fmt.Errorf("debian: unable to construct request: %w", err)
}
res, err := f.c.Do(req)
if err != nil {
return nil, fmt.Errorf("debian: unable to do request: %w", err)
}
defer res.Body.Close()
switch res.StatusCode {
case http.StatusOK:
default:
return nil, fmt.Errorf("debian: unexpected status fetching %q: %s", dir.String(), res.Status)
}
var buf bytes.Buffer
if _, err := buf.ReadFrom(res.Body); err != nil {
return nil, fmt.Errorf("debian: unable to read dists listing: %w", err)
}
ms := linkRegexp.FindAllStringSubmatch(buf.String(), -1)
var todos []*claircore.Distribution
Listing:
for _, m := range ms {
dist := m[1]
switch {
case dist == "":
continue
case dist[0] == '/', dist[0] == '?':
continue
}
for _, s := range skipList {
if strings.Contains(dist, s) {
continue Listing
}
}
dist = strings.Trim(dist, "/")
rf, err := dir.Parse(path.Join(dist, `Release`))
if err != nil {
zlog.Info(ctx).
Err(err).
Stringer("context", dir).
Str("target", path.Join(dist, `Release`)).
Msg("unable to construct URL")
continue
}
req, err := http.NewRequestWithContext(ctx, http.MethodGet, rf.String(), nil)
if err != nil {
zlog.Info(ctx).
Err(err).
Stringer("url", rf).
Msg("unable to construct request")
continue
}
req.Header.Set("range", "bytes=0-512")
res, err := f.c.Do(req)
if err != nil {
zlog.Info(ctx).
Err(err).
Stringer("url", rf).
Msg("unable to do request")
continue
}
buf.Reset()
buf.ReadFrom(res.Body)
res.Body.Close()
switch res.StatusCode {
case http.StatusPartialContent, http.StatusOK:
case http.StatusNotFound: // Probably extremely old, it's fine.
continue
default:
zlog.Info(ctx).
Str("status", res.Status).
Stringer("url", rf).
Msg("unexpected response")
continue
}
tp := textproto.NewReader(bufio.NewReader(io.MultiReader(&buf, bytes.NewReader([]byte("\r\n\r\n")))))
h, err := tp.ReadMIMEHeader()
if err != nil {
zlog.Info(ctx).Err(err).Msg("unable to read MIME-ish headers")
continue
}
sv := h.Get("Version")
if sv == "" {
zlog.Debug(ctx).Str("dist", dist).Msg("no version assigned, skipping")
continue
}
vs := strings.Split(sv, ".")
if len(vs) == 1 {
zlog.Debug(ctx).Str("dist", dist).Msg("no version assigned, skipping")
continue
}
ver, err := strconv.ParseInt(vs[0], 10, 32)
if err != nil {
zlog.Info(ctx).Err(err).Msg("unable to parse version")
continue
}
todos = append(todos, mkDist(dist, int(ver)))
}
return todos, nil
}
// Updater implements [driver.updater].
type updater struct {
// jsonURL is the URL from which to fetch JSON vulnerability data
jsonURL string
dists []sourceURL
c *http.Client
sm *sourcesMap
}
// UpdaterConfig is the configuration for the updater.
type UpdaterConfig struct {
// Deprecated: Use JSONURL instead.
OVALURL string `json:"url" yaml:"url"`
JSONURL string `json:"json_url" yaml:"json_url"`
// Deprecated: Use DistsURLs instead.
DistsURL string `json:"dists_url" yaml:"dists_url"`
DistsURLs []sourceURL `json:"dists_urls" yaml:"dists_urls"`
}
// Name implements [driver.Updater].
func (u *updater) Name() string {
return "debian/updater"
}
// Configure implements [driver.Configurable].
func (u *updater) Configure(ctx context.Context, f driver.ConfigUnmarshaler, c *http.Client) error {
ctx = zlog.ContextWithValues(ctx, "component", "debian/Updater.Configure")
u.c = c
var cfg UpdaterConfig
if err := f(&cfg); err != nil {
return err
}
if cfg.DistsURL != "" || cfg.OVALURL != "" {
zlog.Error(ctx).Msg("configured with deprecated URLs")
return fmt.Errorf("debian: neither url nor dists_url should be used anymore; use json_url and dists_urls instead")
}
if cfg.JSONURL != "" {
u.jsonURL = cfg.JSONURL
zlog.Info(ctx).
Msg("configured JSON database URL")
}
if len(cfg.DistsURLs) != 0 {
u.dists = cfg.DistsURLs
zlog.Info(ctx).
Msg("configured dists URLs")
}
var srcs []sourceURL
for _, dist := range u.dists {
src, err := url.Parse(dist.url.String())
if err != nil {
return fmt.Errorf("debian: unable to parse dist URL: %w", err)
}
srcs = append(srcs, sourceURL{distro: dist.distro, url: src})
}
u.sm = newSourcesMap(u.c, srcs)
return nil
}
// Fetch implements [driver.Fetcher].
func (u *updater) Fetch(ctx context.Context, fingerprint driver.Fingerprint) (io.ReadCloser, driver.Fingerprint, error) {
ctx = zlog.ContextWithValues(ctx,
"component", "debian/Updater.Fetch",
"database", u.jsonURL)
req, err := http.NewRequestWithContext(ctx, http.MethodGet, u.jsonURL, nil)
if err != nil {
return nil, "", fmt.Errorf("failed to create request")
}
if fingerprint != "" {
req.Header.Set("If-Modified-Since", string(fingerprint))
}
// fetch JSON database
resp, err := u.c.Do(req)
if resp != nil {
defer resp.Body.Close()
}
if err != nil {
return nil, "", fmt.Errorf("failed to retrieve JSON database: %v", err)
}
fp := resp.Header.Get("Last-Modified")
switch resp.StatusCode {
case http.StatusOK:
if fingerprint == "" || fp != string(fingerprint) |
fallthrough
case http.StatusNotModified:
return nil, fingerprint, driver.Unchanged
default:
return nil, "", fmt.Errorf("unexpected response: %v", resp.Status)
}
f, err := tmp.NewFile("", "debian.")
if err != nil {
return nil, "", err
}
var success bool
defer func() {
if !success {
if err := f.Close(); err != nil {
zlog.Warn(ctx).Err(err).Msg("unable to close spool")
}
}
}()
if _, err := io.Copy(f, resp.Body); err != nil {
return nil, "", fmt.Errorf("failed to read http body: %w", err)
}
if _, err := f.Seek(0, io.SeekStart); err != nil {
return nil, "", fmt.Errorf("failed to seek body: %w", err)
}
zlog.Info(ctx).Msg("fetched latest json database successfully")
if err := u.sm.Update(ctx); err != nil {
return nil, "", fmt.Errorf("could not update source to binary map: %w", err)
}
zlog.Info(ctx).Msg("updated the debian source to binary map successfully")
success = true
return f, driver.Fingerprint(fp), err
}
| {
zlog.Info(ctx).Msg("fetching latest JSON database")
break
} | conditional_block |
GA.js |
/** 任务集合(tasks[i]表示第i个任务的长度) */
var tasks = [];
// 任务数量
var taskNum = 100;
/** 处理节点集合(nodes[i]表示第i个处理节点的处理速度) */
var nodes = [];
// 处理节点数量
var nodeNum = 10;
/** 任务长度取值范围 */
var taskLengthRange = [10,100];
/** 节点处理速度取值范围 */
var nodeSpeendRange = [10,100];
/** 任务处理时间矩阵(记录单个任务在不同节点上的处理时间) */
var timeMatrix = [];
/** 迭代次数 */
var iteratorNum = 100;
/** 染色体数量 */
var chromosomeNum = 10;
/** 适应度矩阵(下标:染色体编号、值:该染色体的适应度) */
var adaptability = [];
/** 自然选择的概率矩阵(下标:染色体编号、值:该染色体被选择的概率) */
var selectionProbability = [];
/** 染色体复制的比例(每代中保留适应度较高的染色体直接成为下一代) */
var cp = 0.2;
/** 参与交叉变异的染色体数量 */
var crossoverMutationNum;
/** 任务处理时间结果集([迭代次数][染色体编号]) */
var resultData = [];
/**
* 初始化遗传算法
* @param _taskNum 任务数量
* @param _nodeNum 节点数量
* @param _iteratorNum 迭代次数
* @param _chromosomeNum 染色体数量
* @param _cp 染色体复制的比例
*/
(function initGA(_taskNum, _nodeNum, _iteratorNum, _chromosomeNum, _cp) {
// 参数校验
if (!checkParam(_taskNum, _nodeNum, _iteratorNum, _chromosomeNum, _cp)) {
return;
}
// 初始化任务集合
tasks = initRandomArray(_taskNum, taskLengthRange);
// 初始化节点集合
nodes = initRandomArray(_nodeNum, nodeSpeendRange);
debugger;
// 执行遗传算法
ga();
// 渲染视图
draw(resultData);
})(100, 10, 100, 100, 0.2);
/**
* 遗传算法
*/
function ga() {
// 初始化任务执行时间矩阵
initTimeMatrix(tasks, nodes, timeMatrix);
// 迭代搜索
gaSearch(iteratorNum, chromosomeNum);
}
/**
* 参数校验
* @param _taskNum 任务数量
* @param _nodeNum 节点数量
* @param _iteratorNum 迭代次数
* @param _chromosomeNum 染色体数量
* @param _cp 染色体复制的比例
*/
function checkParam(_taskNum, _nodeNum, _iteratorNum, _chromosomeNum, _cp) {
if (isNaN(_taskNum)) {
alert("任务数量必须是数字!");
return false;
}
if (isNaN(_nodeNum)) {
alert("节点数量必须是数字!");
return false;
}
if (isNaN(_iteratorNum)) {
alert("迭代次数必须是数字!");
return false;
}
if (isNaN(_chromosomeNum)) {
alert("染色体数量必须是数字!");
return false;
}
if (isNaN(_cp) || _cp<0 || _cp>1) {
alert("cp值必须为数字!并且在0~1之间!");
return false;
}
taskNum = _taskNum;
nodeNum = _nodeNum;
iteratorNum = _iteratorNum;
chromosomeNum = _chromosomeNum;
cp = _cp;
crossoverMutationNum = chromosomeNum - chromosomeNum*_cp;
return true;
}
/**
* 计算 染色体适应度
* @param chromosomeMatrix
*/
function calAdaptability(chromosomeMatrix) {
adaptability = [];
// 计算每条染色体的任务长度
var chromosomeTaskLengths = calTaskLengthOfEachChromosome(chromosomeMatrix);
for (var i=0; i<chromosomeTaskLengths.length; ++i) {
// 适应度 = 1/任务长度
adaptability.push(1/chromosomeTaskLengths[i]);
}
}
/**
* 计算自然选择概率
* @param adaptability
*/
function calSelectionProbability(adaptability) {
selectionProbability = [];
// 计算适应度总和
var sumAdaptability = 0;
for (var i=0; i<chromosomeNum; i++) {
sumAdaptability += adaptability[i];
}
// 计算每条染色体的选择概率
for (var i=0; i<chromosomeNum; i++) {
selectionProbability.push(adaptability[i] / sumAdaptability);
}
}
/**
* 迭代搜索
* @param iteratorNum 迭代次数
* @param chromosomeNum 染色体数量
*/
function gaSearch(iteratorNum, chromosomeNum) {
// 初始化第一代染色体
var chromosomeMatrix = createGeneration();
// 迭代繁衍
for (var itIndex=1; itIndex<iteratorNum; itIndex++) {
// 计算上一代各条染色体的适应度
calAdaptability(chromosomeMatrix);
// 计算自然选择概率
calSelectionProbability(adaptability);
// 生成新一代染色体
chromosomeMatrix = createGeneration(ch |
}
/**
* 交叉生成{crossoverMutationNum}条染色体
* @param chromosomeMatrix 上一代染色体矩阵
*/
function cross(chromosomeMatrix) {
var newChromosomeMatrix = [];
for (var chromosomeIndex=0; chromosomeIndex<crossoverMutationNum; chromosomeIndex++) {
// 采用轮盘赌选择父母染色体
var chromosomeBaba = chromosomeMatrix[RWS(selectionProbability)].slice(0);
var chromosomeMama = chromosomeMatrix[RWS(selectionProbability)].slice(0);
// 交叉
var crossIndex = random(0, taskNum-1);
chromosomeBaba.splice(crossIndex);
chromosomeBaba = chromosomeBaba.concat(chromosomeMama.slice(crossIndex));
// debugger;
newChromosomeMatrix.push(chromosomeBaba);
}
return newChromosomeMatrix;
}
/**
* 从数组中寻找最大的n个元素
* @param array
* @param n
*/
function maxN(array, n) {
// 将一切数组升级成二维数组,二维数组的每一行都有两个元素构成[原一位数组的下标,值]
var matrix = [];
for (var i=0; i<array.length; i++) {
matrix.push([i, array[i]]);
}
// 对二维数组排序
for (var i=0; i<n; i++) {
for (var j=1; j<matrix.length; j++) {
if (matrix[j-1][1] > matrix[j][1]) {
var temp = matrix[j-1];
matrix[j-1] = matrix[j];
matrix[j] = temp;
}
}
}
// 取最大的n个元素
var maxIndexArray = [];
for (var i=matrix.length-1; i>matrix.length-n-1; i--) {
maxIndexArray.push(matrix[i][0]);
}
return maxIndexArray;
}
/**
* 复制(复制上一代中优良的染色体)
* @param chromosomeMatrix 上一代染色体矩阵
* @param newChromosomeMatrix 新一代染色体矩阵
*/
function copy(chromosomeMatrix, newChromosomeMatrix) {
// 寻找适应度最高的N条染色体的下标(N=染色体数量*复制比例)
var chromosomeIndexArr = maxN(adaptability, chromosomeNum*cp);
// 复制
for (var i=0; i<chromosomeIndexArr.length; i++) {
var chromosome = chromosomeMatrix[chromosomeIndexArr[i]];
newChromosomeMatrix.push(chromosome);
}
return newChromosomeMatrix;
}
/**
* 计算所有染色体的任务处理时间
* @param chromosomeMatrix
*/
function calTime_oneIt(chromosomeMatrix) {
resultData.push(calTaskLengthOfEachChromosome(chromosomeMatrix));
}
/**
* 计算每条染色体的任务长度
* @param chromosomeMatrix
*/
function calTaskLengthOfEachChromosome(chromosomeMatrix) {
var chromosomeTaskLengths = [];
for (var chromosomeIndex=0; chromosomeIndex<chromosomeNum; chromosomeIndex++) {
var maxLength = Number.MIN_VALUE;
for (var nodeIndex=0; nodeIndex<nodeNum; nodeIndex++) {
var sumLength = 0;
for (var taskIndex=0; taskIndex<taskNum; taskIndex++) {
if (chromosomeMatrix[chromosomeIndex][taskIndex] == nodeIndex) {
sumLength += timeMatrix[taskIndex][nodeIndex];
}
}
if (sumLength > maxLength) {
maxLength = sumLength;
}
}
chromosomeTaskLengths.push(maxLength);
}
return chromosomeTaskLengths;
}
/**
* 繁衍新一代染色体
* @param chromosomeMatrix 上一代染色体
*/
function createGeneration(chromosomeMatrix) {
// 第一代染色体,随机生成
if (chromosomeMatrix == null || chromosomeMatrix == undefined) {
var newChromosomeMatrix = [];
for (var chromosomeIndex=0; chromosomeIndex<chromosomeNum; chromosomeIndex++) {
var chromosomeMatrix_i = [];
for (var taskIndex=0; taskIndex<taskNum; taskIndex++) {
chromosomeMatrix_i.push(random(0, nodeNum-1));
}
newChromosomeMatrix.push(chromosomeMatrix_i);
}
// 计算当前染色体的任务处理时间
calTime_oneIt(newChromosomeMatrix);
return newChromosomeMatrix;
}
// 交叉生成{crossoverMutationNum}条染色体
var newChromosomeMatrix = cross(chromosomeMatrix);
// 变异
newChromosomeMatrix = mutation(newChromosomeMatrix);
// 复制
newChromosomeMatrix = copy(chromosomeMatrix, newChromosomeMatrix);
// 计算当前染色体的任务处理时间
calTime_oneIt(newChromosomeMatrix);
return newChromosomeMatrix;
}
/**
* 轮盘赌算法
* @param selectionProbability 概率数组(下标:元素编号、值:该元素对应的概率)
* @returns {number} 返回概率数组中某一元素的下标
*/
function RWS(selectionProbability) {
var sum = 0;
var rand = Math.random();
for (var i=0; i<selectionProbability.length; i++) {
sum += selectionProbability[i];
if (sum >= rand) {
return i;
}
}
}
/**
* 变异
* @param newChromosomeMatrix 新一代染色体矩阵
*/
function mutation(newChromosomeMatrix) {
// 随机找一条染色体
var chromosomeIndex = random(0, crossoverMutationNum-1);
// 随机找一个任务
var taskIndex = random(0, taskNum-1);
// 随机找一个节点
var nodeIndex = random(0, nodeNum-1);
newChromosomeMatrix[chromosomeIndex][taskIndex] = nodeIndex;
return newChromosomeMatrix;
}
/**
* 渲染视图
* @param resultData
*/
function draw(resultData) {
// 基于准备好的dom,初始化echarts实例
var myChart = echarts.init(document.getElementById('main'));
// 指定图表的配置项和数据
var option = {
title: {
text: '基于遗传算法的负载均衡调度策略'
},
tooltip : {
trigger: 'axis',
showDelay : 0,
axisPointer:{
show: true,
type : 'cross',
lineStyle: {
type : 'dashed',
width : 1
}
},
zlevel: 1
},
legend: {
data:['遗传算法']
},
toolbox: {
show : true,
feature : {
mark : {show: true},
dataZoom : {show: true},
dataView : {show: true, readOnly: false},
restore : {show: true},
saveAsImage : {show: true}
}
},
xAxis : [
{
type : 'value',
scale:true,
name: '迭代次数'
}
],
yAxis : [
{
type : 'value',
scale:true,
name: '任务处理时间'
}
],
series : [
{
name:'遗传算法',
type:'scatter',
large: true,
symbolSize: 3,
data: (function () {
var d = [];
for (var itIndex=0; itIndex<iteratorNum; itIndex++) {
for (var chromosomeIndex=0; chromosomeIndex<chromosomeNum; chromosomeIndex++) {
d.push([itIndex, resultData[itIndex][chromosomeIndex]]);
}
}
return d;
})()
}
]
};
// 使用刚指定的配置项和数据显示图表。
myChart.setOption(option);
} | romosomeMatrix);
} | identifier_name |
GA.js | /** 任务集合(tasks[i]表示第i个任务的长度) */
var tasks = [];
// 任务数量
var taskNum = 100;
/** 处理节点集合(nodes[i]表示第i个处理节点的处理速度) */
var nodes = [];
// 处理节点数量
var nodeNum = 10;
/** 任务长度取值范围 */
var taskLengthRange = [10,100];
/** 节点处理速度取值范围 */
var nodeSpeendRange = [10,100];
/** 任务处理时间矩阵(记录单个任务在不同节点上的处理时间) */
var timeMatrix = [];
/** 迭代次数 */
var iteratorNum = 100;
/** 染色体数量 */
var chromosomeNum = 10;
/** 适应度矩阵(下标:染色体编号、值:该染色体的适应度) */
var adaptability = [];
/** 自然选择的概率矩阵(下标:染色体编号、值:该染色体被选择的概率) */
var selectionProbability = [];
/** 染色体复制的比例(每代中保留适应度较高的染色体直接成为下一代) */
var cp = 0.2;
/** 参与交叉变异的染色体数量 */
var crossoverMutationNum;
/** 任务处理时间结果集([迭代次数][染色体编号]) */
var resultData = [];
/**
* 初始化遗传算法
* @param _taskNum 任务数量
* @param _nodeNum 节点数量
* @param _iteratorNum 迭代次数
* @param _chromosomeNum 染色体数量
* @param _cp 染色体复制的比例
*/
(function initGA(_taskNum, _nodeNum, _iteratorNum, _chromosomeNum, _cp) {
// 参数校验
if (!checkParam(_taskNum, _nodeNum, _iteratorNum, _chromosomeNum, _cp)) {
return;
}
// 初始化任务集合
tasks = initRandomArray(_taskNum, taskLengthRange);
// 初始化节点集合
nodes = initRandomArray(_nodeNum, nodeSpeendRange);
debugger;
// 执行遗传算法
ga();
// 渲染视图
draw(resultData);
})(100, 10, 100, 100, 0.2);
/**
* 遗传算法
*/
function ga() {
// 初始化任务执行时间矩阵
initTimeMatrix(tasks, nodes, timeMatrix);
// 迭代搜索
gaSearch(iteratorNum, chromosomeNum);
}
/**
* 参数校验
* @param _taskNum 任务数量
* @param _nodeNum 节点数量
* @param _iteratorNum 迭代次数
* @param _chromosomeNum 染色体数量
* @param _cp 染色体复制的比例
*/
function checkParam(_taskNum, _nodeNum, _iteratorNum, _chromosomeNum, _cp) {
if (isNaN(_taskNum)) {
alert("任务数量必须是数字!");
return false;
}
if (isNaN(_nodeNum)) {
alert("节点数量必须是数字!");
return false;
}
if (isNaN(_iteratorNum)) {
alert("迭代次数必须是数字!");
return false;
}
if (isNaN(_chromosomeNum)) {
alert("染色体数量必须是数字!");
return false;
}
if (isNaN(_cp) || _cp<0 || _cp>1) {
alert("cp值必须为数字!并且在0~1之间!");
return false;
}
taskNum = _taskNum;
nodeNum = _nodeNum;
iteratorNum = _iteratorNum;
chromosomeNum = _chromosomeNum;
cp = _cp;
crossoverMutationNum = chromosomeNum - chromosomeNum*_cp;
return true;
}
/**
* 计算 染色体适应度
* @param chromosomeMatrix
*/
function calAdaptability(chromosomeMatrix) {
adaptability = [];
// 计算每条染色体的任务长度
var chromosomeTaskLengths = calTaskLengthOfEachChromosome(chromosomeMatrix);
for (var i=0; i<chromosomeTaskLengths.length; ++i) {
// 适应度 = 1/任务长度
adaptability.push(1/chromosomeTaskLengths[i]);
}
}
/**
* 计算自然选择概率
* @param adaptability
*/
function calSelectionProbability(adaptability) {
selectionProbability = [];
// 计算适应度总和
var sumAdaptability = 0;
for (var i=0; i<chromosomeNum; i++) {
sumAdaptability += adaptability[i];
}
// 计算每条染色体的选择概率
for (var i=0; i<chromosomeNum; i++) {
selectionProbability.push(adaptability[i] / sumAdaptability);
}
}
/**
* 迭代搜索
* @param iteratorNum 迭代次数
* @param chromosomeNum 染色体数量
*/
function gaSearch(iteratorNum, chromosomeNum) {
// 初始化第一代染色体
var chromosomeMatrix = createGeneration();
// 迭代繁衍
for (var itIndex=1; itIndex<iteratorNum; itIndex++) {
// 计算上一代各条染色体的适应度
calAdaptability(chromosomeMatrix);
// 计算自然选择概率
calSelectionProbability(adaptability);
// 生成新一代染色体
chromosomeMatrix = createGeneration(chromosomeMatrix);
}
}
/**
* 交叉生成{crossoverMutationNum}条染色体
* @param chromosomeMatrix 上一代染色体矩阵
*/
function cross(chromosomeMatrix) {
var newChromosomeMatrix = [];
for (var chromosomeIndex=0; chromosomeIndex<crossoverMutationNum; chromosomeIndex++) {
// 采用轮盘赌选择父母染色体
var chromosomeBaba = chromosomeMatrix[RWS(selectionProbability)].slice(0);
var chromosomeMama = chromosomeMatrix[RWS(selectionProbability)].slice(0);
// 交叉
var crossIndex = random(0, taskNum-1);
chromosomeBaba.splice(crossIndex);
chromosomeBaba = chromosomeBaba.concat(chromosomeMama.slice(crossIndex));
// debugger;
newChromosomeMatrix.push(chromosomeBaba);
}
return newChromosomeMatrix;
}
/**
* 从数组中寻找最大的n个元素
* @param array
* @param n
*/
function maxN(array, n) {
// 将一切数组升级成二维数组,二维数组的每一行都有两个元素构成[原一位数组的下标,值]
var matrix = [];
for (var i=0; i<array.length; i++) {
matrix.push([i, array[i]]);
}
// 对二维数组排序
for (var i=0; i<n; i++) {
for (var j=1; j<matrix.length; j++) {
if (matrix[j-1][1] > matrix[j][1]) {
var temp = matrix[j-1];
matrix[j-1] = matrix[j];
matrix[j] = temp;
}
}
}
// 取最大的n个元素
var maxIndexArray = [];
for (var i=matrix.length-1; i>matrix.length-n-1; i--) {
maxIndexArray.push(matrix[i][0]);
}
return maxIndexArray;
}
/**
* 复制(复制上一代中优良的染色体)
* @param chromosomeMatrix 上一代染色体矩阵
* @param newChromosomeMatrix 新一代染色体矩阵
*/
function copy(chromosomeMatrix, newChromosomeMatrix) {
// 寻找适应度最高的N条染色体的下标(N=染色体数量*复制比例)
var chromosomeIndexArr = maxN(adaptability, chromosomeNum*cp);
// 复制
for (var i=0; i<chromosomeIndexArr.length; i++) {
var chromosome = chromosomeMatrix[chromosomeIndexArr[i]];
newChromosomeMatrix.push(chromosome);
} | /**
* 计算所有染色体的任务处理时间
* @param chromosomeMatrix
*/
function calTime_oneIt(chromosomeMatrix) {
resultData.push(calTaskLengthOfEachChromosome(chromosomeMatrix));
}
/**
* 计算每条染色体的任务长度
* @param chromosomeMatrix
*/
function calTaskLengthOfEachChromosome(chromosomeMatrix) {
var chromosomeTaskLengths = [];
for (var chromosomeIndex=0; chromosomeIndex<chromosomeNum; chromosomeIndex++) {
var maxLength = Number.MIN_VALUE;
for (var nodeIndex=0; nodeIndex<nodeNum; nodeIndex++) {
var sumLength = 0;
for (var taskIndex=0; taskIndex<taskNum; taskIndex++) {
if (chromosomeMatrix[chromosomeIndex][taskIndex] == nodeIndex) {
sumLength += timeMatrix[taskIndex][nodeIndex];
}
}
if (sumLength > maxLength) {
maxLength = sumLength;
}
}
chromosomeTaskLengths.push(maxLength);
}
return chromosomeTaskLengths;
}
/**
* 繁衍新一代染色体
* @param chromosomeMatrix 上一代染色体
*/
function createGeneration(chromosomeMatrix) {
// 第一代染色体,随机生成
if (chromosomeMatrix == null || chromosomeMatrix == undefined) {
var newChromosomeMatrix = [];
for (var chromosomeIndex=0; chromosomeIndex<chromosomeNum; chromosomeIndex++) {
var chromosomeMatrix_i = [];
for (var taskIndex=0; taskIndex<taskNum; taskIndex++) {
chromosomeMatrix_i.push(random(0, nodeNum-1));
}
newChromosomeMatrix.push(chromosomeMatrix_i);
}
// 计算当前染色体的任务处理时间
calTime_oneIt(newChromosomeMatrix);
return newChromosomeMatrix;
}
// 交叉生成{crossoverMutationNum}条染色体
var newChromosomeMatrix = cross(chromosomeMatrix);
// 变异
newChromosomeMatrix = mutation(newChromosomeMatrix);
// 复制
newChromosomeMatrix = copy(chromosomeMatrix, newChromosomeMatrix);
// 计算当前染色体的任务处理时间
calTime_oneIt(newChromosomeMatrix);
return newChromosomeMatrix;
}
/**
* 轮盘赌算法
* @param selectionProbability 概率数组(下标:元素编号、值:该元素对应的概率)
* @returns {number} 返回概率数组中某一元素的下标
*/
function RWS(selectionProbability) {
var sum = 0;
var rand = Math.random();
for (var i=0; i<selectionProbability.length; i++) {
sum += selectionProbability[i];
if (sum >= rand) {
return i;
}
}
}
/**
* 变异
* @param newChromosomeMatrix 新一代染色体矩阵
*/
function mutation(newChromosomeMatrix) {
// 随机找一条染色体
var chromosomeIndex = random(0, crossoverMutationNum-1);
// 随机找一个任务
var taskIndex = random(0, taskNum-1);
// 随机找一个节点
var nodeIndex = random(0, nodeNum-1);
newChromosomeMatrix[chromosomeIndex][taskIndex] = nodeIndex;
return newChromosomeMatrix;
}
/**
* 渲染视图
* @param resultData
*/
function draw(resultData) {
// 基于准备好的dom,初始化echarts实例
var myChart = echarts.init(document.getElementById('main'));
// 指定图表的配置项和数据
var option = {
title: {
text: '基于遗传算法的负载均衡调度策略'
},
tooltip : {
trigger: 'axis',
showDelay : 0,
axisPointer:{
show: true,
type : 'cross',
lineStyle: {
type : 'dashed',
width : 1
}
},
zlevel: 1
},
legend: {
data:['遗传算法']
},
toolbox: {
show : true,
feature : {
mark : {show: true},
dataZoom : {show: true},
dataView : {show: true, readOnly: false},
restore : {show: true},
saveAsImage : {show: true}
}
},
xAxis : [
{
type : 'value',
scale:true,
name: '迭代次数'
}
],
yAxis : [
{
type : 'value',
scale:true,
name: '任务处理时间'
}
],
series : [
{
name:'遗传算法',
type:'scatter',
large: true,
symbolSize: 3,
data: (function () {
var d = [];
for (var itIndex=0; itIndex<iteratorNum; itIndex++) {
for (var chromosomeIndex=0; chromosomeIndex<chromosomeNum; chromosomeIndex++) {
d.push([itIndex, resultData[itIndex][chromosomeIndex]]);
}
}
return d;
})()
}
]
};
// 使用刚指定的配置项和数据显示图表。
myChart.setOption(option);
} |
return newChromosomeMatrix;
}
| random_line_split |
GA.js |
/** 任务集合(tasks[i]表示第i个任务的长度) */
var tasks = [];
// 任务数量
var taskNum = 100;
/** 处理节点集合(nodes[i]表示第i个处理节点的处理速度) */
var nodes = [];
// 处理节点数量
var nodeNum = 10;
/** 任务长度取值范围 */
var taskLengthRange = [10,100];
/** 节点处理速度取值范围 */
var nodeSpeendRange = [10,100];
/** 任务处理时间矩阵(记录单个任务在不同节点上的处理时间) */
var timeMatrix = [];
/** 迭代次数 */
var iteratorNum = 100;
/** 染色体数量 */
var chromosomeNum = 10;
/** 适应度矩阵(下标:染色体编号、值:该染色体的适应度) */
var adaptability = [];
/** 自然选择的概率矩阵(下标:染色体编号、值:该染色体被选择的概率) */
var selectionProbability = [];
/** 染色体复制的比例(每代中保留适应度较高的染色体直接成为下一代) */
var cp = 0.2;
/** 参与交叉变异的染色体数量 */
var crossoverMutationNum;
/** 任务处理时间结果集([迭代次数][染色体编号]) */
var resultData = [];
/**
* 初始化遗传算法
* @param _taskNum 任务数量
* @param _nodeNum 节点数量
* @param _iteratorNum 迭代次数
* @param _chromosomeNum 染色体数量
* @param _cp 染色体复制的比例
*/
(function initGA(_taskNum, _nodeNum, _iteratorNum, _chromosomeNum, _cp) {
// 参数校验
if (!checkParam(_taskNum, _nodeNum, _iteratorNum, _chromosomeNum, _cp)) {
return;
}
// 初始化任务集合
tasks = initRandomArray(_taskNum, taskLengthRange);
// 初始化节点集合
nodes = initRandomArray(_nodeNum, nodeSpeendRange);
debugger;
// 执行遗传算法
ga();
// 渲染视图
draw(resultData);
})(100, 10, 100, 100, 0.2);
/**
* 遗传算法
*/
function ga() {
// 初始化任务执行时间矩阵
initTimeMatrix(tasks, nodes, timeMatrix);
// 迭代搜索
gaSearch(iteratorNum, chromosomeNum);
}
/**
* 参数校验
* @param _taskNum 任务数量
* @param _nodeNum 节点数量
* @param _iteratorNum 迭代次数
* @param _chromosomeNum 染色体数量
* @param _cp 染色体复制的比例
*/
function checkParam(_taskNum, _nodeNum, _iteratorNum, _chromosomeNum, _cp) {
if (isNaN(_taskNum)) {
alert("任务数量必须是数字!");
return false;
}
if (isNaN(_nodeNum)) {
alert("节点数量必须是数字!");
return false;
}
if (isNaN(_iteratorNum)) {
alert("迭代次数必须是数字!");
return false;
}
if (isNaN(_chromosomeNum)) {
alert("染色体数量必须是数字!");
return false;
}
if (isNaN(_cp) || _cp<0 || _cp>1) {
alert("cp值必须为数字!并且在0~1之间!");
return false;
}
taskNum = _taskNum;
nodeNum = _nodeNum;
iteratorNum = _iteratorNum;
chromosomeNum = _chromosomeNum;
cp = _cp;
crossoverMutationNum = chromosomeNum - chromosomeNum*_cp;
return true;
}
/**
* 计算 染色体适应度
* @param chromosomeMatrix
*/
function calAdaptability(chromosomeMatrix) {
adaptability = [];
// 计算每条染色体的任务长度
var chromosomeTaskLengths = calTaskLengthOfEachChromosome(chromosomeMatrix);
for (var i=0; i<chromosomeTaskLengths.length; ++i) {
// 适应度 = 1/任务长度
adaptability.push(1/chromosomeTaskLengths[i]);
}
}
/**
* 计算自然选择概率
* @param adaptability
*/
function calSelectionProbability(adaptability) {
selectionProbability = [];
// 计算适应度总和
var sumAdaptability = 0;
for (var i=0; i<chromosomeNum; i++) {
sumAdaptability += adaptability[i];
}
// 计算每条染色体的选择概率
for (var i=0; i<chromosomeNum; i++) {
selectionProbability.push(adaptability[i] / sumAdaptability);
}
}
/**
* 迭代搜索
* @param iteratorNum 迭代次数
* @param chromosomeNum 染色体数量
*/
function gaSearch(iteratorNum, chromosomeNum) {
// 初始化第一代染色体
var chromosomeMatrix = createGeneration();
// 迭代繁衍
for (var itIndex=1; itIndex<iteratorNum; itIndex++) {
// 计算上一代各条染色体的适应度
calAdaptability(chromosomeMatrix);
// 计算自然选择概率
calSelectionProbability(adaptability);
// 生成新一代染色体
chromosomeMatrix = createGeneration(chromosomeMatrix);
}
}
/**
* 交叉生成{crossoverMutationNum}条染色体
* @param chromosomeMatrix 上一代染色体矩阵
*/
function cross(chromosomeMatrix) {
var newChromosomeMatrix = [];
for (var chromosomeIndex=0; chromosomeIndex<crossoverMutationNum; chromosomeIndex++) {
// 采用轮盘赌选择父母染色体
var chromosomeBaba = chromosomeMatrix[RWS(selectionProbability)].slice(0);
var chromosomeMama = chromosomeMatrix[RWS(selectionProbability)].slice(0);
// 交叉
var crossIndex = random(0, taskNum-1);
chromosomeBaba.splice(crossIndex);
chromosomeBaba = chromosomeBaba.concat(chromosomeMama.slice(crossIndex));
// debugger;
newChromosomeMatrix.push(chromosomeBaba);
}
return newChromosomeMatrix;
}
/**
* 从数组中寻找最大的n个元素
* @param array
* @param n
*/
function maxN(array, n) {
// 将一切数组升级成二维数组,二维数组的每一行都有两个元素构成[原一位数组的下标,值]
var matrix = [];
for (var i=0; i<array.length; i++) {
matrix.push([i, array[i]]);
}
// 对二维数组排序
for (var i=0; i<n; i++) {
for (var j=1; j<matrix.length; j++) {
if (matrix[j-1][1] > matrix[j][1]) {
| gth; i++) {
var chromosome = chromosomeMatrix[chromosomeIndexArr[i]];
newChromosomeMatrix.push(chromosome);
}
return newChromosomeMatrix;
}
/**
* 计算所有染色体的任务处理时间
* @param chromosomeMatrix
*/
function calTime_oneIt(chromosomeMatrix) {
resultData.push(calTaskLengthOfEachChromosome(chromosomeMatrix));
}
/**
* 计算每条染色体的任务长度
* @param chromosomeMatrix
*/
function calTaskLengthOfEachChromosome(chromosomeMatrix) {
var chromosomeTaskLengths = [];
for (var chromosomeIndex=0; chromosomeIndex<chromosomeNum; chromosomeIndex++) {
var maxLength = Number.MIN_VALUE;
for (var nodeIndex=0; nodeIndex<nodeNum; nodeIndex++) {
var sumLength = 0;
for (var taskIndex=0; taskIndex<taskNum; taskIndex++) {
if (chromosomeMatrix[chromosomeIndex][taskIndex] == nodeIndex) {
sumLength += timeMatrix[taskIndex][nodeIndex];
}
}
if (sumLength > maxLength) {
maxLength = sumLength;
}
}
chromosomeTaskLengths.push(maxLength);
}
return chromosomeTaskLengths;
}
/**
* 繁衍新一代染色体
* @param chromosomeMatrix 上一代染色体
*/
function createGeneration(chromosomeMatrix) {
// 第一代染色体,随机生成
if (chromosomeMatrix == null || chromosomeMatrix == undefined) {
var newChromosomeMatrix = [];
for (var chromosomeIndex=0; chromosomeIndex<chromosomeNum; chromosomeIndex++) {
var chromosomeMatrix_i = [];
for (var taskIndex=0; taskIndex<taskNum; taskIndex++) {
chromosomeMatrix_i.push(random(0, nodeNum-1));
}
newChromosomeMatrix.push(chromosomeMatrix_i);
}
// 计算当前染色体的任务处理时间
calTime_oneIt(newChromosomeMatrix);
return newChromosomeMatrix;
}
// 交叉生成{crossoverMutationNum}条染色体
var newChromosomeMatrix = cross(chromosomeMatrix);
// 变异
newChromosomeMatrix = mutation(newChromosomeMatrix);
// 复制
newChromosomeMatrix = copy(chromosomeMatrix, newChromosomeMatrix);
// 计算当前染色体的任务处理时间
calTime_oneIt(newChromosomeMatrix);
return newChromosomeMatrix;
}
/**
* 轮盘赌算法
* @param selectionProbability 概率数组(下标:元素编号、值:该元素对应的概率)
* @returns {number} 返回概率数组中某一元素的下标
*/
function RWS(selectionProbability) {
var sum = 0;
var rand = Math.random();
for (var i=0; i<selectionProbability.length; i++) {
sum += selectionProbability[i];
if (sum >= rand) {
return i;
}
}
}
/**
* 变异
* @param newChromosomeMatrix 新一代染色体矩阵
*/
function mutation(newChromosomeMatrix) {
// 随机找一条染色体
var chromosomeIndex = random(0, crossoverMutationNum-1);
// 随机找一个任务
var taskIndex = random(0, taskNum-1);
// 随机找一个节点
var nodeIndex = random(0, nodeNum-1);
newChromosomeMatrix[chromosomeIndex][taskIndex] = nodeIndex;
return newChromosomeMatrix;
}
/**
* 渲染视图
* @param resultData
*/
function draw(resultData) {
// 基于准备好的dom,初始化echarts实例
var myChart = echarts.init(document.getElementById('main'));
// 指定图表的配置项和数据
var option = {
title: {
text: '基于遗传算法的负载均衡调度策略'
},
tooltip : {
trigger: 'axis',
showDelay : 0,
axisPointer:{
show: true,
type : 'cross',
lineStyle: {
type : 'dashed',
width : 1
}
},
zlevel: 1
},
legend: {
data:['遗传算法']
},
toolbox: {
show : true,
feature : {
mark : {show: true},
dataZoom : {show: true},
dataView : {show: true, readOnly: false},
restore : {show: true},
saveAsImage : {show: true}
}
},
xAxis : [
{
type : 'value',
scale:true,
name: '迭代次数'
}
],
yAxis : [
{
type : 'value',
scale:true,
name: '任务处理时间'
}
],
series : [
{
name:'遗传算法',
type:'scatter',
large: true,
symbolSize: 3,
data: (function () {
var d = [];
for (var itIndex=0; itIndex<iteratorNum; itIndex++) {
for (var chromosomeIndex=0; chromosomeIndex<chromosomeNum; chromosomeIndex++) {
d.push([itIndex, resultData[itIndex][chromosomeIndex]]);
}
}
return d;
})()
}
]
};
// 使用刚指定的配置项和数据显示图表。
myChart.setOption(option);
} | var temp = matrix[j-1];
matrix[j-1] = matrix[j];
matrix[j] = temp;
}
}
}
// 取最大的n个元素
var maxIndexArray = [];
for (var i=matrix.length-1; i>matrix.length-n-1; i--) {
maxIndexArray.push(matrix[i][0]);
}
return maxIndexArray;
}
/**
* 复制(复制上一代中优良的染色体)
* @param chromosomeMatrix 上一代染色体矩阵
* @param newChromosomeMatrix 新一代染色体矩阵
*/
function copy(chromosomeMatrix, newChromosomeMatrix) {
// 寻找适应度最高的N条染色体的下标(N=染色体数量*复制比例)
var chromosomeIndexArr = maxN(adaptability, chromosomeNum*cp);
// 复制
for (var i=0; i<chromosomeIndexArr.len | identifier_body |
GA.js |
/** 任务集合(tasks[i]表示第i个任务的长度) */
var tasks = [];
// 任务数量
var taskNum = 100;
/** 处理节点集合(nodes[i]表示第i个处理节点的处理速度) */
var nodes = [];
// 处理节点数量
var nodeNum = 10;
/** 任务长度取值范围 */
var taskLengthRange = [10,100];
/** 节点处理速度取值范围 */
var nodeSpeendRange = [10,100];
/** 任务处理时间矩阵(记录单个任务在不同节点上的处理时间) */
var timeMatrix = [];
/** 迭代次数 */
var iteratorNum = 100;
/** 染色体数量 */
var chromosomeNum = 10;
/** 适应度矩阵(下标:染色体编号、值:该染色体的适应度) */
var adaptability = [];
/** 自然选择的概率矩阵(下标:染色体编号、值:该染色体被选择的概率) */
var selectionProbability = [];
/** 染色体复制的比例(每代中保留适应度较高的染色体直接成为下一代) */
var cp = 0.2;
/** 参与交叉变异的染色体数量 */
var crossoverMutationNum;
/** 任务处理时间结果集([迭代次数][染色体编号]) */
var resultData = [];
/**
* 初始化遗传算法
* @param _taskNum 任务数量
* @param _nodeNum 节点数量
* @param _iteratorNum 迭代次数
* @param _chromosomeNum 染色体数量
* @param _cp 染色体复制的比例
*/
(function initGA(_taskNum, _nodeNum, _iteratorNum, _chromosomeNum, _cp) {
// 参数校验
if (!checkParam(_taskNum, _nodeNum, _iteratorNum, _chromosomeNum, _cp)) {
return;
}
// 初始化任务集合
tasks = initRandomArray(_taskNum, taskLengthRange);
// 初始化节点集合
nodes = initRandomArray(_nodeNum, nodeSpeendRange);
debugger;
// 执行遗传算法
ga();
// 渲染视图
draw(resultData);
})(100, 10, 100, 100, 0.2);
/**
* 遗传算法
*/
function ga() {
// 初始化任务执行时间矩阵
initTimeMatrix(tasks, nodes, timeMatrix);
// 迭代搜索
gaSearch(iteratorNum, chromosomeNum);
}
/**
* 参数校验
* @param _taskNum 任务数量
* @param _nodeNum 节点数量
* @param _iteratorNum 迭代次数
* @param _chromosomeNum 染色体数量
* @param _cp 染色体复制的比例
*/
function checkParam(_taskNum, _nodeNum, _iteratorNum, _chromosomeNum, _cp) {
if (isNaN(_taskNum)) {
alert("任务数量必须是数字!");
return false;
}
if (isNaN(_nodeNum)) {
alert("节点数量必须是数字!");
return false;
}
if (isNaN(_iteratorNum)) {
alert("迭代次数必须是数字!");
return false;
}
if (isNaN(_chromosomeNum)) {
alert("染色体数量必须是数字!");
return false;
}
if (isNaN(_cp) || _cp<0 || _cp>1) {
alert("cp值必须为数字!并且在0~1之间!");
return false;
}
taskNum = _taskNum;
nodeNum = _nodeNum;
iteratorNum = _iteratorNum;
chromosomeNum = _chromosomeNum;
cp = _cp;
crossoverMutationNum = chromosomeNum - chromosomeNum*_cp;
return true;
}
/**
* 计算 染色体适应度
* @param chromosomeMatrix
*/
function calAdaptability(chromosomeMatrix) {
adaptability = [];
// 计算每条染色体的任务长度
var chromosomeTaskLengths = calTaskLengthOfEachChromosome(chromosomeMatrix);
for (var i=0; i<chromosomeTaskLengths.length; ++i) {
// 适应度 = 1/任务长度
adaptability.push(1/chromosomeTaskLengths[i]);
}
}
/**
* 计算自然选择概率
* @param adaptability
*/
function calSelectionProbability(adaptability) {
selectionProbability = [];
// 计算适应度总和
var sumAdaptability = 0;
for (var i=0; i<chromosomeNum; i++) {
sumAdaptability += adaptability[i];
}
// 计算每条染色体的选择概率
for (var i=0; i<chromosomeNum; i++) {
selectionProbability.push(adaptability[i] / sumAdaptability);
}
}
/**
* 迭代搜索
* @param iteratorNum 迭代次数
* @param chromosomeNum 染色体数量
*/
function gaSearch(iteratorNum, chromosomeNum) {
// 初始化第一代染色体
var chromosomeMatrix = createGeneration();
// 迭代繁衍
for (var itIndex=1; itIndex<iteratorNum; itIndex++) {
// 计算上一代各条染色体的适应度
calAdaptability(chromosomeMatrix);
// 计算自然选择概率
calSelectionProbability(adaptability);
// 生成新一代染色体
chromosomeMatrix = createGeneration(chromosomeMatrix);
}
}
/**
* 交叉生成{crossoverMutationNum}条染色体
* @param chromosomeMatrix 上一代染色体矩阵
*/
function cross(chromosomeMatrix) {
var newChromosomeMatrix = | x<crossoverMutationNum; chromosomeIndex++) {
// 采用轮盘赌选择父母染色体
var chromosomeBaba = chromosomeMatrix[RWS(selectionProbability)].slice(0);
var chromosomeMama = chromosomeMatrix[RWS(selectionProbability)].slice(0);
// 交叉
var crossIndex = random(0, taskNum-1);
chromosomeBaba.splice(crossIndex);
chromosomeBaba = chromosomeBaba.concat(chromosomeMama.slice(crossIndex));
// debugger;
newChromosomeMatrix.push(chromosomeBaba);
}
return newChromosomeMatrix;
}
/**
* 从数组中寻找最大的n个元素
* @param array
* @param n
*/
function maxN(array, n) {
// 将一切数组升级成二维数组,二维数组的每一行都有两个元素构成[原一位数组的下标,值]
var matrix = [];
for (var i=0; i<array.length; i++) {
matrix.push([i, array[i]]);
}
// 对二维数组排序
for (var i=0; i<n; i++) {
for (var j=1; j<matrix.length; j++) {
if (matrix[j-1][1] > matrix[j][1]) {
var temp = matrix[j-1];
matrix[j-1] = matrix[j];
matrix[j] = temp;
}
}
}
// 取最大的n个元素
var maxIndexArray = [];
for (var i=matrix.length-1; i>matrix.length-n-1; i--) {
maxIndexArray.push(matrix[i][0]);
}
return maxIndexArray;
}
/**
* 复制(复制上一代中优良的染色体)
* @param chromosomeMatrix 上一代染色体矩阵
* @param newChromosomeMatrix 新一代染色体矩阵
*/
function copy(chromosomeMatrix, newChromosomeMatrix) {
// 寻找适应度最高的N条染色体的下标(N=染色体数量*复制比例)
var chromosomeIndexArr = maxN(adaptability, chromosomeNum*cp);
// 复制
for (var i=0; i<chromosomeIndexArr.length; i++) {
var chromosome = chromosomeMatrix[chromosomeIndexArr[i]];
newChromosomeMatrix.push(chromosome);
}
return newChromosomeMatrix;
}
/**
* 计算所有染色体的任务处理时间
* @param chromosomeMatrix
*/
function calTime_oneIt(chromosomeMatrix) {
resultData.push(calTaskLengthOfEachChromosome(chromosomeMatrix));
}
/**
* 计算每条染色体的任务长度
* @param chromosomeMatrix
*/
function calTaskLengthOfEachChromosome(chromosomeMatrix) {
var chromosomeTaskLengths = [];
for (var chromosomeIndex=0; chromosomeIndex<chromosomeNum; chromosomeIndex++) {
var maxLength = Number.MIN_VALUE;
for (var nodeIndex=0; nodeIndex<nodeNum; nodeIndex++) {
var sumLength = 0;
for (var taskIndex=0; taskIndex<taskNum; taskIndex++) {
if (chromosomeMatrix[chromosomeIndex][taskIndex] == nodeIndex) {
sumLength += timeMatrix[taskIndex][nodeIndex];
}
}
if (sumLength > maxLength) {
maxLength = sumLength;
}
}
chromosomeTaskLengths.push(maxLength);
}
return chromosomeTaskLengths;
}
/**
* 繁衍新一代染色体
* @param chromosomeMatrix 上一代染色体
*/
function createGeneration(chromosomeMatrix) {
// 第一代染色体,随机生成
if (chromosomeMatrix == null || chromosomeMatrix == undefined) {
var newChromosomeMatrix = [];
for (var chromosomeIndex=0; chromosomeIndex<chromosomeNum; chromosomeIndex++) {
var chromosomeMatrix_i = [];
for (var taskIndex=0; taskIndex<taskNum; taskIndex++) {
chromosomeMatrix_i.push(random(0, nodeNum-1));
}
newChromosomeMatrix.push(chromosomeMatrix_i);
}
// 计算当前染色体的任务处理时间
calTime_oneIt(newChromosomeMatrix);
return newChromosomeMatrix;
}
// 交叉生成{crossoverMutationNum}条染色体
var newChromosomeMatrix = cross(chromosomeMatrix);
// 变异
newChromosomeMatrix = mutation(newChromosomeMatrix);
// 复制
newChromosomeMatrix = copy(chromosomeMatrix, newChromosomeMatrix);
// 计算当前染色体的任务处理时间
calTime_oneIt(newChromosomeMatrix);
return newChromosomeMatrix;
}
/**
* 轮盘赌算法
* @param selectionProbability 概率数组(下标:元素编号、值:该元素对应的概率)
* @returns {number} 返回概率数组中某一元素的下标
*/
function RWS(selectionProbability) {
var sum = 0;
var rand = Math.random();
for (var i=0; i<selectionProbability.length; i++) {
sum += selectionProbability[i];
if (sum >= rand) {
return i;
}
}
}
/**
* 变异
* @param newChromosomeMatrix 新一代染色体矩阵
*/
function mutation(newChromosomeMatrix) {
// 随机找一条染色体
var chromosomeIndex = random(0, crossoverMutationNum-1);
// 随机找一个任务
var taskIndex = random(0, taskNum-1);
// 随机找一个节点
var nodeIndex = random(0, nodeNum-1);
newChromosomeMatrix[chromosomeIndex][taskIndex] = nodeIndex;
return newChromosomeMatrix;
}
/**
* 渲染视图
* @param resultData
*/
function draw(resultData) {
// 基于准备好的dom,初始化echarts实例
var myChart = echarts.init(document.getElementById('main'));
// 指定图表的配置项和数据
var option = {
title: {
text: '基于遗传算法的负载均衡调度策略'
},
tooltip : {
trigger: 'axis',
showDelay : 0,
axisPointer:{
show: true,
type : 'cross',
lineStyle: {
type : 'dashed',
width : 1
}
},
zlevel: 1
},
legend: {
data:['遗传算法']
},
toolbox: {
show : true,
feature : {
mark : {show: true},
dataZoom : {show: true},
dataView : {show: true, readOnly: false},
restore : {show: true},
saveAsImage : {show: true}
}
},
xAxis : [
{
type : 'value',
scale:true,
name: '迭代次数'
}
],
yAxis : [
{
type : 'value',
scale:true,
name: '任务处理时间'
}
],
series : [
{
name:'遗传算法',
type:'scatter',
large: true,
symbolSize: 3,
data: (function () {
var d = [];
for (var itIndex=0; itIndex<iteratorNum; itIndex++) {
for (var chromosomeIndex=0; chromosomeIndex<chromosomeNum; chromosomeIndex++) {
d.push([itIndex, resultData[itIndex][chromosomeIndex]]);
}
}
return d;
})()
}
]
};
// 使用刚指定的配置项和数据显示图表。
myChart.setOption(option);
} | [];
for (var chromosomeIndex=0; chromosomeInde | conditional_block |
diagnostics.rs | #![warn(
clippy::print_stdout,
clippy::unimplemented,
clippy::doc_markdown,
clippy::items_after_statements,
clippy::match_same_arms,
clippy::similar_names,
clippy::single_match_else,
clippy::use_self,
clippy::use_debug
)]
//! The diagnostics object controls the output of warnings and errors generated
//! by the compiler during the lexing, parsing and semantic analysis phases.
//! It also tracks the number of warnings and errors generated for flow control.
//!
//! This implementation is NOT thread-safe. Messages from different threads may
//! be interleaved.
use asciifile::{MaybeSpanned, Span, Spanned};
use failure::Error;
use std::{ascii::escape_default, cell::RefCell, collections::HashMap, fmt::Display};
use termcolor::{Color, WriteColor};
use utils::color::ColorOutput;
pub mod lint;
pub fn u8_to_printable_representation(byte: u8) -> String {
let bytes = escape_default(byte).collect::<Vec<u8>>();
let rep = unsafe { std::str::from_utf8_unchecked(&bytes) };
rep.to_owned()
}
/// This abstraction allows us to call the diagnostics API with pretty
/// much everything.
///
/// The following examples are all equivalent and will print a warning
/// without a source code snippet below the message:
///
/// ```rust,ignore
/// context.diagnostics.warning(&"Something went wrong");
/// context
/// .diagnostics
/// .warning(&WithoutSpan("Something went wrong"));
/// ```
///
/// The following examples will print a message with a source code
/// snippet. Note that all errors generated by the compiler are
/// a `Spanned<_, Fail>` and can therefore be directly passed to
/// the diagnostics API.
///
/// ```rust,ignore
/// // `lexer_error` is the `Err` returned by `Lexer::next`
/// context.diagnostics.error(&lexer_error);
/// // `span` is some `asciifile::Span`
/// context.diagnostics.error({
/// span: span,
/// data: "something went wrong"
/// });
/// ```
pub trait Printable<'a, 'b> {
fn as_maybe_spanned(&'b self) -> MaybeSpanned<'a, &'b dyn Display>;
}
// TODO: implementing on `str` (which is what you would like to do, to
// support calls with warning("aa") instead of warning(&"aa").
impl<'a, 'b> Printable<'a, 'b> for &'b str {
fn as_maybe_spanned(&'b self) -> MaybeSpanned<'a, &'b dyn Display> {
MaybeSpanned::WithoutSpan(self)
}
}
impl<'a, 'b, T: Display + 'b> Printable<'a, 'b> for Spanned<'a, T> {
fn as_maybe_spanned(&'b self) -> MaybeSpanned<'a, &'b dyn Display> {
MaybeSpanned::WithSpan(Spanned {
span: self.span,
data: &self.data,
})
}
}
impl<'a, 'b, T: Display + 'b> Printable<'a, 'b> for MaybeSpanned<'a, T> {
fn as_maybe_spanned(&'b self) -> MaybeSpanned<'a, &'b dyn Display> {
match self {
MaybeSpanned::WithSpan(ref spanned) => MaybeSpanned::WithSpan(Spanned {
span: spanned.span,
data: &spanned.data,
}),
MaybeSpanned::WithoutSpan(ref data) => MaybeSpanned::WithoutSpan(data),
}
}
}
/// Width of tabs in error and warning messages
const TAB_WIDTH: usize = 4;
/// Color used for rendering line numbers, escape sequences
/// and others...
const HIGHLIGHT_COLOR: Option<Color> = Some(Color::Cyan);
// TODO reimplement line truncation
/// Instead of writing errors, warnings and lints generated in the different
/// compiler stages directly to stdout, they are collected in this object.
///
/// This has several advantages:
/// - the output level can be adapted by users.
/// - we have a single source responsible for formatting compiler messages.
pub struct Diagnostics {
message_count: RefCell<HashMap<MessageLevel, usize>>,
writer: RefCell<Box<dyn WriteColor>>,
}
impl Diagnostics {
pub fn new(writer: Box<dyn WriteColor>) -> Self {
Self {
writer: RefCell::new(writer),
message_count: RefCell::new(HashMap::new()),
}
}
/// True when an error message was emitted, false
/// if only warnings were emitted.
pub fn errored(&self) -> bool {
self.message_count
.borrow()
.get(&MessageLevel::Error)
.is_some()
}
pub fn count(&self, level: MessageLevel) -> usize {
self.message_count
.borrow()
.get(&level)
.cloned()
.unwrap_or(0)
}
pub fn write_statistics(&self) {
let mut writer = self.writer.borrow_mut();
let mut output = ColorOutput::new(&mut **writer);
output.set_bold(true);
if self.errored() {
output.set_color(MessageLevel::Error.color());
writeln!(
output.writer(),
"Compilation aborted due to {}",
match self.count(MessageLevel::Error) {
1 => "an error".to_string(),
n => format!("{} errors", n),
}
)
.ok();
} else {
output.set_color(Some(Color::Green));
writeln!(
output.writer(),
"Compilation finished successfully {}",
match self.count(MessageLevel::Warning) {
0 => "without warnings".to_string(),
1 => "with a warning".to_string(),
n => format!("with {} warnings", n),
}
)
.ok();
}
}
/// Generate an error or a warning that is printed to the
/// writer given in the `new` constructor. Most of the time
/// this will be stderr.
pub fn emit(&self, level: MessageLevel, kind: MaybeSpanned<'_, &dyn Display>) {
self.increment_level_count(level);
let mut writer = self.writer.borrow_mut();
let msg = Message { level, kind };
// `ok()` surpresses io error
msg.write(&mut **writer).ok();
}
#[allow(dead_code)]
pub fn warning<'a, 'b, T: Printable<'a, 'b> + ?Sized>(&self, kind: &'b T) {
self.emit(MessageLevel::Warning, kind.as_maybe_spanned())
}
#[allow(dead_code)]
pub fn error<'a, 'b, T: Printable<'a, 'b> + ?Sized>(&self, kind: &'b T) {
self.emit(MessageLevel::Error, kind.as_maybe_spanned())
}
#[allow(dead_code)]
pub fn info<'a, 'b, T: Printable<'a, 'b> + ?Sized>(&self, kind: &'b T) {
self.emit(MessageLevel::Info, kind.as_maybe_spanned())
}
fn increment_level_count(&self, level: MessageLevel) {
let mut message_count = self.message_count.borrow_mut();
let counter = message_count.entry(level).or_insert(0);
*counter += 1;
}
}
#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)]
pub enum MessageLevel {
Error,
Warning,
Info,
Allow,
}
impl MessageLevel {
fn color(self) -> Option<Color> {
// Don't be confused by the return type.
// `None` means default color in the colorterm
// crate!
match self {
MessageLevel::Error => Some(Color::Red),
MessageLevel::Warning => Some(Color::Yellow),
MessageLevel::Info => Some(Color::Cyan),
MessageLevel::Allow => None,
}
}
fn name(&self) -> &str {
match self {
MessageLevel::Error => "error",
MessageLevel::Warning => "warning",
MessageLevel::Info => "info",
MessageLevel::Allow => "allow",
}
}
pub fn from_string(level: &str) -> Option<Self> {
match level {
"allow" => Some(MessageLevel::Allow),
"info" => Some(MessageLevel::Info),
"warning" => Some(MessageLevel::Warning),
"error" => Some(MessageLevel::Error),
_ => None,
}
}
}
pub struct Message<'file, 'msg> {
pub level: MessageLevel,
pub kind: MaybeSpanned<'file, &'msg dyn Display>,
}
impl<'file, 'msg> Message<'file, 'msg> {
pub fn write(&self, writer: &mut dyn WriteColor) -> Result<(), Error> {
match &self.kind {
MaybeSpanned::WithoutSpan(_) => {
self.write_description(writer)?;
}
MaybeSpanned::WithSpan(spanned) => {
self.write_description(writer)?;
self.write_code(writer, &spanned.span)?;
}
}
writeln!(writer)?;
Ok(())
}
fn write_description(&self, writer: &mut dyn WriteColor) -> Result<(), Error> {
let mut output = ColorOutput::new(writer);
output.set_color(self.level.color());
output.set_bold(true);
write!(output.writer(), "{}: ", self.level.name())?;
output.set_color(None);
writeln!(output.writer(), "{}", *self.kind)?;
Ok(())
}
fn write_code(&self, writer: &mut dyn WriteColor, error: &Span<'_>) -> Result<(), Error> {
let mut output = ColorOutput::new(writer);
let num_fmt = LineNumberFormatter::new(error);
num_fmt.spaces(output.writer())?;
writeln!(output.writer())?;
for (line_number, line) in error.lines().numbered() {
let line_fmt = LineFormatter::new(&line);
num_fmt.number(output.writer(), line_number)?;
line_fmt.render(output.writer())?;
// currently, the span will always exist since we take the line from the error
// but future versions may print a line below and above for context that
// is not part of the error
if let Some(faulty_part_of_line) = Span::intersect(error, &line) {
// TODO: implement this without the following 3 assumptions:
// - start_pos - end_pos >= 0, guranteed by data structure invariant of Span
// - start_term_pos - end_term_pos >= 0, guranteed by monotony of columns (a
// Position.char() can only be rendered to 0 or more terminal characters) | let (start_term_pos, end_term_pos) =
line_fmt.actual_columns(&faulty_part_of_line).unwrap();
let term_width = end_term_pos - start_term_pos;
num_fmt.spaces(output.writer())?;
{
let mut output = ColorOutput::new(output.writer());
output.set_color(self.level.color());
output.set_bold(true);
writeln!(
output.writer(),
"{spaces}{underline}",
spaces = " ".repeat(start_term_pos),
underline = "^".repeat(term_width)
)?;
}
}
}
Ok(())
}
}
/// Helper that prints a range of numbers with the correct
/// amount of padding
struct LineNumberFormatter {
width: usize,
}
impl LineNumberFormatter {
pub fn new(span: &Span<'_>) -> Self {
Self {
width: span.end_position().line_number().to_string().len(),
}
}
pub fn spaces(&self, writer: &mut dyn WriteColor) -> Result<(), Error> {
let mut output = ColorOutput::new(writer);
output.set_color(HIGHLIGHT_COLOR);
output.set_bold(true);
write!(output.writer(), " {} | ", " ".repeat(self.width))?;
Ok(())
}
pub fn number(&self, writer: &mut dyn WriteColor, line_number: usize) -> Result<(), Error> {
let mut output = ColorOutput::new(writer);
output.set_color(HIGHLIGHT_COLOR);
output.set_bold(true);
let padded_number = pad_left(&line_number.to_string(), self.width);
write!(output.writer(), " {} | ", padded_number)?;
Ok(())
}
}
pub fn pad_left(s: &str, pad: usize) -> String {
pad_left_with_char(s, pad, ' ')
}
pub fn pad_left_with_char(s: &str, pad: usize, chr: char) -> String {
format!(
"{padding}{string}",
padding = chr
.to_string()
.repeat(pad.checked_sub(s.len()).unwrap_or(0)),
string = s
)
}
/// Writes a user-supplied input line in a safe manner by replacing
/// control-characters with escape sequences.
struct LineFormatter<'span, 'file> {
line: &'span Span<'file>,
}
impl<'span, 'file> LineFormatter<'span, 'file> {
fn new(line: &'span Span<'file>) -> Self {
Self { line }
}
fn render(&self, writer: &mut dyn WriteColor) -> Result<(), Error> {
let mut output = ColorOutput::new(writer);
// TODO: implement an iterator
let chars = self.line.start_position().iter();
for position in chars {
let (text, color) = self.render_char(position.chr());
output.set_color(color);
write!(output.writer(), "{}", text)?;
if position == self.line.end_position() {
break;
}
}
writeln!(output.writer())?;
Ok(())
}
/// Map terminal columns to `Position` columns. Returns a inclusive
/// lower bound, and an exclusive upper bound.
///
/// Each printed character does not actually take up monospace grid cell.
/// For example a TAB character may be represented by 4 spaces. This
/// function will return the actual number of 'monospace grid cells'
/// rendered before the given
/// position.
///
/// Returns `None` if the column is out of bounds.
fn actual_columns(&self, span: &Span<'_>) -> Option<(usize, usize)> {
let lower = self.len_printed_before(span.start_position().column());
let upper = self.len_printed_before(span.end_position().column());
match (lower, upper) {
(Some(lower), Some(upper)) => {
let last_char_width = self.render_char(span.end_position().chr()).0.len();
Some((lower, upper + last_char_width))
}
_ => None,
}
}
fn len_printed_before(&self, col: usize) -> Option<usize> {
// TODO: get rid of this nonsense
// NOTE: it would actually be nice to condition the Position on the Line
// instead of AsciiFile. Thinking of this, we could actually just do
// `AsciiFile::new((span.as_str().as_bytes()))`. Meaning AsciiFile is
// not a file, but a View
// that restricts the
// linked lists in Positions and Spans to a subset of the file.
// TODO: implement an iterator on span, or
// span.to_view().iter()/.to_ascii_file().iter() this method is
// inherintly unsafe
// because we do not have
// a way to restrict
// positions in a type safe manner.
if self.line.len() < col {
return None;
}
let chars = self.line.start_position().iter();
let mut actual_column = 0;
for position in chars {
if position.column() == col {
break;
}
actual_column += self.render_char(position.chr()).0.len();
}
Some(actual_column)
}
fn render_char(&self, chr: char) -> (String, Option<Color>) {
match chr {
'\t' => (" ".repeat(TAB_WIDTH), None),
'\r' | '\n' => ("".to_string(), None),
chr if chr.is_control() => (
format!("{{{}}}", u8_to_printable_representation(chr as u8)),
HIGHLIGHT_COLOR,
),
_ => (chr.to_string(), None),
}
}
}
#[cfg(test)]
#[allow(clippy::print_stdout, clippy::use_debug)]
mod tests {
use super::*;
#[test]
fn test_pad_left() {
let tests = vec![("a", " a"), ("", " "), ("a", "a"), ("", "")];
for (input, expected) in tests {
println!("Testing: {:?} => {:?}", input, expected);
assert_eq!(expected, pad_left(input, expected.len()));
}
// not enough padding does not truncate string
assert_eq!("a", pad_left("a", 0));
}
} | // - unwrap(.): both positions are guranteed to exist in the line since we just
// got them from the faulty line, which is a subset of the whole error line | random_line_split |
diagnostics.rs | #![warn(
clippy::print_stdout,
clippy::unimplemented,
clippy::doc_markdown,
clippy::items_after_statements,
clippy::match_same_arms,
clippy::similar_names,
clippy::single_match_else,
clippy::use_self,
clippy::use_debug
)]
//! The diagnostics object controls the output of warnings and errors generated
//! by the compiler during the lexing, parsing and semantic analysis phases.
//! It also tracks the number of warnings and errors generated for flow control.
//!
//! This implementation is NOT thread-safe. Messages from different threads may
//! be interleaved.
use asciifile::{MaybeSpanned, Span, Spanned};
use failure::Error;
use std::{ascii::escape_default, cell::RefCell, collections::HashMap, fmt::Display};
use termcolor::{Color, WriteColor};
use utils::color::ColorOutput;
pub mod lint;
pub fn u8_to_printable_representation(byte: u8) -> String {
let bytes = escape_default(byte).collect::<Vec<u8>>();
let rep = unsafe { std::str::from_utf8_unchecked(&bytes) };
rep.to_owned()
}
/// This abstraction allows us to call the diagnostics API with pretty
/// much everything.
///
/// The following examples are all equivalent and will print a warning
/// without a source code snippet below the message:
///
/// ```rust,ignore
/// context.diagnostics.warning(&"Something went wrong");
/// context
/// .diagnostics
/// .warning(&WithoutSpan("Something went wrong"));
/// ```
///
/// The following examples will print a message with a source code
/// snippet. Note that all errors generated by the compiler are
/// a `Spanned<_, Fail>` and can therefore be directly passed to
/// the diagnostics API.
///
/// ```rust,ignore
/// // `lexer_error` is the `Err` returned by `Lexer::next`
/// context.diagnostics.error(&lexer_error);
/// // `span` is some `asciifile::Span`
/// context.diagnostics.error({
/// span: span,
/// data: "something went wrong"
/// });
/// ```
pub trait Printable<'a, 'b> {
fn as_maybe_spanned(&'b self) -> MaybeSpanned<'a, &'b dyn Display>;
}
// TODO: implementing on `str` (which is what you would like to do, to
// support calls with warning("aa") instead of warning(&"aa").
impl<'a, 'b> Printable<'a, 'b> for &'b str {
fn as_maybe_spanned(&'b self) -> MaybeSpanned<'a, &'b dyn Display> {
MaybeSpanned::WithoutSpan(self)
}
}
impl<'a, 'b, T: Display + 'b> Printable<'a, 'b> for Spanned<'a, T> {
fn as_maybe_spanned(&'b self) -> MaybeSpanned<'a, &'b dyn Display> {
MaybeSpanned::WithSpan(Spanned {
span: self.span,
data: &self.data,
})
}
}
impl<'a, 'b, T: Display + 'b> Printable<'a, 'b> for MaybeSpanned<'a, T> {
fn as_maybe_spanned(&'b self) -> MaybeSpanned<'a, &'b dyn Display> {
match self {
MaybeSpanned::WithSpan(ref spanned) => MaybeSpanned::WithSpan(Spanned {
span: spanned.span,
data: &spanned.data,
}),
MaybeSpanned::WithoutSpan(ref data) => MaybeSpanned::WithoutSpan(data),
}
}
}
/// Width of tabs in error and warning messages
const TAB_WIDTH: usize = 4;
/// Color used for rendering line numbers, escape sequences
/// and others...
const HIGHLIGHT_COLOR: Option<Color> = Some(Color::Cyan);
// TODO reimplement line truncation
/// Instead of writing errors, warnings and lints generated in the different
/// compiler stages directly to stdout, they are collected in this object.
///
/// This has several advantages:
/// - the output level can be adapted by users.
/// - we have a single source responsible for formatting compiler messages.
pub struct Diagnostics {
message_count: RefCell<HashMap<MessageLevel, usize>>,
writer: RefCell<Box<dyn WriteColor>>,
}
impl Diagnostics {
pub fn new(writer: Box<dyn WriteColor>) -> Self {
Self {
writer: RefCell::new(writer),
message_count: RefCell::new(HashMap::new()),
}
}
/// True when an error message was emitted, false
/// if only warnings were emitted.
pub fn errored(&self) -> bool {
self.message_count
.borrow()
.get(&MessageLevel::Error)
.is_some()
}
pub fn count(&self, level: MessageLevel) -> usize {
self.message_count
.borrow()
.get(&level)
.cloned()
.unwrap_or(0)
}
pub fn write_statistics(&self) {
let mut writer = self.writer.borrow_mut();
let mut output = ColorOutput::new(&mut **writer);
output.set_bold(true);
if self.errored() {
output.set_color(MessageLevel::Error.color());
writeln!(
output.writer(),
"Compilation aborted due to {}",
match self.count(MessageLevel::Error) {
1 => "an error".to_string(),
n => format!("{} errors", n),
}
)
.ok();
} else {
output.set_color(Some(Color::Green));
writeln!(
output.writer(),
"Compilation finished successfully {}",
match self.count(MessageLevel::Warning) {
0 => "without warnings".to_string(),
1 => "with a warning".to_string(),
n => format!("with {} warnings", n),
}
)
.ok();
}
}
/// Generate an error or a warning that is printed to the
/// writer given in the `new` constructor. Most of the time
/// this will be stderr.
pub fn emit(&self, level: MessageLevel, kind: MaybeSpanned<'_, &dyn Display>) {
self.increment_level_count(level);
let mut writer = self.writer.borrow_mut();
let msg = Message { level, kind };
// `ok()` surpresses io error
msg.write(&mut **writer).ok();
}
#[allow(dead_code)]
pub fn warning<'a, 'b, T: Printable<'a, 'b> + ?Sized>(&self, kind: &'b T) {
self.emit(MessageLevel::Warning, kind.as_maybe_spanned())
}
#[allow(dead_code)]
pub fn error<'a, 'b, T: Printable<'a, 'b> + ?Sized>(&self, kind: &'b T) {
self.emit(MessageLevel::Error, kind.as_maybe_spanned())
}
#[allow(dead_code)]
pub fn info<'a, 'b, T: Printable<'a, 'b> + ?Sized>(&self, kind: &'b T) {
self.emit(MessageLevel::Info, kind.as_maybe_spanned())
}
fn increment_level_count(&self, level: MessageLevel) {
let mut message_count = self.message_count.borrow_mut();
let counter = message_count.entry(level).or_insert(0);
*counter += 1;
}
}
#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)]
pub enum MessageLevel {
Error,
Warning,
Info,
Allow,
}
impl MessageLevel {
fn color(self) -> Option<Color> {
// Don't be confused by the return type.
// `None` means default color in the colorterm
// crate!
match self {
MessageLevel::Error => Some(Color::Red),
MessageLevel::Warning => Some(Color::Yellow),
MessageLevel::Info => Some(Color::Cyan),
MessageLevel::Allow => None,
}
}
fn name(&self) -> &str {
match self {
MessageLevel::Error => "error",
MessageLevel::Warning => "warning",
MessageLevel::Info => "info",
MessageLevel::Allow => "allow",
}
}
pub fn from_string(level: &str) -> Option<Self> {
match level {
"allow" => Some(MessageLevel::Allow),
"info" => Some(MessageLevel::Info),
"warning" => Some(MessageLevel::Warning),
"error" => Some(MessageLevel::Error),
_ => None,
}
}
}
pub struct Message<'file, 'msg> {
pub level: MessageLevel,
pub kind: MaybeSpanned<'file, &'msg dyn Display>,
}
impl<'file, 'msg> Message<'file, 'msg> {
pub fn write(&self, writer: &mut dyn WriteColor) -> Result<(), Error> {
match &self.kind {
MaybeSpanned::WithoutSpan(_) => {
self.write_description(writer)?;
}
MaybeSpanned::WithSpan(spanned) => {
self.write_description(writer)?;
self.write_code(writer, &spanned.span)?;
}
}
writeln!(writer)?;
Ok(())
}
fn write_description(&self, writer: &mut dyn WriteColor) -> Result<(), Error> {
let mut output = ColorOutput::new(writer);
output.set_color(self.level.color());
output.set_bold(true);
write!(output.writer(), "{}: ", self.level.name())?;
output.set_color(None);
writeln!(output.writer(), "{}", *self.kind)?;
Ok(())
}
fn write_code(&self, writer: &mut dyn WriteColor, error: &Span<'_>) -> Result<(), Error> {
let mut output = ColorOutput::new(writer);
let num_fmt = LineNumberFormatter::new(error);
num_fmt.spaces(output.writer())?;
writeln!(output.writer())?;
for (line_number, line) in error.lines().numbered() {
let line_fmt = LineFormatter::new(&line);
num_fmt.number(output.writer(), line_number)?;
line_fmt.render(output.writer())?;
// currently, the span will always exist since we take the line from the error
// but future versions may print a line below and above for context that
// is not part of the error
if let Some(faulty_part_of_line) = Span::intersect(error, &line) {
// TODO: implement this without the following 3 assumptions:
// - start_pos - end_pos >= 0, guranteed by data structure invariant of Span
// - start_term_pos - end_term_pos >= 0, guranteed by monotony of columns (a
// Position.char() can only be rendered to 0 or more terminal characters)
// - unwrap(.): both positions are guranteed to exist in the line since we just
// got them from the faulty line, which is a subset of the whole error line
let (start_term_pos, end_term_pos) =
line_fmt.actual_columns(&faulty_part_of_line).unwrap();
let term_width = end_term_pos - start_term_pos;
num_fmt.spaces(output.writer())?;
{
let mut output = ColorOutput::new(output.writer());
output.set_color(self.level.color());
output.set_bold(true);
writeln!(
output.writer(),
"{spaces}{underline}",
spaces = " ".repeat(start_term_pos),
underline = "^".repeat(term_width)
)?;
}
}
}
Ok(())
}
}
/// Helper that prints a range of numbers with the correct
/// amount of padding
struct LineNumberFormatter {
width: usize,
}
impl LineNumberFormatter {
pub fn new(span: &Span<'_>) -> Self {
Self {
width: span.end_position().line_number().to_string().len(),
}
}
pub fn spaces(&self, writer: &mut dyn WriteColor) -> Result<(), Error> {
let mut output = ColorOutput::new(writer);
output.set_color(HIGHLIGHT_COLOR);
output.set_bold(true);
write!(output.writer(), " {} | ", " ".repeat(self.width))?;
Ok(())
}
pub fn number(&self, writer: &mut dyn WriteColor, line_number: usize) -> Result<(), Error> {
let mut output = ColorOutput::new(writer);
output.set_color(HIGHLIGHT_COLOR);
output.set_bold(true);
let padded_number = pad_left(&line_number.to_string(), self.width);
write!(output.writer(), " {} | ", padded_number)?;
Ok(())
}
}
pub fn pad_left(s: &str, pad: usize) -> String {
pad_left_with_char(s, pad, ' ')
}
pub fn pad_left_with_char(s: &str, pad: usize, chr: char) -> String {
format!(
"{padding}{string}",
padding = chr
.to_string()
.repeat(pad.checked_sub(s.len()).unwrap_or(0)),
string = s
)
}
/// Writes a user-supplied input line in a safe manner by replacing
/// control-characters with escape sequences.
struct LineFormatter<'span, 'file> {
line: &'span Span<'file>,
}
impl<'span, 'file> LineFormatter<'span, 'file> {
fn | (line: &'span Span<'file>) -> Self {
Self { line }
}
fn render(&self, writer: &mut dyn WriteColor) -> Result<(), Error> {
let mut output = ColorOutput::new(writer);
// TODO: implement an iterator
let chars = self.line.start_position().iter();
for position in chars {
let (text, color) = self.render_char(position.chr());
output.set_color(color);
write!(output.writer(), "{}", text)?;
if position == self.line.end_position() {
break;
}
}
writeln!(output.writer())?;
Ok(())
}
/// Map terminal columns to `Position` columns. Returns a inclusive
/// lower bound, and an exclusive upper bound.
///
/// Each printed character does not actually take up monospace grid cell.
/// For example a TAB character may be represented by 4 spaces. This
/// function will return the actual number of 'monospace grid cells'
/// rendered before the given
/// position.
///
/// Returns `None` if the column is out of bounds.
fn actual_columns(&self, span: &Span<'_>) -> Option<(usize, usize)> {
let lower = self.len_printed_before(span.start_position().column());
let upper = self.len_printed_before(span.end_position().column());
match (lower, upper) {
(Some(lower), Some(upper)) => {
let last_char_width = self.render_char(span.end_position().chr()).0.len();
Some((lower, upper + last_char_width))
}
_ => None,
}
}
fn len_printed_before(&self, col: usize) -> Option<usize> {
// TODO: get rid of this nonsense
// NOTE: it would actually be nice to condition the Position on the Line
// instead of AsciiFile. Thinking of this, we could actually just do
// `AsciiFile::new((span.as_str().as_bytes()))`. Meaning AsciiFile is
// not a file, but a View
// that restricts the
// linked lists in Positions and Spans to a subset of the file.
// TODO: implement an iterator on span, or
// span.to_view().iter()/.to_ascii_file().iter() this method is
// inherintly unsafe
// because we do not have
// a way to restrict
// positions in a type safe manner.
if self.line.len() < col {
return None;
}
let chars = self.line.start_position().iter();
let mut actual_column = 0;
for position in chars {
if position.column() == col {
break;
}
actual_column += self.render_char(position.chr()).0.len();
}
Some(actual_column)
}
fn render_char(&self, chr: char) -> (String, Option<Color>) {
match chr {
'\t' => (" ".repeat(TAB_WIDTH), None),
'\r' | '\n' => ("".to_string(), None),
chr if chr.is_control() => (
format!("{{{}}}", u8_to_printable_representation(chr as u8)),
HIGHLIGHT_COLOR,
),
_ => (chr.to_string(), None),
}
}
}
#[cfg(test)]
#[allow(clippy::print_stdout, clippy::use_debug)]
mod tests {
use super::*;
#[test]
fn test_pad_left() {
let tests = vec![("a", " a"), ("", " "), ("a", "a"), ("", "")];
for (input, expected) in tests {
println!("Testing: {:?} => {:?}", input, expected);
assert_eq!(expected, pad_left(input, expected.len()));
}
// not enough padding does not truncate string
assert_eq!("a", pad_left("a", 0));
}
}
| new | identifier_name |
diagnostics.rs | #![warn(
clippy::print_stdout,
clippy::unimplemented,
clippy::doc_markdown,
clippy::items_after_statements,
clippy::match_same_arms,
clippy::similar_names,
clippy::single_match_else,
clippy::use_self,
clippy::use_debug
)]
//! The diagnostics object controls the output of warnings and errors generated
//! by the compiler during the lexing, parsing and semantic analysis phases.
//! It also tracks the number of warnings and errors generated for flow control.
//!
//! This implementation is NOT thread-safe. Messages from different threads may
//! be interleaved.
use asciifile::{MaybeSpanned, Span, Spanned};
use failure::Error;
use std::{ascii::escape_default, cell::RefCell, collections::HashMap, fmt::Display};
use termcolor::{Color, WriteColor};
use utils::color::ColorOutput;
pub mod lint;
pub fn u8_to_printable_representation(byte: u8) -> String {
let bytes = escape_default(byte).collect::<Vec<u8>>();
let rep = unsafe { std::str::from_utf8_unchecked(&bytes) };
rep.to_owned()
}
/// This abstraction allows us to call the diagnostics API with pretty
/// much everything.
///
/// The following examples are all equivalent and will print a warning
/// without a source code snippet below the message:
///
/// ```rust,ignore
/// context.diagnostics.warning(&"Something went wrong");
/// context
/// .diagnostics
/// .warning(&WithoutSpan("Something went wrong"));
/// ```
///
/// The following examples will print a message with a source code
/// snippet. Note that all errors generated by the compiler are
/// a `Spanned<_, Fail>` and can therefore be directly passed to
/// the diagnostics API.
///
/// ```rust,ignore
/// // `lexer_error` is the `Err` returned by `Lexer::next`
/// context.diagnostics.error(&lexer_error);
/// // `span` is some `asciifile::Span`
/// context.diagnostics.error({
/// span: span,
/// data: "something went wrong"
/// });
/// ```
pub trait Printable<'a, 'b> {
fn as_maybe_spanned(&'b self) -> MaybeSpanned<'a, &'b dyn Display>;
}
// TODO: implementing on `str` (which is what you would like to do, to
// support calls with warning("aa") instead of warning(&"aa").
impl<'a, 'b> Printable<'a, 'b> for &'b str {
fn as_maybe_spanned(&'b self) -> MaybeSpanned<'a, &'b dyn Display> {
MaybeSpanned::WithoutSpan(self)
}
}
impl<'a, 'b, T: Display + 'b> Printable<'a, 'b> for Spanned<'a, T> {
fn as_maybe_spanned(&'b self) -> MaybeSpanned<'a, &'b dyn Display> {
MaybeSpanned::WithSpan(Spanned {
span: self.span,
data: &self.data,
})
}
}
impl<'a, 'b, T: Display + 'b> Printable<'a, 'b> for MaybeSpanned<'a, T> {
fn as_maybe_spanned(&'b self) -> MaybeSpanned<'a, &'b dyn Display> {
match self {
MaybeSpanned::WithSpan(ref spanned) => MaybeSpanned::WithSpan(Spanned {
span: spanned.span,
data: &spanned.data,
}),
MaybeSpanned::WithoutSpan(ref data) => MaybeSpanned::WithoutSpan(data),
}
}
}
/// Width of tabs in error and warning messages
const TAB_WIDTH: usize = 4;
/// Color used for rendering line numbers, escape sequences
/// and others...
const HIGHLIGHT_COLOR: Option<Color> = Some(Color::Cyan);
// TODO reimplement line truncation
/// Instead of writing errors, warnings and lints generated in the different
/// compiler stages directly to stdout, they are collected in this object.
///
/// This has several advantages:
/// - the output level can be adapted by users.
/// - we have a single source responsible for formatting compiler messages.
pub struct Diagnostics {
message_count: RefCell<HashMap<MessageLevel, usize>>,
writer: RefCell<Box<dyn WriteColor>>,
}
impl Diagnostics {
pub fn new(writer: Box<dyn WriteColor>) -> Self {
Self {
writer: RefCell::new(writer),
message_count: RefCell::new(HashMap::new()),
}
}
/// True when an error message was emitted, false
/// if only warnings were emitted.
pub fn errored(&self) -> bool {
self.message_count
.borrow()
.get(&MessageLevel::Error)
.is_some()
}
pub fn count(&self, level: MessageLevel) -> usize {
self.message_count
.borrow()
.get(&level)
.cloned()
.unwrap_or(0)
}
pub fn write_statistics(&self) {
let mut writer = self.writer.borrow_mut();
let mut output = ColorOutput::new(&mut **writer);
output.set_bold(true);
if self.errored() {
output.set_color(MessageLevel::Error.color());
writeln!(
output.writer(),
"Compilation aborted due to {}",
match self.count(MessageLevel::Error) {
1 => "an error".to_string(),
n => format!("{} errors", n),
}
)
.ok();
} else {
output.set_color(Some(Color::Green));
writeln!(
output.writer(),
"Compilation finished successfully {}",
match self.count(MessageLevel::Warning) {
0 => "without warnings".to_string(),
1 => "with a warning".to_string(),
n => format!("with {} warnings", n),
}
)
.ok();
}
}
/// Generate an error or a warning that is printed to the
/// writer given in the `new` constructor. Most of the time
/// this will be stderr.
pub fn emit(&self, level: MessageLevel, kind: MaybeSpanned<'_, &dyn Display>) {
self.increment_level_count(level);
let mut writer = self.writer.borrow_mut();
let msg = Message { level, kind };
// `ok()` surpresses io error
msg.write(&mut **writer).ok();
}
#[allow(dead_code)]
pub fn warning<'a, 'b, T: Printable<'a, 'b> + ?Sized>(&self, kind: &'b T) {
self.emit(MessageLevel::Warning, kind.as_maybe_spanned())
}
#[allow(dead_code)]
pub fn error<'a, 'b, T: Printable<'a, 'b> + ?Sized>(&self, kind: &'b T) {
self.emit(MessageLevel::Error, kind.as_maybe_spanned())
}
#[allow(dead_code)]
pub fn info<'a, 'b, T: Printable<'a, 'b> + ?Sized>(&self, kind: &'b T) {
self.emit(MessageLevel::Info, kind.as_maybe_spanned())
}
fn increment_level_count(&self, level: MessageLevel) {
let mut message_count = self.message_count.borrow_mut();
let counter = message_count.entry(level).or_insert(0);
*counter += 1;
}
}
#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)]
pub enum MessageLevel {
Error,
Warning,
Info,
Allow,
}
impl MessageLevel {
fn color(self) -> Option<Color> {
// Don't be confused by the return type.
// `None` means default color in the colorterm
// crate!
match self {
MessageLevel::Error => Some(Color::Red),
MessageLevel::Warning => Some(Color::Yellow),
MessageLevel::Info => Some(Color::Cyan),
MessageLevel::Allow => None,
}
}
fn name(&self) -> &str {
match self {
MessageLevel::Error => "error",
MessageLevel::Warning => "warning",
MessageLevel::Info => "info",
MessageLevel::Allow => "allow",
}
}
pub fn from_string(level: &str) -> Option<Self> {
match level {
"allow" => Some(MessageLevel::Allow),
"info" => Some(MessageLevel::Info),
"warning" => Some(MessageLevel::Warning),
"error" => Some(MessageLevel::Error),
_ => None,
}
}
}
pub struct Message<'file, 'msg> {
pub level: MessageLevel,
pub kind: MaybeSpanned<'file, &'msg dyn Display>,
}
impl<'file, 'msg> Message<'file, 'msg> {
pub fn write(&self, writer: &mut dyn WriteColor) -> Result<(), Error> {
match &self.kind {
MaybeSpanned::WithoutSpan(_) => {
self.write_description(writer)?;
}
MaybeSpanned::WithSpan(spanned) => {
self.write_description(writer)?;
self.write_code(writer, &spanned.span)?;
}
}
writeln!(writer)?;
Ok(())
}
fn write_description(&self, writer: &mut dyn WriteColor) -> Result<(), Error> {
let mut output = ColorOutput::new(writer);
output.set_color(self.level.color());
output.set_bold(true);
write!(output.writer(), "{}: ", self.level.name())?;
output.set_color(None);
writeln!(output.writer(), "{}", *self.kind)?;
Ok(())
}
fn write_code(&self, writer: &mut dyn WriteColor, error: &Span<'_>) -> Result<(), Error> {
let mut output = ColorOutput::new(writer);
let num_fmt = LineNumberFormatter::new(error);
num_fmt.spaces(output.writer())?;
writeln!(output.writer())?;
for (line_number, line) in error.lines().numbered() {
let line_fmt = LineFormatter::new(&line);
num_fmt.number(output.writer(), line_number)?;
line_fmt.render(output.writer())?;
// currently, the span will always exist since we take the line from the error
// but future versions may print a line below and above for context that
// is not part of the error
if let Some(faulty_part_of_line) = Span::intersect(error, &line) {
// TODO: implement this without the following 3 assumptions:
// - start_pos - end_pos >= 0, guranteed by data structure invariant of Span
// - start_term_pos - end_term_pos >= 0, guranteed by monotony of columns (a
// Position.char() can only be rendered to 0 or more terminal characters)
// - unwrap(.): both positions are guranteed to exist in the line since we just
// got them from the faulty line, which is a subset of the whole error line
let (start_term_pos, end_term_pos) =
line_fmt.actual_columns(&faulty_part_of_line).unwrap();
let term_width = end_term_pos - start_term_pos;
num_fmt.spaces(output.writer())?;
{
let mut output = ColorOutput::new(output.writer());
output.set_color(self.level.color());
output.set_bold(true);
writeln!(
output.writer(),
"{spaces}{underline}",
spaces = " ".repeat(start_term_pos),
underline = "^".repeat(term_width)
)?;
}
}
}
Ok(())
}
}
/// Helper that prints a range of numbers with the correct
/// amount of padding
struct LineNumberFormatter {
width: usize,
}
impl LineNumberFormatter {
pub fn new(span: &Span<'_>) -> Self {
Self {
width: span.end_position().line_number().to_string().len(),
}
}
pub fn spaces(&self, writer: &mut dyn WriteColor) -> Result<(), Error> {
let mut output = ColorOutput::new(writer);
output.set_color(HIGHLIGHT_COLOR);
output.set_bold(true);
write!(output.writer(), " {} | ", " ".repeat(self.width))?;
Ok(())
}
pub fn number(&self, writer: &mut dyn WriteColor, line_number: usize) -> Result<(), Error> {
let mut output = ColorOutput::new(writer);
output.set_color(HIGHLIGHT_COLOR);
output.set_bold(true);
let padded_number = pad_left(&line_number.to_string(), self.width);
write!(output.writer(), " {} | ", padded_number)?;
Ok(())
}
}
pub fn pad_left(s: &str, pad: usize) -> String {
pad_left_with_char(s, pad, ' ')
}
pub fn pad_left_with_char(s: &str, pad: usize, chr: char) -> String {
format!(
"{padding}{string}",
padding = chr
.to_string()
.repeat(pad.checked_sub(s.len()).unwrap_or(0)),
string = s
)
}
/// Writes a user-supplied input line in a safe manner by replacing
/// control-characters with escape sequences.
struct LineFormatter<'span, 'file> {
line: &'span Span<'file>,
}
impl<'span, 'file> LineFormatter<'span, 'file> {
fn new(line: &'span Span<'file>) -> Self {
Self { line }
}
fn render(&self, writer: &mut dyn WriteColor) -> Result<(), Error> {
let mut output = ColorOutput::new(writer);
// TODO: implement an iterator
let chars = self.line.start_position().iter();
for position in chars {
let (text, color) = self.render_char(position.chr());
output.set_color(color);
write!(output.writer(), "{}", text)?;
if position == self.line.end_position() {
break;
}
}
writeln!(output.writer())?;
Ok(())
}
/// Map terminal columns to `Position` columns. Returns a inclusive
/// lower bound, and an exclusive upper bound.
///
/// Each printed character does not actually take up monospace grid cell.
/// For example a TAB character may be represented by 4 spaces. This
/// function will return the actual number of 'monospace grid cells'
/// rendered before the given
/// position.
///
/// Returns `None` if the column is out of bounds.
fn actual_columns(&self, span: &Span<'_>) -> Option<(usize, usize)> {
let lower = self.len_printed_before(span.start_position().column());
let upper = self.len_printed_before(span.end_position().column());
match (lower, upper) {
(Some(lower), Some(upper)) => {
let last_char_width = self.render_char(span.end_position().chr()).0.len();
Some((lower, upper + last_char_width))
}
_ => None,
}
}
fn len_printed_before(&self, col: usize) -> Option<usize> |
fn render_char(&self, chr: char) -> (String, Option<Color>) {
match chr {
'\t' => (" ".repeat(TAB_WIDTH), None),
'\r' | '\n' => ("".to_string(), None),
chr if chr.is_control() => (
format!("{{{}}}", u8_to_printable_representation(chr as u8)),
HIGHLIGHT_COLOR,
),
_ => (chr.to_string(), None),
}
}
}
#[cfg(test)]
#[allow(clippy::print_stdout, clippy::use_debug)]
mod tests {
use super::*;
#[test]
fn test_pad_left() {
let tests = vec![("a", " a"), ("", " "), ("a", "a"), ("", "")];
for (input, expected) in tests {
println!("Testing: {:?} => {:?}", input, expected);
assert_eq!(expected, pad_left(input, expected.len()));
}
// not enough padding does not truncate string
assert_eq!("a", pad_left("a", 0));
}
}
| {
// TODO: get rid of this nonsense
// NOTE: it would actually be nice to condition the Position on the Line
// instead of AsciiFile. Thinking of this, we could actually just do
// `AsciiFile::new((span.as_str().as_bytes()))`. Meaning AsciiFile is
// not a file, but a View
// that restricts the
// linked lists in Positions and Spans to a subset of the file.
// TODO: implement an iterator on span, or
// span.to_view().iter()/.to_ascii_file().iter() this method is
// inherintly unsafe
// because we do not have
// a way to restrict
// positions in a type safe manner.
if self.line.len() < col {
return None;
}
let chars = self.line.start_position().iter();
let mut actual_column = 0;
for position in chars {
if position.column() == col {
break;
}
actual_column += self.render_char(position.chr()).0.len();
}
Some(actual_column)
} | identifier_body |
LAB2_OpRes.py | # System libraries
import time
# Third party libraries
import networkx as nx
# Our libraries
import input_controls as inc
import graph_topologies as gt
import graph_traffic_matrix as tm
import ltd_utilities as ltd
def LTD_random(n, n_edges, delta_in, delta_out, traffic_matrix, title = 'Random LTD - Comparisons', userView = True, withLabels = True):
'''
This function solves the LTD problem generating a random topology, according to the input specified criteria:
- "n" is the number of nodes
- "n_edges" is the number of edges
- "delta_in" is the maximum number of receivers per node
- "delta_out" is the maximum number of transmitters per node
- "traffic_matrix" is the traffic matrix, used to decide edges' flow values
- title: graph's title and output files names (.txt e .png)
- userView: boolean, used to require the visualization of the topology and the log of the results on screen
- withLabels: boolean, used to require the visualization of the flow labels in the obtained topology photo
'''
# INPUT CONTROL
# n, delta_in, delta_out and traffic_matrix
ltd.input_control(n, traffic_matrix, delta_in, delta_out)
# n_edges: extreme case are the ring or the full mesh topologies
inc.check_integer(n_edges, 'n_edges', minValue = n, maxValue = n * (n - 1))
# ALGORITHM
# Computation starting time
initial_time = time.time()
# Create the topology (oriented random graph)
T = gt.random_topology(n, n_edges, delta_in, delta_out)
# Result
return ltd.result(T, traffic_matrix, delta_in, delta_out, initial_time, title, userView, withLabels, 'Random')
def greedy_LTD_mesh(n, traffic_matrix, delta_in, delta_out, title = 'Sol. 1 - Mesh LTD', userView = True, withLabels = True):
'''
This function generates a network topolgy in order to solve, using a greedy approach, an LTD problem.
Input parameters are:
- n: number of nodes
- traffic_matrix: traffic matrix (mean traffic value exchanged by node pairs)
- delta_in: constraint on the maximum number of receivers per node
- delta_out: constraint on the maximum number of trnasmitters per node
- title: graph's title and output files names (.txt e .png)
- userView: boolean, used to require the visualization of the topology and the log of the results on screen
- withLabels: boolean, used to require the visualization of the flow labels in the obtained topology photo
'''
# INPUT CONTROL
ltd.input_control(n, traffic_matrix, delta_in, delta_out)
# UTILITY FUNCTIONS
def edges_to_check(n, traffic_matrix):
'''
Lists the edges of the graph "G", ordered by their flow value (ascending order)
'''
G = gt.loaded_mesh_topology(n, traffic_matrix)
edges_to_check = []
for e in G.edges():
# Associate the flow to the edge
u = e[0]
v = e[1]
f = G.edge[u][v]['flow']
edges_to_check.append({
'edge': e,
'flow': f
})
edges_to_check.sort(key = lambda x: x['flow'])
return edges_to_check
# ALGORITHM
# Computation starting time
initial_time = time.time()
# Print on the screen the traffic matrix content
tm.print_TM(traffic_matrix)
# If one of the deltas is equal to 1, I know for sure that the resulting topology has to be a ring
if delta_in == 1 or delta_out == 1:
T = gt.ring_topology(n)
else:
# Instantiate the initial full mesh topology, from which I'm going to remove edges
T = gt.mesh_topology(n)
# This array contains the graph's edges, sorted according their flow (ascending order)
edges_to_check = edges_to_check(n, traffic_matrix)
# OPTIMIZE THE TOPOLOGY
# Noe, I have to remove edges until the delta contraints are satisfied
# BUT: I could find edges impossible to remove...
print('\nPlease wait...')
while (not ltd.check_global_delta_constraints(T, delta_in, delta_out)) and len(edges_to_check) > 0:
# The edge I try to remove first is the one with minimum flow value
edge_to_remove = edges_to_check.pop(0)['edge']
# Nodes of the selected edge
u = edge_to_remove[0]
v = edge_to_remove[1]
# Analyzing the delta constraint on "u" and "v", I could find that it is not necessary to remove this edge
u_out_degree = T.out_degree()[u]
v_in_degree = T.in_degree()[v]
# Check if I really need to remove the edge
if u_out_degree > delta_out or v_in_degree > delta_in:
# Verify that, once the edge is removed, the resulting graph will not be disconnected
if gt.has_alternative_paths(T, edge_to_remove):
# I can remove the selected edge
T.remove_edge(u, v)
# Result
return ltd.result(T, traffic_matrix, delta_in, delta_out, initial_time, title, userView, withLabels, 'Mesh')
def greedy_LTD_ring(n, traffic_matrix, delta_in, delta_out, title = 'Sol. 2 - Ring LTD', userView = True, withLabels = True):
'''
This function computes a network topology in order to solve, using a greedy approach, an LTD problem.
With respect to the function "greedy_LTD_mesh", here the starting topology is a ring: the idea is to add edges
to it until the delta constraints are satisfied.
Input parameters are:
- n: number of nodes
- traffic_matrix: traffic matrix (mean traffic value exchanged by node pairs)
- delta_in: constraint on the maximum number of receivers per node
- delta_out: constraint on the maximum number of trnasmitters per node
- title: graph's title and output files names (.txt e .png)
- userView: boolean, used to require the visualization of the topology and the log of the results on screen
- withLabels: boolean, used to require the visualization of the flow labels in the obtained topology photo
'''
# INPUT CONTROL
ltd.input_control(n, traffic_matrix, delta_in, delta_out)
# UTILITY FUNCTIONS
def edges_to_check(G, traffic_matrix):
'''
This function lists the possible edges I can add to the topology, sorted by decreasing flow value
'''
res = []
nodes = G.nodes()
edges = G.edges()
# Loop on the traffic matrix values
for u in nodes:
for v in nodes:
# No zero-flow edges (self-loops included)
f = traffic_matrix[u][v]
if f > 0:
# The edge must not already exist i the topology
e = (u, v)
if e not in edges:
# I've foun a candidate edge
res.append({
'edge': e,
'flow': f
})
# Sort by decreasing fow value
res.sort(key = lambda x: x['flow'], reverse = True)
# Result
return res
def check_can_add_edges(G, delta_in, delta_out):
'''
This function verify that exist at least 2 nodes, different each other, having at least
a free receiver and a free transmitter
'''
# Input/output degree of the graph's nodes
in_deg = G.in_degree()
out_deg = G.out_degree()
# Graph's nodes
nodes = G.nodes()
# Check the delta_in constraint
res_ok = False
for x in nodes:
if in_deg[x] < delta_in:
# I've found a node with a free receiver
# Check now the delta_out constraint
for y in nodes:
if y != x and out_deg[y] < delta_out:
# I've found another node with a free transmitter
res_ok = True
break
if res_ok:
break
# Result
return res_ok
# ALGORITHM
# Computation starting time
initial_time = time.time()
# Print on screen the content of the traffic matrix
tm.print_TM(traffic_matrix)
# The starting topology is a ring
T = gt.ring_topology(n)
# If one of the delta constraints is equal to 1, I know for sure that the resulting topology will be the starting one
if delta_in > 1 and delta_out > 1:
# Graph's edges, serted by decreasing flow values
edges_to_check = edges_to_check(T, traffic_matrix)
# OPTIMIZE THE TOPOLOGY
# Now, I have to add edges until the delta constraints allow me to do that
# BUT: I could find edges impossible to add...
print('\nPlease wait...')
while check_can_add_edges(T, delta_in, delta_out) and len(edges_to_check) > 0:
# The edge I'm going to try to add is the one with the least associated flow value
edge_to_add = edges_to_check.pop(0)['edge']
# Nodes of the selected edge
u = edge_to_add[0]
v = edge_to_add[1]
# Check if the selected edge can be added to the topology
u_out_degree = T.out_degree()[u]
v_in_degree = T.in_degree()[v]
if u_out_degree < delta_out and v_in_degree < delta_in:
# Add the selected edge
T.add_edge(u, v, flow = 0.0)
# Result
return ltd.result(T, traffic_matrix, delta_in, delta_out, initial_time, title, userView, withLabels, 'Ring')
def LTD_manhattan_smart(n, nr, nc, traffic_matrix, title = 'Manhattan LTD', userView = True, withLabels = True):
'''
This function creates a Manattan topology and, according to the input traffic matrix, solves an LTD problem.
Input parameters are:
- n: number of nodes in the topology, placed as a "rectangle"
- nr: number of nodes per row
- nc: number of nodes per column
- traffic_matrix: traffic matrix (mean traffic value exchanged by node pairs)
- title: graph's title and output files names (.txt e .png)
- userView: boolean, used to require the visualization of the topology and the log of the results on screen
- withLabels: boolean, used to require the visualization of the flow labels in the obtained topology photo
'''
# UTILITY FUNCTIONS
def max_pair(T):
'''
Retrieve indexes of the highest traffic value into the traffic matrix
'''
# Init variables
maxV = -1
s_res = None
d_res = None
# T is a matrix n x n
n = len(T)
# loop over the matrix, to find the maximum value
for s in range(n):
for d in range(n):
if T[s][d] > maxV:
# Update max values
maxV = T[s][d]
s_res = s
d_res = d
# Result
return (s_res, d_res)
def copy_TM(T):
|
def empty_place(G, n):
'''
Verify that the position "n" of the "G" is empty
'''
return G.node[n]['name'] == None
def place_node(G, pos, name):
'''
Place the node "name" into the position "pos" of the topology "G"
'''
G.node[pos]['name'] = name
def node_position(G, n):
'''
Retrieve the position in the topology "G" of a node whose name is "n", already positioned
'''
res = None
# Loop over positions
for p in G.nodes():
if G.node[p]['name'] == n:
# I've found the position for the node named "n"
res = p
break
# Result
return res
def place_2_nodes(G, s, d):
'''
Try to place in "G" nodes "s" and "d": there must be two adjacent places
'''
# Loop over positions
for u in G.nodes():
# If not positioned, control adjacent nodes
if empty_place(G, u):
if place_1_node(G, u, d):
# I've found a free position and I've placed the second node
place_node(G, pos = u, name = s)
return True
# No adjacent places for "s" and "d"
return False
def place_1_node(G, p, x):
'''
Try to place in "G" node "x" near to "p"
'''
# Loop over "p" adjacent places
for v in G.edge[p].keys():
if empty_place(G, v):
# I've found two free adjacent places
place_node(G, pos = v, name = x)
return True
# I haven't found an available place for "x"
return False
# Start computation time, in seconds
initial_time = time.time()
# Print the content of the traffic matrix
tm.print_TM(traffic_matrix)
# Create a copy of the traffic matrix (to avoid reference pointers)
tm_temp = copy_TM(traffic_matrix)
# First of all, retrieve the starting topology
T_temp = gt.manhattan_topology(nr, nc)
# Then, name nodes using an "empty" name
nodes = T_temp.nodes()
for n in nodes:
T_temp.node[n]['name'] = None
# STEP 0
# End of computation flag
end = False
# Placed nodes
S = set()
# Not placed yet nodes
L = set(nodes)
while not end:
# STEP 1
# Retrieve the pair of nodes who exchange most traffic
s, d = max_pair(tm_temp)
tm_temp[s][d] = -1
# STEP 2
s_placed = s in S
d_placed = d in S
# Both nodes of the pair are not placed
if not s_placed and not d_placed:
# STEP 3
# Try to place these nodes
if place_2_nodes(T_temp, s, d):
# Mark as placed
S.add(s)
S.add(d)
L.remove(s)
L.remove(d)
# Only one of the nodes is placed
elif (s_placed and not d_placed) or (not s_placed and d_placed):
# STEP 4
# Let us call "p" the placed node, "x" the other one
if s_placed:
p = s
x = d
else:
p = d
x = s
# Try to place node "x"
if place_1_node(T_temp, node_position(T_temp, p), x):
# Mark as placed
S.add(x)
L.remove(x)
# STEP 5
# Both nodes were already placed, or their placement attempt failed
# Control if I have other nodes to place
end = len(L) == 0
# Now, create a second Manhattan topology in which nodes are swapped
T = gt.manhattan_topology(nr, nc, derived = T_temp)
# Decide how deep is the existing path research between a pair of nodes
depth = nr-1 if nr == nc else nr/2 + nc/2
# Route traffic according to the "water filling" principle
return ltd.result(T, traffic_matrix, 4, 4, initial_time, title, userView, withLabels, 'Manhattan', depth)
def LTD_manhattan(n, nr, nc, traffic_matrix, title = 'Manhattan LTD', userView = True, withLabels = True):
'''
This function creates a Manattan topology and, according to the input traffic matrix, solves an LTD problem.
Input parameters are:
- n: number of nodes in the topology, placed as a "rectangle"
- nr: number of nodes per row
- nc: number of nodes per column
- traffic_matrix: traffic matrix (mean traffic value exchanged by node pairs)
- title: graph's title and output files names (.txt e .png)
- userView: boolean, used to require the visualization of the topology and the log of the results on screen
- withLabels: boolean, used to require the visualization of the flow labels in the obtained topology photo
'''
# Computation starting time, in seconds
initial_time = time.time()
# Print on screen the content of the traffic matrix
tm.print_TM(traffic_matrix)
# First of all, compute the topology
T = gt.manhattan_topology(nr, nc)
# Evaluate the maximum search depth for the paths between pairs of nodes
depth = nr-1 if nr == nc else nr/2 + nc/2
# Now, route the traffic according to the "water filling" principle
return ltd.result(T, traffic_matrix, 4, 4, initial_time, title, userView, withLabels, 'Manhattan', depth)
def greedy_LTD_start():
'''
Shortcut: called by the user, in order to retrieve several solutions and compare them each other
Parameters used to create the graphs are take as input from the user
'''
# Number of nodes
n = inc.input_int('Number of nodes', minValue = 1)
# Extreme values for the traffic matrix
TM_min = inc.input_int('Traffic matrix lower bound', minValue = 1)
TM_max = inc.input_int('Traffic matrix upper bound', minValue = TM_min)
# Delta values
delta_in = inc.input_int('Delta_in (max #rx per node)', minValue = 1)
delta_out = inc.input_int('Delta_out (max #tx per node)', minValue = 1)
# Traffic matrix
traffic_matrix = tm.random_TM(n, TM_min, TM_max)
# Results:
T1 = greedy_LTD_mesh(n, traffic_matrix, delta_in, delta_out)
T1_bis = LTD_random(n, len(T1.edges()), delta_in, delta_out, traffic_matrix)
T2 = greedy_LTD_ring(n, traffic_matrix, delta_in, delta_out)
T2_bis = LTD_random(n, len(T2.edges()), delta_in, delta_out, traffic_matrix)
# Executable code (main)
if __name__ == '__main__':
T = greedy_LTD_start()
| '''
Retrieve a copy for the given traffic matrix (avoid pointer references)
'''
res = []
for row in T:
res.append([])
for c in row:
res[-1].append(c)
return res | identifier_body |
LAB2_OpRes.py | # System libraries
import time
# Third party libraries
import networkx as nx
# Our libraries
import input_controls as inc
import graph_topologies as gt
import graph_traffic_matrix as tm
import ltd_utilities as ltd
def LTD_random(n, n_edges, delta_in, delta_out, traffic_matrix, title = 'Random LTD - Comparisons', userView = True, withLabels = True):
'''
This function solves the LTD problem generating a random topology, according to the input specified criteria:
- "n" is the number of nodes
- "n_edges" is the number of edges
- "delta_in" is the maximum number of receivers per node
- "delta_out" is the maximum number of transmitters per node
- "traffic_matrix" is the traffic matrix, used to decide edges' flow values
- title: graph's title and output files names (.txt e .png)
- userView: boolean, used to require the visualization of the topology and the log of the results on screen
- withLabels: boolean, used to require the visualization of the flow labels in the obtained topology photo
'''
# INPUT CONTROL
# n, delta_in, delta_out and traffic_matrix
ltd.input_control(n, traffic_matrix, delta_in, delta_out)
# n_edges: extreme case are the ring or the full mesh topologies
inc.check_integer(n_edges, 'n_edges', minValue = n, maxValue = n * (n - 1))
# ALGORITHM
# Computation starting time
initial_time = time.time()
# Create the topology (oriented random graph)
T = gt.random_topology(n, n_edges, delta_in, delta_out)
# Result
return ltd.result(T, traffic_matrix, delta_in, delta_out, initial_time, title, userView, withLabels, 'Random')
def greedy_LTD_mesh(n, traffic_matrix, delta_in, delta_out, title = 'Sol. 1 - Mesh LTD', userView = True, withLabels = True):
'''
This function generates a network topolgy in order to solve, using a greedy approach, an LTD problem.
Input parameters are:
- n: number of nodes
- traffic_matrix: traffic matrix (mean traffic value exchanged by node pairs)
- delta_in: constraint on the maximum number of receivers per node
- delta_out: constraint on the maximum number of trnasmitters per node
- title: graph's title and output files names (.txt e .png)
- userView: boolean, used to require the visualization of the topology and the log of the results on screen
- withLabels: boolean, used to require the visualization of the flow labels in the obtained topology photo
'''
# INPUT CONTROL
ltd.input_control(n, traffic_matrix, delta_in, delta_out)
# UTILITY FUNCTIONS
def edges_to_check(n, traffic_matrix):
'''
Lists the edges of the graph "G", ordered by their flow value (ascending order)
'''
G = gt.loaded_mesh_topology(n, traffic_matrix)
edges_to_check = []
for e in G.edges():
# Associate the flow to the edge
u = e[0]
v = e[1]
f = G.edge[u][v]['flow']
edges_to_check.append({
'edge': e,
'flow': f
})
edges_to_check.sort(key = lambda x: x['flow'])
return edges_to_check
# ALGORITHM
# Computation starting time
initial_time = time.time()
# Print on the screen the traffic matrix content
tm.print_TM(traffic_matrix)
# If one of the deltas is equal to 1, I know for sure that the resulting topology has to be a ring
if delta_in == 1 or delta_out == 1:
T = gt.ring_topology(n)
else:
# Instantiate the initial full mesh topology, from which I'm going to remove edges
T = gt.mesh_topology(n)
# This array contains the graph's edges, sorted according their flow (ascending order)
edges_to_check = edges_to_check(n, traffic_matrix)
# OPTIMIZE THE TOPOLOGY
# Noe, I have to remove edges until the delta contraints are satisfied
# BUT: I could find edges impossible to remove...
print('\nPlease wait...')
while (not ltd.check_global_delta_constraints(T, delta_in, delta_out)) and len(edges_to_check) > 0:
# The edge I try to remove first is the one with minimum flow value
edge_to_remove = edges_to_check.pop(0)['edge']
# Nodes of the selected edge
u = edge_to_remove[0]
v = edge_to_remove[1]
# Analyzing the delta constraint on "u" and "v", I could find that it is not necessary to remove this edge
u_out_degree = T.out_degree()[u]
v_in_degree = T.in_degree()[v]
# Check if I really need to remove the edge
if u_out_degree > delta_out or v_in_degree > delta_in:
# Verify that, once the edge is removed, the resulting graph will not be disconnected
if gt.has_alternative_paths(T, edge_to_remove):
# I can remove the selected edge
T.remove_edge(u, v)
# Result
return ltd.result(T, traffic_matrix, delta_in, delta_out, initial_time, title, userView, withLabels, 'Mesh')
def greedy_LTD_ring(n, traffic_matrix, delta_in, delta_out, title = 'Sol. 2 - Ring LTD', userView = True, withLabels = True):
'''
This function computes a network topology in order to solve, using a greedy approach, an LTD problem.
With respect to the function "greedy_LTD_mesh", here the starting topology is a ring: the idea is to add edges
to it until the delta constraints are satisfied.
Input parameters are:
- n: number of nodes
- traffic_matrix: traffic matrix (mean traffic value exchanged by node pairs)
- delta_in: constraint on the maximum number of receivers per node
- delta_out: constraint on the maximum number of trnasmitters per node
- title: graph's title and output files names (.txt e .png)
- userView: boolean, used to require the visualization of the topology and the log of the results on screen
- withLabels: boolean, used to require the visualization of the flow labels in the obtained topology photo
'''
# INPUT CONTROL
ltd.input_control(n, traffic_matrix, delta_in, delta_out)
# UTILITY FUNCTIONS
def edges_to_check(G, traffic_matrix):
'''
This function lists the possible edges I can add to the topology, sorted by decreasing flow value
'''
res = []
nodes = G.nodes()
edges = G.edges()
# Loop on the traffic matrix values
for u in nodes:
for v in nodes:
# No zero-flow edges (self-loops included)
f = traffic_matrix[u][v]
if f > 0:
# The edge must not already exist i the topology
e = (u, v)
if e not in edges:
# I've foun a candidate edge
res.append({
'edge': e,
'flow': f
})
# Sort by decreasing fow value
res.sort(key = lambda x: x['flow'], reverse = True)
# Result
return res
def check_can_add_edges(G, delta_in, delta_out):
'''
This function verify that exist at least 2 nodes, different each other, having at least
a free receiver and a free transmitter
'''
# Input/output degree of the graph's nodes
in_deg = G.in_degree()
out_deg = G.out_degree()
# Graph's nodes
nodes = G.nodes()
# Check the delta_in constraint
res_ok = False
for x in nodes:
if in_deg[x] < delta_in:
# I've found a node with a free receiver
# Check now the delta_out constraint
for y in nodes:
if y != x and out_deg[y] < delta_out:
# I've found another node with a free transmitter
res_ok = True
break
if res_ok:
break
# Result
return res_ok
# ALGORITHM
# Computation starting time
initial_time = time.time()
# Print on screen the content of the traffic matrix
tm.print_TM(traffic_matrix)
# The starting topology is a ring
T = gt.ring_topology(n)
# If one of the delta constraints is equal to 1, I know for sure that the resulting topology will be the starting one
if delta_in > 1 and delta_out > 1:
# Graph's edges, serted by decreasing flow values
edges_to_check = edges_to_check(T, traffic_matrix)
# OPTIMIZE THE TOPOLOGY
# Now, I have to add edges until the delta constraints allow me to do that
# BUT: I could find edges impossible to add...
print('\nPlease wait...')
while check_can_add_edges(T, delta_in, delta_out) and len(edges_to_check) > 0:
# The edge I'm going to try to add is the one with the least associated flow value
edge_to_add = edges_to_check.pop(0)['edge']
# Nodes of the selected edge
u = edge_to_add[0]
v = edge_to_add[1]
# Check if the selected edge can be added to the topology
u_out_degree = T.out_degree()[u]
v_in_degree = T.in_degree()[v]
if u_out_degree < delta_out and v_in_degree < delta_in:
# Add the selected edge
T.add_edge(u, v, flow = 0.0)
# Result
return ltd.result(T, traffic_matrix, delta_in, delta_out, initial_time, title, userView, withLabels, 'Ring')
def LTD_manhattan_smart(n, nr, nc, traffic_matrix, title = 'Manhattan LTD', userView = True, withLabels = True):
'''
This function creates a Manattan topology and, according to the input traffic matrix, solves an LTD problem.
Input parameters are:
- n: number of nodes in the topology, placed as a "rectangle"
- nr: number of nodes per row
- nc: number of nodes per column
- traffic_matrix: traffic matrix (mean traffic value exchanged by node pairs)
- title: graph's title and output files names (.txt e .png)
- userView: boolean, used to require the visualization of the topology and the log of the results on screen
- withLabels: boolean, used to require the visualization of the flow labels in the obtained topology photo
'''
# UTILITY FUNCTIONS
def max_pair(T):
'''
Retrieve indexes of the highest traffic value into the traffic matrix
'''
# Init variables
maxV = -1
s_res = None
d_res = None
# T is a matrix n x n
n = len(T)
# loop over the matrix, to find the maximum value
for s in range(n):
for d in range(n):
if T[s][d] > maxV:
# Update max values
maxV = T[s][d]
s_res = s
d_res = d
# Result
return (s_res, d_res)
def copy_TM(T):
'''
Retrieve a copy for the given traffic matrix (avoid pointer references)
'''
res = []
for row in T:
res.append([])
for c in row:
res[-1].append(c)
return res
def empty_place(G, n):
'''
Verify that the position "n" of the "G" is empty
'''
return G.node[n]['name'] == None
def place_node(G, pos, name):
'''
Place the node "name" into the position "pos" of the topology "G"
'''
G.node[pos]['name'] = name
def node_position(G, n):
'''
Retrieve the position in the topology "G" of a node whose name is "n", already positioned
'''
res = None
# Loop over positions
for p in G.nodes():
if G.node[p]['name'] == n:
# I've found the position for the node named "n"
res = p
break | return res
def place_2_nodes(G, s, d):
'''
Try to place in "G" nodes "s" and "d": there must be two adjacent places
'''
# Loop over positions
for u in G.nodes():
# If not positioned, control adjacent nodes
if empty_place(G, u):
if place_1_node(G, u, d):
# I've found a free position and I've placed the second node
place_node(G, pos = u, name = s)
return True
# No adjacent places for "s" and "d"
return False
def place_1_node(G, p, x):
'''
Try to place in "G" node "x" near to "p"
'''
# Loop over "p" adjacent places
for v in G.edge[p].keys():
if empty_place(G, v):
# I've found two free adjacent places
place_node(G, pos = v, name = x)
return True
# I haven't found an available place for "x"
return False
# Start computation time, in seconds
initial_time = time.time()
# Print the content of the traffic matrix
tm.print_TM(traffic_matrix)
# Create a copy of the traffic matrix (to avoid reference pointers)
tm_temp = copy_TM(traffic_matrix)
# First of all, retrieve the starting topology
T_temp = gt.manhattan_topology(nr, nc)
# Then, name nodes using an "empty" name
nodes = T_temp.nodes()
for n in nodes:
T_temp.node[n]['name'] = None
# STEP 0
# End of computation flag
end = False
# Placed nodes
S = set()
# Not placed yet nodes
L = set(nodes)
while not end:
# STEP 1
# Retrieve the pair of nodes who exchange most traffic
s, d = max_pair(tm_temp)
tm_temp[s][d] = -1
# STEP 2
s_placed = s in S
d_placed = d in S
# Both nodes of the pair are not placed
if not s_placed and not d_placed:
# STEP 3
# Try to place these nodes
if place_2_nodes(T_temp, s, d):
# Mark as placed
S.add(s)
S.add(d)
L.remove(s)
L.remove(d)
# Only one of the nodes is placed
elif (s_placed and not d_placed) or (not s_placed and d_placed):
# STEP 4
# Let us call "p" the placed node, "x" the other one
if s_placed:
p = s
x = d
else:
p = d
x = s
# Try to place node "x"
if place_1_node(T_temp, node_position(T_temp, p), x):
# Mark as placed
S.add(x)
L.remove(x)
# STEP 5
# Both nodes were already placed, or their placement attempt failed
# Control if I have other nodes to place
end = len(L) == 0
# Now, create a second Manhattan topology in which nodes are swapped
T = gt.manhattan_topology(nr, nc, derived = T_temp)
# Decide how deep is the existing path research between a pair of nodes
depth = nr-1 if nr == nc else nr/2 + nc/2
# Route traffic according to the "water filling" principle
return ltd.result(T, traffic_matrix, 4, 4, initial_time, title, userView, withLabels, 'Manhattan', depth)
def LTD_manhattan(n, nr, nc, traffic_matrix, title = 'Manhattan LTD', userView = True, withLabels = True):
'''
This function creates a Manattan topology and, according to the input traffic matrix, solves an LTD problem.
Input parameters are:
- n: number of nodes in the topology, placed as a "rectangle"
- nr: number of nodes per row
- nc: number of nodes per column
- traffic_matrix: traffic matrix (mean traffic value exchanged by node pairs)
- title: graph's title and output files names (.txt e .png)
- userView: boolean, used to require the visualization of the topology and the log of the results on screen
- withLabels: boolean, used to require the visualization of the flow labels in the obtained topology photo
'''
# Computation starting time, in seconds
initial_time = time.time()
# Print on screen the content of the traffic matrix
tm.print_TM(traffic_matrix)
# First of all, compute the topology
T = gt.manhattan_topology(nr, nc)
# Evaluate the maximum search depth for the paths between pairs of nodes
depth = nr-1 if nr == nc else nr/2 + nc/2
# Now, route the traffic according to the "water filling" principle
return ltd.result(T, traffic_matrix, 4, 4, initial_time, title, userView, withLabels, 'Manhattan', depth)
def greedy_LTD_start():
'''
Shortcut: called by the user, in order to retrieve several solutions and compare them each other
Parameters used to create the graphs are take as input from the user
'''
# Number of nodes
n = inc.input_int('Number of nodes', minValue = 1)
# Extreme values for the traffic matrix
TM_min = inc.input_int('Traffic matrix lower bound', minValue = 1)
TM_max = inc.input_int('Traffic matrix upper bound', minValue = TM_min)
# Delta values
delta_in = inc.input_int('Delta_in (max #rx per node)', minValue = 1)
delta_out = inc.input_int('Delta_out (max #tx per node)', minValue = 1)
# Traffic matrix
traffic_matrix = tm.random_TM(n, TM_min, TM_max)
# Results:
T1 = greedy_LTD_mesh(n, traffic_matrix, delta_in, delta_out)
T1_bis = LTD_random(n, len(T1.edges()), delta_in, delta_out, traffic_matrix)
T2 = greedy_LTD_ring(n, traffic_matrix, delta_in, delta_out)
T2_bis = LTD_random(n, len(T2.edges()), delta_in, delta_out, traffic_matrix)
# Executable code (main)
if __name__ == '__main__':
T = greedy_LTD_start() | # Result | random_line_split |
LAB2_OpRes.py | # System libraries
import time
# Third party libraries
import networkx as nx
# Our libraries
import input_controls as inc
import graph_topologies as gt
import graph_traffic_matrix as tm
import ltd_utilities as ltd
def LTD_random(n, n_edges, delta_in, delta_out, traffic_matrix, title = 'Random LTD - Comparisons', userView = True, withLabels = True):
'''
This function solves the LTD problem generating a random topology, according to the input specified criteria:
- "n" is the number of nodes
- "n_edges" is the number of edges
- "delta_in" is the maximum number of receivers per node
- "delta_out" is the maximum number of transmitters per node
- "traffic_matrix" is the traffic matrix, used to decide edges' flow values
- title: graph's title and output files names (.txt e .png)
- userView: boolean, used to require the visualization of the topology and the log of the results on screen
- withLabels: boolean, used to require the visualization of the flow labels in the obtained topology photo
'''
# INPUT CONTROL
# n, delta_in, delta_out and traffic_matrix
ltd.input_control(n, traffic_matrix, delta_in, delta_out)
# n_edges: extreme case are the ring or the full mesh topologies
inc.check_integer(n_edges, 'n_edges', minValue = n, maxValue = n * (n - 1))
# ALGORITHM
# Computation starting time
initial_time = time.time()
# Create the topology (oriented random graph)
T = gt.random_topology(n, n_edges, delta_in, delta_out)
# Result
return ltd.result(T, traffic_matrix, delta_in, delta_out, initial_time, title, userView, withLabels, 'Random')
def greedy_LTD_mesh(n, traffic_matrix, delta_in, delta_out, title = 'Sol. 1 - Mesh LTD', userView = True, withLabels = True):
'''
This function generates a network topolgy in order to solve, using a greedy approach, an LTD problem.
Input parameters are:
- n: number of nodes
- traffic_matrix: traffic matrix (mean traffic value exchanged by node pairs)
- delta_in: constraint on the maximum number of receivers per node
- delta_out: constraint on the maximum number of trnasmitters per node
- title: graph's title and output files names (.txt e .png)
- userView: boolean, used to require the visualization of the topology and the log of the results on screen
- withLabels: boolean, used to require the visualization of the flow labels in the obtained topology photo
'''
# INPUT CONTROL
ltd.input_control(n, traffic_matrix, delta_in, delta_out)
# UTILITY FUNCTIONS
def edges_to_check(n, traffic_matrix):
'''
Lists the edges of the graph "G", ordered by their flow value (ascending order)
'''
G = gt.loaded_mesh_topology(n, traffic_matrix)
edges_to_check = []
for e in G.edges():
# Associate the flow to the edge
u = e[0]
v = e[1]
f = G.edge[u][v]['flow']
edges_to_check.append({
'edge': e,
'flow': f
})
edges_to_check.sort(key = lambda x: x['flow'])
return edges_to_check
# ALGORITHM
# Computation starting time
initial_time = time.time()
# Print on the screen the traffic matrix content
tm.print_TM(traffic_matrix)
# If one of the deltas is equal to 1, I know for sure that the resulting topology has to be a ring
if delta_in == 1 or delta_out == 1:
T = gt.ring_topology(n)
else:
# Instantiate the initial full mesh topology, from which I'm going to remove edges
T = gt.mesh_topology(n)
# This array contains the graph's edges, sorted according their flow (ascending order)
edges_to_check = edges_to_check(n, traffic_matrix)
# OPTIMIZE THE TOPOLOGY
# Noe, I have to remove edges until the delta contraints are satisfied
# BUT: I could find edges impossible to remove...
print('\nPlease wait...')
while (not ltd.check_global_delta_constraints(T, delta_in, delta_out)) and len(edges_to_check) > 0:
# The edge I try to remove first is the one with minimum flow value
edge_to_remove = edges_to_check.pop(0)['edge']
# Nodes of the selected edge
u = edge_to_remove[0]
v = edge_to_remove[1]
# Analyzing the delta constraint on "u" and "v", I could find that it is not necessary to remove this edge
u_out_degree = T.out_degree()[u]
v_in_degree = T.in_degree()[v]
# Check if I really need to remove the edge
if u_out_degree > delta_out or v_in_degree > delta_in:
# Verify that, once the edge is removed, the resulting graph will not be disconnected
if gt.has_alternative_paths(T, edge_to_remove):
# I can remove the selected edge
T.remove_edge(u, v)
# Result
return ltd.result(T, traffic_matrix, delta_in, delta_out, initial_time, title, userView, withLabels, 'Mesh')
def greedy_LTD_ring(n, traffic_matrix, delta_in, delta_out, title = 'Sol. 2 - Ring LTD', userView = True, withLabels = True):
'''
This function computes a network topology in order to solve, using a greedy approach, an LTD problem.
With respect to the function "greedy_LTD_mesh", here the starting topology is a ring: the idea is to add edges
to it until the delta constraints are satisfied.
Input parameters are:
- n: number of nodes
- traffic_matrix: traffic matrix (mean traffic value exchanged by node pairs)
- delta_in: constraint on the maximum number of receivers per node
- delta_out: constraint on the maximum number of trnasmitters per node
- title: graph's title and output files names (.txt e .png)
- userView: boolean, used to require the visualization of the topology and the log of the results on screen
- withLabels: boolean, used to require the visualization of the flow labels in the obtained topology photo
'''
# INPUT CONTROL
ltd.input_control(n, traffic_matrix, delta_in, delta_out)
# UTILITY FUNCTIONS
def edges_to_check(G, traffic_matrix):
'''
This function lists the possible edges I can add to the topology, sorted by decreasing flow value
'''
res = []
nodes = G.nodes()
edges = G.edges()
# Loop on the traffic matrix values
for u in nodes:
for v in nodes:
# No zero-flow edges (self-loops included)
f = traffic_matrix[u][v]
if f > 0:
# The edge must not already exist i the topology
e = (u, v)
if e not in edges:
# I've foun a candidate edge
res.append({
'edge': e,
'flow': f
})
# Sort by decreasing fow value
res.sort(key = lambda x: x['flow'], reverse = True)
# Result
return res
def check_can_add_edges(G, delta_in, delta_out):
'''
This function verify that exist at least 2 nodes, different each other, having at least
a free receiver and a free transmitter
'''
# Input/output degree of the graph's nodes
in_deg = G.in_degree()
out_deg = G.out_degree()
# Graph's nodes
nodes = G.nodes()
# Check the delta_in constraint
res_ok = False
for x in nodes:
if in_deg[x] < delta_in:
# I've found a node with a free receiver
# Check now the delta_out constraint
for y in nodes:
if y != x and out_deg[y] < delta_out:
# I've found another node with a free transmitter
res_ok = True
break
if res_ok:
break
# Result
return res_ok
# ALGORITHM
# Computation starting time
initial_time = time.time()
# Print on screen the content of the traffic matrix
tm.print_TM(traffic_matrix)
# The starting topology is a ring
T = gt.ring_topology(n)
# If one of the delta constraints is equal to 1, I know for sure that the resulting topology will be the starting one
if delta_in > 1 and delta_out > 1:
# Graph's edges, serted by decreasing flow values
edges_to_check = edges_to_check(T, traffic_matrix)
# OPTIMIZE THE TOPOLOGY
# Now, I have to add edges until the delta constraints allow me to do that
# BUT: I could find edges impossible to add...
print('\nPlease wait...')
while check_can_add_edges(T, delta_in, delta_out) and len(edges_to_check) > 0:
# The edge I'm going to try to add is the one with the least associated flow value
edge_to_add = edges_to_check.pop(0)['edge']
# Nodes of the selected edge
u = edge_to_add[0]
v = edge_to_add[1]
# Check if the selected edge can be added to the topology
u_out_degree = T.out_degree()[u]
v_in_degree = T.in_degree()[v]
if u_out_degree < delta_out and v_in_degree < delta_in:
# Add the selected edge
T.add_edge(u, v, flow = 0.0)
# Result
return ltd.result(T, traffic_matrix, delta_in, delta_out, initial_time, title, userView, withLabels, 'Ring')
def LTD_manhattan_smart(n, nr, nc, traffic_matrix, title = 'Manhattan LTD', userView = True, withLabels = True):
'''
This function creates a Manattan topology and, according to the input traffic matrix, solves an LTD problem.
Input parameters are:
- n: number of nodes in the topology, placed as a "rectangle"
- nr: number of nodes per row
- nc: number of nodes per column
- traffic_matrix: traffic matrix (mean traffic value exchanged by node pairs)
- title: graph's title and output files names (.txt e .png)
- userView: boolean, used to require the visualization of the topology and the log of the results on screen
- withLabels: boolean, used to require the visualization of the flow labels in the obtained topology photo
'''
# UTILITY FUNCTIONS
def max_pair(T):
'''
Retrieve indexes of the highest traffic value into the traffic matrix
'''
# Init variables
maxV = -1
s_res = None
d_res = None
# T is a matrix n x n
n = len(T)
# loop over the matrix, to find the maximum value
for s in range(n):
for d in range(n):
if T[s][d] > maxV:
# Update max values
maxV = T[s][d]
s_res = s
d_res = d
# Result
return (s_res, d_res)
def | (T):
'''
Retrieve a copy for the given traffic matrix (avoid pointer references)
'''
res = []
for row in T:
res.append([])
for c in row:
res[-1].append(c)
return res
def empty_place(G, n):
'''
Verify that the position "n" of the "G" is empty
'''
return G.node[n]['name'] == None
def place_node(G, pos, name):
'''
Place the node "name" into the position "pos" of the topology "G"
'''
G.node[pos]['name'] = name
def node_position(G, n):
'''
Retrieve the position in the topology "G" of a node whose name is "n", already positioned
'''
res = None
# Loop over positions
for p in G.nodes():
if G.node[p]['name'] == n:
# I've found the position for the node named "n"
res = p
break
# Result
return res
def place_2_nodes(G, s, d):
'''
Try to place in "G" nodes "s" and "d": there must be two adjacent places
'''
# Loop over positions
for u in G.nodes():
# If not positioned, control adjacent nodes
if empty_place(G, u):
if place_1_node(G, u, d):
# I've found a free position and I've placed the second node
place_node(G, pos = u, name = s)
return True
# No adjacent places for "s" and "d"
return False
def place_1_node(G, p, x):
'''
Try to place in "G" node "x" near to "p"
'''
# Loop over "p" adjacent places
for v in G.edge[p].keys():
if empty_place(G, v):
# I've found two free adjacent places
place_node(G, pos = v, name = x)
return True
# I haven't found an available place for "x"
return False
# Start computation time, in seconds
initial_time = time.time()
# Print the content of the traffic matrix
tm.print_TM(traffic_matrix)
# Create a copy of the traffic matrix (to avoid reference pointers)
tm_temp = copy_TM(traffic_matrix)
# First of all, retrieve the starting topology
T_temp = gt.manhattan_topology(nr, nc)
# Then, name nodes using an "empty" name
nodes = T_temp.nodes()
for n in nodes:
T_temp.node[n]['name'] = None
# STEP 0
# End of computation flag
end = False
# Placed nodes
S = set()
# Not placed yet nodes
L = set(nodes)
while not end:
# STEP 1
# Retrieve the pair of nodes who exchange most traffic
s, d = max_pair(tm_temp)
tm_temp[s][d] = -1
# STEP 2
s_placed = s in S
d_placed = d in S
# Both nodes of the pair are not placed
if not s_placed and not d_placed:
# STEP 3
# Try to place these nodes
if place_2_nodes(T_temp, s, d):
# Mark as placed
S.add(s)
S.add(d)
L.remove(s)
L.remove(d)
# Only one of the nodes is placed
elif (s_placed and not d_placed) or (not s_placed and d_placed):
# STEP 4
# Let us call "p" the placed node, "x" the other one
if s_placed:
p = s
x = d
else:
p = d
x = s
# Try to place node "x"
if place_1_node(T_temp, node_position(T_temp, p), x):
# Mark as placed
S.add(x)
L.remove(x)
# STEP 5
# Both nodes were already placed, or their placement attempt failed
# Control if I have other nodes to place
end = len(L) == 0
# Now, create a second Manhattan topology in which nodes are swapped
T = gt.manhattan_topology(nr, nc, derived = T_temp)
# Decide how deep is the existing path research between a pair of nodes
depth = nr-1 if nr == nc else nr/2 + nc/2
# Route traffic according to the "water filling" principle
return ltd.result(T, traffic_matrix, 4, 4, initial_time, title, userView, withLabels, 'Manhattan', depth)
def LTD_manhattan(n, nr, nc, traffic_matrix, title = 'Manhattan LTD', userView = True, withLabels = True):
'''
This function creates a Manattan topology and, according to the input traffic matrix, solves an LTD problem.
Input parameters are:
- n: number of nodes in the topology, placed as a "rectangle"
- nr: number of nodes per row
- nc: number of nodes per column
- traffic_matrix: traffic matrix (mean traffic value exchanged by node pairs)
- title: graph's title and output files names (.txt e .png)
- userView: boolean, used to require the visualization of the topology and the log of the results on screen
- withLabels: boolean, used to require the visualization of the flow labels in the obtained topology photo
'''
# Computation starting time, in seconds
initial_time = time.time()
# Print on screen the content of the traffic matrix
tm.print_TM(traffic_matrix)
# First of all, compute the topology
T = gt.manhattan_topology(nr, nc)
# Evaluate the maximum search depth for the paths between pairs of nodes
depth = nr-1 if nr == nc else nr/2 + nc/2
# Now, route the traffic according to the "water filling" principle
return ltd.result(T, traffic_matrix, 4, 4, initial_time, title, userView, withLabels, 'Manhattan', depth)
def greedy_LTD_start():
'''
Shortcut: called by the user, in order to retrieve several solutions and compare them each other
Parameters used to create the graphs are take as input from the user
'''
# Number of nodes
n = inc.input_int('Number of nodes', minValue = 1)
# Extreme values for the traffic matrix
TM_min = inc.input_int('Traffic matrix lower bound', minValue = 1)
TM_max = inc.input_int('Traffic matrix upper bound', minValue = TM_min)
# Delta values
delta_in = inc.input_int('Delta_in (max #rx per node)', minValue = 1)
delta_out = inc.input_int('Delta_out (max #tx per node)', minValue = 1)
# Traffic matrix
traffic_matrix = tm.random_TM(n, TM_min, TM_max)
# Results:
T1 = greedy_LTD_mesh(n, traffic_matrix, delta_in, delta_out)
T1_bis = LTD_random(n, len(T1.edges()), delta_in, delta_out, traffic_matrix)
T2 = greedy_LTD_ring(n, traffic_matrix, delta_in, delta_out)
T2_bis = LTD_random(n, len(T2.edges()), delta_in, delta_out, traffic_matrix)
# Executable code (main)
if __name__ == '__main__':
T = greedy_LTD_start()
| copy_TM | identifier_name |
LAB2_OpRes.py | # System libraries
import time
# Third party libraries
import networkx as nx
# Our libraries
import input_controls as inc
import graph_topologies as gt
import graph_traffic_matrix as tm
import ltd_utilities as ltd
def LTD_random(n, n_edges, delta_in, delta_out, traffic_matrix, title = 'Random LTD - Comparisons', userView = True, withLabels = True):
'''
This function solves the LTD problem generating a random topology, according to the input specified criteria:
- "n" is the number of nodes
- "n_edges" is the number of edges
- "delta_in" is the maximum number of receivers per node
- "delta_out" is the maximum number of transmitters per node
- "traffic_matrix" is the traffic matrix, used to decide edges' flow values
- title: graph's title and output files names (.txt e .png)
- userView: boolean, used to require the visualization of the topology and the log of the results on screen
- withLabels: boolean, used to require the visualization of the flow labels in the obtained topology photo
'''
# INPUT CONTROL
# n, delta_in, delta_out and traffic_matrix
ltd.input_control(n, traffic_matrix, delta_in, delta_out)
# n_edges: extreme case are the ring or the full mesh topologies
inc.check_integer(n_edges, 'n_edges', minValue = n, maxValue = n * (n - 1))
# ALGORITHM
# Computation starting time
initial_time = time.time()
# Create the topology (oriented random graph)
T = gt.random_topology(n, n_edges, delta_in, delta_out)
# Result
return ltd.result(T, traffic_matrix, delta_in, delta_out, initial_time, title, userView, withLabels, 'Random')
def greedy_LTD_mesh(n, traffic_matrix, delta_in, delta_out, title = 'Sol. 1 - Mesh LTD', userView = True, withLabels = True):
'''
This function generates a network topolgy in order to solve, using a greedy approach, an LTD problem.
Input parameters are:
- n: number of nodes
- traffic_matrix: traffic matrix (mean traffic value exchanged by node pairs)
- delta_in: constraint on the maximum number of receivers per node
- delta_out: constraint on the maximum number of trnasmitters per node
- title: graph's title and output files names (.txt e .png)
- userView: boolean, used to require the visualization of the topology and the log of the results on screen
- withLabels: boolean, used to require the visualization of the flow labels in the obtained topology photo
'''
# INPUT CONTROL
ltd.input_control(n, traffic_matrix, delta_in, delta_out)
# UTILITY FUNCTIONS
def edges_to_check(n, traffic_matrix):
'''
Lists the edges of the graph "G", ordered by their flow value (ascending order)
'''
G = gt.loaded_mesh_topology(n, traffic_matrix)
edges_to_check = []
for e in G.edges():
# Associate the flow to the edge
u = e[0]
v = e[1]
f = G.edge[u][v]['flow']
edges_to_check.append({
'edge': e,
'flow': f
})
edges_to_check.sort(key = lambda x: x['flow'])
return edges_to_check
# ALGORITHM
# Computation starting time
initial_time = time.time()
# Print on the screen the traffic matrix content
tm.print_TM(traffic_matrix)
# If one of the deltas is equal to 1, I know for sure that the resulting topology has to be a ring
if delta_in == 1 or delta_out == 1:
T = gt.ring_topology(n)
else:
# Instantiate the initial full mesh topology, from which I'm going to remove edges
T = gt.mesh_topology(n)
# This array contains the graph's edges, sorted according their flow (ascending order)
edges_to_check = edges_to_check(n, traffic_matrix)
# OPTIMIZE THE TOPOLOGY
# Noe, I have to remove edges until the delta contraints are satisfied
# BUT: I could find edges impossible to remove...
print('\nPlease wait...')
while (not ltd.check_global_delta_constraints(T, delta_in, delta_out)) and len(edges_to_check) > 0:
# The edge I try to remove first is the one with minimum flow value
edge_to_remove = edges_to_check.pop(0)['edge']
# Nodes of the selected edge
u = edge_to_remove[0]
v = edge_to_remove[1]
# Analyzing the delta constraint on "u" and "v", I could find that it is not necessary to remove this edge
u_out_degree = T.out_degree()[u]
v_in_degree = T.in_degree()[v]
# Check if I really need to remove the edge
if u_out_degree > delta_out or v_in_degree > delta_in:
# Verify that, once the edge is removed, the resulting graph will not be disconnected
if gt.has_alternative_paths(T, edge_to_remove):
# I can remove the selected edge
T.remove_edge(u, v)
# Result
return ltd.result(T, traffic_matrix, delta_in, delta_out, initial_time, title, userView, withLabels, 'Mesh')
def greedy_LTD_ring(n, traffic_matrix, delta_in, delta_out, title = 'Sol. 2 - Ring LTD', userView = True, withLabels = True):
'''
This function computes a network topology in order to solve, using a greedy approach, an LTD problem.
With respect to the function "greedy_LTD_mesh", here the starting topology is a ring: the idea is to add edges
to it until the delta constraints are satisfied.
Input parameters are:
- n: number of nodes
- traffic_matrix: traffic matrix (mean traffic value exchanged by node pairs)
- delta_in: constraint on the maximum number of receivers per node
- delta_out: constraint on the maximum number of trnasmitters per node
- title: graph's title and output files names (.txt e .png)
- userView: boolean, used to require the visualization of the topology and the log of the results on screen
- withLabels: boolean, used to require the visualization of the flow labels in the obtained topology photo
'''
# INPUT CONTROL
ltd.input_control(n, traffic_matrix, delta_in, delta_out)
# UTILITY FUNCTIONS
def edges_to_check(G, traffic_matrix):
'''
This function lists the possible edges I can add to the topology, sorted by decreasing flow value
'''
res = []
nodes = G.nodes()
edges = G.edges()
# Loop on the traffic matrix values
for u in nodes:
for v in nodes:
# No zero-flow edges (self-loops included)
f = traffic_matrix[u][v]
if f > 0:
# The edge must not already exist i the topology
e = (u, v)
if e not in edges:
# I've foun a candidate edge
res.append({
'edge': e,
'flow': f
})
# Sort by decreasing fow value
res.sort(key = lambda x: x['flow'], reverse = True)
# Result
return res
def check_can_add_edges(G, delta_in, delta_out):
'''
This function verify that exist at least 2 nodes, different each other, having at least
a free receiver and a free transmitter
'''
# Input/output degree of the graph's nodes
in_deg = G.in_degree()
out_deg = G.out_degree()
# Graph's nodes
nodes = G.nodes()
# Check the delta_in constraint
res_ok = False
for x in nodes:
if in_deg[x] < delta_in:
# I've found a node with a free receiver
# Check now the delta_out constraint
for y in nodes:
if y != x and out_deg[y] < delta_out:
# I've found another node with a free transmitter
res_ok = True
break
if res_ok:
break
# Result
return res_ok
# ALGORITHM
# Computation starting time
initial_time = time.time()
# Print on screen the content of the traffic matrix
tm.print_TM(traffic_matrix)
# The starting topology is a ring
T = gt.ring_topology(n)
# If one of the delta constraints is equal to 1, I know for sure that the resulting topology will be the starting one
if delta_in > 1 and delta_out > 1:
# Graph's edges, serted by decreasing flow values
edges_to_check = edges_to_check(T, traffic_matrix)
# OPTIMIZE THE TOPOLOGY
# Now, I have to add edges until the delta constraints allow me to do that
# BUT: I could find edges impossible to add...
print('\nPlease wait...')
while check_can_add_edges(T, delta_in, delta_out) and len(edges_to_check) > 0:
# The edge I'm going to try to add is the one with the least associated flow value
edge_to_add = edges_to_check.pop(0)['edge']
# Nodes of the selected edge
u = edge_to_add[0]
v = edge_to_add[1]
# Check if the selected edge can be added to the topology
u_out_degree = T.out_degree()[u]
v_in_degree = T.in_degree()[v]
if u_out_degree < delta_out and v_in_degree < delta_in:
# Add the selected edge
T.add_edge(u, v, flow = 0.0)
# Result
return ltd.result(T, traffic_matrix, delta_in, delta_out, initial_time, title, userView, withLabels, 'Ring')
def LTD_manhattan_smart(n, nr, nc, traffic_matrix, title = 'Manhattan LTD', userView = True, withLabels = True):
'''
This function creates a Manattan topology and, according to the input traffic matrix, solves an LTD problem.
Input parameters are:
- n: number of nodes in the topology, placed as a "rectangle"
- nr: number of nodes per row
- nc: number of nodes per column
- traffic_matrix: traffic matrix (mean traffic value exchanged by node pairs)
- title: graph's title and output files names (.txt e .png)
- userView: boolean, used to require the visualization of the topology and the log of the results on screen
- withLabels: boolean, used to require the visualization of the flow labels in the obtained topology photo
'''
# UTILITY FUNCTIONS
def max_pair(T):
'''
Retrieve indexes of the highest traffic value into the traffic matrix
'''
# Init variables
maxV = -1
s_res = None
d_res = None
# T is a matrix n x n
n = len(T)
# loop over the matrix, to find the maximum value
for s in range(n):
for d in range(n):
|
# Result
return (s_res, d_res)
def copy_TM(T):
'''
Retrieve a copy for the given traffic matrix (avoid pointer references)
'''
res = []
for row in T:
res.append([])
for c in row:
res[-1].append(c)
return res
def empty_place(G, n):
'''
Verify that the position "n" of the "G" is empty
'''
return G.node[n]['name'] == None
def place_node(G, pos, name):
'''
Place the node "name" into the position "pos" of the topology "G"
'''
G.node[pos]['name'] = name
def node_position(G, n):
'''
Retrieve the position in the topology "G" of a node whose name is "n", already positioned
'''
res = None
# Loop over positions
for p in G.nodes():
if G.node[p]['name'] == n:
# I've found the position for the node named "n"
res = p
break
# Result
return res
def place_2_nodes(G, s, d):
'''
Try to place in "G" nodes "s" and "d": there must be two adjacent places
'''
# Loop over positions
for u in G.nodes():
# If not positioned, control adjacent nodes
if empty_place(G, u):
if place_1_node(G, u, d):
# I've found a free position and I've placed the second node
place_node(G, pos = u, name = s)
return True
# No adjacent places for "s" and "d"
return False
def place_1_node(G, p, x):
'''
Try to place in "G" node "x" near to "p"
'''
# Loop over "p" adjacent places
for v in G.edge[p].keys():
if empty_place(G, v):
# I've found two free adjacent places
place_node(G, pos = v, name = x)
return True
# I haven't found an available place for "x"
return False
# Start computation time, in seconds
initial_time = time.time()
# Print the content of the traffic matrix
tm.print_TM(traffic_matrix)
# Create a copy of the traffic matrix (to avoid reference pointers)
tm_temp = copy_TM(traffic_matrix)
# First of all, retrieve the starting topology
T_temp = gt.manhattan_topology(nr, nc)
# Then, name nodes using an "empty" name
nodes = T_temp.nodes()
for n in nodes:
T_temp.node[n]['name'] = None
# STEP 0
# End of computation flag
end = False
# Placed nodes
S = set()
# Not placed yet nodes
L = set(nodes)
while not end:
# STEP 1
# Retrieve the pair of nodes who exchange most traffic
s, d = max_pair(tm_temp)
tm_temp[s][d] = -1
# STEP 2
s_placed = s in S
d_placed = d in S
# Both nodes of the pair are not placed
if not s_placed and not d_placed:
# STEP 3
# Try to place these nodes
if place_2_nodes(T_temp, s, d):
# Mark as placed
S.add(s)
S.add(d)
L.remove(s)
L.remove(d)
# Only one of the nodes is placed
elif (s_placed and not d_placed) or (not s_placed and d_placed):
# STEP 4
# Let us call "p" the placed node, "x" the other one
if s_placed:
p = s
x = d
else:
p = d
x = s
# Try to place node "x"
if place_1_node(T_temp, node_position(T_temp, p), x):
# Mark as placed
S.add(x)
L.remove(x)
# STEP 5
# Both nodes were already placed, or their placement attempt failed
# Control if I have other nodes to place
end = len(L) == 0
# Now, create a second Manhattan topology in which nodes are swapped
T = gt.manhattan_topology(nr, nc, derived = T_temp)
# Decide how deep is the existing path research between a pair of nodes
depth = nr-1 if nr == nc else nr/2 + nc/2
# Route traffic according to the "water filling" principle
return ltd.result(T, traffic_matrix, 4, 4, initial_time, title, userView, withLabels, 'Manhattan', depth)
def LTD_manhattan(n, nr, nc, traffic_matrix, title = 'Manhattan LTD', userView = True, withLabels = True):
'''
This function creates a Manattan topology and, according to the input traffic matrix, solves an LTD problem.
Input parameters are:
- n: number of nodes in the topology, placed as a "rectangle"
- nr: number of nodes per row
- nc: number of nodes per column
- traffic_matrix: traffic matrix (mean traffic value exchanged by node pairs)
- title: graph's title and output files names (.txt e .png)
- userView: boolean, used to require the visualization of the topology and the log of the results on screen
- withLabels: boolean, used to require the visualization of the flow labels in the obtained topology photo
'''
# Computation starting time, in seconds
initial_time = time.time()
# Print on screen the content of the traffic matrix
tm.print_TM(traffic_matrix)
# First of all, compute the topology
T = gt.manhattan_topology(nr, nc)
# Evaluate the maximum search depth for the paths between pairs of nodes
depth = nr-1 if nr == nc else nr/2 + nc/2
# Now, route the traffic according to the "water filling" principle
return ltd.result(T, traffic_matrix, 4, 4, initial_time, title, userView, withLabels, 'Manhattan', depth)
def greedy_LTD_start():
'''
Shortcut: called by the user, in order to retrieve several solutions and compare them each other
Parameters used to create the graphs are take as input from the user
'''
# Number of nodes
n = inc.input_int('Number of nodes', minValue = 1)
# Extreme values for the traffic matrix
TM_min = inc.input_int('Traffic matrix lower bound', minValue = 1)
TM_max = inc.input_int('Traffic matrix upper bound', minValue = TM_min)
# Delta values
delta_in = inc.input_int('Delta_in (max #rx per node)', minValue = 1)
delta_out = inc.input_int('Delta_out (max #tx per node)', minValue = 1)
# Traffic matrix
traffic_matrix = tm.random_TM(n, TM_min, TM_max)
# Results:
T1 = greedy_LTD_mesh(n, traffic_matrix, delta_in, delta_out)
T1_bis = LTD_random(n, len(T1.edges()), delta_in, delta_out, traffic_matrix)
T2 = greedy_LTD_ring(n, traffic_matrix, delta_in, delta_out)
T2_bis = LTD_random(n, len(T2.edges()), delta_in, delta_out, traffic_matrix)
# Executable code (main)
if __name__ == '__main__':
T = greedy_LTD_start()
| if T[s][d] > maxV:
# Update max values
maxV = T[s][d]
s_res = s
d_res = d | conditional_block |
tags.rs | //! Constants for commonly used tags in TIFF files, baseline
//! or extended.
//!
//! Check the [Tiff Tag Reference](https://www.awaresystems.be/imaging/tiff/tifftags.html)
//! for more information on each tag.
#![allow(non_upper_case_globals)]
/// 16-bit identifier of a field entry.
pub type FieldTag = u16;
// pub const NewSubfileType: u16 = 0x00FE;
// pub const ImageWidth: u16 = 0x0100;
// pub const ImageLength: u16 = 0x0101;
// pub const BitsPerSample: u16 = 0x0102;
// pub const Compression: u16 = 0x0103;
// pub const PhotometricInterpretation: u16 = 0x0106;
// pub const FillOrder: u16 = 0x010A;
// pub const ImageDescription: u16 = 0x010E;
// pub const Make: u16 = 0x010F;
// pub const Model: u16 = 0x0110;
// pub const StripOffsets: u16 = 0x0111;
// pub const Orientation: u16 = 0x0112;
// pub const SamplesPerPixel: u16 = 0x0115;
// pub const RowsPerStrip: u16 = 0x0116;
// pub const StripByteCounts: u16 = 0x0117;
// pub const XResolution: u16 = 0x011A;
// pub const YResolution: u16 = 0x011B;
// pub const PlanarConfiguration: u16 = 0x011C;
// pub const ResolutionUnit: u16 = 0x0128;
// pub const Software: u16 = 0x0131;
// pub const DateTime: u16 = 0x0132;
// pub const Artist: u16 = 0x013B;
// pub const TileWidth: u16 = 0x0142;
// pub const TileLength: u16 = 0x0143;
// pub const TileOffsets: u16 = 0x0144;
// pub const TileByteCounts: u16 = 0x0145;
// pub const Copyright: u16 = 0x8298;
pub const SubfileType: u16 = 0x00FF;
pub const Threshholding: u16 = 0x0107;
pub const CellWidth: u16 = 0x0108;
pub const CellLength: u16 = 0x0109;
pub const DocumentName: u16 = 0x010D;
pub const MinSampleValue: u16 = 0x0118;
pub const MaxSampleValue: u16 = 0x0119;
pub const PageName: u16 = 0x011D;
pub const XPosition: u16 = 0x011E;
pub const YPosition: u16 = 0x011F;
pub const FreeOffsets: u16 = 0x0120;
pub const FreeByteCounts: u16 = 0x0121;
pub const GrayResponseUnit: u16 = 0x0122;
pub const GrayResponseCurve: u16 = 0x0123;
pub const T4Options: u16 = 0x0124;
pub const T6Options: u16 = 0x0125;
pub const PageNumber: u16 = 0x0129;
pub const TransferFunction: u16 = 0x012D;
pub const HostComputer: u16 = 0x013C;
pub const Predictor: u16 = 0x013D;
pub const WhitePoint: u16 = 0x013E;
pub const PrimaryChromaticities: u16 = 0x013F;
pub const ColorMap: u16 = 0x0140;
pub const HalftoneHints: u16 = 0x0141;
pub const BadFaxLines: u16 = 0x0146;
pub const CleanFaxData: u16 = 0x0147;
pub const ConsecutiveBadFaxLines: u16 = 0x0148;
pub const SubIFDs: u16 = 0x014A;
pub const InkSet: u16 = 0x014C;
pub const InkNames: u16 = 0x014D;
pub const NumberOfInks: u16 = 0x014E;
pub const DotRange: u16 = 0x0150;
pub const TargetPrinter: u16 = 0x0151;
pub const ExtraSamples: u16 = 0x0152;
pub const SampleFormat: u16 = 0x0153;
pub const SMinSampleValue: u16 = 0x0154;
pub const SMaxSampleValue: u16 = 0x0155;
pub const TransferRange: u16 = 0x0156;
pub const ClipPath: u16 = 0x0157;
pub const XClipPathUnits: u16 = 0x0158;
pub const YClipPathUnits: u16 = 0x0159;
pub const Indexed: u16 = 0x015A;
pub const JPEGTables: u16 = 0x015B;
pub const OPIProxy: u16 = 0x015F;
pub const GlobalParametersIFD: u16 = 0x0190;
pub const ProfileType: u16 = 0x0191;
pub const FaxProfile: u16 = 0x0192;
pub const CodingMethods: u16 = 0x0193;
pub const VersionYear: u16 = 0x0194;
pub const ModeNumber: u16 = 0x0195;
pub const Decode: u16 = 0x01B1;
pub const DefaultImageColor: u16 = 0x01B2;
pub const JPEGProc: u16 = 0x0200;
pub const JPEGInterchangeFormat: u16 = 0x0201;
pub const JPEGInterchangeFormatLength: u16 = 0x0202;
pub const JPEGRestartInterval: u16 = 0x0203;
pub const JPEGLosslessPredictors: u16 = 0x0205;
pub const JPEGPointTransforms: u16 = 0x0206;
pub const JPEGQTables: u16 = 0x0207;
pub const JPEGDCTables: u16 = 0x0208;
pub const JPEGACTables: u16 = 0x0209;
pub const YCbCrCoefficients: u16 = 0x0211;
pub const YCbCrSubSampling: u16 = 0x0212;
pub const YCbCrPositioning: u16 = 0x0213;
pub const ReferenceBlackWhite: u16 = 0x0214;
pub const StripRowCounts: u16 = 0x022F;
pub const XMP: u16 = 0x02BC;
pub const ImageID: u16 = 0x800D;
pub const ImageLayer: u16 = 0x87AC;
// extracted from https://github.com/schoolpost/PyDNG/raw/master/pydng.py
// cat /tmp/tiffs.txt | tr "(),=" \ | awk '{print "printf \"pub const "$1": u16 = 0x%04x; //"$3"\\n\" "$2}'| bash
pub const NewSubfileType: u16 = 0x00fe; //Type.Long
pub const ImageWidth: u16 = 0x0100; //Type.Long
pub const ImageLength: u16 = 0x0101; //Type.Long
pub const BitsPerSample: u16 = 0x0102; //Type.Short
pub const Compression: u16 = 0x0103; //Type.Short
pub const PhotometricInterpretation: u16 = 0x0106; //Type.Short
pub const FillOrder: u16 = 0x010a; //Type.Short
pub const ImageDescription: u16 = 0x010e; //Type.Ascii
pub const Make: u16 = 0x010f; //Type.Ascii
pub const Model: u16 = 0x0110; //Type.Ascii
pub const StripOffsets: u16 = 0x0111; //Type.Long
pub const Orientation: u16 = 0x0112; //Type.Short
pub const SamplesPerPixel: u16 = 0x0115; //Type.Short
pub const RowsPerStrip: u16 = 0x0116; //Type.Short
pub const StripByteCounts: u16 = 0x0117; //Type.Long
pub const XResolution: u16 = 0x011a; //Type.Rational
pub const YResolution: u16 = 0x011b; //Type.Rational
pub const PlanarConfiguration: u16 = 0x011c; //Type.Short
pub const ResolutionUnit: u16 = 0x0128; //Type.Short
pub const Software: u16 = 0x0131; //Type.Ascii
pub const DateTime: u16 = 0x0132; //Type.Ascii
pub const Artist: u16 = 0x013b; //Type.Ascii
pub const TileWidth: u16 = 0x0142; //Type.Short
pub const TileLength: u16 = 0x0143; //Type.Short
pub const TileOffsets: u16 = 0x0144; //Type.Long
pub const TileByteCounts: u16 = 0x0145; //Type.Long
pub const Copyright: u16 = 0x8298; //Type.Ascii
pub const SubIFD: u16 = 0x014a; //Type.IFD
pub const XMP_Metadata: u16 = 0x02bc; //Type.Undefined
pub const CFARepeatPatternDim: u16 = 0x828d; //Type.Short
pub const CFAPattern: u16 = 0x828e; //Type.Byte
pub const ExposureTime: u16 = 0x829a; //Type.Rational
pub const FNumber: u16 = 0x829d; //Type.Rational
pub const EXIF_IFD: u16 = 0x8769; //Type.IFD
pub const ExposureProgram: u16 = 0x8822; //Type.Short
pub const PhotographicSensitivity: u16 = 0x8827; //Type.Short
pub const SensitivityType: u16 = 0x8830; //Type.Short
pub const ExifVersion: u16 = 0x9000; //Type.Undefined
pub const DateTimeOriginal: u16 = 0x9003; //Type.Ascii
pub const ShutterSpeedValue: u16 = 0x9201; //Type.Srational
pub const ApertureValue: u16 = 0x9202; //Type.Rational
pub const ExposureBiasValue: u16 = 0x9204; //Type.Srational
pub const MaxApertureValue: u16 = 0x9205; //Type.Rational
pub const SubjectDistance: u16 = 0x9206; //Type.Rational
pub const MeteringMode: u16 = 0x9207; //Type.Short
pub const Flash: u16 = 0x9209; //Type.Short
pub const FocalLength: u16 = 0x920a; //Type.Rational
pub const TIFF_EP_StandardID: u16 = 0x9216; //Type.Byte
pub const SubsecTime: u16 = 0x9290; //Type.Ascii | pub const FocalPlaneResolutionUnit: u16 = 0xa210; //Type.Short
pub const FocalLengthIn35mmFilm: u16 = 0xa405; //Type.Short
pub const EXIFPhotoBodySerialNumber: u16 = 0xa431; //Type.Ascii
pub const EXIFPhotoLensModel: u16 = 0xa434; //Type.Ascii
pub const DNGVersion: u16 = 0xc612; //Type.Byte
pub const DNGBackwardVersion: u16 = 0xc613; //Type.Byte
pub const UniqueCameraModel: u16 = 0xc614; //Type.Ascii
pub const CFAPlaneColor: u16 = 0xc616; //Type.Byte
pub const CFALayout: u16 = 0xc617; //Type.Short
pub const LinearizationTable: u16 = 0xc618; //Type.Short
pub const BlackLevelRepeatDim: u16 = 0xc619; //Type.Short
pub const BlackLevel: u16 = 0xc61a; //Type.Short
pub const WhiteLevel: u16 = 0xc61d; //Type.Short
pub const DefaultScale: u16 = 0xc61e; //Type.Rational
pub const DefaultCropOrigin: u16 = 0xc61f; //Type.Long
pub const DefaultCropSize: u16 = 0xc620; //Type.Long
pub const ColorMatrix1: u16 = 0xc621; //Type.Srational
pub const ColorMatrix2: u16 = 0xc622; //Type.Srational
pub const CameraCalibration1: u16 = 0xc623; //Type.Srational
pub const CameraCalibration2: u16 = 0xc624; //Type.Srational
pub const AnalogBalance: u16 = 0xc627; //Type.Rational
pub const AsShotNeutral: u16 = 0xc628; //Type.Rational
pub const BaselineExposure: u16 = 0xc62a; //Type.Srational
pub const BaselineNoise: u16 = 0xc62b; //Type.Rational
pub const BaselineSharpness: u16 = 0xc62c; //Type.Rational
pub const BayerGreenSplit: u16 = 0xc62d; //Type.Long
pub const LinearResponseLimit: u16 = 0xc62e; //Type.Rational
pub const CameraSerialNumber: u16 = 0xc62f; //Type.Ascii
pub const AntiAliasStrength: u16 = 0xc632; //Type.Rational
pub const ShadowScale: u16 = 0xc633; //Type.Rational
pub const DNGPrivateData: u16 = 0xc634; //Type.Byte
pub const MakerNoteSafety: u16 = 0xc635; //Type.Short
pub const CalibrationIlluminant1: u16 = 0xc65a; //Type.Short
pub const CalibrationIlluminant2: u16 = 0xc65b; //Type.Short
pub const BestQualityScale: u16 = 0xc65c; //Type.Rational
pub const RawDataUniqueID: u16 = 0xc65d; //Type.Byte
pub const ActiveArea: u16 = 0xc68d; //Type.Long
pub const CameraCalibrationSignature: u16 = 0xc6f3; //Type.Ascii
pub const ProfileCalibrationSignature: u16 = 0xc6f4; //Type.Ascii
pub const NoiseReductionApplied: u16 = 0xc6f7; //Type.Rational
pub const ProfileName: u16 = 0xc6f8; //Type.Ascii
pub const ProfileHueSatMapDims: u16 = 0xc6f9; //Type.Long
pub const ProfileHueSatMapData1: u16 = 0xc6fa; //Type.Float
pub const ProfileHueSatMapData2: u16 = 0xc6fb; //Type.Float
pub const ProfileEmbedPolicy: u16 = 0xc6fd; //Type.Long
pub const PreviewApplicationName: u16 = 0xc716; //Type.Ascii
pub const PreviewApplicationVersion: u16 = 0xc717; //Type.Ascii
pub const PreviewSettingsDigest: u16 = 0xc719; //Type.Byte
pub const PreviewColorSpace: u16 = 0xc71a; //Type.Long
pub const PreviewDateTime: u16 = 0xc71b; //Type.Ascii
pub const NoiseProfile: u16 = 0xc761; //Type.Double
pub const TimeCodes: u16 = 0xc763; //Type.Byte
pub const FrameRate: u16 = 0xc764; //Type.Srational
pub const OpcodeList1: u16 = 0xc740; //Type.Undefined
pub const OpcodeList2: u16 = 0xc741; //Type.Undefined
pub const ReelName: u16 = 0xc789; //Type.Ascii
pub const BaselineExposureOffset: u16 = 0xc7a5; //Type.Srational
pub const NewRawImageDigest: u16 = 0xc7a7; //Type.Byte
// extracted from file
// exiv2 -P nxy 20191226_170725.dng | grep -v "No XMP" | awk '{print "let "$2": u16 = "$1"; // "$3}'
pub const ExifTag: u16 = 0x8769; // Long
pub const ISOSpeedRatings: u16 = 0x8827; // Short
pub const TIFFEPStandardID: u16 = 0x9216; // Byte
pub const ForwardMatrix1: u16 = 0xc714; // SRational
pub const ForwardMatrix2: u16 = 0xc715; // SRational | pub const SubsecTimeOriginal: u16 = 0x9291; //Type.Ascii
pub const FocalPlaneXResolution: u16 = 0xa20e; //Type.Rational
pub const FocalPlaneYResolution: u16 = 0xa20f; //Type.Rational | random_line_split |
node_test.go | package attestor
import (
"context"
"crypto"
"crypto/ecdsa"
"crypto/rand"
"crypto/tls"
"crypto/x509"
"io/ioutil"
"math/big"
"net/url"
"os"
"path/filepath"
"testing"
"time"
"github.com/sirupsen/logrus/hooks/test"
"github.com/spiffe/spire/pkg/agent/plugin/keymanager"
"github.com/spiffe/spire/pkg/agent/plugin/keymanager/memory"
agentnodeattestor "github.com/spiffe/spire/pkg/agent/plugin/nodeattestor"
"github.com/spiffe/spire/pkg/common/catalog"
"github.com/spiffe/spire/pkg/common/idutil"
"github.com/spiffe/spire/pkg/common/pemutil"
"github.com/spiffe/spire/pkg/common/telemetry"
servernodeattestor "github.com/spiffe/spire/pkg/server/plugin/nodeattestor"
"github.com/spiffe/spire/proto/spire/api/node"
"github.com/spiffe/spire/proto/spire/common"
"github.com/spiffe/spire/test/fakes/fakeagentcatalog"
"github.com/spiffe/spire/test/fakes/fakeagentnodeattestor"
"github.com/spiffe/spire/test/fakes/fakeservernodeattestor"
"github.com/spiffe/spire/test/spiretest"
"github.com/stretchr/testify/require"
)
var (
testKey, _ = pemutil.ParseSigner([]byte(`-----BEGIN PRIVATE KEY-----
MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgy8ps3oQaBaSUFpfd
XM13o+VSA0tcZteyTvbOdIQNVnKhRANCAAT4dPIORBjghpL5O4h+9kyzZZUAFV9F
qNV3lKIL59N7G2B4ojbhfSNneSIIpP448uPxUnaunaQZ+/m7+x9oobIp
-----END PRIVATE KEY-----
`))
)
func TestAttestor(t *testing.T) {
// create CA and server certificates
caCert := createCACertificate(t)
serverCert := createServerCertificate(t, caCert)
agentCert := createAgentCertificate(t, caCert, "/test/foo")
expiredCert := createExpiredCertificate(t, caCert)
tlsConfig := &tls.Config{
Certificates: []tls.Certificate{
{
Certificate: [][]byte{serverCert.Raw},
PrivateKey: testKey,
},
},
}
testCases := []struct {
name string
challengeResponses []string
bootstrapBundle *x509.Certificate
insecureBootstrap bool
cachedBundle []byte
cachedSVID []byte
joinToken string
err string
omitSVIDUpdate bool
overrideSVIDUpdate *node.X509SVIDUpdate
storeKey crypto.PrivateKey
failFetchingAttestationData bool
failAttestCall bool
}{
{
name: "insecure bootstrap",
insecureBootstrap: true,
},
{
name: "cached bundle empty",
cachedBundle: []byte(""),
err: "load bundle: no certs in bundle",
},
{
name: "cached bundle malformed",
cachedBundle: []byte("INVALID DER BYTES"),
err: "load bundle: error parsing bundle",
},
{
name: "fail fetching attestation data",
bootstrapBundle: caCert,
err: "fetching attestation data purposefully failed",
failFetchingAttestationData: true,
},
{
name: "response missing svid update",
bootstrapBundle: caCert,
omitSVIDUpdate: true,
err: "failed to parse attestation response: missing svid update",
},
{
name: "response has more than one svid",
bootstrapBundle: caCert,
overrideSVIDUpdate: &node.X509SVIDUpdate{
Svids: map[string]*node.X509SVID{
"spiffe://domain.test/not/used": {},
"spiffe://domain.test/also/not/used": {},
},
},
err: "failed to parse attestation response: expected 1 svid; got 2",
},
{
name: "response svid has invalid cert chain",
bootstrapBundle: caCert,
overrideSVIDUpdate: &node.X509SVIDUpdate{
Svids: map[string]*node.X509SVID{
"spiffe://domain.test/not/used": {CertChain: []byte("INVALID")},
},
},
err: "failed to parse attestation response: invalid svid cert chain",
},
{
name: "response svid has empty cert chain",
bootstrapBundle: caCert,
overrideSVIDUpdate: &node.X509SVIDUpdate{
Svids: map[string]*node.X509SVID{
"spiffe://domain.test/not/used": {},
},
},
err: "failed to parse attestation response: empty svid cert chain",
},
{
name: "response missing trust domain bundle",
bootstrapBundle: caCert,
overrideSVIDUpdate: &node.X509SVIDUpdate{
Svids: map[string]*node.X509SVID{
"spiffe://domain.test/not/used": {CertChain: agentCert.Raw},
},
},
err: "failed to parse attestation response: missing trust domain bundle",
},
{
name: "response has malformed trust domain bundle",
bootstrapBundle: caCert,
overrideSVIDUpdate: &node.X509SVIDUpdate{
Svids: map[string]*node.X509SVID{
"spiffe://domain.test/not/used": {CertChain: agentCert.Raw},
},
Bundles: map[string]*common.Bundle{
"spiffe://domain.test": {
RootCas: []*common.Certificate{
{DerBytes: []byte("INVALID")},
},
},
},
},
err: "failed to parse attestation response: invalid trust domain bundle",
},
{
name: "success with bootstrap bundle",
bootstrapBundle: caCert,
},
{
name: "success with cached bundle",
cachedBundle: caCert.Raw,
},
{
name: "success with expired cached bundle",
bootstrapBundle: caCert,
cachedSVID: expiredCert.Raw,
},
{
name: "success with join token",
bootstrapBundle: caCert,
joinToken: "JOINTOKEN",
},
{
name: "success with challenge response",
bootstrapBundle: caCert,
challengeResponses: []string{"FOO", "BAR", "BAZ"},
},
{
name: "cached svid and private key but missing bundle",
insecureBootstrap: true,
cachedSVID: agentCert.Raw,
storeKey: testKey,
err: "SVID loaded but no bundle in cache",
},
{
name: "success with cached svid, private key, and bundle",
cachedBundle: caCert.Raw,
cachedSVID: agentCert.Raw,
storeKey: testKey,
failAttestCall: true,
},
{
name: "malformed cached svid ignored",
bootstrapBundle: caCert,
cachedSVID: []byte("INVALID"),
storeKey: testKey,
failAttestCall: true,
err: "attestation has been purposefully failed",
},
{
name: "missing key in keymanager ignored",
bootstrapBundle: caCert,
cachedSVID: agentCert.Raw,
failAttestCall: true,
err: "attestation has been purposefully failed",
},
}
for _, testCase := range testCases {
testCase := testCase
t.Run(testCase.name, func(t *testing.T) {
require := require.New(t)
// prepare the temp directory holding the cached bundle/svid
svidCachePath, bundleCachePath, removeDir := prepareTestDir(t, testCase.cachedSVID, testCase.cachedBundle)
defer removeDir()
// load up the fake agent-side node attestor
agentNA, agentNADone := prepareAgentNA(t, fakeagentnodeattestor.Config{
Fail: testCase.failFetchingAttestationData,
Responses: testCase.challengeResponses,
})
defer agentNADone()
// load up the fake server-side node attestor
serverNA, serverNADone := prepareServerNA(t, fakeservernodeattestor.Config{
TrustDomain: "domain.test",
Data: map[string]string{
"TEST": "foo",
},
Challenges: map[string][]string{
"foo": testCase.challengeResponses,
},
})
defer serverNADone()
// load up an in-memory key manager
km, kmDone := prepareKeyManager(t, testCase.storeKey)
defer kmDone()
// initialize the catalog
catalog := fakeagentcatalog.New()
catalog.SetNodeAttestor(fakeagentcatalog.NodeAttestor("test", agentNA))
catalog.SetKeyManager(fakeagentcatalog.KeyManager(km))
// kick off the gRPC server serving the node API
serverAddr, serverDone := startNodeServer(t, tlsConfig, fakeNodeAPIConfig{
CACert: caCert,
Attestor: serverNA,
OmitSVIDUpdate: testCase.omitSVIDUpdate,
OverrideSVIDUpdate: testCase.overrideSVIDUpdate,
FailAttestCall: testCase.failAttestCall,
})
defer serverDone()
// create the attestor
log, _ := test.NewNullLogger()
attestor := New(&Config{
Catalog: catalog,
Metrics: telemetry.Blackhole{},
JoinToken: testCase.joinToken,
SVIDCachePath: svidCachePath,
BundleCachePath: bundleCachePath,
Log: log,
TrustDomain: url.URL{
Scheme: "spiffe",
Host: "domain.test",
},
TrustBundle: makeTrustBundle(testCase.bootstrapBundle),
InsecureBootstrap: testCase.insecureBootstrap,
ServerAddress: serverAddr,
})
// perform attestation
result, err := attestor.Attest(context.Background())
if testCase.err != "" {
spiretest.RequireErrorContains(t, err, testCase.err)
return
}
require.NoError(err)
require.NotNil(result)
require.Len(result.SVID, 1)
require.Len(result.SVID[0].URIs, 1)
if testCase.joinToken != "" {
require.Equal("spiffe://domain.test/spire/agent/join_token/"+testCase.joinToken, result.SVID[0].URIs[0].String())
} else {
require.Equal("spiffe://domain.test/spire/agent/test/foo", result.SVID[0].URIs[0].String())
}
require.NotNil(result.Key)
require.NotNil(result.Bundle)
rootCAs := result.Bundle.RootCAs()
require.Len(rootCAs, 1)
require.Equal(rootCAs[0].Raw, caCert.Raw)
})
}
}
func prepareTestDir(t *testing.T, cachedSVID, cachedBundle []byte) (string, string, func()) {
dir, err := ioutil.TempDir("", "spire-agent-node-attestor-")
require.NoError(t, err)
ok := false
defer func() {
if !ok {
os.RemoveAll(dir)
}
}()
svidCachePath := filepath.Join(dir, "svid.der")
bundleCachePath := filepath.Join(dir, "bundle.der")
if cachedSVID != nil {
writeFile(t, svidCachePath, cachedSVID, 0644)
}
if cachedBundle != nil {
writeFile(t, bundleCachePath, cachedBundle, 0644)
}
ok = true
return svidCachePath, bundleCachePath, func() {
os.RemoveAll(dir)
}
}
func prepareAgentNA(t *testing.T, config fakeagentnodeattestor.Config) (agentnodeattestor.NodeAttestor, func()) {
var agentNA agentnodeattestor.NodeAttestor
agentNADone := spiretest.LoadPlugin(t, catalog.MakePlugin("test",
agentnodeattestor.PluginServer(fakeagentnodeattestor.New(config)),
), &agentNA)
return agentNA, agentNADone
}
func prepareServerNA(t *testing.T, config fakeservernodeattestor.Config) (servernodeattestor.NodeAttestor, func()) {
var serverNA servernodeattestor.NodeAttestor
serverNADone := spiretest.LoadPlugin(t, catalog.MakePlugin("test",
servernodeattestor.PluginServer(fakeservernodeattestor.New("test", config)),
), &serverNA)
return serverNA, serverNADone
}
func prepareKeyManager(t *testing.T, key crypto.PrivateKey) (keymanager.KeyManager, func()) {
var km keymanager.KeyManager
kmDone := spiretest.LoadPlugin(t, memory.BuiltIn(), &km)
ok := false
defer func() {
if !ok {
kmDone()
}
}()
if key != nil {
storePrivateKey(t, km, key)
}
ok = true
return km, kmDone
}
func writeFile(t *testing.T, path string, data []byte, mode os.FileMode) {
require.NoError(t, ioutil.WriteFile(path, data, mode))
}
func createCACertificate(t *testing.T) *x509.Certificate {
tmpl := &x509.Certificate{
BasicConstraintsValid: true,
IsCA: true,
URIs: []*url.URL{idutil.TrustDomainURI("domain.test")},
}
return createCertificate(t, tmpl, tmpl)
}
func | (t *testing.T, caCert *x509.Certificate) *x509.Certificate {
tmpl := &x509.Certificate{
URIs: []*url.URL{idutil.ServerURI("domain.test")},
DNSNames: []string{"localhost"},
}
return createCertificate(t, tmpl, caCert)
}
func createAgentCertificate(t *testing.T, caCert *x509.Certificate, path string) *x509.Certificate {
tmpl := &x509.Certificate{
URIs: []*url.URL{idutil.AgentURI("domain.test", path)},
}
return createCertificate(t, tmpl, caCert)
}
func createExpiredCertificate(t *testing.T, caCert *x509.Certificate) *x509.Certificate {
tmpl := &x509.Certificate{
NotAfter: time.Now().Add(-1 * time.Hour),
URIs: []*url.URL{idutil.AgentURI("domain.test", "/test/expired")},
}
return createCertificate(t, tmpl, caCert)
}
func createCertificate(t *testing.T, tmpl, parent *x509.Certificate) *x509.Certificate {
now := time.Now()
tmpl.SerialNumber = big.NewInt(0)
tmpl.NotBefore = now
if tmpl.NotAfter.IsZero() {
tmpl.NotAfter = now.Add(time.Hour)
}
certDER, err := x509.CreateCertificate(rand.Reader, tmpl, parent, testKey.Public(), testKey)
require.NoError(t, err)
cert, err := x509.ParseCertificate(certDER)
require.NoError(t, err)
return cert
}
func storePrivateKey(t *testing.T, km keymanager.KeyManager, privateKey crypto.PrivateKey) {
ecKey, ok := privateKey.(*ecdsa.PrivateKey)
require.True(t, ok, "not an EC key")
keyBytes, err := x509.MarshalECPrivateKey(ecKey)
require.NoError(t, err)
_, err = km.StorePrivateKey(context.Background(), &keymanager.StorePrivateKeyRequest{
PrivateKey: keyBytes,
})
require.NoError(t, err)
}
func makeTrustBundle(bootstrapCert *x509.Certificate) []*x509.Certificate {
var trustBundle []*x509.Certificate
if bootstrapCert != nil {
trustBundle = append(trustBundle, bootstrapCert)
}
return trustBundle
}
func TestIsSVIDValid(t *testing.T) {
now := time.Now()
tests := []struct {
Desc string
SVID []*x509.Certificate
ExpectExpired bool
}{
{
Desc: "cert expiration is in the past",
SVID: []*x509.Certificate{
{NotAfter: now.Add(-2 * time.Second)},
},
ExpectExpired: true,
},
{
Desc: "cert is about to expire",
SVID: []*x509.Certificate{
{NotAfter: now.Add(time.Second)},
},
ExpectExpired: true,
},
{
Desc: "cert expiration is safely in the future",
SVID: []*x509.Certificate{
{NotAfter: now.Add(time.Minute)},
},
ExpectExpired: false,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.Desc, func(t *testing.T) {
isExpired := isSVIDExpired(tt.SVID, func() time.Time { return now })
require.Equal(t, tt.ExpectExpired, isExpired)
})
}
}
| createServerCertificate | identifier_name |
node_test.go | package attestor
import (
"context"
"crypto"
"crypto/ecdsa"
"crypto/rand"
"crypto/tls"
"crypto/x509"
"io/ioutil"
"math/big"
"net/url"
"os"
"path/filepath"
"testing"
"time"
"github.com/sirupsen/logrus/hooks/test"
"github.com/spiffe/spire/pkg/agent/plugin/keymanager"
"github.com/spiffe/spire/pkg/agent/plugin/keymanager/memory"
agentnodeattestor "github.com/spiffe/spire/pkg/agent/plugin/nodeattestor"
"github.com/spiffe/spire/pkg/common/catalog"
"github.com/spiffe/spire/pkg/common/idutil"
"github.com/spiffe/spire/pkg/common/pemutil"
"github.com/spiffe/spire/pkg/common/telemetry"
servernodeattestor "github.com/spiffe/spire/pkg/server/plugin/nodeattestor"
"github.com/spiffe/spire/proto/spire/api/node"
"github.com/spiffe/spire/proto/spire/common"
"github.com/spiffe/spire/test/fakes/fakeagentcatalog"
"github.com/spiffe/spire/test/fakes/fakeagentnodeattestor"
"github.com/spiffe/spire/test/fakes/fakeservernodeattestor"
"github.com/spiffe/spire/test/spiretest"
"github.com/stretchr/testify/require"
)
var (
testKey, _ = pemutil.ParseSigner([]byte(`-----BEGIN PRIVATE KEY-----
MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgy8ps3oQaBaSUFpfd
XM13o+VSA0tcZteyTvbOdIQNVnKhRANCAAT4dPIORBjghpL5O4h+9kyzZZUAFV9F
qNV3lKIL59N7G2B4ojbhfSNneSIIpP448uPxUnaunaQZ+/m7+x9oobIp
-----END PRIVATE KEY-----
`))
)
func TestAttestor(t *testing.T) {
// create CA and server certificates
caCert := createCACertificate(t)
serverCert := createServerCertificate(t, caCert)
agentCert := createAgentCertificate(t, caCert, "/test/foo")
expiredCert := createExpiredCertificate(t, caCert)
tlsConfig := &tls.Config{
Certificates: []tls.Certificate{
{
Certificate: [][]byte{serverCert.Raw},
PrivateKey: testKey,
},
},
}
testCases := []struct {
name string
challengeResponses []string
bootstrapBundle *x509.Certificate
insecureBootstrap bool
cachedBundle []byte
cachedSVID []byte
joinToken string
err string
omitSVIDUpdate bool
overrideSVIDUpdate *node.X509SVIDUpdate
storeKey crypto.PrivateKey
failFetchingAttestationData bool
failAttestCall bool
}{
{
name: "insecure bootstrap",
insecureBootstrap: true,
},
{
name: "cached bundle empty",
cachedBundle: []byte(""),
err: "load bundle: no certs in bundle",
},
{
name: "cached bundle malformed",
cachedBundle: []byte("INVALID DER BYTES"),
err: "load bundle: error parsing bundle",
},
{
name: "fail fetching attestation data",
bootstrapBundle: caCert,
err: "fetching attestation data purposefully failed",
failFetchingAttestationData: true,
},
{
name: "response missing svid update",
bootstrapBundle: caCert,
omitSVIDUpdate: true,
err: "failed to parse attestation response: missing svid update",
},
{
name: "response has more than one svid",
bootstrapBundle: caCert,
overrideSVIDUpdate: &node.X509SVIDUpdate{
Svids: map[string]*node.X509SVID{
"spiffe://domain.test/not/used": {},
"spiffe://domain.test/also/not/used": {},
},
},
err: "failed to parse attestation response: expected 1 svid; got 2",
},
{
name: "response svid has invalid cert chain",
bootstrapBundle: caCert,
overrideSVIDUpdate: &node.X509SVIDUpdate{
Svids: map[string]*node.X509SVID{
"spiffe://domain.test/not/used": {CertChain: []byte("INVALID")},
},
},
err: "failed to parse attestation response: invalid svid cert chain",
},
{
name: "response svid has empty cert chain",
bootstrapBundle: caCert,
overrideSVIDUpdate: &node.X509SVIDUpdate{
Svids: map[string]*node.X509SVID{
"spiffe://domain.test/not/used": {},
},
},
err: "failed to parse attestation response: empty svid cert chain",
},
{
name: "response missing trust domain bundle",
bootstrapBundle: caCert,
overrideSVIDUpdate: &node.X509SVIDUpdate{
Svids: map[string]*node.X509SVID{
"spiffe://domain.test/not/used": {CertChain: agentCert.Raw},
},
},
err: "failed to parse attestation response: missing trust domain bundle",
},
{
name: "response has malformed trust domain bundle",
bootstrapBundle: caCert,
overrideSVIDUpdate: &node.X509SVIDUpdate{
Svids: map[string]*node.X509SVID{
"spiffe://domain.test/not/used": {CertChain: agentCert.Raw},
},
Bundles: map[string]*common.Bundle{
"spiffe://domain.test": {
RootCas: []*common.Certificate{
{DerBytes: []byte("INVALID")},
},
},
},
},
err: "failed to parse attestation response: invalid trust domain bundle",
},
{
name: "success with bootstrap bundle",
bootstrapBundle: caCert,
},
{
name: "success with cached bundle",
cachedBundle: caCert.Raw,
},
{
name: "success with expired cached bundle",
bootstrapBundle: caCert,
cachedSVID: expiredCert.Raw,
},
{
name: "success with join token",
bootstrapBundle: caCert,
joinToken: "JOINTOKEN",
},
{
name: "success with challenge response",
bootstrapBundle: caCert,
challengeResponses: []string{"FOO", "BAR", "BAZ"},
},
{
name: "cached svid and private key but missing bundle",
insecureBootstrap: true,
cachedSVID: agentCert.Raw,
storeKey: testKey,
err: "SVID loaded but no bundle in cache",
},
{
name: "success with cached svid, private key, and bundle",
cachedBundle: caCert.Raw,
cachedSVID: agentCert.Raw,
storeKey: testKey,
failAttestCall: true,
},
{
name: "malformed cached svid ignored",
bootstrapBundle: caCert,
cachedSVID: []byte("INVALID"),
storeKey: testKey,
failAttestCall: true,
err: "attestation has been purposefully failed",
},
{
name: "missing key in keymanager ignored",
bootstrapBundle: caCert,
cachedSVID: agentCert.Raw,
failAttestCall: true,
err: "attestation has been purposefully failed",
},
}
for _, testCase := range testCases {
testCase := testCase
t.Run(testCase.name, func(t *testing.T) {
require := require.New(t)
// prepare the temp directory holding the cached bundle/svid
svidCachePath, bundleCachePath, removeDir := prepareTestDir(t, testCase.cachedSVID, testCase.cachedBundle)
defer removeDir()
// load up the fake agent-side node attestor
agentNA, agentNADone := prepareAgentNA(t, fakeagentnodeattestor.Config{
Fail: testCase.failFetchingAttestationData,
Responses: testCase.challengeResponses,
})
defer agentNADone()
// load up the fake server-side node attestor
serverNA, serverNADone := prepareServerNA(t, fakeservernodeattestor.Config{
TrustDomain: "domain.test",
Data: map[string]string{
"TEST": "foo",
},
Challenges: map[string][]string{
"foo": testCase.challengeResponses,
},
})
defer serverNADone()
// load up an in-memory key manager
km, kmDone := prepareKeyManager(t, testCase.storeKey)
defer kmDone()
// initialize the catalog
catalog := fakeagentcatalog.New()
catalog.SetNodeAttestor(fakeagentcatalog.NodeAttestor("test", agentNA))
catalog.SetKeyManager(fakeagentcatalog.KeyManager(km))
// kick off the gRPC server serving the node API
serverAddr, serverDone := startNodeServer(t, tlsConfig, fakeNodeAPIConfig{
CACert: caCert,
Attestor: serverNA,
OmitSVIDUpdate: testCase.omitSVIDUpdate,
OverrideSVIDUpdate: testCase.overrideSVIDUpdate,
FailAttestCall: testCase.failAttestCall,
})
defer serverDone()
// create the attestor
log, _ := test.NewNullLogger()
attestor := New(&Config{
Catalog: catalog,
Metrics: telemetry.Blackhole{},
JoinToken: testCase.joinToken,
SVIDCachePath: svidCachePath,
BundleCachePath: bundleCachePath,
Log: log,
TrustDomain: url.URL{
Scheme: "spiffe",
Host: "domain.test",
},
TrustBundle: makeTrustBundle(testCase.bootstrapBundle),
InsecureBootstrap: testCase.insecureBootstrap,
ServerAddress: serverAddr,
})
// perform attestation
result, err := attestor.Attest(context.Background())
if testCase.err != "" {
spiretest.RequireErrorContains(t, err, testCase.err)
return
}
require.NoError(err)
require.NotNil(result)
require.Len(result.SVID, 1)
require.Len(result.SVID[0].URIs, 1)
if testCase.joinToken != "" {
require.Equal("spiffe://domain.test/spire/agent/join_token/"+testCase.joinToken, result.SVID[0].URIs[0].String())
} else {
require.Equal("spiffe://domain.test/spire/agent/test/foo", result.SVID[0].URIs[0].String())
}
require.NotNil(result.Key)
require.NotNil(result.Bundle)
rootCAs := result.Bundle.RootCAs()
require.Len(rootCAs, 1)
require.Equal(rootCAs[0].Raw, caCert.Raw)
})
}
}
func prepareTestDir(t *testing.T, cachedSVID, cachedBundle []byte) (string, string, func()) {
dir, err := ioutil.TempDir("", "spire-agent-node-attestor-")
require.NoError(t, err)
ok := false
defer func() {
if !ok {
os.RemoveAll(dir)
}
}()
svidCachePath := filepath.Join(dir, "svid.der")
bundleCachePath := filepath.Join(dir, "bundle.der")
if cachedSVID != nil {
writeFile(t, svidCachePath, cachedSVID, 0644)
}
if cachedBundle != nil {
writeFile(t, bundleCachePath, cachedBundle, 0644)
}
ok = true
return svidCachePath, bundleCachePath, func() {
os.RemoveAll(dir)
}
}
func prepareAgentNA(t *testing.T, config fakeagentnodeattestor.Config) (agentnodeattestor.NodeAttestor, func()) {
var agentNA agentnodeattestor.NodeAttestor
agentNADone := spiretest.LoadPlugin(t, catalog.MakePlugin("test",
agentnodeattestor.PluginServer(fakeagentnodeattestor.New(config)),
), &agentNA)
return agentNA, agentNADone
}
func prepareServerNA(t *testing.T, config fakeservernodeattestor.Config) (servernodeattestor.NodeAttestor, func()) {
var serverNA servernodeattestor.NodeAttestor
serverNADone := spiretest.LoadPlugin(t, catalog.MakePlugin("test",
servernodeattestor.PluginServer(fakeservernodeattestor.New("test", config)),
), &serverNA)
return serverNA, serverNADone
}
func prepareKeyManager(t *testing.T, key crypto.PrivateKey) (keymanager.KeyManager, func()) {
var km keymanager.KeyManager
kmDone := spiretest.LoadPlugin(t, memory.BuiltIn(), &km)
ok := false
defer func() {
if !ok {
kmDone()
}
}()
if key != nil {
storePrivateKey(t, km, key)
}
ok = true
return km, kmDone
}
func writeFile(t *testing.T, path string, data []byte, mode os.FileMode) {
require.NoError(t, ioutil.WriteFile(path, data, mode))
}
func createCACertificate(t *testing.T) *x509.Certificate {
tmpl := &x509.Certificate{
BasicConstraintsValid: true,
IsCA: true,
URIs: []*url.URL{idutil.TrustDomainURI("domain.test")},
}
return createCertificate(t, tmpl, tmpl)
}
func createServerCertificate(t *testing.T, caCert *x509.Certificate) *x509.Certificate {
tmpl := &x509.Certificate{
URIs: []*url.URL{idutil.ServerURI("domain.test")},
DNSNames: []string{"localhost"},
}
return createCertificate(t, tmpl, caCert)
}
func createAgentCertificate(t *testing.T, caCert *x509.Certificate, path string) *x509.Certificate {
tmpl := &x509.Certificate{
URIs: []*url.URL{idutil.AgentURI("domain.test", path)},
}
return createCertificate(t, tmpl, caCert)
}
func createExpiredCertificate(t *testing.T, caCert *x509.Certificate) *x509.Certificate {
tmpl := &x509.Certificate{
NotAfter: time.Now().Add(-1 * time.Hour),
URIs: []*url.URL{idutil.AgentURI("domain.test", "/test/expired")},
}
return createCertificate(t, tmpl, caCert)
}
func createCertificate(t *testing.T, tmpl, parent *x509.Certificate) *x509.Certificate {
now := time.Now()
tmpl.SerialNumber = big.NewInt(0)
tmpl.NotBefore = now
if tmpl.NotAfter.IsZero() {
tmpl.NotAfter = now.Add(time.Hour)
}
certDER, err := x509.CreateCertificate(rand.Reader, tmpl, parent, testKey.Public(), testKey)
require.NoError(t, err)
cert, err := x509.ParseCertificate(certDER)
require.NoError(t, err)
return cert
}
func storePrivateKey(t *testing.T, km keymanager.KeyManager, privateKey crypto.PrivateKey) {
ecKey, ok := privateKey.(*ecdsa.PrivateKey)
require.True(t, ok, "not an EC key")
keyBytes, err := x509.MarshalECPrivateKey(ecKey)
require.NoError(t, err)
_, err = km.StorePrivateKey(context.Background(), &keymanager.StorePrivateKeyRequest{
PrivateKey: keyBytes,
})
require.NoError(t, err)
}
func makeTrustBundle(bootstrapCert *x509.Certificate) []*x509.Certificate {
var trustBundle []*x509.Certificate
if bootstrapCert != nil |
return trustBundle
}
func TestIsSVIDValid(t *testing.T) {
now := time.Now()
tests := []struct {
Desc string
SVID []*x509.Certificate
ExpectExpired bool
}{
{
Desc: "cert expiration is in the past",
SVID: []*x509.Certificate{
{NotAfter: now.Add(-2 * time.Second)},
},
ExpectExpired: true,
},
{
Desc: "cert is about to expire",
SVID: []*x509.Certificate{
{NotAfter: now.Add(time.Second)},
},
ExpectExpired: true,
},
{
Desc: "cert expiration is safely in the future",
SVID: []*x509.Certificate{
{NotAfter: now.Add(time.Minute)},
},
ExpectExpired: false,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.Desc, func(t *testing.T) {
isExpired := isSVIDExpired(tt.SVID, func() time.Time { return now })
require.Equal(t, tt.ExpectExpired, isExpired)
})
}
}
| {
trustBundle = append(trustBundle, bootstrapCert)
} | conditional_block |
node_test.go | package attestor
import (
"context"
"crypto"
"crypto/ecdsa"
"crypto/rand"
"crypto/tls"
"crypto/x509"
"io/ioutil"
"math/big"
"net/url"
"os"
"path/filepath"
"testing"
"time"
"github.com/sirupsen/logrus/hooks/test"
"github.com/spiffe/spire/pkg/agent/plugin/keymanager"
"github.com/spiffe/spire/pkg/agent/plugin/keymanager/memory"
agentnodeattestor "github.com/spiffe/spire/pkg/agent/plugin/nodeattestor"
"github.com/spiffe/spire/pkg/common/catalog"
"github.com/spiffe/spire/pkg/common/idutil"
"github.com/spiffe/spire/pkg/common/pemutil"
"github.com/spiffe/spire/pkg/common/telemetry"
servernodeattestor "github.com/spiffe/spire/pkg/server/plugin/nodeattestor"
"github.com/spiffe/spire/proto/spire/api/node"
"github.com/spiffe/spire/proto/spire/common"
"github.com/spiffe/spire/test/fakes/fakeagentcatalog"
"github.com/spiffe/spire/test/fakes/fakeagentnodeattestor"
"github.com/spiffe/spire/test/fakes/fakeservernodeattestor"
"github.com/spiffe/spire/test/spiretest"
"github.com/stretchr/testify/require"
)
var (
testKey, _ = pemutil.ParseSigner([]byte(`-----BEGIN PRIVATE KEY-----
MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgy8ps3oQaBaSUFpfd
XM13o+VSA0tcZteyTvbOdIQNVnKhRANCAAT4dPIORBjghpL5O4h+9kyzZZUAFV9F
qNV3lKIL59N7G2B4ojbhfSNneSIIpP448uPxUnaunaQZ+/m7+x9oobIp
-----END PRIVATE KEY-----
`))
)
func TestAttestor(t *testing.T) {
// create CA and server certificates
caCert := createCACertificate(t)
serverCert := createServerCertificate(t, caCert)
agentCert := createAgentCertificate(t, caCert, "/test/foo")
expiredCert := createExpiredCertificate(t, caCert)
tlsConfig := &tls.Config{
Certificates: []tls.Certificate{
{
Certificate: [][]byte{serverCert.Raw},
PrivateKey: testKey,
},
},
}
testCases := []struct {
name string
challengeResponses []string
bootstrapBundle *x509.Certificate
insecureBootstrap bool
cachedBundle []byte
cachedSVID []byte
joinToken string
err string
omitSVIDUpdate bool
overrideSVIDUpdate *node.X509SVIDUpdate
storeKey crypto.PrivateKey
failFetchingAttestationData bool
failAttestCall bool
}{
{
name: "insecure bootstrap",
insecureBootstrap: true,
},
{
name: "cached bundle empty",
cachedBundle: []byte(""),
err: "load bundle: no certs in bundle",
},
{
name: "cached bundle malformed",
cachedBundle: []byte("INVALID DER BYTES"),
err: "load bundle: error parsing bundle",
},
{
name: "fail fetching attestation data",
bootstrapBundle: caCert,
err: "fetching attestation data purposefully failed",
failFetchingAttestationData: true,
},
{
name: "response missing svid update",
bootstrapBundle: caCert,
omitSVIDUpdate: true,
err: "failed to parse attestation response: missing svid update",
},
{
name: "response has more than one svid",
bootstrapBundle: caCert,
overrideSVIDUpdate: &node.X509SVIDUpdate{
Svids: map[string]*node.X509SVID{
"spiffe://domain.test/not/used": {},
"spiffe://domain.test/also/not/used": {},
},
},
err: "failed to parse attestation response: expected 1 svid; got 2",
},
{
name: "response svid has invalid cert chain",
bootstrapBundle: caCert,
overrideSVIDUpdate: &node.X509SVIDUpdate{
Svids: map[string]*node.X509SVID{
"spiffe://domain.test/not/used": {CertChain: []byte("INVALID")},
},
},
err: "failed to parse attestation response: invalid svid cert chain",
},
{
name: "response svid has empty cert chain",
bootstrapBundle: caCert,
overrideSVIDUpdate: &node.X509SVIDUpdate{
Svids: map[string]*node.X509SVID{
"spiffe://domain.test/not/used": {},
},
},
err: "failed to parse attestation response: empty svid cert chain",
},
{
name: "response missing trust domain bundle",
bootstrapBundle: caCert,
overrideSVIDUpdate: &node.X509SVIDUpdate{
Svids: map[string]*node.X509SVID{
"spiffe://domain.test/not/used": {CertChain: agentCert.Raw},
},
},
err: "failed to parse attestation response: missing trust domain bundle",
},
{
name: "response has malformed trust domain bundle",
bootstrapBundle: caCert,
overrideSVIDUpdate: &node.X509SVIDUpdate{
Svids: map[string]*node.X509SVID{
"spiffe://domain.test/not/used": {CertChain: agentCert.Raw},
},
Bundles: map[string]*common.Bundle{
"spiffe://domain.test": {
RootCas: []*common.Certificate{
{DerBytes: []byte("INVALID")},
},
},
},
},
err: "failed to parse attestation response: invalid trust domain bundle",
},
{
name: "success with bootstrap bundle",
bootstrapBundle: caCert,
},
{
name: "success with cached bundle",
cachedBundle: caCert.Raw,
},
{
name: "success with expired cached bundle",
bootstrapBundle: caCert,
cachedSVID: expiredCert.Raw,
},
{
name: "success with join token",
bootstrapBundle: caCert,
joinToken: "JOINTOKEN",
},
{
name: "success with challenge response",
bootstrapBundle: caCert,
challengeResponses: []string{"FOO", "BAR", "BAZ"},
},
{
name: "cached svid and private key but missing bundle",
insecureBootstrap: true,
cachedSVID: agentCert.Raw,
storeKey: testKey,
err: "SVID loaded but no bundle in cache",
},
{
name: "success with cached svid, private key, and bundle",
cachedBundle: caCert.Raw,
cachedSVID: agentCert.Raw,
storeKey: testKey,
failAttestCall: true,
},
{
name: "malformed cached svid ignored",
bootstrapBundle: caCert,
cachedSVID: []byte("INVALID"),
storeKey: testKey,
failAttestCall: true,
err: "attestation has been purposefully failed",
},
{
name: "missing key in keymanager ignored",
bootstrapBundle: caCert,
cachedSVID: agentCert.Raw,
failAttestCall: true,
err: "attestation has been purposefully failed",
},
}
for _, testCase := range testCases {
testCase := testCase
t.Run(testCase.name, func(t *testing.T) {
require := require.New(t)
// prepare the temp directory holding the cached bundle/svid
svidCachePath, bundleCachePath, removeDir := prepareTestDir(t, testCase.cachedSVID, testCase.cachedBundle)
defer removeDir()
// load up the fake agent-side node attestor
agentNA, agentNADone := prepareAgentNA(t, fakeagentnodeattestor.Config{
Fail: testCase.failFetchingAttestationData,
Responses: testCase.challengeResponses,
})
defer agentNADone()
// load up the fake server-side node attestor
serverNA, serverNADone := prepareServerNA(t, fakeservernodeattestor.Config{
TrustDomain: "domain.test",
Data: map[string]string{
"TEST": "foo",
},
Challenges: map[string][]string{
"foo": testCase.challengeResponses,
},
})
defer serverNADone()
// load up an in-memory key manager
km, kmDone := prepareKeyManager(t, testCase.storeKey)
defer kmDone()
// initialize the catalog
catalog := fakeagentcatalog.New()
catalog.SetNodeAttestor(fakeagentcatalog.NodeAttestor("test", agentNA))
catalog.SetKeyManager(fakeagentcatalog.KeyManager(km))
// kick off the gRPC server serving the node API
serverAddr, serverDone := startNodeServer(t, tlsConfig, fakeNodeAPIConfig{
CACert: caCert,
Attestor: serverNA,
OmitSVIDUpdate: testCase.omitSVIDUpdate,
OverrideSVIDUpdate: testCase.overrideSVIDUpdate,
FailAttestCall: testCase.failAttestCall,
})
defer serverDone()
// create the attestor
log, _ := test.NewNullLogger()
attestor := New(&Config{
Catalog: catalog,
Metrics: telemetry.Blackhole{},
JoinToken: testCase.joinToken,
SVIDCachePath: svidCachePath,
BundleCachePath: bundleCachePath,
Log: log,
TrustDomain: url.URL{
Scheme: "spiffe",
Host: "domain.test",
},
TrustBundle: makeTrustBundle(testCase.bootstrapBundle),
InsecureBootstrap: testCase.insecureBootstrap,
ServerAddress: serverAddr,
})
// perform attestation
result, err := attestor.Attest(context.Background())
if testCase.err != "" {
spiretest.RequireErrorContains(t, err, testCase.err)
return
}
require.NoError(err)
require.NotNil(result)
require.Len(result.SVID, 1)
require.Len(result.SVID[0].URIs, 1)
if testCase.joinToken != "" {
require.Equal("spiffe://domain.test/spire/agent/join_token/"+testCase.joinToken, result.SVID[0].URIs[0].String())
} else {
require.Equal("spiffe://domain.test/spire/agent/test/foo", result.SVID[0].URIs[0].String())
}
require.NotNil(result.Key)
require.NotNil(result.Bundle)
rootCAs := result.Bundle.RootCAs()
require.Len(rootCAs, 1)
require.Equal(rootCAs[0].Raw, caCert.Raw)
})
}
}
func prepareTestDir(t *testing.T, cachedSVID, cachedBundle []byte) (string, string, func()) {
dir, err := ioutil.TempDir("", "spire-agent-node-attestor-")
require.NoError(t, err)
ok := false
defer func() {
if !ok {
os.RemoveAll(dir)
}
}()
svidCachePath := filepath.Join(dir, "svid.der")
bundleCachePath := filepath.Join(dir, "bundle.der")
if cachedSVID != nil {
writeFile(t, svidCachePath, cachedSVID, 0644)
}
if cachedBundle != nil {
writeFile(t, bundleCachePath, cachedBundle, 0644)
}
ok = true
return svidCachePath, bundleCachePath, func() {
os.RemoveAll(dir)
}
}
func prepareAgentNA(t *testing.T, config fakeagentnodeattestor.Config) (agentnodeattestor.NodeAttestor, func()) {
var agentNA agentnodeattestor.NodeAttestor
agentNADone := spiretest.LoadPlugin(t, catalog.MakePlugin("test",
agentnodeattestor.PluginServer(fakeagentnodeattestor.New(config)),
), &agentNA)
return agentNA, agentNADone
}
func prepareServerNA(t *testing.T, config fakeservernodeattestor.Config) (servernodeattestor.NodeAttestor, func()) |
func prepareKeyManager(t *testing.T, key crypto.PrivateKey) (keymanager.KeyManager, func()) {
var km keymanager.KeyManager
kmDone := spiretest.LoadPlugin(t, memory.BuiltIn(), &km)
ok := false
defer func() {
if !ok {
kmDone()
}
}()
if key != nil {
storePrivateKey(t, km, key)
}
ok = true
return km, kmDone
}
func writeFile(t *testing.T, path string, data []byte, mode os.FileMode) {
require.NoError(t, ioutil.WriteFile(path, data, mode))
}
func createCACertificate(t *testing.T) *x509.Certificate {
tmpl := &x509.Certificate{
BasicConstraintsValid: true,
IsCA: true,
URIs: []*url.URL{idutil.TrustDomainURI("domain.test")},
}
return createCertificate(t, tmpl, tmpl)
}
func createServerCertificate(t *testing.T, caCert *x509.Certificate) *x509.Certificate {
tmpl := &x509.Certificate{
URIs: []*url.URL{idutil.ServerURI("domain.test")},
DNSNames: []string{"localhost"},
}
return createCertificate(t, tmpl, caCert)
}
func createAgentCertificate(t *testing.T, caCert *x509.Certificate, path string) *x509.Certificate {
tmpl := &x509.Certificate{
URIs: []*url.URL{idutil.AgentURI("domain.test", path)},
}
return createCertificate(t, tmpl, caCert)
}
func createExpiredCertificate(t *testing.T, caCert *x509.Certificate) *x509.Certificate {
tmpl := &x509.Certificate{
NotAfter: time.Now().Add(-1 * time.Hour),
URIs: []*url.URL{idutil.AgentURI("domain.test", "/test/expired")},
}
return createCertificate(t, tmpl, caCert)
}
func createCertificate(t *testing.T, tmpl, parent *x509.Certificate) *x509.Certificate {
now := time.Now()
tmpl.SerialNumber = big.NewInt(0)
tmpl.NotBefore = now
if tmpl.NotAfter.IsZero() {
tmpl.NotAfter = now.Add(time.Hour)
}
certDER, err := x509.CreateCertificate(rand.Reader, tmpl, parent, testKey.Public(), testKey)
require.NoError(t, err)
cert, err := x509.ParseCertificate(certDER)
require.NoError(t, err)
return cert
}
func storePrivateKey(t *testing.T, km keymanager.KeyManager, privateKey crypto.PrivateKey) {
ecKey, ok := privateKey.(*ecdsa.PrivateKey)
require.True(t, ok, "not an EC key")
keyBytes, err := x509.MarshalECPrivateKey(ecKey)
require.NoError(t, err)
_, err = km.StorePrivateKey(context.Background(), &keymanager.StorePrivateKeyRequest{
PrivateKey: keyBytes,
})
require.NoError(t, err)
}
func makeTrustBundle(bootstrapCert *x509.Certificate) []*x509.Certificate {
var trustBundle []*x509.Certificate
if bootstrapCert != nil {
trustBundle = append(trustBundle, bootstrapCert)
}
return trustBundle
}
func TestIsSVIDValid(t *testing.T) {
now := time.Now()
tests := []struct {
Desc string
SVID []*x509.Certificate
ExpectExpired bool
}{
{
Desc: "cert expiration is in the past",
SVID: []*x509.Certificate{
{NotAfter: now.Add(-2 * time.Second)},
},
ExpectExpired: true,
},
{
Desc: "cert is about to expire",
SVID: []*x509.Certificate{
{NotAfter: now.Add(time.Second)},
},
ExpectExpired: true,
},
{
Desc: "cert expiration is safely in the future",
SVID: []*x509.Certificate{
{NotAfter: now.Add(time.Minute)},
},
ExpectExpired: false,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.Desc, func(t *testing.T) {
isExpired := isSVIDExpired(tt.SVID, func() time.Time { return now })
require.Equal(t, tt.ExpectExpired, isExpired)
})
}
}
| {
var serverNA servernodeattestor.NodeAttestor
serverNADone := spiretest.LoadPlugin(t, catalog.MakePlugin("test",
servernodeattestor.PluginServer(fakeservernodeattestor.New("test", config)),
), &serverNA)
return serverNA, serverNADone
} | identifier_body |
node_test.go | package attestor
import (
"context"
"crypto"
"crypto/ecdsa"
"crypto/rand"
"crypto/tls"
"crypto/x509"
"io/ioutil"
"math/big"
"net/url"
"os"
"path/filepath"
"testing"
"time"
"github.com/sirupsen/logrus/hooks/test"
"github.com/spiffe/spire/pkg/agent/plugin/keymanager"
"github.com/spiffe/spire/pkg/agent/plugin/keymanager/memory"
agentnodeattestor "github.com/spiffe/spire/pkg/agent/plugin/nodeattestor"
"github.com/spiffe/spire/pkg/common/catalog"
"github.com/spiffe/spire/pkg/common/idutil"
"github.com/spiffe/spire/pkg/common/pemutil"
"github.com/spiffe/spire/pkg/common/telemetry"
servernodeattestor "github.com/spiffe/spire/pkg/server/plugin/nodeattestor"
"github.com/spiffe/spire/proto/spire/api/node"
"github.com/spiffe/spire/proto/spire/common"
"github.com/spiffe/spire/test/fakes/fakeagentcatalog"
"github.com/spiffe/spire/test/fakes/fakeagentnodeattestor"
"github.com/spiffe/spire/test/fakes/fakeservernodeattestor"
"github.com/spiffe/spire/test/spiretest"
"github.com/stretchr/testify/require"
)
var (
testKey, _ = pemutil.ParseSigner([]byte(`-----BEGIN PRIVATE KEY-----
MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgy8ps3oQaBaSUFpfd
XM13o+VSA0tcZteyTvbOdIQNVnKhRANCAAT4dPIORBjghpL5O4h+9kyzZZUAFV9F
qNV3lKIL59N7G2B4ojbhfSNneSIIpP448uPxUnaunaQZ+/m7+x9oobIp
-----END PRIVATE KEY-----
`))
)
func TestAttestor(t *testing.T) {
// create CA and server certificates
caCert := createCACertificate(t) |
tlsConfig := &tls.Config{
Certificates: []tls.Certificate{
{
Certificate: [][]byte{serverCert.Raw},
PrivateKey: testKey,
},
},
}
testCases := []struct {
name string
challengeResponses []string
bootstrapBundle *x509.Certificate
insecureBootstrap bool
cachedBundle []byte
cachedSVID []byte
joinToken string
err string
omitSVIDUpdate bool
overrideSVIDUpdate *node.X509SVIDUpdate
storeKey crypto.PrivateKey
failFetchingAttestationData bool
failAttestCall bool
}{
{
name: "insecure bootstrap",
insecureBootstrap: true,
},
{
name: "cached bundle empty",
cachedBundle: []byte(""),
err: "load bundle: no certs in bundle",
},
{
name: "cached bundle malformed",
cachedBundle: []byte("INVALID DER BYTES"),
err: "load bundle: error parsing bundle",
},
{
name: "fail fetching attestation data",
bootstrapBundle: caCert,
err: "fetching attestation data purposefully failed",
failFetchingAttestationData: true,
},
{
name: "response missing svid update",
bootstrapBundle: caCert,
omitSVIDUpdate: true,
err: "failed to parse attestation response: missing svid update",
},
{
name: "response has more than one svid",
bootstrapBundle: caCert,
overrideSVIDUpdate: &node.X509SVIDUpdate{
Svids: map[string]*node.X509SVID{
"spiffe://domain.test/not/used": {},
"spiffe://domain.test/also/not/used": {},
},
},
err: "failed to parse attestation response: expected 1 svid; got 2",
},
{
name: "response svid has invalid cert chain",
bootstrapBundle: caCert,
overrideSVIDUpdate: &node.X509SVIDUpdate{
Svids: map[string]*node.X509SVID{
"spiffe://domain.test/not/used": {CertChain: []byte("INVALID")},
},
},
err: "failed to parse attestation response: invalid svid cert chain",
},
{
name: "response svid has empty cert chain",
bootstrapBundle: caCert,
overrideSVIDUpdate: &node.X509SVIDUpdate{
Svids: map[string]*node.X509SVID{
"spiffe://domain.test/not/used": {},
},
},
err: "failed to parse attestation response: empty svid cert chain",
},
{
name: "response missing trust domain bundle",
bootstrapBundle: caCert,
overrideSVIDUpdate: &node.X509SVIDUpdate{
Svids: map[string]*node.X509SVID{
"spiffe://domain.test/not/used": {CertChain: agentCert.Raw},
},
},
err: "failed to parse attestation response: missing trust domain bundle",
},
{
name: "response has malformed trust domain bundle",
bootstrapBundle: caCert,
overrideSVIDUpdate: &node.X509SVIDUpdate{
Svids: map[string]*node.X509SVID{
"spiffe://domain.test/not/used": {CertChain: agentCert.Raw},
},
Bundles: map[string]*common.Bundle{
"spiffe://domain.test": {
RootCas: []*common.Certificate{
{DerBytes: []byte("INVALID")},
},
},
},
},
err: "failed to parse attestation response: invalid trust domain bundle",
},
{
name: "success with bootstrap bundle",
bootstrapBundle: caCert,
},
{
name: "success with cached bundle",
cachedBundle: caCert.Raw,
},
{
name: "success with expired cached bundle",
bootstrapBundle: caCert,
cachedSVID: expiredCert.Raw,
},
{
name: "success with join token",
bootstrapBundle: caCert,
joinToken: "JOINTOKEN",
},
{
name: "success with challenge response",
bootstrapBundle: caCert,
challengeResponses: []string{"FOO", "BAR", "BAZ"},
},
{
name: "cached svid and private key but missing bundle",
insecureBootstrap: true,
cachedSVID: agentCert.Raw,
storeKey: testKey,
err: "SVID loaded but no bundle in cache",
},
{
name: "success with cached svid, private key, and bundle",
cachedBundle: caCert.Raw,
cachedSVID: agentCert.Raw,
storeKey: testKey,
failAttestCall: true,
},
{
name: "malformed cached svid ignored",
bootstrapBundle: caCert,
cachedSVID: []byte("INVALID"),
storeKey: testKey,
failAttestCall: true,
err: "attestation has been purposefully failed",
},
{
name: "missing key in keymanager ignored",
bootstrapBundle: caCert,
cachedSVID: agentCert.Raw,
failAttestCall: true,
err: "attestation has been purposefully failed",
},
}
for _, testCase := range testCases {
testCase := testCase
t.Run(testCase.name, func(t *testing.T) {
require := require.New(t)
// prepare the temp directory holding the cached bundle/svid
svidCachePath, bundleCachePath, removeDir := prepareTestDir(t, testCase.cachedSVID, testCase.cachedBundle)
defer removeDir()
// load up the fake agent-side node attestor
agentNA, agentNADone := prepareAgentNA(t, fakeagentnodeattestor.Config{
Fail: testCase.failFetchingAttestationData,
Responses: testCase.challengeResponses,
})
defer agentNADone()
// load up the fake server-side node attestor
serverNA, serverNADone := prepareServerNA(t, fakeservernodeattestor.Config{
TrustDomain: "domain.test",
Data: map[string]string{
"TEST": "foo",
},
Challenges: map[string][]string{
"foo": testCase.challengeResponses,
},
})
defer serverNADone()
// load up an in-memory key manager
km, kmDone := prepareKeyManager(t, testCase.storeKey)
defer kmDone()
// initialize the catalog
catalog := fakeagentcatalog.New()
catalog.SetNodeAttestor(fakeagentcatalog.NodeAttestor("test", agentNA))
catalog.SetKeyManager(fakeagentcatalog.KeyManager(km))
// kick off the gRPC server serving the node API
serverAddr, serverDone := startNodeServer(t, tlsConfig, fakeNodeAPIConfig{
CACert: caCert,
Attestor: serverNA,
OmitSVIDUpdate: testCase.omitSVIDUpdate,
OverrideSVIDUpdate: testCase.overrideSVIDUpdate,
FailAttestCall: testCase.failAttestCall,
})
defer serverDone()
// create the attestor
log, _ := test.NewNullLogger()
attestor := New(&Config{
Catalog: catalog,
Metrics: telemetry.Blackhole{},
JoinToken: testCase.joinToken,
SVIDCachePath: svidCachePath,
BundleCachePath: bundleCachePath,
Log: log,
TrustDomain: url.URL{
Scheme: "spiffe",
Host: "domain.test",
},
TrustBundle: makeTrustBundle(testCase.bootstrapBundle),
InsecureBootstrap: testCase.insecureBootstrap,
ServerAddress: serverAddr,
})
// perform attestation
result, err := attestor.Attest(context.Background())
if testCase.err != "" {
spiretest.RequireErrorContains(t, err, testCase.err)
return
}
require.NoError(err)
require.NotNil(result)
require.Len(result.SVID, 1)
require.Len(result.SVID[0].URIs, 1)
if testCase.joinToken != "" {
require.Equal("spiffe://domain.test/spire/agent/join_token/"+testCase.joinToken, result.SVID[0].URIs[0].String())
} else {
require.Equal("spiffe://domain.test/spire/agent/test/foo", result.SVID[0].URIs[0].String())
}
require.NotNil(result.Key)
require.NotNil(result.Bundle)
rootCAs := result.Bundle.RootCAs()
require.Len(rootCAs, 1)
require.Equal(rootCAs[0].Raw, caCert.Raw)
})
}
}
func prepareTestDir(t *testing.T, cachedSVID, cachedBundle []byte) (string, string, func()) {
dir, err := ioutil.TempDir("", "spire-agent-node-attestor-")
require.NoError(t, err)
ok := false
defer func() {
if !ok {
os.RemoveAll(dir)
}
}()
svidCachePath := filepath.Join(dir, "svid.der")
bundleCachePath := filepath.Join(dir, "bundle.der")
if cachedSVID != nil {
writeFile(t, svidCachePath, cachedSVID, 0644)
}
if cachedBundle != nil {
writeFile(t, bundleCachePath, cachedBundle, 0644)
}
ok = true
return svidCachePath, bundleCachePath, func() {
os.RemoveAll(dir)
}
}
func prepareAgentNA(t *testing.T, config fakeagentnodeattestor.Config) (agentnodeattestor.NodeAttestor, func()) {
var agentNA agentnodeattestor.NodeAttestor
agentNADone := spiretest.LoadPlugin(t, catalog.MakePlugin("test",
agentnodeattestor.PluginServer(fakeagentnodeattestor.New(config)),
), &agentNA)
return agentNA, agentNADone
}
func prepareServerNA(t *testing.T, config fakeservernodeattestor.Config) (servernodeattestor.NodeAttestor, func()) {
var serverNA servernodeattestor.NodeAttestor
serverNADone := spiretest.LoadPlugin(t, catalog.MakePlugin("test",
servernodeattestor.PluginServer(fakeservernodeattestor.New("test", config)),
), &serverNA)
return serverNA, serverNADone
}
func prepareKeyManager(t *testing.T, key crypto.PrivateKey) (keymanager.KeyManager, func()) {
var km keymanager.KeyManager
kmDone := spiretest.LoadPlugin(t, memory.BuiltIn(), &km)
ok := false
defer func() {
if !ok {
kmDone()
}
}()
if key != nil {
storePrivateKey(t, km, key)
}
ok = true
return km, kmDone
}
func writeFile(t *testing.T, path string, data []byte, mode os.FileMode) {
require.NoError(t, ioutil.WriteFile(path, data, mode))
}
func createCACertificate(t *testing.T) *x509.Certificate {
tmpl := &x509.Certificate{
BasicConstraintsValid: true,
IsCA: true,
URIs: []*url.URL{idutil.TrustDomainURI("domain.test")},
}
return createCertificate(t, tmpl, tmpl)
}
func createServerCertificate(t *testing.T, caCert *x509.Certificate) *x509.Certificate {
tmpl := &x509.Certificate{
URIs: []*url.URL{idutil.ServerURI("domain.test")},
DNSNames: []string{"localhost"},
}
return createCertificate(t, tmpl, caCert)
}
func createAgentCertificate(t *testing.T, caCert *x509.Certificate, path string) *x509.Certificate {
tmpl := &x509.Certificate{
URIs: []*url.URL{idutil.AgentURI("domain.test", path)},
}
return createCertificate(t, tmpl, caCert)
}
func createExpiredCertificate(t *testing.T, caCert *x509.Certificate) *x509.Certificate {
tmpl := &x509.Certificate{
NotAfter: time.Now().Add(-1 * time.Hour),
URIs: []*url.URL{idutil.AgentURI("domain.test", "/test/expired")},
}
return createCertificate(t, tmpl, caCert)
}
func createCertificate(t *testing.T, tmpl, parent *x509.Certificate) *x509.Certificate {
now := time.Now()
tmpl.SerialNumber = big.NewInt(0)
tmpl.NotBefore = now
if tmpl.NotAfter.IsZero() {
tmpl.NotAfter = now.Add(time.Hour)
}
certDER, err := x509.CreateCertificate(rand.Reader, tmpl, parent, testKey.Public(), testKey)
require.NoError(t, err)
cert, err := x509.ParseCertificate(certDER)
require.NoError(t, err)
return cert
}
func storePrivateKey(t *testing.T, km keymanager.KeyManager, privateKey crypto.PrivateKey) {
ecKey, ok := privateKey.(*ecdsa.PrivateKey)
require.True(t, ok, "not an EC key")
keyBytes, err := x509.MarshalECPrivateKey(ecKey)
require.NoError(t, err)
_, err = km.StorePrivateKey(context.Background(), &keymanager.StorePrivateKeyRequest{
PrivateKey: keyBytes,
})
require.NoError(t, err)
}
func makeTrustBundle(bootstrapCert *x509.Certificate) []*x509.Certificate {
var trustBundle []*x509.Certificate
if bootstrapCert != nil {
trustBundle = append(trustBundle, bootstrapCert)
}
return trustBundle
}
func TestIsSVIDValid(t *testing.T) {
now := time.Now()
tests := []struct {
Desc string
SVID []*x509.Certificate
ExpectExpired bool
}{
{
Desc: "cert expiration is in the past",
SVID: []*x509.Certificate{
{NotAfter: now.Add(-2 * time.Second)},
},
ExpectExpired: true,
},
{
Desc: "cert is about to expire",
SVID: []*x509.Certificate{
{NotAfter: now.Add(time.Second)},
},
ExpectExpired: true,
},
{
Desc: "cert expiration is safely in the future",
SVID: []*x509.Certificate{
{NotAfter: now.Add(time.Minute)},
},
ExpectExpired: false,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.Desc, func(t *testing.T) {
isExpired := isSVIDExpired(tt.SVID, func() time.Time { return now })
require.Equal(t, tt.ExpectExpired, isExpired)
})
}
} | serverCert := createServerCertificate(t, caCert)
agentCert := createAgentCertificate(t, caCert, "/test/foo")
expiredCert := createExpiredCertificate(t, caCert) | random_line_split |
controllerserver.go | /*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package goofys
import (
"context"
"fmt"
"strings"
"github.com/csi-driver/goofys-csi-driver/pkg/util"
"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage"
azstorage "github.com/Azure/azure-sdk-for-go/storage"
"github.com/container-storage-interface/spec/lib/go/csi"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/klog"
)
const goofysAccountNamePrefix = "fuse"
// CreateVolume provisions an goofys
func (d *Driver) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest) (*csi.CreateVolumeResponse, error) {
if err := d.ValidateControllerServiceRequest(csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME); err != nil {
klog.Errorf("invalid create volume req: %v", req)
return nil, err
}
volumeCapabilities := req.GetVolumeCapabilities()
name := req.GetName()
if len(name) == 0 {
return nil, status.Error(codes.InvalidArgument, "CreateVolume Name must be provided")
}
if len(volumeCapabilities) == 0 {
return nil, status.Error(codes.InvalidArgument, "CreateVolume Volume capabilities must be provided")
}
volSizeBytes := int64(req.GetCapacityRange().GetRequiredBytes())
requestGiB := int(util.RoundUpGiB(volSizeBytes))
parameters := req.GetParameters()
var storageAccountType, resourceGroup, location, accountName, containerName string
// Apply ProvisionerParameters (case-insensitive). We leave validation of
// the values to the cloud provider.
for k, v := range parameters {
switch strings.ToLower(k) {
case "skuname":
storageAccountType = v
case "storageaccounttype":
storageAccountType = v
case "location":
location = v
case "storageaccount":
accountName = v
case "resourcegroup":
resourceGroup = v
case "containername":
containerName = v
default:
return nil, fmt.Errorf("invalid option %q", k)
}
}
if resourceGroup == "" {
resourceGroup = d.cloud.ResourceGroup
}
account, accountKey, err := d.cloud.EnsureStorageAccount(accountName, storageAccountType, string(storage.BlockBlobStorage), resourceGroup, location, goofysAccountNamePrefix)
if err != nil {
return nil, fmt.Errorf("could not get storage key for storage account %s: %v", accountName, err)
}
accountName = account
if containerName == "" {
containerName = getValidContainerName(name)
}
klog.V(2).Infof("begin to create container(%s) on account(%s) type(%s) rg(%s) location(%s) size(%d)", containerName, accountName, storageAccountType, resourceGroup, location, requestGiB)
client, err := azstorage.NewBasicClientOnSovereignCloud(accountName, accountKey, d.cloud.Environment)
if err != nil {
return nil, err
}
blobClient := client.GetBlobService()
container := blobClient.GetContainerReference(containerName)
_, err = container.CreateIfNotExists(&azstorage.CreateContainerOptions{Access: azstorage.ContainerAccessTypePrivate})
if err != nil {
return nil, fmt.Errorf("failed to create container(%s) on account(%s) type(%s) rg(%s) location(%s) size(%d), error: %v", containerName, accountName, storageAccountType, resourceGroup, location, requestGiB, err)
}
volumeID := fmt.Sprintf(volumeIDTemplate, resourceGroup, accountName, containerName)
/* todo: snapshot support
if req.GetVolumeContentSource() != nil {
contentSource := req.GetVolumeContentSource()
if contentSource.GetSnapshot() != nil {
}
}
*/
klog.V(2).Infof("create container %s on storage account %s successfully", containerName, accountName)
return &csi.CreateVolumeResponse{
Volume: &csi.Volume{
VolumeId: volumeID,
CapacityBytes: req.GetCapacityRange().GetRequiredBytes(),
VolumeContext: parameters,
},
}, nil
}
// DeleteVolume delete an goofys
func (d *Driver) DeleteVolume(ctx context.Context, req *csi.DeleteVolumeRequest) (*csi.DeleteVolumeResponse, error) {
if len(req.GetVolumeId()) == 0 {
return nil, status.Error(codes.InvalidArgument, "Volume ID missing in request")
}
if err := d.ValidateControllerServiceRequest(csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME); err != nil {
return nil, fmt.Errorf("invalid delete volume req: %v", req)
}
volumeID := req.VolumeId
resourceGroupName, accountName, containerName, err := getContainerInfo(volumeID)
if err != nil {
klog.Errorf("getContainerInfo(%s) in DeleteVolume failed with error: %v", volumeID, err)
return &csi.DeleteVolumeResponse{}, nil
}
if resourceGroupName == "" {
resourceGroupName = d.cloud.ResourceGroup
}
accountKey, err := d.cloud.GetStorageAccesskey(accountName, resourceGroupName)
if err != nil {
return nil, fmt.Errorf("no key for storage account(%s) under resource group(%s), err %v", accountName, resourceGroupName, err)
}
klog.V(2).Infof("deleting container(%s) rg(%s) account(%s) volumeID(%s)", containerName, resourceGroupName, accountName, volumeID)
client, err := azstorage.NewBasicClientOnSovereignCloud(accountName, accountKey, d.cloud.Environment)
if err != nil {
return nil, err
}
blobClient := client.GetBlobService()
container := blobClient.GetContainerReference(containerName)
// todo: check what value to add into DeleteContainerOptions
err = wait.ExponentialBackoff(d.cloud.RequestBackoff(), func() (bool, error) {
_, err := container.DeleteIfExists(nil)
if err != nil && !strings.Contains(err.Error(), "ContainerBeingDeleted") {
return false, fmt.Errorf("failed to delete container(%s) on account(%s), error: %v", containerName, accountName, err)
}
return true, nil
})
if err != nil {
return nil, err
}
klog.V(2).Infof("container(%s) under rg(%s) account(%s) volumeID(%s) is deleted successfully", containerName, resourceGroupName, accountName, volumeID)
return &csi.DeleteVolumeResponse{}, nil
}
// ValidateVolumeCapabilities return the capabilities of the volume
func (d *Driver) | (ctx context.Context, req *csi.ValidateVolumeCapabilitiesRequest) (*csi.ValidateVolumeCapabilitiesResponse, error) {
if len(req.GetVolumeId()) == 0 {
return nil, status.Error(codes.InvalidArgument, "Volume ID missing in request")
}
if req.GetVolumeCapabilities() == nil {
return nil, status.Error(codes.InvalidArgument, "Volume capabilities missing in request")
}
volumeID := req.VolumeId
resourceGroupName, accountName, containerName, err := getContainerInfo(volumeID)
if err != nil {
klog.Errorf("getContainerInfo(%s) in ValidateVolumeCapabilities failed with error: %v", volumeID, err)
return nil, status.Error(codes.NotFound, err.Error())
}
if resourceGroupName == "" {
resourceGroupName = d.cloud.ResourceGroup
}
accountKey, err := d.cloud.GetStorageAccesskey(accountName, resourceGroupName)
if err != nil {
return nil, fmt.Errorf("no key for storage account(%s) under resource group(%s), err %v", accountName, resourceGroupName, err)
}
client, err := azstorage.NewBasicClientOnSovereignCloud(accountName, accountKey, d.cloud.Environment)
if err != nil {
return nil, err
}
blobClient := client.GetBlobService()
container := blobClient.GetContainerReference(containerName)
exist, err := container.Exists()
if err != nil {
return nil, err
}
if !exist {
return nil, status.Error(codes.NotFound, "the requested volume does not exist")
}
// goofys supports all AccessModes, no need to check capabilities here
return &csi.ValidateVolumeCapabilitiesResponse{Message: ""}, nil
}
// ControllerGetCapabilities returns the capabilities of the Controller plugin
func (d *Driver) ControllerGetCapabilities(ctx context.Context, req *csi.ControllerGetCapabilitiesRequest) (*csi.ControllerGetCapabilitiesResponse, error) {
klog.V(5).Infof("Using default ControllerGetCapabilities")
return &csi.ControllerGetCapabilitiesResponse{
Capabilities: d.Cap,
}, nil
}
// GetCapacity returns the capacity of the total available storage pool
func (d *Driver) GetCapacity(ctx context.Context, req *csi.GetCapacityRequest) (*csi.GetCapacityResponse, error) {
return nil, status.Error(codes.Unimplemented, "")
}
// ListVolumes return all available volumes
func (d *Driver) ListVolumes(ctx context.Context, req *csi.ListVolumesRequest) (*csi.ListVolumesResponse, error) {
return nil, status.Error(codes.Unimplemented, "")
}
// ControllerPublishVolume make a volume available on some required node
// N/A for goofys
func (d *Driver) ControllerPublishVolume(ctx context.Context, req *csi.ControllerPublishVolumeRequest) (*csi.ControllerPublishVolumeResponse, error) {
return nil, status.Error(codes.Unimplemented, "")
}
// ControllerUnpublishVolume make the volume unavailable on a specified node
// N/A for goofys
func (d *Driver) ControllerUnpublishVolume(ctx context.Context, req *csi.ControllerUnpublishVolumeRequest) (*csi.ControllerUnpublishVolumeResponse, error) {
return nil, status.Error(codes.Unimplemented, "")
}
// CreateSnapshot create a snapshot (todo)
func (d *Driver) CreateSnapshot(ctx context.Context, req *csi.CreateSnapshotRequest) (*csi.CreateSnapshotResponse, error) {
return nil, status.Error(codes.Unimplemented, "")
}
// DeleteSnapshot delete a snapshot (todo)
func (d *Driver) DeleteSnapshot(ctx context.Context, req *csi.DeleteSnapshotRequest) (*csi.DeleteSnapshotResponse, error) {
return nil, status.Error(codes.Unimplemented, "")
}
// ListSnapshots list all snapshots (todo)
func (d *Driver) ListSnapshots(ctx context.Context, req *csi.ListSnapshotsRequest) (*csi.ListSnapshotsResponse, error) {
return nil, status.Error(codes.Unimplemented, "")
}
// ControllerExpandVolume controller expand volume
func (d *Driver) ControllerExpandVolume(ctx context.Context, req *csi.ControllerExpandVolumeRequest) (*csi.ControllerExpandVolumeResponse, error) {
return nil, status.Error(codes.Unimplemented, "ControllerExpandVolume is not yet implemented")
}
| ValidateVolumeCapabilities | identifier_name |
controllerserver.go | /*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package goofys
import (
"context"
"fmt"
"strings"
"github.com/csi-driver/goofys-csi-driver/pkg/util"
"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage"
azstorage "github.com/Azure/azure-sdk-for-go/storage"
"github.com/container-storage-interface/spec/lib/go/csi"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/klog"
)
const goofysAccountNamePrefix = "fuse"
// CreateVolume provisions an goofys
func (d *Driver) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest) (*csi.CreateVolumeResponse, error) {
if err := d.ValidateControllerServiceRequest(csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME); err != nil {
klog.Errorf("invalid create volume req: %v", req)
return nil, err
}
volumeCapabilities := req.GetVolumeCapabilities()
name := req.GetName()
if len(name) == 0 {
return nil, status.Error(codes.InvalidArgument, "CreateVolume Name must be provided")
}
if len(volumeCapabilities) == 0 {
return nil, status.Error(codes.InvalidArgument, "CreateVolume Volume capabilities must be provided")
}
volSizeBytes := int64(req.GetCapacityRange().GetRequiredBytes())
requestGiB := int(util.RoundUpGiB(volSizeBytes))
parameters := req.GetParameters()
var storageAccountType, resourceGroup, location, accountName, containerName string
// Apply ProvisionerParameters (case-insensitive). We leave validation of
// the values to the cloud provider.
for k, v := range parameters {
switch strings.ToLower(k) {
case "skuname":
storageAccountType = v
case "storageaccounttype":
storageAccountType = v
case "location":
location = v
case "storageaccount":
accountName = v
case "resourcegroup":
resourceGroup = v
case "containername":
containerName = v
default:
return nil, fmt.Errorf("invalid option %q", k)
}
}
if resourceGroup == "" {
resourceGroup = d.cloud.ResourceGroup
}
account, accountKey, err := d.cloud.EnsureStorageAccount(accountName, storageAccountType, string(storage.BlockBlobStorage), resourceGroup, location, goofysAccountNamePrefix)
if err != nil {
return nil, fmt.Errorf("could not get storage key for storage account %s: %v", accountName, err)
}
accountName = account
if containerName == "" {
containerName = getValidContainerName(name)
}
klog.V(2).Infof("begin to create container(%s) on account(%s) type(%s) rg(%s) location(%s) size(%d)", containerName, accountName, storageAccountType, resourceGroup, location, requestGiB)
client, err := azstorage.NewBasicClientOnSovereignCloud(accountName, accountKey, d.cloud.Environment)
if err != nil |
blobClient := client.GetBlobService()
container := blobClient.GetContainerReference(containerName)
_, err = container.CreateIfNotExists(&azstorage.CreateContainerOptions{Access: azstorage.ContainerAccessTypePrivate})
if err != nil {
return nil, fmt.Errorf("failed to create container(%s) on account(%s) type(%s) rg(%s) location(%s) size(%d), error: %v", containerName, accountName, storageAccountType, resourceGroup, location, requestGiB, err)
}
volumeID := fmt.Sprintf(volumeIDTemplate, resourceGroup, accountName, containerName)
/* todo: snapshot support
if req.GetVolumeContentSource() != nil {
contentSource := req.GetVolumeContentSource()
if contentSource.GetSnapshot() != nil {
}
}
*/
klog.V(2).Infof("create container %s on storage account %s successfully", containerName, accountName)
return &csi.CreateVolumeResponse{
Volume: &csi.Volume{
VolumeId: volumeID,
CapacityBytes: req.GetCapacityRange().GetRequiredBytes(),
VolumeContext: parameters,
},
}, nil
}
// DeleteVolume delete an goofys
func (d *Driver) DeleteVolume(ctx context.Context, req *csi.DeleteVolumeRequest) (*csi.DeleteVolumeResponse, error) {
if len(req.GetVolumeId()) == 0 {
return nil, status.Error(codes.InvalidArgument, "Volume ID missing in request")
}
if err := d.ValidateControllerServiceRequest(csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME); err != nil {
return nil, fmt.Errorf("invalid delete volume req: %v", req)
}
volumeID := req.VolumeId
resourceGroupName, accountName, containerName, err := getContainerInfo(volumeID)
if err != nil {
klog.Errorf("getContainerInfo(%s) in DeleteVolume failed with error: %v", volumeID, err)
return &csi.DeleteVolumeResponse{}, nil
}
if resourceGroupName == "" {
resourceGroupName = d.cloud.ResourceGroup
}
accountKey, err := d.cloud.GetStorageAccesskey(accountName, resourceGroupName)
if err != nil {
return nil, fmt.Errorf("no key for storage account(%s) under resource group(%s), err %v", accountName, resourceGroupName, err)
}
klog.V(2).Infof("deleting container(%s) rg(%s) account(%s) volumeID(%s)", containerName, resourceGroupName, accountName, volumeID)
client, err := azstorage.NewBasicClientOnSovereignCloud(accountName, accountKey, d.cloud.Environment)
if err != nil {
return nil, err
}
blobClient := client.GetBlobService()
container := blobClient.GetContainerReference(containerName)
// todo: check what value to add into DeleteContainerOptions
err = wait.ExponentialBackoff(d.cloud.RequestBackoff(), func() (bool, error) {
_, err := container.DeleteIfExists(nil)
if err != nil && !strings.Contains(err.Error(), "ContainerBeingDeleted") {
return false, fmt.Errorf("failed to delete container(%s) on account(%s), error: %v", containerName, accountName, err)
}
return true, nil
})
if err != nil {
return nil, err
}
klog.V(2).Infof("container(%s) under rg(%s) account(%s) volumeID(%s) is deleted successfully", containerName, resourceGroupName, accountName, volumeID)
return &csi.DeleteVolumeResponse{}, nil
}
// ValidateVolumeCapabilities return the capabilities of the volume
func (d *Driver) ValidateVolumeCapabilities(ctx context.Context, req *csi.ValidateVolumeCapabilitiesRequest) (*csi.ValidateVolumeCapabilitiesResponse, error) {
if len(req.GetVolumeId()) == 0 {
return nil, status.Error(codes.InvalidArgument, "Volume ID missing in request")
}
if req.GetVolumeCapabilities() == nil {
return nil, status.Error(codes.InvalidArgument, "Volume capabilities missing in request")
}
volumeID := req.VolumeId
resourceGroupName, accountName, containerName, err := getContainerInfo(volumeID)
if err != nil {
klog.Errorf("getContainerInfo(%s) in ValidateVolumeCapabilities failed with error: %v", volumeID, err)
return nil, status.Error(codes.NotFound, err.Error())
}
if resourceGroupName == "" {
resourceGroupName = d.cloud.ResourceGroup
}
accountKey, err := d.cloud.GetStorageAccesskey(accountName, resourceGroupName)
if err != nil {
return nil, fmt.Errorf("no key for storage account(%s) under resource group(%s), err %v", accountName, resourceGroupName, err)
}
client, err := azstorage.NewBasicClientOnSovereignCloud(accountName, accountKey, d.cloud.Environment)
if err != nil {
return nil, err
}
blobClient := client.GetBlobService()
container := blobClient.GetContainerReference(containerName)
exist, err := container.Exists()
if err != nil {
return nil, err
}
if !exist {
return nil, status.Error(codes.NotFound, "the requested volume does not exist")
}
// goofys supports all AccessModes, no need to check capabilities here
return &csi.ValidateVolumeCapabilitiesResponse{Message: ""}, nil
}
// ControllerGetCapabilities returns the capabilities of the Controller plugin
func (d *Driver) ControllerGetCapabilities(ctx context.Context, req *csi.ControllerGetCapabilitiesRequest) (*csi.ControllerGetCapabilitiesResponse, error) {
klog.V(5).Infof("Using default ControllerGetCapabilities")
return &csi.ControllerGetCapabilitiesResponse{
Capabilities: d.Cap,
}, nil
}
// GetCapacity returns the capacity of the total available storage pool
func (d *Driver) GetCapacity(ctx context.Context, req *csi.GetCapacityRequest) (*csi.GetCapacityResponse, error) {
return nil, status.Error(codes.Unimplemented, "")
}
// ListVolumes return all available volumes
func (d *Driver) ListVolumes(ctx context.Context, req *csi.ListVolumesRequest) (*csi.ListVolumesResponse, error) {
return nil, status.Error(codes.Unimplemented, "")
}
// ControllerPublishVolume make a volume available on some required node
// N/A for goofys
func (d *Driver) ControllerPublishVolume(ctx context.Context, req *csi.ControllerPublishVolumeRequest) (*csi.ControllerPublishVolumeResponse, error) {
return nil, status.Error(codes.Unimplemented, "")
}
// ControllerUnpublishVolume make the volume unavailable on a specified node
// N/A for goofys
func (d *Driver) ControllerUnpublishVolume(ctx context.Context, req *csi.ControllerUnpublishVolumeRequest) (*csi.ControllerUnpublishVolumeResponse, error) {
return nil, status.Error(codes.Unimplemented, "")
}
// CreateSnapshot create a snapshot (todo)
func (d *Driver) CreateSnapshot(ctx context.Context, req *csi.CreateSnapshotRequest) (*csi.CreateSnapshotResponse, error) {
return nil, status.Error(codes.Unimplemented, "")
}
// DeleteSnapshot delete a snapshot (todo)
func (d *Driver) DeleteSnapshot(ctx context.Context, req *csi.DeleteSnapshotRequest) (*csi.DeleteSnapshotResponse, error) {
return nil, status.Error(codes.Unimplemented, "")
}
// ListSnapshots list all snapshots (todo)
func (d *Driver) ListSnapshots(ctx context.Context, req *csi.ListSnapshotsRequest) (*csi.ListSnapshotsResponse, error) {
return nil, status.Error(codes.Unimplemented, "")
}
// ControllerExpandVolume controller expand volume
func (d *Driver) ControllerExpandVolume(ctx context.Context, req *csi.ControllerExpandVolumeRequest) (*csi.ControllerExpandVolumeResponse, error) {
return nil, status.Error(codes.Unimplemented, "ControllerExpandVolume is not yet implemented")
}
| {
return nil, err
} | conditional_block |
controllerserver.go | /*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package goofys
import (
"context"
"fmt"
"strings"
"github.com/csi-driver/goofys-csi-driver/pkg/util"
"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage"
azstorage "github.com/Azure/azure-sdk-for-go/storage"
"github.com/container-storage-interface/spec/lib/go/csi"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/klog"
)
const goofysAccountNamePrefix = "fuse"
// CreateVolume provisions an goofys
func (d *Driver) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest) (*csi.CreateVolumeResponse, error) {
if err := d.ValidateControllerServiceRequest(csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME); err != nil {
klog.Errorf("invalid create volume req: %v", req)
return nil, err
}
volumeCapabilities := req.GetVolumeCapabilities()
name := req.GetName()
if len(name) == 0 {
return nil, status.Error(codes.InvalidArgument, "CreateVolume Name must be provided") |
volSizeBytes := int64(req.GetCapacityRange().GetRequiredBytes())
requestGiB := int(util.RoundUpGiB(volSizeBytes))
parameters := req.GetParameters()
var storageAccountType, resourceGroup, location, accountName, containerName string
// Apply ProvisionerParameters (case-insensitive). We leave validation of
// the values to the cloud provider.
for k, v := range parameters {
switch strings.ToLower(k) {
case "skuname":
storageAccountType = v
case "storageaccounttype":
storageAccountType = v
case "location":
location = v
case "storageaccount":
accountName = v
case "resourcegroup":
resourceGroup = v
case "containername":
containerName = v
default:
return nil, fmt.Errorf("invalid option %q", k)
}
}
if resourceGroup == "" {
resourceGroup = d.cloud.ResourceGroup
}
account, accountKey, err := d.cloud.EnsureStorageAccount(accountName, storageAccountType, string(storage.BlockBlobStorage), resourceGroup, location, goofysAccountNamePrefix)
if err != nil {
return nil, fmt.Errorf("could not get storage key for storage account %s: %v", accountName, err)
}
accountName = account
if containerName == "" {
containerName = getValidContainerName(name)
}
klog.V(2).Infof("begin to create container(%s) on account(%s) type(%s) rg(%s) location(%s) size(%d)", containerName, accountName, storageAccountType, resourceGroup, location, requestGiB)
client, err := azstorage.NewBasicClientOnSovereignCloud(accountName, accountKey, d.cloud.Environment)
if err != nil {
return nil, err
}
blobClient := client.GetBlobService()
container := blobClient.GetContainerReference(containerName)
_, err = container.CreateIfNotExists(&azstorage.CreateContainerOptions{Access: azstorage.ContainerAccessTypePrivate})
if err != nil {
return nil, fmt.Errorf("failed to create container(%s) on account(%s) type(%s) rg(%s) location(%s) size(%d), error: %v", containerName, accountName, storageAccountType, resourceGroup, location, requestGiB, err)
}
volumeID := fmt.Sprintf(volumeIDTemplate, resourceGroup, accountName, containerName)
/* todo: snapshot support
if req.GetVolumeContentSource() != nil {
contentSource := req.GetVolumeContentSource()
if contentSource.GetSnapshot() != nil {
}
}
*/
klog.V(2).Infof("create container %s on storage account %s successfully", containerName, accountName)
return &csi.CreateVolumeResponse{
Volume: &csi.Volume{
VolumeId: volumeID,
CapacityBytes: req.GetCapacityRange().GetRequiredBytes(),
VolumeContext: parameters,
},
}, nil
}
// DeleteVolume delete an goofys
func (d *Driver) DeleteVolume(ctx context.Context, req *csi.DeleteVolumeRequest) (*csi.DeleteVolumeResponse, error) {
if len(req.GetVolumeId()) == 0 {
return nil, status.Error(codes.InvalidArgument, "Volume ID missing in request")
}
if err := d.ValidateControllerServiceRequest(csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME); err != nil {
return nil, fmt.Errorf("invalid delete volume req: %v", req)
}
volumeID := req.VolumeId
resourceGroupName, accountName, containerName, err := getContainerInfo(volumeID)
if err != nil {
klog.Errorf("getContainerInfo(%s) in DeleteVolume failed with error: %v", volumeID, err)
return &csi.DeleteVolumeResponse{}, nil
}
if resourceGroupName == "" {
resourceGroupName = d.cloud.ResourceGroup
}
accountKey, err := d.cloud.GetStorageAccesskey(accountName, resourceGroupName)
if err != nil {
return nil, fmt.Errorf("no key for storage account(%s) under resource group(%s), err %v", accountName, resourceGroupName, err)
}
klog.V(2).Infof("deleting container(%s) rg(%s) account(%s) volumeID(%s)", containerName, resourceGroupName, accountName, volumeID)
client, err := azstorage.NewBasicClientOnSovereignCloud(accountName, accountKey, d.cloud.Environment)
if err != nil {
return nil, err
}
blobClient := client.GetBlobService()
container := blobClient.GetContainerReference(containerName)
// todo: check what value to add into DeleteContainerOptions
err = wait.ExponentialBackoff(d.cloud.RequestBackoff(), func() (bool, error) {
_, err := container.DeleteIfExists(nil)
if err != nil && !strings.Contains(err.Error(), "ContainerBeingDeleted") {
return false, fmt.Errorf("failed to delete container(%s) on account(%s), error: %v", containerName, accountName, err)
}
return true, nil
})
if err != nil {
return nil, err
}
klog.V(2).Infof("container(%s) under rg(%s) account(%s) volumeID(%s) is deleted successfully", containerName, resourceGroupName, accountName, volumeID)
return &csi.DeleteVolumeResponse{}, nil
}
// ValidateVolumeCapabilities return the capabilities of the volume
func (d *Driver) ValidateVolumeCapabilities(ctx context.Context, req *csi.ValidateVolumeCapabilitiesRequest) (*csi.ValidateVolumeCapabilitiesResponse, error) {
if len(req.GetVolumeId()) == 0 {
return nil, status.Error(codes.InvalidArgument, "Volume ID missing in request")
}
if req.GetVolumeCapabilities() == nil {
return nil, status.Error(codes.InvalidArgument, "Volume capabilities missing in request")
}
volumeID := req.VolumeId
resourceGroupName, accountName, containerName, err := getContainerInfo(volumeID)
if err != nil {
klog.Errorf("getContainerInfo(%s) in ValidateVolumeCapabilities failed with error: %v", volumeID, err)
return nil, status.Error(codes.NotFound, err.Error())
}
if resourceGroupName == "" {
resourceGroupName = d.cloud.ResourceGroup
}
accountKey, err := d.cloud.GetStorageAccesskey(accountName, resourceGroupName)
if err != nil {
return nil, fmt.Errorf("no key for storage account(%s) under resource group(%s), err %v", accountName, resourceGroupName, err)
}
client, err := azstorage.NewBasicClientOnSovereignCloud(accountName, accountKey, d.cloud.Environment)
if err != nil {
return nil, err
}
blobClient := client.GetBlobService()
container := blobClient.GetContainerReference(containerName)
exist, err := container.Exists()
if err != nil {
return nil, err
}
if !exist {
return nil, status.Error(codes.NotFound, "the requested volume does not exist")
}
// goofys supports all AccessModes, no need to check capabilities here
return &csi.ValidateVolumeCapabilitiesResponse{Message: ""}, nil
}
// ControllerGetCapabilities returns the capabilities of the Controller plugin
func (d *Driver) ControllerGetCapabilities(ctx context.Context, req *csi.ControllerGetCapabilitiesRequest) (*csi.ControllerGetCapabilitiesResponse, error) {
klog.V(5).Infof("Using default ControllerGetCapabilities")
return &csi.ControllerGetCapabilitiesResponse{
Capabilities: d.Cap,
}, nil
}
// GetCapacity returns the capacity of the total available storage pool
func (d *Driver) GetCapacity(ctx context.Context, req *csi.GetCapacityRequest) (*csi.GetCapacityResponse, error) {
return nil, status.Error(codes.Unimplemented, "")
}
// ListVolumes return all available volumes
func (d *Driver) ListVolumes(ctx context.Context, req *csi.ListVolumesRequest) (*csi.ListVolumesResponse, error) {
return nil, status.Error(codes.Unimplemented, "")
}
// ControllerPublishVolume make a volume available on some required node
// N/A for goofys
func (d *Driver) ControllerPublishVolume(ctx context.Context, req *csi.ControllerPublishVolumeRequest) (*csi.ControllerPublishVolumeResponse, error) {
return nil, status.Error(codes.Unimplemented, "")
}
// ControllerUnpublishVolume make the volume unavailable on a specified node
// N/A for goofys
func (d *Driver) ControllerUnpublishVolume(ctx context.Context, req *csi.ControllerUnpublishVolumeRequest) (*csi.ControllerUnpublishVolumeResponse, error) {
return nil, status.Error(codes.Unimplemented, "")
}
// CreateSnapshot create a snapshot (todo)
func (d *Driver) CreateSnapshot(ctx context.Context, req *csi.CreateSnapshotRequest) (*csi.CreateSnapshotResponse, error) {
return nil, status.Error(codes.Unimplemented, "")
}
// DeleteSnapshot delete a snapshot (todo)
func (d *Driver) DeleteSnapshot(ctx context.Context, req *csi.DeleteSnapshotRequest) (*csi.DeleteSnapshotResponse, error) {
return nil, status.Error(codes.Unimplemented, "")
}
// ListSnapshots list all snapshots (todo)
func (d *Driver) ListSnapshots(ctx context.Context, req *csi.ListSnapshotsRequest) (*csi.ListSnapshotsResponse, error) {
return nil, status.Error(codes.Unimplemented, "")
}
// ControllerExpandVolume controller expand volume
func (d *Driver) ControllerExpandVolume(ctx context.Context, req *csi.ControllerExpandVolumeRequest) (*csi.ControllerExpandVolumeResponse, error) {
return nil, status.Error(codes.Unimplemented, "ControllerExpandVolume is not yet implemented")
} | }
if len(volumeCapabilities) == 0 {
return nil, status.Error(codes.InvalidArgument, "CreateVolume Volume capabilities must be provided")
} | random_line_split |
controllerserver.go | /*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package goofys
import (
"context"
"fmt"
"strings"
"github.com/csi-driver/goofys-csi-driver/pkg/util"
"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage"
azstorage "github.com/Azure/azure-sdk-for-go/storage"
"github.com/container-storage-interface/spec/lib/go/csi"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/klog"
)
const goofysAccountNamePrefix = "fuse"
// CreateVolume provisions an goofys
func (d *Driver) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest) (*csi.CreateVolumeResponse, error) {
if err := d.ValidateControllerServiceRequest(csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME); err != nil {
klog.Errorf("invalid create volume req: %v", req)
return nil, err
}
volumeCapabilities := req.GetVolumeCapabilities()
name := req.GetName()
if len(name) == 0 {
return nil, status.Error(codes.InvalidArgument, "CreateVolume Name must be provided")
}
if len(volumeCapabilities) == 0 {
return nil, status.Error(codes.InvalidArgument, "CreateVolume Volume capabilities must be provided")
}
volSizeBytes := int64(req.GetCapacityRange().GetRequiredBytes())
requestGiB := int(util.RoundUpGiB(volSizeBytes))
parameters := req.GetParameters()
var storageAccountType, resourceGroup, location, accountName, containerName string
// Apply ProvisionerParameters (case-insensitive). We leave validation of
// the values to the cloud provider.
for k, v := range parameters {
switch strings.ToLower(k) {
case "skuname":
storageAccountType = v
case "storageaccounttype":
storageAccountType = v
case "location":
location = v
case "storageaccount":
accountName = v
case "resourcegroup":
resourceGroup = v
case "containername":
containerName = v
default:
return nil, fmt.Errorf("invalid option %q", k)
}
}
if resourceGroup == "" {
resourceGroup = d.cloud.ResourceGroup
}
account, accountKey, err := d.cloud.EnsureStorageAccount(accountName, storageAccountType, string(storage.BlockBlobStorage), resourceGroup, location, goofysAccountNamePrefix)
if err != nil {
return nil, fmt.Errorf("could not get storage key for storage account %s: %v", accountName, err)
}
accountName = account
if containerName == "" {
containerName = getValidContainerName(name)
}
klog.V(2).Infof("begin to create container(%s) on account(%s) type(%s) rg(%s) location(%s) size(%d)", containerName, accountName, storageAccountType, resourceGroup, location, requestGiB)
client, err := azstorage.NewBasicClientOnSovereignCloud(accountName, accountKey, d.cloud.Environment)
if err != nil {
return nil, err
}
blobClient := client.GetBlobService()
container := blobClient.GetContainerReference(containerName)
_, err = container.CreateIfNotExists(&azstorage.CreateContainerOptions{Access: azstorage.ContainerAccessTypePrivate})
if err != nil {
return nil, fmt.Errorf("failed to create container(%s) on account(%s) type(%s) rg(%s) location(%s) size(%d), error: %v", containerName, accountName, storageAccountType, resourceGroup, location, requestGiB, err)
}
volumeID := fmt.Sprintf(volumeIDTemplate, resourceGroup, accountName, containerName)
/* todo: snapshot support
if req.GetVolumeContentSource() != nil {
contentSource := req.GetVolumeContentSource()
if contentSource.GetSnapshot() != nil {
}
}
*/
klog.V(2).Infof("create container %s on storage account %s successfully", containerName, accountName)
return &csi.CreateVolumeResponse{
Volume: &csi.Volume{
VolumeId: volumeID,
CapacityBytes: req.GetCapacityRange().GetRequiredBytes(),
VolumeContext: parameters,
},
}, nil
}
// DeleteVolume delete an goofys
func (d *Driver) DeleteVolume(ctx context.Context, req *csi.DeleteVolumeRequest) (*csi.DeleteVolumeResponse, error) {
if len(req.GetVolumeId()) == 0 {
return nil, status.Error(codes.InvalidArgument, "Volume ID missing in request")
}
if err := d.ValidateControllerServiceRequest(csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME); err != nil {
return nil, fmt.Errorf("invalid delete volume req: %v", req)
}
volumeID := req.VolumeId
resourceGroupName, accountName, containerName, err := getContainerInfo(volumeID)
if err != nil {
klog.Errorf("getContainerInfo(%s) in DeleteVolume failed with error: %v", volumeID, err)
return &csi.DeleteVolumeResponse{}, nil
}
if resourceGroupName == "" {
resourceGroupName = d.cloud.ResourceGroup
}
accountKey, err := d.cloud.GetStorageAccesskey(accountName, resourceGroupName)
if err != nil {
return nil, fmt.Errorf("no key for storage account(%s) under resource group(%s), err %v", accountName, resourceGroupName, err)
}
klog.V(2).Infof("deleting container(%s) rg(%s) account(%s) volumeID(%s)", containerName, resourceGroupName, accountName, volumeID)
client, err := azstorage.NewBasicClientOnSovereignCloud(accountName, accountKey, d.cloud.Environment)
if err != nil {
return nil, err
}
blobClient := client.GetBlobService()
container := blobClient.GetContainerReference(containerName)
// todo: check what value to add into DeleteContainerOptions
err = wait.ExponentialBackoff(d.cloud.RequestBackoff(), func() (bool, error) {
_, err := container.DeleteIfExists(nil)
if err != nil && !strings.Contains(err.Error(), "ContainerBeingDeleted") {
return false, fmt.Errorf("failed to delete container(%s) on account(%s), error: %v", containerName, accountName, err)
}
return true, nil
})
if err != nil {
return nil, err
}
klog.V(2).Infof("container(%s) under rg(%s) account(%s) volumeID(%s) is deleted successfully", containerName, resourceGroupName, accountName, volumeID)
return &csi.DeleteVolumeResponse{}, nil
}
// ValidateVolumeCapabilities return the capabilities of the volume
func (d *Driver) ValidateVolumeCapabilities(ctx context.Context, req *csi.ValidateVolumeCapabilitiesRequest) (*csi.ValidateVolumeCapabilitiesResponse, error) {
if len(req.GetVolumeId()) == 0 {
return nil, status.Error(codes.InvalidArgument, "Volume ID missing in request")
}
if req.GetVolumeCapabilities() == nil {
return nil, status.Error(codes.InvalidArgument, "Volume capabilities missing in request")
}
volumeID := req.VolumeId
resourceGroupName, accountName, containerName, err := getContainerInfo(volumeID)
if err != nil {
klog.Errorf("getContainerInfo(%s) in ValidateVolumeCapabilities failed with error: %v", volumeID, err)
return nil, status.Error(codes.NotFound, err.Error())
}
if resourceGroupName == "" {
resourceGroupName = d.cloud.ResourceGroup
}
accountKey, err := d.cloud.GetStorageAccesskey(accountName, resourceGroupName)
if err != nil {
return nil, fmt.Errorf("no key for storage account(%s) under resource group(%s), err %v", accountName, resourceGroupName, err)
}
client, err := azstorage.NewBasicClientOnSovereignCloud(accountName, accountKey, d.cloud.Environment)
if err != nil {
return nil, err
}
blobClient := client.GetBlobService()
container := blobClient.GetContainerReference(containerName)
exist, err := container.Exists()
if err != nil {
return nil, err
}
if !exist {
return nil, status.Error(codes.NotFound, "the requested volume does not exist")
}
// goofys supports all AccessModes, no need to check capabilities here
return &csi.ValidateVolumeCapabilitiesResponse{Message: ""}, nil
}
// ControllerGetCapabilities returns the capabilities of the Controller plugin
func (d *Driver) ControllerGetCapabilities(ctx context.Context, req *csi.ControllerGetCapabilitiesRequest) (*csi.ControllerGetCapabilitiesResponse, error) {
klog.V(5).Infof("Using default ControllerGetCapabilities")
return &csi.ControllerGetCapabilitiesResponse{
Capabilities: d.Cap,
}, nil
}
// GetCapacity returns the capacity of the total available storage pool
func (d *Driver) GetCapacity(ctx context.Context, req *csi.GetCapacityRequest) (*csi.GetCapacityResponse, error) |
// ListVolumes return all available volumes
func (d *Driver) ListVolumes(ctx context.Context, req *csi.ListVolumesRequest) (*csi.ListVolumesResponse, error) {
return nil, status.Error(codes.Unimplemented, "")
}
// ControllerPublishVolume make a volume available on some required node
// N/A for goofys
func (d *Driver) ControllerPublishVolume(ctx context.Context, req *csi.ControllerPublishVolumeRequest) (*csi.ControllerPublishVolumeResponse, error) {
return nil, status.Error(codes.Unimplemented, "")
}
// ControllerUnpublishVolume make the volume unavailable on a specified node
// N/A for goofys
func (d *Driver) ControllerUnpublishVolume(ctx context.Context, req *csi.ControllerUnpublishVolumeRequest) (*csi.ControllerUnpublishVolumeResponse, error) {
return nil, status.Error(codes.Unimplemented, "")
}
// CreateSnapshot create a snapshot (todo)
func (d *Driver) CreateSnapshot(ctx context.Context, req *csi.CreateSnapshotRequest) (*csi.CreateSnapshotResponse, error) {
return nil, status.Error(codes.Unimplemented, "")
}
// DeleteSnapshot delete a snapshot (todo)
func (d *Driver) DeleteSnapshot(ctx context.Context, req *csi.DeleteSnapshotRequest) (*csi.DeleteSnapshotResponse, error) {
return nil, status.Error(codes.Unimplemented, "")
}
// ListSnapshots list all snapshots (todo)
func (d *Driver) ListSnapshots(ctx context.Context, req *csi.ListSnapshotsRequest) (*csi.ListSnapshotsResponse, error) {
return nil, status.Error(codes.Unimplemented, "")
}
// ControllerExpandVolume controller expand volume
func (d *Driver) ControllerExpandVolume(ctx context.Context, req *csi.ControllerExpandVolumeRequest) (*csi.ControllerExpandVolumeResponse, error) {
return nil, status.Error(codes.Unimplemented, "ControllerExpandVolume is not yet implemented")
}
| {
return nil, status.Error(codes.Unimplemented, "")
} | identifier_body |
root_mod_trait.rs | use super::*;
use crate::{prefix_type::PrefixRefTrait, utils::leak_value};
/// The root module of a dynamic library,
/// which may contain other modules,function pointers,and static references.
///
///
/// # Examples
///
/// For a more in-context example of a type implementing this trait you can look
/// at either the example in the readme for this crate,
/// or the `example/example_*_interface` crates in this crates' repository .
///
/// ### Basic
///
/// ```rust
/// use abi_stable::{library::RootModule, sabi_types::VersionStrings, StableAbi};
///
/// #[repr(C)]
/// #[derive(StableAbi)]
/// #[sabi(kind(Prefix(prefix_ref = Module_Ref, prefix_fields = Module_Prefix)))]
/// pub struct Module {
/// pub first: u8,
/// // The `#[sabi(last_prefix_field)]` attribute here means that this is
/// // the last field in this module that was defined in the
/// // first compatible version of the library,
/// #[sabi(last_prefix_field)]
/// pub second: u16,
/// pub third: u32,
/// }
/// impl RootModule for Module_Ref {
/// abi_stable::declare_root_module_statics! {Module_Ref}
/// const BASE_NAME: &'static str = "example_root_module";
/// const NAME: &'static str = "example_root_module";
/// const VERSION_STRINGS: VersionStrings = abi_stable::package_version_strings!();
/// }
///
/// # fn main(){}
/// ```
pub trait RootModule: Sized + StableAbi + PrefixRefTrait + 'static {
/// The name of the dynamic library,which is the same on all platforms.
/// This is generally the name of the `implementation crate`.
const BASE_NAME: &'static str;
/// The name of the library used in error messages.
const NAME: &'static str;
/// The version number of the library that this is a root module of.
///
/// Initialize this with
/// [`package_version_strings!()`](../macro.package_version_strings.html)
const VERSION_STRINGS: VersionStrings;
/// All the constants of this trait and supertraits.
///
/// It can safely be used as a proxy for the associated constants of this trait.
const CONSTANTS: RootModuleConsts = RootModuleConsts {
base_name: RStr::from_str(Self::BASE_NAME),
name: RStr::from_str(Self::NAME),
version_strings: Self::VERSION_STRINGS,
layout: IsLayoutChecked::Yes(<Self as StableAbi>::LAYOUT),
c_abi_testing_fns: crate::library::c_abi_testing::C_ABI_TESTING_FNS,
_priv: (),
};
/// Like `Self::CONSTANTS`,
/// except without including the type layout constant for the root module.
const CONSTANTS_NO_ABI_INFO: RootModuleConsts = RootModuleConsts {
layout: IsLayoutChecked::No,
..Self::CONSTANTS
};
/// Gets the statics for Self.
///
/// To define this associated function use:
/// [`abi_stable::declare_root_module_statics!{TypeOfSelf}`
/// ](../macro.declare_root_module_statics.html).
/// Passing `Self` instead of `TypeOfSelf` won't work.
/// | fn root_module_statics() -> &'static RootModuleStatics<Self>;
/// Gets the root module,returning None if the module is not yet loaded.
#[inline]
fn get_module() -> Option<Self> {
Self::root_module_statics().root_mod.get()
}
/// Gets the RawLibrary of the module,
/// returning None if the dynamic library failed to load
/// (it doesn't exist or layout checking failed).
///
/// Note that if the root module is initialized using `Self::load_module_with`,
/// this will return None even though `Self::get_module` does not.
///
#[inline]
fn get_raw_library() -> Option<&'static RawLibrary> {
Self::root_module_statics().raw_lib.get()
}
/// Returns the path the library would be loaded from,given a directory(folder).
fn get_library_path(directory: &Path) -> PathBuf {
let base_name = Self::BASE_NAME;
RawLibrary::path_in_directory(directory, base_name, LibrarySuffix::NoSuffix)
}
/// Loads the root module,with a closure which either
/// returns the root module or an error.
///
/// If the root module was already loaded,
/// this will return the already loaded root module,
/// without calling the closure.
fn load_module_with<F, E>(f: F) -> Result<Self, E>
where
F: FnOnce() -> Result<Self, E>,
{
Self::root_module_statics().root_mod.try_init(f)
}
/// Loads this module from the path specified by `where_`,
/// first loading the dynamic library if it wasn't already loaded.
///
/// Once the root module is loaded,
/// this will return the already loaded root module.
///
/// # Warning
///
/// If this function is called within a dynamic library,
/// it must be called either within the root module loader function or
/// after that function has been called.
///
/// **DO NOT** call this in the static initializer of a dynamic library,
/// since this library relies on setting up its global state before
/// calling the root module loader.
///
/// # Errors
///
/// This will return these errors:
///
/// - `LibraryError::OpenError`:
/// If the dynamic library itself could not be loaded.
///
/// - `LibraryError::GetSymbolError`:
/// If the root module was not exported.
///
/// - `LibraryError::InvalidAbiHeader`:
/// If the abi_stable version used by the library is not compatible.
///
/// - `LibraryError::ParseVersionError`:
/// If the version strings in the library can't be parsed as version numbers,
/// this can only happen if the version strings are manually constructed.
///
/// - `LibraryError::IncompatibleVersionNumber`:
/// If the version number of the library is incompatible.
///
/// - `LibraryError::AbiInstability`:
/// If the layout of the root module is not the expected one.
///
/// - `LibraryError::RootModule` :
/// If the root module initializer returned an error or panicked.
///
fn load_from(where_: LibraryPath<'_>) -> Result<Self, LibraryError> {
let statics = Self::root_module_statics();
statics.root_mod.try_init(|| {
let lib = statics.raw_lib.try_init(|| -> Result<_, LibraryError> {
let raw_library = load_raw_library::<Self>(where_)?;
// if the library isn't leaked
// it would cause any use of the module to be a use after free.
//
// By leaking the library
// this allows the root module loader to do anything that'd prevent
// sound library unloading.
Ok(leak_value(raw_library))
})?;
let items = unsafe { lib_header_from_raw_library(lib)? };
items.ensure_layout::<Self>()?;
// safety: the layout was checked in the code above,
unsafe {
items
.init_root_module_with_unchecked_layout::<Self>()?
.initialization()
}
})
}
/// Loads this module from the directory specified by `where_`,
/// first loading the dynamic library if it wasn't already loaded.
///
/// Once the root module is loaded,
/// this will return the already loaded root module.
///
/// Warnings and Errors are detailed in [`load_from`](#method.load_from),
///
fn load_from_directory(where_: &Path) -> Result<Self, LibraryError> {
Self::load_from(LibraryPath::Directory(where_))
}
/// Loads this module from the file at `path_`,
/// first loading the dynamic library if it wasn't already loaded.
///
/// Once the root module is loaded,
/// this will return the already loaded root module.
///
/// Warnings and Errors are detailed in [`load_from`](#method.load_from),
///
fn load_from_file(path_: &Path) -> Result<Self, LibraryError> {
Self::load_from(LibraryPath::FullPath(path_))
}
/// Defines behavior that happens once the module is loaded.
///
/// This is ran in the `RootModule::load*` associated functions
/// after the root module has succesfully been loaded.
///
/// The default implementation does nothing.
fn initialization(self) -> Result<Self, LibraryError> {
Ok(self)
}
}
/// Loads the raw library at `where_`
fn load_raw_library<M>(where_: LibraryPath<'_>) -> Result<RawLibrary, LibraryError>
where
M: RootModule,
{
let path = match where_ {
LibraryPath::Directory(directory) => M::get_library_path(directory),
LibraryPath::FullPath(full_path) => full_path.to_owned(),
};
RawLibrary::load_at(&path)
}
/// Gets the LibHeader of a library.
///
/// # Errors
///
/// This will return these errors:
///
/// - `LibraryError::GetSymbolError`:
/// If the root module was not exported.
///
/// - `LibraryError::InvalidAbiHeader`:
/// If the abi_stable used by the library is not compatible.
///
/// # Safety
///
/// The LibHeader is implicitly tied to the lifetime of the library,
/// it will contain dangling `'static` references if the library is dropped before it does.
///
///
pub unsafe fn lib_header_from_raw_library(
raw_library: &RawLibrary,
) -> Result<&'static LibHeader, LibraryError> {
unsafe { abi_header_from_raw_library(raw_library)?.upgrade() }
}
/// Gets the AbiHeaderRef of a library.
///
/// # Errors
///
/// This will return these errors:
///
/// - `LibraryError::GetSymbolError`:
/// If the root module was not exported.
///
/// # Safety
///
/// The AbiHeaderRef is implicitly tied to the lifetime of the library,
/// it will contain dangling `'static` references if the library is dropped before it does.
///
///
pub unsafe fn abi_header_from_raw_library(
raw_library: &RawLibrary,
) -> Result<AbiHeaderRef, LibraryError> {
let mangled = ROOT_MODULE_LOADER_NAME_WITH_NUL;
let header: AbiHeaderRef = unsafe { *raw_library.get::<AbiHeaderRef>(mangled.as_bytes())? };
Ok(header)
}
/// Gets the LibHeader of the library at the path.
///
/// This leaks the underlying dynamic library,
/// if you need to do this without leaking you'll need to use
/// `lib_header_from_raw_library` instead.
///
/// # Errors
///
/// This will return these errors:
///
/// - `LibraryError::OpenError`:
/// If the dynamic library itself could not be loaded.
///
/// - `LibraryError::GetSymbolError`:
/// If the root module was not exported.
///
/// - `LibraryError::InvalidAbiHeader`:
/// If the abi_stable version used by the library is not compatible.
///
///
pub fn lib_header_from_path(path: &Path) -> Result<&'static LibHeader, LibraryError> {
let raw_lib = RawLibrary::load_at(path)?;
let library_getter = unsafe { lib_header_from_raw_library(&raw_lib)? };
mem::forget(raw_lib);
Ok(library_getter)
}
/// Gets the AbiHeaderRef of the library at the path.
///
/// This leaks the underlying dynamic library,
/// if you need to do this without leaking you'll need to use
/// `lib_header_from_raw_library` instead.
///
/// # Errors
///
/// This will return these errors:
///
/// - `LibraryError::OpenError`:
/// If the dynamic library itself could not be loaded.
///
/// - `LibraryError::GetSymbolError`:
/// If the root module was not exported.
///
///
pub fn abi_header_from_path(path: &Path) -> Result<AbiHeaderRef, LibraryError> {
let raw_lib = RawLibrary::load_at(path)?;
let library_getter = unsafe { abi_header_from_raw_library(&raw_lib)? };
mem::forget(raw_lib);
Ok(library_getter)
}
//////////////////////////////////////////////////////////////////////
macro_rules! declare_root_module_consts {
(
fields=[
$(
$(#[$field_meta:meta])*
method_docs=$method_docs:expr,
$field:ident : $field_ty:ty
),* $(,)*
]
) => (
/// All the constants of the [`RootModule`] trait for some erased type.
///
/// [`RootModule`]: ./trait.RootModule.html
#[repr(C)]
#[derive(StableAbi,Copy,Clone)]
pub struct RootModuleConsts{
$(
$(#[$field_meta])*
$field : $field_ty,
)*
_priv:(),
}
impl RootModuleConsts{
$(
#[doc=$method_docs]
pub const fn $field(&self)->$field_ty{
self.$field
}
)*
}
)
}
declare_root_module_consts! {
fields=[
method_docs="
The name of the dynamic library,which is the same on all platforms.
This is generally the name of the implementation crate.",
base_name: RStr<'static>,
method_docs="The name of the library used in error messages.",
name: RStr<'static>,
method_docs="The version number of the library this was created from.",
version_strings: VersionStrings,
method_docs="The (optional) type layout constant of the root module.",
layout: IsLayoutChecked,
method_docs="\
Functions used to test that the C abi is the same in both the library
and the loader\
",
c_abi_testing_fns:&'static CAbiTestingFns,
]
} | random_line_split | |
root_mod_trait.rs | use super::*;
use crate::{prefix_type::PrefixRefTrait, utils::leak_value};
/// The root module of a dynamic library,
/// which may contain other modules,function pointers,and static references.
///
///
/// # Examples
///
/// For a more in-context example of a type implementing this trait you can look
/// at either the example in the readme for this crate,
/// or the `example/example_*_interface` crates in this crates' repository .
///
/// ### Basic
///
/// ```rust
/// use abi_stable::{library::RootModule, sabi_types::VersionStrings, StableAbi};
///
/// #[repr(C)]
/// #[derive(StableAbi)]
/// #[sabi(kind(Prefix(prefix_ref = Module_Ref, prefix_fields = Module_Prefix)))]
/// pub struct Module {
/// pub first: u8,
/// // The `#[sabi(last_prefix_field)]` attribute here means that this is
/// // the last field in this module that was defined in the
/// // first compatible version of the library,
/// #[sabi(last_prefix_field)]
/// pub second: u16,
/// pub third: u32,
/// }
/// impl RootModule for Module_Ref {
/// abi_stable::declare_root_module_statics! {Module_Ref}
/// const BASE_NAME: &'static str = "example_root_module";
/// const NAME: &'static str = "example_root_module";
/// const VERSION_STRINGS: VersionStrings = abi_stable::package_version_strings!();
/// }
///
/// # fn main(){}
/// ```
pub trait RootModule: Sized + StableAbi + PrefixRefTrait + 'static {
/// The name of the dynamic library,which is the same on all platforms.
/// This is generally the name of the `implementation crate`.
const BASE_NAME: &'static str;
/// The name of the library used in error messages.
const NAME: &'static str;
/// The version number of the library that this is a root module of.
///
/// Initialize this with
/// [`package_version_strings!()`](../macro.package_version_strings.html)
const VERSION_STRINGS: VersionStrings;
/// All the constants of this trait and supertraits.
///
/// It can safely be used as a proxy for the associated constants of this trait.
const CONSTANTS: RootModuleConsts = RootModuleConsts {
base_name: RStr::from_str(Self::BASE_NAME),
name: RStr::from_str(Self::NAME),
version_strings: Self::VERSION_STRINGS,
layout: IsLayoutChecked::Yes(<Self as StableAbi>::LAYOUT),
c_abi_testing_fns: crate::library::c_abi_testing::C_ABI_TESTING_FNS,
_priv: (),
};
/// Like `Self::CONSTANTS`,
/// except without including the type layout constant for the root module.
const CONSTANTS_NO_ABI_INFO: RootModuleConsts = RootModuleConsts {
layout: IsLayoutChecked::No,
..Self::CONSTANTS
};
/// Gets the statics for Self.
///
/// To define this associated function use:
/// [`abi_stable::declare_root_module_statics!{TypeOfSelf}`
/// ](../macro.declare_root_module_statics.html).
/// Passing `Self` instead of `TypeOfSelf` won't work.
///
fn root_module_statics() -> &'static RootModuleStatics<Self>;
/// Gets the root module,returning None if the module is not yet loaded.
#[inline]
fn get_module() -> Option<Self> {
Self::root_module_statics().root_mod.get()
}
/// Gets the RawLibrary of the module,
/// returning None if the dynamic library failed to load
/// (it doesn't exist or layout checking failed).
///
/// Note that if the root module is initialized using `Self::load_module_with`,
/// this will return None even though `Self::get_module` does not.
///
#[inline]
fn get_raw_library() -> Option<&'static RawLibrary> {
Self::root_module_statics().raw_lib.get()
}
/// Returns the path the library would be loaded from,given a directory(folder).
fn get_library_path(directory: &Path) -> PathBuf {
let base_name = Self::BASE_NAME;
RawLibrary::path_in_directory(directory, base_name, LibrarySuffix::NoSuffix)
}
/// Loads the root module,with a closure which either
/// returns the root module or an error.
///
/// If the root module was already loaded,
/// this will return the already loaded root module,
/// without calling the closure.
fn load_module_with<F, E>(f: F) -> Result<Self, E>
where
F: FnOnce() -> Result<Self, E>,
{
Self::root_module_statics().root_mod.try_init(f)
}
/// Loads this module from the path specified by `where_`,
/// first loading the dynamic library if it wasn't already loaded.
///
/// Once the root module is loaded,
/// this will return the already loaded root module.
///
/// # Warning
///
/// If this function is called within a dynamic library,
/// it must be called either within the root module loader function or
/// after that function has been called.
///
/// **DO NOT** call this in the static initializer of a dynamic library,
/// since this library relies on setting up its global state before
/// calling the root module loader.
///
/// # Errors
///
/// This will return these errors:
///
/// - `LibraryError::OpenError`:
/// If the dynamic library itself could not be loaded.
///
/// - `LibraryError::GetSymbolError`:
/// If the root module was not exported.
///
/// - `LibraryError::InvalidAbiHeader`:
/// If the abi_stable version used by the library is not compatible.
///
/// - `LibraryError::ParseVersionError`:
/// If the version strings in the library can't be parsed as version numbers,
/// this can only happen if the version strings are manually constructed.
///
/// - `LibraryError::IncompatibleVersionNumber`:
/// If the version number of the library is incompatible.
///
/// - `LibraryError::AbiInstability`:
/// If the layout of the root module is not the expected one.
///
/// - `LibraryError::RootModule` :
/// If the root module initializer returned an error or panicked.
///
fn load_from(where_: LibraryPath<'_>) -> Result<Self, LibraryError> {
let statics = Self::root_module_statics();
statics.root_mod.try_init(|| {
let lib = statics.raw_lib.try_init(|| -> Result<_, LibraryError> {
let raw_library = load_raw_library::<Self>(where_)?;
// if the library isn't leaked
// it would cause any use of the module to be a use after free.
//
// By leaking the library
// this allows the root module loader to do anything that'd prevent
// sound library unloading.
Ok(leak_value(raw_library))
})?;
let items = unsafe { lib_header_from_raw_library(lib)? };
items.ensure_layout::<Self>()?;
// safety: the layout was checked in the code above,
unsafe {
items
.init_root_module_with_unchecked_layout::<Self>()?
.initialization()
}
})
}
/// Loads this module from the directory specified by `where_`,
/// first loading the dynamic library if it wasn't already loaded.
///
/// Once the root module is loaded,
/// this will return the already loaded root module.
///
/// Warnings and Errors are detailed in [`load_from`](#method.load_from),
///
fn load_from_directory(where_: &Path) -> Result<Self, LibraryError> {
Self::load_from(LibraryPath::Directory(where_))
}
/// Loads this module from the file at `path_`,
/// first loading the dynamic library if it wasn't already loaded.
///
/// Once the root module is loaded,
/// this will return the already loaded root module.
///
/// Warnings and Errors are detailed in [`load_from`](#method.load_from),
///
fn load_from_file(path_: &Path) -> Result<Self, LibraryError> {
Self::load_from(LibraryPath::FullPath(path_))
}
/// Defines behavior that happens once the module is loaded.
///
/// This is ran in the `RootModule::load*` associated functions
/// after the root module has succesfully been loaded.
///
/// The default implementation does nothing.
fn initialization(self) -> Result<Self, LibraryError> {
Ok(self)
}
}
/// Loads the raw library at `where_`
fn load_raw_library<M>(where_: LibraryPath<'_>) -> Result<RawLibrary, LibraryError>
where
M: RootModule,
|
/// Gets the LibHeader of a library.
///
/// # Errors
///
/// This will return these errors:
///
/// - `LibraryError::GetSymbolError`:
/// If the root module was not exported.
///
/// - `LibraryError::InvalidAbiHeader`:
/// If the abi_stable used by the library is not compatible.
///
/// # Safety
///
/// The LibHeader is implicitly tied to the lifetime of the library,
/// it will contain dangling `'static` references if the library is dropped before it does.
///
///
pub unsafe fn lib_header_from_raw_library(
raw_library: &RawLibrary,
) -> Result<&'static LibHeader, LibraryError> {
unsafe { abi_header_from_raw_library(raw_library)?.upgrade() }
}
/// Gets the AbiHeaderRef of a library.
///
/// # Errors
///
/// This will return these errors:
///
/// - `LibraryError::GetSymbolError`:
/// If the root module was not exported.
///
/// # Safety
///
/// The AbiHeaderRef is implicitly tied to the lifetime of the library,
/// it will contain dangling `'static` references if the library is dropped before it does.
///
///
pub unsafe fn abi_header_from_raw_library(
raw_library: &RawLibrary,
) -> Result<AbiHeaderRef, LibraryError> {
let mangled = ROOT_MODULE_LOADER_NAME_WITH_NUL;
let header: AbiHeaderRef = unsafe { *raw_library.get::<AbiHeaderRef>(mangled.as_bytes())? };
Ok(header)
}
/// Gets the LibHeader of the library at the path.
///
/// This leaks the underlying dynamic library,
/// if you need to do this without leaking you'll need to use
/// `lib_header_from_raw_library` instead.
///
/// # Errors
///
/// This will return these errors:
///
/// - `LibraryError::OpenError`:
/// If the dynamic library itself could not be loaded.
///
/// - `LibraryError::GetSymbolError`:
/// If the root module was not exported.
///
/// - `LibraryError::InvalidAbiHeader`:
/// If the abi_stable version used by the library is not compatible.
///
///
pub fn lib_header_from_path(path: &Path) -> Result<&'static LibHeader, LibraryError> {
let raw_lib = RawLibrary::load_at(path)?;
let library_getter = unsafe { lib_header_from_raw_library(&raw_lib)? };
mem::forget(raw_lib);
Ok(library_getter)
}
/// Gets the AbiHeaderRef of the library at the path.
///
/// This leaks the underlying dynamic library,
/// if you need to do this without leaking you'll need to use
/// `lib_header_from_raw_library` instead.
///
/// # Errors
///
/// This will return these errors:
///
/// - `LibraryError::OpenError`:
/// If the dynamic library itself could not be loaded.
///
/// - `LibraryError::GetSymbolError`:
/// If the root module was not exported.
///
///
pub fn abi_header_from_path(path: &Path) -> Result<AbiHeaderRef, LibraryError> {
let raw_lib = RawLibrary::load_at(path)?;
let library_getter = unsafe { abi_header_from_raw_library(&raw_lib)? };
mem::forget(raw_lib);
Ok(library_getter)
}
//////////////////////////////////////////////////////////////////////
macro_rules! declare_root_module_consts {
(
fields=[
$(
$(#[$field_meta:meta])*
method_docs=$method_docs:expr,
$field:ident : $field_ty:ty
),* $(,)*
]
) => (
/// All the constants of the [`RootModule`] trait for some erased type.
///
/// [`RootModule`]: ./trait.RootModule.html
#[repr(C)]
#[derive(StableAbi,Copy,Clone)]
pub struct RootModuleConsts{
$(
$(#[$field_meta])*
$field : $field_ty,
)*
_priv:(),
}
impl RootModuleConsts{
$(
#[doc=$method_docs]
pub const fn $field(&self)->$field_ty{
self.$field
}
)*
}
)
}
declare_root_module_consts! {
fields=[
method_docs="
The name of the dynamic library,which is the same on all platforms.
This is generally the name of the implementation crate.",
base_name: RStr<'static>,
method_docs="The name of the library used in error messages.",
name: RStr<'static>,
method_docs="The version number of the library this was created from.",
version_strings: VersionStrings,
method_docs="The (optional) type layout constant of the root module.",
layout: IsLayoutChecked,
method_docs="\
Functions used to test that the C abi is the same in both the library
and the loader\
",
c_abi_testing_fns:&'static CAbiTestingFns,
]
}
| {
let path = match where_ {
LibraryPath::Directory(directory) => M::get_library_path(directory),
LibraryPath::FullPath(full_path) => full_path.to_owned(),
};
RawLibrary::load_at(&path)
} | identifier_body |
root_mod_trait.rs | use super::*;
use crate::{prefix_type::PrefixRefTrait, utils::leak_value};
/// The root module of a dynamic library,
/// which may contain other modules,function pointers,and static references.
///
///
/// # Examples
///
/// For a more in-context example of a type implementing this trait you can look
/// at either the example in the readme for this crate,
/// or the `example/example_*_interface` crates in this crates' repository .
///
/// ### Basic
///
/// ```rust
/// use abi_stable::{library::RootModule, sabi_types::VersionStrings, StableAbi};
///
/// #[repr(C)]
/// #[derive(StableAbi)]
/// #[sabi(kind(Prefix(prefix_ref = Module_Ref, prefix_fields = Module_Prefix)))]
/// pub struct Module {
/// pub first: u8,
/// // The `#[sabi(last_prefix_field)]` attribute here means that this is
/// // the last field in this module that was defined in the
/// // first compatible version of the library,
/// #[sabi(last_prefix_field)]
/// pub second: u16,
/// pub third: u32,
/// }
/// impl RootModule for Module_Ref {
/// abi_stable::declare_root_module_statics! {Module_Ref}
/// const BASE_NAME: &'static str = "example_root_module";
/// const NAME: &'static str = "example_root_module";
/// const VERSION_STRINGS: VersionStrings = abi_stable::package_version_strings!();
/// }
///
/// # fn main(){}
/// ```
pub trait RootModule: Sized + StableAbi + PrefixRefTrait + 'static {
/// The name of the dynamic library,which is the same on all platforms.
/// This is generally the name of the `implementation crate`.
const BASE_NAME: &'static str;
/// The name of the library used in error messages.
const NAME: &'static str;
/// The version number of the library that this is a root module of.
///
/// Initialize this with
/// [`package_version_strings!()`](../macro.package_version_strings.html)
const VERSION_STRINGS: VersionStrings;
/// All the constants of this trait and supertraits.
///
/// It can safely be used as a proxy for the associated constants of this trait.
const CONSTANTS: RootModuleConsts = RootModuleConsts {
base_name: RStr::from_str(Self::BASE_NAME),
name: RStr::from_str(Self::NAME),
version_strings: Self::VERSION_STRINGS,
layout: IsLayoutChecked::Yes(<Self as StableAbi>::LAYOUT),
c_abi_testing_fns: crate::library::c_abi_testing::C_ABI_TESTING_FNS,
_priv: (),
};
/// Like `Self::CONSTANTS`,
/// except without including the type layout constant for the root module.
const CONSTANTS_NO_ABI_INFO: RootModuleConsts = RootModuleConsts {
layout: IsLayoutChecked::No,
..Self::CONSTANTS
};
/// Gets the statics for Self.
///
/// To define this associated function use:
/// [`abi_stable::declare_root_module_statics!{TypeOfSelf}`
/// ](../macro.declare_root_module_statics.html).
/// Passing `Self` instead of `TypeOfSelf` won't work.
///
fn root_module_statics() -> &'static RootModuleStatics<Self>;
/// Gets the root module,returning None if the module is not yet loaded.
#[inline]
fn get_module() -> Option<Self> {
Self::root_module_statics().root_mod.get()
}
/// Gets the RawLibrary of the module,
/// returning None if the dynamic library failed to load
/// (it doesn't exist or layout checking failed).
///
/// Note that if the root module is initialized using `Self::load_module_with`,
/// this will return None even though `Self::get_module` does not.
///
#[inline]
fn get_raw_library() -> Option<&'static RawLibrary> {
Self::root_module_statics().raw_lib.get()
}
/// Returns the path the library would be loaded from,given a directory(folder).
fn get_library_path(directory: &Path) -> PathBuf {
let base_name = Self::BASE_NAME;
RawLibrary::path_in_directory(directory, base_name, LibrarySuffix::NoSuffix)
}
/// Loads the root module,with a closure which either
/// returns the root module or an error.
///
/// If the root module was already loaded,
/// this will return the already loaded root module,
/// without calling the closure.
fn load_module_with<F, E>(f: F) -> Result<Self, E>
where
F: FnOnce() -> Result<Self, E>,
{
Self::root_module_statics().root_mod.try_init(f)
}
/// Loads this module from the path specified by `where_`,
/// first loading the dynamic library if it wasn't already loaded.
///
/// Once the root module is loaded,
/// this will return the already loaded root module.
///
/// # Warning
///
/// If this function is called within a dynamic library,
/// it must be called either within the root module loader function or
/// after that function has been called.
///
/// **DO NOT** call this in the static initializer of a dynamic library,
/// since this library relies on setting up its global state before
/// calling the root module loader.
///
/// # Errors
///
/// This will return these errors:
///
/// - `LibraryError::OpenError`:
/// If the dynamic library itself could not be loaded.
///
/// - `LibraryError::GetSymbolError`:
/// If the root module was not exported.
///
/// - `LibraryError::InvalidAbiHeader`:
/// If the abi_stable version used by the library is not compatible.
///
/// - `LibraryError::ParseVersionError`:
/// If the version strings in the library can't be parsed as version numbers,
/// this can only happen if the version strings are manually constructed.
///
/// - `LibraryError::IncompatibleVersionNumber`:
/// If the version number of the library is incompatible.
///
/// - `LibraryError::AbiInstability`:
/// If the layout of the root module is not the expected one.
///
/// - `LibraryError::RootModule` :
/// If the root module initializer returned an error or panicked.
///
fn load_from(where_: LibraryPath<'_>) -> Result<Self, LibraryError> {
let statics = Self::root_module_statics();
statics.root_mod.try_init(|| {
let lib = statics.raw_lib.try_init(|| -> Result<_, LibraryError> {
let raw_library = load_raw_library::<Self>(where_)?;
// if the library isn't leaked
// it would cause any use of the module to be a use after free.
//
// By leaking the library
// this allows the root module loader to do anything that'd prevent
// sound library unloading.
Ok(leak_value(raw_library))
})?;
let items = unsafe { lib_header_from_raw_library(lib)? };
items.ensure_layout::<Self>()?;
// safety: the layout was checked in the code above,
unsafe {
items
.init_root_module_with_unchecked_layout::<Self>()?
.initialization()
}
})
}
/// Loads this module from the directory specified by `where_`,
/// first loading the dynamic library if it wasn't already loaded.
///
/// Once the root module is loaded,
/// this will return the already loaded root module.
///
/// Warnings and Errors are detailed in [`load_from`](#method.load_from),
///
fn load_from_directory(where_: &Path) -> Result<Self, LibraryError> {
Self::load_from(LibraryPath::Directory(where_))
}
/// Loads this module from the file at `path_`,
/// first loading the dynamic library if it wasn't already loaded.
///
/// Once the root module is loaded,
/// this will return the already loaded root module.
///
/// Warnings and Errors are detailed in [`load_from`](#method.load_from),
///
fn load_from_file(path_: &Path) -> Result<Self, LibraryError> {
Self::load_from(LibraryPath::FullPath(path_))
}
/// Defines behavior that happens once the module is loaded.
///
/// This is ran in the `RootModule::load*` associated functions
/// after the root module has succesfully been loaded.
///
/// The default implementation does nothing.
fn initialization(self) -> Result<Self, LibraryError> {
Ok(self)
}
}
/// Loads the raw library at `where_`
fn load_raw_library<M>(where_: LibraryPath<'_>) -> Result<RawLibrary, LibraryError>
where
M: RootModule,
{
let path = match where_ {
LibraryPath::Directory(directory) => M::get_library_path(directory),
LibraryPath::FullPath(full_path) => full_path.to_owned(),
};
RawLibrary::load_at(&path)
}
/// Gets the LibHeader of a library.
///
/// # Errors
///
/// This will return these errors:
///
/// - `LibraryError::GetSymbolError`:
/// If the root module was not exported.
///
/// - `LibraryError::InvalidAbiHeader`:
/// If the abi_stable used by the library is not compatible.
///
/// # Safety
///
/// The LibHeader is implicitly tied to the lifetime of the library,
/// it will contain dangling `'static` references if the library is dropped before it does.
///
///
pub unsafe fn lib_header_from_raw_library(
raw_library: &RawLibrary,
) -> Result<&'static LibHeader, LibraryError> {
unsafe { abi_header_from_raw_library(raw_library)?.upgrade() }
}
/// Gets the AbiHeaderRef of a library.
///
/// # Errors
///
/// This will return these errors:
///
/// - `LibraryError::GetSymbolError`:
/// If the root module was not exported.
///
/// # Safety
///
/// The AbiHeaderRef is implicitly tied to the lifetime of the library,
/// it will contain dangling `'static` references if the library is dropped before it does.
///
///
pub unsafe fn | (
raw_library: &RawLibrary,
) -> Result<AbiHeaderRef, LibraryError> {
let mangled = ROOT_MODULE_LOADER_NAME_WITH_NUL;
let header: AbiHeaderRef = unsafe { *raw_library.get::<AbiHeaderRef>(mangled.as_bytes())? };
Ok(header)
}
/// Gets the LibHeader of the library at the path.
///
/// This leaks the underlying dynamic library,
/// if you need to do this without leaking you'll need to use
/// `lib_header_from_raw_library` instead.
///
/// # Errors
///
/// This will return these errors:
///
/// - `LibraryError::OpenError`:
/// If the dynamic library itself could not be loaded.
///
/// - `LibraryError::GetSymbolError`:
/// If the root module was not exported.
///
/// - `LibraryError::InvalidAbiHeader`:
/// If the abi_stable version used by the library is not compatible.
///
///
pub fn lib_header_from_path(path: &Path) -> Result<&'static LibHeader, LibraryError> {
let raw_lib = RawLibrary::load_at(path)?;
let library_getter = unsafe { lib_header_from_raw_library(&raw_lib)? };
mem::forget(raw_lib);
Ok(library_getter)
}
/// Gets the AbiHeaderRef of the library at the path.
///
/// This leaks the underlying dynamic library,
/// if you need to do this without leaking you'll need to use
/// `lib_header_from_raw_library` instead.
///
/// # Errors
///
/// This will return these errors:
///
/// - `LibraryError::OpenError`:
/// If the dynamic library itself could not be loaded.
///
/// - `LibraryError::GetSymbolError`:
/// If the root module was not exported.
///
///
pub fn abi_header_from_path(path: &Path) -> Result<AbiHeaderRef, LibraryError> {
let raw_lib = RawLibrary::load_at(path)?;
let library_getter = unsafe { abi_header_from_raw_library(&raw_lib)? };
mem::forget(raw_lib);
Ok(library_getter)
}
//////////////////////////////////////////////////////////////////////
macro_rules! declare_root_module_consts {
(
fields=[
$(
$(#[$field_meta:meta])*
method_docs=$method_docs:expr,
$field:ident : $field_ty:ty
),* $(,)*
]
) => (
/// All the constants of the [`RootModule`] trait for some erased type.
///
/// [`RootModule`]: ./trait.RootModule.html
#[repr(C)]
#[derive(StableAbi,Copy,Clone)]
pub struct RootModuleConsts{
$(
$(#[$field_meta])*
$field : $field_ty,
)*
_priv:(),
}
impl RootModuleConsts{
$(
#[doc=$method_docs]
pub const fn $field(&self)->$field_ty{
self.$field
}
)*
}
)
}
declare_root_module_consts! {
fields=[
method_docs="
The name of the dynamic library,which is the same on all platforms.
This is generally the name of the implementation crate.",
base_name: RStr<'static>,
method_docs="The name of the library used in error messages.",
name: RStr<'static>,
method_docs="The version number of the library this was created from.",
version_strings: VersionStrings,
method_docs="The (optional) type layout constant of the root module.",
layout: IsLayoutChecked,
method_docs="\
Functions used to test that the C abi is the same in both the library
and the loader\
",
c_abi_testing_fns:&'static CAbiTestingFns,
]
}
| abi_header_from_raw_library | identifier_name |
item_classifier.py | #-------------------------------------------------------------------------------
# Name: Simple stone classification TF model
# Purpose: Learn TensorFlow 2.0
#
# Author: kol
#
# Created: 13.01.2020
# Copyright: (c) kol 2020
# Licence: MIT
#-------------------------------------------------------------------------------
import os
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
from pathlib import Path
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from random import randrange
IMG_HEIGHT = 20
IMG_WIDTH = 20
NUM_EPOCHS = 20
BATCH_SIZE = 32
DISPLAY_COLS = 6
CONFIDENCE_LEVEL = 0.8
AUTOTUNE = tf.data.experimental.AUTOTUNE
# Board elements classifier model wrapper
class BoardItemClassifier:
| """This class wraps around TF model
A stone images dataset made by cc/cc_gen.py is required for model training and prediction
"""
def __init__(self, model_dir, img_dir, img_size = (IMG_WIDTH, IMG_HEIGHT), log_dir = None):
"""Constructor.
Parameters:
model_dir Directory where a model is saved
img_dir Root directory of stone images dataset
img_size Target image size
"""
self.model = None
self.model_dir, self.img_dir, self.img_size, self.log_dir = model_dir, img_dir, img_size, log_dir
self.image_data_gen = None
self.train_dataset = None
self.val_dataset = None
self.history = None
self.predict_generator = None
self.predict_dataset = None
self.predictions = None
self.class_names = np.array([item.name for item in Path(self.img_dir).glob('*') if item.is_dir()])
def exists(self):
"""Checks saved model presence"""
return Path(self.model_dir).exists()
def load(self):
"""Load a model from directory"""
print("==> Loading model from", self.model_dir)
self.model = tf.keras.models.load_model(self.model_dir)
def build(self):
"""Build new model"""
print("==> Building model", self.model_dir)
self.model = tf.keras.models.Sequential()
layers = self.get_model_layers()
for l in layers:
self.model.add(l)
self.model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['sparse_categorical_accuracy'])
def get_model_layers(self):
return [
tf.keras.layers.Conv2D(16, 3, padding='same', activation='relu',
input_shape=(IMG_HEIGHT, IMG_WIDTH, 3),
kernel_regularizer=tf.keras.regularizers.l2(0.001)),
tf.keras.layers.MaxPooling2D((2, 2)),
tf.keras.layers.Dropout(0.1),
tf.keras.layers.Conv2D(32, 3, padding='same', activation='relu',
kernel_regularizer=tf.keras.regularizers.l2(0.001)),
tf.keras.layers.MaxPooling2D((2, 2)),
tf.keras.layers.Dropout(0.1),
tf.keras.layers.Conv2D(64, 3, padding='same', activation='relu',
kernel_regularizer=tf.keras.regularizers.l2(0.001)),
tf.keras.layers.MaxPooling2D((2, 2)),
tf.keras.layers.Dropout(0.1),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(512, activation='relu'),
tf.keras.layers.Dense(len(self.class_names), activation='softmax')
]
def save(self):
"""Save whole model to specified directory"""
print("==> Saving model to", self.model_dir)
self.model.save(self.model_dir)
def init_datasets(self, display_samples = False):
"""Initialize datasets for training"""
print("==> Loading images from ", self.img_dir)
self.image_data_gen = ImageDataGenerator(
rescale=1./255,
#rotation_range=30,
#shear_range=30,
#width_shift_range=.15,
#height_shift_range=.15,
#zoom_range=0.5,
validation_split=0.2)
self.train_dataset = self.image_data_gen.flow_from_directory(
batch_size=BATCH_SIZE,
directory=self.img_dir,
shuffle=True,
target_size=self.img_size,
class_mode='sparse',
subset='training')
self.val_dataset = self.image_data_gen.flow_from_directory(
batch_size=BATCH_SIZE,
directory=self.img_dir,
shuffle=True,
target_size=self.img_size,
class_mode='sparse',
subset='validation')
if display_samples:
self.display_sample_images()
def train(self, epochs = NUM_EPOCHS, display_history = False):
"""Train the model"""
print("==> Training model from", self.model_dir)
if self.model is None:
self.build()
if self.train_dataset is None:
self.init_datasets()
callbacks = []
if self.log_dir is not None:
callbacks.extend([
tf.keras.callbacks.TensorBoard(self.log_dir,
profile_batch=0,
write_graph=True)])
if self.image_data_gen is not None:
# Generator
self.history = self.model.fit_generator(
self.train_dataset,
epochs=epochs,
steps_per_epoch=self.train_dataset.samples // BATCH_SIZE,
validation_data=self.val_dataset,
validation_steps=self.val_dataset.samples // BATCH_SIZE,
callbacks = callbacks)
else:
# Dataset
self.history = self.model.fit(
self.train_dataset,
epochs=epochs,
callbacks = callbacks)
if display_history:
self.display_history()
def predict(self, num_samples = BATCH_SIZE, display_predictions = True):
"""Predict on specified number of samples"""
if self.model is None:
raise Exception("Model is empty, either build or load it")
print("==> Prediction on model from", self.model_dir)
file_names, file_labels = self.get_sample_files(num_samples)
self.predict_dataset = tf.data.Dataset.from_tensor_slices((file_names, file_labels))
self.predict_dataset = self.predict_dataset.map(self.map_fn, num_parallel_calls=AUTOTUNE)
self.predict_dataset = self.predict_dataset.batch(BATCH_SIZE)
self.predictions = self.model.predict(self.predict_dataset)
if display_predictions:
self.display_predictions()
def map_fn(self, path, label):
"""Upload an image fo given path with specified label - internal"""
image = tf.image.decode_png(tf.io.read_file(path))
image = tf.image.convert_image_dtype(image, tf.float32)
if self.img_size is not None:
image = tf.image.resize(image, self.img_size)
return image, label
def get_sample_files(self, num_samples = BATCH_SIZE):
"""Retrieve specified number of sample files from stone images dataset"""
file_names = []
file_labels = []
for n, d in enumerate(Path(self.img_dir).glob('*')):
names = [str(f) for f in Path(self.img_dir).joinpath(d).glob('*.png')]
file_names.extend(names)
labels = [float(x == d.name) for x in self.class_names]
file_labels.extend([labels] * len(names))
random_file_names = []
random_file_labels = []
for _ in range(0, num_samples):
n = randrange(0, len(file_names)-1)
random_file_names.extend([file_names[n]])
random_file_labels.extend([file_labels[n]])
file_names = tf.convert_to_tensor(random_file_names, dtype=tf.string)
file_labels = tf.convert_to_tensor(random_file_labels)
file_labels = tf.expand_dims(file_labels, axis=-1)
return file_names, file_labels
def display_sample_images(self):
"""Display up to 25 images from training dataset"""
if self.train_dataset is None:
self.init_datasets()
images, labels = next(self.train_dataset)
plt.figure(figsize=(5,5))
for n in range(min(25, images.shape[0])):
ax = plt.subplot(5,5,n+1)
plt.imshow(images[n])
if len(labels.shape) == 1:
plt.title(self.class_names[int(labels[n])].title())
else:
m = np.argmax(labels[n])
plt.title(self.class_names[int(labels[n, m])].title())
plt.axis('off')
plt.tight_layout()
plt.show()
def display_history(self):
"""Display training history"""
if self.history is None:
return
acc = self.history.history['sparse_categorical_accuracy']
val_acc = self.history.history['val_sparse_categorical_accuracy']
loss = self.history.history['loss']
val_loss = self.history.history['val_loss']
epochs = len(acc)
epochs_range = range(epochs)
plt.figure(figsize=(8, 8))
plt.subplot(1, 2, 1)
plt.plot(epochs_range, acc, label='Training Accuracy')
plt.plot(epochs_range, val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')
plt.subplot(1, 2, 2)
plt.plot(epochs_range, loss, label='Training Loss')
plt.plot(epochs_range, val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
plt.show()
def display_predictions(self):
"""Display predictions"""
if self.predictions is None:
return
pred_iter = iter(self.predictions)
for _, elements in self.predict_dataset.enumerate():
fig = plt.figure(figsize=(8, 8))
num_rows = int(np.ceil(elements[0].shape[0] / DISPLAY_COLS))
n_elem = 1
for image, labels in zip(elements[0], elements[1]):
true_label = int(np.argmax(labels))
try:
prediction = next(pred_iter)
pred_label = int(np.argmax(prediction))
if prediction[pred_label] < CONFIDENCE_LEVEL:
pred_label = -1
except StopIteration:
break
fig.add_subplot(num_rows, DISPLAY_COLS, n_elem)
plt.xticks([])
plt.yticks([])
n_elem += 1
plt.imshow(image, cmap=plt.cm.binary)
plt.title('{} ({})'.format(
self.class_names[pred_label] if pred_label >= 0 else 'none',
self.class_names[true_label]))
plt.tight_layout()
plt.show() | identifier_body | |
item_classifier.py | #-------------------------------------------------------------------------------
# Name: Simple stone classification TF model
# Purpose: Learn TensorFlow 2.0
#
# Author: kol
#
# Created: 13.01.2020
# Copyright: (c) kol 2020
# Licence: MIT
#-------------------------------------------------------------------------------
import os
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
from pathlib import Path
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from random import randrange
IMG_HEIGHT = 20
IMG_WIDTH = 20
NUM_EPOCHS = 20
BATCH_SIZE = 32
DISPLAY_COLS = 6
CONFIDENCE_LEVEL = 0.8
AUTOTUNE = tf.data.experimental.AUTOTUNE
# Board elements classifier model wrapper
class BoardItemClassifier:
"""This class wraps around TF model
A stone images dataset made by cc/cc_gen.py is required for model training and prediction
"""
def __init__(self, model_dir, img_dir, img_size = (IMG_WIDTH, IMG_HEIGHT), log_dir = None):
"""Constructor.
Parameters:
model_dir Directory where a model is saved
img_dir Root directory of stone images dataset
img_size Target image size
"""
self.model = None
self.model_dir, self.img_dir, self.img_size, self.log_dir = model_dir, img_dir, img_size, log_dir
self.image_data_gen = None
self.train_dataset = None
self.val_dataset = None
self.history = None
self.predict_generator = None
self.predict_dataset = None
self.predictions = None
self.class_names = np.array([item.name for item in Path(self.img_dir).glob('*') if item.is_dir()])
def exists(self):
"""Checks saved model presence"""
return Path(self.model_dir).exists()
def load(self):
"""Load a model from directory"""
print("==> Loading model from", self.model_dir)
self.model = tf.keras.models.load_model(self.model_dir)
def build(self):
"""Build new model"""
print("==> Building model", self.model_dir)
self.model = tf.keras.models.Sequential()
layers = self.get_model_layers()
for l in layers:
self.model.add(l)
self.model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['sparse_categorical_accuracy'])
def get_model_layers(self):
return [
tf.keras.layers.Conv2D(16, 3, padding='same', activation='relu',
input_shape=(IMG_HEIGHT, IMG_WIDTH, 3),
kernel_regularizer=tf.keras.regularizers.l2(0.001)),
tf.keras.layers.MaxPooling2D((2, 2)),
tf.keras.layers.Dropout(0.1),
tf.keras.layers.Conv2D(32, 3, padding='same', activation='relu',
kernel_regularizer=tf.keras.regularizers.l2(0.001)),
tf.keras.layers.MaxPooling2D((2, 2)),
tf.keras.layers.Dropout(0.1),
tf.keras.layers.Conv2D(64, 3, padding='same', activation='relu',
kernel_regularizer=tf.keras.regularizers.l2(0.001)),
tf.keras.layers.MaxPooling2D((2, 2)),
tf.keras.layers.Dropout(0.1),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(512, activation='relu'),
tf.keras.layers.Dense(len(self.class_names), activation='softmax')
]
def save(self):
"""Save whole model to specified directory"""
print("==> Saving model to", self.model_dir)
self.model.save(self.model_dir)
def init_datasets(self, display_samples = False):
"""Initialize datasets for training"""
print("==> Loading images from ", self.img_dir)
self.image_data_gen = ImageDataGenerator(
rescale=1./255,
#rotation_range=30,
#shear_range=30,
#width_shift_range=.15,
#height_shift_range=.15,
#zoom_range=0.5,
validation_split=0.2)
self.train_dataset = self.image_data_gen.flow_from_directory(
batch_size=BATCH_SIZE,
directory=self.img_dir,
shuffle=True,
target_size=self.img_size,
class_mode='sparse',
subset='training')
self.val_dataset = self.image_data_gen.flow_from_directory(
batch_size=BATCH_SIZE,
directory=self.img_dir,
shuffle=True,
target_size=self.img_size,
class_mode='sparse',
subset='validation')
if display_samples:
self.display_sample_images()
def train(self, epochs = NUM_EPOCHS, display_history = False):
"""Train the model"""
print("==> Training model from", self.model_dir)
if self.model is None:
self.build()
if self.train_dataset is None:
|
callbacks = []
if self.log_dir is not None:
callbacks.extend([
tf.keras.callbacks.TensorBoard(self.log_dir,
profile_batch=0,
write_graph=True)])
if self.image_data_gen is not None:
# Generator
self.history = self.model.fit_generator(
self.train_dataset,
epochs=epochs,
steps_per_epoch=self.train_dataset.samples // BATCH_SIZE,
validation_data=self.val_dataset,
validation_steps=self.val_dataset.samples // BATCH_SIZE,
callbacks = callbacks)
else:
# Dataset
self.history = self.model.fit(
self.train_dataset,
epochs=epochs,
callbacks = callbacks)
if display_history:
self.display_history()
def predict(self, num_samples = BATCH_SIZE, display_predictions = True):
"""Predict on specified number of samples"""
if self.model is None:
raise Exception("Model is empty, either build or load it")
print("==> Prediction on model from", self.model_dir)
file_names, file_labels = self.get_sample_files(num_samples)
self.predict_dataset = tf.data.Dataset.from_tensor_slices((file_names, file_labels))
self.predict_dataset = self.predict_dataset.map(self.map_fn, num_parallel_calls=AUTOTUNE)
self.predict_dataset = self.predict_dataset.batch(BATCH_SIZE)
self.predictions = self.model.predict(self.predict_dataset)
if display_predictions:
self.display_predictions()
def map_fn(self, path, label):
"""Upload an image fo given path with specified label - internal"""
image = tf.image.decode_png(tf.io.read_file(path))
image = tf.image.convert_image_dtype(image, tf.float32)
if self.img_size is not None:
image = tf.image.resize(image, self.img_size)
return image, label
def get_sample_files(self, num_samples = BATCH_SIZE):
"""Retrieve specified number of sample files from stone images dataset"""
file_names = []
file_labels = []
for n, d in enumerate(Path(self.img_dir).glob('*')):
names = [str(f) for f in Path(self.img_dir).joinpath(d).glob('*.png')]
file_names.extend(names)
labels = [float(x == d.name) for x in self.class_names]
file_labels.extend([labels] * len(names))
random_file_names = []
random_file_labels = []
for _ in range(0, num_samples):
n = randrange(0, len(file_names)-1)
random_file_names.extend([file_names[n]])
random_file_labels.extend([file_labels[n]])
file_names = tf.convert_to_tensor(random_file_names, dtype=tf.string)
file_labels = tf.convert_to_tensor(random_file_labels)
file_labels = tf.expand_dims(file_labels, axis=-1)
return file_names, file_labels
def display_sample_images(self):
"""Display up to 25 images from training dataset"""
if self.train_dataset is None:
self.init_datasets()
images, labels = next(self.train_dataset)
plt.figure(figsize=(5,5))
for n in range(min(25, images.shape[0])):
ax = plt.subplot(5,5,n+1)
plt.imshow(images[n])
if len(labels.shape) == 1:
plt.title(self.class_names[int(labels[n])].title())
else:
m = np.argmax(labels[n])
plt.title(self.class_names[int(labels[n, m])].title())
plt.axis('off')
plt.tight_layout()
plt.show()
def display_history(self):
"""Display training history"""
if self.history is None:
return
acc = self.history.history['sparse_categorical_accuracy']
val_acc = self.history.history['val_sparse_categorical_accuracy']
loss = self.history.history['loss']
val_loss = self.history.history['val_loss']
epochs = len(acc)
epochs_range = range(epochs)
plt.figure(figsize=(8, 8))
plt.subplot(1, 2, 1)
plt.plot(epochs_range, acc, label='Training Accuracy')
plt.plot(epochs_range, val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')
plt.subplot(1, 2, 2)
plt.plot(epochs_range, loss, label='Training Loss')
plt.plot(epochs_range, val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
plt.show()
def display_predictions(self):
"""Display predictions"""
if self.predictions is None:
return
pred_iter = iter(self.predictions)
for _, elements in self.predict_dataset.enumerate():
fig = plt.figure(figsize=(8, 8))
num_rows = int(np.ceil(elements[0].shape[0] / DISPLAY_COLS))
n_elem = 1
for image, labels in zip(elements[0], elements[1]):
true_label = int(np.argmax(labels))
try:
prediction = next(pred_iter)
pred_label = int(np.argmax(prediction))
if prediction[pred_label] < CONFIDENCE_LEVEL:
pred_label = -1
except StopIteration:
break
fig.add_subplot(num_rows, DISPLAY_COLS, n_elem)
plt.xticks([])
plt.yticks([])
n_elem += 1
plt.imshow(image, cmap=plt.cm.binary)
plt.title('{} ({})'.format(
self.class_names[pred_label] if pred_label >= 0 else 'none',
self.class_names[true_label]))
plt.tight_layout()
plt.show()
| self.init_datasets() | conditional_block |
item_classifier.py | #-------------------------------------------------------------------------------
# Name: Simple stone classification TF model
# Purpose: Learn TensorFlow 2.0
#
# Author: kol
#
# Created: 13.01.2020
# Copyright: (c) kol 2020
# Licence: MIT
#-------------------------------------------------------------------------------
import os
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
from pathlib import Path
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from random import randrange
IMG_HEIGHT = 20
IMG_WIDTH = 20
NUM_EPOCHS = 20
BATCH_SIZE = 32
DISPLAY_COLS = 6
CONFIDENCE_LEVEL = 0.8
AUTOTUNE = tf.data.experimental.AUTOTUNE
# Board elements classifier model wrapper
class BoardItemClassifier:
"""This class wraps around TF model
A stone images dataset made by cc/cc_gen.py is required for model training and prediction
"""
def __init__(self, model_dir, img_dir, img_size = (IMG_WIDTH, IMG_HEIGHT), log_dir = None):
"""Constructor.
Parameters:
model_dir Directory where a model is saved
img_dir Root directory of stone images dataset
img_size Target image size
"""
self.model = None
self.model_dir, self.img_dir, self.img_size, self.log_dir = model_dir, img_dir, img_size, log_dir
self.image_data_gen = None
self.train_dataset = None
self.val_dataset = None
self.history = None
self.predict_generator = None
self.predict_dataset = None
self.predictions = None
self.class_names = np.array([item.name for item in Path(self.img_dir).glob('*') if item.is_dir()])
def exists(self):
"""Checks saved model presence"""
return Path(self.model_dir).exists()
def load(self):
"""Load a model from directory"""
print("==> Loading model from", self.model_dir)
self.model = tf.keras.models.load_model(self.model_dir)
def build(self):
"""Build new model"""
print("==> Building model", self.model_dir)
self.model = tf.keras.models.Sequential()
layers = self.get_model_layers()
for l in layers:
self.model.add(l)
self.model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['sparse_categorical_accuracy'])
def get_model_layers(self):
return [
tf.keras.layers.Conv2D(16, 3, padding='same', activation='relu',
input_shape=(IMG_HEIGHT, IMG_WIDTH, 3),
kernel_regularizer=tf.keras.regularizers.l2(0.001)),
tf.keras.layers.MaxPooling2D((2, 2)),
tf.keras.layers.Dropout(0.1),
tf.keras.layers.Conv2D(32, 3, padding='same', activation='relu',
kernel_regularizer=tf.keras.regularizers.l2(0.001)),
tf.keras.layers.MaxPooling2D((2, 2)),
tf.keras.layers.Dropout(0.1),
tf.keras.layers.Conv2D(64, 3, padding='same', activation='relu',
kernel_regularizer=tf.keras.regularizers.l2(0.001)),
tf.keras.layers.MaxPooling2D((2, 2)),
tf.keras.layers.Dropout(0.1),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(512, activation='relu'),
tf.keras.layers.Dense(len(self.class_names), activation='softmax')
]
def save(self):
"""Save whole model to specified directory"""
print("==> Saving model to", self.model_dir)
self.model.save(self.model_dir)
def init_datasets(self, display_samples = False):
"""Initialize datasets for training"""
print("==> Loading images from ", self.img_dir)
self.image_data_gen = ImageDataGenerator(
rescale=1./255,
#rotation_range=30,
#shear_range=30,
#width_shift_range=.15,
#height_shift_range=.15,
#zoom_range=0.5,
validation_split=0.2)
self.train_dataset = self.image_data_gen.flow_from_directory(
batch_size=BATCH_SIZE,
directory=self.img_dir,
shuffle=True,
target_size=self.img_size,
class_mode='sparse',
subset='training')
self.val_dataset = self.image_data_gen.flow_from_directory(
batch_size=BATCH_SIZE,
directory=self.img_dir,
shuffle=True,
target_size=self.img_size,
class_mode='sparse', |
def train(self, epochs = NUM_EPOCHS, display_history = False):
"""Train the model"""
print("==> Training model from", self.model_dir)
if self.model is None:
self.build()
if self.train_dataset is None:
self.init_datasets()
callbacks = []
if self.log_dir is not None:
callbacks.extend([
tf.keras.callbacks.TensorBoard(self.log_dir,
profile_batch=0,
write_graph=True)])
if self.image_data_gen is not None:
# Generator
self.history = self.model.fit_generator(
self.train_dataset,
epochs=epochs,
steps_per_epoch=self.train_dataset.samples // BATCH_SIZE,
validation_data=self.val_dataset,
validation_steps=self.val_dataset.samples // BATCH_SIZE,
callbacks = callbacks)
else:
# Dataset
self.history = self.model.fit(
self.train_dataset,
epochs=epochs,
callbacks = callbacks)
if display_history:
self.display_history()
def predict(self, num_samples = BATCH_SIZE, display_predictions = True):
"""Predict on specified number of samples"""
if self.model is None:
raise Exception("Model is empty, either build or load it")
print("==> Prediction on model from", self.model_dir)
file_names, file_labels = self.get_sample_files(num_samples)
self.predict_dataset = tf.data.Dataset.from_tensor_slices((file_names, file_labels))
self.predict_dataset = self.predict_dataset.map(self.map_fn, num_parallel_calls=AUTOTUNE)
self.predict_dataset = self.predict_dataset.batch(BATCH_SIZE)
self.predictions = self.model.predict(self.predict_dataset)
if display_predictions:
self.display_predictions()
def map_fn(self, path, label):
"""Upload an image fo given path with specified label - internal"""
image = tf.image.decode_png(tf.io.read_file(path))
image = tf.image.convert_image_dtype(image, tf.float32)
if self.img_size is not None:
image = tf.image.resize(image, self.img_size)
return image, label
def get_sample_files(self, num_samples = BATCH_SIZE):
"""Retrieve specified number of sample files from stone images dataset"""
file_names = []
file_labels = []
for n, d in enumerate(Path(self.img_dir).glob('*')):
names = [str(f) for f in Path(self.img_dir).joinpath(d).glob('*.png')]
file_names.extend(names)
labels = [float(x == d.name) for x in self.class_names]
file_labels.extend([labels] * len(names))
random_file_names = []
random_file_labels = []
for _ in range(0, num_samples):
n = randrange(0, len(file_names)-1)
random_file_names.extend([file_names[n]])
random_file_labels.extend([file_labels[n]])
file_names = tf.convert_to_tensor(random_file_names, dtype=tf.string)
file_labels = tf.convert_to_tensor(random_file_labels)
file_labels = tf.expand_dims(file_labels, axis=-1)
return file_names, file_labels
def display_sample_images(self):
"""Display up to 25 images from training dataset"""
if self.train_dataset is None:
self.init_datasets()
images, labels = next(self.train_dataset)
plt.figure(figsize=(5,5))
for n in range(min(25, images.shape[0])):
ax = plt.subplot(5,5,n+1)
plt.imshow(images[n])
if len(labels.shape) == 1:
plt.title(self.class_names[int(labels[n])].title())
else:
m = np.argmax(labels[n])
plt.title(self.class_names[int(labels[n, m])].title())
plt.axis('off')
plt.tight_layout()
plt.show()
def display_history(self):
"""Display training history"""
if self.history is None:
return
acc = self.history.history['sparse_categorical_accuracy']
val_acc = self.history.history['val_sparse_categorical_accuracy']
loss = self.history.history['loss']
val_loss = self.history.history['val_loss']
epochs = len(acc)
epochs_range = range(epochs)
plt.figure(figsize=(8, 8))
plt.subplot(1, 2, 1)
plt.plot(epochs_range, acc, label='Training Accuracy')
plt.plot(epochs_range, val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')
plt.subplot(1, 2, 2)
plt.plot(epochs_range, loss, label='Training Loss')
plt.plot(epochs_range, val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
plt.show()
def display_predictions(self):
"""Display predictions"""
if self.predictions is None:
return
pred_iter = iter(self.predictions)
for _, elements in self.predict_dataset.enumerate():
fig = plt.figure(figsize=(8, 8))
num_rows = int(np.ceil(elements[0].shape[0] / DISPLAY_COLS))
n_elem = 1
for image, labels in zip(elements[0], elements[1]):
true_label = int(np.argmax(labels))
try:
prediction = next(pred_iter)
pred_label = int(np.argmax(prediction))
if prediction[pred_label] < CONFIDENCE_LEVEL:
pred_label = -1
except StopIteration:
break
fig.add_subplot(num_rows, DISPLAY_COLS, n_elem)
plt.xticks([])
plt.yticks([])
n_elem += 1
plt.imshow(image, cmap=plt.cm.binary)
plt.title('{} ({})'.format(
self.class_names[pred_label] if pred_label >= 0 else 'none',
self.class_names[true_label]))
plt.tight_layout()
plt.show() | subset='validation')
if display_samples:
self.display_sample_images() | random_line_split |
item_classifier.py | #-------------------------------------------------------------------------------
# Name: Simple stone classification TF model
# Purpose: Learn TensorFlow 2.0
#
# Author: kol
#
# Created: 13.01.2020
# Copyright: (c) kol 2020
# Licence: MIT
#-------------------------------------------------------------------------------
import os
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
from pathlib import Path
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from random import randrange
IMG_HEIGHT = 20
IMG_WIDTH = 20
NUM_EPOCHS = 20
BATCH_SIZE = 32
DISPLAY_COLS = 6
CONFIDENCE_LEVEL = 0.8
AUTOTUNE = tf.data.experimental.AUTOTUNE
# Board elements classifier model wrapper
class BoardItemClassifier:
"""This class wraps around TF model
A stone images dataset made by cc/cc_gen.py is required for model training and prediction
"""
def __init__(self, model_dir, img_dir, img_size = (IMG_WIDTH, IMG_HEIGHT), log_dir = None):
"""Constructor.
Parameters:
model_dir Directory where a model is saved
img_dir Root directory of stone images dataset
img_size Target image size
"""
self.model = None
self.model_dir, self.img_dir, self.img_size, self.log_dir = model_dir, img_dir, img_size, log_dir
self.image_data_gen = None
self.train_dataset = None
self.val_dataset = None
self.history = None
self.predict_generator = None
self.predict_dataset = None
self.predictions = None
self.class_names = np.array([item.name for item in Path(self.img_dir).glob('*') if item.is_dir()])
def exists(self):
"""Checks saved model presence"""
return Path(self.model_dir).exists()
def load(self):
"""Load a model from directory"""
print("==> Loading model from", self.model_dir)
self.model = tf.keras.models.load_model(self.model_dir)
def build(self):
"""Build new model"""
print("==> Building model", self.model_dir)
self.model = tf.keras.models.Sequential()
layers = self.get_model_layers()
for l in layers:
self.model.add(l)
self.model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['sparse_categorical_accuracy'])
def get_model_layers(self):
return [
tf.keras.layers.Conv2D(16, 3, padding='same', activation='relu',
input_shape=(IMG_HEIGHT, IMG_WIDTH, 3),
kernel_regularizer=tf.keras.regularizers.l2(0.001)),
tf.keras.layers.MaxPooling2D((2, 2)),
tf.keras.layers.Dropout(0.1),
tf.keras.layers.Conv2D(32, 3, padding='same', activation='relu',
kernel_regularizer=tf.keras.regularizers.l2(0.001)),
tf.keras.layers.MaxPooling2D((2, 2)),
tf.keras.layers.Dropout(0.1),
tf.keras.layers.Conv2D(64, 3, padding='same', activation='relu',
kernel_regularizer=tf.keras.regularizers.l2(0.001)),
tf.keras.layers.MaxPooling2D((2, 2)),
tf.keras.layers.Dropout(0.1),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(512, activation='relu'),
tf.keras.layers.Dense(len(self.class_names), activation='softmax')
]
def save(self):
"""Save whole model to specified directory"""
print("==> Saving model to", self.model_dir)
self.model.save(self.model_dir)
def | (self, display_samples = False):
"""Initialize datasets for training"""
print("==> Loading images from ", self.img_dir)
self.image_data_gen = ImageDataGenerator(
rescale=1./255,
#rotation_range=30,
#shear_range=30,
#width_shift_range=.15,
#height_shift_range=.15,
#zoom_range=0.5,
validation_split=0.2)
self.train_dataset = self.image_data_gen.flow_from_directory(
batch_size=BATCH_SIZE,
directory=self.img_dir,
shuffle=True,
target_size=self.img_size,
class_mode='sparse',
subset='training')
self.val_dataset = self.image_data_gen.flow_from_directory(
batch_size=BATCH_SIZE,
directory=self.img_dir,
shuffle=True,
target_size=self.img_size,
class_mode='sparse',
subset='validation')
if display_samples:
self.display_sample_images()
def train(self, epochs = NUM_EPOCHS, display_history = False):
"""Train the model"""
print("==> Training model from", self.model_dir)
if self.model is None:
self.build()
if self.train_dataset is None:
self.init_datasets()
callbacks = []
if self.log_dir is not None:
callbacks.extend([
tf.keras.callbacks.TensorBoard(self.log_dir,
profile_batch=0,
write_graph=True)])
if self.image_data_gen is not None:
# Generator
self.history = self.model.fit_generator(
self.train_dataset,
epochs=epochs,
steps_per_epoch=self.train_dataset.samples // BATCH_SIZE,
validation_data=self.val_dataset,
validation_steps=self.val_dataset.samples // BATCH_SIZE,
callbacks = callbacks)
else:
# Dataset
self.history = self.model.fit(
self.train_dataset,
epochs=epochs,
callbacks = callbacks)
if display_history:
self.display_history()
def predict(self, num_samples = BATCH_SIZE, display_predictions = True):
"""Predict on specified number of samples"""
if self.model is None:
raise Exception("Model is empty, either build or load it")
print("==> Prediction on model from", self.model_dir)
file_names, file_labels = self.get_sample_files(num_samples)
self.predict_dataset = tf.data.Dataset.from_tensor_slices((file_names, file_labels))
self.predict_dataset = self.predict_dataset.map(self.map_fn, num_parallel_calls=AUTOTUNE)
self.predict_dataset = self.predict_dataset.batch(BATCH_SIZE)
self.predictions = self.model.predict(self.predict_dataset)
if display_predictions:
self.display_predictions()
def map_fn(self, path, label):
"""Upload an image fo given path with specified label - internal"""
image = tf.image.decode_png(tf.io.read_file(path))
image = tf.image.convert_image_dtype(image, tf.float32)
if self.img_size is not None:
image = tf.image.resize(image, self.img_size)
return image, label
def get_sample_files(self, num_samples = BATCH_SIZE):
"""Retrieve specified number of sample files from stone images dataset"""
file_names = []
file_labels = []
for n, d in enumerate(Path(self.img_dir).glob('*')):
names = [str(f) for f in Path(self.img_dir).joinpath(d).glob('*.png')]
file_names.extend(names)
labels = [float(x == d.name) for x in self.class_names]
file_labels.extend([labels] * len(names))
random_file_names = []
random_file_labels = []
for _ in range(0, num_samples):
n = randrange(0, len(file_names)-1)
random_file_names.extend([file_names[n]])
random_file_labels.extend([file_labels[n]])
file_names = tf.convert_to_tensor(random_file_names, dtype=tf.string)
file_labels = tf.convert_to_tensor(random_file_labels)
file_labels = tf.expand_dims(file_labels, axis=-1)
return file_names, file_labels
def display_sample_images(self):
"""Display up to 25 images from training dataset"""
if self.train_dataset is None:
self.init_datasets()
images, labels = next(self.train_dataset)
plt.figure(figsize=(5,5))
for n in range(min(25, images.shape[0])):
ax = plt.subplot(5,5,n+1)
plt.imshow(images[n])
if len(labels.shape) == 1:
plt.title(self.class_names[int(labels[n])].title())
else:
m = np.argmax(labels[n])
plt.title(self.class_names[int(labels[n, m])].title())
plt.axis('off')
plt.tight_layout()
plt.show()
def display_history(self):
"""Display training history"""
if self.history is None:
return
acc = self.history.history['sparse_categorical_accuracy']
val_acc = self.history.history['val_sparse_categorical_accuracy']
loss = self.history.history['loss']
val_loss = self.history.history['val_loss']
epochs = len(acc)
epochs_range = range(epochs)
plt.figure(figsize=(8, 8))
plt.subplot(1, 2, 1)
plt.plot(epochs_range, acc, label='Training Accuracy')
plt.plot(epochs_range, val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')
plt.subplot(1, 2, 2)
plt.plot(epochs_range, loss, label='Training Loss')
plt.plot(epochs_range, val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
plt.show()
def display_predictions(self):
"""Display predictions"""
if self.predictions is None:
return
pred_iter = iter(self.predictions)
for _, elements in self.predict_dataset.enumerate():
fig = plt.figure(figsize=(8, 8))
num_rows = int(np.ceil(elements[0].shape[0] / DISPLAY_COLS))
n_elem = 1
for image, labels in zip(elements[0], elements[1]):
true_label = int(np.argmax(labels))
try:
prediction = next(pred_iter)
pred_label = int(np.argmax(prediction))
if prediction[pred_label] < CONFIDENCE_LEVEL:
pred_label = -1
except StopIteration:
break
fig.add_subplot(num_rows, DISPLAY_COLS, n_elem)
plt.xticks([])
plt.yticks([])
n_elem += 1
plt.imshow(image, cmap=plt.cm.binary)
plt.title('{} ({})'.format(
self.class_names[pred_label] if pred_label >= 0 else 'none',
self.class_names[true_label]))
plt.tight_layout()
plt.show()
| init_datasets | identifier_name |
engine_dcos.go | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT license.
package engine
import (
"bytes"
"fmt"
"regexp"
"strconv"
"strings"
"github.com/Azure/aks-engine/pkg/api"
"github.com/Azure/aks-engine/pkg/api/common"
"github.com/ghodss/yaml"
)
func getDCOSProvisionScript(script string) string {
// add the provision script
bp, err := Asset(script)
if err != nil {
panic(fmt.Sprintf("BUG: %s", err.Error()))
}
provisionScript := string(bp)
if strings.Contains(provisionScript, "'") {
panic(fmt.Sprintf("BUG: %s may not contain character '", script))
}
return strings.Replace(strings.Replace(provisionScript, "\r\n", "\n", -1), "\n", "\n\n ", -1)
}
func getDCOSBootstrapCustomData(p *api.Properties) string {
masterIPList, err := generateConsecutiveIPsList(p.MasterProfile.Count, p.MasterProfile.FirstConsecutiveStaticIP)
if err != nil {
return ""
}
for i, v := range masterIPList {
masterIPList[i] = " - " + v
}
str := getSingleLineDCOSCustomData(
p.OrchestratorProfile.OrchestratorType,
dcos2BootstrapCustomdata, 0,
map[string]string{
"PROVISION_SOURCE_STR": getDCOSProvisionScript(dcosProvisionSource),
"PROVISION_STR": getDCOSProvisionScript(dcos2BootstrapProvision),
"MASTER_IP_LIST": strings.Join(masterIPList, "\n"),
"BOOTSTRAP_IP": p.OrchestratorProfile.DcosConfig.BootstrapProfile.StaticIP,
"BOOTSTRAP_OAUTH_ENABLED": strconv.FormatBool(p.OrchestratorProfile.DcosConfig.BootstrapProfile.OAuthEnabled)})
return fmt.Sprintf("\"customData\": \"[base64(concat('#cloud-config\\n\\n', '%s'))]\",", str)
}
func getDCOSCustomDataPublicIPStr(orchestratorType string, masterCount int) string {
if orchestratorType == api.DCOS {
var buf bytes.Buffer
for i := 0; i < masterCount; i++ {
buf.WriteString(fmt.Sprintf("reference(variables('masterVMNic')[%d]).ipConfigurations[0].properties.privateIPAddress,", i))
if i < (masterCount - 1) {
buf.WriteString(`'\\\", \\\"', `)
}
}
return buf.String()
}
return ""
}
// getSingleLineForTemplate returns the file as a single line for embedding in an arm template
func getSingleLineDCOSCustomData(orchestratorType, yamlFilename string, masterCount int, replaceMap map[string]string) string {
b, err := Asset(yamlFilename)
if err != nil {
panic(fmt.Sprintf("BUG getting yaml custom data file: %s", err.Error()))
}
yamlStr := string(b)
for k, v := range replaceMap {
yamlStr = strings.Replace(yamlStr, k, v, -1)
}
// convert to json
jsonBytes, err4 := yaml.YAMLToJSON([]byte(yamlStr))
if err4 != nil {
panic(fmt.Sprintf("BUG: %s", err4.Error()))
}
yamlStr = string(jsonBytes)
// convert to one line
yamlStr = strings.Replace(yamlStr, "\\", "\\\\", -1)
yamlStr = strings.Replace(yamlStr, "\r\n", "\\n", -1)
yamlStr = strings.Replace(yamlStr, "\n", "\\n", -1)
yamlStr = strings.Replace(yamlStr, "\"", "\\\"", -1)
// variable replacement
rVariable, e1 := regexp.Compile("{{{([^}]*)}}}")
if e1 != nil {
panic(fmt.Sprintf("BUG: %s", e1.Error()))
}
yamlStr = rVariable.ReplaceAllString(yamlStr, "',variables('$1'),'")
// replace the internal values
publicIPStr := getDCOSCustomDataPublicIPStr(orchestratorType, masterCount)
yamlStr = strings.Replace(yamlStr, "DCOSCUSTOMDATAPUBLICIPSTR", publicIPStr, -1)
return yamlStr
}
func getDCOSCustomDataTemplate(orchestratorType, orchestratorVersion string) string {
switch orchestratorType {
case api.DCOS:
switch orchestratorVersion {
case common.DCOSVersion1Dot8Dot8:
return dcosCustomData188
case common.DCOSVersion1Dot9Dot0:
return dcosCustomData190
case common.DCOSVersion1Dot9Dot8:
return dcosCustomData198
case common.DCOSVersion1Dot10Dot0:
return dcosCustomData110
case common.DCOSVersion1Dot11Dot0:
return dcos2CustomData1110
case common.DCOSVersion1Dot11Dot2:
return dcos2CustomData1112
}
default:
// it is a bug to get here
panic(fmt.Sprintf("BUG: invalid orchestrator %s", orchestratorType))
}
return ""
}
func | (cs *api.ContainerService) string {
masterAttributeContents := getDCOSMasterCustomNodeLabels()
masterPreprovisionExtension := ""
if cs.Properties.MasterProfile.PreprovisionExtension != nil {
masterPreprovisionExtension += "\n"
masterPreprovisionExtension += makeMasterExtensionScriptCommands(cs)
}
var bootstrapIP string
if cs.Properties.OrchestratorProfile.DcosConfig != nil && cs.Properties.OrchestratorProfile.DcosConfig.BootstrapProfile != nil {
bootstrapIP = cs.Properties.OrchestratorProfile.DcosConfig.BootstrapProfile.StaticIP
}
str := getSingleLineDCOSCustomData(
cs.Properties.OrchestratorProfile.OrchestratorType,
getDCOSCustomDataTemplate(cs.Properties.OrchestratorProfile.OrchestratorType, cs.Properties.OrchestratorProfile.OrchestratorVersion),
cs.Properties.MasterProfile.Count,
map[string]string{
"PROVISION_SOURCE_STR": getDCOSProvisionScript(dcosProvisionSource),
"PROVISION_STR": getDCOSMasterProvisionScript(cs.Properties.OrchestratorProfile, bootstrapIP),
"ATTRIBUTES_STR": masterAttributeContents,
"PREPROVISION_EXTENSION": masterPreprovisionExtension,
"ROLENAME": "master"})
return fmt.Sprintf("\"customData\": \"[base64(concat('#cloud-config\\n\\n', '%s'))]\",", str)
}
func getDCOSAgentProvisionScript(profile *api.AgentPoolProfile, orchProfile *api.OrchestratorProfile, bootstrapIP string) string {
// add the provision script
scriptname := dcos2Provision
if orchProfile.DcosConfig == nil || orchProfile.DcosConfig.BootstrapProfile == nil {
if profile.OSType == api.Windows {
scriptname = dcosWindowsProvision
} else {
scriptname = dcosProvision
}
}
bp, err := Asset(scriptname)
if err != nil {
panic(fmt.Sprintf("BUG: %s", err.Error()))
}
provisionScript := string(bp)
if strings.Contains(provisionScript, "'") {
panic(fmt.Sprintf("BUG: %s may not contain character '", dcosProvision))
}
// the embedded roleFileContents
var roleFileContents string
if len(profile.Ports) > 0 {
// public agents
roleFileContents = "touch /etc/mesosphere/roles/slave_public"
} else {
roleFileContents = "touch /etc/mesosphere/roles/slave"
}
provisionScript = strings.Replace(provisionScript, "ROLESFILECONTENTS", roleFileContents, -1)
provisionScript = strings.Replace(provisionScript, "BOOTSTRAP_IP", bootstrapIP, -1)
var b bytes.Buffer
b.WriteString(provisionScript)
b.WriteString("\n")
if len(orchProfile.DcosConfig.Registry) == 0 {
b.WriteString("rm /etc/docker.tar.gz\n")
}
return strings.Replace(strings.Replace(b.String(), "\r\n", "\n", -1), "\n", "\n\n ", -1)
}
func getDCOSAgentCustomData(cs *api.ContainerService, profile *api.AgentPoolProfile) string {
attributeContents := getDCOSAgentCustomNodeLabels(profile)
agentPreprovisionExtension := ""
if profile.PreprovisionExtension != nil {
agentPreprovisionExtension += "\n"
agentPreprovisionExtension += makeAgentExtensionScriptCommands(cs, profile)
}
var agentRoleName, bootstrapIP string
if len(profile.Ports) > 0 {
agentRoleName = "slave_public"
} else {
agentRoleName = "slave"
}
if cs.Properties.OrchestratorProfile.DcosConfig != nil && cs.Properties.OrchestratorProfile.DcosConfig.BootstrapProfile != nil {
bootstrapIP = cs.Properties.OrchestratorProfile.DcosConfig.BootstrapProfile.StaticIP
}
str := getSingleLineDCOSCustomData(
cs.Properties.OrchestratorProfile.OrchestratorType,
getDCOSCustomDataTemplate(cs.Properties.OrchestratorProfile.OrchestratorType, cs.Properties.OrchestratorProfile.OrchestratorVersion),
cs.Properties.MasterProfile.Count,
map[string]string{
"PROVISION_SOURCE_STR": getDCOSProvisionScript(dcosProvisionSource),
"PROVISION_STR": getDCOSAgentProvisionScript(profile, cs.Properties.OrchestratorProfile, bootstrapIP),
"ATTRIBUTES_STR": attributeContents,
"PREPROVISION_EXTENSION": agentPreprovisionExtension,
"ROLENAME": agentRoleName})
return fmt.Sprintf("\"customData\": \"[base64(concat('#cloud-config\\n\\n', '%s'))]\",", str)
}
func getDCOSWindowsAgentCustomData(cs *api.ContainerService, profile *api.AgentPoolProfile) string {
agentPreprovisionExtension := ""
if profile.PreprovisionExtension != nil {
agentPreprovisionExtension += "\n"
agentPreprovisionExtension += makeAgentExtensionScriptCommands(cs, profile)
}
b, err := Asset(dcosWindowsProvision)
if err != nil {
// this should never happen and this is a bug
panic(fmt.Sprintf("BUG: %s", err.Error()))
}
// translate the parameters
csStr := string(b)
csStr = strings.Replace(csStr, "PREPROVISION_EXTENSION", agentPreprovisionExtension, -1)
csStr = strings.Replace(csStr, "\r\n", "\n", -1)
str := getBase64EncodedGzippedCustomScriptFromStr(csStr)
return fmt.Sprintf("\"customData\": \"%s\"", str)
}
// getLinkedTemplatesForExtensions returns the
// Microsoft.Resources/deployments for each extension
func getLinkedTemplatesForExtensions(properties *api.Properties) string {
var result string
extensions := properties.ExtensionProfiles
masterProfileExtensions := properties.MasterProfile.Extensions
orchestratorType := properties.OrchestratorProfile.OrchestratorType
for err, extensionProfile := range extensions {
_ = err
masterOptedForExtension, singleOrAll := validateProfileOptedForExtension(extensionProfile.Name, masterProfileExtensions)
if masterOptedForExtension {
result += ","
dta, e := getMasterLinkedTemplateText(orchestratorType, extensionProfile, singleOrAll)
if e != nil {
fmt.Println(e.Error())
return ""
}
result += dta
}
for _, agentPoolProfile := range properties.AgentPoolProfiles {
poolProfileExtensions := agentPoolProfile.Extensions
poolOptedForExtension, singleOrAll := validateProfileOptedForExtension(extensionProfile.Name, poolProfileExtensions)
if poolOptedForExtension {
result += ","
dta, e := getAgentPoolLinkedTemplateText(agentPoolProfile, orchestratorType, extensionProfile, singleOrAll)
if e != nil {
fmt.Println(e.Error())
return ""
}
result += dta
}
}
}
return result
}
| getDCOSMasterCustomData | identifier_name |
engine_dcos.go | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT license.
package engine
import (
"bytes"
"fmt"
"regexp"
"strconv"
"strings"
"github.com/Azure/aks-engine/pkg/api"
"github.com/Azure/aks-engine/pkg/api/common"
"github.com/ghodss/yaml"
)
func getDCOSProvisionScript(script string) string {
// add the provision script
bp, err := Asset(script)
if err != nil {
panic(fmt.Sprintf("BUG: %s", err.Error()))
}
provisionScript := string(bp)
if strings.Contains(provisionScript, "'") {
panic(fmt.Sprintf("BUG: %s may not contain character '", script))
}
return strings.Replace(strings.Replace(provisionScript, "\r\n", "\n", -1), "\n", "\n\n ", -1)
}
func getDCOSBootstrapCustomData(p *api.Properties) string {
masterIPList, err := generateConsecutiveIPsList(p.MasterProfile.Count, p.MasterProfile.FirstConsecutiveStaticIP)
if err != nil {
return ""
}
for i, v := range masterIPList {
masterIPList[i] = " - " + v
}
str := getSingleLineDCOSCustomData(
p.OrchestratorProfile.OrchestratorType,
dcos2BootstrapCustomdata, 0,
map[string]string{
"PROVISION_SOURCE_STR": getDCOSProvisionScript(dcosProvisionSource),
"PROVISION_STR": getDCOSProvisionScript(dcos2BootstrapProvision),
"MASTER_IP_LIST": strings.Join(masterIPList, "\n"),
"BOOTSTRAP_IP": p.OrchestratorProfile.DcosConfig.BootstrapProfile.StaticIP,
"BOOTSTRAP_OAUTH_ENABLED": strconv.FormatBool(p.OrchestratorProfile.DcosConfig.BootstrapProfile.OAuthEnabled)})
return fmt.Sprintf("\"customData\": \"[base64(concat('#cloud-config\\n\\n', '%s'))]\",", str)
}
func getDCOSCustomDataPublicIPStr(orchestratorType string, masterCount int) string {
if orchestratorType == api.DCOS {
var buf bytes.Buffer
for i := 0; i < masterCount; i++ {
buf.WriteString(fmt.Sprintf("reference(variables('masterVMNic')[%d]).ipConfigurations[0].properties.privateIPAddress,", i))
if i < (masterCount - 1) {
buf.WriteString(`'\\\", \\\"', `)
}
}
return buf.String()
}
return ""
}
// getSingleLineForTemplate returns the file as a single line for embedding in an arm template
func getSingleLineDCOSCustomData(orchestratorType, yamlFilename string, masterCount int, replaceMap map[string]string) string {
b, err := Asset(yamlFilename)
if err != nil {
panic(fmt.Sprintf("BUG getting yaml custom data file: %s", err.Error()))
}
yamlStr := string(b)
for k, v := range replaceMap {
yamlStr = strings.Replace(yamlStr, k, v, -1)
}
// convert to json
jsonBytes, err4 := yaml.YAMLToJSON([]byte(yamlStr))
if err4 != nil {
panic(fmt.Sprintf("BUG: %s", err4.Error()))
}
yamlStr = string(jsonBytes)
// convert to one line
yamlStr = strings.Replace(yamlStr, "\\", "\\\\", -1)
yamlStr = strings.Replace(yamlStr, "\r\n", "\\n", -1)
yamlStr = strings.Replace(yamlStr, "\n", "\\n", -1)
yamlStr = strings.Replace(yamlStr, "\"", "\\\"", -1)
// variable replacement
rVariable, e1 := regexp.Compile("{{{([^}]*)}}}")
if e1 != nil {
panic(fmt.Sprintf("BUG: %s", e1.Error()))
}
yamlStr = rVariable.ReplaceAllString(yamlStr, "',variables('$1'),'")
// replace the internal values
publicIPStr := getDCOSCustomDataPublicIPStr(orchestratorType, masterCount)
yamlStr = strings.Replace(yamlStr, "DCOSCUSTOMDATAPUBLICIPSTR", publicIPStr, -1)
return yamlStr
}
func getDCOSCustomDataTemplate(orchestratorType, orchestratorVersion string) string {
switch orchestratorType {
case api.DCOS:
switch orchestratorVersion {
case common.DCOSVersion1Dot8Dot8:
return dcosCustomData188
case common.DCOSVersion1Dot9Dot0:
return dcosCustomData190
case common.DCOSVersion1Dot9Dot8:
return dcosCustomData198
case common.DCOSVersion1Dot10Dot0:
return dcosCustomData110
case common.DCOSVersion1Dot11Dot0:
return dcos2CustomData1110
case common.DCOSVersion1Dot11Dot2:
return dcos2CustomData1112
}
default:
// it is a bug to get here
panic(fmt.Sprintf("BUG: invalid orchestrator %s", orchestratorType))
}
return ""
}
func getDCOSMasterCustomData(cs *api.ContainerService) string |
func getDCOSAgentProvisionScript(profile *api.AgentPoolProfile, orchProfile *api.OrchestratorProfile, bootstrapIP string) string {
// add the provision script
scriptname := dcos2Provision
if orchProfile.DcosConfig == nil || orchProfile.DcosConfig.BootstrapProfile == nil {
if profile.OSType == api.Windows {
scriptname = dcosWindowsProvision
} else {
scriptname = dcosProvision
}
}
bp, err := Asset(scriptname)
if err != nil {
panic(fmt.Sprintf("BUG: %s", err.Error()))
}
provisionScript := string(bp)
if strings.Contains(provisionScript, "'") {
panic(fmt.Sprintf("BUG: %s may not contain character '", dcosProvision))
}
// the embedded roleFileContents
var roleFileContents string
if len(profile.Ports) > 0 {
// public agents
roleFileContents = "touch /etc/mesosphere/roles/slave_public"
} else {
roleFileContents = "touch /etc/mesosphere/roles/slave"
}
provisionScript = strings.Replace(provisionScript, "ROLESFILECONTENTS", roleFileContents, -1)
provisionScript = strings.Replace(provisionScript, "BOOTSTRAP_IP", bootstrapIP, -1)
var b bytes.Buffer
b.WriteString(provisionScript)
b.WriteString("\n")
if len(orchProfile.DcosConfig.Registry) == 0 {
b.WriteString("rm /etc/docker.tar.gz\n")
}
return strings.Replace(strings.Replace(b.String(), "\r\n", "\n", -1), "\n", "\n\n ", -1)
}
func getDCOSAgentCustomData(cs *api.ContainerService, profile *api.AgentPoolProfile) string {
attributeContents := getDCOSAgentCustomNodeLabels(profile)
agentPreprovisionExtension := ""
if profile.PreprovisionExtension != nil {
agentPreprovisionExtension += "\n"
agentPreprovisionExtension += makeAgentExtensionScriptCommands(cs, profile)
}
var agentRoleName, bootstrapIP string
if len(profile.Ports) > 0 {
agentRoleName = "slave_public"
} else {
agentRoleName = "slave"
}
if cs.Properties.OrchestratorProfile.DcosConfig != nil && cs.Properties.OrchestratorProfile.DcosConfig.BootstrapProfile != nil {
bootstrapIP = cs.Properties.OrchestratorProfile.DcosConfig.BootstrapProfile.StaticIP
}
str := getSingleLineDCOSCustomData(
cs.Properties.OrchestratorProfile.OrchestratorType,
getDCOSCustomDataTemplate(cs.Properties.OrchestratorProfile.OrchestratorType, cs.Properties.OrchestratorProfile.OrchestratorVersion),
cs.Properties.MasterProfile.Count,
map[string]string{
"PROVISION_SOURCE_STR": getDCOSProvisionScript(dcosProvisionSource),
"PROVISION_STR": getDCOSAgentProvisionScript(profile, cs.Properties.OrchestratorProfile, bootstrapIP),
"ATTRIBUTES_STR": attributeContents,
"PREPROVISION_EXTENSION": agentPreprovisionExtension,
"ROLENAME": agentRoleName})
return fmt.Sprintf("\"customData\": \"[base64(concat('#cloud-config\\n\\n', '%s'))]\",", str)
}
func getDCOSWindowsAgentCustomData(cs *api.ContainerService, profile *api.AgentPoolProfile) string {
agentPreprovisionExtension := ""
if profile.PreprovisionExtension != nil {
agentPreprovisionExtension += "\n"
agentPreprovisionExtension += makeAgentExtensionScriptCommands(cs, profile)
}
b, err := Asset(dcosWindowsProvision)
if err != nil {
// this should never happen and this is a bug
panic(fmt.Sprintf("BUG: %s", err.Error()))
}
// translate the parameters
csStr := string(b)
csStr = strings.Replace(csStr, "PREPROVISION_EXTENSION", agentPreprovisionExtension, -1)
csStr = strings.Replace(csStr, "\r\n", "\n", -1)
str := getBase64EncodedGzippedCustomScriptFromStr(csStr)
return fmt.Sprintf("\"customData\": \"%s\"", str)
}
// getLinkedTemplatesForExtensions returns the
// Microsoft.Resources/deployments for each extension
func getLinkedTemplatesForExtensions(properties *api.Properties) string {
var result string
extensions := properties.ExtensionProfiles
masterProfileExtensions := properties.MasterProfile.Extensions
orchestratorType := properties.OrchestratorProfile.OrchestratorType
for err, extensionProfile := range extensions {
_ = err
masterOptedForExtension, singleOrAll := validateProfileOptedForExtension(extensionProfile.Name, masterProfileExtensions)
if masterOptedForExtension {
result += ","
dta, e := getMasterLinkedTemplateText(orchestratorType, extensionProfile, singleOrAll)
if e != nil {
fmt.Println(e.Error())
return ""
}
result += dta
}
for _, agentPoolProfile := range properties.AgentPoolProfiles {
poolProfileExtensions := agentPoolProfile.Extensions
poolOptedForExtension, singleOrAll := validateProfileOptedForExtension(extensionProfile.Name, poolProfileExtensions)
if poolOptedForExtension {
result += ","
dta, e := getAgentPoolLinkedTemplateText(agentPoolProfile, orchestratorType, extensionProfile, singleOrAll)
if e != nil {
fmt.Println(e.Error())
return ""
}
result += dta
}
}
}
return result
}
| {
masterAttributeContents := getDCOSMasterCustomNodeLabels()
masterPreprovisionExtension := ""
if cs.Properties.MasterProfile.PreprovisionExtension != nil {
masterPreprovisionExtension += "\n"
masterPreprovisionExtension += makeMasterExtensionScriptCommands(cs)
}
var bootstrapIP string
if cs.Properties.OrchestratorProfile.DcosConfig != nil && cs.Properties.OrchestratorProfile.DcosConfig.BootstrapProfile != nil {
bootstrapIP = cs.Properties.OrchestratorProfile.DcosConfig.BootstrapProfile.StaticIP
}
str := getSingleLineDCOSCustomData(
cs.Properties.OrchestratorProfile.OrchestratorType,
getDCOSCustomDataTemplate(cs.Properties.OrchestratorProfile.OrchestratorType, cs.Properties.OrchestratorProfile.OrchestratorVersion),
cs.Properties.MasterProfile.Count,
map[string]string{
"PROVISION_SOURCE_STR": getDCOSProvisionScript(dcosProvisionSource),
"PROVISION_STR": getDCOSMasterProvisionScript(cs.Properties.OrchestratorProfile, bootstrapIP),
"ATTRIBUTES_STR": masterAttributeContents,
"PREPROVISION_EXTENSION": masterPreprovisionExtension,
"ROLENAME": "master"})
return fmt.Sprintf("\"customData\": \"[base64(concat('#cloud-config\\n\\n', '%s'))]\",", str)
} | identifier_body |
engine_dcos.go | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT license.
package engine
import (
"bytes"
"fmt"
"regexp"
"strconv"
"strings"
"github.com/Azure/aks-engine/pkg/api"
"github.com/Azure/aks-engine/pkg/api/common"
"github.com/ghodss/yaml"
)
func getDCOSProvisionScript(script string) string {
// add the provision script
bp, err := Asset(script)
if err != nil |
provisionScript := string(bp)
if strings.Contains(provisionScript, "'") {
panic(fmt.Sprintf("BUG: %s may not contain character '", script))
}
return strings.Replace(strings.Replace(provisionScript, "\r\n", "\n", -1), "\n", "\n\n ", -1)
}
func getDCOSBootstrapCustomData(p *api.Properties) string {
masterIPList, err := generateConsecutiveIPsList(p.MasterProfile.Count, p.MasterProfile.FirstConsecutiveStaticIP)
if err != nil {
return ""
}
for i, v := range masterIPList {
masterIPList[i] = " - " + v
}
str := getSingleLineDCOSCustomData(
p.OrchestratorProfile.OrchestratorType,
dcos2BootstrapCustomdata, 0,
map[string]string{
"PROVISION_SOURCE_STR": getDCOSProvisionScript(dcosProvisionSource),
"PROVISION_STR": getDCOSProvisionScript(dcos2BootstrapProvision),
"MASTER_IP_LIST": strings.Join(masterIPList, "\n"),
"BOOTSTRAP_IP": p.OrchestratorProfile.DcosConfig.BootstrapProfile.StaticIP,
"BOOTSTRAP_OAUTH_ENABLED": strconv.FormatBool(p.OrchestratorProfile.DcosConfig.BootstrapProfile.OAuthEnabled)})
return fmt.Sprintf("\"customData\": \"[base64(concat('#cloud-config\\n\\n', '%s'))]\",", str)
}
func getDCOSCustomDataPublicIPStr(orchestratorType string, masterCount int) string {
if orchestratorType == api.DCOS {
var buf bytes.Buffer
for i := 0; i < masterCount; i++ {
buf.WriteString(fmt.Sprintf("reference(variables('masterVMNic')[%d]).ipConfigurations[0].properties.privateIPAddress,", i))
if i < (masterCount - 1) {
buf.WriteString(`'\\\", \\\"', `)
}
}
return buf.String()
}
return ""
}
// getSingleLineForTemplate returns the file as a single line for embedding in an arm template
func getSingleLineDCOSCustomData(orchestratorType, yamlFilename string, masterCount int, replaceMap map[string]string) string {
b, err := Asset(yamlFilename)
if err != nil {
panic(fmt.Sprintf("BUG getting yaml custom data file: %s", err.Error()))
}
yamlStr := string(b)
for k, v := range replaceMap {
yamlStr = strings.Replace(yamlStr, k, v, -1)
}
// convert to json
jsonBytes, err4 := yaml.YAMLToJSON([]byte(yamlStr))
if err4 != nil {
panic(fmt.Sprintf("BUG: %s", err4.Error()))
}
yamlStr = string(jsonBytes)
// convert to one line
yamlStr = strings.Replace(yamlStr, "\\", "\\\\", -1)
yamlStr = strings.Replace(yamlStr, "\r\n", "\\n", -1)
yamlStr = strings.Replace(yamlStr, "\n", "\\n", -1)
yamlStr = strings.Replace(yamlStr, "\"", "\\\"", -1)
// variable replacement
rVariable, e1 := regexp.Compile("{{{([^}]*)}}}")
if e1 != nil {
panic(fmt.Sprintf("BUG: %s", e1.Error()))
}
yamlStr = rVariable.ReplaceAllString(yamlStr, "',variables('$1'),'")
// replace the internal values
publicIPStr := getDCOSCustomDataPublicIPStr(orchestratorType, masterCount)
yamlStr = strings.Replace(yamlStr, "DCOSCUSTOMDATAPUBLICIPSTR", publicIPStr, -1)
return yamlStr
}
func getDCOSCustomDataTemplate(orchestratorType, orchestratorVersion string) string {
switch orchestratorType {
case api.DCOS:
switch orchestratorVersion {
case common.DCOSVersion1Dot8Dot8:
return dcosCustomData188
case common.DCOSVersion1Dot9Dot0:
return dcosCustomData190
case common.DCOSVersion1Dot9Dot8:
return dcosCustomData198
case common.DCOSVersion1Dot10Dot0:
return dcosCustomData110
case common.DCOSVersion1Dot11Dot0:
return dcos2CustomData1110
case common.DCOSVersion1Dot11Dot2:
return dcos2CustomData1112
}
default:
// it is a bug to get here
panic(fmt.Sprintf("BUG: invalid orchestrator %s", orchestratorType))
}
return ""
}
func getDCOSMasterCustomData(cs *api.ContainerService) string {
masterAttributeContents := getDCOSMasterCustomNodeLabels()
masterPreprovisionExtension := ""
if cs.Properties.MasterProfile.PreprovisionExtension != nil {
masterPreprovisionExtension += "\n"
masterPreprovisionExtension += makeMasterExtensionScriptCommands(cs)
}
var bootstrapIP string
if cs.Properties.OrchestratorProfile.DcosConfig != nil && cs.Properties.OrchestratorProfile.DcosConfig.BootstrapProfile != nil {
bootstrapIP = cs.Properties.OrchestratorProfile.DcosConfig.BootstrapProfile.StaticIP
}
str := getSingleLineDCOSCustomData(
cs.Properties.OrchestratorProfile.OrchestratorType,
getDCOSCustomDataTemplate(cs.Properties.OrchestratorProfile.OrchestratorType, cs.Properties.OrchestratorProfile.OrchestratorVersion),
cs.Properties.MasterProfile.Count,
map[string]string{
"PROVISION_SOURCE_STR": getDCOSProvisionScript(dcosProvisionSource),
"PROVISION_STR": getDCOSMasterProvisionScript(cs.Properties.OrchestratorProfile, bootstrapIP),
"ATTRIBUTES_STR": masterAttributeContents,
"PREPROVISION_EXTENSION": masterPreprovisionExtension,
"ROLENAME": "master"})
return fmt.Sprintf("\"customData\": \"[base64(concat('#cloud-config\\n\\n', '%s'))]\",", str)
}
func getDCOSAgentProvisionScript(profile *api.AgentPoolProfile, orchProfile *api.OrchestratorProfile, bootstrapIP string) string {
// add the provision script
scriptname := dcos2Provision
if orchProfile.DcosConfig == nil || orchProfile.DcosConfig.BootstrapProfile == nil {
if profile.OSType == api.Windows {
scriptname = dcosWindowsProvision
} else {
scriptname = dcosProvision
}
}
bp, err := Asset(scriptname)
if err != nil {
panic(fmt.Sprintf("BUG: %s", err.Error()))
}
provisionScript := string(bp)
if strings.Contains(provisionScript, "'") {
panic(fmt.Sprintf("BUG: %s may not contain character '", dcosProvision))
}
// the embedded roleFileContents
var roleFileContents string
if len(profile.Ports) > 0 {
// public agents
roleFileContents = "touch /etc/mesosphere/roles/slave_public"
} else {
roleFileContents = "touch /etc/mesosphere/roles/slave"
}
provisionScript = strings.Replace(provisionScript, "ROLESFILECONTENTS", roleFileContents, -1)
provisionScript = strings.Replace(provisionScript, "BOOTSTRAP_IP", bootstrapIP, -1)
var b bytes.Buffer
b.WriteString(provisionScript)
b.WriteString("\n")
if len(orchProfile.DcosConfig.Registry) == 0 {
b.WriteString("rm /etc/docker.tar.gz\n")
}
return strings.Replace(strings.Replace(b.String(), "\r\n", "\n", -1), "\n", "\n\n ", -1)
}
func getDCOSAgentCustomData(cs *api.ContainerService, profile *api.AgentPoolProfile) string {
attributeContents := getDCOSAgentCustomNodeLabels(profile)
agentPreprovisionExtension := ""
if profile.PreprovisionExtension != nil {
agentPreprovisionExtension += "\n"
agentPreprovisionExtension += makeAgentExtensionScriptCommands(cs, profile)
}
var agentRoleName, bootstrapIP string
if len(profile.Ports) > 0 {
agentRoleName = "slave_public"
} else {
agentRoleName = "slave"
}
if cs.Properties.OrchestratorProfile.DcosConfig != nil && cs.Properties.OrchestratorProfile.DcosConfig.BootstrapProfile != nil {
bootstrapIP = cs.Properties.OrchestratorProfile.DcosConfig.BootstrapProfile.StaticIP
}
str := getSingleLineDCOSCustomData(
cs.Properties.OrchestratorProfile.OrchestratorType,
getDCOSCustomDataTemplate(cs.Properties.OrchestratorProfile.OrchestratorType, cs.Properties.OrchestratorProfile.OrchestratorVersion),
cs.Properties.MasterProfile.Count,
map[string]string{
"PROVISION_SOURCE_STR": getDCOSProvisionScript(dcosProvisionSource),
"PROVISION_STR": getDCOSAgentProvisionScript(profile, cs.Properties.OrchestratorProfile, bootstrapIP),
"ATTRIBUTES_STR": attributeContents,
"PREPROVISION_EXTENSION": agentPreprovisionExtension,
"ROLENAME": agentRoleName})
return fmt.Sprintf("\"customData\": \"[base64(concat('#cloud-config\\n\\n', '%s'))]\",", str)
}
func getDCOSWindowsAgentCustomData(cs *api.ContainerService, profile *api.AgentPoolProfile) string {
agentPreprovisionExtension := ""
if profile.PreprovisionExtension != nil {
agentPreprovisionExtension += "\n"
agentPreprovisionExtension += makeAgentExtensionScriptCommands(cs, profile)
}
b, err := Asset(dcosWindowsProvision)
if err != nil {
// this should never happen and this is a bug
panic(fmt.Sprintf("BUG: %s", err.Error()))
}
// translate the parameters
csStr := string(b)
csStr = strings.Replace(csStr, "PREPROVISION_EXTENSION", agentPreprovisionExtension, -1)
csStr = strings.Replace(csStr, "\r\n", "\n", -1)
str := getBase64EncodedGzippedCustomScriptFromStr(csStr)
return fmt.Sprintf("\"customData\": \"%s\"", str)
}
// getLinkedTemplatesForExtensions returns the
// Microsoft.Resources/deployments for each extension
func getLinkedTemplatesForExtensions(properties *api.Properties) string {
var result string
extensions := properties.ExtensionProfiles
masterProfileExtensions := properties.MasterProfile.Extensions
orchestratorType := properties.OrchestratorProfile.OrchestratorType
for err, extensionProfile := range extensions {
_ = err
masterOptedForExtension, singleOrAll := validateProfileOptedForExtension(extensionProfile.Name, masterProfileExtensions)
if masterOptedForExtension {
result += ","
dta, e := getMasterLinkedTemplateText(orchestratorType, extensionProfile, singleOrAll)
if e != nil {
fmt.Println(e.Error())
return ""
}
result += dta
}
for _, agentPoolProfile := range properties.AgentPoolProfiles {
poolProfileExtensions := agentPoolProfile.Extensions
poolOptedForExtension, singleOrAll := validateProfileOptedForExtension(extensionProfile.Name, poolProfileExtensions)
if poolOptedForExtension {
result += ","
dta, e := getAgentPoolLinkedTemplateText(agentPoolProfile, orchestratorType, extensionProfile, singleOrAll)
if e != nil {
fmt.Println(e.Error())
return ""
}
result += dta
}
}
}
return result
}
| {
panic(fmt.Sprintf("BUG: %s", err.Error()))
} | conditional_block |
engine_dcos.go | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT license.
package engine
import (
"bytes"
"fmt"
"regexp"
"strconv"
"strings"
"github.com/Azure/aks-engine/pkg/api"
"github.com/Azure/aks-engine/pkg/api/common"
"github.com/ghodss/yaml"
)
func getDCOSProvisionScript(script string) string {
// add the provision script
bp, err := Asset(script)
if err != nil {
panic(fmt.Sprintf("BUG: %s", err.Error()))
}
provisionScript := string(bp)
if strings.Contains(provisionScript, "'") {
panic(fmt.Sprintf("BUG: %s may not contain character '", script))
}
return strings.Replace(strings.Replace(provisionScript, "\r\n", "\n", -1), "\n", "\n\n ", -1)
}
func getDCOSBootstrapCustomData(p *api.Properties) string {
masterIPList, err := generateConsecutiveIPsList(p.MasterProfile.Count, p.MasterProfile.FirstConsecutiveStaticIP)
if err != nil {
return ""
}
for i, v := range masterIPList {
masterIPList[i] = " - " + v
}
str := getSingleLineDCOSCustomData(
p.OrchestratorProfile.OrchestratorType,
dcos2BootstrapCustomdata, 0,
map[string]string{
"PROVISION_SOURCE_STR": getDCOSProvisionScript(dcosProvisionSource),
"PROVISION_STR": getDCOSProvisionScript(dcos2BootstrapProvision),
"MASTER_IP_LIST": strings.Join(masterIPList, "\n"),
"BOOTSTRAP_IP": p.OrchestratorProfile.DcosConfig.BootstrapProfile.StaticIP,
"BOOTSTRAP_OAUTH_ENABLED": strconv.FormatBool(p.OrchestratorProfile.DcosConfig.BootstrapProfile.OAuthEnabled)})
return fmt.Sprintf("\"customData\": \"[base64(concat('#cloud-config\\n\\n', '%s'))]\",", str)
}
func getDCOSCustomDataPublicIPStr(orchestratorType string, masterCount int) string {
if orchestratorType == api.DCOS {
var buf bytes.Buffer
for i := 0; i < masterCount; i++ {
buf.WriteString(fmt.Sprintf("reference(variables('masterVMNic')[%d]).ipConfigurations[0].properties.privateIPAddress,", i))
if i < (masterCount - 1) {
buf.WriteString(`'\\\", \\\"', `)
}
}
return buf.String()
}
return ""
}
// getSingleLineForTemplate returns the file as a single line for embedding in an arm template
func getSingleLineDCOSCustomData(orchestratorType, yamlFilename string, masterCount int, replaceMap map[string]string) string {
b, err := Asset(yamlFilename)
if err != nil {
panic(fmt.Sprintf("BUG getting yaml custom data file: %s", err.Error()))
}
yamlStr := string(b)
for k, v := range replaceMap {
yamlStr = strings.Replace(yamlStr, k, v, -1)
}
// convert to json
jsonBytes, err4 := yaml.YAMLToJSON([]byte(yamlStr))
if err4 != nil { | yamlStr = string(jsonBytes)
// convert to one line
yamlStr = strings.Replace(yamlStr, "\\", "\\\\", -1)
yamlStr = strings.Replace(yamlStr, "\r\n", "\\n", -1)
yamlStr = strings.Replace(yamlStr, "\n", "\\n", -1)
yamlStr = strings.Replace(yamlStr, "\"", "\\\"", -1)
// variable replacement
rVariable, e1 := regexp.Compile("{{{([^}]*)}}}")
if e1 != nil {
panic(fmt.Sprintf("BUG: %s", e1.Error()))
}
yamlStr = rVariable.ReplaceAllString(yamlStr, "',variables('$1'),'")
// replace the internal values
publicIPStr := getDCOSCustomDataPublicIPStr(orchestratorType, masterCount)
yamlStr = strings.Replace(yamlStr, "DCOSCUSTOMDATAPUBLICIPSTR", publicIPStr, -1)
return yamlStr
}
func getDCOSCustomDataTemplate(orchestratorType, orchestratorVersion string) string {
switch orchestratorType {
case api.DCOS:
switch orchestratorVersion {
case common.DCOSVersion1Dot8Dot8:
return dcosCustomData188
case common.DCOSVersion1Dot9Dot0:
return dcosCustomData190
case common.DCOSVersion1Dot9Dot8:
return dcosCustomData198
case common.DCOSVersion1Dot10Dot0:
return dcosCustomData110
case common.DCOSVersion1Dot11Dot0:
return dcos2CustomData1110
case common.DCOSVersion1Dot11Dot2:
return dcos2CustomData1112
}
default:
// it is a bug to get here
panic(fmt.Sprintf("BUG: invalid orchestrator %s", orchestratorType))
}
return ""
}
func getDCOSMasterCustomData(cs *api.ContainerService) string {
masterAttributeContents := getDCOSMasterCustomNodeLabels()
masterPreprovisionExtension := ""
if cs.Properties.MasterProfile.PreprovisionExtension != nil {
masterPreprovisionExtension += "\n"
masterPreprovisionExtension += makeMasterExtensionScriptCommands(cs)
}
var bootstrapIP string
if cs.Properties.OrchestratorProfile.DcosConfig != nil && cs.Properties.OrchestratorProfile.DcosConfig.BootstrapProfile != nil {
bootstrapIP = cs.Properties.OrchestratorProfile.DcosConfig.BootstrapProfile.StaticIP
}
str := getSingleLineDCOSCustomData(
cs.Properties.OrchestratorProfile.OrchestratorType,
getDCOSCustomDataTemplate(cs.Properties.OrchestratorProfile.OrchestratorType, cs.Properties.OrchestratorProfile.OrchestratorVersion),
cs.Properties.MasterProfile.Count,
map[string]string{
"PROVISION_SOURCE_STR": getDCOSProvisionScript(dcosProvisionSource),
"PROVISION_STR": getDCOSMasterProvisionScript(cs.Properties.OrchestratorProfile, bootstrapIP),
"ATTRIBUTES_STR": masterAttributeContents,
"PREPROVISION_EXTENSION": masterPreprovisionExtension,
"ROLENAME": "master"})
return fmt.Sprintf("\"customData\": \"[base64(concat('#cloud-config\\n\\n', '%s'))]\",", str)
}
func getDCOSAgentProvisionScript(profile *api.AgentPoolProfile, orchProfile *api.OrchestratorProfile, bootstrapIP string) string {
// add the provision script
scriptname := dcos2Provision
if orchProfile.DcosConfig == nil || orchProfile.DcosConfig.BootstrapProfile == nil {
if profile.OSType == api.Windows {
scriptname = dcosWindowsProvision
} else {
scriptname = dcosProvision
}
}
bp, err := Asset(scriptname)
if err != nil {
panic(fmt.Sprintf("BUG: %s", err.Error()))
}
provisionScript := string(bp)
if strings.Contains(provisionScript, "'") {
panic(fmt.Sprintf("BUG: %s may not contain character '", dcosProvision))
}
// the embedded roleFileContents
var roleFileContents string
if len(profile.Ports) > 0 {
// public agents
roleFileContents = "touch /etc/mesosphere/roles/slave_public"
} else {
roleFileContents = "touch /etc/mesosphere/roles/slave"
}
provisionScript = strings.Replace(provisionScript, "ROLESFILECONTENTS", roleFileContents, -1)
provisionScript = strings.Replace(provisionScript, "BOOTSTRAP_IP", bootstrapIP, -1)
var b bytes.Buffer
b.WriteString(provisionScript)
b.WriteString("\n")
if len(orchProfile.DcosConfig.Registry) == 0 {
b.WriteString("rm /etc/docker.tar.gz\n")
}
return strings.Replace(strings.Replace(b.String(), "\r\n", "\n", -1), "\n", "\n\n ", -1)
}
func getDCOSAgentCustomData(cs *api.ContainerService, profile *api.AgentPoolProfile) string {
attributeContents := getDCOSAgentCustomNodeLabels(profile)
agentPreprovisionExtension := ""
if profile.PreprovisionExtension != nil {
agentPreprovisionExtension += "\n"
agentPreprovisionExtension += makeAgentExtensionScriptCommands(cs, profile)
}
var agentRoleName, bootstrapIP string
if len(profile.Ports) > 0 {
agentRoleName = "slave_public"
} else {
agentRoleName = "slave"
}
if cs.Properties.OrchestratorProfile.DcosConfig != nil && cs.Properties.OrchestratorProfile.DcosConfig.BootstrapProfile != nil {
bootstrapIP = cs.Properties.OrchestratorProfile.DcosConfig.BootstrapProfile.StaticIP
}
str := getSingleLineDCOSCustomData(
cs.Properties.OrchestratorProfile.OrchestratorType,
getDCOSCustomDataTemplate(cs.Properties.OrchestratorProfile.OrchestratorType, cs.Properties.OrchestratorProfile.OrchestratorVersion),
cs.Properties.MasterProfile.Count,
map[string]string{
"PROVISION_SOURCE_STR": getDCOSProvisionScript(dcosProvisionSource),
"PROVISION_STR": getDCOSAgentProvisionScript(profile, cs.Properties.OrchestratorProfile, bootstrapIP),
"ATTRIBUTES_STR": attributeContents,
"PREPROVISION_EXTENSION": agentPreprovisionExtension,
"ROLENAME": agentRoleName})
return fmt.Sprintf("\"customData\": \"[base64(concat('#cloud-config\\n\\n', '%s'))]\",", str)
}
func getDCOSWindowsAgentCustomData(cs *api.ContainerService, profile *api.AgentPoolProfile) string {
agentPreprovisionExtension := ""
if profile.PreprovisionExtension != nil {
agentPreprovisionExtension += "\n"
agentPreprovisionExtension += makeAgentExtensionScriptCommands(cs, profile)
}
b, err := Asset(dcosWindowsProvision)
if err != nil {
// this should never happen and this is a bug
panic(fmt.Sprintf("BUG: %s", err.Error()))
}
// translate the parameters
csStr := string(b)
csStr = strings.Replace(csStr, "PREPROVISION_EXTENSION", agentPreprovisionExtension, -1)
csStr = strings.Replace(csStr, "\r\n", "\n", -1)
str := getBase64EncodedGzippedCustomScriptFromStr(csStr)
return fmt.Sprintf("\"customData\": \"%s\"", str)
}
// getLinkedTemplatesForExtensions returns the
// Microsoft.Resources/deployments for each extension
func getLinkedTemplatesForExtensions(properties *api.Properties) string {
var result string
extensions := properties.ExtensionProfiles
masterProfileExtensions := properties.MasterProfile.Extensions
orchestratorType := properties.OrchestratorProfile.OrchestratorType
for err, extensionProfile := range extensions {
_ = err
masterOptedForExtension, singleOrAll := validateProfileOptedForExtension(extensionProfile.Name, masterProfileExtensions)
if masterOptedForExtension {
result += ","
dta, e := getMasterLinkedTemplateText(orchestratorType, extensionProfile, singleOrAll)
if e != nil {
fmt.Println(e.Error())
return ""
}
result += dta
}
for _, agentPoolProfile := range properties.AgentPoolProfiles {
poolProfileExtensions := agentPoolProfile.Extensions
poolOptedForExtension, singleOrAll := validateProfileOptedForExtension(extensionProfile.Name, poolProfileExtensions)
if poolOptedForExtension {
result += ","
dta, e := getAgentPoolLinkedTemplateText(agentPoolProfile, orchestratorType, extensionProfile, singleOrAll)
if e != nil {
fmt.Println(e.Error())
return ""
}
result += dta
}
}
}
return result
} | panic(fmt.Sprintf("BUG: %s", err4.Error()))
} | random_line_split |
sim_controller.py | #!/usr/bin/env python
import numpy as np
import time, random
import sys, os, struct, socket
import psycopg2
import test_coords
import alex_random
import new_sim_utils
import sdr_kml_writer
from geo_utils import geo_utils
from beacon import beacon
from sim_data import data_utils
ENABLE_JITTER = False
ENABLE_DROPPED_PACKETS = False
ENABLE_LOCATION_HISTORY = True
ENABLE_BEACON_DELAY = False
class simulation:
def __init__(self):
"""__init__"""
self.geo_utils = geo_utils()
self.DEBUG = True
self.rx_number = 4
self.packet_number = 0
self.iterator = 1
self.packet_error_rate = 0.1
self.all_locations = []
def init_sim(self,n):
"""
initialize simulation for n receivers.
"""
self.beacon = beacon(ENABLE_BEACON_DELAY)
self.data = data_utils(n)
random.seed()
if n < 3:
print 'Number of receivers %i is less than three.' %n
print 'Simulation controller will not run.'
print 'Now exiting.'
sys.exit()
self.data.set_rx_number(n)
tx_loc = test_coords.get_tx_coords()
self.data.set_tx_location(tx_loc)
# self.data.reset_rx_location()
for i in range(n):
rx_loc = alex_random.get_random_coord()
if self.DEBUG:
print "\n\n\n\n\n\nstore location: ", rx_loc
print '\n\n\n\n\n\n'
self.data.set_rx_location(i,rx_loc)
tof = self.geo_utils.time_of_flight(rx_loc,tx_loc)
self.data.set_rx_time_delay(tof)
id = i+1
self.data.set_rx_team_id(id)
if self.DEBUG:
print 'tx_loc: ', tx_loc
print 'rx_loc: ', rx_loc
print 'time: ', repr(tof)
print 'id: ', id
def rx_beacon_packet(self):
"""
receive a single beacon packet. this will then be copied n times.
this tries to ensure clock synchronization across receivers.
"""
self.beacon.make_packet()
rx_packet = self.beacon.tx_packet()
rx_time = np.float128('%.20f'%(time.time()))
if self.DEBUG:
print 'rx_time: ', repr(rx_time)
self.data.set_timestamp_base(rx_time)
self.data.set_beacon_packet(rx_packet)
def receiver_chain(self,h):
"""
simulate receiver chain for n repeaters
"""
self.host = h
n = self.data.get_rx_number()
beacon_packet = self.data.get_beacon_packet()
time_base = self.data.get_timestamp_base()
# lists containing data for all current teams
team_id = self.data.get_rx_team_id()
location = self.data.get_rx_location()
if ENABLE_LOCATION_HISTORY:
self.record_location_history(location)
tof = self.data.get_rx_time_delay()
if self.DEBUG:
print "\n\n\n\n\n\nretrieve location: ", location
print ''
print "type(tof): ", type(tof)
conn = psycopg2.connect(host = self.host,
user = "sdrc_user",
password = "sdrc_pass",
database = "sdrc_db")
cur = conn.cursor()
for i in range(n):
f = open('data_in.data', 'a')
(rx_pktno,) = struct.unpack('!H', beacon_packet[0:2])
(beacon_ID,) = struct.unpack('!H', beacon_packet[2:4])
# packet number
payload1 = struct.pack('!H', self.packet_number & 0xffff)
f.write(str(self.packet_number) + ';')
# team id
ident = team_id[i]
payload2 = struct.pack('!H', ident & 0xffff)
f.write(str(ident) + ';')
# location
if (self.iterator == 1):
loc = location[i]
else:
# old_loc = location[i]
# loc = alex_random.random_move(old_loc)
loc = alex_random.get_random_coord()
self.data.set_rx_location(i,loc)
f.write(str(loc)+';')
self.iterator += 1
payload3 = new_sim_utils.pack_loc(loc)
# toa
t = tof[i]
toa = time_base + t
# if (ENABLE_JITTER):
# jitter = self.random_timing_jitter()
# toa = toa+jitter
# else:
# pass
if self.DEBUG:
print "t = tof[i]: ", repr(t)
print "type(t): ", type (t)
print "toa = time_base + t: ", repr(toa)
print "type(toa): ", type(toa)
payload4 = new_sim_utils.pack_time(toa)
f.write(repr(toa)+';')
# beacon payload
payload5 = struct.pack('!H', rx_pktno & 0xffff)
f.write(str(rx_pktno) + ';')
payload6 = struct.pack('!H', beacon_ID & 0xffff)
f.write(str(beacon_ID) + '\n')
f.close()
# check if packet dropped
drop = self.drop_packet()
# this if evaluates true even if drop == False
# if (ENABLE_DROPPED_PACKETS and drop): # if drop == 'True'
# print 'ENABLE_DROPPED_PACKETS ', ENABLE_DROPPED_PACKETS
# print 'drop ', drop
# print (ENABLE_DROPPED_PACKETS and drop)
# print 'packet dropped'
# payload = ''
if ENABLE_DROPPED_PACKETS:
print 'ENABLE_DROPPED_PACKETS ', ENABLE_DROPPED_PACKETS
print 'drop ', drop
if drop: # if drop == 'True'
print 'drop ', drop
print 'packet dropped'
payload = ''
else: # if drop == 'False'
payload = (payload1 + payload2 +
payload3 + payload4 +
payload5 + payload6)
else: # if drop == 'False'
payload = (payload1 + payload2 +
payload3 + payload4 +
payload5 + payload6)
print "len(payload): ", len(payload)
cur.execute("INSERT INTO blob_table (field_1) VALUES (%s)", (psycopg2.Binary(payload),))
conn.commit()
cur.close()
conn.close()
self.packet_number += 1
def record_location_history(self,loc):
self.all_locations.append(loc)
# if self.DEBUG:
# print 'all locations:\n', self.all_locations
# def write_location_history(self): | # # f = open('location_history','w+')
# for i in self.all_locations:
# print repr(i[0][0][0]), repr(i[0][0][1]))
# # f.write(repr(i)+'\n')
# print '\n\n\n\n\n\n\n'
# print len(i)
# # f.close()
# kml_write = sdr_kml_writer.kml_writer()
# for i in range(0,len(x_results)):
# coord = str(x_results[i])+','+str(y_results[i])
# kml_write.add_placemark('','',coord)
# kml_write.write_to_file('geoloc_kml_file.kml')
def random_timing_jitter(self):
r = random.uniform(0,1)
jitter = r*1e-9
if self.DEBUG:
print 'Random timing jitter %f seconds' %(jitter)
return jitter
def drop_packet(self):
r = random.uniform(0,1)
print 'random value: ', r
print 'error rate: ', self.packet_error_rate
if (r > self.packet_error_rate):
drop = False
else:
drop = True
if self.DEBUG:
print 'Probability of dropped packet: ', self.packet_error_rate
print 'Packet dropped? ', drop
return drop
if __name__=='__main__':
from optparse import OptionParser
usage = "usage: %prog [options] arg"
parser = OptionParser(usage=usage)
parser.add_option("", "--host", type="string", default="128.173.90.68",
help="database host in dotted decimal form [default=%default]")
parser.add_option("-r", "--radios", type="int", default="3",
help="number of field radios to simulate [default=%default]")
parser.add_option("-i", "--iterations", type="int", default="10",
help="number of times to run simulation [default=%default]")
# parser.add_option("-d", "--drop", action="store_true", default=False,
# help="simlulate dropped packets [default=%default]")
# parser.add_option("-j", "--jitter", type="store_true", default=False,
# help="simulate clock jitter, drift... [default=%default]")
(options, args) = parser.parse_args()
main = simulation()
main.init_sim(options.radios)
for i in range(options.iterations):
main.rx_beacon_packet()
main.receiver_chain(options.host)
# main.write_location_history()
# don't use, adbapi can't handle too many db connections...
# #self.data.set_rpt_packet(payload)
# sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# sys.stdout.write("sock.connect((HOST, PORT)) ...")
# sock.connect((HOST, PORT))
# sys.stdout.write(" Done\n")
# sys.stdout.write("sock.send...")
# sock.send('%s\r\n' % payload)
# sys.stdout.write(" Done\n")
# sock.close()
# # don't use if using sockets above
# def write_to_db(self):
# data = self.data.get_rpt_packet()
# print 'conn = MySQLdb.connect'
# db = MySQLdb.connect (host = "localhost",
# user = "sdrc_user",
# passwd = "sdrc_pass",
# db = "test01")
# print 'cursor = conn.cursor ()'
# cursor = db.cursor ()
# table = 'test01_table'
# fields = '(rpt_pkt_num, rpt_team_id, rpt_location, rpt_timestamp, beacon_id, beacon_pkt_num)'
# # reset database
# cursor.execute("""DELETE FROM %s""" %(table,))
# for i in range(len(data)):
# sql = """ """
# print "loop: ",i
# payload = data[i]
# (rpt_packet_num,) = struct.unpack('!H',payload[0:2])
# (rpt_team_id,) = struct.unpack('!H',payload[2:4])
# rpt_location = new_sim_utils.unpack_loc(payload[4:24])
# rpt_timestamp = new_sim_utils.unpack_time(payload[24:36])
# (beacon_packet_num,) = struct.unpack('!H',payload[36:38])
# (beacon_id,) = struct.unpack('!H',payload[38:40])
# print type(beacon_id)
# sql = """INSERT INTO %s %s VALUES (\'%d\', \'%d\', \'%s\', \'%s\', \'%d\', \'%d\')""" %(table,fields,rpt_packet_num,
# rpt_team_id,str(rpt_location),
# repr(rpt_timestamp),beacon_id,
# beacon_packet_num)
# print sql
# print 'cursor.execute(sql)'
# cursor.execute(sql)
# print 'db.commit()'
# db.commit()
# print 'db.close()'
# db.close()
# def send_rpt_packet(self):
# """
# transmit repeater packets
# """
# pass
# def run(self):
# """
# run.
# """
# pass
# def work(self):
# """
# work function.
# """
# pass
# def __str__(self):
# """
# Print data in class: simulation
# """
# string = '\n########\nSimulation START\n'
# string += 'tx_location: ' + repr(self.data.get_tx_location()) + '\n'
# string += 'rx_location: ' + repr(self.data.get_rx_location()) + '\n'
# string += 'rx_time_delay: ' + repr(self.data.get_rx_time_delay()) + '\n'
# string += 'rx_team_id: ' + str(self.data.get_rx_team_id()) + '\n'
# string += 'rpt_packet: ' + str(self.data.get_rpt_packet())
# string += '########\nSimulation END\n'
# return string
# print main
# main.write_to_db()
# # not sure if we need this here
# dist = self.geo_utils.distance(__tx_loc,__rx_loc)
# self.__set_rx_distance(__dist)
# __power = new_sim_utils.power(__dist)
# self.set_rx_power(__power)
# def add_receiver(self):
# """
# add additional receiver to simulation
# """
# pass
# # do we really need this? don't think so...
# def copy_beacon_packet(self):
# """
# make n copies of beacon packet
# """
# num = self.get_rx_number()
# beacon_packet = self.get_beacon_packet()
# for i in range(__num):
# self.set_n_beacon_packet(__beacon_packet)
# Prepare SQL query to INSERT a record into the database.
# try:
# Execute the SQL command
# Commit your changes in the database
# except:
# # Rollback in case there is any error
# print 'db.rollback()'
# db.rollback()
# # disconnect from server
# cursor = db.cursor ()
# table = 'blob_table'
# fields = '(field_1)'
# sql = """INSERT INTO %s %s VALUES (\'%\r')""" %(table,fields,payload)
# print str(sql)
# print 'cursor.execute(sql)'
# cursor.execute(sql)
# print 'db.commit()'
# db.commit()
# db.close() | random_line_split | |
sim_controller.py | #!/usr/bin/env python
import numpy as np
import time, random
import sys, os, struct, socket
import psycopg2
import test_coords
import alex_random
import new_sim_utils
import sdr_kml_writer
from geo_utils import geo_utils
from beacon import beacon
from sim_data import data_utils
ENABLE_JITTER = False
ENABLE_DROPPED_PACKETS = False
ENABLE_LOCATION_HISTORY = True
ENABLE_BEACON_DELAY = False
class simulation:
def __init__(self):
|
def init_sim(self,n):
"""
initialize simulation for n receivers.
"""
self.beacon = beacon(ENABLE_BEACON_DELAY)
self.data = data_utils(n)
random.seed()
if n < 3:
print 'Number of receivers %i is less than three.' %n
print 'Simulation controller will not run.'
print 'Now exiting.'
sys.exit()
self.data.set_rx_number(n)
tx_loc = test_coords.get_tx_coords()
self.data.set_tx_location(tx_loc)
# self.data.reset_rx_location()
for i in range(n):
rx_loc = alex_random.get_random_coord()
if self.DEBUG:
print "\n\n\n\n\n\nstore location: ", rx_loc
print '\n\n\n\n\n\n'
self.data.set_rx_location(i,rx_loc)
tof = self.geo_utils.time_of_flight(rx_loc,tx_loc)
self.data.set_rx_time_delay(tof)
id = i+1
self.data.set_rx_team_id(id)
if self.DEBUG:
print 'tx_loc: ', tx_loc
print 'rx_loc: ', rx_loc
print 'time: ', repr(tof)
print 'id: ', id
def rx_beacon_packet(self):
"""
receive a single beacon packet. this will then be copied n times.
this tries to ensure clock synchronization across receivers.
"""
self.beacon.make_packet()
rx_packet = self.beacon.tx_packet()
rx_time = np.float128('%.20f'%(time.time()))
if self.DEBUG:
print 'rx_time: ', repr(rx_time)
self.data.set_timestamp_base(rx_time)
self.data.set_beacon_packet(rx_packet)
def receiver_chain(self,h):
"""
simulate receiver chain for n repeaters
"""
self.host = h
n = self.data.get_rx_number()
beacon_packet = self.data.get_beacon_packet()
time_base = self.data.get_timestamp_base()
# lists containing data for all current teams
team_id = self.data.get_rx_team_id()
location = self.data.get_rx_location()
if ENABLE_LOCATION_HISTORY:
self.record_location_history(location)
tof = self.data.get_rx_time_delay()
if self.DEBUG:
print "\n\n\n\n\n\nretrieve location: ", location
print ''
print "type(tof): ", type(tof)
conn = psycopg2.connect(host = self.host,
user = "sdrc_user",
password = "sdrc_pass",
database = "sdrc_db")
cur = conn.cursor()
for i in range(n):
f = open('data_in.data', 'a')
(rx_pktno,) = struct.unpack('!H', beacon_packet[0:2])
(beacon_ID,) = struct.unpack('!H', beacon_packet[2:4])
# packet number
payload1 = struct.pack('!H', self.packet_number & 0xffff)
f.write(str(self.packet_number) + ';')
# team id
ident = team_id[i]
payload2 = struct.pack('!H', ident & 0xffff)
f.write(str(ident) + ';')
# location
if (self.iterator == 1):
loc = location[i]
else:
# old_loc = location[i]
# loc = alex_random.random_move(old_loc)
loc = alex_random.get_random_coord()
self.data.set_rx_location(i,loc)
f.write(str(loc)+';')
self.iterator += 1
payload3 = new_sim_utils.pack_loc(loc)
# toa
t = tof[i]
toa = time_base + t
# if (ENABLE_JITTER):
# jitter = self.random_timing_jitter()
# toa = toa+jitter
# else:
# pass
if self.DEBUG:
print "t = tof[i]: ", repr(t)
print "type(t): ", type (t)
print "toa = time_base + t: ", repr(toa)
print "type(toa): ", type(toa)
payload4 = new_sim_utils.pack_time(toa)
f.write(repr(toa)+';')
# beacon payload
payload5 = struct.pack('!H', rx_pktno & 0xffff)
f.write(str(rx_pktno) + ';')
payload6 = struct.pack('!H', beacon_ID & 0xffff)
f.write(str(beacon_ID) + '\n')
f.close()
# check if packet dropped
drop = self.drop_packet()
# this if evaluates true even if drop == False
# if (ENABLE_DROPPED_PACKETS and drop): # if drop == 'True'
# print 'ENABLE_DROPPED_PACKETS ', ENABLE_DROPPED_PACKETS
# print 'drop ', drop
# print (ENABLE_DROPPED_PACKETS and drop)
# print 'packet dropped'
# payload = ''
if ENABLE_DROPPED_PACKETS:
print 'ENABLE_DROPPED_PACKETS ', ENABLE_DROPPED_PACKETS
print 'drop ', drop
if drop: # if drop == 'True'
print 'drop ', drop
print 'packet dropped'
payload = ''
else: # if drop == 'False'
payload = (payload1 + payload2 +
payload3 + payload4 +
payload5 + payload6)
else: # if drop == 'False'
payload = (payload1 + payload2 +
payload3 + payload4 +
payload5 + payload6)
print "len(payload): ", len(payload)
cur.execute("INSERT INTO blob_table (field_1) VALUES (%s)", (psycopg2.Binary(payload),))
conn.commit()
cur.close()
conn.close()
self.packet_number += 1
def record_location_history(self,loc):
self.all_locations.append(loc)
# if self.DEBUG:
# print 'all locations:\n', self.all_locations
# def write_location_history(self):
# # f = open('location_history','w+')
# for i in self.all_locations:
# print repr(i[0][0][0]), repr(i[0][0][1]))
# # f.write(repr(i)+'\n')
# print '\n\n\n\n\n\n\n'
# print len(i)
# # f.close()
# kml_write = sdr_kml_writer.kml_writer()
# for i in range(0,len(x_results)):
# coord = str(x_results[i])+','+str(y_results[i])
# kml_write.add_placemark('','',coord)
# kml_write.write_to_file('geoloc_kml_file.kml')
def random_timing_jitter(self):
r = random.uniform(0,1)
jitter = r*1e-9
if self.DEBUG:
print 'Random timing jitter %f seconds' %(jitter)
return jitter
def drop_packet(self):
r = random.uniform(0,1)
print 'random value: ', r
print 'error rate: ', self.packet_error_rate
if (r > self.packet_error_rate):
drop = False
else:
drop = True
if self.DEBUG:
print 'Probability of dropped packet: ', self.packet_error_rate
print 'Packet dropped? ', drop
return drop
if __name__=='__main__':
from optparse import OptionParser
usage = "usage: %prog [options] arg"
parser = OptionParser(usage=usage)
parser.add_option("", "--host", type="string", default="128.173.90.68",
help="database host in dotted decimal form [default=%default]")
parser.add_option("-r", "--radios", type="int", default="3",
help="number of field radios to simulate [default=%default]")
parser.add_option("-i", "--iterations", type="int", default="10",
help="number of times to run simulation [default=%default]")
# parser.add_option("-d", "--drop", action="store_true", default=False,
# help="simlulate dropped packets [default=%default]")
# parser.add_option("-j", "--jitter", type="store_true", default=False,
# help="simulate clock jitter, drift... [default=%default]")
(options, args) = parser.parse_args()
main = simulation()
main.init_sim(options.radios)
for i in range(options.iterations):
main.rx_beacon_packet()
main.receiver_chain(options.host)
# main.write_location_history()
# don't use, adbapi can't handle too many db connections...
# #self.data.set_rpt_packet(payload)
# sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# sys.stdout.write("sock.connect((HOST, PORT)) ...")
# sock.connect((HOST, PORT))
# sys.stdout.write(" Done\n")
# sys.stdout.write("sock.send...")
# sock.send('%s\r\n' % payload)
# sys.stdout.write(" Done\n")
# sock.close()
# # don't use if using sockets above
# def write_to_db(self):
# data = self.data.get_rpt_packet()
# print 'conn = MySQLdb.connect'
# db = MySQLdb.connect (host = "localhost",
# user = "sdrc_user",
# passwd = "sdrc_pass",
# db = "test01")
# print 'cursor = conn.cursor ()'
# cursor = db.cursor ()
# table = 'test01_table'
# fields = '(rpt_pkt_num, rpt_team_id, rpt_location, rpt_timestamp, beacon_id, beacon_pkt_num)'
# # reset database
# cursor.execute("""DELETE FROM %s""" %(table,))
# for i in range(len(data)):
# sql = """ """
# print "loop: ",i
# payload = data[i]
# (rpt_packet_num,) = struct.unpack('!H',payload[0:2])
# (rpt_team_id,) = struct.unpack('!H',payload[2:4])
# rpt_location = new_sim_utils.unpack_loc(payload[4:24])
# rpt_timestamp = new_sim_utils.unpack_time(payload[24:36])
# (beacon_packet_num,) = struct.unpack('!H',payload[36:38])
# (beacon_id,) = struct.unpack('!H',payload[38:40])
# print type(beacon_id)
# sql = """INSERT INTO %s %s VALUES (\'%d\', \'%d\', \'%s\', \'%s\', \'%d\', \'%d\')""" %(table,fields,rpt_packet_num,
# rpt_team_id,str(rpt_location),
# repr(rpt_timestamp),beacon_id,
# beacon_packet_num)
# print sql
# print 'cursor.execute(sql)'
# cursor.execute(sql)
# print 'db.commit()'
# db.commit()
# print 'db.close()'
# db.close()
# def send_rpt_packet(self):
# """
# transmit repeater packets
# """
# pass
# def run(self):
# """
# run.
# """
# pass
# def work(self):
# """
# work function.
# """
# pass
# def __str__(self):
# """
# Print data in class: simulation
# """
# string = '\n########\nSimulation START\n'
# string += 'tx_location: ' + repr(self.data.get_tx_location()) + '\n'
# string += 'rx_location: ' + repr(self.data.get_rx_location()) + '\n'
# string += 'rx_time_delay: ' + repr(self.data.get_rx_time_delay()) + '\n'
# string += 'rx_team_id: ' + str(self.data.get_rx_team_id()) + '\n'
# string += 'rpt_packet: ' + str(self.data.get_rpt_packet())
# string += '########\nSimulation END\n'
# return string
# print main
# main.write_to_db()
# # not sure if we need this here
# dist = self.geo_utils.distance(__tx_loc,__rx_loc)
# self.__set_rx_distance(__dist)
# __power = new_sim_utils.power(__dist)
# self.set_rx_power(__power)
# def add_receiver(self):
# """
# add additional receiver to simulation
# """
# pass
# # do we really need this? don't think so...
# def copy_beacon_packet(self):
# """
# make n copies of beacon packet
# """
# num = self.get_rx_number()
# beacon_packet = self.get_beacon_packet()
# for i in range(__num):
# self.set_n_beacon_packet(__beacon_packet)
# Prepare SQL query to INSERT a record into the database.
# try:
# Execute the SQL command
# Commit your changes in the database
# except:
# # Rollback in case there is any error
# print 'db.rollback()'
# db.rollback()
# # disconnect from server
# cursor = db.cursor ()
# table = 'blob_table'
# fields = '(field_1)'
# sql = """INSERT INTO %s %s VALUES (\'%\r')""" %(table,fields,payload)
# print str(sql)
# print 'cursor.execute(sql)'
# cursor.execute(sql)
# print 'db.commit()'
# db.commit()
# db.close()
| """__init__"""
self.geo_utils = geo_utils()
self.DEBUG = True
self.rx_number = 4
self.packet_number = 0
self.iterator = 1
self.packet_error_rate = 0.1
self.all_locations = [] | identifier_body |
sim_controller.py | #!/usr/bin/env python
import numpy as np
import time, random
import sys, os, struct, socket
import psycopg2
import test_coords
import alex_random
import new_sim_utils
import sdr_kml_writer
from geo_utils import geo_utils
from beacon import beacon
from sim_data import data_utils
ENABLE_JITTER = False
ENABLE_DROPPED_PACKETS = False
ENABLE_LOCATION_HISTORY = True
ENABLE_BEACON_DELAY = False
class simulation:
def __init__(self):
"""__init__"""
self.geo_utils = geo_utils()
self.DEBUG = True
self.rx_number = 4
self.packet_number = 0
self.iterator = 1
self.packet_error_rate = 0.1
self.all_locations = []
def init_sim(self,n):
"""
initialize simulation for n receivers.
"""
self.beacon = beacon(ENABLE_BEACON_DELAY)
self.data = data_utils(n)
random.seed()
if n < 3:
print 'Number of receivers %i is less than three.' %n
print 'Simulation controller will not run.'
print 'Now exiting.'
sys.exit()
self.data.set_rx_number(n)
tx_loc = test_coords.get_tx_coords()
self.data.set_tx_location(tx_loc)
# self.data.reset_rx_location()
for i in range(n):
rx_loc = alex_random.get_random_coord()
if self.DEBUG:
print "\n\n\n\n\n\nstore location: ", rx_loc
print '\n\n\n\n\n\n'
self.data.set_rx_location(i,rx_loc)
tof = self.geo_utils.time_of_flight(rx_loc,tx_loc)
self.data.set_rx_time_delay(tof)
id = i+1
self.data.set_rx_team_id(id)
if self.DEBUG:
print 'tx_loc: ', tx_loc
print 'rx_loc: ', rx_loc
print 'time: ', repr(tof)
print 'id: ', id
def | (self):
"""
receive a single beacon packet. this will then be copied n times.
this tries to ensure clock synchronization across receivers.
"""
self.beacon.make_packet()
rx_packet = self.beacon.tx_packet()
rx_time = np.float128('%.20f'%(time.time()))
if self.DEBUG:
print 'rx_time: ', repr(rx_time)
self.data.set_timestamp_base(rx_time)
self.data.set_beacon_packet(rx_packet)
def receiver_chain(self,h):
"""
simulate receiver chain for n repeaters
"""
self.host = h
n = self.data.get_rx_number()
beacon_packet = self.data.get_beacon_packet()
time_base = self.data.get_timestamp_base()
# lists containing data for all current teams
team_id = self.data.get_rx_team_id()
location = self.data.get_rx_location()
if ENABLE_LOCATION_HISTORY:
self.record_location_history(location)
tof = self.data.get_rx_time_delay()
if self.DEBUG:
print "\n\n\n\n\n\nretrieve location: ", location
print ''
print "type(tof): ", type(tof)
conn = psycopg2.connect(host = self.host,
user = "sdrc_user",
password = "sdrc_pass",
database = "sdrc_db")
cur = conn.cursor()
for i in range(n):
f = open('data_in.data', 'a')
(rx_pktno,) = struct.unpack('!H', beacon_packet[0:2])
(beacon_ID,) = struct.unpack('!H', beacon_packet[2:4])
# packet number
payload1 = struct.pack('!H', self.packet_number & 0xffff)
f.write(str(self.packet_number) + ';')
# team id
ident = team_id[i]
payload2 = struct.pack('!H', ident & 0xffff)
f.write(str(ident) + ';')
# location
if (self.iterator == 1):
loc = location[i]
else:
# old_loc = location[i]
# loc = alex_random.random_move(old_loc)
loc = alex_random.get_random_coord()
self.data.set_rx_location(i,loc)
f.write(str(loc)+';')
self.iterator += 1
payload3 = new_sim_utils.pack_loc(loc)
# toa
t = tof[i]
toa = time_base + t
# if (ENABLE_JITTER):
# jitter = self.random_timing_jitter()
# toa = toa+jitter
# else:
# pass
if self.DEBUG:
print "t = tof[i]: ", repr(t)
print "type(t): ", type (t)
print "toa = time_base + t: ", repr(toa)
print "type(toa): ", type(toa)
payload4 = new_sim_utils.pack_time(toa)
f.write(repr(toa)+';')
# beacon payload
payload5 = struct.pack('!H', rx_pktno & 0xffff)
f.write(str(rx_pktno) + ';')
payload6 = struct.pack('!H', beacon_ID & 0xffff)
f.write(str(beacon_ID) + '\n')
f.close()
# check if packet dropped
drop = self.drop_packet()
# this if evaluates true even if drop == False
# if (ENABLE_DROPPED_PACKETS and drop): # if drop == 'True'
# print 'ENABLE_DROPPED_PACKETS ', ENABLE_DROPPED_PACKETS
# print 'drop ', drop
# print (ENABLE_DROPPED_PACKETS and drop)
# print 'packet dropped'
# payload = ''
if ENABLE_DROPPED_PACKETS:
print 'ENABLE_DROPPED_PACKETS ', ENABLE_DROPPED_PACKETS
print 'drop ', drop
if drop: # if drop == 'True'
print 'drop ', drop
print 'packet dropped'
payload = ''
else: # if drop == 'False'
payload = (payload1 + payload2 +
payload3 + payload4 +
payload5 + payload6)
else: # if drop == 'False'
payload = (payload1 + payload2 +
payload3 + payload4 +
payload5 + payload6)
print "len(payload): ", len(payload)
cur.execute("INSERT INTO blob_table (field_1) VALUES (%s)", (psycopg2.Binary(payload),))
conn.commit()
cur.close()
conn.close()
self.packet_number += 1
def record_location_history(self,loc):
self.all_locations.append(loc)
# if self.DEBUG:
# print 'all locations:\n', self.all_locations
# def write_location_history(self):
# # f = open('location_history','w+')
# for i in self.all_locations:
# print repr(i[0][0][0]), repr(i[0][0][1]))
# # f.write(repr(i)+'\n')
# print '\n\n\n\n\n\n\n'
# print len(i)
# # f.close()
# kml_write = sdr_kml_writer.kml_writer()
# for i in range(0,len(x_results)):
# coord = str(x_results[i])+','+str(y_results[i])
# kml_write.add_placemark('','',coord)
# kml_write.write_to_file('geoloc_kml_file.kml')
def random_timing_jitter(self):
r = random.uniform(0,1)
jitter = r*1e-9
if self.DEBUG:
print 'Random timing jitter %f seconds' %(jitter)
return jitter
def drop_packet(self):
r = random.uniform(0,1)
print 'random value: ', r
print 'error rate: ', self.packet_error_rate
if (r > self.packet_error_rate):
drop = False
else:
drop = True
if self.DEBUG:
print 'Probability of dropped packet: ', self.packet_error_rate
print 'Packet dropped? ', drop
return drop
if __name__=='__main__':
from optparse import OptionParser
usage = "usage: %prog [options] arg"
parser = OptionParser(usage=usage)
parser.add_option("", "--host", type="string", default="128.173.90.68",
help="database host in dotted decimal form [default=%default]")
parser.add_option("-r", "--radios", type="int", default="3",
help="number of field radios to simulate [default=%default]")
parser.add_option("-i", "--iterations", type="int", default="10",
help="number of times to run simulation [default=%default]")
# parser.add_option("-d", "--drop", action="store_true", default=False,
# help="simlulate dropped packets [default=%default]")
# parser.add_option("-j", "--jitter", type="store_true", default=False,
# help="simulate clock jitter, drift... [default=%default]")
(options, args) = parser.parse_args()
main = simulation()
main.init_sim(options.radios)
for i in range(options.iterations):
main.rx_beacon_packet()
main.receiver_chain(options.host)
# main.write_location_history()
# don't use, adbapi can't handle too many db connections...
# #self.data.set_rpt_packet(payload)
# sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# sys.stdout.write("sock.connect((HOST, PORT)) ...")
# sock.connect((HOST, PORT))
# sys.stdout.write(" Done\n")
# sys.stdout.write("sock.send...")
# sock.send('%s\r\n' % payload)
# sys.stdout.write(" Done\n")
# sock.close()
# # don't use if using sockets above
# def write_to_db(self):
# data = self.data.get_rpt_packet()
# print 'conn = MySQLdb.connect'
# db = MySQLdb.connect (host = "localhost",
# user = "sdrc_user",
# passwd = "sdrc_pass",
# db = "test01")
# print 'cursor = conn.cursor ()'
# cursor = db.cursor ()
# table = 'test01_table'
# fields = '(rpt_pkt_num, rpt_team_id, rpt_location, rpt_timestamp, beacon_id, beacon_pkt_num)'
# # reset database
# cursor.execute("""DELETE FROM %s""" %(table,))
# for i in range(len(data)):
# sql = """ """
# print "loop: ",i
# payload = data[i]
# (rpt_packet_num,) = struct.unpack('!H',payload[0:2])
# (rpt_team_id,) = struct.unpack('!H',payload[2:4])
# rpt_location = new_sim_utils.unpack_loc(payload[4:24])
# rpt_timestamp = new_sim_utils.unpack_time(payload[24:36])
# (beacon_packet_num,) = struct.unpack('!H',payload[36:38])
# (beacon_id,) = struct.unpack('!H',payload[38:40])
# print type(beacon_id)
# sql = """INSERT INTO %s %s VALUES (\'%d\', \'%d\', \'%s\', \'%s\', \'%d\', \'%d\')""" %(table,fields,rpt_packet_num,
# rpt_team_id,str(rpt_location),
# repr(rpt_timestamp),beacon_id,
# beacon_packet_num)
# print sql
# print 'cursor.execute(sql)'
# cursor.execute(sql)
# print 'db.commit()'
# db.commit()
# print 'db.close()'
# db.close()
# def send_rpt_packet(self):
# """
# transmit repeater packets
# """
# pass
# def run(self):
# """
# run.
# """
# pass
# def work(self):
# """
# work function.
# """
# pass
# def __str__(self):
# """
# Print data in class: simulation
# """
# string = '\n########\nSimulation START\n'
# string += 'tx_location: ' + repr(self.data.get_tx_location()) + '\n'
# string += 'rx_location: ' + repr(self.data.get_rx_location()) + '\n'
# string += 'rx_time_delay: ' + repr(self.data.get_rx_time_delay()) + '\n'
# string += 'rx_team_id: ' + str(self.data.get_rx_team_id()) + '\n'
# string += 'rpt_packet: ' + str(self.data.get_rpt_packet())
# string += '########\nSimulation END\n'
# return string
# print main
# main.write_to_db()
# # not sure if we need this here
# dist = self.geo_utils.distance(__tx_loc,__rx_loc)
# self.__set_rx_distance(__dist)
# __power = new_sim_utils.power(__dist)
# self.set_rx_power(__power)
# def add_receiver(self):
# """
# add additional receiver to simulation
# """
# pass
# # do we really need this? don't think so...
# def copy_beacon_packet(self):
# """
# make n copies of beacon packet
# """
# num = self.get_rx_number()
# beacon_packet = self.get_beacon_packet()
# for i in range(__num):
# self.set_n_beacon_packet(__beacon_packet)
# Prepare SQL query to INSERT a record into the database.
# try:
# Execute the SQL command
# Commit your changes in the database
# except:
# # Rollback in case there is any error
# print 'db.rollback()'
# db.rollback()
# # disconnect from server
# cursor = db.cursor ()
# table = 'blob_table'
# fields = '(field_1)'
# sql = """INSERT INTO %s %s VALUES (\'%\r')""" %(table,fields,payload)
# print str(sql)
# print 'cursor.execute(sql)'
# cursor.execute(sql)
# print 'db.commit()'
# db.commit()
# db.close()
| rx_beacon_packet | identifier_name |
sim_controller.py | #!/usr/bin/env python
import numpy as np
import time, random
import sys, os, struct, socket
import psycopg2
import test_coords
import alex_random
import new_sim_utils
import sdr_kml_writer
from geo_utils import geo_utils
from beacon import beacon
from sim_data import data_utils
ENABLE_JITTER = False
ENABLE_DROPPED_PACKETS = False
ENABLE_LOCATION_HISTORY = True
ENABLE_BEACON_DELAY = False
class simulation:
def __init__(self):
"""__init__"""
self.geo_utils = geo_utils()
self.DEBUG = True
self.rx_number = 4
self.packet_number = 0
self.iterator = 1
self.packet_error_rate = 0.1
self.all_locations = []
def init_sim(self,n):
"""
initialize simulation for n receivers.
"""
self.beacon = beacon(ENABLE_BEACON_DELAY)
self.data = data_utils(n)
random.seed()
if n < 3:
print 'Number of receivers %i is less than three.' %n
print 'Simulation controller will not run.'
print 'Now exiting.'
sys.exit()
self.data.set_rx_number(n)
tx_loc = test_coords.get_tx_coords()
self.data.set_tx_location(tx_loc)
# self.data.reset_rx_location()
for i in range(n):
rx_loc = alex_random.get_random_coord()
if self.DEBUG:
print "\n\n\n\n\n\nstore location: ", rx_loc
print '\n\n\n\n\n\n'
self.data.set_rx_location(i,rx_loc)
tof = self.geo_utils.time_of_flight(rx_loc,tx_loc)
self.data.set_rx_time_delay(tof)
id = i+1
self.data.set_rx_team_id(id)
if self.DEBUG:
print 'tx_loc: ', tx_loc
print 'rx_loc: ', rx_loc
print 'time: ', repr(tof)
print 'id: ', id
def rx_beacon_packet(self):
"""
receive a single beacon packet. this will then be copied n times.
this tries to ensure clock synchronization across receivers.
"""
self.beacon.make_packet()
rx_packet = self.beacon.tx_packet()
rx_time = np.float128('%.20f'%(time.time()))
if self.DEBUG:
|
self.data.set_timestamp_base(rx_time)
self.data.set_beacon_packet(rx_packet)
def receiver_chain(self,h):
"""
simulate receiver chain for n repeaters
"""
self.host = h
n = self.data.get_rx_number()
beacon_packet = self.data.get_beacon_packet()
time_base = self.data.get_timestamp_base()
# lists containing data for all current teams
team_id = self.data.get_rx_team_id()
location = self.data.get_rx_location()
if ENABLE_LOCATION_HISTORY:
self.record_location_history(location)
tof = self.data.get_rx_time_delay()
if self.DEBUG:
print "\n\n\n\n\n\nretrieve location: ", location
print ''
print "type(tof): ", type(tof)
conn = psycopg2.connect(host = self.host,
user = "sdrc_user",
password = "sdrc_pass",
database = "sdrc_db")
cur = conn.cursor()
for i in range(n):
f = open('data_in.data', 'a')
(rx_pktno,) = struct.unpack('!H', beacon_packet[0:2])
(beacon_ID,) = struct.unpack('!H', beacon_packet[2:4])
# packet number
payload1 = struct.pack('!H', self.packet_number & 0xffff)
f.write(str(self.packet_number) + ';')
# team id
ident = team_id[i]
payload2 = struct.pack('!H', ident & 0xffff)
f.write(str(ident) + ';')
# location
if (self.iterator == 1):
loc = location[i]
else:
# old_loc = location[i]
# loc = alex_random.random_move(old_loc)
loc = alex_random.get_random_coord()
self.data.set_rx_location(i,loc)
f.write(str(loc)+';')
self.iterator += 1
payload3 = new_sim_utils.pack_loc(loc)
# toa
t = tof[i]
toa = time_base + t
# if (ENABLE_JITTER):
# jitter = self.random_timing_jitter()
# toa = toa+jitter
# else:
# pass
if self.DEBUG:
print "t = tof[i]: ", repr(t)
print "type(t): ", type (t)
print "toa = time_base + t: ", repr(toa)
print "type(toa): ", type(toa)
payload4 = new_sim_utils.pack_time(toa)
f.write(repr(toa)+';')
# beacon payload
payload5 = struct.pack('!H', rx_pktno & 0xffff)
f.write(str(rx_pktno) + ';')
payload6 = struct.pack('!H', beacon_ID & 0xffff)
f.write(str(beacon_ID) + '\n')
f.close()
# check if packet dropped
drop = self.drop_packet()
# this if evaluates true even if drop == False
# if (ENABLE_DROPPED_PACKETS and drop): # if drop == 'True'
# print 'ENABLE_DROPPED_PACKETS ', ENABLE_DROPPED_PACKETS
# print 'drop ', drop
# print (ENABLE_DROPPED_PACKETS and drop)
# print 'packet dropped'
# payload = ''
if ENABLE_DROPPED_PACKETS:
print 'ENABLE_DROPPED_PACKETS ', ENABLE_DROPPED_PACKETS
print 'drop ', drop
if drop: # if drop == 'True'
print 'drop ', drop
print 'packet dropped'
payload = ''
else: # if drop == 'False'
payload = (payload1 + payload2 +
payload3 + payload4 +
payload5 + payload6)
else: # if drop == 'False'
payload = (payload1 + payload2 +
payload3 + payload4 +
payload5 + payload6)
print "len(payload): ", len(payload)
cur.execute("INSERT INTO blob_table (field_1) VALUES (%s)", (psycopg2.Binary(payload),))
conn.commit()
cur.close()
conn.close()
self.packet_number += 1
def record_location_history(self,loc):
self.all_locations.append(loc)
# if self.DEBUG:
# print 'all locations:\n', self.all_locations
# def write_location_history(self):
# # f = open('location_history','w+')
# for i in self.all_locations:
# print repr(i[0][0][0]), repr(i[0][0][1]))
# # f.write(repr(i)+'\n')
# print '\n\n\n\n\n\n\n'
# print len(i)
# # f.close()
# kml_write = sdr_kml_writer.kml_writer()
# for i in range(0,len(x_results)):
# coord = str(x_results[i])+','+str(y_results[i])
# kml_write.add_placemark('','',coord)
# kml_write.write_to_file('geoloc_kml_file.kml')
def random_timing_jitter(self):
r = random.uniform(0,1)
jitter = r*1e-9
if self.DEBUG:
print 'Random timing jitter %f seconds' %(jitter)
return jitter
def drop_packet(self):
r = random.uniform(0,1)
print 'random value: ', r
print 'error rate: ', self.packet_error_rate
if (r > self.packet_error_rate):
drop = False
else:
drop = True
if self.DEBUG:
print 'Probability of dropped packet: ', self.packet_error_rate
print 'Packet dropped? ', drop
return drop
if __name__=='__main__':
from optparse import OptionParser
usage = "usage: %prog [options] arg"
parser = OptionParser(usage=usage)
parser.add_option("", "--host", type="string", default="128.173.90.68",
help="database host in dotted decimal form [default=%default]")
parser.add_option("-r", "--radios", type="int", default="3",
help="number of field radios to simulate [default=%default]")
parser.add_option("-i", "--iterations", type="int", default="10",
help="number of times to run simulation [default=%default]")
# parser.add_option("-d", "--drop", action="store_true", default=False,
# help="simlulate dropped packets [default=%default]")
# parser.add_option("-j", "--jitter", type="store_true", default=False,
# help="simulate clock jitter, drift... [default=%default]")
(options, args) = parser.parse_args()
main = simulation()
main.init_sim(options.radios)
for i in range(options.iterations):
main.rx_beacon_packet()
main.receiver_chain(options.host)
# main.write_location_history()
# don't use, adbapi can't handle too many db connections...
# #self.data.set_rpt_packet(payload)
# sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# sys.stdout.write("sock.connect((HOST, PORT)) ...")
# sock.connect((HOST, PORT))
# sys.stdout.write(" Done\n")
# sys.stdout.write("sock.send...")
# sock.send('%s\r\n' % payload)
# sys.stdout.write(" Done\n")
# sock.close()
# # don't use if using sockets above
# def write_to_db(self):
# data = self.data.get_rpt_packet()
# print 'conn = MySQLdb.connect'
# db = MySQLdb.connect (host = "localhost",
# user = "sdrc_user",
# passwd = "sdrc_pass",
# db = "test01")
# print 'cursor = conn.cursor ()'
# cursor = db.cursor ()
# table = 'test01_table'
# fields = '(rpt_pkt_num, rpt_team_id, rpt_location, rpt_timestamp, beacon_id, beacon_pkt_num)'
# # reset database
# cursor.execute("""DELETE FROM %s""" %(table,))
# for i in range(len(data)):
# sql = """ """
# print "loop: ",i
# payload = data[i]
# (rpt_packet_num,) = struct.unpack('!H',payload[0:2])
# (rpt_team_id,) = struct.unpack('!H',payload[2:4])
# rpt_location = new_sim_utils.unpack_loc(payload[4:24])
# rpt_timestamp = new_sim_utils.unpack_time(payload[24:36])
# (beacon_packet_num,) = struct.unpack('!H',payload[36:38])
# (beacon_id,) = struct.unpack('!H',payload[38:40])
# print type(beacon_id)
# sql = """INSERT INTO %s %s VALUES (\'%d\', \'%d\', \'%s\', \'%s\', \'%d\', \'%d\')""" %(table,fields,rpt_packet_num,
# rpt_team_id,str(rpt_location),
# repr(rpt_timestamp),beacon_id,
# beacon_packet_num)
# print sql
# print 'cursor.execute(sql)'
# cursor.execute(sql)
# print 'db.commit()'
# db.commit()
# print 'db.close()'
# db.close()
# def send_rpt_packet(self):
# """
# transmit repeater packets
# """
# pass
# def run(self):
# """
# run.
# """
# pass
# def work(self):
# """
# work function.
# """
# pass
# def __str__(self):
# """
# Print data in class: simulation
# """
# string = '\n########\nSimulation START\n'
# string += 'tx_location: ' + repr(self.data.get_tx_location()) + '\n'
# string += 'rx_location: ' + repr(self.data.get_rx_location()) + '\n'
# string += 'rx_time_delay: ' + repr(self.data.get_rx_time_delay()) + '\n'
# string += 'rx_team_id: ' + str(self.data.get_rx_team_id()) + '\n'
# string += 'rpt_packet: ' + str(self.data.get_rpt_packet())
# string += '########\nSimulation END\n'
# return string
# print main
# main.write_to_db()
# # not sure if we need this here
# dist = self.geo_utils.distance(__tx_loc,__rx_loc)
# self.__set_rx_distance(__dist)
# __power = new_sim_utils.power(__dist)
# self.set_rx_power(__power)
# def add_receiver(self):
# """
# add additional receiver to simulation
# """
# pass
# # do we really need this? don't think so...
# def copy_beacon_packet(self):
# """
# make n copies of beacon packet
# """
# num = self.get_rx_number()
# beacon_packet = self.get_beacon_packet()
# for i in range(__num):
# self.set_n_beacon_packet(__beacon_packet)
# Prepare SQL query to INSERT a record into the database.
# try:
# Execute the SQL command
# Commit your changes in the database
# except:
# # Rollback in case there is any error
# print 'db.rollback()'
# db.rollback()
# # disconnect from server
# cursor = db.cursor ()
# table = 'blob_table'
# fields = '(field_1)'
# sql = """INSERT INTO %s %s VALUES (\'%\r')""" %(table,fields,payload)
# print str(sql)
# print 'cursor.execute(sql)'
# cursor.execute(sql)
# print 'db.commit()'
# db.commit()
# db.close()
| print 'rx_time: ', repr(rx_time) | conditional_block |
typescript.ts | import * as path from "path";
import * as semver from "semver";
import { PROJEN_DIR, PROJEN_RC } from "../common";
import { Component } from "../component";
import {
Eslint,
EslintOptions,
Jest,
NodeProject,
NodeProjectOptions,
TypeScriptCompilerOptions,
TypescriptConfig,
TypescriptConfigOptions,
} from "../javascript";
import { SampleDir } from "../sample-file";
import { Task } from "../task";
import { TextFile } from "../textfile";
import {
Projenrc as ProjenrcTs,
ProjenrcOptions as ProjenrcTsOptions,
TypedocDocgen,
} from "../typescript";
import { deepMerge } from "../util";
export interface TypeScriptProjectOptions extends NodeProjectOptions {
/**
* Typescript artifacts output directory
*
* @default "lib"
*/
readonly libdir?: string;
/**
* Typescript sources directory.
*
* @default "src"
*/
readonly srcdir?: string;
/**
* Jest tests directory. Tests files should be named `xxx.test.ts`.
*
* If this directory is under `srcdir` (e.g. `src/test`, `src/__tests__`),
* then tests are going to be compiled into `lib/` and executed as javascript.
* If the test directory is outside of `src`, then we configure jest to
* compile the code in-memory.
*
* @default "test"
*/
readonly testdir?: string;
/**
* Setup eslint.
*
* @default true
*/
readonly eslint?: boolean;
/**
* Eslint options
* @default - opinionated default options
*/
readonly eslintOptions?: EslintOptions;
/**
* TypeScript version to use.
*
* NOTE: Typescript is not semantically versioned and should remain on the
* same minor, so we recommend using a `~` dependency (e.g. `~1.2.3`).
*
* @default "latest"
*/
readonly typescriptVersion?: string;
/**
* Docgen by Typedoc
*
* @default false
*/
readonly docgen?: boolean;
/**
* Docs directory
*
* @default "docs"
*/
readonly docsDirectory?: string;
/**
* Custom TSConfig
* @default - default options
*/
readonly tsconfig?: TypescriptConfigOptions;
/**
* Custom tsconfig options for the development tsconfig.json file (used for testing).
* @default - use the production tsconfig options
*/
readonly tsconfigDev?: TypescriptConfigOptions;
/**
* The name of the development tsconfig.json file.
*
* @default "tsconfig.dev.json"
*/
readonly tsconfigDevFile?: string;
/**
* Do not generate a `tsconfig.json` file (used by jsii projects since
* tsconfig.json is generated by the jsii compiler).
*
* @default false
*/
readonly disableTsconfig?: boolean;
/**
* Do not generate a `tsconfig.dev.json` file.
*
* @default false
*/
readonly disableTsconfigDev?: boolean;
/**
* Generate one-time sample in `src/` and `test/` if there are no files there.
* @default true
*/
readonly sampleCode?: boolean;
/**
* The .d.ts file that includes the type declarations for this module.
* @default - .d.ts file derived from the project's entrypoint (usually lib/index.d.ts)
*/
readonly entrypointTypes?: string;
/**
* Use TypeScript for your projenrc file (`.projenrc.ts`).
*
* @default false
* @pjnew true
*/
readonly projenrcTs?: boolean;
/**
* Options for .projenrc.ts
*/
readonly projenrcTsOptions?: ProjenrcTsOptions;
}
/**
* TypeScript project
* @pjid typescript
*/
export class TypeScriptProject extends NodeProject {
public readonly docgen?: boolean;
public readonly docsDirectory: string;
public readonly eslint?: Eslint;
public readonly tsconfigEslint?: TypescriptConfig;
public readonly tsconfig?: TypescriptConfig;
/**
* A typescript configuration file which covers all files (sources, tests, projen).
*/
public readonly tsconfigDev: TypescriptConfig;
/**
* The directory in which the .ts sources reside.
*/
public readonly srcdir: string;
/**
* The directory in which compiled .js files reside.
*/
public readonly libdir: string;
/**
* The directory in which tests reside.
*/
public readonly testdir: string;
/**
* The "watch" task.
*/
public readonly watchTask: Task;
constructor(options: TypeScriptProjectOptions) {
super({
...options,
// disable .projenrc.js if typescript is enabled
projenrcJs: options.projenrcTs ? false : options.projenrcJs,
jestOptions: {
...options.jestOptions,
jestConfig: {
...options.jestOptions?.jestConfig,
testMatch: options.jestOptions?.jestConfig?.testMatch ?? [],
},
},
});
this.srcdir = options.srcdir ?? "src";
this.libdir = options.libdir ?? "lib";
this.docgen = options.docgen;
this.docsDirectory = options.docsDirectory ?? "docs/";
this.compileTask.exec("tsc --build");
this.watchTask = this.addTask("watch", {
description: "Watch & compile in the background",
exec: "tsc --build -w",
});
this.testdir = options.testdir ?? "test";
this.gitignore.include(`/${this.testdir}/`);
this.npmignore?.exclude(`/${this.testdir}/`);
// if the test directory is under `src/`, then we will run our tests against
// the javascript files and not let jest compile it for us.
const compiledTests = this.testdir.startsWith(this.srcdir + path.posix.sep);
if (options.entrypointTypes || this.entrypoint !== "") {
const entrypointTypes =
options.entrypointTypes ??
`${path
.join(
path.dirname(this.entrypoint),
path.basename(this.entrypoint, ".js")
)
.replace(/\\/g, "/")}.d.ts`;
this.package.addField("types", entrypointTypes);
}
const compilerOptionDefaults: TypeScriptCompilerOptions = {
alwaysStrict: true,
declaration: true,
esModuleInterop: true,
experimentalDecorators: true,
inlineSourceMap: true,
inlineSources: true,
lib: ["es2019"],
module: "CommonJS",
noEmitOnError: false,
noFallthroughCasesInSwitch: true,
noImplicitAny: true,
noImplicitReturns: true,
noImplicitThis: true,
noUnusedLocals: true,
noUnusedParameters: true,
resolveJsonModule: true,
strict: true,
strictNullChecks: true,
strictPropertyInitialization: true,
stripInternal: true,
target: "ES2019",
};
if (options.disableTsconfigDev && options.disableTsconfig) {
throw new Error(
"Cannot specify both 'disableTsconfigDev' and 'disableTsconfig' fields."
);
}
if (!options.disableTsconfig) {
this.tsconfig = new TypescriptConfig(
this,
mergeTsconfigOptions(
{
include: [`${this.srcdir}/**/*.ts`],
// exclude: ['node_modules'], // TODO: shouldn't we exclude node_modules?
compilerOptions: {
rootDir: this.srcdir,
outDir: this.libdir,
...compilerOptionDefaults,
},
},
options.tsconfig
)
);
}
if (options.disableTsconfigDev) {
this.tsconfigDev = this.tsconfig!;
} else {
const tsconfigDevFile = options.tsconfigDevFile ?? "tsconfig.dev.json";
this.tsconfigDev = new TypescriptConfig(
this,
mergeTsconfigOptions(
{
fileName: tsconfigDevFile,
include: [
PROJEN_RC,
`${this.srcdir}/**/*.ts`,
`${this.testdir}/**/*.ts`,
],
exclude: ["node_modules"],
compilerOptions: compilerOptionDefaults,
},
options.tsconfig,
options.tsconfigDev
)
);
}
this.gitignore.include(`/${this.srcdir}/`);
this.npmignore?.exclude(`/${this.srcdir}/`);
if (this.srcdir !== this.libdir) {
// separated, can ignore the entire libdir
this.gitignore.exclude(`/${this.libdir}`);
} else {
// collocated, can only ignore the compiled output
this.gitignore.exclude(`/${this.libdir}/**/*.js`);
this.gitignore.exclude(`/${this.libdir}/**/*.d.ts`);
this.gitignore.exclude(`/${this.libdir}/**/*.d.ts.map`);
}
this.npmignore?.include(`/${this.libdir}/`);
this.npmignore?.include(`/${this.libdir}/**/*.js`);
this.npmignore?.include(`/${this.libdir}/**/*.d.ts`);
this.gitignore.exclude("/dist/");
this.npmignore?.exclude("dist"); // jsii-pacmak expects this to be "dist" and not "/dist". otherwise it will tamper with it
this.npmignore?.exclude("/tsconfig.json");
this.npmignore?.exclude("/.github/");
this.npmignore?.exclude("/.vscode/");
this.npmignore?.exclude("/.idea/");
this.npmignore?.exclude("/.projenrc.js");
this.npmignore?.exclude("tsconfig.tsbuildinfo");
if (this.jest) {
if (compiledTests) {
this.addJestCompiled(this.jest);
} else {
this.addJestNoCompile(this.jest);
}
}
if (options.eslint ?? true) {
this.eslint = new Eslint(this, {
tsconfigPath: `./${this.tsconfigDev.fileName}`,
dirs: [this.srcdir],
devdirs: [this.testdir, "build-tools"],
fileExtensions: [".ts", ".tsx"],
lintProjenRc: false,
...options.eslintOptions,
});
this.tsconfigEslint = this.tsconfigDev;
}
if (!this.parent && options.projenrcTs) {
new ProjenrcTs(this, options.projenrcTsOptions);
}
const tsver = options.typescriptVersion
? `@${options.typescriptVersion}`
: "";
this.addDevDeps(
`typescript${tsver}`,
// @types/node versions numbers match the node runtime versions' major.minor, however, new
// releases are only created when API changes are included in a node release... We might for
// example have dependencies that require `node >= 12.22`, but as 12.21 and 12.22 did not
// include API changes, `@types/node@12.20.x` is the "correct" version to use. As it is not
// possible to easily determine the correct version to use, we pick up the latest version.
//
// Additionally, we default to tracking the 12.x line, as the current earliest LTS release of
// node is 12.x, so this is what corresponds to the broadest compatibility with supported node
// runtimes.
`@types/node@^${semver.major(this.package.minNodeVersion ?? "16.0.0")}`
);
// generate sample code in `src` and `lib` if these directories are empty or non-existent.
if (options.sampleCode ?? true) {
new SampleCode(this);
}
if (this.docgen) |
}
/**
* Tests are compiled to `lib/TESTDIR`, so we don't need jest to compile them
* for us. just run them directly from javascript.
*/
private addJestCompiled(jest: Jest) {
this.addDevDeps(`@types/jest${jest.jestVersion}`);
const testout = path.posix.relative(this.srcdir, this.testdir);
const libtest = path.posix.join(this.libdir, testout);
const srctest = this.testdir;
this.npmignore?.exclude(`/${libtest}/`);
jest.addTestMatch(`**/${libtest}/**/?(*.)+(spec|test).js?(x)`);
jest.addWatchIgnorePattern(`/${this.srcdir}/`);
const resolveSnapshotPath = (test: string, ext: string) => {
const fullpath = test.replace(libtest, srctest);
return path.join(
path.dirname(fullpath),
"__snapshots__",
path.basename(fullpath, ".js") + ".ts" + ext
);
};
const resolveTestPath = (snap: string, ext: string) => {
const filename = path.basename(snap, ".ts" + ext) + ".js";
const dir = path.dirname(path.dirname(snap)).replace(srctest, libtest);
return path.join(dir, filename);
};
const resolver = new TextFile(
this,
path.posix.join(PROJEN_DIR, "jest-snapshot-resolver.js")
);
if (!resolver.marker) {
resolver.addLine(`// ${resolver.marker}`);
}
resolver.addLine('const path = require("path");');
resolver.addLine(`const libtest = "${libtest}";`);
resolver.addLine(`const srctest= "${srctest}";`);
resolver.addLine("module.exports = {");
resolver.addLine(
` resolveSnapshotPath: ${resolveSnapshotPath.toString()},`
);
resolver.addLine(` resolveTestPath: ${resolveTestPath.toString()},`);
resolver.addLine(
" testPathForConsistencyCheck: path.join('some', '__tests__', 'example.test.js')"
);
resolver.addLine("};");
jest.addSnapshotResolver(`./${resolver.path}`);
}
private addJestNoCompile(jest: Jest) {
this.addDevDeps(
`@types/jest${jest.jestVersion}`,
`ts-jest${jest.jestVersion}`
);
jest.addTestMatch(`<rootDir>/${this.srcdir}/**/__tests__/**/*.ts?(x)`);
jest.addTestMatch(
`<rootDir>/(${this.testdir}|${this.srcdir})/**/*(*.)@(spec|test).ts?(x)`
);
// add relevant deps
if (!jest.config.preset) {
jest.config.preset = "ts-jest";
}
jest.config.globals = deepMerge([
{
"ts-jest": {
tsconfig: this.tsconfigDev.fileName,
},
},
jest.config.globals,
]);
}
}
class SampleCode extends Component {
constructor(project: TypeScriptProject) {
super(project);
const srcCode = [
"export class Hello {",
" public sayHello() {",
" return 'hello, world!';",
" }",
"}",
].join("\n");
const testCode = [
"import { Hello } from '../src';",
"",
"test('hello', () => {",
" expect(new Hello().sayHello()).toBe('hello, world!');",
"});",
].join("\n");
new SampleDir(project, project.srcdir, {
files: {
"index.ts": srcCode,
},
});
if (project.jest) {
new SampleDir(project, project.testdir, {
files: {
"hello.test.ts": testCode,
},
});
}
}
}
/**
* TypeScript app.
*
* @pjid typescript-app
*/
export class TypeScriptAppProject extends TypeScriptProject {
constructor(options: TypeScriptProjectOptions) {
super({
allowLibraryDependencies: false,
releaseWorkflow: false,
entrypoint: "", // "main" is not needed in typescript apps
package: false,
...options,
});
}
}
/**
* @deprecated use `TypeScriptProject`
*/
export class TypeScriptLibraryProject extends TypeScriptProject {}
/**
* @deprecated use TypeScriptProjectOptions
*/
export interface TypeScriptLibraryProjectOptions
extends TypeScriptProjectOptions {}
/**
* @internal
*/
export function mergeTsconfigOptions(
...options: (TypescriptConfigOptions | undefined)[]
): TypescriptConfigOptions {
const definedOptions = options.filter(Boolean) as TypescriptConfigOptions[];
return definedOptions.reduce<TypescriptConfigOptions>(
(previous, current) => ({
...previous,
...current,
include: [...(previous.include ?? []), ...(current.include ?? [])],
exclude: [...(previous.exclude ?? []), ...(current.exclude ?? [])],
compilerOptions: {
...previous.compilerOptions,
...current.compilerOptions,
},
}),
{ compilerOptions: {} }
);
}
| {
new TypedocDocgen(this);
} | conditional_block |
typescript.ts | import * as path from "path";
import * as semver from "semver";
import { PROJEN_DIR, PROJEN_RC } from "../common";
import { Component } from "../component";
import {
Eslint,
EslintOptions,
Jest,
NodeProject,
NodeProjectOptions,
TypeScriptCompilerOptions,
TypescriptConfig,
TypescriptConfigOptions,
} from "../javascript";
import { SampleDir } from "../sample-file";
import { Task } from "../task";
import { TextFile } from "../textfile";
import {
Projenrc as ProjenrcTs,
ProjenrcOptions as ProjenrcTsOptions,
TypedocDocgen,
} from "../typescript";
import { deepMerge } from "../util";
export interface TypeScriptProjectOptions extends NodeProjectOptions {
/**
* Typescript artifacts output directory
*
* @default "lib"
*/
readonly libdir?: string;
/**
* Typescript sources directory.
*
* @default "src"
*/
readonly srcdir?: string;
/**
* Jest tests directory. Tests files should be named `xxx.test.ts`.
*
* If this directory is under `srcdir` (e.g. `src/test`, `src/__tests__`),
* then tests are going to be compiled into `lib/` and executed as javascript.
* If the test directory is outside of `src`, then we configure jest to
* compile the code in-memory.
*
* @default "test"
*/
readonly testdir?: string;
/**
* Setup eslint.
*
* @default true
*/
readonly eslint?: boolean;
/**
* Eslint options
* @default - opinionated default options
*/
readonly eslintOptions?: EslintOptions;
/**
* TypeScript version to use.
*
* NOTE: Typescript is not semantically versioned and should remain on the
* same minor, so we recommend using a `~` dependency (e.g. `~1.2.3`).
*
* @default "latest"
*/
readonly typescriptVersion?: string;
/**
* Docgen by Typedoc
*
* @default false
*/
readonly docgen?: boolean;
/**
* Docs directory
*
* @default "docs"
*/
readonly docsDirectory?: string;
/**
* Custom TSConfig
* @default - default options
*/
readonly tsconfig?: TypescriptConfigOptions;
/**
* Custom tsconfig options for the development tsconfig.json file (used for testing).
* @default - use the production tsconfig options
*/
readonly tsconfigDev?: TypescriptConfigOptions;
/**
* The name of the development tsconfig.json file.
*
* @default "tsconfig.dev.json"
*/
readonly tsconfigDevFile?: string;
/**
* Do not generate a `tsconfig.json` file (used by jsii projects since
* tsconfig.json is generated by the jsii compiler).
*
* @default false
*/
readonly disableTsconfig?: boolean;
/**
* Do not generate a `tsconfig.dev.json` file.
*
* @default false
*/
readonly disableTsconfigDev?: boolean;
/**
* Generate one-time sample in `src/` and `test/` if there are no files there.
* @default true
*/
readonly sampleCode?: boolean;
/**
* The .d.ts file that includes the type declarations for this module.
* @default - .d.ts file derived from the project's entrypoint (usually lib/index.d.ts)
*/
readonly entrypointTypes?: string;
/**
* Use TypeScript for your projenrc file (`.projenrc.ts`).
*
* @default false
* @pjnew true
*/
readonly projenrcTs?: boolean;
/**
* Options for .projenrc.ts
*/
readonly projenrcTsOptions?: ProjenrcTsOptions;
}
/**
* TypeScript project
* @pjid typescript
*/
export class TypeScriptProject extends NodeProject {
public readonly docgen?: boolean;
public readonly docsDirectory: string;
public readonly eslint?: Eslint;
public readonly tsconfigEslint?: TypescriptConfig;
public readonly tsconfig?: TypescriptConfig;
/**
* A typescript configuration file which covers all files (sources, tests, projen).
*/
public readonly tsconfigDev: TypescriptConfig;
/**
* The directory in which the .ts sources reside.
*/
public readonly srcdir: string;
/**
* The directory in which compiled .js files reside.
*/
public readonly libdir: string;
/**
* The directory in which tests reside.
*/
public readonly testdir: string;
/**
* The "watch" task.
*/
public readonly watchTask: Task;
constructor(options: TypeScriptProjectOptions) {
super({
...options,
// disable .projenrc.js if typescript is enabled
projenrcJs: options.projenrcTs ? false : options.projenrcJs,
jestOptions: {
...options.jestOptions,
jestConfig: {
...options.jestOptions?.jestConfig,
testMatch: options.jestOptions?.jestConfig?.testMatch ?? [],
},
},
});
this.srcdir = options.srcdir ?? "src";
this.libdir = options.libdir ?? "lib";
this.docgen = options.docgen;
this.docsDirectory = options.docsDirectory ?? "docs/";
this.compileTask.exec("tsc --build");
this.watchTask = this.addTask("watch", {
description: "Watch & compile in the background",
exec: "tsc --build -w",
});
this.testdir = options.testdir ?? "test";
this.gitignore.include(`/${this.testdir}/`);
this.npmignore?.exclude(`/${this.testdir}/`);
// if the test directory is under `src/`, then we will run our tests against
// the javascript files and not let jest compile it for us.
const compiledTests = this.testdir.startsWith(this.srcdir + path.posix.sep);
if (options.entrypointTypes || this.entrypoint !== "") {
const entrypointTypes =
options.entrypointTypes ??
`${path
.join(
path.dirname(this.entrypoint),
path.basename(this.entrypoint, ".js")
)
.replace(/\\/g, "/")}.d.ts`;
this.package.addField("types", entrypointTypes);
}
const compilerOptionDefaults: TypeScriptCompilerOptions = {
alwaysStrict: true,
declaration: true,
esModuleInterop: true,
experimentalDecorators: true,
inlineSourceMap: true,
inlineSources: true,
lib: ["es2019"],
module: "CommonJS",
noEmitOnError: false,
noFallthroughCasesInSwitch: true,
noImplicitAny: true,
noImplicitReturns: true,
noImplicitThis: true,
noUnusedLocals: true,
noUnusedParameters: true,
resolveJsonModule: true,
strict: true,
strictNullChecks: true,
strictPropertyInitialization: true,
stripInternal: true,
target: "ES2019",
};
if (options.disableTsconfigDev && options.disableTsconfig) {
throw new Error(
"Cannot specify both 'disableTsconfigDev' and 'disableTsconfig' fields."
);
}
if (!options.disableTsconfig) {
this.tsconfig = new TypescriptConfig(
this,
mergeTsconfigOptions(
{
include: [`${this.srcdir}/**/*.ts`],
// exclude: ['node_modules'], // TODO: shouldn't we exclude node_modules?
compilerOptions: {
rootDir: this.srcdir,
outDir: this.libdir,
...compilerOptionDefaults,
},
},
options.tsconfig
)
);
}
if (options.disableTsconfigDev) {
this.tsconfigDev = this.tsconfig!;
} else {
const tsconfigDevFile = options.tsconfigDevFile ?? "tsconfig.dev.json";
this.tsconfigDev = new TypescriptConfig(
this,
mergeTsconfigOptions(
{
fileName: tsconfigDevFile,
include: [
PROJEN_RC,
`${this.srcdir}/**/*.ts`,
`${this.testdir}/**/*.ts`,
],
exclude: ["node_modules"],
compilerOptions: compilerOptionDefaults,
},
options.tsconfig,
options.tsconfigDev
)
);
}
this.gitignore.include(`/${this.srcdir}/`);
this.npmignore?.exclude(`/${this.srcdir}/`);
if (this.srcdir !== this.libdir) {
// separated, can ignore the entire libdir
this.gitignore.exclude(`/${this.libdir}`);
} else {
// collocated, can only ignore the compiled output
this.gitignore.exclude(`/${this.libdir}/**/*.js`);
this.gitignore.exclude(`/${this.libdir}/**/*.d.ts`);
this.gitignore.exclude(`/${this.libdir}/**/*.d.ts.map`);
}
this.npmignore?.include(`/${this.libdir}/`);
this.npmignore?.include(`/${this.libdir}/**/*.js`);
this.npmignore?.include(`/${this.libdir}/**/*.d.ts`);
this.gitignore.exclude("/dist/");
this.npmignore?.exclude("dist"); // jsii-pacmak expects this to be "dist" and not "/dist". otherwise it will tamper with it
this.npmignore?.exclude("/tsconfig.json");
this.npmignore?.exclude("/.github/");
this.npmignore?.exclude("/.vscode/");
this.npmignore?.exclude("/.idea/");
this.npmignore?.exclude("/.projenrc.js");
this.npmignore?.exclude("tsconfig.tsbuildinfo");
if (this.jest) {
if (compiledTests) {
this.addJestCompiled(this.jest);
} else {
this.addJestNoCompile(this.jest);
}
}
if (options.eslint ?? true) {
this.eslint = new Eslint(this, {
tsconfigPath: `./${this.tsconfigDev.fileName}`,
dirs: [this.srcdir],
devdirs: [this.testdir, "build-tools"],
fileExtensions: [".ts", ".tsx"],
lintProjenRc: false,
...options.eslintOptions,
});
this.tsconfigEslint = this.tsconfigDev;
}
if (!this.parent && options.projenrcTs) {
new ProjenrcTs(this, options.projenrcTsOptions);
}
const tsver = options.typescriptVersion
? `@${options.typescriptVersion}`
: "";
this.addDevDeps(
`typescript${tsver}`,
// @types/node versions numbers match the node runtime versions' major.minor, however, new
// releases are only created when API changes are included in a node release... We might for
// example have dependencies that require `node >= 12.22`, but as 12.21 and 12.22 did not
// include API changes, `@types/node@12.20.x` is the "correct" version to use. As it is not
// possible to easily determine the correct version to use, we pick up the latest version.
//
// Additionally, we default to tracking the 12.x line, as the current earliest LTS release of
// node is 12.x, so this is what corresponds to the broadest compatibility with supported node
// runtimes.
`@types/node@^${semver.major(this.package.minNodeVersion ?? "16.0.0")}`
);
// generate sample code in `src` and `lib` if these directories are empty or non-existent.
if (options.sampleCode ?? true) {
new SampleCode(this);
}
if (this.docgen) {
new TypedocDocgen(this);
}
}
/**
* Tests are compiled to `lib/TESTDIR`, so we don't need jest to compile them
* for us. just run them directly from javascript.
*/
private addJestCompiled(jest: Jest) {
this.addDevDeps(`@types/jest${jest.jestVersion}`);
const testout = path.posix.relative(this.srcdir, this.testdir);
const libtest = path.posix.join(this.libdir, testout);
const srctest = this.testdir;
this.npmignore?.exclude(`/${libtest}/`);
jest.addTestMatch(`**/${libtest}/**/?(*.)+(spec|test).js?(x)`);
jest.addWatchIgnorePattern(`/${this.srcdir}/`);
const resolveSnapshotPath = (test: string, ext: string) => {
const fullpath = test.replace(libtest, srctest);
return path.join(
path.dirname(fullpath),
"__snapshots__",
path.basename(fullpath, ".js") + ".ts" + ext
);
};
const resolveTestPath = (snap: string, ext: string) => {
const filename = path.basename(snap, ".ts" + ext) + ".js";
const dir = path.dirname(path.dirname(snap)).replace(srctest, libtest);
return path.join(dir, filename);
};
const resolver = new TextFile(
this,
path.posix.join(PROJEN_DIR, "jest-snapshot-resolver.js")
);
if (!resolver.marker) {
resolver.addLine(`// ${resolver.marker}`);
}
resolver.addLine('const path = require("path");');
resolver.addLine(`const libtest = "${libtest}";`);
resolver.addLine(`const srctest= "${srctest}";`);
resolver.addLine("module.exports = {");
resolver.addLine(
` resolveSnapshotPath: ${resolveSnapshotPath.toString()},`
);
resolver.addLine(` resolveTestPath: ${resolveTestPath.toString()},`);
resolver.addLine(
" testPathForConsistencyCheck: path.join('some', '__tests__', 'example.test.js')"
);
resolver.addLine("};");
jest.addSnapshotResolver(`./${resolver.path}`);
}
private addJestNoCompile(jest: Jest) {
this.addDevDeps(
`@types/jest${jest.jestVersion}`,
`ts-jest${jest.jestVersion}`
);
jest.addTestMatch(`<rootDir>/${this.srcdir}/**/__tests__/**/*.ts?(x)`);
jest.addTestMatch(
`<rootDir>/(${this.testdir}|${this.srcdir})/**/*(*.)@(spec|test).ts?(x)`
);
// add relevant deps
if (!jest.config.preset) {
jest.config.preset = "ts-jest";
}
jest.config.globals = deepMerge([
{
"ts-jest": {
tsconfig: this.tsconfigDev.fileName,
},
},
jest.config.globals,
]);
}
}
class | extends Component {
constructor(project: TypeScriptProject) {
super(project);
const srcCode = [
"export class Hello {",
" public sayHello() {",
" return 'hello, world!';",
" }",
"}",
].join("\n");
const testCode = [
"import { Hello } from '../src';",
"",
"test('hello', () => {",
" expect(new Hello().sayHello()).toBe('hello, world!');",
"});",
].join("\n");
new SampleDir(project, project.srcdir, {
files: {
"index.ts": srcCode,
},
});
if (project.jest) {
new SampleDir(project, project.testdir, {
files: {
"hello.test.ts": testCode,
},
});
}
}
}
/**
* TypeScript app.
*
* @pjid typescript-app
*/
export class TypeScriptAppProject extends TypeScriptProject {
constructor(options: TypeScriptProjectOptions) {
super({
allowLibraryDependencies: false,
releaseWorkflow: false,
entrypoint: "", // "main" is not needed in typescript apps
package: false,
...options,
});
}
}
/**
* @deprecated use `TypeScriptProject`
*/
export class TypeScriptLibraryProject extends TypeScriptProject {}
/**
* @deprecated use TypeScriptProjectOptions
*/
export interface TypeScriptLibraryProjectOptions
extends TypeScriptProjectOptions {}
/**
* @internal
*/
export function mergeTsconfigOptions(
...options: (TypescriptConfigOptions | undefined)[]
): TypescriptConfigOptions {
const definedOptions = options.filter(Boolean) as TypescriptConfigOptions[];
return definedOptions.reduce<TypescriptConfigOptions>(
(previous, current) => ({
...previous,
...current,
include: [...(previous.include ?? []), ...(current.include ?? [])],
exclude: [...(previous.exclude ?? []), ...(current.exclude ?? [])],
compilerOptions: {
...previous.compilerOptions,
...current.compilerOptions,
},
}),
{ compilerOptions: {} }
);
}
| SampleCode | identifier_name |
typescript.ts | import * as path from "path";
import * as semver from "semver";
import { PROJEN_DIR, PROJEN_RC } from "../common";
import { Component } from "../component";
import {
Eslint,
EslintOptions,
Jest,
NodeProject,
NodeProjectOptions,
TypeScriptCompilerOptions,
TypescriptConfig,
TypescriptConfigOptions,
} from "../javascript";
import { SampleDir } from "../sample-file";
import { Task } from "../task";
import { TextFile } from "../textfile";
import {
Projenrc as ProjenrcTs,
ProjenrcOptions as ProjenrcTsOptions,
TypedocDocgen,
} from "../typescript";
import { deepMerge } from "../util";
export interface TypeScriptProjectOptions extends NodeProjectOptions {
/**
* Typescript artifacts output directory
*
* @default "lib"
*/
readonly libdir?: string;
/**
* Typescript sources directory.
*
* @default "src"
*/
readonly srcdir?: string;
/**
* Jest tests directory. Tests files should be named `xxx.test.ts`.
*
* If this directory is under `srcdir` (e.g. `src/test`, `src/__tests__`),
* then tests are going to be compiled into `lib/` and executed as javascript.
* If the test directory is outside of `src`, then we configure jest to
* compile the code in-memory.
*
* @default "test"
*/
readonly testdir?: string;
/**
* Setup eslint.
*
* @default true
*/
readonly eslint?: boolean;
/**
* Eslint options
* @default - opinionated default options
*/
readonly eslintOptions?: EslintOptions;
/**
* TypeScript version to use.
*
* NOTE: Typescript is not semantically versioned and should remain on the
* same minor, so we recommend using a `~` dependency (e.g. `~1.2.3`).
*
* @default "latest"
*/
readonly typescriptVersion?: string;
/**
* Docgen by Typedoc
*
* @default false
*/
readonly docgen?: boolean;
/**
* Docs directory
*
* @default "docs"
*/
readonly docsDirectory?: string;
/**
* Custom TSConfig
* @default - default options
*/
readonly tsconfig?: TypescriptConfigOptions;
/**
* Custom tsconfig options for the development tsconfig.json file (used for testing).
* @default - use the production tsconfig options
*/
readonly tsconfigDev?: TypescriptConfigOptions;
/**
* The name of the development tsconfig.json file.
*
* @default "tsconfig.dev.json"
*/
readonly tsconfigDevFile?: string;
/**
* Do not generate a `tsconfig.json` file (used by jsii projects since
* tsconfig.json is generated by the jsii compiler).
*
* @default false
*/
readonly disableTsconfig?: boolean;
/**
* Do not generate a `tsconfig.dev.json` file.
*
* @default false
*/
readonly disableTsconfigDev?: boolean;
/**
* Generate one-time sample in `src/` and `test/` if there are no files there.
* @default true
*/
readonly sampleCode?: boolean;
/**
* The .d.ts file that includes the type declarations for this module.
* @default - .d.ts file derived from the project's entrypoint (usually lib/index.d.ts)
*/
readonly entrypointTypes?: string;
/**
* Use TypeScript for your projenrc file (`.projenrc.ts`).
*
* @default false
* @pjnew true
*/
readonly projenrcTs?: boolean;
/**
* Options for .projenrc.ts
*/
readonly projenrcTsOptions?: ProjenrcTsOptions;
}
/**
* TypeScript project
* @pjid typescript
*/
export class TypeScriptProject extends NodeProject {
public readonly docgen?: boolean;
public readonly docsDirectory: string;
public readonly eslint?: Eslint;
public readonly tsconfigEslint?: TypescriptConfig;
public readonly tsconfig?: TypescriptConfig;
/**
* A typescript configuration file which covers all files (sources, tests, projen).
*/
public readonly tsconfigDev: TypescriptConfig;
/**
* The directory in which the .ts sources reside.
*/
public readonly srcdir: string;
/**
* The directory in which compiled .js files reside.
*/
public readonly libdir: string;
/**
* The directory in which tests reside.
*/
public readonly testdir: string;
/**
* The "watch" task.
*/
public readonly watchTask: Task;
constructor(options: TypeScriptProjectOptions) {
super({
...options,
// disable .projenrc.js if typescript is enabled
projenrcJs: options.projenrcTs ? false : options.projenrcJs,
jestOptions: {
...options.jestOptions,
jestConfig: {
...options.jestOptions?.jestConfig,
testMatch: options.jestOptions?.jestConfig?.testMatch ?? [],
},
},
});
this.srcdir = options.srcdir ?? "src";
this.libdir = options.libdir ?? "lib";
this.docgen = options.docgen;
this.docsDirectory = options.docsDirectory ?? "docs/";
this.compileTask.exec("tsc --build");
this.watchTask = this.addTask("watch", {
description: "Watch & compile in the background",
exec: "tsc --build -w",
});
this.testdir = options.testdir ?? "test";
this.gitignore.include(`/${this.testdir}/`);
this.npmignore?.exclude(`/${this.testdir}/`);
// if the test directory is under `src/`, then we will run our tests against
// the javascript files and not let jest compile it for us.
const compiledTests = this.testdir.startsWith(this.srcdir + path.posix.sep);
if (options.entrypointTypes || this.entrypoint !== "") {
const entrypointTypes =
options.entrypointTypes ??
`${path
.join(
path.dirname(this.entrypoint),
path.basename(this.entrypoint, ".js")
)
.replace(/\\/g, "/")}.d.ts`;
this.package.addField("types", entrypointTypes);
}
const compilerOptionDefaults: TypeScriptCompilerOptions = {
alwaysStrict: true,
declaration: true,
esModuleInterop: true,
experimentalDecorators: true,
inlineSourceMap: true,
inlineSources: true,
lib: ["es2019"],
module: "CommonJS",
noEmitOnError: false,
noFallthroughCasesInSwitch: true,
noImplicitAny: true,
noImplicitReturns: true,
noImplicitThis: true,
noUnusedLocals: true,
noUnusedParameters: true,
resolveJsonModule: true,
strict: true,
strictNullChecks: true,
strictPropertyInitialization: true,
stripInternal: true,
target: "ES2019",
};
if (options.disableTsconfigDev && options.disableTsconfig) {
throw new Error(
"Cannot specify both 'disableTsconfigDev' and 'disableTsconfig' fields."
);
}
if (!options.disableTsconfig) {
this.tsconfig = new TypescriptConfig(
this,
mergeTsconfigOptions(
{
include: [`${this.srcdir}/**/*.ts`],
// exclude: ['node_modules'], // TODO: shouldn't we exclude node_modules?
compilerOptions: {
rootDir: this.srcdir,
outDir: this.libdir,
...compilerOptionDefaults,
},
},
options.tsconfig
)
);
}
if (options.disableTsconfigDev) {
this.tsconfigDev = this.tsconfig!;
} else {
const tsconfigDevFile = options.tsconfigDevFile ?? "tsconfig.dev.json";
this.tsconfigDev = new TypescriptConfig(
this,
mergeTsconfigOptions(
{
fileName: tsconfigDevFile,
include: [
PROJEN_RC,
`${this.srcdir}/**/*.ts`,
`${this.testdir}/**/*.ts`,
],
exclude: ["node_modules"],
compilerOptions: compilerOptionDefaults,
},
options.tsconfig,
options.tsconfigDev
)
);
}
this.gitignore.include(`/${this.srcdir}/`);
this.npmignore?.exclude(`/${this.srcdir}/`);
if (this.srcdir !== this.libdir) {
// separated, can ignore the entire libdir
this.gitignore.exclude(`/${this.libdir}`);
} else {
// collocated, can only ignore the compiled output
this.gitignore.exclude(`/${this.libdir}/**/*.js`);
this.gitignore.exclude(`/${this.libdir}/**/*.d.ts`);
this.gitignore.exclude(`/${this.libdir}/**/*.d.ts.map`);
}
this.npmignore?.include(`/${this.libdir}/`);
this.npmignore?.include(`/${this.libdir}/**/*.js`);
this.npmignore?.include(`/${this.libdir}/**/*.d.ts`);
this.gitignore.exclude("/dist/");
this.npmignore?.exclude("dist"); // jsii-pacmak expects this to be "dist" and not "/dist". otherwise it will tamper with it
this.npmignore?.exclude("/tsconfig.json");
this.npmignore?.exclude("/.github/");
this.npmignore?.exclude("/.vscode/");
this.npmignore?.exclude("/.idea/");
this.npmignore?.exclude("/.projenrc.js");
this.npmignore?.exclude("tsconfig.tsbuildinfo");
if (this.jest) {
if (compiledTests) {
this.addJestCompiled(this.jest);
} else {
this.addJestNoCompile(this.jest);
}
}
if (options.eslint ?? true) {
this.eslint = new Eslint(this, {
tsconfigPath: `./${this.tsconfigDev.fileName}`,
dirs: [this.srcdir],
devdirs: [this.testdir, "build-tools"],
fileExtensions: [".ts", ".tsx"],
lintProjenRc: false,
...options.eslintOptions,
});
this.tsconfigEslint = this.tsconfigDev;
}
if (!this.parent && options.projenrcTs) {
new ProjenrcTs(this, options.projenrcTsOptions);
}
const tsver = options.typescriptVersion
? `@${options.typescriptVersion}`
: "";
this.addDevDeps(
`typescript${tsver}`,
// @types/node versions numbers match the node runtime versions' major.minor, however, new
// releases are only created when API changes are included in a node release... We might for
// example have dependencies that require `node >= 12.22`, but as 12.21 and 12.22 did not
// include API changes, `@types/node@12.20.x` is the "correct" version to use. As it is not
// possible to easily determine the correct version to use, we pick up the latest version.
//
// Additionally, we default to tracking the 12.x line, as the current earliest LTS release of
// node is 12.x, so this is what corresponds to the broadest compatibility with supported node
// runtimes.
`@types/node@^${semver.major(this.package.minNodeVersion ?? "16.0.0")}`
);
// generate sample code in `src` and `lib` if these directories are empty or non-existent.
if (options.sampleCode ?? true) {
new SampleCode(this);
}
if (this.docgen) {
new TypedocDocgen(this);
}
}
/**
* Tests are compiled to `lib/TESTDIR`, so we don't need jest to compile them
* for us. just run them directly from javascript.
*/
private addJestCompiled(jest: Jest) {
this.addDevDeps(`@types/jest${jest.jestVersion}`);
const testout = path.posix.relative(this.srcdir, this.testdir);
const libtest = path.posix.join(this.libdir, testout);
const srctest = this.testdir;
this.npmignore?.exclude(`/${libtest}/`);
jest.addTestMatch(`**/${libtest}/**/?(*.)+(spec|test).js?(x)`);
jest.addWatchIgnorePattern(`/${this.srcdir}/`);
const resolveSnapshotPath = (test: string, ext: string) => {
const fullpath = test.replace(libtest, srctest);
return path.join(
path.dirname(fullpath),
"__snapshots__",
path.basename(fullpath, ".js") + ".ts" + ext
);
};
const resolveTestPath = (snap: string, ext: string) => {
const filename = path.basename(snap, ".ts" + ext) + ".js";
const dir = path.dirname(path.dirname(snap)).replace(srctest, libtest);
return path.join(dir, filename);
};
const resolver = new TextFile(
this,
path.posix.join(PROJEN_DIR, "jest-snapshot-resolver.js")
);
if (!resolver.marker) {
resolver.addLine(`// ${resolver.marker}`);
}
resolver.addLine('const path = require("path");');
resolver.addLine(`const libtest = "${libtest}";`);
resolver.addLine(`const srctest= "${srctest}";`);
resolver.addLine("module.exports = {");
resolver.addLine(
` resolveSnapshotPath: ${resolveSnapshotPath.toString()},`
);
resolver.addLine(` resolveTestPath: ${resolveTestPath.toString()},`);
resolver.addLine(
" testPathForConsistencyCheck: path.join('some', '__tests__', 'example.test.js')"
);
resolver.addLine("};");
jest.addSnapshotResolver(`./${resolver.path}`);
}
private addJestNoCompile(jest: Jest) |
}
class SampleCode extends Component {
constructor(project: TypeScriptProject) {
super(project);
const srcCode = [
"export class Hello {",
" public sayHello() {",
" return 'hello, world!';",
" }",
"}",
].join("\n");
const testCode = [
"import { Hello } from '../src';",
"",
"test('hello', () => {",
" expect(new Hello().sayHello()).toBe('hello, world!');",
"});",
].join("\n");
new SampleDir(project, project.srcdir, {
files: {
"index.ts": srcCode,
},
});
if (project.jest) {
new SampleDir(project, project.testdir, {
files: {
"hello.test.ts": testCode,
},
});
}
}
}
/**
* TypeScript app.
*
* @pjid typescript-app
*/
export class TypeScriptAppProject extends TypeScriptProject {
constructor(options: TypeScriptProjectOptions) {
super({
allowLibraryDependencies: false,
releaseWorkflow: false,
entrypoint: "", // "main" is not needed in typescript apps
package: false,
...options,
});
}
}
/**
* @deprecated use `TypeScriptProject`
*/
export class TypeScriptLibraryProject extends TypeScriptProject {}
/**
* @deprecated use TypeScriptProjectOptions
*/
export interface TypeScriptLibraryProjectOptions
extends TypeScriptProjectOptions {}
/**
* @internal
*/
export function mergeTsconfigOptions(
...options: (TypescriptConfigOptions | undefined)[]
): TypescriptConfigOptions {
const definedOptions = options.filter(Boolean) as TypescriptConfigOptions[];
return definedOptions.reduce<TypescriptConfigOptions>(
(previous, current) => ({
...previous,
...current,
include: [...(previous.include ?? []), ...(current.include ?? [])],
exclude: [...(previous.exclude ?? []), ...(current.exclude ?? [])],
compilerOptions: {
...previous.compilerOptions,
...current.compilerOptions,
},
}),
{ compilerOptions: {} }
);
}
| {
this.addDevDeps(
`@types/jest${jest.jestVersion}`,
`ts-jest${jest.jestVersion}`
);
jest.addTestMatch(`<rootDir>/${this.srcdir}/**/__tests__/**/*.ts?(x)`);
jest.addTestMatch(
`<rootDir>/(${this.testdir}|${this.srcdir})/**/*(*.)@(spec|test).ts?(x)`
);
// add relevant deps
if (!jest.config.preset) {
jest.config.preset = "ts-jest";
}
jest.config.globals = deepMerge([
{
"ts-jest": {
tsconfig: this.tsconfigDev.fileName,
},
},
jest.config.globals,
]);
} | identifier_body |
typescript.ts | import * as path from "path";
import * as semver from "semver";
import { PROJEN_DIR, PROJEN_RC } from "../common";
import { Component } from "../component";
import {
Eslint,
EslintOptions,
Jest,
NodeProject,
NodeProjectOptions,
TypeScriptCompilerOptions,
TypescriptConfig,
TypescriptConfigOptions,
} from "../javascript";
import { SampleDir } from "../sample-file";
import { Task } from "../task";
import { TextFile } from "../textfile";
import {
Projenrc as ProjenrcTs,
ProjenrcOptions as ProjenrcTsOptions,
TypedocDocgen,
} from "../typescript";
import { deepMerge } from "../util";
export interface TypeScriptProjectOptions extends NodeProjectOptions {
/**
* Typescript artifacts output directory
*
* @default "lib"
*/
readonly libdir?: string;
/**
* Typescript sources directory.
*
* @default "src"
*/
readonly srcdir?: string;
/**
* Jest tests directory. Tests files should be named `xxx.test.ts`.
*
* If this directory is under `srcdir` (e.g. `src/test`, `src/__tests__`),
* then tests are going to be compiled into `lib/` and executed as javascript.
* If the test directory is outside of `src`, then we configure jest to
* compile the code in-memory.
*
* @default "test"
*/
readonly testdir?: string;
/**
* Setup eslint.
*
* @default true
*/
readonly eslint?: boolean;
/**
* Eslint options
* @default - opinionated default options
*/
readonly eslintOptions?: EslintOptions;
/**
* TypeScript version to use.
*
* NOTE: Typescript is not semantically versioned and should remain on the
* same minor, so we recommend using a `~` dependency (e.g. `~1.2.3`).
*
* @default "latest"
*/
readonly typescriptVersion?: string;
/**
* Docgen by Typedoc
*
* @default false
*/
readonly docgen?: boolean;
/**
* Docs directory
*
* @default "docs"
*/
readonly docsDirectory?: string;
/**
* Custom TSConfig
* @default - default options
*/
readonly tsconfig?: TypescriptConfigOptions;
/**
* Custom tsconfig options for the development tsconfig.json file (used for testing).
* @default - use the production tsconfig options
*/
readonly tsconfigDev?: TypescriptConfigOptions;
/**
* The name of the development tsconfig.json file.
*
* @default "tsconfig.dev.json"
*/
readonly tsconfigDevFile?: string;
/**
* Do not generate a `tsconfig.json` file (used by jsii projects since
* tsconfig.json is generated by the jsii compiler).
*
* @default false
*/
readonly disableTsconfig?: boolean;
/**
* Do not generate a `tsconfig.dev.json` file.
*
* @default false
*/
readonly disableTsconfigDev?: boolean;
/**
* Generate one-time sample in `src/` and `test/` if there are no files there.
* @default true
*/
readonly sampleCode?: boolean;
/**
* The .d.ts file that includes the type declarations for this module.
* @default - .d.ts file derived from the project's entrypoint (usually lib/index.d.ts)
*/
readonly entrypointTypes?: string;
/**
* Use TypeScript for your projenrc file (`.projenrc.ts`).
*
* @default false
* @pjnew true
*/
readonly projenrcTs?: boolean;
/**
* Options for .projenrc.ts
*/
readonly projenrcTsOptions?: ProjenrcTsOptions;
}
/**
* TypeScript project
* @pjid typescript
*/
export class TypeScriptProject extends NodeProject {
public readonly docgen?: boolean;
public readonly docsDirectory: string;
public readonly eslint?: Eslint;
public readonly tsconfigEslint?: TypescriptConfig;
public readonly tsconfig?: TypescriptConfig;
/**
* A typescript configuration file which covers all files (sources, tests, projen).
*/
public readonly tsconfigDev: TypescriptConfig;
/**
* The directory in which the .ts sources reside.
*/
public readonly srcdir: string;
/**
* The directory in which compiled .js files reside.
*/
public readonly libdir: string;
/**
* The directory in which tests reside.
*/
public readonly testdir: string;
/**
* The "watch" task.
*/
public readonly watchTask: Task;
constructor(options: TypeScriptProjectOptions) {
super({
...options,
// disable .projenrc.js if typescript is enabled
projenrcJs: options.projenrcTs ? false : options.projenrcJs,
jestOptions: {
...options.jestOptions,
jestConfig: {
...options.jestOptions?.jestConfig,
testMatch: options.jestOptions?.jestConfig?.testMatch ?? [],
},
},
});
this.srcdir = options.srcdir ?? "src";
this.libdir = options.libdir ?? "lib";
this.docgen = options.docgen;
this.docsDirectory = options.docsDirectory ?? "docs/";
this.compileTask.exec("tsc --build");
this.watchTask = this.addTask("watch", {
description: "Watch & compile in the background",
exec: "tsc --build -w",
});
this.testdir = options.testdir ?? "test";
this.gitignore.include(`/${this.testdir}/`);
this.npmignore?.exclude(`/${this.testdir}/`);
// if the test directory is under `src/`, then we will run our tests against
// the javascript files and not let jest compile it for us.
const compiledTests = this.testdir.startsWith(this.srcdir + path.posix.sep);
if (options.entrypointTypes || this.entrypoint !== "") {
const entrypointTypes =
options.entrypointTypes ??
`${path
.join(
path.dirname(this.entrypoint),
path.basename(this.entrypoint, ".js")
)
.replace(/\\/g, "/")}.d.ts`;
this.package.addField("types", entrypointTypes);
}
const compilerOptionDefaults: TypeScriptCompilerOptions = {
alwaysStrict: true,
declaration: true,
esModuleInterop: true,
experimentalDecorators: true,
inlineSourceMap: true,
inlineSources: true,
lib: ["es2019"],
module: "CommonJS",
noEmitOnError: false,
noFallthroughCasesInSwitch: true,
noImplicitAny: true,
noImplicitReturns: true,
noImplicitThis: true,
noUnusedLocals: true,
noUnusedParameters: true,
resolveJsonModule: true,
strict: true,
strictNullChecks: true,
strictPropertyInitialization: true,
stripInternal: true,
target: "ES2019",
};
if (options.disableTsconfigDev && options.disableTsconfig) {
throw new Error(
"Cannot specify both 'disableTsconfigDev' and 'disableTsconfig' fields."
);
}
if (!options.disableTsconfig) {
this.tsconfig = new TypescriptConfig(
this,
mergeTsconfigOptions(
{
include: [`${this.srcdir}/**/*.ts`],
// exclude: ['node_modules'], // TODO: shouldn't we exclude node_modules?
compilerOptions: {
rootDir: this.srcdir,
outDir: this.libdir,
...compilerOptionDefaults,
},
},
options.tsconfig
)
);
}
if (options.disableTsconfigDev) {
this.tsconfigDev = this.tsconfig!; | this.tsconfigDev = new TypescriptConfig(
this,
mergeTsconfigOptions(
{
fileName: tsconfigDevFile,
include: [
PROJEN_RC,
`${this.srcdir}/**/*.ts`,
`${this.testdir}/**/*.ts`,
],
exclude: ["node_modules"],
compilerOptions: compilerOptionDefaults,
},
options.tsconfig,
options.tsconfigDev
)
);
}
this.gitignore.include(`/${this.srcdir}/`);
this.npmignore?.exclude(`/${this.srcdir}/`);
if (this.srcdir !== this.libdir) {
// separated, can ignore the entire libdir
this.gitignore.exclude(`/${this.libdir}`);
} else {
// collocated, can only ignore the compiled output
this.gitignore.exclude(`/${this.libdir}/**/*.js`);
this.gitignore.exclude(`/${this.libdir}/**/*.d.ts`);
this.gitignore.exclude(`/${this.libdir}/**/*.d.ts.map`);
}
this.npmignore?.include(`/${this.libdir}/`);
this.npmignore?.include(`/${this.libdir}/**/*.js`);
this.npmignore?.include(`/${this.libdir}/**/*.d.ts`);
this.gitignore.exclude("/dist/");
this.npmignore?.exclude("dist"); // jsii-pacmak expects this to be "dist" and not "/dist". otherwise it will tamper with it
this.npmignore?.exclude("/tsconfig.json");
this.npmignore?.exclude("/.github/");
this.npmignore?.exclude("/.vscode/");
this.npmignore?.exclude("/.idea/");
this.npmignore?.exclude("/.projenrc.js");
this.npmignore?.exclude("tsconfig.tsbuildinfo");
if (this.jest) {
if (compiledTests) {
this.addJestCompiled(this.jest);
} else {
this.addJestNoCompile(this.jest);
}
}
if (options.eslint ?? true) {
this.eslint = new Eslint(this, {
tsconfigPath: `./${this.tsconfigDev.fileName}`,
dirs: [this.srcdir],
devdirs: [this.testdir, "build-tools"],
fileExtensions: [".ts", ".tsx"],
lintProjenRc: false,
...options.eslintOptions,
});
this.tsconfigEslint = this.tsconfigDev;
}
if (!this.parent && options.projenrcTs) {
new ProjenrcTs(this, options.projenrcTsOptions);
}
const tsver = options.typescriptVersion
? `@${options.typescriptVersion}`
: "";
this.addDevDeps(
`typescript${tsver}`,
// @types/node versions numbers match the node runtime versions' major.minor, however, new
// releases are only created when API changes are included in a node release... We might for
// example have dependencies that require `node >= 12.22`, but as 12.21 and 12.22 did not
// include API changes, `@types/node@12.20.x` is the "correct" version to use. As it is not
// possible to easily determine the correct version to use, we pick up the latest version.
//
// Additionally, we default to tracking the 12.x line, as the current earliest LTS release of
// node is 12.x, so this is what corresponds to the broadest compatibility with supported node
// runtimes.
`@types/node@^${semver.major(this.package.minNodeVersion ?? "16.0.0")}`
);
// generate sample code in `src` and `lib` if these directories are empty or non-existent.
if (options.sampleCode ?? true) {
new SampleCode(this);
}
if (this.docgen) {
new TypedocDocgen(this);
}
}
/**
* Tests are compiled to `lib/TESTDIR`, so we don't need jest to compile them
* for us. just run them directly from javascript.
*/
private addJestCompiled(jest: Jest) {
this.addDevDeps(`@types/jest${jest.jestVersion}`);
const testout = path.posix.relative(this.srcdir, this.testdir);
const libtest = path.posix.join(this.libdir, testout);
const srctest = this.testdir;
this.npmignore?.exclude(`/${libtest}/`);
jest.addTestMatch(`**/${libtest}/**/?(*.)+(spec|test).js?(x)`);
jest.addWatchIgnorePattern(`/${this.srcdir}/`);
const resolveSnapshotPath = (test: string, ext: string) => {
const fullpath = test.replace(libtest, srctest);
return path.join(
path.dirname(fullpath),
"__snapshots__",
path.basename(fullpath, ".js") + ".ts" + ext
);
};
const resolveTestPath = (snap: string, ext: string) => {
const filename = path.basename(snap, ".ts" + ext) + ".js";
const dir = path.dirname(path.dirname(snap)).replace(srctest, libtest);
return path.join(dir, filename);
};
const resolver = new TextFile(
this,
path.posix.join(PROJEN_DIR, "jest-snapshot-resolver.js")
);
if (!resolver.marker) {
resolver.addLine(`// ${resolver.marker}`);
}
resolver.addLine('const path = require("path");');
resolver.addLine(`const libtest = "${libtest}";`);
resolver.addLine(`const srctest= "${srctest}";`);
resolver.addLine("module.exports = {");
resolver.addLine(
` resolveSnapshotPath: ${resolveSnapshotPath.toString()},`
);
resolver.addLine(` resolveTestPath: ${resolveTestPath.toString()},`);
resolver.addLine(
" testPathForConsistencyCheck: path.join('some', '__tests__', 'example.test.js')"
);
resolver.addLine("};");
jest.addSnapshotResolver(`./${resolver.path}`);
}
private addJestNoCompile(jest: Jest) {
this.addDevDeps(
`@types/jest${jest.jestVersion}`,
`ts-jest${jest.jestVersion}`
);
jest.addTestMatch(`<rootDir>/${this.srcdir}/**/__tests__/**/*.ts?(x)`);
jest.addTestMatch(
`<rootDir>/(${this.testdir}|${this.srcdir})/**/*(*.)@(spec|test).ts?(x)`
);
// add relevant deps
if (!jest.config.preset) {
jest.config.preset = "ts-jest";
}
jest.config.globals = deepMerge([
{
"ts-jest": {
tsconfig: this.tsconfigDev.fileName,
},
},
jest.config.globals,
]);
}
}
class SampleCode extends Component {
constructor(project: TypeScriptProject) {
super(project);
const srcCode = [
"export class Hello {",
" public sayHello() {",
" return 'hello, world!';",
" }",
"}",
].join("\n");
const testCode = [
"import { Hello } from '../src';",
"",
"test('hello', () => {",
" expect(new Hello().sayHello()).toBe('hello, world!');",
"});",
].join("\n");
new SampleDir(project, project.srcdir, {
files: {
"index.ts": srcCode,
},
});
if (project.jest) {
new SampleDir(project, project.testdir, {
files: {
"hello.test.ts": testCode,
},
});
}
}
}
/**
* TypeScript app.
*
* @pjid typescript-app
*/
export class TypeScriptAppProject extends TypeScriptProject {
constructor(options: TypeScriptProjectOptions) {
super({
allowLibraryDependencies: false,
releaseWorkflow: false,
entrypoint: "", // "main" is not needed in typescript apps
package: false,
...options,
});
}
}
/**
* @deprecated use `TypeScriptProject`
*/
export class TypeScriptLibraryProject extends TypeScriptProject {}
/**
* @deprecated use TypeScriptProjectOptions
*/
export interface TypeScriptLibraryProjectOptions
extends TypeScriptProjectOptions {}
/**
* @internal
*/
export function mergeTsconfigOptions(
...options: (TypescriptConfigOptions | undefined)[]
): TypescriptConfigOptions {
const definedOptions = options.filter(Boolean) as TypescriptConfigOptions[];
return definedOptions.reduce<TypescriptConfigOptions>(
(previous, current) => ({
...previous,
...current,
include: [...(previous.include ?? []), ...(current.include ?? [])],
exclude: [...(previous.exclude ?? []), ...(current.exclude ?? [])],
compilerOptions: {
...previous.compilerOptions,
...current.compilerOptions,
},
}),
{ compilerOptions: {} }
);
} | } else {
const tsconfigDevFile = options.tsconfigDevFile ?? "tsconfig.dev.json"; | random_line_split |
builder.py | import os
import time
import numpy as np
import moderngl as mg
import imageio as ii
from PyQt5 import QtWidgets
from PyQt5.QtCore import QThread
from PyQt5.QtCore import pyqtSignal
from PyQt5.Qt import Qt
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
# global consts: do not change during runtime
width, height = 600, 400
capture_width, capture_height = 1920, 1080
record_width, record_height = 1920, 1088
def log(*arg):
"""
wraps built-in print for additional extendability
"""
context = str(*arg)
print("[Texture Builder] {}".format(context))
class FSEventHandler(FileSystemEventHandler):
"""
simple file system event handler for watchdog observer calls callback on mod
"""
def __init__(self, callback):
super(FSEventHandler, self).__init__()
self.callback = callback
def on_modified(self, e):
return self.callback()
class WatchDog(QThread):
"""
watching ./gl directory, on modified, call given bark_callback
running on separated thread
"""
bark = pyqtSignal()
def __init__(self, bark_callback):
super(WatchDog, self).__init__()
self.ehandler = FSEventHandler(self.on_watch)
self.bark.connect(bark_callback)
def on_watch(self):
self.bark.emit()
def run(self):
"""
start oberserver in another separated thread, and WatchDog thread only monitors it
"""
observer = Observer()
observer.schedule(self.ehandler, "./gl", True)
observer.start()
observer.join()
class GLUtil(object):
"""
some utility methods
"""
@classmethod
def screen_vao(cls, gl, program):
"""
generate simplest screen filling quad
"""
vbo = [
-1.0, -1.0,
+1.0, -1.0,
-1.0, +1.0,
+1.0, +1.0,
]
vbo = np.array(vbo).astype(np.float32)
vbo = [(gl.buffer(vbo), "2f", "in_pos")]
ibo = [0, 1, 2, 1, 2, 3]
ibo = np.array(ibo).astype(np.int32)
ibo = gl.buffer(ibo)
vao = gl.vertex_array(program, vbo, ibo)
return vao
@classmethod
def shader(cls, path, **karg):
context = None
with open(path, 'r') as fp:
context = fp.read()
for k, v in karg.items():
context = context.replace(k, v)
lines = []
for line in context.splitlines():
if line.startswith("#include "):
lines.append(GLUtil.shader(line.split(" ")[1]))
continue
lines.append(line)
return context
@classmethod
def serialize_buffer(cls, gl_buffer, w, h):
"""
need better performance here
"""
data = gl_buffer.read()
data = np.frombuffer(data, dtype=np.float32)
data = data.reshape((h, w, 4))
data = np.multiply(data, 255.0)
data = data.astype(np.uint8)
return data
class Renderer(QtWidgets.QOpenGLWidget):
def __init__(self):
super(Renderer, self).__init__()
self.setMinimumSize(width, height)
self.setMaximumSize(width, height)
self.setWindowFlag(Qt.WindowStaysOnTopHint)
self.watchdog = WatchDog(self.recompile)
self.watchdog.start()
def get_filepath(self, template):
i = 0
file_name = template.format(i)
while os.path.exists(file_name):
i += 1
file_name = template.format(i)
return file_name
def build_prog(self, gl):
"""
.
"""
prog = gl.program(
vertex_shader=GLUtil.shader("./gl/vs.glsl"),
fragment_shader=GLUtil.shader("./gl/fs.glsl"),
)
u_time = None
u_width = None
u_height = None
if "u_time" in prog:
u_time = prog["u_time"]
if "u_width" in prog:
u_width = prog["u_width"]
if "u_height" in prog:
u_height = prog["u_height"]
return prog, [u_time, u_width, u_height]
def set_gpu_wh(self, width, height):
if self.u_width:
self.u_width.value = width
if self.u_cswidth:
self.u_cswidth.value = width
if self.u_height:
self.u_height.value = height
if self.u_csheight:
self.u_csheight.value = height
def build_cs(self, gl):
"""
simple compute shader run after screen rendering
"""
cs = gl.compute_shader(GLUtil.shader("./gl/cs/cs.glsl"))
u_time = None
u_width = None
u_height = None
if "u_time" in cs:
u_time = cs["u_time"]
if "u_width" in cs:
u_width = cs["u_width"]
if "u_height" in cs:
u_height = cs["u_height"]
buf_in = gl.buffer(reserve=width * height * 4 * 4)
buf_in.bind_to_storage_buffer(0)
buf_out = gl.buffer(reserve=width * height * 4 * 4)
buf_out.bind_to_storage_buffer(1)
return cs, [u_time, u_width, u_height], [buf_in, buf_out]
def recompile(self):
"""
called everytime any files under gl directory changes
"""
self.vaos = []
try:
self.program, uniforms = self.build_prog(self.gl)
self.u_time, self.u_width, self.u_height = uniforms
vao = GLUtil.screen_vao(self.gl, self.program)
self.vaos.append(vao)
self.compute, uniforms, buffers = self.build_cs(self.gl)
self.u_cstime, self.u_cswidth, self.u_csheight = uniforms
self.buf_in, self.buf_out = buffers
self.set_gpu_wh(width, height)
self.gx, self.gy = int(width / 8), int(height / 8)
self.set_gpu_time()
log("[Renderer] shader recompiled.")
except Exception as e:
log(e)
def initializeGL(self):
"""
called only once when start
"""
self.gl = mg.create_context()
self.recompile()
self.to_capture = False
self.capture_texture = self.gl.texture((capture_width, capture_height), 4, dtype="f4")
capture_framebuffer = self.gl.framebuffer([self.capture_texture])
self.capture_scope = self.gl.scope(capture_framebuffer)
self.to_record = False
self.record_texture = self.gl.texture((record_width, record_height), 4, dtype="f4")
record_framebuffer = self.gl.framebuffer([self.record_texture])
self.record_scope = self.gl.scope(record_framebuffer)
self.recording = None
self.to_capture_buffer_in = False
self.to_capture_buffer_out = False
def set_gpu_time(self):
t = time.time() % 1000
if self.u_time:
self.u_time.value = t
if self.u_cstime:
self.u_cstime.value = t
def paintGL(self):
"""
called every frame
"""
# run compute shader
self.compute.run(self.gx, self.gy)
# update screen
self.set_gpu_time()
for vao in self.vaos:
vao.render()
# save to png
if self.to_capture:
log("capturing..")
with self.capture_scope:
self.set_gpu_wh(capture_width, capture_height)
for vao in self.vaos:
vao.render()
log("captured! storing..")
dst = self.get_filepath("./capture_{}.jpg")
data = GLUtil.serialize_buffer(self.capture_texture, capture_width, capture_height)
data = data[:, :, :-1]
ii.imwrite(dst, data)
log("stored!")
self.set_gpu_wh(width, height)
self.to_capture = False
# init save to video
if self.to_record:
with self.record_scope:
self.set_gpu_wh(record_width, record_height)
for vao in self.vaos:
vao.render()
if not self.recording:
log("start recording..")
dst = self.get_filepath("./capture_{}.mp4")
self.recording = ii.get_writer(dst, fps=30)
data = GLUtil.serialize_buffer(self.record_texture, record_width, record_height)
self.recording.append_data(data)
self.set_gpu_wh(width, height)
# close save to video
else:
if self.recording:
self.recording.close()
log("finished recording!")
self.recording = None
if self.to_capture_buffer_in:
dst = self.get_filepath("./buf_in_{}.png")
data = GLUtil.serialize_buffer(self.buf_in, width, height)
ii.imwrite(dst, data)
self.to_capture_buffer_in = False
log("buf_in captured")
if self.to_capture_buffer_out:
dst = self.get_filepath("./buf_out_{}.png")
data = GLUtil.serialize_buffer(self.buf_out, width, height)
ii.imwrite(dst, data)
self.to_capture_buffer_out = False
log("buf_out captured")
# force update frame
self.update()
def keyPressEvent(self, e):
"""
left ctrl: start/stop recording on press/release
"""
k = e.key()
# left ctrl
if k == 16777249:
self.to_record = True
def keyReleaseEvent(self, e):
"""
space bar: capture frame buffer
z: capture buf_in buffer
x: capture buf_out buffer
left ctrl: start/stop recording on press/release
"""
k = e.key()
# space bar
if k == 32:
self.to_capture = True
# z
elif k == 90:
|
# x
elif k == 88:
self.to_capture_buffer_out = True
# left ctrl
elif k == 16777249:
self.to_record = False
# undefined
else:
log("undefined key pressed: {}".format(k))
def main():
app = QtWidgets.QApplication([])
renderer = Renderer()
renderer.show()
app.exec()
if __name__ == "__main__":
main()
| self.to_capture_buffer_in = True | conditional_block |
builder.py | import os
import time
import numpy as np
import moderngl as mg
import imageio as ii
from PyQt5 import QtWidgets
from PyQt5.QtCore import QThread
from PyQt5.QtCore import pyqtSignal
from PyQt5.Qt import Qt
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
# global consts: do not change during runtime
width, height = 600, 400
capture_width, capture_height = 1920, 1080
record_width, record_height = 1920, 1088
def log(*arg):
"""
wraps built-in print for additional extendability
"""
context = str(*arg)
print("[Texture Builder] {}".format(context))
class FSEventHandler(FileSystemEventHandler):
"""
simple file system event handler for watchdog observer calls callback on mod
"""
def __init__(self, callback):
super(FSEventHandler, self).__init__()
self.callback = callback
def on_modified(self, e):
return self.callback()
class WatchDog(QThread):
"""
watching ./gl directory, on modified, call given bark_callback
running on separated thread
"""
bark = pyqtSignal()
def __init__(self, bark_callback):
super(WatchDog, self).__init__()
self.ehandler = FSEventHandler(self.on_watch)
self.bark.connect(bark_callback)
def on_watch(self):
self.bark.emit()
def run(self):
"""
start oberserver in another separated thread, and WatchDog thread only monitors it
"""
observer = Observer()
observer.schedule(self.ehandler, "./gl", True)
observer.start()
observer.join()
class GLUtil(object):
"""
some utility methods
"""
@classmethod
def | (cls, gl, program):
"""
generate simplest screen filling quad
"""
vbo = [
-1.0, -1.0,
+1.0, -1.0,
-1.0, +1.0,
+1.0, +1.0,
]
vbo = np.array(vbo).astype(np.float32)
vbo = [(gl.buffer(vbo), "2f", "in_pos")]
ibo = [0, 1, 2, 1, 2, 3]
ibo = np.array(ibo).astype(np.int32)
ibo = gl.buffer(ibo)
vao = gl.vertex_array(program, vbo, ibo)
return vao
@classmethod
def shader(cls, path, **karg):
context = None
with open(path, 'r') as fp:
context = fp.read()
for k, v in karg.items():
context = context.replace(k, v)
lines = []
for line in context.splitlines():
if line.startswith("#include "):
lines.append(GLUtil.shader(line.split(" ")[1]))
continue
lines.append(line)
return context
@classmethod
def serialize_buffer(cls, gl_buffer, w, h):
"""
need better performance here
"""
data = gl_buffer.read()
data = np.frombuffer(data, dtype=np.float32)
data = data.reshape((h, w, 4))
data = np.multiply(data, 255.0)
data = data.astype(np.uint8)
return data
class Renderer(QtWidgets.QOpenGLWidget):
def __init__(self):
super(Renderer, self).__init__()
self.setMinimumSize(width, height)
self.setMaximumSize(width, height)
self.setWindowFlag(Qt.WindowStaysOnTopHint)
self.watchdog = WatchDog(self.recompile)
self.watchdog.start()
def get_filepath(self, template):
i = 0
file_name = template.format(i)
while os.path.exists(file_name):
i += 1
file_name = template.format(i)
return file_name
def build_prog(self, gl):
"""
.
"""
prog = gl.program(
vertex_shader=GLUtil.shader("./gl/vs.glsl"),
fragment_shader=GLUtil.shader("./gl/fs.glsl"),
)
u_time = None
u_width = None
u_height = None
if "u_time" in prog:
u_time = prog["u_time"]
if "u_width" in prog:
u_width = prog["u_width"]
if "u_height" in prog:
u_height = prog["u_height"]
return prog, [u_time, u_width, u_height]
def set_gpu_wh(self, width, height):
if self.u_width:
self.u_width.value = width
if self.u_cswidth:
self.u_cswidth.value = width
if self.u_height:
self.u_height.value = height
if self.u_csheight:
self.u_csheight.value = height
def build_cs(self, gl):
"""
simple compute shader run after screen rendering
"""
cs = gl.compute_shader(GLUtil.shader("./gl/cs/cs.glsl"))
u_time = None
u_width = None
u_height = None
if "u_time" in cs:
u_time = cs["u_time"]
if "u_width" in cs:
u_width = cs["u_width"]
if "u_height" in cs:
u_height = cs["u_height"]
buf_in = gl.buffer(reserve=width * height * 4 * 4)
buf_in.bind_to_storage_buffer(0)
buf_out = gl.buffer(reserve=width * height * 4 * 4)
buf_out.bind_to_storage_buffer(1)
return cs, [u_time, u_width, u_height], [buf_in, buf_out]
def recompile(self):
"""
called everytime any files under gl directory changes
"""
self.vaos = []
try:
self.program, uniforms = self.build_prog(self.gl)
self.u_time, self.u_width, self.u_height = uniforms
vao = GLUtil.screen_vao(self.gl, self.program)
self.vaos.append(vao)
self.compute, uniforms, buffers = self.build_cs(self.gl)
self.u_cstime, self.u_cswidth, self.u_csheight = uniforms
self.buf_in, self.buf_out = buffers
self.set_gpu_wh(width, height)
self.gx, self.gy = int(width / 8), int(height / 8)
self.set_gpu_time()
log("[Renderer] shader recompiled.")
except Exception as e:
log(e)
def initializeGL(self):
"""
called only once when start
"""
self.gl = mg.create_context()
self.recompile()
self.to_capture = False
self.capture_texture = self.gl.texture((capture_width, capture_height), 4, dtype="f4")
capture_framebuffer = self.gl.framebuffer([self.capture_texture])
self.capture_scope = self.gl.scope(capture_framebuffer)
self.to_record = False
self.record_texture = self.gl.texture((record_width, record_height), 4, dtype="f4")
record_framebuffer = self.gl.framebuffer([self.record_texture])
self.record_scope = self.gl.scope(record_framebuffer)
self.recording = None
self.to_capture_buffer_in = False
self.to_capture_buffer_out = False
def set_gpu_time(self):
t = time.time() % 1000
if self.u_time:
self.u_time.value = t
if self.u_cstime:
self.u_cstime.value = t
def paintGL(self):
"""
called every frame
"""
# run compute shader
self.compute.run(self.gx, self.gy)
# update screen
self.set_gpu_time()
for vao in self.vaos:
vao.render()
# save to png
if self.to_capture:
log("capturing..")
with self.capture_scope:
self.set_gpu_wh(capture_width, capture_height)
for vao in self.vaos:
vao.render()
log("captured! storing..")
dst = self.get_filepath("./capture_{}.jpg")
data = GLUtil.serialize_buffer(self.capture_texture, capture_width, capture_height)
data = data[:, :, :-1]
ii.imwrite(dst, data)
log("stored!")
self.set_gpu_wh(width, height)
self.to_capture = False
# init save to video
if self.to_record:
with self.record_scope:
self.set_gpu_wh(record_width, record_height)
for vao in self.vaos:
vao.render()
if not self.recording:
log("start recording..")
dst = self.get_filepath("./capture_{}.mp4")
self.recording = ii.get_writer(dst, fps=30)
data = GLUtil.serialize_buffer(self.record_texture, record_width, record_height)
self.recording.append_data(data)
self.set_gpu_wh(width, height)
# close save to video
else:
if self.recording:
self.recording.close()
log("finished recording!")
self.recording = None
if self.to_capture_buffer_in:
dst = self.get_filepath("./buf_in_{}.png")
data = GLUtil.serialize_buffer(self.buf_in, width, height)
ii.imwrite(dst, data)
self.to_capture_buffer_in = False
log("buf_in captured")
if self.to_capture_buffer_out:
dst = self.get_filepath("./buf_out_{}.png")
data = GLUtil.serialize_buffer(self.buf_out, width, height)
ii.imwrite(dst, data)
self.to_capture_buffer_out = False
log("buf_out captured")
# force update frame
self.update()
def keyPressEvent(self, e):
"""
left ctrl: start/stop recording on press/release
"""
k = e.key()
# left ctrl
if k == 16777249:
self.to_record = True
def keyReleaseEvent(self, e):
"""
space bar: capture frame buffer
z: capture buf_in buffer
x: capture buf_out buffer
left ctrl: start/stop recording on press/release
"""
k = e.key()
# space bar
if k == 32:
self.to_capture = True
# z
elif k == 90:
self.to_capture_buffer_in = True
# x
elif k == 88:
self.to_capture_buffer_out = True
# left ctrl
elif k == 16777249:
self.to_record = False
# undefined
else:
log("undefined key pressed: {}".format(k))
def main():
app = QtWidgets.QApplication([])
renderer = Renderer()
renderer.show()
app.exec()
if __name__ == "__main__":
main()
| screen_vao | identifier_name |
builder.py | import os
import time
import numpy as np
import moderngl as mg
import imageio as ii
from PyQt5 import QtWidgets
from PyQt5.QtCore import QThread
from PyQt5.QtCore import pyqtSignal
from PyQt5.Qt import Qt
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
# global consts: do not change during runtime
width, height = 600, 400
capture_width, capture_height = 1920, 1080
record_width, record_height = 1920, 1088
def log(*arg):
"""
wraps built-in print for additional extendability
"""
context = str(*arg)
print("[Texture Builder] {}".format(context))
class FSEventHandler(FileSystemEventHandler):
"""
simple file system event handler for watchdog observer calls callback on mod
"""
def __init__(self, callback):
super(FSEventHandler, self).__init__()
self.callback = callback
def on_modified(self, e):
return self.callback()
class WatchDog(QThread):
"""
watching ./gl directory, on modified, call given bark_callback
running on separated thread
"""
bark = pyqtSignal()
def __init__(self, bark_callback):
super(WatchDog, self).__init__()
self.ehandler = FSEventHandler(self.on_watch)
self.bark.connect(bark_callback)
def on_watch(self):
self.bark.emit()
def run(self):
"""
start oberserver in another separated thread, and WatchDog thread only monitors it
"""
observer = Observer()
observer.schedule(self.ehandler, "./gl", True)
observer.start()
observer.join()
class GLUtil(object):
"""
some utility methods
"""
@classmethod
def screen_vao(cls, gl, program):
"""
generate simplest screen filling quad
"""
vbo = [
-1.0, -1.0,
+1.0, -1.0,
-1.0, +1.0,
+1.0, +1.0,
]
vbo = np.array(vbo).astype(np.float32)
vbo = [(gl.buffer(vbo), "2f", "in_pos")]
ibo = [0, 1, 2, 1, 2, 3]
ibo = np.array(ibo).astype(np.int32)
ibo = gl.buffer(ibo)
vao = gl.vertex_array(program, vbo, ibo)
return vao
@classmethod
def shader(cls, path, **karg):
context = None
with open(path, 'r') as fp:
context = fp.read()
for k, v in karg.items():
context = context.replace(k, v)
lines = []
for line in context.splitlines():
if line.startswith("#include "):
lines.append(GLUtil.shader(line.split(" ")[1]))
continue
lines.append(line)
return context
@classmethod
def serialize_buffer(cls, gl_buffer, w, h):
"""
need better performance here
"""
data = gl_buffer.read()
data = np.frombuffer(data, dtype=np.float32)
data = data.reshape((h, w, 4))
data = np.multiply(data, 255.0)
data = data.astype(np.uint8)
return data
class Renderer(QtWidgets.QOpenGLWidget):
def __init__(self):
super(Renderer, self).__init__()
self.setMinimumSize(width, height)
self.setMaximumSize(width, height)
self.setWindowFlag(Qt.WindowStaysOnTopHint)
self.watchdog = WatchDog(self.recompile)
self.watchdog.start()
def get_filepath(self, template):
i = 0
file_name = template.format(i)
while os.path.exists(file_name):
i += 1
file_name = template.format(i)
return file_name
def build_prog(self, gl):
"""
.
"""
prog = gl.program(
vertex_shader=GLUtil.shader("./gl/vs.glsl"),
fragment_shader=GLUtil.shader("./gl/fs.glsl"),
)
u_time = None
u_width = None
u_height = None
if "u_time" in prog:
u_time = prog["u_time"]
if "u_width" in prog:
u_width = prog["u_width"]
if "u_height" in prog:
u_height = prog["u_height"]
return prog, [u_time, u_width, u_height]
def set_gpu_wh(self, width, height):
if self.u_width:
self.u_width.value = width
if self.u_cswidth:
self.u_cswidth.value = width
if self.u_height:
self.u_height.value = height
if self.u_csheight:
self.u_csheight.value = height
def build_cs(self, gl):
"""
simple compute shader run after screen rendering
"""
cs = gl.compute_shader(GLUtil.shader("./gl/cs/cs.glsl"))
u_time = None
u_width = None
u_height = None
if "u_time" in cs:
u_time = cs["u_time"]
if "u_width" in cs:
u_width = cs["u_width"]
if "u_height" in cs:
u_height = cs["u_height"]
buf_in = gl.buffer(reserve=width * height * 4 * 4)
buf_in.bind_to_storage_buffer(0)
buf_out = gl.buffer(reserve=width * height * 4 * 4)
buf_out.bind_to_storage_buffer(1)
return cs, [u_time, u_width, u_height], [buf_in, buf_out]
def recompile(self):
"""
called everytime any files under gl directory changes
"""
self.vaos = []
try:
self.program, uniforms = self.build_prog(self.gl)
self.u_time, self.u_width, self.u_height = uniforms
vao = GLUtil.screen_vao(self.gl, self.program)
self.vaos.append(vao)
self.compute, uniforms, buffers = self.build_cs(self.gl)
self.u_cstime, self.u_cswidth, self.u_csheight = uniforms
self.buf_in, self.buf_out = buffers
self.set_gpu_wh(width, height)
self.gx, self.gy = int(width / 8), int(height / 8)
self.set_gpu_time()
log("[Renderer] shader recompiled.")
except Exception as e:
log(e)
def initializeGL(self):
"""
called only once when start
"""
self.gl = mg.create_context()
self.recompile()
self.to_capture = False
self.capture_texture = self.gl.texture((capture_width, capture_height), 4, dtype="f4")
capture_framebuffer = self.gl.framebuffer([self.capture_texture])
self.capture_scope = self.gl.scope(capture_framebuffer)
self.to_record = False
self.record_texture = self.gl.texture((record_width, record_height), 4, dtype="f4")
record_framebuffer = self.gl.framebuffer([self.record_texture])
self.record_scope = self.gl.scope(record_framebuffer)
self.recording = None
self.to_capture_buffer_in = False
self.to_capture_buffer_out = False
def set_gpu_time(self):
t = time.time() % 1000
if self.u_time:
self.u_time.value = t
if self.u_cstime:
self.u_cstime.value = t
def paintGL(self):
"""
called every frame
"""
# run compute shader
self.compute.run(self.gx, self.gy)
# update screen
self.set_gpu_time()
for vao in self.vaos:
vao.render()
# save to png
if self.to_capture:
log("capturing..")
with self.capture_scope:
self.set_gpu_wh(capture_width, capture_height)
for vao in self.vaos:
vao.render()
log("captured! storing..")
dst = self.get_filepath("./capture_{}.jpg")
data = GLUtil.serialize_buffer(self.capture_texture, capture_width, capture_height)
data = data[:, :, :-1]
ii.imwrite(dst, data)
log("stored!")
self.set_gpu_wh(width, height)
self.to_capture = False
# init save to video
if self.to_record:
with self.record_scope:
self.set_gpu_wh(record_width, record_height)
for vao in self.vaos:
vao.render()
if not self.recording:
log("start recording..")
dst = self.get_filepath("./capture_{}.mp4")
self.recording = ii.get_writer(dst, fps=30)
data = GLUtil.serialize_buffer(self.record_texture, record_width, record_height)
self.recording.append_data(data)
self.set_gpu_wh(width, height)
# close save to video
else:
if self.recording:
self.recording.close()
log("finished recording!")
self.recording = None
if self.to_capture_buffer_in:
dst = self.get_filepath("./buf_in_{}.png")
data = GLUtil.serialize_buffer(self.buf_in, width, height)
ii.imwrite(dst, data)
self.to_capture_buffer_in = False
log("buf_in captured")
if self.to_capture_buffer_out:
dst = self.get_filepath("./buf_out_{}.png")
data = GLUtil.serialize_buffer(self.buf_out, width, height)
ii.imwrite(dst, data)
self.to_capture_buffer_out = False
log("buf_out captured")
# force update frame
self.update()
def keyPressEvent(self, e):
"""
left ctrl: start/stop recording on press/release
"""
k = e.key()
# left ctrl
if k == 16777249:
self.to_record = True
def keyReleaseEvent(self, e):
"""
space bar: capture frame buffer
z: capture buf_in buffer
x: capture buf_out buffer
left ctrl: start/stop recording on press/release
"""
k = e.key()
# space bar
if k == 32:
self.to_capture = True
# z
elif k == 90:
self.to_capture_buffer_in = True
# x
elif k == 88:
self.to_capture_buffer_out = True
# left ctrl
elif k == 16777249:
self.to_record = False
# undefined
else:
log("undefined key pressed: {}".format(k))
| app.exec()
if __name__ == "__main__":
main() | def main():
app = QtWidgets.QApplication([])
renderer = Renderer()
renderer.show() | random_line_split |
builder.py | import os
import time
import numpy as np
import moderngl as mg
import imageio as ii
from PyQt5 import QtWidgets
from PyQt5.QtCore import QThread
from PyQt5.QtCore import pyqtSignal
from PyQt5.Qt import Qt
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
# global consts: do not change during runtime
width, height = 600, 400
capture_width, capture_height = 1920, 1080
record_width, record_height = 1920, 1088
def log(*arg):
"""
wraps built-in print for additional extendability
"""
context = str(*arg)
print("[Texture Builder] {}".format(context))
class FSEventHandler(FileSystemEventHandler):
"""
simple file system event handler for watchdog observer calls callback on mod
"""
def __init__(self, callback):
super(FSEventHandler, self).__init__()
self.callback = callback
def on_modified(self, e):
return self.callback()
class WatchDog(QThread):
"""
watching ./gl directory, on modified, call given bark_callback
running on separated thread
"""
bark = pyqtSignal()
def __init__(self, bark_callback):
super(WatchDog, self).__init__()
self.ehandler = FSEventHandler(self.on_watch)
self.bark.connect(bark_callback)
def on_watch(self):
self.bark.emit()
def run(self):
"""
start oberserver in another separated thread, and WatchDog thread only monitors it
"""
observer = Observer()
observer.schedule(self.ehandler, "./gl", True)
observer.start()
observer.join()
class GLUtil(object):
"""
some utility methods
"""
@classmethod
def screen_vao(cls, gl, program):
"""
generate simplest screen filling quad
"""
vbo = [
-1.0, -1.0,
+1.0, -1.0,
-1.0, +1.0,
+1.0, +1.0,
]
vbo = np.array(vbo).astype(np.float32)
vbo = [(gl.buffer(vbo), "2f", "in_pos")]
ibo = [0, 1, 2, 1, 2, 3]
ibo = np.array(ibo).astype(np.int32)
ibo = gl.buffer(ibo)
vao = gl.vertex_array(program, vbo, ibo)
return vao
@classmethod
def shader(cls, path, **karg):
context = None
with open(path, 'r') as fp:
context = fp.read()
for k, v in karg.items():
context = context.replace(k, v)
lines = []
for line in context.splitlines():
if line.startswith("#include "):
lines.append(GLUtil.shader(line.split(" ")[1]))
continue
lines.append(line)
return context
@classmethod
def serialize_buffer(cls, gl_buffer, w, h):
"""
need better performance here
"""
data = gl_buffer.read()
data = np.frombuffer(data, dtype=np.float32)
data = data.reshape((h, w, 4))
data = np.multiply(data, 255.0)
data = data.astype(np.uint8)
return data
class Renderer(QtWidgets.QOpenGLWidget):
def __init__(self):
|
def get_filepath(self, template):
i = 0
file_name = template.format(i)
while os.path.exists(file_name):
i += 1
file_name = template.format(i)
return file_name
def build_prog(self, gl):
"""
.
"""
prog = gl.program(
vertex_shader=GLUtil.shader("./gl/vs.glsl"),
fragment_shader=GLUtil.shader("./gl/fs.glsl"),
)
u_time = None
u_width = None
u_height = None
if "u_time" in prog:
u_time = prog["u_time"]
if "u_width" in prog:
u_width = prog["u_width"]
if "u_height" in prog:
u_height = prog["u_height"]
return prog, [u_time, u_width, u_height]
def set_gpu_wh(self, width, height):
if self.u_width:
self.u_width.value = width
if self.u_cswidth:
self.u_cswidth.value = width
if self.u_height:
self.u_height.value = height
if self.u_csheight:
self.u_csheight.value = height
def build_cs(self, gl):
"""
simple compute shader run after screen rendering
"""
cs = gl.compute_shader(GLUtil.shader("./gl/cs/cs.glsl"))
u_time = None
u_width = None
u_height = None
if "u_time" in cs:
u_time = cs["u_time"]
if "u_width" in cs:
u_width = cs["u_width"]
if "u_height" in cs:
u_height = cs["u_height"]
buf_in = gl.buffer(reserve=width * height * 4 * 4)
buf_in.bind_to_storage_buffer(0)
buf_out = gl.buffer(reserve=width * height * 4 * 4)
buf_out.bind_to_storage_buffer(1)
return cs, [u_time, u_width, u_height], [buf_in, buf_out]
def recompile(self):
"""
called everytime any files under gl directory changes
"""
self.vaos = []
try:
self.program, uniforms = self.build_prog(self.gl)
self.u_time, self.u_width, self.u_height = uniforms
vao = GLUtil.screen_vao(self.gl, self.program)
self.vaos.append(vao)
self.compute, uniforms, buffers = self.build_cs(self.gl)
self.u_cstime, self.u_cswidth, self.u_csheight = uniforms
self.buf_in, self.buf_out = buffers
self.set_gpu_wh(width, height)
self.gx, self.gy = int(width / 8), int(height / 8)
self.set_gpu_time()
log("[Renderer] shader recompiled.")
except Exception as e:
log(e)
def initializeGL(self):
"""
called only once when start
"""
self.gl = mg.create_context()
self.recompile()
self.to_capture = False
self.capture_texture = self.gl.texture((capture_width, capture_height), 4, dtype="f4")
capture_framebuffer = self.gl.framebuffer([self.capture_texture])
self.capture_scope = self.gl.scope(capture_framebuffer)
self.to_record = False
self.record_texture = self.gl.texture((record_width, record_height), 4, dtype="f4")
record_framebuffer = self.gl.framebuffer([self.record_texture])
self.record_scope = self.gl.scope(record_framebuffer)
self.recording = None
self.to_capture_buffer_in = False
self.to_capture_buffer_out = False
def set_gpu_time(self):
t = time.time() % 1000
if self.u_time:
self.u_time.value = t
if self.u_cstime:
self.u_cstime.value = t
def paintGL(self):
"""
called every frame
"""
# run compute shader
self.compute.run(self.gx, self.gy)
# update screen
self.set_gpu_time()
for vao in self.vaos:
vao.render()
# save to png
if self.to_capture:
log("capturing..")
with self.capture_scope:
self.set_gpu_wh(capture_width, capture_height)
for vao in self.vaos:
vao.render()
log("captured! storing..")
dst = self.get_filepath("./capture_{}.jpg")
data = GLUtil.serialize_buffer(self.capture_texture, capture_width, capture_height)
data = data[:, :, :-1]
ii.imwrite(dst, data)
log("stored!")
self.set_gpu_wh(width, height)
self.to_capture = False
# init save to video
if self.to_record:
with self.record_scope:
self.set_gpu_wh(record_width, record_height)
for vao in self.vaos:
vao.render()
if not self.recording:
log("start recording..")
dst = self.get_filepath("./capture_{}.mp4")
self.recording = ii.get_writer(dst, fps=30)
data = GLUtil.serialize_buffer(self.record_texture, record_width, record_height)
self.recording.append_data(data)
self.set_gpu_wh(width, height)
# close save to video
else:
if self.recording:
self.recording.close()
log("finished recording!")
self.recording = None
if self.to_capture_buffer_in:
dst = self.get_filepath("./buf_in_{}.png")
data = GLUtil.serialize_buffer(self.buf_in, width, height)
ii.imwrite(dst, data)
self.to_capture_buffer_in = False
log("buf_in captured")
if self.to_capture_buffer_out:
dst = self.get_filepath("./buf_out_{}.png")
data = GLUtil.serialize_buffer(self.buf_out, width, height)
ii.imwrite(dst, data)
self.to_capture_buffer_out = False
log("buf_out captured")
# force update frame
self.update()
def keyPressEvent(self, e):
"""
left ctrl: start/stop recording on press/release
"""
k = e.key()
# left ctrl
if k == 16777249:
self.to_record = True
def keyReleaseEvent(self, e):
"""
space bar: capture frame buffer
z: capture buf_in buffer
x: capture buf_out buffer
left ctrl: start/stop recording on press/release
"""
k = e.key()
# space bar
if k == 32:
self.to_capture = True
# z
elif k == 90:
self.to_capture_buffer_in = True
# x
elif k == 88:
self.to_capture_buffer_out = True
# left ctrl
elif k == 16777249:
self.to_record = False
# undefined
else:
log("undefined key pressed: {}".format(k))
def main():
app = QtWidgets.QApplication([])
renderer = Renderer()
renderer.show()
app.exec()
if __name__ == "__main__":
main()
| super(Renderer, self).__init__()
self.setMinimumSize(width, height)
self.setMaximumSize(width, height)
self.setWindowFlag(Qt.WindowStaysOnTopHint)
self.watchdog = WatchDog(self.recompile)
self.watchdog.start() | identifier_body |
wine_recommender.py | import numpy as np
import pandas as pd
import os
import cPickle
from time import time
from sklearn.utils import shuffle
from collections import defaultdict, Counter
import pyspark
from pyspark import SparkContext
from pyspark.mllib.recommendation import ALS, MatrixFactorizationModel
import math
def get_ratings_data(ratings_path):
start = time()
data = cPickle.load(open(ratings_path, 'r'))
end = time()
print "Time Elapsed = {:.3} seconds".format(end - start)
return data
def create_cust_tag_bridge_rdd(sc, data):
'''Create user tags/user ids bride rdd,
create int:cust_tag key value pairs,
spark can't read string user ids'''
unique_user_tags = np.unique([row[0] for row in data])
index_to_int = np.arange(0, len(unique_user_tags) * 100, 100)
cust_tag_bridge = [ (tag_hash, tag_int) for tag_hash, tag_int in zip(unique_user_tags, index_to_int)]
return sc.parallelize(cust_tag_bridge)
def create_products_rdd(products_df):
'''Creates products_rdd
Input: products_df, pandas dataframe
Output: products_rdd, spark rdd'''
# create products_rdd
products_rdd = sc.parallelize(products_df.values.tolist())
# format --> (productKey, (productID, Appellation, Varietal, Vinyard) )
products_rdd = products_rdd.map(lambda row: (row[0], (row[1], row[2], row[3], row[4], row[5]) ) )
return products_rdd
def create_clean_data_rdd(data, cust_tag_bridge_rdd):
'''Transform ratings data into spark readable format --> (user_id, productKey, rating)
Input: data: list, cust_tag_bridge_rdd: spark rdd
Output: clean_data_rdd, spark rdd'''
data_rdd = sc.parallelize(data)
tag_data_bridge_rdd = data_rdd.map(lambda row: (row[0], (row[1], row[2]) ))
clean_data_rdd = \
tag_data_bridge_rdd.sortByKey()\
.join( cust_tag_bridge_rdd.sortByKey())\
.map(lambda row: ( row[1][1], row[1][0][0], row[1][0][1]))
return clean_data_rdd
def get_spark_context(n_cups = 3, local = True, remote_cluster_path=None):
# number of nodes in local spark cluster
n_worker_cups = n_cups
if local == True:
|
elif local == False:
print "Create spark context for remote cluster..."
sc = pyspark.SparkContext(master = remote_cluster_path)
return
else:
print "ERROR: local is set to False, however remote_cluster_path is not specified!"
def get_clean_data_rdd(sc, return_cust_brige_rdd = False):
'''Loads ratings from master file and formats data into model readable form.
data --> (user_id, productKey, rating)'''
# load data
data = get_ratings_data(ratings_path)
# assigne each user hash tag a user_id
cust_tag_bridge_rdd = create_cust_tag_bridge_rdd(sc, data)
# model readable format
clean_data_rdd = create_clean_data_rdd(data, cust_tag_bridge_rdd)
if return_cust_brige_rdd == False:
cust_tag_bridge_rdd.unpersist()
return clean_data_rdd
else:
return clean_data_rdd, cust_tag_bridge_rdd
def train_model(training_RDD):
# TODO: still need to optimize hyperparameters in a grid search
seed = 5L
iterations = 30
regularization_parameter = 0.1
rank = 20
model = ALS.train(training_RDD,
rank=rank,
seed=seed,
iterations=iterations,
lambda_=regularization_parameter,
nonnegative=True)
return model
def get_trained_model(sc, ratings_path, save_model_path=None, return_clean_data_rdd=False):
'''Loads rating data from file, trains model, and returns a fitted model'''
print "load data and build RDDs..."
clean_data_rdd = get_clean_data_rdd(sc, return_cust_brige_rdd = False)
print "Training Model..."
start = time()
fitted_model = train_model(clean_data_rdd )
end = time()
print "Training Model: Time Elapsed = {:.3} \n".format(end - start)
if save_model_path != None:
# Save model
print "saving model to path: {}".format(save_model_path)
fitted_model.save(sc ,save_model_path)
if return_clean_data_rdd:
return fitted_model, clean_data_rdd
else:
# restore memory resources
clean_data_rdd.unpersist()
return fitted_model
def load_model(sc, model_path):
'''Load trained model that has been saved to file.
It is more efficient to train a model once, then make predictions.'''
# load model
fitted_model = MatrixFactorizationModel.load(sc, model_path)
return fitted_model
def get_userID_moiveID_pairs(sc, user_id, clean_data_rdd):
'''In order to get recommendations for a user, we need to build an RDD with (user_id, wine_id)
pairs for wines that the user has not previously purchased.'''
# ( user_id, movie_id, rating )
# get user_id's movie ids in a list
movie_ids = clean_data_rdd.filter(lambda row: row[0] == user_id )\
.map(lambda row: row[1]).collect()
# get wine_ids that user_id has not purchased
unpurchased_wines = clean_data_rdd.filter(lambda row: row[0] != user_id )\
.filter(lambda row: row[2] not in movie_ids)\
.map(lambda row: (user_id, row[1] ) ).distinct()
return unpurchased_wines
def get_user_recommendations(fitted_model, unpurchased_wines):
user_recs = fitted_model.predictAll(unpurchased_wines)
return user_recs
def format_user_recs(user_recs, cust_tag_bridge_rdd, products_path, thresh ):
'''Reformat user recommendations so it's human readable and in preperation for curation.
This function swaps the user_id back to the original user hash tag, and attachs the wine
features (i.e. productID, appellation, varieatl, ...) '''
# value validated in Spark_Recommendation_Model_Validation notebook
threshold = thresh
validated_user_recs = user_recs.filter(lambda row: row[2] >= threshold )
# format --> (product key, predicted rating, user hash tag)
wineID_rating_userHash = \
validated_user_recs.map(lambda row: (row[0], (row[1], row[2]) ) )\
.join(cust_tag_bridge_rdd\
.map(lambda row: (row[1], row[0])))\
.map(lambda row: (row[1][0][0],
(row[1][0][1],
row[1][1] ) ))
products_df = pd.read_pickle(products_path)
products_rdd = create_products_rdd(products_df)
# Key:Value pair RDD
# format --> (custumer tag, (productKey , productID, Appellation, Varietal, Vineyard, wine type, Rating ) )
clean_user_recs = \
wineID_rating_userHash.join(products_rdd)\
.map(lambda row: ( row[1][0][1],
(row[0],
row[1][1][0],
row[1][1][1],
row[1][1][2],
row[1][1][3],
row[1][1][4],
row[1][0][0])))
return clean_user_recs
def curate_top_wines(top_varietal_recs, top_varietals):
final_recs = defaultdict(list)
for var in top_varietals:
var_cnt = 1
for row in top_varietal_recs:
if row[1][3] == var:
if var_cnt <= 3:
var_cnt += 1
#final_recs.append((row[0], row[1][:-1]))
final_recs[row[0]].append(row[1][:-1])
return final_recs
def get_top_rec_varietals(clean_user_recs):
'''Returns the top 3 wines from the top 3 varietals for user'''
# { custumer tag : (productKey , productID, Appellation, Varietal, Vineyard, wine type, Rating ) }
user_recs_dicts = clean_user_recs.collect()
varietals = [row[1][3] for row in user_recs_dicts]
var_count = Counter(varietals)
# get top 3 most recommender varietals for this user
top_varietals = [row[0] for row in var_count.most_common()[0:3]]
top_varietal_recs = clean_user_recs.filter(lambda row: row[1][3] in top_varietals ).collect()
return curate_top_wines(top_varietal_recs, top_varietals)
def get_top_reds_and_whites(clean_user_recs):
'''Returns top rated wines, 5 red and 5 white for user'''
# { custumer tag : (productKey , productID, Appellation, Varietal, Vineyard, wine type, Rating ) }
user_recs_dicts = clean_user_recs.collect()
red_white_recs_dict = defaultdict(list)
white_cnt = 1
red_cnt = 1
for rec in user_recs_dicts:
if rec[1][5] == "White Wines":
if white_cnt <= 5:
red_white_recs_dict[rec[0]].append(rec[1])
white_cnt += 1
else:
if red_cnt <= 5:
red_white_recs_dict[rec[0]].append(rec[1])
red_cnt += 1
return red_white_recs_dict
def get_user_ids_for_recommendations(cust_tag_bridge_rdd):
'''This function returns user ids from the cust_tag_bridge_rdd.
For now, it only return the first user_id in the rdd.'''
# results are inside of a list
return cust_tag_bridge_rdd.map(lambda row: row[1]).collect()
def check_top_varietal_wine_count(most_common_varietals):
'''Checks if top variatls have at lease 3 wines'''
cnt = 0
for row in most_common_varietals:
if row[1] >= 3:
cnt += 1
return cnt
if __name__ == '__main__':
start_rs = time()
# data files
home = "/Users/Alexander/Wine_Recommender/data/"
ratings_path = home + "spark_ready_data.pkl"
products_path = home + "wine_products.pkl"
rec_results_path = home + "user_rec_results.pkl"
# trained recommender path
model_path = "/Users/Alexander/Wine_Recommender/models/spark_recommender"
n_local_cpus = 3
# value validated in Spark_Recommendation_Model_Validation notebook
rating_threshold = 7
n_varietials = 3
print "get_spark_context..."
# get sparkContext
sc = get_spark_context(n_cups = n_local_cpus,
local = True,
remote_cluster_path=None)
print "get_clean_data_rdd..."
clean_data_rdd, cust_tag_bridge_rdd = get_clean_data_rdd(sc,
return_cust_brige_rdd = True)
print 'get_trained_model...'
# Model can be saved to a file only once; otherwise, spark will throw an error
fitted_model = get_trained_model(sc,
ratings_path,
save_model_path=model_path)
# print "load_model..."
# fitted_model = load_model(sc,
# model_path)
print "get_user_ids_for_recommendations..."
user_ids = get_user_ids_for_recommendations(cust_tag_bridge_rdd)
r_w_cnt = 0
results = []
for i, user_id in enumerate(user_ids[0:3]):
loop_start = time()
#all previously unpurchased wines will be passed into the model for a predicted rating
#print "get_userID_moiveID_pairs..."
unpurchased_wines = get_userID_moiveID_pairs(sc,
user_id,
clean_data_rdd)
#print "get_user_recommendations..."
user_recs = get_user_recommendations(fitted_model,
unpurchased_wines)
clean_user_recs = format_user_recs(user_recs,
cust_tag_bridge_rdd,
products_path,
rating_threshold)
# Curate Recommendations into Varietal Sub-Genres
# Return the top 3 rated wines from the the top 3 most recommended varietals.
# If there aren't at least 3 wines form 3 varietals,
# Then return the top 5 reds and the top 5 whitesn (though this shouldn't be a problem).
# check for 3 wines, 3 varieatls condition
# format -> (custumer tag, (productKey , productID, Appellation, Varietal, Vineyard, wine type, Rating ) )
user_recs_tups = clean_user_recs.collect()
varietals = [row[1][3] for row in user_recs_tups]
var_count = Counter(varietals)
most_common_varietals = var_count.most_common()[:n_varietials]
# check 1 --> varietal count
# check 2 --> top 3 varietals have at least 3 wines to choose from
if len(var_count) >= n_varietials and check_top_varietal_wine_count(most_common_varietals) == n_varietials:
#print "get_top_rec_varietals..."
final_recs = get_top_rec_varietals(clean_user_recs)
else:
#print "get_top_reds_and_whites..."
r_w_cnt += 1
final_recs = get_top_reds_and_whites(clean_user_recs)
results.append(final_recs)
if i % 1 == 0:
loop_end = time()
print "User {}, Time Elapsed {:.3} mins".format(i, (loop_end - loop_start)/60)
print "saving final_recs to file..."
# save recommendation results to file
cPickle.dump(results, open(rec_results_path, 'w'))
print "stoping spark context..."
sc.stop()
end_rc = time()
print "Red_White_Rec_Counter = {}".format(r_w_cnt)
print "Total Time Elapsed for RS = {:.4} mins".format((end_rc - start_rs)/60)
| print "Create spark context for local cluster..."
sc = pyspark.SparkContext(master = "local[{}]".format(n_worker_cups))
return sc | conditional_block |
wine_recommender.py | import numpy as np
import pandas as pd
import os
import cPickle
from time import time
from sklearn.utils import shuffle
from collections import defaultdict, Counter
import pyspark
from pyspark import SparkContext
from pyspark.mllib.recommendation import ALS, MatrixFactorizationModel
import math
def get_ratings_data(ratings_path):
start = time()
data = cPickle.load(open(ratings_path, 'r'))
end = time()
print "Time Elapsed = {:.3} seconds".format(end - start)
return data
def create_cust_tag_bridge_rdd(sc, data):
'''Create user tags/user ids bride rdd,
create int:cust_tag key value pairs,
spark can't read string user ids'''
unique_user_tags = np.unique([row[0] for row in data])
index_to_int = np.arange(0, len(unique_user_tags) * 100, 100)
cust_tag_bridge = [ (tag_hash, tag_int) for tag_hash, tag_int in zip(unique_user_tags, index_to_int)]
return sc.parallelize(cust_tag_bridge)
def create_products_rdd(products_df):
'''Creates products_rdd
Input: products_df, pandas dataframe
Output: products_rdd, spark rdd'''
# create products_rdd
products_rdd = sc.parallelize(products_df.values.tolist())
# format --> (productKey, (productID, Appellation, Varietal, Vinyard) )
products_rdd = products_rdd.map(lambda row: (row[0], (row[1], row[2], row[3], row[4], row[5]) ) )
return products_rdd
def create_clean_data_rdd(data, cust_tag_bridge_rdd):
'''Transform ratings data into spark readable format --> (user_id, productKey, rating)
Input: data: list, cust_tag_bridge_rdd: spark rdd
Output: clean_data_rdd, spark rdd'''
data_rdd = sc.parallelize(data)
tag_data_bridge_rdd = data_rdd.map(lambda row: (row[0], (row[1], row[2]) ))
clean_data_rdd = \
tag_data_bridge_rdd.sortByKey()\
.join( cust_tag_bridge_rdd.sortByKey())\
.map(lambda row: ( row[1][1], row[1][0][0], row[1][0][1]))
return clean_data_rdd
def get_spark_context(n_cups = 3, local = True, remote_cluster_path=None):
# number of nodes in local spark cluster
n_worker_cups = n_cups
if local == True:
print "Create spark context for local cluster..."
sc = pyspark.SparkContext(master = "local[{}]".format(n_worker_cups))
return sc
elif local == False:
print "Create spark context for remote cluster..."
sc = pyspark.SparkContext(master = remote_cluster_path)
return
else:
print "ERROR: local is set to False, however remote_cluster_path is not specified!"
def get_clean_data_rdd(sc, return_cust_brige_rdd = False):
'''Loads ratings from master file and formats data into model readable form.
data --> (user_id, productKey, rating)'''
# load data
data = get_ratings_data(ratings_path)
# assigne each user hash tag a user_id
cust_tag_bridge_rdd = create_cust_tag_bridge_rdd(sc, data)
# model readable format
clean_data_rdd = create_clean_data_rdd(data, cust_tag_bridge_rdd)
if return_cust_brige_rdd == False:
cust_tag_bridge_rdd.unpersist()
return clean_data_rdd
else:
return clean_data_rdd, cust_tag_bridge_rdd
def train_model(training_RDD): | iterations = 30
regularization_parameter = 0.1
rank = 20
model = ALS.train(training_RDD,
rank=rank,
seed=seed,
iterations=iterations,
lambda_=regularization_parameter,
nonnegative=True)
return model
def get_trained_model(sc, ratings_path, save_model_path=None, return_clean_data_rdd=False):
'''Loads rating data from file, trains model, and returns a fitted model'''
print "load data and build RDDs..."
clean_data_rdd = get_clean_data_rdd(sc, return_cust_brige_rdd = False)
print "Training Model..."
start = time()
fitted_model = train_model(clean_data_rdd )
end = time()
print "Training Model: Time Elapsed = {:.3} \n".format(end - start)
if save_model_path != None:
# Save model
print "saving model to path: {}".format(save_model_path)
fitted_model.save(sc ,save_model_path)
if return_clean_data_rdd:
return fitted_model, clean_data_rdd
else:
# restore memory resources
clean_data_rdd.unpersist()
return fitted_model
def load_model(sc, model_path):
'''Load trained model that has been saved to file.
It is more efficient to train a model once, then make predictions.'''
# load model
fitted_model = MatrixFactorizationModel.load(sc, model_path)
return fitted_model
def get_userID_moiveID_pairs(sc, user_id, clean_data_rdd):
'''In order to get recommendations for a user, we need to build an RDD with (user_id, wine_id)
pairs for wines that the user has not previously purchased.'''
# ( user_id, movie_id, rating )
# get user_id's movie ids in a list
movie_ids = clean_data_rdd.filter(lambda row: row[0] == user_id )\
.map(lambda row: row[1]).collect()
# get wine_ids that user_id has not purchased
unpurchased_wines = clean_data_rdd.filter(lambda row: row[0] != user_id )\
.filter(lambda row: row[2] not in movie_ids)\
.map(lambda row: (user_id, row[1] ) ).distinct()
return unpurchased_wines
def get_user_recommendations(fitted_model, unpurchased_wines):
user_recs = fitted_model.predictAll(unpurchased_wines)
return user_recs
def format_user_recs(user_recs, cust_tag_bridge_rdd, products_path, thresh ):
'''Reformat user recommendations so it's human readable and in preperation for curation.
This function swaps the user_id back to the original user hash tag, and attachs the wine
features (i.e. productID, appellation, varieatl, ...) '''
# value validated in Spark_Recommendation_Model_Validation notebook
threshold = thresh
validated_user_recs = user_recs.filter(lambda row: row[2] >= threshold )
# format --> (product key, predicted rating, user hash tag)
wineID_rating_userHash = \
validated_user_recs.map(lambda row: (row[0], (row[1], row[2]) ) )\
.join(cust_tag_bridge_rdd\
.map(lambda row: (row[1], row[0])))\
.map(lambda row: (row[1][0][0],
(row[1][0][1],
row[1][1] ) ))
products_df = pd.read_pickle(products_path)
products_rdd = create_products_rdd(products_df)
# Key:Value pair RDD
# format --> (custumer tag, (productKey , productID, Appellation, Varietal, Vineyard, wine type, Rating ) )
clean_user_recs = \
wineID_rating_userHash.join(products_rdd)\
.map(lambda row: ( row[1][0][1],
(row[0],
row[1][1][0],
row[1][1][1],
row[1][1][2],
row[1][1][3],
row[1][1][4],
row[1][0][0])))
return clean_user_recs
def curate_top_wines(top_varietal_recs, top_varietals):
final_recs = defaultdict(list)
for var in top_varietals:
var_cnt = 1
for row in top_varietal_recs:
if row[1][3] == var:
if var_cnt <= 3:
var_cnt += 1
#final_recs.append((row[0], row[1][:-1]))
final_recs[row[0]].append(row[1][:-1])
return final_recs
def get_top_rec_varietals(clean_user_recs):
'''Returns the top 3 wines from the top 3 varietals for user'''
# { custumer tag : (productKey , productID, Appellation, Varietal, Vineyard, wine type, Rating ) }
user_recs_dicts = clean_user_recs.collect()
varietals = [row[1][3] for row in user_recs_dicts]
var_count = Counter(varietals)
# get top 3 most recommender varietals for this user
top_varietals = [row[0] for row in var_count.most_common()[0:3]]
top_varietal_recs = clean_user_recs.filter(lambda row: row[1][3] in top_varietals ).collect()
return curate_top_wines(top_varietal_recs, top_varietals)
def get_top_reds_and_whites(clean_user_recs):
'''Returns top rated wines, 5 red and 5 white for user'''
# { custumer tag : (productKey , productID, Appellation, Varietal, Vineyard, wine type, Rating ) }
user_recs_dicts = clean_user_recs.collect()
red_white_recs_dict = defaultdict(list)
white_cnt = 1
red_cnt = 1
for rec in user_recs_dicts:
if rec[1][5] == "White Wines":
if white_cnt <= 5:
red_white_recs_dict[rec[0]].append(rec[1])
white_cnt += 1
else:
if red_cnt <= 5:
red_white_recs_dict[rec[0]].append(rec[1])
red_cnt += 1
return red_white_recs_dict
def get_user_ids_for_recommendations(cust_tag_bridge_rdd):
'''This function returns user ids from the cust_tag_bridge_rdd.
For now, it only return the first user_id in the rdd.'''
# results are inside of a list
return cust_tag_bridge_rdd.map(lambda row: row[1]).collect()
def check_top_varietal_wine_count(most_common_varietals):
'''Checks if top variatls have at lease 3 wines'''
cnt = 0
for row in most_common_varietals:
if row[1] >= 3:
cnt += 1
return cnt
if __name__ == '__main__':
start_rs = time()
# data files
home = "/Users/Alexander/Wine_Recommender/data/"
ratings_path = home + "spark_ready_data.pkl"
products_path = home + "wine_products.pkl"
rec_results_path = home + "user_rec_results.pkl"
# trained recommender path
model_path = "/Users/Alexander/Wine_Recommender/models/spark_recommender"
n_local_cpus = 3
# value validated in Spark_Recommendation_Model_Validation notebook
rating_threshold = 7
n_varietials = 3
print "get_spark_context..."
# get sparkContext
sc = get_spark_context(n_cups = n_local_cpus,
local = True,
remote_cluster_path=None)
print "get_clean_data_rdd..."
clean_data_rdd, cust_tag_bridge_rdd = get_clean_data_rdd(sc,
return_cust_brige_rdd = True)
print 'get_trained_model...'
# Model can be saved to a file only once; otherwise, spark will throw an error
fitted_model = get_trained_model(sc,
ratings_path,
save_model_path=model_path)
# print "load_model..."
# fitted_model = load_model(sc,
# model_path)
print "get_user_ids_for_recommendations..."
user_ids = get_user_ids_for_recommendations(cust_tag_bridge_rdd)
r_w_cnt = 0
results = []
for i, user_id in enumerate(user_ids[0:3]):
loop_start = time()
#all previously unpurchased wines will be passed into the model for a predicted rating
#print "get_userID_moiveID_pairs..."
unpurchased_wines = get_userID_moiveID_pairs(sc,
user_id,
clean_data_rdd)
#print "get_user_recommendations..."
user_recs = get_user_recommendations(fitted_model,
unpurchased_wines)
clean_user_recs = format_user_recs(user_recs,
cust_tag_bridge_rdd,
products_path,
rating_threshold)
# Curate Recommendations into Varietal Sub-Genres
# Return the top 3 rated wines from the the top 3 most recommended varietals.
# If there aren't at least 3 wines form 3 varietals,
# Then return the top 5 reds and the top 5 whitesn (though this shouldn't be a problem).
# check for 3 wines, 3 varieatls condition
# format -> (custumer tag, (productKey , productID, Appellation, Varietal, Vineyard, wine type, Rating ) )
user_recs_tups = clean_user_recs.collect()
varietals = [row[1][3] for row in user_recs_tups]
var_count = Counter(varietals)
most_common_varietals = var_count.most_common()[:n_varietials]
# check 1 --> varietal count
# check 2 --> top 3 varietals have at least 3 wines to choose from
if len(var_count) >= n_varietials and check_top_varietal_wine_count(most_common_varietals) == n_varietials:
#print "get_top_rec_varietals..."
final_recs = get_top_rec_varietals(clean_user_recs)
else:
#print "get_top_reds_and_whites..."
r_w_cnt += 1
final_recs = get_top_reds_and_whites(clean_user_recs)
results.append(final_recs)
if i % 1 == 0:
loop_end = time()
print "User {}, Time Elapsed {:.3} mins".format(i, (loop_end - loop_start)/60)
print "saving final_recs to file..."
# save recommendation results to file
cPickle.dump(results, open(rec_results_path, 'w'))
print "stoping spark context..."
sc.stop()
end_rc = time()
print "Red_White_Rec_Counter = {}".format(r_w_cnt)
print "Total Time Elapsed for RS = {:.4} mins".format((end_rc - start_rs)/60) | # TODO: still need to optimize hyperparameters in a grid search
seed = 5L | random_line_split |
wine_recommender.py | import numpy as np
import pandas as pd
import os
import cPickle
from time import time
from sklearn.utils import shuffle
from collections import defaultdict, Counter
import pyspark
from pyspark import SparkContext
from pyspark.mllib.recommendation import ALS, MatrixFactorizationModel
import math
def get_ratings_data(ratings_path):
start = time()
data = cPickle.load(open(ratings_path, 'r'))
end = time()
print "Time Elapsed = {:.3} seconds".format(end - start)
return data
def create_cust_tag_bridge_rdd(sc, data):
'''Create user tags/user ids bride rdd,
create int:cust_tag key value pairs,
spark can't read string user ids'''
unique_user_tags = np.unique([row[0] for row in data])
index_to_int = np.arange(0, len(unique_user_tags) * 100, 100)
cust_tag_bridge = [ (tag_hash, tag_int) for tag_hash, tag_int in zip(unique_user_tags, index_to_int)]
return sc.parallelize(cust_tag_bridge)
def create_products_rdd(products_df):
'''Creates products_rdd
Input: products_df, pandas dataframe
Output: products_rdd, spark rdd'''
# create products_rdd
products_rdd = sc.parallelize(products_df.values.tolist())
# format --> (productKey, (productID, Appellation, Varietal, Vinyard) )
products_rdd = products_rdd.map(lambda row: (row[0], (row[1], row[2], row[3], row[4], row[5]) ) )
return products_rdd
def create_clean_data_rdd(data, cust_tag_bridge_rdd):
'''Transform ratings data into spark readable format --> (user_id, productKey, rating)
Input: data: list, cust_tag_bridge_rdd: spark rdd
Output: clean_data_rdd, spark rdd'''
data_rdd = sc.parallelize(data)
tag_data_bridge_rdd = data_rdd.map(lambda row: (row[0], (row[1], row[2]) ))
clean_data_rdd = \
tag_data_bridge_rdd.sortByKey()\
.join( cust_tag_bridge_rdd.sortByKey())\
.map(lambda row: ( row[1][1], row[1][0][0], row[1][0][1]))
return clean_data_rdd
def get_spark_context(n_cups = 3, local = True, remote_cluster_path=None):
# number of nodes in local spark cluster
n_worker_cups = n_cups
if local == True:
print "Create spark context for local cluster..."
sc = pyspark.SparkContext(master = "local[{}]".format(n_worker_cups))
return sc
elif local == False:
print "Create spark context for remote cluster..."
sc = pyspark.SparkContext(master = remote_cluster_path)
return
else:
print "ERROR: local is set to False, however remote_cluster_path is not specified!"
def get_clean_data_rdd(sc, return_cust_brige_rdd = False):
'''Loads ratings from master file and formats data into model readable form.
data --> (user_id, productKey, rating)'''
# load data
data = get_ratings_data(ratings_path)
# assigne each user hash tag a user_id
cust_tag_bridge_rdd = create_cust_tag_bridge_rdd(sc, data)
# model readable format
clean_data_rdd = create_clean_data_rdd(data, cust_tag_bridge_rdd)
if return_cust_brige_rdd == False:
cust_tag_bridge_rdd.unpersist()
return clean_data_rdd
else:
return clean_data_rdd, cust_tag_bridge_rdd
def train_model(training_RDD):
# TODO: still need to optimize hyperparameters in a grid search
seed = 5L
iterations = 30
regularization_parameter = 0.1
rank = 20
model = ALS.train(training_RDD,
rank=rank,
seed=seed,
iterations=iterations,
lambda_=regularization_parameter,
nonnegative=True)
return model
def get_trained_model(sc, ratings_path, save_model_path=None, return_clean_data_rdd=False):
'''Loads rating data from file, trains model, and returns a fitted model'''
print "load data and build RDDs..."
clean_data_rdd = get_clean_data_rdd(sc, return_cust_brige_rdd = False)
print "Training Model..."
start = time()
fitted_model = train_model(clean_data_rdd )
end = time()
print "Training Model: Time Elapsed = {:.3} \n".format(end - start)
if save_model_path != None:
# Save model
print "saving model to path: {}".format(save_model_path)
fitted_model.save(sc ,save_model_path)
if return_clean_data_rdd:
return fitted_model, clean_data_rdd
else:
# restore memory resources
clean_data_rdd.unpersist()
return fitted_model
def load_model(sc, model_path):
'''Load trained model that has been saved to file.
It is more efficient to train a model once, then make predictions.'''
# load model
fitted_model = MatrixFactorizationModel.load(sc, model_path)
return fitted_model
def get_userID_moiveID_pairs(sc, user_id, clean_data_rdd):
'''In order to get recommendations for a user, we need to build an RDD with (user_id, wine_id)
pairs for wines that the user has not previously purchased.'''
# ( user_id, movie_id, rating )
# get user_id's movie ids in a list
movie_ids = clean_data_rdd.filter(lambda row: row[0] == user_id )\
.map(lambda row: row[1]).collect()
# get wine_ids that user_id has not purchased
unpurchased_wines = clean_data_rdd.filter(lambda row: row[0] != user_id )\
.filter(lambda row: row[2] not in movie_ids)\
.map(lambda row: (user_id, row[1] ) ).distinct()
return unpurchased_wines
def get_user_recommendations(fitted_model, unpurchased_wines):
user_recs = fitted_model.predictAll(unpurchased_wines)
return user_recs
def format_user_recs(user_recs, cust_tag_bridge_rdd, products_path, thresh ):
'''Reformat user recommendations so it's human readable and in preperation for curation.
This function swaps the user_id back to the original user hash tag, and attachs the wine
features (i.e. productID, appellation, varieatl, ...) '''
# value validated in Spark_Recommendation_Model_Validation notebook
threshold = thresh
validated_user_recs = user_recs.filter(lambda row: row[2] >= threshold )
# format --> (product key, predicted rating, user hash tag)
wineID_rating_userHash = \
validated_user_recs.map(lambda row: (row[0], (row[1], row[2]) ) )\
.join(cust_tag_bridge_rdd\
.map(lambda row: (row[1], row[0])))\
.map(lambda row: (row[1][0][0],
(row[1][0][1],
row[1][1] ) ))
products_df = pd.read_pickle(products_path)
products_rdd = create_products_rdd(products_df)
# Key:Value pair RDD
# format --> (custumer tag, (productKey , productID, Appellation, Varietal, Vineyard, wine type, Rating ) )
clean_user_recs = \
wineID_rating_userHash.join(products_rdd)\
.map(lambda row: ( row[1][0][1],
(row[0],
row[1][1][0],
row[1][1][1],
row[1][1][2],
row[1][1][3],
row[1][1][4],
row[1][0][0])))
return clean_user_recs
def curate_top_wines(top_varietal_recs, top_varietals):
final_recs = defaultdict(list)
for var in top_varietals:
var_cnt = 1
for row in top_varietal_recs:
if row[1][3] == var:
if var_cnt <= 3:
var_cnt += 1
#final_recs.append((row[0], row[1][:-1]))
final_recs[row[0]].append(row[1][:-1])
return final_recs
def get_top_rec_varietals(clean_user_recs):
'''Returns the top 3 wines from the top 3 varietals for user'''
# { custumer tag : (productKey , productID, Appellation, Varietal, Vineyard, wine type, Rating ) }
user_recs_dicts = clean_user_recs.collect()
varietals = [row[1][3] for row in user_recs_dicts]
var_count = Counter(varietals)
# get top 3 most recommender varietals for this user
top_varietals = [row[0] for row in var_count.most_common()[0:3]]
top_varietal_recs = clean_user_recs.filter(lambda row: row[1][3] in top_varietals ).collect()
return curate_top_wines(top_varietal_recs, top_varietals)
def get_top_reds_and_whites(clean_user_recs):
'''Returns top rated wines, 5 red and 5 white for user'''
# { custumer tag : (productKey , productID, Appellation, Varietal, Vineyard, wine type, Rating ) }
user_recs_dicts = clean_user_recs.collect()
red_white_recs_dict = defaultdict(list)
white_cnt = 1
red_cnt = 1
for rec in user_recs_dicts:
if rec[1][5] == "White Wines":
if white_cnt <= 5:
red_white_recs_dict[rec[0]].append(rec[1])
white_cnt += 1
else:
if red_cnt <= 5:
red_white_recs_dict[rec[0]].append(rec[1])
red_cnt += 1
return red_white_recs_dict
def get_user_ids_for_recommendations(cust_tag_bridge_rdd):
'''This function returns user ids from the cust_tag_bridge_rdd.
For now, it only return the first user_id in the rdd.'''
# results are inside of a list
return cust_tag_bridge_rdd.map(lambda row: row[1]).collect()
def check_top_varietal_wine_count(most_common_varietals):
|
if __name__ == '__main__':
start_rs = time()
# data files
home = "/Users/Alexander/Wine_Recommender/data/"
ratings_path = home + "spark_ready_data.pkl"
products_path = home + "wine_products.pkl"
rec_results_path = home + "user_rec_results.pkl"
# trained recommender path
model_path = "/Users/Alexander/Wine_Recommender/models/spark_recommender"
n_local_cpus = 3
# value validated in Spark_Recommendation_Model_Validation notebook
rating_threshold = 7
n_varietials = 3
print "get_spark_context..."
# get sparkContext
sc = get_spark_context(n_cups = n_local_cpus,
local = True,
remote_cluster_path=None)
print "get_clean_data_rdd..."
clean_data_rdd, cust_tag_bridge_rdd = get_clean_data_rdd(sc,
return_cust_brige_rdd = True)
print 'get_trained_model...'
# Model can be saved to a file only once; otherwise, spark will throw an error
fitted_model = get_trained_model(sc,
ratings_path,
save_model_path=model_path)
# print "load_model..."
# fitted_model = load_model(sc,
# model_path)
print "get_user_ids_for_recommendations..."
user_ids = get_user_ids_for_recommendations(cust_tag_bridge_rdd)
r_w_cnt = 0
results = []
for i, user_id in enumerate(user_ids[0:3]):
loop_start = time()
#all previously unpurchased wines will be passed into the model for a predicted rating
#print "get_userID_moiveID_pairs..."
unpurchased_wines = get_userID_moiveID_pairs(sc,
user_id,
clean_data_rdd)
#print "get_user_recommendations..."
user_recs = get_user_recommendations(fitted_model,
unpurchased_wines)
clean_user_recs = format_user_recs(user_recs,
cust_tag_bridge_rdd,
products_path,
rating_threshold)
# Curate Recommendations into Varietal Sub-Genres
# Return the top 3 rated wines from the the top 3 most recommended varietals.
# If there aren't at least 3 wines form 3 varietals,
# Then return the top 5 reds and the top 5 whitesn (though this shouldn't be a problem).
# check for 3 wines, 3 varieatls condition
# format -> (custumer tag, (productKey , productID, Appellation, Varietal, Vineyard, wine type, Rating ) )
user_recs_tups = clean_user_recs.collect()
varietals = [row[1][3] for row in user_recs_tups]
var_count = Counter(varietals)
most_common_varietals = var_count.most_common()[:n_varietials]
# check 1 --> varietal count
# check 2 --> top 3 varietals have at least 3 wines to choose from
if len(var_count) >= n_varietials and check_top_varietal_wine_count(most_common_varietals) == n_varietials:
#print "get_top_rec_varietals..."
final_recs = get_top_rec_varietals(clean_user_recs)
else:
#print "get_top_reds_and_whites..."
r_w_cnt += 1
final_recs = get_top_reds_and_whites(clean_user_recs)
results.append(final_recs)
if i % 1 == 0:
loop_end = time()
print "User {}, Time Elapsed {:.3} mins".format(i, (loop_end - loop_start)/60)
print "saving final_recs to file..."
# save recommendation results to file
cPickle.dump(results, open(rec_results_path, 'w'))
print "stoping spark context..."
sc.stop()
end_rc = time()
print "Red_White_Rec_Counter = {}".format(r_w_cnt)
print "Total Time Elapsed for RS = {:.4} mins".format((end_rc - start_rs)/60)
| '''Checks if top variatls have at lease 3 wines'''
cnt = 0
for row in most_common_varietals:
if row[1] >= 3:
cnt += 1
return cnt | identifier_body |
wine_recommender.py | import numpy as np
import pandas as pd
import os
import cPickle
from time import time
from sklearn.utils import shuffle
from collections import defaultdict, Counter
import pyspark
from pyspark import SparkContext
from pyspark.mllib.recommendation import ALS, MatrixFactorizationModel
import math
def get_ratings_data(ratings_path):
start = time()
data = cPickle.load(open(ratings_path, 'r'))
end = time()
print "Time Elapsed = {:.3} seconds".format(end - start)
return data
def create_cust_tag_bridge_rdd(sc, data):
'''Create user tags/user ids bride rdd,
create int:cust_tag key value pairs,
spark can't read string user ids'''
unique_user_tags = np.unique([row[0] for row in data])
index_to_int = np.arange(0, len(unique_user_tags) * 100, 100)
cust_tag_bridge = [ (tag_hash, tag_int) for tag_hash, tag_int in zip(unique_user_tags, index_to_int)]
return sc.parallelize(cust_tag_bridge)
def create_products_rdd(products_df):
'''Creates products_rdd
Input: products_df, pandas dataframe
Output: products_rdd, spark rdd'''
# create products_rdd
products_rdd = sc.parallelize(products_df.values.tolist())
# format --> (productKey, (productID, Appellation, Varietal, Vinyard) )
products_rdd = products_rdd.map(lambda row: (row[0], (row[1], row[2], row[3], row[4], row[5]) ) )
return products_rdd
def create_clean_data_rdd(data, cust_tag_bridge_rdd):
'''Transform ratings data into spark readable format --> (user_id, productKey, rating)
Input: data: list, cust_tag_bridge_rdd: spark rdd
Output: clean_data_rdd, spark rdd'''
data_rdd = sc.parallelize(data)
tag_data_bridge_rdd = data_rdd.map(lambda row: (row[0], (row[1], row[2]) ))
clean_data_rdd = \
tag_data_bridge_rdd.sortByKey()\
.join( cust_tag_bridge_rdd.sortByKey())\
.map(lambda row: ( row[1][1], row[1][0][0], row[1][0][1]))
return clean_data_rdd
def get_spark_context(n_cups = 3, local = True, remote_cluster_path=None):
# number of nodes in local spark cluster
n_worker_cups = n_cups
if local == True:
print "Create spark context for local cluster..."
sc = pyspark.SparkContext(master = "local[{}]".format(n_worker_cups))
return sc
elif local == False:
print "Create spark context for remote cluster..."
sc = pyspark.SparkContext(master = remote_cluster_path)
return
else:
print "ERROR: local is set to False, however remote_cluster_path is not specified!"
def get_clean_data_rdd(sc, return_cust_brige_rdd = False):
'''Loads ratings from master file and formats data into model readable form.
data --> (user_id, productKey, rating)'''
# load data
data = get_ratings_data(ratings_path)
# assigne each user hash tag a user_id
cust_tag_bridge_rdd = create_cust_tag_bridge_rdd(sc, data)
# model readable format
clean_data_rdd = create_clean_data_rdd(data, cust_tag_bridge_rdd)
if return_cust_brige_rdd == False:
cust_tag_bridge_rdd.unpersist()
return clean_data_rdd
else:
return clean_data_rdd, cust_tag_bridge_rdd
def train_model(training_RDD):
# TODO: still need to optimize hyperparameters in a grid search
seed = 5L
iterations = 30
regularization_parameter = 0.1
rank = 20
model = ALS.train(training_RDD,
rank=rank,
seed=seed,
iterations=iterations,
lambda_=regularization_parameter,
nonnegative=True)
return model
def get_trained_model(sc, ratings_path, save_model_path=None, return_clean_data_rdd=False):
'''Loads rating data from file, trains model, and returns a fitted model'''
print "load data and build RDDs..."
clean_data_rdd = get_clean_data_rdd(sc, return_cust_brige_rdd = False)
print "Training Model..."
start = time()
fitted_model = train_model(clean_data_rdd )
end = time()
print "Training Model: Time Elapsed = {:.3} \n".format(end - start)
if save_model_path != None:
# Save model
print "saving model to path: {}".format(save_model_path)
fitted_model.save(sc ,save_model_path)
if return_clean_data_rdd:
return fitted_model, clean_data_rdd
else:
# restore memory resources
clean_data_rdd.unpersist()
return fitted_model
def load_model(sc, model_path):
'''Load trained model that has been saved to file.
It is more efficient to train a model once, then make predictions.'''
# load model
fitted_model = MatrixFactorizationModel.load(sc, model_path)
return fitted_model
def get_userID_moiveID_pairs(sc, user_id, clean_data_rdd):
'''In order to get recommendations for a user, we need to build an RDD with (user_id, wine_id)
pairs for wines that the user has not previously purchased.'''
# ( user_id, movie_id, rating )
# get user_id's movie ids in a list
movie_ids = clean_data_rdd.filter(lambda row: row[0] == user_id )\
.map(lambda row: row[1]).collect()
# get wine_ids that user_id has not purchased
unpurchased_wines = clean_data_rdd.filter(lambda row: row[0] != user_id )\
.filter(lambda row: row[2] not in movie_ids)\
.map(lambda row: (user_id, row[1] ) ).distinct()
return unpurchased_wines
def get_user_recommendations(fitted_model, unpurchased_wines):
user_recs = fitted_model.predictAll(unpurchased_wines)
return user_recs
def format_user_recs(user_recs, cust_tag_bridge_rdd, products_path, thresh ):
'''Reformat user recommendations so it's human readable and in preperation for curation.
This function swaps the user_id back to the original user hash tag, and attachs the wine
features (i.e. productID, appellation, varieatl, ...) '''
# value validated in Spark_Recommendation_Model_Validation notebook
threshold = thresh
validated_user_recs = user_recs.filter(lambda row: row[2] >= threshold )
# format --> (product key, predicted rating, user hash tag)
wineID_rating_userHash = \
validated_user_recs.map(lambda row: (row[0], (row[1], row[2]) ) )\
.join(cust_tag_bridge_rdd\
.map(lambda row: (row[1], row[0])))\
.map(lambda row: (row[1][0][0],
(row[1][0][1],
row[1][1] ) ))
products_df = pd.read_pickle(products_path)
products_rdd = create_products_rdd(products_df)
# Key:Value pair RDD
# format --> (custumer tag, (productKey , productID, Appellation, Varietal, Vineyard, wine type, Rating ) )
clean_user_recs = \
wineID_rating_userHash.join(products_rdd)\
.map(lambda row: ( row[1][0][1],
(row[0],
row[1][1][0],
row[1][1][1],
row[1][1][2],
row[1][1][3],
row[1][1][4],
row[1][0][0])))
return clean_user_recs
def curate_top_wines(top_varietal_recs, top_varietals):
final_recs = defaultdict(list)
for var in top_varietals:
var_cnt = 1
for row in top_varietal_recs:
if row[1][3] == var:
if var_cnt <= 3:
var_cnt += 1
#final_recs.append((row[0], row[1][:-1]))
final_recs[row[0]].append(row[1][:-1])
return final_recs
def get_top_rec_varietals(clean_user_recs):
'''Returns the top 3 wines from the top 3 varietals for user'''
# { custumer tag : (productKey , productID, Appellation, Varietal, Vineyard, wine type, Rating ) }
user_recs_dicts = clean_user_recs.collect()
varietals = [row[1][3] for row in user_recs_dicts]
var_count = Counter(varietals)
# get top 3 most recommender varietals for this user
top_varietals = [row[0] for row in var_count.most_common()[0:3]]
top_varietal_recs = clean_user_recs.filter(lambda row: row[1][3] in top_varietals ).collect()
return curate_top_wines(top_varietal_recs, top_varietals)
def get_top_reds_and_whites(clean_user_recs):
'''Returns top rated wines, 5 red and 5 white for user'''
# { custumer tag : (productKey , productID, Appellation, Varietal, Vineyard, wine type, Rating ) }
user_recs_dicts = clean_user_recs.collect()
red_white_recs_dict = defaultdict(list)
white_cnt = 1
red_cnt = 1
for rec in user_recs_dicts:
if rec[1][5] == "White Wines":
if white_cnt <= 5:
red_white_recs_dict[rec[0]].append(rec[1])
white_cnt += 1
else:
if red_cnt <= 5:
red_white_recs_dict[rec[0]].append(rec[1])
red_cnt += 1
return red_white_recs_dict
def get_user_ids_for_recommendations(cust_tag_bridge_rdd):
'''This function returns user ids from the cust_tag_bridge_rdd.
For now, it only return the first user_id in the rdd.'''
# results are inside of a list
return cust_tag_bridge_rdd.map(lambda row: row[1]).collect()
def | (most_common_varietals):
'''Checks if top variatls have at lease 3 wines'''
cnt = 0
for row in most_common_varietals:
if row[1] >= 3:
cnt += 1
return cnt
if __name__ == '__main__':
start_rs = time()
# data files
home = "/Users/Alexander/Wine_Recommender/data/"
ratings_path = home + "spark_ready_data.pkl"
products_path = home + "wine_products.pkl"
rec_results_path = home + "user_rec_results.pkl"
# trained recommender path
model_path = "/Users/Alexander/Wine_Recommender/models/spark_recommender"
n_local_cpus = 3
# value validated in Spark_Recommendation_Model_Validation notebook
rating_threshold = 7
n_varietials = 3
print "get_spark_context..."
# get sparkContext
sc = get_spark_context(n_cups = n_local_cpus,
local = True,
remote_cluster_path=None)
print "get_clean_data_rdd..."
clean_data_rdd, cust_tag_bridge_rdd = get_clean_data_rdd(sc,
return_cust_brige_rdd = True)
print 'get_trained_model...'
# Model can be saved to a file only once; otherwise, spark will throw an error
fitted_model = get_trained_model(sc,
ratings_path,
save_model_path=model_path)
# print "load_model..."
# fitted_model = load_model(sc,
# model_path)
print "get_user_ids_for_recommendations..."
user_ids = get_user_ids_for_recommendations(cust_tag_bridge_rdd)
r_w_cnt = 0
results = []
for i, user_id in enumerate(user_ids[0:3]):
loop_start = time()
#all previously unpurchased wines will be passed into the model for a predicted rating
#print "get_userID_moiveID_pairs..."
unpurchased_wines = get_userID_moiveID_pairs(sc,
user_id,
clean_data_rdd)
#print "get_user_recommendations..."
user_recs = get_user_recommendations(fitted_model,
unpurchased_wines)
clean_user_recs = format_user_recs(user_recs,
cust_tag_bridge_rdd,
products_path,
rating_threshold)
# Curate Recommendations into Varietal Sub-Genres
# Return the top 3 rated wines from the the top 3 most recommended varietals.
# If there aren't at least 3 wines form 3 varietals,
# Then return the top 5 reds and the top 5 whitesn (though this shouldn't be a problem).
# check for 3 wines, 3 varieatls condition
# format -> (custumer tag, (productKey , productID, Appellation, Varietal, Vineyard, wine type, Rating ) )
user_recs_tups = clean_user_recs.collect()
varietals = [row[1][3] for row in user_recs_tups]
var_count = Counter(varietals)
most_common_varietals = var_count.most_common()[:n_varietials]
# check 1 --> varietal count
# check 2 --> top 3 varietals have at least 3 wines to choose from
if len(var_count) >= n_varietials and check_top_varietal_wine_count(most_common_varietals) == n_varietials:
#print "get_top_rec_varietals..."
final_recs = get_top_rec_varietals(clean_user_recs)
else:
#print "get_top_reds_and_whites..."
r_w_cnt += 1
final_recs = get_top_reds_and_whites(clean_user_recs)
results.append(final_recs)
if i % 1 == 0:
loop_end = time()
print "User {}, Time Elapsed {:.3} mins".format(i, (loop_end - loop_start)/60)
print "saving final_recs to file..."
# save recommendation results to file
cPickle.dump(results, open(rec_results_path, 'w'))
print "stoping spark context..."
sc.stop()
end_rc = time()
print "Red_White_Rec_Counter = {}".format(r_w_cnt)
print "Total Time Elapsed for RS = {:.4} mins".format((end_rc - start_rs)/60)
| check_top_varietal_wine_count | identifier_name |
Helicopter-OMO.py | # A model of the relative price effects of monetary shocks via helicopter drop vs. by open market operations.
# Download the paper at https://ssrn.com/abstract=2545488
from itertools import combinations
from colour import Color
import pandas
from helipad import *
from math import sqrt
heli = Helipad()
#===============
# STORE AND BANK CLASSES
# Have to come before adding the primitives
#===============
class Store(baseAgent):
def __init__(self, breed, id, model):
super().__init__(breed, id, model)
#Start with equilibrium prices. Not strictly necessary, but it eliminates the burn-in period. See eq. A7
sm=sum([1/sqrt(model.goodParam('prod',g)) for g in model.nonMoneyGoods]) * M0/(model.param('agents_agent')*(len(model.nonMoneyGoods)+sum([1+model.breedParam('rbd', b, prim='agent') for b in model.primitives['agent']['breeds']])))
self.price = {g:sm/(sqrt(model.goodParam('prod',g))) for g in model.nonMoneyGoods}
self.invTarget = {g:model.goodParam('prod',g)*model.param('agents_agent') for g in model.nonMoneyGoods}
self.portion = {g:1/(len(model.nonMoneyGoods)) for g in model.nonMoneyGoods} #Capital allocation
self.wage = 0
self.cashDemand = 0
if hasattr(self, 'bank'):
self.pavg = 0
self.projects = []
self.defaults = 0
def step(self, stage):
super().step(stage)
N = self.model.param('agents_agent')
#Calculate wages
self.cashDemand = N * self.wage #Hold enough cash for one period's disbursements
newwage = (self.balance - self.cashDemand) / N
if newwage < 1: newwage = 1
self.wage = (self.wage * self.model.param('wStick') + newwage)/(1 + self.model.param('wStick'))
if self.wage * N > self.balance: self.wage = self.balance / N #Budget constraint
#Hire labor, with individualized wage shocks
labor = 0
for a in self.model.agents['agent']:
if self.wage < 0: self.wage = 0
wage = random.normal(self.wage, self.wage/2 + 0.1) #Can't have zero stdev
wage = 0 if wage < 0 else wage #Wage bounded from below by 0
self.pay(a, wage)
labor += 1
tPrice = sum([self.price[good] for good in self.model.nonMoneyGoods])
avg, stdev = {},{} #Hang onto these for use with credit calculations
for i in self.model.nonMoneyGoods:
#Keep track of typical demand
#Target sufficient inventory to handle 1.5 standard deviations above mean demand for the last 50 periods
history = pandas.Series(self.model.data.getLast('demand-'+i, 50)) + pandas.Series(self.model.data.getLast('shortage-'+i, 50))
avg[i], stdev[i] = history.mean(), history.std()
itt = (1 if isnan(avg[i]) else avg[i]) + 1.5 * (1 if isnan(stdev[i]) else stdev[i])
self.invTarget[i] = (self.invTarget[i] + itt)/2 #Smooth it a bit
#Set prices
#Change in the direction of hitting the inventory target
# self.price[i] += log(self.invTarget[i] / (self.inventory[i][0] + self.lastShortage[i])) #Jim's pricing rule?
self.price[i] += (self.invTarget[i] - self.goods[i] + self.model.data.getLast('shortage-'+i))/100 #/150
#Adjust in proportion to the rate of inventory change
#Positive deltaInv indicates falling inventory; negative deltaInv rising inventory
lasti = self.model.data.getLast('inv-'+i,2)[0] if self.model.t > 1 else 0
deltaInv = lasti - self.goods[i]
self.price[i] *= (1 + deltaInv/(50 ** self.model.param('pSmooth')))
if self.price[i] < 0: self.price[i] = 1
#Produce stuff
self.portion[i] = (self.model.param('kImmob') * self.portion[i] + self.price[i]/tPrice) / (self.model.param('kImmob') + 1) #Calculate capital allocation
self.goods[i] = self.goods[i] + self.portion[i] * labor * self.model.goodParam('prod',i)
#Intertemporal transactions
if hasattr(self, 'bank') and self.model.t > 0:
#Stipulate some demand for credit, we can worry about microfoundations later
self.bank.amortize(self, self.bank.credit[self.id].owe/1.5)
self.bank.borrow(self, self.model.cb.ngdp * (1-self.bank.i))
class Bank(baseAgent):
def __init__(self, breed, id, model):
super().__init__(breed, id, model)
self.i = .1 #Per-period interest rate
self.targetRR = 0.25
self.lastWithdrawal = 0
self.inflation = 0
self.accounts = {} #Liabilities
self.credit = {} #Assets
self.dif = 0 #How much credit was rationed
self.defaultTotal = 0
self.pLast = 50 #Initial price level, equal to average of initial prices
def account(self, customer):
return self.accounts[customer.id] if customer.id in self.accounts else 0
def setupAccount(self, customer):
if customer.id in self.accounts: return False #If you already have an account
self.accounts[customer.id] = 0 #Liabilities
self.credit[customer.id] = Loan(customer, self) #Assets
#Assets and liabilities should return the same thing
#Any difference gets disbursed as interest on deposits
@property
def assets(self):
return self.goods[self.model.moneyGood] + sum([l.owe for l in self.credit.values()]) #Reserves
@property
def liabilities(self):
return sum(list(self.accounts.values())) #Values returns a dict_values object, not a list. So wrap it in list()
@property
def loans(self):
return self.assets - self.goods[self.model.moneyGood]
@property
def reserveRatio(self):
l = self.liabilities
if l == 0: return 1
else: return self.goods[self.model.moneyGood] / l
@property
def realInterest(self): return self.i - self.inflation
#amt<0 to withdraw
def deposit(self, customer, amt):
amt = customer.pay(self, amt)
self.accounts[customer.id] += amt #Credit account
if amt<0: self.lastWithdrawal -= amt
return amt
def transfer(self, customer, recipient, amt):
if self.accounts[customer.id] < amt: amt = self.accounts[customer.id]
self.accounts[customer.id] -= amt
self.accounts[recipient.id] += amt
return amt
def borrow(self, customer, amt):
if amt < 0.01: return 0 #Skip blanks and float errors
l = self.credit[customer.id]
#Refinance anything with a higher interest rate
for n,loan in enumerate(l.loans):
if loan['i'] >= self.i:
amt += loan['amount']
del l.loans[n]
#Increase assets
l.loans.append({
'amount': amt,
'i': self.i
})
self.accounts[customer.id] += amt #Increase liabilities
return amt #How much you actually borrowed
#Returns the amount you actually pay – the lesser of amt or your outstanding balance
def amortize(self, customer, amt):
if |
def step(self, stage):
self.lastWithdrawal = 0
for l in self.credit: self.credit[l].step()
#Pay interest on deposits
lia = self.liabilities
profit = self.assets - lia
if profit > self.model.param('agents_agent'):
print('Disbursing profit of $',profit)
for id, a in self.accounts.items():
self.accounts[id] += profit/lia * a
# # Set target reserve ratio
# if self.model.t > 2:
# wd = self.model.data.getLast('withdrawals', 50)
# mn, st = mean(wd), stdev(wd)
# if isnan(mn) or isnan(st): mn, st = .1, .1
# ttargetRR = (mn + 2 * st) / lia
# self.targetRR = (49*self.targetRR + ttargetRR)/50
#Calculate inflation as the unweighted average price change over all goods
if self.model.t >= 2:
inflation = self.model.cb.P/self.pLast - 1
self.pLast = self.model.cb.P #Remember the price from this period before altering it for the next period
self.inflation = (19 * self.inflation + inflation) / 20 #Decaying average
#Set interest rate and/or minimum repayment schedule
#Count potential borrowing in the interest rate adjustment
targeti = self.i * self.targetRR / (self.reserveRatio)
#Adjust in proportion to the rate of reserve change
#Positive deltaReserves indicates falling reserves; negative deltaReserves rising inventory
if self.model.t > 2:
deltaReserves = (self.lastReserves - self.goods[self.model.moneyGood])/self.model.cb.P
targeti *= (1 + deltaReserves/(20 ** self.model.param('pSmooth')))
self.i = (self.i * 24 + targeti)/25 #Interest rate stickiness
self.lastReserves = self.goods[self.model.moneyGood]
#Upper and lower interest rate bounds
if self.i > 1 + self.inflation: self.i = 1 + self.inflation #interest rate cap at 100%
if self.i < self.inflation + 0.005: self.i = self.inflation + 0.005 #no negative real rates
if self.i < 0.005: self.i = 0.005 #no negative nominal rates
class Loan():
def __init__(self, customer, bank):
self.customer = customer
self.bank = bank
self.loans = []
self.amortizeAmt = 0
@property
def owe(self): return sum([l['amount'] for l in self.loans])
def step(self):
#Charge the minimum repayment if the agent hasn't already amortized more than that amount
minRepay = 0
for l in self.loans:
iLoan = l['amount'] * l['i']
minRepay += iLoan #You have to pay at least the interest each period
l['amount'] += iLoan #Roll over the remainder at the original interest rate
#If they haven't paid the minimum this period, charge it
amtz = minRepay - self.amortizeAmt
defaulted = False
if amtz > 0:
if amtz > self.bank.accounts[self.customer.id]: #Can't charge them more than they have in the bank
defaulted = True
amtz = self.bank.accounts[self.customer.id]
# print(self.bank.model.t, ': Agent', self.customer.id, 'defaulted $', self.owe - amtz)
self.bank.amortize(self.customer, amtz)
if defaulted:
for n, l in enumerate(self.loans):
self.loans[n]['amount'] /= 2
self.bank.defaultTotal += l['amount']/2
##Cap defaults at the loan amount. Otherwise if i>1, defaulting results in negative debt
# if l['i'] >= 1:
# self.bank.defaultTotal += l['amount']
# del self.loans[n]
# else:
# l['amount'] -= l['amount'] * l['i']
# self.bank.defaultTotal += l['amount'] * l['i']
self.amortizeAmt = 0
#===============
# CONFIGURATION
#===============
heli.addPrimitive('bank', Bank, dflt=1, low=0, high=10, priority=1)
heli.addPrimitive('store', Store, dflt=1, low=0, high=10, priority=2)
heli.addPrimitive('agent', Agent, dflt=50, low=1, high=100, priority=3)
# Configure how many breeds there are and what good each consumes
# In this model, goods and breeds correspond, but they don't necessarily have to
breeds = [
('hobbit', 'jam', 'D73229'),
('dwarf', 'axe', '2D8DBE'),
# ('elf', 'lembas', 'CCBB22')
]
AgentGoods = {}
for b in breeds:
heli.addBreed(b[0], b[2], prim='agent')
heli.addGood(b[1], b[2])
AgentGoods[b[0]] = b[1] #Hang on to this list for future looping
M0 = 120000
heli.addGood('cash', '009900', money=True)
heli.order = 'random'
#Disable the irrelevant checkboxes if the banking model isn't selected
#Callback for the dist parameter
def bankChecks(gui, val=None):
nobank = gui.model.param('dist')!='omo'
gui.model.param('agents_bank', 0 if nobank else 1)
for i in ['debt', 'rr', 'i']:
gui.checks[i].disabled(nobank)
for b in gui.model.primitives['agent']['breeds'].keys():
gui.sliders['breed_agent-liqPref-'+b].config(state='disabled' if nobank else 'normal')
#Since the param callback takes different parameters than the GUI callback
def bankCheckWrapper(model, var, val): bankChecks(model.gui, val)
heli.addHook('terminate', bankChecks) #Reset the disabled checkmarks when terminating a model
heli.addHook('GUIPostInit', bankChecks) #Set the disabled checkmarks on initialization
# UPDATE CALLBACKS
def storeUpdater(model, var, val):
if model.hasModel: setattr(model.agents['store'][0], var, val)
def ngdpUpdater(model, var, val):
if model.hasModel: model.cb.ngdpTarget = val if not val else model.cb.ngdp
def rbalUpdater(model, var, breed, val):
if model.hasModel:
if var=='rbd':
beta = val/(1+val)
for a in model.agents['agent']:
if hasattr(a, 'utility') and a.breed == breed:
a.utility.coeffs['rbal'] = beta
a.utility.coeffs['good'] = 1-beta
elif var=='liqPref':
for a in model.agents['agent']:
if a.breed == breed:
a.liqPref = val
#Set up the info for the sliders on the control panel
#These variables attach to the Helicopter object
#Each parameter requires a corresponding routine in Helicopter.updateVar()
heli.addParameter('ngdpTarget', 'NGDP Target', 'check', dflt=False, callback=ngdpUpdater)
heli.addParameter('dist', 'Distribution', 'menu', dflt='prop', opts={
'prop': 'Helicopter/Proportional',
'lump': 'Helicopter/Lump Sum',
'omo': 'Open Market Operation'
}, runtime=False, callback=bankCheckWrapper)
heli.params['agents_bank'][1]['type'] = 'hidden'
heli.params['agents_store'][1]['type'] = 'hidden'
heli.addParameter('pSmooth', 'Price Smoothness', 'slider', dflt=1.5, opts={'low': 1, 'high': 3, 'step': 0.05}, callback=storeUpdater)
heli.addParameter('wStick', 'Wage Stickiness', 'slider', dflt=10, opts={'low': 1, 'high': 50, 'step': 1}, callback=storeUpdater)
heli.addParameter('kImmob', 'Capital Immobility', 'slider', dflt=100, opts={'low': 1, 'high': 150, 'step': 1}, callback=storeUpdater)
#Low Es means the two are complements (0=perfect complements)
#High Es means the two are substitutes (infinity=perfect substitutes)
#Doesn't really affect anything though – even utility – so don't bother exposing it
heli.addParameter('sigma', 'Elast. of substitution', 'hidden', dflt=.5, opts={'low': 0, 'high': 10, 'step': 0.1})
heli.addBreedParam('rbd', 'Demand for Real Balances', 'slider', dflt={'hobbit':7, 'dwarf': 35}, opts={'low':1, 'high': 50, 'step': 1}, prim='agent', callback=rbalUpdater)
heli.addBreedParam('liqPref', 'Demand for Liquidity', 'slider', dflt={'hobbit': 0.1, 'dwarf': 0.3}, opts={'low':0, 'high': 1, 'step': 0.01}, prim='agent', callback=rbalUpdater, desc='The proportion of the agent\'s balances he desires to keep in cash')
heli.addGoodParam('prod', 'Productivity', 'slider', dflt=1.75, opts={'low':0.1, 'high': 2, 'step': 0.1}) #If you shock productivity, make sure to call rbalupdater
#Takes as input the slider value, outputs b_g. See equation (A8) in the paper.
def rbaltodemand(breed):
def reporter(model):
rbd = model.breedParam('rbd', breed, prim='agent')
beta = rbd/(1+rbd)
return (beta/(1-beta)) * len(model.goods) * sqrt(model.goodParam('prod',AgentGoods[breed])) / sum([1/sqrt(pr) for pr in model.goodParam('prod').values()])
return reporter
#Data Collection
heli.defaultPlots.append('prices')
heli.addPlot('inventory', 'Inventory', 3)
heli.addPlot('rbal', 'Real Balances', 5)
heli.addPlot('ngdp', 'NGDP', 7, selected=False)
heli.addPlot('capital', 'Production', 9, selected=False)
heli.addPlot('wage', 'Wage', 11, selected=False)
heli.addPlot('debt', 'Debt', selected=False)
heli.addPlot('rr', 'Reserve Ratio', selected=False)
heli.addPlot('i', 'Interest Rate', selected=False)
heli.addSeries('capital', lambda t: 1/len(heli.primitives['agent']['breeds']), '', 'CCCCCC')
for breed, d in heli.primitives['agent']['breeds'].items():
heli.data.addReporter('rbalDemand-'+breed, rbaltodemand(breed))
heli.data.addReporter('eCons-'+breed, heli.data.agentReporter('expCons', 'agent', breed=breed, stat='sum'))
# heli.data.addReporter('rWage-'+breed, lambda model: heli.data.agentReporter('wage', 'store')(model) / heli.data.agentReporter('price', 'store', good=b.good)(model))
# heli.data.addReporter('expWage', heli.data.agentReporter('expWage', 'agent'))
heli.data.addReporter('rBal-'+breed, heli.data.agentReporter('realBalances', 'agent', breed=breed))
heli.data.addReporter('invTarget-'+AgentGoods[breed], heli.data.agentReporter('invTarget', 'store', good=AgentGoods[breed]))
heli.data.addReporter('portion-'+AgentGoods[breed], heli.data.agentReporter('portion', 'store', good=AgentGoods[breed]))
heli.addSeries('demand', 'eCons-'+breed, breed.title()+'s\' Expected Consumption', d.color2)
heli.addSeries('rbal', 'rbalDemand-'+breed, breed.title()+' Target Balances', d.color2)
heli.addSeries('rbal', 'rBal-'+breed, breed.title()+ 'Real Balances', d.color)
heli.addSeries('inventory', 'invTarget-'+AgentGoods[breed], AgentGoods[breed].title()+' Inventory Target', heli.goods[AgentGoods[breed]].color2)
heli.addSeries('capital', 'portion-'+AgentGoods[breed], AgentGoods[breed].title()+' Capital', heli.goods[AgentGoods[breed]].color)
# heli.addSeries('Wage', 'expWage', 'Expected Wage', '999999')
#Do this one separately so it draws on top
for good, g in heli.nonMoneyGoods.items():
heli.data.addReporter('inv-'+good, heli.data.agentReporter('goods', 'store', good=good))
heli.addSeries('inventory', 'inv-'+good, good.title()+' Inventory', g.color)
#Price ratio plots
def ratioReporter(item1, item2):
def reporter(model):
return model.data.agentReporter('price', 'store', good=item1)(model)/model.data.agentReporter('price', 'store', good=item2)(model)
return reporter
heli.addPlot('ratios', 'Price Ratios', position=3, logscale=True)
heli.addSeries('ratios', lambda t: 1, '', 'CCCCCC') #plots ratio of 1 for reference without recording a column of ones
for r in combinations(heli.nonMoneyGoods.keys(), 2):
heli.data.addReporter('ratio-'+r[0]+'-'+r[1], ratioReporter(r[0], r[1]))
c1, c2 = heli.goods[r[0]].color, heli.goods[r[1]].color
c3 = Color(red=(c1.red+c2.red)/2, green=(c1.green+c2.green)/2, blue=(c1.blue+c2.blue)/2)
heli.addSeries('ratios', 'ratio-'+r[0]+'-'+r[1], r[0].title()+'/'+r[1].title()+' Ratio', c3)
heli.defaultPlots.extend(['rbal', 'ratios', 'inventory'])
heli.data.addReporter('ngdp', lambda model: model.cb.ngdp)
heli.addSeries('ngdp', 'ngdp', 'NGDP', '000000')
heli.data.addReporter('P', lambda model: model.cb.P)
heli.data.addReporter('storeCash', heli.data.agentReporter('balance', 'store'))
heli.addSeries('money', 'storeCash', 'Store Cash', '777777')
heli.data.addReporter('StoreCashDemand', heli.data.agentReporter('cashDemand', 'store'))
heli.addSeries('money', 'StoreCashDemand', 'Store Cash Demand', 'CCCCCC')
heli.data.addReporter('wage', heli.data.agentReporter('wage', 'store'))
heli.addSeries('wage', 'wage', 'Wage', '000000')
#================
# AGENT BEHAVIOR
#================
#
# General
#
#Don't bother keeping track of the bank-specific variables unless the banking system is there
#Do this here rather than at the beginning so we can decide at runtime
def modelPreSetup(model):
if model.param('agents_bank') > 0:
model.data.addReporter('defaults', model.data.agentReporter('defaultTotal', 'bank'))
model.data.addReporter('debt', model.data.agentReporter('loans', 'bank'))
model.data.addReporter('reserveRatio', model.data.agentReporter('reserveRatio', 'bank'))
model.data.addReporter('targetRR', model.data.agentReporter('targetRR', 'bank'))
model.data.addReporter('i', model.data.agentReporter('i', 'bank'))
model.data.addReporter('r', model.data.agentReporter('realInterest', 'bank'))
model.data.addReporter('inflation', model.data.agentReporter('inflation', 'bank'))
model.data.addReporter('withdrawals', model.data.agentReporter('lastWithdrawal', 'bank'))
model.data.addReporter('M2', lambda model: model.cb.M2)
model.addSeries('money', 'defaults', 'Defaults', 'CC0000')
model.addSeries('money', 'M2', 'Money Supply', '000000')
model.addSeries('debt', 'debt', 'Outstanding Debt', '000000')
model.addSeries('rr', 'targetRR', 'Target', '777777')
model.addSeries('rr', 'reserveRatio', 'Reserve Ratio', '000000')
model.addSeries('i', 'i', 'Nominal interest', '000000')
model.addSeries('i', 'r', 'Real interest', '0000CC')
model.addSeries('i', 'inflation', 'Inflation', 'CC0000')
heli.addHook('modelPreSetup', modelPreSetup)
#
# Agents
#
from helipad.utility import CES
#Choose a bank if necessary
def baseAgentInit(agent, model):
if model.param('agents_bank') > 0 and agent.primitive != 'bank':
agent.bank = model.agents['bank'][0]
agent.bank.setupAccount(agent)
heli.addHook('baseAgentInit', baseAgentInit)
def agentInit(agent, model):
agent.store = model.agents['store'][0]
agent.item = AgentGoods[agent.breed]
rbd = model.breedParam('rbd', agent.breed, prim='agent')
beta = rbd/(rbd+1)
agent.utility = CES(['good','rbal'], agent.model.param('sigma'), {'good': 1-beta, 'rbal': beta })
agent.expCons = model.goodParam('prod', agent.item)
#Set cash endowment to equilibrium value based on parameters. Not strictly necessary but avoids the burn-in period.
agent.goods[model.moneyGood] = agent.store.price[agent.item] * rbaltodemand(agent.breed)(heli)
if model.param('agents_bank') > 0:
agent.liqPref = model.breedParam('liqPref', agent.breed, prim='agent')
heli.addHook('agentInit', agentInit)
def agentStep(agent, model, stage):
itemPrice = agent.store.price[agent.item]
b = agent.balance/itemPrice #Real balances
q = agent.utility.demand(agent.balance, {'good': itemPrice, 'rbal': itemPrice})['good'] #Equimarginal condition given CES between real balances and consumption
basicq = q #Save this for later since we adjust q
bought = agent.buy(agent.store, agent.item, q, itemPrice)
if agent.goods[model.moneyGood] < 0: agent.goods[model.moneyGood] = 0 #Floating point error gives infinitessimaly negative cash sometimes
agent.utils = agent.utility.calculate({'good': agent.goods[agent.item], 'rbal': agent.balance/itemPrice}) if hasattr(agent,'utility') else 0 #Get utility
agent.goods[agent.item] = 0 #Consume goods
negadjust = q - bought #Update your consumption expectations if the store has a shortage
if negadjust > basicq: negadjust = basicq
agent.expCons = (19 * agent.expCons + basicq-negadjust)/20 #Set expected consumption as a decaying average of consumption history
#Deposit cash in the bank at the end of each period
if hasattr(agent, 'bank'):
tCash = agent.liqPref*agent.balance
agent.bank.deposit(agent, agent.goods[agent.model.moneyGood]-tCash)
heli.addHook('agentStep', agentStep)
def realBalances(agent):
if not hasattr(agent, 'store'): return 0
return agent.balance/agent.store.price[agent.item]
# return agent.balance/agent.model.cb.P
Agent.realBalances = property(realBalances)
#Use the bank if the bank exists
def buy(agent, partner, good, q, p):
if hasattr(agent, 'bank'):
bal = agent.bank.account(agent)
if p*q > bal:
amount = bal
leftover = (p*q - bal)/q
else:
amount = p*q
leftover = 0
agent.bank.transfer(agent, partner, amount)
return (q, leftover)
heli.addHook('buy', buy)
#Use the bank if the bank exists
def pay(agent, recipient, amount, model):
if hasattr(agent, 'bank') and recipient.primitive != 'bank' and agent.primitive != 'bank':
bal = agent.bank.account(agent)
if amount > bal: #If there are not enough funds
trans = bal
amount -= bal
else:
trans = amount
amount = 0
agent.bank.transfer(agent, recipient, trans)
return amount #Should be zero. Anything leftover gets paid in cash
heli.addHook('pay', pay)
def checkBalance(agent, balance, model):
if hasattr(agent, 'bank') and agent.primitive != 'bank':
balance += agent.bank.account(agent)
return balance
heli.addHook('checkBalance', checkBalance)
#
# Central Bank
#
class CentralBank(baseAgent):
ngdpAvg = 0
ngdp = 0
primitive = 'cb'
def __init__(self, id, model):
super().__init__(None, id, model)
self.id = id
self.model = model
self.ngdpTarget = False if not model.param('ngdpTarget') else 10000
def step(self):
#Record macroeconomic vars at the end of the last stage
#Getting demand has it lagged one period…
self.ngdp = sum([self.model.data.getLast('demand-'+good) * self.model.agents['store'][0].price[good] for good in self.model.nonMoneyGoods])
if not self.ngdpAvg: self.ngdpAvg = self.ngdp
self.ngdpAvg = (2 * self.ngdpAvg + self.ngdp) / 3
#Set macroeconomic targets
expand = 0
if self.ngdpTarget: expand = self.ngdpTarget - self.ngdpAvg
if self.model.param('agents_bank') > 0: expand *= self.model.agents['bank'][0].reserveRatio
if expand != 0: self.expand(expand)
def expand(self, amount):
#Deposit with each bank in proportion to their liabilities
if 'bank' in self.model.primitives and self.model.param('agents_bank') > 0:
self.goods[self.model.moneyGood] += amount
r = self.model.agents['bank'][0].goods[self.model.moneyGood]
if -amount > r: amount = -r + 1
self.model.agents['bank'][0].deposit(self, amount)
elif self.model.param('dist') == 'lump':
amt = amount/self.model.param('agents_agent')
for a in self.model.agents['agent']:
a.goods[self.model.moneyGood] += amt
else:
M0 = self.M0
for a in self.model.allagents.values():
a.goods[self.model.moneyGood] += a.goods[self.model.moneyGood]/M0 * amount
@property
def M0(self):
return self.model.data.agentReporter('goods', 'all', good=self.model.moneyGood, stat='sum')(self.model)
@M0.setter
def M0(self, value): self.expand(value - self.M0)
@property
def M2(self):
if 'bank' not in self.model.primitives or self.model.param('agents_bank') == 0: return self.M0
return sum([a.balance for a in self.model.allagents.values()])
#Price level
#Average good prices at each store, then average all of those together weighted by the store's sale volume
#Figure out whether to break this out or not
@property
def P(self):
denom = 0
numer = 0
if not 'store' in self.model.agents: return None
return mean(array(list(self.model.agents['store'][0].price.values())))
# for s in self.model.agents['store']:
# volume = sum(list(s.lastDemand.values()))
# numer += mean(array(list(s.price.values()))) * volume
# denom += volume
#
# if denom==0: return 1
# else: return numer/denom
def modelPostSetup(model): model.cb = CentralBank(0, model)
heli.addHook('modelPostSetup', modelPostSetup)
def modelPostStep(model): model.cb.step() #Step the central bank last
heli.addHook('modelPostStep', modelPostStep)
#========
# SHOCKS
#========
#Random shock to dwarf cash demand
def shock(v):
c = random.normal(v, 4)
return c if c >= 1 else 1
heli.shocks.register('Dwarf real balances', 'rbd', shock, heli.shocks.randn(2), paramType='breed', obj='dwarf', prim='agent')
#Shock the money supply
def mshock(model):
# return v*2
pct = random.normal(1, 15)
m = model.cb.M0 * (1+pct/100)
if m < 10000: m = 10000 #Things get weird when there's a money shortage
model.cb.M0 = m
heli.shocks.register('M0 (2% prob)', None, mshock, heli.shocks.randn(2), desc="Shocks the money supply a random percentage (µ=1, σ=15) with 2% probability each period")
heli.launchGUI() | amt < 0.001: return 0 #Skip blanks and float errors
l = self.credit[customer.id] #Your loan object
l.amortizeAmt += amt #Count it toward minimum repayment
leftover = amt
#Reduce assets; amortize in the order borrowed
while leftover > 0 and len(l.loans) > 0:
if leftover >= l.loans[0]['amount']:
leftover -= l.loans[0]['amount']
del l.loans[0]
else:
l.loans[0]['amount'] -= leftover
leftover = 0
self.accounts[customer.id] -= (amt - leftover) #Reduce liabilities
return amt - leftover #How much you amortized
| identifier_body |
Helicopter-OMO.py | # A model of the relative price effects of monetary shocks via helicopter drop vs. by open market operations.
# Download the paper at https://ssrn.com/abstract=2545488
from itertools import combinations
from colour import Color
import pandas
from helipad import *
from math import sqrt
heli = Helipad()
#===============
# STORE AND BANK CLASSES
# Have to come before adding the primitives
#===============
class Store(baseAgent):
def __init__(self, breed, id, model):
super().__init__(breed, id, model)
#Start with equilibrium prices. Not strictly necessary, but it eliminates the burn-in period. See eq. A7
sm=sum([1/sqrt(model.goodParam('prod',g)) for g in model.nonMoneyGoods]) * M0/(model.param('agents_agent')*(len(model.nonMoneyGoods)+sum([1+model.breedParam('rbd', b, prim='agent') for b in model.primitives['agent']['breeds']])))
self.price = {g:sm/(sqrt(model.goodParam('prod',g))) for g in model.nonMoneyGoods}
self.invTarget = {g:model.goodParam('prod',g)*model.param('agents_agent') for g in model.nonMoneyGoods}
self.portion = {g:1/(len(model.nonMoneyGoods)) for g in model.nonMoneyGoods} #Capital allocation
self.wage = 0
self.cashDemand = 0
if hasattr(self, 'bank'):
self.pavg = 0
self.projects = []
self.defaults = 0
def step(self, stage):
super().step(stage)
N = self.model.param('agents_agent')
#Calculate wages
self.cashDemand = N * self.wage #Hold enough cash for one period's disbursements
newwage = (self.balance - self.cashDemand) / N
if newwage < 1: newwage = 1
self.wage = (self.wage * self.model.param('wStick') + newwage)/(1 + self.model.param('wStick'))
if self.wage * N > self.balance: self.wage = self.balance / N #Budget constraint
#Hire labor, with individualized wage shocks
labor = 0
for a in self.model.agents['agent']:
if self.wage < 0: self.wage = 0
wage = random.normal(self.wage, self.wage/2 + 0.1) #Can't have zero stdev
wage = 0 if wage < 0 else wage #Wage bounded from below by 0
self.pay(a, wage)
labor += 1
tPrice = sum([self.price[good] for good in self.model.nonMoneyGoods])
avg, stdev = {},{} #Hang onto these for use with credit calculations
for i in self.model.nonMoneyGoods:
#Keep track of typical demand
#Target sufficient inventory to handle 1.5 standard deviations above mean demand for the last 50 periods
history = pandas.Series(self.model.data.getLast('demand-'+i, 50)) + pandas.Series(self.model.data.getLast('shortage-'+i, 50))
avg[i], stdev[i] = history.mean(), history.std()
itt = (1 if isnan(avg[i]) else avg[i]) + 1.5 * (1 if isnan(stdev[i]) else stdev[i])
self.invTarget[i] = (self.invTarget[i] + itt)/2 #Smooth it a bit
#Set prices
#Change in the direction of hitting the inventory target
# self.price[i] += log(self.invTarget[i] / (self.inventory[i][0] + self.lastShortage[i])) #Jim's pricing rule?
self.price[i] += (self.invTarget[i] - self.goods[i] + self.model.data.getLast('shortage-'+i))/100 #/150
#Adjust in proportion to the rate of inventory change
#Positive deltaInv indicates falling inventory; negative deltaInv rising inventory
lasti = self.model.data.getLast('inv-'+i,2)[0] if self.model.t > 1 else 0
deltaInv = lasti - self.goods[i]
self.price[i] *= (1 + deltaInv/(50 ** self.model.param('pSmooth')))
if self.price[i] < 0: self.price[i] = 1
#Produce stuff
self.portion[i] = (self.model.param('kImmob') * self.portion[i] + self.price[i]/tPrice) / (self.model.param('kImmob') + 1) #Calculate capital allocation
self.goods[i] = self.goods[i] + self.portion[i] * labor * self.model.goodParam('prod',i)
#Intertemporal transactions
if hasattr(self, 'bank') and self.model.t > 0:
#Stipulate some demand for credit, we can worry about microfoundations later
self.bank.amortize(self, self.bank.credit[self.id].owe/1.5)
self.bank.borrow(self, self.model.cb.ngdp * (1-self.bank.i))
class Bank(baseAgent):
def __init__(self, breed, id, model):
super().__init__(breed, id, model)
self.i = .1 #Per-period interest rate
self.targetRR = 0.25
self.lastWithdrawal = 0
self.inflation = 0
self.accounts = {} #Liabilities
self.credit = {} #Assets
self.dif = 0 #How much credit was rationed
self.defaultTotal = 0
self.pLast = 50 #Initial price level, equal to average of initial prices
def account(self, customer):
return self.accounts[customer.id] if customer.id in self.accounts else 0
def setupAccount(self, customer):
if customer.id in self.accounts: return False #If you already have an account
self.accounts[customer.id] = 0 #Liabilities
self.credit[customer.id] = Loan(customer, self) #Assets
#Assets and liabilities should return the same thing
#Any difference gets disbursed as interest on deposits
@property
def assets(self):
return self.goods[self.model.moneyGood] + sum([l.owe for l in self.credit.values()]) #Reserves
@property
def liabilities(self):
return sum(list(self.accounts.values())) #Values returns a dict_values object, not a list. So wrap it in list()
@property
def loans(self):
return self.assets - self.goods[self.model.moneyGood]
@property
def reserveRatio(self):
l = self.liabilities
if l == 0: return 1
else: return self.goods[self.model.moneyGood] / l
@property
def realInterest(self): return self.i - self.inflation
#amt<0 to withdraw
def deposit(self, customer, amt):
amt = customer.pay(self, amt)
self.accounts[customer.id] += amt #Credit account
if amt<0: self.lastWithdrawal -= amt
return amt
def transfer(self, customer, recipient, amt):
if self.accounts[customer.id] < amt: amt = self.accounts[customer.id]
self.accounts[customer.id] -= amt
self.accounts[recipient.id] += amt
return amt
def borrow(self, customer, amt):
if amt < 0.01: return 0 #Skip blanks and float errors
l = self.credit[customer.id]
#Refinance anything with a higher interest rate
for n,loan in enumerate(l.loans):
if loan['i'] >= self.i:
amt += loan['amount']
del l.loans[n]
#Increase assets
l.loans.append({
'amount': amt,
'i': self.i
})
self.accounts[customer.id] += amt #Increase liabilities
return amt #How much you actually borrowed
#Returns the amount you actually pay – the lesser of amt or your outstanding balance
def amortize(self, customer, amt):
if amt < 0.001: return 0 #Skip blanks and float errors
l = self.credit[customer.id] #Your loan object
l.amortizeAmt += amt #Count it toward minimum repayment
leftover = amt
#Reduce assets; amortize in the order borrowed
while leftover > 0 and len(l.loans) > 0:
if leftover >= l.loans[0]['amount']:
leftover -= l.loans[0]['amount']
del l.loans[0]
else:
l.loans[0]['amount'] -= leftover
leftover = 0
self.accounts[customer.id] -= (amt - leftover) #Reduce liabilities
return amt - leftover #How much you amortized
def step(self, stage):
self.lastWithdrawal = 0
for l in self.credit: self.credit[l].step()
#Pay interest on deposits
lia = self.liabilities
profit = self.assets - lia
if profit > self.model.param('agents_agent'):
print('Disbursing profit of $',profit)
for id, a in self.accounts.items():
self.accounts[id] += profit/lia * a
# # Set target reserve ratio
# if self.model.t > 2:
# wd = self.model.data.getLast('withdrawals', 50)
# mn, st = mean(wd), stdev(wd)
# if isnan(mn) or isnan(st): mn, st = .1, .1
# ttargetRR = (mn + 2 * st) / lia
# self.targetRR = (49*self.targetRR + ttargetRR)/50
#Calculate inflation as the unweighted average price change over all goods
if self.model.t >= 2:
inflation = self.model.cb.P/self.pLast - 1
self.pLast = self.model.cb.P #Remember the price from this period before altering it for the next period
self.inflation = (19 * self.inflation + inflation) / 20 #Decaying average
#Set interest rate and/or minimum repayment schedule
#Count potential borrowing in the interest rate adjustment
targeti = self.i * self.targetRR / (self.reserveRatio)
#Adjust in proportion to the rate of reserve change
#Positive deltaReserves indicates falling reserves; negative deltaReserves rising inventory
if self.model.t > 2:
deltaReserves = (self.lastReserves - self.goods[self.model.moneyGood])/self.model.cb.P
targeti *= (1 + deltaReserves/(20 ** self.model.param('pSmooth')))
self.i = (self.i * 24 + targeti)/25 #Interest rate stickiness
self.lastReserves = self.goods[self.model.moneyGood]
#Upper and lower interest rate bounds
if self.i > 1 + self.inflation: self.i = 1 + self.inflation #interest rate cap at 100%
if self.i < self.inflation + 0.005: self.i = self.inflation + 0.005 #no negative real rates
if self.i < 0.005: self.i = 0.005 #no negative nominal rates
class Loan():
def __init__(self, customer, bank):
self.customer = customer
self.bank = bank
self.loans = []
self.amortizeAmt = 0
@property
def owe(self): return sum([l['amount'] for l in self.loans])
def step(self):
#Charge the minimum repayment if the agent hasn't already amortized more than that amount
minRepay = 0
for l in self.loans:
iLoan = l['amount'] * l['i']
minRepay += iLoan #You have to pay at least the interest each period
l['amount'] += iLoan #Roll over the remainder at the original interest rate
#If they haven't paid the minimum this period, charge it
amtz = minRepay - self.amortizeAmt
defaulted = False
if amtz > 0:
if amtz > self.bank.accounts[self.customer.id]: #Can't charge them more than they have in the bank
defaulted = True
amtz = self.bank.accounts[self.customer.id]
# print(self.bank.model.t, ': Agent', self.customer.id, 'defaulted $', self.owe - amtz)
self.bank.amortize(self.customer, amtz)
if defaulted:
for n, l in enumerate(self.loans):
self.loans[n]['amount'] /= 2
self.bank.defaultTotal += l['amount']/2
##Cap defaults at the loan amount. Otherwise if i>1, defaulting results in negative debt
# if l['i'] >= 1:
# self.bank.defaultTotal += l['amount']
# del self.loans[n]
# else:
# l['amount'] -= l['amount'] * l['i']
# self.bank.defaultTotal += l['amount'] * l['i']
self.amortizeAmt = 0
#===============
# CONFIGURATION
#===============
heli.addPrimitive('bank', Bank, dflt=1, low=0, high=10, priority=1)
heli.addPrimitive('store', Store, dflt=1, low=0, high=10, priority=2)
heli.addPrimitive('agent', Agent, dflt=50, low=1, high=100, priority=3)
# Configure how many breeds there are and what good each consumes
# In this model, goods and breeds correspond, but they don't necessarily have to
breeds = [
('hobbit', 'jam', 'D73229'),
('dwarf', 'axe', '2D8DBE'),
# ('elf', 'lembas', 'CCBB22')
]
AgentGoods = {}
for b in breeds:
heli.addBreed(b[0], b[2], prim='agent')
heli.addGood(b[1], b[2])
AgentGoods[b[0]] = b[1] #Hang on to this list for future looping
M0 = 120000
heli.addGood('cash', '009900', money=True)
heli.order = 'random'
#Disable the irrelevant checkboxes if the banking model isn't selected
#Callback for the dist parameter
def bankChecks(gui, val=None):
nobank = gui.model.param('dist')!='omo'
gui.model.param('agents_bank', 0 if nobank else 1)
for i in ['debt', 'rr', 'i']:
gui.checks[i].disabled(nobank)
for b in gui.model.primitives['agent']['breeds'].keys():
gui.sliders['breed_agent-liqPref-'+b].config(state='disabled' if nobank else 'normal')
#Since the param callback takes different parameters than the GUI callback
def bankCheckWrapper(model, var, val): bankChecks(model.gui, val)
heli.addHook('terminate', bankChecks) #Reset the disabled checkmarks when terminating a model
heli.addHook('GUIPostInit', bankChecks) #Set the disabled checkmarks on initialization
# UPDATE CALLBACKS
def storeUpdater(model, var, val):
if model.hasModel: setattr(model.agents['store'][0], var, val)
def ngdpUpdater(model, var, val):
if model.hasModel: model.cb.ngdpTarget = val if not val else model.cb.ngdp
def rbalUpdater(model, var, breed, val):
if model.hasModel:
if var=='rbd':
beta = val/(1+val)
for a in model.agents['agent']:
if hasattr(a, 'utility') and a.breed == breed:
a.utility.coeffs['rbal'] = beta
a.utility.coeffs['good'] = 1-beta
elif var=='liqPref':
for a in model.agents['agent']:
if a.breed == breed:
a.liqPref = val
#Set up the info for the sliders on the control panel
#These variables attach to the Helicopter object
#Each parameter requires a corresponding routine in Helicopter.updateVar()
heli.addParameter('ngdpTarget', 'NGDP Target', 'check', dflt=False, callback=ngdpUpdater)
heli.addParameter('dist', 'Distribution', 'menu', dflt='prop', opts={
'prop': 'Helicopter/Proportional',
'lump': 'Helicopter/Lump Sum',
'omo': 'Open Market Operation'
}, runtime=False, callback=bankCheckWrapper)
heli.params['agents_bank'][1]['type'] = 'hidden'
heli.params['agents_store'][1]['type'] = 'hidden'
heli.addParameter('pSmooth', 'Price Smoothness', 'slider', dflt=1.5, opts={'low': 1, 'high': 3, 'step': 0.05}, callback=storeUpdater)
heli.addParameter('wStick', 'Wage Stickiness', 'slider', dflt=10, opts={'low': 1, 'high': 50, 'step': 1}, callback=storeUpdater)
heli.addParameter('kImmob', 'Capital Immobility', 'slider', dflt=100, opts={'low': 1, 'high': 150, 'step': 1}, callback=storeUpdater)
#Low Es means the two are complements (0=perfect complements)
#High Es means the two are substitutes (infinity=perfect substitutes)
#Doesn't really affect anything though – even utility – so don't bother exposing it
heli.addParameter('sigma', 'Elast. of substitution', 'hidden', dflt=.5, opts={'low': 0, 'high': 10, 'step': 0.1})
heli.addBreedParam('rbd', 'Demand for Real Balances', 'slider', dflt={'hobbit':7, 'dwarf': 35}, opts={'low':1, 'high': 50, 'step': 1}, prim='agent', callback=rbalUpdater)
heli.addBreedParam('liqPref', 'Demand for Liquidity', 'slider', dflt={'hobbit': 0.1, 'dwarf': 0.3}, opts={'low':0, 'high': 1, 'step': 0.01}, prim='agent', callback=rbalUpdater, desc='The proportion of the agent\'s balances he desires to keep in cash')
heli.addGoodParam('prod', 'Productivity', 'slider', dflt=1.75, opts={'low':0.1, 'high': 2, 'step': 0.1}) #If you shock productivity, make sure to call rbalupdater
#Takes as input the slider value, outputs b_g. See equation (A8) in the paper.
def rbaltodemand(breed):
def report | ):
rbd = model.breedParam('rbd', breed, prim='agent')
beta = rbd/(1+rbd)
return (beta/(1-beta)) * len(model.goods) * sqrt(model.goodParam('prod',AgentGoods[breed])) / sum([1/sqrt(pr) for pr in model.goodParam('prod').values()])
return reporter
#Data Collection
heli.defaultPlots.append('prices')
heli.addPlot('inventory', 'Inventory', 3)
heli.addPlot('rbal', 'Real Balances', 5)
heli.addPlot('ngdp', 'NGDP', 7, selected=False)
heli.addPlot('capital', 'Production', 9, selected=False)
heli.addPlot('wage', 'Wage', 11, selected=False)
heli.addPlot('debt', 'Debt', selected=False)
heli.addPlot('rr', 'Reserve Ratio', selected=False)
heli.addPlot('i', 'Interest Rate', selected=False)
heli.addSeries('capital', lambda t: 1/len(heli.primitives['agent']['breeds']), '', 'CCCCCC')
for breed, d in heli.primitives['agent']['breeds'].items():
heli.data.addReporter('rbalDemand-'+breed, rbaltodemand(breed))
heli.data.addReporter('eCons-'+breed, heli.data.agentReporter('expCons', 'agent', breed=breed, stat='sum'))
# heli.data.addReporter('rWage-'+breed, lambda model: heli.data.agentReporter('wage', 'store')(model) / heli.data.agentReporter('price', 'store', good=b.good)(model))
# heli.data.addReporter('expWage', heli.data.agentReporter('expWage', 'agent'))
heli.data.addReporter('rBal-'+breed, heli.data.agentReporter('realBalances', 'agent', breed=breed))
heli.data.addReporter('invTarget-'+AgentGoods[breed], heli.data.agentReporter('invTarget', 'store', good=AgentGoods[breed]))
heli.data.addReporter('portion-'+AgentGoods[breed], heli.data.agentReporter('portion', 'store', good=AgentGoods[breed]))
heli.addSeries('demand', 'eCons-'+breed, breed.title()+'s\' Expected Consumption', d.color2)
heli.addSeries('rbal', 'rbalDemand-'+breed, breed.title()+' Target Balances', d.color2)
heli.addSeries('rbal', 'rBal-'+breed, breed.title()+ 'Real Balances', d.color)
heli.addSeries('inventory', 'invTarget-'+AgentGoods[breed], AgentGoods[breed].title()+' Inventory Target', heli.goods[AgentGoods[breed]].color2)
heli.addSeries('capital', 'portion-'+AgentGoods[breed], AgentGoods[breed].title()+' Capital', heli.goods[AgentGoods[breed]].color)
# heli.addSeries('Wage', 'expWage', 'Expected Wage', '999999')
#Do this one separately so it draws on top
for good, g in heli.nonMoneyGoods.items():
heli.data.addReporter('inv-'+good, heli.data.agentReporter('goods', 'store', good=good))
heli.addSeries('inventory', 'inv-'+good, good.title()+' Inventory', g.color)
#Price ratio plots
def ratioReporter(item1, item2):
def reporter(model):
return model.data.agentReporter('price', 'store', good=item1)(model)/model.data.agentReporter('price', 'store', good=item2)(model)
return reporter
heli.addPlot('ratios', 'Price Ratios', position=3, logscale=True)
heli.addSeries('ratios', lambda t: 1, '', 'CCCCCC') #plots ratio of 1 for reference without recording a column of ones
for r in combinations(heli.nonMoneyGoods.keys(), 2):
heli.data.addReporter('ratio-'+r[0]+'-'+r[1], ratioReporter(r[0], r[1]))
c1, c2 = heli.goods[r[0]].color, heli.goods[r[1]].color
c3 = Color(red=(c1.red+c2.red)/2, green=(c1.green+c2.green)/2, blue=(c1.blue+c2.blue)/2)
heli.addSeries('ratios', 'ratio-'+r[0]+'-'+r[1], r[0].title()+'/'+r[1].title()+' Ratio', c3)
heli.defaultPlots.extend(['rbal', 'ratios', 'inventory'])
heli.data.addReporter('ngdp', lambda model: model.cb.ngdp)
heli.addSeries('ngdp', 'ngdp', 'NGDP', '000000')
heli.data.addReporter('P', lambda model: model.cb.P)
heli.data.addReporter('storeCash', heli.data.agentReporter('balance', 'store'))
heli.addSeries('money', 'storeCash', 'Store Cash', '777777')
heli.data.addReporter('StoreCashDemand', heli.data.agentReporter('cashDemand', 'store'))
heli.addSeries('money', 'StoreCashDemand', 'Store Cash Demand', 'CCCCCC')
heli.data.addReporter('wage', heli.data.agentReporter('wage', 'store'))
heli.addSeries('wage', 'wage', 'Wage', '000000')
#================
# AGENT BEHAVIOR
#================
#
# General
#
#Don't bother keeping track of the bank-specific variables unless the banking system is there
#Do this here rather than at the beginning so we can decide at runtime
def modelPreSetup(model):
if model.param('agents_bank') > 0:
model.data.addReporter('defaults', model.data.agentReporter('defaultTotal', 'bank'))
model.data.addReporter('debt', model.data.agentReporter('loans', 'bank'))
model.data.addReporter('reserveRatio', model.data.agentReporter('reserveRatio', 'bank'))
model.data.addReporter('targetRR', model.data.agentReporter('targetRR', 'bank'))
model.data.addReporter('i', model.data.agentReporter('i', 'bank'))
model.data.addReporter('r', model.data.agentReporter('realInterest', 'bank'))
model.data.addReporter('inflation', model.data.agentReporter('inflation', 'bank'))
model.data.addReporter('withdrawals', model.data.agentReporter('lastWithdrawal', 'bank'))
model.data.addReporter('M2', lambda model: model.cb.M2)
model.addSeries('money', 'defaults', 'Defaults', 'CC0000')
model.addSeries('money', 'M2', 'Money Supply', '000000')
model.addSeries('debt', 'debt', 'Outstanding Debt', '000000')
model.addSeries('rr', 'targetRR', 'Target', '777777')
model.addSeries('rr', 'reserveRatio', 'Reserve Ratio', '000000')
model.addSeries('i', 'i', 'Nominal interest', '000000')
model.addSeries('i', 'r', 'Real interest', '0000CC')
model.addSeries('i', 'inflation', 'Inflation', 'CC0000')
heli.addHook('modelPreSetup', modelPreSetup)
#
# Agents
#
from helipad.utility import CES
#Choose a bank if necessary
def baseAgentInit(agent, model):
if model.param('agents_bank') > 0 and agent.primitive != 'bank':
agent.bank = model.agents['bank'][0]
agent.bank.setupAccount(agent)
heli.addHook('baseAgentInit', baseAgentInit)
def agentInit(agent, model):
agent.store = model.agents['store'][0]
agent.item = AgentGoods[agent.breed]
rbd = model.breedParam('rbd', agent.breed, prim='agent')
beta = rbd/(rbd+1)
agent.utility = CES(['good','rbal'], agent.model.param('sigma'), {'good': 1-beta, 'rbal': beta })
agent.expCons = model.goodParam('prod', agent.item)
#Set cash endowment to equilibrium value based on parameters. Not strictly necessary but avoids the burn-in period.
agent.goods[model.moneyGood] = agent.store.price[agent.item] * rbaltodemand(agent.breed)(heli)
if model.param('agents_bank') > 0:
agent.liqPref = model.breedParam('liqPref', agent.breed, prim='agent')
heli.addHook('agentInit', agentInit)
def agentStep(agent, model, stage):
itemPrice = agent.store.price[agent.item]
b = agent.balance/itemPrice #Real balances
q = agent.utility.demand(agent.balance, {'good': itemPrice, 'rbal': itemPrice})['good'] #Equimarginal condition given CES between real balances and consumption
basicq = q #Save this for later since we adjust q
bought = agent.buy(agent.store, agent.item, q, itemPrice)
if agent.goods[model.moneyGood] < 0: agent.goods[model.moneyGood] = 0 #Floating point error gives infinitessimaly negative cash sometimes
agent.utils = agent.utility.calculate({'good': agent.goods[agent.item], 'rbal': agent.balance/itemPrice}) if hasattr(agent,'utility') else 0 #Get utility
agent.goods[agent.item] = 0 #Consume goods
negadjust = q - bought #Update your consumption expectations if the store has a shortage
if negadjust > basicq: negadjust = basicq
agent.expCons = (19 * agent.expCons + basicq-negadjust)/20 #Set expected consumption as a decaying average of consumption history
#Deposit cash in the bank at the end of each period
if hasattr(agent, 'bank'):
tCash = agent.liqPref*agent.balance
agent.bank.deposit(agent, agent.goods[agent.model.moneyGood]-tCash)
heli.addHook('agentStep', agentStep)
def realBalances(agent):
if not hasattr(agent, 'store'): return 0
return agent.balance/agent.store.price[agent.item]
# return agent.balance/agent.model.cb.P
Agent.realBalances = property(realBalances)
#Use the bank if the bank exists
def buy(agent, partner, good, q, p):
if hasattr(agent, 'bank'):
bal = agent.bank.account(agent)
if p*q > bal:
amount = bal
leftover = (p*q - bal)/q
else:
amount = p*q
leftover = 0
agent.bank.transfer(agent, partner, amount)
return (q, leftover)
heli.addHook('buy', buy)
#Use the bank if the bank exists
def pay(agent, recipient, amount, model):
if hasattr(agent, 'bank') and recipient.primitive != 'bank' and agent.primitive != 'bank':
bal = agent.bank.account(agent)
if amount > bal: #If there are not enough funds
trans = bal
amount -= bal
else:
trans = amount
amount = 0
agent.bank.transfer(agent, recipient, trans)
return amount #Should be zero. Anything leftover gets paid in cash
heli.addHook('pay', pay)
def checkBalance(agent, balance, model):
if hasattr(agent, 'bank') and agent.primitive != 'bank':
balance += agent.bank.account(agent)
return balance
heli.addHook('checkBalance', checkBalance)
#
# Central Bank
#
class CentralBank(baseAgent):
ngdpAvg = 0
ngdp = 0
primitive = 'cb'
def __init__(self, id, model):
super().__init__(None, id, model)
self.id = id
self.model = model
self.ngdpTarget = False if not model.param('ngdpTarget') else 10000
def step(self):
#Record macroeconomic vars at the end of the last stage
#Getting demand has it lagged one period…
self.ngdp = sum([self.model.data.getLast('demand-'+good) * self.model.agents['store'][0].price[good] for good in self.model.nonMoneyGoods])
if not self.ngdpAvg: self.ngdpAvg = self.ngdp
self.ngdpAvg = (2 * self.ngdpAvg + self.ngdp) / 3
#Set macroeconomic targets
expand = 0
if self.ngdpTarget: expand = self.ngdpTarget - self.ngdpAvg
if self.model.param('agents_bank') > 0: expand *= self.model.agents['bank'][0].reserveRatio
if expand != 0: self.expand(expand)
def expand(self, amount):
#Deposit with each bank in proportion to their liabilities
if 'bank' in self.model.primitives and self.model.param('agents_bank') > 0:
self.goods[self.model.moneyGood] += amount
r = self.model.agents['bank'][0].goods[self.model.moneyGood]
if -amount > r: amount = -r + 1
self.model.agents['bank'][0].deposit(self, amount)
elif self.model.param('dist') == 'lump':
amt = amount/self.model.param('agents_agent')
for a in self.model.agents['agent']:
a.goods[self.model.moneyGood] += amt
else:
M0 = self.M0
for a in self.model.allagents.values():
a.goods[self.model.moneyGood] += a.goods[self.model.moneyGood]/M0 * amount
@property
def M0(self):
return self.model.data.agentReporter('goods', 'all', good=self.model.moneyGood, stat='sum')(self.model)
@M0.setter
def M0(self, value): self.expand(value - self.M0)
@property
def M2(self):
if 'bank' not in self.model.primitives or self.model.param('agents_bank') == 0: return self.M0
return sum([a.balance for a in self.model.allagents.values()])
#Price level
#Average good prices at each store, then average all of those together weighted by the store's sale volume
#Figure out whether to break this out or not
@property
def P(self):
denom = 0
numer = 0
if not 'store' in self.model.agents: return None
return mean(array(list(self.model.agents['store'][0].price.values())))
# for s in self.model.agents['store']:
# volume = sum(list(s.lastDemand.values()))
# numer += mean(array(list(s.price.values()))) * volume
# denom += volume
#
# if denom==0: return 1
# else: return numer/denom
def modelPostSetup(model): model.cb = CentralBank(0, model)
heli.addHook('modelPostSetup', modelPostSetup)
def modelPostStep(model): model.cb.step() #Step the central bank last
heli.addHook('modelPostStep', modelPostStep)
#========
# SHOCKS
#========
#Random shock to dwarf cash demand
def shock(v):
c = random.normal(v, 4)
return c if c >= 1 else 1
heli.shocks.register('Dwarf real balances', 'rbd', shock, heli.shocks.randn(2), paramType='breed', obj='dwarf', prim='agent')
#Shock the money supply
def mshock(model):
# return v*2
pct = random.normal(1, 15)
m = model.cb.M0 * (1+pct/100)
if m < 10000: m = 10000 #Things get weird when there's a money shortage
model.cb.M0 = m
heli.shocks.register('M0 (2% prob)', None, mshock, heli.shocks.randn(2), desc="Shocks the money supply a random percentage (µ=1, σ=15) with 2% probability each period")
heli.launchGUI() | er(model | identifier_name |
Helicopter-OMO.py | # A model of the relative price effects of monetary shocks via helicopter drop vs. by open market operations.
# Download the paper at https://ssrn.com/abstract=2545488
from itertools import combinations
from colour import Color
import pandas
from helipad import *
from math import sqrt
heli = Helipad()
#===============
# STORE AND BANK CLASSES
# Have to come before adding the primitives
#===============
class Store(baseAgent):
def __init__(self, breed, id, model):
super().__init__(breed, id, model)
#Start with equilibrium prices. Not strictly necessary, but it eliminates the burn-in period. See eq. A7
sm=sum([1/sqrt(model.goodParam('prod',g)) for g in model.nonMoneyGoods]) * M0/(model.param('agents_agent')*(len(model.nonMoneyGoods)+sum([1+model.breedParam('rbd', b, prim='agent') for b in model.primitives['agent']['breeds']])))
self.price = {g:sm/(sqrt(model.goodParam('prod',g))) for g in model.nonMoneyGoods}
self.invTarget = {g:model.goodParam('prod',g)*model.param('agents_agent') for g in model.nonMoneyGoods}
self.portion = {g:1/(len(model.nonMoneyGoods)) for g in model.nonMoneyGoods} #Capital allocation
self.wage = 0
self.cashDemand = 0
if hasattr(self, 'bank'):
self.pavg = 0
self.projects = []
self.defaults = 0
def step(self, stage):
super().step(stage)
N = self.model.param('agents_agent')
#Calculate wages
self.cashDemand = N * self.wage #Hold enough cash for one period's disbursements
newwage = (self.balance - self.cashDemand) / N
if newwage < 1: newwage = 1
self.wage = (self.wage * self.model.param('wStick') + newwage)/(1 + self.model.param('wStick'))
if self.wage * N > self.balance: self.wage = self.balance / N #Budget constraint
#Hire labor, with individualized wage shocks
labor = 0
for a in self.model.agents['agent']:
if self.wage < 0: self.wage = 0
wage = random.normal(self.wage, self.wage/2 + 0.1) #Can't have zero stdev
wage = 0 if wage < 0 else wage #Wage bounded from below by 0
self.pay(a, wage)
labor += 1
tPrice = sum([self.price[good] for good in self.model.nonMoneyGoods])
avg, stdev = {},{} #Hang onto these for use with credit calculations
for i in self.model.nonMoneyGoods:
#Keep track of typical demand
#Target sufficient inventory to handle 1.5 standard deviations above mean demand for the last 50 periods
history = pandas.Series(self.model.data.getLast('demand-'+i, 50)) + pandas.Series(self.model.data.getLast('shortage-'+i, 50))
avg[i], stdev[i] = history.mean(), history.std()
itt = (1 if isnan(avg[i]) else avg[i]) + 1.5 * (1 if isnan(stdev[i]) else stdev[i])
self.invTarget[i] = (self.invTarget[i] + itt)/2 #Smooth it a bit
#Set prices
#Change in the direction of hitting the inventory target
# self.price[i] += log(self.invTarget[i] / (self.inventory[i][0] + self.lastShortage[i])) #Jim's pricing rule?
self.price[i] += (self.invTarget[i] - self.goods[i] + self.model.data.getLast('shortage-'+i))/100 #/150
#Adjust in proportion to the rate of inventory change
#Positive deltaInv indicates falling inventory; negative deltaInv rising inventory
lasti = self.model.data.getLast('inv-'+i,2)[0] if self.model.t > 1 else 0
deltaInv = lasti - self.goods[i]
self.price[i] *= (1 + deltaInv/(50 ** self.model.param('pSmooth')))
if self.price[i] < 0: self.price[i] = 1
#Produce stuff
self.portion[i] = (self.model.param('kImmob') * self.portion[i] + self.price[i]/tPrice) / (self.model.param('kImmob') + 1) #Calculate capital allocation
self.goods[i] = self.goods[i] + self.portion[i] * labor * self.model.goodParam('prod',i)
#Intertemporal transactions
if hasattr(self, 'bank') and self.model.t > 0:
#Stipulate some demand for credit, we can worry about microfoundations later
self.bank.amortize(self, self.bank.credit[self.id].owe/1.5)
self.bank.borrow(self, self.model.cb.ngdp * (1-self.bank.i))
class Bank(baseAgent):
def __init__(self, breed, id, model):
super().__init__(breed, id, model)
self.i = .1 #Per-period interest rate
self.targetRR = 0.25
self.lastWithdrawal = 0
self.inflation = 0
self.accounts = {} #Liabilities
self.credit = {} #Assets
self.dif = 0 #How much credit was rationed
self.defaultTotal = 0
self.pLast = 50 #Initial price level, equal to average of initial prices
def account(self, customer):
return self.accounts[customer.id] if customer.id in self.accounts else 0
def setupAccount(self, customer):
if customer.id in self.accounts: return False #If you already have an account
self.accounts[customer.id] = 0 #Liabilities
self.credit[customer.id] = Loan(customer, self) #Assets
#Assets and liabilities should return the same thing
#Any difference gets disbursed as interest on deposits
@property
def assets(self):
return self.goods[self.model.moneyGood] + sum([l.owe for l in self.credit.values()]) #Reserves
@property
def liabilities(self):
return sum(list(self.accounts.values())) #Values returns a dict_values object, not a list. So wrap it in list()
@property
def loans(self):
return self.assets - self.goods[self.model.moneyGood]
@property
def reserveRatio(self):
l = self.liabilities
if l == 0: return 1
else: return self.goods[self.model.moneyGood] / l
@property
def realInterest(self): return self.i - self.inflation
#amt<0 to withdraw
def deposit(self, customer, amt):
amt = customer.pay(self, amt)
self.accounts[customer.id] += amt #Credit account
if amt<0: self.lastWithdrawal -= amt
return amt
def transfer(self, customer, recipient, amt):
if self.accounts[customer.id] < amt: amt = self.accounts[customer.id]
self.accounts[customer.id] -= amt
self.accounts[recipient.id] += amt
return amt
def borrow(self, customer, amt):
if amt < 0.01: return 0 #Skip blanks and float errors
l = self.credit[customer.id]
#Refinance anything with a higher interest rate
for n,loan in enumerate(l.loans):
if loan['i'] >= self.i:
amt += loan['amount']
del l.loans[n]
#Increase assets
l.loans.append({
'amount': amt,
'i': self.i
})
self.accounts[customer.id] += amt #Increase liabilities
return amt #How much you actually borrowed
#Returns the amount you actually pay – the lesser of amt or your outstanding balance
def amortize(self, customer, amt):
if amt < 0.001: return 0 #Skip blanks and float errors
l = self.credit[customer.id] #Your loan object
l.amortizeAmt += amt #Count it toward minimum repayment
leftover = amt
#Reduce assets; amortize in the order borrowed
while leftover > 0 and len(l.loans) > 0:
if leftover >= l.loans[0]['amount']:
leftover -= l.loans[0]['amount']
del l.loans[0]
else:
l.loans[0]['amount'] -= leftover
leftover = 0
self.accounts[customer.id] -= (amt - leftover) #Reduce liabilities
return amt - leftover #How much you amortized
def step(self, stage):
self.lastWithdrawal = 0
for l in self.credit: self.credit[l].step()
#Pay interest on deposits
lia = self.liabilities
profit = self.assets - lia
if profit > self.model.param('agents_agent'):
print('Disbursing profit of $',profit)
for id, a in self.accounts.items():
self.accounts[id] += profit/lia * a
# # Set target reserve ratio
# if self.model.t > 2:
# wd = self.model.data.getLast('withdrawals', 50)
# mn, st = mean(wd), stdev(wd)
# if isnan(mn) or isnan(st): mn, st = .1, .1
# ttargetRR = (mn + 2 * st) / lia
# self.targetRR = (49*self.targetRR + ttargetRR)/50
#Calculate inflation as the unweighted average price change over all goods
if self.model.t >= 2:
inflation = self.model.cb.P/self.pLast - 1
self.pLast = self.model.cb.P #Remember the price from this period before altering it for the next period
self.inflation = (19 * self.inflation + inflation) / 20 #Decaying average
#Set interest rate and/or minimum repayment schedule
#Count potential borrowing in the interest rate adjustment
targeti = self.i * self.targetRR / (self.reserveRatio)
#Adjust in proportion to the rate of reserve change
#Positive deltaReserves indicates falling reserves; negative deltaReserves rising inventory
if self.model.t > 2:
deltaReserves = (self.lastReserves - self.goods[self.model.moneyGood])/self.model.cb.P
targeti *= (1 + deltaReserves/(20 ** self.model.param('pSmooth')))
self.i = (self.i * 24 + targeti)/25 #Interest rate stickiness
self.lastReserves = self.goods[self.model.moneyGood]
#Upper and lower interest rate bounds
if self.i > 1 + self.inflation: self.i = 1 + self.inflation #interest rate cap at 100%
if self.i < self.inflation + 0.005: self.i = self.inflation + 0.005 #no negative real rates
if self.i < 0.005: self.i = 0.005 #no negative nominal rates
class Loan():
def __init__(self, customer, bank):
self.customer = customer
self.bank = bank
self.loans = []
self.amortizeAmt = 0
@property
def owe(self): return sum([l['amount'] for l in self.loans])
def step(self):
#Charge the minimum repayment if the agent hasn't already amortized more than that amount
minRepay = 0
for l in self.loans:
iLoan = l['amount'] * l['i']
minRepay += iLoan #You have to pay at least the interest each period
l['amount'] += iLoan #Roll over the remainder at the original interest rate
#If they haven't paid the minimum this period, charge it
amtz = minRepay - self.amortizeAmt
defaulted = False
if amtz > 0:
if amtz > self.bank.accounts[self.customer.id]: #Can't charge them more than they have in the bank
defaulted = True
amtz = self.bank.accounts[self.customer.id]
# print(self.bank.model.t, ': Agent', self.customer.id, 'defaulted $', self.owe - amtz)
self.bank.amortize(self.customer, amtz)
if defaulted:
for n, l in enumerate(self.loans):
self.loans[n]['amount'] /= 2
self.bank.defaultTotal += l['amount']/2
##Cap defaults at the loan amount. Otherwise if i>1, defaulting results in negative debt
# if l['i'] >= 1:
# self.bank.defaultTotal += l['amount']
# del self.loans[n]
# else:
# l['amount'] -= l['amount'] * l['i']
# self.bank.defaultTotal += l['amount'] * l['i']
self.amortizeAmt = 0
#===============
# CONFIGURATION
#===============
heli.addPrimitive('bank', Bank, dflt=1, low=0, high=10, priority=1)
heli.addPrimitive('store', Store, dflt=1, low=0, high=10, priority=2)
heli.addPrimitive('agent', Agent, dflt=50, low=1, high=100, priority=3)
# Configure how many breeds there are and what good each consumes
# In this model, goods and breeds correspond, but they don't necessarily have to
breeds = [
('hobbit', 'jam', 'D73229'),
('dwarf', 'axe', '2D8DBE'),
# ('elf', 'lembas', 'CCBB22')
]
AgentGoods = {}
for b in breeds:
heli.addBreed(b[0], b[2], prim='agent')
heli.addGood(b[1], b[2])
AgentGoods[b[0]] = b[1] #Hang on to this list for future looping
M0 = 120000
heli.addGood('cash', '009900', money=True)
heli.order = 'random'
#Disable the irrelevant checkboxes if the banking model isn't selected
#Callback for the dist parameter
def bankChecks(gui, val=None):
nobank = gui.model.param('dist')!='omo'
gui.model.param('agents_bank', 0 if nobank else 1)
for i in ['debt', 'rr', 'i']:
gui.checks[i].disabled(nobank)
for b in gui.model.primitives['agent']['breeds'].keys():
gui.sliders['breed_agent-liqPref-'+b].config(state='disabled' if nobank else 'normal')
#Since the param callback takes different parameters than the GUI callback
def bankCheckWrapper(model, var, val): bankChecks(model.gui, val)
heli.addHook('terminate', bankChecks) #Reset the disabled checkmarks when terminating a model
heli.addHook('GUIPostInit', bankChecks) #Set the disabled checkmarks on initialization
# UPDATE CALLBACKS
def storeUpdater(model, var, val):
if model.hasModel: setattr(model.agents['store'][0], var, val)
def ngdpUpdater(model, var, val):
if model.hasModel: model.cb.ngdpTarget = val if not val else model.cb.ngdp
def rbalUpdater(model, var, breed, val):
if model.hasModel:
if var=='rbd':
beta = val/(1+val)
for a in model.agents['agent']:
if hasattr(a, 'utility') and a.breed == breed:
a.utility.coeffs['rbal'] = beta
a.utility.coeffs['good'] = 1-beta
elif var=='liqPref':
for a in model.agents['agent']:
if a.breed == breed:
a.liqPref = val
#Set up the info for the sliders on the control panel
#These variables attach to the Helicopter object
#Each parameter requires a corresponding routine in Helicopter.updateVar()
heli.addParameter('ngdpTarget', 'NGDP Target', 'check', dflt=False, callback=ngdpUpdater)
heli.addParameter('dist', 'Distribution', 'menu', dflt='prop', opts={
'prop': 'Helicopter/Proportional',
'lump': 'Helicopter/Lump Sum',
'omo': 'Open Market Operation'
}, runtime=False, callback=bankCheckWrapper)
heli.params['agents_bank'][1]['type'] = 'hidden'
heli.params['agents_store'][1]['type'] = 'hidden'
heli.addParameter('pSmooth', 'Price Smoothness', 'slider', dflt=1.5, opts={'low': 1, 'high': 3, 'step': 0.05}, callback=storeUpdater)
heli.addParameter('wStick', 'Wage Stickiness', 'slider', dflt=10, opts={'low': 1, 'high': 50, 'step': 1}, callback=storeUpdater)
heli.addParameter('kImmob', 'Capital Immobility', 'slider', dflt=100, opts={'low': 1, 'high': 150, 'step': 1}, callback=storeUpdater)
#Low Es means the two are complements (0=perfect complements)
#High Es means the two are substitutes (infinity=perfect substitutes)
#Doesn't really affect anything though – even utility – so don't bother exposing it
heli.addParameter('sigma', 'Elast. of substitution', 'hidden', dflt=.5, opts={'low': 0, 'high': 10, 'step': 0.1})
heli.addBreedParam('rbd', 'Demand for Real Balances', 'slider', dflt={'hobbit':7, 'dwarf': 35}, opts={'low':1, 'high': 50, 'step': 1}, prim='agent', callback=rbalUpdater)
heli.addBreedParam('liqPref', 'Demand for Liquidity', 'slider', dflt={'hobbit': 0.1, 'dwarf': 0.3}, opts={'low':0, 'high': 1, 'step': 0.01}, prim='agent', callback=rbalUpdater, desc='The proportion of the agent\'s balances he desires to keep in cash')
heli.addGoodParam('prod', 'Productivity', 'slider', dflt=1.75, opts={'low':0.1, 'high': 2, 'step': 0.1}) #If you shock productivity, make sure to call rbalupdater
#Takes as input the slider value, outputs b_g. See equation (A8) in the paper.
def rbaltodemand(breed):
def reporter(model):
rbd = model.breedParam('rbd', breed, prim='agent')
beta = rbd/(1+rbd)
return (beta/(1-beta)) * len(model.goods) * sqrt(model.goodParam('prod',AgentGoods[breed])) / sum([1/sqrt(pr) for pr in model.goodParam('prod').values()])
return reporter
#Data Collection
heli.defaultPlots.append('prices')
heli.addPlot('inventory', 'Inventory', 3)
heli.addPlot('rbal', 'Real Balances', 5)
heli.addPlot('ngdp', 'NGDP', 7, selected=False)
heli.addPlot('capital', 'Production', 9, selected=False)
heli.addPlot('wage', 'Wage', 11, selected=False)
heli.addPlot('debt', 'Debt', selected=False)
heli.addPlot('rr', 'Reserve Ratio', selected=False)
heli.addPlot('i', 'Interest Rate', selected=False)
heli.addSeries('capital', lambda t: 1/len(heli.primitives['agent']['breeds']), '', 'CCCCCC')
for breed, d in heli.primitives['agent']['breeds'].items():
heli.data.addReporter('rbalDemand-'+breed, rbaltodemand(breed))
heli.data.addReporter('eCons-'+breed, heli.data.agentReporter('expCons', 'agent', breed=breed, stat='sum'))
# heli.data.addReporter('rWage-'+breed, lambda model: heli.data.agentReporter('wage', 'store')(model) / heli.data.agentReporter('price', 'store', good=b.good)(model))
# heli.data.addReporter('expWage', heli.data.agentReporter('expWage', 'agent'))
heli.data.addReporter('rBal-'+breed, heli.data.agentReporter('realBalances', 'agent', breed=breed))
heli.data.addReporter('invTarget-'+AgentGoods[breed], heli.data.agentReporter('invTarget', 'store', good=AgentGoods[breed]))
heli.data.addReporter('portion-'+AgentGoods[breed], heli.data.agentReporter('portion', 'store', good=AgentGoods[breed]))
heli.addSeries('demand', 'eCons-'+breed, breed.title()+'s\' Expected Consumption', d.color2)
heli.addSeries('rbal', 'rbalDemand-'+breed, breed.title()+' Target Balances', d.color2)
heli.addSeries('rbal', 'rBal-'+breed, breed.title()+ 'Real Balances', d.color)
heli.addSeries('inventory', 'invTarget-'+AgentGoods[breed], AgentGoods[breed].title()+' Inventory Target', heli.goods[AgentGoods[breed]].color2)
heli.addSeries('capital', 'portion-'+AgentGoods[breed], AgentGoods[breed].title()+' Capital', heli.goods[AgentGoods[breed]].color)
# heli.addSeries('Wage', 'expWage', 'Expected Wage', '999999')
#Do this one separately so it draws on top
for good, g in heli.nonMoneyGoods.items():
heli.data.addReporter('inv-'+good, heli.data.agentReporter('goods', 'store', good=good))
heli.addSeries('inventory', 'inv-'+good, good.title()+' Inventory', g.color)
#Price ratio plots
def ratioReporter(item1, item2):
def reporter(model):
return model.data.agentReporter('price', 'store', good=item1)(model)/model.data.agentReporter('price', 'store', good=item2)(model)
return reporter
heli.addPlot('ratios', 'Price Ratios', position=3, logscale=True)
heli.addSeries('ratios', lambda t: 1, '', 'CCCCCC') #plots ratio of 1 for reference without recording a column of ones
for r in combinations(heli.nonMoneyGoods.keys(), 2):
heli.data.addReporter('ratio-'+r[0]+'-'+r[1], ratioReporter(r[0], r[1]))
c1, c2 = heli.goods[r[0]].color, heli.goods[r[1]].color
c3 = Color(red=(c1.red+c2.red)/2, green=(c1.green+c2.green)/2, blue=(c1.blue+c2.blue)/2)
heli.addSeries('ratios', 'ratio-'+r[0]+'-'+r[1], r[0].title()+'/'+r[1].title()+' Ratio', c3)
heli.defaultPlots.extend(['rbal', 'ratios', 'inventory'])
heli.data.addReporter('ngdp', lambda model: model.cb.ngdp)
heli.addSeries('ngdp', 'ngdp', 'NGDP', '000000')
heli.data.addReporter('P', lambda model: model.cb.P)
heli.data.addReporter('storeCash', heli.data.agentReporter('balance', 'store'))
heli.addSeries('money', 'storeCash', 'Store Cash', '777777')
heli.data.addReporter('StoreCashDemand', heli.data.agentReporter('cashDemand', 'store'))
heli.addSeries('money', 'StoreCashDemand', 'Store Cash Demand', 'CCCCCC')
heli.data.addReporter('wage', heli.data.agentReporter('wage', 'store'))
heli.addSeries('wage', 'wage', 'Wage', '000000')
#================
# AGENT BEHAVIOR
#================
#
# General
#
#Don't bother keeping track of the bank-specific variables unless the banking system is there
#Do this here rather than at the beginning so we can decide at runtime
def modelPreSetup(model):
if model.param('agents_bank') > 0:
model.data.addReporter('defaults', model.data.agentReporter('defaultTotal', 'bank'))
model.data.addReporter('debt', model.data.agentReporter('loans', 'bank'))
model.data.addReporter('reserveRatio', model.data.agentReporter('reserveRatio', 'bank'))
model.data.addReporter('targetRR', model.data.agentReporter('targetRR', 'bank'))
model.data.addReporter('i', model.data.agentReporter('i', 'bank'))
model.data.addReporter('r', model.data.agentReporter('realInterest', 'bank'))
model.data.addReporter('inflation', model.data.agentReporter('inflation', 'bank'))
model.data.addReporter('withdrawals', model.data.agentReporter('lastWithdrawal', 'bank'))
model.data.addReporter('M2', lambda model: model.cb.M2)
model.addSeries('money', 'defaults', 'Defaults', 'CC0000')
model.addSeries('money', 'M2', 'Money Supply', '000000')
model.addSeries('debt', 'debt', 'Outstanding Debt', '000000')
model.addSeries('rr', 'targetRR', 'Target', '777777')
model.addSeries('rr', 'reserveRatio', 'Reserve Ratio', '000000')
model.addSeries('i', 'i', 'Nominal interest', '000000')
model.addSeries('i', 'r', 'Real interest', '0000CC')
model.addSeries('i', 'inflation', 'Inflation', 'CC0000')
heli.addHook('modelPreSetup', modelPreSetup)
#
# Agents
#
from helipad.utility import CES
#Choose a bank if necessary
def baseAgentInit(agent, model):
if model.param('agents_bank') > 0 and agent.primitive != 'bank':
agent.bank = model.agents['bank'][0]
agent.bank.setupAccount(agent)
heli.addHook('baseAgentInit', baseAgentInit)
def agentInit(agent, model):
agent.store = model.agents['store'][0]
agent.item = AgentGoods[agent.breed]
rbd = model.breedParam('rbd', agent.breed, prim='agent')
beta = rbd/(rbd+1)
agent.utility = CES(['good','rbal'], agent.model.param('sigma'), {'good': 1-beta, 'rbal': beta })
agent.expCons = model.goodParam('prod', agent.item)
#Set cash endowment to equilibrium value based on parameters. Not strictly necessary but avoids the burn-in period.
agent.goods[model.moneyGood] = agent.store.price[agent.item] * rbaltodemand(agent.breed)(heli)
if model.param('agents_bank') > 0:
agent.liqPref = model.breedParam('liqPref', agent.breed, prim='agent')
heli.addHook('agentInit', agentInit)
def agentStep(agent, model, stage):
itemPrice = agent.store.price[agent.item]
b = agent.balance/itemPrice #Real balances
q = agent.utility.demand(agent.balance, {'good': itemPrice, 'rbal': itemPrice})['good'] #Equimarginal condition given CES between real balances and consumption
basicq = q #Save this for later since we adjust q
bought = agent.buy(agent.store, agent.item, q, itemPrice)
if agent.goods[model.moneyGood] < 0: agent.goods[model.moneyGood] = 0 #Floating point error gives infinitessimaly negative cash sometimes
agent.utils = agent.utility.calculate({'good': agent.goods[agent.item], 'rbal': agent.balance/itemPrice}) if hasattr(agent,'utility') else 0 #Get utility
agent.goods[agent.item] = 0 #Consume goods
negadjust = q - bought #Update your consumption expectations if the store has a shortage
if negadjust > basicq: negadjust = basicq
agent.expCons = (19 * agent.expCons + basicq-negadjust)/20 #Set expected consumption as a decaying average of consumption history
#Deposit cash in the bank at the end of each period
if hasattr(agent, 'bank'):
tCash = agent.liqPref*agent.balance
agent.bank.deposit(agent, agent.goods[agent.model.moneyGood]-tCash)
heli.addHook('agentStep', agentStep)
def realBalances(agent):
if not hasattr(agent, 'store'): return 0
return agent.balance/agent.store.price[agent.item]
# return agent.balance/agent.model.cb.P
Agent.realBalances = property(realBalances)
#Use the bank if the bank exists
def buy(agent, partner, good, q, p):
if hasattr(agent, 'bank'):
bal = agent.bank.account(agent)
if p*q > bal:
amount = bal
leftover = (p*q - bal)/q
else:
amount = p*q
leftover = 0
agent.bank.transfer(agent, partner, amount)
return (q, leftover)
heli.addHook('buy', buy)
#Use the bank if the bank exists
def pay(agent, recipient, amount, model):
if hasattr(agent, 'bank') and recipient.primitive != 'bank' and agent.primitive != 'bank':
bal = agent.bank.account(agent)
if amount > bal: #If there are not enough funds
trans = bal
amount -= bal
else:
trans = amount
amount = 0
agent.bank.transfer(agent, recipient, trans)
return amount #Should be zero. Anything leftover gets paid in cash
heli.addHook('pay', pay)
def checkBalance(agent, balance, model):
if hasattr(agent, 'bank') and agent.primitive != 'bank':
balance += agent.bank.account(agent)
return balance
heli.addHook('checkBalance', checkBalance)
# |
class CentralBank(baseAgent):
ngdpAvg = 0
ngdp = 0
primitive = 'cb'
def __init__(self, id, model):
super().__init__(None, id, model)
self.id = id
self.model = model
self.ngdpTarget = False if not model.param('ngdpTarget') else 10000
def step(self):
#Record macroeconomic vars at the end of the last stage
#Getting demand has it lagged one period…
self.ngdp = sum([self.model.data.getLast('demand-'+good) * self.model.agents['store'][0].price[good] for good in self.model.nonMoneyGoods])
if not self.ngdpAvg: self.ngdpAvg = self.ngdp
self.ngdpAvg = (2 * self.ngdpAvg + self.ngdp) / 3
#Set macroeconomic targets
expand = 0
if self.ngdpTarget: expand = self.ngdpTarget - self.ngdpAvg
if self.model.param('agents_bank') > 0: expand *= self.model.agents['bank'][0].reserveRatio
if expand != 0: self.expand(expand)
def expand(self, amount):
#Deposit with each bank in proportion to their liabilities
if 'bank' in self.model.primitives and self.model.param('agents_bank') > 0:
self.goods[self.model.moneyGood] += amount
r = self.model.agents['bank'][0].goods[self.model.moneyGood]
if -amount > r: amount = -r + 1
self.model.agents['bank'][0].deposit(self, amount)
elif self.model.param('dist') == 'lump':
amt = amount/self.model.param('agents_agent')
for a in self.model.agents['agent']:
a.goods[self.model.moneyGood] += amt
else:
M0 = self.M0
for a in self.model.allagents.values():
a.goods[self.model.moneyGood] += a.goods[self.model.moneyGood]/M0 * amount
@property
def M0(self):
return self.model.data.agentReporter('goods', 'all', good=self.model.moneyGood, stat='sum')(self.model)
@M0.setter
def M0(self, value): self.expand(value - self.M0)
@property
def M2(self):
if 'bank' not in self.model.primitives or self.model.param('agents_bank') == 0: return self.M0
return sum([a.balance for a in self.model.allagents.values()])
#Price level
#Average good prices at each store, then average all of those together weighted by the store's sale volume
#Figure out whether to break this out or not
@property
def P(self):
denom = 0
numer = 0
if not 'store' in self.model.agents: return None
return mean(array(list(self.model.agents['store'][0].price.values())))
# for s in self.model.agents['store']:
# volume = sum(list(s.lastDemand.values()))
# numer += mean(array(list(s.price.values()))) * volume
# denom += volume
#
# if denom==0: return 1
# else: return numer/denom
def modelPostSetup(model): model.cb = CentralBank(0, model)
heli.addHook('modelPostSetup', modelPostSetup)
def modelPostStep(model): model.cb.step() #Step the central bank last
heli.addHook('modelPostStep', modelPostStep)
#========
# SHOCKS
#========
#Random shock to dwarf cash demand
def shock(v):
c = random.normal(v, 4)
return c if c >= 1 else 1
heli.shocks.register('Dwarf real balances', 'rbd', shock, heli.shocks.randn(2), paramType='breed', obj='dwarf', prim='agent')
#Shock the money supply
def mshock(model):
# return v*2
pct = random.normal(1, 15)
m = model.cb.M0 * (1+pct/100)
if m < 10000: m = 10000 #Things get weird when there's a money shortage
model.cb.M0 = m
heli.shocks.register('M0 (2% prob)', None, mshock, heli.shocks.randn(2), desc="Shocks the money supply a random percentage (µ=1, σ=15) with 2% probability each period")
heli.launchGUI() | # Central Bank
# | random_line_split |
Helicopter-OMO.py | # A model of the relative price effects of monetary shocks via helicopter drop vs. by open market operations.
# Download the paper at https://ssrn.com/abstract=2545488
from itertools import combinations
from colour import Color
import pandas
from helipad import *
from math import sqrt
heli = Helipad()
#===============
# STORE AND BANK CLASSES
# Have to come before adding the primitives
#===============
class Store(baseAgent):
def __init__(self, breed, id, model):
super().__init__(breed, id, model)
#Start with equilibrium prices. Not strictly necessary, but it eliminates the burn-in period. See eq. A7
sm=sum([1/sqrt(model.goodParam('prod',g)) for g in model.nonMoneyGoods]) * M0/(model.param('agents_agent')*(len(model.nonMoneyGoods)+sum([1+model.breedParam('rbd', b, prim='agent') for b in model.primitives['agent']['breeds']])))
self.price = {g:sm/(sqrt(model.goodParam('prod',g))) for g in model.nonMoneyGoods}
self.invTarget = {g:model.goodParam('prod',g)*model.param('agents_agent') for g in model.nonMoneyGoods}
self.portion = {g:1/(len(model.nonMoneyGoods)) for g in model.nonMoneyGoods} #Capital allocation
self.wage = 0
self.cashDemand = 0
if hasattr(self, 'bank'):
self.pavg = 0
self.projects = []
self.defaults = 0
def step(self, stage):
super().step(stage)
N = self.model.param('agents_agent')
#Calculate wages
self.cashDemand = N * self.wage #Hold enough cash for one period's disbursements
newwage = (self.balance - self.cashDemand) / N
if newwage < 1: newwage = 1
self.wage = (self.wage * self.model.param('wStick') + newwage)/(1 + self.model.param('wStick'))
if self.wage * N > self.balance: self.wage = self.balance / N #Budget constraint
#Hire labor, with individualized wage shocks
labor = 0
for a in self.model.agents['agent']:
if self.wage < 0: self.wage = 0
wage = random.normal(self.wage, self.wage/2 + 0.1) #Can't have zero stdev
wage = 0 if wage < 0 else wage #Wage bounded from below by 0
self.pay(a, wage)
labor += 1
tPrice = sum([self.price[good] for good in self.model.nonMoneyGoods])
avg, stdev = {},{} #Hang onto these for use with credit calculations
for i in self.model.nonMoneyGoods:
#Keep track of typical demand
#Target sufficient inventory to handle 1.5 standard deviations above mean demand for the last 50 periods
history = pandas.Series(self.model.data.getLast('demand-'+i, 50)) + pandas.Series(self.model.data.getLast('shortage-'+i, 50))
avg[i], stdev[i] = history.mean(), history.std()
itt = (1 if isnan(avg[i]) else avg[i]) + 1.5 * (1 if isnan(stdev[i]) else stdev[i])
self.invTarget[i] = (self.invTarget[i] + itt)/2 #Smooth it a bit
#Set prices
#Change in the direction of hitting the inventory target
# self.price[i] += log(self.invTarget[i] / (self.inventory[i][0] + self.lastShortage[i])) #Jim's pricing rule?
self.price[i] += (self.invTarget[i] - self.goods[i] + self.model.data.getLast('shortage-'+i))/100 #/150
#Adjust in proportion to the rate of inventory change
#Positive deltaInv indicates falling inventory; negative deltaInv rising inventory
lasti = self.model.data.getLast('inv-'+i,2)[0] if self.model.t > 1 else 0
deltaInv = lasti - self.goods[i]
self.price[i] *= (1 + deltaInv/(50 ** self.model.param('pSmooth')))
if self.price[i] < 0: self.price[i] = 1
#Produce stuff
self.portion[i] = (self.model.param('kImmob') * self.portion[i] + self.price[i]/tPrice) / (self.model.param('kImmob') + 1) #Calculate capital allocation
self.goods[i] = self.goods[i] + self.portion[i] * labor * self.model.goodParam('prod',i)
#Intertemporal transactions
if hasattr(self, 'bank') and self.model.t > 0:
#Stipulate some demand for credit, we can worry about microfoundations later
self.bank.amortize(self, self.bank.credit[self.id].owe/1.5)
self.bank.borrow(self, self.model.cb.ngdp * (1-self.bank.i))
class Bank(baseAgent):
def __init__(self, breed, id, model):
super().__init__(breed, id, model)
self.i = .1 #Per-period interest rate
self.targetRR = 0.25
self.lastWithdrawal = 0
self.inflation = 0
self.accounts = {} #Liabilities
self.credit = {} #Assets
self.dif = 0 #How much credit was rationed
self.defaultTotal = 0
self.pLast = 50 #Initial price level, equal to average of initial prices
def account(self, customer):
return self.accounts[customer.id] if customer.id in self.accounts else 0
def setupAccount(self, customer):
if customer.id in self.accounts: return False #If you already have an account
self.accounts[customer.id] = 0 #Liabilities
self.credit[customer.id] = Loan(customer, self) #Assets
#Assets and liabilities should return the same thing
#Any difference gets disbursed as interest on deposits
@property
def assets(self):
return self.goods[self.model.moneyGood] + sum([l.owe for l in self.credit.values()]) #Reserves
@property
def liabilities(self):
return sum(list(self.accounts.values())) #Values returns a dict_values object, not a list. So wrap it in list()
@property
def loans(self):
return self.assets - self.goods[self.model.moneyGood]
@property
def reserveRatio(self):
l = self.liabilities
if l == 0: return 1
else: return self.goods[self.model.moneyGood] / l
@property
def realInterest(self): return self.i - self.inflation
#amt<0 to withdraw
def deposit(self, customer, amt):
amt = customer.pay(self, amt)
self.accounts[customer.id] += amt #Credit account
if amt<0: self.lastWithdrawal -= amt
return amt
def transfer(self, customer, recipient, amt):
if self.accounts[customer.id] < amt: amt = self.accounts[customer.id]
self.accounts[customer.id] -= amt
self.accounts[recipient.id] += amt
return amt
def borrow(self, customer, amt):
if amt < 0.01: return 0 #Skip blanks and float errors
l = self.credit[customer.id]
#Refinance anything with a higher interest rate
for n,loan in enumerate(l.loans):
if loan['i'] >= self.i:
amt += loan['amount']
del l.loans[n]
#Increase assets
l.loans.append({
'amount': amt,
'i': self.i
})
self.accounts[customer.id] += amt #Increase liabilities
return amt #How much you actually borrowed
#Returns the amount you actually pay – the lesser of amt or your outstanding balance
def amortize(self, customer, amt):
if amt < 0.001: return 0 #Skip blanks and float errors
l = self.credit[customer.id] #Your loan object
l.amortizeAmt += amt #Count it toward minimum repayment
leftover = amt
#Reduce assets; amortize in the order borrowed
while leftover > 0 and len(l.loans) > 0:
if leftover >= l.loans[0]['amount']:
leftover -= l.loans[0]['amount']
del l.loans[0]
else:
l.loans[0]['amount'] -= leftover
leftover = 0
self.accounts[customer.id] -= (amt - leftover) #Reduce liabilities
return amt - leftover #How much you amortized
def step(self, stage):
self.lastWithdrawal = 0
for l in self.credit: self.credit[l].step()
#Pay interest on deposits
lia = self.liabilities
profit = self.assets - lia
if profit > self.model.param('agents_agent'):
print('Disbursing profit of $',profit)
for id, a in self.accounts.items():
self.accounts[id] += profit/lia * a
# # Set target reserve ratio
# if self.model.t > 2:
# wd = self.model.data.getLast('withdrawals', 50)
# mn, st = mean(wd), stdev(wd)
# if isnan(mn) or isnan(st): mn, st = .1, .1
# ttargetRR = (mn + 2 * st) / lia
# self.targetRR = (49*self.targetRR + ttargetRR)/50
#Calculate inflation as the unweighted average price change over all goods
if self.model.t >= 2:
inflation = self.model.cb.P/self.pLast - 1
self.pLast = self.model.cb.P #Remember the price from this period before altering it for the next period
self.inflation = (19 * self.inflation + inflation) / 20 #Decaying average
#Set interest rate and/or minimum repayment schedule
#Count potential borrowing in the interest rate adjustment
targeti = self.i * self.targetRR / (self.reserveRatio)
#Adjust in proportion to the rate of reserve change
#Positive deltaReserves indicates falling reserves; negative deltaReserves rising inventory
if self.model.t > 2:
deltaReserves = (self.lastReserves - self.goods[self.model.moneyGood])/self.model.cb.P
targeti *= (1 + deltaReserves/(20 ** self.model.param('pSmooth')))
self.i = (self.i * 24 + targeti)/25 #Interest rate stickiness
self.lastReserves = self.goods[self.model.moneyGood]
#Upper and lower interest rate bounds
if self.i > 1 + self.inflation: se | if self.i < self.inflation + 0.005: self.i = self.inflation + 0.005 #no negative real rates
if self.i < 0.005: self.i = 0.005 #no negative nominal rates
class Loan():
def __init__(self, customer, bank):
self.customer = customer
self.bank = bank
self.loans = []
self.amortizeAmt = 0
@property
def owe(self): return sum([l['amount'] for l in self.loans])
def step(self):
#Charge the minimum repayment if the agent hasn't already amortized more than that amount
minRepay = 0
for l in self.loans:
iLoan = l['amount'] * l['i']
minRepay += iLoan #You have to pay at least the interest each period
l['amount'] += iLoan #Roll over the remainder at the original interest rate
#If they haven't paid the minimum this period, charge it
amtz = minRepay - self.amortizeAmt
defaulted = False
if amtz > 0:
if amtz > self.bank.accounts[self.customer.id]: #Can't charge them more than they have in the bank
defaulted = True
amtz = self.bank.accounts[self.customer.id]
# print(self.bank.model.t, ': Agent', self.customer.id, 'defaulted $', self.owe - amtz)
self.bank.amortize(self.customer, amtz)
if defaulted:
for n, l in enumerate(self.loans):
self.loans[n]['amount'] /= 2
self.bank.defaultTotal += l['amount']/2
##Cap defaults at the loan amount. Otherwise if i>1, defaulting results in negative debt
# if l['i'] >= 1:
# self.bank.defaultTotal += l['amount']
# del self.loans[n]
# else:
# l['amount'] -= l['amount'] * l['i']
# self.bank.defaultTotal += l['amount'] * l['i']
self.amortizeAmt = 0
#===============
# CONFIGURATION
#===============
heli.addPrimitive('bank', Bank, dflt=1, low=0, high=10, priority=1)
heli.addPrimitive('store', Store, dflt=1, low=0, high=10, priority=2)
heli.addPrimitive('agent', Agent, dflt=50, low=1, high=100, priority=3)
# Configure how many breeds there are and what good each consumes
# In this model, goods and breeds correspond, but they don't necessarily have to
breeds = [
('hobbit', 'jam', 'D73229'),
('dwarf', 'axe', '2D8DBE'),
# ('elf', 'lembas', 'CCBB22')
]
AgentGoods = {}
for b in breeds:
heli.addBreed(b[0], b[2], prim='agent')
heli.addGood(b[1], b[2])
AgentGoods[b[0]] = b[1] #Hang on to this list for future looping
M0 = 120000
heli.addGood('cash', '009900', money=True)
heli.order = 'random'
#Disable the irrelevant checkboxes if the banking model isn't selected
#Callback for the dist parameter
def bankChecks(gui, val=None):
nobank = gui.model.param('dist')!='omo'
gui.model.param('agents_bank', 0 if nobank else 1)
for i in ['debt', 'rr', 'i']:
gui.checks[i].disabled(nobank)
for b in gui.model.primitives['agent']['breeds'].keys():
gui.sliders['breed_agent-liqPref-'+b].config(state='disabled' if nobank else 'normal')
#Since the param callback takes different parameters than the GUI callback
def bankCheckWrapper(model, var, val): bankChecks(model.gui, val)
heli.addHook('terminate', bankChecks) #Reset the disabled checkmarks when terminating a model
heli.addHook('GUIPostInit', bankChecks) #Set the disabled checkmarks on initialization
# UPDATE CALLBACKS
def storeUpdater(model, var, val):
if model.hasModel: setattr(model.agents['store'][0], var, val)
def ngdpUpdater(model, var, val):
if model.hasModel: model.cb.ngdpTarget = val if not val else model.cb.ngdp
def rbalUpdater(model, var, breed, val):
if model.hasModel:
if var=='rbd':
beta = val/(1+val)
for a in model.agents['agent']:
if hasattr(a, 'utility') and a.breed == breed:
a.utility.coeffs['rbal'] = beta
a.utility.coeffs['good'] = 1-beta
elif var=='liqPref':
for a in model.agents['agent']:
if a.breed == breed:
a.liqPref = val
#Set up the info for the sliders on the control panel
#These variables attach to the Helicopter object
#Each parameter requires a corresponding routine in Helicopter.updateVar()
heli.addParameter('ngdpTarget', 'NGDP Target', 'check', dflt=False, callback=ngdpUpdater)
heli.addParameter('dist', 'Distribution', 'menu', dflt='prop', opts={
'prop': 'Helicopter/Proportional',
'lump': 'Helicopter/Lump Sum',
'omo': 'Open Market Operation'
}, runtime=False, callback=bankCheckWrapper)
heli.params['agents_bank'][1]['type'] = 'hidden'
heli.params['agents_store'][1]['type'] = 'hidden'
heli.addParameter('pSmooth', 'Price Smoothness', 'slider', dflt=1.5, opts={'low': 1, 'high': 3, 'step': 0.05}, callback=storeUpdater)
heli.addParameter('wStick', 'Wage Stickiness', 'slider', dflt=10, opts={'low': 1, 'high': 50, 'step': 1}, callback=storeUpdater)
heli.addParameter('kImmob', 'Capital Immobility', 'slider', dflt=100, opts={'low': 1, 'high': 150, 'step': 1}, callback=storeUpdater)
#Low Es means the two are complements (0=perfect complements)
#High Es means the two are substitutes (infinity=perfect substitutes)
#Doesn't really affect anything though – even utility – so don't bother exposing it
heli.addParameter('sigma', 'Elast. of substitution', 'hidden', dflt=.5, opts={'low': 0, 'high': 10, 'step': 0.1})
heli.addBreedParam('rbd', 'Demand for Real Balances', 'slider', dflt={'hobbit':7, 'dwarf': 35}, opts={'low':1, 'high': 50, 'step': 1}, prim='agent', callback=rbalUpdater)
heli.addBreedParam('liqPref', 'Demand for Liquidity', 'slider', dflt={'hobbit': 0.1, 'dwarf': 0.3}, opts={'low':0, 'high': 1, 'step': 0.01}, prim='agent', callback=rbalUpdater, desc='The proportion of the agent\'s balances he desires to keep in cash')
heli.addGoodParam('prod', 'Productivity', 'slider', dflt=1.75, opts={'low':0.1, 'high': 2, 'step': 0.1}) #If you shock productivity, make sure to call rbalupdater
#Takes as input the slider value, outputs b_g. See equation (A8) in the paper.
def rbaltodemand(breed):
def reporter(model):
rbd = model.breedParam('rbd', breed, prim='agent')
beta = rbd/(1+rbd)
return (beta/(1-beta)) * len(model.goods) * sqrt(model.goodParam('prod',AgentGoods[breed])) / sum([1/sqrt(pr) for pr in model.goodParam('prod').values()])
return reporter
#Data Collection
heli.defaultPlots.append('prices')
heli.addPlot('inventory', 'Inventory', 3)
heli.addPlot('rbal', 'Real Balances', 5)
heli.addPlot('ngdp', 'NGDP', 7, selected=False)
heli.addPlot('capital', 'Production', 9, selected=False)
heli.addPlot('wage', 'Wage', 11, selected=False)
heli.addPlot('debt', 'Debt', selected=False)
heli.addPlot('rr', 'Reserve Ratio', selected=False)
heli.addPlot('i', 'Interest Rate', selected=False)
heli.addSeries('capital', lambda t: 1/len(heli.primitives['agent']['breeds']), '', 'CCCCCC')
for breed, d in heli.primitives['agent']['breeds'].items():
heli.data.addReporter('rbalDemand-'+breed, rbaltodemand(breed))
heli.data.addReporter('eCons-'+breed, heli.data.agentReporter('expCons', 'agent', breed=breed, stat='sum'))
# heli.data.addReporter('rWage-'+breed, lambda model: heli.data.agentReporter('wage', 'store')(model) / heli.data.agentReporter('price', 'store', good=b.good)(model))
# heli.data.addReporter('expWage', heli.data.agentReporter('expWage', 'agent'))
heli.data.addReporter('rBal-'+breed, heli.data.agentReporter('realBalances', 'agent', breed=breed))
heli.data.addReporter('invTarget-'+AgentGoods[breed], heli.data.agentReporter('invTarget', 'store', good=AgentGoods[breed]))
heli.data.addReporter('portion-'+AgentGoods[breed], heli.data.agentReporter('portion', 'store', good=AgentGoods[breed]))
heli.addSeries('demand', 'eCons-'+breed, breed.title()+'s\' Expected Consumption', d.color2)
heli.addSeries('rbal', 'rbalDemand-'+breed, breed.title()+' Target Balances', d.color2)
heli.addSeries('rbal', 'rBal-'+breed, breed.title()+ 'Real Balances', d.color)
heli.addSeries('inventory', 'invTarget-'+AgentGoods[breed], AgentGoods[breed].title()+' Inventory Target', heli.goods[AgentGoods[breed]].color2)
heli.addSeries('capital', 'portion-'+AgentGoods[breed], AgentGoods[breed].title()+' Capital', heli.goods[AgentGoods[breed]].color)
# heli.addSeries('Wage', 'expWage', 'Expected Wage', '999999')
#Do this one separately so it draws on top
for good, g in heli.nonMoneyGoods.items():
heli.data.addReporter('inv-'+good, heli.data.agentReporter('goods', 'store', good=good))
heli.addSeries('inventory', 'inv-'+good, good.title()+' Inventory', g.color)
#Price ratio plots
def ratioReporter(item1, item2):
def reporter(model):
return model.data.agentReporter('price', 'store', good=item1)(model)/model.data.agentReporter('price', 'store', good=item2)(model)
return reporter
heli.addPlot('ratios', 'Price Ratios', position=3, logscale=True)
heli.addSeries('ratios', lambda t: 1, '', 'CCCCCC') #plots ratio of 1 for reference without recording a column of ones
for r in combinations(heli.nonMoneyGoods.keys(), 2):
heli.data.addReporter('ratio-'+r[0]+'-'+r[1], ratioReporter(r[0], r[1]))
c1, c2 = heli.goods[r[0]].color, heli.goods[r[1]].color
c3 = Color(red=(c1.red+c2.red)/2, green=(c1.green+c2.green)/2, blue=(c1.blue+c2.blue)/2)
heli.addSeries('ratios', 'ratio-'+r[0]+'-'+r[1], r[0].title()+'/'+r[1].title()+' Ratio', c3)
heli.defaultPlots.extend(['rbal', 'ratios', 'inventory'])
heli.data.addReporter('ngdp', lambda model: model.cb.ngdp)
heli.addSeries('ngdp', 'ngdp', 'NGDP', '000000')
heli.data.addReporter('P', lambda model: model.cb.P)
heli.data.addReporter('storeCash', heli.data.agentReporter('balance', 'store'))
heli.addSeries('money', 'storeCash', 'Store Cash', '777777')
heli.data.addReporter('StoreCashDemand', heli.data.agentReporter('cashDemand', 'store'))
heli.addSeries('money', 'StoreCashDemand', 'Store Cash Demand', 'CCCCCC')
heli.data.addReporter('wage', heli.data.agentReporter('wage', 'store'))
heli.addSeries('wage', 'wage', 'Wage', '000000')
#================
# AGENT BEHAVIOR
#================
#
# General
#
#Don't bother keeping track of the bank-specific variables unless the banking system is there
#Do this here rather than at the beginning so we can decide at runtime
def modelPreSetup(model):
if model.param('agents_bank') > 0:
model.data.addReporter('defaults', model.data.agentReporter('defaultTotal', 'bank'))
model.data.addReporter('debt', model.data.agentReporter('loans', 'bank'))
model.data.addReporter('reserveRatio', model.data.agentReporter('reserveRatio', 'bank'))
model.data.addReporter('targetRR', model.data.agentReporter('targetRR', 'bank'))
model.data.addReporter('i', model.data.agentReporter('i', 'bank'))
model.data.addReporter('r', model.data.agentReporter('realInterest', 'bank'))
model.data.addReporter('inflation', model.data.agentReporter('inflation', 'bank'))
model.data.addReporter('withdrawals', model.data.agentReporter('lastWithdrawal', 'bank'))
model.data.addReporter('M2', lambda model: model.cb.M2)
model.addSeries('money', 'defaults', 'Defaults', 'CC0000')
model.addSeries('money', 'M2', 'Money Supply', '000000')
model.addSeries('debt', 'debt', 'Outstanding Debt', '000000')
model.addSeries('rr', 'targetRR', 'Target', '777777')
model.addSeries('rr', 'reserveRatio', 'Reserve Ratio', '000000')
model.addSeries('i', 'i', 'Nominal interest', '000000')
model.addSeries('i', 'r', 'Real interest', '0000CC')
model.addSeries('i', 'inflation', 'Inflation', 'CC0000')
heli.addHook('modelPreSetup', modelPreSetup)
#
# Agents
#
from helipad.utility import CES
#Choose a bank if necessary
def baseAgentInit(agent, model):
if model.param('agents_bank') > 0 and agent.primitive != 'bank':
agent.bank = model.agents['bank'][0]
agent.bank.setupAccount(agent)
heli.addHook('baseAgentInit', baseAgentInit)
def agentInit(agent, model):
agent.store = model.agents['store'][0]
agent.item = AgentGoods[agent.breed]
rbd = model.breedParam('rbd', agent.breed, prim='agent')
beta = rbd/(rbd+1)
agent.utility = CES(['good','rbal'], agent.model.param('sigma'), {'good': 1-beta, 'rbal': beta })
agent.expCons = model.goodParam('prod', agent.item)
#Set cash endowment to equilibrium value based on parameters. Not strictly necessary but avoids the burn-in period.
agent.goods[model.moneyGood] = agent.store.price[agent.item] * rbaltodemand(agent.breed)(heli)
if model.param('agents_bank') > 0:
agent.liqPref = model.breedParam('liqPref', agent.breed, prim='agent')
heli.addHook('agentInit', agentInit)
def agentStep(agent, model, stage):
itemPrice = agent.store.price[agent.item]
b = agent.balance/itemPrice #Real balances
q = agent.utility.demand(agent.balance, {'good': itemPrice, 'rbal': itemPrice})['good'] #Equimarginal condition given CES between real balances and consumption
basicq = q #Save this for later since we adjust q
bought = agent.buy(agent.store, agent.item, q, itemPrice)
if agent.goods[model.moneyGood] < 0: agent.goods[model.moneyGood] = 0 #Floating point error gives infinitessimaly negative cash sometimes
agent.utils = agent.utility.calculate({'good': agent.goods[agent.item], 'rbal': agent.balance/itemPrice}) if hasattr(agent,'utility') else 0 #Get utility
agent.goods[agent.item] = 0 #Consume goods
negadjust = q - bought #Update your consumption expectations if the store has a shortage
if negadjust > basicq: negadjust = basicq
agent.expCons = (19 * agent.expCons + basicq-negadjust)/20 #Set expected consumption as a decaying average of consumption history
#Deposit cash in the bank at the end of each period
if hasattr(agent, 'bank'):
tCash = agent.liqPref*agent.balance
agent.bank.deposit(agent, agent.goods[agent.model.moneyGood]-tCash)
heli.addHook('agentStep', agentStep)
def realBalances(agent):
if not hasattr(agent, 'store'): return 0
return agent.balance/agent.store.price[agent.item]
# return agent.balance/agent.model.cb.P
Agent.realBalances = property(realBalances)
#Use the bank if the bank exists
def buy(agent, partner, good, q, p):
if hasattr(agent, 'bank'):
bal = agent.bank.account(agent)
if p*q > bal:
amount = bal
leftover = (p*q - bal)/q
else:
amount = p*q
leftover = 0
agent.bank.transfer(agent, partner, amount)
return (q, leftover)
heli.addHook('buy', buy)
#Use the bank if the bank exists
def pay(agent, recipient, amount, model):
if hasattr(agent, 'bank') and recipient.primitive != 'bank' and agent.primitive != 'bank':
bal = agent.bank.account(agent)
if amount > bal: #If there are not enough funds
trans = bal
amount -= bal
else:
trans = amount
amount = 0
agent.bank.transfer(agent, recipient, trans)
return amount #Should be zero. Anything leftover gets paid in cash
heli.addHook('pay', pay)
def checkBalance(agent, balance, model):
if hasattr(agent, 'bank') and agent.primitive != 'bank':
balance += agent.bank.account(agent)
return balance
heli.addHook('checkBalance', checkBalance)
#
# Central Bank
#
class CentralBank(baseAgent):
ngdpAvg = 0
ngdp = 0
primitive = 'cb'
def __init__(self, id, model):
super().__init__(None, id, model)
self.id = id
self.model = model
self.ngdpTarget = False if not model.param('ngdpTarget') else 10000
def step(self):
#Record macroeconomic vars at the end of the last stage
#Getting demand has it lagged one period…
self.ngdp = sum([self.model.data.getLast('demand-'+good) * self.model.agents['store'][0].price[good] for good in self.model.nonMoneyGoods])
if not self.ngdpAvg: self.ngdpAvg = self.ngdp
self.ngdpAvg = (2 * self.ngdpAvg + self.ngdp) / 3
#Set macroeconomic targets
expand = 0
if self.ngdpTarget: expand = self.ngdpTarget - self.ngdpAvg
if self.model.param('agents_bank') > 0: expand *= self.model.agents['bank'][0].reserveRatio
if expand != 0: self.expand(expand)
def expand(self, amount):
#Deposit with each bank in proportion to their liabilities
if 'bank' in self.model.primitives and self.model.param('agents_bank') > 0:
self.goods[self.model.moneyGood] += amount
r = self.model.agents['bank'][0].goods[self.model.moneyGood]
if -amount > r: amount = -r + 1
self.model.agents['bank'][0].deposit(self, amount)
elif self.model.param('dist') == 'lump':
amt = amount/self.model.param('agents_agent')
for a in self.model.agents['agent']:
a.goods[self.model.moneyGood] += amt
else:
M0 = self.M0
for a in self.model.allagents.values():
a.goods[self.model.moneyGood] += a.goods[self.model.moneyGood]/M0 * amount
@property
def M0(self):
return self.model.data.agentReporter('goods', 'all', good=self.model.moneyGood, stat='sum')(self.model)
@M0.setter
def M0(self, value): self.expand(value - self.M0)
@property
def M2(self):
if 'bank' not in self.model.primitives or self.model.param('agents_bank') == 0: return self.M0
return sum([a.balance for a in self.model.allagents.values()])
#Price level
#Average good prices at each store, then average all of those together weighted by the store's sale volume
#Figure out whether to break this out or not
@property
def P(self):
denom = 0
numer = 0
if not 'store' in self.model.agents: return None
return mean(array(list(self.model.agents['store'][0].price.values())))
# for s in self.model.agents['store']:
# volume = sum(list(s.lastDemand.values()))
# numer += mean(array(list(s.price.values()))) * volume
# denom += volume
#
# if denom==0: return 1
# else: return numer/denom
def modelPostSetup(model): model.cb = CentralBank(0, model)
heli.addHook('modelPostSetup', modelPostSetup)
def modelPostStep(model): model.cb.step() #Step the central bank last
heli.addHook('modelPostStep', modelPostStep)
#========
# SHOCKS
#========
#Random shock to dwarf cash demand
def shock(v):
c = random.normal(v, 4)
return c if c >= 1 else 1
heli.shocks.register('Dwarf real balances', 'rbd', shock, heli.shocks.randn(2), paramType='breed', obj='dwarf', prim='agent')
#Shock the money supply
def mshock(model):
# return v*2
pct = random.normal(1, 15)
m = model.cb.M0 * (1+pct/100)
if m < 10000: m = 10000 #Things get weird when there's a money shortage
model.cb.M0 = m
heli.shocks.register('M0 (2% prob)', None, mshock, heli.shocks.randn(2), desc="Shocks the money supply a random percentage (µ=1, σ=15) with 2% probability each period")
heli.launchGUI() | lf.i = 1 + self.inflation #interest rate cap at 100%
| conditional_block |
shared.go | // Copyright 2017 Monax Industries Limited
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package test
import (
"bytes"
"errors"
"fmt"
"hash/fnv"
"path"
"strconv"
"testing"
"time"
acm "github.com/hyperledger/burrow/account"
"github.com/hyperledger/burrow/config"
"github.com/hyperledger/burrow/core"
core_types "github.com/hyperledger/burrow/core/types"
genesis "github.com/hyperledger/burrow/genesis"
"github.com/hyperledger/burrow/logging/lifecycle"
"github.com/hyperledger/burrow/manager/burrow-mint/evm"
ptypes "github.com/hyperledger/burrow/permission/types"
"github.com/hyperledger/burrow/rpc/tendermint/client"
edbcli "github.com/hyperledger/burrow/rpc/tendermint/client"
rpc_types "github.com/hyperledger/burrow/rpc/tendermint/core/types"
"github.com/hyperledger/burrow/server"
"github.com/hyperledger/burrow/test/fixtures"
"github.com/hyperledger/burrow/txs"
"github.com/hyperledger/burrow/word256"
"github.com/spf13/viper"
"github.com/tendermint/go-crypto"
rpcclient "github.com/tendermint/go-rpc/client"
"github.com/tendermint/tendermint/types"
)
const chainID = "RPC_Test_Chain"
// global variables for use across all tests
var (
serverConfig *server.ServerConfig
rootWorkDir string
mempoolCount = 0
websocketAddr string
genesisDoc *genesis.GenesisDoc
websocketEndpoint string
users = makeUsers(5) // make keys
jsonRpcClient client.RPCClient | testCore *core.Core
)
// We use this to wrap tests
func TestWrapper(runner func() int) int {
fmt.Println("Running with integration TestWrapper (rpc/tendermint/test/shared_test.go)...")
ffs := fixtures.NewFileFixtures("burrow")
defer func() {
// Tendermint likes to try and save to priv_validator.json after its been
// asked to shutdown so we pause to try and avoid collision
time.Sleep(time.Second)
ffs.RemoveAll()
}()
vm.SetDebug(true)
err := initGlobalVariables(ffs)
if err != nil {
panic(err)
}
tmServer, err := testCore.NewGatewayTendermint(serverConfig)
defer func() {
// Shutdown -- make sure we don't hit a race on ffs.RemoveAll
tmServer.Shutdown()
testCore.Stop()
}()
if err != nil {
panic(err)
}
return runner()
}
// initialize config and create new node
func initGlobalVariables(ffs *fixtures.FileFixtures) error {
configBytes, err := config.GetConfigurationFileBytes(chainID,
"test_single_node",
"",
"burrow",
true,
"46657",
"burrow serve")
if err != nil {
return err
}
genesisBytes, err := genesisFileBytesFromUsers(chainID, users)
if err != nil {
return err
}
testConfigFile := ffs.AddFile("config.toml", string(configBytes))
rootWorkDir = ffs.AddDir("rootWorkDir")
rootDataDir := ffs.AddDir("rootDataDir")
genesisFile := ffs.AddFile("rootWorkDir/genesis.json", string(genesisBytes))
genesisDoc = genesis.GenesisDocFromJSON(genesisBytes)
if ffs.Error != nil {
return ffs.Error
}
testConfig := viper.New()
testConfig.SetConfigFile(testConfigFile)
err = testConfig.ReadInConfig()
if err != nil {
return err
}
sconf, err := core.LoadServerConfig(chainID, testConfig)
if err != nil {
return err
}
serverConfig = sconf
rpcAddr := serverConfig.Tendermint.RpcLocalAddress
websocketAddr = rpcAddr
websocketEndpoint = "/websocket"
consensusConfig, err := core.LoadModuleConfig(testConfig, rootWorkDir,
rootDataDir, genesisFile, chainID, "consensus")
if err != nil {
return err
}
managerConfig, err := core.LoadModuleConfig(testConfig, rootWorkDir,
rootDataDir, genesisFile, chainID, "manager")
if err != nil {
return err
}
// Set up priv_validator.json before we start tendermint (otherwise it will
// create its own one.
saveNewPriv()
logger, _ := lifecycle.NewStdErrLogger()
// To spill tendermint logs on the floor:
// lifecycle.CaptureTendermintLog15Output(loggers.NewNoopInfoTraceLogger())
lifecycle.CaptureTendermintLog15Output(logger)
lifecycle.CaptureStdlibLogOutput(logger)
testCore, err = core.NewCore("testCore", consensusConfig, managerConfig,
logger)
if err != nil {
return err
}
jsonRpcClient = rpcclient.NewJSONRPCClient(rpcAddr)
httpClient = rpcclient.NewURIClient(rpcAddr)
clients = map[string]client.RPCClient{
"JSONRPC": jsonRpcClient,
"HTTP": httpClient,
}
return nil
}
// Deterministic account generation helper. Pass number of accounts to make
func makeUsers(n int) []*acm.PrivAccount {
accounts := []*acm.PrivAccount{}
for i := 0; i < n; i++ {
secret := "mysecret" + strconv.Itoa(i)
user := acm.GenPrivAccountFromSecret(secret)
accounts = append(accounts, user)
}
return accounts
}
func genesisFileBytesFromUsers(chainName string, accounts []*acm.PrivAccount) ([]byte, error) {
if len(accounts) < 1 {
return nil, errors.New("Please pass in at least 1 account to be the validator")
}
genesisValidators := make([]*genesis.GenesisValidator, 1)
genesisAccounts := make([]*genesis.GenesisAccount, len(accounts))
genesisValidators[0] = genesisValidatorFromPrivAccount(accounts[0])
for i, acc := range accounts {
genesisAccounts[i] = genesisAccountFromPrivAccount(acc)
}
return genesis.GenerateGenesisFileBytes(chainName, genesisAccounts, genesisValidators)
}
func genesisValidatorFromPrivAccount(account *acm.PrivAccount) *genesis.GenesisValidator {
return &genesis.GenesisValidator{
Amount: 1000000,
Name: fmt.Sprintf("full-account_%X", account.Address),
PubKey: account.PubKey,
UnbondTo: []genesis.BasicAccount{
{
Address: account.Address,
Amount: 100,
},
},
}
}
func genesisAccountFromPrivAccount(account *acm.PrivAccount) *genesis.GenesisAccount {
return genesis.NewGenesisAccount(account.Address, 100000,
fmt.Sprintf("account_%X", account.Address), &ptypes.DefaultAccountPermissions)
}
func saveNewPriv() {
// Save new priv_validator file.
priv := &types.PrivValidator{
Address: users[0].Address,
PubKey: crypto.PubKeyEd25519(users[0].PubKey.(crypto.PubKeyEd25519)),
PrivKey: crypto.PrivKeyEd25519(users[0].PrivKey.(crypto.PrivKeyEd25519)),
}
priv.SetFile(path.Join(rootWorkDir, "priv_validator.json"))
priv.Save()
}
//-------------------------------------------------------------------------------
// some default transaction functions
func makeDefaultSendTx(t *testing.T, client client.RPCClient, addr []byte,
amt int64) *txs.SendTx {
nonce := getNonce(t, client, users[0].Address)
tx := txs.NewSendTx()
tx.AddInputWithNonce(users[0].PubKey, amt, nonce+1)
tx.AddOutput(addr, amt)
return tx
}
func makeDefaultSendTxSigned(t *testing.T, client client.RPCClient, addr []byte,
amt int64) *txs.SendTx {
tx := makeDefaultSendTx(t, client, addr, amt)
tx.SignInput(chainID, 0, users[0])
return tx
}
func makeDefaultCallTx(t *testing.T, client client.RPCClient, addr, code []byte, amt, gasLim,
fee int64) *txs.CallTx {
nonce := getNonce(t, client, users[0].Address)
tx := txs.NewCallTxWithNonce(users[0].PubKey, addr, code, amt, gasLim, fee,
nonce+1)
tx.Sign(chainID, users[0])
return tx
}
func makeDefaultNameTx(t *testing.T, client client.RPCClient, name, value string, amt,
fee int64) *txs.NameTx {
nonce := getNonce(t, client, users[0].Address)
tx := txs.NewNameTxWithNonce(users[0].PubKey, name, value, amt, fee, nonce+1)
tx.Sign(chainID, users[0])
return tx
}
//-------------------------------------------------------------------------------
// rpc call wrappers (fail on err)
// get an account's nonce
func getNonce(t *testing.T, client client.RPCClient, addr []byte) int {
ac, err := edbcli.GetAccount(client, addr)
if err != nil {
t.Fatal(err)
}
if ac == nil {
return 0
}
return ac.Sequence
}
// get the account
func getAccount(t *testing.T, client client.RPCClient, addr []byte) *acm.Account {
ac, err := edbcli.GetAccount(client, addr)
if err != nil {
t.Fatal(err)
}
return ac
}
// sign transaction
func signTx(t *testing.T, client client.RPCClient, tx txs.Tx,
privAcc *acm.PrivAccount) txs.Tx {
signedTx, err := edbcli.SignTx(client, tx, []*acm.PrivAccount{privAcc})
if err != nil {
t.Fatal(err)
}
return signedTx
}
// broadcast transaction
func broadcastTx(t *testing.T, client client.RPCClient, tx txs.Tx) txs.Receipt {
rec, err := edbcli.BroadcastTx(client, tx)
if err != nil {
t.Fatal(err)
}
mempoolCount += 1
return rec
}
// dump all storage for an account. currently unused
func dumpStorage(t *testing.T, addr []byte) *rpc_types.ResultDumpStorage {
client := clients["HTTP"]
resp, err := edbcli.DumpStorage(client, addr)
if err != nil {
t.Fatal(err)
}
return resp
}
func getStorage(t *testing.T, client client.RPCClient, addr, key []byte) []byte {
resp, err := edbcli.GetStorage(client, addr, key)
if err != nil {
t.Fatal(err)
}
return resp
}
func callCode(t *testing.T, client client.RPCClient, fromAddress, code, data,
expected []byte) {
resp, err := edbcli.CallCode(client, fromAddress, code, data)
if err != nil {
t.Fatal(err)
}
ret := resp.Return
// NOTE: we don't flip memory when it comes out of RETURN (?!)
if bytes.Compare(ret, word256.LeftPadWord256(expected).Bytes()) != 0 {
t.Fatalf("Conflicting return value. Got %x, expected %x", ret, expected)
}
}
func callContract(t *testing.T, client client.RPCClient, fromAddress, toAddress,
data, expected []byte) {
resp, err := edbcli.Call(client, fromAddress, toAddress, data)
if err != nil {
t.Fatal(err)
}
ret := resp.Return
// NOTE: we don't flip memory when it comes out of RETURN (?!)
if bytes.Compare(ret, word256.LeftPadWord256(expected).Bytes()) != 0 {
t.Fatalf("Conflicting return value. Got %x, expected %x", ret, expected)
}
}
// get the namereg entry
func getNameRegEntry(t *testing.T, client client.RPCClient, name string) *core_types.NameRegEntry {
entry, err := edbcli.GetName(client, name)
if err != nil {
t.Fatal(err)
}
return entry
}
// Returns a positive int64 hash of text (consumers want int64 instead of uint64)
func hashString(text string) int64 {
hasher := fnv.New64()
hasher.Write([]byte(text))
value := int64(hasher.Sum64())
// Flip the sign if we wrapped
if value < 0 {
return -value
}
return value
}
//--------------------------------------------------------------------------------
// utility verification function
// simple contract returns 5 + 6 = 0xb
func simpleContract() ([]byte, []byte, []byte) {
// this is the code we want to run when the contract is called
contractCode := []byte{0x60, 0x5, 0x60, 0x6, 0x1, 0x60, 0x0, 0x52, 0x60, 0x20,
0x60, 0x0, 0xf3}
// the is the code we need to return the contractCode when the contract is initialized
lenCode := len(contractCode)
// push code to the stack
//code := append([]byte{byte(0x60 + lenCode - 1)}, RightPadWord256(contractCode).Bytes()...)
code := append([]byte{0x7f},
word256.RightPadWord256(contractCode).Bytes()...)
// store it in memory
code = append(code, []byte{0x60, 0x0, 0x52}...)
// return whats in memory
//code = append(code, []byte{0x60, byte(32 - lenCode), 0x60, byte(lenCode), 0xf3}...)
code = append(code, []byte{0x60, byte(lenCode), 0x60, 0x0, 0xf3}...)
// return init code, contract code, expected return
return code, contractCode, word256.LeftPadBytes([]byte{0xb}, 32)
}
// simple call contract calls another contract
func simpleCallContract(addr []byte) ([]byte, []byte, []byte) {
gas1, gas2 := byte(0x1), byte(0x1)
value := byte(0x1)
inOff, inSize := byte(0x0), byte(0x0) // no call data
retOff, retSize := byte(0x0), byte(0x20)
// this is the code we want to run (call a contract and return)
contractCode := []byte{0x60, retSize, 0x60, retOff, 0x60, inSize, 0x60, inOff,
0x60, value, 0x73}
contractCode = append(contractCode, addr...)
contractCode = append(contractCode, []byte{0x61, gas1, gas2, 0xf1, 0x60, 0x20,
0x60, 0x0, 0xf3}...)
// the is the code we need to return; the contractCode when the contract is initialized
// it should copy the code from the input into memory
lenCode := len(contractCode)
memOff := byte(0x0)
inOff = byte(0xc) // length of code before codeContract
length := byte(lenCode)
code := []byte{0x60, length, 0x60, inOff, 0x60, memOff, 0x37}
// return whats in memory
code = append(code, []byte{0x60, byte(lenCode), 0x60, 0x0, 0xf3}...)
code = append(code, contractCode...)
// return init code, contract code, expected return
return code, contractCode, word256.LeftPadBytes([]byte{0xb}, 32)
} | httpClient client.RPCClient
clients map[string]client.RPCClient | random_line_split |
shared.go | // Copyright 2017 Monax Industries Limited
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package test
import (
"bytes"
"errors"
"fmt"
"hash/fnv"
"path"
"strconv"
"testing"
"time"
acm "github.com/hyperledger/burrow/account"
"github.com/hyperledger/burrow/config"
"github.com/hyperledger/burrow/core"
core_types "github.com/hyperledger/burrow/core/types"
genesis "github.com/hyperledger/burrow/genesis"
"github.com/hyperledger/burrow/logging/lifecycle"
"github.com/hyperledger/burrow/manager/burrow-mint/evm"
ptypes "github.com/hyperledger/burrow/permission/types"
"github.com/hyperledger/burrow/rpc/tendermint/client"
edbcli "github.com/hyperledger/burrow/rpc/tendermint/client"
rpc_types "github.com/hyperledger/burrow/rpc/tendermint/core/types"
"github.com/hyperledger/burrow/server"
"github.com/hyperledger/burrow/test/fixtures"
"github.com/hyperledger/burrow/txs"
"github.com/hyperledger/burrow/word256"
"github.com/spf13/viper"
"github.com/tendermint/go-crypto"
rpcclient "github.com/tendermint/go-rpc/client"
"github.com/tendermint/tendermint/types"
)
const chainID = "RPC_Test_Chain"
// global variables for use across all tests
var (
serverConfig *server.ServerConfig
rootWorkDir string
mempoolCount = 0
websocketAddr string
genesisDoc *genesis.GenesisDoc
websocketEndpoint string
users = makeUsers(5) // make keys
jsonRpcClient client.RPCClient
httpClient client.RPCClient
clients map[string]client.RPCClient
testCore *core.Core
)
// We use this to wrap tests
func TestWrapper(runner func() int) int {
fmt.Println("Running with integration TestWrapper (rpc/tendermint/test/shared_test.go)...")
ffs := fixtures.NewFileFixtures("burrow")
defer func() {
// Tendermint likes to try and save to priv_validator.json after its been
// asked to shutdown so we pause to try and avoid collision
time.Sleep(time.Second)
ffs.RemoveAll()
}()
vm.SetDebug(true)
err := initGlobalVariables(ffs)
if err != nil {
panic(err)
}
tmServer, err := testCore.NewGatewayTendermint(serverConfig)
defer func() {
// Shutdown -- make sure we don't hit a race on ffs.RemoveAll
tmServer.Shutdown()
testCore.Stop()
}()
if err != nil {
panic(err)
}
return runner()
}
// initialize config and create new node
func initGlobalVariables(ffs *fixtures.FileFixtures) error {
configBytes, err := config.GetConfigurationFileBytes(chainID,
"test_single_node",
"",
"burrow",
true,
"46657",
"burrow serve")
if err != nil {
return err
}
genesisBytes, err := genesisFileBytesFromUsers(chainID, users)
if err != nil {
return err
}
testConfigFile := ffs.AddFile("config.toml", string(configBytes))
rootWorkDir = ffs.AddDir("rootWorkDir")
rootDataDir := ffs.AddDir("rootDataDir")
genesisFile := ffs.AddFile("rootWorkDir/genesis.json", string(genesisBytes))
genesisDoc = genesis.GenesisDocFromJSON(genesisBytes)
if ffs.Error != nil {
return ffs.Error
}
testConfig := viper.New()
testConfig.SetConfigFile(testConfigFile)
err = testConfig.ReadInConfig()
if err != nil {
return err
}
sconf, err := core.LoadServerConfig(chainID, testConfig)
if err != nil {
return err
}
serverConfig = sconf
rpcAddr := serverConfig.Tendermint.RpcLocalAddress
websocketAddr = rpcAddr
websocketEndpoint = "/websocket"
consensusConfig, err := core.LoadModuleConfig(testConfig, rootWorkDir,
rootDataDir, genesisFile, chainID, "consensus")
if err != nil {
return err
}
managerConfig, err := core.LoadModuleConfig(testConfig, rootWorkDir,
rootDataDir, genesisFile, chainID, "manager")
if err != nil {
return err
}
// Set up priv_validator.json before we start tendermint (otherwise it will
// create its own one.
saveNewPriv()
logger, _ := lifecycle.NewStdErrLogger()
// To spill tendermint logs on the floor:
// lifecycle.CaptureTendermintLog15Output(loggers.NewNoopInfoTraceLogger())
lifecycle.CaptureTendermintLog15Output(logger)
lifecycle.CaptureStdlibLogOutput(logger)
testCore, err = core.NewCore("testCore", consensusConfig, managerConfig,
logger)
if err != nil {
return err
}
jsonRpcClient = rpcclient.NewJSONRPCClient(rpcAddr)
httpClient = rpcclient.NewURIClient(rpcAddr)
clients = map[string]client.RPCClient{
"JSONRPC": jsonRpcClient,
"HTTP": httpClient,
}
return nil
}
// Deterministic account generation helper. Pass number of accounts to make
func makeUsers(n int) []*acm.PrivAccount {
accounts := []*acm.PrivAccount{}
for i := 0; i < n; i++ {
secret := "mysecret" + strconv.Itoa(i)
user := acm.GenPrivAccountFromSecret(secret)
accounts = append(accounts, user)
}
return accounts
}
func genesisFileBytesFromUsers(chainName string, accounts []*acm.PrivAccount) ([]byte, error) {
if len(accounts) < 1 {
return nil, errors.New("Please pass in at least 1 account to be the validator")
}
genesisValidators := make([]*genesis.GenesisValidator, 1)
genesisAccounts := make([]*genesis.GenesisAccount, len(accounts))
genesisValidators[0] = genesisValidatorFromPrivAccount(accounts[0])
for i, acc := range accounts {
genesisAccounts[i] = genesisAccountFromPrivAccount(acc)
}
return genesis.GenerateGenesisFileBytes(chainName, genesisAccounts, genesisValidators)
}
func | (account *acm.PrivAccount) *genesis.GenesisValidator {
return &genesis.GenesisValidator{
Amount: 1000000,
Name: fmt.Sprintf("full-account_%X", account.Address),
PubKey: account.PubKey,
UnbondTo: []genesis.BasicAccount{
{
Address: account.Address,
Amount: 100,
},
},
}
}
func genesisAccountFromPrivAccount(account *acm.PrivAccount) *genesis.GenesisAccount {
return genesis.NewGenesisAccount(account.Address, 100000,
fmt.Sprintf("account_%X", account.Address), &ptypes.DefaultAccountPermissions)
}
func saveNewPriv() {
// Save new priv_validator file.
priv := &types.PrivValidator{
Address: users[0].Address,
PubKey: crypto.PubKeyEd25519(users[0].PubKey.(crypto.PubKeyEd25519)),
PrivKey: crypto.PrivKeyEd25519(users[0].PrivKey.(crypto.PrivKeyEd25519)),
}
priv.SetFile(path.Join(rootWorkDir, "priv_validator.json"))
priv.Save()
}
//-------------------------------------------------------------------------------
// some default transaction functions
func makeDefaultSendTx(t *testing.T, client client.RPCClient, addr []byte,
amt int64) *txs.SendTx {
nonce := getNonce(t, client, users[0].Address)
tx := txs.NewSendTx()
tx.AddInputWithNonce(users[0].PubKey, amt, nonce+1)
tx.AddOutput(addr, amt)
return tx
}
func makeDefaultSendTxSigned(t *testing.T, client client.RPCClient, addr []byte,
amt int64) *txs.SendTx {
tx := makeDefaultSendTx(t, client, addr, amt)
tx.SignInput(chainID, 0, users[0])
return tx
}
func makeDefaultCallTx(t *testing.T, client client.RPCClient, addr, code []byte, amt, gasLim,
fee int64) *txs.CallTx {
nonce := getNonce(t, client, users[0].Address)
tx := txs.NewCallTxWithNonce(users[0].PubKey, addr, code, amt, gasLim, fee,
nonce+1)
tx.Sign(chainID, users[0])
return tx
}
func makeDefaultNameTx(t *testing.T, client client.RPCClient, name, value string, amt,
fee int64) *txs.NameTx {
nonce := getNonce(t, client, users[0].Address)
tx := txs.NewNameTxWithNonce(users[0].PubKey, name, value, amt, fee, nonce+1)
tx.Sign(chainID, users[0])
return tx
}
//-------------------------------------------------------------------------------
// rpc call wrappers (fail on err)
// get an account's nonce
func getNonce(t *testing.T, client client.RPCClient, addr []byte) int {
ac, err := edbcli.GetAccount(client, addr)
if err != nil {
t.Fatal(err)
}
if ac == nil {
return 0
}
return ac.Sequence
}
// get the account
func getAccount(t *testing.T, client client.RPCClient, addr []byte) *acm.Account {
ac, err := edbcli.GetAccount(client, addr)
if err != nil {
t.Fatal(err)
}
return ac
}
// sign transaction
func signTx(t *testing.T, client client.RPCClient, tx txs.Tx,
privAcc *acm.PrivAccount) txs.Tx {
signedTx, err := edbcli.SignTx(client, tx, []*acm.PrivAccount{privAcc})
if err != nil {
t.Fatal(err)
}
return signedTx
}
// broadcast transaction
func broadcastTx(t *testing.T, client client.RPCClient, tx txs.Tx) txs.Receipt {
rec, err := edbcli.BroadcastTx(client, tx)
if err != nil {
t.Fatal(err)
}
mempoolCount += 1
return rec
}
// dump all storage for an account. currently unused
func dumpStorage(t *testing.T, addr []byte) *rpc_types.ResultDumpStorage {
client := clients["HTTP"]
resp, err := edbcli.DumpStorage(client, addr)
if err != nil {
t.Fatal(err)
}
return resp
}
func getStorage(t *testing.T, client client.RPCClient, addr, key []byte) []byte {
resp, err := edbcli.GetStorage(client, addr, key)
if err != nil {
t.Fatal(err)
}
return resp
}
func callCode(t *testing.T, client client.RPCClient, fromAddress, code, data,
expected []byte) {
resp, err := edbcli.CallCode(client, fromAddress, code, data)
if err != nil {
t.Fatal(err)
}
ret := resp.Return
// NOTE: we don't flip memory when it comes out of RETURN (?!)
if bytes.Compare(ret, word256.LeftPadWord256(expected).Bytes()) != 0 {
t.Fatalf("Conflicting return value. Got %x, expected %x", ret, expected)
}
}
func callContract(t *testing.T, client client.RPCClient, fromAddress, toAddress,
data, expected []byte) {
resp, err := edbcli.Call(client, fromAddress, toAddress, data)
if err != nil {
t.Fatal(err)
}
ret := resp.Return
// NOTE: we don't flip memory when it comes out of RETURN (?!)
if bytes.Compare(ret, word256.LeftPadWord256(expected).Bytes()) != 0 {
t.Fatalf("Conflicting return value. Got %x, expected %x", ret, expected)
}
}
// get the namereg entry
func getNameRegEntry(t *testing.T, client client.RPCClient, name string) *core_types.NameRegEntry {
entry, err := edbcli.GetName(client, name)
if err != nil {
t.Fatal(err)
}
return entry
}
// Returns a positive int64 hash of text (consumers want int64 instead of uint64)
func hashString(text string) int64 {
hasher := fnv.New64()
hasher.Write([]byte(text))
value := int64(hasher.Sum64())
// Flip the sign if we wrapped
if value < 0 {
return -value
}
return value
}
//--------------------------------------------------------------------------------
// utility verification function
// simple contract returns 5 + 6 = 0xb
func simpleContract() ([]byte, []byte, []byte) {
// this is the code we want to run when the contract is called
contractCode := []byte{0x60, 0x5, 0x60, 0x6, 0x1, 0x60, 0x0, 0x52, 0x60, 0x20,
0x60, 0x0, 0xf3}
// the is the code we need to return the contractCode when the contract is initialized
lenCode := len(contractCode)
// push code to the stack
//code := append([]byte{byte(0x60 + lenCode - 1)}, RightPadWord256(contractCode).Bytes()...)
code := append([]byte{0x7f},
word256.RightPadWord256(contractCode).Bytes()...)
// store it in memory
code = append(code, []byte{0x60, 0x0, 0x52}...)
// return whats in memory
//code = append(code, []byte{0x60, byte(32 - lenCode), 0x60, byte(lenCode), 0xf3}...)
code = append(code, []byte{0x60, byte(lenCode), 0x60, 0x0, 0xf3}...)
// return init code, contract code, expected return
return code, contractCode, word256.LeftPadBytes([]byte{0xb}, 32)
}
// simple call contract calls another contract
func simpleCallContract(addr []byte) ([]byte, []byte, []byte) {
gas1, gas2 := byte(0x1), byte(0x1)
value := byte(0x1)
inOff, inSize := byte(0x0), byte(0x0) // no call data
retOff, retSize := byte(0x0), byte(0x20)
// this is the code we want to run (call a contract and return)
contractCode := []byte{0x60, retSize, 0x60, retOff, 0x60, inSize, 0x60, inOff,
0x60, value, 0x73}
contractCode = append(contractCode, addr...)
contractCode = append(contractCode, []byte{0x61, gas1, gas2, 0xf1, 0x60, 0x20,
0x60, 0x0, 0xf3}...)
// the is the code we need to return; the contractCode when the contract is initialized
// it should copy the code from the input into memory
lenCode := len(contractCode)
memOff := byte(0x0)
inOff = byte(0xc) // length of code before codeContract
length := byte(lenCode)
code := []byte{0x60, length, 0x60, inOff, 0x60, memOff, 0x37}
// return whats in memory
code = append(code, []byte{0x60, byte(lenCode), 0x60, 0x0, 0xf3}...)
code = append(code, contractCode...)
// return init code, contract code, expected return
return code, contractCode, word256.LeftPadBytes([]byte{0xb}, 32)
}
| genesisValidatorFromPrivAccount | identifier_name |
shared.go | // Copyright 2017 Monax Industries Limited
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package test
import (
"bytes"
"errors"
"fmt"
"hash/fnv"
"path"
"strconv"
"testing"
"time"
acm "github.com/hyperledger/burrow/account"
"github.com/hyperledger/burrow/config"
"github.com/hyperledger/burrow/core"
core_types "github.com/hyperledger/burrow/core/types"
genesis "github.com/hyperledger/burrow/genesis"
"github.com/hyperledger/burrow/logging/lifecycle"
"github.com/hyperledger/burrow/manager/burrow-mint/evm"
ptypes "github.com/hyperledger/burrow/permission/types"
"github.com/hyperledger/burrow/rpc/tendermint/client"
edbcli "github.com/hyperledger/burrow/rpc/tendermint/client"
rpc_types "github.com/hyperledger/burrow/rpc/tendermint/core/types"
"github.com/hyperledger/burrow/server"
"github.com/hyperledger/burrow/test/fixtures"
"github.com/hyperledger/burrow/txs"
"github.com/hyperledger/burrow/word256"
"github.com/spf13/viper"
"github.com/tendermint/go-crypto"
rpcclient "github.com/tendermint/go-rpc/client"
"github.com/tendermint/tendermint/types"
)
const chainID = "RPC_Test_Chain"
// global variables for use across all tests
var (
serverConfig *server.ServerConfig
rootWorkDir string
mempoolCount = 0
websocketAddr string
genesisDoc *genesis.GenesisDoc
websocketEndpoint string
users = makeUsers(5) // make keys
jsonRpcClient client.RPCClient
httpClient client.RPCClient
clients map[string]client.RPCClient
testCore *core.Core
)
// We use this to wrap tests
func TestWrapper(runner func() int) int {
fmt.Println("Running with integration TestWrapper (rpc/tendermint/test/shared_test.go)...")
ffs := fixtures.NewFileFixtures("burrow")
defer func() {
// Tendermint likes to try and save to priv_validator.json after its been
// asked to shutdown so we pause to try and avoid collision
time.Sleep(time.Second)
ffs.RemoveAll()
}()
vm.SetDebug(true)
err := initGlobalVariables(ffs)
if err != nil {
panic(err)
}
tmServer, err := testCore.NewGatewayTendermint(serverConfig)
defer func() {
// Shutdown -- make sure we don't hit a race on ffs.RemoveAll
tmServer.Shutdown()
testCore.Stop()
}()
if err != nil {
panic(err)
}
return runner()
}
// initialize config and create new node
func initGlobalVariables(ffs *fixtures.FileFixtures) error {
configBytes, err := config.GetConfigurationFileBytes(chainID,
"test_single_node",
"",
"burrow",
true,
"46657",
"burrow serve")
if err != nil {
return err
}
genesisBytes, err := genesisFileBytesFromUsers(chainID, users)
if err != nil {
return err
}
testConfigFile := ffs.AddFile("config.toml", string(configBytes))
rootWorkDir = ffs.AddDir("rootWorkDir")
rootDataDir := ffs.AddDir("rootDataDir")
genesisFile := ffs.AddFile("rootWorkDir/genesis.json", string(genesisBytes))
genesisDoc = genesis.GenesisDocFromJSON(genesisBytes)
if ffs.Error != nil {
return ffs.Error
}
testConfig := viper.New()
testConfig.SetConfigFile(testConfigFile)
err = testConfig.ReadInConfig()
if err != nil {
return err
}
sconf, err := core.LoadServerConfig(chainID, testConfig)
if err != nil {
return err
}
serverConfig = sconf
rpcAddr := serverConfig.Tendermint.RpcLocalAddress
websocketAddr = rpcAddr
websocketEndpoint = "/websocket"
consensusConfig, err := core.LoadModuleConfig(testConfig, rootWorkDir,
rootDataDir, genesisFile, chainID, "consensus")
if err != nil {
return err
}
managerConfig, err := core.LoadModuleConfig(testConfig, rootWorkDir,
rootDataDir, genesisFile, chainID, "manager")
if err != nil {
return err
}
// Set up priv_validator.json before we start tendermint (otherwise it will
// create its own one.
saveNewPriv()
logger, _ := lifecycle.NewStdErrLogger()
// To spill tendermint logs on the floor:
// lifecycle.CaptureTendermintLog15Output(loggers.NewNoopInfoTraceLogger())
lifecycle.CaptureTendermintLog15Output(logger)
lifecycle.CaptureStdlibLogOutput(logger)
testCore, err = core.NewCore("testCore", consensusConfig, managerConfig,
logger)
if err != nil {
return err
}
jsonRpcClient = rpcclient.NewJSONRPCClient(rpcAddr)
httpClient = rpcclient.NewURIClient(rpcAddr)
clients = map[string]client.RPCClient{
"JSONRPC": jsonRpcClient,
"HTTP": httpClient,
}
return nil
}
// Deterministic account generation helper. Pass number of accounts to make
func makeUsers(n int) []*acm.PrivAccount {
accounts := []*acm.PrivAccount{}
for i := 0; i < n; i++ {
secret := "mysecret" + strconv.Itoa(i)
user := acm.GenPrivAccountFromSecret(secret)
accounts = append(accounts, user)
}
return accounts
}
func genesisFileBytesFromUsers(chainName string, accounts []*acm.PrivAccount) ([]byte, error) {
if len(accounts) < 1 {
return nil, errors.New("Please pass in at least 1 account to be the validator")
}
genesisValidators := make([]*genesis.GenesisValidator, 1)
genesisAccounts := make([]*genesis.GenesisAccount, len(accounts))
genesisValidators[0] = genesisValidatorFromPrivAccount(accounts[0])
for i, acc := range accounts {
genesisAccounts[i] = genesisAccountFromPrivAccount(acc)
}
return genesis.GenerateGenesisFileBytes(chainName, genesisAccounts, genesisValidators)
}
func genesisValidatorFromPrivAccount(account *acm.PrivAccount) *genesis.GenesisValidator {
return &genesis.GenesisValidator{
Amount: 1000000,
Name: fmt.Sprintf("full-account_%X", account.Address),
PubKey: account.PubKey,
UnbondTo: []genesis.BasicAccount{
{
Address: account.Address,
Amount: 100,
},
},
}
}
func genesisAccountFromPrivAccount(account *acm.PrivAccount) *genesis.GenesisAccount {
return genesis.NewGenesisAccount(account.Address, 100000,
fmt.Sprintf("account_%X", account.Address), &ptypes.DefaultAccountPermissions)
}
func saveNewPriv() {
// Save new priv_validator file.
priv := &types.PrivValidator{
Address: users[0].Address,
PubKey: crypto.PubKeyEd25519(users[0].PubKey.(crypto.PubKeyEd25519)),
PrivKey: crypto.PrivKeyEd25519(users[0].PrivKey.(crypto.PrivKeyEd25519)),
}
priv.SetFile(path.Join(rootWorkDir, "priv_validator.json"))
priv.Save()
}
//-------------------------------------------------------------------------------
// some default transaction functions
func makeDefaultSendTx(t *testing.T, client client.RPCClient, addr []byte,
amt int64) *txs.SendTx {
nonce := getNonce(t, client, users[0].Address)
tx := txs.NewSendTx()
tx.AddInputWithNonce(users[0].PubKey, amt, nonce+1)
tx.AddOutput(addr, amt)
return tx
}
func makeDefaultSendTxSigned(t *testing.T, client client.RPCClient, addr []byte,
amt int64) *txs.SendTx {
tx := makeDefaultSendTx(t, client, addr, amt)
tx.SignInput(chainID, 0, users[0])
return tx
}
func makeDefaultCallTx(t *testing.T, client client.RPCClient, addr, code []byte, amt, gasLim,
fee int64) *txs.CallTx {
nonce := getNonce(t, client, users[0].Address)
tx := txs.NewCallTxWithNonce(users[0].PubKey, addr, code, amt, gasLim, fee,
nonce+1)
tx.Sign(chainID, users[0])
return tx
}
func makeDefaultNameTx(t *testing.T, client client.RPCClient, name, value string, amt,
fee int64) *txs.NameTx {
nonce := getNonce(t, client, users[0].Address)
tx := txs.NewNameTxWithNonce(users[0].PubKey, name, value, amt, fee, nonce+1)
tx.Sign(chainID, users[0])
return tx
}
//-------------------------------------------------------------------------------
// rpc call wrappers (fail on err)
// get an account's nonce
func getNonce(t *testing.T, client client.RPCClient, addr []byte) int {
ac, err := edbcli.GetAccount(client, addr)
if err != nil {
t.Fatal(err)
}
if ac == nil {
return 0
}
return ac.Sequence
}
// get the account
func getAccount(t *testing.T, client client.RPCClient, addr []byte) *acm.Account {
ac, err := edbcli.GetAccount(client, addr)
if err != nil {
t.Fatal(err)
}
return ac
}
// sign transaction
func signTx(t *testing.T, client client.RPCClient, tx txs.Tx,
privAcc *acm.PrivAccount) txs.Tx {
signedTx, err := edbcli.SignTx(client, tx, []*acm.PrivAccount{privAcc})
if err != nil {
t.Fatal(err)
}
return signedTx
}
// broadcast transaction
func broadcastTx(t *testing.T, client client.RPCClient, tx txs.Tx) txs.Receipt {
rec, err := edbcli.BroadcastTx(client, tx)
if err != nil {
t.Fatal(err)
}
mempoolCount += 1
return rec
}
// dump all storage for an account. currently unused
func dumpStorage(t *testing.T, addr []byte) *rpc_types.ResultDumpStorage {
client := clients["HTTP"]
resp, err := edbcli.DumpStorage(client, addr)
if err != nil {
t.Fatal(err)
}
return resp
}
func getStorage(t *testing.T, client client.RPCClient, addr, key []byte) []byte {
resp, err := edbcli.GetStorage(client, addr, key)
if err != nil {
t.Fatal(err)
}
return resp
}
func callCode(t *testing.T, client client.RPCClient, fromAddress, code, data,
expected []byte) {
resp, err := edbcli.CallCode(client, fromAddress, code, data)
if err != nil {
t.Fatal(err)
}
ret := resp.Return
// NOTE: we don't flip memory when it comes out of RETURN (?!)
if bytes.Compare(ret, word256.LeftPadWord256(expected).Bytes()) != 0 {
t.Fatalf("Conflicting return value. Got %x, expected %x", ret, expected)
}
}
func callContract(t *testing.T, client client.RPCClient, fromAddress, toAddress,
data, expected []byte) {
resp, err := edbcli.Call(client, fromAddress, toAddress, data)
if err != nil {
t.Fatal(err)
}
ret := resp.Return
// NOTE: we don't flip memory when it comes out of RETURN (?!)
if bytes.Compare(ret, word256.LeftPadWord256(expected).Bytes()) != 0 {
t.Fatalf("Conflicting return value. Got %x, expected %x", ret, expected)
}
}
// get the namereg entry
func getNameRegEntry(t *testing.T, client client.RPCClient, name string) *core_types.NameRegEntry {
entry, err := edbcli.GetName(client, name)
if err != nil |
return entry
}
// Returns a positive int64 hash of text (consumers want int64 instead of uint64)
func hashString(text string) int64 {
hasher := fnv.New64()
hasher.Write([]byte(text))
value := int64(hasher.Sum64())
// Flip the sign if we wrapped
if value < 0 {
return -value
}
return value
}
//--------------------------------------------------------------------------------
// utility verification function
// simple contract returns 5 + 6 = 0xb
func simpleContract() ([]byte, []byte, []byte) {
// this is the code we want to run when the contract is called
contractCode := []byte{0x60, 0x5, 0x60, 0x6, 0x1, 0x60, 0x0, 0x52, 0x60, 0x20,
0x60, 0x0, 0xf3}
// the is the code we need to return the contractCode when the contract is initialized
lenCode := len(contractCode)
// push code to the stack
//code := append([]byte{byte(0x60 + lenCode - 1)}, RightPadWord256(contractCode).Bytes()...)
code := append([]byte{0x7f},
word256.RightPadWord256(contractCode).Bytes()...)
// store it in memory
code = append(code, []byte{0x60, 0x0, 0x52}...)
// return whats in memory
//code = append(code, []byte{0x60, byte(32 - lenCode), 0x60, byte(lenCode), 0xf3}...)
code = append(code, []byte{0x60, byte(lenCode), 0x60, 0x0, 0xf3}...)
// return init code, contract code, expected return
return code, contractCode, word256.LeftPadBytes([]byte{0xb}, 32)
}
// simple call contract calls another contract
func simpleCallContract(addr []byte) ([]byte, []byte, []byte) {
gas1, gas2 := byte(0x1), byte(0x1)
value := byte(0x1)
inOff, inSize := byte(0x0), byte(0x0) // no call data
retOff, retSize := byte(0x0), byte(0x20)
// this is the code we want to run (call a contract and return)
contractCode := []byte{0x60, retSize, 0x60, retOff, 0x60, inSize, 0x60, inOff,
0x60, value, 0x73}
contractCode = append(contractCode, addr...)
contractCode = append(contractCode, []byte{0x61, gas1, gas2, 0xf1, 0x60, 0x20,
0x60, 0x0, 0xf3}...)
// the is the code we need to return; the contractCode when the contract is initialized
// it should copy the code from the input into memory
lenCode := len(contractCode)
memOff := byte(0x0)
inOff = byte(0xc) // length of code before codeContract
length := byte(lenCode)
code := []byte{0x60, length, 0x60, inOff, 0x60, memOff, 0x37}
// return whats in memory
code = append(code, []byte{0x60, byte(lenCode), 0x60, 0x0, 0xf3}...)
code = append(code, contractCode...)
// return init code, contract code, expected return
return code, contractCode, word256.LeftPadBytes([]byte{0xb}, 32)
}
| {
t.Fatal(err)
} | conditional_block |
shared.go | // Copyright 2017 Monax Industries Limited
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package test
import (
"bytes"
"errors"
"fmt"
"hash/fnv"
"path"
"strconv"
"testing"
"time"
acm "github.com/hyperledger/burrow/account"
"github.com/hyperledger/burrow/config"
"github.com/hyperledger/burrow/core"
core_types "github.com/hyperledger/burrow/core/types"
genesis "github.com/hyperledger/burrow/genesis"
"github.com/hyperledger/burrow/logging/lifecycle"
"github.com/hyperledger/burrow/manager/burrow-mint/evm"
ptypes "github.com/hyperledger/burrow/permission/types"
"github.com/hyperledger/burrow/rpc/tendermint/client"
edbcli "github.com/hyperledger/burrow/rpc/tendermint/client"
rpc_types "github.com/hyperledger/burrow/rpc/tendermint/core/types"
"github.com/hyperledger/burrow/server"
"github.com/hyperledger/burrow/test/fixtures"
"github.com/hyperledger/burrow/txs"
"github.com/hyperledger/burrow/word256"
"github.com/spf13/viper"
"github.com/tendermint/go-crypto"
rpcclient "github.com/tendermint/go-rpc/client"
"github.com/tendermint/tendermint/types"
)
const chainID = "RPC_Test_Chain"
// global variables for use across all tests
var (
serverConfig *server.ServerConfig
rootWorkDir string
mempoolCount = 0
websocketAddr string
genesisDoc *genesis.GenesisDoc
websocketEndpoint string
users = makeUsers(5) // make keys
jsonRpcClient client.RPCClient
httpClient client.RPCClient
clients map[string]client.RPCClient
testCore *core.Core
)
// We use this to wrap tests
func TestWrapper(runner func() int) int {
fmt.Println("Running with integration TestWrapper (rpc/tendermint/test/shared_test.go)...")
ffs := fixtures.NewFileFixtures("burrow")
defer func() {
// Tendermint likes to try and save to priv_validator.json after its been
// asked to shutdown so we pause to try and avoid collision
time.Sleep(time.Second)
ffs.RemoveAll()
}()
vm.SetDebug(true)
err := initGlobalVariables(ffs)
if err != nil {
panic(err)
}
tmServer, err := testCore.NewGatewayTendermint(serverConfig)
defer func() {
// Shutdown -- make sure we don't hit a race on ffs.RemoveAll
tmServer.Shutdown()
testCore.Stop()
}()
if err != nil {
panic(err)
}
return runner()
}
// initialize config and create new node
func initGlobalVariables(ffs *fixtures.FileFixtures) error {
configBytes, err := config.GetConfigurationFileBytes(chainID,
"test_single_node",
"",
"burrow",
true,
"46657",
"burrow serve")
if err != nil {
return err
}
genesisBytes, err := genesisFileBytesFromUsers(chainID, users)
if err != nil {
return err
}
testConfigFile := ffs.AddFile("config.toml", string(configBytes))
rootWorkDir = ffs.AddDir("rootWorkDir")
rootDataDir := ffs.AddDir("rootDataDir")
genesisFile := ffs.AddFile("rootWorkDir/genesis.json", string(genesisBytes))
genesisDoc = genesis.GenesisDocFromJSON(genesisBytes)
if ffs.Error != nil {
return ffs.Error
}
testConfig := viper.New()
testConfig.SetConfigFile(testConfigFile)
err = testConfig.ReadInConfig()
if err != nil {
return err
}
sconf, err := core.LoadServerConfig(chainID, testConfig)
if err != nil {
return err
}
serverConfig = sconf
rpcAddr := serverConfig.Tendermint.RpcLocalAddress
websocketAddr = rpcAddr
websocketEndpoint = "/websocket"
consensusConfig, err := core.LoadModuleConfig(testConfig, rootWorkDir,
rootDataDir, genesisFile, chainID, "consensus")
if err != nil {
return err
}
managerConfig, err := core.LoadModuleConfig(testConfig, rootWorkDir,
rootDataDir, genesisFile, chainID, "manager")
if err != nil {
return err
}
// Set up priv_validator.json before we start tendermint (otherwise it will
// create its own one.
saveNewPriv()
logger, _ := lifecycle.NewStdErrLogger()
// To spill tendermint logs on the floor:
// lifecycle.CaptureTendermintLog15Output(loggers.NewNoopInfoTraceLogger())
lifecycle.CaptureTendermintLog15Output(logger)
lifecycle.CaptureStdlibLogOutput(logger)
testCore, err = core.NewCore("testCore", consensusConfig, managerConfig,
logger)
if err != nil {
return err
}
jsonRpcClient = rpcclient.NewJSONRPCClient(rpcAddr)
httpClient = rpcclient.NewURIClient(rpcAddr)
clients = map[string]client.RPCClient{
"JSONRPC": jsonRpcClient,
"HTTP": httpClient,
}
return nil
}
// Deterministic account generation helper. Pass number of accounts to make
func makeUsers(n int) []*acm.PrivAccount {
accounts := []*acm.PrivAccount{}
for i := 0; i < n; i++ {
secret := "mysecret" + strconv.Itoa(i)
user := acm.GenPrivAccountFromSecret(secret)
accounts = append(accounts, user)
}
return accounts
}
func genesisFileBytesFromUsers(chainName string, accounts []*acm.PrivAccount) ([]byte, error) {
if len(accounts) < 1 {
return nil, errors.New("Please pass in at least 1 account to be the validator")
}
genesisValidators := make([]*genesis.GenesisValidator, 1)
genesisAccounts := make([]*genesis.GenesisAccount, len(accounts))
genesisValidators[0] = genesisValidatorFromPrivAccount(accounts[0])
for i, acc := range accounts {
genesisAccounts[i] = genesisAccountFromPrivAccount(acc)
}
return genesis.GenerateGenesisFileBytes(chainName, genesisAccounts, genesisValidators)
}
func genesisValidatorFromPrivAccount(account *acm.PrivAccount) *genesis.GenesisValidator {
return &genesis.GenesisValidator{
Amount: 1000000,
Name: fmt.Sprintf("full-account_%X", account.Address),
PubKey: account.PubKey,
UnbondTo: []genesis.BasicAccount{
{
Address: account.Address,
Amount: 100,
},
},
}
}
func genesisAccountFromPrivAccount(account *acm.PrivAccount) *genesis.GenesisAccount {
return genesis.NewGenesisAccount(account.Address, 100000,
fmt.Sprintf("account_%X", account.Address), &ptypes.DefaultAccountPermissions)
}
func saveNewPriv() {
// Save new priv_validator file.
priv := &types.PrivValidator{
Address: users[0].Address,
PubKey: crypto.PubKeyEd25519(users[0].PubKey.(crypto.PubKeyEd25519)),
PrivKey: crypto.PrivKeyEd25519(users[0].PrivKey.(crypto.PrivKeyEd25519)),
}
priv.SetFile(path.Join(rootWorkDir, "priv_validator.json"))
priv.Save()
}
//-------------------------------------------------------------------------------
// some default transaction functions
func makeDefaultSendTx(t *testing.T, client client.RPCClient, addr []byte,
amt int64) *txs.SendTx {
nonce := getNonce(t, client, users[0].Address)
tx := txs.NewSendTx()
tx.AddInputWithNonce(users[0].PubKey, amt, nonce+1)
tx.AddOutput(addr, amt)
return tx
}
func makeDefaultSendTxSigned(t *testing.T, client client.RPCClient, addr []byte,
amt int64) *txs.SendTx {
tx := makeDefaultSendTx(t, client, addr, amt)
tx.SignInput(chainID, 0, users[0])
return tx
}
func makeDefaultCallTx(t *testing.T, client client.RPCClient, addr, code []byte, amt, gasLim,
fee int64) *txs.CallTx {
nonce := getNonce(t, client, users[0].Address)
tx := txs.NewCallTxWithNonce(users[0].PubKey, addr, code, amt, gasLim, fee,
nonce+1)
tx.Sign(chainID, users[0])
return tx
}
func makeDefaultNameTx(t *testing.T, client client.RPCClient, name, value string, amt,
fee int64) *txs.NameTx {
nonce := getNonce(t, client, users[0].Address)
tx := txs.NewNameTxWithNonce(users[0].PubKey, name, value, amt, fee, nonce+1)
tx.Sign(chainID, users[0])
return tx
}
//-------------------------------------------------------------------------------
// rpc call wrappers (fail on err)
// get an account's nonce
func getNonce(t *testing.T, client client.RPCClient, addr []byte) int {
ac, err := edbcli.GetAccount(client, addr)
if err != nil {
t.Fatal(err)
}
if ac == nil {
return 0
}
return ac.Sequence
}
// get the account
func getAccount(t *testing.T, client client.RPCClient, addr []byte) *acm.Account {
ac, err := edbcli.GetAccount(client, addr)
if err != nil {
t.Fatal(err)
}
return ac
}
// sign transaction
func signTx(t *testing.T, client client.RPCClient, tx txs.Tx,
privAcc *acm.PrivAccount) txs.Tx {
signedTx, err := edbcli.SignTx(client, tx, []*acm.PrivAccount{privAcc})
if err != nil {
t.Fatal(err)
}
return signedTx
}
// broadcast transaction
func broadcastTx(t *testing.T, client client.RPCClient, tx txs.Tx) txs.Receipt {
rec, err := edbcli.BroadcastTx(client, tx)
if err != nil {
t.Fatal(err)
}
mempoolCount += 1
return rec
}
// dump all storage for an account. currently unused
func dumpStorage(t *testing.T, addr []byte) *rpc_types.ResultDumpStorage {
client := clients["HTTP"]
resp, err := edbcli.DumpStorage(client, addr)
if err != nil {
t.Fatal(err)
}
return resp
}
func getStorage(t *testing.T, client client.RPCClient, addr, key []byte) []byte {
resp, err := edbcli.GetStorage(client, addr, key)
if err != nil {
t.Fatal(err)
}
return resp
}
func callCode(t *testing.T, client client.RPCClient, fromAddress, code, data,
expected []byte) |
func callContract(t *testing.T, client client.RPCClient, fromAddress, toAddress,
data, expected []byte) {
resp, err := edbcli.Call(client, fromAddress, toAddress, data)
if err != nil {
t.Fatal(err)
}
ret := resp.Return
// NOTE: we don't flip memory when it comes out of RETURN (?!)
if bytes.Compare(ret, word256.LeftPadWord256(expected).Bytes()) != 0 {
t.Fatalf("Conflicting return value. Got %x, expected %x", ret, expected)
}
}
// get the namereg entry
func getNameRegEntry(t *testing.T, client client.RPCClient, name string) *core_types.NameRegEntry {
entry, err := edbcli.GetName(client, name)
if err != nil {
t.Fatal(err)
}
return entry
}
// Returns a positive int64 hash of text (consumers want int64 instead of uint64)
func hashString(text string) int64 {
hasher := fnv.New64()
hasher.Write([]byte(text))
value := int64(hasher.Sum64())
// Flip the sign if we wrapped
if value < 0 {
return -value
}
return value
}
//--------------------------------------------------------------------------------
// utility verification function
// simple contract returns 5 + 6 = 0xb
func simpleContract() ([]byte, []byte, []byte) {
// this is the code we want to run when the contract is called
contractCode := []byte{0x60, 0x5, 0x60, 0x6, 0x1, 0x60, 0x0, 0x52, 0x60, 0x20,
0x60, 0x0, 0xf3}
// the is the code we need to return the contractCode when the contract is initialized
lenCode := len(contractCode)
// push code to the stack
//code := append([]byte{byte(0x60 + lenCode - 1)}, RightPadWord256(contractCode).Bytes()...)
code := append([]byte{0x7f},
word256.RightPadWord256(contractCode).Bytes()...)
// store it in memory
code = append(code, []byte{0x60, 0x0, 0x52}...)
// return whats in memory
//code = append(code, []byte{0x60, byte(32 - lenCode), 0x60, byte(lenCode), 0xf3}...)
code = append(code, []byte{0x60, byte(lenCode), 0x60, 0x0, 0xf3}...)
// return init code, contract code, expected return
return code, contractCode, word256.LeftPadBytes([]byte{0xb}, 32)
}
// simple call contract calls another contract
func simpleCallContract(addr []byte) ([]byte, []byte, []byte) {
gas1, gas2 := byte(0x1), byte(0x1)
value := byte(0x1)
inOff, inSize := byte(0x0), byte(0x0) // no call data
retOff, retSize := byte(0x0), byte(0x20)
// this is the code we want to run (call a contract and return)
contractCode := []byte{0x60, retSize, 0x60, retOff, 0x60, inSize, 0x60, inOff,
0x60, value, 0x73}
contractCode = append(contractCode, addr...)
contractCode = append(contractCode, []byte{0x61, gas1, gas2, 0xf1, 0x60, 0x20,
0x60, 0x0, 0xf3}...)
// the is the code we need to return; the contractCode when the contract is initialized
// it should copy the code from the input into memory
lenCode := len(contractCode)
memOff := byte(0x0)
inOff = byte(0xc) // length of code before codeContract
length := byte(lenCode)
code := []byte{0x60, length, 0x60, inOff, 0x60, memOff, 0x37}
// return whats in memory
code = append(code, []byte{0x60, byte(lenCode), 0x60, 0x0, 0xf3}...)
code = append(code, contractCode...)
// return init code, contract code, expected return
return code, contractCode, word256.LeftPadBytes([]byte{0xb}, 32)
}
| {
resp, err := edbcli.CallCode(client, fromAddress, code, data)
if err != nil {
t.Fatal(err)
}
ret := resp.Return
// NOTE: we don't flip memory when it comes out of RETURN (?!)
if bytes.Compare(ret, word256.LeftPadWord256(expected).Bytes()) != 0 {
t.Fatalf("Conflicting return value. Got %x, expected %x", ret, expected)
}
} | identifier_body |
BaseRole.js | var __extends = (this && this.__extends) || (function () {
var extendStatics = Object.setPrototypeOf ||
({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) ||
function (d, b) { for (var p in b) if (b.hasOwnProperty(p)) d[p] = b[p]; };
return function (d, b) {
extendStatics(d, b);
function __() { this.constructor = d; }
d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __());
};
})();
var Skeleton = Laya.Skeleton;
/*
* 角色
*/
var BaseRole = /** @class */ (function (_super) {
__extends(BaseRole, _super);
function BaseRole() {
| // private testScale(ary):void
// {
// var roleID = ary[0];
// var sca = ary[1];
// if(this.roleVo && this.roleVo.id == roleID)
// {
// var s:number = this.roleVo.isEnemy ? 1 : -1;
// this.skeletonAni.scaleX = s * sca;
// this.skeletonAni.scaleY = sca;
// var bound = this.skeletonAni.getBounds(); // 加载完毕之后才能拿到有效的bounds
// console.log(this.roleVo.name,bound.width,bound.height);
// }
// }
BaseRole.prototype.initRole = function (baseRoleVo, showPriority, scale, parentDis, showBloodBar) {
this.baseRoleVo = baseRoleVo;
this.showPriority = showPriority;
this.showBloodBar = showBloodBar === undefined ? false : showBloodBar;
if (scale) {
this.aniScale = scale;
}
this.isLoaded = false;
this.templet = new Laya.Templet();
this.templet.on(Laya.Event.COMPLETE, this, this.loadCompleted);
this.templet.on(Laya.Event.ERROR, this, this.skeletonLoadError);
// this.skeletonAni = new Skeleton();
// this.skeletonAni.scale(this.aniScale,this.aniScale);
// this.skeletonAni.scaleX = this.baseRoleVo.scale * this.aniScale;
// this.addChild(this.skeletonAni);
if (parentDis) {
parentDis.addChild(this);
}
else {
LayerManager.ins.addToLayer(this, LayerManager.ROLE_LAYER, false, true, false);
}
this.visible = true;
};
BaseRole.prototype.showFloatFont = function (tipString) {
tipString = tipString === undefined ? "" : tipString;
var floatFontTip = ObjectPoolUtil.borrowObjcet(ObjectPoolUtil.FLOAT_FONT_TIPS);
if (floatFontTip) {
// floatFontTip.setAttribute(40,"#ff0000");
// floatFontTip.show(tipString,this,-30,-200,0.5,40,80,this.baseRoleVo.isEnemy);
floatFontTip.showFlontClip(tipString, this, -30, -200, 0.5, 40, 80, this.baseRoleVo.isEnemy);
}
};
/**
*
* @param aniID 动画id
*/
BaseRole.prototype.aniPlay = function (aniID, loop, caller, method, defRole) {
this.aniId = aniID;
this.loop = loop;
this.caller = caller;
this.method = method;
this.defRole = defRole;
if (this.isLoaded) {
loop = loop === undefined ? true : loop;
aniID = aniID % this.aniCount;
//>= aniCount默认播放第一个动画
if (this.skeletonAni) {
Laya.loader.on(/*laya.events.Event.ERROR*/ "error", this, this.skeletonLoadError);
// console.log("前........",this.baseRoleVo.name,aniID);
this.skeletonAni.player.on(Laya.Event.COMPLETE, this, this.onPlayCompleted);
var speedTime = GameDataManager.ins.isChallengeBoss ? GameConfig.BATTLE_ADDSPEED_TIMES : 1;
this.skeletonAni.playbackRate(speedTime);
this.skeletonAni.play(aniID, loop);
// console.log("........"+aniID);
}
}
else {
Laya.timer.frameOnce(this.showPriority * 6, this, this.skeletonAniLoad, null, false);
}
};
BaseRole.prototype.getSkillEffectInd = function () {
if (this.skeletonAni) {
return this.getChildIndex(this.skeletonAni);
}
return 0;
};
/**播放一次动画回调 */
BaseRole.prototype.onPlayCompleted = function () {
// console.log("后........",this.baseRoleVo.name,this.aniId);
if (this.aniId == RoleAniIndex.ATTACK && GameDataManager.showModuleViewInd == GameButtomTabIndex.BATTLE) {
SoundsManager.ins.playSound("res/outside/sound/effect/fit.wav");
}
this.skeletonAni.player.off(Laya.Event.COMPLETE, this, this.onPlayCompleted);
if (this.caller && this.method) {
// console.log(this.roleVo.name);
this.skeletonAni.paused();
this.method.call(this.caller, [this, this.defRole]);
}
};
BaseRole.prototype.skeletonAniLoad = function () {
//分帧加载
if (this.baseRoleVo) {
this.aniUrl = "res/outside/spine/role/" + this.baseRoleVo.modelId + "/" + this.baseRoleVo.modelId + ".sk";
// this.aniUrl = "res/outside/anim/role/sanjiaolong001/sanjiaolong001.sk";
// this.skeletonAni.load(this.aniUrl,Laya.Handler.create(this,this.loadCompleted));
this.templet.loadAni(this.aniUrl);
}
};
BaseRole.prototype.loadCompleted = function () {
// var bound = this.skeletonAni.getBounds(); // 加载完毕之后才能拿到有效的bounds
// console.log(this.roleVo.id,bound.width,bound.height);
if (!this.isLoaded) {
this.skeletonAni = this.templet.buildArmature(2);
if (this.baseRoleVo)
this.skeletonAni.scale(this.aniScale * this.baseRoleVo.scale, this.baseRoleVo.scale);
else
this.skeletonAni.scale(this.aniScale, 1);
this.addChild(this.skeletonAni);
this.isLoaded = true;
this.aniCount = this.skeletonAni.getAnimNum();
this.aniPlay(this.aniId, this.loop, this.caller, this.method);
// Laya.timer.once(100,this,this.initComponets);
if (this.showBloodBar) {
this.initComponets();
}
}
};
BaseRole.prototype.initComponets = function () {
// var bound:Rectangle = this.skeletonAni.getSelfBounds();
// this.aniWidth = bound.width + Math.abs(bound.x);
// this.aniHeight = bound.height + Math.abs(bound.y);
// console.log(this.baseRoleVo.name,bound);
//血条
// this.roleBloodBar = ObjectPoolUtil.borrowObjcet(ObjectPoolUtil.ROLE_BLOOD_BAR);
this.roleBloodBar = new RoleBloodBar();
this.roleBloodBar.visible = true;
// this.roleBloodBar.scaleX = 0.5;
if (this.baseRoleVo.isEnemy)
this.roleBloodBar.x = -30;
else
this.roleBloodBar.x = -60;
this.roleBloodBar.y = -180;
this.roleBloodBar.init();
this.addChild(this.roleBloodBar);
//名字
this.LblName = new Laya.Label();
this.LblName.width = 114;
this.LblName.x = this.roleBloodBar.x;
this.LblName.y = this.roleBloodBar.y - 30;
this.LblName.fontSize = 24;
this.LblName.color = "#00FF99";
this.LblName.align = "center";
this.LblName.text = this.baseRoleVo.name;
this.addChild(this.LblName);
};
BaseRole.prototype.setBlood = function (value) {
if (this.roleBloodBar) {
this.roleBloodBar.setProgress(value);
}
};
/**设置显示层级 */
BaseRole.prototype.setShowIndex = function (ind) {
if (this.parent && ind >= 0) {
this.parent.setChildIndex(this, ind);
}
};
BaseRole.prototype.run = function () {
this.aniPlay(RoleAniIndex.MOVE);
};
BaseRole.prototype.setVisible = function (bool) {
// Laya.timer.once(1000 / GameConfig.BATTLE_ADDSPEED_TIMES,this, this.setVis,[bool]);
Laya.timer.once(1000, this, this.setVis, [bool]);
};
BaseRole.prototype.setVis = function (bool) {
//延迟回调判断,复活就设置隐藏
if (this.baseRoleVo && this.baseRoleVo.isDeath) {
this.visible = bool;
}
};
BaseRole.prototype.dispose = function () {
this.parent.setChildIndex(this, 0);
this.removeSelf();
if (this.skeletonAni) {
Laya.loader.clearRes(this.skeletonAni.url);
this.skeletonAni.destroy();
}
this.skeletonAni = null;
if (this.LblName) {
this.LblName.removeSelf();
this.LblName = null;
}
if (this.roleBloodBar) {
this.roleBloodBar.removeSelf();
this.roleBloodBar = null;
// ObjectPoolUtil.stillObject(ObjectPoolUtil.ROLE_BLOOD_BAR,this.roleBloodBar);
}
this.baseRoleVo = null;
};
BaseRole.prototype.moveByMap = function (speed) {
};
/**加载出错用默认资源 */
BaseRole.prototype.skeletonLoadError = function (url) {
if (url.indexOf(this.aniUrl) != -1) {
if (this.templet) {
//释放老资源
this.templet.off(Laya.Event.COMPLETE, this, this.loadCompleted);
this.templet.off(Laya.Event.ERROR, this, this.skeletonLoadError);
this.templet.dispose();
this.templet = null;
}
this.templet = new Laya.Templet();
this.templet.on(Laya.Event.COMPLETE, this, this.loadCompleted);
this.templet.on(Laya.Event.ERROR, this, this.skeletonLoadError);
this.aniUrl = "res/outside/anim/role/" + GameConfig.HERO_DEFAULT_ANI_MODELID + "/" + GameConfig.HERO_DEFAULT_ANI_MODELID + ".sk";
this.templet.loadAni(this.aniUrl);
// this.skeletonAni.load(url,Laya.Handler.create(this,this.loadCompleted));
}
};
return BaseRole;
}(Laya.Sprite));
//# sourceMappingURL=BaseRole.js.map | var _this = _super.call(this) || this;
_this.templet = null;
_this.skeletonAni = null;
_this.aniCount = 0;
_this.aniScale = 1;
_this.LblName = null;
_this.roleBloodBar = null;
_this.showPriority = 0;
_this.showBloodBar = false;
_this.clipShadow = new Laya.Image("comp/img_shadow.png");
_this.clipShadow.height = 30;
_this.clipShadow.x = -_this.clipShadow.width / 2;
_this.clipShadow.y = -_this.clipShadow.height / 2;
_this.clipShadow.alpha = 0.2;
_this.addChild(_this.clipShadow);
return _this;
// EventManager.ins.addEvent(EventManager.TEST_CHANGE_ROLE_SCALE,this,this.testScale);
}
| identifier_body |
BaseRole.js | var __extends = (this && this.__extends) || (function () {
var extendStatics = Object.setPrototypeOf ||
({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) ||
function (d, b) { for (var p in b) if (b.hasOwnProperty(p)) d[p] = b[p]; };
return function (d, b) {
extendStatics(d, b);
function __() { this.constructor = d; }
d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __());
};
})();
var Skeleton = Laya.Skeleton;
/*
* 角色
*/
var BaseRole = /** @class */ (function (_super) {
__extends(BaseRole, _super);
function BaseRole() {
var _this = _super.call(this) || this;
_this.templet = null;
_this.skeletonAni = null;
_this.aniCount = 0;
_this.aniScale = 1;
_this.LblName = null;
_this.roleBloodBar = null;
_this.showPriority = 0;
_this.showBloodBar = false;
_this.clipShadow = new Laya.Image("comp/img_shadow.png");
_this.clipShadow.height = 30;
_this.clipShadow.x = -_this.clipShadow.width / 2;
_this.clipShadow.y = -_this.clipShadow.height / 2;
_this.clipShadow.alpha = 0.2;
_this.addChild(_this.clipShadow);
return _this;
// EventManager.ins.addEvent(EventManager.TEST_CHANGE_ROLE_SCALE,this,this.testScale);
}
// private testScale(ary):void
// {
// var roleID = ary[0];
// var sca = ary[1];
// if(this.roleVo && this.roleVo.id == roleID)
// {
// var s:number = this.roleVo.isEnemy ? 1 : -1;
// this.skeletonAni.scaleX = s * sca;
// this.skeletonAni.scaleY = sca;
// var bound = this.skeletonAni.getBounds(); // 加载完毕之后才能拿到有效的bounds
// console.log(this.roleVo.name,bound.width,bound.height);
// }
// }
BaseRole.prototype.initRole = function (baseRoleVo, showPriority, scale, parentDis, showBloodBar) {
this.baseRoleVo = baseRoleVo;
this.showPriority = showPriority;
this.showBloodBar = showBloodBar === undefined ? false : showBloodBar;
if (scale) {
this.aniScale = scale;
}
this.isLoaded = false;
this.templet = new Laya.Templet();
this.templet.on(Laya.Event.COMPLETE, this, this.loadCompleted);
this.templet.on(Laya.Event.ERROR, this, this.skeletonLoadError);
// this.skeletonAni = new Skeleton();
// this.skeletonAni.scale(this.aniScale,this.aniScale);
// this.skeletonAni.scaleX = this.baseRoleVo.scale * this.aniScale;
// this.addChild(this.skeletonAni);
if (parentDis) {
parentDis.addChild(this);
}
else {
LayerManager.ins.addToLayer(this, LayerManager.ROLE_LAYER, false, true, false);
}
this.visible = true;
};
BaseRole.prototype.showFloatFont = function (tipString) {
tipString = tipString === undefined ? "" : tipString;
var floatFontTip = ObjectPoolUtil.borrowObjcet(ObjectPoolUtil.FLOAT_FONT_TIPS);
if (floatFontTip) {
// floatFontTip. | @param aniID 动画id
*/
BaseRole.prototype.aniPlay = function (aniID, loop, caller, method, defRole) {
this.aniId = aniID;
this.loop = loop;
this.caller = caller;
this.method = method;
this.defRole = defRole;
if (this.isLoaded) {
loop = loop === undefined ? true : loop;
aniID = aniID % this.aniCount;
//>= aniCount默认播放第一个动画
if (this.skeletonAni) {
Laya.loader.on(/*laya.events.Event.ERROR*/ "error", this, this.skeletonLoadError);
// console.log("前........",this.baseRoleVo.name,aniID);
this.skeletonAni.player.on(Laya.Event.COMPLETE, this, this.onPlayCompleted);
var speedTime = GameDataManager.ins.isChallengeBoss ? GameConfig.BATTLE_ADDSPEED_TIMES : 1;
this.skeletonAni.playbackRate(speedTime);
this.skeletonAni.play(aniID, loop);
// console.log("........"+aniID);
}
}
else {
Laya.timer.frameOnce(this.showPriority * 6, this, this.skeletonAniLoad, null, false);
}
};
BaseRole.prototype.getSkillEffectInd = function () {
if (this.skeletonAni) {
return this.getChildIndex(this.skeletonAni);
}
return 0;
};
/**播放一次动画回调 */
BaseRole.prototype.onPlayCompleted = function () {
// console.log("后........",this.baseRoleVo.name,this.aniId);
if (this.aniId == RoleAniIndex.ATTACK && GameDataManager.showModuleViewInd == GameButtomTabIndex.BATTLE) {
SoundsManager.ins.playSound("res/outside/sound/effect/fit.wav");
}
this.skeletonAni.player.off(Laya.Event.COMPLETE, this, this.onPlayCompleted);
if (this.caller && this.method) {
// console.log(this.roleVo.name);
this.skeletonAni.paused();
this.method.call(this.caller, [this, this.defRole]);
}
};
BaseRole.prototype.skeletonAniLoad = function () {
//分帧加载
if (this.baseRoleVo) {
this.aniUrl = "res/outside/spine/role/" + this.baseRoleVo.modelId + "/" + this.baseRoleVo.modelId + ".sk";
// this.aniUrl = "res/outside/anim/role/sanjiaolong001/sanjiaolong001.sk";
// this.skeletonAni.load(this.aniUrl,Laya.Handler.create(this,this.loadCompleted));
this.templet.loadAni(this.aniUrl);
}
};
BaseRole.prototype.loadCompleted = function () {
// var bound = this.skeletonAni.getBounds(); // 加载完毕之后才能拿到有效的bounds
// console.log(this.roleVo.id,bound.width,bound.height);
if (!this.isLoaded) {
this.skeletonAni = this.templet.buildArmature(2);
if (this.baseRoleVo)
this.skeletonAni.scale(this.aniScale * this.baseRoleVo.scale, this.baseRoleVo.scale);
else
this.skeletonAni.scale(this.aniScale, 1);
this.addChild(this.skeletonAni);
this.isLoaded = true;
this.aniCount = this.skeletonAni.getAnimNum();
this.aniPlay(this.aniId, this.loop, this.caller, this.method);
// Laya.timer.once(100,this,this.initComponets);
if (this.showBloodBar) {
this.initComponets();
}
}
};
BaseRole.prototype.initComponets = function () {
// var bound:Rectangle = this.skeletonAni.getSelfBounds();
// this.aniWidth = bound.width + Math.abs(bound.x);
// this.aniHeight = bound.height + Math.abs(bound.y);
// console.log(this.baseRoleVo.name,bound);
//血条
// this.roleBloodBar = ObjectPoolUtil.borrowObjcet(ObjectPoolUtil.ROLE_BLOOD_BAR);
this.roleBloodBar = new RoleBloodBar();
this.roleBloodBar.visible = true;
// this.roleBloodBar.scaleX = 0.5;
if (this.baseRoleVo.isEnemy)
this.roleBloodBar.x = -30;
else
this.roleBloodBar.x = -60;
this.roleBloodBar.y = -180;
this.roleBloodBar.init();
this.addChild(this.roleBloodBar);
//名字
this.LblName = new Laya.Label();
this.LblName.width = 114;
this.LblName.x = this.roleBloodBar.x;
this.LblName.y = this.roleBloodBar.y - 30;
this.LblName.fontSize = 24;
this.LblName.color = "#00FF99";
this.LblName.align = "center";
this.LblName.text = this.baseRoleVo.name;
this.addChild(this.LblName);
};
BaseRole.prototype.setBlood = function (value) {
if (this.roleBloodBar) {
this.roleBloodBar.setProgress(value);
}
};
/**设置显示层级 */
BaseRole.prototype.setShowIndex = function (ind) {
if (this.parent && ind >= 0) {
this.parent.setChildIndex(this, ind);
}
};
BaseRole.prototype.run = function () {
this.aniPlay(RoleAniIndex.MOVE);
};
BaseRole.prototype.setVisible = function (bool) {
// Laya.timer.once(1000 / GameConfig.BATTLE_ADDSPEED_TIMES,this, this.setVis,[bool]);
Laya.timer.once(1000, this, this.setVis, [bool]);
};
BaseRole.prototype.setVis = function (bool) {
//延迟回调判断,复活就设置隐藏
if (this.baseRoleVo && this.baseRoleVo.isDeath) {
this.visible = bool;
}
};
BaseRole.prototype.dispose = function () {
this.parent.setChildIndex(this, 0);
this.removeSelf();
if (this.skeletonAni) {
Laya.loader.clearRes(this.skeletonAni.url);
this.skeletonAni.destroy();
}
this.skeletonAni = null;
if (this.LblName) {
this.LblName.removeSelf();
this.LblName = null;
}
if (this.roleBloodBar) {
this.roleBloodBar.removeSelf();
this.roleBloodBar = null;
// ObjectPoolUtil.stillObject(ObjectPoolUtil.ROLE_BLOOD_BAR,this.roleBloodBar);
}
this.baseRoleVo = null;
};
BaseRole.prototype.moveByMap = function (speed) {
};
/**加载出错用默认资源 */
BaseRole.prototype.skeletonLoadError = function (url) {
if (url.indexOf(this.aniUrl) != -1) {
if (this.templet) {
//释放老资源
this.templet.off(Laya.Event.COMPLETE, this, this.loadCompleted);
this.templet.off(Laya.Event.ERROR, this, this.skeletonLoadError);
this.templet.dispose();
this.templet = null;
}
this.templet = new Laya.Templet();
this.templet.on(Laya.Event.COMPLETE, this, this.loadCompleted);
this.templet.on(Laya.Event.ERROR, this, this.skeletonLoadError);
this.aniUrl = "res/outside/anim/role/" + GameConfig.HERO_DEFAULT_ANI_MODELID + "/" + GameConfig.HERO_DEFAULT_ANI_MODELID + ".sk";
this.templet.loadAni(this.aniUrl);
// this.skeletonAni.load(url,Laya.Handler.create(this,this.loadCompleted));
}
};
return BaseRole;
}(Laya.Sprite));
//# sourceMappingURL=BaseRole.js.map | setAttribute(40,"#ff0000");
// floatFontTip.show(tipString,this,-30,-200,0.5,40,80,this.baseRoleVo.isEnemy);
floatFontTip.showFlontClip(tipString, this, -30, -200, 0.5, 40, 80, this.baseRoleVo.isEnemy);
}
};
/**
*
* | conditional_block |
BaseRole.js | var __extends = (this && this.__extends) || (function () {
var extendStatics = Object.setPrototypeOf ||
({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) ||
function (d, b) { for (var p in b) if (b.hasOwnProperty(p)) d[p] = b[p]; };
return function (d, b) {
extendStatics(d, b);
function __() { this.constructor = d; }
d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __());
};
})();
var Skeleton = Laya.Skeleton;
/*
* 角色
*/
var BaseRole = /** @class */ (function (_super) {
__extends(BaseRole, _super);
function Base |
var _this = _super.call(this) || this;
_this.templet = null;
_this.skeletonAni = null;
_this.aniCount = 0;
_this.aniScale = 1;
_this.LblName = null;
_this.roleBloodBar = null;
_this.showPriority = 0;
_this.showBloodBar = false;
_this.clipShadow = new Laya.Image("comp/img_shadow.png");
_this.clipShadow.height = 30;
_this.clipShadow.x = -_this.clipShadow.width / 2;
_this.clipShadow.y = -_this.clipShadow.height / 2;
_this.clipShadow.alpha = 0.2;
_this.addChild(_this.clipShadow);
return _this;
// EventManager.ins.addEvent(EventManager.TEST_CHANGE_ROLE_SCALE,this,this.testScale);
}
// private testScale(ary):void
// {
// var roleID = ary[0];
// var sca = ary[1];
// if(this.roleVo && this.roleVo.id == roleID)
// {
// var s:number = this.roleVo.isEnemy ? 1 : -1;
// this.skeletonAni.scaleX = s * sca;
// this.skeletonAni.scaleY = sca;
// var bound = this.skeletonAni.getBounds(); // 加载完毕之后才能拿到有效的bounds
// console.log(this.roleVo.name,bound.width,bound.height);
// }
// }
BaseRole.prototype.initRole = function (baseRoleVo, showPriority, scale, parentDis, showBloodBar) {
this.baseRoleVo = baseRoleVo;
this.showPriority = showPriority;
this.showBloodBar = showBloodBar === undefined ? false : showBloodBar;
if (scale) {
this.aniScale = scale;
}
this.isLoaded = false;
this.templet = new Laya.Templet();
this.templet.on(Laya.Event.COMPLETE, this, this.loadCompleted);
this.templet.on(Laya.Event.ERROR, this, this.skeletonLoadError);
// this.skeletonAni = new Skeleton();
// this.skeletonAni.scale(this.aniScale,this.aniScale);
// this.skeletonAni.scaleX = this.baseRoleVo.scale * this.aniScale;
// this.addChild(this.skeletonAni);
if (parentDis) {
parentDis.addChild(this);
}
else {
LayerManager.ins.addToLayer(this, LayerManager.ROLE_LAYER, false, true, false);
}
this.visible = true;
};
BaseRole.prototype.showFloatFont = function (tipString) {
tipString = tipString === undefined ? "" : tipString;
var floatFontTip = ObjectPoolUtil.borrowObjcet(ObjectPoolUtil.FLOAT_FONT_TIPS);
if (floatFontTip) {
// floatFontTip.setAttribute(40,"#ff0000");
// floatFontTip.show(tipString,this,-30,-200,0.5,40,80,this.baseRoleVo.isEnemy);
floatFontTip.showFlontClip(tipString, this, -30, -200, 0.5, 40, 80, this.baseRoleVo.isEnemy);
}
};
/**
*
* @param aniID 动画id
*/
BaseRole.prototype.aniPlay = function (aniID, loop, caller, method, defRole) {
this.aniId = aniID;
this.loop = loop;
this.caller = caller;
this.method = method;
this.defRole = defRole;
if (this.isLoaded) {
loop = loop === undefined ? true : loop;
aniID = aniID % this.aniCount;
//>= aniCount默认播放第一个动画
if (this.skeletonAni) {
Laya.loader.on(/*laya.events.Event.ERROR*/ "error", this, this.skeletonLoadError);
// console.log("前........",this.baseRoleVo.name,aniID);
this.skeletonAni.player.on(Laya.Event.COMPLETE, this, this.onPlayCompleted);
var speedTime = GameDataManager.ins.isChallengeBoss ? GameConfig.BATTLE_ADDSPEED_TIMES : 1;
this.skeletonAni.playbackRate(speedTime);
this.skeletonAni.play(aniID, loop);
// console.log("........"+aniID);
}
}
else {
Laya.timer.frameOnce(this.showPriority * 6, this, this.skeletonAniLoad, null, false);
}
};
BaseRole.prototype.getSkillEffectInd = function () {
if (this.skeletonAni) {
return this.getChildIndex(this.skeletonAni);
}
return 0;
};
/**播放一次动画回调 */
BaseRole.prototype.onPlayCompleted = function () {
// console.log("后........",this.baseRoleVo.name,this.aniId);
if (this.aniId == RoleAniIndex.ATTACK && GameDataManager.showModuleViewInd == GameButtomTabIndex.BATTLE) {
SoundsManager.ins.playSound("res/outside/sound/effect/fit.wav");
}
this.skeletonAni.player.off(Laya.Event.COMPLETE, this, this.onPlayCompleted);
if (this.caller && this.method) {
// console.log(this.roleVo.name);
this.skeletonAni.paused();
this.method.call(this.caller, [this, this.defRole]);
}
};
BaseRole.prototype.skeletonAniLoad = function () {
//分帧加载
if (this.baseRoleVo) {
this.aniUrl = "res/outside/spine/role/" + this.baseRoleVo.modelId + "/" + this.baseRoleVo.modelId + ".sk";
// this.aniUrl = "res/outside/anim/role/sanjiaolong001/sanjiaolong001.sk";
// this.skeletonAni.load(this.aniUrl,Laya.Handler.create(this,this.loadCompleted));
this.templet.loadAni(this.aniUrl);
}
};
BaseRole.prototype.loadCompleted = function () {
// var bound = this.skeletonAni.getBounds(); // 加载完毕之后才能拿到有效的bounds
// console.log(this.roleVo.id,bound.width,bound.height);
if (!this.isLoaded) {
this.skeletonAni = this.templet.buildArmature(2);
if (this.baseRoleVo)
this.skeletonAni.scale(this.aniScale * this.baseRoleVo.scale, this.baseRoleVo.scale);
else
this.skeletonAni.scale(this.aniScale, 1);
this.addChild(this.skeletonAni);
this.isLoaded = true;
this.aniCount = this.skeletonAni.getAnimNum();
this.aniPlay(this.aniId, this.loop, this.caller, this.method);
// Laya.timer.once(100,this,this.initComponets);
if (this.showBloodBar) {
this.initComponets();
}
}
};
BaseRole.prototype.initComponets = function () {
// var bound:Rectangle = this.skeletonAni.getSelfBounds();
// this.aniWidth = bound.width + Math.abs(bound.x);
// this.aniHeight = bound.height + Math.abs(bound.y);
// console.log(this.baseRoleVo.name,bound);
//血条
// this.roleBloodBar = ObjectPoolUtil.borrowObjcet(ObjectPoolUtil.ROLE_BLOOD_BAR);
this.roleBloodBar = new RoleBloodBar();
this.roleBloodBar.visible = true;
// this.roleBloodBar.scaleX = 0.5;
if (this.baseRoleVo.isEnemy)
this.roleBloodBar.x = -30;
else
this.roleBloodBar.x = -60;
this.roleBloodBar.y = -180;
this.roleBloodBar.init();
this.addChild(this.roleBloodBar);
//名字
this.LblName = new Laya.Label();
this.LblName.width = 114;
this.LblName.x = this.roleBloodBar.x;
this.LblName.y = this.roleBloodBar.y - 30;
this.LblName.fontSize = 24;
this.LblName.color = "#00FF99";
this.LblName.align = "center";
this.LblName.text = this.baseRoleVo.name;
this.addChild(this.LblName);
};
BaseRole.prototype.setBlood = function (value) {
if (this.roleBloodBar) {
this.roleBloodBar.setProgress(value);
}
};
/**设置显示层级 */
BaseRole.prototype.setShowIndex = function (ind) {
if (this.parent && ind >= 0) {
this.parent.setChildIndex(this, ind);
}
};
BaseRole.prototype.run = function () {
this.aniPlay(RoleAniIndex.MOVE);
};
BaseRole.prototype.setVisible = function (bool) {
// Laya.timer.once(1000 / GameConfig.BATTLE_ADDSPEED_TIMES,this, this.setVis,[bool]);
Laya.timer.once(1000, this, this.setVis, [bool]);
};
BaseRole.prototype.setVis = function (bool) {
//延迟回调判断,复活就设置隐藏
if (this.baseRoleVo && this.baseRoleVo.isDeath) {
this.visible = bool;
}
};
BaseRole.prototype.dispose = function () {
this.parent.setChildIndex(this, 0);
this.removeSelf();
if (this.skeletonAni) {
Laya.loader.clearRes(this.skeletonAni.url);
this.skeletonAni.destroy();
}
this.skeletonAni = null;
if (this.LblName) {
this.LblName.removeSelf();
this.LblName = null;
}
if (this.roleBloodBar) {
this.roleBloodBar.removeSelf();
this.roleBloodBar = null;
// ObjectPoolUtil.stillObject(ObjectPoolUtil.ROLE_BLOOD_BAR,this.roleBloodBar);
}
this.baseRoleVo = null;
};
BaseRole.prototype.moveByMap = function (speed) {
};
/**加载出错用默认资源 */
BaseRole.prototype.skeletonLoadError = function (url) {
if (url.indexOf(this.aniUrl) != -1) {
if (this.templet) {
//释放老资源
this.templet.off(Laya.Event.COMPLETE, this, this.loadCompleted);
this.templet.off(Laya.Event.ERROR, this, this.skeletonLoadError);
this.templet.dispose();
this.templet = null;
}
this.templet = new Laya.Templet();
this.templet.on(Laya.Event.COMPLETE, this, this.loadCompleted);
this.templet.on(Laya.Event.ERROR, this, this.skeletonLoadError);
this.aniUrl = "res/outside/anim/role/" + GameConfig.HERO_DEFAULT_ANI_MODELID + "/" + GameConfig.HERO_DEFAULT_ANI_MODELID + ".sk";
this.templet.loadAni(this.aniUrl);
// this.skeletonAni.load(url,Laya.Handler.create(this,this.loadCompleted));
}
};
return BaseRole;
}(Laya.Sprite));
//# sourceMappingURL=BaseRole.js.map | Role() { | identifier_name |
BaseRole.js | var __extends = (this && this.__extends) || (function () {
var extendStatics = Object.setPrototypeOf ||
({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) ||
function (d, b) { for (var p in b) if (b.hasOwnProperty(p)) d[p] = b[p]; };
return function (d, b) {
extendStatics(d, b);
function __() { this.constructor = d; }
d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __());
};
})();
var Skeleton = Laya.Skeleton;
/*
* 角色
*/
var BaseRole = /** @class */ (function (_super) {
__extends(BaseRole, _super);
/**
 * Role sprite backed by a Laya skeleton animation, with an attached ground
 * shadow. Skeleton data is loaded lazily via initRole/aniPlay.
 * NOTE(review): defined inside the enclosing IIFE; _super is Laya.Sprite.
 */
function BaseRole() {
    var _this = _super.call(this) || this;
    _this.templet = null;       // skeleton template (Laya.Templet), set in initRole
    _this.skeletonAni = null;   // built skeleton instance, set in loadCompleted
    _this.aniCount = 0;         // number of animations in the loaded skeleton
    _this.aniScale = 1;         // extra scale applied on top of the role's own scale
    _this.LblName = null;       // name label, created in initComponets
    _this.roleBloodBar = null;  // HP bar, created in initComponets
    _this.showPriority = 0;     // frame-stagger index for deferred skeleton loads
    _this.showBloodBar = false; // whether to build the HP bar after loading
    // Semi-transparent drop shadow centered under the role.
    _this.clipShadow = new Laya.Image("comp/img_shadow.png");
    _this.clipShadow.height = 30;
    _this.clipShadow.x = -_this.clipShadow.width / 2;
    _this.clipShadow.y = -_this.clipShadow.height / 2;
    _this.clipShadow.alpha = 0.2;
    _this.addChild(_this.clipShadow);
    return _this;
    // EventManager.ins.addEvent(EventManager.TEST_CHANGE_ROLE_SCALE,this,this.testScale);
}
// private testScale(ary):void
// {
// var roleID = ary[0];
// var sca = ary[1];
// if(this.roleVo && this.roleVo.id == roleID)
// {
// var s:number = this.roleVo.isEnemy ? 1 : -1;
// this.skeletonAni.scaleX = s * sca;
// this.skeletonAni.scaleY = sca;
// var bound = this.skeletonAni.getBounds(); // 加载完毕之后才能拿到有效的bounds
// console.log(this.roleVo.name,bound.width,bound.height);
// }
// }
/**
 * Prepare the role for display and start listening for its skeleton load.
 * @param baseRoleVo   data object describing the role (modelId, scale, ...)
 * @param showPriority stagger index used to spread skeleton loads across frames
 * @param scale        optional extra animation scale
 * @param parentDis    optional parent display object; defaults to the role layer
 * @param showBloodBar whether to create the HP bar once loading completes
 */
BaseRole.prototype.initRole = function (baseRoleVo, showPriority, scale, parentDis, showBloodBar) {
    this.baseRoleVo = baseRoleVo;
    this.showPriority = showPriority;
    this.showBloodBar = showBloodBar === undefined ? false : showBloodBar;
    if (scale) {
        this.aniScale = scale;
    }
    this.isLoaded = false;
    // Fresh template per init; loadCompleted / skeletonLoadError handle the result.
    this.templet = new Laya.Templet();
    this.templet.on(Laya.Event.COMPLETE, this, this.loadCompleted);
    this.templet.on(Laya.Event.ERROR, this, this.skeletonLoadError);
    if (parentDis) {
        parentDis.addChild(this);
    }
    else {
        LayerManager.ins.addToLayer(this, LayerManager.ROLE_LAYER, false, true, false);
    }
    this.visible = true;
};
/** Pop a floating combat-text clip above the role (pooled object). */
BaseRole.prototype.showFloatFont = function (tipString) {
    var text = tipString === undefined ? "" : tipString;
    var tip = ObjectPoolUtil.borrowObjcet(ObjectPoolUtil.FLOAT_FONT_TIPS);
    if (!tip) {
        return;
    }
    tip.showFlontClip(text, this, -30, -200, 0.5, 40, 80, this.baseRoleVo.isEnemy);
};
/**
 * Remember the requested animation and play it if the skeleton is ready;
 * otherwise schedule the (frame-staggered) skeleton load — loadCompleted
 * replays the stored request afterwards.
 * @param aniID   animation index (wrapped modulo the animation count)
 * @param loop    whether to loop; defaults to true
 * @param caller  context object for the completion callback
 * @param method  callback invoked from onPlayCompleted (player is paused first)
 * @param defRole forwarded to the completion callback
 */
BaseRole.prototype.aniPlay = function (aniID, loop, caller, method, defRole) {
    // Store the request so it can be replayed once loading finishes.
    this.aniId = aniID;
    this.loop = loop;
    this.caller = caller;
    this.method = method;
    this.defRole = defRole;
    if (this.isLoaded) {
        loop = loop === undefined ? true : loop;
        // Indices >= aniCount wrap around to a valid animation.
        aniID = aniID % this.aniCount;
        if (this.skeletonAni) {
            Laya.loader.on(/*laya.events.Event.ERROR*/ "error", this, this.skeletonLoadError);
            this.skeletonAni.player.on(Laya.Event.COMPLETE, this, this.onPlayCompleted);
            // Boss challenges run playback at an accelerated rate.
            var speedTime = GameDataManager.ins.isChallengeBoss ? GameConfig.BATTLE_ADDSPEED_TIMES : 1;
            this.skeletonAni.playbackRate(speedTime);
            this.skeletonAni.play(aniID, loop);
        }
    }
    else {
        // Not loaded yet: stagger loads across frames by showPriority.
        Laya.timer.frameOnce(this.showPriority * 6, this, this.skeletonAniLoad, null, false);
    }
};
/** Child index of the skeleton, used to layer skill effects correctly; 0 when not loaded. */
BaseRole.prototype.getSkillEffectInd = function () {
    return this.skeletonAni ? this.getChildIndex(this.skeletonAni) : 0;
};
/**
 * One-shot COMPLETE handler for a skeleton animation cycle. Plays the hit
 * sound for attack animations in the battle tab, detaches itself, and — if a
 * callback was registered via aniPlay — pauses playback and invokes it.
 */
BaseRole.prototype.onPlayCompleted = function () {
    if (this.aniId == RoleAniIndex.ATTACK && GameDataManager.showModuleViewInd == GameButtomTabIndex.BATTLE) {
        SoundsManager.ins.playSound("res/outside/sound/effect/fit.wav");
    }
    this.skeletonAni.player.off(Laya.Event.COMPLETE, this, this.onPlayCompleted);
    if (this.caller && this.method) {
        // Freeze on the last frame until the callback decides what plays next.
        this.skeletonAni.paused();
        this.method.call(this.caller, [this, this.defRole]);
    }
};
/**
 * Deferred (frame-staggered) skeleton load: builds the .sk URL from the
 * role's model id and starts loading through the template.
 */
BaseRole.prototype.skeletonAniLoad = function () {
    // The role may have been disposed while waiting for its load slot.
    if (this.baseRoleVo) {
        this.aniUrl = "res/outside/spine/role/" + this.baseRoleVo.modelId + "/" + this.baseRoleVo.modelId + ".sk";
        this.templet.loadAni(this.aniUrl);
    }
};
/**
 * COMPLETE handler for the skeleton template: builds the armature, scales it,
 * attaches it, and replays the animation requested while loading.
 */
BaseRole.prototype.loadCompleted = function () {
    if (!this.isLoaded) {
        this.skeletonAni = this.templet.buildArmature(2);
        if (this.baseRoleVo)
            this.skeletonAni.scale(this.aniScale * this.baseRoleVo.scale, this.baseRoleVo.scale);
        else
            this.skeletonAni.scale(this.aniScale, 1);
        this.addChild(this.skeletonAni);
        this.isLoaded = true;
        this.aniCount = this.skeletonAni.getAnimNum();
        // Bug fix: forward the stored defRole as well. The old call omitted it,
        // so aniPlay overwrote this.defRole with undefined and the attack
        // completion callback lost its defender argument.
        this.aniPlay(this.aniId, this.loop, this.caller, this.method, this.defRole);
        if (this.showBloodBar) {
            this.initComponets();
        }
    }
};
/**
 * Build the optional UI attached to the role: HP bar and name label.
 * Called after the skeleton finishes loading when showBloodBar is set.
 */
BaseRole.prototype.initComponets = function () {
    // HP bar, offset differently for enemies and allies.
    this.roleBloodBar = new RoleBloodBar();
    this.roleBloodBar.visible = true;
    if (this.baseRoleVo.isEnemy)
        this.roleBloodBar.x = -30;
    else
        this.roleBloodBar.x = -60;
    this.roleBloodBar.y = -180;
    this.roleBloodBar.init();
    this.addChild(this.roleBloodBar);
    // Name label rendered just above the HP bar.
    this.LblName = new Laya.Label();
    this.LblName.width = 114;
    this.LblName.x = this.roleBloodBar.x;
    this.LblName.y = this.roleBloodBar.y - 30;
    this.LblName.fontSize = 24;
    this.LblName.color = "#00FF99";
    this.LblName.align = "center";
    this.LblName.text = this.baseRoleVo.name;
    this.addChild(this.LblName);
};
/** Update the HP bar progress, if a bar exists. */
BaseRole.prototype.setBlood = function (value) {
    var bar = this.roleBloodBar;
    if (bar) {
        bar.setProgress(value);
    }
};
/** Move this role to the given child index of its parent (display layering). */
BaseRole.prototype.setShowIndex = function (ind) {
    if (ind >= 0 && this.parent) {
        this.parent.setChildIndex(this, ind);
    }
};
BaseRole.prototype.run = function () {
this.aniPlay(RoleAniIndex.MOVE);
}; | };
/**
 * Delayed-visibility helper: only applies the change if the role is (still)
 * dead when the timer fires, so a revived role is not hidden by a stale call.
 */
BaseRole.prototype.setVis = function (bool) {
    var vo = this.baseRoleVo;
    if (vo && vo.isDeath) {
        this.visible = bool;
    }
};
/**
 * Remove the role from the stage and release its display resources
 * (skeleton, cached animation resource, name label, HP bar).
 */
BaseRole.prototype.dispose = function () {
    // Guard: dispose may run before the role was ever added to a parent;
    // the old unconditional setChildIndex crashed in that case.
    if (this.parent) {
        this.parent.setChildIndex(this, 0);
    }
    this.removeSelf();
    if (this.skeletonAni) {
        Laya.loader.clearRes(this.skeletonAni.url);
        this.skeletonAni.destroy();
    }
    this.skeletonAni = null;
    if (this.LblName) {
        this.LblName.removeSelf();
        this.LblName = null;
    }
    if (this.roleBloodBar) {
        this.roleBloodBar.removeSelf();
        this.roleBloodBar = null;
    }
    this.baseRoleVo = null;
};
/** Per-frame map-movement hook; intentionally empty here — presumably overridden by subclasses (TODO confirm). */
BaseRole.prototype.moveByMap = function (speed) {
};
/**
 * Fallback for a failed skeleton load: releases the broken template and
 * retries with the default hero model resource.
 */
BaseRole.prototype.skeletonLoadError = function (url) {
    if (url.indexOf(this.aniUrl) == -1) {
        return; // the error belongs to a different resource
    }
    var old = this.templet;
    if (old) {
        // Detach listeners and free the failed template before replacing it.
        old.off(Laya.Event.COMPLETE, this, this.loadCompleted);
        old.off(Laya.Event.ERROR, this, this.skeletonLoadError);
        old.dispose();
        this.templet = null;
    }
    var fallback = new Laya.Templet();
    fallback.on(Laya.Event.COMPLETE, this, this.loadCompleted);
    fallback.on(Laya.Event.ERROR, this, this.skeletonLoadError);
    this.aniUrl = "res/outside/anim/role/" + GameConfig.HERO_DEFAULT_ANI_MODELID + "/" + GameConfig.HERO_DEFAULT_ANI_MODELID + ".sk";
    this.templet = fallback;
    fallback.loadAni(this.aniUrl);
};
return BaseRole;
}(Laya.Sprite));
//# sourceMappingURL=BaseRole.js.map | BaseRole.prototype.setVisible = function (bool) {
// Laya.timer.once(1000 / GameConfig.BATTLE_ADDSPEED_TIMES,this, this.setVis,[bool]);
Laya.timer.once(1000, this, this.setVis, [bool]); | random_line_split |
preprocessorImpl.go | // Copyright (C) 2017 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package preprocessor
import (
"bytes"
"fmt"
"github.com/google/gapid/core/text/parse"
"github.com/google/gapid/gapis/api/gles/glsl/ast"
)
// ifEntry is a structure containing the data necessary for proper evaluation of #if*
// preprocessor directives. One entry is pushed per #if/#ifdef/#ifndef and
// popped by the matching #endif.
type ifEntry struct {
	HadElse  bool // Whether we already encountered an #else block
	Skipping bool // Whether the current block should be skipped
	SkipElse bool // Whether all else and elif blocks should be skipped
}
// macroDefinition describes one #define, either object-like or function-like.
type macroDefinition struct {
	name       string          // macro name
	function   bool            // Whether this is a function macro.
	argCount   int             // The number of arguments of the macro.
	definition []macroExpander // The macro definition as a list of macroExpanders.
}
// preprocessorImpl stores the internal state of a preprocessor instance.
type preprocessorImpl struct {
	err          ast.ErrorCollector
	lexer        *lexer
	macros       map[string]macroDefinition // All currently defined macros.
	version      string                     // The shader version declared with #version.
	extensions   []Extension                // All encountered #extension directives.
	ifStack      []ifEntry                  // The stack of all encountered #if directives.
	line         int                        // The current line.
	currentToken *tokenExpansion            // One-token lookahead cache for Peek/Next.
	evaluator    ExpressionEvaluator        // Evaluates #if/#elif expressions.
}
// Version returns the version string declared by a #version directive, if any.
func (p *preprocessorImpl) Version() string {
	return p.version
}

// Extensions returns all #extension declarations encountered so far.
func (p *preprocessorImpl) Extensions() []Extension {
	return p.extensions
}

// Errors returns the accumulated lexer and preprocessor errors.
func (p *preprocessorImpl) Errors() []error {
	return ast.ConcatErrors(p.lexer.err.GetErrors(), p.err.GetErrors())
}
// skipping reports whether the current token lies inside a skipped
// conditional region; only the innermost #if entry decides (outer skipping
// entries force inner ones to skip when they are pushed).
func (p *preprocessorImpl) skipping() bool {
	if n := len(p.ifStack); n > 0 {
		return p.ifStack[n-1].Skipping
	}
	return false
}
// tokenReader is an internal interface encapsulating a stream of tokens.
// Next consumes and returns the next token; Peek returns it without
// consuming. End of input is signalled by a token whose Info.Token is nil.
type tokenReader interface {
	Next() tokenExpansion
	Peek() tokenExpansion
}
// listReader is an implementation of tokenReader which reads tokens from a list. It is used to
// rescan a macro expansion to expand macros recursively. It contains a nested tokenReader, which
// is read from after the own token list. This happens in case of recursive function macros with
// unbalanced parenthesis.
type listReader struct {
	list []tokenExpansion // tokens served first, in order
	next tokenReader      // optional fallback once list is exhausted (may be nil)
}
// Next pops the first queued token, falling back to the nested reader (if
// any) once the list is exhausted; the zero value marks end of input.
func (r *listReader) Next() (t tokenExpansion) {
	if len(r.list) == 0 {
		if r.next != nil {
			t = r.next.Next()
		}
		return
	}
	t = r.list[0]
	r.list = r.list[1:]
	return
}
// Peek returns the token Next would return, without consuming it.
func (r *listReader) Peek() (t tokenExpansion) {
	if len(r.list) == 0 {
		if r.next != nil {
			t = r.next.Peek()
		}
		return
	}
	t = r.list[0]
	return
}
// processList is a helper for processMacro: it macro-expands every token
// still queued in r's own list (tokens pulled from r.next are handled by the
// expansion of the list tokens themselves).
func (p *preprocessorImpl) processList(r *listReader) []tokenExpansion {
	var out []tokenExpansion
	for len(r.list) != 0 {
		out = append(out, p.processMacro(r.Next(), r)...)
	}
	return out
}
// readMacroArgs reads macro arguments. It returns the arguments as a list of lists of tokens.
// Failure is reported by the second return value.
//
// The reader is positioned just after the opening '(' of the call. Arguments
// are separated by top-level commas; a nesting counter tracks inner
// parentheses so commas inside them do not split arguments. The closing ')'
// of the call is NOT consumed — the caller reads it for its hide set.
func (p *preprocessorImpl) readMacroArgs(reader tokenReader) (args [][]tokenExpansion, ok bool) {
	var arg []tokenExpansion // currently processed argument
	level := 0               // number of nested parenthesis
	for {
		if reader.Peek().Info.Token == nil {
			// nil token is the EOF pseudo-token.
			p.err.Errorf("Unexpected end of file while processing a macro.")
			return args, false
		}
		if level == 0 {
			switch reader.Peek().Info.Token {
			case OpRParen:
				// Closing paren of the call: finish the final argument.
				args = append(args, arg)
				return args, true
			case OpLParen:
				level++
				arg = append(arg, reader.Next())
				continue
			case ast.BoComma:
				// Top-level comma: argument boundary (comma itself dropped).
				reader.Next()
				args = append(args, arg)
				arg = nil
				continue
			}
		}
		// Inside nested parens (or an ordinary token at level 0): keep the
		// token as part of the current argument, adjusting the nesting level.
		switch reader.Peek().Info.Token {
		case OpRParen:
			level--
			arg = append(arg, reader.Next())
		case OpLParen:
			level++
			arg = append(arg, reader.Next())
		default:
			arg = append(arg, reader.Next())
		}
	}
}
// parseMacroCallArgs reads arguments to a function macro, pre-expands them and computes the
// intersection of their hide sets. It reads the argument from the specified token reader. In
// case of errors the hide set is nil.
func (p *preprocessorImpl) parseMacroCallArgs(reader tokenReader, macro tokenExpansion,
	argCount int) ([][]tokenExpansion, hideSet) {
	if reader.Peek().Info.Token != OpLParen {
		// Function macros are not expanded if the next token is not '('.
		return nil, nil
	}
	reader.Next()
	args, ok := p.readMacroArgs(reader)
	if !ok {
		return nil, nil
	}
	// Consume the closing ')'; its hide set participates in the intersection.
	lastTok := reader.Next()
	if len(args) != argCount {
		p.err.Errorf("Incorrect number of arguments to macro '%v': expected %d, got %d.",
			macro.Info.Token, argCount, len(args))
		// Try to recover by padding args
		for len(args) < argCount {
			args = append(args, nil)
		}
	}
	// Macro argument pre-expansion
	for i := range args {
		args[i] = p.processList(&listReader{args[i], nil})
	}
	set := intersect(macro.HideSet, lastTok.HideSet)
	return args, set
}
// processMacro checks t for macro definitions and fully expands it. reader is an interface to
// the following tokens, needed for processing function macro invocations.
// Per-token hide sets stop infinite recursion: a token carrying its own
// macro's name in its hide set is never expanded again.
func (p *preprocessorImpl) processMacro(t tokenExpansion, reader tokenReader) []tokenExpansion {
	// eof pseudo-token
	if t.Info.Token == nil {
		return []tokenExpansion{t}
	}
	name := t.Info.Token.String()
	def, present := p.macros[name]
	if !present {
		// no expansion needed
		return []tokenExpansion{t}
	}
	set := t.HideSet
	if _, present := set[name]; present {
		// This macro should not be expanded.
		return []tokenExpansion{t}
	}
	var args [][]tokenExpansion
	if def.function {
		args, set = p.parseMacroCallArgs(reader, t, def.argCount)
		if set == nil {
			// Not a call (no '(') or malformed arguments: leave unexpanded.
			return []tokenExpansion{t}
		}
	}
	list := make([]tokenExpansion, 0, len(def.definition))
	// Substitute arguments into macro definition
	for _, expander := range def.definition {
		list = append(list, expander(args)...)
	}
	// Extend the hide sets. (e is a struct copy, but HideSet is a map, so
	// mutation through the copy reaches the shared set.)
	for _, e := range list {
		e.HideSet.AddAll(set)
		e.HideSet[name] = struct{}{}
	}
	// Token pasting: fuse X ## Y into a single identifier token.
	for i := 0; i < len(list); i++ {
		for i+2 < len(list) && list[i+1].Info.Token.String() == "##" {
			newIdentifier := list[i].Info.Token.String() + list[i+2].Info.Token.String()
			list[i] = newTokenExpansion(TokenInfo{Token: Identifier(newIdentifier)})
			list = append(list[:i+1], list[i+3:]...) // Remove the ## and following token
		}
	}
	// Expand macros in the definition recursively
	return p.processList(&listReader{list, reader})
}
// getDirectiveArguments collects the argument tokens of a preprocessor
// directive: everything up to (not including) the next newline or EOF. When
// emptyOk is false, an empty argument list is reported as an error.
func (p *preprocessorImpl) getDirectiveArguments(info TokenInfo, emptyOk bool) []TokenInfo {
	directive := info.Token
	var args []TokenInfo
	for {
		next := p.lexer.Peek()
		if next.Token == nil || next.Newline {
			break
		}
		args = append(args, p.lexer.Next())
	}
	if len(args) == 0 && !emptyOk {
		p.err.Errorf("%s needs an argument.", directive)
	}
	return args
}
// isIdentOrKeyword reports whether t is usable as a macro or argument name:
// an identifier, a keyword, or a bare type name.
func isIdentOrKeyword(t TokenInfo) bool {
	switch t.Token.(type) {
	case Identifier:
		return true
	case Keyword:
		return true
	case ast.BareType:
		return true
	}
	return false
}
// parseDefMacroArgs consumes the parameter list of a function-macro
// definition, i.e. the tokens following `#define FOO(`. It returns the
// remaining tokens (the macro body) and a parameter-name -> position map.
// On error it reports via p.err and returns (nil, nil).
func (p *preprocessorImpl) parseDefMacroArgs(macro Token,
	args []TokenInfo) (rest []TokenInfo, argMap map[string]int) {
	argMap = make(map[string]int)
	for {
		// Need at least "<name>" followed by ',' or ')'.
		if len(args) <= 1 {
			p.err.Errorf("Macro definition ended unexpectedly.")
			return nil, nil
		}
		if !isIdentOrKeyword(args[0]) {
			p.err.Errorf("Invalid function macro definition. "+
				"Expected an identifier, got '%s'.", args[0].Token)
			// Bug fix: this path used a bare `return`, leaking a non-nil
			// argMap; the caller took that as success and installed a broken
			// macro. Fail consistently with the other error paths.
			return nil, nil
		}
		name := args[0].Token.String()
		if _, ok := argMap[name]; ok {
			p.err.Errorf("Macro '%s' contains two arguments named '%s'.", macro, name)
		}
		argMap[name] = len(argMap)
		switch args[1].Token {
		case OpRParen:
			// End of the parameter list; the remainder is the macro body.
			return args[2:], argMap
		case ast.BoComma:
			args = args[2:]
			continue
		default:
			p.err.Errorf("Invalid function macro definition. "+
				"Expected ',', ')', got '%s'.", args[1].Token)
			return nil, nil
		}
	}
}
// processDefine handles a #define directive. args holds the macro name token
// followed by the replacement tokens. An opening '(' immediately following
// the name (no whitespace) introduces a function-macro parameter list.
func (p *preprocessorImpl) processDefine(args []TokenInfo) {
	macro := args[0]
	name := macro.Token.String()
	// Redefinition replaces any previous macro. delete is a no-op on a
	// missing key, so the old existence check was redundant; the delete also
	// ensures a broken redefinition below does not leave the old macro alive.
	delete(p.macros, name)
	args = args[1:]
	if len(args) == 0 || args[0].Whitespace || args[0].Token != OpLParen {
		// Object-like macro: the whole remainder is the replacement list.
		expansion := make([]macroExpander, len(args))
		for i := range args {
			expansion[i] = args[i].expand
		}
		p.macros[name] = macroDefinition{name, false, 0, expansion}
		return
	}
	// Function-like macro: parse the parameter list after the '('.
	body, argMap := p.parseDefMacroArgs(macro.Token, args[1:])
	if argMap == nil {
		return
	}
	expansion := make([]macroExpander, len(body))
	for i := range body {
		if pos, ok := argMap[body[i].Token.String()]; ok {
			// Parameter reference: substitute the corresponding call argument.
			expansion[i] = argumentExpander(pos).expand
		} else {
			expansion[i] = body[i].expand
		}
	}
	p.macros[name] = macroDefinition{name, true, len(argMap), expansion}
}
// processDirectives drains consecutive preprocessor directives from the
// input stream, handing each directive keyword to processDirective.
func (p *preprocessorImpl) processDirectives() {
	for {
		if _, isDirective := p.lexer.Peek().Token.(ppKeyword); !isDirective {
			return
		}
		p.processDirective(p.lexer.Next())
	}
}
// evaluateDefined turns a defined(X) / defined X operand into an integer
// token: 1 if the macro is currently defined, 0 otherwise.
func (p *preprocessorImpl) evaluateDefined(arg TokenInfo) tokenExpansion {
	value := ast.IntValue(0)
	if _, defined := p.macros[arg.Token.String()]; defined {
		value = ast.IntValue(1)
	}
	return newTokenExpansion(TokenInfo{Token: value})
}
func (p *preprocessorImpl) evaluateIf(args []TokenInfo) bool {
// append fake EOF
lastToken := args[len(args)-1].Cst.Token()
eof := &parse.Leaf{}
eof.SetToken(parse.Token{Source: lastToken.Source, Start: lastToken.End, End: lastToken.End})
args = append(args, TokenInfo{Token: nil, Cst: eof})
var list []tokenExpansion
// convert args to tokenExpansions and evaluate defined(X)
for i := 0; i < len(args); i++ {
if args[i].Token == Identifier("defined") {
if i+1 < len(args) && isIdentOrKeyword(args[i+1]) {
list = append(list, p.evaluateDefined(args[i+1]))
i++
} else if i+3 < len(args) && args[i+1].Token == OpLParen &&
isIdentOrKeyword(args[i+2]) && args[i+3].Token == OpRParen {
list = append(list, p.evaluateDefined(args[i+2]))
i += 3
} else {
p.err.Errorf("Operator 'defined' used incorrectly.") | }
}
reader := &listReader{list: list} // reader will read the arguments
worker := &listWorker{reader, p} // worker will expand them
pp := &Preprocessor{impl: worker} // pp will provide the lookahead
val, err := p.evaluator(pp) // and evaluator will evalate them
p.err.Error(err...)
return val != 0
}
// processDirective executes a single preprocessor directive whose keyword
// token is info; it consumes the directive's arguments from the lexer.
// Conditional state lives on p.ifStack; most directives are no-ops while the
// current region is being skipped.
func (p *preprocessorImpl) processDirective(info TokenInfo) {
	switch info.Token {
	case ppDefine:
		args := p.getDirectiveArguments(info, false)
		if p.skipping() || args == nil {
			return
		}
		p.processDefine(args)
	case ppUndef:
		args := p.getDirectiveArguments(info, false)
		if p.skipping() || args == nil {
			return
		}
		// Undefining an unknown macro is an error (unlike in C).
		if _, ok := p.macros[args[0].Token.String()]; !ok {
			p.err.Errorf("Macro '%s' not defined.", args[0].Token)
			return
		}
		delete(p.macros, args[0].Token.String())
	case ppIf:
		args := p.getDirectiveArguments(info, false)
		if p.skipping() || args == nil {
			// Skip both of the branches if the parent condition evaluated to false.
			// We intentionally do not evaluate the condition since it might be invalid.
			p.ifStack = append(p.ifStack, ifEntry{Skipping: true, SkipElse: true})
			return
		}
		val := p.evaluateIf(args)
		p.ifStack = append(p.ifStack, ifEntry{Skipping: !val, SkipElse: val})
	case ppElif:
		args := p.getDirectiveArguments(info, true)
		if len(p.ifStack) == 0 {
			p.err.Errorf("Unmatched #elif.")
			return
		}
		entry := &p.ifStack[len(p.ifStack)-1]
		if entry.HadElse {
			p.err.Errorf("#elif after #else.")
			entry.Skipping = true
			return
		}
		// SkipElse is set once an earlier branch was taken: later branches
		// are then skipped without evaluating their conditions.
		if entry.SkipElse {
			entry.Skipping = true
		} else {
			val := p.evaluateIf(args)
			entry.Skipping = !val
			entry.SkipElse = val
		}
		return
	case ppVersion:
		args := p.getDirectiveArguments(info, false)
		if len(args) > 0 {
			p.version = args[0].Token.String()
		} else {
			p.err.Errorf("expected version number after #version")
		}
		return
	// TODO: support #pragma instead of silently ignoring it.
	case ppPragma:
		_ = p.getDirectiveArguments(info, false)
		return
	case ppExtension:
		args := p.getDirectiveArguments(info, false)
		if p.skipping() {
			return
		}
		// Expected shape: `name : behaviour` (exactly three tokens).
		if len(args) == 3 {
			name, nameOk := args[0].Token.(Identifier)
			colonOk := args[1].Token == OpColon
			behaviour, behaviourOk := args[2].Token.(Identifier)
			if nameOk && colonOk && behaviourOk {
				extension := Extension{Name: name.String(), Behaviour: behaviour.String()}
				p.extensions = append(p.extensions, extension)
				return
			}
		}
		p.err.Errorf("#extension should have the form '#extension name : behaviour'")
		return
	case ppIfdef, ppIfndef:
		args := p.getDirectiveArguments(info, false)
		if p.skipping() {
			// Skip both of the branches if the parent condition evaluated to false.
			p.ifStack = append(p.ifStack, ifEntry{Skipping: true, SkipElse: true})
			return
		}
		var defined bool
		if args == nil {
			defined = false
		} else {
			_, defined = p.macros[args[0].Token.String()]
		}
		// #ifdef enters the block when defined; #ifndef when not defined.
		value := defined == (info.Token == ppIfdef)
		p.ifStack = append(p.ifStack, ifEntry{Skipping: !value, SkipElse: value})
	case ppElse:
		_ = p.getDirectiveArguments(info, true)
		if len(p.ifStack) == 0 {
			p.err.Errorf("Unmatched #else.")
			return
		}
		entry := &p.ifStack[len(p.ifStack)-1]
		if entry.HadElse {
			p.err.Errorf("#if directive has multiple #else directives.")
			entry.Skipping = true
			return
		}
		entry.HadElse = true
		entry.Skipping = entry.SkipElse
	case ppEndif:
		_ = p.getDirectiveArguments(info, true)
		if len(p.ifStack) == 0 {
			p.err.Errorf("Unmatched #endif.")
			return
		}
		p.ifStack = p.ifStack[:len(p.ifStack)-1]
	case ppLine:
		args := p.getDirectiveArguments(info, true)
		if len(args) != 1 && len(args) != 2 {
			p.err.Errorf("expected line/file number after #line")
		}
	case ppError:
		args := p.getDirectiveArguments(info, true)
		if p.skipping() {
			return
		}
		// Reassemble the message, keeping the original whitespace captured in
		// the CST prefix/suffix of each token.
		var msg bytes.Buffer
		for _, i := range args {
			i.Cst.Prefix().WriteTo(&msg)
			msg.Write([]byte(i.Token.String()))
			i.Cst.Suffix().WriteTo(&msg)
		}
		p.err.Errorf(msg.String())
	}
}
// addBuiltinMacro registers a predefined object macro whose single expander
// produces its value.
func addBuiltinMacro(macros map[string]macroDefinition, name string, expander macroExpander) {
	def := macroDefinition{name: name, definition: []macroExpander{expander}}
	macros[name] = def
}
// newPreprocessorImpl creates a preprocessor over the given source and seeds
// the predefined macros (__LINE__, __FILE__, __VERSION__, GL_ES). file is the
// numeric id reported by __FILE__ and used in diagnostics.
func newPreprocessorImpl(data string, eval ExpressionEvaluator, file int) *preprocessorImpl {
	p := &preprocessorImpl{
		lexer:     newLexer(fmt.Sprintf("File %v", file), data),
		macros:    make(map[string]macroDefinition),
		evaluator: eval,
	}
	addBuiltinMacro(p.macros, "__LINE__", p.expandLine)
	addBuiltinMacro(p.macros, "__FILE__", TokenInfo{Token: ast.IntValue(file)}.expand)
	addBuiltinMacro(p.macros, "__VERSION__", TokenInfo{Token: ast.IntValue(300)}.expand)
	addBuiltinMacro(p.macros, "GL_ES", TokenInfo{Token: ast.IntValue(1)}.expand)
	return p
}
//////////////////////////// tokenReader interface /////////////////////////
// Peek returns the next significant token without consuming it. It first
// processes any pending directives, drops tokens inside skipped conditional
// regions, and caches the result in p.currentToken for Next.
func (p *preprocessorImpl) Peek() tokenExpansion {
	for p.currentToken == nil {
		// process any preprocessor directives
		p.processDirectives()
		tok := newTokenExpansion(p.lexer.Next())
		p.line, _ = tok.Info.Cst.Token().Cursor()
		if tok.Info.Token == nil {
			// EOF: report unterminated conditionals before handing it out.
			if len(p.ifStack) > 0 {
				p.err.Errorf("Unterminated #if directive at the end of file.")
			}
			p.currentToken = &tok
		} else if !p.skipping() {
			p.currentToken = &tok
		}
	}
	return *p.currentToken
}
// Next consumes and returns the next significant token, invalidating the
// lookahead cache filled by Peek.
func (p *preprocessorImpl) Next() tokenExpansion {
	tok := p.Peek()
	p.currentToken = nil
	return tok
}
//////////////////////////// worker interface /////////////////////////
func (p *preprocessorImpl) Work() []tokenExpansion {
return p.processMacro(p.Next(), p)
} | }
} else {
list = append(list, newTokenExpansion(args[i])) | random_line_split |
preprocessorImpl.go | // Copyright (C) 2017 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package preprocessor
import (
"bytes"
"fmt"
"github.com/google/gapid/core/text/parse"
"github.com/google/gapid/gapis/api/gles/glsl/ast"
)
// ifEntry is a structure containing the data necessary for proper evaluation of #if*
// preprocessor directives.
type ifEntry struct {
HadElse bool // Whether we already encountered an #else block
Skipping bool // Whether the current block should be skipped
SkipElse bool // Whether all else and elif blocks should be skipped
}
type macroDefinition struct {
name string // macro name
function bool // Whether this is a function macro.
argCount int // The number of arguments of the macro.
definition []macroExpander // The macro definition as a list of macroExpanders.
}
// preprocessorImpl stores the internal state of a preprocessor instance.
type preprocessorImpl struct {
err ast.ErrorCollector
lexer *lexer
macros map[string]macroDefinition // All currently defined macros.
version string // The shader version declared with #version.
extensions []Extension // All encountered #extension directives.
ifStack []ifEntry // The stack of all encountered #if directives.
line int // The current line.
currentToken *tokenExpansion
evaluator ExpressionEvaluator
}
func (p *preprocessorImpl) Version() string {
return p.version
}
func (p *preprocessorImpl) Extensions() []Extension {
return p.extensions
}
func (p *preprocessorImpl) Errors() []error {
return ast.ConcatErrors(p.lexer.err.GetErrors(), p.err.GetErrors())
}
// skipping returns true if we should skip this token. We skip if any of the #if directives in
// the stack says we should skip.
func (p *preprocessorImpl) skipping() (skip bool) {
c := len(p.ifStack)
return c > 0 && p.ifStack[c-1].Skipping
}
// tokenReader is an internal interface encapsulating a stream of tokens.
type tokenReader interface {
Next() tokenExpansion
Peek() tokenExpansion
}
// listReader is an implementation of tokenReader which reads tokens from a list. It is used to
// rescan a macro expansion to expand macros recursively. It contains a nested tokenReader, which
// is read from after the own token list. This happens in case of recursive function macros with
// unbalanced parenthesis.
type listReader struct {
list []tokenExpansion
next tokenReader
}
func (r *listReader) Next() (t tokenExpansion) {
if len(r.list) > 0 {
t = r.list[0]
r.list = r.list[1:]
} else if r.next != nil {
t = r.next.Next()
}
return
}
func (r *listReader) Peek() (t tokenExpansion) {
if len(r.list) > 0 {
t = r.list[0]
} else if r.next != nil {
t = r.next.Peek()
}
return
}
// processList is a helper function for processMacro. It calls processMacro on all tokens in the
// list.
func (p *preprocessorImpl) processList(r *listReader) (result []tokenExpansion) {
for len(r.list) > 0 {
token := r.Next()
result = append(result, p.processMacro(token, r)...)
}
return
}
// readMacroArgs reads macro arguments. It returns the arguments as a list of lists of tokens.
// Failure is reported by the second return value.
func (p *preprocessorImpl) readMacroArgs(reader tokenReader) (args [][]tokenExpansion, ok bool) {
var arg []tokenExpansion // currently processed argument
level := 0 // number of nested parenthesis
for {
if reader.Peek().Info.Token == nil {
p.err.Errorf("Unexpected end of file while processing a macro.")
return args, false
}
if level == 0 {
switch reader.Peek().Info.Token {
case OpRParen:
args = append(args, arg)
return args, true
case OpLParen:
level++
arg = append(arg, reader.Next())
continue
case ast.BoComma:
reader.Next()
args = append(args, arg)
arg = nil
continue
}
}
switch reader.Peek().Info.Token {
case OpRParen:
level--
arg = append(arg, reader.Next())
case OpLParen:
level++
arg = append(arg, reader.Next())
default:
arg = append(arg, reader.Next())
}
}
}
// parseMacroCallArgs reads arguments to a function macro, pre-expands them and computes the
// intersection of their hide sets. It reads the argument from the specified token reader. In
// case of errors the hide set is nil.
func (p *preprocessorImpl) parseMacroCallArgs(reader tokenReader, macro tokenExpansion,
argCount int) ([][]tokenExpansion, hideSet) {
if reader.Peek().Info.Token != OpLParen {
// Function macros are not expanded if the next token is not '('.
return nil, nil
}
reader.Next()
args, ok := p.readMacroArgs(reader)
if !ok {
return nil, nil
}
lastTok := reader.Next()
if len(args) != argCount {
p.err.Errorf("Incorrect number of arguments to macro '%v': expected %d, got %d.",
macro.Info.Token, argCount, len(args))
// Try to recover by padding args
for len(args) < argCount |
}
// Macro argument pre-expansion
for i := range args {
args[i] = p.processList(&listReader{args[i], nil})
}
set := intersect(macro.HideSet, lastTok.HideSet)
return args, set
}
// processMacro checks t for macro definitions and fully expands it. reader is an interface to
// the following tokens, needed for processing function macro invocations.
func (p *preprocessorImpl) processMacro(t tokenExpansion, reader tokenReader) []tokenExpansion {
// eof pseudo-token
if t.Info.Token == nil {
return []tokenExpansion{t}
}
name := t.Info.Token.String()
def, present := p.macros[name]
if !present {
// no expansion needed
return []tokenExpansion{t}
}
set := t.HideSet
if _, present := set[name]; present {
// This macro should not be expanded.
return []tokenExpansion{t}
}
var args [][]tokenExpansion
if def.function {
args, set = p.parseMacroCallArgs(reader, t, def.argCount)
if set == nil {
return []tokenExpansion{t}
}
}
list := make([]tokenExpansion, 0, len(def.definition))
// Substitute arguments into macro definition
for _, expander := range def.definition {
list = append(list, expander(args)...)
}
// Extend the hide sets
for _, e := range list {
e.HideSet.AddAll(set)
e.HideSet[name] = struct{}{}
}
// Token pasting
for i := 0; i < len(list); i++ {
for i+2 < len(list) && list[i+1].Info.Token.String() == "##" {
newIdentifier := list[i].Info.Token.String() + list[i+2].Info.Token.String()
list[i] = newTokenExpansion(TokenInfo{Token: Identifier(newIdentifier)})
list = append(list[:i+1], list[i+3:]...) // Remove the ## and following token
}
}
// Expand macros in the definition recursively
return p.processList(&listReader{list, reader})
}
// getDirectiveArguments is a helper function used to read the arguments of a preprocessor
// directive. It consumes all tokens until the newline and returns them. If emptyOk is false, it
// will raise an error in the case of an empty argument list.
func (p *preprocessorImpl) getDirectiveArguments(info TokenInfo, emptyOk bool) []TokenInfo {
dir := info.Token
var ret []TokenInfo
for info = p.lexer.Peek(); info.Token != nil && !info.Newline; info = p.lexer.Peek() {
ret = append(ret, p.lexer.Next())
}
if len(ret) == 0 && !emptyOk {
p.err.Errorf("%s needs an argument.", dir)
}
return ret
}
func isIdentOrKeyword(t TokenInfo) bool {
switch t.Token.(type) {
case Identifier, Keyword, ast.BareType:
return true
default:
return false
}
}
// parseDefMacroArgs consumes the parameter list of a function-macro
// definition, i.e. the tokens following `#define FOO(`. It returns the
// remaining tokens (the macro body) and a parameter-name -> position map.
// On error it reports via p.err and returns (nil, nil).
func (p *preprocessorImpl) parseDefMacroArgs(macro Token,
	args []TokenInfo) (rest []TokenInfo, argMap map[string]int) {
	argMap = make(map[string]int)
	for {
		// Need at least "<name>" followed by ',' or ')'.
		if len(args) <= 1 {
			p.err.Errorf("Macro definition ended unexpectedly.")
			return nil, nil
		}
		if !isIdentOrKeyword(args[0]) {
			p.err.Errorf("Invalid function macro definition. "+
				"Expected an identifier, got '%s'.", args[0].Token)
			// Bug fix: this path used a bare `return`, leaking a non-nil
			// argMap; the caller took that as success and installed a broken
			// macro. Fail consistently with the other error paths.
			return nil, nil
		}
		name := args[0].Token.String()
		if _, ok := argMap[name]; ok {
			p.err.Errorf("Macro '%s' contains two arguments named '%s'.", macro, name)
		}
		argMap[name] = len(argMap)
		switch args[1].Token {
		case OpRParen:
			// End of the parameter list; the remainder is the macro body.
			return args[2:], argMap
		case ast.BoComma:
			args = args[2:]
			continue
		default:
			p.err.Errorf("Invalid function macro definition. "+
				"Expected ',', ')', got '%s'.", args[1].Token)
			return nil, nil
		}
	}
}
// processDefine handles a #define directive. args holds the macro name token
// followed by the replacement tokens. An opening '(' immediately following
// the name (no whitespace) introduces a function-macro parameter list.
func (p *preprocessorImpl) processDefine(args []TokenInfo) {
	macro := args[0]
	name := macro.Token.String()
	// Redefinition replaces any previous macro. delete is a no-op on a
	// missing key, so the old existence check was redundant; the delete also
	// ensures a broken redefinition below does not leave the old macro alive.
	delete(p.macros, name)
	args = args[1:]
	if len(args) == 0 || args[0].Whitespace || args[0].Token != OpLParen {
		// Object-like macro: the whole remainder is the replacement list.
		expansion := make([]macroExpander, len(args))
		for i := range args {
			expansion[i] = args[i].expand
		}
		p.macros[name] = macroDefinition{name, false, 0, expansion}
		return
	}
	// Function-like macro: parse the parameter list after the '('.
	body, argMap := p.parseDefMacroArgs(macro.Token, args[1:])
	if argMap == nil {
		return
	}
	expansion := make([]macroExpander, len(body))
	for i := range body {
		if pos, ok := argMap[body[i].Token.String()]; ok {
			// Parameter reference: substitute the corresponding call argument.
			expansion[i] = argumentExpander(pos).expand
		} else {
			expansion[i] = body[i].expand
		}
	}
	p.macros[name] = macroDefinition{name, true, len(argMap), expansion}
}
// processDirectives reads any preprocessor directives from the input stream and processes them.
func (p *preprocessorImpl) processDirectives() {
for {
if _, ok := p.lexer.Peek().Token.(ppKeyword); !ok {
break
}
p.processDirective(p.lexer.Next())
}
}
func (p *preprocessorImpl) evaluateDefined(arg TokenInfo) tokenExpansion {
var ic ast.IntValue
if _, present := p.macros[arg.Token.String()]; present {
ic = ast.IntValue(1)
} else {
ic = ast.IntValue(0)
}
return newTokenExpansion(TokenInfo{Token: ic})
}
func (p *preprocessorImpl) evaluateIf(args []TokenInfo) bool {
// append fake EOF
lastToken := args[len(args)-1].Cst.Token()
eof := &parse.Leaf{}
eof.SetToken(parse.Token{Source: lastToken.Source, Start: lastToken.End, End: lastToken.End})
args = append(args, TokenInfo{Token: nil, Cst: eof})
var list []tokenExpansion
// convert args to tokenExpansions and evaluate defined(X)
for i := 0; i < len(args); i++ {
if args[i].Token == Identifier("defined") {
if i+1 < len(args) && isIdentOrKeyword(args[i+1]) {
list = append(list, p.evaluateDefined(args[i+1]))
i++
} else if i+3 < len(args) && args[i+1].Token == OpLParen &&
isIdentOrKeyword(args[i+2]) && args[i+3].Token == OpRParen {
list = append(list, p.evaluateDefined(args[i+2]))
i += 3
} else {
p.err.Errorf("Operator 'defined' used incorrectly.")
}
} else {
list = append(list, newTokenExpansion(args[i]))
}
}
reader := &listReader{list: list} // reader will read the arguments
worker := &listWorker{reader, p} // worker will expand them
pp := &Preprocessor{impl: worker} // pp will provide the lookahead
val, err := p.evaluator(pp) // and evaluator will evalate them
p.err.Error(err...)
return val != 0
}
func (p *preprocessorImpl) processDirective(info TokenInfo) {
switch info.Token {
case ppDefine:
args := p.getDirectiveArguments(info, false)
if p.skipping() || args == nil {
return
}
p.processDefine(args)
case ppUndef:
args := p.getDirectiveArguments(info, false)
if p.skipping() || args == nil {
return
}
if _, ok := p.macros[args[0].Token.String()]; !ok {
p.err.Errorf("Macro '%s' not defined.", args[0].Token)
return
}
delete(p.macros, args[0].Token.String())
case ppIf:
args := p.getDirectiveArguments(info, false)
if p.skipping() || args == nil {
// Skip both of the branches if the parent condition evaluated to false.
// We intentionally do not evaluate the condition since it might be invalid.
p.ifStack = append(p.ifStack, ifEntry{Skipping: true, SkipElse: true})
return
}
val := p.evaluateIf(args)
p.ifStack = append(p.ifStack, ifEntry{Skipping: !val, SkipElse: val})
case ppElif:
args := p.getDirectiveArguments(info, true)
if len(p.ifStack) == 0 {
p.err.Errorf("Unmatched #elif.")
return
}
entry := &p.ifStack[len(p.ifStack)-1]
if entry.HadElse {
p.err.Errorf("#elif after #else.")
entry.Skipping = true
return
}
if entry.SkipElse {
entry.Skipping = true
} else {
val := p.evaluateIf(args)
entry.Skipping = !val
entry.SkipElse = val
}
return
case ppVersion:
args := p.getDirectiveArguments(info, false)
if len(args) > 0 {
p.version = args[0].Token.String()
} else {
p.err.Errorf("expected version number after #version")
}
return
// TODO: support #pragma instead of silently ignoring it.
case ppPragma:
_ = p.getDirectiveArguments(info, false)
return
case ppExtension:
args := p.getDirectiveArguments(info, false)
if p.skipping() {
return
}
if len(args) == 3 {
name, nameOk := args[0].Token.(Identifier)
colonOk := args[1].Token == OpColon
behaviour, behaviourOk := args[2].Token.(Identifier)
if nameOk && colonOk && behaviourOk {
extension := Extension{Name: name.String(), Behaviour: behaviour.String()}
p.extensions = append(p.extensions, extension)
return
}
}
p.err.Errorf("#extension should have the form '#extension name : behaviour'")
return
case ppIfdef, ppIfndef:
args := p.getDirectiveArguments(info, false)
if p.skipping() {
// Skip both of the branches if the parent condition evaluated to false.
p.ifStack = append(p.ifStack, ifEntry{Skipping: true, SkipElse: true})
return
}
var defined bool
if args == nil {
defined = false
} else {
_, defined = p.macros[args[0].Token.String()]
}
value := defined == (info.Token == ppIfdef)
p.ifStack = append(p.ifStack, ifEntry{Skipping: !value, SkipElse: value})
case ppElse:
_ = p.getDirectiveArguments(info, true)
if len(p.ifStack) == 0 {
p.err.Errorf("Unmatched #else.")
return
}
entry := &p.ifStack[len(p.ifStack)-1]
if entry.HadElse {
p.err.Errorf("#if directive has multiple #else directives.")
entry.Skipping = true
return
}
entry.HadElse = true
entry.Skipping = entry.SkipElse
case ppEndif:
_ = p.getDirectiveArguments(info, true)
if len(p.ifStack) == 0 {
p.err.Errorf("Unmatched #endif.")
return
}
p.ifStack = p.ifStack[:len(p.ifStack)-1]
case ppLine:
args := p.getDirectiveArguments(info, true)
if len(args) != 1 && len(args) != 2 {
p.err.Errorf("expected line/file number after #line")
}
case ppError:
args := p.getDirectiveArguments(info, true)
if p.skipping() {
return
}
var msg bytes.Buffer
for _, i := range args {
i.Cst.Prefix().WriteTo(&msg)
msg.Write([]byte(i.Token.String()))
i.Cst.Suffix().WriteTo(&msg)
}
p.err.Errorf(msg.String())
}
}
func addBuiltinMacro(macros map[string]macroDefinition, name string, expander macroExpander) {
macros[name] = macroDefinition{
name: name,
definition: []macroExpander{expander},
}
}
func newPreprocessorImpl(data string, eval ExpressionEvaluator, file int) *preprocessorImpl {
p := &preprocessorImpl{
lexer: newLexer(fmt.Sprintf("File %v", file), data),
macros: make(map[string]macroDefinition),
evaluator: eval,
}
addBuiltinMacro(p.macros, "__LINE__", p.expandLine)
addBuiltinMacro(p.macros, "__FILE__", TokenInfo{Token: ast.IntValue(file)}.expand)
addBuiltinMacro(p.macros, "__VERSION__", TokenInfo{Token: ast.IntValue(300)}.expand)
addBuiltinMacro(p.macros, "GL_ES", TokenInfo{Token: ast.IntValue(1)}.expand)
return p
}
//////////////////////////// tokenReader interface /////////////////////////
func (p *preprocessorImpl) Peek() tokenExpansion {
for p.currentToken == nil {
// process any preprocessor directives
p.processDirectives()
tok := newTokenExpansion(p.lexer.Next())
p.line, _ = tok.Info.Cst.Token().Cursor()
if tok.Info.Token == nil {
if len(p.ifStack) > 0 {
p.err.Errorf("Unterminated #if directive at the end of file.")
}
p.currentToken = &tok
} else if !p.skipping() {
p.currentToken = &tok
}
}
return *p.currentToken
}
func (p *preprocessorImpl) Next() tokenExpansion {
ret := p.Peek()
p.currentToken = nil
return ret
}
//////////////////////////// worker interface /////////////////////////
func (p *preprocessorImpl) Work() []tokenExpansion {
return p.processMacro(p.Next(), p)
}
| {
args = append(args, nil)
} | conditional_block |
preprocessorImpl.go | // Copyright (C) 2017 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package preprocessor
import (
"bytes"
"fmt"
"github.com/google/gapid/core/text/parse"
"github.com/google/gapid/gapis/api/gles/glsl/ast"
)
// ifEntry is a structure containing the data necessary for proper evaluation of #if*
// preprocessor directives.
type ifEntry struct {
HadElse bool // Whether we already encountered an #else block
Skipping bool // Whether the current block should be skipped
SkipElse bool // Whether all else and elif blocks should be skipped
}
type macroDefinition struct {
name string // macro name
function bool // Whether this is a function macro.
argCount int // The number of arguments of the macro.
definition []macroExpander // The macro definition as a list of macroExpanders.
}
// preprocessorImpl stores the internal state of a preprocessor instance.
type preprocessorImpl struct {
err ast.ErrorCollector
lexer *lexer
macros map[string]macroDefinition // All currently defined macros.
version string // The shader version declared with #version.
extensions []Extension // All encountered #extension directives.
ifStack []ifEntry // The stack of all encountered #if directives.
line int // The current line.
currentToken *tokenExpansion
evaluator ExpressionEvaluator
}
func (p *preprocessorImpl) Version() string {
return p.version
}
func (p *preprocessorImpl) Extensions() []Extension {
return p.extensions
}
func (p *preprocessorImpl) Errors() []error {
return ast.ConcatErrors(p.lexer.err.GetErrors(), p.err.GetErrors())
}
// skipping returnes true if we should skip this token. We skip if any of the #if directives in
// the stack says we should skip.
func (p *preprocessorImpl) skipping() (skip bool) {
c := len(p.ifStack)
return c > 0 && p.ifStack[c-1].Skipping
}
// tokenReader is an internal interface encapsulating a stream of tokens.
type tokenReader interface {
Next() tokenExpansion
Peek() tokenExpansion
}
// listReader is an implementation of tokenReader which reads tokens from a list. It is used to
// rescan a macro expansion to expand macros recursively. It contains a nested tokenReader, which
// is read from after the own token list. This happens in case of recursive function macros with
// unbalanced parenthesis.
type listReader struct {
list []tokenExpansion
next tokenReader
}
func (r *listReader) Next() (t tokenExpansion) |
func (r *listReader) Peek() (t tokenExpansion) {
if len(r.list) > 0 {
t = r.list[0]
} else if r.next != nil {
t = r.next.Peek()
}
return
}
// processList is a helper function for processMacro. It calls processMacro on all tokens in the
// list.
func (p *preprocessorImpl) processList(r *listReader) (result []tokenExpansion) {
for len(r.list) > 0 {
token := r.Next()
result = append(result, p.processMacro(token, r)...)
}
return
}
// readMacroArgs reads macro arguments. It returns the arguments as a list of lists of tokens.
// Failure is reported by the second return value.
func (p *preprocessorImpl) readMacroArgs(reader tokenReader) (args [][]tokenExpansion, ok bool) {
var arg []tokenExpansion // currently processed argument
level := 0 // number of nested parenthesis
for {
if reader.Peek().Info.Token == nil {
p.err.Errorf("Unexpected end of file while processing a macro.")
return args, false
}
if level == 0 {
switch reader.Peek().Info.Token {
case OpRParen:
args = append(args, arg)
return args, true
case OpLParen:
level++
arg = append(arg, reader.Next())
continue
case ast.BoComma:
reader.Next()
args = append(args, arg)
arg = nil
continue
}
}
switch reader.Peek().Info.Token {
case OpRParen:
level--
arg = append(arg, reader.Next())
case OpLParen:
level++
arg = append(arg, reader.Next())
default:
arg = append(arg, reader.Next())
}
}
}
// parseMacroCallArgs reads arguments to a function macro, pre-expands them and computes the
// intersection of their hide sets. It reads the argument from the specified token reader. In
// case of errors the hide set is nil.
func (p *preprocessorImpl) parseMacroCallArgs(reader tokenReader, macro tokenExpansion,
argCount int) ([][]tokenExpansion, hideSet) {
if reader.Peek().Info.Token != OpLParen {
// Function macros are not expanded if the next token is not '('.
return nil, nil
}
reader.Next()
args, ok := p.readMacroArgs(reader)
if !ok {
return nil, nil
}
lastTok := reader.Next()
if len(args) != argCount {
p.err.Errorf("Incorrect number of arguments to macro '%v': expected %d, got %d.",
macro.Info.Token, argCount, len(args))
// Try to recover by padding args
for len(args) < argCount {
args = append(args, nil)
}
}
// Macro argument pre-expansion
for i := range args {
args[i] = p.processList(&listReader{args[i], nil})
}
set := intersect(macro.HideSet, lastTok.HideSet)
return args, set
}
// processMacro checks t for macro definitions and fully expands it. reader is an interface to
// the following tokens, needed for processing function macro invocations.
func (p *preprocessorImpl) processMacro(t tokenExpansion, reader tokenReader) []tokenExpansion {
// eof pseudo-token
if t.Info.Token == nil {
return []tokenExpansion{t}
}
name := t.Info.Token.String()
def, present := p.macros[name]
if !present {
// no expansion needed
return []tokenExpansion{t}
}
set := t.HideSet
if _, present := set[name]; present {
// This macro should not be expanded.
return []tokenExpansion{t}
}
var args [][]tokenExpansion
if def.function {
args, set = p.parseMacroCallArgs(reader, t, def.argCount)
if set == nil {
return []tokenExpansion{t}
}
}
list := make([]tokenExpansion, 0, len(def.definition))
// Substitute arguments into macro definition
for _, expander := range def.definition {
list = append(list, expander(args)...)
}
// Extend the hide sets
for _, e := range list {
e.HideSet.AddAll(set)
e.HideSet[name] = struct{}{}
}
// Token pasting
for i := 0; i < len(list); i++ {
for i+2 < len(list) && list[i+1].Info.Token.String() == "##" {
newIdentifier := list[i].Info.Token.String() + list[i+2].Info.Token.String()
list[i] = newTokenExpansion(TokenInfo{Token: Identifier(newIdentifier)})
list = append(list[:i+1], list[i+3:]...) // Remove the ## and following token
}
}
// Expand macros in the definition recursively
return p.processList(&listReader{list, reader})
}
// getDirectiveArguments is a helper function used to read the arguments of a preprocessor
// directive. It consumes all tokens until the newline and returns them. If emptyOk is false, it
// will raise an error in the case of an empty argument list.
func (p *preprocessorImpl) getDirectiveArguments(info TokenInfo, emptyOk bool) []TokenInfo {
dir := info.Token
var ret []TokenInfo
for info = p.lexer.Peek(); info.Token != nil && !info.Newline; info = p.lexer.Peek() {
ret = append(ret, p.lexer.Next())
}
if len(ret) == 0 && !emptyOk {
p.err.Errorf("%s needs an argument.", dir)
}
return ret
}
func isIdentOrKeyword(t TokenInfo) bool {
switch t.Token.(type) {
case Identifier, Keyword, ast.BareType:
return true
default:
return false
}
}
// Given a list of tokens following `#define FOO(`, consume the tokens that make up the macro
// argument list. Returns the list of unconsumed tokens and a `macro_name->position` map.
func (p *preprocessorImpl) parseDefMacroArgs(macro Token,
args []TokenInfo) (rest []TokenInfo, argMap map[string]int) {
argMap = make(map[string]int)
for {
if len(args) <= 1 {
p.err.Errorf("Macro definition ended unexpectedly.")
return nil, nil
}
if !isIdentOrKeyword(args[0]) {
p.err.Errorf("Invalid function macro definition. "+
"Expected an identifier, got '%s'.", args[0].Token)
return
}
name := args[0].Token.String()
if _, ok := argMap[name]; ok {
p.err.Errorf("Macro '%s' contains two arguments named '%s'.", macro, name)
}
argMap[name] = len(argMap)
switch args[1].Token {
case OpRParen:
return args[2:], argMap
case ast.BoComma:
args = args[2:]
continue
default:
p.err.Errorf("Invalid function macro definition. "+
"Expected ',', ')', got '%s'.", args[1].Token)
return nil, nil
}
}
}
// process a #define directive
func (p *preprocessorImpl) processDefine(args []TokenInfo) {
macro := args[0]
if _, ok := p.macros[macro.Token.String()]; ok {
delete(p.macros, macro.Token.String())
}
args = args[1:]
if len(args) == 0 || args[0].Whitespace || args[0].Token != OpLParen {
// Just an object macro, we're done.
expansion := make([]macroExpander, len(args))
for i := range args {
expansion[i] = args[i].expand
}
name := macro.Token.String()
p.macros[name] = macroDefinition{name, false, 0, expansion}
return
}
args, argMap := p.parseDefMacroArgs(macro.Token, args[1:])
if argMap == nil {
return
}
expansion := make([]macroExpander, len(args))
for i := range args {
if arg, ok := argMap[args[i].Token.String()]; ok {
expansion[i] = argumentExpander(arg).expand
} else {
expansion[i] = args[i].expand
}
}
name := macro.Token.String()
p.macros[name] = macroDefinition{name, true, len(argMap), expansion}
}
// processDirectives reads any preprocessor directives from the input stream and processes them.
func (p *preprocessorImpl) processDirectives() {
for {
if _, ok := p.lexer.Peek().Token.(ppKeyword); !ok {
break
}
p.processDirective(p.lexer.Next())
}
}
func (p *preprocessorImpl) evaluateDefined(arg TokenInfo) tokenExpansion {
var ic ast.IntValue
if _, present := p.macros[arg.Token.String()]; present {
ic = ast.IntValue(1)
} else {
ic = ast.IntValue(0)
}
return newTokenExpansion(TokenInfo{Token: ic})
}
func (p *preprocessorImpl) evaluateIf(args []TokenInfo) bool {
// append fake EOF
lastToken := args[len(args)-1].Cst.Token()
eof := &parse.Leaf{}
eof.SetToken(parse.Token{Source: lastToken.Source, Start: lastToken.End, End: lastToken.End})
args = append(args, TokenInfo{Token: nil, Cst: eof})
var list []tokenExpansion
// convert args to tokenExpansions and evaluate defined(X)
for i := 0; i < len(args); i++ {
if args[i].Token == Identifier("defined") {
if i+1 < len(args) && isIdentOrKeyword(args[i+1]) {
list = append(list, p.evaluateDefined(args[i+1]))
i++
} else if i+3 < len(args) && args[i+1].Token == OpLParen &&
isIdentOrKeyword(args[i+2]) && args[i+3].Token == OpRParen {
list = append(list, p.evaluateDefined(args[i+2]))
i += 3
} else {
p.err.Errorf("Operator 'defined' used incorrectly.")
}
} else {
list = append(list, newTokenExpansion(args[i]))
}
}
reader := &listReader{list: list} // reader will read the arguments
worker := &listWorker{reader, p} // worker will expand them
pp := &Preprocessor{impl: worker} // pp will provide the lookahead
val, err := p.evaluator(pp) // and evaluator will evalate them
p.err.Error(err...)
return val != 0
}
func (p *preprocessorImpl) processDirective(info TokenInfo) {
switch info.Token {
case ppDefine:
args := p.getDirectiveArguments(info, false)
if p.skipping() || args == nil {
return
}
p.processDefine(args)
case ppUndef:
args := p.getDirectiveArguments(info, false)
if p.skipping() || args == nil {
return
}
if _, ok := p.macros[args[0].Token.String()]; !ok {
p.err.Errorf("Macro '%s' not defined.", args[0].Token)
return
}
delete(p.macros, args[0].Token.String())
case ppIf:
args := p.getDirectiveArguments(info, false)
if p.skipping() || args == nil {
// Skip both of the branches if the parent condition evaluated to false.
// We intentionally do not evaluate the condition since it might be invalid.
p.ifStack = append(p.ifStack, ifEntry{Skipping: true, SkipElse: true})
return
}
val := p.evaluateIf(args)
p.ifStack = append(p.ifStack, ifEntry{Skipping: !val, SkipElse: val})
case ppElif:
args := p.getDirectiveArguments(info, true)
if len(p.ifStack) == 0 {
p.err.Errorf("Unmatched #elif.")
return
}
entry := &p.ifStack[len(p.ifStack)-1]
if entry.HadElse {
p.err.Errorf("#elif after #else.")
entry.Skipping = true
return
}
if entry.SkipElse {
entry.Skipping = true
} else {
val := p.evaluateIf(args)
entry.Skipping = !val
entry.SkipElse = val
}
return
case ppVersion:
args := p.getDirectiveArguments(info, false)
if len(args) > 0 {
p.version = args[0].Token.String()
} else {
p.err.Errorf("expected version number after #version")
}
return
// TODO: support #pragma instead of silently ignoring it.
case ppPragma:
_ = p.getDirectiveArguments(info, false)
return
case ppExtension:
args := p.getDirectiveArguments(info, false)
if p.skipping() {
return
}
if len(args) == 3 {
name, nameOk := args[0].Token.(Identifier)
colonOk := args[1].Token == OpColon
behaviour, behaviourOk := args[2].Token.(Identifier)
if nameOk && colonOk && behaviourOk {
extension := Extension{Name: name.String(), Behaviour: behaviour.String()}
p.extensions = append(p.extensions, extension)
return
}
}
p.err.Errorf("#extension should have the form '#extension name : behaviour'")
return
case ppIfdef, ppIfndef:
args := p.getDirectiveArguments(info, false)
if p.skipping() {
// Skip both of the branches if the parent condition evaluated to false.
p.ifStack = append(p.ifStack, ifEntry{Skipping: true, SkipElse: true})
return
}
var defined bool
if args == nil {
defined = false
} else {
_, defined = p.macros[args[0].Token.String()]
}
value := defined == (info.Token == ppIfdef)
p.ifStack = append(p.ifStack, ifEntry{Skipping: !value, SkipElse: value})
case ppElse:
_ = p.getDirectiveArguments(info, true)
if len(p.ifStack) == 0 {
p.err.Errorf("Unmatched #else.")
return
}
entry := &p.ifStack[len(p.ifStack)-1]
if entry.HadElse {
p.err.Errorf("#if directive has multiple #else directives.")
entry.Skipping = true
return
}
entry.HadElse = true
entry.Skipping = entry.SkipElse
case ppEndif:
_ = p.getDirectiveArguments(info, true)
if len(p.ifStack) == 0 {
p.err.Errorf("Unmatched #endif.")
return
}
p.ifStack = p.ifStack[:len(p.ifStack)-1]
case ppLine:
args := p.getDirectiveArguments(info, true)
if len(args) != 1 && len(args) != 2 {
p.err.Errorf("expected line/file number after #line")
}
case ppError:
args := p.getDirectiveArguments(info, true)
if p.skipping() {
return
}
var msg bytes.Buffer
for _, i := range args {
i.Cst.Prefix().WriteTo(&msg)
msg.Write([]byte(i.Token.String()))
i.Cst.Suffix().WriteTo(&msg)
}
p.err.Errorf(msg.String())
}
}
func addBuiltinMacro(macros map[string]macroDefinition, name string, expander macroExpander) {
macros[name] = macroDefinition{
name: name,
definition: []macroExpander{expander},
}
}
func newPreprocessorImpl(data string, eval ExpressionEvaluator, file int) *preprocessorImpl {
p := &preprocessorImpl{
lexer: newLexer(fmt.Sprintf("File %v", file), data),
macros: make(map[string]macroDefinition),
evaluator: eval,
}
addBuiltinMacro(p.macros, "__LINE__", p.expandLine)
addBuiltinMacro(p.macros, "__FILE__", TokenInfo{Token: ast.IntValue(file)}.expand)
addBuiltinMacro(p.macros, "__VERSION__", TokenInfo{Token: ast.IntValue(300)}.expand)
addBuiltinMacro(p.macros, "GL_ES", TokenInfo{Token: ast.IntValue(1)}.expand)
return p
}
//////////////////////////// tokenReader interface /////////////////////////
func (p *preprocessorImpl) Peek() tokenExpansion {
for p.currentToken == nil {
// process any preprocessor directives
p.processDirectives()
tok := newTokenExpansion(p.lexer.Next())
p.line, _ = tok.Info.Cst.Token().Cursor()
if tok.Info.Token == nil {
if len(p.ifStack) > 0 {
p.err.Errorf("Unterminated #if directive at the end of file.")
}
p.currentToken = &tok
} else if !p.skipping() {
p.currentToken = &tok
}
}
return *p.currentToken
}
func (p *preprocessorImpl) Next() tokenExpansion {
ret := p.Peek()
p.currentToken = nil
return ret
}
//////////////////////////// worker interface /////////////////////////
func (p *preprocessorImpl) Work() []tokenExpansion {
return p.processMacro(p.Next(), p)
}
| {
if len(r.list) > 0 {
t = r.list[0]
r.list = r.list[1:]
} else if r.next != nil {
t = r.next.Next()
}
return
} | identifier_body |
preprocessorImpl.go | // Copyright (C) 2017 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package preprocessor
import (
"bytes"
"fmt"
"github.com/google/gapid/core/text/parse"
"github.com/google/gapid/gapis/api/gles/glsl/ast"
)
// ifEntry is a structure containing the data necessary for proper evaluation of #if*
// preprocessor directives.
type ifEntry struct {
HadElse bool // Whether we already encountered an #else block
Skipping bool // Whether the current block should be skipped
SkipElse bool // Whether all else and elif blocks should be skipped
}
type macroDefinition struct {
name string // macro name
function bool // Whether this is a function macro.
argCount int // The number of arguments of the macro.
definition []macroExpander // The macro definition as a list of macroExpanders.
}
// preprocessorImpl stores the internal state of a preprocessor instance.
type preprocessorImpl struct {
err ast.ErrorCollector
lexer *lexer
macros map[string]macroDefinition // All currently defined macros.
version string // The shader version declared with #version.
extensions []Extension // All encountered #extension directives.
ifStack []ifEntry // The stack of all encountered #if directives.
line int // The current line.
currentToken *tokenExpansion
evaluator ExpressionEvaluator
}
func (p *preprocessorImpl) Version() string {
return p.version
}
func (p *preprocessorImpl) | () []Extension {
return p.extensions
}
func (p *preprocessorImpl) Errors() []error {
return ast.ConcatErrors(p.lexer.err.GetErrors(), p.err.GetErrors())
}
// skipping returnes true if we should skip this token. We skip if any of the #if directives in
// the stack says we should skip.
func (p *preprocessorImpl) skipping() (skip bool) {
c := len(p.ifStack)
return c > 0 && p.ifStack[c-1].Skipping
}
// tokenReader is an internal interface encapsulating a stream of tokens.
type tokenReader interface {
Next() tokenExpansion
Peek() tokenExpansion
}
// listReader is an implementation of tokenReader which reads tokens from a list. It is used to
// rescan a macro expansion to expand macros recursively. It contains a nested tokenReader, which
// is read from after the own token list. This happens in case of recursive function macros with
// unbalanced parenthesis.
type listReader struct {
list []tokenExpansion
next tokenReader
}
func (r *listReader) Next() (t tokenExpansion) {
if len(r.list) > 0 {
t = r.list[0]
r.list = r.list[1:]
} else if r.next != nil {
t = r.next.Next()
}
return
}
func (r *listReader) Peek() (t tokenExpansion) {
if len(r.list) > 0 {
t = r.list[0]
} else if r.next != nil {
t = r.next.Peek()
}
return
}
// processList is a helper function for processMacro. It calls processMacro on all tokens in the
// list.
func (p *preprocessorImpl) processList(r *listReader) (result []tokenExpansion) {
for len(r.list) > 0 {
token := r.Next()
result = append(result, p.processMacro(token, r)...)
}
return
}
// readMacroArgs reads macro arguments. It returns the arguments as a list of lists of tokens.
// Failure is reported by the second return value.
func (p *preprocessorImpl) readMacroArgs(reader tokenReader) (args [][]tokenExpansion, ok bool) {
var arg []tokenExpansion // currently processed argument
level := 0 // number of nested parenthesis
for {
if reader.Peek().Info.Token == nil {
p.err.Errorf("Unexpected end of file while processing a macro.")
return args, false
}
if level == 0 {
switch reader.Peek().Info.Token {
case OpRParen:
args = append(args, arg)
return args, true
case OpLParen:
level++
arg = append(arg, reader.Next())
continue
case ast.BoComma:
reader.Next()
args = append(args, arg)
arg = nil
continue
}
}
switch reader.Peek().Info.Token {
case OpRParen:
level--
arg = append(arg, reader.Next())
case OpLParen:
level++
arg = append(arg, reader.Next())
default:
arg = append(arg, reader.Next())
}
}
}
// parseMacroCallArgs reads arguments to a function macro, pre-expands them and computes the
// intersection of their hide sets. It reads the argument from the specified token reader. In
// case of errors the hide set is nil.
func (p *preprocessorImpl) parseMacroCallArgs(reader tokenReader, macro tokenExpansion,
argCount int) ([][]tokenExpansion, hideSet) {
if reader.Peek().Info.Token != OpLParen {
// Function macros are not expanded if the next token is not '('.
return nil, nil
}
reader.Next()
args, ok := p.readMacroArgs(reader)
if !ok {
return nil, nil
}
lastTok := reader.Next()
if len(args) != argCount {
p.err.Errorf("Incorrect number of arguments to macro '%v': expected %d, got %d.",
macro.Info.Token, argCount, len(args))
// Try to recover by padding args
for len(args) < argCount {
args = append(args, nil)
}
}
// Macro argument pre-expansion
for i := range args {
args[i] = p.processList(&listReader{args[i], nil})
}
set := intersect(macro.HideSet, lastTok.HideSet)
return args, set
}
// processMacro checks t for macro definitions and fully expands it. reader is an interface to
// the following tokens, needed for processing function macro invocations.
func (p *preprocessorImpl) processMacro(t tokenExpansion, reader tokenReader) []tokenExpansion {
// eof pseudo-token
if t.Info.Token == nil {
return []tokenExpansion{t}
}
name := t.Info.Token.String()
def, present := p.macros[name]
if !present {
// no expansion needed
return []tokenExpansion{t}
}
set := t.HideSet
if _, present := set[name]; present {
// This macro should not be expanded.
return []tokenExpansion{t}
}
var args [][]tokenExpansion
if def.function {
args, set = p.parseMacroCallArgs(reader, t, def.argCount)
if set == nil {
return []tokenExpansion{t}
}
}
list := make([]tokenExpansion, 0, len(def.definition))
// Substitute arguments into macro definition
for _, expander := range def.definition {
list = append(list, expander(args)...)
}
// Extend the hide sets
for _, e := range list {
e.HideSet.AddAll(set)
e.HideSet[name] = struct{}{}
}
// Token pasting
for i := 0; i < len(list); i++ {
for i+2 < len(list) && list[i+1].Info.Token.String() == "##" {
newIdentifier := list[i].Info.Token.String() + list[i+2].Info.Token.String()
list[i] = newTokenExpansion(TokenInfo{Token: Identifier(newIdentifier)})
list = append(list[:i+1], list[i+3:]...) // Remove the ## and following token
}
}
// Expand macros in the definition recursively
return p.processList(&listReader{list, reader})
}
// getDirectiveArguments is a helper function used to read the arguments of a preprocessor
// directive. It consumes all tokens until the newline and returns them. If emptyOk is false, it
// will raise an error in the case of an empty argument list.
func (p *preprocessorImpl) getDirectiveArguments(info TokenInfo, emptyOk bool) []TokenInfo {
dir := info.Token
var ret []TokenInfo
for info = p.lexer.Peek(); info.Token != nil && !info.Newline; info = p.lexer.Peek() {
ret = append(ret, p.lexer.Next())
}
if len(ret) == 0 && !emptyOk {
p.err.Errorf("%s needs an argument.", dir)
}
return ret
}
func isIdentOrKeyword(t TokenInfo) bool {
switch t.Token.(type) {
case Identifier, Keyword, ast.BareType:
return true
default:
return false
}
}
// Given a list of tokens following `#define FOO(`, consume the tokens that make up the macro
// argument list. Returns the list of unconsumed tokens and a `macro_name->position` map.
func (p *preprocessorImpl) parseDefMacroArgs(macro Token,
args []TokenInfo) (rest []TokenInfo, argMap map[string]int) {
argMap = make(map[string]int)
for {
if len(args) <= 1 {
p.err.Errorf("Macro definition ended unexpectedly.")
return nil, nil
}
if !isIdentOrKeyword(args[0]) {
p.err.Errorf("Invalid function macro definition. "+
"Expected an identifier, got '%s'.", args[0].Token)
return
}
name := args[0].Token.String()
if _, ok := argMap[name]; ok {
p.err.Errorf("Macro '%s' contains two arguments named '%s'.", macro, name)
}
argMap[name] = len(argMap)
switch args[1].Token {
case OpRParen:
return args[2:], argMap
case ast.BoComma:
args = args[2:]
continue
default:
p.err.Errorf("Invalid function macro definition. "+
"Expected ',', ')', got '%s'.", args[1].Token)
return nil, nil
}
}
}
// process a #define directive
func (p *preprocessorImpl) processDefine(args []TokenInfo) {
macro := args[0]
if _, ok := p.macros[macro.Token.String()]; ok {
delete(p.macros, macro.Token.String())
}
args = args[1:]
if len(args) == 0 || args[0].Whitespace || args[0].Token != OpLParen {
// Just an object macro, we're done.
expansion := make([]macroExpander, len(args))
for i := range args {
expansion[i] = args[i].expand
}
name := macro.Token.String()
p.macros[name] = macroDefinition{name, false, 0, expansion}
return
}
args, argMap := p.parseDefMacroArgs(macro.Token, args[1:])
if argMap == nil {
return
}
expansion := make([]macroExpander, len(args))
for i := range args {
if arg, ok := argMap[args[i].Token.String()]; ok {
expansion[i] = argumentExpander(arg).expand
} else {
expansion[i] = args[i].expand
}
}
name := macro.Token.String()
p.macros[name] = macroDefinition{name, true, len(argMap), expansion}
}
// processDirectives reads any preprocessor directives from the input stream and processes them.
func (p *preprocessorImpl) processDirectives() {
for {
if _, ok := p.lexer.Peek().Token.(ppKeyword); !ok {
break
}
p.processDirective(p.lexer.Next())
}
}
func (p *preprocessorImpl) evaluateDefined(arg TokenInfo) tokenExpansion {
var ic ast.IntValue
if _, present := p.macros[arg.Token.String()]; present {
ic = ast.IntValue(1)
} else {
ic = ast.IntValue(0)
}
return newTokenExpansion(TokenInfo{Token: ic})
}
func (p *preprocessorImpl) evaluateIf(args []TokenInfo) bool {
// append fake EOF
lastToken := args[len(args)-1].Cst.Token()
eof := &parse.Leaf{}
eof.SetToken(parse.Token{Source: lastToken.Source, Start: lastToken.End, End: lastToken.End})
args = append(args, TokenInfo{Token: nil, Cst: eof})
var list []tokenExpansion
// convert args to tokenExpansions and evaluate defined(X)
for i := 0; i < len(args); i++ {
if args[i].Token == Identifier("defined") {
if i+1 < len(args) && isIdentOrKeyword(args[i+1]) {
list = append(list, p.evaluateDefined(args[i+1]))
i++
} else if i+3 < len(args) && args[i+1].Token == OpLParen &&
isIdentOrKeyword(args[i+2]) && args[i+3].Token == OpRParen {
list = append(list, p.evaluateDefined(args[i+2]))
i += 3
} else {
p.err.Errorf("Operator 'defined' used incorrectly.")
}
} else {
list = append(list, newTokenExpansion(args[i]))
}
}
reader := &listReader{list: list} // reader will read the arguments
worker := &listWorker{reader, p} // worker will expand them
pp := &Preprocessor{impl: worker} // pp will provide the lookahead
val, err := p.evaluator(pp) // and evaluator will evalate them
p.err.Error(err...)
return val != 0
}
func (p *preprocessorImpl) processDirective(info TokenInfo) {
switch info.Token {
case ppDefine:
args := p.getDirectiveArguments(info, false)
if p.skipping() || args == nil {
return
}
p.processDefine(args)
case ppUndef:
args := p.getDirectiveArguments(info, false)
if p.skipping() || args == nil {
return
}
if _, ok := p.macros[args[0].Token.String()]; !ok {
p.err.Errorf("Macro '%s' not defined.", args[0].Token)
return
}
delete(p.macros, args[0].Token.String())
case ppIf:
args := p.getDirectiveArguments(info, false)
if p.skipping() || args == nil {
// Skip both of the branches if the parent condition evaluated to false.
// We intentionally do not evaluate the condition since it might be invalid.
p.ifStack = append(p.ifStack, ifEntry{Skipping: true, SkipElse: true})
return
}
val := p.evaluateIf(args)
p.ifStack = append(p.ifStack, ifEntry{Skipping: !val, SkipElse: val})
case ppElif:
args := p.getDirectiveArguments(info, true)
if len(p.ifStack) == 0 {
p.err.Errorf("Unmatched #elif.")
return
}
entry := &p.ifStack[len(p.ifStack)-1]
if entry.HadElse {
p.err.Errorf("#elif after #else.")
entry.Skipping = true
return
}
if entry.SkipElse {
entry.Skipping = true
} else {
val := p.evaluateIf(args)
entry.Skipping = !val
entry.SkipElse = val
}
return
case ppVersion:
args := p.getDirectiveArguments(info, false)
if len(args) > 0 {
p.version = args[0].Token.String()
} else {
p.err.Errorf("expected version number after #version")
}
return
// TODO: support #pragma instead of silently ignoring it.
case ppPragma:
_ = p.getDirectiveArguments(info, false)
return
case ppExtension:
args := p.getDirectiveArguments(info, false)
if p.skipping() {
return
}
if len(args) == 3 {
name, nameOk := args[0].Token.(Identifier)
colonOk := args[1].Token == OpColon
behaviour, behaviourOk := args[2].Token.(Identifier)
if nameOk && colonOk && behaviourOk {
extension := Extension{Name: name.String(), Behaviour: behaviour.String()}
p.extensions = append(p.extensions, extension)
return
}
}
p.err.Errorf("#extension should have the form '#extension name : behaviour'")
return
case ppIfdef, ppIfndef:
args := p.getDirectiveArguments(info, false)
if p.skipping() {
// Skip both of the branches if the parent condition evaluated to false.
p.ifStack = append(p.ifStack, ifEntry{Skipping: true, SkipElse: true})
return
}
var defined bool
if args == nil {
defined = false
} else {
_, defined = p.macros[args[0].Token.String()]
}
value := defined == (info.Token == ppIfdef)
p.ifStack = append(p.ifStack, ifEntry{Skipping: !value, SkipElse: value})
case ppElse:
_ = p.getDirectiveArguments(info, true)
if len(p.ifStack) == 0 {
p.err.Errorf("Unmatched #else.")
return
}
entry := &p.ifStack[len(p.ifStack)-1]
if entry.HadElse {
p.err.Errorf("#if directive has multiple #else directives.")
entry.Skipping = true
return
}
entry.HadElse = true
entry.Skipping = entry.SkipElse
case ppEndif:
_ = p.getDirectiveArguments(info, true)
if len(p.ifStack) == 0 {
p.err.Errorf("Unmatched #endif.")
return
}
p.ifStack = p.ifStack[:len(p.ifStack)-1]
case ppLine:
args := p.getDirectiveArguments(info, true)
if len(args) != 1 && len(args) != 2 {
p.err.Errorf("expected line/file number after #line")
}
case ppError:
args := p.getDirectiveArguments(info, true)
if p.skipping() {
return
}
var msg bytes.Buffer
for _, i := range args {
i.Cst.Prefix().WriteTo(&msg)
msg.Write([]byte(i.Token.String()))
i.Cst.Suffix().WriteTo(&msg)
}
p.err.Errorf(msg.String())
}
}
func addBuiltinMacro(macros map[string]macroDefinition, name string, expander macroExpander) {
macros[name] = macroDefinition{
name: name,
definition: []macroExpander{expander},
}
}
func newPreprocessorImpl(data string, eval ExpressionEvaluator, file int) *preprocessorImpl {
p := &preprocessorImpl{
lexer: newLexer(fmt.Sprintf("File %v", file), data),
macros: make(map[string]macroDefinition),
evaluator: eval,
}
addBuiltinMacro(p.macros, "__LINE__", p.expandLine)
addBuiltinMacro(p.macros, "__FILE__", TokenInfo{Token: ast.IntValue(file)}.expand)
addBuiltinMacro(p.macros, "__VERSION__", TokenInfo{Token: ast.IntValue(300)}.expand)
addBuiltinMacro(p.macros, "GL_ES", TokenInfo{Token: ast.IntValue(1)}.expand)
return p
}
//////////////////////////// tokenReader interface /////////////////////////
func (p *preprocessorImpl) Peek() tokenExpansion {
for p.currentToken == nil {
// process any preprocessor directives
p.processDirectives()
tok := newTokenExpansion(p.lexer.Next())
p.line, _ = tok.Info.Cst.Token().Cursor()
if tok.Info.Token == nil {
if len(p.ifStack) > 0 {
p.err.Errorf("Unterminated #if directive at the end of file.")
}
p.currentToken = &tok
} else if !p.skipping() {
p.currentToken = &tok
}
}
return *p.currentToken
}
func (p *preprocessorImpl) Next() tokenExpansion {
ret := p.Peek()
p.currentToken = nil
return ret
}
//////////////////////////// worker interface /////////////////////////
func (p *preprocessorImpl) Work() []tokenExpansion {
return p.processMacro(p.Next(), p)
}
| Extensions | identifier_name |
auto_chords.go | package examples
import (
"fmt"
"math"
"math/rand"
"os"
"sort"
"github.com/rwelin/aujo"
)
type intervals []float64
func (i intervals) Equal(j intervals) bool {
for k := range i {
if i[k] != j[k] {
return false
}
}
return true
}
func (i intervals) Add(g float64) intervals {
var ret intervals
for _, f := range i {
ret = append(ret, f+g)
}
return ret
}
func (i intervals) Expand() intervals {
var exp []float64
for _, f := range i {
exp = append(exp, f, f-12, f+12, f-24)
}
sort.Float64s(exp)
return intervals(exp)
}
var (
s_Major = intervals{0, 2, 4, 5, 7, 9, 11}
s_Minor = intervals{0, 2, 3, 5, 7, 8, 10, 11}
)
var (
c_I = intervals{0, 4, 7}
c_iii7 = intervals{2, 4, 7, 11}
c_vi7 = intervals{-3, 0, 4, 7}
c_I6 = intervals{0, 4, 7, 9}
c_ii7 = intervals{0, 2, 5, 9}
c_IV7 = intervals{0, 4, 5, 9}
c_V7 = intervals{2, 5, 7, 11}
c_vii_dim_7 = intervals{-1, 2, 5, 9}
c_ii7b5 = intervals{0, 2, 5, 8}
c_bIII_aug_7 = intervals{2, 3, 7, 11}
c_V7sus4 = intervals{2, 5, 7, 12}
c_i = intervals{0, 3, 7}
c_ii_dim_7 = intervals{0, 2, 5, 8}
c_III_7 = intervals{2, 3, 7, 10}
c_iv7 = intervals{0, 3, 5, 8}
c_VI7 = intervals{0, 3, 7, 8}
c_vii_7 = intervals{2, 5, 8, 10}
)
var tonicChords = []intervals{
c_I,
}
var subDominantChords = []intervals{
c_ii7,
c_IV7,
}
var dominantChords = []intervals{
c_V7,
c_vii_dim_7,
}
var otherChords = []intervals{
c_iii7,
c_vi7,
}
var minorTonicChords = []intervals{
c_i,
}
var minorSubDominantChords = []intervals{
c_ii_dim_7,
c_iv7,
}
var minorDominantChords = []intervals{
c_V7,
c_vii_dim_7,
}
var minorOtherChords = []intervals{
c_III_7,
c_VI7,
c_vii_7,
}
const (
funcTonic = iota
funcSubDom
funcDom
funcOther
funcMax
)
var functionMap = map[int][]intervals{
funcTonic: tonicChords,
funcSubDom: subDominantChords,
funcDom: dominantChords,
funcOther: otherChords,
}
var minorFunctionMap = map[int][]intervals{
funcTonic: minorTonicChords,
funcSubDom: minorSubDominantChords,
funcDom: minorDominantChords,
funcOther: minorOtherChords,
}
func | (minor bool, function int) []intervals {
if minor {
return nil
} else {
switch function {
case funcTonic:
return []intervals{
c_vi7,
c_I6,
}
case funcDom:
return []intervals{
c_ii7b5,
c_bIII_aug_7,
c_V7sus4,
}
}
}
return nil
}
func middleFunctions(prevFunction int, nextFunction int) []int {
pot := make(map[int]struct{})
fromPrev := followingFunctions(prevFunction)
for _, f := range fromPrev {
p := followingFunctions(f)
for _, g := range p {
if g == nextFunction {
pot[f] = struct{}{}
break
}
}
}
var funcs []int
for f := range pot {
funcs = append(funcs, f)
}
return funcs
}
func followingFunctions(currentFunction int) []int {
var funcs []int
switch currentFunction {
case funcTonic:
funcs = []int{funcSubDom, funcDom, funcOther}
case funcSubDom:
funcs = []int{funcTonic, funcDom, funcOther}
case funcDom:
funcs = []int{funcTonic}
case funcOther:
funcs = []int{funcTonic, funcSubDom, funcDom}
default:
panic("no")
}
return funcs
}
func nextFunction(currentFunction int) int {
funcs := followingFunctions(currentFunction)
return funcs[rand.Intn(len(funcs))]
}
type nextChordResult struct {
Function int
MinDistChord intervals
CanModulateUp bool
CanModulateDown bool
CanModulateRelative bool
}
func (a *autoChord) nextChord(prevFunction int, prevChord intervals, exclude []intervals) nextChordResult {
scale := s_Major.Add(a.TonicPitch).Expand()
if a.Minor {
scale = s_Minor.Add(a.TonicPitch).Expand()
}
var chromaticNote float64
var requiredNotes []float64
for i := range prevChord {
found := false
for j := range scale {
if prevChord[i] == scale[j] {
found = true
break
} else if prevChord[i] < scale[j] {
break
}
}
if !found {
chromaticNote = prevChord[i]
break
}
}
if chromaticNote != 0 {
r := float64(0)
for i := len(scale) - 1; i >= 0; i-- {
if r == 0 ||
math.Abs(scale[i]-chromaticNote) < math.Abs(r-chromaticNote) {
r = scale[i]
}
}
requiredNotes = append(requiredNotes, r)
}
for attempts := 0; ; attempts++ {
function := nextFunction(prevFunction)
chords := functionMap[function]
if a.Minor {
chords = minorFunctionMap[function]
}
if a.Modulate != modulationChance {
chords = append(chords, substituteChords(a.Minor, function)...)
}
c := chords[rand.Intn(len(chords))]
minDistChord := findMinDistChord(prevChord, c.Add(a.TonicPitch), requiredNotes)
inExclude := false
for _, e := range exclude {
eq := true
for i := range minDistChord {
if e[i] != minDistChord[i] {
eq = false
break
}
}
if eq {
inExclude = true
break
}
}
foundRequiredNote := len(requiredNotes)
if foundRequiredNote > 0 {
for _, r := range requiredNotes {
for _, n := range minDistChord {
if n == r {
foundRequiredNote--
break
}
}
}
}
if (foundRequiredNote == 0 && !inExclude) ||
(attempts > 50 && (foundRequiredNote == 0 || !inExclude)) ||
attempts > 100 {
return nextChordResult{
Function: function,
MinDistChord: minDistChord,
//CanModulateUp: c.Equal(c_vi7),
//CanModulateDown: c.Equal(c_ii7),
//CanModulateUp: c.Equal(c_vi7) || c.Equal(c_I7) || c.Equal(c_iii7),
CanModulateDown: c.Equal(c_ii7) || c.Equal(c_IV7) || c.Equal(c_vi7),
CanModulateRelative: a.Minor && c.Equal(c_vii_7) || !a.Minor && c.Equal(c_V7),
}
}
}
}
func events(chord []float64, offset int64) []aujo.Event {
var events []aujo.Event
for i, f := range chord {
e := aujo.Event{
Time: offset + 2000*int64(i),
Voice: 3,
Type: aujo.EventOn,
Pitch: f,
}
events = append(events, e)
}
return events
}
func findMinDistChord(prevChord intervals, nextChord intervals, requiredNotes []float64) intervals {
exp := nextChord.Expand()
const numChordNotes = 4
minDist := float64(0)
var minDistChord []float64
for i := 0; i <= len(exp)-numChordNotes; i++ {
c1 := exp[i : i+numChordNotes]
var dist float64
for j := 0; j < len(c1); j++ {
d := 10 + math.Pow(prevChord[j]-c1[j], 2) + math.Pow((c1[j]-57)/5, 2)
dist += math.Sqrt(d)
}
for j := 0; j < len(requiredNotes); j++ {
found := false
for k := 0; k < len(c1); k++ {
if c1[k] == requiredNotes[j] {
found = true
}
}
if !found {
dist += 100
}
}
if minDistChord == nil || dist < minDist {
minDist = dist
minDistChord = c1
}
}
return minDistChord
}
const chordDuration = 96000
const microOffset = 0.5
func (a *autoChord) progression(reps int, length int, prevBass float64, prevFunction int, prevChord intervals) (es []aujo.Event, lastBass float64, lastFunction int, lastChord intervals) {
var cc []nextChordResult
for i := 0; i < length; i++ {
excludeChords := a.LastProgression
for _, c := range cc {
excludeChords = append(excludeChords, c.MinDistChord)
}
res := a.nextChord(prevFunction, prevChord, excludeChords)
if i%2 == 1 {
res.MinDistChord = res.MinDistChord.Add(microOffset)
}
cc = append(cc, res)
prevChord = res.MinDistChord
prevFunction = res.Function
}
a.LastProgression = a.LastProgression[:0]
for _, c := range cc {
a.LastProgression = append(a.LastProgression, c.MinDistChord)
}
var prevPrevBass float64
fmt.Fprintln(os.Stderr, "TONIC", a.TonicPitch)
for i := 0; i < reps; i++ {
for j := 0; j < len(cc); j++ {
c := cc[j].MinDistChord
f := cc[j].Function
if i == reps-1 && j == len(cc)-1 {
penultimateChord := cc[j-1]
if (penultimateChord.CanModulateUp || penultimateChord.CanModulateDown) &&
rand.Intn(a.Modulate) == 0 {
if penultimateChord.CanModulateUp {
fmt.Fprintln(os.Stderr, "MODULATE UP")
a.TonicPitch += 7
if a.TonicPitch > 69 {
a.TonicPitch -= 12
}
} else if penultimateChord.CanModulateDown {
fmt.Fprintln(os.Stderr, "MODULATE DOWN")
a.TonicPitch -= 7
if a.TonicPitch < 69 {
a.TonicPitch += 12
}
}
c = findMinDistChord(penultimateChord.MinDistChord, c_V7.Add(a.TonicPitch), nil)
f = funcDom
prevChord = c
prevFunction = funcDom
a.Modulate = modulationChance
} else if cc[j].CanModulateRelative && rand.Intn(a.Modulate) == 0 {
if a.Minor {
fmt.Fprintln(os.Stderr, "MODULATE RELATIVE MAJOR")
a.Minor = false
a.TonicPitch += 3
if a.TonicPitch > 69 {
a.TonicPitch -= 12
}
} else {
fmt.Fprintln(os.Stderr, "MODULATE RELATIVE MINOR")
a.Minor = true
a.TonicPitch -= 3
if a.TonicPitch < 66 {
a.TonicPitch += 12
}
}
prevFunction = funcDom
a.Modulate = modulationChance
}
}
var bass, bass1 float64
for {
getBassNote := func(prev float64, prevPrev float64) float64 {
for {
n := c[rand.Intn(len(c))]
for n > 45 {
n -= 12
}
for n < 29 {
n += 12
}
if n != prev && n != prevPrev {
return n
}
}
}
bass = getBassNote(prevBass, prevPrevBass)
if bass-prevBass <= 3 && bass-prevBass > 0 {
bass1 = bass - 1
} else if prevBass-bass < 3 && prevBass-bass > 0 {
bass1 = bass + 1
} else {
bass1 = getBassNote(bass, prevBass)
}
prevBass, prevPrevBass = bass, prevBass
break
}
chordTime := int64(i*len(cc)+j) * chordDuration
if bass1 != 0 {
if i%2 == 0 {
bass1 -= microOffset
}
walkingBassTime := chordTime - chordDuration/2
es = append(es, events([]float64{bass1}, walkingBassTime)...)
}
fmt.Fprintln(os.Stderr, bass1, bass, c, f)
es = append(es, events(append([]float64{bass}, c...), chordTime)...)
}
}
fmt.Fprintln(os.Stderr)
if a.Modulate > 1 {
a.Modulate--
}
return es, prevBass, prevFunction, prevChord
}
func (a *autoChord) seq(prevBass float64, prevFunction int, prevChord intervals) *aujo.Sequence {
e, prevBass, prevFunction, prevChord := a.progression(2, 4, prevBass, prevFunction, prevChord)
return &aujo.Sequence{
Events: append(e, aujo.Event{
Time: e[len(e)-1].Time + chordDuration,
Func: func(m *aujo.Mix) {
m.SetNextSequence(a.seq(prevBass, prevFunction, prevChord))
},
}),
}
}
type autoChord struct {
TonicPitch float64
LastProgression []intervals
Modulate int
Minor bool
}
const modulationChance = 2
func AutoChords() *aujo.Sequence {
a := &autoChord{
TonicPitch: 69,
Modulate: modulationChance,
Minor: false,
}
return a.seq(0, funcDom, c_V7.Add(a.TonicPitch-12))
}
| substituteChords | identifier_name |
auto_chords.go | package examples
import (
"fmt"
"math"
"math/rand"
"os"
"sort"
"github.com/rwelin/aujo"
)
type intervals []float64
func (i intervals) Equal(j intervals) bool {
for k := range i {
if i[k] != j[k] {
return false
}
}
return true
}
func (i intervals) Add(g float64) intervals {
var ret intervals
for _, f := range i {
ret = append(ret, f+g)
}
return ret
}
func (i intervals) Expand() intervals {
var exp []float64
for _, f := range i {
exp = append(exp, f, f-12, f+12, f-24)
}
sort.Float64s(exp)
return intervals(exp)
}
var (
s_Major = intervals{0, 2, 4, 5, 7, 9, 11}
s_Minor = intervals{0, 2, 3, 5, 7, 8, 10, 11}
)
var (
c_I = intervals{0, 4, 7}
c_iii7 = intervals{2, 4, 7, 11}
c_vi7 = intervals{-3, 0, 4, 7}
c_I6 = intervals{0, 4, 7, 9}
c_ii7 = intervals{0, 2, 5, 9}
c_IV7 = intervals{0, 4, 5, 9}
c_V7 = intervals{2, 5, 7, 11}
c_vii_dim_7 = intervals{-1, 2, 5, 9}
c_ii7b5 = intervals{0, 2, 5, 8}
c_bIII_aug_7 = intervals{2, 3, 7, 11}
c_V7sus4 = intervals{2, 5, 7, 12}
c_i = intervals{0, 3, 7}
c_ii_dim_7 = intervals{0, 2, 5, 8}
c_III_7 = intervals{2, 3, 7, 10}
c_iv7 = intervals{0, 3, 5, 8}
c_VI7 = intervals{0, 3, 7, 8}
c_vii_7 = intervals{2, 5, 8, 10}
)
var tonicChords = []intervals{
c_I,
}
var subDominantChords = []intervals{
c_ii7,
c_IV7,
}
var dominantChords = []intervals{
c_V7,
c_vii_dim_7,
}
var otherChords = []intervals{
c_iii7,
c_vi7,
}
var minorTonicChords = []intervals{
c_i,
}
var minorSubDominantChords = []intervals{
c_ii_dim_7,
c_iv7,
}
var minorDominantChords = []intervals{
c_V7,
c_vii_dim_7,
}
var minorOtherChords = []intervals{
c_III_7,
c_VI7,
c_vii_7,
}
const (
funcTonic = iota
funcSubDom
funcDom
funcOther
funcMax
)
var functionMap = map[int][]intervals{
funcTonic: tonicChords,
funcSubDom: subDominantChords,
funcDom: dominantChords,
funcOther: otherChords,
}
var minorFunctionMap = map[int][]intervals{
funcTonic: minorTonicChords,
funcSubDom: minorSubDominantChords,
funcDom: minorDominantChords,
funcOther: minorOtherChords,
}
func substituteChords(minor bool, function int) []intervals {
if minor {
return nil
} else {
switch function {
case funcTonic:
return []intervals{
c_vi7,
c_I6,
}
case funcDom:
return []intervals{
c_ii7b5,
c_bIII_aug_7,
c_V7sus4,
}
}
}
return nil
}
func middleFunctions(prevFunction int, nextFunction int) []int {
pot := make(map[int]struct{})
fromPrev := followingFunctions(prevFunction)
for _, f := range fromPrev {
p := followingFunctions(f)
for _, g := range p {
if g == nextFunction {
pot[f] = struct{}{}
break
}
}
}
var funcs []int
for f := range pot {
funcs = append(funcs, f)
}
return funcs
}
func followingFunctions(currentFunction int) []int {
var funcs []int
switch currentFunction {
case funcTonic:
funcs = []int{funcSubDom, funcDom, funcOther}
case funcSubDom:
funcs = []int{funcTonic, funcDom, funcOther}
case funcDom:
funcs = []int{funcTonic}
case funcOther:
funcs = []int{funcTonic, funcSubDom, funcDom}
default:
panic("no")
}
return funcs
}
func nextFunction(currentFunction int) int {
funcs := followingFunctions(currentFunction)
return funcs[rand.Intn(len(funcs))]
}
type nextChordResult struct {
Function int
MinDistChord intervals
CanModulateUp bool
CanModulateDown bool
CanModulateRelative bool
}
func (a *autoChord) nextChord(prevFunction int, prevChord intervals, exclude []intervals) nextChordResult |
func events(chord []float64, offset int64) []aujo.Event {
var events []aujo.Event
for i, f := range chord {
e := aujo.Event{
Time: offset + 2000*int64(i),
Voice: 3,
Type: aujo.EventOn,
Pitch: f,
}
events = append(events, e)
}
return events
}
func findMinDistChord(prevChord intervals, nextChord intervals, requiredNotes []float64) intervals {
exp := nextChord.Expand()
const numChordNotes = 4
minDist := float64(0)
var minDistChord []float64
for i := 0; i <= len(exp)-numChordNotes; i++ {
c1 := exp[i : i+numChordNotes]
var dist float64
for j := 0; j < len(c1); j++ {
d := 10 + math.Pow(prevChord[j]-c1[j], 2) + math.Pow((c1[j]-57)/5, 2)
dist += math.Sqrt(d)
}
for j := 0; j < len(requiredNotes); j++ {
found := false
for k := 0; k < len(c1); k++ {
if c1[k] == requiredNotes[j] {
found = true
}
}
if !found {
dist += 100
}
}
if minDistChord == nil || dist < minDist {
minDist = dist
minDistChord = c1
}
}
return minDistChord
}
const chordDuration = 96000
const microOffset = 0.5
func (a *autoChord) progression(reps int, length int, prevBass float64, prevFunction int, prevChord intervals) (es []aujo.Event, lastBass float64, lastFunction int, lastChord intervals) {
var cc []nextChordResult
for i := 0; i < length; i++ {
excludeChords := a.LastProgression
for _, c := range cc {
excludeChords = append(excludeChords, c.MinDistChord)
}
res := a.nextChord(prevFunction, prevChord, excludeChords)
if i%2 == 1 {
res.MinDistChord = res.MinDistChord.Add(microOffset)
}
cc = append(cc, res)
prevChord = res.MinDistChord
prevFunction = res.Function
}
a.LastProgression = a.LastProgression[:0]
for _, c := range cc {
a.LastProgression = append(a.LastProgression, c.MinDistChord)
}
var prevPrevBass float64
fmt.Fprintln(os.Stderr, "TONIC", a.TonicPitch)
for i := 0; i < reps; i++ {
for j := 0; j < len(cc); j++ {
c := cc[j].MinDistChord
f := cc[j].Function
if i == reps-1 && j == len(cc)-1 {
penultimateChord := cc[j-1]
if (penultimateChord.CanModulateUp || penultimateChord.CanModulateDown) &&
rand.Intn(a.Modulate) == 0 {
if penultimateChord.CanModulateUp {
fmt.Fprintln(os.Stderr, "MODULATE UP")
a.TonicPitch += 7
if a.TonicPitch > 69 {
a.TonicPitch -= 12
}
} else if penultimateChord.CanModulateDown {
fmt.Fprintln(os.Stderr, "MODULATE DOWN")
a.TonicPitch -= 7
if a.TonicPitch < 69 {
a.TonicPitch += 12
}
}
c = findMinDistChord(penultimateChord.MinDistChord, c_V7.Add(a.TonicPitch), nil)
f = funcDom
prevChord = c
prevFunction = funcDom
a.Modulate = modulationChance
} else if cc[j].CanModulateRelative && rand.Intn(a.Modulate) == 0 {
if a.Minor {
fmt.Fprintln(os.Stderr, "MODULATE RELATIVE MAJOR")
a.Minor = false
a.TonicPitch += 3
if a.TonicPitch > 69 {
a.TonicPitch -= 12
}
} else {
fmt.Fprintln(os.Stderr, "MODULATE RELATIVE MINOR")
a.Minor = true
a.TonicPitch -= 3
if a.TonicPitch < 66 {
a.TonicPitch += 12
}
}
prevFunction = funcDom
a.Modulate = modulationChance
}
}
var bass, bass1 float64
for {
getBassNote := func(prev float64, prevPrev float64) float64 {
for {
n := c[rand.Intn(len(c))]
for n > 45 {
n -= 12
}
for n < 29 {
n += 12
}
if n != prev && n != prevPrev {
return n
}
}
}
bass = getBassNote(prevBass, prevPrevBass)
if bass-prevBass <= 3 && bass-prevBass > 0 {
bass1 = bass - 1
} else if prevBass-bass < 3 && prevBass-bass > 0 {
bass1 = bass + 1
} else {
bass1 = getBassNote(bass, prevBass)
}
prevBass, prevPrevBass = bass, prevBass
break
}
chordTime := int64(i*len(cc)+j) * chordDuration
if bass1 != 0 {
if i%2 == 0 {
bass1 -= microOffset
}
walkingBassTime := chordTime - chordDuration/2
es = append(es, events([]float64{bass1}, walkingBassTime)...)
}
fmt.Fprintln(os.Stderr, bass1, bass, c, f)
es = append(es, events(append([]float64{bass}, c...), chordTime)...)
}
}
fmt.Fprintln(os.Stderr)
if a.Modulate > 1 {
a.Modulate--
}
return es, prevBass, prevFunction, prevChord
}
func (a *autoChord) seq(prevBass float64, prevFunction int, prevChord intervals) *aujo.Sequence {
e, prevBass, prevFunction, prevChord := a.progression(2, 4, prevBass, prevFunction, prevChord)
return &aujo.Sequence{
Events: append(e, aujo.Event{
Time: e[len(e)-1].Time + chordDuration,
Func: func(m *aujo.Mix) {
m.SetNextSequence(a.seq(prevBass, prevFunction, prevChord))
},
}),
}
}
type autoChord struct {
TonicPitch float64
LastProgression []intervals
Modulate int
Minor bool
}
const modulationChance = 2
func AutoChords() *aujo.Sequence {
a := &autoChord{
TonicPitch: 69,
Modulate: modulationChance,
Minor: false,
}
return a.seq(0, funcDom, c_V7.Add(a.TonicPitch-12))
}
| {
scale := s_Major.Add(a.TonicPitch).Expand()
if a.Minor {
scale = s_Minor.Add(a.TonicPitch).Expand()
}
var chromaticNote float64
var requiredNotes []float64
for i := range prevChord {
found := false
for j := range scale {
if prevChord[i] == scale[j] {
found = true
break
} else if prevChord[i] < scale[j] {
break
}
}
if !found {
chromaticNote = prevChord[i]
break
}
}
if chromaticNote != 0 {
r := float64(0)
for i := len(scale) - 1; i >= 0; i-- {
if r == 0 ||
math.Abs(scale[i]-chromaticNote) < math.Abs(r-chromaticNote) {
r = scale[i]
}
}
requiredNotes = append(requiredNotes, r)
}
for attempts := 0; ; attempts++ {
function := nextFunction(prevFunction)
chords := functionMap[function]
if a.Minor {
chords = minorFunctionMap[function]
}
if a.Modulate != modulationChance {
chords = append(chords, substituteChords(a.Minor, function)...)
}
c := chords[rand.Intn(len(chords))]
minDistChord := findMinDistChord(prevChord, c.Add(a.TonicPitch), requiredNotes)
inExclude := false
for _, e := range exclude {
eq := true
for i := range minDistChord {
if e[i] != minDistChord[i] {
eq = false
break
}
}
if eq {
inExclude = true
break
}
}
foundRequiredNote := len(requiredNotes)
if foundRequiredNote > 0 {
for _, r := range requiredNotes {
for _, n := range minDistChord {
if n == r {
foundRequiredNote--
break
}
}
}
}
if (foundRequiredNote == 0 && !inExclude) ||
(attempts > 50 && (foundRequiredNote == 0 || !inExclude)) ||
attempts > 100 {
return nextChordResult{
Function: function,
MinDistChord: minDistChord,
//CanModulateUp: c.Equal(c_vi7),
//CanModulateDown: c.Equal(c_ii7),
//CanModulateUp: c.Equal(c_vi7) || c.Equal(c_I7) || c.Equal(c_iii7),
CanModulateDown: c.Equal(c_ii7) || c.Equal(c_IV7) || c.Equal(c_vi7),
CanModulateRelative: a.Minor && c.Equal(c_vii_7) || !a.Minor && c.Equal(c_V7),
}
}
}
} | identifier_body |
auto_chords.go | package examples
import (
"fmt"
"math"
"math/rand"
"os"
"sort"
"github.com/rwelin/aujo"
)
type intervals []float64
func (i intervals) Equal(j intervals) bool {
for k := range i {
if i[k] != j[k] {
return false
}
}
return true
}
func (i intervals) Add(g float64) intervals {
var ret intervals
for _, f := range i {
ret = append(ret, f+g)
}
return ret
}
func (i intervals) Expand() intervals {
var exp []float64
for _, f := range i {
exp = append(exp, f, f-12, f+12, f-24)
}
sort.Float64s(exp)
return intervals(exp)
}
var (
s_Major = intervals{0, 2, 4, 5, 7, 9, 11}
s_Minor = intervals{0, 2, 3, 5, 7, 8, 10, 11}
)
var (
c_I = intervals{0, 4, 7}
c_iii7 = intervals{2, 4, 7, 11}
c_vi7 = intervals{-3, 0, 4, 7}
c_I6 = intervals{0, 4, 7, 9}
c_ii7 = intervals{0, 2, 5, 9}
c_IV7 = intervals{0, 4, 5, 9}
c_V7 = intervals{2, 5, 7, 11}
c_vii_dim_7 = intervals{-1, 2, 5, 9}
c_ii7b5 = intervals{0, 2, 5, 8}
c_bIII_aug_7 = intervals{2, 3, 7, 11}
c_V7sus4 = intervals{2, 5, 7, 12}
c_i = intervals{0, 3, 7}
c_ii_dim_7 = intervals{0, 2, 5, 8}
c_III_7 = intervals{2, 3, 7, 10}
c_iv7 = intervals{0, 3, 5, 8}
c_VI7 = intervals{0, 3, 7, 8}
c_vii_7 = intervals{2, 5, 8, 10}
)
var tonicChords = []intervals{
c_I,
}
var subDominantChords = []intervals{
c_ii7,
c_IV7,
}
var dominantChords = []intervals{
c_V7,
c_vii_dim_7,
}
var otherChords = []intervals{
c_iii7,
c_vi7,
}
var minorTonicChords = []intervals{
c_i,
}
var minorSubDominantChords = []intervals{
c_ii_dim_7,
c_iv7,
}
var minorDominantChords = []intervals{
c_V7,
c_vii_dim_7,
}
var minorOtherChords = []intervals{
c_III_7,
c_VI7,
c_vii_7,
}
const (
funcTonic = iota
funcSubDom
funcDom
funcOther
funcMax
)
var functionMap = map[int][]intervals{
funcTonic: tonicChords,
funcSubDom: subDominantChords,
funcDom: dominantChords,
funcOther: otherChords,
}
var minorFunctionMap = map[int][]intervals{
funcTonic: minorTonicChords,
funcSubDom: minorSubDominantChords,
funcDom: minorDominantChords,
funcOther: minorOtherChords,
}
func substituteChords(minor bool, function int) []intervals {
if minor {
return nil
} else {
switch function {
case funcTonic:
return []intervals{
c_vi7,
c_I6,
}
case funcDom:
return []intervals{
c_ii7b5,
c_bIII_aug_7,
c_V7sus4,
}
}
}
return nil
}
func middleFunctions(prevFunction int, nextFunction int) []int {
pot := make(map[int]struct{})
fromPrev := followingFunctions(prevFunction)
for _, f := range fromPrev {
p := followingFunctions(f)
for _, g := range p {
if g == nextFunction {
pot[f] = struct{}{}
break
}
}
}
var funcs []int
for f := range pot {
funcs = append(funcs, f)
}
return funcs
}
func followingFunctions(currentFunction int) []int {
var funcs []int
switch currentFunction {
case funcTonic:
funcs = []int{funcSubDom, funcDom, funcOther}
case funcSubDom:
funcs = []int{funcTonic, funcDom, funcOther}
case funcDom:
funcs = []int{funcTonic}
case funcOther:
funcs = []int{funcTonic, funcSubDom, funcDom}
default:
panic("no")
}
return funcs
}
func nextFunction(currentFunction int) int {
funcs := followingFunctions(currentFunction)
return funcs[rand.Intn(len(funcs))]
}
type nextChordResult struct {
Function int
MinDistChord intervals
CanModulateUp bool
CanModulateDown bool
CanModulateRelative bool
}
func (a *autoChord) nextChord(prevFunction int, prevChord intervals, exclude []intervals) nextChordResult {
scale := s_Major.Add(a.TonicPitch).Expand()
if a.Minor {
scale = s_Minor.Add(a.TonicPitch).Expand()
}
var chromaticNote float64
var requiredNotes []float64
for i := range prevChord {
found := false
for j := range scale {
if prevChord[i] == scale[j] {
found = true
break
} else if prevChord[i] < scale[j] {
break
}
}
if !found {
chromaticNote = prevChord[i]
break
}
}
if chromaticNote != 0 {
r := float64(0)
for i := len(scale) - 1; i >= 0; i-- {
if r == 0 ||
math.Abs(scale[i]-chromaticNote) < math.Abs(r-chromaticNote) {
r = scale[i]
}
}
requiredNotes = append(requiredNotes, r)
}
for attempts := 0; ; attempts++ {
function := nextFunction(prevFunction)
chords := functionMap[function]
if a.Minor {
chords = minorFunctionMap[function]
}
if a.Modulate != modulationChance {
chords = append(chords, substituteChords(a.Minor, function)...)
}
c := chords[rand.Intn(len(chords))]
minDistChord := findMinDistChord(prevChord, c.Add(a.TonicPitch), requiredNotes)
inExclude := false
for _, e := range exclude {
eq := true
for i := range minDistChord {
if e[i] != minDistChord[i] {
eq = false
break
}
}
if eq {
inExclude = true
break
}
}
foundRequiredNote := len(requiredNotes)
if foundRequiredNote > 0 {
for _, r := range requiredNotes {
for _, n := range minDistChord {
if n == r {
foundRequiredNote--
break
}
}
}
}
if (foundRequiredNote == 0 && !inExclude) ||
(attempts > 50 && (foundRequiredNote == 0 || !inExclude)) ||
attempts > 100 {
return nextChordResult{
Function: function,
MinDistChord: minDistChord,
//CanModulateUp: c.Equal(c_vi7),
//CanModulateDown: c.Equal(c_ii7),
//CanModulateUp: c.Equal(c_vi7) || c.Equal(c_I7) || c.Equal(c_iii7),
CanModulateDown: c.Equal(c_ii7) || c.Equal(c_IV7) || c.Equal(c_vi7),
CanModulateRelative: a.Minor && c.Equal(c_vii_7) || !a.Minor && c.Equal(c_V7),
}
}
}
}
func events(chord []float64, offset int64) []aujo.Event {
var events []aujo.Event
for i, f := range chord {
e := aujo.Event{
Time: offset + 2000*int64(i),
Voice: 3,
Type: aujo.EventOn,
Pitch: f,
}
events = append(events, e)
}
return events
}
func findMinDistChord(prevChord intervals, nextChord intervals, requiredNotes []float64) intervals {
exp := nextChord.Expand()
const numChordNotes = 4
minDist := float64(0)
var minDistChord []float64
for i := 0; i <= len(exp)-numChordNotes; i++ {
c1 := exp[i : i+numChordNotes]
var dist float64
for j := 0; j < len(c1); j++ {
d := 10 + math.Pow(prevChord[j]-c1[j], 2) + math.Pow((c1[j]-57)/5, 2)
dist += math.Sqrt(d)
}
for j := 0; j < len(requiredNotes); j++ {
found := false
for k := 0; k < len(c1); k++ {
if c1[k] == requiredNotes[j] {
found = true
}
}
if !found {
dist += 100
}
}
if minDistChord == nil || dist < minDist {
minDist = dist
minDistChord = c1
}
}
return minDistChord
}
const chordDuration = 96000
const microOffset = 0.5
func (a *autoChord) progression(reps int, length int, prevBass float64, prevFunction int, prevChord intervals) (es []aujo.Event, lastBass float64, lastFunction int, lastChord intervals) {
var cc []nextChordResult
for i := 0; i < length; i++ {
excludeChords := a.LastProgression
for _, c := range cc {
excludeChords = append(excludeChords, c.MinDistChord)
}
res := a.nextChord(prevFunction, prevChord, excludeChords)
if i%2 == 1 {
res.MinDistChord = res.MinDistChord.Add(microOffset)
}
cc = append(cc, res)
prevChord = res.MinDistChord
prevFunction = res.Function
}
a.LastProgression = a.LastProgression[:0]
for _, c := range cc {
a.LastProgression = append(a.LastProgression, c.MinDistChord)
}
var prevPrevBass float64
fmt.Fprintln(os.Stderr, "TONIC", a.TonicPitch)
for i := 0; i < reps; i++ {
for j := 0; j < len(cc); j++ {
c := cc[j].MinDistChord
f := cc[j].Function
if i == reps-1 && j == len(cc)-1 {
penultimateChord := cc[j-1]
if (penultimateChord.CanModulateUp || penultimateChord.CanModulateDown) &&
rand.Intn(a.Modulate) == 0 {
if penultimateChord.CanModulateUp {
fmt.Fprintln(os.Stderr, "MODULATE UP")
a.TonicPitch += 7
if a.TonicPitch > 69 {
a.TonicPitch -= 12
}
} else if penultimateChord.CanModulateDown {
fmt.Fprintln(os.Stderr, "MODULATE DOWN")
a.TonicPitch -= 7
if a.TonicPitch < 69 {
a.TonicPitch += 12
}
}
c = findMinDistChord(penultimateChord.MinDistChord, c_V7.Add(a.TonicPitch), nil)
f = funcDom
prevChord = c
prevFunction = funcDom
a.Modulate = modulationChance
} else if cc[j].CanModulateRelative && rand.Intn(a.Modulate) == 0 {
if a.Minor {
fmt.Fprintln(os.Stderr, "MODULATE RELATIVE MAJOR")
a.Minor = false
a.TonicPitch += 3
if a.TonicPitch > 69 {
a.TonicPitch -= 12
}
} else {
fmt.Fprintln(os.Stderr, "MODULATE RELATIVE MINOR")
a.Minor = true
a.TonicPitch -= 3
if a.TonicPitch < 66 {
a.TonicPitch += 12
}
}
prevFunction = funcDom
a.Modulate = modulationChance
}
}
var bass, bass1 float64
for {
getBassNote := func(prev float64, prevPrev float64) float64 {
for {
n := c[rand.Intn(len(c))]
for n > 45 |
for n < 29 {
n += 12
}
if n != prev && n != prevPrev {
return n
}
}
}
bass = getBassNote(prevBass, prevPrevBass)
if bass-prevBass <= 3 && bass-prevBass > 0 {
bass1 = bass - 1
} else if prevBass-bass < 3 && prevBass-bass > 0 {
bass1 = bass + 1
} else {
bass1 = getBassNote(bass, prevBass)
}
prevBass, prevPrevBass = bass, prevBass
break
}
chordTime := int64(i*len(cc)+j) * chordDuration
if bass1 != 0 {
if i%2 == 0 {
bass1 -= microOffset
}
walkingBassTime := chordTime - chordDuration/2
es = append(es, events([]float64{bass1}, walkingBassTime)...)
}
fmt.Fprintln(os.Stderr, bass1, bass, c, f)
es = append(es, events(append([]float64{bass}, c...), chordTime)...)
}
}
fmt.Fprintln(os.Stderr)
if a.Modulate > 1 {
a.Modulate--
}
return es, prevBass, prevFunction, prevChord
}
func (a *autoChord) seq(prevBass float64, prevFunction int, prevChord intervals) *aujo.Sequence {
e, prevBass, prevFunction, prevChord := a.progression(2, 4, prevBass, prevFunction, prevChord)
return &aujo.Sequence{
Events: append(e, aujo.Event{
Time: e[len(e)-1].Time + chordDuration,
Func: func(m *aujo.Mix) {
m.SetNextSequence(a.seq(prevBass, prevFunction, prevChord))
},
}),
}
}
type autoChord struct {
TonicPitch float64
LastProgression []intervals
Modulate int
Minor bool
}
const modulationChance = 2
func AutoChords() *aujo.Sequence {
a := &autoChord{
TonicPitch: 69,
Modulate: modulationChance,
Minor: false,
}
return a.seq(0, funcDom, c_V7.Add(a.TonicPitch-12))
}
| {
n -= 12
} | conditional_block |
auto_chords.go | package examples
import (
"fmt"
"math"
"math/rand"
"os"
"sort"
"github.com/rwelin/aujo"
)
type intervals []float64
func (i intervals) Equal(j intervals) bool {
for k := range i {
if i[k] != j[k] {
return false
}
}
return true
}
func (i intervals) Add(g float64) intervals {
var ret intervals
for _, f := range i {
ret = append(ret, f+g)
}
return ret
}
func (i intervals) Expand() intervals {
var exp []float64
for _, f := range i {
exp = append(exp, f, f-12, f+12, f-24)
}
sort.Float64s(exp)
return intervals(exp)
}
var (
s_Major = intervals{0, 2, 4, 5, 7, 9, 11}
s_Minor = intervals{0, 2, 3, 5, 7, 8, 10, 11}
)
var (
c_I = intervals{0, 4, 7}
c_iii7 = intervals{2, 4, 7, 11}
c_vi7 = intervals{-3, 0, 4, 7}
c_I6 = intervals{0, 4, 7, 9}
c_ii7 = intervals{0, 2, 5, 9}
c_IV7 = intervals{0, 4, 5, 9}
c_V7 = intervals{2, 5, 7, 11}
c_vii_dim_7 = intervals{-1, 2, 5, 9}
c_ii7b5 = intervals{0, 2, 5, 8}
c_bIII_aug_7 = intervals{2, 3, 7, 11}
c_V7sus4 = intervals{2, 5, 7, 12}
c_i = intervals{0, 3, 7}
c_ii_dim_7 = intervals{0, 2, 5, 8}
c_III_7 = intervals{2, 3, 7, 10}
c_iv7 = intervals{0, 3, 5, 8}
c_VI7 = intervals{0, 3, 7, 8}
c_vii_7 = intervals{2, 5, 8, 10}
)
var tonicChords = []intervals{
c_I,
}
var subDominantChords = []intervals{
c_ii7,
c_IV7,
}
var dominantChords = []intervals{
c_V7,
c_vii_dim_7,
}
var otherChords = []intervals{
c_iii7,
c_vi7,
}
var minorTonicChords = []intervals{
c_i,
}
var minorSubDominantChords = []intervals{
c_ii_dim_7,
c_iv7,
}
var minorDominantChords = []intervals{
c_V7,
c_vii_dim_7,
}
var minorOtherChords = []intervals{
c_III_7,
c_VI7,
c_vii_7,
}
const (
funcTonic = iota
funcSubDom
funcDom
funcOther
funcMax
)
var functionMap = map[int][]intervals{
funcTonic: tonicChords,
funcSubDom: subDominantChords,
funcDom: dominantChords,
funcOther: otherChords,
}
var minorFunctionMap = map[int][]intervals{
funcTonic: minorTonicChords,
funcSubDom: minorSubDominantChords,
funcDom: minorDominantChords,
funcOther: minorOtherChords,
}
func substituteChords(minor bool, function int) []intervals {
if minor {
return nil
} else {
switch function {
case funcTonic:
return []intervals{
c_vi7,
c_I6,
}
case funcDom:
return []intervals{
c_ii7b5,
c_bIII_aug_7,
c_V7sus4,
}
}
}
return nil
}
func middleFunctions(prevFunction int, nextFunction int) []int {
pot := make(map[int]struct{})
fromPrev := followingFunctions(prevFunction)
for _, f := range fromPrev {
p := followingFunctions(f)
for _, g := range p {
if g == nextFunction {
pot[f] = struct{}{}
break
}
}
}
var funcs []int
for f := range pot {
funcs = append(funcs, f)
}
return funcs
}
func followingFunctions(currentFunction int) []int {
var funcs []int
switch currentFunction {
case funcTonic:
funcs = []int{funcSubDom, funcDom, funcOther}
case funcSubDom:
funcs = []int{funcTonic, funcDom, funcOther}
case funcDom:
funcs = []int{funcTonic}
case funcOther:
funcs = []int{funcTonic, funcSubDom, funcDom}
default:
panic("no")
}
return funcs
}
func nextFunction(currentFunction int) int {
funcs := followingFunctions(currentFunction)
return funcs[rand.Intn(len(funcs))]
}
type nextChordResult struct {
Function int
MinDistChord intervals
CanModulateUp bool
CanModulateDown bool
CanModulateRelative bool
}
func (a *autoChord) nextChord(prevFunction int, prevChord intervals, exclude []intervals) nextChordResult {
scale := s_Major.Add(a.TonicPitch).Expand()
if a.Minor {
scale = s_Minor.Add(a.TonicPitch).Expand()
}
var chromaticNote float64
var requiredNotes []float64
for i := range prevChord {
found := false
for j := range scale {
if prevChord[i] == scale[j] {
found = true
break
} else if prevChord[i] < scale[j] {
break
}
}
if !found {
chromaticNote = prevChord[i]
break
}
}
if chromaticNote != 0 {
r := float64(0)
for i := len(scale) - 1; i >= 0; i-- {
if r == 0 ||
math.Abs(scale[i]-chromaticNote) < math.Abs(r-chromaticNote) {
r = scale[i]
}
}
requiredNotes = append(requiredNotes, r)
}
for attempts := 0; ; attempts++ {
function := nextFunction(prevFunction)
chords := functionMap[function]
if a.Minor {
chords = minorFunctionMap[function]
}
if a.Modulate != modulationChance {
chords = append(chords, substituteChords(a.Minor, function)...)
}
c := chords[rand.Intn(len(chords))]
minDistChord := findMinDistChord(prevChord, c.Add(a.TonicPitch), requiredNotes)
inExclude := false
for _, e := range exclude {
eq := true
for i := range minDistChord {
if e[i] != minDistChord[i] {
eq = false
break
}
}
if eq {
inExclude = true
break
}
}
foundRequiredNote := len(requiredNotes)
if foundRequiredNote > 0 {
for _, r := range requiredNotes {
for _, n := range minDistChord {
if n == r {
foundRequiredNote--
break
}
}
}
}
if (foundRequiredNote == 0 && !inExclude) ||
(attempts > 50 && (foundRequiredNote == 0 || !inExclude)) ||
attempts > 100 {
return nextChordResult{
Function: function,
MinDistChord: minDistChord,
//CanModulateUp: c.Equal(c_vi7),
//CanModulateDown: c.Equal(c_ii7),
//CanModulateUp: c.Equal(c_vi7) || c.Equal(c_I7) || c.Equal(c_iii7),
CanModulateDown: c.Equal(c_ii7) || c.Equal(c_IV7) || c.Equal(c_vi7),
CanModulateRelative: a.Minor && c.Equal(c_vii_7) || !a.Minor && c.Equal(c_V7),
}
}
}
}
func events(chord []float64, offset int64) []aujo.Event {
var events []aujo.Event
for i, f := range chord {
e := aujo.Event{
Time: offset + 2000*int64(i),
Voice: 3,
Type: aujo.EventOn,
Pitch: f,
}
events = append(events, e)
}
return events
}
func findMinDistChord(prevChord intervals, nextChord intervals, requiredNotes []float64) intervals {
exp := nextChord.Expand()
const numChordNotes = 4
minDist := float64(0)
var minDistChord []float64
for i := 0; i <= len(exp)-numChordNotes; i++ {
c1 := exp[i : i+numChordNotes]
var dist float64
for j := 0; j < len(c1); j++ {
d := 10 + math.Pow(prevChord[j]-c1[j], 2) + math.Pow((c1[j]-57)/5, 2)
dist += math.Sqrt(d)
}
for j := 0; j < len(requiredNotes); j++ {
found := false
for k := 0; k < len(c1); k++ {
if c1[k] == requiredNotes[j] {
found = true
}
}
if !found { | }
if minDistChord == nil || dist < minDist {
minDist = dist
minDistChord = c1
}
}
return minDistChord
}
const chordDuration = 96000
const microOffset = 0.5
func (a *autoChord) progression(reps int, length int, prevBass float64, prevFunction int, prevChord intervals) (es []aujo.Event, lastBass float64, lastFunction int, lastChord intervals) {
var cc []nextChordResult
for i := 0; i < length; i++ {
excludeChords := a.LastProgression
for _, c := range cc {
excludeChords = append(excludeChords, c.MinDistChord)
}
res := a.nextChord(prevFunction, prevChord, excludeChords)
if i%2 == 1 {
res.MinDistChord = res.MinDistChord.Add(microOffset)
}
cc = append(cc, res)
prevChord = res.MinDistChord
prevFunction = res.Function
}
a.LastProgression = a.LastProgression[:0]
for _, c := range cc {
a.LastProgression = append(a.LastProgression, c.MinDistChord)
}
var prevPrevBass float64
fmt.Fprintln(os.Stderr, "TONIC", a.TonicPitch)
for i := 0; i < reps; i++ {
for j := 0; j < len(cc); j++ {
c := cc[j].MinDistChord
f := cc[j].Function
if i == reps-1 && j == len(cc)-1 {
penultimateChord := cc[j-1]
if (penultimateChord.CanModulateUp || penultimateChord.CanModulateDown) &&
rand.Intn(a.Modulate) == 0 {
if penultimateChord.CanModulateUp {
fmt.Fprintln(os.Stderr, "MODULATE UP")
a.TonicPitch += 7
if a.TonicPitch > 69 {
a.TonicPitch -= 12
}
} else if penultimateChord.CanModulateDown {
fmt.Fprintln(os.Stderr, "MODULATE DOWN")
a.TonicPitch -= 7
if a.TonicPitch < 69 {
a.TonicPitch += 12
}
}
c = findMinDistChord(penultimateChord.MinDistChord, c_V7.Add(a.TonicPitch), nil)
f = funcDom
prevChord = c
prevFunction = funcDom
a.Modulate = modulationChance
} else if cc[j].CanModulateRelative && rand.Intn(a.Modulate) == 0 {
if a.Minor {
fmt.Fprintln(os.Stderr, "MODULATE RELATIVE MAJOR")
a.Minor = false
a.TonicPitch += 3
if a.TonicPitch > 69 {
a.TonicPitch -= 12
}
} else {
fmt.Fprintln(os.Stderr, "MODULATE RELATIVE MINOR")
a.Minor = true
a.TonicPitch -= 3
if a.TonicPitch < 66 {
a.TonicPitch += 12
}
}
prevFunction = funcDom
a.Modulate = modulationChance
}
}
var bass, bass1 float64
for {
getBassNote := func(prev float64, prevPrev float64) float64 {
for {
n := c[rand.Intn(len(c))]
for n > 45 {
n -= 12
}
for n < 29 {
n += 12
}
if n != prev && n != prevPrev {
return n
}
}
}
bass = getBassNote(prevBass, prevPrevBass)
if bass-prevBass <= 3 && bass-prevBass > 0 {
bass1 = bass - 1
} else if prevBass-bass < 3 && prevBass-bass > 0 {
bass1 = bass + 1
} else {
bass1 = getBassNote(bass, prevBass)
}
prevBass, prevPrevBass = bass, prevBass
break
}
chordTime := int64(i*len(cc)+j) * chordDuration
if bass1 != 0 {
if i%2 == 0 {
bass1 -= microOffset
}
walkingBassTime := chordTime - chordDuration/2
es = append(es, events([]float64{bass1}, walkingBassTime)...)
}
fmt.Fprintln(os.Stderr, bass1, bass, c, f)
es = append(es, events(append([]float64{bass}, c...), chordTime)...)
}
}
fmt.Fprintln(os.Stderr)
if a.Modulate > 1 {
a.Modulate--
}
return es, prevBass, prevFunction, prevChord
}
func (a *autoChord) seq(prevBass float64, prevFunction int, prevChord intervals) *aujo.Sequence {
e, prevBass, prevFunction, prevChord := a.progression(2, 4, prevBass, prevFunction, prevChord)
return &aujo.Sequence{
Events: append(e, aujo.Event{
Time: e[len(e)-1].Time + chordDuration,
Func: func(m *aujo.Mix) {
m.SetNextSequence(a.seq(prevBass, prevFunction, prevChord))
},
}),
}
}
type autoChord struct {
TonicPitch float64
LastProgression []intervals
Modulate int
Minor bool
}
const modulationChance = 2
func AutoChords() *aujo.Sequence {
a := &autoChord{
TonicPitch: 69,
Modulate: modulationChance,
Minor: false,
}
return a.seq(0, funcDom, c_V7.Add(a.TonicPitch-12))
} | dist += 100
} | random_line_split |
shopping-list.reducer.ts | import { Ingredient } from '../../shared/ingredient.model';
import * as MyShoppingListActions from './shopping-list.actions';
/* INTERFACE? YES, INTERFACE.
Hmm, MAX Code has an 'interface' here for State.
Q. What happens if I omit? Hmm. ...
A. ___ ? Answer is: W-a-a-a-l, you *could* I s'ppose "omit" it.
BUT! This use of an 'interface' is a SUPER-CONVENIENCE
and generally good idea/practice.
SO: INCLUDE IT. Zank you.
How? Why?
Because you need to, in various spots,
indicate the TYPE of your (slice of) the STORE.
e.g. ShoppingListComponent:
private myStore: Store<{ myShoppingListViaReducer: { ingredients: Ingredient[] } }>
Above is OK. but gets unwieldy when you've got:
private myStore: Store<{ myShoppingListViaReducer:
{ ingredients: Ingredient[],
myBeingEditedIngredient: Ingredient,
myBeingEditedIngredientIndex: number } }>
Better will be use of interface: << N.B. This next line is a GUESS, at this point
private myStore: Store<{ myShoppingListViaReducer: State }> // << or similar; t.b.d.
*/
export interface StateShoppingListPart { // << Naming convention is just 'State'
ingredients: Ingredient [];
myBeingEditedIngredient: Ingredient; // << Came from ShoppingEditComponent
myBeingEditedIngredientIndex: number; // << Came from ShoppingEditComponent
}
// LECT. 356 ~06:04
// LECT. 360 - Okay, now we finally admit: this "App" level state does NOT belong here in ShoppingListReducer. No suh.
/*
export interface AppState {
/!* ORIG
myShoppingListReducer: StateShoppingListPart;
*!/
// This was MISSED by IDE Refactoring! (?) Hmm. Well, fixed it manually.
myShoppingListViaReducer: StateShoppingListPart;
}
*/
const initialStateShoppingListPart: StateShoppingListPart = {
ingredients: [ // NGRX: refactored here from ShoppingListService, fwiw
new Ingredient('ApplesWR__NGRX HardCoded Reducer', 5),
new Ingredient('TomatoesWR__NGRX HardCoded Reducer', 10),
],
myBeingEditedIngredient: null, // initial value: null
myBeingEditedIngredientIndex: -1, // initial value: -1 is "not a valid index" (What You Want).
// (Don't select 0 as initial value; 0 of course would be a valid index. Jus' sayin'.)
};
/* EXPERIMENT
Q.
Are 'state' and 'action' reserved, required, words?
Can I use something like 'ngrxState' and/or 'ngrxAction' or 'myShoppingListAction' or 'ngrxSLAction' instead?
A.
Early on... Seems maybe yeah ?
Hmm, bit of NgRx source code (!)
***************************************
And YES I DO SEE 'state' and 'action' therein (below) ... Hmm
--------
node_modules/@ngrx/store/src/models.d.ts:1
--------
export interface Action {
type: string;
}
...
/!**
* A function that takes an `Action` and a `State`, and returns a `State`.
* See `createReducer`.
// https://ngrx.io/api/store/createReducer#description
*!/
export interface ActionReducer<T, V extends Action = Action> {
(state: T | undefined, action: V): T; // <<<<<<<<<<<<<<<<<<<<<<<<< hmmmmmm... 'state', 'action'
}
--------
...but, hmm, apparently that does not make them required, reserved words. ok. (MBU)
***************************************
*/
export function shoppingListReducer (
ngrxState = initialStateShoppingListPart, // ngrxState will get initialState if null
// state = initialState, // state will get initialState if null
/*
action: Action, // No longer used. This was just NgRx Action interface
*/
/* When there was only One Action:
action: MyShoppingListActions.AddIngredientActionClass, // Now, our specific Action instance. (has ".myPayload")
*/
// Now we have additional Actions, so use this custom "union" 'type' to bring all in:
ngrxAction: MyShoppingListActions.ShoppingListActionsUnionType,
// Action instances. (all use ".myPayload" (and ".type" too of course))
): StateShoppingListPart | {
switch (ngrxAction.type) {
/* WAS: 'string'
case 'ADD_INGREDIENT_ACTION':
NOW: const
*/
case MyShoppingListActions.ADD_INGREDIENT_ACTION:
// Do NOT mutate the existing state!! Get a COPY, work on that
return { // reducer returns a new state!
...ngrxState, // spread operator gives you a COPY
ingredients: [
...ngrxState.ingredients, // likewise copy of our ingredients array, up to present...
ngrxAction.myPayload // << ostensibly the newly added Ingredient
]
};
case MyShoppingListActions.ADD_INGREDIENTS_ACTION:
console.log('ngrxAction.myPayload recipe ADD_INGREDIENTS_ACTION ', ngrxAction.myPayload);
// https://davidwalsh.name/spread-operator
console.log('...ngrxAction.myPayload recipe ADD_INGREDIENTS_ACTION ', ...ngrxAction.myPayload);
return {
...ngrxState,
ingredients: [
...ngrxState.ingredients,
/* No. This puts an array into this array. Not What You Want.
ngrxAction.myPayload // Nope.
*/
...ngrxAction.myPayload
// << Yes. This puts (inner) array element {} objects into this (outer) array. Just What You Want.
]
};
case MyShoppingListActions.UPDATE_INGREDIENT_ACTION:
/* No Longer. We do not get the Index # from the Component any longer. START_EDIT_ACTION has already put that Index # on the Store.
console.log('UPDATE ngrxAction.myPayload.indexPassedIn ', ngrxAction.myPayload.indexPassedIn); // Yep e.g. 1
*/
/* No longer this signature, of nested object w 'ingredientEdits' property:
console.log('UPDATE ngrxAction.myPayload.ingredientEdits ', ngrxAction.myPayload.ingredientEdits);
*/
console.log('UPDATE ngrxAction.myPayload ', ngrxAction.myPayload); // Now myPayload is simply of type Ingredient. Done.
/* WR__ - Nah. Not .splice() Let's change it up.
// .SPLICE() "in-place" modify array. Maybe not best idea!
ngrxState.ingredients.splice(ngrxAction.myPayload.indexPassedIn, 1, ngrxAction.myPayload.ingredientEdits);
console.log('UPDATE ngrxState ', ngrxState);
/!* Seemstabeokay:
ingredients: Array(2)
0: Ingredient {name: "Apples EDITED WR__SVC", amount: 5}
1: Ingredient {name: "TomatoesWR__NGRX", amount: 10}
*!/
*/
/* We get the Index # differently now. Not passed in. Known on the State, thx to START_EDIT_ACTION.
const myIngredientToBeUpdated: Ingredient = ngrxState.ingredients[ngrxAction.myPayload.indexPassedIn];
*/
const myIngredientToBeUpdated: Ingredient = ngrxState.ingredients[ngrxState.myBeingEditedIngredientIndex];
// Above is: Which ingredient (pre-editing values) was selected by user...
const myIngredientOnceItIsUpdated = {
...myIngredientToBeUpdated,
/* MAX comment re: above line -
"Arguably optional to put the original ingredient herein,
since with next line we do overwrite that original ingredient with new edits.
But - still good practice and does have uses. e.g. if ingredient
has an ID you don't want to lose/overwrite, etc. Best to incorporate above line.
*/
/* No longer this signature, of nested object w 'ingredientEdits' property:
...ngrxAction.myPayload.ingredientEdits
*/
...ngrxAction.myPayload // Now myPayload is simply of type Ingredient. Done.
};
const myIngredientsArrayToBeUpdatedACopy = [...ngrxState.ingredients];
/*
Now that we have (safe) COPY of array (above), we can modify it (below).
N.B. This modification seen here is of AN ELEMENT on the array.
It is NOT (apparently) permissible to just do assign statement
to ENTIRE array, even though yes it is a copy.
e.g. myIngredientsArrayToBeUpdatedACopy = []; // << Trying to assign some other array on to it..
Q. Why is that?
A. Because we have used 'const' above, in declaring that array copy. (Hmm?)
So, MBU - a const array, be it copy or not I guess of some other array,
CAN be modified on an element, but CANNOT be modified by wholesale assignment.
(Q. Wonder if I'm right.) << A. I tink you are.
*/
/* We get the Index # differently now. Not passed in. Known on the State, thx to START_EDIT_ACTION.
myIngredientsArrayToBeUpdatedACopy[ngrxAction.myPayload.indexPassedIn] = myIngredientOnceItIsUpdated;
*/
myIngredientsArrayToBeUpdatedACopy[ngrxState.myBeingEditedIngredientIndex] = myIngredientOnceItIsUpdated;
return {
...ngrxState,
/* Here, because I'd introduced literal array by use of '[ ]',
I in turn needed to use '...' spread operator when inserting into that
empty array, an actual array.
See comment just below.
ingredients: [
// ngrxState.ingredients
...myIngredientsArrayToBeUpdatedACopy
]
*/
/*
Here, omitting that literal '[ ]' business, I can also omit the '...' business.
Cheers.
*/
ingredients: myIngredientsArrayToBeUpdatedACopy,
myBeingEditedIngredient: null, // Intuitive? hmm.
// Reset here upon coming back from UPDATE (same for DELETE) Lect. 358 ~02:49
// See fuller Comment below at DELETE re: these 2 lines (1 above, 1 below)
myBeingEditedIngredientIndex: -1 // ditto above remark
};
case MyShoppingListActions.DELETE_INGREDIENT_ACTION:
/* HERE TOO, for DELETE - do NOT modify existing array. Eschew .splice(), plz.
Q. (Hmm, but seems our (MAX) use of .filter() *CAN* be run on existing array? Hmm.
A. Dumkopff! (oi) Yes, .filter() *DOES* return you a NEW Array. okay.
*/
/* Unnecessary.
const myOneIngredientToBeDeleted = {...ngrxState.ingredients[ngrxAction.myPayload]};
*/
/*
This (above) kinda useless. You can look at (or log if you like)
what ingredient you're deleting, but, we don't make any use of this variable.
*/
/* Unnecessary.
const myIngredientsArrayOneToBeDeletedACopy = [...ngrxState.ingredients];
*/
/*
This (above) not needed: "creating a copy". No.
In fact I hit error when I did the following:
- created this copy,
- and tried (below) to run .filter() on that copy...
- ...with the intention to assign back the now filtered array (copy).
No. As the copy I made is a 'const' array, you can't just assign wholesale back to it:
ERROR: "src/app/shopping-list/store/shopping-list.reducer.ts(178,13): error TS2588:
Cannot assign to 'myIngredientsArrayOneToBeDeletedACopy' because it is a constant."
Instead we'll be sort of running that .filter() 'in situ' (MAX Code approach).
That is, he doesn't bother to create a new variable - no need to. // << Dumkopff! .filter MAKES new array. Cheers.
Hmm. We'll see. << Yah works of course. Cheers.
*/
/* Unnecessary. (and error-generating, too)
myIngredientsArrayOneToBeDeletedACopy = [ // << No. Cannot ASSIGN ('=') to const
...myIngredientsArrayOneToBeDeletedACopy,
myIngredientsArrayOneToBeDeletedACopy.filter(
(nextIngredient, nextIngredientIndexFromFiltering) => {
return nextIngredientIndexFromFiltering !== 1; // << HARD-CODED
}
)
]
*/
return {
...ngrxState,
ingredients: ngrxState.ingredients.filter(
(nextIngredient, nextIngredientIndex) => {
return nextIngredientIndex !== ngrxState.myBeingEditedIngredientIndex; // << PART of STATE = good
/*
ngrxAction.myPayload; << Initially, got index off action, payload.
That approach sort of required the action logic to send it in, upon click,
to this particular reducer case/function/method (DELETE and same for UPDATE)
Refactoring, as 'twere:
Now, we've made that index PART of STATE: state.editedIngredientIndex (MAX code)
Now we invoke a central START_EDIT to get that index for BOTH Delete and Update.
*/
}
),
myBeingEditedIngredient: null, // Intuitive? hmm.
/* Reset here upon coming back from DELETE (same for UPDATE) Lect. 358 ~02:49
Hmm, MAX says we at this point are also STOP_EDIT, but instead
of, I don't know, here in middle of this UPDATE Action, calling (?)
the STOP_EDIT Action, we instead simply sort of do the
"non-D.R.Y." mode of pasting in 2 lines of logic here,
that STOP_EDIT does. o well. no biggie.
(line above re: Ingredient; line below re: IngredientIndex)
*/
myBeingEditedIngredientIndex: -1 // ditto above remark
/* Noop. NOT good idea. NO SPLICE (boo hiss)
[
ngrxState.ingredients.splice(ngrxAction.myPayload, 1)
]
*/
};
case MyShoppingListActions.START_EDIT_ACTION:
/*
SEE PASTED-IN OUTPUT FROM THIS CONSOLE LOGGING FURTHER BELOW
*/
console.log('{ ...ngrxState.ingredients } ', { ...ngrxState.ingredients });
console.log('{ ...ngrxState.ingredients[ngrxAction.myPayload] } ', { ...ngrxState.ingredients[ngrxAction.myPayload] });
/*
{name: "ApplesWR__NGRX HardCoded Reducer", amount: 5}
name: "ApplesWR__NGRX HardCoded Reducer"
amount: 5
*/
console.log(' ngrxState.ingredients ', ngrxState.ingredients );
console.log(' ngrxState.ingredients[ngrxAction.myPayload] ', ngrxState.ingredients[ngrxAction.myPayload] );
/*
Ingredient {name: "ApplesWR__NGRX HardCoded Reducer", amount: 5}
name: "ApplesWR__NGRX HardCoded Reducer"
amount: 5
*/
/* WR__ INITIAL. Seems to work A-O.K., but, I may be missing something ...
N.B.: I have/had no 'myBeingEditedIngredient' explicitly included (by me) in the 'return {}'
Hmm.
Why? I instead deal with picking out which ingredient, from the Store array
of ingredients, in logic over in the Store-subscribing() ShoppingEditComponent ngOnInit()
(MAX code does NOT do that.)
return {
...ngrxState,
myBeingEditedIngredientIndex: ngrxAction.myPayload
};
*/
/* MAX Code:
Max *does* deal with the picking out of which ingredient, from the Store's array
of ingredients, right here, in the Reducer 'return {}'
(Not, like I am, over in the ShoppingEditComponent.)
*/
return {
...ngrxState,
myBeingEditedIngredientIndex: ngrxAction.myPayload,
/* N.B. VERY IMPORTANT: ALWAYS A *COPY*, not Reference. Or, "Spread Operator is your friend."
That is...
this next line below (Commented Out) uses the Store's actual array for ingredients. << PROBLEMATIC
Arrays (and Objects) being REFERENCE types, if you modify a reference
to this particular Ingredient object, in our Store's ingredients array,
(e.g. modify over in ShoppingEditComponent),
then you modify the source, too, back in the Store, DIRECTLY. << PROBLEMATIC
You ALWAYS WANT A *COPY* OF A STORE ARRAY, or STORE ARRAY ELEMENT, or other Store data.
>> NO >> myBeingEditedIngredient: ngrxState.ingredients[ngrxAction.myPayload], // << NO <<
*/
/* MAX Explains: Lect. 357 ~04:40
"The spread operator ... gives you a COPY of the Store's array of ingredients (good),
and the result of that (I (WR__) guess ?) needs to go in curly braces ( ? )."
>> "I need to create a new object which can then be edited without problems by using curly braces."
Q. Curly Braces {} ?
- I guess I don't know how come curly braces are required, but I guess that using spread operator
on an array [] of objects {}, (that is [ {}, {} ]), you get a different result, pulling out an element,
such that you do need to (re)-put {} around the resulting element,
to assign to the property 'myBeingEditedIngredient:'. Okay.
- (As compared to the (Commented Out) line above, which is not using spread operator,
and does not need curly braces for the resulting element it gets out, to assign to
the property 'myBeingEditedIngredient:')
A. Curly Braces {} !
https://www.udemy.com/course/the-complete-guide-to-angular-2/learn/lecture/14466570#questions/8570924
Jost:
"With the spread operator (...) you can spread out the properties of an object
into a comma-separated list of key/value pairs, and then add new key/value pairs
(or overwrite an existent object property).
The whole is wrapped in curly braces to reconvert the resulting list into a (new) object."
WR__
Hmm, wonder what the syntax for that exactly looks like, a "comma-separated list of key/value pairs"
around which I need to then supply curly braces. Hmm.
e.g. let myObj = {far: 'cry', 'last': 'week', thisTime: 'around'}
>> 'far': 'cry', 'last': 'week', 'thisTime': 'around' << ?? Seems to be ??
>> far: 'cry', 'last': 'week', thisTime: 'around' << ?? Seems to be ?? [**]
>> ('far': 'cry', 'last': 'week', 'thisTime': 'around') << ?? Nah.
>> {'far': 'cry', 'last': 'week', 'thisTime': 'around'} << ?? Nah.
>> ['far': 'cry', 'last': 'week', 'thisTime': 'around'] << ?? Nah.
>> "'far': 'cry', 'last': 'week', 'thisTime': 'around'" << String seems most likely.
>> "far: 'cry', 'last': 'week', thisTime: 'around'" << (ditto)[**]
[**] And: Perhaps doesn't put single quotes around the keys that didn't have them; dunno
*/
/*
Some Console Logging! (invoked above, before the 'return')
{ ...ngrxState.ingredients } {0: Ingredient, 1: Ingredient, 2: Ingredient, 3: Ingredient}
{ ...ngrxState.ingredients[ngrxAction.myPayload] } {name: "ketchup", amount: 1}
ngrxState.ingredients (4) [Ingredient, Ingredient, Ingredient, Ingredient]
ngrxState.ingredients[ngrxAction.myPayload] Ingredient {name: "ketchup", amount: 1}
wholeDamnSLStore {ingredients: Array(4), myBeingEditedIngredient: {…}, myBeingEditedIngredientIndex: 2}
*/
myBeingEditedIngredient: { ...ngrxState.ingredients[ngrxAction.myPayload] }
}; // /START_EDIT_ACTION
case MyShoppingListActions.STOP_EDIT_ACTION:
return {
...ngrxState,
myBeingEditedIngredientIndex: -1,
myBeingEditedIngredient: null
};
default:
return ngrxState; // << Yes. (Do not use a "copy" ... here on default. good.)
}
}
| identifier_body | |
shopping-list.reducer.ts | import { Ingredient } from '../../shared/ingredient.model';
import * as MyShoppingListActions from './shopping-list.actions';
/* INTERFACE? YES, INTERFACE.
Hmm, MAX Code has an 'interface' here for State.
Q. What happens if I omit? Hmm. ...
A. ___ ? Answer is: W-a-a-a-l, you *could* I s'ppose "omit" it.
BUT! This use of an 'interface' is a SUPER-CONVENIENCE
and generally good idea/practice.
SO: INCLUDE IT. Zank you.
How? Why?
Because you need to, in various spots,
indicate the TYPE of your (slice of) the STORE.
e.g. ShoppingListComponent:
private myStore: Store<{ myShoppingListViaReducer: { ingredients: Ingredient[] } }>
Above is OK. but gets unwieldy when you've got:
private myStore: Store<{ myShoppingListViaReducer:
{ ingredients: Ingredient[],
myBeingEditedIngredient: Ingredient,
myBeingEditedIngredientIndex: number } }>
Better will be use of interface: << N.B. This next line is a GUESS, at this point
private myStore: Store<{ myShoppingListViaReducer: State }> // << or similar; t.b.d.
*/
export interface StateShoppingListPart { // << Naming convention is just 'State'
ingredients: Ingredient [];
myBeingEditedIngredient: Ingredient; // << Came from ShoppingEditComponent
myBeingEditedIngredientIndex: number; // << Came from ShoppingEditComponent
}
// LECT. 356 ~06:04
// LECT. 360 - Okay, now we finally admit: this "App" level state does NOT belong here in ShoppingListReducer. No suh.
/*
export interface AppState {
/!* ORIG
myShoppingListReducer: StateShoppingListPart;
*!/
// This was MISSED by IDE Refactoring! (?) Hmm. Well, fixed it manually.
myShoppingListViaReducer: StateShoppingListPart;
}
*/
const initialStateShoppingListPart: StateShoppingListPart = {
ingredients: [ // NGRX: refactored here from ShoppingListService, fwiw
new Ingredient('ApplesWR__NGRX HardCoded Reducer', 5),
new Ingredient('TomatoesWR__NGRX HardCoded Reducer', 10),
],
myBeingEditedIngredient: null, // initial value: null
myBeingEditedIngredientIndex: -1, // initial value: -1 is "not a valid index" (What You Want).
// (Don't select 0 as initial value; 0 of course would be a valid index. Jus' sayin'.)
};
/* EXPERIMENT
Q.
Are 'state' and 'action' reserved, required, words?
Can I use something like 'ngrxState' and/or 'ngrxAction' or 'myShoppingListAction' or 'ngrxSLAction' instead?
A.
Early on... Seems maybe yeah ?
Hmm, bit of NgRx source code (!)
***************************************
And YES I DO SEE 'state' and 'action' therein (below) ... Hmm
--------
node_modules/@ngrx/store/src/models.d.ts:1
--------
export interface Action {
type: string;
}
...
/!**
* A function that takes an `Action` and a `State`, and returns a `State`.
* See `createReducer`.
// https://ngrx.io/api/store/createReducer#description
*!/
export interface ActionReducer<T, V extends Action = Action> {
(state: T | undefined, action: V): T; // <<<<<<<<<<<<<<<<<<<<<<<<< hmmmmmm... 'state', 'action'
}
--------
...but, hmm, apparently that does not make them required, reserved words. ok. (MBU)
***************************************
*/
export function | (
ngrxState = initialStateShoppingListPart, // ngrxState will get initialState if null
// state = initialState, // state will get initialState if null
/*
action: Action, // No longer used. This was just NgRx Action interface
*/
/* When there was only One Action:
action: MyShoppingListActions.AddIngredientActionClass, // Now, our specific Action instance. (has ".myPayload")
*/
// Now we have additional Actions, so use this custom "union" 'type' to bring all in:
ngrxAction: MyShoppingListActions.ShoppingListActionsUnionType,
// Action instances. (all use ".myPayload" (and ".type" too of course))
): StateShoppingListPart {
switch (ngrxAction.type) {
/* WAS: 'string'
case 'ADD_INGREDIENT_ACTION':
NOW: const
*/
case MyShoppingListActions.ADD_INGREDIENT_ACTION:
// Do NOT mutate the existing state!! Get a COPY, work on that
return { // reducer returns a new state!
...ngrxState, // spread operator gives you a COPY
ingredients: [
...ngrxState.ingredients, // likewise copy of our ingredients array, up to present...
ngrxAction.myPayload // << ostensibly the newly added Ingredient
]
};
case MyShoppingListActions.ADD_INGREDIENTS_ACTION:
console.log('ngrxAction.myPayload recipe ADD_INGREDIENTS_ACTION ', ngrxAction.myPayload);
// https://davidwalsh.name/spread-operator
console.log('...ngrxAction.myPayload recipe ADD_INGREDIENTS_ACTION ', ...ngrxAction.myPayload);
return {
...ngrxState,
ingredients: [
...ngrxState.ingredients,
/* No. This puts an array into this array. Not What You Want.
ngrxAction.myPayload // Nope.
*/
...ngrxAction.myPayload
// << Yes. This puts (inner) array element {} objects into this (outer) array. Just What You Want.
]
};
case MyShoppingListActions.UPDATE_INGREDIENT_ACTION:
/* No Longer. We do not get the Index # from the Component any longer. START_EDIT_ACTION has already put that Index # on the Store.
console.log('UPDATE ngrxAction.myPayload.indexPassedIn ', ngrxAction.myPayload.indexPassedIn); // Yep e.g. 1
*/
/* No longer this signature, of nested object w 'ingredientEdits' property:
console.log('UPDATE ngrxAction.myPayload.ingredientEdits ', ngrxAction.myPayload.ingredientEdits);
*/
console.log('UPDATE ngrxAction.myPayload ', ngrxAction.myPayload); // Now myPayload is simply of type Ingredient. Done.
/* WR__ - Nah. Not .splice() Let's change it up.
// .SPLICE() "in-place" modify array. Maybe not best idea!
ngrxState.ingredients.splice(ngrxAction.myPayload.indexPassedIn, 1, ngrxAction.myPayload.ingredientEdits);
console.log('UPDATE ngrxState ', ngrxState);
/!* Seemstabeokay:
ingredients: Array(2)
0: Ingredient {name: "Apples EDITED WR__SVC", amount: 5}
1: Ingredient {name: "TomatoesWR__NGRX", amount: 10}
*!/
*/
/* We get the Index # differently now. Not passed in. Known on the State, thx to START_EDIT_ACTION.
const myIngredientToBeUpdated: Ingredient = ngrxState.ingredients[ngrxAction.myPayload.indexPassedIn];
*/
const myIngredientToBeUpdated: Ingredient = ngrxState.ingredients[ngrxState.myBeingEditedIngredientIndex];
// Above is: Which ingredient (pre-editing values) was selected by user...
const myIngredientOnceItIsUpdated = {
...myIngredientToBeUpdated,
/* MAX comment re: above line -
"Arguably optional to put the original ingredient herein,
since with next line we do overwrite that original ingredient with new edits.
But - still good practice and does have uses. e.g. if ingredient
has an ID you don't want to lose/overwrite, etc. Best to incorporate above line.
*/
/* No longer this signature, of nested object w 'ingredientEdits' property:
...ngrxAction.myPayload.ingredientEdits
*/
...ngrxAction.myPayload // Now myPayload is simply of type Ingredient. Done.
};
const myIngredientsArrayToBeUpdatedACopy = [...ngrxState.ingredients];
/*
Now that we have (safe) COPY of array (above), we can modify it (below).
N.B. This modification seen here is of AN ELEMENT on the array.
It is NOT (apparently) permissible to just do assign statement
to ENTIRE array, even though yes it is a copy.
e.g. myIngredientsArrayToBeUpdatedACopy = []; // << Trying to assign some other array on to it..
Q. Why is that?
A. Because we have used 'const' above, in declaring that array copy. (Hmm?)
So, MBU - a const array, be it copy or not I guess of some other array,
CAN be modified on an element, but CANNOT be modified by wholesale assignment.
(Q. Wonder if I'm right.) << A. I tink you are.
*/
/* We get the Index # differently now. Not passed in. Known on the State, thx to START_EDIT_ACTION.
myIngredientsArrayToBeUpdatedACopy[ngrxAction.myPayload.indexPassedIn] = myIngredientOnceItIsUpdated;
*/
myIngredientsArrayToBeUpdatedACopy[ngrxState.myBeingEditedIngredientIndex] = myIngredientOnceItIsUpdated;
return {
...ngrxState,
/* Here, because I'd introduced literal array by use of '[ ]',
I in turn needed to use '...' spread operator when inserting into that
empty array, an actual array.
See comment just below.
ingredients: [
// ngrxState.ingredients
...myIngredientsArrayToBeUpdatedACopy
]
*/
/*
Here, omitting that literal '[ ]' business, I can also omit the '...' business.
Cheers.
*/
ingredients: myIngredientsArrayToBeUpdatedACopy,
myBeingEditedIngredient: null, // Intuitive? hmm.
// Reset here upon coming back from UPDATE (same for DELETE) Lect. 358 ~02:49
// See fuller Comment below at DELETE re: these 2 lines (1 above, 1 below)
myBeingEditedIngredientIndex: -1 // ditto above remark
};
case MyShoppingListActions.DELETE_INGREDIENT_ACTION:
/* HERE TOO, for DELETE - do NOT modify existing array. Eschew .splice(), plz.
Q. (Hmm, but seems our (MAX) use of .filter() *CAN* be run on existing array? Hmm.
A. Dumkopff! (oi) Yes, .filter() *DOES* return you a NEW Array. okay.
*/
/* Unnecessary.
const myOneIngredientToBeDeleted = {...ngrxState.ingredients[ngrxAction.myPayload]};
*/
/*
This (above) kinda useless. You can look at (or log if you like)
what ingredient you're deleting, but, we don't make any use of this variable.
*/
/* Unnecessary.
const myIngredientsArrayOneToBeDeletedACopy = [...ngrxState.ingredients];
*/
/*
This (above) not needed: "creating a copy". No.
In fact I hit error when I did the following:
- created this copy,
- and tried (below) to run .filter() on that copy...
- ...with the intention to assign back the now filtered array (copy).
No. As the copy I made is a 'const' array, you can't just assign wholesale back to it:
ERROR: "src/app/shopping-list/store/shopping-list.reducer.ts(178,13): error TS2588:
Cannot assign to 'myIngredientsArrayOneToBeDeletedACopy' because it is a constant."
Instead we'll be sort of running that .filter() 'in situ' (MAX Code approach).
That is, he doesn't bother to create a new variable - no need to. // << Dumkopff! .filter MAKES new array. Cheers.
Hmm. We'll see. << Yah works of course. Cheers.
*/
/* Unnecessary. (and error-generating, too)
myIngredientsArrayOneToBeDeletedACopy = [ // << No. Cannot ASSIGN ('=') to const
...myIngredientsArrayOneToBeDeletedACopy,
myIngredientsArrayOneToBeDeletedACopy.filter(
(nextIngredient, nextIngredientIndexFromFiltering) => {
return nextIngredientIndexFromFiltering !== 1; // << HARD-CODED
}
)
]
*/
return {
...ngrxState,
ingredients: ngrxState.ingredients.filter(
(nextIngredient, nextIngredientIndex) => {
return nextIngredientIndex !== ngrxState.myBeingEditedIngredientIndex; // << PART of STATE = good
/*
ngrxAction.myPayload; << Initially, got index off action, payload.
That approach sort of required the action logic to send it in, upon click,
to this particular reducer case/function/method (DELETE and same for UPDATE)
Refactoring, as 'twere:
Now, we've made that index PART of STATE: state.editedIngredientIndex (MAX code)
Now we invoke a central START_EDIT to get that index for BOTH Delete and Update.
*/
}
),
myBeingEditedIngredient: null, // Intuitive? hmm.
/* Reset here upon coming back from DELETE (same for UPDATE) Lect. 358 ~02:49
Hmm, MAX says we at this point are also STOP_EDIT, but instead
of, I don't know, here in middle of this UPDATE Action, calling (?)
the STOP_EDIT Action, we instead simply sort of do the
"non-D.R.Y." mode of pasting in 2 lines of logic here,
that STOP_EDIT does. o well. no biggie.
(line above re: Ingredient; line below re: IngredientIndex)
*/
myBeingEditedIngredientIndex: -1 // ditto above remark
/* Noop. NOT good idea. NO SPLICE (boo hiss)
[
ngrxState.ingredients.splice(ngrxAction.myPayload, 1)
]
*/
};
case MyShoppingListActions.START_EDIT_ACTION:
/*
SEE PASTED-IN OUTPUT FROM THIS CONSOLE LOGGING FURTHER BELOW
*/
console.log('{ ...ngrxState.ingredients } ', { ...ngrxState.ingredients });
console.log('{ ...ngrxState.ingredients[ngrxAction.myPayload] } ', { ...ngrxState.ingredients[ngrxAction.myPayload] });
/*
{name: "ApplesWR__NGRX HardCoded Reducer", amount: 5}
name: "ApplesWR__NGRX HardCoded Reducer"
amount: 5
*/
console.log(' ngrxState.ingredients ', ngrxState.ingredients );
console.log(' ngrxState.ingredients[ngrxAction.myPayload] ', ngrxState.ingredients[ngrxAction.myPayload] );
/*
Ingredient {name: "ApplesWR__NGRX HardCoded Reducer", amount: 5}
name: "ApplesWR__NGRX HardCoded Reducer"
amount: 5
*/
/* WR__ INITIAL. Seems to work A-O.K., but, I may be missing something ...
N.B.: I have/had no 'myBeingEditedIngredient' explicitly included (by me) in the 'return {}'
Hmm.
Why? I instead deal with picking out which ingredient, from the Store array
of ingredients, in logic over in the Store-subscribing() ShoppingEditComponent ngOnInit()
(MAX code does NOT do that.)
return {
...ngrxState,
myBeingEditedIngredientIndex: ngrxAction.myPayload
};
*/
/* MAX Code:
Max *does* deal with the picking out of which ingredient, from the Store's array
of ingredients, right here, in the Reducer 'return {}'
(Not, like I am, over in the ShoppingEditComponent.)
*/
return {
...ngrxState,
myBeingEditedIngredientIndex: ngrxAction.myPayload,
/* N.B. VERY IMPORTANT: ALWAYS A *COPY*, not Reference. Or, "Spread Operator is your friend."
That is...
this next line below (Commented Out) uses the Store's actual array for ingredients. << PROBLEMATIC
Arrays (and Objects) being REFERENCE types, if you modify a reference
to this particular Ingredient object, in our Store's ingredients array,
(e.g. modify over in ShoppingEditComponent),
then you modify the source, too, back in the Store, DIRECTLY. << PROBLEMATIC
You ALWAYS WANT A *COPY* OF A STORE ARRAY, or STORE ARRAY ELEMENT, or other Store data.
>> NO >> myBeingEditedIngredient: ngrxState.ingredients[ngrxAction.myPayload], // << NO <<
*/
/* MAX Explains: Lect. 357 ~04:40
"The spread operator ... gives you a COPY of the Store's array of ingredients (good),
and the result of that (I (WR__) guess ?) needs to go in curly braces ( ? )."
>> "I need to create a new object which can then be edited without problems by using curly braces."
Q. Curly Braces {} ?
- I guess I don't know how come curly braces are required, but I guess that using spread operator
on an array [] of objects {}, (that is [ {}, {} ]), you get a different result, pulling out an element,
such that you do need to (re)-put {} around the resulting element,
to assign to the property 'myBeingEditedIngredient:'. Okay.
- (As compared to the (Commented Out) line above, which is not using spread operator,
and does not need curly braces for the resulting element it gets out, to assign to
the property 'myBeingEditedIngredient:')
A. Curly Braces {} !
https://www.udemy.com/course/the-complete-guide-to-angular-2/learn/lecture/14466570#questions/8570924
Jost:
"With the spread operator (...) you can spread out the properties of an object
into a comma-separated list of key/value pairs, and then add new key/value pairs
(or overwrite an existent object property).
The whole is wrapped in curly braces to reconvert the resulting list into a (new) object."
WR__
Hmm, wonder what the syntax for that exactly looks like, a "comma-separated list of key/value pairs"
around which I need to then supply curly braces. Hmm.
e.g. let myObj = {far: 'cry', 'last': 'week', thisTime: 'around'}
>> 'far': 'cry', 'last': 'week', 'thisTime': 'around' << ?? Seems to be ??
>> far: 'cry', 'last': 'week', thisTime: 'around' << ?? Seems to be ?? [**]
>> ('far': 'cry', 'last': 'week', 'thisTime': 'around') << ?? Nah.
>> {'far': 'cry', 'last': 'week', 'thisTime': 'around'} << ?? Nah.
>> ['far': 'cry', 'last': 'week', 'thisTime': 'around'] << ?? Nah.
>> "'far': 'cry', 'last': 'week', 'thisTime': 'around'" << String seems most likely.
>> "far: 'cry', 'last': 'week', thisTime: 'around'" << (ditto)[**]
[**] And: Perhaps doesn't put single quotes around the keys that didn't have them; dunno
*/
/*
Some Console Logging! (invoked above, before the 'return')
{ ...ngrxState.ingredients } {0: Ingredient, 1: Ingredient, 2: Ingredient, 3: Ingredient}
{ ...ngrxState.ingredients[ngrxAction.myPayload] } {name: "ketchup", amount: 1}
ngrxState.ingredients (4) [Ingredient, Ingredient, Ingredient, Ingredient]
ngrxState.ingredients[ngrxAction.myPayload] Ingredient {name: "ketchup", amount: 1}
wholeDamnSLStore {ingredients: Array(4), myBeingEditedIngredient: {…}, myBeingEditedIngredientIndex: 2}
*/
myBeingEditedIngredient: { ...ngrxState.ingredients[ngrxAction.myPayload] }
}; // /START_EDIT_ACTION
case MyShoppingListActions.STOP_EDIT_ACTION:
return {
...ngrxState,
myBeingEditedIngredientIndex: -1,
myBeingEditedIngredient: null
};
default:
return ngrxState; // << Yes. (Do not use a "copy" ... here on default. good.)
}
}
| shoppingListReducer | identifier_name |
shopping-list.reducer.ts | import { Ingredient } from '../../shared/ingredient.model';
import * as MyShoppingListActions from './shopping-list.actions';
/* INTERFACE? YES, INTERFACE.
Hmm, MAX Code has an 'interface' here for State.
Q. What happens if I omit? Hmm. ...
A. ___ ? Answer is: W-a-a-a-l, you *could* I s'ppose "omit" it.
BUT! This use of an 'interface' is a SUPER-CONVENIENCE
and generally good idea/practice.
SO: INCLUDE IT. Zank you.
How? Why?
Because you need to, in various spots,
indicate the TYPE of your (slice of) the STORE.
e.g. ShoppingListComponent:
private myStore: Store<{ myShoppingListViaReducer: { ingredients: Ingredient[] } }>
Above is OK. but gets unwieldy when you've got:
private myStore: Store<{ myShoppingListViaReducer:
{ ingredients: Ingredient[],
myBeingEditedIngredient: Ingredient,
myBeingEditedIngredientIndex: number } }>
Better will be use of interface: << N.B. This next line is a GUESS, at this point
private myStore: Store<{ myShoppingListViaReducer: State }> // << or similar; t.b.d.
*/
export interface StateShoppingListPart { // << Naming convention is just 'State'
ingredients: Ingredient [];
myBeingEditedIngredient: Ingredient; // << Came from ShoppingEditComponent
myBeingEditedIngredientIndex: number; // << Came from ShoppingEditComponent
}
// LECT. 356 ~06:04
// LECT. 360 - Okay, now we finally admit: this "App" level state does NOT belong here in ShoppingListReducer. No suh.
/*
export interface AppState {
/!* ORIG
myShoppingListReducer: StateShoppingListPart;
*!/
// This was MISSED by IDE Refactoring! (?) Hmm. Well, fixed it manually.
myShoppingListViaReducer: StateShoppingListPart;
}
*/
const initialStateShoppingListPart: StateShoppingListPart = {
ingredients: [ // NGRX: refactored here from ShoppingListService, fwiw
new Ingredient('ApplesWR__NGRX HardCoded Reducer', 5),
new Ingredient('TomatoesWR__NGRX HardCoded Reducer', 10),
],
myBeingEditedIngredient: null, // initial value: null
myBeingEditedIngredientIndex: -1, // initial value: -1 is "not a valid index" (What You Want).
// (Don't select 0 as initial value; 0 of course would be a valid index. Jus' sayin'.)
};
/* EXPERIMENT
Q.
Are 'state' and 'action' reserved, required, words?
Can I use something like 'ngrxState' and/or 'ngrxAction' or 'myShoppingListAction' or 'ngrxSLAction' instead?
A.
Early on... Seems maybe yeah ?
Hmm, bit of NgRx source code (!)
***************************************
And YES I DO SEE 'state' and 'action' therein (below) ... Hmm
--------
node_modules/@ngrx/store/src/models.d.ts:1
--------
export interface Action {
type: string;
}
...
/!**
* A function that takes an `Action` and a `State`, and returns a `State`.
* See `createReducer`.
// https://ngrx.io/api/store/createReducer#description
*!/
export interface ActionReducer<T, V extends Action = Action> {
(state: T | undefined, action: V): T; // <<<<<<<<<<<<<<<<<<<<<<<<< hmmmmmm... 'state', 'action'
}
--------
...but, hmm, apparently that does not make them required, reserved words. ok. (MBU)
***************************************
*/
export function shoppingListReducer (
ngrxState = initialStateShoppingListPart, // ngrxState will get initialState if null
// state = initialState, // state will get initialState if null
/*
action: Action, // No longer used. This was just NgRx Action interface
*/
/* When there was only One Action:
action: MyShoppingListActions.AddIngredientActionClass, // Now, our specific Action instance. (has ".myPayload")
*/
// Now we have additional Actions, so use this custom "union" 'type' to bring all in:
ngrxAction: MyShoppingListActions.ShoppingListActionsUnionType,
// Action instances. (all use ".myPayload" (and ".type" too of course))
): StateShoppingListPart {
switch (ngrxAction.type) {
/* WAS: 'string'
case 'ADD_INGREDIENT_ACTION':
NOW: const
*/
case MyShoppingListActions.ADD_INGREDIENT_ACTION:
// Do NOT mutate the existing state!! Get a COPY, work on that
return { // reducer returns a new state!
...ngrxState, // spread operator gives you a COPY
ingredients: [
...ngrxState.ingredients, // likewise copy of our ingredients array, up to present...
ngrxAction.myPayload // << ostensibly the newly added Ingredient
]
};
case MyShoppingListActions.ADD_INGREDIENTS_ACTION:
console.log('ngrxAction.myPayload recipe ADD_INGREDIENTS_ACTION ', ngrxAction.myPayload);
// https://davidwalsh.name/spread-operator
console.log('...ngrxAction.myPayload recipe ADD_INGREDIENTS_ACTION ', ...ngrxAction.myPayload);
return {
...ngrxState,
ingredients: [
...ngrxState.ingredients,
/* No. This puts an array into this array. Not What You Want.
ngrxAction.myPayload // Nope.
*/
...ngrxAction.myPayload
// << Yes. This puts (inner) array element {} objects into this (outer) array. Just What You Want.
]
};
case MyShoppingListActions.UPDATE_INGREDIENT_ACTION:
/* No Longer. We do not get the Index # from the Component any longer. START_EDIT_ACTION has already put that Index # on the Store.
console.log('UPDATE ngrxAction.myPayload.indexPassedIn ', ngrxAction.myPayload.indexPassedIn); // Yep e.g. 1
*/
/* No longer this signature, of nested object w 'ingredientEdits' property:
console.log('UPDATE ngrxAction.myPayload.ingredientEdits ', ngrxAction.myPayload.ingredientEdits);
*/
console.log('UPDATE ngrxAction.myPayload ', ngrxAction.myPayload); // Now myPayload is simply of type Ingredient. Done.
/* WR__ - Nah. Not .splice() Let's change it up.
// .SPLICE() "in-place" modify array. Maybe not best idea!
ngrxState.ingredients.splice(ngrxAction.myPayload.indexPassedIn, 1, ngrxAction.myPayload.ingredientEdits);
console.log('UPDATE ngrxState ', ngrxState);
/!* Seemstabeokay:
ingredients: Array(2)
0: Ingredient {name: "Apples EDITED WR__SVC", amount: 5}
1: Ingredient {name: "TomatoesWR__NGRX", amount: 10}
*!/
*/
/* We get the Index # differently now. Not passed in. Known on the State, thx to START_EDIT_ACTION.
const myIngredientToBeUpdated: Ingredient = ngrxState.ingredients[ngrxAction.myPayload.indexPassedIn];
*/
const myIngredientToBeUpdated: Ingredient = ngrxState.ingredients[ngrxState.myBeingEditedIngredientIndex];
// Above is: Which ingredient (pre-editing values) was selected by user...
const myIngredientOnceItIsUpdated = {
...myIngredientToBeUpdated,
/* MAX comment re: above line -
"Arguably optional to put the original ingredient herein,
since with next line we do overwrite that original ingredient with new edits.
But - still good practice and does have uses. e.g. if ingredient
has an ID you don't want to lose/overwrite, etc. Best to incorporate above line.
*/
/* No longer this signature, of nested object w 'ingredientEdits' property:
...ngrxAction.myPayload.ingredientEdits
*/
...ngrxAction.myPayload // Now myPayload is simply of type Ingredient. Done.
};
const myIngredientsArrayToBeUpdatedACopy = [...ngrxState.ingredients];
/*
Now that we have (safe) COPY of array (above), we can modify it (below).
N.B. This modification seen here is of AN ELEMENT on the array.
It is NOT (apparently) permissible to just do assign statement
to ENTIRE array, even though yes it is a copy.
e.g. myIngredientsArrayToBeUpdatedACopy = []; // << Trying to assign some other array on to it..
Q. Why is that?
A. Because we have used 'const' above, in declaring that array copy. (Hmm?)
So, MBU - a const array, be it copy or not I guess of some other array,
CAN be modified on an element, but CANNOT be modified by wholesale assignment.
(Q. Wonder if I'm right.) << A. I tink you are.
*/
/* We get the Index # differently now. Not passed in. Known on the State, thx to START_EDIT_ACTION.
myIngredientsArrayToBeUpdatedACopy[ngrxAction.myPayload.indexPassedIn] = myIngredientOnceItIsUpdated;
*/
myIngredientsArrayToBeUpdatedACopy[ngrxState.myBeingEditedIngredientIndex] = myIngredientOnceItIsUpdated;
return {
...ngrxState,
/* Here, because I'd introduced literal array by use of '[ ]',
I in turn needed to use '...' spread operator when inserting into that
empty array, an actual array.
See comment just below.
ingredients: [
// ngrxState.ingredients
...myIngredientsArrayToBeUpdatedACopy
]
*/
/*
Here, omitting that literal '[ ]' business, I can also omit the '...' business.
Cheers.
*/
ingredients: myIngredientsArrayToBeUpdatedACopy,
myBeingEditedIngredient: null, // Intuitive? hmm.
// Reset here upon coming back from UPDATE (same for DELETE) Lect. 358 ~02:49
// See fuller Comment below at DELETE re: these 2 lines (1 above, 1 below)
myBeingEditedIngredientIndex: -1 // ditto above remark
};
case MyShoppingListActions.DELETE_INGREDIENT_ACTION:
/* HERE TOO, for DELETE - do NOT modify existing array. Eschew .splice(), plz.
Q. (Hmm, but seems our (MAX) use of .filter() *CAN* be run on existing array? Hmm.
A. Dumkopff! (oi) Yes, .filter() *DOES* return you a NEW Array. okay.
*/
/* Unnecessary.
const myOneIngredientToBeDeleted = {...ngrxState.ingredients[ngrxAction.myPayload]};
*/
/*
This (above) kinda useless. You can look at (or log if you like)
what ingredient you're deleting, but, we don't make any use of this variable.
*/
/* Unnecessary.
const myIngredientsArrayOneToBeDeletedACopy = [...ngrxState.ingredients];
*/
/*
This (above) not needed: "creating a copy". No.
In fact I hit error when I did the following:
- created this copy,
- and tried (below) to run .filter() on that copy...
- ...with the intention to assign back the now filtered array (copy).
No. As the copy I made is a 'const' array, you can't just assign wholesale back to it:
ERROR: "src/app/shopping-list/store/shopping-list.reducer.ts(178,13): error TS2588:
Cannot assign to 'myIngredientsArrayOneToBeDeletedACopy' because it is a constant."
Instead we'll be sort of running that .filter() 'in situ' (MAX Code approach).
That is, he doesn't bother to create a new variable - no need to. // << Dumkopff! .filter MAKES new array. Cheers.
Hmm. We'll see. << Yah works of course. Cheers.
*/
/* Unnecessary. (and error-generating, too)
myIngredientsArrayOneToBeDeletedACopy = [ // << No. Cannot ASSIGN ('=') to const
...myIngredientsArrayOneToBeDeletedACopy,
myIngredientsArrayOneToBeDeletedACopy.filter(
(nextIngredient, nextIngredientIndexFromFiltering) => {
return nextIngredientIndexFromFiltering !== 1; // << HARD-CODED
}
)
]
*/
return {
...ngrxState, | /*
ngrxAction.myPayload; << Initially, got index off action, payload.
That approach sort of required the action logic to send it in, upon click,
to this particular reducer case/function/method (DELETE and same for UPDATE)
Refactoring, as 'twere:
Now, we've made that index PART of STATE: state.editedIngredientIndex (MAX code)
Now we invoke a central START_EDIT to get that index for BOTH Delete and Update.
*/
}
),
myBeingEditedIngredient: null, // Intuitive? hmm.
/* Reset here upon coming back from DELETE (same for UPDATE) Lect. 358 ~02:49
Hmm, MAX says we at this point are also STOP_EDIT, but instead
of, I don't know, here in middle of this UPDATE Action, calling (?)
the STOP_EDIT Action, we instead simply sort of do the
"non-D.R.Y." mode of pasting in 2 lines of logic here,
that STOP_EDIT does. o well. no biggie.
(line above re: Ingredient; line below re: IngredientIndex)
*/
myBeingEditedIngredientIndex: -1 // ditto above remark
/* Noop. NOT good idea. NO SPLICE (boo hiss)
[
ngrxState.ingredients.splice(ngrxAction.myPayload, 1)
]
*/
};
case MyShoppingListActions.START_EDIT_ACTION:
/*
SEE PASTED-IN OUTPUT FROM THIS CONSOLE LOGGING FURTHER BELOW
*/
console.log('{ ...ngrxState.ingredients } ', { ...ngrxState.ingredients });
console.log('{ ...ngrxState.ingredients[ngrxAction.myPayload] } ', { ...ngrxState.ingredients[ngrxAction.myPayload] });
/*
{name: "ApplesWR__NGRX HardCoded Reducer", amount: 5}
name: "ApplesWR__NGRX HardCoded Reducer"
amount: 5
*/
console.log(' ngrxState.ingredients ', ngrxState.ingredients );
console.log(' ngrxState.ingredients[ngrxAction.myPayload] ', ngrxState.ingredients[ngrxAction.myPayload] );
/*
Ingredient {name: "ApplesWR__NGRX HardCoded Reducer", amount: 5}
name: "ApplesWR__NGRX HardCoded Reducer"
amount: 5
*/
/* WR__ INITIAL. Seems to work A-O.K., but, I may be missing something ...
N.B.: I have/had no 'myBeingEditedIngredient' explicitly included (by me) in the 'return {}'
Hmm.
Why? I instead deal with picking out which ingredient, from the Store array
of ingredients, in logic over in the Store-subscribing() ShoppingEditComponent ngOnInit()
(MAX code does NOT do that.)
return {
...ngrxState,
myBeingEditedIngredientIndex: ngrxAction.myPayload
};
*/
/* MAX Code:
Max *does* deal with the picking out of which ingredient, from the Store's array
of ingredients, right here, in the Reducer 'return {}'
(Not, like I am, over in the ShoppingEditComponent.)
*/
return {
...ngrxState,
myBeingEditedIngredientIndex: ngrxAction.myPayload,
/* N.B. VERY IMPORTANT: ALWAYS A *COPY*, not Reference. Or, "Spread Operator is your friend."
That is...
this next line below (Commented Out) uses the Store's actual array for ingredients. << PROBLEMATIC
Arrays (and Objects) being REFERENCE types, if you modify a reference
to this particular Ingredient object, in our Store's ingredients array,
(e.g. modify over in ShoppingEditComponent),
then you modify the source, too, back in the Store, DIRECTLY. << PROBLEMATIC
You ALWAYS WANT A *COPY* OF A STORE ARRAY, or STORE ARRAY ELEMENT, or other Store data.
>> NO >> myBeingEditedIngredient: ngrxState.ingredients[ngrxAction.myPayload], // << NO <<
*/
/* MAX Explains: Lect. 357 ~04:40
"The spread operator ... gives you a COPY of the Store's array of ingredients (good),
and the result of that (I (WR__) guess ?) needs to go in curly braces ( ? )."
>> "I need to create a new object which can then be edited without problems by using curly braces."
Q. Curly Braces {} ?
- I guess I don't know how come curly braces are required, but I guess that using spread operator
on an array [] of objects {}, (that is [ {}, {} ]), you get a different result, pulling out an element,
such that you do need to (re)-put {} around the resulting element,
to assign to the property 'myBeingEditedIngredient:'. Okay.
- (As compared to the (Commented Out) line above, which is not using spread operator,
and does not need curly braces for the resulting element it gets out, to assign to
the property 'myBeingEditedIngredient:')
A. Curly Braces {} !
https://www.udemy.com/course/the-complete-guide-to-angular-2/learn/lecture/14466570#questions/8570924
Jost:
"With the spread operator (...) you can spread out the properties of an object
into a comma-separated list of key/value pairs, and then add new key/value pairs
(or overwrite an existent object property).
The whole is wrapped in curly braces to reconvert the resulting list into a (new) object."
WR__
Hmm, wonder what the syntax for that exactly looks like, a "comma-separated list of key/value pairs"
around which I need to then supply curly braces. Hmm.
e.g. let myObj = {far: 'cry', 'last': 'week', thisTime: 'around'}
>> 'far': 'cry', 'last': 'week', 'thisTime': 'around' << ?? Seems to be ??
>> far: 'cry', 'last': 'week', thisTime: 'around' << ?? Seems to be ?? [**]
>> ('far': 'cry', 'last': 'week', 'thisTime': 'around') << ?? Nah.
>> {'far': 'cry', 'last': 'week', 'thisTime': 'around'} << ?? Nah.
>> ['far': 'cry', 'last': 'week', 'thisTime': 'around'] << ?? Nah.
>> "'far': 'cry', 'last': 'week', 'thisTime': 'around'" << String seems most likely.
>> "far: 'cry', 'last': 'week', thisTime: 'around'" << (ditto)[**]
[**] And: Perhaps doesn't put single quotes around the keys that didn't have them; dunno
*/
/*
Some Console Logging! (invoked above, before the 'return')
{ ...ngrxState.ingredients } {0: Ingredient, 1: Ingredient, 2: Ingredient, 3: Ingredient}
{ ...ngrxState.ingredients[ngrxAction.myPayload] } {name: "ketchup", amount: 1}
ngrxState.ingredients (4) [Ingredient, Ingredient, Ingredient, Ingredient]
ngrxState.ingredients[ngrxAction.myPayload] Ingredient {name: "ketchup", amount: 1}
wholeDamnSLStore {ingredients: Array(4), myBeingEditedIngredient: {…}, myBeingEditedIngredientIndex: 2}
*/
myBeingEditedIngredient: { ...ngrxState.ingredients[ngrxAction.myPayload] }
}; // /START_EDIT_ACTION
case MyShoppingListActions.STOP_EDIT_ACTION:
return {
...ngrxState,
myBeingEditedIngredientIndex: -1,
myBeingEditedIngredient: null
};
default:
return ngrxState; // << Yes. (Do not use a "copy" ... here on default. good.)
}
} | ingredients: ngrxState.ingredients.filter(
(nextIngredient, nextIngredientIndex) => {
return nextIngredientIndex !== ngrxState.myBeingEditedIngredientIndex; // << PART of STATE = good | random_line_split |
Runner.ts | import { LandRequestQueue } from './Queue';
import { BitbucketClient } from '../bitbucket/BitbucketClient';
import { LandRequestHistory } from './History';
import { Logger } from './Logger';
import { RunnerState, Config, LandRequestOptions } from '../types';
import { withLock } from './utils/locker';
import {
Installation,
LandRequest,
PauseStateTransition,
PullRequest,
Permission,
LandRequestStatus,
} from '../db';
import { permissionService } from './PermissionService';
export class Runner {
constructor(
public queue: LandRequestQueue,
private history: LandRequestHistory,
private client: BitbucketClient,
private config: Config,
) {
// call our checkWaitingLandRequests() function on an interval so that we are always clearing out waiting builds
const timeBetweenChecksMins = 2;
setInterval(() => {
this.checkWaitingLandRequests();
}, timeBetweenChecksMins * 60 * 1000);
}
async getRunning() {
return this.queue.maybeGetStatusForRunningRequest();
}
async next() {
await withLock('Runner:next', async () => {
const running = await this.getRunning();
Logger.info('Next() called', {
running: running,
queue: this.queue,
});
if (running) return;
// check if there is something else in the queue
const landRequestInfo = await this.queue.maybeGetStatusForNextRequestInQueue();
if (!landRequestInfo) return;
const landRequest = landRequestInfo.request;
Logger.info('Checking if still allowed to land...', {
landRequest: landRequest.get(),
});
// TODO: Pass this commit in to isAllowed to land and make sure it hasnt changed
const commit = landRequest.forCommit;
const isAllowedToLand = await this.client.isAllowedToLand(landRequest.pullRequestId);
if (isAllowedToLand.errors.length === 0) {
Logger.info('Allowed to land, creating land build', {
landRequest: landRequest.get(),
});
const buildId = await this.client.createLandBuild(commit);
if (!buildId) return;
await landRequest.setStatus('running');
landRequest.buildId = buildId;
await landRequest.save();
Logger.info('Land build now running', { running: landRequest.get() });
} else {
Logger.info(
{ ...isAllowedToLand, ...landRequest.get() },
'Land request is not allowed to land',
);
await landRequest.setStatus('fail', 'Land request did not pass land checks');
this.next();
}
});
}
onStatusUpdate = async (statusEvent: BB.BuildStatusEvent) => {
const running = await this.getRunning();
if (!running) {
Logger.info('No build running, status event is irrelevant', statusEvent);
return;
}
if (running.request.buildId !== statusEvent.buildId) {
return Logger.warn(
`StatusEvent buildId doesn't match currently running buildId – ${
statusEvent.buildId
} !== ${running.request.buildId || ''}`,
{ statusEvent, running },
);
}
Logger.info('Build status update', { statusEvent, running });
switch (statusEvent.buildStatus) {
case 'SUCCESSFUL': {
try {
const pullRequestId = running.request.pullRequestId;
Logger.info('Attempting merge pull request', { pullRequestId, running });
await this.client.mergePullRequest(pullRequestId);
await running.request.setStatus('success');
} catch (err) {
await running.request.setStatus('fail', 'Unable to merge pull request');
}
break;
}
case 'FAILED': {
Logger.error('Land build failed', {
running: running.get(),
statusEvent,
});
await running.request.setStatus('fail', 'Landkid build failed');
break;
}
case 'STOPPED': {
Logger.warn('Land build has been stopped', {
running: running.get(),
statusEvent,
});
await running.request.setStatus('aborted', 'Landkid pipelines build was stopped');
break;
}
}
this.next();
};
async cancelCurrentlyRunningBuild(user: ISessionUser) {
const running = await this.getRunning();
if (!running) return;
await running.request.setStatus(
'aborted',
`Cancelled by user "${user.aaid}" (${user.displayName})`,
);
if (running.request.buildId) {
this.client.stopLandBuild(running.request.buildId);
}
}
async pause(reason: string, user: ISessionUser) {
await PauseStateTransition.create<PauseStateTransition>({
paused: true,
reason,
pauserAaid: user.aaid,
});
}
async unpause(user: ISessionUser) {
await PauseStateTransition.create<PauseStateTransition>({
paused: false,
pauserAaid: user.aaid,
});
}
private getPauseState = async (): Promise<IPauseState> => {
const state = await PauseStateTransition.findOne<PauseStateTransition>({
order: [['date', 'DESC']],
});
if (!state) {
return {
id: '_',
date: new Date(0),
paused: false,
pauserAaid: '',
reason: null,
};
}
return state.get();
};
public isPaused = async () => {
const state = await PauseStateTransition.findOne<PauseStateTransition>({
order: [['date', 'DESC']],
});
if (!state) return false;
return state.paused;
};
private async createRequestFromOptions(landRequestOptions: LandRequestOptions) {
const pr =
(await PullRequest.findOne<PullRequest>({
where: {
prId: landRequestOptions.prId,
},
})) ||
(await PullRequest.create<PullRequest>({
prId: landRequestOptions.prId, | triggererAaid: landRequestOptions.triggererAaid,
pullRequestId: pr.prId,
forCommit: landRequestOptions.commit,
});
}
async removeLandRequestByPullRequestId(pullRequestId: number, user: ISessionUser) {
const requests = await LandRequest.findAll<LandRequest>({
where: {
pullRequestId,
},
});
for (const request of requests) {
await request.setStatus('aborted', `Cancelled by user: "${user.aaid}" (${user.displayName})`);
}
}
async enqueue(landRequestOptions: LandRequestOptions): Promise<void> {
// TODO: Ensure no land request is pending for this PR
if (await this.isPaused()) return;
const request = await this.createRequestFromOptions(landRequestOptions);
await request.setStatus('queued');
}
async addToWaitingToLand(landRequestOptions: LandRequestOptions) {
// TODO: Ensure no land request is pending for this PR
if (await this.isPaused()) return;
const request = await this.createRequestFromOptions(landRequestOptions);
await request.setStatus('will-queue-when-ready');
this.checkWaitingLandRequests();
}
async moveFromWaitingToQueue(pullRequestId: number) {
const requests = await LandRequest.findAll<LandRequest>({
where: {
pullRequestId,
},
});
for (const request of requests) {
const status = await request.getStatus();
if (status && status.state !== 'will-queue-when-ready') continue;
await request.setStatus('queued');
}
Logger.info('Moving landRequests from waiting to queue', { requests });
this.next();
}
async checkWaitingLandRequests() {
Logger.info('Checking for waiting landrequests ready to queue');
for (let landRequest of await this.queue.getStatusesForWaitingRequests()) {
const pullRequestId = landRequest.request.pullRequestId;
let isAllowedToLand = await this.client.isAllowedToLand(pullRequestId);
if (isAllowedToLand.errors.length === 0) {
this.moveFromWaitingToQueue(pullRequestId);
}
}
}
private getUsersPermissions = async (requestingUser: ISessionUser): Promise<IPermission[]> => {
// TODO: Figure out how to use distinct
const perms = await Permission.findAll<Permission>({
order: [['dateAssigned', 'DESC']],
});
// Need to get only the latest record for each user
const aaidPerms: Record<string, Permission> = {};
for (const perm of perms) {
if (
!aaidPerms[perm.aaid] ||
aaidPerms[perm.aaid].dateAssigned.getTime() < perm.dateAssigned.getTime()
) {
aaidPerms[perm.aaid] = perm;
}
}
// Now we need to filter to only show the records that the requesting user is allowed to see
const allowedToLand: RunnerState['usersAllowedToLand'] = [];
const requestingUserMode = await permissionService.getPermissionForUser(requestingUser.aaid);
for (const aaid of Object.keys(aaidPerms)) {
// admins see all users
if (requestingUserMode === 'admin') {
allowedToLand.push(aaidPerms[aaid]);
// land users can see land and admin users
} else if (requestingUserMode === 'land' && aaidPerms[aaid].mode !== 'read') {
allowedToLand.push(aaidPerms[aaid]);
// read users can only see admins
} else if (requestingUserMode === 'read' && aaidPerms[aaid].mode === 'admin') {
allowedToLand.push(aaidPerms[aaid]);
}
}
return allowedToLand;
};
private getDatesSinceLastFailures = async (): Promise<number> => {
const lastFailure = await LandRequestStatus.findOne<LandRequestStatus>({
where: {
state: {
$in: ['fail', 'aborted'],
},
},
order: [['date', 'DESC']],
});
if (!lastFailure) return -1;
return Math.floor((Date.now() - lastFailure.date.getTime()) / (1000 * 60 * 60 * 24));
};
async getHistory(page: number) {
return this.history.getHistory(page);
}
async getInstallationIfExists() {
const install = await Installation.findOne();
return install;
}
async deleteInstallation() {
await Installation.truncate();
}
async getState(requestingUser: ISessionUser): Promise<RunnerState> {
const [
daysSinceLastFailure,
pauseState,
queue,
usersAllowedToLand,
waitingToQueue,
] = await Promise.all([
this.getDatesSinceLastFailures(),
this.getPauseState(),
this.queue.getStatusesForQueuedRequests(),
this.getUsersPermissions(requestingUser),
this.queue.getStatusesForWaitingRequests(),
]);
return {
daysSinceLastFailure,
pauseState,
queue,
usersAllowedToLand,
waitingToQueue,
bitbucketBaseUrl: `https://bitbucket.org/${this.config.repoConfig.repoOwner}/${
this.config.repoConfig.repoName
}`,
};
}
} | authorAaid: landRequestOptions.prAuthorAaid,
title: landRequestOptions.prTitle,
}));
return await LandRequest.create<LandRequest>({ | random_line_split |
Runner.ts | import { LandRequestQueue } from './Queue';
import { BitbucketClient } from '../bitbucket/BitbucketClient';
import { LandRequestHistory } from './History';
import { Logger } from './Logger';
import { RunnerState, Config, LandRequestOptions } from '../types';
import { withLock } from './utils/locker';
import {
Installation,
LandRequest,
PauseStateTransition,
PullRequest,
Permission,
LandRequestStatus,
} from '../db';
import { permissionService } from './PermissionService';
export class Runner {
constructor(
public queue: LandRequestQueue,
private history: LandRequestHistory,
private client: BitbucketClient,
private config: Config,
) {
// call our checkWaitingLandRequests() function on an interval so that we are always clearing out waiting builds
const timeBetweenChecksMins = 2;
setInterval(() => {
this.checkWaitingLandRequests();
}, timeBetweenChecksMins * 60 * 1000);
}
async getRunning() {
return this.queue.maybeGetStatusForRunningRequest();
}
async next() {
await withLock('Runner:next', async () => {
const running = await this.getRunning();
Logger.info('Next() called', {
running: running,
queue: this.queue,
});
if (running) return;
// check if there is something else in the queue
const landRequestInfo = await this.queue.maybeGetStatusForNextRequestInQueue();
if (!landRequestInfo) return;
const landRequest = landRequestInfo.request;
Logger.info('Checking if still allowed to land...', {
landRequest: landRequest.get(),
});
// TODO: Pass this commit in to isAllowed to land and make sure it hasnt changed
const commit = landRequest.forCommit;
const isAllowedToLand = await this.client.isAllowedToLand(landRequest.pullRequestId);
if (isAllowedToLand.errors.length === 0) {
Logger.info('Allowed to land, creating land build', {
landRequest: landRequest.get(),
});
const buildId = await this.client.createLandBuild(commit);
if (!buildId) return;
await landRequest.setStatus('running');
landRequest.buildId = buildId;
await landRequest.save();
Logger.info('Land build now running', { running: landRequest.get() });
} else {
Logger.info(
{ ...isAllowedToLand, ...landRequest.get() },
'Land request is not allowed to land',
);
await landRequest.setStatus('fail', 'Land request did not pass land checks');
this.next();
}
});
}
onStatusUpdate = async (statusEvent: BB.BuildStatusEvent) => {
const running = await this.getRunning();
if (!running) {
Logger.info('No build running, status event is irrelevant', statusEvent);
return;
}
if (running.request.buildId !== statusEvent.buildId) {
return Logger.warn(
`StatusEvent buildId doesn't match currently running buildId – ${
statusEvent.buildId
} !== ${running.request.buildId || ''}`,
{ statusEvent, running },
);
}
Logger.info('Build status update', { statusEvent, running });
switch (statusEvent.buildStatus) {
case 'SUCCESSFUL': {
try {
const pullRequestId = running.request.pullRequestId;
Logger.info('Attempting merge pull request', { pullRequestId, running });
await this.client.mergePullRequest(pullRequestId);
await running.request.setStatus('success');
} catch (err) {
await running.request.setStatus('fail', 'Unable to merge pull request');
}
break;
}
case 'FAILED': {
Logger.error('Land build failed', {
running: running.get(),
statusEvent,
});
await running.request.setStatus('fail', 'Landkid build failed');
break;
}
case 'STOPPED': {
Logger.warn('Land build has been stopped', {
running: running.get(),
statusEvent,
});
await running.request.setStatus('aborted', 'Landkid pipelines build was stopped');
break;
}
}
this.next();
};
async cancelCurrentlyRunningBuild(user: ISessionUser) {
const running = await this.getRunning();
if (!running) return;
await running.request.setStatus(
'aborted',
`Cancelled by user "${user.aaid}" (${user.displayName})`,
);
if (running.request.buildId) {
this.client.stopLandBuild(running.request.buildId);
}
}
async pause(reason: string, user: ISessionUser) {
await PauseStateTransition.create<PauseStateTransition>({
paused: true,
reason,
pauserAaid: user.aaid,
});
}
async unpause(user: ISessionUser) {
| private getPauseState = async (): Promise<IPauseState> => {
const state = await PauseStateTransition.findOne<PauseStateTransition>({
order: [['date', 'DESC']],
});
if (!state) {
return {
id: '_',
date: new Date(0),
paused: false,
pauserAaid: '',
reason: null,
};
}
return state.get();
};
public isPaused = async () => {
const state = await PauseStateTransition.findOne<PauseStateTransition>({
order: [['date', 'DESC']],
});
if (!state) return false;
return state.paused;
};
private async createRequestFromOptions(landRequestOptions: LandRequestOptions) {
const pr =
(await PullRequest.findOne<PullRequest>({
where: {
prId: landRequestOptions.prId,
},
})) ||
(await PullRequest.create<PullRequest>({
prId: landRequestOptions.prId,
authorAaid: landRequestOptions.prAuthorAaid,
title: landRequestOptions.prTitle,
}));
return await LandRequest.create<LandRequest>({
triggererAaid: landRequestOptions.triggererAaid,
pullRequestId: pr.prId,
forCommit: landRequestOptions.commit,
});
}
async removeLandRequestByPullRequestId(pullRequestId: number, user: ISessionUser) {
const requests = await LandRequest.findAll<LandRequest>({
where: {
pullRequestId,
},
});
for (const request of requests) {
await request.setStatus('aborted', `Cancelled by user: "${user.aaid}" (${user.displayName})`);
}
}
async enqueue(landRequestOptions: LandRequestOptions): Promise<void> {
// TODO: Ensure no land request is pending for this PR
if (await this.isPaused()) return;
const request = await this.createRequestFromOptions(landRequestOptions);
await request.setStatus('queued');
}
async addToWaitingToLand(landRequestOptions: LandRequestOptions) {
// TODO: Ensure no land request is pending for this PR
if (await this.isPaused()) return;
const request = await this.createRequestFromOptions(landRequestOptions);
await request.setStatus('will-queue-when-ready');
this.checkWaitingLandRequests();
}
async moveFromWaitingToQueue(pullRequestId: number) {
const requests = await LandRequest.findAll<LandRequest>({
where: {
pullRequestId,
},
});
for (const request of requests) {
const status = await request.getStatus();
if (status && status.state !== 'will-queue-when-ready') continue;
await request.setStatus('queued');
}
Logger.info('Moving landRequests from waiting to queue', { requests });
this.next();
}
async checkWaitingLandRequests() {
Logger.info('Checking for waiting landrequests ready to queue');
for (let landRequest of await this.queue.getStatusesForWaitingRequests()) {
const pullRequestId = landRequest.request.pullRequestId;
let isAllowedToLand = await this.client.isAllowedToLand(pullRequestId);
if (isAllowedToLand.errors.length === 0) {
this.moveFromWaitingToQueue(pullRequestId);
}
}
}
private getUsersPermissions = async (requestingUser: ISessionUser): Promise<IPermission[]> => {
// TODO: Figure out how to use distinct
const perms = await Permission.findAll<Permission>({
order: [['dateAssigned', 'DESC']],
});
// Need to get only the latest record for each user
const aaidPerms: Record<string, Permission> = {};
for (const perm of perms) {
if (
!aaidPerms[perm.aaid] ||
aaidPerms[perm.aaid].dateAssigned.getTime() < perm.dateAssigned.getTime()
) {
aaidPerms[perm.aaid] = perm;
}
}
// Now we need to filter to only show the records that the requesting user is allowed to see
const allowedToLand: RunnerState['usersAllowedToLand'] = [];
const requestingUserMode = await permissionService.getPermissionForUser(requestingUser.aaid);
for (const aaid of Object.keys(aaidPerms)) {
// admins see all users
if (requestingUserMode === 'admin') {
allowedToLand.push(aaidPerms[aaid]);
// land users can see land and admin users
} else if (requestingUserMode === 'land' && aaidPerms[aaid].mode !== 'read') {
allowedToLand.push(aaidPerms[aaid]);
// read users can only see admins
} else if (requestingUserMode === 'read' && aaidPerms[aaid].mode === 'admin') {
allowedToLand.push(aaidPerms[aaid]);
}
}
return allowedToLand;
};
private getDatesSinceLastFailures = async (): Promise<number> => {
const lastFailure = await LandRequestStatus.findOne<LandRequestStatus>({
where: {
state: {
$in: ['fail', 'aborted'],
},
},
order: [['date', 'DESC']],
});
if (!lastFailure) return -1;
return Math.floor((Date.now() - lastFailure.date.getTime()) / (1000 * 60 * 60 * 24));
};
async getHistory(page: number) {
return this.history.getHistory(page);
}
async getInstallationIfExists() {
const install = await Installation.findOne();
return install;
}
async deleteInstallation() {
await Installation.truncate();
}
async getState(requestingUser: ISessionUser): Promise<RunnerState> {
const [
daysSinceLastFailure,
pauseState,
queue,
usersAllowedToLand,
waitingToQueue,
] = await Promise.all([
this.getDatesSinceLastFailures(),
this.getPauseState(),
this.queue.getStatusesForQueuedRequests(),
this.getUsersPermissions(requestingUser),
this.queue.getStatusesForWaitingRequests(),
]);
return {
daysSinceLastFailure,
pauseState,
queue,
usersAllowedToLand,
waitingToQueue,
bitbucketBaseUrl: `https://bitbucket.org/${this.config.repoConfig.repoOwner}/${
this.config.repoConfig.repoName
}`,
};
}
}
| await PauseStateTransition.create<PauseStateTransition>({
paused: false,
pauserAaid: user.aaid,
});
}
| identifier_body |
Runner.ts | import { LandRequestQueue } from './Queue';
import { BitbucketClient } from '../bitbucket/BitbucketClient';
import { LandRequestHistory } from './History';
import { Logger } from './Logger';
import { RunnerState, Config, LandRequestOptions } from '../types';
import { withLock } from './utils/locker';
import {
Installation,
LandRequest,
PauseStateTransition,
PullRequest,
Permission,
LandRequestStatus,
} from '../db';
import { permissionService } from './PermissionService';
export class Runner {
constructor(
public queue: LandRequestQueue,
private history: LandRequestHistory,
private client: BitbucketClient,
private config: Config,
) {
// call our checkWaitingLandRequests() function on an interval so that we are always clearing out waiting builds
const timeBetweenChecksMins = 2;
setInterval(() => {
this.checkWaitingLandRequests();
}, timeBetweenChecksMins * 60 * 1000);
}
async getRunning() {
return this.queue.maybeGetStatusForRunningRequest();
}
async next() {
await withLock('Runner:next', async () => {
const running = await this.getRunning();
Logger.info('Next() called', {
running: running,
queue: this.queue,
});
if (running) return;
// check if there is something else in the queue
const landRequestInfo = await this.queue.maybeGetStatusForNextRequestInQueue();
if (!landRequestInfo) return;
const landRequest = landRequestInfo.request;
Logger.info('Checking if still allowed to land...', {
landRequest: landRequest.get(),
});
// TODO: Pass this commit in to isAllowed to land and make sure it hasnt changed
const commit = landRequest.forCommit;
const isAllowedToLand = await this.client.isAllowedToLand(landRequest.pullRequestId);
if (isAllowedToLand.errors.length === 0) {
Logger.info('Allowed to land, creating land build', {
landRequest: landRequest.get(),
});
const buildId = await this.client.createLandBuild(commit);
if (!buildId) return;
await landRequest.setStatus('running');
landRequest.buildId = buildId;
await landRequest.save();
Logger.info('Land build now running', { running: landRequest.get() });
} else {
Logger.info(
{ ...isAllowedToLand, ...landRequest.get() },
'Land request is not allowed to land',
);
await landRequest.setStatus('fail', 'Land request did not pass land checks');
this.next();
}
});
}
onStatusUpdate = async (statusEvent: BB.BuildStatusEvent) => {
const running = await this.getRunning();
if (!running) {
Logger.info('No build running, status event is irrelevant', statusEvent);
return;
}
if (running.request.buildId !== statusEvent.buildId) {
return Logger.warn(
`StatusEvent buildId doesn't match currently running buildId – ${
statusEvent.buildId
} !== ${running.request.buildId || ''}`,
{ statusEvent, running },
);
}
Logger.info('Build status update', { statusEvent, running });
switch (statusEvent.buildStatus) {
case 'SUCCESSFUL': {
try {
const pullRequestId = running.request.pullRequestId;
Logger.info('Attempting merge pull request', { pullRequestId, running });
await this.client.mergePullRequest(pullRequestId);
await running.request.setStatus('success');
} catch (err) {
await running.request.setStatus('fail', 'Unable to merge pull request');
}
break;
}
case 'FAILED': {
Logger.error('Land build failed', {
running: running.get(),
statusEvent,
});
await running.request.setStatus('fail', 'Landkid build failed');
break;
}
case 'STOPPED': {
Logger.warn('Land build has been stopped', {
running: running.get(),
statusEvent,
});
await running.request.setStatus('aborted', 'Landkid pipelines build was stopped');
break;
}
}
this.next();
};
async cancelCurrentlyRunningBuild(user: ISessionUser) {
const running = await this.getRunning();
if (!running) return;
await running.request.setStatus(
'aborted',
`Cancelled by user "${user.aaid}" (${user.displayName})`,
);
if (running.request.buildId) {
this.client.stopLandBuild(running.request.buildId);
}
}
async pause(reason: string, user: ISessionUser) {
await PauseStateTransition.create<PauseStateTransition>({
paused: true,
reason,
pauserAaid: user.aaid,
});
}
async unpause(user: ISessionUser) {
await PauseStateTransition.create<PauseStateTransition>({
paused: false,
pauserAaid: user.aaid,
});
}
private getPauseState = async (): Promise<IPauseState> => {
const state = await PauseStateTransition.findOne<PauseStateTransition>({
order: [['date', 'DESC']],
});
if (!state) {
return {
id: '_',
date: new Date(0),
paused: false,
pauserAaid: '',
reason: null,
};
}
return state.get();
};
public isPaused = async () => {
const state = await PauseStateTransition.findOne<PauseStateTransition>({
order: [['date', 'DESC']],
});
if (!state) return false;
return state.paused;
};
private async createRequestFromOptions(landRequestOptions: LandRequestOptions) {
const pr =
(await PullRequest.findOne<PullRequest>({
where: {
prId: landRequestOptions.prId,
},
})) ||
(await PullRequest.create<PullRequest>({
prId: landRequestOptions.prId,
authorAaid: landRequestOptions.prAuthorAaid,
title: landRequestOptions.prTitle,
}));
return await LandRequest.create<LandRequest>({
triggererAaid: landRequestOptions.triggererAaid,
pullRequestId: pr.prId,
forCommit: landRequestOptions.commit,
});
}
async removeLandRequestByPullRequestId(pullRequestId: number, user: ISessionUser) {
const requests = await LandRequest.findAll<LandRequest>({
where: {
pullRequestId,
},
});
for (const request of requests) {
await request.setStatus('aborted', `Cancelled by user: "${user.aaid}" (${user.displayName})`);
}
}
async enqueue(landRequestOptions: LandRequestOptions): Promise<void> {
// TODO: Ensure no land request is pending for this PR
if (await this.isPaused()) return;
const request = await this.createRequestFromOptions(landRequestOptions);
await request.setStatus('queued');
}
async addToWaitingToLand(landRequestOptions: LandRequestOptions) {
// TODO: Ensure no land request is pending for this PR
if (await this.isPaused()) return;
const request = await this.createRequestFromOptions(landRequestOptions);
await request.setStatus('will-queue-when-ready');
this.checkWaitingLandRequests();
}
async moveFromWaitingToQueue(pullRequestId: number) {
const requests = await LandRequest.findAll<LandRequest>({
where: {
pullRequestId,
},
});
for (const request of requests) {
const status = await request.getStatus();
if (status && status.state !== 'will-queue-when-ready') continue;
await request.setStatus('queued');
}
Logger.info('Moving landRequests from waiting to queue', { requests });
this.next();
}
async ch | {
Logger.info('Checking for waiting landrequests ready to queue');
for (let landRequest of await this.queue.getStatusesForWaitingRequests()) {
const pullRequestId = landRequest.request.pullRequestId;
let isAllowedToLand = await this.client.isAllowedToLand(pullRequestId);
if (isAllowedToLand.errors.length === 0) {
this.moveFromWaitingToQueue(pullRequestId);
}
}
}
private getUsersPermissions = async (requestingUser: ISessionUser): Promise<IPermission[]> => {
// TODO: Figure out how to use distinct
const perms = await Permission.findAll<Permission>({
order: [['dateAssigned', 'DESC']],
});
// Need to get only the latest record for each user
const aaidPerms: Record<string, Permission> = {};
for (const perm of perms) {
if (
!aaidPerms[perm.aaid] ||
aaidPerms[perm.aaid].dateAssigned.getTime() < perm.dateAssigned.getTime()
) {
aaidPerms[perm.aaid] = perm;
}
}
// Now we need to filter to only show the records that the requesting user is allowed to see
const allowedToLand: RunnerState['usersAllowedToLand'] = [];
const requestingUserMode = await permissionService.getPermissionForUser(requestingUser.aaid);
for (const aaid of Object.keys(aaidPerms)) {
// admins see all users
if (requestingUserMode === 'admin') {
allowedToLand.push(aaidPerms[aaid]);
// land users can see land and admin users
} else if (requestingUserMode === 'land' && aaidPerms[aaid].mode !== 'read') {
allowedToLand.push(aaidPerms[aaid]);
// read users can only see admins
} else if (requestingUserMode === 'read' && aaidPerms[aaid].mode === 'admin') {
allowedToLand.push(aaidPerms[aaid]);
}
}
return allowedToLand;
};
private getDatesSinceLastFailures = async (): Promise<number> => {
const lastFailure = await LandRequestStatus.findOne<LandRequestStatus>({
where: {
state: {
$in: ['fail', 'aborted'],
},
},
order: [['date', 'DESC']],
});
if (!lastFailure) return -1;
return Math.floor((Date.now() - lastFailure.date.getTime()) / (1000 * 60 * 60 * 24));
};
async getHistory(page: number) {
return this.history.getHistory(page);
}
async getInstallationIfExists() {
const install = await Installation.findOne();
return install;
}
async deleteInstallation() {
await Installation.truncate();
}
async getState(requestingUser: ISessionUser): Promise<RunnerState> {
const [
daysSinceLastFailure,
pauseState,
queue,
usersAllowedToLand,
waitingToQueue,
] = await Promise.all([
this.getDatesSinceLastFailures(),
this.getPauseState(),
this.queue.getStatusesForQueuedRequests(),
this.getUsersPermissions(requestingUser),
this.queue.getStatusesForWaitingRequests(),
]);
return {
daysSinceLastFailure,
pauseState,
queue,
usersAllowedToLand,
waitingToQueue,
bitbucketBaseUrl: `https://bitbucket.org/${this.config.repoConfig.repoOwner}/${
this.config.repoConfig.repoName
}`,
};
}
}
| eckWaitingLandRequests() | identifier_name |
client_iface.go | package db
import (
"errors"
"fmt"
"os"
"path/filepath"
bolt "github.com/coreos/bbolt"
"github.com/golang/protobuf/proto"
"github.com/iancoleman/strcase"
"github.com/jinzhu/inflection"
log "github.com/sirupsen/logrus"
"jaytaylor.com/andromeda/domain"
"jaytaylor.com/andromeda/pkg/contains"
)
const (
TableMetadata = "andromeda-metadata"
TablePackages = "packages"
TablePendingReferences = "pending-references"
TableCrawlResults = "crawl-result"
TableToCrawl = "to-crawl"
MaxPriority = 10 // Number of supported priorities, 1-indexed.
)
var (
ErrKeyNotFound = errors.New("requested key not found")
ErrNotImplemented = errors.New("function not implemented")
ErrMetadataUnsupportedSrcType = errors.New("unsupported src type: must be an []byte, string, or proto.Message")
ErrMetadataUnsupportedDstType = errors.New("unsupported dst type: must be an *[]byte, *string, or proto.Message")
DefaultQueuePriority = 3
DefaultBoltQueueFilename = "queue.bolt"
tables = []string{
TableMetadata,
TablePackages,
TableToCrawl,
TablePendingReferences,
TableCrawlResults,
}
qTables = []string{
TableToCrawl,
TableCrawlResults,
}
// pkgSepB is a byte array of the package component separator character.
// It's used for hierarchical searches and lookups.
pkgSepB = []byte{'/'}
)
type Client interface {
Open() error // Open / start DB client connection.
Close() error // Close / shutdown the DB client connection.
Destroy(tables ...string) error // Destroy K/V tables and / or queue topics.
EachRow(table string, fn func(k []byte, v []byte)) error // Invoke a callback on the key/value pair for each row of the named table.
EachRowWithBreak(table string, fn func(k []byte, v []byte) bool) error // Invoke a callback on the key/value pair for each row of the named table until cb returns false.
PackageSave(pkgs ...*domain.Package) error // Performs an upsert merge operation on a fully crawled package.
PackageDelete(pkgPaths ...string) error // Delete a package from the index. Complete erasure.
Package(pkgPath string) (*domain.Package, error) // Retrieve a specific package..
Packages(pkgPaths ...string) (map[string]*domain.Package, error) // Retrieve several packages.
EachPackage(func(pkg *domain.Package)) error // Iterates over all indexed packages and invokes callback on each.
EachPackageWithBreak(func(pkg *domain.Package) bool) error // Iterates over packages until callback returns false.
PathPrefixSearch(prefix string) (map[string]*domain.Package, error) // Search for packages with paths matching a specific prefix.
PackagesLen() (int, error) // Number of packages in index.
RecordImportedBy(refPkg *domain.Package, resources map[string]*domain.PackageReferences) error // Save imported-by relationship updates.
CrawlResultAdd(cr *domain.CrawlResult, opts *QueueOptions) error // Append a crawl-result to the queue for later merging and save.
CrawlResultDequeue() (*domain.CrawlResult, error) // Pop a crawl-result from the queue.
EachCrawlResult(func(cr *domain.CrawlResult)) error // Iterates over all crawl-results and invokes callback on each.
EachCrawlResultWithBreak(func(cr *domain.CrawlResult) bool) error // Iterates over all crawl-results and invokes callback until callback returns false.
CrawlResultsLen() (int, error) // Number of unprocessed crawl results.
ToCrawlAdd(entries []*domain.ToCrawlEntry, opts *QueueOptions) (int, error) // Only adds entries which don't already exist. Returns number of new items added.
ToCrawlRemove(pkgs []string) (int, error) // Scrubs items from queue.
ToCrawlDequeue() (*domain.ToCrawlEntry, error) // Pop an entry from the crawl queue.
EachToCrawl(func(entry *domain.ToCrawlEntry)) error // Iterates over all to-crawl entries and invokes callback on each.
EachToCrawlWithBreak(func(entry *domain.ToCrawlEntry) bool) error // Iterates over to-crawl entries until callback returns false.
ToCrawlsLen() (int, error) // Number of packages currently awaiting crawl.
MetaSave(key string, src interface{}) error // Store metadata key/value. NB: src must be one of raw []byte, string, or proto.Message struct.
MetaDelete(key string) error // Delete a metadata key.
Meta(key string, dst interface{}) error // Retrieve metadata key and populate into dst. NB: dst must be one of *[]byte, *string, or proto.Message struct.
PendingReferences(pkgPathPrefix string) ([]*domain.PendingReferences, error) // Retrieve pending references listing for a package path prefix.
PendingReferencesSave(pendingRefs ...*domain.PendingReferences) error // Save pending references.
PendingReferencesDelete(keys ...string) error // Delete pending references keys.
EachPendingReferences(fn func(pendingRefs *domain.PendingReferences)) error // Iterate over each *domain.PrendingReferences object from the pending-references table.
EachPendingReferencesWithBreak(fn func(pendingRefs *domain.PendingReferences) bool) error // Iterate over each *domain.PrendingReferences object from the pending-references table until callback returns false.
PendingReferencesLen() (int, error) // Number of pending references keys.
RebuildTo(otherClient Client, kvFilters ...KeyValueFilterFunc) error // Rebuild a fresh copy of the DB at destination. Return ErrNotImplmented if not supported. Optionally pass in one or more KeyValueFilterFunc functions.
Backend() Backend // Expose underlying backend impl.
Queue() Queue // Expose underlying queue impl.
}
type KeyValueFilterFunc func(table []byte, key []byte, value []byte) (keyOut []byte, valueOut []byte)
// skipKVFilterFunc is an internal reference used by SkipKVFilter.
func skipKVFilterFunc(table []byte, key []byte, value []byte) (keyOut []byte, valueOut []byte) {
panic("software author error: should never be invoked")
return key, value
}
// SkipKVFilter is a signal filter sentinel value to skip Key-Value tables.
var SkipKVFilter = skipKVFilterFunc
// skipQFilterFunc is an internal reference used by SkipQFilter.
func skipQFilterFunc(table []byte, key []byte, value []byte) (keyOut []byte, valueOut []byte) {
panic("software author error: should never be invoked")
return key, value
}
// SkipQFilter is a signal filter sentinel value to skip Queue tables.
var SkipQFilter = skipQFilterFunc
type Config interface {
Type() Type // Configuration type specifier.
}
func NewConfig(driver string, dbFile string) Config {
switch driver {
case "bolt", "boltdb":
return NewBoltConfig(dbFile)
case "rocks", "rocksdb":
return NewRocksConfig(dbFile)
case "postgres", "postgresql", "pg":
return NewPostgresConfig(dbFile)
default:
panic(fmt.Sprintf("Unrecognized or unsupported DB driver %q", driver))
}
}
// NewClient constructs a new DB client based on the passed configuration.
func NewClient(config Config) Client |
type QueueOptions struct {
Priority int
OnlyIfNotExists bool // Only enqueue items which don't already exist.
}
func NewQueueOptions() *QueueOptions {
opts := &QueueOptions{
Priority: DefaultQueuePriority,
}
return opts
}
// WithClient is a convenience utility which handles DB client construction,
// open, and close..
func WithClient(config Config, fn func(client Client) error) (err error) {
client := NewClient(config)
if err = client.Open(); err != nil {
err = fmt.Errorf("opening DB client %T: %s", client, err)
return
}
defer func() {
if closeErr := client.Close(); closeErr != nil {
if err == nil {
err = fmt.Errorf("closing DB client %T: %s", client, closeErr)
} else {
log.Errorf("Existing error before attempt to close DB client %T: %s", client, err)
log.Errorf("Also encountered problem closing DB client %T: %s", client, closeErr)
}
}
}()
if err = fn(client); err != nil {
return
}
return
}
// kvTables returns the names of the "regular" key-value tables.
func kvTables() []string {
kv := []string{}
for _, table := range tables {
regular := true
for _, qTable := range qTables {
if table == qTable {
regular = false
break
}
}
if regular {
kv = append(kv, table)
}
}
return kv
}
// KVTables publicly exported version of kvTables.
func KVTables() []string { return kvTables() }
// IsKV returns true when s is the name of a Key-Value oriented table.
//
// Note: Does not normalize postgres_formatted_names..
func IsKV(s string) bool {
return contains.String(kvTables(), s)
}
// QTables returns slice of queue table names.
func QTables() []string {
tables := []string{}
for _, table := range qTables {
tables = append(tables, table)
}
return tables
}
// Returns true when the name corresponds with a table.
//
// Note: Does not normalize postgres_formatted_names..
func IsQ(s string) bool {
return contains.String(qTables, s)
}
// Tables returns a slice of all tables.
func Tables() []string {
out := []string{}
for _, table := range tables {
out = append(out, table)
}
return out
}
// StructFor takes a table or queue name and returns a pointer to the newly
// allocated struct of the corresponding type associated with the table.
func StructFor(tableOrQueue string) (proto.Message, error) {
tableOrQueue = strcase.ToKebab(tableOrQueue)
if tableOrQueue == "pkg" || tableOrQueue == "pkgs" {
tableOrQueue = TablePackages
}
switch tableOrQueue {
// N.B.: Metadata type is arbitrary on a per-key basis, so unsupported here.
// case TableMetadata:
case inflection.Plural(TablePackages), inflection.Singular(TablePackages):
return &domain.Package{}, nil
case inflection.Plural(TablePendingReferences), inflection.Singular(TablePendingReferences):
return &domain.PendingReferences{}, nil
case inflection.Plural(TableCrawlResults), inflection.Singular(TableCrawlResults):
return &domain.CrawlResult{}, nil
case inflection.Plural(TableToCrawl), inflection.Singular(TableToCrawl):
return &domain.ToCrawlEntry{}, nil
default:
return nil, fmt.Errorf("unrecognized or unsupported table or queue %q", tableOrQueue)
}
}
// FuzzyTableResolver attempts to resolve the input string to a corresponding table or
// queue name.
//
// An empty string is returned if no match is found.
func FuzzyTableResolver(tableOrQueue string) string {
if tableOrQueue == "pkg" || tableOrQueue == "pkgs" {
return TablePackages
}
if tableOrQueue == "pending" {
return TablePendingReferences
}
if tableOrQueue == "metadata" || tableOrQueue == "meta" {
return TableMetadata
}
for _, name := range tables {
if inflection.Singular(name) == tableOrQueue || inflection.Plural(name) == tableOrQueue {
return name
}
}
return ""
}
| {
typ := config.Type()
switch typ {
case Bolt:
be := NewBoltBackend(config.(*BoltConfig))
// TODO: Return an error instead of panicking.
if err := be.Open(); err != nil {
panic(fmt.Errorf("Opening bolt backend: %s", err))
}
q := NewBoltQueue(be.db)
return newKVClient(be, q)
case Rocks:
// MORE TEMPORARY UGLINESS TO MAKE IT WORK FOR NOW:
if err := os.MkdirAll(config.(*RocksConfig).Dir, os.FileMode(int(0700))); err != nil {
panic(fmt.Errorf("Creating rocks directory %q: %s", config.(*RocksConfig).Dir, err))
}
be := NewRocksBackend(config.(*RocksConfig))
queueFile := filepath.Join(config.(*RocksConfig).Dir, DefaultBoltQueueFilename)
db, err := bolt.Open(queueFile, 0600, NewBoltConfig("").BoltOptions)
if err != nil {
panic(fmt.Errorf("Creating bolt queue: %s", err))
}
q := NewBoltQueue(db)
return newKVClient(be, q)
case Postgres:
be := NewPostgresBackend(config.(*PostgresConfig))
q := NewPostgresQueue(config.(*PostgresConfig))
return newKVClient(be, q)
default:
panic(fmt.Errorf("no client constructor available for db configuration type: %v", typ))
}
} | identifier_body |
client_iface.go | package db
import (
"errors"
"fmt"
"os"
"path/filepath"
bolt "github.com/coreos/bbolt"
"github.com/golang/protobuf/proto"
"github.com/iancoleman/strcase"
"github.com/jinzhu/inflection"
log "github.com/sirupsen/logrus"
"jaytaylor.com/andromeda/domain"
"jaytaylor.com/andromeda/pkg/contains"
)
const (
TableMetadata = "andromeda-metadata"
TablePackages = "packages"
TablePendingReferences = "pending-references"
TableCrawlResults = "crawl-result"
TableToCrawl = "to-crawl"
MaxPriority = 10 // Number of supported priorities, 1-indexed.
)
var (
ErrKeyNotFound = errors.New("requested key not found")
ErrNotImplemented = errors.New("function not implemented")
ErrMetadataUnsupportedSrcType = errors.New("unsupported src type: must be an []byte, string, or proto.Message")
ErrMetadataUnsupportedDstType = errors.New("unsupported dst type: must be an *[]byte, *string, or proto.Message")
DefaultQueuePriority = 3
DefaultBoltQueueFilename = "queue.bolt"
tables = []string{
TableMetadata,
TablePackages,
TableToCrawl,
TablePendingReferences,
TableCrawlResults,
}
qTables = []string{
TableToCrawl,
TableCrawlResults,
}
// pkgSepB is a byte array of the package component separator character.
// It's used for hierarchical searches and lookups.
pkgSepB = []byte{'/'}
)
type Client interface {
Open() error // Open / start DB client connection.
Close() error // Close / shutdown the DB client connection.
Destroy(tables ...string) error // Destroy K/V tables and / or queue topics.
EachRow(table string, fn func(k []byte, v []byte)) error // Invoke a callback on the key/value pair for each row of the named table.
EachRowWithBreak(table string, fn func(k []byte, v []byte) bool) error // Invoke a callback on the key/value pair for each row of the named table until cb returns false.
PackageSave(pkgs ...*domain.Package) error // Performs an upsert merge operation on a fully crawled package.
PackageDelete(pkgPaths ...string) error // Delete a package from the index. Complete erasure.
Package(pkgPath string) (*domain.Package, error) // Retrieve a specific package..
Packages(pkgPaths ...string) (map[string]*domain.Package, error) // Retrieve several packages.
EachPackage(func(pkg *domain.Package)) error // Iterates over all indexed packages and invokes callback on each.
EachPackageWithBreak(func(pkg *domain.Package) bool) error // Iterates over packages until callback returns false.
PathPrefixSearch(prefix string) (map[string]*domain.Package, error) // Search for packages with paths matching a specific prefix.
PackagesLen() (int, error) // Number of packages in index.
RecordImportedBy(refPkg *domain.Package, resources map[string]*domain.PackageReferences) error // Save imported-by relationship updates.
CrawlResultAdd(cr *domain.CrawlResult, opts *QueueOptions) error // Append a crawl-result to the queue for later merging and save.
CrawlResultDequeue() (*domain.CrawlResult, error) // Pop a crawl-result from the queue.
EachCrawlResult(func(cr *domain.CrawlResult)) error // Iterates over all crawl-results and invokes callback on each.
EachCrawlResultWithBreak(func(cr *domain.CrawlResult) bool) error // Iterates over all crawl-results and invokes callback until callback returns false.
CrawlResultsLen() (int, error) // Number of unprocessed crawl results.
ToCrawlAdd(entries []*domain.ToCrawlEntry, opts *QueueOptions) (int, error) // Only adds entries which don't already exist. Returns number of new items added.
ToCrawlRemove(pkgs []string) (int, error) // Scrubs items from queue.
ToCrawlDequeue() (*domain.ToCrawlEntry, error) // Pop an entry from the crawl queue.
EachToCrawl(func(entry *domain.ToCrawlEntry)) error // Iterates over all to-crawl entries and invokes callback on each.
EachToCrawlWithBreak(func(entry *domain.ToCrawlEntry) bool) error // Iterates over to-crawl entries until callback returns false.
ToCrawlsLen() (int, error) // Number of packages currently awaiting crawl.
MetaSave(key string, src interface{}) error // Store metadata key/value. NB: src must be one of raw []byte, string, or proto.Message struct.
MetaDelete(key string) error // Delete a metadata key.
Meta(key string, dst interface{}) error // Retrieve metadata key and populate into dst. NB: dst must be one of *[]byte, *string, or proto.Message struct.
PendingReferences(pkgPathPrefix string) ([]*domain.PendingReferences, error) // Retrieve pending references listing for a package path prefix.
PendingReferencesSave(pendingRefs ...*domain.PendingReferences) error // Save pending references.
PendingReferencesDelete(keys ...string) error // Delete pending references keys.
EachPendingReferences(fn func(pendingRefs *domain.PendingReferences)) error // Iterate over each *domain.PrendingReferences object from the pending-references table.
EachPendingReferencesWithBreak(fn func(pendingRefs *domain.PendingReferences) bool) error // Iterate over each *domain.PrendingReferences object from the pending-references table until callback returns false.
PendingReferencesLen() (int, error) // Number of pending references keys.
RebuildTo(otherClient Client, kvFilters ...KeyValueFilterFunc) error // Rebuild a fresh copy of the DB at destination. Return ErrNotImplmented if not supported. Optionally pass in one or more KeyValueFilterFunc functions.
Backend() Backend // Expose underlying backend impl.
Queue() Queue // Expose underlying queue impl.
}
type KeyValueFilterFunc func(table []byte, key []byte, value []byte) (keyOut []byte, valueOut []byte)
// skipKVFilterFunc is an internal reference used by SkipKVFilter.
func skipKVFilterFunc(table []byte, key []byte, value []byte) (keyOut []byte, valueOut []byte) {
panic("software author error: should never be invoked")
return key, value
}
// SkipKVFilter is a signal filter sentinel value to skip Key-Value tables.
var SkipKVFilter = skipKVFilterFunc
// skipQFilterFunc is an internal reference used by SkipQFilter.
func skipQFilterFunc(table []byte, key []byte, value []byte) (keyOut []byte, valueOut []byte) {
panic("software author error: should never be invoked")
return key, value
}
// SkipQFilter is a signal filter sentinel value to skip Queue tables.
var SkipQFilter = skipQFilterFunc
type Config interface {
Type() Type // Configuration type specifier.
}
func NewConfig(driver string, dbFile string) Config {
switch driver {
case "bolt", "boltdb":
return NewBoltConfig(dbFile)
case "rocks", "rocksdb":
return NewRocksConfig(dbFile)
case "postgres", "postgresql", "pg":
return NewPostgresConfig(dbFile)
default:
panic(fmt.Sprintf("Unrecognized or unsupported DB driver %q", driver))
}
}
// NewClient constructs a new DB client based on the passed configuration.
func NewClient(config Config) Client {
typ := config.Type()
switch typ {
case Bolt:
be := NewBoltBackend(config.(*BoltConfig))
// TODO: Return an error instead of panicking.
if err := be.Open(); err != nil {
panic(fmt.Errorf("Opening bolt backend: %s", err))
}
q := NewBoltQueue(be.db)
return newKVClient(be, q)
case Rocks:
// MORE TEMPORARY UGLINESS TO MAKE IT WORK FOR NOW:
if err := os.MkdirAll(config.(*RocksConfig).Dir, os.FileMode(int(0700))); err != nil {
panic(fmt.Errorf("Creating rocks directory %q: %s", config.(*RocksConfig).Dir, err))
}
be := NewRocksBackend(config.(*RocksConfig))
queueFile := filepath.Join(config.(*RocksConfig).Dir, DefaultBoltQueueFilename)
db, err := bolt.Open(queueFile, 0600, NewBoltConfig("").BoltOptions)
if err != nil {
panic(fmt.Errorf("Creating bolt queue: %s", err))
}
q := NewBoltQueue(db)
return newKVClient(be, q)
case Postgres:
be := NewPostgresBackend(config.(*PostgresConfig))
q := NewPostgresQueue(config.(*PostgresConfig))
return newKVClient(be, q)
default:
panic(fmt.Errorf("no client constructor available for db configuration type: %v", typ))
}
}
type QueueOptions struct {
Priority int
OnlyIfNotExists bool // Only enqueue items which don't already exist.
}
func NewQueueOptions() *QueueOptions {
opts := &QueueOptions{
Priority: DefaultQueuePriority,
}
return opts
}
// WithClient is a convenience utility which handles DB client construction,
// open, and close..
func WithClient(config Config, fn func(client Client) error) (err error) {
client := NewClient(config)
if err = client.Open(); err != nil {
err = fmt.Errorf("opening DB client %T: %s", client, err)
return
}
defer func() {
if closeErr := client.Close(); closeErr != nil {
if err == nil {
err = fmt.Errorf("closing DB client %T: %s", client, closeErr)
} else {
log.Errorf("Existing error before attempt to close DB client %T: %s", client, err)
log.Errorf("Also encountered problem closing DB client %T: %s", client, closeErr)
}
}
}()
if err = fn(client); err != nil {
return
}
return
}
// kvTables returns the names of the "regular" key-value tables.
func kvTables() []string {
kv := []string{}
for _, table := range tables {
regular := true
for _, qTable := range qTables {
if table == qTable {
regular = false
break
}
}
if regular {
kv = append(kv, table)
}
}
return kv
}
// KVTables publicly exported version of kvTables.
func | () []string { return kvTables() }
// IsKV returns true when s is the name of a Key-Value oriented table.
//
// Note: Does not normalize postgres_formatted_names..
func IsKV(s string) bool {
return contains.String(kvTables(), s)
}
// QTables returns slice of queue table names.
func QTables() []string {
tables := []string{}
for _, table := range qTables {
tables = append(tables, table)
}
return tables
}
// Returns true when the name corresponds with a table.
//
// Note: Does not normalize postgres_formatted_names..
func IsQ(s string) bool {
return contains.String(qTables, s)
}
// Tables returns a slice of all tables.
func Tables() []string {
out := []string{}
for _, table := range tables {
out = append(out, table)
}
return out
}
// StructFor takes a table or queue name and returns a pointer to the newly
// allocated struct of the corresponding type associated with the table.
func StructFor(tableOrQueue string) (proto.Message, error) {
tableOrQueue = strcase.ToKebab(tableOrQueue)
if tableOrQueue == "pkg" || tableOrQueue == "pkgs" {
tableOrQueue = TablePackages
}
switch tableOrQueue {
// N.B.: Metadata type is arbitrary on a per-key basis, so unsupported here.
// case TableMetadata:
case inflection.Plural(TablePackages), inflection.Singular(TablePackages):
return &domain.Package{}, nil
case inflection.Plural(TablePendingReferences), inflection.Singular(TablePendingReferences):
return &domain.PendingReferences{}, nil
case inflection.Plural(TableCrawlResults), inflection.Singular(TableCrawlResults):
return &domain.CrawlResult{}, nil
case inflection.Plural(TableToCrawl), inflection.Singular(TableToCrawl):
return &domain.ToCrawlEntry{}, nil
default:
return nil, fmt.Errorf("unrecognized or unsupported table or queue %q", tableOrQueue)
}
}
// FuzzyTableResolver attempts to resolve the input string to a corresponding table or
// queue name.
//
// An empty string is returned if no match is found.
func FuzzyTableResolver(tableOrQueue string) string {
if tableOrQueue == "pkg" || tableOrQueue == "pkgs" {
return TablePackages
}
if tableOrQueue == "pending" {
return TablePendingReferences
}
if tableOrQueue == "metadata" || tableOrQueue == "meta" {
return TableMetadata
}
for _, name := range tables {
if inflection.Singular(name) == tableOrQueue || inflection.Plural(name) == tableOrQueue {
return name
}
}
return ""
}
| KVTables | identifier_name |
client_iface.go | package db
import (
"errors"
"fmt"
"os"
"path/filepath"
bolt "github.com/coreos/bbolt"
"github.com/golang/protobuf/proto"
"github.com/iancoleman/strcase"
"github.com/jinzhu/inflection"
log "github.com/sirupsen/logrus"
"jaytaylor.com/andromeda/domain"
"jaytaylor.com/andromeda/pkg/contains"
)
const ( | TablePackages = "packages"
TablePendingReferences = "pending-references"
TableCrawlResults = "crawl-result"
TableToCrawl = "to-crawl"
MaxPriority = 10 // Number of supported priorities, 1-indexed.
)
var (
ErrKeyNotFound = errors.New("requested key not found")
ErrNotImplemented = errors.New("function not implemented")
ErrMetadataUnsupportedSrcType = errors.New("unsupported src type: must be an []byte, string, or proto.Message")
ErrMetadataUnsupportedDstType = errors.New("unsupported dst type: must be an *[]byte, *string, or proto.Message")
DefaultQueuePriority = 3
DefaultBoltQueueFilename = "queue.bolt"
tables = []string{
TableMetadata,
TablePackages,
TableToCrawl,
TablePendingReferences,
TableCrawlResults,
}
qTables = []string{
TableToCrawl,
TableCrawlResults,
}
// pkgSepB is a byte array of the package component separator character.
// It's used for hierarchical searches and lookups.
pkgSepB = []byte{'/'}
)
type Client interface {
Open() error // Open / start DB client connection.
Close() error // Close / shutdown the DB client connection.
Destroy(tables ...string) error // Destroy K/V tables and / or queue topics.
EachRow(table string, fn func(k []byte, v []byte)) error // Invoke a callback on the key/value pair for each row of the named table.
EachRowWithBreak(table string, fn func(k []byte, v []byte) bool) error // Invoke a callback on the key/value pair for each row of the named table until cb returns false.
PackageSave(pkgs ...*domain.Package) error // Performs an upsert merge operation on a fully crawled package.
PackageDelete(pkgPaths ...string) error // Delete a package from the index. Complete erasure.
Package(pkgPath string) (*domain.Package, error) // Retrieve a specific package..
Packages(pkgPaths ...string) (map[string]*domain.Package, error) // Retrieve several packages.
EachPackage(func(pkg *domain.Package)) error // Iterates over all indexed packages and invokes callback on each.
EachPackageWithBreak(func(pkg *domain.Package) bool) error // Iterates over packages until callback returns false.
PathPrefixSearch(prefix string) (map[string]*domain.Package, error) // Search for packages with paths matching a specific prefix.
PackagesLen() (int, error) // Number of packages in index.
RecordImportedBy(refPkg *domain.Package, resources map[string]*domain.PackageReferences) error // Save imported-by relationship updates.
CrawlResultAdd(cr *domain.CrawlResult, opts *QueueOptions) error // Append a crawl-result to the queue for later merging and save.
CrawlResultDequeue() (*domain.CrawlResult, error) // Pop a crawl-result from the queue.
EachCrawlResult(func(cr *domain.CrawlResult)) error // Iterates over all crawl-results and invokes callback on each.
EachCrawlResultWithBreak(func(cr *domain.CrawlResult) bool) error // Iterates over all crawl-results and invokes callback until callback returns false.
CrawlResultsLen() (int, error) // Number of unprocessed crawl results.
ToCrawlAdd(entries []*domain.ToCrawlEntry, opts *QueueOptions) (int, error) // Only adds entries which don't already exist. Returns number of new items added.
ToCrawlRemove(pkgs []string) (int, error) // Scrubs items from queue.
ToCrawlDequeue() (*domain.ToCrawlEntry, error) // Pop an entry from the crawl queue.
EachToCrawl(func(entry *domain.ToCrawlEntry)) error // Iterates over all to-crawl entries and invokes callback on each.
EachToCrawlWithBreak(func(entry *domain.ToCrawlEntry) bool) error // Iterates over to-crawl entries until callback returns false.
ToCrawlsLen() (int, error) // Number of packages currently awaiting crawl.
MetaSave(key string, src interface{}) error // Store metadata key/value. NB: src must be one of raw []byte, string, or proto.Message struct.
MetaDelete(key string) error // Delete a metadata key.
Meta(key string, dst interface{}) error // Retrieve metadata key and populate into dst. NB: dst must be one of *[]byte, *string, or proto.Message struct.
PendingReferences(pkgPathPrefix string) ([]*domain.PendingReferences, error) // Retrieve pending references listing for a package path prefix.
PendingReferencesSave(pendingRefs ...*domain.PendingReferences) error // Save pending references.
PendingReferencesDelete(keys ...string) error // Delete pending references keys.
EachPendingReferences(fn func(pendingRefs *domain.PendingReferences)) error // Iterate over each *domain.PrendingReferences object from the pending-references table.
EachPendingReferencesWithBreak(fn func(pendingRefs *domain.PendingReferences) bool) error // Iterate over each *domain.PrendingReferences object from the pending-references table until callback returns false.
PendingReferencesLen() (int, error) // Number of pending references keys.
RebuildTo(otherClient Client, kvFilters ...KeyValueFilterFunc) error // Rebuild a fresh copy of the DB at destination. Return ErrNotImplmented if not supported. Optionally pass in one or more KeyValueFilterFunc functions.
Backend() Backend // Expose underlying backend impl.
Queue() Queue // Expose underlying queue impl.
}
type KeyValueFilterFunc func(table []byte, key []byte, value []byte) (keyOut []byte, valueOut []byte)
// skipKVFilterFunc is an internal reference used by SkipKVFilter.
func skipKVFilterFunc(table []byte, key []byte, value []byte) (keyOut []byte, valueOut []byte) {
panic("software author error: should never be invoked")
return key, value
}
// SkipKVFilter is a signal filter sentinel value to skip Key-Value tables.
var SkipKVFilter = skipKVFilterFunc
// skipQFilterFunc is an internal reference used by SkipQFilter.
func skipQFilterFunc(table []byte, key []byte, value []byte) (keyOut []byte, valueOut []byte) {
panic("software author error: should never be invoked")
return key, value
}
// SkipQFilter is a signal filter sentinel value to skip Queue tables.
var SkipQFilter = skipQFilterFunc
type Config interface {
Type() Type // Configuration type specifier.
}
func NewConfig(driver string, dbFile string) Config {
switch driver {
case "bolt", "boltdb":
return NewBoltConfig(dbFile)
case "rocks", "rocksdb":
return NewRocksConfig(dbFile)
case "postgres", "postgresql", "pg":
return NewPostgresConfig(dbFile)
default:
panic(fmt.Sprintf("Unrecognized or unsupported DB driver %q", driver))
}
}
// NewClient constructs a new DB client based on the passed configuration.
func NewClient(config Config) Client {
typ := config.Type()
switch typ {
case Bolt:
be := NewBoltBackend(config.(*BoltConfig))
// TODO: Return an error instead of panicking.
if err := be.Open(); err != nil {
panic(fmt.Errorf("Opening bolt backend: %s", err))
}
q := NewBoltQueue(be.db)
return newKVClient(be, q)
case Rocks:
// MORE TEMPORARY UGLINESS TO MAKE IT WORK FOR NOW:
if err := os.MkdirAll(config.(*RocksConfig).Dir, os.FileMode(int(0700))); err != nil {
panic(fmt.Errorf("Creating rocks directory %q: %s", config.(*RocksConfig).Dir, err))
}
be := NewRocksBackend(config.(*RocksConfig))
queueFile := filepath.Join(config.(*RocksConfig).Dir, DefaultBoltQueueFilename)
db, err := bolt.Open(queueFile, 0600, NewBoltConfig("").BoltOptions)
if err != nil {
panic(fmt.Errorf("Creating bolt queue: %s", err))
}
q := NewBoltQueue(db)
return newKVClient(be, q)
case Postgres:
be := NewPostgresBackend(config.(*PostgresConfig))
q := NewPostgresQueue(config.(*PostgresConfig))
return newKVClient(be, q)
default:
panic(fmt.Errorf("no client constructor available for db configuration type: %v", typ))
}
}
type QueueOptions struct {
Priority int
OnlyIfNotExists bool // Only enqueue items which don't already exist.
}
func NewQueueOptions() *QueueOptions {
opts := &QueueOptions{
Priority: DefaultQueuePriority,
}
return opts
}
// WithClient is a convenience utility which handles DB client construction,
// open, and close..
func WithClient(config Config, fn func(client Client) error) (err error) {
client := NewClient(config)
if err = client.Open(); err != nil {
err = fmt.Errorf("opening DB client %T: %s", client, err)
return
}
defer func() {
if closeErr := client.Close(); closeErr != nil {
if err == nil {
err = fmt.Errorf("closing DB client %T: %s", client, closeErr)
} else {
log.Errorf("Existing error before attempt to close DB client %T: %s", client, err)
log.Errorf("Also encountered problem closing DB client %T: %s", client, closeErr)
}
}
}()
if err = fn(client); err != nil {
return
}
return
}
// kvTables returns the names of the "regular" key-value tables.
func kvTables() []string {
kv := []string{}
for _, table := range tables {
regular := true
for _, qTable := range qTables {
if table == qTable {
regular = false
break
}
}
if regular {
kv = append(kv, table)
}
}
return kv
}
// KVTables publicly exported version of kvTables.
func KVTables() []string { return kvTables() }
// IsKV returns true when s is the name of a Key-Value oriented table.
//
// Note: Does not normalize postgres_formatted_names..
func IsKV(s string) bool {
return contains.String(kvTables(), s)
}
// QTables returns slice of queue table names.
func QTables() []string {
tables := []string{}
for _, table := range qTables {
tables = append(tables, table)
}
return tables
}
// Returns true when the name corresponds with a table.
//
// Note: Does not normalize postgres_formatted_names..
func IsQ(s string) bool {
return contains.String(qTables, s)
}
// Tables returns a slice of all tables.
func Tables() []string {
out := []string{}
for _, table := range tables {
out = append(out, table)
}
return out
}
// StructFor takes a table or queue name and returns a pointer to the newly
// allocated struct of the corresponding type associated with the table.
func StructFor(tableOrQueue string) (proto.Message, error) {
tableOrQueue = strcase.ToKebab(tableOrQueue)
if tableOrQueue == "pkg" || tableOrQueue == "pkgs" {
tableOrQueue = TablePackages
}
switch tableOrQueue {
// N.B.: Metadata type is arbitrary on a per-key basis, so unsupported here.
// case TableMetadata:
case inflection.Plural(TablePackages), inflection.Singular(TablePackages):
return &domain.Package{}, nil
case inflection.Plural(TablePendingReferences), inflection.Singular(TablePendingReferences):
return &domain.PendingReferences{}, nil
case inflection.Plural(TableCrawlResults), inflection.Singular(TableCrawlResults):
return &domain.CrawlResult{}, nil
case inflection.Plural(TableToCrawl), inflection.Singular(TableToCrawl):
return &domain.ToCrawlEntry{}, nil
default:
return nil, fmt.Errorf("unrecognized or unsupported table or queue %q", tableOrQueue)
}
}
// FuzzyTableResolver attempts to resolve the input string to a corresponding table or
// queue name.
//
// An empty string is returned if no match is found.
func FuzzyTableResolver(tableOrQueue string) string {
if tableOrQueue == "pkg" || tableOrQueue == "pkgs" {
return TablePackages
}
if tableOrQueue == "pending" {
return TablePendingReferences
}
if tableOrQueue == "metadata" || tableOrQueue == "meta" {
return TableMetadata
}
for _, name := range tables {
if inflection.Singular(name) == tableOrQueue || inflection.Plural(name) == tableOrQueue {
return name
}
}
return ""
} | TableMetadata = "andromeda-metadata" | random_line_split |
client_iface.go | package db
import (
"errors"
"fmt"
"os"
"path/filepath"
bolt "github.com/coreos/bbolt"
"github.com/golang/protobuf/proto"
"github.com/iancoleman/strcase"
"github.com/jinzhu/inflection"
log "github.com/sirupsen/logrus"
"jaytaylor.com/andromeda/domain"
"jaytaylor.com/andromeda/pkg/contains"
)
const (
TableMetadata = "andromeda-metadata"
TablePackages = "packages"
TablePendingReferences = "pending-references"
TableCrawlResults = "crawl-result"
TableToCrawl = "to-crawl"
MaxPriority = 10 // Number of supported priorities, 1-indexed.
)
var (
ErrKeyNotFound = errors.New("requested key not found")
ErrNotImplemented = errors.New("function not implemented")
ErrMetadataUnsupportedSrcType = errors.New("unsupported src type: must be an []byte, string, or proto.Message")
ErrMetadataUnsupportedDstType = errors.New("unsupported dst type: must be an *[]byte, *string, or proto.Message")
DefaultQueuePriority = 3
DefaultBoltQueueFilename = "queue.bolt"
tables = []string{
TableMetadata,
TablePackages,
TableToCrawl,
TablePendingReferences,
TableCrawlResults,
}
qTables = []string{
TableToCrawl,
TableCrawlResults,
}
// pkgSepB is a byte array of the package component separator character.
// It's used for hierarchical searches and lookups.
pkgSepB = []byte{'/'}
)
type Client interface {
Open() error // Open / start DB client connection.
Close() error // Close / shutdown the DB client connection.
Destroy(tables ...string) error // Destroy K/V tables and / or queue topics.
EachRow(table string, fn func(k []byte, v []byte)) error // Invoke a callback on the key/value pair for each row of the named table.
EachRowWithBreak(table string, fn func(k []byte, v []byte) bool) error // Invoke a callback on the key/value pair for each row of the named table until cb returns false.
PackageSave(pkgs ...*domain.Package) error // Performs an upsert merge operation on a fully crawled package.
PackageDelete(pkgPaths ...string) error // Delete a package from the index. Complete erasure.
Package(pkgPath string) (*domain.Package, error) // Retrieve a specific package..
Packages(pkgPaths ...string) (map[string]*domain.Package, error) // Retrieve several packages.
EachPackage(func(pkg *domain.Package)) error // Iterates over all indexed packages and invokes callback on each.
EachPackageWithBreak(func(pkg *domain.Package) bool) error // Iterates over packages until callback returns false.
PathPrefixSearch(prefix string) (map[string]*domain.Package, error) // Search for packages with paths matching a specific prefix.
PackagesLen() (int, error) // Number of packages in index.
RecordImportedBy(refPkg *domain.Package, resources map[string]*domain.PackageReferences) error // Save imported-by relationship updates.
CrawlResultAdd(cr *domain.CrawlResult, opts *QueueOptions) error // Append a crawl-result to the queue for later merging and save.
CrawlResultDequeue() (*domain.CrawlResult, error) // Pop a crawl-result from the queue.
EachCrawlResult(func(cr *domain.CrawlResult)) error // Iterates over all crawl-results and invokes callback on each.
EachCrawlResultWithBreak(func(cr *domain.CrawlResult) bool) error // Iterates over all crawl-results and invokes callback until callback returns false.
CrawlResultsLen() (int, error) // Number of unprocessed crawl results.
ToCrawlAdd(entries []*domain.ToCrawlEntry, opts *QueueOptions) (int, error) // Only adds entries which don't already exist. Returns number of new items added.
ToCrawlRemove(pkgs []string) (int, error) // Scrubs items from queue.
ToCrawlDequeue() (*domain.ToCrawlEntry, error) // Pop an entry from the crawl queue.
EachToCrawl(func(entry *domain.ToCrawlEntry)) error // Iterates over all to-crawl entries and invokes callback on each.
EachToCrawlWithBreak(func(entry *domain.ToCrawlEntry) bool) error // Iterates over to-crawl entries until callback returns false.
ToCrawlsLen() (int, error) // Number of packages currently awaiting crawl.
MetaSave(key string, src interface{}) error // Store metadata key/value. NB: src must be one of raw []byte, string, or proto.Message struct.
MetaDelete(key string) error // Delete a metadata key.
Meta(key string, dst interface{}) error // Retrieve metadata key and populate into dst. NB: dst must be one of *[]byte, *string, or proto.Message struct.
PendingReferences(pkgPathPrefix string) ([]*domain.PendingReferences, error) // Retrieve pending references listing for a package path prefix.
PendingReferencesSave(pendingRefs ...*domain.PendingReferences) error // Save pending references.
PendingReferencesDelete(keys ...string) error // Delete pending references keys.
EachPendingReferences(fn func(pendingRefs *domain.PendingReferences)) error // Iterate over each *domain.PrendingReferences object from the pending-references table.
EachPendingReferencesWithBreak(fn func(pendingRefs *domain.PendingReferences) bool) error // Iterate over each *domain.PrendingReferences object from the pending-references table until callback returns false.
PendingReferencesLen() (int, error) // Number of pending references keys.
RebuildTo(otherClient Client, kvFilters ...KeyValueFilterFunc) error // Rebuild a fresh copy of the DB at destination. Return ErrNotImplmented if not supported. Optionally pass in one or more KeyValueFilterFunc functions.
Backend() Backend // Expose underlying backend impl.
Queue() Queue // Expose underlying queue impl.
}
type KeyValueFilterFunc func(table []byte, key []byte, value []byte) (keyOut []byte, valueOut []byte)
// skipKVFilterFunc is an internal reference used by SkipKVFilter.
func skipKVFilterFunc(table []byte, key []byte, value []byte) (keyOut []byte, valueOut []byte) {
panic("software author error: should never be invoked")
return key, value
}
// SkipKVFilter is a signal filter sentinel value to skip Key-Value tables.
var SkipKVFilter = skipKVFilterFunc
// skipQFilterFunc is an internal reference used by SkipQFilter.
func skipQFilterFunc(table []byte, key []byte, value []byte) (keyOut []byte, valueOut []byte) {
panic("software author error: should never be invoked")
return key, value
}
// SkipQFilter is a signal filter sentinel value to skip Queue tables.
var SkipQFilter = skipQFilterFunc
type Config interface {
Type() Type // Configuration type specifier.
}
func NewConfig(driver string, dbFile string) Config {
switch driver {
case "bolt", "boltdb":
return NewBoltConfig(dbFile)
case "rocks", "rocksdb":
return NewRocksConfig(dbFile)
case "postgres", "postgresql", "pg":
return NewPostgresConfig(dbFile)
default:
panic(fmt.Sprintf("Unrecognized or unsupported DB driver %q", driver))
}
}
// NewClient constructs a new DB client based on the passed configuration.
func NewClient(config Config) Client {
typ := config.Type()
switch typ {
case Bolt:
be := NewBoltBackend(config.(*BoltConfig))
// TODO: Return an error instead of panicking.
if err := be.Open(); err != nil {
panic(fmt.Errorf("Opening bolt backend: %s", err))
}
q := NewBoltQueue(be.db)
return newKVClient(be, q)
case Rocks:
// MORE TEMPORARY UGLINESS TO MAKE IT WORK FOR NOW:
if err := os.MkdirAll(config.(*RocksConfig).Dir, os.FileMode(int(0700))); err != nil {
panic(fmt.Errorf("Creating rocks directory %q: %s", config.(*RocksConfig).Dir, err))
}
be := NewRocksBackend(config.(*RocksConfig))
queueFile := filepath.Join(config.(*RocksConfig).Dir, DefaultBoltQueueFilename)
db, err := bolt.Open(queueFile, 0600, NewBoltConfig("").BoltOptions)
if err != nil {
panic(fmt.Errorf("Creating bolt queue: %s", err))
}
q := NewBoltQueue(db)
return newKVClient(be, q)
case Postgres:
be := NewPostgresBackend(config.(*PostgresConfig))
q := NewPostgresQueue(config.(*PostgresConfig))
return newKVClient(be, q)
default:
panic(fmt.Errorf("no client constructor available for db configuration type: %v", typ))
}
}
type QueueOptions struct {
Priority int
OnlyIfNotExists bool // Only enqueue items which don't already exist.
}
func NewQueueOptions() *QueueOptions {
opts := &QueueOptions{
Priority: DefaultQueuePriority,
}
return opts
}
// WithClient is a convenience utility which handles DB client construction,
// open, and close..
func WithClient(config Config, fn func(client Client) error) (err error) {
client := NewClient(config)
if err = client.Open(); err != nil {
err = fmt.Errorf("opening DB client %T: %s", client, err)
return
}
defer func() {
if closeErr := client.Close(); closeErr != nil {
if err == nil {
err = fmt.Errorf("closing DB client %T: %s", client, closeErr)
} else {
log.Errorf("Existing error before attempt to close DB client %T: %s", client, err)
log.Errorf("Also encountered problem closing DB client %T: %s", client, closeErr)
}
}
}()
if err = fn(client); err != nil |
return
}
// kvTables returns the names of the "regular" key-value tables.
func kvTables() []string {
kv := []string{}
for _, table := range tables {
regular := true
for _, qTable := range qTables {
if table == qTable {
regular = false
break
}
}
if regular {
kv = append(kv, table)
}
}
return kv
}
// KVTables publicly exported version of kvTables.
func KVTables() []string { return kvTables() }
// IsKV returns true when s is the name of a Key-Value oriented table.
//
// Note: Does not normalize postgres_formatted_names..
func IsKV(s string) bool {
return contains.String(kvTables(), s)
}
// QTables returns slice of queue table names.
func QTables() []string {
tables := []string{}
for _, table := range qTables {
tables = append(tables, table)
}
return tables
}
// Returns true when the name corresponds with a table.
//
// Note: Does not normalize postgres_formatted_names..
func IsQ(s string) bool {
return contains.String(qTables, s)
}
// Tables returns a slice of all tables.
func Tables() []string {
out := []string{}
for _, table := range tables {
out = append(out, table)
}
return out
}
// StructFor takes a table or queue name and returns a pointer to the newly
// allocated struct of the corresponding type associated with the table.
func StructFor(tableOrQueue string) (proto.Message, error) {
tableOrQueue = strcase.ToKebab(tableOrQueue)
if tableOrQueue == "pkg" || tableOrQueue == "pkgs" {
tableOrQueue = TablePackages
}
switch tableOrQueue {
// N.B.: Metadata type is arbitrary on a per-key basis, so unsupported here.
// case TableMetadata:
case inflection.Plural(TablePackages), inflection.Singular(TablePackages):
return &domain.Package{}, nil
case inflection.Plural(TablePendingReferences), inflection.Singular(TablePendingReferences):
return &domain.PendingReferences{}, nil
case inflection.Plural(TableCrawlResults), inflection.Singular(TableCrawlResults):
return &domain.CrawlResult{}, nil
case inflection.Plural(TableToCrawl), inflection.Singular(TableToCrawl):
return &domain.ToCrawlEntry{}, nil
default:
return nil, fmt.Errorf("unrecognized or unsupported table or queue %q", tableOrQueue)
}
}
// FuzzyTableResolver attempts to resolve the input string to a corresponding table or
// queue name.
//
// An empty string is returned if no match is found.
func FuzzyTableResolver(tableOrQueue string) string {
if tableOrQueue == "pkg" || tableOrQueue == "pkgs" {
return TablePackages
}
if tableOrQueue == "pending" {
return TablePendingReferences
}
if tableOrQueue == "metadata" || tableOrQueue == "meta" {
return TableMetadata
}
for _, name := range tables {
if inflection.Singular(name) == tableOrQueue || inflection.Plural(name) == tableOrQueue {
return name
}
}
return ""
}
| {
return
} | conditional_block |
center-control.js | var fly = require('fly');
// var io = require('socket.io');
var now = utils.getNowFormatDate();
var time = utils.getNowFormatTime();
var getFullYear = now.year;
var getMonth = now.month;
var getDate = now.strDate;
var getTimes = time.currenttime;
var getWeek = utils.getWeek(now.currentdate);
// var routerManager = require('router-manager');
var that;
var view = module.exports = fly.Component.extend({
name: 'router-view',
template: fly.template(__inline('./center-control.html')),
ctor: function (element, options) {
this._super(element, options);
that = this;
// var socket = this.socket = io.connect('http://localhost:1414');
// console.log(socket);
// socket.emit('regViewer', {},function(msg){
// console.log(msg);
// });
// socket.on('receive', function(data,cb){
// console.log(data);
// that.options.set('msg','我也好');
// cb('已收到');
// });
},
options: { //构造体options就是data,而且不支持深层次读写数据,两层也不行
getFullYear: getFullYear,
getMonth: getMonth,
getDate: getDate,
getTimes: getTimes,
getWeek: getWeek,
alarmInfoData: []
},
});
var dao = {
findAlarmInfo: function () {
// var time = '';
// if (vm.data.trafficStatisticsMonth >= 10) {
// time = vm.data.trafficStatisticsYear + "" + vm.data.trafficStatisticsMonth;
// } else {
// time = vm.data.trafficStatisticsYear + "0" + vm.data.trafficStatisticsMonth;
// }
var param = {
// code: DEFAULT_CODE,
// type: DEFAULT_TYPE,
// time: time,
}
networkUtils.ajaxGet({
url: CONFIGPATH.API.DATA_RESOURCES.findAlarmInfo,
data: param
}, function (res) {
var alarmInfoData = res;
// that.options.set('alarmInfoData', alarmInfoData);
that.options.set('alarmInfoData', alarmInfoData);
}, function () {
});
},
findMonitoringControlCity: function () {
// var time = '';
// if (vm.data.trafficStatisticsMonth >= 10) {
// time = vm.data.trafficStatisticsYear + "" + vm.data.trafficStatisticsMonth;
// } else {
// time = vm.data.trafficStatisticsYear + "0" + vm.data.trafficStatisticsMonth;
// }
var param = {
// code: DEFAULT_CODE,
// type: DEFAULT_TYPE,
// time: time,
}
networkUtils.ajaxGet({
url: CONFIGPATH.API.DATA_RESOURCES.monitoringControlCity,
data: param
}, function (res) {
var cityInfoData = res;
initEchartBubble(cityInfoData, 'bubblegraph');
}, function () { });
},
findDataResourceChangeNum: function () {
// var time = '';
// if (vm.data.trafficStatisticsMonth >= 10) {
// time = vm.data.trafficStatisticsYear + "" + vm.data.trafficStatisticsMonth;
// } else {
// time = vm.data.trafficStatisticsYear + "0" + vm.data.trafficStatisticsMonth;
// }
var param = {
// code: DEFAULT_CODE,
// type: DEFAULT_TYPE,
// time: time,
}
networkUtils.ajaxGet({
url: CONFIGPATH.API.DATA_RESOURCES.changeNum,
// url: 'public/project-a/1.0.0/mock/flowMonitor/changeNum.json',//实测是ok的,然后把地址封装到上面的api配置文件中来访问
data: param
}, function (res) {
console.log(res);
}, function () {
});
},
};
function init() {
// fly.alert('显示端页面init初始化成功');
// dao.findAlarmInfo();
dao.findMonitoringControlCity();
// dao.findDataResourceChangeNum();
// var v = new view(); //fly的fly.Component.extend是构造函数,需要实例化才能拿到其内的对象设置值
// console.log(v);
setInterval(function () {
var getTimes = utils.getNowFormatTime().currenttime;
that.options.set('getTimes', getTimes);
}, 1000)
}
// module.exports.destroy = function () {
// }
// module.exports.render = function () {
// // var mainview = document.getElementById("mainview");
// // mainview.innerHTML = tpl;//不用走路由的话,这样绑定模板是可以的,与路由器冲突不推荐,去掉路由器会报很多错
// // fly.bind(mainview, vm);//不用走路由的话,这样绑定vm控制器也是可以的,与路由器冲突不推荐
// init();
// }
init();
// bubble graph
var agencyUnits = ['省文化厅','省教育厅','省财政厅','省地震局','合肥市','省气象局','安庆市','省体育局','省农科院','省管局','省质监局','淮南市','淮北市','省司法厅','毫州市','省林业厅'];
function random(){
var r = Math.round(Math.random() * 700);//这个数据代表了气泡大小
return (r * (r % 2 == 0 ? 1 : -1));
}
function randomRadius(){
var r = Math.round(Math.random() * 700);//这个数据代表了气泡大小
return (r > 400 ? r : 400); //控制数据就是控制气泡的大小,所以最小气泡我给400的大小展示
}
function randomCeil(){
var n = Math.round(Math.random() * 10);//这个数据代表了气泡大小
return Math.ceil(n); //控制数据就是控制气泡的大小,所以最小气泡我给400的大小展示
}
function randomDataArray() {
var d = [];
var len = 20; //这个值就代表了气泡数量,scatter1显示20个,scatter2显示20个,一共显示40个
while (len--) {
d.push([
random(),
random(), //前面两个数据是气泡圆心坐标位置
// Math.abs(random()), //这个数据为什么要绝对值,因为这个是气泡半径值,也就是气泡大小
randomRadius(), //这个数据为什么要绝对值,因为这个是气泡半径值,也就是气泡大小
]);
// console.log(d);
}
return d;
}
function initEchartBubble(data, id) {
var option = {
tooltip : {
trigger: 'item', //axis是指根据坐标轴来触发悬浮效果,item是根据划过的个体元素对象来触发悬浮效果
showDelay : 0,
axisPointer:{
show: true,
type : 'cross',
lineStyle: {
type : 'dashed',
width : 1
}
| r: 100,
scale: true
}
],
series : [
{
name:'bubbleGraphGreen',
type:'scatter',
symbolSize: function (value){
return Math.round(value[2] / 5);
},
itemStyle : {
normal: {
color: new echarts.graphic.LinearGradient(0, 0, 0, 1, [{
offset: 0,
color: "#a6fad5"
}, {
offset: 1,
color: "#19cc7e"
}], false),
label:{
show:true,
position:'inside',
textStyle: {
color: '#ffffff',
fontWeight: 'bold',
fontSize: 16
},
formatter:function(params){
for (var i = 0; i < agencyUnits.length; i++) {
var allStrs = agencyUnits[randomCeil()];
return allStrs;
}
}
},
labelLine:{show:false}
},
emphasis: {
borderColor: 'rgba(35, 92, 147, 0.6)', //透明度颜色设置
borderWidth: 0,
itemStyle : { normal: {color: new echarts.graphic.LinearGradient(0, 0, 0, 1, [{
offset: 0,
color: "#d5efff"
}, {
offset: 1,
color: "#1ca7fc"
}], false), }
},
}
},
data: randomDataArray()
},
{
name:'bubbleGraphBule',
type:'scatter',
symbolSize: function (value){
return Math.round(value[2] / 5);
},
borderColor: 'rgba(35, 92, 147, .5)', //透明度颜色设置
borderWidth: 0,
itemStyle : {
normal: {
color: new echarts.graphic.LinearGradient(0, 0, 0, 1, [{
offset: 0,
color: "#8fd7ff"
}, {
offset: 1,
color: "#007eee"
}], false),
label:{
show:true,
position:'inside',
textStyle: {
color: '#ffffff',
fontWeight: 'bold',
fontSize: 16
},
formatter:function(params){
for (var i = 0; i < agencyUnits.length; i++) {
var allStrs = agencyUnits[randomCeil()];
return allStrs;
}
}
},
labelLine:{show:false}
},
emphasis: {
borderColor: 'rgba(35, 92, 147, 0.6)', //透明度颜色设置
borderWidth: 0,
itemStyle : { normal: {color: new echarts.graphic.LinearGradient(0, 0, 0, 1, [{
offset: 0,
color: "#d5efff"
}, {
offset: 1,
color: "#1ca7fc"
}], false), }
},
}
},
data: randomDataArray()
}
]
};
var echart = echarts.init(document.getElementById(id));
echart.setOption(option);
} | }
},
// legend: {
// data:['scatter1','scatter2']
// },
xAxis : [
{
type : 'value',
show: false,
splitNumber: 100,
scale: true
}
],
yAxis : [
{
type : 'value',
show: false,
splitNumbe | conditional_block |
center-control.js | var fly = require('fly');
// var io = require('socket.io');
var now = utils.getNowFormatDate();
var time = utils.getNowFormatTime();
var getFullYear = now.year;
var getMonth = now.month;
var getDate = now.strDate;
var getTimes = time.currenttime;
var getWeek = utils.getWeek(now.currentdate);
// var routerManager = require('router-manager');
var that;
var view = module.exports = fly.Component.extend({
name: 'router-view',
template: fly.template(__inline('./center-control.html')),
ctor: function (element, options) {
this._super(element, options);
that = this;
// var socket = this.socket = io.connect('http://localhost:1414');
// console.log(socket);
// socket.emit('regViewer', {},function(msg){
// console.log(msg);
// });
// socket.on('receive', function(data,cb){
// console.log(data);
// that.options.set('msg','我也好');
// cb('已收到');
// });
},
options: { //构造体options就是data,而且不支持深层次读写数据,两层也不行
getFullYear: getFullYear,
getMonth: getMonth,
getDate: getDate,
getTimes: getTimes,
getWeek: getWeek,
alarmInfoData: []
},
});
var dao = {
findAlarmInfo: function () {
// var time = '';
// if (vm.data.trafficStatisticsMonth >= 10) {
// time = vm.data.trafficStatisticsYear + "" + vm.data.trafficStatisticsMonth;
// } else {
// time = vm.data.trafficStatisticsYear + "0" + vm.data.trafficStatisticsMonth;
// }
var param = {
// code: DEFAULT_CODE,
// type: DEFAULT_TYPE,
// time: time,
}
networkUtils.ajaxGet({
url: CONFIGPATH.API.DATA_RESOURCES.findAlarmInfo,
data: param
}, function (res) {
var alarmInfoData = res;
// that.options.set('alarmInfoData', alarmInfoData);
that.options.set('alarmInfoData', alarmInfoData);
}, function () {
});
},
findMonitoringControlCity: function () {
// var time = '';
// if (vm.data.trafficStatisticsMonth >= 10) {
// time = vm.data.trafficStatisticsYear + "" + vm.data.trafficStatisticsMonth;
// } else {
// time = vm.data.trafficStatisticsYear + "0" + vm.data.trafficStatisticsMonth;
// }
var param = {
// code: DEFAULT_CODE,
// type: DEFAULT_TYPE,
// time: time,
}
networkUtils.ajaxGet({
url: CONFIGPATH.API.DATA_RESOURCES.monitoringControlCity,
data: param
}, function (res) {
var cityInfoData = res;
initEchartBubble(cityInfoData, 'bubblegraph');
}, function () { });
},
findDataResourceChangeNum: function () {
// var time = '';
// if (vm.data.trafficStatisticsMonth >= 10) {
// time = vm.data.trafficStatisticsYear + "" + vm.data.trafficStatisticsMonth;
// } else {
// time = vm.data.trafficStatisticsYear + "0" + vm.data.trafficStatisticsMonth;
// }
var param = {
// code: DEFAULT_CODE,
// type: DEFAULT_TYPE,
// time: time,
}
networkUtils.ajaxGet({
url: CONFIGPATH.API.DATA_RESOURCES.changeNum,
// url: 'public/project-a/1.0.0/mock/flowMonitor/changeNum.json',//实测是ok的,然后把地址封装到上面的api配置文件中来访问
data: param
}, function (res) {
console.log(res);
}, function () {
});
},
};
function init() {
// fly.alert('显示端页面init初始化成功');
// dao.findAlarmInfo();
dao.findMonitoringControlCity();
// dao.findDataResourceChangeNum();
// var v = new view(); //fly的fly.Component.extend是构造函数,需要实例化才能拿到其内的对象设置值
// console.log(v);
setInterval(function () {
var getTimes = utils.getNowFormatTime().currenttime;
that.options.set('getTimes', getTimes);
}, 1000)
}
// module.exports.destroy = function () {
// }
// module.exports.render = function () {
// // var mainview = document.getElementById("mainview");
// // mainview.innerHTML = tpl;//不用走路由的话,这样绑定模板是可以的,与路由器冲突不推荐,去掉路由器会报很多错
// // fly.bind(mainview, vm);//不用走路由的话,这样绑定vm控制器也是可以的,与路由器冲突不推荐 | init();
// bubble graph
var agencyUnits = ['省文化厅','省教育厅','省财政厅','省地震局','合肥市','省气象局','安庆市','省体育局','省农科院','省管局','省质监局','淮南市','淮北市','省司法厅','毫州市','省林业厅'];
function random(){
var r = Math.round(Math.random() * 700);//这个数据代表了气泡大小
return (r * (r % 2 == 0 ? 1 : -1));
}
function randomRadius(){
var r = Math.round(Math.random() * 700);//这个数据代表了气泡大小
return (r > 400 ? r : 400); //控制数据就是控制气泡的大小,所以最小气泡我给400的大小展示
}
function randomCeil(){
var n = Math.round(Math.random() * 10);//这个数据代表了气泡大小
return Math.ceil(n); //控制数据就是控制气泡的大小,所以最小气泡我给400的大小展示
}
function randomDataArray() {
var d = [];
var len = 20; //这个值就代表了气泡数量,scatter1显示20个,scatter2显示20个,一共显示40个
while (len--) {
d.push([
random(),
random(), //前面两个数据是气泡圆心坐标位置
// Math.abs(random()), //这个数据为什么要绝对值,因为这个是气泡半径值,也就是气泡大小
randomRadius(), //这个数据为什么要绝对值,因为这个是气泡半径值,也就是气泡大小
]);
// console.log(d);
}
return d;
}
function initEchartBubble(data, id) {
var option = {
tooltip : {
trigger: 'item', //axis是指根据坐标轴来触发悬浮效果,item是根据划过的个体元素对象来触发悬浮效果
showDelay : 0,
axisPointer:{
show: true,
type : 'cross',
lineStyle: {
type : 'dashed',
width : 1
}
}
},
// legend: {
// data:['scatter1','scatter2']
// },
xAxis : [
{
type : 'value',
show: false,
splitNumber: 100,
scale: true
}
],
yAxis : [
{
type : 'value',
show: false,
splitNumber: 100,
scale: true
}
],
series : [
{
name:'bubbleGraphGreen',
type:'scatter',
symbolSize: function (value){
return Math.round(value[2] / 5);
},
itemStyle : {
normal: {
color: new echarts.graphic.LinearGradient(0, 0, 0, 1, [{
offset: 0,
color: "#a6fad5"
}, {
offset: 1,
color: "#19cc7e"
}], false),
label:{
show:true,
position:'inside',
textStyle: {
color: '#ffffff',
fontWeight: 'bold',
fontSize: 16
},
formatter:function(params){
for (var i = 0; i < agencyUnits.length; i++) {
var allStrs = agencyUnits[randomCeil()];
return allStrs;
}
}
},
labelLine:{show:false}
},
emphasis: {
borderColor: 'rgba(35, 92, 147, 0.6)', //透明度颜色设置
borderWidth: 0,
itemStyle : { normal: {color: new echarts.graphic.LinearGradient(0, 0, 0, 1, [{
offset: 0,
color: "#d5efff"
}, {
offset: 1,
color: "#1ca7fc"
}], false), }
},
}
},
data: randomDataArray()
},
{
name:'bubbleGraphBule',
type:'scatter',
symbolSize: function (value){
return Math.round(value[2] / 5);
},
borderColor: 'rgba(35, 92, 147, .5)', //透明度颜色设置
borderWidth: 0,
itemStyle : {
normal: {
color: new echarts.graphic.LinearGradient(0, 0, 0, 1, [{
offset: 0,
color: "#8fd7ff"
}, {
offset: 1,
color: "#007eee"
}], false),
label:{
show:true,
position:'inside',
textStyle: {
color: '#ffffff',
fontWeight: 'bold',
fontSize: 16
},
formatter:function(params){
for (var i = 0; i < agencyUnits.length; i++) {
var allStrs = agencyUnits[randomCeil()];
return allStrs;
}
}
},
labelLine:{show:false}
},
emphasis: {
borderColor: 'rgba(35, 92, 147, 0.6)', //透明度颜色设置
borderWidth: 0,
itemStyle : { normal: {color: new echarts.graphic.LinearGradient(0, 0, 0, 1, [{
offset: 0,
color: "#d5efff"
}, {
offset: 1,
color: "#1ca7fc"
}], false), }
},
}
},
data: randomDataArray()
}
]
};
var echart = echarts.init(document.getElementById(id));
echart.setOption(option);
} | // init();
// } | random_line_split |
center-control.js | var fly = require('fly');
// var io = require('socket.io');
var now = utils.getNowFormatDate();
var time = utils.getNowFormatTime();
var getFullYear = now.year;
var getMonth = now.month;
var getDate = now.strDate;
var getTimes = time.currenttime;
var getWeek = utils.getWeek(now.currentdate);
// var routerManager = require('router-manager');
var that;
var view = module.exports = fly.Component.extend({
name: 'router-view',
template: fly.template(__inline('./center-control.html')),
ctor: function (element, options) {
this._super(element, options);
that = this;
// var socket = this.socket = io.connect('http://localhost:1414');
// console.log(socket);
// socket.emit('regViewer', {},function(msg){
// console.log(msg);
// });
// socket.on('receive', function(data,cb){
// console.log(data);
// that.options.set('msg','我也好');
// cb('已收到');
// });
},
options: { //构造体options就是data,而且不支持深层次读写数据,两层也不行
getFullYear: getFullYear,
getMonth: getMonth,
getDate: getDate,
getTimes: getTimes,
getWeek: getWeek,
alarmInfoData: []
},
});
var dao = {
findAlarmInfo: function () {
// var time = '';
// if (vm.data.trafficStatisticsMonth >= 10) {
// time = vm.data.trafficStatisticsYear + "" + vm.data.trafficStatisticsMonth;
// } else {
// time = vm.data.trafficStatisticsYear + "0" + vm.data.trafficStatisticsMonth;
// }
var param = {
// code: DEFAULT_CODE,
// type: DEFAULT_TYPE,
// time: time,
}
networkUtils.ajaxGet({
url: CONFIGPATH.API.DATA_RESOURCES.findAlarmInfo,
data: param
}, function (res) {
var alarmInfoData = res;
// that.options.set('alarmInfoData', alarmInfoData);
that.options.set('alarmInfoData', alarmInfoData);
}, function () {
});
},
findMonitoringControlCity: function () {
// var time = '';
// if (vm.data.trafficStatisticsMonth >= 10) {
// time = vm.data.trafficStatisticsYear + "" + vm.data.trafficStatisticsMonth;
// } else {
// time = vm.data.trafficStatisticsYear + "0" + vm.data.trafficStatisticsMonth;
// }
var param = {
// code: DEFAULT_CODE,
// type: DEFAULT_TYPE,
// time: time,
}
networkUtils.ajaxGet({
url: CONFIGPATH.API.DATA_RESOURCES.monitoringControlCity,
data: param
}, function (res) {
var cityInfoData = res;
initEchartBubble(cityInfoData, 'bubblegraph');
}, function () { });
},
findDataResourceChangeNum: function () {
// var time = '';
// if (vm.data.trafficStatisticsMonth >= 10) {
// time = vm.data.trafficStatisticsYear + "" + vm.data.trafficStatisticsMonth;
// } else {
// time = vm.data.trafficStatisticsYear + "0" + vm.data.trafficStatisticsMonth;
// }
var param = {
// code: DEFAULT_CODE,
// type: DEFAULT_TYPE,
// time: time,
}
networkUtils.ajaxGet({
url: CONFIGPATH.API.DATA_RESOURCES.changeNum,
// url: 'public/project-a/1.0.0/mock/flowMonitor/changeNum.json',//实测是ok的,然后把地址封装到上面的api配置文件中来访问
data: param
}, function (res) {
console.log(res);
}, function () {
});
},
};
function init() {
// fly.alert('显示端页面init初始化成功');
// dao.findAlarmInfo();
dao.findMonitoringControlCity();
// dao.findDataResourceChangeNum();
// var v = new view(); //fly的fly.Component.extend是构造函数,需要实例化才能拿到其内的对象设置值
// console.log(v);
setInterval(function () {
var getTimes = utils.getNowFormatTime().currenttime;
that.options.set('getTimes', getTimes);
}, 1000)
}
// module.exports.destroy = function () {
// }
// module.exports.render = function () {
// // var mainview = document.getElementById("mainview");
// // mainview.innerHTML = tpl;//不用走路由的话,这样绑定模板是可以的,与路由器冲突不推荐,去掉路由器会报很多错
// // fly.bind(mainview, vm);//不用走路由的话,这样绑定vm控制器也是可以的,与路由器冲突不推荐
// init();
// }
init();
// bubble graph
var agencyUnits = ['省文化厅','省教育厅','省财政厅','省地震局','合肥市','省气象局','安庆市','省体育局','省农科院','省管局','省质监局','淮南市','淮北市','省司法厅','毫州市','省林业厅'];
function random(){
var r = Math.round(Math.random() * 700);//这个数据代表了气泡大小
return (r * (r % 2 == 0 ? 1 : -1));
}
function randomRadius(){
var r = Math.round(Math.random() * 700);//这个数据代表了气泡大小
return (r > 400 ? r : 400); //控制数据就是控制气泡的大小,所以最小气泡我给400的大小展示
}
function randomCeil(){
var n = Math.round(Math.random() * 10);//这个数据代表了气泡大小
return Math.ceil(n); //控制数据就是控制气泡的大小,所以最小气泡我给400的大小展示
}
function randomDataArray() { | ar d = [];
var len = 20; //这个值就代表了气泡数量,scatter1显示20个,scatter2显示20个,一共显示40个
while (len--) {
d.push([
random(),
random(), //前面两个数据是气泡圆心坐标位置
// Math.abs(random()), //这个数据为什么要绝对值,因为这个是气泡半径值,也就是气泡大小
randomRadius(), //这个数据为什么要绝对值,因为这个是气泡半径值,也就是气泡大小
]);
// console.log(d);
}
return d;
}
function initEchartBubble(data, id) {
var option = {
tooltip : {
trigger: 'item', //axis是指根据坐标轴来触发悬浮效果,item是根据划过的个体元素对象来触发悬浮效果
showDelay : 0,
axisPointer:{
show: true,
type : 'cross',
lineStyle: {
type : 'dashed',
width : 1
}
}
},
// legend: {
// data:['scatter1','scatter2']
// },
xAxis : [
{
type : 'value',
show: false,
splitNumber: 100,
scale: true
}
],
yAxis : [
{
type : 'value',
show: false,
splitNumber: 100,
scale: true
}
],
series : [
{
name:'bubbleGraphGreen',
type:'scatter',
symbolSize: function (value){
return Math.round(value[2] / 5);
},
itemStyle : {
normal: {
color: new echarts.graphic.LinearGradient(0, 0, 0, 1, [{
offset: 0,
color: "#a6fad5"
}, {
offset: 1,
color: "#19cc7e"
}], false),
label:{
show:true,
position:'inside',
textStyle: {
color: '#ffffff',
fontWeight: 'bold',
fontSize: 16
},
formatter:function(params){
for (var i = 0; i < agencyUnits.length; i++) {
var allStrs = agencyUnits[randomCeil()];
return allStrs;
}
}
},
labelLine:{show:false}
},
emphasis: {
borderColor: 'rgba(35, 92, 147, 0.6)', //透明度颜色设置
borderWidth: 0,
itemStyle : { normal: {color: new echarts.graphic.LinearGradient(0, 0, 0, 1, [{
offset: 0,
color: "#d5efff"
}, {
offset: 1,
color: "#1ca7fc"
}], false), }
},
}
},
data: randomDataArray()
},
{
name:'bubbleGraphBule',
type:'scatter',
symbolSize: function (value){
return Math.round(value[2] / 5);
},
borderColor: 'rgba(35, 92, 147, .5)', //透明度颜色设置
borderWidth: 0,
itemStyle : {
normal: {
color: new echarts.graphic.LinearGradient(0, 0, 0, 1, [{
offset: 0,
color: "#8fd7ff"
}, {
offset: 1,
color: "#007eee"
}], false),
label:{
show:true,
position:'inside',
textStyle: {
color: '#ffffff',
fontWeight: 'bold',
fontSize: 16
},
formatter:function(params){
for (var i = 0; i < agencyUnits.length; i++) {
var allStrs = agencyUnits[randomCeil()];
return allStrs;
}
}
},
labelLine:{show:false}
},
emphasis: {
borderColor: 'rgba(35, 92, 147, 0.6)', //透明度颜色设置
borderWidth: 0,
itemStyle : { normal: {color: new echarts.graphic.LinearGradient(0, 0, 0, 1, [{
offset: 0,
color: "#d5efff"
}, {
offset: 1,
color: "#1ca7fc"
}], false), }
},
}
},
data: randomDataArray()
}
]
};
var echart = echarts.init(document.getElementById(id));
echart.setOption(option);
} |
v | identifier_name |
center-control.js | var fly = require('fly');
// var io = require('socket.io');
var now = utils.getNowFormatDate();
var time = utils.getNowFormatTime();
var getFullYear = now.year;
var getMonth = now.month;
var getDate = now.strDate;
var getTimes = time.currenttime;
var getWeek = utils.getWeek(now.currentdate);
// var routerManager = require('router-manager');
var that;
var view = module.exports = fly.Component.extend({
name: 'router-view',
template: fly.template(__inline('./center-control.html')),
ctor: function (element, options) {
this._super(element, options);
that = this;
// var socket = this.socket = io.connect('http://localhost:1414');
// console.log(socket);
// socket.emit('regViewer', {},function(msg){
// console.log(msg);
// });
// socket.on('receive', function(data,cb){
// console.log(data);
// that.options.set('msg','我也好');
// cb('已收到');
// });
},
options: { //构造体options就是data,而且不支持深层次读写数据,两层也不行
getFullYear: getFullYear,
getMonth: getMonth,
getDate: getDate,
getTimes: getTimes,
getWeek: getWeek,
alarmInfoData: []
},
});
var dao = {
findAlarmInfo: function () {
// var time = '';
// if (vm.data.trafficStatisticsMonth >= 10) {
// time = vm.data.trafficStatisticsYear + "" + vm.data.trafficStatisticsMonth;
// } else {
// time = vm.data.trafficStatisticsYear + "0" + vm.data.trafficStatisticsMonth;
// }
var param = {
// code: DEFAULT_CODE,
// type: DEFAULT_TYPE,
// time: time,
}
networkUtils.ajaxGet({
url: CONFIGPATH.API.DATA_RESOURCES.findAlarmInfo,
data: param
}, function (res) {
var alarmInfoData = res;
// that.options.set('alarmInfoData', alarmInfoData);
that.options.set('alarmInfoData', alarmInfoData);
}, function () {
});
},
findMonitoringControlCity: function () {
// var time = '';
// if (vm.data.trafficStatisticsMonth >= 10) {
// time = vm.data.trafficStatisticsYear + "" + vm.data.trafficStatisticsMonth;
// } else {
// time = vm.data.trafficStatisticsYear + "0" + vm.data.trafficStatisticsMonth;
// }
var param = {
// code: DEFAULT_CODE,
// type: DEFAULT_TYPE,
// time: time,
}
networkUtils.ajaxGet({
url: CONFIGPATH.API.DATA_RESOURCES.monitoringControlCity,
data: param
}, function (res) {
var cityInfoData = res;
initEchartBubble(cityInfoData, 'bubblegraph');
}, function () { });
},
findDataResourceChangeNum: function () {
// var time = '';
// if (vm.data.trafficStatisticsMonth >= 10) {
// time = vm.data.trafficStatisticsYear + "" + vm.data.trafficStatisticsMonth;
// } else {
// time = vm.data.trafficStatisticsYear + "0" + vm.data.trafficStatisticsMonth;
// }
var param = {
// code: DEFAULT_CODE,
// type: DEFAULT_TYPE,
// time: time,
}
networkUtils.ajaxGet({
url: CONFIGPATH.API.DATA_RESOURCES.changeNum,
// url: 'public/project-a/1.0.0/mock/flowMonitor/changeNum.json',//实测是ok的,然后把地址封装到上面的api配置文件中来访问
data: param
}, function (res) {
console.log(res);
}, function () {
});
},
};
function init() {
// fly.alert('显示端页面init初始化成功');
// dao.findAlarmInfo();
dao.findMonitoringControlCity();
// dao.findDataResourceChangeNum();
// var v = new view(); //fly的fly.Component.extend是构造函数,需要实例化才能拿到其内的对象设置值
// console.log(v);
setInterval(function () {
var getTimes = utils.getNowFormatTime().currenttime;
that.options.set('getTimes', getTimes);
}, 1000)
}
// module.exports.destroy = function () {
// }
// module.exports.render = function () {
// // var mainview = document.getElementById("mainview");
// // mainview.innerHTML = tpl;//不用走路由的话,这样绑定模板是可以的,与路由器冲突不推荐,去掉路由器会报很多错
// // fly.bind(mainview, vm);//不用走路由的话,这样绑定vm控制器也是可以的,与路由器冲突不推荐
// init();
// }
init();
// bubble graph
var agencyUnits = ['省文化厅','省教育厅','省财政厅','省地震局','合肥市','省气象局','安庆市','省体育局','省农科院','省管局','省质监局','淮南市','淮北市','省司法厅','毫州市','省林业厅'];
function random(){
var r = Math.round(Math.random() * 700);//这个数据代表了气泡大小
return (r * (r % 2 == 0 ? 1 : -1));
}
function randomRadius(){
var r = Math.round(Math.random() * 700);//这个数据代表了气泡大小
return (r > 400 ? r : 400); //控制数据就是控制气泡的大小,所以最小气泡我给400的大小展示
}
function randomCeil(){
var n = Math.round(Math.random() * 10);//这个数据代表了气泡大小
return Math.ceil(n); //控制数据就是控制气泡的大小,所以最小气泡我给400的大小展示
}
function randomDataArray() {
var d = [];
var len = 20; //这个值就代表了气泡数量,scatter1显示20个,scatter2显示20个,一共显示40个
while (len--) {
d.push([
random(),
random(), //前面两个数据是气泡圆心坐标位置
// Math.abs(random()), //这个数据为什么要绝对值,因为这个是气泡半径值,也就是气泡大小
randomRadius(), //这个数据为什么要绝对值,因为这个是气泡半径值,也就是气泡大小
]);
// console.log(d);
}
return d;
}
function initEchartBubble(data, id) {
var option = {
tooltip : {
trigger: 'item', //axis是指根据坐标轴来触发悬浮效果,item是根据划过的个体元素对象来触发悬浮效果
showDelay : 0,
axisPointer:{
show: t | scale: true
}
],
series : [
{
name:'bubbleGraphGreen',
type:'scatter',
symbolSize: function (value){
return Math.round(value[2] / 5);
},
itemStyle : {
normal: {
color: new echarts.graphic.LinearGradient(0, 0, 0, 1, [{
offset: 0,
color: "#a6fad5"
}, {
offset: 1,
color: "#19cc7e"
}], false),
label:{
show:true,
position:'inside',
textStyle: {
color: '#ffffff',
fontWeight: 'bold',
fontSize: 16
},
formatter:function(params){
for (var i = 0; i < agencyUnits.length; i++) {
var allStrs = agencyUnits[randomCeil()];
return allStrs;
}
}
},
labelLine:{show:false}
},
emphasis: {
borderColor: 'rgba(35, 92, 147, 0.6)', //透明度颜色设置
borderWidth: 0,
itemStyle : { normal: {color: new echarts.graphic.LinearGradient(0, 0, 0, 1, [{
offset: 0,
color: "#d5efff"
}, {
offset: 1,
color: "#1ca7fc"
}], false), }
},
}
},
data: randomDataArray()
},
{
name:'bubbleGraphBule',
type:'scatter',
symbolSize: function (value){
return Math.round(value[2] / 5);
},
borderColor: 'rgba(35, 92, 147, .5)', //透明度颜色设置
borderWidth: 0,
itemStyle : {
normal: {
color: new echarts.graphic.LinearGradient(0, 0, 0, 1, [{
offset: 0,
color: "#8fd7ff"
}, {
offset: 1,
color: "#007eee"
}], false),
label:{
show:true,
position:'inside',
textStyle: {
color: '#ffffff',
fontWeight: 'bold',
fontSize: 16
},
formatter:function(params){
for (var i = 0; i < agencyUnits.length; i++) {
var allStrs = agencyUnits[randomCeil()];
return allStrs;
}
}
},
labelLine:{show:false}
},
emphasis: {
borderColor: 'rgba(35, 92, 147, 0.6)', //透明度颜色设置
borderWidth: 0,
itemStyle : { normal: {color: new echarts.graphic.LinearGradient(0, 0, 0, 1, [{
offset: 0,
color: "#d5efff"
}, {
offset: 1,
color: "#1ca7fc"
}], false), }
},
}
},
data: randomDataArray()
}
]
};
var echart = echarts.init(document.getElementById(id));
echart.setOption(option);
} | rue,
type : 'cross',
lineStyle: {
type : 'dashed',
width : 1
}
}
},
// legend: {
// data:['scatter1','scatter2']
// },
xAxis : [
{
type : 'value',
show: false,
splitNumber: 100,
scale: true
}
],
yAxis : [
{
type : 'value',
show: false,
splitNumber: 100,
| identifier_body |
server.rs | //! Processes requests from clients & peer nodes
//!
//! # Overview
//!
//! The MinDB server is a peer in the MiniDB cluster. It is initialized with a
//! port to bind to and a list of peers to replicate to. The server then
//! processes requests send from both clients and peers.
//!
//! # Peers
//!
//! On process start, the cluster topology is read from a config file
//! (`etc/nodes.toml`). The node represented by the current server process is
//! extracted this topology, leaving the list of peers.
//!
//! After the server starts, connections will be established to all the peers.
//! If a peer cannot be reached, the connection will be retried until the peer
//! becomes reachable.
//!
//! # Replication
//!
//! When the server receives a mutation request from a client, the mutation is
//! processed on the server itself. Once the mutation succeeds, the server
//! state is replicated to all the peers. Replication involves sending the
//! entire data set to all the peers. This is not the most efficient strategy,
//! but we aren't trying to build Riak (or even MongoDB).
//!
//! In the event that replication is unable to keep up with the mutation rate,
//! replication messages are dropped in favor of ensuring that the final
//! replication message is delivered. This works because every replication
//! message includes the entire state at that point. The CRDT ensures that no
//! matter what state the peers are at, it is able to converge with the final
//! replication message (assuming no bugs in the CRDT implementation);
use config;
use dt::{Set, ActorId};
use peer::Peer;
use proto::{self, Request, Response, Transport};
use tokio_core::reactor::{Core, Handle};
use tokio_core::net::TcpListener;
use tokio_service::Service;
use tokio_proto::easy::multiplex;
use tokio_timer::Timer;
use futures::{self, Future, Async};
use futures::stream::{Stream};
use rand::{self, Rng};
use std::io;
use std::cell::RefCell;
use std::rc::Rc;
// The in-memory MinDB state. Stored in a ref-counted cell and shared across
// all open server connections. Whenever a socket connects, a `RequestHandler`
// instance will be initialized with a pointer to this.
struct Server {
// The DB data. This is the CRDT Set. When the server process starts, this
// is initialized to an empty set.
data: Set<String>,
// The server's ActorId. This ActorID value **MUST** be unique across the
// cluster and should never be reused by another node.
//
// In theory, if the data was persisted to disk, the ActorId would be
// stored withe persisted state. However, since the state is initialized
// each time the process starts, the ActorId must be unique.
//
// To handle this, the ActorId is randomly generated.
actor_id: ActorId,
// Handle to the cluster peers. The `Peer` type manages the socket
// connection, including initial connect as well as reconnecting when the
// connection fails.
//
// Whenever the set is mutated by a client, it is replicated to the list of
// peers.
peers: Vec<Peer>,
}
// Handles MiniDB client requests. Implements `Service`
struct RequestHandler {
server_state: Rc<RefCell<Server>>,
}
/// Run a server node.
///
/// The function initializes new server state, including an empty CRDT set,
/// then it will bind to the requested port and start processing connections.
/// Connections will be made from both clients and other peers.
///
/// The function will block while the server is running.
pub fn run(config: config::Node) -> io::Result<()> {
// Create the tokio-core reactor
let mut core = try!(Core::new());
// Get a handle to the reactor
let handle = core.handle();
// Bind the Tcp listener, listening on the requested socket address.
let listener = try!(TcpListener::bind(config.local_addr(), &handle));
// `Core::run` runs the reactor. This call will block until the future
// provided as the argument completes. In this case, the given future
// processes the inbound TCP connections.
core.run(futures::lazy(move || {
// Initialize the server state
let server_state = Rc::new(RefCell::new(Server::new(&config, &handle)));
// `listener.incoming()` provides a `Stream` of inbound TCP connections
listener.incoming().for_each(move |(sock, _)| {
debug!("server accepted socket");
// Create client handle. This implements `tokio_service::Service`.
let client_handler = RequestHandler {
server_state: server_state.clone(),
};
// Initialize the transport implementation backed by the Tcp
// socket.
let transport = Transport::new(sock);
// Use `tokio_proto` to handle the details of multiplexing.
// `EasyServer` takes the transport, which is basically a stream of
// frames as they are read off the socket and manages mapping the
// frames to request / response pairs.
let connection_task = multiplex::EasyServer::new(client_handler, transport);
// Spawn a new reactor task to process the connection. A task is a
// light-weight unit of work. Tasks are generally used for managing
// resources, in this case the resource is the socket.
handle.spawn(connection_task);
Ok(())
})
}))
}
impl Server {
// Initialize the server state using the supplied config. This function
// will also establish connections to the peers, which is why `&Handle` is
// needed.
fn new(config: &config::Node, handle: &Handle) -> Server {
// Initialize a timer, this timer will be passed to all peers.
let timer = Timer::default();
// Connect the peers
let peers = config.routes().into_iter()
.map(|route| Peer::connect(route, handle, &timer))
.collect();
// Randomly assign an `ActorId` to the current process. It is
// imperative that the `ActorId` is unique in the cluster and is not
// reused across server restarts (since the state is reset).
let mut rng = rand::thread_rng();
let actor_id: ActorId = rng.next_u64().into();
debug!("server actor-id={:?}", actor_id);
// Return the new server state with an empty data set
Server {
data: Set::new(),
actor_id: actor_id,
peers: peers,
}
}
// Replicate the current data set to the list of peers.
fn | (&self) {
// Iterate all the peers sending a clone of the data. This operation
// performs a deep clone for each peer, which is not going to be super
// efficient as the data set grows, but improving this is out of scope
// for MiniDB.
for peer in &self.peers {
peer.send(self.data.clone());
}
}
}
// Service implementation for `RequestHandler`
//
// `Service` is the Tokio abstraction for asynchronous request / response
// handling. This is where we will process all requests sent by clients and
// peers.
//
// Instead of mixing a single service to handle both clients and peers, a
// better strategy would probably be to have two separate TCP listeners on
// different ports to handle the client and the peers.
impl Service for RequestHandler {
// The Request and Response types live in `proto`
type Request = Request;
type Response = Response;
type Error = io::Error;
// For greates flexibility, a Box<Future> is used. This has the downside of
// requiring an allocation and dynamic dispatch. Currently, the service
// only responds with `futures::Done`, so the type could be changed.
//
// If the service respond with different futures depending on a conditional
// branch, then returning Box or implementing a custom future is required.
type Future = Box<Future<Item = Self::Response, Error = Self::Error>>;
fn call(&self, request: Self::Request) -> Self::Future {
match request {
Request::Get(_) => {
info!("[COMMAND] Get");
// Clone the current state and respond with the set
//
let data = self.server_state.borrow().data.clone();
let resp = Response::Value(data);
Box::new(futures::done(Ok(resp)))
}
Request::Insert(cmd) => {
info!("[COMMAND] Insert {:?}", cmd.value());
// Insert the new value, initiate a replication to all peers,
// and respond with Success
//
let mut state = self.server_state.borrow_mut();
let actor_id = state.actor_id;
state.data.insert(actor_id, cmd.value());
// Replicate the new state to all peers
state.replicate();
let resp = Response::Success(proto::Success);
Box::new(futures::done(Ok(resp)))
}
Request::Remove(cmd) => {
info!("[COMMAND] Remove {:?}", cmd.value());
let mut state = self.server_state.borrow_mut();
let actor_id = state.actor_id;
// If the request includes a version vector, this indicates
// that a causal remove is requested. A causal remove implies
// removing the value from the set at the state represented by
// the version vector and leaving any insertions that are
// either concurrent or successors to the supplied version
// vector.
match cmd.causality() {
Some(version_vec) => {
state.data.causal_remove(actor_id, version_vec, cmd.value());
}
None => {
state.data.remove(actor_id, cmd.value());
}
}
// Replicate the new state to all peers
state.replicate();
let resp = Response::Success(proto::Success);
Box::new(futures::done(Ok(resp)))
}
Request::Clear(_) => {
info!("[COMMAND] Clear");
let mut state = self.server_state.borrow_mut();
let actor_id = state.actor_id;
state.data.clear(actor_id);
state.replicate();
let resp = Response::Success(proto::Success);
Box::new(futures::done(Ok(resp)))
}
Request::Join(other) => {
info!("[COMMAND] Join");
// A Join request is issued by a peer during replication and
// provides the peer's latest state.
//
// The join request is handled by joining the provided state
// into the node's current state.
let mut state = self.server_state.borrow_mut();
state.data.join(&other);
if log_enabled!(::log::LogLevel::Debug) {
for elem in state.data.iter() {
debug!(" - {:?}", elem);
}
}
let resp = Response::Success(proto::Success);
Box::new(futures::done(Ok(resp)))
}
}
}
fn poll_ready(&self) -> Async<()> {
Async::Ready(())
}
}
| replicate | identifier_name |
server.rs | //! Processes requests from clients & peer nodes
//!
//! # Overview
//!
//! The MinDB server is a peer in the MiniDB cluster. It is initialized with a
//! port to bind to and a list of peers to replicate to. The server then
//! processes requests send from both clients and peers.
//!
//! # Peers
//!
//! On process start, the cluster topology is read from a config file
//! (`etc/nodes.toml`). The node represented by the current server process is
//! extracted this topology, leaving the list of peers.
//!
//! After the server starts, connections will be established to all the peers.
//! If a peer cannot be reached, the connection will be retried until the peer
//! becomes reachable.
//!
//! # Replication
//!
//! When the server receives a mutation request from a client, the mutation is
//! processed on the server itself. Once the mutation succeeds, the server
//! state is replicated to all the peers. Replication involves sending the
//! entire data set to all the peers. This is not the most efficient strategy,
//! but we aren't trying to build Riak (or even MongoDB).
//!
//! In the event that replication is unable to keep up with the mutation rate,
//! replication messages are dropped in favor of ensuring that the final
//! replication message is delivered. This works because every replication
//! message includes the entire state at that point. The CRDT ensures that no
//! matter what state the peers are at, it is able to converge with the final
//! replication message (assuming no bugs in the CRDT implementation);
use config;
use dt::{Set, ActorId};
use peer::Peer;
use proto::{self, Request, Response, Transport};
use tokio_core::reactor::{Core, Handle};
use tokio_core::net::TcpListener;
use tokio_service::Service;
use tokio_proto::easy::multiplex;
use tokio_timer::Timer;
use futures::{self, Future, Async};
use futures::stream::{Stream};
use rand::{self, Rng};
use std::io;
use std::cell::RefCell;
use std::rc::Rc;
// The in-memory MinDB state. Stored in a ref-counted cell and shared across
// all open server connections. Whenever a socket connects, a `RequestHandler`
// instance will be initialized with a pointer to this.
struct Server {
// The DB data. This is the CRDT Set. When the server process starts, this
// is initialized to an empty set.
data: Set<String>,
// The server's ActorId. This ActorID value **MUST** be unique across the
// cluster and should never be reused by another node.
//
// In theory, if the data was persisted to disk, the ActorId would be
// stored withe persisted state. However, since the state is initialized
// each time the process starts, the ActorId must be unique.
//
// To handle this, the ActorId is randomly generated.
actor_id: ActorId,
// Handle to the cluster peers. The `Peer` type manages the socket
// connection, including initial connect as well as reconnecting when the
// connection fails.
//
// Whenever the set is mutated by a client, it is replicated to the list of
// peers.
peers: Vec<Peer>,
}
// Handles MiniDB client requests. Implements `Service`
struct RequestHandler {
server_state: Rc<RefCell<Server>>,
}
/// Run a server node.
///
/// The function initializes new server state, including an empty CRDT set,
/// then it will bind to the requested port and start processing connections.
/// Connections will be made from both clients and other peers.
///
/// The function will block while the server is running.
pub fn run(config: config::Node) -> io::Result<()> {
// Create the tokio-core reactor
let mut core = try!(Core::new());
// Get a handle to the reactor
let handle = core.handle();
// Bind the Tcp listener, listening on the requested socket address.
let listener = try!(TcpListener::bind(config.local_addr(), &handle));
// `Core::run` runs the reactor. This call will block until the future
// provided as the argument completes. In this case, the given future
// processes the inbound TCP connections.
core.run(futures::lazy(move || {
// Initialize the server state
let server_state = Rc::new(RefCell::new(Server::new(&config, &handle)));
// `listener.incoming()` provides a `Stream` of inbound TCP connections
listener.incoming().for_each(move |(sock, _)| {
debug!("server accepted socket");
// Create client handle. This implements `tokio_service::Service`.
let client_handler = RequestHandler {
server_state: server_state.clone(),
};
// Initialize the transport implementation backed by the Tcp
// socket.
let transport = Transport::new(sock);
// Use `tokio_proto` to handle the details of multiplexing.
// `EasyServer` takes the transport, which is basically a stream of
// frames as they are read off the socket and manages mapping the
// frames to request / response pairs.
let connection_task = multiplex::EasyServer::new(client_handler, transport);
// Spawn a new reactor task to process the connection. A task is a
// light-weight unit of work. Tasks are generally used for managing
// resources, in this case the resource is the socket.
handle.spawn(connection_task);
Ok(())
})
}))
}
impl Server {
// Initialize the server state using the supplied config. This function
// will also establish connections to the peers, which is why `&Handle` is
// needed.
fn new(config: &config::Node, handle: &Handle) -> Server {
// Initialize a timer, this timer will be passed to all peers.
let timer = Timer::default();
// Connect the peers
let peers = config.routes().into_iter()
.map(|route| Peer::connect(route, handle, &timer))
.collect();
// Randomly assign an `ActorId` to the current process. It is
// imperative that the `ActorId` is unique in the cluster and is not
// reused across server restarts (since the state is reset).
let mut rng = rand::thread_rng();
let actor_id: ActorId = rng.next_u64().into();
debug!("server actor-id={:?}", actor_id);
// Return the new server state with an empty data set
Server {
data: Set::new(),
actor_id: actor_id,
peers: peers,
}
}
// Replicate the current data set to the list of peers.
fn replicate(&self) {
// Iterate all the peers sending a clone of the data. This operation
// performs a deep clone for each peer, which is not going to be super
// efficient as the data set grows, but improving this is out of scope
// for MiniDB.
for peer in &self.peers {
peer.send(self.data.clone());
}
}
}
// Service implementation for `RequestHandler`
//
// `Service` is the Tokio abstraction for asynchronous request / response
// handling. This is where we will process all requests sent by clients and
// peers.
//
// Instead of mixing a single service to handle both clients and peers, a
// better strategy would probably be to have two separate TCP listeners on
// different ports to handle the client and the peers.
impl Service for RequestHandler {
// The Request and Response types live in `proto`
type Request = Request;
type Response = Response;
type Error = io::Error;
// For greates flexibility, a Box<Future> is used. This has the downside of
// requiring an allocation and dynamic dispatch. Currently, the service
// only responds with `futures::Done`, so the type could be changed.
//
// If the service respond with different futures depending on a conditional
// branch, then returning Box or implementing a custom future is required.
type Future = Box<Future<Item = Self::Response, Error = Self::Error>>;
fn call(&self, request: Self::Request) -> Self::Future {
match request {
Request::Get(_) => {
info!("[COMMAND] Get");
// Clone the current state and respond with the set
//
let data = self.server_state.borrow().data.clone();
let resp = Response::Value(data);
Box::new(futures::done(Ok(resp)))
}
Request::Insert(cmd) => {
info!("[COMMAND] Insert {:?}", cmd.value());
// Insert the new value, initiate a replication to all peers, | //
let mut state = self.server_state.borrow_mut();
let actor_id = state.actor_id;
state.data.insert(actor_id, cmd.value());
// Replicate the new state to all peers
state.replicate();
let resp = Response::Success(proto::Success);
Box::new(futures::done(Ok(resp)))
}
Request::Remove(cmd) => {
info!("[COMMAND] Remove {:?}", cmd.value());
let mut state = self.server_state.borrow_mut();
let actor_id = state.actor_id;
// If the request includes a version vector, this indicates
// that a causal remove is requested. A causal remove implies
// removing the value from the set at the state represented by
// the version vector and leaving any insertions that are
// either concurrent or successors to the supplied version
// vector.
match cmd.causality() {
Some(version_vec) => {
state.data.causal_remove(actor_id, version_vec, cmd.value());
}
None => {
state.data.remove(actor_id, cmd.value());
}
}
// Replicate the new state to all peers
state.replicate();
let resp = Response::Success(proto::Success);
Box::new(futures::done(Ok(resp)))
}
Request::Clear(_) => {
info!("[COMMAND] Clear");
let mut state = self.server_state.borrow_mut();
let actor_id = state.actor_id;
state.data.clear(actor_id);
state.replicate();
let resp = Response::Success(proto::Success);
Box::new(futures::done(Ok(resp)))
}
Request::Join(other) => {
info!("[COMMAND] Join");
// A Join request is issued by a peer during replication and
// provides the peer's latest state.
//
// The join request is handled by joining the provided state
// into the node's current state.
let mut state = self.server_state.borrow_mut();
state.data.join(&other);
if log_enabled!(::log::LogLevel::Debug) {
for elem in state.data.iter() {
debug!(" - {:?}", elem);
}
}
let resp = Response::Success(proto::Success);
Box::new(futures::done(Ok(resp)))
}
}
}
fn poll_ready(&self) -> Async<()> {
Async::Ready(())
}
} | // and respond with Success | random_line_split |
server.rs | //! Processes requests from clients & peer nodes
//!
//! # Overview
//!
//! The MinDB server is a peer in the MiniDB cluster. It is initialized with a
//! port to bind to and a list of peers to replicate to. The server then
//! processes requests send from both clients and peers.
//!
//! # Peers
//!
//! On process start, the cluster topology is read from a config file
//! (`etc/nodes.toml`). The node represented by the current server process is
//! extracted this topology, leaving the list of peers.
//!
//! After the server starts, connections will be established to all the peers.
//! If a peer cannot be reached, the connection will be retried until the peer
//! becomes reachable.
//!
//! # Replication
//!
//! When the server receives a mutation request from a client, the mutation is
//! processed on the server itself. Once the mutation succeeds, the server
//! state is replicated to all the peers. Replication involves sending the
//! entire data set to all the peers. This is not the most efficient strategy,
//! but we aren't trying to build Riak (or even MongoDB).
//!
//! In the event that replication is unable to keep up with the mutation rate,
//! replication messages are dropped in favor of ensuring that the final
//! replication message is delivered. This works because every replication
//! message includes the entire state at that point. The CRDT ensures that no
//! matter what state the peers are at, it is able to converge with the final
//! replication message (assuming no bugs in the CRDT implementation);
use config;
use dt::{Set, ActorId};
use peer::Peer;
use proto::{self, Request, Response, Transport};
use tokio_core::reactor::{Core, Handle};
use tokio_core::net::TcpListener;
use tokio_service::Service;
use tokio_proto::easy::multiplex;
use tokio_timer::Timer;
use futures::{self, Future, Async};
use futures::stream::{Stream};
use rand::{self, Rng};
use std::io;
use std::cell::RefCell;
use std::rc::Rc;
// The in-memory MinDB state. Stored in a ref-counted cell and shared across
// all open server connections. Whenever a socket connects, a `RequestHandler`
// instance will be initialized with a pointer to this.
struct Server {
// The DB data. This is the CRDT Set. When the server process starts, this
// is initialized to an empty set.
data: Set<String>,
// The server's ActorId. This ActorID value **MUST** be unique across the
// cluster and should never be reused by another node.
//
// In theory, if the data was persisted to disk, the ActorId would be
// stored withe persisted state. However, since the state is initialized
// each time the process starts, the ActorId must be unique.
//
// To handle this, the ActorId is randomly generated.
actor_id: ActorId,
// Handle to the cluster peers. The `Peer` type manages the socket
// connection, including initial connect as well as reconnecting when the
// connection fails.
//
// Whenever the set is mutated by a client, it is replicated to the list of
// peers.
peers: Vec<Peer>,
}
// Handles MiniDB client requests. Implements `Service`
struct RequestHandler {
server_state: Rc<RefCell<Server>>,
}
/// Run a server node.
///
/// The function initializes new server state, including an empty CRDT set,
/// then it will bind to the requested port and start processing connections.
/// Connections will be made from both clients and other peers.
///
/// The function will block while the server is running.
pub fn run(config: config::Node) -> io::Result<()> {
// Create the tokio-core reactor
let mut core = try!(Core::new());
// Get a handle to the reactor
let handle = core.handle();
// Bind the Tcp listener, listening on the requested socket address.
let listener = try!(TcpListener::bind(config.local_addr(), &handle));
// `Core::run` runs the reactor. This call will block until the future
// provided as the argument completes. In this case, the given future
// processes the inbound TCP connections.
core.run(futures::lazy(move || {
// Initialize the server state
let server_state = Rc::new(RefCell::new(Server::new(&config, &handle)));
// `listener.incoming()` provides a `Stream` of inbound TCP connections
listener.incoming().for_each(move |(sock, _)| {
debug!("server accepted socket");
// Create client handle. This implements `tokio_service::Service`.
let client_handler = RequestHandler {
server_state: server_state.clone(),
};
// Initialize the transport implementation backed by the Tcp
// socket.
let transport = Transport::new(sock);
// Use `tokio_proto` to handle the details of multiplexing.
// `EasyServer` takes the transport, which is basically a stream of
// frames as they are read off the socket and manages mapping the
// frames to request / response pairs.
let connection_task = multiplex::EasyServer::new(client_handler, transport);
// Spawn a new reactor task to process the connection. A task is a
// light-weight unit of work. Tasks are generally used for managing
// resources, in this case the resource is the socket.
handle.spawn(connection_task);
Ok(())
})
}))
}
impl Server {
// Initialize the server state using the supplied config. This function
// will also establish connections to the peers, which is why `&Handle` is
// needed.
fn new(config: &config::Node, handle: &Handle) -> Server {
// Initialize a timer, this timer will be passed to all peers.
let timer = Timer::default();
// Connect the peers
let peers = config.routes().into_iter()
.map(|route| Peer::connect(route, handle, &timer))
.collect();
// Randomly assign an `ActorId` to the current process. It is
// imperative that the `ActorId` is unique in the cluster and is not
// reused across server restarts (since the state is reset).
let mut rng = rand::thread_rng();
let actor_id: ActorId = rng.next_u64().into();
debug!("server actor-id={:?}", actor_id);
// Return the new server state with an empty data set
Server {
data: Set::new(),
actor_id: actor_id,
peers: peers,
}
}
// Replicate the current data set to the list of peers.
fn replicate(&self) {
// Iterate all the peers sending a clone of the data. This operation
// performs a deep clone for each peer, which is not going to be super
// efficient as the data set grows, but improving this is out of scope
// for MiniDB.
for peer in &self.peers {
peer.send(self.data.clone());
}
}
}
// Service implementation for `RequestHandler`
//
// `Service` is the Tokio abstraction for asynchronous request / response
// handling. This is where we will process all requests sent by clients and
// peers.
//
// Instead of mixing a single service to handle both clients and peers, a
// better strategy would probably be to have two separate TCP listeners on
// different ports to handle the client and the peers.
impl Service for RequestHandler {
// The Request and Response types live in `proto`
type Request = Request;
type Response = Response;
type Error = io::Error;
// For greates flexibility, a Box<Future> is used. This has the downside of
// requiring an allocation and dynamic dispatch. Currently, the service
// only responds with `futures::Done`, so the type could be changed.
//
// If the service respond with different futures depending on a conditional
// branch, then returning Box or implementing a custom future is required.
type Future = Box<Future<Item = Self::Response, Error = Self::Error>>;
fn call(&self, request: Self::Request) -> Self::Future {
match request {
Request::Get(_) => {
info!("[COMMAND] Get");
// Clone the current state and respond with the set
//
let data = self.server_state.borrow().data.clone();
let resp = Response::Value(data);
Box::new(futures::done(Ok(resp)))
}
Request::Insert(cmd) => {
info!("[COMMAND] Insert {:?}", cmd.value());
// Insert the new value, initiate a replication to all peers,
// and respond with Success
//
let mut state = self.server_state.borrow_mut();
let actor_id = state.actor_id;
state.data.insert(actor_id, cmd.value());
// Replicate the new state to all peers
state.replicate();
let resp = Response::Success(proto::Success);
Box::new(futures::done(Ok(resp)))
}
Request::Remove(cmd) => {
info!("[COMMAND] Remove {:?}", cmd.value());
let mut state = self.server_state.borrow_mut();
let actor_id = state.actor_id;
// If the request includes a version vector, this indicates
// that a causal remove is requested. A causal remove implies
// removing the value from the set at the state represented by
// the version vector and leaving any insertions that are
// either concurrent or successors to the supplied version
// vector.
match cmd.causality() {
Some(version_vec) => {
state.data.causal_remove(actor_id, version_vec, cmd.value());
}
None => {
state.data.remove(actor_id, cmd.value());
}
}
// Replicate the new state to all peers
state.replicate();
let resp = Response::Success(proto::Success);
Box::new(futures::done(Ok(resp)))
}
Request::Clear(_) => |
Request::Join(other) => {
info!("[COMMAND] Join");
// A Join request is issued by a peer during replication and
// provides the peer's latest state.
//
// The join request is handled by joining the provided state
// into the node's current state.
let mut state = self.server_state.borrow_mut();
state.data.join(&other);
if log_enabled!(::log::LogLevel::Debug) {
for elem in state.data.iter() {
debug!(" - {:?}", elem);
}
}
let resp = Response::Success(proto::Success);
Box::new(futures::done(Ok(resp)))
}
}
}
fn poll_ready(&self) -> Async<()> {
Async::Ready(())
}
}
| {
info!("[COMMAND] Clear");
let mut state = self.server_state.borrow_mut();
let actor_id = state.actor_id;
state.data.clear(actor_id);
state.replicate();
let resp = Response::Success(proto::Success);
Box::new(futures::done(Ok(resp)))
} | conditional_block |
server.rs | //! Processes requests from clients & peer nodes
//!
//! # Overview
//!
//! The MinDB server is a peer in the MiniDB cluster. It is initialized with a
//! port to bind to and a list of peers to replicate to. The server then
//! processes requests send from both clients and peers.
//!
//! # Peers
//!
//! On process start, the cluster topology is read from a config file
//! (`etc/nodes.toml`). The node represented by the current server process is
//! extracted this topology, leaving the list of peers.
//!
//! After the server starts, connections will be established to all the peers.
//! If a peer cannot be reached, the connection will be retried until the peer
//! becomes reachable.
//!
//! # Replication
//!
//! When the server receives a mutation request from a client, the mutation is
//! processed on the server itself. Once the mutation succeeds, the server
//! state is replicated to all the peers. Replication involves sending the
//! entire data set to all the peers. This is not the most efficient strategy,
//! but we aren't trying to build Riak (or even MongoDB).
//!
//! In the event that replication is unable to keep up with the mutation rate,
//! replication messages are dropped in favor of ensuring that the final
//! replication message is delivered. This works because every replication
//! message includes the entire state at that point. The CRDT ensures that no
//! matter what state the peers are at, it is able to converge with the final
//! replication message (assuming no bugs in the CRDT implementation);
use config;
use dt::{Set, ActorId};
use peer::Peer;
use proto::{self, Request, Response, Transport};
use tokio_core::reactor::{Core, Handle};
use tokio_core::net::TcpListener;
use tokio_service::Service;
use tokio_proto::easy::multiplex;
use tokio_timer::Timer;
use futures::{self, Future, Async};
use futures::stream::{Stream};
use rand::{self, Rng};
use std::io;
use std::cell::RefCell;
use std::rc::Rc;
// The in-memory MinDB state. Stored in a ref-counted cell and shared across
// all open server connections. Whenever a socket connects, a `RequestHandler`
// instance will be initialized with a pointer to this.
struct Server {
// The DB data. This is the CRDT Set. When the server process starts, this
// is initialized to an empty set.
data: Set<String>,
// The server's ActorId. This ActorID value **MUST** be unique across the
// cluster and should never be reused by another node.
//
// In theory, if the data was persisted to disk, the ActorId would be
// stored withe persisted state. However, since the state is initialized
// each time the process starts, the ActorId must be unique.
//
// To handle this, the ActorId is randomly generated.
actor_id: ActorId,
// Handle to the cluster peers. The `Peer` type manages the socket
// connection, including initial connect as well as reconnecting when the
// connection fails.
//
// Whenever the set is mutated by a client, it is replicated to the list of
// peers.
peers: Vec<Peer>,
}
// Handles MiniDB client requests. Implements `Service`
struct RequestHandler {
server_state: Rc<RefCell<Server>>,
}
/// Run a server node.
///
/// The function initializes new server state, including an empty CRDT set,
/// then it will bind to the requested port and start processing connections.
/// Connections will be made from both clients and other peers.
///
/// The function will block while the server is running.
pub fn run(config: config::Node) -> io::Result<()> {
// Create the tokio-core reactor
let mut core = try!(Core::new());
// Get a handle to the reactor
let handle = core.handle();
// Bind the Tcp listener, listening on the requested socket address.
let listener = try!(TcpListener::bind(config.local_addr(), &handle));
// `Core::run` runs the reactor. This call will block until the future
// provided as the argument completes. In this case, the given future
// processes the inbound TCP connections.
core.run(futures::lazy(move || {
// Initialize the server state
let server_state = Rc::new(RefCell::new(Server::new(&config, &handle)));
// `listener.incoming()` provides a `Stream` of inbound TCP connections
listener.incoming().for_each(move |(sock, _)| {
debug!("server accepted socket");
// Create client handle. This implements `tokio_service::Service`.
let client_handler = RequestHandler {
server_state: server_state.clone(),
};
// Initialize the transport implementation backed by the Tcp
// socket.
let transport = Transport::new(sock);
// Use `tokio_proto` to handle the details of multiplexing.
// `EasyServer` takes the transport, which is basically a stream of
// frames as they are read off the socket and manages mapping the
// frames to request / response pairs.
let connection_task = multiplex::EasyServer::new(client_handler, transport);
// Spawn a new reactor task to process the connection. A task is a
// light-weight unit of work. Tasks are generally used for managing
// resources, in this case the resource is the socket.
handle.spawn(connection_task);
Ok(())
})
}))
}
impl Server {
// Initialize the server state using the supplied config. This function
// will also establish connections to the peers, which is why `&Handle` is
// needed.
fn new(config: &config::Node, handle: &Handle) -> Server {
// Initialize a timer, this timer will be passed to all peers.
let timer = Timer::default();
// Connect the peers
let peers = config.routes().into_iter()
.map(|route| Peer::connect(route, handle, &timer))
.collect();
// Randomly assign an `ActorId` to the current process. It is
// imperative that the `ActorId` is unique in the cluster and is not
// reused across server restarts (since the state is reset).
let mut rng = rand::thread_rng();
let actor_id: ActorId = rng.next_u64().into();
debug!("server actor-id={:?}", actor_id);
// Return the new server state with an empty data set
Server {
data: Set::new(),
actor_id: actor_id,
peers: peers,
}
}
// Replicate the current data set to the list of peers.
fn replicate(&self) {
// Iterate all the peers sending a clone of the data. This operation
// performs a deep clone for each peer, which is not going to be super
// efficient as the data set grows, but improving this is out of scope
// for MiniDB.
for peer in &self.peers {
peer.send(self.data.clone());
}
}
}
// Service implementation for `RequestHandler`
//
// `Service` is the Tokio abstraction for asynchronous request / response
// handling. This is where we will process all requests sent by clients and
// peers.
//
// Instead of mixing a single service to handle both clients and peers, a
// better strategy would probably be to have two separate TCP listeners on
// different ports to handle the client and the peers.
impl Service for RequestHandler {
// The Request and Response types live in `proto`
type Request = Request;
type Response = Response;
type Error = io::Error;
// For greates flexibility, a Box<Future> is used. This has the downside of
// requiring an allocation and dynamic dispatch. Currently, the service
// only responds with `futures::Done`, so the type could be changed.
//
// If the service respond with different futures depending on a conditional
// branch, then returning Box or implementing a custom future is required.
type Future = Box<Future<Item = Self::Response, Error = Self::Error>>;
fn call(&self, request: Self::Request) -> Self::Future |
fn poll_ready(&self) -> Async<()> {
Async::Ready(())
}
}
| {
match request {
Request::Get(_) => {
info!("[COMMAND] Get");
// Clone the current state and respond with the set
//
let data = self.server_state.borrow().data.clone();
let resp = Response::Value(data);
Box::new(futures::done(Ok(resp)))
}
Request::Insert(cmd) => {
info!("[COMMAND] Insert {:?}", cmd.value());
// Insert the new value, initiate a replication to all peers,
// and respond with Success
//
let mut state = self.server_state.borrow_mut();
let actor_id = state.actor_id;
state.data.insert(actor_id, cmd.value());
// Replicate the new state to all peers
state.replicate();
let resp = Response::Success(proto::Success);
Box::new(futures::done(Ok(resp)))
}
Request::Remove(cmd) => {
info!("[COMMAND] Remove {:?}", cmd.value());
let mut state = self.server_state.borrow_mut();
let actor_id = state.actor_id;
// If the request includes a version vector, this indicates
// that a causal remove is requested. A causal remove implies
// removing the value from the set at the state represented by
// the version vector and leaving any insertions that are
// either concurrent or successors to the supplied version
// vector.
match cmd.causality() {
Some(version_vec) => {
state.data.causal_remove(actor_id, version_vec, cmd.value());
}
None => {
state.data.remove(actor_id, cmd.value());
}
}
// Replicate the new state to all peers
state.replicate();
let resp = Response::Success(proto::Success);
Box::new(futures::done(Ok(resp)))
}
Request::Clear(_) => {
info!("[COMMAND] Clear");
let mut state = self.server_state.borrow_mut();
let actor_id = state.actor_id;
state.data.clear(actor_id);
state.replicate();
let resp = Response::Success(proto::Success);
Box::new(futures::done(Ok(resp)))
}
Request::Join(other) => {
info!("[COMMAND] Join");
// A Join request is issued by a peer during replication and
// provides the peer's latest state.
//
// The join request is handled by joining the provided state
// into the node's current state.
let mut state = self.server_state.borrow_mut();
state.data.join(&other);
if log_enabled!(::log::LogLevel::Debug) {
for elem in state.data.iter() {
debug!(" - {:?}", elem);
}
}
let resp = Response::Success(proto::Success);
Box::new(futures::done(Ok(resp)))
}
}
} | identifier_body |
heap.rs | /// Implements a heap data structure (a.k.a. a priority queue).
///
/// In this implementation, heap is "top-heavy" meaning that the root node is
/// the node with the highest value in the heap. The relative priority of
/// two nodes is determined via the `cmp` function defined over type of the
/// heap's elements (i.e. the generic type `T`).
///
/// The allocated memory used to store the elements of the heap grows (and
/// shrinks) as necessary.
///
/// Method naming conventions generally follow those found in `std::vec::Vec`.
///
/// The heap can contain more than one element with the same priority. No
/// guarantees are made about the order in which elements with equivalent
/// priorities are popped from the queue.
///
/// The data sturcture can be output in Graphviz `.dot` format.
// This data structure is implemented as a vector-backed binary heap. (The
// parent-child relationships are therefore not stored via pointers between
// nodes, but using logical connections between NodeIdx values.
//
// See [Wikipedia](http://en.wikipedia.org/wiki/Heap_%28data_structure%29)
// for overview of this implementation strategy.
//
// A simple C implementation of a binary heap is available from
// [here](https://github.com/dale48/levawc). This code is adapted from Chapter
// 10 of O'Reilly book *Mastering Algorithms with C*. This chapter also served
// as a guide while implementing this module.
extern crate graphviz;
extern crate core;
use std;
use std::ptr;
use std::fmt;
use std::fmt::Debug;
use self::core::borrow::IntoCow;
pub type NodeIdx = usize;
pub struct Heap<T: Ord> {
store: Vec<T>,
}
#[derive(Debug)]
enum ChildType {
Left,
Right
}
fn left_child(i: NodeIdx) -> NodeIdx {
2 * i + 1
}
fn right_child(i: NodeIdx) -> NodeIdx {
2 * i + 2
}
impl<T: Ord> Heap<T> {
/// Creates a new empty heap.
pub fn new() -> Heap<T> {
Heap { store: Vec::new() }
}
/// Creates a new empty heap which has initially allocated enough memory
/// for the given number of elements.
pub fn with_capacity(capacity: usize) -> Heap<T> {
Heap { store: Vec::with_capacity(capacity) }
}
/// Adds the given element to the heap.
pub fn push(&mut self, elem: T) {
let len = self.store.len();
self.store.push(elem);
let insert_idx: NodeIdx = len as NodeIdx;
self.percolate_up(insert_idx);
}
/// Removes from the heap an element with the largest priority of all in
/// the heap. This element is then returned wrapped returns it wrapped in
/// an `Option<T>`. If there are no elements in the heap, then `None` is
/// returned.
pub fn pop(&mut self) -> Option<T> {
match self.store.len() {
0 => None,
1 => self.store.pop(),
_ => {
let rv = self.store.swap_remove(0);
self.percolate_down(0);
Some(rv)
}
}
}
/// Returns the number of elements in the heap.
pub fn len(&self) -> usize {
self.store.len()
}
/// Returns `true` iff there are no elements in the heap.
pub fn empty(&self) -> bool {
self.len() == 0
}
/// Takes the index of a node and returns the index of its parent. Returns
/// `None` if the given node has no such parent (i.e. the given node is
/// the root.
///
/// The function panics if the given index is not valid.
fn parent(&self, idx: NodeIdx) -> Option<NodeIdx> {
if self.is_valid(idx) {
if idx == 0 { None } else { Some((idx - 1) / 2) }
} else {
panic!("Heap.parent({}): given `idx` not in the heap.", idx)
}
}
fn left_child(&self, parent: NodeIdx) -> Option<NodeIdx> {
self.child(ChildType::Left, parent)
}
fn right_child(&self, parent: NodeIdx) -> Option<NodeIdx> {
self.child(ChildType::Right, parent)
}
/// Takes the index of a node and returns the index of indicated child.
/// Returns `None` if the given node has no such child.
///
/// The function panics if the given index is not valid.
fn child(&self, ct: ChildType, parent: NodeIdx) -> Option<NodeIdx> {
if self.is_valid(parent) {
let child: NodeIdx = match ct {
ChildType::Left => left_child(parent),
ChildType::Right => right_child(parent)
};
if child < self.store.len() {
Some(child) }
else {
None
}
} else {
panic!("Heap.child({:?}, {:?}): the given `idx` is not in `Heap`.",
ct, parent)
}
}
/// Starting from the given `NodeIdx`, recursively move an element up the
/// heap until the heap property has been restored all along this node's
/// ancestor path.
fn percolate_up(&mut self, child: NodeIdx) {
let maybe_parent = self.parent(child);
match maybe_parent {
None => {
// Do nothing: The given `child` has no parent because it is
// the root node.
return
},
Some(parent) => {
if self.is_violating(parent, child) {
self.swap(parent, child);
self.percolate_up(parent)
} else {
// Do nothing: the two nodes are already ordered correctly.
return
}
}
}
}
/// Starting from the given `NodeIdx`, recursively move an element down
/// the heap until the heap property has been restored in the entire
/// sub-heap.
///
/// (For the heap property to be restored to the entire sub-heap being
/// re-heapified, the only element which may be violating the heap-property
/// is the node indicated by the given `NodeIdx`.)
fn percolate_down(&mut self, parent: NodeIdx) {
match (self.left_child(parent), self.right_child(parent)) {
(None, None) => return,
(None, Some(right)) => panic!("Heap can't only have right child."),
(Some(left), None) => {
if self.is_violating(parent, left) {
self.swap_down(parent, left)
}
},
(Some(left), Some(right)) => {
match (self.is_violating(parent, left),
self.is_violating(parent, right)) {
(false, false) => return,
(false, true) => self.swap_down(parent, right),
(true, false) => self.swap_down(parent, left),
(true, true) => {
// Since both of the parent's children are violating
// the heap property, choose which child should be
// swapped with the parent, such that the heap property
// will not be violated after the swap. (That is, the
// greater of the two will need to become the parent of
// the other.)
if self.store[left] >= self.store[right] {
self.swap_down(parent, left)
} else {
self.swap_down(parent, right)
}
}
}
}
}
}
/// Helper function for `percolate_down()`.
fn swap_down(&mut self, parent: NodeIdx, child: NodeIdx) {
self.swap(parent, child);
self.percolate_down(child);
}
/// Checks to see whether the given parent-child nodes are violating the
/// heap property.
///
/// Panics if either index is out of bounds. Panics if the given parent is
/// not actually the parent of the given child.
fn is_violating(&self, parent: NodeIdx, child: NodeIdx) -> bool{
if parent == self.parent(child).unwrap() {
self.store[parent] < self.store[child]
} else {
panic!("Given parent is not actually the parent of this child.")
}
}
fn is_valid(&self, idx: NodeIdx) -> bool {
idx < self.store.len()
}
/// Swaps the data stored at the two inciated heap nodes.
///
/// Does nothing if the two indices are the same. Panics if either index is
/// invalid.
fn swap(&mut self, a: NodeIdx, b: NodeIdx) {
if a != b {
unsafe {
let pa: *mut T = &mut self.store[a];
let pb: *mut T = &mut self.store[b];
ptr::swap(pa, pb);
}
}
}
}
pub type Edge = (NodeIdx, NodeIdx);
impl<'a, T: Ord + 'a> graphviz::GraphWalk<'a, NodeIdx, Edge> for Heap<T> {
fn nodes(&self) -> graphviz::Nodes<'a, NodeIdx> {
let mut v: Vec<NodeIdx> = Vec::new();
for node_idx in (0..self.len()) {
v.push(node_idx);
}
v.into_cow()
}
fn edges(&'a self) -> graphviz::Edges<'a, Edge> {
let mut v: Vec<Edge> = Vec::with_capacity(2 * self.len());
// Add an edge for each parent-child relationship in the heap.
for idx in (0..self.len()) {
match self.left_child(idx) {
Some(l) => v.push((idx, l)),
None => ()
};
match self.right_child(idx) {
Some(r) => v.push((idx, r)),
None => ()
};
}
v.into_cow()
}
fn source(&self, edge: &Edge) -> NodeIdx {
let &(s, _) = edge;
s
}
fn target(&self, edge: &Edge) -> NodeIdx {
let &(_, t) = edge;
t
}
}
impl<'a, T: 'a + Ord + fmt::Debug> graphviz::Labeller<'a, NodeIdx, Edge> for Heap<T> {
fn graph_id(&'a self) -> graphviz::Id<'a> {
graphviz::Id::new("Heap").unwrap()
}
fn node_id(&'a self, n: &NodeIdx) -> graphviz::Id<'a> {
graphviz::Id::new(format!("n{}", n)).unwrap()
}
fn node_label(&'a self, n: &NodeIdx) -> graphviz::LabelText<'a> |
}
| {
let label = format!("{:?}", self.store[*n]);
graphviz::LabelText::LabelStr(label.into_cow())
} | identifier_body |
heap.rs | /// Implements a heap data structure (a.k.a. a priority queue).
///
/// In this implementation, heap is "top-heavy" meaning that the root node is
/// the node with the highest value in the heap. The relative priority of
/// two nodes is determined via the `cmp` function defined over type of the
/// heap's elements (i.e. the generic type `T`).
///
/// The allocated memory used to store the elements of the heap grows (and
/// shrinks) as necessary.
///
/// Method naming conventions generally follow those found in `std::vec::Vec`.
///
/// The heap can contain more than one element with the same priority. No
/// guarantees are made about the order in which elements with equivalent
/// priorities are popped from the queue.
///
/// The data sturcture can be output in Graphviz `.dot` format.
// This data structure is implemented as a vector-backed binary heap. (The
// parent-child relationships are therefore not stored via pointers between
// nodes, but using logical connections between NodeIdx values.
//
// See [Wikipedia](http://en.wikipedia.org/wiki/Heap_%28data_structure%29)
// for overview of this implementation strategy.
//
// A simple C implementation of a binary heap is available from
// [here](https://github.com/dale48/levawc). This code is adapted from Chapter
// 10 of O'Reilly book *Mastering Algorithms with C*. This chapter also served
// as a guide while implementing this module.
extern crate graphviz;
extern crate core;
use std;
use std::ptr;
use std::fmt;
use std::fmt::Debug; | store: Vec<T>,
}
#[derive(Debug)]
enum ChildType {
Left,
Right
}
fn left_child(i: NodeIdx) -> NodeIdx {
2 * i + 1
}
fn right_child(i: NodeIdx) -> NodeIdx {
2 * i + 2
}
impl<T: Ord> Heap<T> {
/// Creates a new empty heap.
pub fn new() -> Heap<T> {
Heap { store: Vec::new() }
}
/// Creates a new empty heap which has initially allocated enough memory
/// for the given number of elements.
pub fn with_capacity(capacity: usize) -> Heap<T> {
Heap { store: Vec::with_capacity(capacity) }
}
/// Adds the given element to the heap.
pub fn push(&mut self, elem: T) {
let len = self.store.len();
self.store.push(elem);
let insert_idx: NodeIdx = len as NodeIdx;
self.percolate_up(insert_idx);
}
/// Removes from the heap an element with the largest priority of all in
/// the heap. This element is then returned wrapped returns it wrapped in
/// an `Option<T>`. If there are no elements in the heap, then `None` is
/// returned.
pub fn pop(&mut self) -> Option<T> {
match self.store.len() {
0 => None,
1 => self.store.pop(),
_ => {
let rv = self.store.swap_remove(0);
self.percolate_down(0);
Some(rv)
}
}
}
/// Returns the number of elements in the heap.
pub fn len(&self) -> usize {
self.store.len()
}
/// Returns `true` iff there are no elements in the heap.
pub fn empty(&self) -> bool {
self.len() == 0
}
/// Takes the index of a node and returns the index of its parent. Returns
/// `None` if the given node has no such parent (i.e. the given node is
/// the root.
///
/// The function panics if the given index is not valid.
fn parent(&self, idx: NodeIdx) -> Option<NodeIdx> {
if self.is_valid(idx) {
if idx == 0 { None } else { Some((idx - 1) / 2) }
} else {
panic!("Heap.parent({}): given `idx` not in the heap.", idx)
}
}
fn left_child(&self, parent: NodeIdx) -> Option<NodeIdx> {
self.child(ChildType::Left, parent)
}
fn right_child(&self, parent: NodeIdx) -> Option<NodeIdx> {
self.child(ChildType::Right, parent)
}
/// Takes the index of a node and returns the index of indicated child.
/// Returns `None` if the given node has no such child.
///
/// The function panics if the given index is not valid.
fn child(&self, ct: ChildType, parent: NodeIdx) -> Option<NodeIdx> {
if self.is_valid(parent) {
let child: NodeIdx = match ct {
ChildType::Left => left_child(parent),
ChildType::Right => right_child(parent)
};
if child < self.store.len() {
Some(child) }
else {
None
}
} else {
panic!("Heap.child({:?}, {:?}): the given `idx` is not in `Heap`.",
ct, parent)
}
}
/// Starting from the given `NodeIdx`, recursively move an element up the
/// heap until the heap property has been restored all along this node's
/// ancestor path.
fn percolate_up(&mut self, child: NodeIdx) {
let maybe_parent = self.parent(child);
match maybe_parent {
None => {
// Do nothing: The given `child` has no parent because it is
// the root node.
return
},
Some(parent) => {
if self.is_violating(parent, child) {
self.swap(parent, child);
self.percolate_up(parent)
} else {
// Do nothing: the two nodes are already ordered correctly.
return
}
}
}
}
/// Starting from the given `NodeIdx`, recursively move an element down
/// the heap until the heap property has been restored in the entire
/// sub-heap.
///
/// (For the heap property to be restored to the entire sub-heap being
/// re-heapified, the only element which may be violating the heap-property
/// is the node indicated by the given `NodeIdx`.)
fn percolate_down(&mut self, parent: NodeIdx) {
match (self.left_child(parent), self.right_child(parent)) {
(None, None) => return,
(None, Some(right)) => panic!("Heap can't only have right child."),
(Some(left), None) => {
if self.is_violating(parent, left) {
self.swap_down(parent, left)
}
},
(Some(left), Some(right)) => {
match (self.is_violating(parent, left),
self.is_violating(parent, right)) {
(false, false) => return,
(false, true) => self.swap_down(parent, right),
(true, false) => self.swap_down(parent, left),
(true, true) => {
// Since both of the parent's children are violating
// the heap property, choose which child should be
// swapped with the parent, such that the heap property
// will not be violated after the swap. (That is, the
// greater of the two will need to become the parent of
// the other.)
if self.store[left] >= self.store[right] {
self.swap_down(parent, left)
} else {
self.swap_down(parent, right)
}
}
}
}
}
}
/// Helper function for `percolate_down()`.
fn swap_down(&mut self, parent: NodeIdx, child: NodeIdx) {
self.swap(parent, child);
self.percolate_down(child);
}
/// Checks to see whether the given parent-child nodes are violating the
/// heap property.
///
/// Panics if either index is out of bounds. Panics if the given parent is
/// not actually the parent of the given child.
fn is_violating(&self, parent: NodeIdx, child: NodeIdx) -> bool{
if parent == self.parent(child).unwrap() {
self.store[parent] < self.store[child]
} else {
panic!("Given parent is not actually the parent of this child.")
}
}
fn is_valid(&self, idx: NodeIdx) -> bool {
idx < self.store.len()
}
/// Swaps the data stored at the two inciated heap nodes.
///
/// Does nothing if the two indices are the same. Panics if either index is
/// invalid.
fn swap(&mut self, a: NodeIdx, b: NodeIdx) {
if a != b {
unsafe {
let pa: *mut T = &mut self.store[a];
let pb: *mut T = &mut self.store[b];
ptr::swap(pa, pb);
}
}
}
}
pub type Edge = (NodeIdx, NodeIdx);
impl<'a, T: Ord + 'a> graphviz::GraphWalk<'a, NodeIdx, Edge> for Heap<T> {
fn nodes(&self) -> graphviz::Nodes<'a, NodeIdx> {
let mut v: Vec<NodeIdx> = Vec::new();
for node_idx in (0..self.len()) {
v.push(node_idx);
}
v.into_cow()
}
fn edges(&'a self) -> graphviz::Edges<'a, Edge> {
let mut v: Vec<Edge> = Vec::with_capacity(2 * self.len());
// Add an edge for each parent-child relationship in the heap.
for idx in (0..self.len()) {
match self.left_child(idx) {
Some(l) => v.push((idx, l)),
None => ()
};
match self.right_child(idx) {
Some(r) => v.push((idx, r)),
None => ()
};
}
v.into_cow()
}
fn source(&self, edge: &Edge) -> NodeIdx {
let &(s, _) = edge;
s
}
fn target(&self, edge: &Edge) -> NodeIdx {
let &(_, t) = edge;
t
}
}
impl<'a, T: 'a + Ord + fmt::Debug> graphviz::Labeller<'a, NodeIdx, Edge> for Heap<T> {
fn graph_id(&'a self) -> graphviz::Id<'a> {
graphviz::Id::new("Heap").unwrap()
}
fn node_id(&'a self, n: &NodeIdx) -> graphviz::Id<'a> {
graphviz::Id::new(format!("n{}", n)).unwrap()
}
fn node_label(&'a self, n: &NodeIdx) -> graphviz::LabelText<'a> {
let label = format!("{:?}", self.store[*n]);
graphviz::LabelText::LabelStr(label.into_cow())
}
} | use self::core::borrow::IntoCow;
pub type NodeIdx = usize;
pub struct Heap<T: Ord> { | random_line_split |
heap.rs | /// Implements a heap data structure (a.k.a. a priority queue).
///
/// In this implementation, heap is "top-heavy" meaning that the root node is
/// the node with the highest value in the heap. The relative priority of
/// two nodes is determined via the `cmp` function defined over type of the
/// heap's elements (i.e. the generic type `T`).
///
/// The allocated memory used to store the elements of the heap grows (and
/// shrinks) as necessary.
///
/// Method naming conventions generally follow those found in `std::vec::Vec`.
///
/// The heap can contain more than one element with the same priority. No
/// guarantees are made about the order in which elements with equivalent
/// priorities are popped from the queue.
///
/// The data sturcture can be output in Graphviz `.dot` format.
// This data structure is implemented as a vector-backed binary heap. (The
// parent-child relationships are therefore not stored via pointers between
// nodes, but using logical connections between NodeIdx values.
//
// See [Wikipedia](http://en.wikipedia.org/wiki/Heap_%28data_structure%29)
// for overview of this implementation strategy.
//
// A simple C implementation of a binary heap is available from
// [here](https://github.com/dale48/levawc). This code is adapted from Chapter
// 10 of O'Reilly book *Mastering Algorithms with C*. This chapter also served
// as a guide while implementing this module.
extern crate graphviz;
extern crate core;
use std;
use std::ptr;
use std::fmt;
use std::fmt::Debug;
use self::core::borrow::IntoCow;
pub type NodeIdx = usize;
pub struct Heap<T: Ord> {
store: Vec<T>,
}
#[derive(Debug)]
enum ChildType {
Left,
Right
}
fn left_child(i: NodeIdx) -> NodeIdx {
2 * i + 1
}
fn right_child(i: NodeIdx) -> NodeIdx {
2 * i + 2
}
impl<T: Ord> Heap<T> {
/// Creates a new empty heap.
pub fn new() -> Heap<T> {
Heap { store: Vec::new() }
}
/// Creates a new empty heap which has initially allocated enough memory
/// for the given number of elements.
pub fn with_capacity(capacity: usize) -> Heap<T> {
Heap { store: Vec::with_capacity(capacity) }
}
/// Adds the given element to the heap.
pub fn push(&mut self, elem: T) {
let len = self.store.len();
self.store.push(elem);
let insert_idx: NodeIdx = len as NodeIdx;
self.percolate_up(insert_idx);
}
/// Removes from the heap an element with the largest priority of all in
/// the heap. This element is then returned wrapped returns it wrapped in
/// an `Option<T>`. If there are no elements in the heap, then `None` is
/// returned.
pub fn pop(&mut self) -> Option<T> {
match self.store.len() {
0 => None,
1 => self.store.pop(),
_ => {
let rv = self.store.swap_remove(0);
self.percolate_down(0);
Some(rv)
}
}
}
/// Returns the number of elements in the heap.
pub fn len(&self) -> usize {
self.store.len()
}
/// Returns `true` iff there are no elements in the heap.
pub fn empty(&self) -> bool {
self.len() == 0
}
/// Takes the index of a node and returns the index of its parent. Returns
/// `None` if the given node has no such parent (i.e. the given node is
/// the root.
///
/// The function panics if the given index is not valid.
fn parent(&self, idx: NodeIdx) -> Option<NodeIdx> {
if self.is_valid(idx) {
if idx == 0 | else { Some((idx - 1) / 2) }
} else {
panic!("Heap.parent({}): given `idx` not in the heap.", idx)
}
}
fn left_child(&self, parent: NodeIdx) -> Option<NodeIdx> {
self.child(ChildType::Left, parent)
}
fn right_child(&self, parent: NodeIdx) -> Option<NodeIdx> {
self.child(ChildType::Right, parent)
}
/// Takes the index of a node and returns the index of indicated child.
/// Returns `None` if the given node has no such child.
///
/// The function panics if the given index is not valid.
fn child(&self, ct: ChildType, parent: NodeIdx) -> Option<NodeIdx> {
if self.is_valid(parent) {
let child: NodeIdx = match ct {
ChildType::Left => left_child(parent),
ChildType::Right => right_child(parent)
};
if child < self.store.len() {
Some(child) }
else {
None
}
} else {
panic!("Heap.child({:?}, {:?}): the given `idx` is not in `Heap`.",
ct, parent)
}
}
/// Starting from the given `NodeIdx`, recursively move an element up the
/// heap until the heap property has been restored all along this node's
/// ancestor path.
fn percolate_up(&mut self, child: NodeIdx) {
let maybe_parent = self.parent(child);
match maybe_parent {
None => {
// Do nothing: The given `child` has no parent because it is
// the root node.
return
},
Some(parent) => {
if self.is_violating(parent, child) {
self.swap(parent, child);
self.percolate_up(parent)
} else {
// Do nothing: the two nodes are already ordered correctly.
return
}
}
}
}
/// Starting from the given `NodeIdx`, recursively move an element down
/// the heap until the heap property has been restored in the entire
/// sub-heap.
///
/// (For the heap property to be restored to the entire sub-heap being
/// re-heapified, the only element which may be violating the heap-property
/// is the node indicated by the given `NodeIdx`.)
fn percolate_down(&mut self, parent: NodeIdx) {
match (self.left_child(parent), self.right_child(parent)) {
(None, None) => return,
(None, Some(right)) => panic!("Heap can't only have right child."),
(Some(left), None) => {
if self.is_violating(parent, left) {
self.swap_down(parent, left)
}
},
(Some(left), Some(right)) => {
match (self.is_violating(parent, left),
self.is_violating(parent, right)) {
(false, false) => return,
(false, true) => self.swap_down(parent, right),
(true, false) => self.swap_down(parent, left),
(true, true) => {
// Since both of the parent's children are violating
// the heap property, choose which child should be
// swapped with the parent, such that the heap property
// will not be violated after the swap. (That is, the
// greater of the two will need to become the parent of
// the other.)
if self.store[left] >= self.store[right] {
self.swap_down(parent, left)
} else {
self.swap_down(parent, right)
}
}
}
}
}
}
/// Helper function for `percolate_down()`.
fn swap_down(&mut self, parent: NodeIdx, child: NodeIdx) {
self.swap(parent, child);
self.percolate_down(child);
}
/// Checks to see whether the given parent-child nodes are violating the
/// heap property.
///
/// Panics if either index is out of bounds. Panics if the given parent is
/// not actually the parent of the given child.
fn is_violating(&self, parent: NodeIdx, child: NodeIdx) -> bool{
if parent == self.parent(child).unwrap() {
self.store[parent] < self.store[child]
} else {
panic!("Given parent is not actually the parent of this child.")
}
}
fn is_valid(&self, idx: NodeIdx) -> bool {
idx < self.store.len()
}
/// Swaps the data stored at the two inciated heap nodes.
///
/// Does nothing if the two indices are the same. Panics if either index is
/// invalid.
fn swap(&mut self, a: NodeIdx, b: NodeIdx) {
if a != b {
unsafe {
let pa: *mut T = &mut self.store[a];
let pb: *mut T = &mut self.store[b];
ptr::swap(pa, pb);
}
}
}
}
pub type Edge = (NodeIdx, NodeIdx);
impl<'a, T: Ord + 'a> graphviz::GraphWalk<'a, NodeIdx, Edge> for Heap<T> {
fn nodes(&self) -> graphviz::Nodes<'a, NodeIdx> {
let mut v: Vec<NodeIdx> = Vec::new();
for node_idx in (0..self.len()) {
v.push(node_idx);
}
v.into_cow()
}
fn edges(&'a self) -> graphviz::Edges<'a, Edge> {
let mut v: Vec<Edge> = Vec::with_capacity(2 * self.len());
// Add an edge for each parent-child relationship in the heap.
for idx in (0..self.len()) {
match self.left_child(idx) {
Some(l) => v.push((idx, l)),
None => ()
};
match self.right_child(idx) {
Some(r) => v.push((idx, r)),
None => ()
};
}
v.into_cow()
}
fn source(&self, edge: &Edge) -> NodeIdx {
let &(s, _) = edge;
s
}
fn target(&self, edge: &Edge) -> NodeIdx {
let &(_, t) = edge;
t
}
}
impl<'a, T: 'a + Ord + fmt::Debug> graphviz::Labeller<'a, NodeIdx, Edge> for Heap<T> {
fn graph_id(&'a self) -> graphviz::Id<'a> {
graphviz::Id::new("Heap").unwrap()
}
fn node_id(&'a self, n: &NodeIdx) -> graphviz::Id<'a> {
graphviz::Id::new(format!("n{}", n)).unwrap()
}
fn node_label(&'a self, n: &NodeIdx) -> graphviz::LabelText<'a> {
let label = format!("{:?}", self.store[*n]);
graphviz::LabelText::LabelStr(label.into_cow())
}
}
| { None } | conditional_block |
heap.rs | /// Implements a heap data structure (a.k.a. a priority queue).
///
/// In this implementation, heap is "top-heavy" meaning that the root node is
/// the node with the highest value in the heap. The relative priority of
/// two nodes is determined via the `cmp` function defined over type of the
/// heap's elements (i.e. the generic type `T`).
///
/// The allocated memory used to store the elements of the heap grows (and
/// shrinks) as necessary.
///
/// Method naming conventions generally follow those found in `std::vec::Vec`.
///
/// The heap can contain more than one element with the same priority. No
/// guarantees are made about the order in which elements with equivalent
/// priorities are popped from the queue.
///
/// The data sturcture can be output in Graphviz `.dot` format.
// This data structure is implemented as a vector-backed binary heap. (The
// parent-child relationships are therefore not stored via pointers between
// nodes, but using logical connections between NodeIdx values.
//
// See [Wikipedia](http://en.wikipedia.org/wiki/Heap_%28data_structure%29)
// for overview of this implementation strategy.
//
// A simple C implementation of a binary heap is available from
// [here](https://github.com/dale48/levawc). This code is adapted from Chapter
// 10 of O'Reilly book *Mastering Algorithms with C*. This chapter also served
// as a guide while implementing this module.
extern crate graphviz;
extern crate core;
use std;
use std::ptr;
use std::fmt;
use std::fmt::Debug;
use self::core::borrow::IntoCow;
pub type NodeIdx = usize;
pub struct Heap<T: Ord> {
store: Vec<T>,
}
#[derive(Debug)]
enum ChildType {
Left,
Right
}
fn left_child(i: NodeIdx) -> NodeIdx {
2 * i + 1
}
fn right_child(i: NodeIdx) -> NodeIdx {
2 * i + 2
}
impl<T: Ord> Heap<T> {
/// Creates a new empty heap.
pub fn new() -> Heap<T> {
Heap { store: Vec::new() }
}
/// Creates a new empty heap which has initially allocated enough memory
/// for the given number of elements.
pub fn with_capacity(capacity: usize) -> Heap<T> {
Heap { store: Vec::with_capacity(capacity) }
}
/// Adds the given element to the heap.
pub fn push(&mut self, elem: T) {
let len = self.store.len();
self.store.push(elem);
let insert_idx: NodeIdx = len as NodeIdx;
self.percolate_up(insert_idx);
}
/// Removes from the heap an element with the largest priority of all in
/// the heap. This element is then returned wrapped returns it wrapped in
/// an `Option<T>`. If there are no elements in the heap, then `None` is
/// returned.
pub fn pop(&mut self) -> Option<T> {
match self.store.len() {
0 => None,
1 => self.store.pop(),
_ => {
let rv = self.store.swap_remove(0);
self.percolate_down(0);
Some(rv)
}
}
}
/// Returns the number of elements in the heap.
pub fn len(&self) -> usize {
self.store.len()
}
/// Returns `true` iff there are no elements in the heap.
pub fn | (&self) -> bool {
self.len() == 0
}
/// Takes the index of a node and returns the index of its parent. Returns
/// `None` if the given node has no such parent (i.e. the given node is
/// the root.
///
/// The function panics if the given index is not valid.
fn parent(&self, idx: NodeIdx) -> Option<NodeIdx> {
if self.is_valid(idx) {
if idx == 0 { None } else { Some((idx - 1) / 2) }
} else {
panic!("Heap.parent({}): given `idx` not in the heap.", idx)
}
}
fn left_child(&self, parent: NodeIdx) -> Option<NodeIdx> {
self.child(ChildType::Left, parent)
}
fn right_child(&self, parent: NodeIdx) -> Option<NodeIdx> {
self.child(ChildType::Right, parent)
}
/// Takes the index of a node and returns the index of indicated child.
/// Returns `None` if the given node has no such child.
///
/// The function panics if the given index is not valid.
fn child(&self, ct: ChildType, parent: NodeIdx) -> Option<NodeIdx> {
if self.is_valid(parent) {
let child: NodeIdx = match ct {
ChildType::Left => left_child(parent),
ChildType::Right => right_child(parent)
};
if child < self.store.len() {
Some(child) }
else {
None
}
} else {
panic!("Heap.child({:?}, {:?}): the given `idx` is not in `Heap`.",
ct, parent)
}
}
/// Starting from the given `NodeIdx`, recursively move an element up the
/// heap until the heap property has been restored all along this node's
/// ancestor path.
fn percolate_up(&mut self, child: NodeIdx) {
let maybe_parent = self.parent(child);
match maybe_parent {
None => {
// Do nothing: The given `child` has no parent because it is
// the root node.
return
},
Some(parent) => {
if self.is_violating(parent, child) {
self.swap(parent, child);
self.percolate_up(parent)
} else {
// Do nothing: the two nodes are already ordered correctly.
return
}
}
}
}
/// Starting from the given `NodeIdx`, recursively move an element down
/// the heap until the heap property has been restored in the entire
/// sub-heap.
///
/// (For the heap property to be restored to the entire sub-heap being
/// re-heapified, the only element which may be violating the heap-property
/// is the node indicated by the given `NodeIdx`.)
fn percolate_down(&mut self, parent: NodeIdx) {
match (self.left_child(parent), self.right_child(parent)) {
(None, None) => return,
(None, Some(right)) => panic!("Heap can't only have right child."),
(Some(left), None) => {
if self.is_violating(parent, left) {
self.swap_down(parent, left)
}
},
(Some(left), Some(right)) => {
match (self.is_violating(parent, left),
self.is_violating(parent, right)) {
(false, false) => return,
(false, true) => self.swap_down(parent, right),
(true, false) => self.swap_down(parent, left),
(true, true) => {
// Since both of the parent's children are violating
// the heap property, choose which child should be
// swapped with the parent, such that the heap property
// will not be violated after the swap. (That is, the
// greater of the two will need to become the parent of
// the other.)
if self.store[left] >= self.store[right] {
self.swap_down(parent, left)
} else {
self.swap_down(parent, right)
}
}
}
}
}
}
/// Helper function for `percolate_down()`.
fn swap_down(&mut self, parent: NodeIdx, child: NodeIdx) {
self.swap(parent, child);
self.percolate_down(child);
}
/// Checks to see whether the given parent-child nodes are violating the
/// heap property.
///
/// Panics if either index is out of bounds. Panics if the given parent is
/// not actually the parent of the given child.
fn is_violating(&self, parent: NodeIdx, child: NodeIdx) -> bool{
if parent == self.parent(child).unwrap() {
self.store[parent] < self.store[child]
} else {
panic!("Given parent is not actually the parent of this child.")
}
}
fn is_valid(&self, idx: NodeIdx) -> bool {
idx < self.store.len()
}
/// Swaps the data stored at the two inciated heap nodes.
///
/// Does nothing if the two indices are the same. Panics if either index is
/// invalid.
fn swap(&mut self, a: NodeIdx, b: NodeIdx) {
if a != b {
unsafe {
let pa: *mut T = &mut self.store[a];
let pb: *mut T = &mut self.store[b];
ptr::swap(pa, pb);
}
}
}
}
pub type Edge = (NodeIdx, NodeIdx);
impl<'a, T: Ord + 'a> graphviz::GraphWalk<'a, NodeIdx, Edge> for Heap<T> {
fn nodes(&self) -> graphviz::Nodes<'a, NodeIdx> {
let mut v: Vec<NodeIdx> = Vec::new();
for node_idx in (0..self.len()) {
v.push(node_idx);
}
v.into_cow()
}
fn edges(&'a self) -> graphviz::Edges<'a, Edge> {
let mut v: Vec<Edge> = Vec::with_capacity(2 * self.len());
// Add an edge for each parent-child relationship in the heap.
for idx in (0..self.len()) {
match self.left_child(idx) {
Some(l) => v.push((idx, l)),
None => ()
};
match self.right_child(idx) {
Some(r) => v.push((idx, r)),
None => ()
};
}
v.into_cow()
}
fn source(&self, edge: &Edge) -> NodeIdx {
let &(s, _) = edge;
s
}
fn target(&self, edge: &Edge) -> NodeIdx {
let &(_, t) = edge;
t
}
}
impl<'a, T: 'a + Ord + fmt::Debug> graphviz::Labeller<'a, NodeIdx, Edge> for Heap<T> {
fn graph_id(&'a self) -> graphviz::Id<'a> {
graphviz::Id::new("Heap").unwrap()
}
fn node_id(&'a self, n: &NodeIdx) -> graphviz::Id<'a> {
graphviz::Id::new(format!("n{}", n)).unwrap()
}
fn node_label(&'a self, n: &NodeIdx) -> graphviz::LabelText<'a> {
let label = format!("{:?}", self.store[*n]);
graphviz::LabelText::LabelStr(label.into_cow())
}
}
| empty | identifier_name |
oauth2.py | import json
from functools import lru_cache
from typing import Optional, Union, Type, List
from uuid import uuid4, UUID
from aioauth.base.database import BaseDB
from aioauth.config import Settings
from aioauth.models import (
Token as OAuth2Token,
AuthorizationCode as OAuth2AuthorizationCode,
Client as OAuth2Client,
)
from aioauth.requests import Request as OAuth2Request, Post, Query
from aioauth.response_type import (
ResponseTypeBase,
ResponseTypeToken,
ResponseTypeAuthorizationCode,
)
from aioauth.responses import Response as OAuth2Response
from aioauth.server import AuthorizationServer
from aioauth.structures import CaseInsensitiveDict
from aioauth.types import (
RequestMethod,
GrantType,
ResponseType,
CodeChallengeMethod,
)
from aioauth.utils import catch_errors_and_unavailability
from fastapi import (
APIRouter,
Request,
Response,
Depends,
status,
HTTPException,
)
from fastapi.responses import RedirectResponse
from fastapi.security import OAuth2AuthorizationCodeBearer
from pydantic import BaseModel
from starlette.authentication import AuthenticationBackend
from .config import get_settings
from .http import get_edgedb_pool
from .models import DatabaseModel, User, IdPClient
from .orm import with_block, ComputableProperty
router = APIRouter(prefix="/oauth2", tags=["OAuth 2.0"])
class Client(DatabaseModel):
client_secret: str
grant_types: List[GrantType] = []
response_types: List[ResponseType] = []
redirect_uris: List[str] = []
scope: str = ""
ComputableProperty("client_id", "<str>__source__.id")
class AuthorizationCode(DatabaseModel):
code: str
client: Client
redirect_uri: str
response_type: ResponseType
scope: str
auth_time: int
code_challenge: Optional[str] = None
code_challenge_method: Optional[CodeChallengeMethod] = None
nonce: Optional[str] = None
class Token(DatabaseModel):
user: User
access_token: str
refresh_token: str
scope: str
issued_at: int
expires_in: int
client: Client
token_type: str = "Bearer"
revoked: bool = False
class OAuth2Backend(AuthenticationBackend):
async def authenticate(self, conn):
token = await _get_oauth2_scheme(str(conn.base_url))(conn)
print("token", token)
class DB(BaseDB):
"""Class for interacting with the database. Used by `AuthorizationServer`.
Here you need to override the methods that are responsible for creating tokens,
creating authorization code, getting a client from the database, etc.
"""
def __init__(self, pool_or_conn):
self._db = pool_or_conn
async def create_token(self, *args, **kwargs) -> OAuth2Token:
"""Create token code in db"""
token = await super().create_token(*args, **kwargs)
# NOTE: Save data from token to db here.
return token
async def create_authorization_code(
self, *args, **kwargs
) -> OAuth2AuthorizationCode:
"""Create authorization code in db"""
authorization_code = await super().create_authorization_code(
*args, **kwargs
)
data = authorization_code._asdict()
# TODO: handle None values for optional property
for key in list(data):
if data[key] is None:
data.pop(key)
client_id = data.pop("client_id")
obj = AuthorizationCode.construct(**data)
await self._db.query_one(
with_block("oauth2")
+ obj.insert(
current_module="oauth2",
client="(SELECT Client FILTER .id = <uuid>$client_id)",
),
client_id=client_id,
**data,
)
return authorization_code
async def get_token(self, *args, **kwargs) -> Optional[OAuth2Token]:
"""Get token from the database by provided request from user.
Returns:
Token: if token exists in db.
None: if no token in db.
"""
token_record = ...
if token_record is not None:
return OAuth2Token(
access_token=token_record.access_token,
refresh_token=token_record.refresh_token,
scope=token_record.scope,
issued_at=token_record.issued_at,
expires_in=token_record.expires_in,
client_id=token_record.client_id,
token_type=token_record.token_type,
revoked=token_record.revoked,
)
async def get_client(
self,
request: Request,
client_id: str,
client_secret: Optional[str] = None,
) -> Optional[OAuth2Client]:
"""Get client record from the database by provided request from user.
Returns:
`Client` instance if client exists in db.
`None` if no client in db.
"""
client_record = await self._db.query_one(
Client.select(*OAuth2Client._fields, filters=".id = <uuid>$id"),
id=client_id,
)
client_record = Client.from_obj(client_record)
if client_record is not None:
return OAuth2Client(
client_id=client_record.client_id,
client_secret=client_record.client_secret,
grant_types=client_record.grant_types,
response_types=client_record.response_types,
redirect_uris=client_record.redirect_uris,
scope=client_record.scope,
)
async def revoke_token(self, request: Request, token: str) -> None:
"""Revokes an existing token. The `revoked`
Flag of the Token must be set to True
"""
token_record = ...
token_record.revoked = True
token_record.save()
async def get_authorization_code(
self, *args, **kwargs
) -> Optional[OAuth2AuthorizationCode]:
...
async def delete_authorization_code(self, *args, **kwargs) -> None:
...
async def authenticate(self, *args, **kwargs) -> bool:
...
class AuthubServer(AuthorizationServer):
def __init__(self, pool_or_conn=Depends(get_edgedb_pool)):
super().__init__(DB(pool_or_conn))
@catch_errors_and_unavailability
async def validate_authorize_request(self, request: OAuth2Request):
ResponseTypeClass: Union[
Type[ResponseTypeToken],
Type[ResponseTypeAuthorizationCode],
Type[ResponseTypeBase],
] = self.response_type.get(
request.query.response_type, ResponseTypeBase
)
response_type = ResponseTypeClass(db=self.db)
return await response_type.validate_request(request)
def get_router(app):
return router
@lru_cache()
def get_aioauth_settings():
settings = get_settings()
return Settings(
TOKEN_EXPIRES_IN=settings.token_expires_in,
AUTHORIZATION_CODE_EXPIRES_IN=settings.authorization_code_expires_in,
INSECURE_TRANSPORT=settings.debug,
)
def _url_for(base_url, name, **path_params):
return router.url_path_for(name, **path_params).make_absolute_url(base_url)
@lru_cache()
def _get_oauth2_scheme(base_url):
return OAuth2AuthorizationCodeBearer(
authorizationUrl=_url_for(base_url, "oauth2_authorize"),
tokenUrl=_url_for(base_url, "oauth2_token"),
auto_error=False,
)
def oauth2_schema(request: Request):
return _get_oauth2_scheme(str(request.base_url))
async def _oauth2_request(request: Request):
"""Converts fastapi Request instance to OAuth2Request instance"""
form = await request.form()
def get(user, query_params):
post = dict(form)
method = request.method
headers = CaseInsensitiveDict(**request.headers)
url = str(request.url)
return OAuth2Request(
settings=get_aioauth_settings(),
method=RequestMethod[method],
headers=headers,
post=Post(**post),
query=Query(**query_params),
url=url,
user=user,
)
return get
def _to_fastapi_response(oauth2_response: OAuth2Response):
"""Converts OAuth2Response instance to fastapi Response instance"""
response_content = (
oauth2_response.content._asdict()
if oauth2_response.content is not None
else {}
)
headers = dict(oauth2_response.headers)
status_code = oauth2_response.status_code
content = json.dumps(response_content)
return Response(content=content, headers=headers, status_code=status_code)
@router.get("/authorize")
async def oauth2_authorize(
client_id: UUID,
redirect_uri: str,
response_type: str,
scope: str,
request: Request,
idp_client_id: Optional[UUID] = None,
db=Depends(get_edgedb_pool),
server=Depends(AuthubServer),
oauth2_request=Depends(_oauth2_request),
):
"""Endpoint to interact with the resource owner and obtain an authorization
grant.
See Section 4.1.1: https://tools.ietf.org/html/rfc6749#section-4.1.1
"""
if idp_client_id:
await db.query_one(
IdPClient.select(filters=".id = <uuid>$id"),
id=idp_client_id,
)
query_params = dict(request.query_params)
query_params.pop("idp_client_id", None)
await server.validate_authorize_request(oauth2_request(True, query_params))
request.session["client_id"] = str(client_id)
request.session["redirect_uri"] = redirect_uri
request.session["response_type"] = response_type
request.session["scope"] = scope
if idp_client_id:
return RedirectResponse(
request.url_for("login", idp_client_id=idp_client_id)
)
clients = await db.query(IdPClient.select("id", "name"))
return {
client.name: request.url_for("login", idp_client_id=client.id)
for client in clients
}
async def oauth2_authorized(request: Request, user):
async for tx in request.app.state.db.retrying_transaction():
async with tx:
server = AuthubServer(tx)
query_params = {}
for key in ["client_id", "redirect_uri", "response_type", "scope"]:
query_params[key] = request.session.pop(key)
resp = await server.create_authorization_response(
(await _oauth2_request(request))(user, query_params)
)
return _to_fastapi_response(resp)
@router.post("/token")
async def oauth2_token(
request: Request, oauth2_request=Depends(_oauth2_request)
):
"""Endpoint to obtain an access and/or ID token by presenting an
authorization grant or refresh token.
See Section 4.1.3: https://tools.ietf.org/html/rfc6749#section-4.1.3
"""
class OAuth2ClientListOut(BaseModel):
client_id: str
href: str
@router.get("/clients", response_model=List[OAuth2ClientListOut])
async def list_oauth2_clients(request: Request, db=Depends(get_edgedb_pool)):
result = await db.query(Client.select("id", "client_id"))
return [
OAuth2ClientListOut(
client_id=obj.client_id,
href=request.url_for("get_oauth2_client", client_id=obj.id),
)
for obj in result
]
class OAuth2ClientIn(BaseModel):
grant_types: List[GrantType] = []
response_types: List[ResponseType] = []
redirect_uris: List[str] = []
scope: str = ""
class NewOAuth2Client(BaseModel):
client_id: str
client_secret: str
@router.post("/clients")
async def create_oauth2_clients(
client: OAuth2ClientIn, db=Depends(get_edgedb_pool)
):
client_obj = Client(client_secret=uuid4().hex, **client.dict())
result = await db.query_one(
with_block("oauth2")
+ "SELECT ( "
+ client_obj.insert(current_module="oauth2")
+ ") { client_id, client_secret }",
**client_obj.dict(exclude={"id"}),
)
return NewOAuth2Client(**Client.from_obj(result).dict())
class OAuth2ClientOut(BaseModel):
client_id: str
grant_types: List[GrantType] = []
response_types: List[ResponseType] = []
redirect_uris: List[str] = []
scope: str = ""
@router.get("/clients/{client_id}", response_model=OAuth2ClientOut)
async def get_oauth2_client(client_id: UUID, db=Depends(get_edgedb_pool)):
result = await db.query_one(
Client.select(
*OAuth2ClientOut.schema()["properties"], filters=".id = <uuid>$id"
),
id=client_id,
)
return OAuth2ClientOut(**Client.from_obj(result).dict())
@router.put("/clients/{client_id}", response_model=OAuth2ClientOut)
async def update_oauth2_client(
client_id: UUID, client: OAuth2ClientIn, db=Depends(get_edgedb_pool)
):
result = await db.query_one(
with_block("oauth2")
+ "SELECT ("
+ Client.construct(**client.dict()).update(filters=".id = <uuid>$id")
+ ") { "
+ ", ".join(OAuth2ClientOut.schema()["properties"])
+ "}",
id=client_id,
**client.dict(),
)
return OAuth2ClientOut(**Client.from_obj(result).dict())
@router.delete("/clients/{client_id}")
async def | (client_id: UUID, db=Depends(get_edgedb_pool)):
result = await db.query_one(
"""DELETE oauth2::Client FILTER .id = <uuid>$id""",
id=client_id,
)
if result:
return Response(status_code=status.HTTP_204_NO_CONTENT)
else:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND)
| delete_oauth2_client | identifier_name |
oauth2.py | import json
from functools import lru_cache
from typing import Optional, Union, Type, List
from uuid import uuid4, UUID
from aioauth.base.database import BaseDB
from aioauth.config import Settings
from aioauth.models import (
Token as OAuth2Token,
AuthorizationCode as OAuth2AuthorizationCode,
Client as OAuth2Client,
)
from aioauth.requests import Request as OAuth2Request, Post, Query
from aioauth.response_type import (
ResponseTypeBase,
ResponseTypeToken,
ResponseTypeAuthorizationCode,
)
from aioauth.responses import Response as OAuth2Response
from aioauth.server import AuthorizationServer
from aioauth.structures import CaseInsensitiveDict
from aioauth.types import (
RequestMethod,
GrantType,
ResponseType,
CodeChallengeMethod,
)
from aioauth.utils import catch_errors_and_unavailability
from fastapi import (
APIRouter,
Request,
Response,
Depends,
status,
HTTPException,
)
from fastapi.responses import RedirectResponse
from fastapi.security import OAuth2AuthorizationCodeBearer
from pydantic import BaseModel
from starlette.authentication import AuthenticationBackend
from .config import get_settings
from .http import get_edgedb_pool
from .models import DatabaseModel, User, IdPClient
from .orm import with_block, ComputableProperty
router = APIRouter(prefix="/oauth2", tags=["OAuth 2.0"])
class Client(DatabaseModel):
client_secret: str
grant_types: List[GrantType] = []
response_types: List[ResponseType] = []
redirect_uris: List[str] = []
scope: str = ""
ComputableProperty("client_id", "<str>__source__.id")
class AuthorizationCode(DatabaseModel):
code: str
client: Client
redirect_uri: str
response_type: ResponseType
scope: str
auth_time: int
code_challenge: Optional[str] = None
code_challenge_method: Optional[CodeChallengeMethod] = None
nonce: Optional[str] = None
class Token(DatabaseModel):
user: User
access_token: str
refresh_token: str
scope: str
issued_at: int
expires_in: int
client: Client
token_type: str = "Bearer"
revoked: bool = False
class OAuth2Backend(AuthenticationBackend):
async def authenticate(self, conn):
token = await _get_oauth2_scheme(str(conn.base_url))(conn)
print("token", token)
class DB(BaseDB):
"""Class for interacting with the database. Used by `AuthorizationServer`.
Here you need to override the methods that are responsible for creating tokens,
creating authorization code, getting a client from the database, etc.
"""
def __init__(self, pool_or_conn):
self._db = pool_or_conn
async def create_token(self, *args, **kwargs) -> OAuth2Token:
"""Create token code in db"""
token = await super().create_token(*args, **kwargs)
# NOTE: Save data from token to db here.
return token
async def create_authorization_code(
self, *args, **kwargs
) -> OAuth2AuthorizationCode:
"""Create authorization code in db"""
authorization_code = await super().create_authorization_code(
*args, **kwargs
)
data = authorization_code._asdict()
# TODO: handle None values for optional property
for key in list(data):
if data[key] is None:
data.pop(key)
client_id = data.pop("client_id")
obj = AuthorizationCode.construct(**data)
await self._db.query_one(
with_block("oauth2")
+ obj.insert(
current_module="oauth2",
client="(SELECT Client FILTER .id = <uuid>$client_id)",
),
client_id=client_id,
**data,
)
return authorization_code
async def get_token(self, *args, **kwargs) -> Optional[OAuth2Token]:
"""Get token from the database by provided request from user.
Returns:
Token: if token exists in db.
None: if no token in db.
"""
token_record = ...
if token_record is not None:
return OAuth2Token(
access_token=token_record.access_token,
refresh_token=token_record.refresh_token,
scope=token_record.scope,
issued_at=token_record.issued_at,
expires_in=token_record.expires_in,
client_id=token_record.client_id,
token_type=token_record.token_type,
revoked=token_record.revoked,
)
async def get_client(
self,
request: Request,
client_id: str,
client_secret: Optional[str] = None,
) -> Optional[OAuth2Client]:
"""Get client record from the database by provided request from user.
Returns:
`Client` instance if client exists in db.
`None` if no client in db.
"""
client_record = await self._db.query_one(
Client.select(*OAuth2Client._fields, filters=".id = <uuid>$id"),
id=client_id,
)
client_record = Client.from_obj(client_record)
if client_record is not None:
return OAuth2Client(
client_id=client_record.client_id,
client_secret=client_record.client_secret,
grant_types=client_record.grant_types,
response_types=client_record.response_types,
redirect_uris=client_record.redirect_uris,
scope=client_record.scope,
)
async def revoke_token(self, request: Request, token: str) -> None:
"""Revokes an existing token. The `revoked`
Flag of the Token must be set to True
"""
token_record = ...
token_record.revoked = True
token_record.save()
async def get_authorization_code(
self, *args, **kwargs
) -> Optional[OAuth2AuthorizationCode]:
...
async def delete_authorization_code(self, *args, **kwargs) -> None:
...
async def authenticate(self, *args, **kwargs) -> bool:
...
class AuthubServer(AuthorizationServer):
def __init__(self, pool_or_conn=Depends(get_edgedb_pool)):
super().__init__(DB(pool_or_conn))
@catch_errors_and_unavailability
async def validate_authorize_request(self, request: OAuth2Request):
ResponseTypeClass: Union[
Type[ResponseTypeToken],
Type[ResponseTypeAuthorizationCode],
Type[ResponseTypeBase],
] = self.response_type.get(
request.query.response_type, ResponseTypeBase
)
response_type = ResponseTypeClass(db=self.db)
return await response_type.validate_request(request)
def get_router(app):
return router
@lru_cache()
def get_aioauth_settings():
settings = get_settings()
return Settings(
TOKEN_EXPIRES_IN=settings.token_expires_in,
AUTHORIZATION_CODE_EXPIRES_IN=settings.authorization_code_expires_in,
INSECURE_TRANSPORT=settings.debug,
)
def _url_for(base_url, name, **path_params):
return router.url_path_for(name, **path_params).make_absolute_url(base_url)
@lru_cache()
def _get_oauth2_scheme(base_url):
return OAuth2AuthorizationCodeBearer(
authorizationUrl=_url_for(base_url, "oauth2_authorize"),
tokenUrl=_url_for(base_url, "oauth2_token"),
auto_error=False,
)
def oauth2_schema(request: Request):
return _get_oauth2_scheme(str(request.base_url))
async def _oauth2_request(request: Request):
"""Converts fastapi Request instance to OAuth2Request instance"""
form = await request.form()
def get(user, query_params):
post = dict(form)
method = request.method
headers = CaseInsensitiveDict(**request.headers)
url = str(request.url)
return OAuth2Request(
settings=get_aioauth_settings(),
method=RequestMethod[method],
headers=headers,
post=Post(**post),
query=Query(**query_params),
url=url,
user=user,
)
return get
def _to_fastapi_response(oauth2_response: OAuth2Response):
"""Converts OAuth2Response instance to fastapi Response instance"""
response_content = (
oauth2_response.content._asdict()
if oauth2_response.content is not None
else {}
)
headers = dict(oauth2_response.headers)
status_code = oauth2_response.status_code
content = json.dumps(response_content)
return Response(content=content, headers=headers, status_code=status_code)
@router.get("/authorize")
async def oauth2_authorize(
client_id: UUID,
redirect_uri: str,
response_type: str,
scope: str,
request: Request,
idp_client_id: Optional[UUID] = None,
db=Depends(get_edgedb_pool),
server=Depends(AuthubServer),
oauth2_request=Depends(_oauth2_request),
):
"""Endpoint to interact with the resource owner and obtain an authorization
grant.
See Section 4.1.1: https://tools.ietf.org/html/rfc6749#section-4.1.1
"""
if idp_client_id:
await db.query_one(
IdPClient.select(filters=".id = <uuid>$id"),
id=idp_client_id,
)
query_params = dict(request.query_params)
query_params.pop("idp_client_id", None)
await server.validate_authorize_request(oauth2_request(True, query_params))
request.session["client_id"] = str(client_id)
request.session["redirect_uri"] = redirect_uri
request.session["response_type"] = response_type
request.session["scope"] = scope
if idp_client_id:
return RedirectResponse(
request.url_for("login", idp_client_id=idp_client_id)
)
clients = await db.query(IdPClient.select("id", "name"))
return {
client.name: request.url_for("login", idp_client_id=client.id)
for client in clients
}
async def oauth2_authorized(request: Request, user):
async for tx in request.app.state.db.retrying_transaction():
async with tx:
server = AuthubServer(tx)
query_params = {}
for key in ["client_id", "redirect_uri", "response_type", "scope"]:
|
resp = await server.create_authorization_response(
(await _oauth2_request(request))(user, query_params)
)
return _to_fastapi_response(resp)
@router.post("/token")
async def oauth2_token(
request: Request, oauth2_request=Depends(_oauth2_request)
):
"""Endpoint to obtain an access and/or ID token by presenting an
authorization grant or refresh token.
See Section 4.1.3: https://tools.ietf.org/html/rfc6749#section-4.1.3
"""
class OAuth2ClientListOut(BaseModel):
client_id: str
href: str
@router.get("/clients", response_model=List[OAuth2ClientListOut])
async def list_oauth2_clients(request: Request, db=Depends(get_edgedb_pool)):
result = await db.query(Client.select("id", "client_id"))
return [
OAuth2ClientListOut(
client_id=obj.client_id,
href=request.url_for("get_oauth2_client", client_id=obj.id),
)
for obj in result
]
class OAuth2ClientIn(BaseModel):
grant_types: List[GrantType] = []
response_types: List[ResponseType] = []
redirect_uris: List[str] = []
scope: str = ""
class NewOAuth2Client(BaseModel):
client_id: str
client_secret: str
@router.post("/clients")
async def create_oauth2_clients(
client: OAuth2ClientIn, db=Depends(get_edgedb_pool)
):
client_obj = Client(client_secret=uuid4().hex, **client.dict())
result = await db.query_one(
with_block("oauth2")
+ "SELECT ( "
+ client_obj.insert(current_module="oauth2")
+ ") { client_id, client_secret }",
**client_obj.dict(exclude={"id"}),
)
return NewOAuth2Client(**Client.from_obj(result).dict())
class OAuth2ClientOut(BaseModel):
client_id: str
grant_types: List[GrantType] = []
response_types: List[ResponseType] = []
redirect_uris: List[str] = []
scope: str = ""
@router.get("/clients/{client_id}", response_model=OAuth2ClientOut)
async def get_oauth2_client(client_id: UUID, db=Depends(get_edgedb_pool)):
result = await db.query_one(
Client.select(
*OAuth2ClientOut.schema()["properties"], filters=".id = <uuid>$id"
),
id=client_id,
)
return OAuth2ClientOut(**Client.from_obj(result).dict())
@router.put("/clients/{client_id}", response_model=OAuth2ClientOut)
async def update_oauth2_client(
client_id: UUID, client: OAuth2ClientIn, db=Depends(get_edgedb_pool)
):
result = await db.query_one(
with_block("oauth2")
+ "SELECT ("
+ Client.construct(**client.dict()).update(filters=".id = <uuid>$id")
+ ") { "
+ ", ".join(OAuth2ClientOut.schema()["properties"])
+ "}",
id=client_id,
**client.dict(),
)
return OAuth2ClientOut(**Client.from_obj(result).dict())
@router.delete("/clients/{client_id}")
async def delete_oauth2_client(client_id: UUID, db=Depends(get_edgedb_pool)):
result = await db.query_one(
"""DELETE oauth2::Client FILTER .id = <uuid>$id""",
id=client_id,
)
if result:
return Response(status_code=status.HTTP_204_NO_CONTENT)
else:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND)
| query_params[key] = request.session.pop(key) | conditional_block |
oauth2.py | import json
from functools import lru_cache
from typing import Optional, Union, Type, List
from uuid import uuid4, UUID
from aioauth.base.database import BaseDB
from aioauth.config import Settings
from aioauth.models import (
Token as OAuth2Token,
AuthorizationCode as OAuth2AuthorizationCode,
Client as OAuth2Client,
)
from aioauth.requests import Request as OAuth2Request, Post, Query
from aioauth.response_type import (
ResponseTypeBase,
ResponseTypeToken,
ResponseTypeAuthorizationCode,
)
from aioauth.responses import Response as OAuth2Response
from aioauth.server import AuthorizationServer
from aioauth.structures import CaseInsensitiveDict
from aioauth.types import (
RequestMethod,
GrantType,
ResponseType,
CodeChallengeMethod,
)
from aioauth.utils import catch_errors_and_unavailability
from fastapi import (
APIRouter,
Request,
Response,
Depends,
status,
HTTPException,
)
from fastapi.responses import RedirectResponse
from fastapi.security import OAuth2AuthorizationCodeBearer
from pydantic import BaseModel
from starlette.authentication import AuthenticationBackend
from .config import get_settings
from .http import get_edgedb_pool
from .models import DatabaseModel, User, IdPClient
from .orm import with_block, ComputableProperty
router = APIRouter(prefix="/oauth2", tags=["OAuth 2.0"])
class Client(DatabaseModel):
client_secret: str
grant_types: List[GrantType] = []
response_types: List[ResponseType] = []
redirect_uris: List[str] = []
scope: str = ""
ComputableProperty("client_id", "<str>__source__.id")
class AuthorizationCode(DatabaseModel):
|
class Token(DatabaseModel):
user: User
access_token: str
refresh_token: str
scope: str
issued_at: int
expires_in: int
client: Client
token_type: str = "Bearer"
revoked: bool = False
class OAuth2Backend(AuthenticationBackend):
async def authenticate(self, conn):
token = await _get_oauth2_scheme(str(conn.base_url))(conn)
print("token", token)
class DB(BaseDB):
"""Class for interacting with the database. Used by `AuthorizationServer`.
Here you need to override the methods that are responsible for creating tokens,
creating authorization code, getting a client from the database, etc.
"""
def __init__(self, pool_or_conn):
self._db = pool_or_conn
async def create_token(self, *args, **kwargs) -> OAuth2Token:
"""Create token code in db"""
token = await super().create_token(*args, **kwargs)
# NOTE: Save data from token to db here.
return token
async def create_authorization_code(
self, *args, **kwargs
) -> OAuth2AuthorizationCode:
"""Create authorization code in db"""
authorization_code = await super().create_authorization_code(
*args, **kwargs
)
data = authorization_code._asdict()
# TODO: handle None values for optional property
for key in list(data):
if data[key] is None:
data.pop(key)
client_id = data.pop("client_id")
obj = AuthorizationCode.construct(**data)
await self._db.query_one(
with_block("oauth2")
+ obj.insert(
current_module="oauth2",
client="(SELECT Client FILTER .id = <uuid>$client_id)",
),
client_id=client_id,
**data,
)
return authorization_code
async def get_token(self, *args, **kwargs) -> Optional[OAuth2Token]:
"""Get token from the database by provided request from user.
Returns:
Token: if token exists in db.
None: if no token in db.
"""
token_record = ...
if token_record is not None:
return OAuth2Token(
access_token=token_record.access_token,
refresh_token=token_record.refresh_token,
scope=token_record.scope,
issued_at=token_record.issued_at,
expires_in=token_record.expires_in,
client_id=token_record.client_id,
token_type=token_record.token_type,
revoked=token_record.revoked,
)
async def get_client(
self,
request: Request,
client_id: str,
client_secret: Optional[str] = None,
) -> Optional[OAuth2Client]:
"""Get client record from the database by provided request from user.
Returns:
`Client` instance if client exists in db.
`None` if no client in db.
"""
client_record = await self._db.query_one(
Client.select(*OAuth2Client._fields, filters=".id = <uuid>$id"),
id=client_id,
)
client_record = Client.from_obj(client_record)
if client_record is not None:
return OAuth2Client(
client_id=client_record.client_id,
client_secret=client_record.client_secret,
grant_types=client_record.grant_types,
response_types=client_record.response_types,
redirect_uris=client_record.redirect_uris,
scope=client_record.scope,
)
async def revoke_token(self, request: Request, token: str) -> None:
"""Revokes an existing token. The `revoked`
Flag of the Token must be set to True
"""
token_record = ...
token_record.revoked = True
token_record.save()
async def get_authorization_code(
self, *args, **kwargs
) -> Optional[OAuth2AuthorizationCode]:
...
async def delete_authorization_code(self, *args, **kwargs) -> None:
...
async def authenticate(self, *args, **kwargs) -> bool:
...
class AuthubServer(AuthorizationServer):
def __init__(self, pool_or_conn=Depends(get_edgedb_pool)):
super().__init__(DB(pool_or_conn))
@catch_errors_and_unavailability
async def validate_authorize_request(self, request: OAuth2Request):
ResponseTypeClass: Union[
Type[ResponseTypeToken],
Type[ResponseTypeAuthorizationCode],
Type[ResponseTypeBase],
] = self.response_type.get(
request.query.response_type, ResponseTypeBase
)
response_type = ResponseTypeClass(db=self.db)
return await response_type.validate_request(request)
def get_router(app):
return router
@lru_cache()
def get_aioauth_settings():
settings = get_settings()
return Settings(
TOKEN_EXPIRES_IN=settings.token_expires_in,
AUTHORIZATION_CODE_EXPIRES_IN=settings.authorization_code_expires_in,
INSECURE_TRANSPORT=settings.debug,
)
def _url_for(base_url, name, **path_params):
return router.url_path_for(name, **path_params).make_absolute_url(base_url)
@lru_cache()
def _get_oauth2_scheme(base_url):
return OAuth2AuthorizationCodeBearer(
authorizationUrl=_url_for(base_url, "oauth2_authorize"),
tokenUrl=_url_for(base_url, "oauth2_token"),
auto_error=False,
)
def oauth2_schema(request: Request):
return _get_oauth2_scheme(str(request.base_url))
async def _oauth2_request(request: Request):
"""Converts fastapi Request instance to OAuth2Request instance"""
form = await request.form()
def get(user, query_params):
post = dict(form)
method = request.method
headers = CaseInsensitiveDict(**request.headers)
url = str(request.url)
return OAuth2Request(
settings=get_aioauth_settings(),
method=RequestMethod[method],
headers=headers,
post=Post(**post),
query=Query(**query_params),
url=url,
user=user,
)
return get
def _to_fastapi_response(oauth2_response: OAuth2Response):
"""Converts OAuth2Response instance to fastapi Response instance"""
response_content = (
oauth2_response.content._asdict()
if oauth2_response.content is not None
else {}
)
headers = dict(oauth2_response.headers)
status_code = oauth2_response.status_code
content = json.dumps(response_content)
return Response(content=content, headers=headers, status_code=status_code)
@router.get("/authorize")
async def oauth2_authorize(
client_id: UUID,
redirect_uri: str,
response_type: str,
scope: str,
request: Request,
idp_client_id: Optional[UUID] = None,
db=Depends(get_edgedb_pool),
server=Depends(AuthubServer),
oauth2_request=Depends(_oauth2_request),
):
"""Endpoint to interact with the resource owner and obtain an authorization
grant.
See Section 4.1.1: https://tools.ietf.org/html/rfc6749#section-4.1.1
"""
if idp_client_id:
await db.query_one(
IdPClient.select(filters=".id = <uuid>$id"),
id=idp_client_id,
)
query_params = dict(request.query_params)
query_params.pop("idp_client_id", None)
await server.validate_authorize_request(oauth2_request(True, query_params))
request.session["client_id"] = str(client_id)
request.session["redirect_uri"] = redirect_uri
request.session["response_type"] = response_type
request.session["scope"] = scope
if idp_client_id:
return RedirectResponse(
request.url_for("login", idp_client_id=idp_client_id)
)
clients = await db.query(IdPClient.select("id", "name"))
return {
client.name: request.url_for("login", idp_client_id=client.id)
for client in clients
}
async def oauth2_authorized(request: Request, user):
async for tx in request.app.state.db.retrying_transaction():
async with tx:
server = AuthubServer(tx)
query_params = {}
for key in ["client_id", "redirect_uri", "response_type", "scope"]:
query_params[key] = request.session.pop(key)
resp = await server.create_authorization_response(
(await _oauth2_request(request))(user, query_params)
)
return _to_fastapi_response(resp)
@router.post("/token")
async def oauth2_token(
request: Request, oauth2_request=Depends(_oauth2_request)
):
"""Endpoint to obtain an access and/or ID token by presenting an
authorization grant or refresh token.
See Section 4.1.3: https://tools.ietf.org/html/rfc6749#section-4.1.3
"""
class OAuth2ClientListOut(BaseModel):
client_id: str
href: str
@router.get("/clients", response_model=List[OAuth2ClientListOut])
async def list_oauth2_clients(request: Request, db=Depends(get_edgedb_pool)):
result = await db.query(Client.select("id", "client_id"))
return [
OAuth2ClientListOut(
client_id=obj.client_id,
href=request.url_for("get_oauth2_client", client_id=obj.id),
)
for obj in result
]
class OAuth2ClientIn(BaseModel):
grant_types: List[GrantType] = []
response_types: List[ResponseType] = []
redirect_uris: List[str] = []
scope: str = ""
class NewOAuth2Client(BaseModel):
client_id: str
client_secret: str
@router.post("/clients")
async def create_oauth2_clients(
client: OAuth2ClientIn, db=Depends(get_edgedb_pool)
):
client_obj = Client(client_secret=uuid4().hex, **client.dict())
result = await db.query_one(
with_block("oauth2")
+ "SELECT ( "
+ client_obj.insert(current_module="oauth2")
+ ") { client_id, client_secret }",
**client_obj.dict(exclude={"id"}),
)
return NewOAuth2Client(**Client.from_obj(result).dict())
class OAuth2ClientOut(BaseModel):
client_id: str
grant_types: List[GrantType] = []
response_types: List[ResponseType] = []
redirect_uris: List[str] = []
scope: str = ""
@router.get("/clients/{client_id}", response_model=OAuth2ClientOut)
async def get_oauth2_client(client_id: UUID, db=Depends(get_edgedb_pool)):
result = await db.query_one(
Client.select(
*OAuth2ClientOut.schema()["properties"], filters=".id = <uuid>$id"
),
id=client_id,
)
return OAuth2ClientOut(**Client.from_obj(result).dict())
@router.put("/clients/{client_id}", response_model=OAuth2ClientOut)
async def update_oauth2_client(
client_id: UUID, client: OAuth2ClientIn, db=Depends(get_edgedb_pool)
):
result = await db.query_one(
with_block("oauth2")
+ "SELECT ("
+ Client.construct(**client.dict()).update(filters=".id = <uuid>$id")
+ ") { "
+ ", ".join(OAuth2ClientOut.schema()["properties"])
+ "}",
id=client_id,
**client.dict(),
)
return OAuth2ClientOut(**Client.from_obj(result).dict())
@router.delete("/clients/{client_id}")
async def delete_oauth2_client(client_id: UUID, db=Depends(get_edgedb_pool)):
result = await db.query_one(
"""DELETE oauth2::Client FILTER .id = <uuid>$id""",
id=client_id,
)
if result:
return Response(status_code=status.HTTP_204_NO_CONTENT)
else:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND)
| code: str
client: Client
redirect_uri: str
response_type: ResponseType
scope: str
auth_time: int
code_challenge: Optional[str] = None
code_challenge_method: Optional[CodeChallengeMethod] = None
nonce: Optional[str] = None | identifier_body |
oauth2.py | import json
from functools import lru_cache
from typing import Optional, Union, Type, List
from uuid import uuid4, UUID
from aioauth.base.database import BaseDB
from aioauth.config import Settings
from aioauth.models import (
Token as OAuth2Token,
AuthorizationCode as OAuth2AuthorizationCode,
Client as OAuth2Client,
)
from aioauth.requests import Request as OAuth2Request, Post, Query
from aioauth.response_type import (
ResponseTypeBase,
ResponseTypeToken,
ResponseTypeAuthorizationCode,
)
from aioauth.responses import Response as OAuth2Response
from aioauth.server import AuthorizationServer
from aioauth.structures import CaseInsensitiveDict
from aioauth.types import (
RequestMethod,
GrantType,
ResponseType,
CodeChallengeMethod,
)
from aioauth.utils import catch_errors_and_unavailability
from fastapi import (
APIRouter,
Request,
Response,
Depends,
status,
HTTPException,
)
from fastapi.responses import RedirectResponse
from fastapi.security import OAuth2AuthorizationCodeBearer
from pydantic import BaseModel
from starlette.authentication import AuthenticationBackend
from .config import get_settings
from .http import get_edgedb_pool
from .models import DatabaseModel, User, IdPClient
from .orm import with_block, ComputableProperty
router = APIRouter(prefix="/oauth2", tags=["OAuth 2.0"])
class Client(DatabaseModel):
client_secret: str
grant_types: List[GrantType] = []
response_types: List[ResponseType] = []
redirect_uris: List[str] = []
scope: str = ""
ComputableProperty("client_id", "<str>__source__.id")
class AuthorizationCode(DatabaseModel):
code: str
client: Client
redirect_uri: str
response_type: ResponseType
scope: str
auth_time: int
code_challenge: Optional[str] = None
code_challenge_method: Optional[CodeChallengeMethod] = None
nonce: Optional[str] = None
class Token(DatabaseModel):
user: User
access_token: str
refresh_token: str
scope: str
issued_at: int
expires_in: int
client: Client
token_type: str = "Bearer"
revoked: bool = False
class OAuth2Backend(AuthenticationBackend):
async def authenticate(self, conn):
token = await _get_oauth2_scheme(str(conn.base_url))(conn)
print("token", token)
class DB(BaseDB):
"""Class for interacting with the database. Used by `AuthorizationServer`.
Here you need to override the methods that are responsible for creating tokens,
creating authorization code, getting a client from the database, etc.
"""
def __init__(self, pool_or_conn):
self._db = pool_or_conn
async def create_token(self, *args, **kwargs) -> OAuth2Token:
"""Create token code in db"""
token = await super().create_token(*args, **kwargs)
# NOTE: Save data from token to db here.
return token
async def create_authorization_code(
self, *args, **kwargs
) -> OAuth2AuthorizationCode:
"""Create authorization code in db"""
authorization_code = await super().create_authorization_code(
*args, **kwargs
)
data = authorization_code._asdict()
# TODO: handle None values for optional property
for key in list(data):
if data[key] is None:
data.pop(key)
client_id = data.pop("client_id")
obj = AuthorizationCode.construct(**data)
await self._db.query_one(
with_block("oauth2")
+ obj.insert(
current_module="oauth2",
client="(SELECT Client FILTER .id = <uuid>$client_id)",
), | )
return authorization_code
async def get_token(self, *args, **kwargs) -> Optional[OAuth2Token]:
"""Get token from the database by provided request from user.
Returns:
Token: if token exists in db.
None: if no token in db.
"""
token_record = ...
if token_record is not None:
return OAuth2Token(
access_token=token_record.access_token,
refresh_token=token_record.refresh_token,
scope=token_record.scope,
issued_at=token_record.issued_at,
expires_in=token_record.expires_in,
client_id=token_record.client_id,
token_type=token_record.token_type,
revoked=token_record.revoked,
)
async def get_client(
self,
request: Request,
client_id: str,
client_secret: Optional[str] = None,
) -> Optional[OAuth2Client]:
"""Get client record from the database by provided request from user.
Returns:
`Client` instance if client exists in db.
`None` if no client in db.
"""
client_record = await self._db.query_one(
Client.select(*OAuth2Client._fields, filters=".id = <uuid>$id"),
id=client_id,
)
client_record = Client.from_obj(client_record)
if client_record is not None:
return OAuth2Client(
client_id=client_record.client_id,
client_secret=client_record.client_secret,
grant_types=client_record.grant_types,
response_types=client_record.response_types,
redirect_uris=client_record.redirect_uris,
scope=client_record.scope,
)
async def revoke_token(self, request: Request, token: str) -> None:
"""Revokes an existing token. The `revoked`
Flag of the Token must be set to True
"""
token_record = ...
token_record.revoked = True
token_record.save()
async def get_authorization_code(
self, *args, **kwargs
) -> Optional[OAuth2AuthorizationCode]:
...
async def delete_authorization_code(self, *args, **kwargs) -> None:
...
async def authenticate(self, *args, **kwargs) -> bool:
...
class AuthubServer(AuthorizationServer):
def __init__(self, pool_or_conn=Depends(get_edgedb_pool)):
super().__init__(DB(pool_or_conn))
@catch_errors_and_unavailability
async def validate_authorize_request(self, request: OAuth2Request):
ResponseTypeClass: Union[
Type[ResponseTypeToken],
Type[ResponseTypeAuthorizationCode],
Type[ResponseTypeBase],
] = self.response_type.get(
request.query.response_type, ResponseTypeBase
)
response_type = ResponseTypeClass(db=self.db)
return await response_type.validate_request(request)
def get_router(app):
return router
@lru_cache()
def get_aioauth_settings():
settings = get_settings()
return Settings(
TOKEN_EXPIRES_IN=settings.token_expires_in,
AUTHORIZATION_CODE_EXPIRES_IN=settings.authorization_code_expires_in,
INSECURE_TRANSPORT=settings.debug,
)
def _url_for(base_url, name, **path_params):
return router.url_path_for(name, **path_params).make_absolute_url(base_url)
@lru_cache()
def _get_oauth2_scheme(base_url):
return OAuth2AuthorizationCodeBearer(
authorizationUrl=_url_for(base_url, "oauth2_authorize"),
tokenUrl=_url_for(base_url, "oauth2_token"),
auto_error=False,
)
def oauth2_schema(request: Request):
return _get_oauth2_scheme(str(request.base_url))
async def _oauth2_request(request: Request):
"""Converts fastapi Request instance to OAuth2Request instance"""
form = await request.form()
def get(user, query_params):
post = dict(form)
method = request.method
headers = CaseInsensitiveDict(**request.headers)
url = str(request.url)
return OAuth2Request(
settings=get_aioauth_settings(),
method=RequestMethod[method],
headers=headers,
post=Post(**post),
query=Query(**query_params),
url=url,
user=user,
)
return get
def _to_fastapi_response(oauth2_response: OAuth2Response):
"""Converts OAuth2Response instance to fastapi Response instance"""
response_content = (
oauth2_response.content._asdict()
if oauth2_response.content is not None
else {}
)
headers = dict(oauth2_response.headers)
status_code = oauth2_response.status_code
content = json.dumps(response_content)
return Response(content=content, headers=headers, status_code=status_code)
@router.get("/authorize")
async def oauth2_authorize(
client_id: UUID,
redirect_uri: str,
response_type: str,
scope: str,
request: Request,
idp_client_id: Optional[UUID] = None,
db=Depends(get_edgedb_pool),
server=Depends(AuthubServer),
oauth2_request=Depends(_oauth2_request),
):
"""Endpoint to interact with the resource owner and obtain an authorization
grant.
See Section 4.1.1: https://tools.ietf.org/html/rfc6749#section-4.1.1
"""
if idp_client_id:
await db.query_one(
IdPClient.select(filters=".id = <uuid>$id"),
id=idp_client_id,
)
query_params = dict(request.query_params)
query_params.pop("idp_client_id", None)
await server.validate_authorize_request(oauth2_request(True, query_params))
request.session["client_id"] = str(client_id)
request.session["redirect_uri"] = redirect_uri
request.session["response_type"] = response_type
request.session["scope"] = scope
if idp_client_id:
return RedirectResponse(
request.url_for("login", idp_client_id=idp_client_id)
)
clients = await db.query(IdPClient.select("id", "name"))
return {
client.name: request.url_for("login", idp_client_id=client.id)
for client in clients
}
async def oauth2_authorized(request: Request, user):
async for tx in request.app.state.db.retrying_transaction():
async with tx:
server = AuthubServer(tx)
query_params = {}
for key in ["client_id", "redirect_uri", "response_type", "scope"]:
query_params[key] = request.session.pop(key)
resp = await server.create_authorization_response(
(await _oauth2_request(request))(user, query_params)
)
return _to_fastapi_response(resp)
@router.post("/token")
async def oauth2_token(
request: Request, oauth2_request=Depends(_oauth2_request)
):
"""Endpoint to obtain an access and/or ID token by presenting an
authorization grant or refresh token.
See Section 4.1.3: https://tools.ietf.org/html/rfc6749#section-4.1.3
"""
class OAuth2ClientListOut(BaseModel):
client_id: str
href: str
@router.get("/clients", response_model=List[OAuth2ClientListOut])
async def list_oauth2_clients(request: Request, db=Depends(get_edgedb_pool)):
result = await db.query(Client.select("id", "client_id"))
return [
OAuth2ClientListOut(
client_id=obj.client_id,
href=request.url_for("get_oauth2_client", client_id=obj.id),
)
for obj in result
]
class OAuth2ClientIn(BaseModel):
grant_types: List[GrantType] = []
response_types: List[ResponseType] = []
redirect_uris: List[str] = []
scope: str = ""
class NewOAuth2Client(BaseModel):
client_id: str
client_secret: str
@router.post("/clients")
async def create_oauth2_clients(
client: OAuth2ClientIn, db=Depends(get_edgedb_pool)
):
client_obj = Client(client_secret=uuid4().hex, **client.dict())
result = await db.query_one(
with_block("oauth2")
+ "SELECT ( "
+ client_obj.insert(current_module="oauth2")
+ ") { client_id, client_secret }",
**client_obj.dict(exclude={"id"}),
)
return NewOAuth2Client(**Client.from_obj(result).dict())
class OAuth2ClientOut(BaseModel):
client_id: str
grant_types: List[GrantType] = []
response_types: List[ResponseType] = []
redirect_uris: List[str] = []
scope: str = ""
@router.get("/clients/{client_id}", response_model=OAuth2ClientOut)
async def get_oauth2_client(client_id: UUID, db=Depends(get_edgedb_pool)):
result = await db.query_one(
Client.select(
*OAuth2ClientOut.schema()["properties"], filters=".id = <uuid>$id"
),
id=client_id,
)
return OAuth2ClientOut(**Client.from_obj(result).dict())
@router.put("/clients/{client_id}", response_model=OAuth2ClientOut)
async def update_oauth2_client(
client_id: UUID, client: OAuth2ClientIn, db=Depends(get_edgedb_pool)
):
result = await db.query_one(
with_block("oauth2")
+ "SELECT ("
+ Client.construct(**client.dict()).update(filters=".id = <uuid>$id")
+ ") { "
+ ", ".join(OAuth2ClientOut.schema()["properties"])
+ "}",
id=client_id,
**client.dict(),
)
return OAuth2ClientOut(**Client.from_obj(result).dict())
@router.delete("/clients/{client_id}")
async def delete_oauth2_client(client_id: UUID, db=Depends(get_edgedb_pool)):
result = await db.query_one(
"""DELETE oauth2::Client FILTER .id = <uuid>$id""",
id=client_id,
)
if result:
return Response(status_code=status.HTTP_204_NO_CONTENT)
else:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND) | client_id=client_id,
**data, | random_line_split |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.