file_name large_stringlengths 4 140 | prefix large_stringlengths 0 39k | suffix large_stringlengths 0 36.1k | middle large_stringlengths 0 29.4k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
mod.rs | #![warn(missing_docs)]
//! Contains all data structures and method to work with model resources.
//!
//! Model is an isolated scene that is used to create copies of its data - this
//! process is known as `instantiation`. Isolation in this context means that
//! such scene cannot be modified, rendered, etc. It just a data source.
//!
//! All instances will have references to resource they were created from - this
//! will help to get correct vertex and indices buffers when loading a save file,
//! loader will just take all needed data from resource so we don't need to store
//! such data in save file. Also this mechanism works perfectly when you changing
//! resource in external editor (3Ds max, Maya, Blender, etc.) engine will assign
//! correct visual data when loading a saved game.
//!
//! # Supported formats
//!
//! Currently only FBX (common format in game industry for storing complex 3d models)
//! and RGS (native Fyroxed format) formats are supported.
use crate::{
animation::Animation,
asset::{
manager::ResourceManager, options::ImportOptions, Resource, ResourceData,
MODEL_RESOURCE_UUID,
},
core::{
algebra::{UnitQuaternion, Vector3},
log::{Log, MessageKind},
pool::Handle,
reflect::prelude::*,
uuid::Uuid,
variable::mark_inheritable_properties_non_modified,
visitor::{Visit, VisitError, VisitResult, Visitor},
TypeUuidProvider,
},
engine::SerializationContext,
resource::fbx::{self, error::FbxError},
scene::{
animation::AnimationPlayer,
graph::{map::NodeHandleMap, Graph},
node::Node,
Scene, SceneLoader,
},
};
use serde::{Deserialize, Serialize};
use std::{
any::Any,
borrow::Cow,
fmt::{Display, Formatter},
path::{Path, PathBuf},
sync::Arc,
};
use strum_macros::{AsRefStr, EnumString, EnumVariantNames};
pub mod loader;
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, Reflect)]
#[repr(u32)]
pub(crate) enum NodeMapping {
UseNames = 0,
UseHandles = 1,
}
/// See module docs.
#[derive(Debug, Visit, Reflect)]
pub struct Model {
pub(crate) path: PathBuf,
#[visit(skip)]
pub(crate) mapping: NodeMapping,
#[visit(skip)]
scene: Scene,
}
impl TypeUuidProvider for Model {
fn type_uuid() -> Uuid {
MODEL_RESOURCE_UUID
}
}
/// Type alias for model resources.
pub type ModelResource = Resource<Model>;
/// Extension trait for model resources.
pub trait ModelResourceExtension: Sized {
/// Tries to instantiate model from given resource.
fn instantiate_from(
model: ModelResource,
model_data: &Model,
handle: Handle<Node>,
dest_graph: &mut Graph,
) -> (Handle<Node>, NodeHandleMap);
/// Tries to instantiate model from given resource.
fn instantiate(&self, dest_scene: &mut Scene) -> Handle<Node>;
/// Instantiates a prefab and places it at specified position and orientation in global coordinates.
fn instantiate_at(
&self,
scene: &mut Scene,
position: Vector3<f32>,
orientation: UnitQuaternion<f32>,
) -> Handle<Node>;
/// Tries to retarget animations from given model resource to a node hierarchy starting
/// from `root` on a given scene.
///
/// Animation retargeting allows you to "transfer" animation from a model to a model
/// instance on a scene. Imagine you have a character that should have multiple animations
/// like idle, run, shoot, walk, etc. and you want to store each animation in a separate
/// file. Then when you creating a character on a level you want to have all possible
/// animations assigned to a character, this is where this function comes into play:
/// you just load a model of your character with skeleton, but without any animations,
/// then you load several "models" which have only skeleton with some animation (such
/// "models" can be considered as "animation" resources). After this you need to
/// instantiate model on your level and retarget all animations you need to that instance
/// from other "models". All you have after this is a handle to a model and bunch of
/// handles to specific animations. After this animations can be blended in any combinations
/// you need to. For example idle animation can be blended with walk animation when your
/// character starts walking.
///
/// # Notes
///
/// Most of the 3d model formats can contain only one animation, so in most cases
/// this function will return vector with only one animation.
fn retarget_animations_directly(&self, root: Handle<Node>, graph: &Graph) -> Vec<Animation>;
/// Tries to retarget animations from given model resource to a node hierarchy starting
/// from `root` on a given scene. Unlike [`Self::retarget_animations_directly`], it automatically
/// adds retargetted animations to the specified animation player in the hierarchy of given `root`.
///
/// # Panic
///
/// Panics if `dest_animation_player` is invalid handle, or the node does not have [`AnimationPlayer`]
/// component.
fn retarget_animations_to_player(
&self,
root: Handle<Node>,
dest_animation_player: Handle<Node>,
graph: &mut Graph,
) -> Vec<Handle<Animation>>;
/// Tries to retarget animations from given model resource to a node hierarchy starting
/// from `root` on a given scene. Unlike [`Self::retarget_animations_directly`], it automatically
/// adds retargetted animations to a first animation player in the hierarchy of given `root`.
///
/// # Panic
///
/// Panics if there's no animation player in the given hierarchy (descendant nodes of `root`).
fn retarget_animations(&self, root: Handle<Node>, graph: &mut Graph) -> Vec<Handle<Animation>>;
}
impl ModelResourceExtension for ModelResource {
fn instantiate_from(
model: ModelResource,
model_data: &Model,
handle: Handle<Node>,
dest_graph: &mut Graph,
) -> (Handle<Node>, NodeHandleMap) {
let (root, old_to_new) =
model_data
.scene
.graph
.copy_node(handle, dest_graph, &mut |_, _| true);
// Notify instantiated nodes about resource they were created from.
let mut stack = vec![root];
while let Some(node_handle) = stack.pop() {
let node = &mut dest_graph[node_handle];
node.resource = Some(model.clone());
// Reset resource instance root flag, this is needed because a node after instantiation cannot
// be a root anymore.
node.is_resource_instance_root = false;
// Reset inheritable properties, so property inheritance system will take properties
// from parent objects on resolve stage.
node.as_reflect_mut(&mut |node| mark_inheritable_properties_non_modified(node));
// Continue on children.
stack.extend_from_slice(node.children());
}
// Fill original handles to instances.
for (&old, &new) in old_to_new.inner().iter() {
dest_graph[new].original_handle_in_resource = old;
}
dest_graph.update_hierarchical_data_for_descendants(root);
(root, old_to_new)
}
fn instantiate(&self, dest_scene: &mut Scene) -> Handle<Node> {
let data = self.data_ref();
let instance_root = Self::instantiate_from(
self.clone(),
&data,
data.scene.graph.get_root(),
&mut dest_scene.graph,
)
.0;
dest_scene.graph[instance_root].is_resource_instance_root = true;
std::mem::drop(data);
instance_root
}
fn instantiate_at(
&self,
scene: &mut Scene,
position: Vector3<f32>,
orientation: UnitQuaternion<f32>,
) -> Handle<Node> {
let root = self.instantiate(scene);
scene.graph[root]
.local_transform_mut()
.set_position(position)
.set_rotation(orientation);
scene.graph.update_hierarchical_data_for_descendants(root);
root
}
fn retarget_animations_directly(&self, root: Handle<Node>, graph: &Graph) -> Vec<Animation> {
let mut retargetted_animations = Vec::new();
let data = self.data_ref();
for src_node_ref in data.scene.graph.linear_iter() {
if let Some(src_player) = src_node_ref.query_component_ref::<AnimationPlayer>() {
for src_anim in src_player.animations().iter() {
let mut anim_copy = src_anim.clone();
// Remap animation track nodes from resource to instance. This is required
// because we've made a plain copy and it has tracks with node handles mapped
// to nodes of internal scene.
for (i, ref_track) in src_anim.tracks().iter().enumerate() {
let ref_node = &data.scene.graph[ref_track.target()];
let track = &mut anim_copy.tracks_mut()[i];
// Find instantiated node that corresponds to node in resource
match graph.find_by_name(root, ref_node.name()) {
Some((instance_node, _)) => {
// One-to-one track mapping so there is [i] indexing.
track.set_target(instance_node);
}
None => {
track.set_target(Handle::NONE);
Log::writeln(
MessageKind::Error,
format!(
"Failed to retarget animation {:?} for node {}",
data.path(),
ref_node.name()
),
);
}
}
}
retargetted_animations.push(anim_copy);
}
}
}
retargetted_animations
}
fn retarget_animations_to_player(
&self,
root: Handle<Node>,
dest_animation_player: Handle<Node>,
graph: &mut Graph,
) -> Vec<Handle<Animation>> {
let mut animation_handles = Vec::new();
let animations = self.retarget_animations_directly(root, graph);
let dest_animation_player = graph[dest_animation_player]
.query_component_mut::<AnimationPlayer>()
.unwrap();
for animation in animations {
animation_handles.push(dest_animation_player.animations_mut().add(animation));
}
animation_handles
}
fn retarget_animations(&self, root: Handle<Node>, graph: &mut Graph) -> Vec<Handle<Animation>> {
if let Some((animation_player, _)) = graph.find(root, &mut |n| {
n.query_component_ref::<AnimationPlayer>().is_some()
}) {
self.retarget_animations_to_player(root, animation_player, graph)
} else {
Default::default()
}
}
}
impl ResourceData for Model {
fn path(&self) -> Cow<Path> {
Cow::Borrowed(&self.path)
}
fn set_path(&mut self, path: PathBuf) {
self.path = path;
}
fn as_any(&self) -> &dyn Any {
self
}
fn as_any_mut(&mut self) -> &mut dyn Any {
self
}
fn type_uuid(&self) -> Uuid {
<Self as TypeUuidProvider>::type_uuid()
}
}
impl Default for Model {
fn default() -> Self {
Self {
path: PathBuf::new(),
mapping: NodeMapping::UseNames,
scene: Scene::new(),
}
}
}
/// Defines a way of searching materials when loading a model resource from foreign file format such as FBX.
///
/// # Motivation
///
/// Most 3d model file formats store paths to external resources (textures and other things) as absolute paths,
/// which makes it impossible to use with "location-independent" application like games. To fix that issue, the
/// engine provides few ways of resolving paths to external resources. The engine starts resolving by stripping
/// everything but file name from an external resource's path, then it uses one of the following methods to find
/// a texture with the file name. It could look up on folders hierarchy by using [`MaterialSearchOptions::RecursiveUp`]
/// method, or even use global search starting from the working directory of your game
/// ([`MaterialSearchOptions::WorkingDirectory`])
#[derive(
Clone,
Debug,
Visit,
PartialEq,
Eq,
Deserialize,
Serialize,
Reflect,
AsRefStr,
EnumString,
EnumVariantNames,
)]
pub enum MaterialSearchOptions {
/// Search in specified materials directory. It is suitable for cases when
/// your model resource use shared textures.
///
/// # Platform specific
///
/// Works on every platform.
MaterialsDirectory(PathBuf),
/// Recursive-up search. It is suitable for cases when textures are placed
/// near your model resource. This is **default** option.
///
/// # Platform specific
///
/// Works on every platform.
RecursiveUp,
/// Global search starting from working directory. Slowest option with a lot of ambiguities -
/// it may load unexpected file in cases when there are two or more files with same name but
/// lying in different directories.
///
/// # Platform specific
///
/// WebAssembly - **not supported** due to lack of file system.
WorkingDirectory,
/// Try to use paths stored in the model resource directly. This options has limited usage,
/// it is suitable to load animations, or any other model which does not have any materials.
///
/// # Important notes
///
/// RGS (native engine scenes) files should be loaded with this option by default, otherwise
/// the engine won't be able to correctly find materials.
UsePathDirectly,
}
impl Default for MaterialSearchOptions {
fn default() -> Self {
Self::RecursiveUp
}
}
impl MaterialSearchOptions {
/// A helper to create MaterialsDirectory variant.
pub fn materials_directory<P: AsRef<Path>>(path: P) -> Self {
Self::MaterialsDirectory(path.as_ref().to_path_buf())
}
}
/// A set of options that will be applied to a model resource when loading it from external source.
///
/// # Details
///
/// The engine has a convenient way of storing import options in a `.options` files. For example you may
/// have a `foo.fbx` 3d model, to change import options create a new file with additional `.options`
/// extension: `foo.fbx.options`. The content of an options file could be something like this:
///
/// ```text
/// (
/// material_search_options: RecursiveUp
/// )
/// ```
///
/// Check documentation of the field of the structure for more info about each parameter.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default, Reflect, Eq)]
pub struct ModelImportOptions {
/// See [`MaterialSearchOptions`] docs for more info.
#[serde(default)]
pub material_search_options: MaterialSearchOptions,
}
impl ImportOptions for ModelImportOptions {}
/// All possible errors that may occur while trying to load model from some
/// data source.
#[derive(Debug)]
pub enum ModelLoadError {
/// An error occurred while reading a data source.
Visit(VisitError),
/// Format is not supported.
NotSupported(String),
/// An error occurred while loading FBX file.
Fbx(FbxError),
}
impl Display for ModelLoadError {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
match self {
ModelLoadError::Visit(v) => {
write!(f, "An error occurred while reading a data source {v:?}")
}
ModelLoadError::NotSupported(v) => {
write!(f, "Model format is not supported: {v}")
}
ModelLoadError::Fbx(v) => v.fmt(f),
}
}
}
impl From<FbxError> for ModelLoadError {
fn from(fbx: FbxError) -> Self {
ModelLoadError::Fbx(fbx)
}
}
impl From<VisitError> for ModelLoadError {
fn from(e: VisitError) -> Self |
}
impl Model {
pub(crate) async fn load<P: AsRef<Path>>(
path: P,
serialization_context: Arc<SerializationContext>,
resource_manager: ResourceManager,
model_import_options: ModelImportOptions,
) -> Result<Self, ModelLoadError> {
let extension = path
.as_ref()
.extension()
.unwrap_or_default()
.to_string_lossy()
.as_ref()
.to_lowercase();
let (scene, mapping) = match extension.as_ref() {
"fbx" => {
let mut scene = Scene::new();
if let Some(filename) = path.as_ref().file_name() {
let root = scene.graph.get_root();
scene.graph[root].set_name(&filename.to_string_lossy());
}
fbx::load_to_scene(
&mut scene,
resource_manager,
path.as_ref(),
&model_import_options,
)
.await?;
// Set NodeMapping::UseNames as mapping here because FBX does not have
// any persistent unique ids, and we have to use names.
(scene, NodeMapping::UseNames)
}
// Scene can be used directly as model resource. Such scenes can be created in
// Fyroxed.
"rgs" => (
SceneLoader::from_file(
path.as_ref(),
serialization_context,
resource_manager.clone(),
)
.await?
.finish()
.await,
NodeMapping::UseHandles,
),
// TODO: Add more formats.
_ => {
return Err(ModelLoadError::NotSupported(format!(
"Unsupported model resource format: {}",
extension
)))
}
};
Ok(Self {
path: path.as_ref().to_owned(),
scene,
mapping,
})
}
/// Returns shared reference to internal scene, there is no way to obtain
/// mutable reference to inner scene because resource is immutable source
/// of data.
pub fn get_scene(&self) -> &Scene {
&self.scene
}
/// Searches for a node in the model, starting from specified node using the specified closure. Returns a tuple with a
/// handle and a reference to the found node. If nothing is found, it returns [`None`].
pub fn find_node_by_name(&self, name: &str) -> Option<(Handle<Node>, &Node)> {
self.scene.graph.find_by_name_from_root(name)
}
pub(crate) fn get_scene_mut(&mut self) -> &mut Scene {
&mut self.scene
}
}
| {
ModelLoadError::Visit(e)
} | identifier_body |
mod.rs | #![warn(missing_docs)]
//! Contains all data structures and method to work with model resources.
//!
//! Model is an isolated scene that is used to create copies of its data - this
//! process is known as `instantiation`. Isolation in this context means that
//! such scene cannot be modified, rendered, etc. It just a data source.
//!
//! All instances will have references to resource they were created from - this
//! will help to get correct vertex and indices buffers when loading a save file,
//! loader will just take all needed data from resource so we don't need to store
//! such data in save file. Also this mechanism works perfectly when you changing
//! resource in external editor (3Ds max, Maya, Blender, etc.) engine will assign
//! correct visual data when loading a saved game.
//!
//! # Supported formats
//!
//! Currently only FBX (common format in game industry for storing complex 3d models)
//! and RGS (native Fyroxed format) formats are supported.
use crate::{
animation::Animation,
asset::{
manager::ResourceManager, options::ImportOptions, Resource, ResourceData,
MODEL_RESOURCE_UUID,
},
core::{
algebra::{UnitQuaternion, Vector3},
log::{Log, MessageKind},
pool::Handle,
reflect::prelude::*,
uuid::Uuid,
variable::mark_inheritable_properties_non_modified,
visitor::{Visit, VisitError, VisitResult, Visitor},
TypeUuidProvider,
},
engine::SerializationContext,
resource::fbx::{self, error::FbxError},
scene::{
animation::AnimationPlayer,
graph::{map::NodeHandleMap, Graph},
node::Node,
Scene, SceneLoader,
},
};
use serde::{Deserialize, Serialize};
use std::{
any::Any,
borrow::Cow,
fmt::{Display, Formatter},
path::{Path, PathBuf},
sync::Arc,
};
use strum_macros::{AsRefStr, EnumString, EnumVariantNames};
pub mod loader;
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, Reflect)]
#[repr(u32)]
pub(crate) enum NodeMapping {
UseNames = 0,
UseHandles = 1,
}
/// See module docs.
#[derive(Debug, Visit, Reflect)]
pub struct | {
pub(crate) path: PathBuf,
#[visit(skip)]
pub(crate) mapping: NodeMapping,
#[visit(skip)]
scene: Scene,
}
impl TypeUuidProvider for Model {
fn type_uuid() -> Uuid {
MODEL_RESOURCE_UUID
}
}
/// Type alias for model resources.
pub type ModelResource = Resource<Model>;
/// Extension trait for model resources.
pub trait ModelResourceExtension: Sized {
/// Tries to instantiate model from given resource.
fn instantiate_from(
model: ModelResource,
model_data: &Model,
handle: Handle<Node>,
dest_graph: &mut Graph,
) -> (Handle<Node>, NodeHandleMap);
/// Tries to instantiate model from given resource.
fn instantiate(&self, dest_scene: &mut Scene) -> Handle<Node>;
/// Instantiates a prefab and places it at specified position and orientation in global coordinates.
fn instantiate_at(
&self,
scene: &mut Scene,
position: Vector3<f32>,
orientation: UnitQuaternion<f32>,
) -> Handle<Node>;
/// Tries to retarget animations from given model resource to a node hierarchy starting
/// from `root` on a given scene.
///
/// Animation retargeting allows you to "transfer" animation from a model to a model
/// instance on a scene. Imagine you have a character that should have multiple animations
/// like idle, run, shoot, walk, etc. and you want to store each animation in a separate
/// file. Then when you creating a character on a level you want to have all possible
/// animations assigned to a character, this is where this function comes into play:
/// you just load a model of your character with skeleton, but without any animations,
/// then you load several "models" which have only skeleton with some animation (such
/// "models" can be considered as "animation" resources). After this you need to
/// instantiate model on your level and retarget all animations you need to that instance
/// from other "models". All you have after this is a handle to a model and bunch of
/// handles to specific animations. After this animations can be blended in any combinations
/// you need to. For example idle animation can be blended with walk animation when your
/// character starts walking.
///
/// # Notes
///
/// Most of the 3d model formats can contain only one animation, so in most cases
/// this function will return vector with only one animation.
fn retarget_animations_directly(&self, root: Handle<Node>, graph: &Graph) -> Vec<Animation>;
/// Tries to retarget animations from given model resource to a node hierarchy starting
/// from `root` on a given scene. Unlike [`Self::retarget_animations_directly`], it automatically
/// adds retargetted animations to the specified animation player in the hierarchy of given `root`.
///
/// # Panic
///
/// Panics if `dest_animation_player` is invalid handle, or the node does not have [`AnimationPlayer`]
/// component.
fn retarget_animations_to_player(
&self,
root: Handle<Node>,
dest_animation_player: Handle<Node>,
graph: &mut Graph,
) -> Vec<Handle<Animation>>;
/// Tries to retarget animations from given model resource to a node hierarchy starting
/// from `root` on a given scene. Unlike [`Self::retarget_animations_directly`], it automatically
/// adds retargetted animations to a first animation player in the hierarchy of given `root`.
///
/// # Panic
///
/// Panics if there's no animation player in the given hierarchy (descendant nodes of `root`).
fn retarget_animations(&self, root: Handle<Node>, graph: &mut Graph) -> Vec<Handle<Animation>>;
}
impl ModelResourceExtension for ModelResource {
fn instantiate_from(
model: ModelResource,
model_data: &Model,
handle: Handle<Node>,
dest_graph: &mut Graph,
) -> (Handle<Node>, NodeHandleMap) {
let (root, old_to_new) =
model_data
.scene
.graph
.copy_node(handle, dest_graph, &mut |_, _| true);
// Notify instantiated nodes about resource they were created from.
let mut stack = vec![root];
while let Some(node_handle) = stack.pop() {
let node = &mut dest_graph[node_handle];
node.resource = Some(model.clone());
// Reset resource instance root flag, this is needed because a node after instantiation cannot
// be a root anymore.
node.is_resource_instance_root = false;
// Reset inheritable properties, so property inheritance system will take properties
// from parent objects on resolve stage.
node.as_reflect_mut(&mut |node| mark_inheritable_properties_non_modified(node));
// Continue on children.
stack.extend_from_slice(node.children());
}
// Fill original handles to instances.
for (&old, &new) in old_to_new.inner().iter() {
dest_graph[new].original_handle_in_resource = old;
}
dest_graph.update_hierarchical_data_for_descendants(root);
(root, old_to_new)
}
fn instantiate(&self, dest_scene: &mut Scene) -> Handle<Node> {
let data = self.data_ref();
let instance_root = Self::instantiate_from(
self.clone(),
&data,
data.scene.graph.get_root(),
&mut dest_scene.graph,
)
.0;
dest_scene.graph[instance_root].is_resource_instance_root = true;
std::mem::drop(data);
instance_root
}
fn instantiate_at(
&self,
scene: &mut Scene,
position: Vector3<f32>,
orientation: UnitQuaternion<f32>,
) -> Handle<Node> {
let root = self.instantiate(scene);
scene.graph[root]
.local_transform_mut()
.set_position(position)
.set_rotation(orientation);
scene.graph.update_hierarchical_data_for_descendants(root);
root
}
fn retarget_animations_directly(&self, root: Handle<Node>, graph: &Graph) -> Vec<Animation> {
let mut retargetted_animations = Vec::new();
let data = self.data_ref();
for src_node_ref in data.scene.graph.linear_iter() {
if let Some(src_player) = src_node_ref.query_component_ref::<AnimationPlayer>() {
for src_anim in src_player.animations().iter() {
let mut anim_copy = src_anim.clone();
// Remap animation track nodes from resource to instance. This is required
// because we've made a plain copy and it has tracks with node handles mapped
// to nodes of internal scene.
for (i, ref_track) in src_anim.tracks().iter().enumerate() {
let ref_node = &data.scene.graph[ref_track.target()];
let track = &mut anim_copy.tracks_mut()[i];
// Find instantiated node that corresponds to node in resource
match graph.find_by_name(root, ref_node.name()) {
Some((instance_node, _)) => {
// One-to-one track mapping so there is [i] indexing.
track.set_target(instance_node);
}
None => {
track.set_target(Handle::NONE);
Log::writeln(
MessageKind::Error,
format!(
"Failed to retarget animation {:?} for node {}",
data.path(),
ref_node.name()
),
);
}
}
}
retargetted_animations.push(anim_copy);
}
}
}
retargetted_animations
}
fn retarget_animations_to_player(
&self,
root: Handle<Node>,
dest_animation_player: Handle<Node>,
graph: &mut Graph,
) -> Vec<Handle<Animation>> {
let mut animation_handles = Vec::new();
let animations = self.retarget_animations_directly(root, graph);
let dest_animation_player = graph[dest_animation_player]
.query_component_mut::<AnimationPlayer>()
.unwrap();
for animation in animations {
animation_handles.push(dest_animation_player.animations_mut().add(animation));
}
animation_handles
}
fn retarget_animations(&self, root: Handle<Node>, graph: &mut Graph) -> Vec<Handle<Animation>> {
if let Some((animation_player, _)) = graph.find(root, &mut |n| {
n.query_component_ref::<AnimationPlayer>().is_some()
}) {
self.retarget_animations_to_player(root, animation_player, graph)
} else {
Default::default()
}
}
}
impl ResourceData for Model {
fn path(&self) -> Cow<Path> {
Cow::Borrowed(&self.path)
}
fn set_path(&mut self, path: PathBuf) {
self.path = path;
}
fn as_any(&self) -> &dyn Any {
self
}
fn as_any_mut(&mut self) -> &mut dyn Any {
self
}
fn type_uuid(&self) -> Uuid {
<Self as TypeUuidProvider>::type_uuid()
}
}
impl Default for Model {
fn default() -> Self {
Self {
path: PathBuf::new(),
mapping: NodeMapping::UseNames,
scene: Scene::new(),
}
}
}
/// Defines a way of searching materials when loading a model resource from foreign file format such as FBX.
///
/// # Motivation
///
/// Most 3d model file formats store paths to external resources (textures and other things) as absolute paths,
/// which makes it impossible to use with "location-independent" application like games. To fix that issue, the
/// engine provides few ways of resolving paths to external resources. The engine starts resolving by stripping
/// everything but file name from an external resource's path, then it uses one of the following methods to find
/// a texture with the file name. It could look up on folders hierarchy by using [`MaterialSearchOptions::RecursiveUp`]
/// method, or even use global search starting from the working directory of your game
/// ([`MaterialSearchOptions::WorkingDirectory`])
#[derive(
Clone,
Debug,
Visit,
PartialEq,
Eq,
Deserialize,
Serialize,
Reflect,
AsRefStr,
EnumString,
EnumVariantNames,
)]
pub enum MaterialSearchOptions {
/// Search in specified materials directory. It is suitable for cases when
/// your model resource use shared textures.
///
/// # Platform specific
///
/// Works on every platform.
MaterialsDirectory(PathBuf),
/// Recursive-up search. It is suitable for cases when textures are placed
/// near your model resource. This is **default** option.
///
/// # Platform specific
///
/// Works on every platform.
RecursiveUp,
/// Global search starting from working directory. Slowest option with a lot of ambiguities -
/// it may load unexpected file in cases when there are two or more files with same name but
/// lying in different directories.
///
/// # Platform specific
///
/// WebAssembly - **not supported** due to lack of file system.
WorkingDirectory,
/// Try to use paths stored in the model resource directly. This options has limited usage,
/// it is suitable to load animations, or any other model which does not have any materials.
///
/// # Important notes
///
/// RGS (native engine scenes) files should be loaded with this option by default, otherwise
/// the engine won't be able to correctly find materials.
UsePathDirectly,
}
impl Default for MaterialSearchOptions {
fn default() -> Self {
Self::RecursiveUp
}
}
impl MaterialSearchOptions {
/// A helper to create MaterialsDirectory variant.
pub fn materials_directory<P: AsRef<Path>>(path: P) -> Self {
Self::MaterialsDirectory(path.as_ref().to_path_buf())
}
}
/// A set of options that will be applied to a model resource when loading it from external source.
///
/// # Details
///
/// The engine has a convenient way of storing import options in a `.options` files. For example you may
/// have a `foo.fbx` 3d model, to change import options create a new file with additional `.options`
/// extension: `foo.fbx.options`. The content of an options file could be something like this:
///
/// ```text
/// (
/// material_search_options: RecursiveUp
/// )
/// ```
///
/// Check documentation of the field of the structure for more info about each parameter.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default, Reflect, Eq)]
pub struct ModelImportOptions {
/// See [`MaterialSearchOptions`] docs for more info.
#[serde(default)]
pub material_search_options: MaterialSearchOptions,
}
impl ImportOptions for ModelImportOptions {}
/// All possible errors that may occur while trying to load model from some
/// data source.
#[derive(Debug)]
pub enum ModelLoadError {
/// An error occurred while reading a data source.
Visit(VisitError),
/// Format is not supported.
NotSupported(String),
/// An error occurred while loading FBX file.
Fbx(FbxError),
}
impl Display for ModelLoadError {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
match self {
ModelLoadError::Visit(v) => {
write!(f, "An error occurred while reading a data source {v:?}")
}
ModelLoadError::NotSupported(v) => {
write!(f, "Model format is not supported: {v}")
}
ModelLoadError::Fbx(v) => v.fmt(f),
}
}
}
impl From<FbxError> for ModelLoadError {
fn from(fbx: FbxError) -> Self {
ModelLoadError::Fbx(fbx)
}
}
impl From<VisitError> for ModelLoadError {
fn from(e: VisitError) -> Self {
ModelLoadError::Visit(e)
}
}
impl Model {
pub(crate) async fn load<P: AsRef<Path>>(
path: P,
serialization_context: Arc<SerializationContext>,
resource_manager: ResourceManager,
model_import_options: ModelImportOptions,
) -> Result<Self, ModelLoadError> {
let extension = path
.as_ref()
.extension()
.unwrap_or_default()
.to_string_lossy()
.as_ref()
.to_lowercase();
let (scene, mapping) = match extension.as_ref() {
"fbx" => {
let mut scene = Scene::new();
if let Some(filename) = path.as_ref().file_name() {
let root = scene.graph.get_root();
scene.graph[root].set_name(&filename.to_string_lossy());
}
fbx::load_to_scene(
&mut scene,
resource_manager,
path.as_ref(),
&model_import_options,
)
.await?;
// Set NodeMapping::UseNames as mapping here because FBX does not have
// any persistent unique ids, and we have to use names.
(scene, NodeMapping::UseNames)
}
// Scene can be used directly as model resource. Such scenes can be created in
// Fyroxed.
"rgs" => (
SceneLoader::from_file(
path.as_ref(),
serialization_context,
resource_manager.clone(),
)
.await?
.finish()
.await,
NodeMapping::UseHandles,
),
// TODO: Add more formats.
_ => {
return Err(ModelLoadError::NotSupported(format!(
"Unsupported model resource format: {}",
extension
)))
}
};
Ok(Self {
path: path.as_ref().to_owned(),
scene,
mapping,
})
}
/// Returns shared reference to internal scene, there is no way to obtain
/// mutable reference to inner scene because resource is immutable source
/// of data.
pub fn get_scene(&self) -> &Scene {
&self.scene
}
/// Searches for a node in the model, starting from specified node using the specified closure. Returns a tuple with a
/// handle and a reference to the found node. If nothing is found, it returns [`None`].
pub fn find_node_by_name(&self, name: &str) -> Option<(Handle<Node>, &Node)> {
self.scene.graph.find_by_name_from_root(name)
}
pub(crate) fn get_scene_mut(&mut self) -> &mut Scene {
&mut self.scene
}
}
| Model | identifier_name |
hypersonic.go | // Results from the competition are here:
// https://www.codingame.com/challengereport/5708992986028c48464b3d45bbb4a490b2d6015
package main
import "fmt"
//import "os"
import "bytes"
import "strconv"
// These indicate the contents of each cell of the floor in the input
const WALL = -2
const CELL = -1
const BOX = 0
const ITEM_RANGE = 1
const ITEM_BOMB = 2
const EXPLOSION = 32
// These are the values given to each type of cell during scoring of the floor
const WALL_SCORE = -20
const DANGER_SCORE = -30
const CELL_SCORE = -1
const TOO_FAR = 100
// These are entity types
const PLAYER = 0
const BOMB = 1
const ITEM = 2
// floor constraints
const WIDTH = 13
const HEIGHT = 11
// time it takes for a bomb to explode, in turns (useless?)
const MAX_BOMB_TIME = 8
// turns I'm willing to spend searching for a path to a particular cell
// I had timeouts when this was > 8, which of course would also depend upon the ineffficiency of my code at the time
const SEARCH_DEPTH_LIMIT = 8
type Bomb struct {
x, y int // coordinates of the bomb
time int // number of turns until explosion
reach int // number of cells it reaches in each direction
}
type Cell struct {
score int
distance int
}
func main() {
var width, height, myId int
fmt.Scan(&width, &height, &myId)
turnCounter := 0
myX, myY := 2, 2
var myReach int // explosion range of my bombs
var myBombCount int
var floor [WIDTH][HEIGHT]int
for {
bombsOnTheFloor := []Bomb{}
floor = [WIDTH][HEIGHT]int{}
for i := 0; i < height; i++ { // the grid of boxes and empty cells
var row string
fmt.Scan(&row)
// fmt.Fprintln(os.Stderr, row)
floor = buildTheFloor(row, i, floor)
}
// fmt.Fprintln(os.Stderr, floorToString(floor))
var entities int
fmt.Scan(&entities) // how many players & bombs are on the grid
for i := 0; i < entities; i++ { // info about all the players & bombs on the grid
var entityType, owner, x, y, param1, param2 int
fmt.Scan(&entityType, &owner, &x, &y, ¶m1, ¶m2)
if entityType == PLAYER && owner == myId {
myBombCount = param1
myReach = param2 // may have changed due to power ups
myX = x
myY = y
}
if entityType == BOMB { // don't bother going here (get x,y and affect their score somehow)
bombsOnTheFloor = append(bombsOnTheFloor,
Bomb{x: x, y: y, time: param1, reach: param2})
}
}
// fmt.Fprintln(os.Stderr, bombsOnTheFloor)
xT, yT := myX, myY
maxScore := Cell{score: WALL_SCORE, distance: TOO_FAR}
scoreFloor := scoreTheFloor(myX, myY, bombsOnTheFloor, myReach, floor)
scoreFloor = markBombs(bombsOnTheFloor, scoreFloor)
floor = transferExplosions(scoreFloor, floor)
for i := 0; i < WIDTH; i++ {
for j := 0; j < HEIGHT; j++ {
if scoreFloor[i][j].score > maxScore.score || (scoreFloor[i][j].score == maxScore.score && scoreFloor[i][j].distance < maxScore.distance) {
maxScore = scoreFloor[i][j]
xT = i
yT = j
}
}
}
// fmt.Fprintln(os.Stderr, fmt.Sprintf("MAX: %d, TARGET: %d, x: %d, y: %d", maxScore, scoreFloor[xT][yT], xT, yT))
// fmt.Fprintln(os.Stderr, scoreFloorToString(scoreFloor))
// fmt.Fprintln(os.Stderr, floorToString(floor))
// if canIBeHere(myX, myY, 0, bombsOnTheFloor, floor) && !canIBeHere(x, y, 1, bombsOnTheFloor, floor)
if myBombCount > 0 && myX == xT && myY == yT && canIEscapeThisBomb(myX, myY, Bomb{x: myX, y: myY, reach: myReach, time: MAX_BOMB_TIME}, MAX_BOMB_TIME, 0, myReach, bombsOnTheFloor, floor) { // drop bomb on current cell while moving toward target cell (could be equivalent)
fmt.Println(fmt.Sprintf("BOMB %d %d BUTT SOUP", xT, yT))
} else {
fmt.Println(fmt.Sprintf("MOVE %d %d (%d, %d) = %d", xT, yT, xT, yT, scoreFloor[xT][yT]))
}
turnCounter++
}
}
/**
*
**/
func canIEscapeThisBomb(myX int, myY int, bomb Bomb, turnLimit int, numTurns int, reach int, bombs []Bomb, floor [WIDTH][HEIGHT]int) bool {
// fmt.Fprintln(os.Stderr, fmt.Sprintf("MX: %d, MY: %d, BX: %d, BY: %d, t: %d, r: %d", myX, myY, bombX, bombY, numTurns, reach))
// Already safe on a diagonal from the bomb's cell, don't need to move
if myX != bomb.x && myY != bomb.y {return true}
// I'm lined up with the bomb, but out of its reach
// fmt.Fprintln(os.Stderr, fmt.Sprintf("myX: %d, myY: %d, BX: %d, BY: %d, turns: %d", myX, myY, bombX, bombY, numTurns))
if myX > bomb.x + reach || myX < bomb.x - reach || myY > bomb.y + reach || myY < bomb.y - reach {return true}
// fmt.Fprintln(os.Stderr, "MARKER 1")
// In danger, need to move, but there is no time left
if turnLimit - numTurns < 1 {return false}
// In danger, need to move, have some time left
if canIBeHere(myX+1, myY, numTurns+1, bombs, floor) && canIEscapeThisBomb(myX+1, myY, bomb, turnLimit, numTurns+1, reach, bombs, floor) {return true}
if canIBeHere(myX-1, myY, numTurns+1, bombs, floor) && canIEscapeThisBomb(myX-1, myY, bomb, turnLimit, numTurns+1, reach, bombs, floor) {return true}
if canIBeHere(myX, myY+1, numTurns+1, bombs, floor) && canIEscapeThisBomb(myX, myY+1, bomb, turnLimit, numTurns+1, reach, bombs, floor) {return true}
if canIBeHere(myX, myY-1, numTurns+1, bombs, floor) && canIEscapeThisBomb(myX, myY-1, bomb, turnLimit, numTurns+1, reach, bombs, floor) {return true}
// in danger, no where to go
return false
}
func willIDieHere(x int, y int, bombs []Bomb, floor [WIDTH][HEIGHT]int) bool {
for _, bomb := range bombs {
if canIEscapeThisBomb(x, y, bomb, bomb.time, 0, bomb.reach, bombs, floor) {
return false
}
}
return true
}
/**
* How many boxes are within bombing range of the given cell, are there items in those boxes, and can I get there?
**/
func scoreACell(x int, y int, myX int, myY int, bombsOnTheFloor []Bomb, myReach int, floor [WIDTH][HEIGHT]int) Cell {
if (myX != x || myY != y) { // I'm not already standing here
if !canIBeHere(x, y, 1, bombsOnTheFloor, floor) {return Cell{score: WALL_SCORE, distance: TOO_FAR}} // cannot move to here next turn
}
moves, maybe := canIGoToThere(myX, myY, myX, myY, x, y, SEARCH_DEPTH_LIMIT, bombsOnTheFloor, floor)
if !maybe {return Cell{score: WALL_SCORE, distance: TOO_FAR}} // cannot get here, even after multiple turns
if willIDieHere(x, y, bombsOnTheFloor, floor) {return Cell{score: DANGER_SCORE, distance: TOO_FAR}} // does not account for time left on the bomb, could optimize here rather than walling it off
score := 0
for i := 0; i < myReach; i++ {
if x+i < WIDTH && floor[x+i][y] >= BOX {score++}
if x-i > 0 && floor[x-i][y] >= BOX {score++}
if y+i < HEIGHT && floor[x][y+i] >= BOX {score++}
if y-i > 0 && floor[x][y-i] >= BOX {score++}
}
if floor[x][y] > BOX {score++} // there's an item in the box
return Cell{score: score, distance: moves}
}
func scoreTheFloor(myX int, myY int, bombsOnTheFloor []Bomb, myReach int, floor [WIDTH][HEIGHT]int) [WIDTH][HEIGHT]Cell{
scoreFloor := [WIDTH][HEIGHT]Cell{}
for i := 0; i < WIDTH; i++ {
for j := 0; j < HEIGHT; j++ {
scoreFloor[i][j] = scoreACell(i, j, myX, myY, bombsOnTheFloor, myReach, floor)
}
}
return scoreFloor
}
func canIGoToThere(x int, y int, myX int, myY int, xT int, yT int, moveLimit int, bombs []Bomb, floor [WIDTH][HEIGHT]int) (distance int, maybe bool) {
// fmt.Fprintln(os.Stderr, fmt.Sprintf("GO - x: %d, y: %d, m: %d", myX, myY, moves))
moves, minMoves := 0, TOO_FAR
yes, isPathFound := false, false
if moveLimit < 1 {return TOO_FAR, false}
// if it's not the cell that I'm already standing on, then ensure that I can stand on it when I get there
if (x != myX || y != myY) && !canIBeHere(myX, myY, 0, bombs, floor) {return TOO_FAR, false}
if myX == xT && myY == yT {return moves, true}
// try moving Right
moves, yes = canIGoToThere(x, y, myX+1, myY, xT, yT, moveLimit-1, bombs, floor)
if yes {
moves++
if moves < minMoves {minMoves = moves}
isPathFound = true
}
// try moving Left
moves, yes = canIGoToThere(x, y, myX-1, myY, xT, yT, moveLimit-1, bombs, floor)
if yes {
moves++
if moves < minMoves {minMoves = moves}
isPathFound = true
}
// try moving Down
moves, yes = canIGoToThere(x, y, myX, myY+1, xT, yT, moveLimit-1, bombs, floor)
if yes {
moves++
if moves < minMoves {minMoves = moves}
isPathFound = true
}
// try moving Up
moves, yes = canIGoToThere(x, y, myX, myY-1, xT, yT, moveLimit-1, bombs, floor)
if yes {
moves++
if moves < minMoves {minMoves = moves}
isPathFound = true
}
// all possibilities exhausted
return minMoves, isPathFound
}
func markBombs(bombs []Bomb, scoreFloor [WIDTH][HEIGHT]Cell) [WIDTH][HEIGHT]Cell {
var dangerScore int
for _, bomb := range bombs {
if bomb.time < 2 {
dangerScore = DANGER_SCORE
// on the bomb and it's exploding
scoreFloor[bomb.x][bomb.y].score = dangerScore
} else {
dangerScore = DANGER_SCORE / (bomb.time - 1) // treat the bomb's timer as if we'd already advanced to the next turn
}
// on the bomb
if scoreFloor[bomb.x][bomb.y].score > dangerScore {scoreFloor[bomb.x][bomb.y].score = WALL_SCORE}
// left of the bomb
for i := bomb.x; i >= bomb.x - bomb.reach; i-- {
if amIWithinTheBoundaries(i, 0) { // prevent array index out of bounds
if scoreFloor[i][bomb.y].score > dangerScore { // do not overwrite a score that's already even lower
scoreFloor[i][bomb.y].score = dangerScore
if scoreFloor[i][bomb.y].score == WALL_SCORE {break} // stop propagating the explosion in this direction, there is a blocker (wall or box)
}
}
}
// right of the bomb
for i := bomb.x; i <= bomb.x+bomb.reach; i++ {
if amIWithinTheBoundaries(i, 0) { // prevent array index out of bounds
if scoreFloor[i][bomb.y].score > dangerScore { // do not overwrite a score that's already even lower
scoreFloor[i][bomb.y].score = dangerScore
if scoreFloor[i][bomb.y].score == WALL_SCORE {break} // stop propagating the explosion in this direction, there is a blocker (wall or box)
}
}
}
// below the bomb
for i := bomb.y; i >= bomb.y - bomb.reach; i-- {
if amIWithinTheBoundaries(0, i) {
if scoreFloor[bomb.x][i].score > dangerScore {
scoreFloor[bomb.x][i].score = dangerScore
if scoreFloor[bomb.x][i].score == WALL_SCORE {break} // stop propagating the explosion in this direction, there is a blocker (wall or box)
}
}
}
// above the bomb
for i := bomb.y; i <= bomb.y+bomb.reach; i++ {
if amIWithinTheBoundaries(0, i) {
if scoreFloor[bomb.x][i].score > dangerScore {
scoreFloor[bomb.x][i].score = dangerScore
if scoreFloor[bomb.x][i].score == WALL_SCORE {break} // stop propagating the explosion in this direction, there is a blocker (wall or box)
}
}
}
}
return scoreFloor
}
func buildTheFloor(row string, y int, floor [WIDTH][HEIGHT]int) [WIDTH][HEIGHT]int {
width := len(row)
for x := 0; x < width; x++ {
if string(row[x]) == "." {
floor[x][y] = CELL
} else if string(row[x]) == "X" {
floor[x][y] = WALL
} else {
floor[x][y] = int(row[x] - '0')
}
}
return floor
}
func canIBeHere(x int, y int, timeElapsed int, bombs []Bomb, floor [WIDTH][HEIGHT]int) bool {
if !amIWithinTheBoundaries(x, y) {return false}
if floor[x][y] == WALL || floor[x][y] >= BOX || floor[x][y] == EXPLOSION {return false}
for _, bomb := range bombs {
if x == bomb.x && y == bomb.y {return false} // can't walk through bombs once they're placed
}
return true
}
func amIWithinTheBoundaries(x int, y int) bool |
// TODO: this does not account for walls & boxes, which block propagation of the explosion
func amIWithinTheBlastRadius(myX int, myY int, bomb Bomb) bool{
if myX > bomb.x + bomb.reach || myX < bomb.x - bomb.reach || myY > bomb.y + bomb.reach || myY < bomb.y - bomb.reach {return true}
return false
}
func transferExplosions(scoreFloor [WIDTH][HEIGHT]Cell, floor [WIDTH][HEIGHT]int) [WIDTH][HEIGHT]int {
for i := 0; i < WIDTH; i++ {
for j := 0; j < HEIGHT; j++ {
if scoreFloor[i][j].score == DANGER_SCORE {floor[i][j] = EXPLOSION}
}
}
return floor
}
func scoreFloorToString(floor [WIDTH][HEIGHT]Cell) string {
var buffer bytes.Buffer
var scoreStr, distanceStr string
for i := 0; i < HEIGHT; i++ {
for j := 0; j < WIDTH; j++ {
scoreStr = strconv.Itoa(floor[j][i].score)
distanceStr = strconv.Itoa(floor[j][i].distance)
buffer.WriteString("[")
for f := 0; f < 3 - len(scoreStr); f++ {buffer.WriteString(" ")}
buffer.WriteString(scoreStr)
buffer.WriteString(", ")
for f := 0; f < 3 - len(distanceStr); f++ {buffer.WriteString(" ")}
buffer.WriteString(distanceStr)
buffer.WriteString("]")
}
buffer.WriteString("\n")
}
return buffer.String()
}
func floorToString(floor [WIDTH][HEIGHT]int) string {
var buffer bytes.Buffer
var cell int
for i := 0; i < HEIGHT; i++ {
for j := 0; j < WIDTH; j++ {
cell = floor[j][i]
buffer.WriteString(" ")
if cell == BOX {buffer.WriteString("B")}
if cell == WALL {buffer.WriteString("W")}
if cell == CELL {buffer.WriteString("_")}
if cell == EXPLOSION {buffer.WriteString("E")}
if cell == ITEM_RANGE || cell == ITEM_BOMB {buffer.WriteString("I")}
buffer.WriteString(" ")
}
buffer.WriteString("\n")
}
return buffer.String()
}
| {
if x < 0 || x >= WIDTH || y < 0 || y >= HEIGHT {return false}
return true
} | identifier_body |
hypersonic.go | // Results from the competition are here:
// https://www.codingame.com/challengereport/5708992986028c48464b3d45bbb4a490b2d6015
package main
import "fmt"
//import "os"
import "bytes"
import "strconv"
// These indicate the contents of each cell of the floor in the input
const WALL = -2
const CELL = -1
const BOX = 0
const ITEM_RANGE = 1
const ITEM_BOMB = 2
const EXPLOSION = 32
// These are the values given to each type of cell during scoring of the floor
const WALL_SCORE = -20
const DANGER_SCORE = -30
const CELL_SCORE = -1
const TOO_FAR = 100
// These are entity types
const PLAYER = 0
const BOMB = 1
const ITEM = 2
// floor constraints
const WIDTH = 13
const HEIGHT = 11
// time it takes for a bomb to explode, in turns (useless?)
const MAX_BOMB_TIME = 8
// turns I'm willing to spend searching for a path to a particular cell
// I had timeouts when this was > 8, which of course would also depend upon the ineffficiency of my code at the time
const SEARCH_DEPTH_LIMIT = 8
type Bomb struct {
x, y int // coordinates of the bomb
time int // number of turns until explosion
reach int // number of cells it reaches in each direction
}
type Cell struct {
score int
distance int
}
func main() {
var width, height, myId int
fmt.Scan(&width, &height, &myId)
turnCounter := 0
myX, myY := 2, 2
var myReach int // explosion range of my bombs
var myBombCount int
var floor [WIDTH][HEIGHT]int
for {
bombsOnTheFloor := []Bomb{}
floor = [WIDTH][HEIGHT]int{}
for i := 0; i < height; i++ { // the grid of boxes and empty cells
var row string
fmt.Scan(&row)
// fmt.Fprintln(os.Stderr, row)
floor = buildTheFloor(row, i, floor)
}
// fmt.Fprintln(os.Stderr, floorToString(floor))
var entities int
fmt.Scan(&entities) // how many players & bombs are on the grid
for i := 0; i < entities; i++ { // info about all the players & bombs on the grid
var entityType, owner, x, y, param1, param2 int
fmt.Scan(&entityType, &owner, &x, &y, ¶m1, ¶m2)
if entityType == PLAYER && owner == myId {
myBombCount = param1
myReach = param2 // may have changed due to power ups
myX = x
myY = y
}
if entityType == BOMB { // don't bother going here (get x,y and affect their score somehow)
bombsOnTheFloor = append(bombsOnTheFloor,
Bomb{x: x, y: y, time: param1, reach: param2})
}
}
// fmt.Fprintln(os.Stderr, bombsOnTheFloor)
xT, yT := myX, myY
maxScore := Cell{score: WALL_SCORE, distance: TOO_FAR}
scoreFloor := scoreTheFloor(myX, myY, bombsOnTheFloor, myReach, floor)
scoreFloor = markBombs(bombsOnTheFloor, scoreFloor)
floor = transferExplosions(scoreFloor, floor)
for i := 0; i < WIDTH; i++ {
for j := 0; j < HEIGHT; j++ {
if scoreFloor[i][j].score > maxScore.score || (scoreFloor[i][j].score == maxScore.score && scoreFloor[i][j].distance < maxScore.distance) {
maxScore = scoreFloor[i][j]
xT = i
yT = j
}
}
}
// fmt.Fprintln(os.Stderr, fmt.Sprintf("MAX: %d, TARGET: %d, x: %d, y: %d", maxScore, scoreFloor[xT][yT], xT, yT))
// fmt.Fprintln(os.Stderr, scoreFloorToString(scoreFloor))
// fmt.Fprintln(os.Stderr, floorToString(floor))
// if canIBeHere(myX, myY, 0, bombsOnTheFloor, floor) && !canIBeHere(x, y, 1, bombsOnTheFloor, floor)
if myBombCount > 0 && myX == xT && myY == yT && canIEscapeThisBomb(myX, myY, Bomb{x: myX, y: myY, reach: myReach, time: MAX_BOMB_TIME}, MAX_BOMB_TIME, 0, myReach, bombsOnTheFloor, floor) { // drop bomb on current cell while moving toward target cell (could be equivalent)
fmt.Println(fmt.Sprintf("BOMB %d %d BUTT SOUP", xT, yT))
} else {
fmt.Println(fmt.Sprintf("MOVE %d %d (%d, %d) = %d", xT, yT, xT, yT, scoreFloor[xT][yT]))
}
turnCounter++
}
}
/**
*
**/
func canIEscapeThisBomb(myX int, myY int, bomb Bomb, turnLimit int, numTurns int, reach int, bombs []Bomb, floor [WIDTH][HEIGHT]int) bool {
// fmt.Fprintln(os.Stderr, fmt.Sprintf("MX: %d, MY: %d, BX: %d, BY: %d, t: %d, r: %d", myX, myY, bombX, bombY, numTurns, reach))
// Already safe on a diagonal from the bomb's cell, don't need to move
if myX != bomb.x && myY != bomb.y {return true}
// I'm lined up with the bomb, but out of its reach
// fmt.Fprintln(os.Stderr, fmt.Sprintf("myX: %d, myY: %d, BX: %d, BY: %d, turns: %d", myX, myY, bombX, bombY, numTurns))
if myX > bomb.x + reach || myX < bomb.x - reach || myY > bomb.y + reach || myY < bomb.y - reach {return true}
// fmt.Fprintln(os.Stderr, "MARKER 1")
// In danger, need to move, but there is no time left
if turnLimit - numTurns < 1 {return false}
// In danger, need to move, have some time left
if canIBeHere(myX+1, myY, numTurns+1, bombs, floor) && canIEscapeThisBomb(myX+1, myY, bomb, turnLimit, numTurns+1, reach, bombs, floor) {return true}
if canIBeHere(myX-1, myY, numTurns+1, bombs, floor) && canIEscapeThisBomb(myX-1, myY, bomb, turnLimit, numTurns+1, reach, bombs, floor) {return true}
if canIBeHere(myX, myY+1, numTurns+1, bombs, floor) && canIEscapeThisBomb(myX, myY+1, bomb, turnLimit, numTurns+1, reach, bombs, floor) {return true}
if canIBeHere(myX, myY-1, numTurns+1, bombs, floor) && canIEscapeThisBomb(myX, myY-1, bomb, turnLimit, numTurns+1, reach, bombs, floor) {return true}
// in danger, no where to go
return false
}
func willIDieHere(x int, y int, bombs []Bomb, floor [WIDTH][HEIGHT]int) bool {
for _, bomb := range bombs {
if canIEscapeThisBomb(x, y, bomb, bomb.time, 0, bomb.reach, bombs, floor) {
return false
}
}
return true
}
/**
* How many boxes are within bombing range of the given cell, are there items in those boxes, and can I get there?
**/
func scoreACell(x int, y int, myX int, myY int, bombsOnTheFloor []Bomb, myReach int, floor [WIDTH][HEIGHT]int) Cell {
if (myX != x || myY != y) { // I'm not already standing here
if !canIBeHere(x, y, 1, bombsOnTheFloor, floor) {return Cell{score: WALL_SCORE, distance: TOO_FAR}} // cannot move to here next turn
}
moves, maybe := canIGoToThere(myX, myY, myX, myY, x, y, SEARCH_DEPTH_LIMIT, bombsOnTheFloor, floor)
if !maybe {return Cell{score: WALL_SCORE, distance: TOO_FAR}} // cannot get here, even after multiple turns
if willIDieHere(x, y, bombsOnTheFloor, floor) {return Cell{score: DANGER_SCORE, distance: TOO_FAR}} // does not account for time left on the bomb, could optimize here rather than walling it off
score := 0
for i := 0; i < myReach; i++ {
if x+i < WIDTH && floor[x+i][y] >= BOX {score++}
if x-i > 0 && floor[x-i][y] >= BOX {score++}
if y+i < HEIGHT && floor[x][y+i] >= BOX {score++}
if y-i > 0 && floor[x][y-i] >= BOX {score++}
}
if floor[x][y] > BOX {score++} // there's an item in the box
return Cell{score: score, distance: moves}
}
func scoreTheFloor(myX int, myY int, bombsOnTheFloor []Bomb, myReach int, floor [WIDTH][HEIGHT]int) [WIDTH][HEIGHT]Cell{
scoreFloor := [WIDTH][HEIGHT]Cell{}
for i := 0; i < WIDTH; i++ {
for j := 0; j < HEIGHT; j++ {
scoreFloor[i][j] = scoreACell(i, j, myX, myY, bombsOnTheFloor, myReach, floor)
}
}
return scoreFloor
}
func canIGoToThere(x int, y int, myX int, myY int, xT int, yT int, moveLimit int, bombs []Bomb, floor [WIDTH][HEIGHT]int) (distance int, maybe bool) {
// fmt.Fprintln(os.Stderr, fmt.Sprintf("GO - x: %d, y: %d, m: %d", myX, myY, moves))
moves, minMoves := 0, TOO_FAR
yes, isPathFound := false, false
if moveLimit < 1 {return TOO_FAR, false}
// if it's not the cell that I'm already standing on, then ensure that I can stand on it when I get there
if (x != myX || y != myY) && !canIBeHere(myX, myY, 0, bombs, floor) {return TOO_FAR, false}
if myX == xT && myY == yT {return moves, true}
// try moving Right
moves, yes = canIGoToThere(x, y, myX+1, myY, xT, yT, moveLimit-1, bombs, floor)
if yes {
moves++
if moves < minMoves {minMoves = moves}
isPathFound = true
}
// try moving Left
moves, yes = canIGoToThere(x, y, myX-1, myY, xT, yT, moveLimit-1, bombs, floor)
if yes {
moves++
if moves < minMoves {minMoves = moves}
isPathFound = true
}
// try moving Down
moves, yes = canIGoToThere(x, y, myX, myY+1, xT, yT, moveLimit-1, bombs, floor)
if yes {
moves++
if moves < minMoves {minMoves = moves}
isPathFound = true
}
// try moving Up
moves, yes = canIGoToThere(x, y, myX, myY-1, xT, yT, moveLimit-1, bombs, floor)
if yes {
moves++
if moves < minMoves {minMoves = moves}
isPathFound = true
}
// all possibilities exhausted
return minMoves, isPathFound
}
func markBombs(bombs []Bomb, scoreFloor [WIDTH][HEIGHT]Cell) [WIDTH][HEIGHT]Cell {
var dangerScore int
for _, bomb := range bombs {
if bomb.time < 2 {
dangerScore = DANGER_SCORE
// on the bomb and it's exploding
scoreFloor[bomb.x][bomb.y].score = dangerScore
} else {
dangerScore = DANGER_SCORE / (bomb.time - 1) // treat the bomb's timer as if we'd already advanced to the next turn
}
// on the bomb
if scoreFloor[bomb.x][bomb.y].score > dangerScore {scoreFloor[bomb.x][bomb.y].score = WALL_SCORE}
// left of the bomb
for i := bomb.x; i >= bomb.x - bomb.reach; i-- {
if amIWithinTheBoundaries(i, 0) { // prevent array index out of bounds
if scoreFloor[i][bomb.y].score > dangerScore { // do not overwrite a score that's already even lower
scoreFloor[i][bomb.y].score = dangerScore
if scoreFloor[i][bomb.y].score == WALL_SCORE {break} // stop propagating the explosion in this direction, there is a blocker (wall or box)
}
}
}
// right of the bomb
for i := bomb.x; i <= bomb.x+bomb.reach; i++ {
if amIWithinTheBoundaries(i, 0) { // prevent array index out of bounds
if scoreFloor[i][bomb.y].score > dangerScore { // do not overwrite a score that's already even lower
scoreFloor[i][bomb.y].score = dangerScore
if scoreFloor[i][bomb.y].score == WALL_SCORE {break} // stop propagating the explosion in this direction, there is a blocker (wall or box)
}
}
}
// below the bomb
for i := bomb.y; i >= bomb.y - bomb.reach; i-- {
if amIWithinTheBoundaries(0, i) {
if scoreFloor[bomb.x][i].score > dangerScore {
scoreFloor[bomb.x][i].score = dangerScore
if scoreFloor[bomb.x][i].score == WALL_SCORE {break} // stop propagating the explosion in this direction, there is a blocker (wall or box)
}
}
}
// above the bomb
for i := bomb.y; i <= bomb.y+bomb.reach; i++ {
if amIWithinTheBoundaries(0, i) {
if scoreFloor[bomb.x][i].score > dangerScore {
scoreFloor[bomb.x][i].score = dangerScore
if scoreFloor[bomb.x][i].score == WALL_SCORE {break} // stop propagating the explosion in this direction, there is a blocker (wall or box)
}
}
}
}
return scoreFloor
}
func buildTheFloor(row string, y int, floor [WIDTH][HEIGHT]int) [WIDTH][HEIGHT]int {
width := len(row)
for x := 0; x < width; x++ {
if string(row[x]) == "." {
floor[x][y] = CELL
} else if string(row[x]) == "X" {
floor[x][y] = WALL
} else {
floor[x][y] = int(row[x] - '0')
}
}
return floor
}
func canIBeHere(x int, y int, timeElapsed int, bombs []Bomb, floor [WIDTH][HEIGHT]int) bool {
if !amIWithinTheBoundaries(x, y) {return false}
if floor[x][y] == WALL || floor[x][y] >= BOX || floor[x][y] == EXPLOSION {return false}
for _, bomb := range bombs {
if x == bomb.x && y == bomb.y {return false} // can't walk through bombs once they're placed
}
return true
}
func amIWithinTheBoundaries(x int, y int) bool {
if x < 0 || x >= WIDTH || y < 0 || y >= HEIGHT {return false}
return true
}
// TODO: this does not account for walls & boxes, which block propagation of the explosion
func amIWithinTheBlastRadius(myX int, myY int, bomb Bomb) bool{
if myX > bomb.x + bomb.reach || myX < bomb.x - bomb.reach || myY > bomb.y + bomb.reach || myY < bomb.y - bomb.reach {return true}
return false
}
func transferExplosions(scoreFloor [WIDTH][HEIGHT]Cell, floor [WIDTH][HEIGHT]int) [WIDTH][HEIGHT]int {
for i := 0; i < WIDTH; i++ {
for j := 0; j < HEIGHT; j++ {
if scoreFloor[i][j].score == DANGER_SCORE {floor[i][j] = EXPLOSION}
}
}
return floor
}
func scoreFloorToString(floor [WIDTH][HEIGHT]Cell) string {
var buffer bytes.Buffer
var scoreStr, distanceStr string
for i := 0; i < HEIGHT; i++ {
for j := 0; j < WIDTH; j++ {
scoreStr = strconv.Itoa(floor[j][i].score)
distanceStr = strconv.Itoa(floor[j][i].distance)
buffer.WriteString("[")
for f := 0; f < 3 - len(scoreStr); f++ {buffer.WriteString(" ")}
buffer.WriteString(scoreStr)
buffer.WriteString(", ")
for f := 0; f < 3 - len(distanceStr); f++ {buffer.WriteString(" ")}
buffer.WriteString(distanceStr)
buffer.WriteString("]")
}
buffer.WriteString("\n")
}
return buffer.String()
}
func floorToString(floor [WIDTH][HEIGHT]int) string {
var buffer bytes.Buffer
var cell int
for i := 0; i < HEIGHT; i++ {
for j := 0; j < WIDTH; j++ {
cell = floor[j][i]
buffer.WriteString(" ")
if cell == BOX {buffer.WriteString("B")}
if cell == WALL {buffer.WriteString("W")}
if cell == CELL {buffer.WriteString("_")}
if cell == EXPLOSION |
if cell == ITEM_RANGE || cell == ITEM_BOMB {buffer.WriteString("I")}
buffer.WriteString(" ")
}
buffer.WriteString("\n")
}
return buffer.String()
}
| {buffer.WriteString("E")} | conditional_block |
hypersonic.go | // Results from the competition are here:
// https://www.codingame.com/challengereport/5708992986028c48464b3d45bbb4a490b2d6015
package main
import "fmt"
//import "os"
import "bytes"
import "strconv"
// These indicate the contents of each cell of the floor in the input
const WALL = -2
const CELL = -1
const BOX = 0
const ITEM_RANGE = 1
const ITEM_BOMB = 2
const EXPLOSION = 32
// These are the values given to each type of cell during scoring of the floor
const WALL_SCORE = -20
const DANGER_SCORE = -30
const CELL_SCORE = -1
const TOO_FAR = 100
// These are entity types
const PLAYER = 0
const BOMB = 1
const ITEM = 2
// floor constraints
const WIDTH = 13
const HEIGHT = 11
// time it takes for a bomb to explode, in turns (useless?)
const MAX_BOMB_TIME = 8
// turns I'm willing to spend searching for a path to a particular cell
// I had timeouts when this was > 8, which of course would also depend upon the ineffficiency of my code at the time
const SEARCH_DEPTH_LIMIT = 8
type Bomb struct {
x, y int // coordinates of the bomb
time int // number of turns until explosion
reach int // number of cells it reaches in each direction
}
type Cell struct {
score int
distance int
}
func main() {
var width, height, myId int
fmt.Scan(&width, &height, &myId)
turnCounter := 0
myX, myY := 2, 2
var myReach int // explosion range of my bombs
var myBombCount int
var floor [WIDTH][HEIGHT]int
for {
bombsOnTheFloor := []Bomb{}
floor = [WIDTH][HEIGHT]int{}
for i := 0; i < height; i++ { // the grid of boxes and empty cells
var row string
fmt.Scan(&row)
// fmt.Fprintln(os.Stderr, row)
floor = buildTheFloor(row, i, floor)
}
// fmt.Fprintln(os.Stderr, floorToString(floor))
var entities int
fmt.Scan(&entities) // how many players & bombs are on the grid
for i := 0; i < entities; i++ { // info about all the players & bombs on the grid
var entityType, owner, x, y, param1, param2 int
fmt.Scan(&entityType, &owner, &x, &y, ¶m1, ¶m2)
if entityType == PLAYER && owner == myId {
myBombCount = param1
myReach = param2 // may have changed due to power ups
myX = x
myY = y
}
if entityType == BOMB { // don't bother going here (get x,y and affect their score somehow)
bombsOnTheFloor = append(bombsOnTheFloor,
Bomb{x: x, y: y, time: param1, reach: param2})
}
}
// fmt.Fprintln(os.Stderr, bombsOnTheFloor)
xT, yT := myX, myY
maxScore := Cell{score: WALL_SCORE, distance: TOO_FAR}
scoreFloor := scoreTheFloor(myX, myY, bombsOnTheFloor, myReach, floor)
scoreFloor = markBombs(bombsOnTheFloor, scoreFloor)
floor = transferExplosions(scoreFloor, floor)
for i := 0; i < WIDTH; i++ {
for j := 0; j < HEIGHT; j++ {
if scoreFloor[i][j].score > maxScore.score || (scoreFloor[i][j].score == maxScore.score && scoreFloor[i][j].distance < maxScore.distance) {
maxScore = scoreFloor[i][j]
xT = i
yT = j
}
}
}
// fmt.Fprintln(os.Stderr, fmt.Sprintf("MAX: %d, TARGET: %d, x: %d, y: %d", maxScore, scoreFloor[xT][yT], xT, yT))
// fmt.Fprintln(os.Stderr, scoreFloorToString(scoreFloor))
// fmt.Fprintln(os.Stderr, floorToString(floor))
// if canIBeHere(myX, myY, 0, bombsOnTheFloor, floor) && !canIBeHere(x, y, 1, bombsOnTheFloor, floor)
if myBombCount > 0 && myX == xT && myY == yT && canIEscapeThisBomb(myX, myY, Bomb{x: myX, y: myY, reach: myReach, time: MAX_BOMB_TIME}, MAX_BOMB_TIME, 0, myReach, bombsOnTheFloor, floor) { // drop bomb on current cell while moving toward target cell (could be equivalent)
fmt.Println(fmt.Sprintf("BOMB %d %d BUTT SOUP", xT, yT))
} else {
fmt.Println(fmt.Sprintf("MOVE %d %d (%d, %d) = %d", xT, yT, xT, yT, scoreFloor[xT][yT]))
}
turnCounter++
}
}
/**
*
**/
func canIEscapeThisBomb(myX int, myY int, bomb Bomb, turnLimit int, numTurns int, reach int, bombs []Bomb, floor [WIDTH][HEIGHT]int) bool {
// fmt.Fprintln(os.Stderr, fmt.Sprintf("MX: %d, MY: %d, BX: %d, BY: %d, t: %d, r: %d", myX, myY, bombX, bombY, numTurns, reach))
// Already safe on a diagonal from the bomb's cell, don't need to move
if myX != bomb.x && myY != bomb.y {return true}
// I'm lined up with the bomb, but out of its reach
// fmt.Fprintln(os.Stderr, fmt.Sprintf("myX: %d, myY: %d, BX: %d, BY: %d, turns: %d", myX, myY, bombX, bombY, numTurns))
if myX > bomb.x + reach || myX < bomb.x - reach || myY > bomb.y + reach || myY < bomb.y - reach {return true}
// fmt.Fprintln(os.Stderr, "MARKER 1")
// In danger, need to move, but there is no time left
if turnLimit - numTurns < 1 {return false}
// In danger, need to move, have some time left
if canIBeHere(myX+1, myY, numTurns+1, bombs, floor) && canIEscapeThisBomb(myX+1, myY, bomb, turnLimit, numTurns+1, reach, bombs, floor) {return true}
if canIBeHere(myX-1, myY, numTurns+1, bombs, floor) && canIEscapeThisBomb(myX-1, myY, bomb, turnLimit, numTurns+1, reach, bombs, floor) {return true}
if canIBeHere(myX, myY+1, numTurns+1, bombs, floor) && canIEscapeThisBomb(myX, myY+1, bomb, turnLimit, numTurns+1, reach, bombs, floor) {return true}
if canIBeHere(myX, myY-1, numTurns+1, bombs, floor) && canIEscapeThisBomb(myX, myY-1, bomb, turnLimit, numTurns+1, reach, bombs, floor) {return true}
// in danger, no where to go
return false
}
func willIDieHere(x int, y int, bombs []Bomb, floor [WIDTH][HEIGHT]int) bool {
for _, bomb := range bombs {
if canIEscapeThisBomb(x, y, bomb, bomb.time, 0, bomb.reach, bombs, floor) {
return false
}
}
return true
}
/**
* How many boxes are within bombing range of the given cell, are there items in those boxes, and can I get there?
**/
func scoreACell(x int, y int, myX int, myY int, bombsOnTheFloor []Bomb, myReach int, floor [WIDTH][HEIGHT]int) Cell {
if (myX != x || myY != y) { // I'm not already standing here
if !canIBeHere(x, y, 1, bombsOnTheFloor, floor) {return Cell{score: WALL_SCORE, distance: TOO_FAR}} // cannot move to here next turn
}
moves, maybe := canIGoToThere(myX, myY, myX, myY, x, y, SEARCH_DEPTH_LIMIT, bombsOnTheFloor, floor)
if !maybe {return Cell{score: WALL_SCORE, distance: TOO_FAR}} // cannot get here, even after multiple turns
if willIDieHere(x, y, bombsOnTheFloor, floor) {return Cell{score: DANGER_SCORE, distance: TOO_FAR}} // does not account for time left on the bomb, could optimize here rather than walling it off
score := 0
for i := 0; i < myReach; i++ {
if x+i < WIDTH && floor[x+i][y] >= BOX {score++}
if x-i > 0 && floor[x-i][y] >= BOX {score++}
if y+i < HEIGHT && floor[x][y+i] >= BOX {score++}
if y-i > 0 && floor[x][y-i] >= BOX {score++}
}
if floor[x][y] > BOX {score++} // there's an item in the box
return Cell{score: score, distance: moves}
}
func scoreTheFloor(myX int, myY int, bombsOnTheFloor []Bomb, myReach int, floor [WIDTH][HEIGHT]int) [WIDTH][HEIGHT]Cell{
scoreFloor := [WIDTH][HEIGHT]Cell{}
for i := 0; i < WIDTH; i++ {
for j := 0; j < HEIGHT; j++ {
scoreFloor[i][j] = scoreACell(i, j, myX, myY, bombsOnTheFloor, myReach, floor)
}
}
return scoreFloor
}
func canIGoToThere(x int, y int, myX int, myY int, xT int, yT int, moveLimit int, bombs []Bomb, floor [WIDTH][HEIGHT]int) (distance int, maybe bool) {
// fmt.Fprintln(os.Stderr, fmt.Sprintf("GO - x: %d, y: %d, m: %d", myX, myY, moves))
moves, minMoves := 0, TOO_FAR
yes, isPathFound := false, false
if moveLimit < 1 {return TOO_FAR, false}
// if it's not the cell that I'm already standing on, then ensure that I can stand on it when I get there
if (x != myX || y != myY) && !canIBeHere(myX, myY, 0, bombs, floor) {return TOO_FAR, false}
if myX == xT && myY == yT {return moves, true}
// try moving Right
moves, yes = canIGoToThere(x, y, myX+1, myY, xT, yT, moveLimit-1, bombs, floor)
if yes {
moves++
if moves < minMoves {minMoves = moves}
isPathFound = true
}
// try moving Left
moves, yes = canIGoToThere(x, y, myX-1, myY, xT, yT, moveLimit-1, bombs, floor)
if yes {
moves++
if moves < minMoves {minMoves = moves}
isPathFound = true
}
// try moving Down
moves, yes = canIGoToThere(x, y, myX, myY+1, xT, yT, moveLimit-1, bombs, floor)
if yes {
moves++
if moves < minMoves {minMoves = moves}
isPathFound = true
}
// try moving Up
moves, yes = canIGoToThere(x, y, myX, myY-1, xT, yT, moveLimit-1, bombs, floor)
if yes {
moves++
if moves < minMoves {minMoves = moves}
isPathFound = true
}
// all possibilities exhausted
return minMoves, isPathFound
}
func markBombs(bombs []Bomb, scoreFloor [WIDTH][HEIGHT]Cell) [WIDTH][HEIGHT]Cell {
var dangerScore int
for _, bomb := range bombs {
if bomb.time < 2 {
dangerScore = DANGER_SCORE
// on the bomb and it's exploding
scoreFloor[bomb.x][bomb.y].score = dangerScore
} else {
dangerScore = DANGER_SCORE / (bomb.time - 1) // treat the bomb's timer as if we'd already advanced to the next turn
}
// on the bomb
if scoreFloor[bomb.x][bomb.y].score > dangerScore {scoreFloor[bomb.x][bomb.y].score = WALL_SCORE}
// left of the bomb
for i := bomb.x; i >= bomb.x - bomb.reach; i-- {
if amIWithinTheBoundaries(i, 0) { // prevent array index out of bounds
if scoreFloor[i][bomb.y].score > dangerScore { // do not overwrite a score that's already even lower
scoreFloor[i][bomb.y].score = dangerScore
if scoreFloor[i][bomb.y].score == WALL_SCORE {break} // stop propagating the explosion in this direction, there is a blocker (wall or box)
}
}
}
// right of the bomb
for i := bomb.x; i <= bomb.x+bomb.reach; i++ {
if amIWithinTheBoundaries(i, 0) { // prevent array index out of bounds
if scoreFloor[i][bomb.y].score > dangerScore { // do not overwrite a score that's already even lower
scoreFloor[i][bomb.y].score = dangerScore
if scoreFloor[i][bomb.y].score == WALL_SCORE {break} // stop propagating the explosion in this direction, there is a blocker (wall or box)
}
}
}
// below the bomb
for i := bomb.y; i >= bomb.y - bomb.reach; i-- {
if amIWithinTheBoundaries(0, i) {
if scoreFloor[bomb.x][i].score > dangerScore {
scoreFloor[bomb.x][i].score = dangerScore
if scoreFloor[bomb.x][i].score == WALL_SCORE {break} // stop propagating the explosion in this direction, there is a blocker (wall or box)
}
}
}
// above the bomb
for i := bomb.y; i <= bomb.y+bomb.reach; i++ {
if amIWithinTheBoundaries(0, i) {
if scoreFloor[bomb.x][i].score > dangerScore {
scoreFloor[bomb.x][i].score = dangerScore
if scoreFloor[bomb.x][i].score == WALL_SCORE {break} // stop propagating the explosion in this direction, there is a blocker (wall or box)
}
}
}
}
return scoreFloor
}
func buildTheFloor(row string, y int, floor [WIDTH][HEIGHT]int) [WIDTH][HEIGHT]int {
width := len(row)
for x := 0; x < width; x++ {
if string(row[x]) == "." {
floor[x][y] = CELL
} else if string(row[x]) == "X" {
floor[x][y] = WALL
} else {
floor[x][y] = int(row[x] - '0')
}
}
return floor
}
func canIBeHere(x int, y int, timeElapsed int, bombs []Bomb, floor [WIDTH][HEIGHT]int) bool {
if !amIWithinTheBoundaries(x, y) {return false}
if floor[x][y] == WALL || floor[x][y] >= BOX || floor[x][y] == EXPLOSION {return false}
for _, bomb := range bombs {
if x == bomb.x && y == bomb.y {return false} // can't walk through bombs once they're placed
}
return true
}
func amIWithinTheBoundaries(x int, y int) bool {
if x < 0 || x >= WIDTH || y < 0 || y >= HEIGHT {return false}
return true
}
// TODO: this does not account for walls & boxes, which block propagation of the explosion
func amIWithinTheBlastRadius(myX int, myY int, bomb Bomb) bool{
if myX > bomb.x + bomb.reach || myX < bomb.x - bomb.reach || myY > bomb.y + bomb.reach || myY < bomb.y - bomb.reach {return true}
return false
}
func transferExplosions(scoreFloor [WIDTH][HEIGHT]Cell, floor [WIDTH][HEIGHT]int) [WIDTH][HEIGHT]int {
for i := 0; i < WIDTH; i++ {
for j := 0; j < HEIGHT; j++ {
if scoreFloor[i][j].score == DANGER_SCORE {floor[i][j] = EXPLOSION}
}
}
return floor
}
func scoreFloorToString(floor [WIDTH][HEIGHT]Cell) string {
var buffer bytes.Buffer
var scoreStr, distanceStr string
for i := 0; i < HEIGHT; i++ {
for j := 0; j < WIDTH; j++ {
scoreStr = strconv.Itoa(floor[j][i].score)
distanceStr = strconv.Itoa(floor[j][i].distance)
buffer.WriteString("[")
for f := 0; f < 3 - len(scoreStr); f++ {buffer.WriteString(" ")}
buffer.WriteString(scoreStr)
buffer.WriteString(", ")
for f := 0; f < 3 - len(distanceStr); f++ {buffer.WriteString(" ")} | return buffer.String()
}
func floorToString(floor [WIDTH][HEIGHT]int) string {
var buffer bytes.Buffer
var cell int
for i := 0; i < HEIGHT; i++ {
for j := 0; j < WIDTH; j++ {
cell = floor[j][i]
buffer.WriteString(" ")
if cell == BOX {buffer.WriteString("B")}
if cell == WALL {buffer.WriteString("W")}
if cell == CELL {buffer.WriteString("_")}
if cell == EXPLOSION {buffer.WriteString("E")}
if cell == ITEM_RANGE || cell == ITEM_BOMB {buffer.WriteString("I")}
buffer.WriteString(" ")
}
buffer.WriteString("\n")
}
return buffer.String()
} | buffer.WriteString(distanceStr)
buffer.WriteString("]")
}
buffer.WriteString("\n")
} | random_line_split |
hypersonic.go | // Results from the competition are here:
// https://www.codingame.com/challengereport/5708992986028c48464b3d45bbb4a490b2d6015
package main
import "fmt"
//import "os"
import "bytes"
import "strconv"
// These indicate the contents of each cell of the floor in the input
const WALL = -2
const CELL = -1
const BOX = 0
const ITEM_RANGE = 1
const ITEM_BOMB = 2
const EXPLOSION = 32
// These are the values given to each type of cell during scoring of the floor
const WALL_SCORE = -20
const DANGER_SCORE = -30
const CELL_SCORE = -1
const TOO_FAR = 100
// These are entity types
const PLAYER = 0
const BOMB = 1
const ITEM = 2
// floor constraints
const WIDTH = 13
const HEIGHT = 11
// time it takes for a bomb to explode, in turns (useless?)
const MAX_BOMB_TIME = 8
// turns I'm willing to spend searching for a path to a particular cell
// I had timeouts when this was > 8, which of course would also depend upon the ineffficiency of my code at the time
const SEARCH_DEPTH_LIMIT = 8
type Bomb struct {
x, y int // coordinates of the bomb
time int // number of turns until explosion
reach int // number of cells it reaches in each direction
}
type Cell struct {
score int
distance int
}
func main() {
var width, height, myId int
fmt.Scan(&width, &height, &myId)
turnCounter := 0
myX, myY := 2, 2
var myReach int // explosion range of my bombs
var myBombCount int
var floor [WIDTH][HEIGHT]int
for {
bombsOnTheFloor := []Bomb{}
floor = [WIDTH][HEIGHT]int{}
for i := 0; i < height; i++ { // the grid of boxes and empty cells
var row string
fmt.Scan(&row)
// fmt.Fprintln(os.Stderr, row)
floor = buildTheFloor(row, i, floor)
}
// fmt.Fprintln(os.Stderr, floorToString(floor))
var entities int
fmt.Scan(&entities) // how many players & bombs are on the grid
for i := 0; i < entities; i++ { // info about all the players & bombs on the grid
var entityType, owner, x, y, param1, param2 int
fmt.Scan(&entityType, &owner, &x, &y, ¶m1, ¶m2)
if entityType == PLAYER && owner == myId {
myBombCount = param1
myReach = param2 // may have changed due to power ups
myX = x
myY = y
}
if entityType == BOMB { // don't bother going here (get x,y and affect their score somehow)
bombsOnTheFloor = append(bombsOnTheFloor,
Bomb{x: x, y: y, time: param1, reach: param2})
}
}
// fmt.Fprintln(os.Stderr, bombsOnTheFloor)
xT, yT := myX, myY
maxScore := Cell{score: WALL_SCORE, distance: TOO_FAR}
scoreFloor := scoreTheFloor(myX, myY, bombsOnTheFloor, myReach, floor)
scoreFloor = markBombs(bombsOnTheFloor, scoreFloor)
floor = transferExplosions(scoreFloor, floor)
for i := 0; i < WIDTH; i++ {
for j := 0; j < HEIGHT; j++ {
if scoreFloor[i][j].score > maxScore.score || (scoreFloor[i][j].score == maxScore.score && scoreFloor[i][j].distance < maxScore.distance) {
maxScore = scoreFloor[i][j]
xT = i
yT = j
}
}
}
// fmt.Fprintln(os.Stderr, fmt.Sprintf("MAX: %d, TARGET: %d, x: %d, y: %d", maxScore, scoreFloor[xT][yT], xT, yT))
// fmt.Fprintln(os.Stderr, scoreFloorToString(scoreFloor))
// fmt.Fprintln(os.Stderr, floorToString(floor))
// if canIBeHere(myX, myY, 0, bombsOnTheFloor, floor) && !canIBeHere(x, y, 1, bombsOnTheFloor, floor)
if myBombCount > 0 && myX == xT && myY == yT && canIEscapeThisBomb(myX, myY, Bomb{x: myX, y: myY, reach: myReach, time: MAX_BOMB_TIME}, MAX_BOMB_TIME, 0, myReach, bombsOnTheFloor, floor) { // drop bomb on current cell while moving toward target cell (could be equivalent)
fmt.Println(fmt.Sprintf("BOMB %d %d BUTT SOUP", xT, yT))
} else {
fmt.Println(fmt.Sprintf("MOVE %d %d (%d, %d) = %d", xT, yT, xT, yT, scoreFloor[xT][yT]))
}
turnCounter++
}
}
/**
*
**/
func canIEscapeThisBomb(myX int, myY int, bomb Bomb, turnLimit int, numTurns int, reach int, bombs []Bomb, floor [WIDTH][HEIGHT]int) bool {
// fmt.Fprintln(os.Stderr, fmt.Sprintf("MX: %d, MY: %d, BX: %d, BY: %d, t: %d, r: %d", myX, myY, bombX, bombY, numTurns, reach))
// Already safe on a diagonal from the bomb's cell, don't need to move
if myX != bomb.x && myY != bomb.y {return true}
// I'm lined up with the bomb, but out of its reach
// fmt.Fprintln(os.Stderr, fmt.Sprintf("myX: %d, myY: %d, BX: %d, BY: %d, turns: %d", myX, myY, bombX, bombY, numTurns))
if myX > bomb.x + reach || myX < bomb.x - reach || myY > bomb.y + reach || myY < bomb.y - reach {return true}
// fmt.Fprintln(os.Stderr, "MARKER 1")
// In danger, need to move, but there is no time left
if turnLimit - numTurns < 1 {return false}
// In danger, need to move, have some time left
if canIBeHere(myX+1, myY, numTurns+1, bombs, floor) && canIEscapeThisBomb(myX+1, myY, bomb, turnLimit, numTurns+1, reach, bombs, floor) {return true}
if canIBeHere(myX-1, myY, numTurns+1, bombs, floor) && canIEscapeThisBomb(myX-1, myY, bomb, turnLimit, numTurns+1, reach, bombs, floor) {return true}
if canIBeHere(myX, myY+1, numTurns+1, bombs, floor) && canIEscapeThisBomb(myX, myY+1, bomb, turnLimit, numTurns+1, reach, bombs, floor) {return true}
if canIBeHere(myX, myY-1, numTurns+1, bombs, floor) && canIEscapeThisBomb(myX, myY-1, bomb, turnLimit, numTurns+1, reach, bombs, floor) {return true}
// in danger, no where to go
return false
}
func willIDieHere(x int, y int, bombs []Bomb, floor [WIDTH][HEIGHT]int) bool {
for _, bomb := range bombs {
if canIEscapeThisBomb(x, y, bomb, bomb.time, 0, bomb.reach, bombs, floor) {
return false
}
}
return true
}
/**
* How many boxes are within bombing range of the given cell, are there items in those boxes, and can I get there?
**/
func scoreACell(x int, y int, myX int, myY int, bombsOnTheFloor []Bomb, myReach int, floor [WIDTH][HEIGHT]int) Cell {
if (myX != x || myY != y) { // I'm not already standing here
if !canIBeHere(x, y, 1, bombsOnTheFloor, floor) {return Cell{score: WALL_SCORE, distance: TOO_FAR}} // cannot move to here next turn
}
moves, maybe := canIGoToThere(myX, myY, myX, myY, x, y, SEARCH_DEPTH_LIMIT, bombsOnTheFloor, floor)
if !maybe {return Cell{score: WALL_SCORE, distance: TOO_FAR}} // cannot get here, even after multiple turns
if willIDieHere(x, y, bombsOnTheFloor, floor) {return Cell{score: DANGER_SCORE, distance: TOO_FAR}} // does not account for time left on the bomb, could optimize here rather than walling it off
score := 0
for i := 0; i < myReach; i++ {
if x+i < WIDTH && floor[x+i][y] >= BOX {score++}
if x-i > 0 && floor[x-i][y] >= BOX {score++}
if y+i < HEIGHT && floor[x][y+i] >= BOX {score++}
if y-i > 0 && floor[x][y-i] >= BOX {score++}
}
if floor[x][y] > BOX {score++} // there's an item in the box
return Cell{score: score, distance: moves}
}
func | (myX int, myY int, bombsOnTheFloor []Bomb, myReach int, floor [WIDTH][HEIGHT]int) [WIDTH][HEIGHT]Cell{
scoreFloor := [WIDTH][HEIGHT]Cell{}
for i := 0; i < WIDTH; i++ {
for j := 0; j < HEIGHT; j++ {
scoreFloor[i][j] = scoreACell(i, j, myX, myY, bombsOnTheFloor, myReach, floor)
}
}
return scoreFloor
}
func canIGoToThere(x int, y int, myX int, myY int, xT int, yT int, moveLimit int, bombs []Bomb, floor [WIDTH][HEIGHT]int) (distance int, maybe bool) {
// fmt.Fprintln(os.Stderr, fmt.Sprintf("GO - x: %d, y: %d, m: %d", myX, myY, moves))
moves, minMoves := 0, TOO_FAR
yes, isPathFound := false, false
if moveLimit < 1 {return TOO_FAR, false}
// if it's not the cell that I'm already standing on, then ensure that I can stand on it when I get there
if (x != myX || y != myY) && !canIBeHere(myX, myY, 0, bombs, floor) {return TOO_FAR, false}
if myX == xT && myY == yT {return moves, true}
// try moving Right
moves, yes = canIGoToThere(x, y, myX+1, myY, xT, yT, moveLimit-1, bombs, floor)
if yes {
moves++
if moves < minMoves {minMoves = moves}
isPathFound = true
}
// try moving Left
moves, yes = canIGoToThere(x, y, myX-1, myY, xT, yT, moveLimit-1, bombs, floor)
if yes {
moves++
if moves < minMoves {minMoves = moves}
isPathFound = true
}
// try moving Down
moves, yes = canIGoToThere(x, y, myX, myY+1, xT, yT, moveLimit-1, bombs, floor)
if yes {
moves++
if moves < minMoves {minMoves = moves}
isPathFound = true
}
// try moving Up
moves, yes = canIGoToThere(x, y, myX, myY-1, xT, yT, moveLimit-1, bombs, floor)
if yes {
moves++
if moves < minMoves {minMoves = moves}
isPathFound = true
}
// all possibilities exhausted
return minMoves, isPathFound
}
func markBombs(bombs []Bomb, scoreFloor [WIDTH][HEIGHT]Cell) [WIDTH][HEIGHT]Cell {
var dangerScore int
for _, bomb := range bombs {
if bomb.time < 2 {
dangerScore = DANGER_SCORE
// on the bomb and it's exploding
scoreFloor[bomb.x][bomb.y].score = dangerScore
} else {
dangerScore = DANGER_SCORE / (bomb.time - 1) // treat the bomb's timer as if we'd already advanced to the next turn
}
// on the bomb
if scoreFloor[bomb.x][bomb.y].score > dangerScore {scoreFloor[bomb.x][bomb.y].score = WALL_SCORE}
// left of the bomb
for i := bomb.x; i >= bomb.x - bomb.reach; i-- {
if amIWithinTheBoundaries(i, 0) { // prevent array index out of bounds
if scoreFloor[i][bomb.y].score > dangerScore { // do not overwrite a score that's already even lower
scoreFloor[i][bomb.y].score = dangerScore
if scoreFloor[i][bomb.y].score == WALL_SCORE {break} // stop propagating the explosion in this direction, there is a blocker (wall or box)
}
}
}
// right of the bomb
for i := bomb.x; i <= bomb.x+bomb.reach; i++ {
if amIWithinTheBoundaries(i, 0) { // prevent array index out of bounds
if scoreFloor[i][bomb.y].score > dangerScore { // do not overwrite a score that's already even lower
scoreFloor[i][bomb.y].score = dangerScore
if scoreFloor[i][bomb.y].score == WALL_SCORE {break} // stop propagating the explosion in this direction, there is a blocker (wall or box)
}
}
}
// below the bomb
for i := bomb.y; i >= bomb.y - bomb.reach; i-- {
if amIWithinTheBoundaries(0, i) {
if scoreFloor[bomb.x][i].score > dangerScore {
scoreFloor[bomb.x][i].score = dangerScore
if scoreFloor[bomb.x][i].score == WALL_SCORE {break} // stop propagating the explosion in this direction, there is a blocker (wall or box)
}
}
}
// above the bomb
for i := bomb.y; i <= bomb.y+bomb.reach; i++ {
if amIWithinTheBoundaries(0, i) {
if scoreFloor[bomb.x][i].score > dangerScore {
scoreFloor[bomb.x][i].score = dangerScore
if scoreFloor[bomb.x][i].score == WALL_SCORE {break} // stop propagating the explosion in this direction, there is a blocker (wall or box)
}
}
}
}
return scoreFloor
}
func buildTheFloor(row string, y int, floor [WIDTH][HEIGHT]int) [WIDTH][HEIGHT]int {
width := len(row)
for x := 0; x < width; x++ {
if string(row[x]) == "." {
floor[x][y] = CELL
} else if string(row[x]) == "X" {
floor[x][y] = WALL
} else {
floor[x][y] = int(row[x] - '0')
}
}
return floor
}
func canIBeHere(x int, y int, timeElapsed int, bombs []Bomb, floor [WIDTH][HEIGHT]int) bool {
if !amIWithinTheBoundaries(x, y) {return false}
if floor[x][y] == WALL || floor[x][y] >= BOX || floor[x][y] == EXPLOSION {return false}
for _, bomb := range bombs {
if x == bomb.x && y == bomb.y {return false} // can't walk through bombs once they're placed
}
return true
}
func amIWithinTheBoundaries(x int, y int) bool {
if x < 0 || x >= WIDTH || y < 0 || y >= HEIGHT {return false}
return true
}
// TODO: this does not account for walls & boxes, which block propagation of the explosion
func amIWithinTheBlastRadius(myX int, myY int, bomb Bomb) bool{
if myX > bomb.x + bomb.reach || myX < bomb.x - bomb.reach || myY > bomb.y + bomb.reach || myY < bomb.y - bomb.reach {return true}
return false
}
func transferExplosions(scoreFloor [WIDTH][HEIGHT]Cell, floor [WIDTH][HEIGHT]int) [WIDTH][HEIGHT]int {
for i := 0; i < WIDTH; i++ {
for j := 0; j < HEIGHT; j++ {
if scoreFloor[i][j].score == DANGER_SCORE {floor[i][j] = EXPLOSION}
}
}
return floor
}
func scoreFloorToString(floor [WIDTH][HEIGHT]Cell) string {
var buffer bytes.Buffer
var scoreStr, distanceStr string
for i := 0; i < HEIGHT; i++ {
for j := 0; j < WIDTH; j++ {
scoreStr = strconv.Itoa(floor[j][i].score)
distanceStr = strconv.Itoa(floor[j][i].distance)
buffer.WriteString("[")
for f := 0; f < 3 - len(scoreStr); f++ {buffer.WriteString(" ")}
buffer.WriteString(scoreStr)
buffer.WriteString(", ")
for f := 0; f < 3 - len(distanceStr); f++ {buffer.WriteString(" ")}
buffer.WriteString(distanceStr)
buffer.WriteString("]")
}
buffer.WriteString("\n")
}
return buffer.String()
}
func floorToString(floor [WIDTH][HEIGHT]int) string {
var buffer bytes.Buffer
var cell int
for i := 0; i < HEIGHT; i++ {
for j := 0; j < WIDTH; j++ {
cell = floor[j][i]
buffer.WriteString(" ")
if cell == BOX {buffer.WriteString("B")}
if cell == WALL {buffer.WriteString("W")}
if cell == CELL {buffer.WriteString("_")}
if cell == EXPLOSION {buffer.WriteString("E")}
if cell == ITEM_RANGE || cell == ITEM_BOMB {buffer.WriteString("I")}
buffer.WriteString(" ")
}
buffer.WriteString("\n")
}
return buffer.String()
}
| scoreTheFloor | identifier_name |
util.py | """iminuit utility functions and classes.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import re
import types
from collections import OrderedDict, namedtuple
from . import repr_html
from . import repr_text
from operator import itemgetter
class Matrix(tuple):
"""Matrix data object (tuple of tuples)."""
__slots__ = ()
def __new__(self, names, data):
self.names = names
return tuple.__new__(Matrix, (tuple(x) for x in data))
def _repr_html_(self):
return repr_html.matrix(self)
def __str__(self):
return repr_text.matrix(self)
def _repr_pretty_(self, p, cycle):
if cycle:
p.text("Matrix(...)")
else:
p.text(str(self))
class dict_interface_mixin(object):
"Provides a dict-like interface for a namedtuple."
__slots__ = ()
def __getitem__(self, key):
base = super(dict_interface_mixin, self)
if isinstance(key, int):
return base.__getitem__(key)
else:
return base.__getattribute__(key)
def __contains__(self, key):
return key in self.keys()
def __iter__(self):
return iter(self.keys())
def keys(self):
return self._fields
def values(self):
base = super(dict_interface_mixin, self)
return tuple(base.__getitem__(i) for i in range(len(self)))
def items(self):
keys = self.keys()
values = self.values()
return tuple((keys[i], values[i]) for i in range(len(self)))
def __str__(self):
return self.__class__.__name__ + "(" + ", ".join("{}={}".format(k, repr(v)) for (k, v) in self.items()) + ")"
class Param(dict_interface_mixin, namedtuple("Param",
"number name value error is_const is_fixed has_limits "
"has_lower_limit has_upper_limit lower_limit upper_limit")):
"""Data object for a single Parameter."""
__slots__ = ()
class Params(list):
"""List of parameter data objects."""
def __init__(self, seq, merrors):
list.__init__(self, seq)
self.merrors = merrors
def _repr_html_(self):
return repr_html.params(self)
def __str__(self):
return repr_text.params(self)
def _repr_pretty_(self, p, cycle):
if cycle:
p.text("[...]")
else:
p.text(str(self))
class MError(dict_interface_mixin, namedtuple("MError",
"name is_valid lower upper lower_valid upper_valid at_lower_limit at_upper_limit "
"at_lower_max_fcn at_upper_max_fcn lower_new_min upper_new_min nfcn min")):
"""Minos result object."""
__slots__ = ()
def _repr_html_(self):
return repr_html.merror(self)
def __str__(self):
return repr_text.merror(self)
def _repr_pretty_(self, p, cycle):
if cycle:
p.text("MError(...)")
else:
p.text(str(self))
class MErrors(OrderedDict):
"""Dict from parameter name to Minos result object."""
def _repr_html_(self):
return "\n".join([x._repr_html_() for x in self.values()])
def __str__(self):
return "\n".join([str(x) for x in self.values()])
def _repr_pretty_(self, p, cycle):
if cycle:
p.text("MErrors(...)")
else:
p.text(str(self))
class FMin(dict_interface_mixin, namedtuple("FMin",
"fval edm tolerance nfcn ncalls up is_valid has_valid_parameters has_accurate_covar "
"has_posdef_covar has_made_posdef_covar hesse_failed has_covariance is_above_max_edm "
"has_reached_call_limit")):
"""Function minimum status object."""
__slots__ = ()
def _repr_html_(self):
return repr_html.fmin(self)
def __str__(self):
return repr_text.fmin(self)
def _repr_pretty_(self, p, cycle):
if cycle:
p.text("FMin(...)")
else:
p.text(str(self))
# MigradResult used to be a tuple, so we don't add the dict interface
class MigradResult(namedtuple("MigradResult", "fmin params")):
"""Holds the Migrad result."""
__slots__ = ()
def __str__(self):
return str(self.fmin) + "\n" + str(self.params)
def _repr_html_(self):
return self.fmin._repr_html_() + self.params._repr_html_()
def _repr_pretty_(self, p, cycle):
if cycle:
p.text("MigradResult(...)")
else:
p.text(str(self))
def arguments_from_docstring(doc):
"""Parse first line of docstring for argument name.
Docstring should be of the form ``min(iterable[, key=func])``.
It can also parse cython docstring of the form
``Minuit.migrad(self[, int ncall_me =10000, resume=True, int nsplit=1])``
"""
if doc is None:
raise RuntimeError('__doc__ is None')
doc = doc.lstrip()
# care only the firstline
# docstring can be long
line = doc.split('\n', 1)[0] # get the firstline
if line.startswith("('...',)"):
line = doc.split('\n', 2)[1] # get the second line
p = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
# 'min(iterable[, key=func])\n' -> 'iterable[, key=func]'
sig = p.search(line)
if sig is None:
return []
# iterable[, key=func]' -> ['iterable[' ,' key=func]']
sig = sig.groups()[0].split(',')
ret = []
for s in sig:
# get the last one after all space after =
# ex: int x= True
tmp = s.split('=')[0].split()[-1]
# clean up non _+alphanum character
tmp = ''.join([x for x in tmp if x.isalnum() or x == '_'])
ret.append(tmp)
# re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
# ret += self.docstring_kwd_re.findall(s)
ret = list(filter(lambda x: x != '', ret))
if len(ret) == 0:
raise RuntimeError('Your doc is unparsable\n' + doc)
return ret
def fc_or_c(f):
if hasattr(f, 'func_code'):
return f.func_code
else:
return f.__code__
def arguments_from_funccode(f):
"""Check f.funccode for arguments
"""
fc = fc_or_c(f)
vnames = fc.co_varnames
nargs = fc.co_argcount
# bound method and fake function will be None
args = vnames[1 if is_bound(f) else 0:nargs]
if not args:
raise RuntimeError('Function has variable number of arguments')
return list(args)
def arguments_from_call_funccode(f):
"""Check f.__call__.func_code for arguments
"""
fc = fc_or_c(f.__call__)
argcount = fc.co_argcount
args = list(fc.co_varnames[1:argcount])
if not args:
raise RuntimeError('Function has variable number of arguments')
return args
def is_bound(f):
"""Test whether ``f`` is a bound function.
"""
return getattr(f, '__self__', None) is not None
def dock_if_bound(f, v):
"""Dock off ``self`` if a bound function is passed.
"""
return v[1:] if is_bound(f) else v
def better_arg_spec(f, verbose=False):
"""Extract function signature.
..seealso::
:ref:`function-sig-label`
"""
# using funccode
try:
return arguments_from_funccode(f)
except Exception as e:
if verbose:
print(e) # TODO: this might not be such a good idea.
print("Extracting arguments from f.func_code/__code__ fails")
# using __call__ funccode
try:
return arguments_from_call_funccode(f)
except Exception as e:
if verbose:
print(e) # TODO: this might not be such a good idea.
print("Extracting arguments from f.__call__.func_code/__code__ fails")
# try:
# return list(inspect.getargspec(f.__call__)[0][1:])
# except Exception as e:
# if verbose:
# print(e)
# print("inspect.getargspec(f)[0] fails")
# try:
# return list(inspect.getargspec(f)[0])
# except Exception as e:
# if verbose:
# print(e)
# print("inspect.getargspec(f)[0] fails")
# now we are parsing __call__.__doc__
# we assume that __call__.__doc__ doesn't have self
# this is what cython gives
try:
t = arguments_from_docstring(f.__call__.__doc__)
if t[0] == 'self':
t = t[1:]
return t
except Exception as e:
if verbose:
print(e)
print("fail parsing __call__.__doc__")
# how about just __doc__
try:
t = arguments_from_docstring(f.__doc__)
if t[0] == 'self':
t = t[1:]
return t
except Exception as e:
if verbose:
print(e)
print("fail parsing __doc__")
raise TypeError("Unable to obtain function signature")
def describe(f, verbose=False):
"""Try to extract the function argument names.
.. seealso:: |
def fitarg_rename(fitarg, ren):
"""Rename variable names in ``fitarg`` with rename function.
::
#simple renaming
fitarg_rename({'x':1, 'limit_x':1, 'fix_x':1, 'error_x':1},
lambda pname: 'y' if pname=='x' else pname)
#{'y':1, 'limit_y':1, 'fix_y':1, 'error_y':1},
#prefixing
figarg_rename({'x':1, 'limit_x':1, 'fix_x':1, 'error_x':1},
lambda pname: 'prefix_'+pname)
#{'prefix_x':1, 'limit_prefix_x':1, 'fix_prefix_x':1, 'error_prefix_x':1}
"""
tmp = ren
if isinstance(ren, str):
ren = lambda x: tmp + '_' + x
ret = {}
prefix = ['limit_', 'fix_', 'error_', ]
for k, v in fitarg.items():
vn = k
pf = ''
for p in prefix:
if k.startswith(p):
vn = k[len(p):]
pf = p
newvn = pf + ren(vn)
ret[newvn] = v
return ret
def true_param(p):
"""Check if ``p`` is a parameter name, not a limit/error/fix attributes.
"""
return (not p.startswith('limit_') and
not p.startswith('error_') and
not p.startswith('fix_'))
def param_name(p):
"""Extract parameter name from attributes.
Examples:
- ``fix_x`` -> ``x``
- ``error_x`` -> ``x``
- ``limit_x`` -> ``x``
"""
prefix = ['limit_', 'error_', 'fix_']
for prf in prefix:
if p.startswith(prf):
return p[len(prf):]
return p
def extract_iv(b):
"""Extract initial value from fitargs dictionary."""
return dict((k, v) for k, v in b.items() if true_param(k))
def extract_limit(b):
"""Extract limit from fitargs dictionary."""
return dict((k, v) for k, v in b.items() if k.startswith('limit_'))
def extract_error(b):
"""Extract error from fitargs dictionary."""
return dict((k, v) for k, v in b.items() if k.startswith('error_'))
def extract_fix(b):
"""extract fix attribute from fitargs dictionary"""
return dict((k, v) for k, v in b.items() if k.startswith('fix_'))
def remove_var(b, exclude):
"""Exclude variable in exclude list from b."""
return dict((k, v) for k, v in b.items() if param_name(k) not in exclude)
def make_func_code(params):
"""Make a func_code object to fake function signature.
You can make a funccode from describable object by::
make_func_code(describe(f))
"""
class FuncCode(object):
__slots__ = ('co_varnames', 'co_argcount')
fc = FuncCode()
fc.co_varnames = params
fc.co_argcount = len(params)
return fc
def format_exception(etype, evalue, tb):
# work around for https://bugs.python.org/issue17413
# the issue is not fixed in Python-3.7
import traceback
s = "".join(traceback.format_tb(tb))
return "%s: %s\n%s" % (etype.__name__, evalue, s) |
:ref:`function-sig-label`
"""
return better_arg_spec(f, verbose) | random_line_split |
util.py | """iminuit utility functions and classes.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import re
import types
from collections import OrderedDict, namedtuple
from . import repr_html
from . import repr_text
from operator import itemgetter
class Matrix(tuple):
"""Matrix data object (tuple of tuples)."""
__slots__ = ()
def __new__(self, names, data):
self.names = names
return tuple.__new__(Matrix, (tuple(x) for x in data))
def _repr_html_(self):
return repr_html.matrix(self)
def __str__(self):
return repr_text.matrix(self)
def _repr_pretty_(self, p, cycle):
if cycle:
p.text("Matrix(...)")
else:
p.text(str(self))
class dict_interface_mixin(object):
"Provides a dict-like interface for a namedtuple."
__slots__ = ()
def __getitem__(self, key):
base = super(dict_interface_mixin, self)
if isinstance(key, int):
return base.__getitem__(key)
else:
return base.__getattribute__(key)
def __contains__(self, key):
return key in self.keys()
def __iter__(self):
return iter(self.keys())
def keys(self):
return self._fields
def values(self):
base = super(dict_interface_mixin, self)
return tuple(base.__getitem__(i) for i in range(len(self)))
def items(self):
keys = self.keys()
values = self.values()
return tuple((keys[i], values[i]) for i in range(len(self)))
def __str__(self):
return self.__class__.__name__ + "(" + ", ".join("{}={}".format(k, repr(v)) for (k, v) in self.items()) + ")"
class Param(dict_interface_mixin, namedtuple("Param",
"number name value error is_const is_fixed has_limits "
"has_lower_limit has_upper_limit lower_limit upper_limit")):
"""Data object for a single Parameter."""
__slots__ = ()
class Params(list):
"""List of parameter data objects."""
def __init__(self, seq, merrors):
list.__init__(self, seq)
self.merrors = merrors
def _repr_html_(self):
return repr_html.params(self)
def __str__(self):
return repr_text.params(self)
def _repr_pretty_(self, p, cycle):
if cycle:
p.text("[...]")
else:
p.text(str(self))
class MError(dict_interface_mixin, namedtuple("MError",
"name is_valid lower upper lower_valid upper_valid at_lower_limit at_upper_limit "
"at_lower_max_fcn at_upper_max_fcn lower_new_min upper_new_min nfcn min")):
"""Minos result object."""
__slots__ = ()
def _repr_html_(self):
return repr_html.merror(self)
def __str__(self):
return repr_text.merror(self)
def _repr_pretty_(self, p, cycle):
if cycle:
p.text("MError(...)")
else:
p.text(str(self))
class MErrors(OrderedDict):
"""Dict from parameter name to Minos result object."""
def _repr_html_(self):
return "\n".join([x._repr_html_() for x in self.values()])
def __str__(self):
return "\n".join([str(x) for x in self.values()])
def _repr_pretty_(self, p, cycle):
if cycle:
p.text("MErrors(...)")
else:
p.text(str(self))
class FMin(dict_interface_mixin, namedtuple("FMin",
"fval edm tolerance nfcn ncalls up is_valid has_valid_parameters has_accurate_covar "
"has_posdef_covar has_made_posdef_covar hesse_failed has_covariance is_above_max_edm "
"has_reached_call_limit")):
"""Function minimum status object."""
__slots__ = ()
def _repr_html_(self):
return repr_html.fmin(self)
def __str__(self):
return repr_text.fmin(self)
def _repr_pretty_(self, p, cycle):
if cycle:
p.text("FMin(...)")
else:
p.text(str(self))
# MigradResult used to be a tuple, so we don't add the dict interface
class MigradResult(namedtuple("MigradResult", "fmin params")):
"""Holds the Migrad result."""
__slots__ = ()
def __str__(self):
return str(self.fmin) + "\n" + str(self.params)
def _repr_html_(self):
return self.fmin._repr_html_() + self.params._repr_html_()
def _repr_pretty_(self, p, cycle):
if cycle:
p.text("MigradResult(...)")
else:
p.text(str(self))
def arguments_from_docstring(doc):
"""Parse first line of docstring for argument name.
Docstring should be of the form ``min(iterable[, key=func])``.
It can also parse cython docstring of the form
``Minuit.migrad(self[, int ncall_me =10000, resume=True, int nsplit=1])``
"""
if doc is None:
raise RuntimeError('__doc__ is None')
doc = doc.lstrip()
# care only the firstline
# docstring can be long
line = doc.split('\n', 1)[0] # get the firstline
if line.startswith("('...',)"):
line = doc.split('\n', 2)[1] # get the second line
p = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
# 'min(iterable[, key=func])\n' -> 'iterable[, key=func]'
sig = p.search(line)
if sig is None:
return []
# iterable[, key=func]' -> ['iterable[' ,' key=func]']
sig = sig.groups()[0].split(',')
ret = []
for s in sig:
# get the last one after all space after =
# ex: int x= True
tmp = s.split('=')[0].split()[-1]
# clean up non _+alphanum character
tmp = ''.join([x for x in tmp if x.isalnum() or x == '_'])
ret.append(tmp)
# re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
# ret += self.docstring_kwd_re.findall(s)
ret = list(filter(lambda x: x != '', ret))
if len(ret) == 0:
raise RuntimeError('Your doc is unparsable\n' + doc)
return ret
def fc_or_c(f):
if hasattr(f, 'func_code'):
return f.func_code
else:
return f.__code__
def arguments_from_funccode(f):
"""Check f.funccode for arguments
"""
fc = fc_or_c(f)
vnames = fc.co_varnames
nargs = fc.co_argcount
# bound method and fake function will be None
args = vnames[1 if is_bound(f) else 0:nargs]
if not args:
raise RuntimeError('Function has variable number of arguments')
return list(args)
def arguments_from_call_funccode(f):
"""Check f.__call__.func_code for arguments
"""
fc = fc_or_c(f.__call__)
argcount = fc.co_argcount
args = list(fc.co_varnames[1:argcount])
if not args:
raise RuntimeError('Function has variable number of arguments')
return args
def is_bound(f):
"""Test whether ``f`` is a bound function.
"""
return getattr(f, '__self__', None) is not None
def dock_if_bound(f, v):
"""Dock off ``self`` if a bound function is passed.
"""
return v[1:] if is_bound(f) else v
def better_arg_spec(f, verbose=False):
"""Extract function signature.
..seealso::
:ref:`function-sig-label`
"""
# using funccode
try:
return arguments_from_funccode(f)
except Exception as e:
if verbose:
print(e) # TODO: this might not be such a good idea.
print("Extracting arguments from f.func_code/__code__ fails")
# using __call__ funccode
try:
return arguments_from_call_funccode(f)
except Exception as e:
if verbose:
print(e) # TODO: this might not be such a good idea.
print("Extracting arguments from f.__call__.func_code/__code__ fails")
# try:
# return list(inspect.getargspec(f.__call__)[0][1:])
# except Exception as e:
# if verbose:
# print(e)
# print("inspect.getargspec(f)[0] fails")
# try:
# return list(inspect.getargspec(f)[0])
# except Exception as e:
# if verbose:
# print(e)
# print("inspect.getargspec(f)[0] fails")
# now we are parsing __call__.__doc__
# we assume that __call__.__doc__ doesn't have self
# this is what cython gives
try:
t = arguments_from_docstring(f.__call__.__doc__)
if t[0] == 'self':
t = t[1:]
return t
except Exception as e:
if verbose:
print(e)
print("fail parsing __call__.__doc__")
# how about just __doc__
try:
t = arguments_from_docstring(f.__doc__)
if t[0] == 'self':
t = t[1:]
return t
except Exception as e:
if verbose:
print(e)
print("fail parsing __doc__")
raise TypeError("Unable to obtain function signature")
def describe(f, verbose=False):
"""Try to extract the function argument names.
.. seealso::
:ref:`function-sig-label`
"""
return better_arg_spec(f, verbose)
def fitarg_rename(fitarg, ren):
"""Rename variable names in ``fitarg`` with rename function.
::
#simple renaming
fitarg_rename({'x':1, 'limit_x':1, 'fix_x':1, 'error_x':1},
lambda pname: 'y' if pname=='x' else pname)
#{'y':1, 'limit_y':1, 'fix_y':1, 'error_y':1},
#prefixing
figarg_rename({'x':1, 'limit_x':1, 'fix_x':1, 'error_x':1},
lambda pname: 'prefix_'+pname)
#{'prefix_x':1, 'limit_prefix_x':1, 'fix_prefix_x':1, 'error_prefix_x':1}
"""
tmp = ren
if isinstance(ren, str):
ren = lambda x: tmp + '_' + x
ret = {}
prefix = ['limit_', 'fix_', 'error_', ]
for k, v in fitarg.items():
vn = k
pf = ''
for p in prefix:
if k.startswith(p):
vn = k[len(p):]
pf = p
newvn = pf + ren(vn)
ret[newvn] = v
return ret
def true_param(p):
"""Check if ``p`` is a parameter name, not a limit/error/fix attributes.
"""
return (not p.startswith('limit_') and
not p.startswith('error_') and
not p.startswith('fix_'))
def param_name(p):
"""Extract parameter name from attributes.
Examples:
- ``fix_x`` -> ``x``
- ``error_x`` -> ``x``
- ``limit_x`` -> ``x``
"""
prefix = ['limit_', 'error_', 'fix_']
for prf in prefix:
if p.startswith(prf):
return p[len(prf):]
return p
def extract_iv(b):
"""Extract initial value from fitargs dictionary."""
return dict((k, v) for k, v in b.items() if true_param(k))
def extract_limit(b):
"""Extract limit from fitargs dictionary."""
return dict((k, v) for k, v in b.items() if k.startswith('limit_'))
def | (b):
"""Extract error from fitargs dictionary."""
return dict((k, v) for k, v in b.items() if k.startswith('error_'))
def extract_fix(b):
"""extract fix attribute from fitargs dictionary"""
return dict((k, v) for k, v in b.items() if k.startswith('fix_'))
def remove_var(b, exclude):
"""Exclude variable in exclude list from b."""
return dict((k, v) for k, v in b.items() if param_name(k) not in exclude)
def make_func_code(params):
"""Make a func_code object to fake function signature.
You can make a funccode from describable object by::
make_func_code(describe(f))
"""
class FuncCode(object):
__slots__ = ('co_varnames', 'co_argcount')
fc = FuncCode()
fc.co_varnames = params
fc.co_argcount = len(params)
return fc
def format_exception(etype, evalue, tb):
# work around for https://bugs.python.org/issue17413
# the issue is not fixed in Python-3.7
import traceback
s = "".join(traceback.format_tb(tb))
return "%s: %s\n%s" % (etype.__name__, evalue, s)
| extract_error | identifier_name |
util.py | """iminuit utility functions and classes.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import re
import types
from collections import OrderedDict, namedtuple
from . import repr_html
from . import repr_text
from operator import itemgetter
class Matrix(tuple):
"""Matrix data object (tuple of tuples)."""
__slots__ = ()
def __new__(self, names, data):
self.names = names
return tuple.__new__(Matrix, (tuple(x) for x in data))
def _repr_html_(self):
return repr_html.matrix(self)
def __str__(self):
return repr_text.matrix(self)
def _repr_pretty_(self, p, cycle):
if cycle:
p.text("Matrix(...)")
else:
p.text(str(self))
class dict_interface_mixin(object):
"Provides a dict-like interface for a namedtuple."
__slots__ = ()
def __getitem__(self, key):
base = super(dict_interface_mixin, self)
if isinstance(key, int):
return base.__getitem__(key)
else:
return base.__getattribute__(key)
def __contains__(self, key):
return key in self.keys()
def __iter__(self):
return iter(self.keys())
def keys(self):
return self._fields
def values(self):
base = super(dict_interface_mixin, self)
return tuple(base.__getitem__(i) for i in range(len(self)))
def items(self):
keys = self.keys()
values = self.values()
return tuple((keys[i], values[i]) for i in range(len(self)))
def __str__(self):
return self.__class__.__name__ + "(" + ", ".join("{}={}".format(k, repr(v)) for (k, v) in self.items()) + ")"
class Param(dict_interface_mixin, namedtuple("Param",
"number name value error is_const is_fixed has_limits "
"has_lower_limit has_upper_limit lower_limit upper_limit")):
"""Data object for a single Parameter."""
__slots__ = ()
class Params(list):
"""List of parameter data objects."""
def __init__(self, seq, merrors):
list.__init__(self, seq)
self.merrors = merrors
def _repr_html_(self):
return repr_html.params(self)
def __str__(self):
return repr_text.params(self)
def _repr_pretty_(self, p, cycle):
if cycle:
p.text("[...]")
else:
p.text(str(self))
class MError(dict_interface_mixin, namedtuple("MError",
"name is_valid lower upper lower_valid upper_valid at_lower_limit at_upper_limit "
"at_lower_max_fcn at_upper_max_fcn lower_new_min upper_new_min nfcn min")):
"""Minos result object."""
__slots__ = ()
def _repr_html_(self):
return repr_html.merror(self)
def __str__(self):
return repr_text.merror(self)
def _repr_pretty_(self, p, cycle):
if cycle:
p.text("MError(...)")
else:
p.text(str(self))
class MErrors(OrderedDict):
"""Dict from parameter name to Minos result object."""
def _repr_html_(self):
return "\n".join([x._repr_html_() for x in self.values()])
def __str__(self):
return "\n".join([str(x) for x in self.values()])
def _repr_pretty_(self, p, cycle):
if cycle:
p.text("MErrors(...)")
else:
p.text(str(self))
class FMin(dict_interface_mixin, namedtuple("FMin",
"fval edm tolerance nfcn ncalls up is_valid has_valid_parameters has_accurate_covar "
"has_posdef_covar has_made_posdef_covar hesse_failed has_covariance is_above_max_edm "
"has_reached_call_limit")):
"""Function minimum status object."""
__slots__ = ()
def _repr_html_(self):
return repr_html.fmin(self)
def __str__(self):
return repr_text.fmin(self)
def _repr_pretty_(self, p, cycle):
if cycle:
p.text("FMin(...)")
else:
p.text(str(self))
# MigradResult used to be a tuple, so we don't add the dict interface
class MigradResult(namedtuple("MigradResult", "fmin params")):
"""Holds the Migrad result."""
__slots__ = ()
def __str__(self):
return str(self.fmin) + "\n" + str(self.params)
def _repr_html_(self):
return self.fmin._repr_html_() + self.params._repr_html_()
def _repr_pretty_(self, p, cycle):
if cycle:
p.text("MigradResult(...)")
else:
p.text(str(self))
def arguments_from_docstring(doc):
"""Parse first line of docstring for argument name.
Docstring should be of the form ``min(iterable[, key=func])``.
It can also parse cython docstring of the form
``Minuit.migrad(self[, int ncall_me =10000, resume=True, int nsplit=1])``
"""
if doc is None:
raise RuntimeError('__doc__ is None')
doc = doc.lstrip()
# care only the firstline
# docstring can be long
line = doc.split('\n', 1)[0] # get the firstline
if line.startswith("('...',)"):
line = doc.split('\n', 2)[1] # get the second line
p = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
# 'min(iterable[, key=func])\n' -> 'iterable[, key=func]'
sig = p.search(line)
if sig is None:
return []
# iterable[, key=func]' -> ['iterable[' ,' key=func]']
sig = sig.groups()[0].split(',')
ret = []
for s in sig:
# get the last one after all space after =
# ex: int x= True
tmp = s.split('=')[0].split()[-1]
# clean up non _+alphanum character
tmp = ''.join([x for x in tmp if x.isalnum() or x == '_'])
ret.append(tmp)
# re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
# ret += self.docstring_kwd_re.findall(s)
ret = list(filter(lambda x: x != '', ret))
if len(ret) == 0:
raise RuntimeError('Your doc is unparsable\n' + doc)
return ret
def fc_or_c(f):
if hasattr(f, 'func_code'):
return f.func_code
else:
return f.__code__
def arguments_from_funccode(f):
"""Check f.funccode for arguments
"""
fc = fc_or_c(f)
vnames = fc.co_varnames
nargs = fc.co_argcount
# bound method and fake function will be None
args = vnames[1 if is_bound(f) else 0:nargs]
if not args:
raise RuntimeError('Function has variable number of arguments')
return list(args)
def arguments_from_call_funccode(f):
"""Check f.__call__.func_code for arguments
"""
fc = fc_or_c(f.__call__)
argcount = fc.co_argcount
args = list(fc.co_varnames[1:argcount])
if not args:
raise RuntimeError('Function has variable number of arguments')
return args
def is_bound(f):
"""Test whether ``f`` is a bound function.
"""
return getattr(f, '__self__', None) is not None
def dock_if_bound(f, v):
"""Dock off ``self`` if a bound function is passed.
"""
return v[1:] if is_bound(f) else v
def better_arg_spec(f, verbose=False):
"""Extract function signature.
..seealso::
:ref:`function-sig-label`
"""
# using funccode
try:
return arguments_from_funccode(f)
except Exception as e:
if verbose:
print(e) # TODO: this might not be such a good idea.
print("Extracting arguments from f.func_code/__code__ fails")
# using __call__ funccode
try:
return arguments_from_call_funccode(f)
except Exception as e:
if verbose:
print(e) # TODO: this might not be such a good idea.
print("Extracting arguments from f.__call__.func_code/__code__ fails")
# try:
# return list(inspect.getargspec(f.__call__)[0][1:])
# except Exception as e:
# if verbose:
# print(e)
# print("inspect.getargspec(f)[0] fails")
# try:
# return list(inspect.getargspec(f)[0])
# except Exception as e:
# if verbose:
# print(e)
# print("inspect.getargspec(f)[0] fails")
# now we are parsing __call__.__doc__
# we assume that __call__.__doc__ doesn't have self
# this is what cython gives
try:
t = arguments_from_docstring(f.__call__.__doc__)
if t[0] == 'self':
t = t[1:]
return t
except Exception as e:
if verbose:
print(e)
print("fail parsing __call__.__doc__")
# how about just __doc__
try:
t = arguments_from_docstring(f.__doc__)
if t[0] == 'self':
t = t[1:]
return t
except Exception as e:
if verbose:
print(e)
print("fail parsing __doc__")
raise TypeError("Unable to obtain function signature")
def describe(f, verbose=False):
"""Try to extract the function argument names.
.. seealso::
:ref:`function-sig-label`
"""
return better_arg_spec(f, verbose)
def fitarg_rename(fitarg, ren):
"""Rename variable names in ``fitarg`` with rename function.
::
#simple renaming
fitarg_rename({'x':1, 'limit_x':1, 'fix_x':1, 'error_x':1},
lambda pname: 'y' if pname=='x' else pname)
#{'y':1, 'limit_y':1, 'fix_y':1, 'error_y':1},
#prefixing
figarg_rename({'x':1, 'limit_x':1, 'fix_x':1, 'error_x':1},
lambda pname: 'prefix_'+pname)
#{'prefix_x':1, 'limit_prefix_x':1, 'fix_prefix_x':1, 'error_prefix_x':1}
"""
tmp = ren
if isinstance(ren, str):
ren = lambda x: tmp + '_' + x
ret = {}
prefix = ['limit_', 'fix_', 'error_', ]
for k, v in fitarg.items():
vn = k
pf = ''
for p in prefix:
if k.startswith(p):
vn = k[len(p):]
pf = p
newvn = pf + ren(vn)
ret[newvn] = v
return ret
def true_param(p):
"""Check if ``p`` is a parameter name, not a limit/error/fix attributes.
"""
return (not p.startswith('limit_') and
not p.startswith('error_') and
not p.startswith('fix_'))
def param_name(p):
"""Extract parameter name from attributes.
Examples:
- ``fix_x`` -> ``x``
- ``error_x`` -> ``x``
- ``limit_x`` -> ``x``
"""
prefix = ['limit_', 'error_', 'fix_']
for prf in prefix:
if p.startswith(prf):
return p[len(prf):]
return p
def extract_iv(b):
"""Extract initial value from fitargs dictionary."""
return dict((k, v) for k, v in b.items() if true_param(k))
def extract_limit(b):
"""Extract limit from fitargs dictionary."""
return dict((k, v) for k, v in b.items() if k.startswith('limit_'))
def extract_error(b):
"""Extract error from fitargs dictionary."""
return dict((k, v) for k, v in b.items() if k.startswith('error_'))
def extract_fix(b):
"""extract fix attribute from fitargs dictionary"""
return dict((k, v) for k, v in b.items() if k.startswith('fix_'))
def remove_var(b, exclude):
|
def make_func_code(params):
"""Make a func_code object to fake function signature.
You can make a funccode from describable object by::
make_func_code(describe(f))
"""
class FuncCode(object):
__slots__ = ('co_varnames', 'co_argcount')
fc = FuncCode()
fc.co_varnames = params
fc.co_argcount = len(params)
return fc
def format_exception(etype, evalue, tb):
# work around for https://bugs.python.org/issue17413
# the issue is not fixed in Python-3.7
import traceback
s = "".join(traceback.format_tb(tb))
return "%s: %s\n%s" % (etype.__name__, evalue, s)
| """Exclude variable in exclude list from b."""
return dict((k, v) for k, v in b.items() if param_name(k) not in exclude) | identifier_body |
util.py | """iminuit utility functions and classes.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import re
import types
from collections import OrderedDict, namedtuple
from . import repr_html
from . import repr_text
from operator import itemgetter
class Matrix(tuple):
"""Matrix data object (tuple of tuples)."""
__slots__ = ()
def __new__(self, names, data):
self.names = names
return tuple.__new__(Matrix, (tuple(x) for x in data))
def _repr_html_(self):
return repr_html.matrix(self)
def __str__(self):
return repr_text.matrix(self)
def _repr_pretty_(self, p, cycle):
if cycle:
p.text("Matrix(...)")
else:
p.text(str(self))
class dict_interface_mixin(object):
"Provides a dict-like interface for a namedtuple."
__slots__ = ()
def __getitem__(self, key):
base = super(dict_interface_mixin, self)
if isinstance(key, int):
return base.__getitem__(key)
else:
return base.__getattribute__(key)
def __contains__(self, key):
return key in self.keys()
def __iter__(self):
return iter(self.keys())
def keys(self):
return self._fields
def values(self):
base = super(dict_interface_mixin, self)
return tuple(base.__getitem__(i) for i in range(len(self)))
def items(self):
keys = self.keys()
values = self.values()
return tuple((keys[i], values[i]) for i in range(len(self)))
def __str__(self):
return self.__class__.__name__ + "(" + ", ".join("{}={}".format(k, repr(v)) for (k, v) in self.items()) + ")"
class Param(dict_interface_mixin, namedtuple("Param",
"number name value error is_const is_fixed has_limits "
"has_lower_limit has_upper_limit lower_limit upper_limit")):
"""Data object for a single Parameter."""
__slots__ = ()
class Params(list):
"""List of parameter data objects."""
def __init__(self, seq, merrors):
list.__init__(self, seq)
self.merrors = merrors
def _repr_html_(self):
return repr_html.params(self)
def __str__(self):
return repr_text.params(self)
def _repr_pretty_(self, p, cycle):
if cycle:
p.text("[...]")
else:
p.text(str(self))
class MError(dict_interface_mixin, namedtuple("MError",
"name is_valid lower upper lower_valid upper_valid at_lower_limit at_upper_limit "
"at_lower_max_fcn at_upper_max_fcn lower_new_min upper_new_min nfcn min")):
"""Minos result object."""
__slots__ = ()
def _repr_html_(self):
return repr_html.merror(self)
def __str__(self):
return repr_text.merror(self)
def _repr_pretty_(self, p, cycle):
if cycle:
p.text("MError(...)")
else:
p.text(str(self))
class MErrors(OrderedDict):
"""Dict from parameter name to Minos result object."""
def _repr_html_(self):
return "\n".join([x._repr_html_() for x in self.values()])
def __str__(self):
return "\n".join([str(x) for x in self.values()])
def _repr_pretty_(self, p, cycle):
if cycle:
p.text("MErrors(...)")
else:
p.text(str(self))
class FMin(dict_interface_mixin, namedtuple("FMin",
"fval edm tolerance nfcn ncalls up is_valid has_valid_parameters has_accurate_covar "
"has_posdef_covar has_made_posdef_covar hesse_failed has_covariance is_above_max_edm "
"has_reached_call_limit")):
"""Function minimum status object."""
__slots__ = ()
def _repr_html_(self):
return repr_html.fmin(self)
def __str__(self):
return repr_text.fmin(self)
def _repr_pretty_(self, p, cycle):
if cycle:
p.text("FMin(...)")
else:
p.text(str(self))
# MigradResult used to be a tuple, so we don't add the dict interface
class MigradResult(namedtuple("MigradResult", "fmin params")):
"""Holds the Migrad result."""
__slots__ = ()
def __str__(self):
return str(self.fmin) + "\n" + str(self.params)
def _repr_html_(self):
return self.fmin._repr_html_() + self.params._repr_html_()
def _repr_pretty_(self, p, cycle):
if cycle:
p.text("MigradResult(...)")
else:
p.text(str(self))
def arguments_from_docstring(doc):
"""Parse first line of docstring for argument name.
Docstring should be of the form ``min(iterable[, key=func])``.
It can also parse cython docstring of the form
``Minuit.migrad(self[, int ncall_me =10000, resume=True, int nsplit=1])``
"""
if doc is None:
raise RuntimeError('__doc__ is None')
doc = doc.lstrip()
# care only the firstline
# docstring can be long
line = doc.split('\n', 1)[0] # get the firstline
if line.startswith("('...',)"):
line = doc.split('\n', 2)[1] # get the second line
p = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
# 'min(iterable[, key=func])\n' -> 'iterable[, key=func]'
sig = p.search(line)
if sig is None:
return []
# iterable[, key=func]' -> ['iterable[' ,' key=func]']
sig = sig.groups()[0].split(',')
ret = []
for s in sig:
# get the last one after all space after =
# ex: int x= True
tmp = s.split('=')[0].split()[-1]
# clean up non _+alphanum character
tmp = ''.join([x for x in tmp if x.isalnum() or x == '_'])
ret.append(tmp)
# re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
# ret += self.docstring_kwd_re.findall(s)
ret = list(filter(lambda x: x != '', ret))
if len(ret) == 0:
raise RuntimeError('Your doc is unparsable\n' + doc)
return ret
def fc_or_c(f):
if hasattr(f, 'func_code'):
return f.func_code
else:
return f.__code__
def arguments_from_funccode(f):
"""Check f.funccode for arguments
"""
fc = fc_or_c(f)
vnames = fc.co_varnames
nargs = fc.co_argcount
# bound method and fake function will be None
args = vnames[1 if is_bound(f) else 0:nargs]
if not args:
raise RuntimeError('Function has variable number of arguments')
return list(args)
def arguments_from_call_funccode(f):
"""Check f.__call__.func_code for arguments
"""
fc = fc_or_c(f.__call__)
argcount = fc.co_argcount
args = list(fc.co_varnames[1:argcount])
if not args:
raise RuntimeError('Function has variable number of arguments')
return args
def is_bound(f):
"""Test whether ``f`` is a bound function.
"""
return getattr(f, '__self__', None) is not None
def dock_if_bound(f, v):
"""Dock off ``self`` if a bound function is passed.
"""
return v[1:] if is_bound(f) else v
def better_arg_spec(f, verbose=False):
"""Extract function signature.
..seealso::
:ref:`function-sig-label`
"""
# using funccode
try:
return arguments_from_funccode(f)
except Exception as e:
if verbose:
print(e) # TODO: this might not be such a good idea.
print("Extracting arguments from f.func_code/__code__ fails")
# using __call__ funccode
try:
return arguments_from_call_funccode(f)
except Exception as e:
if verbose:
print(e) # TODO: this might not be such a good idea.
print("Extracting arguments from f.__call__.func_code/__code__ fails")
# try:
# return list(inspect.getargspec(f.__call__)[0][1:])
# except Exception as e:
# if verbose:
# print(e)
# print("inspect.getargspec(f)[0] fails")
# try:
# return list(inspect.getargspec(f)[0])
# except Exception as e:
# if verbose:
# print(e)
# print("inspect.getargspec(f)[0] fails")
# now we are parsing __call__.__doc__
# we assume that __call__.__doc__ doesn't have self
# this is what cython gives
try:
t = arguments_from_docstring(f.__call__.__doc__)
if t[0] == 'self':
t = t[1:]
return t
except Exception as e:
if verbose:
print(e)
print("fail parsing __call__.__doc__")
# how about just __doc__
try:
t = arguments_from_docstring(f.__doc__)
if t[0] == 'self':
t = t[1:]
return t
except Exception as e:
if verbose:
print(e)
print("fail parsing __doc__")
raise TypeError("Unable to obtain function signature")
def describe(f, verbose=False):
"""Try to extract the function argument names.
.. seealso::
:ref:`function-sig-label`
"""
return better_arg_spec(f, verbose)
def fitarg_rename(fitarg, ren):
"""Rename variable names in ``fitarg`` with rename function.
::
#simple renaming
fitarg_rename({'x':1, 'limit_x':1, 'fix_x':1, 'error_x':1},
lambda pname: 'y' if pname=='x' else pname)
#{'y':1, 'limit_y':1, 'fix_y':1, 'error_y':1},
#prefixing
figarg_rename({'x':1, 'limit_x':1, 'fix_x':1, 'error_x':1},
lambda pname: 'prefix_'+pname)
#{'prefix_x':1, 'limit_prefix_x':1, 'fix_prefix_x':1, 'error_prefix_x':1}
"""
tmp = ren
if isinstance(ren, str):
ren = lambda x: tmp + '_' + x
ret = {}
prefix = ['limit_', 'fix_', 'error_', ]
for k, v in fitarg.items():
vn = k
pf = ''
for p in prefix:
|
newvn = pf + ren(vn)
ret[newvn] = v
return ret
def true_param(p):
"""Check if ``p`` is a parameter name, not a limit/error/fix attributes.
"""
return (not p.startswith('limit_') and
not p.startswith('error_') and
not p.startswith('fix_'))
def param_name(p):
"""Extract parameter name from attributes.
Examples:
- ``fix_x`` -> ``x``
- ``error_x`` -> ``x``
- ``limit_x`` -> ``x``
"""
prefix = ['limit_', 'error_', 'fix_']
for prf in prefix:
if p.startswith(prf):
return p[len(prf):]
return p
def extract_iv(b):
"""Extract initial value from fitargs dictionary."""
return dict((k, v) for k, v in b.items() if true_param(k))
def extract_limit(b):
"""Extract limit from fitargs dictionary."""
return dict((k, v) for k, v in b.items() if k.startswith('limit_'))
def extract_error(b):
"""Extract error from fitargs dictionary."""
return dict((k, v) for k, v in b.items() if k.startswith('error_'))
def extract_fix(b):
"""extract fix attribute from fitargs dictionary"""
return dict((k, v) for k, v in b.items() if k.startswith('fix_'))
def remove_var(b, exclude):
"""Exclude variable in exclude list from b."""
return dict((k, v) for k, v in b.items() if param_name(k) not in exclude)
def make_func_code(params):
"""Make a func_code object to fake function signature.
You can make a funccode from describable object by::
make_func_code(describe(f))
"""
class FuncCode(object):
__slots__ = ('co_varnames', 'co_argcount')
fc = FuncCode()
fc.co_varnames = params
fc.co_argcount = len(params)
return fc
def format_exception(etype, evalue, tb):
# work around for https://bugs.python.org/issue17413
# the issue is not fixed in Python-3.7
import traceback
s = "".join(traceback.format_tb(tb))
return "%s: %s\n%s" % (etype.__name__, evalue, s)
| if k.startswith(p):
vn = k[len(p):]
pf = p | conditional_block |
properties.ts | import {SignalRef, TimeInterval} from 'vega';
import {isArray, isNumber} from 'vega-util';
import {isBinned, isBinning, isBinParams} from '../../bin';
import {
COLOR,
FILL,
getSecondaryRangeChannel,
isXorY,
isXorYOffset,
POLAR_POSITION_SCALE_CHANNELS,
POSITION_SCALE_CHANNELS,
ScaleChannel,
STROKE
} from '../../channel';
import {
getFieldDef,
getFieldOrDatumDef,
isFieldDef,
ScaleDatumDef,
ScaleFieldDef,
TypedFieldDef,
valueExpr
} from '../../channeldef';
import {Config} from '../../config';
import {isDateTime} from '../../datetime';
import {channelHasNestedOffsetScale} from '../../encoding';
import * as log from '../../log';
import {Mark, MarkDef, RectConfig} from '../../mark';
import {
channelScalePropertyIncompatability,
Domain,
hasContinuousDomain,
isContinuousToContinuous,
isContinuousToDiscrete,
Scale,
ScaleConfig,
ScaleType,
scaleTypeSupportProperty
} from '../../scale';
import {Sort} from '../../sort';
import {Type} from '../../type';
import * as util from '../../util';
import {contains, getFirstDefined, keys} from '../../util';
import {isSignalRef, VgScale} from '../../vega.schema';
import {getBinSignalName} from '../data/bin';
import {isUnitModel, Model} from '../model';
import {SignalRefWrapper} from '../signal';
import {Explicit, mergeValuesWithExplicit, tieBreakByComparing} from '../split';
import {UnitModel} from '../unit';
import {ScaleComponentIndex, ScaleComponentProps} from './component';
import {parseUnitScaleRange} from './range';
export function parseScaleProperty(model: Model, property: Exclude<keyof (Scale | ScaleComponentProps), 'range'>) {
if (isUnitModel(model)) {
parseUnitScaleProperty(model, property);
} else {
parseNonUnitScaleProperty(model, property);
}
}
function parseUnitScaleProperty(model: UnitModel, property: Exclude<keyof (Scale | ScaleComponentProps), 'range'>) {
const localScaleComponents: ScaleComponentIndex = model.component.scales;
const {config, encoding, markDef, specifiedScales} = model;
for (const channel of keys(localScaleComponents)) {
const specifiedScale = specifiedScales[channel];
const localScaleCmpt = localScaleComponents[channel];
const mergedScaleCmpt = model.getScaleComponent(channel);
const fieldOrDatumDef = getFieldOrDatumDef(encoding[channel]) as ScaleFieldDef<string, Type> | ScaleDatumDef;
const specifiedValue = specifiedScale[property];
const scaleType = mergedScaleCmpt.get('type');
const scalePadding = mergedScaleCmpt.get('padding');
const scalePaddingInner = mergedScaleCmpt.get('paddingInner');
const supportedByScaleType = scaleTypeSupportProperty(scaleType, property);
const channelIncompatability = channelScalePropertyIncompatability(channel, property);
if (specifiedValue !== undefined) {
// If there is a specified value, check if it is compatible with scale type and channel
if (!supportedByScaleType) {
log.warn(log.message.scalePropertyNotWorkWithScaleType(scaleType, property, channel));
} else if (channelIncompatability) {
// channel
log.warn(channelIncompatability);
}
}
if (supportedByScaleType && channelIncompatability === undefined) {
if (specifiedValue !== undefined) {
const timeUnit = fieldOrDatumDef['timeUnit'];
const type = fieldOrDatumDef.type;
switch (property) {
// domainMax/Min to signal if the value is a datetime object
case 'domainMax':
case 'domainMin':
if (isDateTime(specifiedScale[property]) || type === 'temporal' || timeUnit) {
localScaleCmpt.set(property, {signal: valueExpr(specifiedScale[property], {type, timeUnit})}, true);
} else {
localScaleCmpt.set(property, specifiedScale[property] as any, true);
}
break;
default:
localScaleCmpt.copyKeyFromObject<Omit<ScaleComponentProps, 'range' | 'domainMin' | 'domainMax'>>(
property,
specifiedScale
);
}
} else {
const value =
property in scaleRules
? scaleRules[property]({
model,
channel,
fieldOrDatumDef,
scaleType,
scalePadding,
scalePaddingInner,
domain: specifiedScale.domain,
domainMin: specifiedScale.domainMin,
domainMax: specifiedScale.domainMax,
markDef,
config,
hasNestedOffsetScale: channelHasNestedOffsetScale(encoding, channel),
hasSecondaryRangeChannel: !!encoding[getSecondaryRangeChannel(channel)]
})
: config.scale[property];
if (value !== undefined) {
localScaleCmpt.set(property, value, false);
}
}
}
}
}
export interface ScaleRuleParams {
model: Model;
channel: ScaleChannel;
fieldOrDatumDef: ScaleFieldDef<string, Type> | ScaleDatumDef;
hasNestedOffsetScale: boolean;
scaleType: ScaleType;
scalePadding: number | SignalRef;
scalePaddingInner: number | SignalRef;
domain: Domain;
domainMin: Scale['domainMin'];
domainMax: Scale['domainMax'];
markDef: MarkDef<Mark, SignalRef>;
config: Config<SignalRef>;
hasSecondaryRangeChannel: boolean;
}
export const scaleRules: {
[k in keyof Scale]?: (params: ScaleRuleParams) => Scale[k];
} = {
bins: ({model, fieldOrDatumDef}) => (isFieldDef(fieldOrDatumDef) ? bins(model, fieldOrDatumDef) : undefined),
interpolate: ({channel, fieldOrDatumDef}) => interpolate(channel, fieldOrDatumDef.type),
nice: ({scaleType, channel, domain, domainMin, domainMax, fieldOrDatumDef}) =>
nice(scaleType, channel, domain, domainMin, domainMax, fieldOrDatumDef),
padding: ({channel, scaleType, fieldOrDatumDef, markDef, config}) =>
padding(channel, scaleType, config.scale, fieldOrDatumDef, markDef, config.bar),
paddingInner: ({scalePadding, channel, markDef, scaleType, config, hasNestedOffsetScale}) =>
paddingInner(scalePadding, channel, markDef.type, scaleType, config.scale, hasNestedOffsetScale),
paddingOuter: ({scalePadding, channel, scaleType, scalePaddingInner, config, hasNestedOffsetScale}) =>
paddingOuter(scalePadding, channel, scaleType, scalePaddingInner, config.scale, hasNestedOffsetScale),
reverse: ({fieldOrDatumDef, scaleType, channel, config}) => {
const sort = isFieldDef(fieldOrDatumDef) ? fieldOrDatumDef.sort : undefined;
return reverse(scaleType, sort, channel, config.scale);
},
zero: ({channel, fieldOrDatumDef, domain, markDef, scaleType, config, hasSecondaryRangeChannel}) =>
zero(channel, fieldOrDatumDef, domain, markDef, scaleType, config.scale, hasSecondaryRangeChannel)
};
// This method is here rather than in range.ts to avoid circular dependency.
export function parseScaleRange(model: Model) {
if (isUnitModel(model)) {
parseUnitScaleRange(model);
} else {
parseNonUnitScaleProperty(model, 'range');
}
}
export function parseNonUnitScaleProperty(model: Model, property: keyof (Scale | ScaleComponentProps)) {
const localScaleComponents: ScaleComponentIndex = model.component.scales;
for (const child of model.children) {
if (property === 'range') {
parseScaleRange(child);
} else {
parseScaleProperty(child, property);
}
}
for (const channel of keys(localScaleComponents)) {
let valueWithExplicit: Explicit<any>;
for (const child of model.children) {
const childComponent = child.component.scales[channel];
if (childComponent) {
const childValueWithExplicit = childComponent.getWithExplicit(property);
valueWithExplicit = mergeValuesWithExplicit<VgScale, any>(
valueWithExplicit,
childValueWithExplicit,
property,
'scale',
tieBreakByComparing<VgScale, any>((v1, v2) => {
switch (property) {
case 'range':
// For step, prefer larger step
if (v1.step && v2.step) {
return v1.step - v2.step;
}
return 0;
// TODO: precedence rule for other properties
}
return 0;
})
);
}
}
localScaleComponents[channel].setWithExplicit(property, valueWithExplicit);
}
}
export function bins(model: Model, fieldDef: TypedFieldDef<string>) {
const bin = fieldDef.bin;
if (isBinning(bin)) {
const binSignal = getBinSignalName(model, fieldDef.field, bin);
return new SignalRefWrapper(() => {
return model.getSignalName(binSignal);
});
} else if (isBinned(bin) && isBinParams(bin) && bin.step !== undefined) {
// start and stop will be determined from the scale domain
return {
step: bin.step
};
}
return undefined;
}
export function interpolate(channel: ScaleChannel, type: Type): Scale['interpolate'] {
if (contains([COLOR, FILL, STROKE], channel) && type !== 'nominal') {
return 'hcl';
}
return undefined;
}
export function nice(
scaleType: ScaleType,
channel: ScaleChannel,
specifiedDomain: Domain,
domainMin: Scale['domainMin'],
domainMax: Scale['domainMax'],
fieldOrDatumDef: TypedFieldDef<string> | ScaleDatumDef
): boolean | TimeInterval {
if (
getFieldDef(fieldOrDatumDef)?.bin ||
isArray(specifiedDomain) ||
domainMax != null ||
domainMin != null ||
util.contains([ScaleType.TIME, ScaleType.UTC], scaleType)
) {
return undefined;
}
return isXorY(channel) ? true : undefined;
}
export function padding(
channel: ScaleChannel,
scaleType: ScaleType,
scaleConfig: ScaleConfig<SignalRef>,
fieldOrDatumDef: TypedFieldDef<string> | ScaleDatumDef,
markDef: MarkDef<Mark, SignalRef>,
barConfig: RectConfig<SignalRef>
) {
if (isXorY(channel)) {
if (isContinuousToContinuous(scaleType)) {
if (scaleConfig.continuousPadding !== undefined) {
return scaleConfig.continuousPadding;
}
const {type, orient} = markDef;
if (type === 'bar' && !(isFieldDef(fieldOrDatumDef) && (fieldOrDatumDef.bin || fieldOrDatumDef.timeUnit))) {
if ((orient === 'vertical' && channel === 'x') || (orient === 'horizontal' && channel === 'y')) {
return barConfig.continuousBandSize;
}
}
}
if (scaleType === ScaleType.POINT) {
return scaleConfig.pointPadding;
}
}
return undefined;
}
export function paddingInner(
paddingValue: number | SignalRef,
channel: ScaleChannel,
mark: Mark,
scaleType: ScaleType,
scaleConfig: ScaleConfig<SignalRef>,
hasNestedOffsetScale = false
) {
if (paddingValue !== undefined) {
// If user has already manually specified "padding", no need to add default paddingInner.
return undefined;
}
if (isXorY(channel)) {
// Padding is only set for X and Y by default.
// Basically it doesn't make sense to add padding for color and size.
// paddingOuter would only be called if it's a band scale, just return the default for bandScale.
const {bandPaddingInner, barBandPaddingInner, rectBandPaddingInner, bandWithNestedOffsetPaddingInner} = scaleConfig;
if (hasNestedOffsetScale) {
return bandWithNestedOffsetPaddingInner;
}
return getFirstDefined(bandPaddingInner, mark === 'bar' ? barBandPaddingInner : rectBandPaddingInner);
} else if (isXorYOffset(channel)) {
if (scaleType === ScaleType.BAND) {
return scaleConfig.offsetBandPaddingInner;
}
}
return undefined;
}
export function paddingOuter(
paddingValue: number | SignalRef,
channel: ScaleChannel,
scaleType: ScaleType,
paddingInnerValue: number | SignalRef,
scaleConfig: ScaleConfig<SignalRef>,
hasNestedOffsetScale = false
) {
if (paddingValue !== undefined) {
// If user has already manually specified "padding", no need to add default paddingOuter.
return undefined;
}
if (isXorY(channel)) | else if (isXorYOffset(channel)) {
if (scaleType === ScaleType.POINT) {
return 0.5; // so the point positions align with centers of band scales.
} else if (scaleType === ScaleType.BAND) {
return scaleConfig.offsetBandPaddingOuter;
}
}
return undefined;
}
export function reverse(
scaleType: ScaleType,
sort: Sort<string>,
channel: ScaleChannel,
scaleConfig: ScaleConfig<SignalRef>
) {
if (channel === 'x' && scaleConfig.xReverse !== undefined) {
if (hasContinuousDomain(scaleType) && sort === 'descending') {
if (isSignalRef(scaleConfig.xReverse)) {
return {signal: `!${scaleConfig.xReverse.signal}`};
} else {
return !scaleConfig.xReverse;
}
}
return scaleConfig.xReverse;
}
if (hasContinuousDomain(scaleType) && sort === 'descending') {
// For continuous domain scales, Vega does not support domain sort.
// Thus, we reverse range instead if sort is descending
return true;
}
return undefined;
}
export function zero(
channel: ScaleChannel,
fieldDef: TypedFieldDef<string> | ScaleDatumDef,
specifiedDomain: Domain,
markDef: MarkDef,
scaleType: ScaleType,
scaleConfig: ScaleConfig<SignalRef>,
hasSecondaryRangeChannel: boolean
) {
// If users explicitly provide a domain, we should not augment zero as that will be unexpected.
const hasCustomDomain = !!specifiedDomain && specifiedDomain !== 'unaggregated';
if (hasCustomDomain) {
if (hasContinuousDomain(scaleType)) {
if (isArray(specifiedDomain)) {
const first = specifiedDomain[0];
const last = specifiedDomain[specifiedDomain.length - 1];
if (isNumber(first) && first <= 0 && isNumber(last) && last >= 0) {
// if the domain includes zero, make zero remain true
return true;
}
}
return false;
}
}
// If there is no custom domain, return configZero value (=`true` as default) only for the following cases:
// 1) using quantitative field with size
// While this can be either ratio or interval fields, our assumption is that
// ratio are more common. However, if the scaleType is discretizing scale, we want to return
// false so that range doesn't start at zero
if (channel === 'size' && fieldDef.type === 'quantitative' && !isContinuousToDiscrete(scaleType)) {
return true;
}
// 2) non-binned, quantitative x-scale or y-scale
// (For binning, we should not include zero by default because binning are calculated without zero.)
// (For area/bar charts with ratio scale chart, we should always include zero.)
if (
!(isFieldDef(fieldDef) && fieldDef.bin) &&
util.contains([...POSITION_SCALE_CHANNELS, ...POLAR_POSITION_SCALE_CHANNELS], channel)
) {
const {orient, type} = markDef;
if (contains(['bar', 'area', 'line', 'trail'], type)) {
if ((orient === 'horizontal' && channel === 'y') || (orient === 'vertical' && channel === 'x')) {
return false;
}
}
if (contains(['bar', 'area'], type) && !hasSecondaryRangeChannel) {
return true;
}
return scaleConfig?.zero;
}
return false;
}
| {
const {bandPaddingOuter, bandWithNestedOffsetPaddingOuter} = scaleConfig;
if (hasNestedOffsetScale) {
return bandWithNestedOffsetPaddingOuter;
}
// Padding is only set for X and Y by default.
// Basically it doesn't make sense to add padding for color and size.
if (scaleType === ScaleType.BAND) {
return getFirstDefined(
bandPaddingOuter,
/* By default, paddingOuter is paddingInner / 2. The reason is that
size (width/height) = step * (cardinality - paddingInner + 2 * paddingOuter).
and we want the width/height to be integer by default.
Note that step (by default) and cardinality are integers.) */
isSignalRef(paddingInnerValue) ? {signal: `${paddingInnerValue.signal}/2`} : paddingInnerValue / 2
);
}
} | conditional_block |
properties.ts | import {SignalRef, TimeInterval} from 'vega';
import {isArray, isNumber} from 'vega-util';
import {isBinned, isBinning, isBinParams} from '../../bin';
import {
COLOR,
FILL,
getSecondaryRangeChannel,
isXorY,
isXorYOffset,
POLAR_POSITION_SCALE_CHANNELS,
POSITION_SCALE_CHANNELS,
ScaleChannel,
STROKE
} from '../../channel';
import {
getFieldDef,
getFieldOrDatumDef,
isFieldDef,
ScaleDatumDef,
ScaleFieldDef,
TypedFieldDef,
valueExpr
} from '../../channeldef';
import {Config} from '../../config';
import {isDateTime} from '../../datetime';
import {channelHasNestedOffsetScale} from '../../encoding';
import * as log from '../../log';
import {Mark, MarkDef, RectConfig} from '../../mark';
import {
channelScalePropertyIncompatability,
Domain,
hasContinuousDomain,
isContinuousToContinuous,
isContinuousToDiscrete,
Scale,
ScaleConfig,
ScaleType,
scaleTypeSupportProperty
} from '../../scale';
import {Sort} from '../../sort';
import {Type} from '../../type';
import * as util from '../../util';
import {contains, getFirstDefined, keys} from '../../util';
import {isSignalRef, VgScale} from '../../vega.schema';
import {getBinSignalName} from '../data/bin';
import {isUnitModel, Model} from '../model';
import {SignalRefWrapper} from '../signal';
import {Explicit, mergeValuesWithExplicit, tieBreakByComparing} from '../split';
import {UnitModel} from '../unit';
import {ScaleComponentIndex, ScaleComponentProps} from './component';
import {parseUnitScaleRange} from './range';
export function | (model: Model, property: Exclude<keyof (Scale | ScaleComponentProps), 'range'>) {
if (isUnitModel(model)) {
parseUnitScaleProperty(model, property);
} else {
parseNonUnitScaleProperty(model, property);
}
}
function parseUnitScaleProperty(model: UnitModel, property: Exclude<keyof (Scale | ScaleComponentProps), 'range'>) {
const localScaleComponents: ScaleComponentIndex = model.component.scales;
const {config, encoding, markDef, specifiedScales} = model;
for (const channel of keys(localScaleComponents)) {
const specifiedScale = specifiedScales[channel];
const localScaleCmpt = localScaleComponents[channel];
const mergedScaleCmpt = model.getScaleComponent(channel);
const fieldOrDatumDef = getFieldOrDatumDef(encoding[channel]) as ScaleFieldDef<string, Type> | ScaleDatumDef;
const specifiedValue = specifiedScale[property];
const scaleType = mergedScaleCmpt.get('type');
const scalePadding = mergedScaleCmpt.get('padding');
const scalePaddingInner = mergedScaleCmpt.get('paddingInner');
const supportedByScaleType = scaleTypeSupportProperty(scaleType, property);
const channelIncompatability = channelScalePropertyIncompatability(channel, property);
if (specifiedValue !== undefined) {
// If there is a specified value, check if it is compatible with scale type and channel
if (!supportedByScaleType) {
log.warn(log.message.scalePropertyNotWorkWithScaleType(scaleType, property, channel));
} else if (channelIncompatability) {
// channel
log.warn(channelIncompatability);
}
}
if (supportedByScaleType && channelIncompatability === undefined) {
if (specifiedValue !== undefined) {
const timeUnit = fieldOrDatumDef['timeUnit'];
const type = fieldOrDatumDef.type;
switch (property) {
// domainMax/Min to signal if the value is a datetime object
case 'domainMax':
case 'domainMin':
if (isDateTime(specifiedScale[property]) || type === 'temporal' || timeUnit) {
localScaleCmpt.set(property, {signal: valueExpr(specifiedScale[property], {type, timeUnit})}, true);
} else {
localScaleCmpt.set(property, specifiedScale[property] as any, true);
}
break;
default:
localScaleCmpt.copyKeyFromObject<Omit<ScaleComponentProps, 'range' | 'domainMin' | 'domainMax'>>(
property,
specifiedScale
);
}
} else {
const value =
property in scaleRules
? scaleRules[property]({
model,
channel,
fieldOrDatumDef,
scaleType,
scalePadding,
scalePaddingInner,
domain: specifiedScale.domain,
domainMin: specifiedScale.domainMin,
domainMax: specifiedScale.domainMax,
markDef,
config,
hasNestedOffsetScale: channelHasNestedOffsetScale(encoding, channel),
hasSecondaryRangeChannel: !!encoding[getSecondaryRangeChannel(channel)]
})
: config.scale[property];
if (value !== undefined) {
localScaleCmpt.set(property, value, false);
}
}
}
}
}
export interface ScaleRuleParams {
model: Model;
channel: ScaleChannel;
fieldOrDatumDef: ScaleFieldDef<string, Type> | ScaleDatumDef;
hasNestedOffsetScale: boolean;
scaleType: ScaleType;
scalePadding: number | SignalRef;
scalePaddingInner: number | SignalRef;
domain: Domain;
domainMin: Scale['domainMin'];
domainMax: Scale['domainMax'];
markDef: MarkDef<Mark, SignalRef>;
config: Config<SignalRef>;
hasSecondaryRangeChannel: boolean;
}
export const scaleRules: {
[k in keyof Scale]?: (params: ScaleRuleParams) => Scale[k];
} = {
bins: ({model, fieldOrDatumDef}) => (isFieldDef(fieldOrDatumDef) ? bins(model, fieldOrDatumDef) : undefined),
interpolate: ({channel, fieldOrDatumDef}) => interpolate(channel, fieldOrDatumDef.type),
nice: ({scaleType, channel, domain, domainMin, domainMax, fieldOrDatumDef}) =>
nice(scaleType, channel, domain, domainMin, domainMax, fieldOrDatumDef),
padding: ({channel, scaleType, fieldOrDatumDef, markDef, config}) =>
padding(channel, scaleType, config.scale, fieldOrDatumDef, markDef, config.bar),
paddingInner: ({scalePadding, channel, markDef, scaleType, config, hasNestedOffsetScale}) =>
paddingInner(scalePadding, channel, markDef.type, scaleType, config.scale, hasNestedOffsetScale),
paddingOuter: ({scalePadding, channel, scaleType, scalePaddingInner, config, hasNestedOffsetScale}) =>
paddingOuter(scalePadding, channel, scaleType, scalePaddingInner, config.scale, hasNestedOffsetScale),
reverse: ({fieldOrDatumDef, scaleType, channel, config}) => {
const sort = isFieldDef(fieldOrDatumDef) ? fieldOrDatumDef.sort : undefined;
return reverse(scaleType, sort, channel, config.scale);
},
zero: ({channel, fieldOrDatumDef, domain, markDef, scaleType, config, hasSecondaryRangeChannel}) =>
zero(channel, fieldOrDatumDef, domain, markDef, scaleType, config.scale, hasSecondaryRangeChannel)
};
// This method is here rather than in range.ts to avoid circular dependency.
export function parseScaleRange(model: Model) {
if (isUnitModel(model)) {
parseUnitScaleRange(model);
} else {
parseNonUnitScaleProperty(model, 'range');
}
}
export function parseNonUnitScaleProperty(model: Model, property: keyof (Scale | ScaleComponentProps)) {
const localScaleComponents: ScaleComponentIndex = model.component.scales;
for (const child of model.children) {
if (property === 'range') {
parseScaleRange(child);
} else {
parseScaleProperty(child, property);
}
}
for (const channel of keys(localScaleComponents)) {
let valueWithExplicit: Explicit<any>;
for (const child of model.children) {
const childComponent = child.component.scales[channel];
if (childComponent) {
const childValueWithExplicit = childComponent.getWithExplicit(property);
valueWithExplicit = mergeValuesWithExplicit<VgScale, any>(
valueWithExplicit,
childValueWithExplicit,
property,
'scale',
tieBreakByComparing<VgScale, any>((v1, v2) => {
switch (property) {
case 'range':
// For step, prefer larger step
if (v1.step && v2.step) {
return v1.step - v2.step;
}
return 0;
// TODO: precedence rule for other properties
}
return 0;
})
);
}
}
localScaleComponents[channel].setWithExplicit(property, valueWithExplicit);
}
}
export function bins(model: Model, fieldDef: TypedFieldDef<string>) {
const bin = fieldDef.bin;
if (isBinning(bin)) {
const binSignal = getBinSignalName(model, fieldDef.field, bin);
return new SignalRefWrapper(() => {
return model.getSignalName(binSignal);
});
} else if (isBinned(bin) && isBinParams(bin) && bin.step !== undefined) {
// start and stop will be determined from the scale domain
return {
step: bin.step
};
}
return undefined;
}
export function interpolate(channel: ScaleChannel, type: Type): Scale['interpolate'] {
if (contains([COLOR, FILL, STROKE], channel) && type !== 'nominal') {
return 'hcl';
}
return undefined;
}
export function nice(
scaleType: ScaleType,
channel: ScaleChannel,
specifiedDomain: Domain,
domainMin: Scale['domainMin'],
domainMax: Scale['domainMax'],
fieldOrDatumDef: TypedFieldDef<string> | ScaleDatumDef
): boolean | TimeInterval {
if (
getFieldDef(fieldOrDatumDef)?.bin ||
isArray(specifiedDomain) ||
domainMax != null ||
domainMin != null ||
util.contains([ScaleType.TIME, ScaleType.UTC], scaleType)
) {
return undefined;
}
return isXorY(channel) ? true : undefined;
}
export function padding(
channel: ScaleChannel,
scaleType: ScaleType,
scaleConfig: ScaleConfig<SignalRef>,
fieldOrDatumDef: TypedFieldDef<string> | ScaleDatumDef,
markDef: MarkDef<Mark, SignalRef>,
barConfig: RectConfig<SignalRef>
) {
if (isXorY(channel)) {
if (isContinuousToContinuous(scaleType)) {
if (scaleConfig.continuousPadding !== undefined) {
return scaleConfig.continuousPadding;
}
const {type, orient} = markDef;
if (type === 'bar' && !(isFieldDef(fieldOrDatumDef) && (fieldOrDatumDef.bin || fieldOrDatumDef.timeUnit))) {
if ((orient === 'vertical' && channel === 'x') || (orient === 'horizontal' && channel === 'y')) {
return barConfig.continuousBandSize;
}
}
}
if (scaleType === ScaleType.POINT) {
return scaleConfig.pointPadding;
}
}
return undefined;
}
export function paddingInner(
paddingValue: number | SignalRef,
channel: ScaleChannel,
mark: Mark,
scaleType: ScaleType,
scaleConfig: ScaleConfig<SignalRef>,
hasNestedOffsetScale = false
) {
if (paddingValue !== undefined) {
// If user has already manually specified "padding", no need to add default paddingInner.
return undefined;
}
if (isXorY(channel)) {
// Padding is only set for X and Y by default.
// Basically it doesn't make sense to add padding for color and size.
// paddingOuter would only be called if it's a band scale, just return the default for bandScale.
const {bandPaddingInner, barBandPaddingInner, rectBandPaddingInner, bandWithNestedOffsetPaddingInner} = scaleConfig;
if (hasNestedOffsetScale) {
return bandWithNestedOffsetPaddingInner;
}
return getFirstDefined(bandPaddingInner, mark === 'bar' ? barBandPaddingInner : rectBandPaddingInner);
} else if (isXorYOffset(channel)) {
if (scaleType === ScaleType.BAND) {
return scaleConfig.offsetBandPaddingInner;
}
}
return undefined;
}
export function paddingOuter(
paddingValue: number | SignalRef,
channel: ScaleChannel,
scaleType: ScaleType,
paddingInnerValue: number | SignalRef,
scaleConfig: ScaleConfig<SignalRef>,
hasNestedOffsetScale = false
) {
if (paddingValue !== undefined) {
// If user has already manually specified "padding", no need to add default paddingOuter.
return undefined;
}
if (isXorY(channel)) {
const {bandPaddingOuter, bandWithNestedOffsetPaddingOuter} = scaleConfig;
if (hasNestedOffsetScale) {
return bandWithNestedOffsetPaddingOuter;
}
// Padding is only set for X and Y by default.
// Basically it doesn't make sense to add padding for color and size.
if (scaleType === ScaleType.BAND) {
return getFirstDefined(
bandPaddingOuter,
/* By default, paddingOuter is paddingInner / 2. The reason is that
size (width/height) = step * (cardinality - paddingInner + 2 * paddingOuter).
and we want the width/height to be integer by default.
Note that step (by default) and cardinality are integers.) */
isSignalRef(paddingInnerValue) ? {signal: `${paddingInnerValue.signal}/2`} : paddingInnerValue / 2
);
}
} else if (isXorYOffset(channel)) {
if (scaleType === ScaleType.POINT) {
return 0.5; // so the point positions align with centers of band scales.
} else if (scaleType === ScaleType.BAND) {
return scaleConfig.offsetBandPaddingOuter;
}
}
return undefined;
}
export function reverse(
scaleType: ScaleType,
sort: Sort<string>,
channel: ScaleChannel,
scaleConfig: ScaleConfig<SignalRef>
) {
if (channel === 'x' && scaleConfig.xReverse !== undefined) {
if (hasContinuousDomain(scaleType) && sort === 'descending') {
if (isSignalRef(scaleConfig.xReverse)) {
return {signal: `!${scaleConfig.xReverse.signal}`};
} else {
return !scaleConfig.xReverse;
}
}
return scaleConfig.xReverse;
}
if (hasContinuousDomain(scaleType) && sort === 'descending') {
// For continuous domain scales, Vega does not support domain sort.
// Thus, we reverse range instead if sort is descending
return true;
}
return undefined;
}
export function zero(
channel: ScaleChannel,
fieldDef: TypedFieldDef<string> | ScaleDatumDef,
specifiedDomain: Domain,
markDef: MarkDef,
scaleType: ScaleType,
scaleConfig: ScaleConfig<SignalRef>,
hasSecondaryRangeChannel: boolean
) {
// If users explicitly provide a domain, we should not augment zero as that will be unexpected.
const hasCustomDomain = !!specifiedDomain && specifiedDomain !== 'unaggregated';
if (hasCustomDomain) {
if (hasContinuousDomain(scaleType)) {
if (isArray(specifiedDomain)) {
const first = specifiedDomain[0];
const last = specifiedDomain[specifiedDomain.length - 1];
if (isNumber(first) && first <= 0 && isNumber(last) && last >= 0) {
// if the domain includes zero, make zero remain true
return true;
}
}
return false;
}
}
// If there is no custom domain, return configZero value (=`true` as default) only for the following cases:
// 1) using quantitative field with size
// While this can be either ratio or interval fields, our assumption is that
// ratio are more common. However, if the scaleType is discretizing scale, we want to return
// false so that range doesn't start at zero
if (channel === 'size' && fieldDef.type === 'quantitative' && !isContinuousToDiscrete(scaleType)) {
return true;
}
// 2) non-binned, quantitative x-scale or y-scale
// (For binning, we should not include zero by default because binning are calculated without zero.)
// (For area/bar charts with ratio scale chart, we should always include zero.)
if (
!(isFieldDef(fieldDef) && fieldDef.bin) &&
util.contains([...POSITION_SCALE_CHANNELS, ...POLAR_POSITION_SCALE_CHANNELS], channel)
) {
const {orient, type} = markDef;
if (contains(['bar', 'area', 'line', 'trail'], type)) {
if ((orient === 'horizontal' && channel === 'y') || (orient === 'vertical' && channel === 'x')) {
return false;
}
}
if (contains(['bar', 'area'], type) && !hasSecondaryRangeChannel) {
return true;
}
return scaleConfig?.zero;
}
return false;
}
| parseScaleProperty | identifier_name |
properties.ts | import {SignalRef, TimeInterval} from 'vega';
import {isArray, isNumber} from 'vega-util';
import {isBinned, isBinning, isBinParams} from '../../bin';
import {
COLOR,
FILL,
getSecondaryRangeChannel,
isXorY,
isXorYOffset,
POLAR_POSITION_SCALE_CHANNELS,
POSITION_SCALE_CHANNELS,
ScaleChannel,
STROKE
} from '../../channel';
import {
getFieldDef,
getFieldOrDatumDef,
isFieldDef,
ScaleDatumDef,
ScaleFieldDef,
TypedFieldDef,
valueExpr
} from '../../channeldef';
import {Config} from '../../config';
import {isDateTime} from '../../datetime';
import {channelHasNestedOffsetScale} from '../../encoding';
import * as log from '../../log';
import {Mark, MarkDef, RectConfig} from '../../mark';
import {
channelScalePropertyIncompatability,
Domain,
hasContinuousDomain,
isContinuousToContinuous,
isContinuousToDiscrete,
Scale,
ScaleConfig,
ScaleType,
scaleTypeSupportProperty
} from '../../scale';
import {Sort} from '../../sort';
import {Type} from '../../type';
import * as util from '../../util';
import {contains, getFirstDefined, keys} from '../../util';
import {isSignalRef, VgScale} from '../../vega.schema';
import {getBinSignalName} from '../data/bin';
import {isUnitModel, Model} from '../model';
import {SignalRefWrapper} from '../signal';
import {Explicit, mergeValuesWithExplicit, tieBreakByComparing} from '../split';
import {UnitModel} from '../unit';
import {ScaleComponentIndex, ScaleComponentProps} from './component';
import {parseUnitScaleRange} from './range';
export function parseScaleProperty(model: Model, property: Exclude<keyof (Scale | ScaleComponentProps), 'range'>) {
if (isUnitModel(model)) {
parseUnitScaleProperty(model, property);
} else {
parseNonUnitScaleProperty(model, property);
}
}
function parseUnitScaleProperty(model: UnitModel, property: Exclude<keyof (Scale | ScaleComponentProps), 'range'>) {
const localScaleComponents: ScaleComponentIndex = model.component.scales;
const {config, encoding, markDef, specifiedScales} = model;
for (const channel of keys(localScaleComponents)) {
const specifiedScale = specifiedScales[channel];
const localScaleCmpt = localScaleComponents[channel];
const mergedScaleCmpt = model.getScaleComponent(channel);
const fieldOrDatumDef = getFieldOrDatumDef(encoding[channel]) as ScaleFieldDef<string, Type> | ScaleDatumDef;
const specifiedValue = specifiedScale[property];
const scaleType = mergedScaleCmpt.get('type');
const scalePadding = mergedScaleCmpt.get('padding');
const scalePaddingInner = mergedScaleCmpt.get('paddingInner');
const supportedByScaleType = scaleTypeSupportProperty(scaleType, property);
const channelIncompatability = channelScalePropertyIncompatability(channel, property);
if (specifiedValue !== undefined) {
// If there is a specified value, check if it is compatible with scale type and channel
if (!supportedByScaleType) {
log.warn(log.message.scalePropertyNotWorkWithScaleType(scaleType, property, channel));
} else if (channelIncompatability) {
// channel
log.warn(channelIncompatability);
}
}
if (supportedByScaleType && channelIncompatability === undefined) {
if (specifiedValue !== undefined) {
const timeUnit = fieldOrDatumDef['timeUnit'];
const type = fieldOrDatumDef.type;
switch (property) {
// domainMax/Min to signal if the value is a datetime object
case 'domainMax':
case 'domainMin':
if (isDateTime(specifiedScale[property]) || type === 'temporal' || timeUnit) {
localScaleCmpt.set(property, {signal: valueExpr(specifiedScale[property], {type, timeUnit})}, true);
} else {
localScaleCmpt.set(property, specifiedScale[property] as any, true);
}
break;
default:
localScaleCmpt.copyKeyFromObject<Omit<ScaleComponentProps, 'range' | 'domainMin' | 'domainMax'>>(
property,
specifiedScale
);
}
} else {
const value =
property in scaleRules
? scaleRules[property]({
model,
channel,
fieldOrDatumDef,
scaleType,
scalePadding,
scalePaddingInner,
domain: specifiedScale.domain,
domainMin: specifiedScale.domainMin,
domainMax: specifiedScale.domainMax,
markDef,
config,
hasNestedOffsetScale: channelHasNestedOffsetScale(encoding, channel),
hasSecondaryRangeChannel: !!encoding[getSecondaryRangeChannel(channel)]
})
: config.scale[property];
if (value !== undefined) {
localScaleCmpt.set(property, value, false);
}
}
}
}
}
export interface ScaleRuleParams {
model: Model;
channel: ScaleChannel;
fieldOrDatumDef: ScaleFieldDef<string, Type> | ScaleDatumDef;
hasNestedOffsetScale: boolean;
scaleType: ScaleType;
scalePadding: number | SignalRef;
scalePaddingInner: number | SignalRef;
domain: Domain;
domainMin: Scale['domainMin'];
domainMax: Scale['domainMax'];
markDef: MarkDef<Mark, SignalRef>;
config: Config<SignalRef>;
hasSecondaryRangeChannel: boolean;
}
export const scaleRules: {
[k in keyof Scale]?: (params: ScaleRuleParams) => Scale[k];
} = {
bins: ({model, fieldOrDatumDef}) => (isFieldDef(fieldOrDatumDef) ? bins(model, fieldOrDatumDef) : undefined),
interpolate: ({channel, fieldOrDatumDef}) => interpolate(channel, fieldOrDatumDef.type),
nice: ({scaleType, channel, domain, domainMin, domainMax, fieldOrDatumDef}) =>
nice(scaleType, channel, domain, domainMin, domainMax, fieldOrDatumDef),
padding: ({channel, scaleType, fieldOrDatumDef, markDef, config}) =>
padding(channel, scaleType, config.scale, fieldOrDatumDef, markDef, config.bar),
paddingInner: ({scalePadding, channel, markDef, scaleType, config, hasNestedOffsetScale}) =>
paddingInner(scalePadding, channel, markDef.type, scaleType, config.scale, hasNestedOffsetScale),
paddingOuter: ({scalePadding, channel, scaleType, scalePaddingInner, config, hasNestedOffsetScale}) =>
paddingOuter(scalePadding, channel, scaleType, scalePaddingInner, config.scale, hasNestedOffsetScale),
reverse: ({fieldOrDatumDef, scaleType, channel, config}) => {
const sort = isFieldDef(fieldOrDatumDef) ? fieldOrDatumDef.sort : undefined;
return reverse(scaleType, sort, channel, config.scale);
},
zero: ({channel, fieldOrDatumDef, domain, markDef, scaleType, config, hasSecondaryRangeChannel}) =>
zero(channel, fieldOrDatumDef, domain, markDef, scaleType, config.scale, hasSecondaryRangeChannel)
};
// This method is here rather than in range.ts to avoid circular dependency.
export function parseScaleRange(model: Model) {
if (isUnitModel(model)) {
parseUnitScaleRange(model);
} else {
parseNonUnitScaleProperty(model, 'range');
}
}
export function parseNonUnitScaleProperty(model: Model, property: keyof (Scale | ScaleComponentProps)) {
const localScaleComponents: ScaleComponentIndex = model.component.scales;
for (const child of model.children) {
if (property === 'range') {
parseScaleRange(child);
} else {
parseScaleProperty(child, property);
}
}
for (const channel of keys(localScaleComponents)) {
let valueWithExplicit: Explicit<any>;
for (const child of model.children) {
const childComponent = child.component.scales[channel];
if (childComponent) {
const childValueWithExplicit = childComponent.getWithExplicit(property);
valueWithExplicit = mergeValuesWithExplicit<VgScale, any>( | switch (property) {
case 'range':
// For step, prefer larger step
if (v1.step && v2.step) {
return v1.step - v2.step;
}
return 0;
// TODO: precedence rule for other properties
}
return 0;
})
);
}
}
localScaleComponents[channel].setWithExplicit(property, valueWithExplicit);
}
}
export function bins(model: Model, fieldDef: TypedFieldDef<string>) {
const bin = fieldDef.bin;
if (isBinning(bin)) {
const binSignal = getBinSignalName(model, fieldDef.field, bin);
return new SignalRefWrapper(() => {
return model.getSignalName(binSignal);
});
} else if (isBinned(bin) && isBinParams(bin) && bin.step !== undefined) {
// start and stop will be determined from the scale domain
return {
step: bin.step
};
}
return undefined;
}
export function interpolate(channel: ScaleChannel, type: Type): Scale['interpolate'] {
if (contains([COLOR, FILL, STROKE], channel) && type !== 'nominal') {
return 'hcl';
}
return undefined;
}
export function nice(
scaleType: ScaleType,
channel: ScaleChannel,
specifiedDomain: Domain,
domainMin: Scale['domainMin'],
domainMax: Scale['domainMax'],
fieldOrDatumDef: TypedFieldDef<string> | ScaleDatumDef
): boolean | TimeInterval {
if (
getFieldDef(fieldOrDatumDef)?.bin ||
isArray(specifiedDomain) ||
domainMax != null ||
domainMin != null ||
util.contains([ScaleType.TIME, ScaleType.UTC], scaleType)
) {
return undefined;
}
return isXorY(channel) ? true : undefined;
}
export function padding(
channel: ScaleChannel,
scaleType: ScaleType,
scaleConfig: ScaleConfig<SignalRef>,
fieldOrDatumDef: TypedFieldDef<string> | ScaleDatumDef,
markDef: MarkDef<Mark, SignalRef>,
barConfig: RectConfig<SignalRef>
) {
if (isXorY(channel)) {
if (isContinuousToContinuous(scaleType)) {
if (scaleConfig.continuousPadding !== undefined) {
return scaleConfig.continuousPadding;
}
const {type, orient} = markDef;
if (type === 'bar' && !(isFieldDef(fieldOrDatumDef) && (fieldOrDatumDef.bin || fieldOrDatumDef.timeUnit))) {
if ((orient === 'vertical' && channel === 'x') || (orient === 'horizontal' && channel === 'y')) {
return barConfig.continuousBandSize;
}
}
}
if (scaleType === ScaleType.POINT) {
return scaleConfig.pointPadding;
}
}
return undefined;
}
export function paddingInner(
paddingValue: number | SignalRef,
channel: ScaleChannel,
mark: Mark,
scaleType: ScaleType,
scaleConfig: ScaleConfig<SignalRef>,
hasNestedOffsetScale = false
) {
if (paddingValue !== undefined) {
// If user has already manually specified "padding", no need to add default paddingInner.
return undefined;
}
if (isXorY(channel)) {
// Padding is only set for X and Y by default.
// Basically it doesn't make sense to add padding for color and size.
// paddingOuter would only be called if it's a band scale, just return the default for bandScale.
const {bandPaddingInner, barBandPaddingInner, rectBandPaddingInner, bandWithNestedOffsetPaddingInner} = scaleConfig;
if (hasNestedOffsetScale) {
return bandWithNestedOffsetPaddingInner;
}
return getFirstDefined(bandPaddingInner, mark === 'bar' ? barBandPaddingInner : rectBandPaddingInner);
} else if (isXorYOffset(channel)) {
if (scaleType === ScaleType.BAND) {
return scaleConfig.offsetBandPaddingInner;
}
}
return undefined;
}
export function paddingOuter(
paddingValue: number | SignalRef,
channel: ScaleChannel,
scaleType: ScaleType,
paddingInnerValue: number | SignalRef,
scaleConfig: ScaleConfig<SignalRef>,
hasNestedOffsetScale = false
) {
if (paddingValue !== undefined) {
// If user has already manually specified "padding", no need to add default paddingOuter.
return undefined;
}
if (isXorY(channel)) {
const {bandPaddingOuter, bandWithNestedOffsetPaddingOuter} = scaleConfig;
if (hasNestedOffsetScale) {
return bandWithNestedOffsetPaddingOuter;
}
// Padding is only set for X and Y by default.
// Basically it doesn't make sense to add padding for color and size.
if (scaleType === ScaleType.BAND) {
return getFirstDefined(
bandPaddingOuter,
/* By default, paddingOuter is paddingInner / 2. The reason is that
size (width/height) = step * (cardinality - paddingInner + 2 * paddingOuter).
and we want the width/height to be integer by default.
Note that step (by default) and cardinality are integers.) */
isSignalRef(paddingInnerValue) ? {signal: `${paddingInnerValue.signal}/2`} : paddingInnerValue / 2
);
}
} else if (isXorYOffset(channel)) {
if (scaleType === ScaleType.POINT) {
return 0.5; // so the point positions align with centers of band scales.
} else if (scaleType === ScaleType.BAND) {
return scaleConfig.offsetBandPaddingOuter;
}
}
return undefined;
}
export function reverse(
scaleType: ScaleType,
sort: Sort<string>,
channel: ScaleChannel,
scaleConfig: ScaleConfig<SignalRef>
) {
if (channel === 'x' && scaleConfig.xReverse !== undefined) {
if (hasContinuousDomain(scaleType) && sort === 'descending') {
if (isSignalRef(scaleConfig.xReverse)) {
return {signal: `!${scaleConfig.xReverse.signal}`};
} else {
return !scaleConfig.xReverse;
}
}
return scaleConfig.xReverse;
}
if (hasContinuousDomain(scaleType) && sort === 'descending') {
// For continuous domain scales, Vega does not support domain sort.
// Thus, we reverse range instead if sort is descending
return true;
}
return undefined;
}
export function zero(
channel: ScaleChannel,
fieldDef: TypedFieldDef<string> | ScaleDatumDef,
specifiedDomain: Domain,
markDef: MarkDef,
scaleType: ScaleType,
scaleConfig: ScaleConfig<SignalRef>,
hasSecondaryRangeChannel: boolean
) {
// If users explicitly provide a domain, we should not augment zero as that will be unexpected.
const hasCustomDomain = !!specifiedDomain && specifiedDomain !== 'unaggregated';
if (hasCustomDomain) {
if (hasContinuousDomain(scaleType)) {
if (isArray(specifiedDomain)) {
const first = specifiedDomain[0];
const last = specifiedDomain[specifiedDomain.length - 1];
if (isNumber(first) && first <= 0 && isNumber(last) && last >= 0) {
// if the domain includes zero, make zero remain true
return true;
}
}
return false;
}
}
// If there is no custom domain, return configZero value (=`true` as default) only for the following cases:
// 1) using quantitative field with size
// While this can be either ratio or interval fields, our assumption is that
// ratio are more common. However, if the scaleType is discretizing scale, we want to return
// false so that range doesn't start at zero
if (channel === 'size' && fieldDef.type === 'quantitative' && !isContinuousToDiscrete(scaleType)) {
return true;
}
// 2) non-binned, quantitative x-scale or y-scale
// (For binning, we should not include zero by default because binning are calculated without zero.)
// (For area/bar charts with ratio scale chart, we should always include zero.)
if (
!(isFieldDef(fieldDef) && fieldDef.bin) &&
util.contains([...POSITION_SCALE_CHANNELS, ...POLAR_POSITION_SCALE_CHANNELS], channel)
) {
const {orient, type} = markDef;
if (contains(['bar', 'area', 'line', 'trail'], type)) {
if ((orient === 'horizontal' && channel === 'y') || (orient === 'vertical' && channel === 'x')) {
return false;
}
}
if (contains(['bar', 'area'], type) && !hasSecondaryRangeChannel) {
return true;
}
return scaleConfig?.zero;
}
return false;
} | valueWithExplicit,
childValueWithExplicit,
property,
'scale',
tieBreakByComparing<VgScale, any>((v1, v2) => { | random_line_split |
properties.ts | import {SignalRef, TimeInterval} from 'vega';
import {isArray, isNumber} from 'vega-util';
import {isBinned, isBinning, isBinParams} from '../../bin';
import {
COLOR,
FILL,
getSecondaryRangeChannel,
isXorY,
isXorYOffset,
POLAR_POSITION_SCALE_CHANNELS,
POSITION_SCALE_CHANNELS,
ScaleChannel,
STROKE
} from '../../channel';
import {
getFieldDef,
getFieldOrDatumDef,
isFieldDef,
ScaleDatumDef,
ScaleFieldDef,
TypedFieldDef,
valueExpr
} from '../../channeldef';
import {Config} from '../../config';
import {isDateTime} from '../../datetime';
import {channelHasNestedOffsetScale} from '../../encoding';
import * as log from '../../log';
import {Mark, MarkDef, RectConfig} from '../../mark';
import {
channelScalePropertyIncompatability,
Domain,
hasContinuousDomain,
isContinuousToContinuous,
isContinuousToDiscrete,
Scale,
ScaleConfig,
ScaleType,
scaleTypeSupportProperty
} from '../../scale';
import {Sort} from '../../sort';
import {Type} from '../../type';
import * as util from '../../util';
import {contains, getFirstDefined, keys} from '../../util';
import {isSignalRef, VgScale} from '../../vega.schema';
import {getBinSignalName} from '../data/bin';
import {isUnitModel, Model} from '../model';
import {SignalRefWrapper} from '../signal';
import {Explicit, mergeValuesWithExplicit, tieBreakByComparing} from '../split';
import {UnitModel} from '../unit';
import {ScaleComponentIndex, ScaleComponentProps} from './component';
import {parseUnitScaleRange} from './range';
export function parseScaleProperty(model: Model, property: Exclude<keyof (Scale | ScaleComponentProps), 'range'>) {
if (isUnitModel(model)) {
parseUnitScaleProperty(model, property);
} else {
parseNonUnitScaleProperty(model, property);
}
}
function parseUnitScaleProperty(model: UnitModel, property: Exclude<keyof (Scale | ScaleComponentProps), 'range'>) {
const localScaleComponents: ScaleComponentIndex = model.component.scales;
const {config, encoding, markDef, specifiedScales} = model;
for (const channel of keys(localScaleComponents)) {
const specifiedScale = specifiedScales[channel];
const localScaleCmpt = localScaleComponents[channel];
const mergedScaleCmpt = model.getScaleComponent(channel);
const fieldOrDatumDef = getFieldOrDatumDef(encoding[channel]) as ScaleFieldDef<string, Type> | ScaleDatumDef;
const specifiedValue = specifiedScale[property];
const scaleType = mergedScaleCmpt.get('type');
const scalePadding = mergedScaleCmpt.get('padding');
const scalePaddingInner = mergedScaleCmpt.get('paddingInner');
const supportedByScaleType = scaleTypeSupportProperty(scaleType, property);
const channelIncompatability = channelScalePropertyIncompatability(channel, property);
if (specifiedValue !== undefined) {
// If there is a specified value, check if it is compatible with scale type and channel
if (!supportedByScaleType) {
log.warn(log.message.scalePropertyNotWorkWithScaleType(scaleType, property, channel));
} else if (channelIncompatability) {
// channel
log.warn(channelIncompatability);
}
}
if (supportedByScaleType && channelIncompatability === undefined) {
if (specifiedValue !== undefined) {
const timeUnit = fieldOrDatumDef['timeUnit'];
const type = fieldOrDatumDef.type;
switch (property) {
// domainMax/Min to signal if the value is a datetime object
case 'domainMax':
case 'domainMin':
if (isDateTime(specifiedScale[property]) || type === 'temporal' || timeUnit) {
localScaleCmpt.set(property, {signal: valueExpr(specifiedScale[property], {type, timeUnit})}, true);
} else {
localScaleCmpt.set(property, specifiedScale[property] as any, true);
}
break;
default:
localScaleCmpt.copyKeyFromObject<Omit<ScaleComponentProps, 'range' | 'domainMin' | 'domainMax'>>(
property,
specifiedScale
);
}
} else {
const value =
property in scaleRules
? scaleRules[property]({
model,
channel,
fieldOrDatumDef,
scaleType,
scalePadding,
scalePaddingInner,
domain: specifiedScale.domain,
domainMin: specifiedScale.domainMin,
domainMax: specifiedScale.domainMax,
markDef,
config,
hasNestedOffsetScale: channelHasNestedOffsetScale(encoding, channel),
hasSecondaryRangeChannel: !!encoding[getSecondaryRangeChannel(channel)]
})
: config.scale[property];
if (value !== undefined) {
localScaleCmpt.set(property, value, false);
}
}
}
}
}
export interface ScaleRuleParams {
model: Model;
channel: ScaleChannel;
fieldOrDatumDef: ScaleFieldDef<string, Type> | ScaleDatumDef;
hasNestedOffsetScale: boolean;
scaleType: ScaleType;
scalePadding: number | SignalRef;
scalePaddingInner: number | SignalRef;
domain: Domain;
domainMin: Scale['domainMin'];
domainMax: Scale['domainMax'];
markDef: MarkDef<Mark, SignalRef>;
config: Config<SignalRef>;
hasSecondaryRangeChannel: boolean;
}
export const scaleRules: {
[k in keyof Scale]?: (params: ScaleRuleParams) => Scale[k];
} = {
bins: ({model, fieldOrDatumDef}) => (isFieldDef(fieldOrDatumDef) ? bins(model, fieldOrDatumDef) : undefined),
interpolate: ({channel, fieldOrDatumDef}) => interpolate(channel, fieldOrDatumDef.type),
nice: ({scaleType, channel, domain, domainMin, domainMax, fieldOrDatumDef}) =>
nice(scaleType, channel, domain, domainMin, domainMax, fieldOrDatumDef),
padding: ({channel, scaleType, fieldOrDatumDef, markDef, config}) =>
padding(channel, scaleType, config.scale, fieldOrDatumDef, markDef, config.bar),
paddingInner: ({scalePadding, channel, markDef, scaleType, config, hasNestedOffsetScale}) =>
paddingInner(scalePadding, channel, markDef.type, scaleType, config.scale, hasNestedOffsetScale),
paddingOuter: ({scalePadding, channel, scaleType, scalePaddingInner, config, hasNestedOffsetScale}) =>
paddingOuter(scalePadding, channel, scaleType, scalePaddingInner, config.scale, hasNestedOffsetScale),
reverse: ({fieldOrDatumDef, scaleType, channel, config}) => {
const sort = isFieldDef(fieldOrDatumDef) ? fieldOrDatumDef.sort : undefined;
return reverse(scaleType, sort, channel, config.scale);
},
zero: ({channel, fieldOrDatumDef, domain, markDef, scaleType, config, hasSecondaryRangeChannel}) =>
zero(channel, fieldOrDatumDef, domain, markDef, scaleType, config.scale, hasSecondaryRangeChannel)
};
// This method is here rather than in range.ts to avoid circular dependency.
export function parseScaleRange(model: Model) {
if (isUnitModel(model)) {
parseUnitScaleRange(model);
} else {
parseNonUnitScaleProperty(model, 'range');
}
}
export function parseNonUnitScaleProperty(model: Model, property: keyof (Scale | ScaleComponentProps)) {
const localScaleComponents: ScaleComponentIndex = model.component.scales;
for (const child of model.children) {
if (property === 'range') {
parseScaleRange(child);
} else {
parseScaleProperty(child, property);
}
}
for (const channel of keys(localScaleComponents)) {
let valueWithExplicit: Explicit<any>;
for (const child of model.children) {
const childComponent = child.component.scales[channel];
if (childComponent) {
const childValueWithExplicit = childComponent.getWithExplicit(property);
valueWithExplicit = mergeValuesWithExplicit<VgScale, any>(
valueWithExplicit,
childValueWithExplicit,
property,
'scale',
tieBreakByComparing<VgScale, any>((v1, v2) => {
switch (property) {
case 'range':
// For step, prefer larger step
if (v1.step && v2.step) {
return v1.step - v2.step;
}
return 0;
// TODO: precedence rule for other properties
}
return 0;
})
);
}
}
localScaleComponents[channel].setWithExplicit(property, valueWithExplicit);
}
}
export function bins(model: Model, fieldDef: TypedFieldDef<string>) {
const bin = fieldDef.bin;
if (isBinning(bin)) {
const binSignal = getBinSignalName(model, fieldDef.field, bin);
return new SignalRefWrapper(() => {
return model.getSignalName(binSignal);
});
} else if (isBinned(bin) && isBinParams(bin) && bin.step !== undefined) {
// start and stop will be determined from the scale domain
return {
step: bin.step
};
}
return undefined;
}
export function interpolate(channel: ScaleChannel, type: Type): Scale['interpolate'] {
if (contains([COLOR, FILL, STROKE], channel) && type !== 'nominal') {
return 'hcl';
}
return undefined;
}
export function nice(
scaleType: ScaleType,
channel: ScaleChannel,
specifiedDomain: Domain,
domainMin: Scale['domainMin'],
domainMax: Scale['domainMax'],
fieldOrDatumDef: TypedFieldDef<string> | ScaleDatumDef
): boolean | TimeInterval {
if (
getFieldDef(fieldOrDatumDef)?.bin ||
isArray(specifiedDomain) ||
domainMax != null ||
domainMin != null ||
util.contains([ScaleType.TIME, ScaleType.UTC], scaleType)
) {
return undefined;
}
return isXorY(channel) ? true : undefined;
}
export function padding(
channel: ScaleChannel,
scaleType: ScaleType,
scaleConfig: ScaleConfig<SignalRef>,
fieldOrDatumDef: TypedFieldDef<string> | ScaleDatumDef,
markDef: MarkDef<Mark, SignalRef>,
barConfig: RectConfig<SignalRef>
) {
if (isXorY(channel)) {
if (isContinuousToContinuous(scaleType)) {
if (scaleConfig.continuousPadding !== undefined) {
return scaleConfig.continuousPadding;
}
const {type, orient} = markDef;
if (type === 'bar' && !(isFieldDef(fieldOrDatumDef) && (fieldOrDatumDef.bin || fieldOrDatumDef.timeUnit))) {
if ((orient === 'vertical' && channel === 'x') || (orient === 'horizontal' && channel === 'y')) {
return barConfig.continuousBandSize;
}
}
}
if (scaleType === ScaleType.POINT) {
return scaleConfig.pointPadding;
}
}
return undefined;
}
export function paddingInner(
paddingValue: number | SignalRef,
channel: ScaleChannel,
mark: Mark,
scaleType: ScaleType,
scaleConfig: ScaleConfig<SignalRef>,
hasNestedOffsetScale = false
) |
export function paddingOuter(
paddingValue: number | SignalRef,
channel: ScaleChannel,
scaleType: ScaleType,
paddingInnerValue: number | SignalRef,
scaleConfig: ScaleConfig<SignalRef>,
hasNestedOffsetScale = false
) {
if (paddingValue !== undefined) {
// If user has already manually specified "padding", no need to add default paddingOuter.
return undefined;
}
if (isXorY(channel)) {
const {bandPaddingOuter, bandWithNestedOffsetPaddingOuter} = scaleConfig;
if (hasNestedOffsetScale) {
return bandWithNestedOffsetPaddingOuter;
}
// Padding is only set for X and Y by default.
// Basically it doesn't make sense to add padding for color and size.
if (scaleType === ScaleType.BAND) {
return getFirstDefined(
bandPaddingOuter,
/* By default, paddingOuter is paddingInner / 2. The reason is that
size (width/height) = step * (cardinality - paddingInner + 2 * paddingOuter).
and we want the width/height to be integer by default.
Note that step (by default) and cardinality are integers.) */
isSignalRef(paddingInnerValue) ? {signal: `${paddingInnerValue.signal}/2`} : paddingInnerValue / 2
);
}
} else if (isXorYOffset(channel)) {
if (scaleType === ScaleType.POINT) {
return 0.5; // so the point positions align with centers of band scales.
} else if (scaleType === ScaleType.BAND) {
return scaleConfig.offsetBandPaddingOuter;
}
}
return undefined;
}
export function reverse(
scaleType: ScaleType,
sort: Sort<string>,
channel: ScaleChannel,
scaleConfig: ScaleConfig<SignalRef>
) {
if (channel === 'x' && scaleConfig.xReverse !== undefined) {
if (hasContinuousDomain(scaleType) && sort === 'descending') {
if (isSignalRef(scaleConfig.xReverse)) {
return {signal: `!${scaleConfig.xReverse.signal}`};
} else {
return !scaleConfig.xReverse;
}
}
return scaleConfig.xReverse;
}
if (hasContinuousDomain(scaleType) && sort === 'descending') {
// For continuous domain scales, Vega does not support domain sort.
// Thus, we reverse range instead if sort is descending
return true;
}
return undefined;
}
export function zero(
channel: ScaleChannel,
fieldDef: TypedFieldDef<string> | ScaleDatumDef,
specifiedDomain: Domain,
markDef: MarkDef,
scaleType: ScaleType,
scaleConfig: ScaleConfig<SignalRef>,
hasSecondaryRangeChannel: boolean
) {
// If users explicitly provide a domain, we should not augment zero as that will be unexpected.
const hasCustomDomain = !!specifiedDomain && specifiedDomain !== 'unaggregated';
if (hasCustomDomain) {
if (hasContinuousDomain(scaleType)) {
if (isArray(specifiedDomain)) {
const first = specifiedDomain[0];
const last = specifiedDomain[specifiedDomain.length - 1];
if (isNumber(first) && first <= 0 && isNumber(last) && last >= 0) {
// if the domain includes zero, make zero remain true
return true;
}
}
return false;
}
}
// If there is no custom domain, return configZero value (=`true` as default) only for the following cases:
// 1) using quantitative field with size
// While this can be either ratio or interval fields, our assumption is that
// ratio are more common. However, if the scaleType is discretizing scale, we want to return
// false so that range doesn't start at zero
if (channel === 'size' && fieldDef.type === 'quantitative' && !isContinuousToDiscrete(scaleType)) {
return true;
}
// 2) non-binned, quantitative x-scale or y-scale
// (For binning, we should not include zero by default because binning are calculated without zero.)
// (For area/bar charts with ratio scale chart, we should always include zero.)
if (
!(isFieldDef(fieldDef) && fieldDef.bin) &&
util.contains([...POSITION_SCALE_CHANNELS, ...POLAR_POSITION_SCALE_CHANNELS], channel)
) {
const {orient, type} = markDef;
if (contains(['bar', 'area', 'line', 'trail'], type)) {
if ((orient === 'horizontal' && channel === 'y') || (orient === 'vertical' && channel === 'x')) {
return false;
}
}
if (contains(['bar', 'area'], type) && !hasSecondaryRangeChannel) {
return true;
}
return scaleConfig?.zero;
}
return false;
}
| {
if (paddingValue !== undefined) {
// If user has already manually specified "padding", no need to add default paddingInner.
return undefined;
}
if (isXorY(channel)) {
// Padding is only set for X and Y by default.
// Basically it doesn't make sense to add padding for color and size.
// paddingOuter would only be called if it's a band scale, just return the default for bandScale.
const {bandPaddingInner, barBandPaddingInner, rectBandPaddingInner, bandWithNestedOffsetPaddingInner} = scaleConfig;
if (hasNestedOffsetScale) {
return bandWithNestedOffsetPaddingInner;
}
return getFirstDefined(bandPaddingInner, mark === 'bar' ? barBandPaddingInner : rectBandPaddingInner);
} else if (isXorYOffset(channel)) {
if (scaleType === ScaleType.BAND) {
return scaleConfig.offsetBandPaddingInner;
}
}
return undefined;
} | identifier_body |
io.rs | //! Persistent storage backend for blocks.
use std::collections::VecDeque;
use std::fs;
use std::io::{self, Read, Seek, Write};
use std::iter;
use std::mem;
use std::path::Path;
use nakamoto_common::bitcoin::consensus::encode::{Decodable, Encodable};
use nakamoto_common::block::store::{Error, Store};
use nakamoto_common::block::Height;
/// Append a block to the end of the stream.
fn put<H: Sized + Encodable, S: Seek + Write, I: Iterator<Item = H>>(
mut stream: S,
headers: I,
) -> Result<Height, Error> {
let mut pos = stream.seek(io::SeekFrom::End(0))?;
let size = std::mem::size_of::<H>();
for header in headers {
pos += header.consensus_encode(&mut stream)? as u64;
}
Ok(pos / size as u64)
}
/// Get a block from the stream.
fn get<H: Decodable, S: Seek + Read>(mut stream: S, ix: u64) -> Result<H, Error> {
let size = std::mem::size_of::<H>();
let mut buf = vec![0; size]; // TODO: Use an array when rust has const-generics.
stream.seek(io::SeekFrom::Start(ix * size as u64))?;
stream.read_exact(&mut buf)?;
H::consensus_decode(&mut buf.as_slice()).map_err(Error::from)
}
/// Reads from a file in an I/O optmized way.
#[derive(Debug)]
struct FileReader<H> {
file: fs::File,
queue: VecDeque<H>,
index: u64,
}
impl<H: Decodable> FileReader<H> {
const BATCH_SIZE: usize = 16;
fn new(file: fs::File) -> Self {
Self {
file,
queue: VecDeque::new(),
index: 0,
}
}
fn next(&mut self) -> Result<Option<H>, Error> {
let size = std::mem::size_of::<H>();
if self.queue.is_empty() {
let mut buf = vec![0; size * Self::BATCH_SIZE];
let from = self.file.seek(io::SeekFrom::Start(self.index))?;
match self.file.read_exact(&mut buf) {
Ok(()) => {}
Err(err) if err.kind() == io::ErrorKind::UnexpectedEof => |
Err(err) => return Err(err.into()),
}
self.index += buf.len() as u64;
let items = buf.len() / size;
let mut cursor = io::Cursor::new(buf);
let mut item = vec![0; size];
for _ in 0..items {
cursor.read_exact(&mut item)?;
let item = H::consensus_decode(&mut item.as_slice())?;
self.queue.push_back(item);
}
}
Ok(self.queue.pop_front())
}
}
/// An iterator over block headers in a file.
#[derive(Debug)]
pub struct Iter<H> {
height: Height,
file: FileReader<H>,
}
impl<H: Decodable> Iter<H> {
fn new(file: fs::File) -> Self {
Self {
file: FileReader::new(file),
height: 1,
}
}
}
impl<H: Decodable> Iterator for Iter<H> {
type Item = Result<(Height, H), Error>;
fn next(&mut self) -> Option<Self::Item> {
let height = self.height;
assert!(height > 0);
match self.file.next() {
// If we hit this branch, it's because we're trying to read passed the end
// of the file, which means there are no further headers remaining.
Err(Error::Io(err)) if err.kind() == io::ErrorKind::UnexpectedEof => None,
// If another kind of error occurs, we want to yield it to the caller, so
// that it can be propagated.
Err(err) => Some(Err(err)),
Ok(Some(header)) => {
self.height = height + 1;
Some(Ok((height, header)))
}
Ok(None) => None,
}
}
}
/// A `Store` backed by a single file.
#[derive(Debug)]
pub struct File<H> {
file: fs::File,
genesis: H,
}
impl<H> File<H> {
/// Open a new file store from the given path and genesis header.
pub fn open<P: AsRef<Path>>(path: P, genesis: H) -> io::Result<Self> {
fs::OpenOptions::new()
.create(true)
.read(true)
.append(true)
.open(path)
.map(|file| Self { file, genesis })
}
/// Create a new file store at the given path, with the provided genesis header.
pub fn create<P: AsRef<Path>>(path: P, genesis: H) -> Result<Self, Error> {
let file = fs::OpenOptions::new()
.create_new(true)
.read(true)
.append(true)
.open(path)?;
Ok(Self { file, genesis })
}
}
impl<H: 'static + Copy + Encodable + Decodable> Store for File<H> {
type Header = H;
/// Get the genesis block.
fn genesis(&self) -> H {
self.genesis
}
/// Append a block to the end of the file.
fn put<I: Iterator<Item = Self::Header>>(&mut self, headers: I) -> Result<Height, Error> {
self::put(&mut self.file, headers)
}
/// Get the block at the given height. Returns `io::ErrorKind::UnexpectedEof` if
/// the height is not found.
fn get(&self, height: Height) -> Result<H, Error> {
if let Some(ix) = height.checked_sub(1) {
// Clone so this function doesn't have to take a `&mut self`.
let mut file = self.file.try_clone()?;
get(&mut file, ix)
} else {
Ok(self.genesis)
}
}
/// Rollback the chain to the given height. Behavior is undefined if the given
/// height is not contained in the store.
fn rollback(&mut self, height: Height) -> Result<(), Error> {
let size = mem::size_of::<H>();
self.file
.set_len((height) * size as u64)
.map_err(Error::from)
}
/// Flush changes to disk.
fn sync(&mut self) -> Result<(), Error> {
self.file.sync_data().map_err(Error::from)
}
/// Iterate over all headers in the store.
fn iter(&self) -> Box<dyn Iterator<Item = Result<(Height, H), Error>>> {
// Clone so this function doesn't have to take a `&mut self`.
match self.file.try_clone() {
Ok(file) => Box::new(iter::once(Ok((0, self.genesis))).chain(Iter::new(file))),
Err(err) => Box::new(iter::once(Err(Error::Io(err)))),
}
}
/// Return the number of headers in the store.
fn len(&self) -> Result<usize, Error> {
let meta = self.file.metadata()?;
let len = meta.len();
let size = mem::size_of::<H>();
assert!(len <= usize::MAX as u64);
if len as usize % size != 0 {
return Err(Error::Corruption);
}
Ok(len as usize / size + 1)
}
/// Return the block height of the store.
fn height(&self) -> Result<Height, Error> {
self.len().map(|n| n as Height - 1)
}
/// Check the file store integrity.
fn check(&self) -> Result<(), Error> {
self.len().map(|_| ())
}
/// Attempt to heal data corruption.
fn heal(&self) -> Result<(), Error> {
let meta = self.file.metadata()?;
let len = meta.len();
let size = mem::size_of::<H>();
assert!(len <= usize::MAX as u64);
let extraneous = len as usize % size;
if extraneous != 0 {
self.file.set_len(len - extraneous as u64)?;
}
Ok(())
}
}
#[cfg(test)]
mod test {
use std::{io, iter};
use nakamoto_common::bitcoin::TxMerkleNode;
use nakamoto_common::bitcoin_hashes::Hash;
use nakamoto_common::block::BlockHash;
use super::{Error, File, Height, Store};
use crate::block::BlockHeader;
const HEADER_SIZE: usize = 80;
fn store(path: &str) -> File<BlockHeader> {
let tmp = tempfile::tempdir().unwrap();
let genesis = BlockHeader {
version: 1,
prev_blockhash: BlockHash::all_zeros(),
merkle_root: TxMerkleNode::all_zeros(),
bits: 0x2ffffff,
time: 39123818,
nonce: 0,
};
File::open(tmp.path().join(path), genesis).unwrap()
}
#[test]
fn test_put_get() {
let mut store = store("headers.db");
let header = BlockHeader {
version: 1,
prev_blockhash: store.genesis.block_hash(),
merkle_root: TxMerkleNode::all_zeros(),
bits: 0x2ffffff,
time: 1842918273,
nonce: 312143,
};
assert_eq!(
store.get(0).unwrap(),
store.genesis,
"when the store is empty, we can `get` the genesis"
);
assert!(
store.get(1).is_err(),
"when the store is empty, we can't get height `1`"
);
let height = store.put(iter::once(header)).unwrap();
store.sync().unwrap();
assert_eq!(height, 1);
assert_eq!(store.get(height).unwrap(), header);
}
#[test]
fn test_put_get_batch() {
let mut store = store("headers.db");
assert_eq!(store.len().unwrap(), 1);
let count = 32;
let header = BlockHeader {
version: 1,
prev_blockhash: store.genesis().block_hash(),
merkle_root: TxMerkleNode::all_zeros(),
bits: 0x2ffffff,
time: 1842918273,
nonce: 0,
};
let iter = (0..count).map(|i| BlockHeader { nonce: i, ..header });
let headers = iter.clone().collect::<Vec<_>>();
// Put all headers into the store and check that we can retrieve them.
{
let height = store.put(iter).unwrap();
assert_eq!(height, headers.len() as Height);
assert_eq!(store.len().unwrap(), headers.len() + 1); // Account for genesis.
for (i, h) in headers.iter().enumerate() {
assert_eq!(&store.get(i as Height + 1).unwrap(), h);
}
assert!(&store.get(32 + 1).is_err());
}
// Rollback and overwrite the history.
{
let h = headers.len() as Height / 2; // Some point `h` in the past.
assert!(&store.get(h + 1).is_ok());
assert_eq!(store.get(h + 1).unwrap(), headers[h as usize]);
store.rollback(h).unwrap();
assert!(
&store.get(h + 1).is_err(),
"after the rollback, we can't access blocks passed `h`"
);
assert_eq!(store.len().unwrap(), h as usize + 1);
// We can now overwrite the block at position `h + 1`.
let header = BlockHeader {
nonce: 49219374,
..header
};
let height = store.put(iter::once(header)).unwrap();
assert!(header != headers[height as usize]);
assert_eq!(height, h + 1);
assert_eq!(store.get(height).unwrap(), header);
// Blocks up to and including `h` are unaffected by the rollback.
assert_eq!(store.get(0).unwrap(), store.genesis);
assert_eq!(store.get(1).unwrap(), headers[0]);
assert_eq!(store.get(h).unwrap(), headers[h as usize - 1]);
}
}
#[test]
fn test_iter() {
let mut store = store("headers.db");
let count = 32;
let header = BlockHeader {
version: 1,
prev_blockhash: store.genesis().block_hash(),
merkle_root: TxMerkleNode::all_zeros(),
bits: 0x2ffffff,
time: 1842918273,
nonce: 0,
};
let iter = (0..count).map(|i| BlockHeader { nonce: i, ..header });
let headers = iter.clone().collect::<Vec<_>>();
store.put(iter).unwrap();
let mut iter = store.iter();
assert_eq!(iter.next().unwrap().unwrap(), (0, store.genesis));
for (i, result) in iter.enumerate() {
let (height, header) = result.unwrap();
assert_eq!(i as u64 + 1, height);
assert_eq!(header, headers[height as usize - 1]);
}
}
#[test]
fn test_corrupt_file() {
let mut store = store("headers.db");
store.check().expect("checking always works");
store.heal().expect("healing when there is no corruption");
let headers = &[
BlockHeader {
version: 1,
prev_blockhash: store.genesis().block_hash(),
merkle_root: TxMerkleNode::all_zeros(),
bits: 0x2ffffff,
time: 1842918273,
nonce: 312143,
},
BlockHeader {
version: 1,
prev_blockhash: BlockHash::all_zeros(),
merkle_root: TxMerkleNode::all_zeros(),
bits: 0x1ffffff,
time: 1842918920,
nonce: 913716378,
},
];
store.put(headers.iter().cloned()).unwrap();
store.check().unwrap();
assert_eq!(store.len().unwrap(), 3);
let size = std::mem::size_of::<BlockHeader>();
assert_eq!(size, HEADER_SIZE);
// Intentionally corrupt the file, by truncating it by 32 bytes.
store
.file
.set_len(headers.len() as u64 * size as u64 - 32)
.unwrap();
assert_eq!(
store.get(1).unwrap(),
headers[0],
"the first header is intact"
);
matches! {
store
.get(2)
.expect_err("the second header has been corrupted"),
Error::Io(err) if err.kind() == io::ErrorKind::UnexpectedEof
};
store.len().expect_err("data is corrupted");
store.check().expect_err("data is corrupted");
store.heal().unwrap();
store.check().unwrap();
assert_eq!(
store.len().unwrap(),
2,
"the last (corrupted) header was removed"
);
}
}
| {
self.file.seek(io::SeekFrom::Start(from))?;
let n = self.file.read_to_end(&mut buf)?;
buf.truncate(n);
} | conditional_block |
io.rs | //! Persistent storage backend for blocks.
use std::collections::VecDeque;
use std::fs;
use std::io::{self, Read, Seek, Write};
use std::iter;
use std::mem;
use std::path::Path;
use nakamoto_common::bitcoin::consensus::encode::{Decodable, Encodable};
use nakamoto_common::block::store::{Error, Store};
use nakamoto_common::block::Height;
/// Append a block to the end of the stream.
fn put<H: Sized + Encodable, S: Seek + Write, I: Iterator<Item = H>>(
mut stream: S,
headers: I,
) -> Result<Height, Error> {
let mut pos = stream.seek(io::SeekFrom::End(0))?;
let size = std::mem::size_of::<H>();
for header in headers {
pos += header.consensus_encode(&mut stream)? as u64;
}
Ok(pos / size as u64)
}
/// Get a block from the stream.
fn get<H: Decodable, S: Seek + Read>(mut stream: S, ix: u64) -> Result<H, Error> {
let size = std::mem::size_of::<H>();
let mut buf = vec![0; size]; // TODO: Use an array when rust has const-generics.
stream.seek(io::SeekFrom::Start(ix * size as u64))?;
stream.read_exact(&mut buf)?;
H::consensus_decode(&mut buf.as_slice()).map_err(Error::from)
}
/// Reads from a file in an I/O optmized way.
#[derive(Debug)]
struct FileReader<H> {
file: fs::File,
queue: VecDeque<H>,
index: u64,
}
impl<H: Decodable> FileReader<H> {
const BATCH_SIZE: usize = 16;
fn new(file: fs::File) -> Self {
Self {
file,
queue: VecDeque::new(),
index: 0,
}
}
fn next(&mut self) -> Result<Option<H>, Error> {
let size = std::mem::size_of::<H>();
if self.queue.is_empty() {
let mut buf = vec![0; size * Self::BATCH_SIZE];
let from = self.file.seek(io::SeekFrom::Start(self.index))?;
match self.file.read_exact(&mut buf) {
Ok(()) => {}
Err(err) if err.kind() == io::ErrorKind::UnexpectedEof => {
self.file.seek(io::SeekFrom::Start(from))?;
let n = self.file.read_to_end(&mut buf)?;
buf.truncate(n);
}
Err(err) => return Err(err.into()),
}
self.index += buf.len() as u64;
let items = buf.len() / size;
let mut cursor = io::Cursor::new(buf);
let mut item = vec![0; size];
| }
}
Ok(self.queue.pop_front())
}
}
/// An iterator over block headers in a file.
#[derive(Debug)]
pub struct Iter<H> {
height: Height,
file: FileReader<H>,
}
impl<H: Decodable> Iter<H> {
fn new(file: fs::File) -> Self {
Self {
file: FileReader::new(file),
height: 1,
}
}
}
impl<H: Decodable> Iterator for Iter<H> {
type Item = Result<(Height, H), Error>;
fn next(&mut self) -> Option<Self::Item> {
let height = self.height;
assert!(height > 0);
match self.file.next() {
// If we hit this branch, it's because we're trying to read passed the end
// of the file, which means there are no further headers remaining.
Err(Error::Io(err)) if err.kind() == io::ErrorKind::UnexpectedEof => None,
// If another kind of error occurs, we want to yield it to the caller, so
// that it can be propagated.
Err(err) => Some(Err(err)),
Ok(Some(header)) => {
self.height = height + 1;
Some(Ok((height, header)))
}
Ok(None) => None,
}
}
}
/// A `Store` backed by a single file.
#[derive(Debug)]
pub struct File<H> {
file: fs::File,
genesis: H,
}
impl<H> File<H> {
/// Open a new file store from the given path and genesis header.
pub fn open<P: AsRef<Path>>(path: P, genesis: H) -> io::Result<Self> {
fs::OpenOptions::new()
.create(true)
.read(true)
.append(true)
.open(path)
.map(|file| Self { file, genesis })
}
/// Create a new file store at the given path, with the provided genesis header.
pub fn create<P: AsRef<Path>>(path: P, genesis: H) -> Result<Self, Error> {
let file = fs::OpenOptions::new()
.create_new(true)
.read(true)
.append(true)
.open(path)?;
Ok(Self { file, genesis })
}
}
impl<H: 'static + Copy + Encodable + Decodable> Store for File<H> {
type Header = H;
/// Get the genesis block.
fn genesis(&self) -> H {
self.genesis
}
/// Append a block to the end of the file.
fn put<I: Iterator<Item = Self::Header>>(&mut self, headers: I) -> Result<Height, Error> {
self::put(&mut self.file, headers)
}
/// Get the block at the given height. Returns `io::ErrorKind::UnexpectedEof` if
/// the height is not found.
fn get(&self, height: Height) -> Result<H, Error> {
if let Some(ix) = height.checked_sub(1) {
// Clone so this function doesn't have to take a `&mut self`.
let mut file = self.file.try_clone()?;
get(&mut file, ix)
} else {
Ok(self.genesis)
}
}
/// Rollback the chain to the given height. Behavior is undefined if the given
/// height is not contained in the store.
fn rollback(&mut self, height: Height) -> Result<(), Error> {
let size = mem::size_of::<H>();
self.file
.set_len((height) * size as u64)
.map_err(Error::from)
}
/// Flush changes to disk.
fn sync(&mut self) -> Result<(), Error> {
self.file.sync_data().map_err(Error::from)
}
/// Iterate over all headers in the store.
fn iter(&self) -> Box<dyn Iterator<Item = Result<(Height, H), Error>>> {
// Clone so this function doesn't have to take a `&mut self`.
match self.file.try_clone() {
Ok(file) => Box::new(iter::once(Ok((0, self.genesis))).chain(Iter::new(file))),
Err(err) => Box::new(iter::once(Err(Error::Io(err)))),
}
}
/// Return the number of headers in the store.
fn len(&self) -> Result<usize, Error> {
let meta = self.file.metadata()?;
let len = meta.len();
let size = mem::size_of::<H>();
assert!(len <= usize::MAX as u64);
if len as usize % size != 0 {
return Err(Error::Corruption);
}
Ok(len as usize / size + 1)
}
/// Return the block height of the store.
fn height(&self) -> Result<Height, Error> {
self.len().map(|n| n as Height - 1)
}
/// Check the file store integrity.
fn check(&self) -> Result<(), Error> {
self.len().map(|_| ())
}
/// Attempt to heal data corruption.
fn heal(&self) -> Result<(), Error> {
let meta = self.file.metadata()?;
let len = meta.len();
let size = mem::size_of::<H>();
assert!(len <= usize::MAX as u64);
let extraneous = len as usize % size;
if extraneous != 0 {
self.file.set_len(len - extraneous as u64)?;
}
Ok(())
}
}
#[cfg(test)]
mod test {
use std::{io, iter};
use nakamoto_common::bitcoin::TxMerkleNode;
use nakamoto_common::bitcoin_hashes::Hash;
use nakamoto_common::block::BlockHash;
use super::{Error, File, Height, Store};
use crate::block::BlockHeader;
const HEADER_SIZE: usize = 80;
fn store(path: &str) -> File<BlockHeader> {
let tmp = tempfile::tempdir().unwrap();
let genesis = BlockHeader {
version: 1,
prev_blockhash: BlockHash::all_zeros(),
merkle_root: TxMerkleNode::all_zeros(),
bits: 0x2ffffff,
time: 39123818,
nonce: 0,
};
File::open(tmp.path().join(path), genesis).unwrap()
}
#[test]
fn test_put_get() {
let mut store = store("headers.db");
let header = BlockHeader {
version: 1,
prev_blockhash: store.genesis.block_hash(),
merkle_root: TxMerkleNode::all_zeros(),
bits: 0x2ffffff,
time: 1842918273,
nonce: 312143,
};
assert_eq!(
store.get(0).unwrap(),
store.genesis,
"when the store is empty, we can `get` the genesis"
);
assert!(
store.get(1).is_err(),
"when the store is empty, we can't get height `1`"
);
let height = store.put(iter::once(header)).unwrap();
store.sync().unwrap();
assert_eq!(height, 1);
assert_eq!(store.get(height).unwrap(), header);
}
#[test]
fn test_put_get_batch() {
let mut store = store("headers.db");
assert_eq!(store.len().unwrap(), 1);
let count = 32;
let header = BlockHeader {
version: 1,
prev_blockhash: store.genesis().block_hash(),
merkle_root: TxMerkleNode::all_zeros(),
bits: 0x2ffffff,
time: 1842918273,
nonce: 0,
};
let iter = (0..count).map(|i| BlockHeader { nonce: i, ..header });
let headers = iter.clone().collect::<Vec<_>>();
// Put all headers into the store and check that we can retrieve them.
{
let height = store.put(iter).unwrap();
assert_eq!(height, headers.len() as Height);
assert_eq!(store.len().unwrap(), headers.len() + 1); // Account for genesis.
for (i, h) in headers.iter().enumerate() {
assert_eq!(&store.get(i as Height + 1).unwrap(), h);
}
assert!(&store.get(32 + 1).is_err());
}
// Rollback and overwrite the history.
{
let h = headers.len() as Height / 2; // Some point `h` in the past.
assert!(&store.get(h + 1).is_ok());
assert_eq!(store.get(h + 1).unwrap(), headers[h as usize]);
store.rollback(h).unwrap();
assert!(
&store.get(h + 1).is_err(),
"after the rollback, we can't access blocks passed `h`"
);
assert_eq!(store.len().unwrap(), h as usize + 1);
// We can now overwrite the block at position `h + 1`.
let header = BlockHeader {
nonce: 49219374,
..header
};
let height = store.put(iter::once(header)).unwrap();
assert!(header != headers[height as usize]);
assert_eq!(height, h + 1);
assert_eq!(store.get(height).unwrap(), header);
// Blocks up to and including `h` are unaffected by the rollback.
assert_eq!(store.get(0).unwrap(), store.genesis);
assert_eq!(store.get(1).unwrap(), headers[0]);
assert_eq!(store.get(h).unwrap(), headers[h as usize - 1]);
}
}
#[test]
fn test_iter() {
let mut store = store("headers.db");
let count = 32;
let header = BlockHeader {
version: 1,
prev_blockhash: store.genesis().block_hash(),
merkle_root: TxMerkleNode::all_zeros(),
bits: 0x2ffffff,
time: 1842918273,
nonce: 0,
};
let iter = (0..count).map(|i| BlockHeader { nonce: i, ..header });
let headers = iter.clone().collect::<Vec<_>>();
store.put(iter).unwrap();
let mut iter = store.iter();
assert_eq!(iter.next().unwrap().unwrap(), (0, store.genesis));
for (i, result) in iter.enumerate() {
let (height, header) = result.unwrap();
assert_eq!(i as u64 + 1, height);
assert_eq!(header, headers[height as usize - 1]);
}
}
#[test]
fn test_corrupt_file() {
let mut store = store("headers.db");
store.check().expect("checking always works");
store.heal().expect("healing when there is no corruption");
let headers = &[
BlockHeader {
version: 1,
prev_blockhash: store.genesis().block_hash(),
merkle_root: TxMerkleNode::all_zeros(),
bits: 0x2ffffff,
time: 1842918273,
nonce: 312143,
},
BlockHeader {
version: 1,
prev_blockhash: BlockHash::all_zeros(),
merkle_root: TxMerkleNode::all_zeros(),
bits: 0x1ffffff,
time: 1842918920,
nonce: 913716378,
},
];
store.put(headers.iter().cloned()).unwrap();
store.check().unwrap();
assert_eq!(store.len().unwrap(), 3);
let size = std::mem::size_of::<BlockHeader>();
assert_eq!(size, HEADER_SIZE);
// Intentionally corrupt the file, by truncating it by 32 bytes.
store
.file
.set_len(headers.len() as u64 * size as u64 - 32)
.unwrap();
assert_eq!(
store.get(1).unwrap(),
headers[0],
"the first header is intact"
);
matches! {
store
.get(2)
.expect_err("the second header has been corrupted"),
Error::Io(err) if err.kind() == io::ErrorKind::UnexpectedEof
};
store.len().expect_err("data is corrupted");
store.check().expect_err("data is corrupted");
store.heal().unwrap();
store.check().unwrap();
assert_eq!(
store.len().unwrap(),
2,
"the last (corrupted) header was removed"
);
}
} | for _ in 0..items {
cursor.read_exact(&mut item)?;
let item = H::consensus_decode(&mut item.as_slice())?;
self.queue.push_back(item); | random_line_split |
io.rs | //! Persistent storage backend for blocks.
use std::collections::VecDeque;
use std::fs;
use std::io::{self, Read, Seek, Write};
use std::iter;
use std::mem;
use std::path::Path;
use nakamoto_common::bitcoin::consensus::encode::{Decodable, Encodable};
use nakamoto_common::block::store::{Error, Store};
use nakamoto_common::block::Height;
/// Append a block to the end of the stream.
fn put<H: Sized + Encodable, S: Seek + Write, I: Iterator<Item = H>>(
mut stream: S,
headers: I,
) -> Result<Height, Error> {
let mut pos = stream.seek(io::SeekFrom::End(0))?;
let size = std::mem::size_of::<H>();
for header in headers {
pos += header.consensus_encode(&mut stream)? as u64;
}
Ok(pos / size as u64)
}
/// Get a block from the stream.
fn get<H: Decodable, S: Seek + Read>(mut stream: S, ix: u64) -> Result<H, Error> {
let size = std::mem::size_of::<H>();
let mut buf = vec![0; size]; // TODO: Use an array when rust has const-generics.
stream.seek(io::SeekFrom::Start(ix * size as u64))?;
stream.read_exact(&mut buf)?;
H::consensus_decode(&mut buf.as_slice()).map_err(Error::from)
}
/// Reads from a file in an I/O optmized way.
#[derive(Debug)]
struct FileReader<H> {
file: fs::File,
queue: VecDeque<H>,
index: u64,
}
impl<H: Decodable> FileReader<H> {
const BATCH_SIZE: usize = 16;
fn new(file: fs::File) -> Self {
Self {
file,
queue: VecDeque::new(),
index: 0,
}
}
fn next(&mut self) -> Result<Option<H>, Error> {
let size = std::mem::size_of::<H>();
if self.queue.is_empty() {
let mut buf = vec![0; size * Self::BATCH_SIZE];
let from = self.file.seek(io::SeekFrom::Start(self.index))?;
match self.file.read_exact(&mut buf) {
Ok(()) => {}
Err(err) if err.kind() == io::ErrorKind::UnexpectedEof => {
self.file.seek(io::SeekFrom::Start(from))?;
let n = self.file.read_to_end(&mut buf)?;
buf.truncate(n);
}
Err(err) => return Err(err.into()),
}
self.index += buf.len() as u64;
let items = buf.len() / size;
let mut cursor = io::Cursor::new(buf);
let mut item = vec![0; size];
for _ in 0..items {
cursor.read_exact(&mut item)?;
let item = H::consensus_decode(&mut item.as_slice())?;
self.queue.push_back(item);
}
}
Ok(self.queue.pop_front())
}
}
/// An iterator over block headers in a file.
#[derive(Debug)]
pub struct Iter<H> {
height: Height,
file: FileReader<H>,
}
impl<H: Decodable> Iter<H> {
fn new(file: fs::File) -> Self {
Self {
file: FileReader::new(file),
height: 1,
}
}
}
impl<H: Decodable> Iterator for Iter<H> {
type Item = Result<(Height, H), Error>;
fn | (&mut self) -> Option<Self::Item> {
let height = self.height;
assert!(height > 0);
match self.file.next() {
// If we hit this branch, it's because we're trying to read passed the end
// of the file, which means there are no further headers remaining.
Err(Error::Io(err)) if err.kind() == io::ErrorKind::UnexpectedEof => None,
// If another kind of error occurs, we want to yield it to the caller, so
// that it can be propagated.
Err(err) => Some(Err(err)),
Ok(Some(header)) => {
self.height = height + 1;
Some(Ok((height, header)))
}
Ok(None) => None,
}
}
}
/// A `Store` backed by a single file.
#[derive(Debug)]
pub struct File<H> {
file: fs::File,
genesis: H,
}
impl<H> File<H> {
/// Open a new file store from the given path and genesis header.
pub fn open<P: AsRef<Path>>(path: P, genesis: H) -> io::Result<Self> {
fs::OpenOptions::new()
.create(true)
.read(true)
.append(true)
.open(path)
.map(|file| Self { file, genesis })
}
/// Create a new file store at the given path, with the provided genesis header.
pub fn create<P: AsRef<Path>>(path: P, genesis: H) -> Result<Self, Error> {
let file = fs::OpenOptions::new()
.create_new(true)
.read(true)
.append(true)
.open(path)?;
Ok(Self { file, genesis })
}
}
impl<H: 'static + Copy + Encodable + Decodable> Store for File<H> {
type Header = H;
/// Get the genesis block.
fn genesis(&self) -> H {
self.genesis
}
/// Append a block to the end of the file.
fn put<I: Iterator<Item = Self::Header>>(&mut self, headers: I) -> Result<Height, Error> {
self::put(&mut self.file, headers)
}
/// Get the block at the given height. Returns `io::ErrorKind::UnexpectedEof` if
/// the height is not found.
fn get(&self, height: Height) -> Result<H, Error> {
if let Some(ix) = height.checked_sub(1) {
// Clone so this function doesn't have to take a `&mut self`.
let mut file = self.file.try_clone()?;
get(&mut file, ix)
} else {
Ok(self.genesis)
}
}
/// Rollback the chain to the given height. Behavior is undefined if the given
/// height is not contained in the store.
fn rollback(&mut self, height: Height) -> Result<(), Error> {
let size = mem::size_of::<H>();
self.file
.set_len((height) * size as u64)
.map_err(Error::from)
}
/// Flush changes to disk.
fn sync(&mut self) -> Result<(), Error> {
self.file.sync_data().map_err(Error::from)
}
/// Iterate over all headers in the store.
fn iter(&self) -> Box<dyn Iterator<Item = Result<(Height, H), Error>>> {
// Clone so this function doesn't have to take a `&mut self`.
match self.file.try_clone() {
Ok(file) => Box::new(iter::once(Ok((0, self.genesis))).chain(Iter::new(file))),
Err(err) => Box::new(iter::once(Err(Error::Io(err)))),
}
}
/// Return the number of headers in the store.
fn len(&self) -> Result<usize, Error> {
let meta = self.file.metadata()?;
let len = meta.len();
let size = mem::size_of::<H>();
assert!(len <= usize::MAX as u64);
if len as usize % size != 0 {
return Err(Error::Corruption);
}
Ok(len as usize / size + 1)
}
/// Return the block height of the store.
fn height(&self) -> Result<Height, Error> {
self.len().map(|n| n as Height - 1)
}
/// Check the file store integrity.
fn check(&self) -> Result<(), Error> {
self.len().map(|_| ())
}
/// Attempt to heal data corruption.
fn heal(&self) -> Result<(), Error> {
let meta = self.file.metadata()?;
let len = meta.len();
let size = mem::size_of::<H>();
assert!(len <= usize::MAX as u64);
let extraneous = len as usize % size;
if extraneous != 0 {
self.file.set_len(len - extraneous as u64)?;
}
Ok(())
}
}
#[cfg(test)]
mod test {
use std::{io, iter};
use nakamoto_common::bitcoin::TxMerkleNode;
use nakamoto_common::bitcoin_hashes::Hash;
use nakamoto_common::block::BlockHash;
use super::{Error, File, Height, Store};
use crate::block::BlockHeader;
const HEADER_SIZE: usize = 80;
fn store(path: &str) -> File<BlockHeader> {
let tmp = tempfile::tempdir().unwrap();
let genesis = BlockHeader {
version: 1,
prev_blockhash: BlockHash::all_zeros(),
merkle_root: TxMerkleNode::all_zeros(),
bits: 0x2ffffff,
time: 39123818,
nonce: 0,
};
File::open(tmp.path().join(path), genesis).unwrap()
}
#[test]
fn test_put_get() {
let mut store = store("headers.db");
let header = BlockHeader {
version: 1,
prev_blockhash: store.genesis.block_hash(),
merkle_root: TxMerkleNode::all_zeros(),
bits: 0x2ffffff,
time: 1842918273,
nonce: 312143,
};
assert_eq!(
store.get(0).unwrap(),
store.genesis,
"when the store is empty, we can `get` the genesis"
);
assert!(
store.get(1).is_err(),
"when the store is empty, we can't get height `1`"
);
let height = store.put(iter::once(header)).unwrap();
store.sync().unwrap();
assert_eq!(height, 1);
assert_eq!(store.get(height).unwrap(), header);
}
#[test]
fn test_put_get_batch() {
let mut store = store("headers.db");
assert_eq!(store.len().unwrap(), 1);
let count = 32;
let header = BlockHeader {
version: 1,
prev_blockhash: store.genesis().block_hash(),
merkle_root: TxMerkleNode::all_zeros(),
bits: 0x2ffffff,
time: 1842918273,
nonce: 0,
};
let iter = (0..count).map(|i| BlockHeader { nonce: i, ..header });
let headers = iter.clone().collect::<Vec<_>>();
// Put all headers into the store and check that we can retrieve them.
{
let height = store.put(iter).unwrap();
assert_eq!(height, headers.len() as Height);
assert_eq!(store.len().unwrap(), headers.len() + 1); // Account for genesis.
for (i, h) in headers.iter().enumerate() {
assert_eq!(&store.get(i as Height + 1).unwrap(), h);
}
assert!(&store.get(32 + 1).is_err());
}
// Rollback and overwrite the history.
{
let h = headers.len() as Height / 2; // Some point `h` in the past.
assert!(&store.get(h + 1).is_ok());
assert_eq!(store.get(h + 1).unwrap(), headers[h as usize]);
store.rollback(h).unwrap();
assert!(
&store.get(h + 1).is_err(),
"after the rollback, we can't access blocks passed `h`"
);
assert_eq!(store.len().unwrap(), h as usize + 1);
// We can now overwrite the block at position `h + 1`.
let header = BlockHeader {
nonce: 49219374,
..header
};
let height = store.put(iter::once(header)).unwrap();
assert!(header != headers[height as usize]);
assert_eq!(height, h + 1);
assert_eq!(store.get(height).unwrap(), header);
// Blocks up to and including `h` are unaffected by the rollback.
assert_eq!(store.get(0).unwrap(), store.genesis);
assert_eq!(store.get(1).unwrap(), headers[0]);
assert_eq!(store.get(h).unwrap(), headers[h as usize - 1]);
}
}
#[test]
fn test_iter() {
let mut store = store("headers.db");
let count = 32;
let header = BlockHeader {
version: 1,
prev_blockhash: store.genesis().block_hash(),
merkle_root: TxMerkleNode::all_zeros(),
bits: 0x2ffffff,
time: 1842918273,
nonce: 0,
};
let iter = (0..count).map(|i| BlockHeader { nonce: i, ..header });
let headers = iter.clone().collect::<Vec<_>>();
store.put(iter).unwrap();
let mut iter = store.iter();
assert_eq!(iter.next().unwrap().unwrap(), (0, store.genesis));
for (i, result) in iter.enumerate() {
let (height, header) = result.unwrap();
assert_eq!(i as u64 + 1, height);
assert_eq!(header, headers[height as usize - 1]);
}
}
#[test]
fn test_corrupt_file() {
let mut store = store("headers.db");
store.check().expect("checking always works");
store.heal().expect("healing when there is no corruption");
let headers = &[
BlockHeader {
version: 1,
prev_blockhash: store.genesis().block_hash(),
merkle_root: TxMerkleNode::all_zeros(),
bits: 0x2ffffff,
time: 1842918273,
nonce: 312143,
},
BlockHeader {
version: 1,
prev_blockhash: BlockHash::all_zeros(),
merkle_root: TxMerkleNode::all_zeros(),
bits: 0x1ffffff,
time: 1842918920,
nonce: 913716378,
},
];
store.put(headers.iter().cloned()).unwrap();
store.check().unwrap();
assert_eq!(store.len().unwrap(), 3);
let size = std::mem::size_of::<BlockHeader>();
assert_eq!(size, HEADER_SIZE);
// Intentionally corrupt the file, by truncating it by 32 bytes.
store
.file
.set_len(headers.len() as u64 * size as u64 - 32)
.unwrap();
assert_eq!(
store.get(1).unwrap(),
headers[0],
"the first header is intact"
);
matches! {
store
.get(2)
.expect_err("the second header has been corrupted"),
Error::Io(err) if err.kind() == io::ErrorKind::UnexpectedEof
};
store.len().expect_err("data is corrupted");
store.check().expect_err("data is corrupted");
store.heal().unwrap();
store.check().unwrap();
assert_eq!(
store.len().unwrap(),
2,
"the last (corrupted) header was removed"
);
}
}
| next | identifier_name |
tweet_utils.py | #!/usr/bin/env python
# coding: utf-8
# In[1]:
from searchtweets import load_credentials,gen_request_parameters,collect_results,result_stream,utils
# from secrets_ar import *
import pandas as pd
import glob
import os
import seaborn as sns
import langid,re
import collections
import matplotlib.pyplot as plt
from googletrans import Translator
import itertools
import json
import time
def parseOperators(df):
df[0] = df[0].apply(lambda x:'('+x+')')
# Put brackets around each
df[0] = df[0].apply(lambda x:re.sub(' OR ',') OR (',x))
# Break up ORs into separate clauses
df[0] = df[0].apply(lambda x:re.sub(' AND ',' ',x))
# Replace AND operator with a space
return df
def cleanText(s):
s = re.sub(r'[/\\\-]',' ',s)
return re.sub(r'[^\w\s]','',s)
def getLang(t):
return langid.classify(t)[0]
def makeSimpleQuery():
'''
Makes a simple query to be input into TW search API
'NK', 'Armenia', 'Azerbaijan' in either language
Returns a list of queries @qs and language prefix @prefix
Note: Currently hardcoded. If amended query must be
less than 1024 characters or else it must be split
'''
print('Making simple query...')
prefix = ''
while not prefix in ['az','hy']:
prefix = input('Which query (AZ/hy)?')
if prefix == '':
prefix = 'az'
print(prefix+' chosen')
if prefix == 'hy':
qs = ['Լեռնային Ղարաբաղ OR Ադրբեջան OR Ադրբեջանցի']
prefix = 'hy'
#HY
else:
qs = ['Dağlıq Qarabağ OR Erməni OR Ermənistan']
prefix = 'az'
#AZ
return qs,prefix
def removeNoisyTerms(df,noisyTerms = ['veteran','truce']):
'''
Removes a set of noisy terms from DataFrame with
all declined keywords
Returns @df
'''
removeNoisy = input('Remove noisy terms? ({:s}) (Y/n)'.format(','.join(noisyTerms)))
if removeNoisy == 'n':
removeNoisy = False
print('Not removing')
else:
removeNoisy == True
print('Removing')
df = df[~df[0].isin(noisyTerms)]
return df
def splitQueriesSimple(keywords, max_query_lenght = 400, additional_query_parameters = ''):
'''
Simpler verstion to generate the query strings from list of a keywords
:param keywords: list[string] list of keywords
:param max_query_lenght: int the length of generated query strings,
depending on account type it might be 400 or 1000
:additional_query_parameters
:return :list[string] of generated query strings
'''
queries = []
query = keywords[0]
for keyword in keywords[1:]:
tmp_query = '{} OR "{}"'.format(query, keyword)
if len(tmp_query + additional_query_parameters) > max_query_lenght:
queries.append(f'{tmp_query} {additional_query_parameters}')
query = f'"{keyword}"'
continue
query = tmp_query
queries.append(f'{tmp_query} {additional_query_parameters}')
return queries
def splitQueries(declensionsDf,prefix,writeToFile = True):
'''
Function to take a DataFrame of keywords and
combine with OR operators to make a series of
queries under 1024 characters. Optionally write
the queries to a series of files
'''
n = 0
lastN = 0
nFile = 0
tempQ = ''
qs = []
print('Splitting queries')
if writeToFile:
path = input('Enter path stem (query_{:s}[_<n>.csv])'.format(prefix))
if path == '':
path = 'query_{:s}'.format(prefix)
cleanPath = 'n'
cleanPath = input('Clean existing query files? (y/N)').lower()
if cleanPath in ['','y']:
cleanPath = True
else:
cleanPath = False
if cleanPath:
print('Removing {:s}*'.format(path))
for file in glob.glob('{:s}*'.format(path)):
os.remove(file)
print('Shape:',declensionsDf.shape[0])
declensionsDf[0] = parseOperators(declensionsDf)
while n < declensionsDf.shape[0]:
tempQ = ' OR '.join(declensionsDf[0].values[lastN:n])
if len(tempQ) > 1024:
qs.append(' OR '.join(declensionsDf[0].values[lastN:n-1]))
if writeToFile:
print('Writing to file : ' + '{:s}_{:d}.csv'.format(path,nFile))
queryFileName = '{:s}_{:d}.csv'.format(path,nFile)
with open(queryFileName,'w') as outFile:
outFile.writelines(qs[-1])
print('\tLength written {:d}'.format(len(qs[-1])))
print()
lastN = n
n-=1
nFile+=1
n+=1
if nFile == 0:
# In case all keywords fit in one 1024 query string
qs.append(' OR '.join(declensionsDf[0].values[lastN:n-1]))
if writeToFile:
print('Writing to file : ' + '{:s}_{:d}.csv'.format(path,nFile))
queryFileName = '{:s}_{:d}.csv'.format(path,nFile)
with open(queryFileName,'w') as outFile:
outFile.writelines(qs[-1])
print('\tLength written {:d}'.format(len(qs[-1])))
print()
return qs
def makeComplexQuery(denoise = False):
'''
Function to create a query for input into
Twitter search API based on keywords read
from files.
Returns list of query strings @qs and
language prefix @prefix
'''
print('Making complex query...')
prefix = ''
while not prefix in ['az','hy']:
prefix = input('Which query (AZ/hy)?')
if prefix == '':
prefix = 'az'
print(prefix+' chosen')
print('Getting list of declined keywords...')
fileName = ''
fileName = input('Enter file path for keywords(default: {:s}_declensions.csv)'.format(prefix))
if fileName == '':
fileName = '{:s}_declensions.csv'.format(prefix)
print('Reading declined keywords file...')
declensionsDf = pd.read_csv(fileName,header=None,sep = '\t')
declensionsDf.iloc[:,0] = declensionsDf.iloc[:,0]
print('Got {:d} keywords'.format(declensionsDf.shape[0]))
if denoise:
declensionsDf = removeNoisyTerms(declensionsDf)
qs = splitQueries(declensionsDf,prefix)
return qs,prefix
def getTokens(df,drop = False):
'''
Convenience function to deal with the paging
information added into results returned
Returns @tokenDf and @df, with tokens and tweets
respectively
'''
if 'newest_id' in df.columns:
tokenDf = df[~pd.isna(df['newest_id'])]
if drop:
df = df[pd.isna(df['newest_id'])]
return tokenDf,df
else:
return pd.DataFrame(),df
def executeQueries(qs,prefix,startTime,search_args,period = '1 days',nResults = 100000,verbose = True, results_per_call= 100):
'''
Main routine to execute requests against search API
for each query string. Some logic required to make sure
each query backfills desired time period.
---------------------------------
Requires
@qs - list of query strings
@prefix - language codes
@startTime - datetime of latest date to grab
@period - time to backfill
@search_args - credentials object for API
Returns a list of DataFrames @dfs
'''
dfs = [pd.DataFrame()]*len(qs)
# Make one empty dataframe for each query
# We will append to each one
#nResults = 10000
for n,q in enumerate(qs):
print('Query {:d} of {:d}...'.format(n,len(qs)))
endTime = startTime + pd.to_timedelta(period)
query = gen_request_parameters(q, False, results_per_call=results_per_call,tweet_fields='text,author_id,id,created_at', start_time=startTime.isoformat()[0:10],end_time=endTime.isoformat()[0:10])
results = collect_results(query,max_tweets=nResults,result_stream_args=search_args)
# Grab first batch of tweets to see how close to backfilling we get
print('Grabbing first tweets')
if len(results) > 0:
# Check there is at least one match
tweets = results[:-1]
metadata = results[-1]
df = pd.DataFrame(data = tweets)
df.set_index(pd.to_datetime(df['created_at']),inplace=True)
tokenDf,df = getTokens(df)
# Get rid of the tokens for now
if verbose:
print('Got {:d} tweets'.format(df.shape[0]))
dfs[n] = dfs[n].append(df)
# Add the new tweets to the array
if verbose:
print('Takes us to',df.index[-1].isoformat()[0:-6])
breakOut = False
startTimeOffset = pd.to_timedelta('0 days')
# We need this flag to break the while loop
# for when the day ranges shift
while df.index[-1] > startTime:
# Keep grabbing tweets for this query
# Until entire date range is backfilled
print(df.index[-1])
print(startTime)
endTime = df.index[-1]
if (endTime - startTime).days == 0:
startTimeOffset = pd.to_timedelta('1 hours')
# Nudge the start date back by an hour
# To make sure that start is always before end
# Or API returns error
if verbose:
print('We need more tweets to look further back (to {:s})'.format(startTime.isoformat()[0:10]))
print('Querying with:')
print('startTime',(startTime - startTimeOffset).isoformat()[0:19])
print('endTime',endTime.isoformat()[0:19])
query = gen_request_parameters(q, results_per_call=results_per_call,tweet_fields='text,author_id,id,created_at', start_time=(startTime - startTimeOffset).isoformat()[0:10], end_time=endTime.to_pydatetime().strftime("%Y-%m-%d %H:%M"))
results = collect_results(query,max_tweets=nResults,result_stream_args=search_args)
# Grab 1k tweets first to see how far it goes
if len(results) > 0:
tweets = results[:-1]
metadata = results[-1]
df = pd.DataFrame(data = tweets)
df.set_index(pd.to_datetime(df['created_at']),inplace=True)
tokenDf,df = getTokens(df,drop = True)
# Get rid of the tokens for now
dfs[n] = dfs[n].append(df)
if verbose:
print('Takes us to',dfs[n].index[-1].isoformat())
print('{:d} tweets so far'.format(dfs[n].shape[0]))
print() | if breakOut:
print('Breaking out...')
break
print('Now we are done')
print('Got {:d} tweets in total'.format(dfs[n].shape[0]))
print('Between:')
print(dfs[n].index[0])
print(dfs[n].index[-1])
print('+++++++\n')
else:
print('No results...\n+++++++\n')
dfs.append(pd.DataFrame())
return dfs
def countTerms(text,stopWords = None):
'''
Convenience function to count terms in
an iterable of text (pandas series, list etc)
Returns @c counter object
'''
c = collections.Counter()
text = text.astype(str)
text.apply(lambda x:c.update(x.lower().split()))
if stopWords:
for sw in stopWords:
del c[sw]
return c
def writeData(dfs,prefix):
'''
Write dataframes with results to file
'''
stem = input('Enter data file stem (data_{:s}[_<n>.csv])'.format(prefix))
if stem == '':
stem = 'data_{:s}_'.format(prefix)
for n,df in enumerate(dfs):
fileName = '{:s}{:d}.csv'.format(stem,n)
df.to_csv(fileName)
#print('Print to',fileName)
def getMatchingKeywords(t,qs):
'''
Returns a list of keywords that match a string
'''
matches = []
tokens = t.lower().split()
for q in qs:
for kw in q.split(' OR '):
if kw in tokens:
#print('MATCHED',kw)
matches.append(kw)
return matches
def queryToList(q):
'''
Convenience function to split query string back
up into keywords
'''
return q.split(' OR ')
def get_query_results_tw(queries, startdate, enddate):
if os.environ['VERBOSE'] == 'VERBOSE':
print('get_query_results_tw', queries, startdate, enddate)
print(type(enddate))
search_args = load_credentials(filename='/content/tw_keys.yaml', yaml_key="search_tweets_v2")
tweets = []
for query_ in queries:
query = gen_request_parameters(query_, False, results_per_call=100 ,
tweet_fields='attachments,author_id,context_annotations,conversation_id,created_at,entities,geo,id,in_reply_to_user_id,lang,possibly_sensitive,public_metrics,referenced_tweets,reply_settings,source,text,withheld',
place_fields='contained_within,country,country_code,full_name,geo,id,name,place_type',
user_fields='created_at,description,entities,id,location,name,pinned_tweet_id,profile_image_url,protected,public_metrics,url,username,verified,withheld',
start_time=startdate.isoformat()[:10],
end_time=enddate.isoformat()[:10])
i = 0
while True:
time.sleep(8)
i += 1
if i > 50:
print('more that 50 pages have been collected for query :', query_)
break
if len(tweets) > 0:
if "next_token" not in tweets[-1]["meta"]:
break
k = json.loads(query)
k['next_token'] = tweets[-1]["meta"]["next_token"]
query = json.dumps(k)
tweets += collect_results(query,
max_tweets=100,
result_stream_args=search_args)
tweets_ = tweets[-1]
if os.environ['VERBOSE'] == 'VERBOSE':
print(tweets_["meta"], len(tweets_["data"]))
df = pd.DataFrame(itertools.chain.from_iterable([i["data"] for i in tweets]))
df["like_count"] = df.public_metrics.apply(lambda x: x["like_count"])
df["quote_count"] = df.public_metrics.apply(lambda x: x["quote_count"])
df["reply_count"] = df.public_metrics.apply(lambda x: x["reply_count"])
df["retweet_count"] = df.public_metrics.apply(lambda x: x["retweet_count"])
return df | else:
print('No results....')
dfs[n] = dfs[n].append(pd.DataFrame())
breakOut = True
| random_line_split |
tweet_utils.py | #!/usr/bin/env python
# coding: utf-8
# In[1]:
from searchtweets import load_credentials,gen_request_parameters,collect_results,result_stream,utils
# from secrets_ar import *
import pandas as pd
import glob
import os
import seaborn as sns
import langid,re
import collections
import matplotlib.pyplot as plt
from googletrans import Translator
import itertools
import json
import time
def parseOperators(df):
df[0] = df[0].apply(lambda x:'('+x+')')
# Put brackets around each
df[0] = df[0].apply(lambda x:re.sub(' OR ',') OR (',x))
# Break up ORs into separate clauses
df[0] = df[0].apply(lambda x:re.sub(' AND ',' ',x))
# Replace AND operator with a space
return df
def cleanText(s):
s = re.sub(r'[/\\\-]',' ',s)
return re.sub(r'[^\w\s]','',s)
def getLang(t):
return langid.classify(t)[0]
def makeSimpleQuery():
'''
Makes a simple query to be input into TW search API
'NK', 'Armenia', 'Azerbaijan' in either language
Returns a list of queries @qs and language prefix @prefix
Note: Currently hardcoded. If amended query must be
less than 1024 characters or else it must be split
'''
print('Making simple query...')
prefix = ''
while not prefix in ['az','hy']:
prefix = input('Which query (AZ/hy)?')
if prefix == '':
prefix = 'az'
print(prefix+' chosen')
if prefix == 'hy':
qs = ['Լեռնային Ղարաբաղ OR Ադրբեջան OR Ադրբեջանցի']
prefix = 'hy'
#HY
else:
qs = ['Dağlıq Qarabağ OR Erməni OR Ermənistan']
prefix = 'az'
#AZ
return qs,prefix
def removeNoisyTerms(df,noisyTerms = ['veteran','truce']):
'''
Removes a set of noisy terms from DataFrame with
all declined keywords
Returns @df
'''
removeNoisy = input('Remove noisy terms? ({:s}) (Y/n)'.format(','.join(noisyTerms)))
if removeNoisy == 'n':
removeNoisy = False
print('Not removing')
else:
removeNoisy == True
print('Removing')
df = df[~df[0].isin(noisyTerms)]
return df
def splitQueriesSimple(keywords, max_query_lenght = 400, additional_query_parameters = ''):
'''
Simpler verstion to generate the query strings from list of a keywords
:param keywords: list[string] list of keywords
:param max_query_lenght: int the length of generated query strings,
depending on account type it might be 400 or 1000
:additional_query_parameters
:return :list[string] of generated query strings
'''
queries = []
query = keywords[0]
for keyword in keywords[1:]:
tmp_query = '{} OR "{}"'.format(query, keyword)
if len(tmp_query + additional_query_parameters) > max_query_lenght:
queries.append(f'{tmp_query} {additional_query_parameters}')
query = f'"{keyword}"'
continue
query = tmp_query
queries.append(f'{tmp_query} {additional_query_parameters}')
return queries
def splitQueries(declensionsDf,prefix,writ | ue):
'''
Function to take a DataFrame of keywords and
combine with OR operators to make a series of
queries under 1024 characters. Optionally write
the queries to a series of files
'''
n = 0
lastN = 0
nFile = 0
tempQ = ''
qs = []
print('Splitting queries')
if writeToFile:
path = input('Enter path stem (query_{:s}[_<n>.csv])'.format(prefix))
if path == '':
path = 'query_{:s}'.format(prefix)
cleanPath = 'n'
cleanPath = input('Clean existing query files? (y/N)').lower()
if cleanPath in ['','y']:
cleanPath = True
else:
cleanPath = False
if cleanPath:
print('Removing {:s}*'.format(path))
for file in glob.glob('{:s}*'.format(path)):
os.remove(file)
print('Shape:',declensionsDf.shape[0])
declensionsDf[0] = parseOperators(declensionsDf)
while n < declensionsDf.shape[0]:
tempQ = ' OR '.join(declensionsDf[0].values[lastN:n])
if len(tempQ) > 1024:
qs.append(' OR '.join(declensionsDf[0].values[lastN:n-1]))
if writeToFile:
print('Writing to file : ' + '{:s}_{:d}.csv'.format(path,nFile))
queryFileName = '{:s}_{:d}.csv'.format(path,nFile)
with open(queryFileName,'w') as outFile:
outFile.writelines(qs[-1])
print('\tLength written {:d}'.format(len(qs[-1])))
print()
lastN = n
n-=1
nFile+=1
n+=1
if nFile == 0:
# In case all keywords fit in one 1024 query string
qs.append(' OR '.join(declensionsDf[0].values[lastN:n-1]))
if writeToFile:
print('Writing to file : ' + '{:s}_{:d}.csv'.format(path,nFile))
queryFileName = '{:s}_{:d}.csv'.format(path,nFile)
with open(queryFileName,'w') as outFile:
outFile.writelines(qs[-1])
print('\tLength written {:d}'.format(len(qs[-1])))
print()
return qs
def makeComplexQuery(denoise = False):
'''
Function to create a query for input into
Twitter search API based on keywords read
from files.
Returns list of query strings @qs and
language prefix @prefix
'''
print('Making complex query...')
prefix = ''
while not prefix in ['az','hy']:
prefix = input('Which query (AZ/hy)?')
if prefix == '':
prefix = 'az'
print(prefix+' chosen')
print('Getting list of declined keywords...')
fileName = ''
fileName = input('Enter file path for keywords(default: {:s}_declensions.csv)'.format(prefix))
if fileName == '':
fileName = '{:s}_declensions.csv'.format(prefix)
print('Reading declined keywords file...')
declensionsDf = pd.read_csv(fileName,header=None,sep = '\t')
declensionsDf.iloc[:,0] = declensionsDf.iloc[:,0]
print('Got {:d} keywords'.format(declensionsDf.shape[0]))
if denoise:
declensionsDf = removeNoisyTerms(declensionsDf)
qs = splitQueries(declensionsDf,prefix)
return qs,prefix
def getTokens(df,drop = False):
'''
Convenience function to deal with the paging
information added into results returned
Returns @tokenDf and @df, with tokens and tweets
respectively
'''
if 'newest_id' in df.columns:
tokenDf = df[~pd.isna(df['newest_id'])]
if drop:
df = df[pd.isna(df['newest_id'])]
return tokenDf,df
else:
return pd.DataFrame(),df
def executeQueries(qs,prefix,startTime,search_args,period = '1 days',nResults = 100000,verbose = True, results_per_call= 100):
'''
Main routine to execute requests against search API
for each query string. Some logic required to make sure
each query backfills desired time period.
---------------------------------
Requires
@qs - list of query strings
@prefix - language codes
@startTime - datetime of latest date to grab
@period - time to backfill
@search_args - credentials object for API
Returns a list of DataFrames @dfs
'''
dfs = [pd.DataFrame()]*len(qs)
# Make one empty dataframe for each query
# We will append to each one
#nResults = 10000
for n,q in enumerate(qs):
print('Query {:d} of {:d}...'.format(n,len(qs)))
endTime = startTime + pd.to_timedelta(period)
query = gen_request_parameters(q, False, results_per_call=results_per_call,tweet_fields='text,author_id,id,created_at', start_time=startTime.isoformat()[0:10],end_time=endTime.isoformat()[0:10])
results = collect_results(query,max_tweets=nResults,result_stream_args=search_args)
# Grab first batch of tweets to see how close to backfilling we get
print('Grabbing first tweets')
if len(results) > 0:
# Check there is at least one match
tweets = results[:-1]
metadata = results[-1]
df = pd.DataFrame(data = tweets)
df.set_index(pd.to_datetime(df['created_at']),inplace=True)
tokenDf,df = getTokens(df)
# Get rid of the tokens for now
if verbose:
print('Got {:d} tweets'.format(df.shape[0]))
dfs[n] = dfs[n].append(df)
# Add the new tweets to the array
if verbose:
print('Takes us to',df.index[-1].isoformat()[0:-6])
breakOut = False
startTimeOffset = pd.to_timedelta('0 days')
# We need this flag to break the while loop
# for when the day ranges shift
while df.index[-1] > startTime:
# Keep grabbing tweets for this query
# Until entire date range is backfilled
print(df.index[-1])
print(startTime)
endTime = df.index[-1]
if (endTime - startTime).days == 0:
startTimeOffset = pd.to_timedelta('1 hours')
# Nudge the start date back by an hour
# To make sure that start is always before end
# Or API returns error
if verbose:
print('We need more tweets to look further back (to {:s})'.format(startTime.isoformat()[0:10]))
print('Querying with:')
print('startTime',(startTime - startTimeOffset).isoformat()[0:19])
print('endTime',endTime.isoformat()[0:19])
query = gen_request_parameters(q, results_per_call=results_per_call,tweet_fields='text,author_id,id,created_at', start_time=(startTime - startTimeOffset).isoformat()[0:10], end_time=endTime.to_pydatetime().strftime("%Y-%m-%d %H:%M"))
results = collect_results(query,max_tweets=nResults,result_stream_args=search_args)
# Grab 1k tweets first to see how far it goes
if len(results) > 0:
tweets = results[:-1]
metadata = results[-1]
df = pd.DataFrame(data = tweets)
df.set_index(pd.to_datetime(df['created_at']),inplace=True)
tokenDf,df = getTokens(df,drop = True)
# Get rid of the tokens for now
dfs[n] = dfs[n].append(df)
if verbose:
print('Takes us to',dfs[n].index[-1].isoformat())
print('{:d} tweets so far'.format(dfs[n].shape[0]))
print()
else:
print('No results....')
dfs[n] = dfs[n].append(pd.DataFrame())
breakOut = True
if breakOut:
print('Breaking out...')
break
print('Now we are done')
print('Got {:d} tweets in total'.format(dfs[n].shape[0]))
print('Between:')
print(dfs[n].index[0])
print(dfs[n].index[-1])
print('+++++++\n')
else:
print('No results...\n+++++++\n')
dfs.append(pd.DataFrame())
return dfs
def countTerms(text,stopWords = None):
'''
Convenience function to count terms in
an iterable of text (pandas series, list etc)
Returns @c counter object
'''
c = collections.Counter()
text = text.astype(str)
text.apply(lambda x:c.update(x.lower().split()))
if stopWords:
for sw in stopWords:
del c[sw]
return c
def writeData(dfs,prefix):
'''
Write dataframes with results to file
'''
stem = input('Enter data file stem (data_{:s}[_<n>.csv])'.format(prefix))
if stem == '':
stem = 'data_{:s}_'.format(prefix)
for n,df in enumerate(dfs):
fileName = '{:s}{:d}.csv'.format(stem,n)
df.to_csv(fileName)
#print('Print to',fileName)
def getMatchingKeywords(t,qs):
'''
Returns a list of keywords that match a string
'''
matches = []
tokens = t.lower().split()
for q in qs:
for kw in q.split(' OR '):
if kw in tokens:
#print('MATCHED',kw)
matches.append(kw)
return matches
def queryToList(q):
'''
Convenience function to split query string back
up into keywords
'''
return q.split(' OR ')
def get_query_results_tw(queries, startdate, enddate):
if os.environ['VERBOSE'] == 'VERBOSE':
print('get_query_results_tw', queries, startdate, enddate)
print(type(enddate))
search_args = load_credentials(filename='/content/tw_keys.yaml', yaml_key="search_tweets_v2")
tweets = []
for query_ in queries:
query = gen_request_parameters(query_, False, results_per_call=100 ,
tweet_fields='attachments,author_id,context_annotations,conversation_id,created_at,entities,geo,id,in_reply_to_user_id,lang,possibly_sensitive,public_metrics,referenced_tweets,reply_settings,source,text,withheld',
place_fields='contained_within,country,country_code,full_name,geo,id,name,place_type',
user_fields='created_at,description,entities,id,location,name,pinned_tweet_id,profile_image_url,protected,public_metrics,url,username,verified,withheld',
start_time=startdate.isoformat()[:10],
end_time=enddate.isoformat()[:10])
i = 0
while True:
time.sleep(8)
i += 1
if i > 50:
print('more that 50 pages have been collected for query :', query_)
break
if len(tweets) > 0:
if "next_token" not in tweets[-1]["meta"]:
break
k = json.loads(query)
k['next_token'] = tweets[-1]["meta"]["next_token"]
query = json.dumps(k)
tweets += collect_results(query,
max_tweets=100,
result_stream_args=search_args)
tweets_ = tweets[-1]
if os.environ['VERBOSE'] == 'VERBOSE':
print(tweets_["meta"], len(tweets_["data"]))
df = pd.DataFrame(itertools.chain.from_iterable([i["data"] for i in tweets]))
df["like_count"] = df.public_metrics.apply(lambda x: x["like_count"])
df["quote_count"] = df.public_metrics.apply(lambda x: x["quote_count"])
df["reply_count"] = df.public_metrics.apply(lambda x: x["reply_count"])
df["retweet_count"] = df.public_metrics.apply(lambda x: x["retweet_count"])
return df | eToFile = Tr | identifier_name |
tweet_utils.py | #!/usr/bin/env python
# coding: utf-8
# In[1]:
from searchtweets import load_credentials,gen_request_parameters,collect_results,result_stream,utils
# from secrets_ar import *
import pandas as pd
import glob
import os
import seaborn as sns
import langid,re
import collections
import matplotlib.pyplot as plt
from googletrans import Translator
import itertools
import json
import time
def parseOperators(df):
df[0] = df[0].apply(lambda x:'('+x+')')
# Put brackets around each
df[0] = df[0].apply(lambda x:re.sub(' OR ',') OR (',x))
# Break up ORs into separate clauses
df[0] = df[0].apply(lambda x:re.sub(' AND ',' ',x))
# Replace AND operator with a space
return df
def cleanText(s):
s = re.sub(r'[/\\\-]',' ',s)
return re.sub(r'[^\w\s]','',s)
def getLang(t):
return langid.classify(t)[0]
def makeSimpleQuery():
'''
Makes a simple query to be input into TW search API
'NK', 'Armenia', 'Azerbaijan' in either language
Returns a list of queries @qs and language prefix @prefix
Note: Currently hardcoded. If amended query must be
less than 1024 characters or else it must be split
'''
print('Making simple query...')
prefix = ''
while not prefix in ['az','hy']:
prefix = input('Which query (AZ/hy)?')
if prefix == '':
prefix = 'az'
print(prefix+' chosen')
if prefix == 'hy':
qs = ['Լեռնային Ղարաբաղ OR Ադրբեջան OR Ադրբեջանցի']
prefix = 'hy'
#HY
else:
qs = ['Dağlıq Qarabağ OR Erməni OR Ermənistan']
prefix = 'az'
#AZ
return qs,prefix
def removeNoisyTerms(df,noisyTerms = ['veteran','truce']):
'''
Removes a set of noisy terms from DataFrame with
all declined keywords
Returns @df
'''
removeNoisy = input('Remove noisy terms? ({:s}) (Y/n)'.format(','.join(noisyTerms)))
if removeNoisy == 'n':
removeNoisy = False
print('Not removing')
else:
removeNoisy == True
print('Removing')
df = df[~df[0].isin(noisyTerms)]
return df
def splitQueriesSimple(keywords, max_query_lenght = 400, additional_query_parameters = ''):
'''
Simpler verstion to generate the query strings from list of a keywords
:param keywords: list[string] list of keywords
:param max_query_lenght: int the length of generated query strings,
depending on account type it might be 400 or 1000
:additional_query_parameters
:return :list[string] of generated query strings
'''
queries = []
query = keywords[0]
for keyword in keywords[1:]:
tmp_query = '{} OR "{}"'.format(query, keyword)
if len(tmp_query + additional_query_parameters) > max_query_lenght:
queries.append(f'{tmp_query} {additional_query_parameters}')
query = f'"{keyword}"'
continue
query = tmp_query
queries.append(f'{tmp_query} {additional_query_parameters}')
return queries
def splitQueries(declensionsDf,prefix,writeToFile = True):
'''
Function to take a DataFrame of keywords and
combine with OR operators to make a series of
queries under 1024 characters. Optionally write
the queries to a series of files
'''
n = 0
lastN = 0
nFile = 0
tempQ = ''
qs = []
print('Splitting queries')
if writeToFile:
path = input('Enter path stem (query_{:s}[_<n>.csv])'.format(prefix))
if path == '':
path = 'query_{:s}'.format(prefix)
cleanPath = 'n'
cleanPath = input('Clean existing query files? (y/N)').lower()
if cleanPath in ['','y']:
cleanPath = True
else:
cleanPath = False
if cleanPath:
print('Removing {:s}*'.format(path))
for file in glob.glob('{:s}*'.format(path)):
os.remove(file)
print('Shape:',declensionsDf.shape[0])
declensionsDf[0] = parseOperators(declensionsDf)
while n < declensionsDf.shape[0]:
tempQ = ' OR '.join(declensionsDf[0].values[lastN:n])
if len(tempQ) > 1024:
qs.append(' OR '.join(declensionsDf[0].values[lastN:n-1]))
if writeToFile:
print('Writing to file : ' + '{:s}_{:d}.csv'.format(path,nFile))
queryFileName = '{:s}_{:d}.csv'.format(path,nFile)
with open(queryFileName,'w') as outFile:
outFile.writelines(qs[-1])
print('\tLength written {:d}'.format(len(qs[-1])))
print()
lastN = n
n-=1
nFile+=1
n+=1
if nFile == 0:
# In case all keywords fit in one 1024 query string
qs.append(' OR '.join(declensionsDf[0].values[lastN:n-1]))
if writeToFile:
print('Writing to file : ' + '{:s}_{:d}.csv'.format(path,nFile))
queryFileName = '{:s}_{:d}.csv'.format(path,nFile)
with open(queryFileName,'w') as outFile:
outFile.writelines(qs[-1])
print('\tLength written {:d}'.format(len(qs[-1])))
print()
return qs
def makeComplexQuery(denoise = False):
'''
Function to create a query for input into
Twitter search API based on keywords read
from files.
Returns list of query strings @qs and
language prefix @prefix
'''
print('Making complex query...')
prefix = ''
while not prefix in ['az','hy']:
prefix = input('Which query (AZ/hy)?')
if prefix == '':
prefix = 'az'
print(prefix+' chosen')
print('Getting list of declined keywords...')
fileName = ''
fileName = input('Enter file path for keywords(default: {:s}_declensions.csv)'.format(prefix))
if fileName == '':
fileName = '{:s}_declensions.csv'.format(prefix)
print('Reading declined keywords file...')
declensionsDf = pd.read_csv(fileName,header=None,sep = '\t')
declensionsDf.iloc[:,0] = declensionsDf.iloc[:,0]
print('Got {:d} keywords'.format(declensionsDf.shape[0]))
if denoise:
declensionsDf = removeNoisyTerms(declensionsDf)
qs = splitQueries(declensionsDf,prefix)
return qs,prefix
def getTokens(df,drop = False):
'''
Convenience function to deal with the paging
information added into results returned
Returns @tokenDf and @df, with tokens and tweets
respectively
'''
if 'newest_id' in df.columns:
tokenDf = df[~pd.isna(df['newest_id'])]
if drop:
df = df[pd.isna(df['newest_id'])]
return tokenDf,df
else:
return pd.DataFrame(),df
def executeQueries(qs,prefix,startTime,search_args,period = '1 days',nResults = 100000,verbose = True, results_per_call= 100):
'''
Main routine to execute requests against search API
for each query string. Some logic required to make sure
each query backfills desired time period.
---------------------------------
Requires
@qs - list of query strings
@prefix - language codes
@startTime - datetime of latest date to grab
@period - time to backfill
@search_args - credentials object for API
Returns a list of DataFrames @dfs
'''
dfs = [pd.DataFrame()]*len(qs)
# Make one empty dataframe for each query
# We will append to each one
#nResults = 10000
for n,q in enumerate(qs):
print('Query {:d} of {:d}...'.format(n,len(qs)))
endTime = startTime + pd.to_timedelta(period)
query = gen_request_parameters(q, False, results_per_call=results_per_call,tweet_fields='text,author_id,id,created_at', start_time=startTime.isoformat()[0:10],end_time=endTime.isoformat()[0:10])
results = collect_results(query,max_tweets=nResults,result_stream_args=search_args)
# Grab first batch of tweets to see how close to backfilling we get
print('Grabbing first tweets')
if len(results) > 0:
# Check there is at least one match
tweets = results[:-1]
metadata = results[-1]
df = pd.DataFrame(data = tweets)
df.set_index(pd.to_datetime(df['created_at']),inplace=True)
tokenDf,df = getTokens(df)
# Get rid of the tokens for now
if verbose:
print('Got {:d} tweets'.format(df.shape[0]))
dfs[n] = dfs[n].append(df)
# Add the new tweets to the array
if verbose:
print('Takes us to',df.index[-1].isoformat()[0:-6])
breakOut = False
startTimeOffset = pd.to_timedelta('0 days')
# We need this flag to break the while loop
# for when the day ranges shift
while df.index[-1] > startTime:
# Keep grabbing tweets for this query
# Until entire date range is backfilled
print(df.index[-1])
print(startTime)
endTime = df.index[-1]
if (endTime - startTime).days == 0:
startTimeOffset = pd.to_timedelta('1 hours')
# Nudge the start date back by an hour
# To make sure that start is always before end
# Or API returns error
if verbose:
print('We need more tweets to look further back (to {:s})'.format(startTime.isoformat()[0:10]))
print('Querying with:')
print('startTime',(startTime - startTimeOffset).isoformat()[0:19])
print('endTime',endTime.isoformat()[0:19])
query = gen_request_parameters(q, results_per_call=results_per_call,tweet_fields='text,author_id,id,created_at', start_time=(startTime - startTimeOffset).isoformat()[0:10], end_time=endTime.to_pydatetime().strftime("%Y-%m-%d %H:%M"))
results = collect_results(query,max_tweets=nResults,result_stream_args=search_args)
# Grab 1k tweets first to see how far it goes
if len(results) > 0:
tweets = results[:-1]
metadata = results[-1]
df = pd.DataFrame(data = tweets)
df.set_index(pd.to_datetime(df['created_at']),inplace=True)
tokenDf,df = getTokens(df,drop = True)
# Get rid of the tokens for now
dfs[n] = dfs[n].append(df)
if verbose:
print('Takes us to',dfs[n].index[-1].isoformat())
print('{:d} tweets so far'.format(dfs[n].shape[0]))
print()
else:
print('No results....')
dfs[n] = dfs[n].append(pd.DataFrame())
breakOut = True
if breakOut:
print('Breaking out...')
break
print('Now we are done')
print('Got {:d} tweets in total'.format(dfs[n].shape[0]))
print('Between:')
print(dfs[n].index[0])
print(dfs[n].index[-1])
print('+++++++\n')
else:
print('No results...\n+++++++\n')
dfs.append(pd.DataFrame())
return dfs
def countTerms(text,stopWords = None):
'''
Convenience function to count terms in
an iterable of text (pandas series, list etc)
Returns @c counter object
'''
c = collections.Counter()
text = text.astype(str)
text.apply(lambda x:c.update(x.lower().split()))
if stopWords:
for sw in stopWords:
del c[sw]
return c
def writeData(dfs,prefix):
'''
Write dataframes with results to file
'''
stem = input('Enter data file stem (data_{:s}[_<n>.csv])'.format(prefix))
if stem == '':
stem = 'data_{:s}_'.format(prefix)
for n,df in enumerate(dfs):
fileName = '{:s}{:d}.csv'.format(stem,n)
df.to_csv(fileName)
#print('Print to',fileName)
def getMatchingKeywords(t,qs):
'''
Returns a list of keywords tha | enience function to split query string back
up into keywords
'''
return q.split(' OR ')
def get_query_results_tw(queries, startdate, enddate):
if os.environ['VERBOSE'] == 'VERBOSE':
print('get_query_results_tw', queries, startdate, enddate)
print(type(enddate))
search_args = load_credentials(filename='/content/tw_keys.yaml', yaml_key="search_tweets_v2")
tweets = []
for query_ in queries:
query = gen_request_parameters(query_, False, results_per_call=100 ,
tweet_fields='attachments,author_id,context_annotations,conversation_id,created_at,entities,geo,id,in_reply_to_user_id,lang,possibly_sensitive,public_metrics,referenced_tweets,reply_settings,source,text,withheld',
place_fields='contained_within,country,country_code,full_name,geo,id,name,place_type',
user_fields='created_at,description,entities,id,location,name,pinned_tweet_id,profile_image_url,protected,public_metrics,url,username,verified,withheld',
start_time=startdate.isoformat()[:10],
end_time=enddate.isoformat()[:10])
i = 0
while True:
time.sleep(8)
i += 1
if i > 50:
print('more that 50 pages have been collected for query :', query_)
break
if len(tweets) > 0:
if "next_token" not in tweets[-1]["meta"]:
break
k = json.loads(query)
k['next_token'] = tweets[-1]["meta"]["next_token"]
query = json.dumps(k)
tweets += collect_results(query,
max_tweets=100,
result_stream_args=search_args)
tweets_ = tweets[-1]
if os.environ['VERBOSE'] == 'VERBOSE':
print(tweets_["meta"], len(tweets_["data"]))
df = pd.DataFrame(itertools.chain.from_iterable([i["data"] for i in tweets]))
df["like_count"] = df.public_metrics.apply(lambda x: x["like_count"])
df["quote_count"] = df.public_metrics.apply(lambda x: x["quote_count"])
df["reply_count"] = df.public_metrics.apply(lambda x: x["reply_count"])
df["retweet_count"] = df.public_metrics.apply(lambda x: x["retweet_count"])
return df | t match a string
'''
matches = []
tokens = t.lower().split()
for q in qs:
for kw in q.split(' OR '):
if kw in tokens:
#print('MATCHED',kw)
matches.append(kw)
return matches
def queryToList(q):
'''
Conv | identifier_body |
tweet_utils.py | #!/usr/bin/env python
# coding: utf-8
# In[1]:
from searchtweets import load_credentials,gen_request_parameters,collect_results,result_stream,utils
# from secrets_ar import *
import pandas as pd
import glob
import os
import seaborn as sns
import langid,re
import collections
import matplotlib.pyplot as plt
from googletrans import Translator
import itertools
import json
import time
def parseOperators(df):
df[0] = df[0].apply(lambda x:'('+x+')')
# Put brackets around each
df[0] = df[0].apply(lambda x:re.sub(' OR ',') OR (',x))
# Break up ORs into separate clauses
df[0] = df[0].apply(lambda x:re.sub(' AND ',' ',x))
# Replace AND operator with a space
return df
def cleanText(s):
s = re.sub(r'[/\\\-]',' ',s)
return re.sub(r'[^\w\s]','',s)
def getLang(t):
return langid.classify(t)[0]
def makeSimpleQuery():
'''
Makes a simple query to be input into TW search API
'NK', 'Armenia', 'Azerbaijan' in either language
Returns a list of queries @qs and language prefix @prefix
Note: Currently hardcoded. If amended query must be
less than 1024 characters or else it must be split
'''
print('Making simple query...')
prefix = ''
while not prefix in ['az','hy']:
prefix = input('Which query (AZ/hy)?')
if prefix == '':
prefix = 'az'
print(prefix+' chosen')
if prefix == 'hy':
qs = ['Լեռնային Ղարաբաղ OR Ադրբեջան OR Ադրբեջանցի']
prefix = 'hy'
#HY
else:
qs = ['Dağlıq Qarabağ OR Erməni OR Ermənistan']
prefix = 'az'
#AZ
return qs,prefix
def removeNoisyTerms(df,noisyTerms = ['veteran','truce']):
'''
Removes a set of noisy terms from DataFrame with
all declined keywords
Returns @df
'''
removeNoisy = input('Remove noisy terms? ({:s}) (Y/n)'.format(','.join(noisyTerms)))
if removeNoisy == 'n':
removeNoisy = False
print('Not removing')
else:
removeNoisy == True
print('Removing')
df = df[~df[0].isin(noisyTerms)]
return df
def splitQueriesSimple(keywords, max_query_lenght = 400, additional_query_parameters = ''):
'''
Simpler verstion to generate the query strings from list of a keywords
:param keywords: list[string] list of keywords
:param max_query_lenght: int the length of generated query strings,
depending on account type it might be 400 or 1000
:additional_query_parameters
:return :list[string] of generated query strings
'''
queries = []
query = keywords[0]
for keyword in keywords[1:]:
tmp_query = '{} OR "{}"'.format(query, keyword)
if len(tmp_query + additional_query_parameters) > max_query_lenght:
queries.append(f'{tmp_query} {additional_query_parameters}')
query = f'"{keyword}"'
continue
query = tmp_query
queries.append(f'{tmp_query} {additional_query_parameters}')
return queries
def splitQueries(declensionsDf,prefix,writeToFile = True):
'''
Function to take a DataFrame of keywords and
combine with OR operators to make a series of
queries under 1024 characters. Optionally write
the queries to a series of files
'''
n = 0
lastN = 0
nFile = 0
tempQ = ''
qs = []
print('Splitting queries')
if writeToFile:
path = input('Enter path stem (query_{:s}[_<n>.csv])'.format(prefix))
if path == '':
path = 'query_{:s}'.format(prefix)
cleanPath = 'n'
cleanPath = input('Clean existing query files? (y/N)').lower()
if cleanPath in ['','y']:
cleanPath = True
else:
cleanPath = False
if cleanPath:
print('Removing {:s}*'.format(path))
for file in glob.glob('{:s}*'.format(path)):
os.remove(file)
print('Shape:',declensionsDf.shape[0])
declensionsDf[0] = parseOperators(declensionsDf)
while n < declensionsDf.shape[0]:
tempQ = ' OR '.join(declensionsDf[0].values[lastN:n])
if len(tempQ) > 1024:
qs.append(' OR '.join(declensionsDf[0].values[lastN:n-1]))
if writeToFile:
print('Writing to file : ' + '{:s}_{:d}.csv'.format(path,nFile))
queryFileName = '{:s}_{:d}.csv'.format(path,nFile)
with open(queryFileName,'w') as outFile:
outFile.writelines(qs[-1])
print('\tLength written {:d}'.format(len(qs[-1])))
print()
lastN = n
n-=1
nFile+=1
n+=1
if nFile == 0:
# In case all keywords fit in one 1024 query string
qs.append(' OR '.join(declensionsDf[0].values[lastN:n-1]))
if writeToFile:
print('Writing to file : ' + '{:s}_{:d}.csv'.format(path,nFile))
queryFileName = '{:s}_{:d}.csv'.format(path,nFile)
with open(queryFileName,'w') as outFile:
outFile.writelines(qs[-1])
print('\tLength written {:d}'.format(len(qs[-1])))
print()
return qs
def makeComplexQuery(denoise = False):
'''
Function to create a query for input into
Twitter search API based on keywords read
from files.
Returns list of query strings @qs and
language prefix @prefix
'''
print('Making complex query...')
prefix = ''
while not prefix in ['az','hy']:
prefix = input('Which query (AZ/hy)?') | eclined keywords...')
fileName = ''
fileName = input('Enter file path for keywords(default: {:s}_declensions.csv)'.format(prefix))
if fileName == '':
fileName = '{:s}_declensions.csv'.format(prefix)
print('Reading declined keywords file...')
declensionsDf = pd.read_csv(fileName,header=None,sep = '\t')
declensionsDf.iloc[:,0] = declensionsDf.iloc[:,0]
print('Got {:d} keywords'.format(declensionsDf.shape[0]))
if denoise:
declensionsDf = removeNoisyTerms(declensionsDf)
qs = splitQueries(declensionsDf,prefix)
return qs,prefix
def getTokens(df,drop = False):
'''
Convenience function to deal with the paging
information added into results returned
Returns @tokenDf and @df, with tokens and tweets
respectively
'''
if 'newest_id' in df.columns:
tokenDf = df[~pd.isna(df['newest_id'])]
if drop:
df = df[pd.isna(df['newest_id'])]
return tokenDf,df
else:
return pd.DataFrame(),df
def executeQueries(qs,prefix,startTime,search_args,period = '1 days',nResults = 100000,verbose = True, results_per_call= 100):
'''
Main routine to execute requests against search API
for each query string. Some logic required to make sure
each query backfills desired time period.
---------------------------------
Requires
@qs - list of query strings
@prefix - language codes
@startTime - datetime of latest date to grab
@period - time to backfill
@search_args - credentials object for API
Returns a list of DataFrames @dfs
'''
dfs = [pd.DataFrame()]*len(qs)
# Make one empty dataframe for each query
# We will append to each one
#nResults = 10000
for n,q in enumerate(qs):
print('Query {:d} of {:d}...'.format(n,len(qs)))
endTime = startTime + pd.to_timedelta(period)
query = gen_request_parameters(q, False, results_per_call=results_per_call,tweet_fields='text,author_id,id,created_at', start_time=startTime.isoformat()[0:10],end_time=endTime.isoformat()[0:10])
results = collect_results(query,max_tweets=nResults,result_stream_args=search_args)
# Grab first batch of tweets to see how close to backfilling we get
print('Grabbing first tweets')
if len(results) > 0:
# Check there is at least one match
tweets = results[:-1]
metadata = results[-1]
df = pd.DataFrame(data = tweets)
df.set_index(pd.to_datetime(df['created_at']),inplace=True)
tokenDf,df = getTokens(df)
# Get rid of the tokens for now
if verbose:
print('Got {:d} tweets'.format(df.shape[0]))
dfs[n] = dfs[n].append(df)
# Add the new tweets to the array
if verbose:
print('Takes us to',df.index[-1].isoformat()[0:-6])
breakOut = False
startTimeOffset = pd.to_timedelta('0 days')
# We need this flag to break the while loop
# for when the day ranges shift
while df.index[-1] > startTime:
# Keep grabbing tweets for this query
# Until entire date range is backfilled
print(df.index[-1])
print(startTime)
endTime = df.index[-1]
if (endTime - startTime).days == 0:
startTimeOffset = pd.to_timedelta('1 hours')
# Nudge the start date back by an hour
# To make sure that start is always before end
# Or API returns error
if verbose:
print('We need more tweets to look further back (to {:s})'.format(startTime.isoformat()[0:10]))
print('Querying with:')
print('startTime',(startTime - startTimeOffset).isoformat()[0:19])
print('endTime',endTime.isoformat()[0:19])
query = gen_request_parameters(q, results_per_call=results_per_call,tweet_fields='text,author_id,id,created_at', start_time=(startTime - startTimeOffset).isoformat()[0:10], end_time=endTime.to_pydatetime().strftime("%Y-%m-%d %H:%M"))
results = collect_results(query,max_tweets=nResults,result_stream_args=search_args)
# Grab 1k tweets first to see how far it goes
if len(results) > 0:
tweets = results[:-1]
metadata = results[-1]
df = pd.DataFrame(data = tweets)
df.set_index(pd.to_datetime(df['created_at']),inplace=True)
tokenDf,df = getTokens(df,drop = True)
# Get rid of the tokens for now
dfs[n] = dfs[n].append(df)
if verbose:
print('Takes us to',dfs[n].index[-1].isoformat())
print('{:d} tweets so far'.format(dfs[n].shape[0]))
print()
else:
print('No results....')
dfs[n] = dfs[n].append(pd.DataFrame())
breakOut = True
if breakOut:
print('Breaking out...')
break
print('Now we are done')
print('Got {:d} tweets in total'.format(dfs[n].shape[0]))
print('Between:')
print(dfs[n].index[0])
print(dfs[n].index[-1])
print('+++++++\n')
else:
print('No results...\n+++++++\n')
dfs.append(pd.DataFrame())
return dfs
def countTerms(text,stopWords = None):
'''
Convenience function to count terms in
an iterable of text (pandas series, list etc)
Returns @c counter object
'''
c = collections.Counter()
text = text.astype(str)
text.apply(lambda x:c.update(x.lower().split()))
if stopWords:
for sw in stopWords:
del c[sw]
return c
def writeData(dfs,prefix):
'''
Write dataframes with results to file
'''
stem = input('Enter data file stem (data_{:s}[_<n>.csv])'.format(prefix))
if stem == '':
stem = 'data_{:s}_'.format(prefix)
for n,df in enumerate(dfs):
fileName = '{:s}{:d}.csv'.format(stem,n)
df.to_csv(fileName)
#print('Print to',fileName)
def getMatchingKeywords(t,qs):
'''
Returns a list of keywords that match a string
'''
matches = []
tokens = t.lower().split()
for q in qs:
for kw in q.split(' OR '):
if kw in tokens:
#print('MATCHED',kw)
matches.append(kw)
return matches
def queryToList(q):
'''
Convenience function to split query string back
up into keywords
'''
return q.split(' OR ')
def get_query_results_tw(queries, startdate, enddate):
if os.environ['VERBOSE'] == 'VERBOSE':
print('get_query_results_tw', queries, startdate, enddate)
print(type(enddate))
search_args = load_credentials(filename='/content/tw_keys.yaml', yaml_key="search_tweets_v2")
tweets = []
for query_ in queries:
query = gen_request_parameters(query_, False, results_per_call=100 ,
tweet_fields='attachments,author_id,context_annotations,conversation_id,created_at,entities,geo,id,in_reply_to_user_id,lang,possibly_sensitive,public_metrics,referenced_tweets,reply_settings,source,text,withheld',
place_fields='contained_within,country,country_code,full_name,geo,id,name,place_type',
user_fields='created_at,description,entities,id,location,name,pinned_tweet_id,profile_image_url,protected,public_metrics,url,username,verified,withheld',
start_time=startdate.isoformat()[:10],
end_time=enddate.isoformat()[:10])
i = 0
while True:
time.sleep(8)
i += 1
if i > 50:
print('more that 50 pages have been collected for query :', query_)
break
if len(tweets) > 0:
if "next_token" not in tweets[-1]["meta"]:
break
k = json.loads(query)
k['next_token'] = tweets[-1]["meta"]["next_token"]
query = json.dumps(k)
tweets += collect_results(query,
max_tweets=100,
result_stream_args=search_args)
tweets_ = tweets[-1]
if os.environ['VERBOSE'] == 'VERBOSE':
print(tweets_["meta"], len(tweets_["data"]))
df = pd.DataFrame(itertools.chain.from_iterable([i["data"] for i in tweets]))
df["like_count"] = df.public_metrics.apply(lambda x: x["like_count"])
df["quote_count"] = df.public_metrics.apply(lambda x: x["quote_count"])
df["reply_count"] = df.public_metrics.apply(lambda x: x["reply_count"])
df["retweet_count"] = df.public_metrics.apply(lambda x: x["retweet_count"])
return df |
if prefix == '':
prefix = 'az'
print(prefix+' chosen')
print('Getting list of d | conditional_block |
dataset_data_particle.py | #!/usr/bin/env python
"""
@package mi.core.instrument.data_particle_generator Base data particle generator
@file mi/core/instrument/data_particle_generator.py
@author Steve Foley
@brief Contains logic to generate data particles to be exchanged between
the driver and agent. This involves a JSON interchange format
"""
import time
import ntplib
import base64
import json
from mi.core.common import BaseEnum
from mi.core.exceptions import SampleException, ReadOnlyException, NotImplementedException, InstrumentParameterException
from mi.core.log import get_logger
log = get_logger()
__author__ = 'Steve Foley'
__license__ = 'Apache 2.0'
class CommonDataParticleType(BaseEnum):
"""
This enum defines all the common particle types defined in the modules. Currently there is only one, but by
using an enum here we have the opportunity to define more common data particles.
"""
RAW = "raw"
class DataParticleKey(BaseEnum):
PKT_FORMAT_ID = "pkt_format_id"
PKT_VERSION = "pkt_version"
STREAM_NAME = "stream_name"
INTERNAL_TIMESTAMP = "internal_timestamp"
PORT_TIMESTAMP = "port_timestamp"
DRIVER_TIMESTAMP = "driver_timestamp"
PREFERRED_TIMESTAMP = "preferred_timestamp"
QUALITY_FLAG = "quality_flag"
VALUES = "values"
VALUE_ID = "value_id"
VALUE = "value"
BINARY = "binary"
NEW_SEQUENCE = "new_sequence"
class DataParticleValue(BaseEnum):
JSON_DATA = "JSON_Data"
ENG = "eng"
OK = "ok"
CHECKSUM_FAILED = "checksum_failed"
OUT_OF_RANGE = "out_of_range"
INVALID = "invalid"
QUESTIONABLE = "questionable"
class DataParticle(object):
"""
This class is responsible for storing and ultimately generating data
particles in the designated format from the associated inputs. It
fills in fields as necessary, and is a valid Data Particle
that can be sent up to the InstrumentAgent.
It is the intent that this class is subclassed as needed if an instrument must
modify fields in the outgoing packet. The hope is to have most of the superclass
code be called by the child class with just values overridden as needed.
"""
# data particle type is intended to be defined in each derived data particle class. This value should be unique
# for all data particles. Best practice is to access this variable using the accessor method:
# data_particle_type()
_data_particle_type = None
def __init__(self, raw_data,
port_timestamp=None,
internal_timestamp=None,
preferred_timestamp=None,
quality_flag=DataParticleValue.OK,
new_sequence=None):
|
def __eq__(self, arg):
"""
Quick equality check for testing purposes. If they have the same raw
data, timestamp, they are the same enough for this particle
"""
allowed_diff = .000001
if self._data_particle_type != arg._data_particle_type:
log.debug('Data particle type does not match: %s %s', self._data_particle_type, arg._data_particle_type)
return False
if self.raw_data != arg.raw_data:
log.debug('Raw data does not match')
return False
t1 = self.contents[DataParticleKey.INTERNAL_TIMESTAMP]
t2 = arg.contents[DataParticleKey.INTERNAL_TIMESTAMP]
if (t1 is None) or (t2 is None):
tdiff = allowed_diff
else:
tdiff = abs(t1 - t2)
if tdiff > allowed_diff:
log.debug('Timestamp %s does not match %s', t1, t2)
return False
generated1 = json.loads(self.generate())
generated2 = json.loads(arg.generate())
missing, differing = self._compare(generated1, generated2, ignore_keys=[DataParticleKey.DRIVER_TIMESTAMP,
DataParticleKey.PREFERRED_TIMESTAMP])
if missing:
log.error('Key mismatch between particle dictionaries: %r', missing)
return False
if differing:
log.error('Value mismatch between particle dictionaries: %r', differing)
return True
@staticmethod
def _compare(d1, d2, ignore_keys=None):
ignore_keys = ignore_keys if ignore_keys else []
missing = set(d1).symmetric_difference(d2)
differing = {}
for k in d1:
if k in ignore_keys or k in missing:
continue
if d1[k] != d2[k]:
differing[k] = (d1[k], d2[k])
return missing, differing
def set_internal_timestamp(self, timestamp=None, unix_time=None):
"""
Set the internal timestamp
@param timestamp: NTP timestamp to set
@param unit_time: Unix time as returned from time.time()
@raise InstrumentParameterException if timestamp or unix_time not supplied
"""
if timestamp is None and unix_time is None:
raise InstrumentParameterException("timestamp or unix_time required")
if unix_time is not None:
timestamp = ntplib.system_to_ntp_time(unix_time)
# Do we want this to happen here or in down stream processes?
# if(not self._check_timestamp(timestamp)):
# raise InstrumentParameterException("invalid timestamp")
self.contents[DataParticleKey.INTERNAL_TIMESTAMP] = float(timestamp)
def set_port_timestamp(self, timestamp=None, unix_time=None):
"""
Set the port timestamp
@param timestamp: NTP timestamp to set
@param unix_time: Unix time as returned from time.time()
@raise InstrumentParameterException if timestamp or unix_time not supplied
"""
if timestamp is None and unix_time is None:
raise InstrumentParameterException("timestamp or unix_time required")
if unix_time is not None:
timestamp = ntplib.system_to_ntp_time(unix_time)
# Do we want this to happen here or in down stream processes?
if not self._check_timestamp(timestamp):
raise InstrumentParameterException("invalid timestamp")
self.contents[DataParticleKey.PORT_TIMESTAMP] = float(timestamp)
def set_value(self, id, value):
"""
Set a content value, restricted as necessary
@param id The ID of the value to set, should be from DataParticleKey
@param value The value to set
@raises ReadOnlyException If the parameter cannot be set
"""
if (id == DataParticleKey.INTERNAL_TIMESTAMP) and (self._check_timestamp(value)):
self.contents[DataParticleKey.INTERNAL_TIMESTAMP] = value
else:
raise ReadOnlyException("Parameter %s not able to be set to %s after object creation!" %
(id, value))
def get_value(self, id):
""" Return a stored value from contents
@param id The ID (from DataParticleKey) for the parameter to return
@raises NotImplementedException If there is an invalid id
"""
if DataParticleKey.has(id):
return self.contents[id]
else:
raise NotImplementedException("Value %s not available in particle!", id)
def get_value_from_values(self, value_id):
""" Return a stored value from values list
@param value_id The ID of the parameter to return
"""
if not self._values:
return None
values = [i for i in self._values if i[DataParticleKey.VALUE_ID] == value_id]
if not values:
return None
return values[0][DataParticleKey.VALUE]
def data_particle_type(self):
"""
Return the data particle type (aka stream name)
@raise: NotImplementedException if _data_particle_type is not set
"""
if self._data_particle_type is None:
raise NotImplementedException("_data_particle_type not initialized")
return self._data_particle_type
def generate_dict(self):
"""
Generate a simple dictionary of sensor data and timestamps, without
going to JSON. This is useful for the times when JSON is not needed to
go across an interface. There are times when particles are used
internally to a component/process/module/etc.
@retval A python dictionary with the proper timestamps and data values
@throws InstrumentDriverException if there is a problem wtih the inputs
"""
# verify preferred timestamp exists in the structure...
if not self._check_preferred_timestamps():
raise SampleException("Preferred timestamp not in particle!")
# build response structure
self._encoding_errors = []
if self._values is None:
self._values = self._build_parsed_values()
result = self._build_base_structure()
result[DataParticleKey.STREAM_NAME] = self.data_particle_type()
result[DataParticleKey.VALUES] = self._values
return result
def generate(self, sorted=False):
"""
Generates a JSON_parsed packet from a sample dictionary of sensor data and
associates a timestamp with it
@param sorted Returned sorted json dict, useful for testing, but slow,
so dont do it unless it is important
@return A JSON_raw string, properly structured with port agent time stamp
and driver timestamp
@throws InstrumentDriverException If there is a problem with the inputs
"""
json_result = json.dumps(self.generate_dict(), sort_keys=sorted)
return json_result
def _build_parsed_values(self):
"""
Build values of a parsed structure. Just the values are built so
so that a child class can override this class, but call it with
super() to get the base structure before modification
@return the values tag for this data structure ready to JSONify
@raises SampleException when parsed values can not be properly returned
"""
raise SampleException("Parsed values block not overridden")
def _build_base_structure(self):
"""
Build the base/header information for an output structure.
Follow on methods can then modify it by adding or editing values.
@return A fresh copy of a core structure to be exported
"""
result = dict(self.contents)
# clean out optional fields that were missing
if not self.contents[DataParticleKey.PORT_TIMESTAMP]:
del result[DataParticleKey.PORT_TIMESTAMP]
if not self.contents[DataParticleKey.INTERNAL_TIMESTAMP]:
del result[DataParticleKey.INTERNAL_TIMESTAMP]
return result
def _check_timestamp(self, timestamp):
"""
Check to make sure the timestamp is reasonable
@param timestamp An NTP4 formatted timestamp (64bit)
@return True if timestamp is okay or None, False otherwise
"""
if timestamp is None:
return True
if not isinstance(timestamp, float):
return False
# is it sufficiently in the future to be unreasonable?
if timestamp > ntplib.system_to_ntp_time(time.time() + (86400 * 365)):
return False
else:
return True
def _check_preferred_timestamps(self):
"""
Check to make sure the preferred timestamp indicated in the
particle is actually listed, possibly adjusting to 2nd best
if not there.
@throws SampleException When there is a problem with the preferred
timestamp in the sample.
"""
if self.contents[DataParticleKey.PREFERRED_TIMESTAMP] is None:
raise SampleException("Missing preferred timestamp, %s, in particle" %
self.contents[DataParticleKey.PREFERRED_TIMESTAMP])
# This should be handled downstream. Don't want to not publish data because
# the port agent stopped putting out timestamps
# if self.contents[self.contents[DataParticleKey.PREFERRED_TIMESTAMP]] == None:
# raise SampleException("Preferred timestamp, %s, is not defined" %
# self.contents[DataParticleKey.PREFERRED_TIMESTAMP])
return True
def _encode_value(self, name, value, encoding_function, value_range=None):
"""
Encode a value using the encoding function, if it fails store the error in a queue
:param value_range tuple containing min/max numerical values or min/max lengths
"""
encoded_val = None
# noinspection PyBroadException
# - custom encoding_function exceptions are not known a priori
try:
encoded_val = encoding_function(value)
except ValueError as e:
log.error('Unable to convert %s to %s.', encoded_val, encoding_function)
self._encoding_errors.append({name: value})
except Exception as e:
log.error('Data particle error encoding. Name: %s Value: %s, Encoding: %s', name, value, encoding_function)
self._encoding_errors.append({name: value})
# optional range checking
if value_range:
try:
vmin, vmax = value_range
except ValueError as e: # this only occurs as a programming error and should cause the parser to exit
log.exception('_encode_value must have exactly two values for tuple argument value_range')
raise ValueError(e)
if encoding_function in [int, float]:
if vmin and encoded_val < vmin:
log.error('Particle value (%s) below minimum threshold (%s < %s)', name, value, vmin)
self._encoding_errors.append({name: value})
elif vmax and encoded_val > vmax:
log.error('Particle value (%s) exceeds maximum threshold (%s > %s)', name, value, vmax)
self._encoding_errors.append({name: value})
elif hasattr(encoded_val, '__len__'):
try:
if vmin and len(encoded_val) < vmin:
log.error('Particle value (%s) length below minimum threshold (%s < %s)',
name, value, vmin)
self._encoding_errors.append({name: value})
elif vmax and len(encoded_val) > vmax:
log.error('Particle value (%s) length exceeds maximum threshold (%s > %s)',
name, value, vmax)
self._encoding_errors.append({name: value})
# in the unlikely event that a range was specified and the encoding object created a bogus len()
# we'll just ignore the range check
except TypeError:
log.warning('_encode_value received an encoding function (%s) that claimed to implement len() but '
'does not. Unable to apply range test to %s', encoding_function, name)
return {DataParticleKey.VALUE_ID: name,
DataParticleKey.VALUE: encoded_val}
def get_encoding_errors(self):
"""
Return the encoding errors list
"""
return self._encoding_errors
class RawDataParticleKey(BaseEnum):
PAYLOAD = "raw"
LENGTH = "length"
TYPE = "type"
CHECKSUM = "checksum"
class RawDataParticle(DataParticle):
"""
This class a common data particle for generating data particles of raw
data.
It essentially is a translation of the port agent packet
"""
_data_particle_type = CommonDataParticleType.RAW
def _build_parsed_values(self):
"""
Build a particle out of a port agent packet.
@returns A list that is ready to be added to the "values" tag before
the structure is JSONified
"""
port_agent_packet = self.raw_data
if not isinstance(port_agent_packet, dict):
raise SampleException("raw data not a dictionary")
for param in ["raw", "length", "type", "checksum"]:
if param not in port_agent_packet:
raise SampleException("raw data not a complete port agent packet. missing %s" % param)
payload = None
length = None
type = None
checksum = None
# Attempt to convert values
try:
payload = base64.b64encode(port_agent_packet.get("raw"))
except TypeError:
pass
try:
length = int(port_agent_packet.get("length"))
except TypeError:
pass
try:
type = int(port_agent_packet.get("type"))
except TypeError:
pass
try:
checksum = int(port_agent_packet.get("checksum"))
except TypeError:
pass
result = [{
DataParticleKey.VALUE_ID: RawDataParticleKey.PAYLOAD,
DataParticleKey.VALUE: payload,
DataParticleKey.BINARY: True},
{
DataParticleKey.VALUE_ID: RawDataParticleKey.LENGTH,
DataParticleKey.VALUE: length},
{
DataParticleKey.VALUE_ID: RawDataParticleKey.TYPE,
DataParticleKey.VALUE: type},
{
DataParticleKey.VALUE_ID: RawDataParticleKey.CHECKSUM,
DataParticleKey.VALUE: checksum},
]
return result
| """ Build a particle seeded with appropriate information
@param raw_data The raw data used in the particle
"""
if new_sequence is not None and not isinstance(new_sequence, bool):
raise TypeError("new_sequence is not a bool")
self.contents = {
DataParticleKey.PKT_FORMAT_ID: DataParticleValue.JSON_DATA,
DataParticleKey.PKT_VERSION: 1,
DataParticleKey.PORT_TIMESTAMP: port_timestamp,
DataParticleKey.INTERNAL_TIMESTAMP: internal_timestamp,
DataParticleKey.DRIVER_TIMESTAMP: ntplib.system_to_ntp_time(time.time()),
DataParticleKey.PREFERRED_TIMESTAMP: preferred_timestamp,
DataParticleKey.QUALITY_FLAG: quality_flag,
}
self._encoding_errors = []
if new_sequence is not None:
self.contents[DataParticleKey.NEW_SEQUENCE] = new_sequence
self.raw_data = raw_data
self._values = None | identifier_body |
dataset_data_particle.py | #!/usr/bin/env python
"""
@package mi.core.instrument.data_particle_generator Base data particle generator
@file mi/core/instrument/data_particle_generator.py
@author Steve Foley
@brief Contains logic to generate data particles to be exchanged between
the driver and agent. This involves a JSON interchange format
"""
import time
import ntplib
import base64
import json
from mi.core.common import BaseEnum
from mi.core.exceptions import SampleException, ReadOnlyException, NotImplementedException, InstrumentParameterException
from mi.core.log import get_logger
log = get_logger()
__author__ = 'Steve Foley'
__license__ = 'Apache 2.0'
class CommonDataParticleType(BaseEnum):
"""
This enum defines all the common particle types defined in the modules. Currently there is only one, but by
using an enum here we have the opportunity to define more common data particles.
"""
RAW = "raw"
class DataParticleKey(BaseEnum):
PKT_FORMAT_ID = "pkt_format_id"
PKT_VERSION = "pkt_version"
STREAM_NAME = "stream_name"
INTERNAL_TIMESTAMP = "internal_timestamp"
PORT_TIMESTAMP = "port_timestamp"
DRIVER_TIMESTAMP = "driver_timestamp"
PREFERRED_TIMESTAMP = "preferred_timestamp"
QUALITY_FLAG = "quality_flag"
VALUES = "values"
VALUE_ID = "value_id"
VALUE = "value"
BINARY = "binary"
NEW_SEQUENCE = "new_sequence"
class DataParticleValue(BaseEnum):
JSON_DATA = "JSON_Data"
ENG = "eng"
OK = "ok"
CHECKSUM_FAILED = "checksum_failed"
OUT_OF_RANGE = "out_of_range"
INVALID = "invalid"
QUESTIONABLE = "questionable"
class DataParticle(object):
"""
This class is responsible for storing and ultimately generating data
particles in the designated format from the associated inputs. It
fills in fields as necessary, and is a valid Data Particle
that can be sent up to the InstrumentAgent.
It is the intent that this class is subclassed as needed if an instrument must
modify fields in the outgoing packet. The hope is to have most of the superclass
code be called by the child class with just values overridden as needed.
"""
# data particle type is intended to be defined in each derived data particle class. This value should be unique
# for all data particles. Best practice is to access this variable using the accessor method:
# data_particle_type()
_data_particle_type = None
def __init__(self, raw_data,
port_timestamp=None,
internal_timestamp=None,
preferred_timestamp=None,
quality_flag=DataParticleValue.OK,
new_sequence=None):
""" Build a particle seeded with appropriate information
@param raw_data The raw data used in the particle
"""
if new_sequence is not None and not isinstance(new_sequence, bool):
raise TypeError("new_sequence is not a bool")
self.contents = {
DataParticleKey.PKT_FORMAT_ID: DataParticleValue.JSON_DATA,
DataParticleKey.PKT_VERSION: 1,
DataParticleKey.PORT_TIMESTAMP: port_timestamp,
DataParticleKey.INTERNAL_TIMESTAMP: internal_timestamp,
DataParticleKey.DRIVER_TIMESTAMP: ntplib.system_to_ntp_time(time.time()),
DataParticleKey.PREFERRED_TIMESTAMP: preferred_timestamp,
DataParticleKey.QUALITY_FLAG: quality_flag,
}
self._encoding_errors = []
if new_sequence is not None:
self.contents[DataParticleKey.NEW_SEQUENCE] = new_sequence
self.raw_data = raw_data
self._values = None
def __eq__(self, arg):
"""
Quick equality check for testing purposes. If they have the same raw
data, timestamp, they are the same enough for this particle
"""
allowed_diff = .000001
if self._data_particle_type != arg._data_particle_type:
log.debug('Data particle type does not match: %s %s', self._data_particle_type, arg._data_particle_type)
return False
if self.raw_data != arg.raw_data:
log.debug('Raw data does not match')
return False
t1 = self.contents[DataParticleKey.INTERNAL_TIMESTAMP]
t2 = arg.contents[DataParticleKey.INTERNAL_TIMESTAMP]
if (t1 is None) or (t2 is None):
tdiff = allowed_diff
else:
tdiff = abs(t1 - t2)
if tdiff > allowed_diff:
log.debug('Timestamp %s does not match %s', t1, t2)
return False
generated1 = json.loads(self.generate())
generated2 = json.loads(arg.generate())
missing, differing = self._compare(generated1, generated2, ignore_keys=[DataParticleKey.DRIVER_TIMESTAMP,
DataParticleKey.PREFERRED_TIMESTAMP])
if missing:
log.error('Key mismatch between particle dictionaries: %r', missing)
return False
if differing:
log.error('Value mismatch between particle dictionaries: %r', differing)
return True
@staticmethod
def _compare(d1, d2, ignore_keys=None):
ignore_keys = ignore_keys if ignore_keys else []
missing = set(d1).symmetric_difference(d2)
differing = {}
for k in d1:
if k in ignore_keys or k in missing:
continue
if d1[k] != d2[k]:
differing[k] = (d1[k], d2[k])
return missing, differing
def set_internal_timestamp(self, timestamp=None, unix_time=None):
"""
Set the internal timestamp
@param timestamp: NTP timestamp to set
@param unit_time: Unix time as returned from time.time()
@raise InstrumentParameterException if timestamp or unix_time not supplied
"""
if timestamp is None and unix_time is None:
raise InstrumentParameterException("timestamp or unix_time required")
if unix_time is not None:
timestamp = ntplib.system_to_ntp_time(unix_time)
# Do we want this to happen here or in down stream processes?
# if(not self._check_timestamp(timestamp)):
# raise InstrumentParameterException("invalid timestamp")
self.contents[DataParticleKey.INTERNAL_TIMESTAMP] = float(timestamp)
def set_port_timestamp(self, timestamp=None, unix_time=None):
"""
Set the port timestamp
@param timestamp: NTP timestamp to set
@param unix_time: Unix time as returned from time.time()
@raise InstrumentParameterException if timestamp or unix_time not supplied
"""
if timestamp is None and unix_time is None:
raise InstrumentParameterException("timestamp or unix_time required")
if unix_time is not None:
timestamp = ntplib.system_to_ntp_time(unix_time)
# Do we want this to happen here or in down stream processes?
if not self._check_timestamp(timestamp):
raise InstrumentParameterException("invalid timestamp")
self.contents[DataParticleKey.PORT_TIMESTAMP] = float(timestamp)
def set_value(self, id, value):
"""
Set a content value, restricted as necessary
@param id The ID of the value to set, should be from DataParticleKey
@param value The value to set
@raises ReadOnlyException If the parameter cannot be set
"""
if (id == DataParticleKey.INTERNAL_TIMESTAMP) and (self._check_timestamp(value)):
self.contents[DataParticleKey.INTERNAL_TIMESTAMP] = value
else:
raise ReadOnlyException("Parameter %s not able to be set to %s after object creation!" %
(id, value))
def get_value(self, id):
""" Return a stored value from contents
@param id The ID (from DataParticleKey) for the parameter to return
@raises NotImplementedException If there is an invalid id
"""
if DataParticleKey.has(id):
return self.contents[id]
else:
raise NotImplementedException("Value %s not available in particle!", id)
def get_value_from_values(self, value_id):
""" Return a stored value from values list
@param value_id The ID of the parameter to return
"""
if not self._values:
return None
values = [i for i in self._values if i[DataParticleKey.VALUE_ID] == value_id]
if not values:
return None
return values[0][DataParticleKey.VALUE]
def data_particle_type(self):
"""
Return the data particle type (aka stream name)
@raise: NotImplementedException if _data_particle_type is not set
"""
if self._data_particle_type is None:
raise NotImplementedException("_data_particle_type not initialized")
return self._data_particle_type
def generate_dict(self):
"""
Generate a simple dictionary of sensor data and timestamps, without
going to JSON. This is useful for the times when JSON is not needed to
go across an interface. There are times when particles are used
internally to a component/process/module/etc.
@retval A python dictionary with the proper timestamps and data values
@throws InstrumentDriverException if there is a problem wtih the inputs
"""
# verify preferred timestamp exists in the structure...
if not self._check_preferred_timestamps():
raise SampleException("Preferred timestamp not in particle!")
# build response structure
self._encoding_errors = []
if self._values is None:
self._values = self._build_parsed_values()
result = self._build_base_structure()
result[DataParticleKey.STREAM_NAME] = self.data_particle_type()
result[DataParticleKey.VALUES] = self._values
return result
def generate(self, sorted=False):
"""
Generates a JSON_parsed packet from a sample dictionary of sensor data and
associates a timestamp with it
@param sorted Returned sorted json dict, useful for testing, but slow,
so dont do it unless it is important
@return A JSON_raw string, properly structured with port agent time stamp
and driver timestamp
@throws InstrumentDriverException If there is a problem with the inputs
"""
json_result = json.dumps(self.generate_dict(), sort_keys=sorted)
return json_result
def _build_parsed_values(self):
"""
Build values of a parsed structure. Just the values are built so
so that a child class can override this class, but call it with
super() to get the base structure before modification
@return the values tag for this data structure ready to JSONify
@raises SampleException when parsed values can not be properly returned
"""
raise SampleException("Parsed values block not overridden")
def _build_base_structure(self): | Build the base/header information for an output structure.
Follow on methods can then modify it by adding or editing values.
@return A fresh copy of a core structure to be exported
"""
result = dict(self.contents)
# clean out optional fields that were missing
if not self.contents[DataParticleKey.PORT_TIMESTAMP]:
del result[DataParticleKey.PORT_TIMESTAMP]
if not self.contents[DataParticleKey.INTERNAL_TIMESTAMP]:
del result[DataParticleKey.INTERNAL_TIMESTAMP]
return result
def _check_timestamp(self, timestamp):
"""
Check to make sure the timestamp is reasonable
@param timestamp An NTP4 formatted timestamp (64bit)
@return True if timestamp is okay or None, False otherwise
"""
if timestamp is None:
return True
if not isinstance(timestamp, float):
return False
# is it sufficiently in the future to be unreasonable?
if timestamp > ntplib.system_to_ntp_time(time.time() + (86400 * 365)):
return False
else:
return True
def _check_preferred_timestamps(self):
"""
Check to make sure the preferred timestamp indicated in the
particle is actually listed, possibly adjusting to 2nd best
if not there.
@throws SampleException When there is a problem with the preferred
timestamp in the sample.
"""
if self.contents[DataParticleKey.PREFERRED_TIMESTAMP] is None:
raise SampleException("Missing preferred timestamp, %s, in particle" %
self.contents[DataParticleKey.PREFERRED_TIMESTAMP])
# This should be handled downstream. Don't want to not publish data because
# the port agent stopped putting out timestamps
# if self.contents[self.contents[DataParticleKey.PREFERRED_TIMESTAMP]] == None:
# raise SampleException("Preferred timestamp, %s, is not defined" %
# self.contents[DataParticleKey.PREFERRED_TIMESTAMP])
return True
def _encode_value(self, name, value, encoding_function, value_range=None):
"""
Encode a value using the encoding function, if it fails store the error in a queue
:param value_range tuple containing min/max numerical values or min/max lengths
"""
encoded_val = None
# noinspection PyBroadException
# - custom encoding_function exceptions are not known a priori
try:
encoded_val = encoding_function(value)
except ValueError as e:
log.error('Unable to convert %s to %s.', encoded_val, encoding_function)
self._encoding_errors.append({name: value})
except Exception as e:
log.error('Data particle error encoding. Name: %s Value: %s, Encoding: %s', name, value, encoding_function)
self._encoding_errors.append({name: value})
# optional range checking
if value_range:
try:
vmin, vmax = value_range
except ValueError as e: # this only occurs as a programming error and should cause the parser to exit
log.exception('_encode_value must have exactly two values for tuple argument value_range')
raise ValueError(e)
if encoding_function in [int, float]:
if vmin and encoded_val < vmin:
log.error('Particle value (%s) below minimum threshold (%s < %s)', name, value, vmin)
self._encoding_errors.append({name: value})
elif vmax and encoded_val > vmax:
log.error('Particle value (%s) exceeds maximum threshold (%s > %s)', name, value, vmax)
self._encoding_errors.append({name: value})
elif hasattr(encoded_val, '__len__'):
try:
if vmin and len(encoded_val) < vmin:
log.error('Particle value (%s) length below minimum threshold (%s < %s)',
name, value, vmin)
self._encoding_errors.append({name: value})
elif vmax and len(encoded_val) > vmax:
log.error('Particle value (%s) length exceeds maximum threshold (%s > %s)',
name, value, vmax)
self._encoding_errors.append({name: value})
# in the unlikely event that a range was specified and the encoding object created a bogus len()
# we'll just ignore the range check
except TypeError:
log.warning('_encode_value received an encoding function (%s) that claimed to implement len() but '
'does not. Unable to apply range test to %s', encoding_function, name)
return {DataParticleKey.VALUE_ID: name,
DataParticleKey.VALUE: encoded_val}
def get_encoding_errors(self):
"""
Return the encoding errors list
"""
return self._encoding_errors
class RawDataParticleKey(BaseEnum):
PAYLOAD = "raw"
LENGTH = "length"
TYPE = "type"
CHECKSUM = "checksum"
class RawDataParticle(DataParticle):
"""
This class a common data particle for generating data particles of raw
data.
It essentially is a translation of the port agent packet
"""
_data_particle_type = CommonDataParticleType.RAW
def _build_parsed_values(self):
"""
Build a particle out of a port agent packet.
@returns A list that is ready to be added to the "values" tag before
the structure is JSONified
"""
port_agent_packet = self.raw_data
if not isinstance(port_agent_packet, dict):
raise SampleException("raw data not a dictionary")
for param in ["raw", "length", "type", "checksum"]:
if param not in port_agent_packet:
raise SampleException("raw data not a complete port agent packet. missing %s" % param)
payload = None
length = None
type = None
checksum = None
# Attempt to convert values
try:
payload = base64.b64encode(port_agent_packet.get("raw"))
except TypeError:
pass
try:
length = int(port_agent_packet.get("length"))
except TypeError:
pass
try:
type = int(port_agent_packet.get("type"))
except TypeError:
pass
try:
checksum = int(port_agent_packet.get("checksum"))
except TypeError:
pass
result = [{
DataParticleKey.VALUE_ID: RawDataParticleKey.PAYLOAD,
DataParticleKey.VALUE: payload,
DataParticleKey.BINARY: True},
{
DataParticleKey.VALUE_ID: RawDataParticleKey.LENGTH,
DataParticleKey.VALUE: length},
{
DataParticleKey.VALUE_ID: RawDataParticleKey.TYPE,
DataParticleKey.VALUE: type},
{
DataParticleKey.VALUE_ID: RawDataParticleKey.CHECKSUM,
DataParticleKey.VALUE: checksum},
]
return result | """ | random_line_split |
dataset_data_particle.py | #!/usr/bin/env python
"""
@package mi.core.instrument.data_particle_generator Base data particle generator
@file mi/core/instrument/data_particle_generator.py
@author Steve Foley
@brief Contains logic to generate data particles to be exchanged between
the driver and agent. This involves a JSON interchange format
"""
import time
import ntplib
import base64
import json
from mi.core.common import BaseEnum
from mi.core.exceptions import SampleException, ReadOnlyException, NotImplementedException, InstrumentParameterException
from mi.core.log import get_logger
log = get_logger()
__author__ = 'Steve Foley'
__license__ = 'Apache 2.0'
class CommonDataParticleType(BaseEnum):
"""
This enum defines all the common particle types defined in the modules. Currently there is only one, but by
using an enum here we have the opportunity to define more common data particles.
"""
RAW = "raw"
class DataParticleKey(BaseEnum):
PKT_FORMAT_ID = "pkt_format_id"
PKT_VERSION = "pkt_version"
STREAM_NAME = "stream_name"
INTERNAL_TIMESTAMP = "internal_timestamp"
PORT_TIMESTAMP = "port_timestamp"
DRIVER_TIMESTAMP = "driver_timestamp"
PREFERRED_TIMESTAMP = "preferred_timestamp"
QUALITY_FLAG = "quality_flag"
VALUES = "values"
VALUE_ID = "value_id"
VALUE = "value"
BINARY = "binary"
NEW_SEQUENCE = "new_sequence"
class DataParticleValue(BaseEnum):
JSON_DATA = "JSON_Data"
ENG = "eng"
OK = "ok"
CHECKSUM_FAILED = "checksum_failed"
OUT_OF_RANGE = "out_of_range"
INVALID = "invalid"
QUESTIONABLE = "questionable"
class DataParticle(object):
"""
This class is responsible for storing and ultimately generating data
particles in the designated format from the associated inputs. It
fills in fields as necessary, and is a valid Data Particle
that can be sent up to the InstrumentAgent.
It is the intent that this class is subclassed as needed if an instrument must
modify fields in the outgoing packet. The hope is to have most of the superclass
code be called by the child class with just values overridden as needed.
"""
# data particle type is intended to be defined in each derived data particle class. This value should be unique
# for all data particles. Best practice is to access this variable using the accessor method:
# data_particle_type()
_data_particle_type = None
def __init__(self, raw_data,
port_timestamp=None,
internal_timestamp=None,
preferred_timestamp=None,
quality_flag=DataParticleValue.OK,
new_sequence=None):
""" Build a particle seeded with appropriate information
@param raw_data The raw data used in the particle
"""
if new_sequence is not None and not isinstance(new_sequence, bool):
raise TypeError("new_sequence is not a bool")
self.contents = {
DataParticleKey.PKT_FORMAT_ID: DataParticleValue.JSON_DATA,
DataParticleKey.PKT_VERSION: 1,
DataParticleKey.PORT_TIMESTAMP: port_timestamp,
DataParticleKey.INTERNAL_TIMESTAMP: internal_timestamp,
DataParticleKey.DRIVER_TIMESTAMP: ntplib.system_to_ntp_time(time.time()),
DataParticleKey.PREFERRED_TIMESTAMP: preferred_timestamp,
DataParticleKey.QUALITY_FLAG: quality_flag,
}
self._encoding_errors = []
if new_sequence is not None:
self.contents[DataParticleKey.NEW_SEQUENCE] = new_sequence
self.raw_data = raw_data
self._values = None
def __eq__(self, arg):
"""
Quick equality check for testing purposes. If they have the same raw
data, timestamp, they are the same enough for this particle
"""
allowed_diff = .000001
if self._data_particle_type != arg._data_particle_type:
log.debug('Data particle type does not match: %s %s', self._data_particle_type, arg._data_particle_type)
return False
if self.raw_data != arg.raw_data:
log.debug('Raw data does not match')
return False
t1 = self.contents[DataParticleKey.INTERNAL_TIMESTAMP]
t2 = arg.contents[DataParticleKey.INTERNAL_TIMESTAMP]
if (t1 is None) or (t2 is None):
tdiff = allowed_diff
else:
tdiff = abs(t1 - t2)
if tdiff > allowed_diff:
log.debug('Timestamp %s does not match %s', t1, t2)
return False
generated1 = json.loads(self.generate())
generated2 = json.loads(arg.generate())
missing, differing = self._compare(generated1, generated2, ignore_keys=[DataParticleKey.DRIVER_TIMESTAMP,
DataParticleKey.PREFERRED_TIMESTAMP])
if missing:
log.error('Key mismatch between particle dictionaries: %r', missing)
return False
if differing:
log.error('Value mismatch between particle dictionaries: %r', differing)
return True
@staticmethod
def _compare(d1, d2, ignore_keys=None):
ignore_keys = ignore_keys if ignore_keys else []
missing = set(d1).symmetric_difference(d2)
differing = {}
for k in d1:
if k in ignore_keys or k in missing:
continue
if d1[k] != d2[k]:
differing[k] = (d1[k], d2[k])
return missing, differing
def set_internal_timestamp(self, timestamp=None, unix_time=None):
"""
Set the internal timestamp
@param timestamp: NTP timestamp to set
@param unit_time: Unix time as returned from time.time()
@raise InstrumentParameterException if timestamp or unix_time not supplied
"""
if timestamp is None and unix_time is None:
raise InstrumentParameterException("timestamp or unix_time required")
if unix_time is not None:
timestamp = ntplib.system_to_ntp_time(unix_time)
# Do we want this to happen here or in down stream processes?
# if(not self._check_timestamp(timestamp)):
# raise InstrumentParameterException("invalid timestamp")
self.contents[DataParticleKey.INTERNAL_TIMESTAMP] = float(timestamp)
def set_port_timestamp(self, timestamp=None, unix_time=None):
"""
Set the port timestamp
@param timestamp: NTP timestamp to set
@param unix_time: Unix time as returned from time.time()
@raise InstrumentParameterException if timestamp or unix_time not supplied
"""
if timestamp is None and unix_time is None:
raise InstrumentParameterException("timestamp or unix_time required")
if unix_time is not None:
timestamp = ntplib.system_to_ntp_time(unix_time)
# Do we want this to happen here or in down stream processes?
if not self._check_timestamp(timestamp):
raise InstrumentParameterException("invalid timestamp")
self.contents[DataParticleKey.PORT_TIMESTAMP] = float(timestamp)
def set_value(self, id, value):
"""
Set a content value, restricted as necessary
@param id The ID of the value to set, should be from DataParticleKey
@param value The value to set
@raises ReadOnlyException If the parameter cannot be set
"""
if (id == DataParticleKey.INTERNAL_TIMESTAMP) and (self._check_timestamp(value)):
self.contents[DataParticleKey.INTERNAL_TIMESTAMP] = value
else:
raise ReadOnlyException("Parameter %s not able to be set to %s after object creation!" %
(id, value))
def get_value(self, id):
""" Return a stored value from contents
@param id The ID (from DataParticleKey) for the parameter to return
@raises NotImplementedException If there is an invalid id
"""
if DataParticleKey.has(id):
return self.contents[id]
else:
raise NotImplementedException("Value %s not available in particle!", id)
def get_value_from_values(self, value_id):
""" Return a stored value from values list
@param value_id The ID of the parameter to return
"""
if not self._values:
return None
values = [i for i in self._values if i[DataParticleKey.VALUE_ID] == value_id]
if not values:
return None
return values[0][DataParticleKey.VALUE]
def data_particle_type(self):
"""
Return the data particle type (aka stream name)
@raise: NotImplementedException if _data_particle_type is not set
"""
if self._data_particle_type is None:
raise NotImplementedException("_data_particle_type not initialized")
return self._data_particle_type
def generate_dict(self):
"""
Generate a simple dictionary of sensor data and timestamps, without
going to JSON. This is useful for the times when JSON is not needed to
go across an interface. There are times when particles are used
internally to a component/process/module/etc.
@retval A python dictionary with the proper timestamps and data values
@throws InstrumentDriverException if there is a problem wtih the inputs
"""
# verify preferred timestamp exists in the structure...
if not self._check_preferred_timestamps():
raise SampleException("Preferred timestamp not in particle!")
# build response structure
self._encoding_errors = []
if self._values is None:
self._values = self._build_parsed_values()
result = self._build_base_structure()
result[DataParticleKey.STREAM_NAME] = self.data_particle_type()
result[DataParticleKey.VALUES] = self._values
return result
def generate(self, sorted=False):
"""
Generates a JSON_parsed packet from a sample dictionary of sensor data and
associates a timestamp with it
@param sorted Returned sorted json dict, useful for testing, but slow,
so dont do it unless it is important
@return A JSON_raw string, properly structured with port agent time stamp
and driver timestamp
@throws InstrumentDriverException If there is a problem with the inputs
"""
json_result = json.dumps(self.generate_dict(), sort_keys=sorted)
return json_result
def _build_parsed_values(self):
"""
Build values of a parsed structure. Just the values are built so
so that a child class can override this class, but call it with
super() to get the base structure before modification
@return the values tag for this data structure ready to JSONify
@raises SampleException when parsed values can not be properly returned
"""
raise SampleException("Parsed values block not overridden")
def _build_base_structure(self):
"""
Build the base/header information for an output structure.
Follow on methods can then modify it by adding or editing values.
@return A fresh copy of a core structure to be exported
"""
result = dict(self.contents)
# clean out optional fields that were missing
if not self.contents[DataParticleKey.PORT_TIMESTAMP]:
del result[DataParticleKey.PORT_TIMESTAMP]
if not self.contents[DataParticleKey.INTERNAL_TIMESTAMP]:
del result[DataParticleKey.INTERNAL_TIMESTAMP]
return result
def _check_timestamp(self, timestamp):
"""
Check to make sure the timestamp is reasonable
@param timestamp An NTP4 formatted timestamp (64bit)
@return True if timestamp is okay or None, False otherwise
"""
if timestamp is None:
return True
if not isinstance(timestamp, float):
return False
# is it sufficiently in the future to be unreasonable?
if timestamp > ntplib.system_to_ntp_time(time.time() + (86400 * 365)):
return False
else:
return True
def _check_preferred_timestamps(self):
"""
Check to make sure the preferred timestamp indicated in the
particle is actually listed, possibly adjusting to 2nd best
if not there.
@throws SampleException When there is a problem with the preferred
timestamp in the sample.
"""
if self.contents[DataParticleKey.PREFERRED_TIMESTAMP] is None:
raise SampleException("Missing preferred timestamp, %s, in particle" %
self.contents[DataParticleKey.PREFERRED_TIMESTAMP])
# This should be handled downstream. Don't want to not publish data because
# the port agent stopped putting out timestamps
# if self.contents[self.contents[DataParticleKey.PREFERRED_TIMESTAMP]] == None:
# raise SampleException("Preferred timestamp, %s, is not defined" %
# self.contents[DataParticleKey.PREFERRED_TIMESTAMP])
return True
def _encode_value(self, name, value, encoding_function, value_range=None):
"""
Encode a value using the encoding function, if it fails store the error in a queue
:param value_range tuple containing min/max numerical values or min/max lengths
"""
encoded_val = None
# noinspection PyBroadException
# - custom encoding_function exceptions are not known a priori
try:
encoded_val = encoding_function(value)
except ValueError as e:
log.error('Unable to convert %s to %s.', encoded_val, encoding_function)
self._encoding_errors.append({name: value})
except Exception as e:
log.error('Data particle error encoding. Name: %s Value: %s, Encoding: %s', name, value, encoding_function)
self._encoding_errors.append({name: value})
# optional range checking
if value_range:
try:
vmin, vmax = value_range
except ValueError as e: # this only occurs as a programming error and should cause the parser to exit
log.exception('_encode_value must have exactly two values for tuple argument value_range')
raise ValueError(e)
if encoding_function in [int, float]:
if vmin and encoded_val < vmin:
log.error('Particle value (%s) below minimum threshold (%s < %s)', name, value, vmin)
self._encoding_errors.append({name: value})
elif vmax and encoded_val > vmax:
log.error('Particle value (%s) exceeds maximum threshold (%s > %s)', name, value, vmax)
self._encoding_errors.append({name: value})
elif hasattr(encoded_val, '__len__'):
try:
if vmin and len(encoded_val) < vmin:
log.error('Particle value (%s) length below minimum threshold (%s < %s)',
name, value, vmin)
self._encoding_errors.append({name: value})
elif vmax and len(encoded_val) > vmax:
log.error('Particle value (%s) length exceeds maximum threshold (%s > %s)',
name, value, vmax)
self._encoding_errors.append({name: value})
# in the unlikely event that a range was specified and the encoding object created a bogus len()
# we'll just ignore the range check
except TypeError:
log.warning('_encode_value received an encoding function (%s) that claimed to implement len() but '
'does not. Unable to apply range test to %s', encoding_function, name)
return {DataParticleKey.VALUE_ID: name,
DataParticleKey.VALUE: encoded_val}
def get_encoding_errors(self):
"""
Return the encoding errors list
"""
return self._encoding_errors
class RawDataParticleKey(BaseEnum):
PAYLOAD = "raw"
LENGTH = "length"
TYPE = "type"
CHECKSUM = "checksum"
class RawDataParticle(DataParticle):
"""
This class a common data particle for generating data particles of raw
data.
It essentially is a translation of the port agent packet
"""
_data_particle_type = CommonDataParticleType.RAW
def _build_parsed_values(self):
"""
Build a particle out of a port agent packet.
@returns A list that is ready to be added to the "values" tag before
the structure is JSONified
"""
port_agent_packet = self.raw_data
if not isinstance(port_agent_packet, dict):
raise SampleException("raw data not a dictionary")
for param in ["raw", "length", "type", "checksum"]:
if param not in port_agent_packet:
|
payload = None
length = None
type = None
checksum = None
# Attempt to convert values
try:
payload = base64.b64encode(port_agent_packet.get("raw"))
except TypeError:
pass
try:
length = int(port_agent_packet.get("length"))
except TypeError:
pass
try:
type = int(port_agent_packet.get("type"))
except TypeError:
pass
try:
checksum = int(port_agent_packet.get("checksum"))
except TypeError:
pass
result = [{
DataParticleKey.VALUE_ID: RawDataParticleKey.PAYLOAD,
DataParticleKey.VALUE: payload,
DataParticleKey.BINARY: True},
{
DataParticleKey.VALUE_ID: RawDataParticleKey.LENGTH,
DataParticleKey.VALUE: length},
{
DataParticleKey.VALUE_ID: RawDataParticleKey.TYPE,
DataParticleKey.VALUE: type},
{
DataParticleKey.VALUE_ID: RawDataParticleKey.CHECKSUM,
DataParticleKey.VALUE: checksum},
]
return result
| raise SampleException("raw data not a complete port agent packet. missing %s" % param) | conditional_block |
dataset_data_particle.py | #!/usr/bin/env python
"""
@package mi.core.instrument.data_particle_generator Base data particle generator
@file mi/core/instrument/data_particle_generator.py
@author Steve Foley
@brief Contains logic to generate data particles to be exchanged between
the driver and agent. This involves a JSON interchange format
"""
import time
import ntplib
import base64
import json
from mi.core.common import BaseEnum
from mi.core.exceptions import SampleException, ReadOnlyException, NotImplementedException, InstrumentParameterException
from mi.core.log import get_logger
log = get_logger()
__author__ = 'Steve Foley'
__license__ = 'Apache 2.0'
class CommonDataParticleType(BaseEnum):
"""
This enum defines all the common particle types defined in the modules. Currently there is only one, but by
using an enum here we have the opportunity to define more common data particles.
"""
RAW = "raw"
class DataParticleKey(BaseEnum):
PKT_FORMAT_ID = "pkt_format_id"
PKT_VERSION = "pkt_version"
STREAM_NAME = "stream_name"
INTERNAL_TIMESTAMP = "internal_timestamp"
PORT_TIMESTAMP = "port_timestamp"
DRIVER_TIMESTAMP = "driver_timestamp"
PREFERRED_TIMESTAMP = "preferred_timestamp"
QUALITY_FLAG = "quality_flag"
VALUES = "values"
VALUE_ID = "value_id"
VALUE = "value"
BINARY = "binary"
NEW_SEQUENCE = "new_sequence"
class DataParticleValue(BaseEnum):
JSON_DATA = "JSON_Data"
ENG = "eng"
OK = "ok"
CHECKSUM_FAILED = "checksum_failed"
OUT_OF_RANGE = "out_of_range"
INVALID = "invalid"
QUESTIONABLE = "questionable"
class DataParticle(object):
"""
This class is responsible for storing and ultimately generating data
particles in the designated format from the associated inputs. It
fills in fields as necessary, and is a valid Data Particle
that can be sent up to the InstrumentAgent.
It is the intent that this class is subclassed as needed if an instrument must
modify fields in the outgoing packet. The hope is to have most of the superclass
code be called by the child class with just values overridden as needed.
"""
# data particle type is intended to be defined in each derived data particle class. This value should be unique
# for all data particles. Best practice is to access this variable using the accessor method:
# data_particle_type()
_data_particle_type = None
def __init__(self, raw_data,
port_timestamp=None,
internal_timestamp=None,
preferred_timestamp=None,
quality_flag=DataParticleValue.OK,
new_sequence=None):
""" Build a particle seeded with appropriate information
@param raw_data The raw data used in the particle
"""
if new_sequence is not None and not isinstance(new_sequence, bool):
raise TypeError("new_sequence is not a bool")
self.contents = {
DataParticleKey.PKT_FORMAT_ID: DataParticleValue.JSON_DATA,
DataParticleKey.PKT_VERSION: 1,
DataParticleKey.PORT_TIMESTAMP: port_timestamp,
DataParticleKey.INTERNAL_TIMESTAMP: internal_timestamp,
DataParticleKey.DRIVER_TIMESTAMP: ntplib.system_to_ntp_time(time.time()),
DataParticleKey.PREFERRED_TIMESTAMP: preferred_timestamp,
DataParticleKey.QUALITY_FLAG: quality_flag,
}
self._encoding_errors = []
if new_sequence is not None:
self.contents[DataParticleKey.NEW_SEQUENCE] = new_sequence
self.raw_data = raw_data
self._values = None
def __eq__(self, arg):
"""
Quick equality check for testing purposes. If they have the same raw
data, timestamp, they are the same enough for this particle
"""
allowed_diff = .000001
if self._data_particle_type != arg._data_particle_type:
log.debug('Data particle type does not match: %s %s', self._data_particle_type, arg._data_particle_type)
return False
if self.raw_data != arg.raw_data:
log.debug('Raw data does not match')
return False
t1 = self.contents[DataParticleKey.INTERNAL_TIMESTAMP]
t2 = arg.contents[DataParticleKey.INTERNAL_TIMESTAMP]
if (t1 is None) or (t2 is None):
tdiff = allowed_diff
else:
tdiff = abs(t1 - t2)
if tdiff > allowed_diff:
log.debug('Timestamp %s does not match %s', t1, t2)
return False
generated1 = json.loads(self.generate())
generated2 = json.loads(arg.generate())
missing, differing = self._compare(generated1, generated2, ignore_keys=[DataParticleKey.DRIVER_TIMESTAMP,
DataParticleKey.PREFERRED_TIMESTAMP])
if missing:
log.error('Key mismatch between particle dictionaries: %r', missing)
return False
if differing:
log.error('Value mismatch between particle dictionaries: %r', differing)
return True
@staticmethod
def _compare(d1, d2, ignore_keys=None):
ignore_keys = ignore_keys if ignore_keys else []
missing = set(d1).symmetric_difference(d2)
differing = {}
for k in d1:
if k in ignore_keys or k in missing:
continue
if d1[k] != d2[k]:
differing[k] = (d1[k], d2[k])
return missing, differing
def | (self, timestamp=None, unix_time=None):
"""
Set the internal timestamp
@param timestamp: NTP timestamp to set
@param unit_time: Unix time as returned from time.time()
@raise InstrumentParameterException if timestamp or unix_time not supplied
"""
if timestamp is None and unix_time is None:
raise InstrumentParameterException("timestamp or unix_time required")
if unix_time is not None:
timestamp = ntplib.system_to_ntp_time(unix_time)
# Do we want this to happen here or in down stream processes?
# if(not self._check_timestamp(timestamp)):
# raise InstrumentParameterException("invalid timestamp")
self.contents[DataParticleKey.INTERNAL_TIMESTAMP] = float(timestamp)
def set_port_timestamp(self, timestamp=None, unix_time=None):
"""
Set the port timestamp
@param timestamp: NTP timestamp to set
@param unix_time: Unix time as returned from time.time()
@raise InstrumentParameterException if timestamp or unix_time not supplied
"""
if timestamp is None and unix_time is None:
raise InstrumentParameterException("timestamp or unix_time required")
if unix_time is not None:
timestamp = ntplib.system_to_ntp_time(unix_time)
# Do we want this to happen here or in down stream processes?
if not self._check_timestamp(timestamp):
raise InstrumentParameterException("invalid timestamp")
self.contents[DataParticleKey.PORT_TIMESTAMP] = float(timestamp)
def set_value(self, id, value):
"""
Set a content value, restricted as necessary
@param id The ID of the value to set, should be from DataParticleKey
@param value The value to set
@raises ReadOnlyException If the parameter cannot be set
"""
if (id == DataParticleKey.INTERNAL_TIMESTAMP) and (self._check_timestamp(value)):
self.contents[DataParticleKey.INTERNAL_TIMESTAMP] = value
else:
raise ReadOnlyException("Parameter %s not able to be set to %s after object creation!" %
(id, value))
def get_value(self, id):
""" Return a stored value from contents
@param id The ID (from DataParticleKey) for the parameter to return
@raises NotImplementedException If there is an invalid id
"""
if DataParticleKey.has(id):
return self.contents[id]
else:
raise NotImplementedException("Value %s not available in particle!", id)
def get_value_from_values(self, value_id):
""" Return a stored value from values list
@param value_id The ID of the parameter to return
"""
if not self._values:
return None
values = [i for i in self._values if i[DataParticleKey.VALUE_ID] == value_id]
if not values:
return None
return values[0][DataParticleKey.VALUE]
def data_particle_type(self):
"""
Return the data particle type (aka stream name)
@raise: NotImplementedException if _data_particle_type is not set
"""
if self._data_particle_type is None:
raise NotImplementedException("_data_particle_type not initialized")
return self._data_particle_type
def generate_dict(self):
"""
Generate a simple dictionary of sensor data and timestamps, without
going to JSON. This is useful for the times when JSON is not needed to
go across an interface. There are times when particles are used
internally to a component/process/module/etc.
@retval A python dictionary with the proper timestamps and data values
@throws InstrumentDriverException if there is a problem wtih the inputs
"""
# verify preferred timestamp exists in the structure...
if not self._check_preferred_timestamps():
raise SampleException("Preferred timestamp not in particle!")
# build response structure
self._encoding_errors = []
if self._values is None:
self._values = self._build_parsed_values()
result = self._build_base_structure()
result[DataParticleKey.STREAM_NAME] = self.data_particle_type()
result[DataParticleKey.VALUES] = self._values
return result
def generate(self, sorted=False):
"""
Generates a JSON_parsed packet from a sample dictionary of sensor data and
associates a timestamp with it
@param sorted Returned sorted json dict, useful for testing, but slow,
so dont do it unless it is important
@return A JSON_raw string, properly structured with port agent time stamp
and driver timestamp
@throws InstrumentDriverException If there is a problem with the inputs
"""
json_result = json.dumps(self.generate_dict(), sort_keys=sorted)
return json_result
def _build_parsed_values(self):
"""
Build values of a parsed structure. Just the values are built so
so that a child class can override this class, but call it with
super() to get the base structure before modification
@return the values tag for this data structure ready to JSONify
@raises SampleException when parsed values can not be properly returned
"""
raise SampleException("Parsed values block not overridden")
def _build_base_structure(self):
"""
Build the base/header information for an output structure.
Follow on methods can then modify it by adding or editing values.
@return A fresh copy of a core structure to be exported
"""
result = dict(self.contents)
# clean out optional fields that were missing
if not self.contents[DataParticleKey.PORT_TIMESTAMP]:
del result[DataParticleKey.PORT_TIMESTAMP]
if not self.contents[DataParticleKey.INTERNAL_TIMESTAMP]:
del result[DataParticleKey.INTERNAL_TIMESTAMP]
return result
def _check_timestamp(self, timestamp):
"""
Check to make sure the timestamp is reasonable
@param timestamp An NTP4 formatted timestamp (64bit)
@return True if timestamp is okay or None, False otherwise
"""
if timestamp is None:
return True
if not isinstance(timestamp, float):
return False
# is it sufficiently in the future to be unreasonable?
if timestamp > ntplib.system_to_ntp_time(time.time() + (86400 * 365)):
return False
else:
return True
def _check_preferred_timestamps(self):
"""
Check to make sure the preferred timestamp indicated in the
particle is actually listed, possibly adjusting to 2nd best
if not there.
@throws SampleException When there is a problem with the preferred
timestamp in the sample.
"""
if self.contents[DataParticleKey.PREFERRED_TIMESTAMP] is None:
raise SampleException("Missing preferred timestamp, %s, in particle" %
self.contents[DataParticleKey.PREFERRED_TIMESTAMP])
# This should be handled downstream. Don't want to not publish data because
# the port agent stopped putting out timestamps
# if self.contents[self.contents[DataParticleKey.PREFERRED_TIMESTAMP]] == None:
# raise SampleException("Preferred timestamp, %s, is not defined" %
# self.contents[DataParticleKey.PREFERRED_TIMESTAMP])
return True
def _encode_value(self, name, value, encoding_function, value_range=None):
"""
Encode a value using the encoding function, if it fails store the error in a queue
:param value_range tuple containing min/max numerical values or min/max lengths
"""
encoded_val = None
# noinspection PyBroadException
# - custom encoding_function exceptions are not known a priori
try:
encoded_val = encoding_function(value)
except ValueError as e:
log.error('Unable to convert %s to %s.', encoded_val, encoding_function)
self._encoding_errors.append({name: value})
except Exception as e:
log.error('Data particle error encoding. Name: %s Value: %s, Encoding: %s', name, value, encoding_function)
self._encoding_errors.append({name: value})
# optional range checking
if value_range:
try:
vmin, vmax = value_range
except ValueError as e: # this only occurs as a programming error and should cause the parser to exit
log.exception('_encode_value must have exactly two values for tuple argument value_range')
raise ValueError(e)
if encoding_function in [int, float]:
if vmin and encoded_val < vmin:
log.error('Particle value (%s) below minimum threshold (%s < %s)', name, value, vmin)
self._encoding_errors.append({name: value})
elif vmax and encoded_val > vmax:
log.error('Particle value (%s) exceeds maximum threshold (%s > %s)', name, value, vmax)
self._encoding_errors.append({name: value})
elif hasattr(encoded_val, '__len__'):
try:
if vmin and len(encoded_val) < vmin:
log.error('Particle value (%s) length below minimum threshold (%s < %s)',
name, value, vmin)
self._encoding_errors.append({name: value})
elif vmax and len(encoded_val) > vmax:
log.error('Particle value (%s) length exceeds maximum threshold (%s > %s)',
name, value, vmax)
self._encoding_errors.append({name: value})
# in the unlikely event that a range was specified and the encoding object created a bogus len()
# we'll just ignore the range check
except TypeError:
log.warning('_encode_value received an encoding function (%s) that claimed to implement len() but '
'does not. Unable to apply range test to %s', encoding_function, name)
return {DataParticleKey.VALUE_ID: name,
DataParticleKey.VALUE: encoded_val}
def get_encoding_errors(self):
"""
Return the encoding errors list
"""
return self._encoding_errors
class RawDataParticleKey(BaseEnum):
PAYLOAD = "raw"
LENGTH = "length"
TYPE = "type"
CHECKSUM = "checksum"
class RawDataParticle(DataParticle):
"""
This class a common data particle for generating data particles of raw
data.
It essentially is a translation of the port agent packet
"""
_data_particle_type = CommonDataParticleType.RAW
def _build_parsed_values(self):
"""
Build a particle out of a port agent packet.
@returns A list that is ready to be added to the "values" tag before
the structure is JSONified
"""
port_agent_packet = self.raw_data
if not isinstance(port_agent_packet, dict):
raise SampleException("raw data not a dictionary")
for param in ["raw", "length", "type", "checksum"]:
if param not in port_agent_packet:
raise SampleException("raw data not a complete port agent packet. missing %s" % param)
payload = None
length = None
type = None
checksum = None
# Attempt to convert values
try:
payload = base64.b64encode(port_agent_packet.get("raw"))
except TypeError:
pass
try:
length = int(port_agent_packet.get("length"))
except TypeError:
pass
try:
type = int(port_agent_packet.get("type"))
except TypeError:
pass
try:
checksum = int(port_agent_packet.get("checksum"))
except TypeError:
pass
result = [{
DataParticleKey.VALUE_ID: RawDataParticleKey.PAYLOAD,
DataParticleKey.VALUE: payload,
DataParticleKey.BINARY: True},
{
DataParticleKey.VALUE_ID: RawDataParticleKey.LENGTH,
DataParticleKey.VALUE: length},
{
DataParticleKey.VALUE_ID: RawDataParticleKey.TYPE,
DataParticleKey.VALUE: type},
{
DataParticleKey.VALUE_ID: RawDataParticleKey.CHECKSUM,
DataParticleKey.VALUE: checksum},
]
return result
| set_internal_timestamp | identifier_name |
perception.py | # System
from threading import Condition
# ROS
import rospy
from sensor_msgs.msg import Image, RegionOfInterest
from std_srvs.srv import Empty
# TU/e Robotics
from image_recognition_msgs.srv import Annotate, Recognize, RecognizeResponse, GetFaceProperties
from image_recognition_msgs.msg import Annotation, Recognition
from rgbd.srv import Project2DTo3D, Project2DTo3DRequest
from robot_part import RobotPart
from .util.kdl_conversions import VectorStamped
from .util.image_operations import img_recognitions_to_rois, img_cutout
class Perception(RobotPart):
def __init__(self, robot_name, tf_listener):
super(Perception, self).__init__(robot_name=robot_name, tf_listener=tf_listener)
self._camera_lazy_sub = None
self._camera_cv = Condition()
self._camera_last_image = None
self._annotate_srv = self.create_service_client('/' + robot_name + '/face_recognition/annotate', Annotate)
self._recognize_srv = self.create_service_client('/' + robot_name + '/face_recognition/recognize', Recognize)
self._clear_srv = self.create_service_client('/' + robot_name + '/face_recognition/clear', Empty)
self._face_properties_srv = self.create_service_client('/' + robot_name + '/face_recognition/get_face_properties', GetFaceProperties)
self._projection_srv = self.create_service_client('/' + robot_name + '/top_kinect/project_2d_to_3d',
Project2DTo3D)
def close(self):
pass
def reset(self, timeout=0):
pass
def _image_cb(self, image):
self._camera_cv.acquire()
self._camera_last_image = image
self._camera_cv.notify()
self._camera_cv.release()
def get_image(self, timeout=5):
# lazy subscribe to the kinect
if not self._camera_lazy_sub:
# for test with tripod kinect
# self._camera_lazy_sub = rospy.Subscriber("/camera/rgb/image_rect_color", Image, self._image_cb)
# for the robot
rospy.loginfo("Creating subscriber")
self._camera_lazy_sub = rospy.Subscriber("/" + self.robot_name + "/top_kinect/rgb/image", Image, self._image_cb)
rospy.loginfo('lazy subscribe to %s', self._camera_lazy_sub.name)
rospy.loginfo("getting one image...")
self._camera_cv.acquire()
self._camera_last_image = None
for i in range(timeout):
if self._camera_last_image:
rospy.loginfo("len(self._camera_last_image): {}".format(len(self._camera_last_image.data)))
break
else:
rospy.loginfo("self._camera_last_image: {}".format(self._camera_last_image))
if rospy.is_shutdown():
return
self._camera_cv.wait(timeout=1)
else:
raise Exception('no image received from %s' % self._camera_lazy_sub.name)
image = self._camera_last_image
self._camera_cv.release()
rospy.loginfo("got %d bytes of image data", len(image.data))
return image
def project_roi(self, roi, frame_id=None):
""" Projects a region of interest of a depth image to a 3D Point. Hereto, a service is used
:param roi: sensor_msgs/RegionOfInterest
:param frame_id: if specified, the result is transformed into this frame id
:return: VectorStamped object
"""
response = self.project_rois(rois=[roi]).points[0]
# Convert to VectorStamped
result = VectorStamped(x=response.point.x, y=response.point.y, z=response.point.z,
frame_id=response.header.frame_id)
# If necessary, transform the point
if frame_id is not None:
print("Transforming roi to {}".format(frame_id))
result = result.projectToFrame(frame_id=frame_id, tf_listener=self.tf_listener)
# Return the result
return result
def project_rois(self, rois):
# Call the service with the provided Region of Interest
try:
points = self._projection_srv(rois=rois)
except rospy.ServiceException as e:
raise ValueError('project_roi failed', e)
else:
rospy.loginfo('project_rois response: %s', points)
return points
# OpenFace
def _get_faces(self, image=None):
if not image:
image = self.get_image()
try:
r = self._recognize_srv(image=image)
rospy.loginfo('found %d face(s) in the image', len(r.recognitions))
except rospy.ServiceException as e:
rospy.logerr(e.message)
r = RecognizeResponse()
return r
def learn_person(self, name='operator'):
HEIGHT_TRESHOLD = 88
WIDTH_TRESHOLD = 88
try:
image = self.get_image()
except:
rospy.logerr("Cannot get image")
return False
raw_recognitions = self._get_faces(image).recognitions
recognitions = [r for r in raw_recognitions if r.roi.height > HEIGHT_TRESHOLD and r.roi.width > WIDTH_TRESHOLD]
rospy.loginfo('found %d valid face(s)', len(recognitions))
if len(recognitions) != 1:
rospy.loginfo("Too many faces: {}".format(len(recognitions)))
return False
recognition = recognitions[0]
rospy.loginfo('annotating that face as %s', name)
try:
self._annotate_srv(image=image, annotations=[Annotation(label=name, roi=recognition.roi)])
except rospy.ServiceException as e:
rospy.logerr('annotate failed: {}'.format(e))
return False
return True
def detect_faces(self, image=None, stamp=False):
"""
Snap an image with the camera and return the recognized faces.
:param image: image to use for recognition
:type image: sensor_msgs/Image
:param stamp: Return recognitions and stamp
:type stamp: bool
:return: recognitions of the faces
:rtype: list[image_recognition_msgs/Recognition]
"""
if not image:
image = self.get_image()
if stamp:
|
else:
return self._get_faces(image).recognitions
@staticmethod
def get_best_face_recognition(recognitions, desired_label, probability_threshold=4.0):
"""
Returns the Recognition with the highest probability of having the desired_label.
Assumes that the probability distributions in Recognition are already sorted by probability (descending, highest first)
:param recognitions: The recognitions to select the best one with desired_label from
:type recognitions: list[image_recognition_msgs/Recognition]
:param desired_label: what label to look for in the recognitions
:type desired_label: str
:param probability_threshold: only accept recognitions with probability higher than threshold
:type probability_threshold: double
:return the best recognition matching the given desired_label
:rtype image_recognition_msgs/Recognition
"""
rospy.logdebug("get_best_face_recognition: recognitions = {}".format(recognitions))
# Only take detections with operator
# detections = []
# The old implementation took, for each recognition, the (label, prob) pairs where label==desired_label.
# Other pairs in the same distribution may have higher probability.
# When the best_recognition is picked, it picked the recognition where the probability for the desired_label is hhighest comapared to other recognitions. BUT: a recognitions highest probability may be for a different label
# because the selection only compares matching labels, not looking at the probability of non-matching pairs.
# For example: we have 2 recognitions.
# in recognition 1, A has 50%, desired_label has 30%, B has 20%.
# in recognition 2, B has 60%, desired_label has 35%, A has 5%.
# Then, recognition 2 has the highest probability for the desired_label and is thus picked.
# Because we take the [0]'th index of the distribution, that name is B
#
# Solution: because the probability distributions are sorted, just take the probability distribution where the desired label has the highest probability.
#for recog in recognitions:
# for cp in recog.categorical_distribution.probabilities:
# if cp.label == desired_label:
# detections.append((recog, cp.probability))
# Sort based on probability
#if detections:
# sorted_detections = sorted(detections, key=lambda det: det[1])
# best_detection = sorted_detections[0][0] # A CategoricalDistribution in a Recognition is already ordered, max prob is at [0]
#else:
# best_detection = None
rospy.loginfo("Probability threshold %.2f", probability_threshold)
for index, recog in enumerate(recognitions):
rospy.loginfo("{index}: {dist}".format(index=index,
dist=[(cp.label, "{:.2f}".format(cp.probability)) for cp in recog.categorical_distribution.probabilities]))
matching_recognitions = [recog for recog in recognitions if \
recog.categorical_distribution.probabilities and \
recog.categorical_distribution.probabilities[0].label == desired_label]
if matching_recognitions:
best_recognition = max(matching_recognitions, key=lambda recog: recog.categorical_distribution.probabilities[0].probability)
return best_recognition if best_recognition.categorical_distribution.probabilities[0].probability > probability_threshold else None
else:
return None # TODO: Maybe so something smart with selecting a recognition where the desired_label is not the most probable for a recognition?
def clear_face(self):
"""
clearing all faces from the OpenFace node.
:return: no return
"""
rospy.loginfo('clearing all learned faces')
self._clear_srv()
# Skybiometry
def get_face_properties(self, faces=None, image=None):
"""
Get the face properties of all faces or in an image. If faces is provided, image is ignored. If both aren't
provided, an image is collected.
:param faces: images of all faces
:type faces: list[sensor_msgs/Image]
:param image: image containing the faces
:type image: sensor_msgs/Image
:return: list of face properties
:rtype: list[image_recognition_msgs/FaceProperties]
"""
if not faces:
if not image:
image = self.get_image()
face_recognitions = self.detect_faces(image=image)
rois = img_recognitions_to_rois(face_recognitions)
faces = img_cutout(image, rois)
face_properties = []
try:
face_properties_response = self._face_properties_srv(faces)
face_properties = face_properties_response.properties_array
except Exception as e:
rospy.logerr(str(e))
return [None] * len(faces)
face_log = '\n - '.join([''] + [repr(s) for s in face_properties])
rospy.loginfo('face_properties:%s', face_log)
return face_properties
| return self._get_faces(image).recognitions, image.header.stamp | conditional_block |
perception.py | # System
from threading import Condition
# ROS
import rospy
from sensor_msgs.msg import Image, RegionOfInterest
from std_srvs.srv import Empty
# TU/e Robotics
from image_recognition_msgs.srv import Annotate, Recognize, RecognizeResponse, GetFaceProperties
from image_recognition_msgs.msg import Annotation, Recognition
from rgbd.srv import Project2DTo3D, Project2DTo3DRequest
from robot_part import RobotPart
from .util.kdl_conversions import VectorStamped
from .util.image_operations import img_recognitions_to_rois, img_cutout
class Perception(RobotPart):
def __init__(self, robot_name, tf_listener):
super(Perception, self).__init__(robot_name=robot_name, tf_listener=tf_listener)
self._camera_lazy_sub = None
self._camera_cv = Condition()
self._camera_last_image = None
self._annotate_srv = self.create_service_client('/' + robot_name + '/face_recognition/annotate', Annotate)
self._recognize_srv = self.create_service_client('/' + robot_name + '/face_recognition/recognize', Recognize)
self._clear_srv = self.create_service_client('/' + robot_name + '/face_recognition/clear', Empty)
self._face_properties_srv = self.create_service_client('/' + robot_name + '/face_recognition/get_face_properties', GetFaceProperties)
self._projection_srv = self.create_service_client('/' + robot_name + '/top_kinect/project_2d_to_3d',
Project2DTo3D)
def close(self):
pass
def reset(self, timeout=0):
pass
def _image_cb(self, image):
self._camera_cv.acquire()
self._camera_last_image = image
self._camera_cv.notify()
self._camera_cv.release()
def get_image(self, timeout=5):
# lazy subscribe to the kinect
if not self._camera_lazy_sub:
# for test with tripod kinect
# self._camera_lazy_sub = rospy.Subscriber("/camera/rgb/image_rect_color", Image, self._image_cb)
# for the robot
rospy.loginfo("Creating subscriber")
self._camera_lazy_sub = rospy.Subscriber("/" + self.robot_name + "/top_kinect/rgb/image", Image, self._image_cb)
rospy.loginfo('lazy subscribe to %s', self._camera_lazy_sub.name)
rospy.loginfo("getting one image...")
self._camera_cv.acquire()
self._camera_last_image = None
for i in range(timeout):
if self._camera_last_image:
rospy.loginfo("len(self._camera_last_image): {}".format(len(self._camera_last_image.data)))
break
else:
rospy.loginfo("self._camera_last_image: {}".format(self._camera_last_image))
if rospy.is_shutdown():
return
self._camera_cv.wait(timeout=1)
else:
raise Exception('no image received from %s' % self._camera_lazy_sub.name)
image = self._camera_last_image
self._camera_cv.release()
rospy.loginfo("got %d bytes of image data", len(image.data))
return image
def project_roi(self, roi, frame_id=None):
""" Projects a region of interest of a depth image to a 3D Point. Hereto, a service is used
:param roi: sensor_msgs/RegionOfInterest
:param frame_id: if specified, the result is transformed into this frame id
:return: VectorStamped object
"""
response = self.project_rois(rois=[roi]).points[0]
# Convert to VectorStamped
result = VectorStamped(x=response.point.x, y=response.point.y, z=response.point.z,
frame_id=response.header.frame_id)
# If necessary, transform the point
if frame_id is not None:
print("Transforming roi to {}".format(frame_id))
result = result.projectToFrame(frame_id=frame_id, tf_listener=self.tf_listener)
# Return the result
return result
def project_rois(self, rois):
# Call the service with the provided Region of Interest
try:
points = self._projection_srv(rois=rois)
except rospy.ServiceException as e:
raise ValueError('project_roi failed', e)
else:
rospy.loginfo('project_rois response: %s', points)
return points
# OpenFace
def _get_faces(self, image=None):
if not image:
image = self.get_image()
try:
r = self._recognize_srv(image=image)
rospy.loginfo('found %d face(s) in the image', len(r.recognitions))
except rospy.ServiceException as e:
rospy.logerr(e.message)
r = RecognizeResponse()
return r
def learn_person(self, name='operator'):
HEIGHT_TRESHOLD = 88
WIDTH_TRESHOLD = 88
try:
image = self.get_image()
except:
rospy.logerr("Cannot get image")
return False
raw_recognitions = self._get_faces(image).recognitions
recognitions = [r for r in raw_recognitions if r.roi.height > HEIGHT_TRESHOLD and r.roi.width > WIDTH_TRESHOLD]
rospy.loginfo('found %d valid face(s)', len(recognitions))
if len(recognitions) != 1:
rospy.loginfo("Too many faces: {}".format(len(recognitions)))
return False
recognition = recognitions[0]
rospy.loginfo('annotating that face as %s', name)
try:
self._annotate_srv(image=image, annotations=[Annotation(label=name, roi=recognition.roi)])
except rospy.ServiceException as e:
rospy.logerr('annotate failed: {}'.format(e))
return False
return True
def detect_faces(self, image=None, stamp=False):
"""
Snap an image with the camera and return the recognized faces.
:param image: image to use for recognition
:type image: sensor_msgs/Image
:param stamp: Return recognitions and stamp
:type stamp: bool
:return: recognitions of the faces
:rtype: list[image_recognition_msgs/Recognition]
"""
if not image:
image = self.get_image()
if stamp:
return self._get_faces(image).recognitions, image.header.stamp
else:
return self._get_faces(image).recognitions
@staticmethod
def get_best_face_recognition(recognitions, desired_label, probability_threshold=4.0):
"""
Returns the Recognition with the highest probability of having the desired_label.
Assumes that the probability distributions in Recognition are already sorted by probability (descending, highest first)
:param recognitions: The recognitions to select the best one with desired_label from
:type recognitions: list[image_recognition_msgs/Recognition]
:param desired_label: what label to look for in the recognitions
:type desired_label: str
:param probability_threshold: only accept recognitions with probability higher than threshold
:type probability_threshold: double
:return the best recognition matching the given desired_label
:rtype image_recognition_msgs/Recognition
"""
rospy.logdebug("get_best_face_recognition: recognitions = {}".format(recognitions))
# Only take detections with operator
# detections = []
# The old implementation took, for each recognition, the (label, prob) pairs where label==desired_label.
# Other pairs in the same distribution may have higher probability.
# When the best_recognition is picked, it picked the recognition where the probability for the desired_label is hhighest comapared to other recognitions. BUT: a recognitions highest probability may be for a different label
# because the selection only compares matching labels, not looking at the probability of non-matching pairs.
# For example: we have 2 recognitions.
# in recognition 1, A has 50%, desired_label has 30%, B has 20%.
# in recognition 2, B has 60%, desired_label has 35%, A has 5%.
# Then, recognition 2 has the highest probability for the desired_label and is thus picked.
# Because we take the [0]'th index of the distribution, that name is B
#
# Solution: because the probability distributions are sorted, just take the probability distribution where the desired label has the highest probability.
#for recog in recognitions:
# for cp in recog.categorical_distribution.probabilities:
# if cp.label == desired_label:
# detections.append((recog, cp.probability))
# Sort based on probability
#if detections:
# sorted_detections = sorted(detections, key=lambda det: det[1])
# best_detection = sorted_detections[0][0] # A CategoricalDistribution in a Recognition is already ordered, max prob is at [0]
#else:
# best_detection = None
rospy.loginfo("Probability threshold %.2f", probability_threshold)
for index, recog in enumerate(recognitions):
rospy.loginfo("{index}: {dist}".format(index=index,
dist=[(cp.label, "{:.2f}".format(cp.probability)) for cp in recog.categorical_distribution.probabilities]))
matching_recognitions = [recog for recog in recognitions if \
recog.categorical_distribution.probabilities and \
recog.categorical_distribution.probabilities[0].label == desired_label]
if matching_recognitions:
best_recognition = max(matching_recognitions, key=lambda recog: recog.categorical_distribution.probabilities[0].probability)
return best_recognition if best_recognition.categorical_distribution.probabilities[0].probability > probability_threshold else None
else:
return None # TODO: Maybe so something smart with selecting a recognition where the desired_label is not the most probable for a recognition?
def clear_face(self):
|
# Skybiometry
def get_face_properties(self, faces=None, image=None):
"""
Get the face properties of all faces or in an image. If faces is provided, image is ignored. If both aren't
provided, an image is collected.
:param faces: images of all faces
:type faces: list[sensor_msgs/Image]
:param image: image containing the faces
:type image: sensor_msgs/Image
:return: list of face properties
:rtype: list[image_recognition_msgs/FaceProperties]
"""
if not faces:
if not image:
image = self.get_image()
face_recognitions = self.detect_faces(image=image)
rois = img_recognitions_to_rois(face_recognitions)
faces = img_cutout(image, rois)
face_properties = []
try:
face_properties_response = self._face_properties_srv(faces)
face_properties = face_properties_response.properties_array
except Exception as e:
rospy.logerr(str(e))
return [None] * len(faces)
face_log = '\n - '.join([''] + [repr(s) for s in face_properties])
rospy.loginfo('face_properties:%s', face_log)
return face_properties
| """
clearing all faces from the OpenFace node.
:return: no return
"""
rospy.loginfo('clearing all learned faces')
self._clear_srv() | identifier_body |
perception.py | # System
from threading import Condition
# ROS
import rospy
from sensor_msgs.msg import Image, RegionOfInterest
from std_srvs.srv import Empty
# TU/e Robotics
from image_recognition_msgs.srv import Annotate, Recognize, RecognizeResponse, GetFaceProperties
from image_recognition_msgs.msg import Annotation, Recognition
from rgbd.srv import Project2DTo3D, Project2DTo3DRequest
from robot_part import RobotPart
from .util.kdl_conversions import VectorStamped
from .util.image_operations import img_recognitions_to_rois, img_cutout
class Perception(RobotPart):
def __init__(self, robot_name, tf_listener):
super(Perception, self).__init__(robot_name=robot_name, tf_listener=tf_listener)
self._camera_lazy_sub = None
self._camera_cv = Condition()
self._camera_last_image = None
self._annotate_srv = self.create_service_client('/' + robot_name + '/face_recognition/annotate', Annotate)
self._recognize_srv = self.create_service_client('/' + robot_name + '/face_recognition/recognize', Recognize)
self._clear_srv = self.create_service_client('/' + robot_name + '/face_recognition/clear', Empty)
self._face_properties_srv = self.create_service_client('/' + robot_name + '/face_recognition/get_face_properties', GetFaceProperties)
self._projection_srv = self.create_service_client('/' + robot_name + '/top_kinect/project_2d_to_3d',
Project2DTo3D)
def close(self):
pass
def reset(self, timeout=0):
pass
def _image_cb(self, image):
self._camera_cv.acquire()
self._camera_last_image = image
self._camera_cv.notify()
self._camera_cv.release()
def get_image(self, timeout=5):
# lazy subscribe to the kinect
if not self._camera_lazy_sub:
# for test with tripod kinect
# self._camera_lazy_sub = rospy.Subscriber("/camera/rgb/image_rect_color", Image, self._image_cb)
# for the robot
rospy.loginfo("Creating subscriber")
self._camera_lazy_sub = rospy.Subscriber("/" + self.robot_name + "/top_kinect/rgb/image", Image, self._image_cb)
rospy.loginfo('lazy subscribe to %s', self._camera_lazy_sub.name)
rospy.loginfo("getting one image...")
self._camera_cv.acquire()
self._camera_last_image = None
for i in range(timeout):
if self._camera_last_image:
rospy.loginfo("len(self._camera_last_image): {}".format(len(self._camera_last_image.data)))
break
else:
rospy.loginfo("self._camera_last_image: {}".format(self._camera_last_image))
if rospy.is_shutdown():
return
self._camera_cv.wait(timeout=1)
else:
raise Exception('no image received from %s' % self._camera_lazy_sub.name)
image = self._camera_last_image
self._camera_cv.release()
rospy.loginfo("got %d bytes of image data", len(image.data))
return image
def project_roi(self, roi, frame_id=None):
""" Projects a region of interest of a depth image to a 3D Point. Hereto, a service is used
:param roi: sensor_msgs/RegionOfInterest
:param frame_id: if specified, the result is transformed into this frame id
:return: VectorStamped object
"""
response = self.project_rois(rois=[roi]).points[0]
# Convert to VectorStamped
result = VectorStamped(x=response.point.x, y=response.point.y, z=response.point.z,
frame_id=response.header.frame_id)
# If necessary, transform the point
if frame_id is not None:
print("Transforming roi to {}".format(frame_id))
result = result.projectToFrame(frame_id=frame_id, tf_listener=self.tf_listener)
# Return the result
return result
def project_rois(self, rois):
# Call the service with the provided Region of Interest
try:
points = self._projection_srv(rois=rois)
except rospy.ServiceException as e:
raise ValueError('project_roi failed', e)
else:
rospy.loginfo('project_rois response: %s', points)
return points
# OpenFace
def _get_faces(self, image=None):
if not image:
image = self.get_image()
try:
r = self._recognize_srv(image=image)
rospy.loginfo('found %d face(s) in the image', len(r.recognitions))
except rospy.ServiceException as e:
rospy.logerr(e.message)
r = RecognizeResponse()
return r
def learn_person(self, name='operator'):
HEIGHT_TRESHOLD = 88
WIDTH_TRESHOLD = 88
try:
image = self.get_image()
except:
rospy.logerr("Cannot get image")
return False
raw_recognitions = self._get_faces(image).recognitions
recognitions = [r for r in raw_recognitions if r.roi.height > HEIGHT_TRESHOLD and r.roi.width > WIDTH_TRESHOLD]
rospy.loginfo('found %d valid face(s)', len(recognitions))
if len(recognitions) != 1:
rospy.loginfo("Too many faces: {}".format(len(recognitions)))
return False
recognition = recognitions[0]
rospy.loginfo('annotating that face as %s', name)
try:
self._annotate_srv(image=image, annotations=[Annotation(label=name, roi=recognition.roi)])
except rospy.ServiceException as e:
rospy.logerr('annotate failed: {}'.format(e))
return False
return True
def detect_faces(self, image=None, stamp=False):
"""
Snap an image with the camera and return the recognized faces.
:param image: image to use for recognition
:type image: sensor_msgs/Image
:param stamp: Return recognitions and stamp
:type stamp: bool
:return: recognitions of the faces
:rtype: list[image_recognition_msgs/Recognition]
"""
if not image:
image = self.get_image()
if stamp:
return self._get_faces(image).recognitions, image.header.stamp
else:
return self._get_faces(image).recognitions
@staticmethod
def get_best_face_recognition(recognitions, desired_label, probability_threshold=4.0):
"""
Returns the Recognition with the highest probability of having the desired_label.
Assumes that the probability distributions in Recognition are already sorted by probability (descending, highest first)
:param recognitions: The recognitions to select the best one with desired_label from
:type recognitions: list[image_recognition_msgs/Recognition]
:param desired_label: what label to look for in the recognitions
:type desired_label: str
:param probability_threshold: only accept recognitions with probability higher than threshold
:type probability_threshold: double
:return the best recognition matching the given desired_label
:rtype image_recognition_msgs/Recognition
"""
rospy.logdebug("get_best_face_recognition: recognitions = {}".format(recognitions))
# Only take detections with operator
# detections = []
# The old implementation took, for each recognition, the (label, prob) pairs where label==desired_label.
# Other pairs in the same distribution may have higher probability.
# When the best_recognition is picked, it picked the recognition where the probability for the desired_label is hhighest comapared to other recognitions. BUT: a recognitions highest probability may be for a different label
# because the selection only compares matching labels, not looking at the probability of non-matching pairs.
# For example: we have 2 recognitions.
# in recognition 1, A has 50%, desired_label has 30%, B has 20%.
# in recognition 2, B has 60%, desired_label has 35%, A has 5%.
# Then, recognition 2 has the highest probability for the desired_label and is thus picked.
# Because we take the [0]'th index of the distribution, that name is B
#
# Solution: because the probability distributions are sorted, just take the probability distribution where the desired label has the highest probability.
#for recog in recognitions:
# for cp in recog.categorical_distribution.probabilities:
# if cp.label == desired_label:
# detections.append((recog, cp.probability))
# Sort based on probability
#if detections:
# sorted_detections = sorted(detections, key=lambda det: det[1])
# best_detection = sorted_detections[0][0] # A CategoricalDistribution in a Recognition is already ordered, max prob is at [0]
#else:
# best_detection = None
rospy.loginfo("Probability threshold %.2f", probability_threshold)
for index, recog in enumerate(recognitions):
rospy.loginfo("{index}: {dist}".format(index=index,
dist=[(cp.label, "{:.2f}".format(cp.probability)) for cp in recog.categorical_distribution.probabilities]))
matching_recognitions = [recog for recog in recognitions if \
recog.categorical_distribution.probabilities and \
recog.categorical_distribution.probabilities[0].label == desired_label]
if matching_recognitions:
best_recognition = max(matching_recognitions, key=lambda recog: recog.categorical_distribution.probabilities[0].probability) |
def clear_face(self):
"""
clearing all faces from the OpenFace node.
:return: no return
"""
rospy.loginfo('clearing all learned faces')
self._clear_srv()
# Skybiometry
def get_face_properties(self, faces=None, image=None):
"""
Get the face properties of all faces or in an image. If faces is provided, image is ignored. If both aren't
provided, an image is collected.
:param faces: images of all faces
:type faces: list[sensor_msgs/Image]
:param image: image containing the faces
:type image: sensor_msgs/Image
:return: list of face properties
:rtype: list[image_recognition_msgs/FaceProperties]
"""
if not faces:
if not image:
image = self.get_image()
face_recognitions = self.detect_faces(image=image)
rois = img_recognitions_to_rois(face_recognitions)
faces = img_cutout(image, rois)
face_properties = []
try:
face_properties_response = self._face_properties_srv(faces)
face_properties = face_properties_response.properties_array
except Exception as e:
rospy.logerr(str(e))
return [None] * len(faces)
face_log = '\n - '.join([''] + [repr(s) for s in face_properties])
rospy.loginfo('face_properties:%s', face_log)
return face_properties | return best_recognition if best_recognition.categorical_distribution.probabilities[0].probability > probability_threshold else None
else:
return None # TODO: Maybe so something smart with selecting a recognition where the desired_label is not the most probable for a recognition? | random_line_split |
perception.py | # System
from threading import Condition
# ROS
import rospy
from sensor_msgs.msg import Image, RegionOfInterest
from std_srvs.srv import Empty
# TU/e Robotics
from image_recognition_msgs.srv import Annotate, Recognize, RecognizeResponse, GetFaceProperties
from image_recognition_msgs.msg import Annotation, Recognition
from rgbd.srv import Project2DTo3D, Project2DTo3DRequest
from robot_part import RobotPart
from .util.kdl_conversions import VectorStamped
from .util.image_operations import img_recognitions_to_rois, img_cutout
class Perception(RobotPart):
def __init__(self, robot_name, tf_listener):
super(Perception, self).__init__(robot_name=robot_name, tf_listener=tf_listener)
self._camera_lazy_sub = None
self._camera_cv = Condition()
self._camera_last_image = None
self._annotate_srv = self.create_service_client('/' + robot_name + '/face_recognition/annotate', Annotate)
self._recognize_srv = self.create_service_client('/' + robot_name + '/face_recognition/recognize', Recognize)
self._clear_srv = self.create_service_client('/' + robot_name + '/face_recognition/clear', Empty)
self._face_properties_srv = self.create_service_client('/' + robot_name + '/face_recognition/get_face_properties', GetFaceProperties)
self._projection_srv = self.create_service_client('/' + robot_name + '/top_kinect/project_2d_to_3d',
Project2DTo3D)
def close(self):
pass
def reset(self, timeout=0):
pass
def | (self, image):
self._camera_cv.acquire()
self._camera_last_image = image
self._camera_cv.notify()
self._camera_cv.release()
def get_image(self, timeout=5):
# lazy subscribe to the kinect
if not self._camera_lazy_sub:
# for test with tripod kinect
# self._camera_lazy_sub = rospy.Subscriber("/camera/rgb/image_rect_color", Image, self._image_cb)
# for the robot
rospy.loginfo("Creating subscriber")
self._camera_lazy_sub = rospy.Subscriber("/" + self.robot_name + "/top_kinect/rgb/image", Image, self._image_cb)
rospy.loginfo('lazy subscribe to %s', self._camera_lazy_sub.name)
rospy.loginfo("getting one image...")
self._camera_cv.acquire()
self._camera_last_image = None
for i in range(timeout):
if self._camera_last_image:
rospy.loginfo("len(self._camera_last_image): {}".format(len(self._camera_last_image.data)))
break
else:
rospy.loginfo("self._camera_last_image: {}".format(self._camera_last_image))
if rospy.is_shutdown():
return
self._camera_cv.wait(timeout=1)
else:
raise Exception('no image received from %s' % self._camera_lazy_sub.name)
image = self._camera_last_image
self._camera_cv.release()
rospy.loginfo("got %d bytes of image data", len(image.data))
return image
def project_roi(self, roi, frame_id=None):
""" Projects a region of interest of a depth image to a 3D Point. Hereto, a service is used
:param roi: sensor_msgs/RegionOfInterest
:param frame_id: if specified, the result is transformed into this frame id
:return: VectorStamped object
"""
response = self.project_rois(rois=[roi]).points[0]
# Convert to VectorStamped
result = VectorStamped(x=response.point.x, y=response.point.y, z=response.point.z,
frame_id=response.header.frame_id)
# If necessary, transform the point
if frame_id is not None:
print("Transforming roi to {}".format(frame_id))
result = result.projectToFrame(frame_id=frame_id, tf_listener=self.tf_listener)
# Return the result
return result
def project_rois(self, rois):
# Call the service with the provided Region of Interest
try:
points = self._projection_srv(rois=rois)
except rospy.ServiceException as e:
raise ValueError('project_roi failed', e)
else:
rospy.loginfo('project_rois response: %s', points)
return points
# OpenFace
def _get_faces(self, image=None):
if not image:
image = self.get_image()
try:
r = self._recognize_srv(image=image)
rospy.loginfo('found %d face(s) in the image', len(r.recognitions))
except rospy.ServiceException as e:
rospy.logerr(e.message)
r = RecognizeResponse()
return r
def learn_person(self, name='operator'):
HEIGHT_TRESHOLD = 88
WIDTH_TRESHOLD = 88
try:
image = self.get_image()
except:
rospy.logerr("Cannot get image")
return False
raw_recognitions = self._get_faces(image).recognitions
recognitions = [r for r in raw_recognitions if r.roi.height > HEIGHT_TRESHOLD and r.roi.width > WIDTH_TRESHOLD]
rospy.loginfo('found %d valid face(s)', len(recognitions))
if len(recognitions) != 1:
rospy.loginfo("Too many faces: {}".format(len(recognitions)))
return False
recognition = recognitions[0]
rospy.loginfo('annotating that face as %s', name)
try:
self._annotate_srv(image=image, annotations=[Annotation(label=name, roi=recognition.roi)])
except rospy.ServiceException as e:
rospy.logerr('annotate failed: {}'.format(e))
return False
return True
def detect_faces(self, image=None, stamp=False):
"""
Snap an image with the camera and return the recognized faces.
:param image: image to use for recognition
:type image: sensor_msgs/Image
:param stamp: Return recognitions and stamp
:type stamp: bool
:return: recognitions of the faces
:rtype: list[image_recognition_msgs/Recognition]
"""
if not image:
image = self.get_image()
if stamp:
return self._get_faces(image).recognitions, image.header.stamp
else:
return self._get_faces(image).recognitions
@staticmethod
def get_best_face_recognition(recognitions, desired_label, probability_threshold=4.0):
"""
Returns the Recognition with the highest probability of having the desired_label.
Assumes that the probability distributions in Recognition are already sorted by probability (descending, highest first)
:param recognitions: The recognitions to select the best one with desired_label from
:type recognitions: list[image_recognition_msgs/Recognition]
:param desired_label: what label to look for in the recognitions
:type desired_label: str
:param probability_threshold: only accept recognitions with probability higher than threshold
:type probability_threshold: double
:return the best recognition matching the given desired_label
:rtype image_recognition_msgs/Recognition
"""
rospy.logdebug("get_best_face_recognition: recognitions = {}".format(recognitions))
# Only take detections with operator
# detections = []
# The old implementation took, for each recognition, the (label, prob) pairs where label==desired_label.
# Other pairs in the same distribution may have higher probability.
# When the best_recognition is picked, it picked the recognition where the probability for the desired_label is hhighest comapared to other recognitions. BUT: a recognitions highest probability may be for a different label
# because the selection only compares matching labels, not looking at the probability of non-matching pairs.
# For example: we have 2 recognitions.
# in recognition 1, A has 50%, desired_label has 30%, B has 20%.
# in recognition 2, B has 60%, desired_label has 35%, A has 5%.
# Then, recognition 2 has the highest probability for the desired_label and is thus picked.
# Because we take the [0]'th index of the distribution, that name is B
#
# Solution: because the probability distributions are sorted, just take the probability distribution where the desired label has the highest probability.
#for recog in recognitions:
# for cp in recog.categorical_distribution.probabilities:
# if cp.label == desired_label:
# detections.append((recog, cp.probability))
# Sort based on probability
#if detections:
# sorted_detections = sorted(detections, key=lambda det: det[1])
# best_detection = sorted_detections[0][0] # A CategoricalDistribution in a Recognition is already ordered, max prob is at [0]
#else:
# best_detection = None
rospy.loginfo("Probability threshold %.2f", probability_threshold)
for index, recog in enumerate(recognitions):
rospy.loginfo("{index}: {dist}".format(index=index,
dist=[(cp.label, "{:.2f}".format(cp.probability)) for cp in recog.categorical_distribution.probabilities]))
matching_recognitions = [recog for recog in recognitions if \
recog.categorical_distribution.probabilities and \
recog.categorical_distribution.probabilities[0].label == desired_label]
if matching_recognitions:
best_recognition = max(matching_recognitions, key=lambda recog: recog.categorical_distribution.probabilities[0].probability)
return best_recognition if best_recognition.categorical_distribution.probabilities[0].probability > probability_threshold else None
else:
return None # TODO: Maybe so something smart with selecting a recognition where the desired_label is not the most probable for a recognition?
def clear_face(self):
"""
clearing all faces from the OpenFace node.
:return: no return
"""
rospy.loginfo('clearing all learned faces')
self._clear_srv()
# Skybiometry
def get_face_properties(self, faces=None, image=None):
"""
Get the face properties of all faces or in an image. If faces is provided, image is ignored. If both aren't
provided, an image is collected.
:param faces: images of all faces
:type faces: list[sensor_msgs/Image]
:param image: image containing the faces
:type image: sensor_msgs/Image
:return: list of face properties
:rtype: list[image_recognition_msgs/FaceProperties]
"""
if not faces:
if not image:
image = self.get_image()
face_recognitions = self.detect_faces(image=image)
rois = img_recognitions_to_rois(face_recognitions)
faces = img_cutout(image, rois)
face_properties = []
try:
face_properties_response = self._face_properties_srv(faces)
face_properties = face_properties_response.properties_array
except Exception as e:
rospy.logerr(str(e))
return [None] * len(faces)
face_log = '\n - '.join([''] + [repr(s) for s in face_properties])
rospy.loginfo('face_properties:%s', face_log)
return face_properties
| _image_cb | identifier_name |
routeOperator.go | // Copyright 2019-2023 The Liqo Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package routeoperator
import (
"context"
"os"
"os/signal"
"time"
"github.com/coreos/go-iptables/iptables"
"github.com/vishvananda/netlink"
k8sApiErrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/client-go/tools/record"
"k8s.io/klog/v2"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/predicate"
netv1alpha1 "github.com/liqotech/liqo/apis/net/v1alpha1"
liqoconst "github.com/liqotech/liqo/pkg/consts"
"github.com/liqotech/liqo/pkg/liqonet/overlay"
liqorouting "github.com/liqotech/liqo/pkg/liqonet/routing"
liqonetutils "github.com/liqotech/liqo/pkg/liqonet/utils"
)
var (
result = ctrl.Result{}
)
// RouteController reconciles a TunnelEndpoint object.
type RouteController struct {
client.Client
record.EventRecorder
liqorouting.Routing
vxlanDev *overlay.VxlanDevice
podIP string
firewallChan chan bool
}
// NewRouteController returns a configured route controller ready to be started.
func NewRouteController(podIP string, vxlanDevice *overlay.VxlanDevice, router liqorouting.Routing, er record.EventRecorder,
cl client.Client) *RouteController {
r := &RouteController{
Client: cl,
Routing: router,
vxlanDev: vxlanDevice,
EventRecorder: er,
podIP: podIP,
}
return r
}
// cluster-role
// +kubebuilder:rbac:groups=net.liqo.io,resources=tunnelendpoints,verbs=get;list;watch;update;patch
// +kubebuilder:rbac:groups=net.liqo.io,resources=tunnelendpoints/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=core,resources=events,verbs=create;patch
// +kubebuilder:rbac:groups=core,resources=nodes,verbs=get
// role
// +kubebuilder:rbac:groups=core,namespace="do-not-care",resources=secrets,verbs=create;update;patch;get;list;watch;delete
// +kubebuilder:rbac:groups=core,resources=pods,verbs=update;patch;get;list;watch
// +kubebuilder:rbac:groups=core,namespace="do-not-care",resources=services,verbs=update;patch;get;list;watch
// Reconcile handle requests on TunnelEndpoint object to create and configure routes on Nodes.
func (rc *RouteController) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
tep := new(netv1alpha1.TunnelEndpoint)
var err error
if err = rc.Get(ctx, req.NamespacedName, tep); err != nil && !k8sApiErrors.IsNotFound(err) {
klog.Errorf("unable to fetch resource {%s} :%v", req.String(), err)
return result, err
}
// In case the resource does not exist anymore, we just forget it.
if k8sApiErrors.IsNotFound(err) {
return result, nil
}
// Here we check that the tunnelEndpoint resource has been fully processed. If not we do nothing.
if tep.Status.GatewayIP == "" {
return result, nil
}
clusterIdentity := tep.Spec.ClusterIdentity
_, remotePodCIDR := liqonetutils.GetPodCIDRS(tep)
_, remoteExternalCIDR := liqonetutils.GetExternalCIDRS(tep)
// Examine DeletionTimestamp to determine if object is under deletion.
if tep.ObjectMeta.DeletionTimestamp.IsZero() {
if !controllerutil.ContainsFinalizer(tep, liqoconst.LiqoRouteFinalizer(rc.podIP)) {
// The object is not being deleted, so if it does not have our finalizer,
// then lets add the finalizer and update the object. This is equivalent
// registering our finalizer.
controllerutil.AddFinalizer(tep, liqoconst.LiqoRouteFinalizer(rc.podIP))
if err := rc.Update(ctx, tep); err != nil {
if k8sApiErrors.IsConflict(err) {
klog.V(4).Infof("%s -> unable to add finalizers to resource {%s}: %s", clusterIdentity, req.String(), err)
return result, err
}
klog.Errorf("%s -> unable to add finalizers to resource {%s}: %s", clusterIdentity, req.String(), err)
return result, err
}
}
} else {
// The object is being deleted, if we encounter an error while removing the routes than we record an
// event on the resource to notify the user. The finalizer is not removed.
if controllerutil.ContainsFinalizer(tep, liqoconst.LiqoRouteFinalizer(rc.podIP)) {
klog.Infof("resource {%s} of type {%s} is being removed", tep.Name, tep.GroupVersionKind().String())
deleted, err := rc.RemoveRoutesPerCluster(tep)
if err != nil {
klog.Errorf("%s -> unable to remove route for destinations {%s} and {%s}: %s",
clusterIdentity, remotePodCIDR, remoteExternalCIDR, err)
rc.Eventf(tep, "Warning", "Processing", "unable to remove route: %s", err.Error())
return result, err
}
if deleted {
klog.Infof("%s -> route for destinations {%s} and {%s} correctly removed",
clusterIdentity, remotePodCIDR, remoteExternalCIDR)
rc.Eventf(tep, "Normal", "Processing", "route for destination {%s} and {%s} correctly removed",
remotePodCIDR, remoteExternalCIDR)
}
// remove the finalizer from the list and update it.
controllerutil.RemoveFinalizer(tep, liqoconst.LiqoRouteFinalizer(rc.podIP))
if err := rc.Update(ctx, tep); err != nil {
if k8sApiErrors.IsConflict(err) {
klog.V(4).Infof("%s -> unable to add finalizers to resource {%s}: %s", clusterIdentity, req.String(), err)
return result, err
}
klog.Errorf("%s -> unable to remove finalizers from resource {%s}: %s", clusterIdentity, req.String(), err)
return result, err
}
}
return result, nil
}
added, err := rc.EnsureRoutesPerCluster(tep)
if err != nil {
klog.Errorf("%s -> unable to configure route for destinations {%s} and {%s}: %s",
clusterIdentity, remotePodCIDR, remoteExternalCIDR, err)
rc.Eventf(tep, "Warning", "Processing", "unable to configure route for destinations {%s} and {%s}: %s",
remotePodCIDR, remoteExternalCIDR, err.Error())
return result, err
}
if added {
klog.Infof("%s -> route for destinations {%s} and {%s} correctly configured", clusterIdentity, remotePodCIDR, remoteExternalCIDR)
rc.Eventf(tep, "Normal", "Processing", "route for destinations {%s} and {%s} configured", remotePodCIDR, remoteExternalCIDR)
}
return result, nil
}
// ConfigureFirewall launches a long-running go routine that ensures the firewall configuration.
func (rc *RouteController) ConfigureFirewall() error {
iptHandler, err := iptables.New()
if err != nil {
return err
}
rc.firewallChan = make(chan bool)
fwRules := generateRules(rc.vxlanDev.Link.Name)
go func() {
ticker := time.NewTicker(5 * time.Second)
defer ticker.Stop()
for {
select {
case <-ticker.C: // every five seconds we enforce the firewall rules.
for i := range fwRules {
if err := addRule(iptHandler, &fwRules[i]); err != nil {
klog.Errorf("unable to insert firewall rule {%s}: %v", fwRules[i].String(), err)
} else {
klog.V(5).Infof("firewall rule {%s} configured", fwRules[i].String())
}
}
case <-rc.firewallChan:
for i := range fwRules {
if err := deleteRule(iptHandler, &fwRules[i]); err != nil {
klog.Errorf("unable to remove firewall rule {%s}: %v", fwRules[i].String(), err)
} else {
klog.V(5).Infof("firewall rule {%s} removed", fwRules[i].String())
}
}
close(rc.firewallChan)
return
}
}
}()
return nil
}
// cleanUp removes all the routes, rules and devices (if any) from the
// node inserted by the operator. It is called at exit time.
func (rc *RouteController) cleanUp() {
if rc.firewallChan != nil {
// send signal to clean firewall rules and close the go routine.
rc.firewallChan <- true
// wait for the go routine to clean up.
<-rc.firewallChan
}
if rc.Routing != nil {
if err := rc.Routing.CleanRoutingTable(); err != nil {
klog.Errorf("un error occurred while cleaning up routes: %v", err)
}
if err := rc.Routing.CleanPolicyRules(); err != nil {
klog.Errorf("un error occurred while cleaning up policy routing rules: %v", err)
}
}
if rc.vxlanDev != nil {
err := netlink.LinkDel(rc.vxlanDev.Link)
if err != nil && err.Error() != "Link not found" {
klog.Errorf("an error occurred while deleting vxlan device {%s}: %v", rc.vxlanDev.Link.Name, err)
}
}
// Attempt to remove our finalizer from all tunnel endpoints. In case this operation fails,
// the cleanup will be performed by tunnel-operator when a tunnel endpoint is going to be deleted.
var teps netv1alpha1.TunnelEndpointList
if err := rc.List(context.Background(), &teps); err != nil {
klog.Errorf("an error occurred while listing tunnel endpoints: %v", err)
return
}
for i := range teps.Items {
original := teps.Items[i].DeepCopy()
if controllerutil.RemoveFinalizer(&teps.Items[i], liqoconst.LiqoRouteFinalizer(rc.podIP)) {
// Using patch instead of update, to prevent issues in case of conflicts.
if err := rc.Client.Patch(context.Background(), &teps.Items[i], client.MergeFrom(original)); err != nil {
klog.Errorf("%s -> unable to remove finalizer from tunnel endpoint %q: %v",
original.Spec.ClusterIdentity, klog.KObj(&teps.Items[i]), err)
continue
}
klog.V(4).Infof("%s -> finalizer successfully removed from tunnel endpoint %q", original.Spec.ClusterIdentity, klog.KObj(&teps.Items[i]))
}
}
}
// SetupWithManager used to set up the controller with a given manager.
func (rc *RouteController) SetupWithManager(mgr ctrl.Manager) error |
// SetupSignalHandlerForRouteOperator registers for SIGTERM, SIGINT. Interrupt. A stop context is returned
// which is closed on one of these signals.
func (rc *RouteController) SetupSignalHandlerForRouteOperator() context.Context {
ctx, done := context.WithCancel(context.Background())
c := make(chan os.Signal, 1)
signal.Notify(c, liqonetutils.ShutdownSignals...)
go func(r *RouteController) {
sig := <-c
klog.Infof("the operator received signal {%s}: cleaning up", sig.String())
r.cleanUp()
done()
}(rc)
return ctx
}
| {
resourceToBeProccesedPredicate := predicate.Funcs{
DeleteFunc: func(e event.DeleteEvent) bool {
// Finalizers are used to check if a resource is being deleted, and perform there the needed actions
// we don't want to reconcile on the delete of a resource.
return false
},
}
return ctrl.NewControllerManagedBy(mgr).WithEventFilter(resourceToBeProccesedPredicate).
For(&netv1alpha1.TunnelEndpoint{}).
Complete(rc)
} | identifier_body |
routeOperator.go | // Copyright 2019-2023 The Liqo Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package routeoperator
import (
"context"
"os"
"os/signal"
"time"
"github.com/coreos/go-iptables/iptables"
"github.com/vishvananda/netlink"
k8sApiErrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/client-go/tools/record"
"k8s.io/klog/v2"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/predicate"
netv1alpha1 "github.com/liqotech/liqo/apis/net/v1alpha1"
liqoconst "github.com/liqotech/liqo/pkg/consts"
"github.com/liqotech/liqo/pkg/liqonet/overlay"
liqorouting "github.com/liqotech/liqo/pkg/liqonet/routing"
liqonetutils "github.com/liqotech/liqo/pkg/liqonet/utils"
)
var (
result = ctrl.Result{}
)
// RouteController reconciles a TunnelEndpoint object.
type RouteController struct {
client.Client
record.EventRecorder
liqorouting.Routing
vxlanDev *overlay.VxlanDevice
podIP string
firewallChan chan bool
}
// NewRouteController returns a configured route controller ready to be started.
func NewRouteController(podIP string, vxlanDevice *overlay.VxlanDevice, router liqorouting.Routing, er record.EventRecorder,
cl client.Client) *RouteController {
r := &RouteController{
Client: cl,
Routing: router,
vxlanDev: vxlanDevice,
EventRecorder: er,
podIP: podIP,
}
return r
}
// cluster-role
// +kubebuilder:rbac:groups=net.liqo.io,resources=tunnelendpoints,verbs=get;list;watch;update;patch
// +kubebuilder:rbac:groups=net.liqo.io,resources=tunnelendpoints/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=core,resources=events,verbs=create;patch
// +kubebuilder:rbac:groups=core,resources=nodes,verbs=get
// role
// +kubebuilder:rbac:groups=core,namespace="do-not-care",resources=secrets,verbs=create;update;patch;get;list;watch;delete
// +kubebuilder:rbac:groups=core,resources=pods,verbs=update;patch;get;list;watch
// +kubebuilder:rbac:groups=core,namespace="do-not-care",resources=services,verbs=update;patch;get;list;watch
// Reconcile handle requests on TunnelEndpoint object to create and configure routes on Nodes.
func (rc *RouteController) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
tep := new(netv1alpha1.TunnelEndpoint)
var err error
if err = rc.Get(ctx, req.NamespacedName, tep); err != nil && !k8sApiErrors.IsNotFound(err) {
klog.Errorf("unable to fetch resource {%s} :%v", req.String(), err)
return result, err
}
// In case the resource does not exist anymore, we just forget it.
if k8sApiErrors.IsNotFound(err) {
return result, nil
}
// Here we check that the tunnelEndpoint resource has been fully processed. If not we do nothing.
if tep.Status.GatewayIP == "" {
return result, nil
}
clusterIdentity := tep.Spec.ClusterIdentity
_, remotePodCIDR := liqonetutils.GetPodCIDRS(tep)
_, remoteExternalCIDR := liqonetutils.GetExternalCIDRS(tep)
// Examine DeletionTimestamp to determine if object is under deletion.
if tep.ObjectMeta.DeletionTimestamp.IsZero() {
if !controllerutil.ContainsFinalizer(tep, liqoconst.LiqoRouteFinalizer(rc.podIP)) {
// The object is not being deleted, so if it does not have our finalizer,
// then lets add the finalizer and update the object. This is equivalent
// registering our finalizer.
controllerutil.AddFinalizer(tep, liqoconst.LiqoRouteFinalizer(rc.podIP))
if err := rc.Update(ctx, tep); err != nil {
if k8sApiErrors.IsConflict(err) {
klog.V(4).Infof("%s -> unable to add finalizers to resource {%s}: %s", clusterIdentity, req.String(), err)
return result, err
}
klog.Errorf("%s -> unable to add finalizers to resource {%s}: %s", clusterIdentity, req.String(), err)
return result, err
}
}
} else {
// The object is being deleted, if we encounter an error while removing the routes than we record an
// event on the resource to notify the user. The finalizer is not removed.
if controllerutil.ContainsFinalizer(tep, liqoconst.LiqoRouteFinalizer(rc.podIP)) {
klog.Infof("resource {%s} of type {%s} is being removed", tep.Name, tep.GroupVersionKind().String())
deleted, err := rc.RemoveRoutesPerCluster(tep)
if err != nil {
klog.Errorf("%s -> unable to remove route for destinations {%s} and {%s}: %s",
clusterIdentity, remotePodCIDR, remoteExternalCIDR, err)
rc.Eventf(tep, "Warning", "Processing", "unable to remove route: %s", err.Error())
return result, err
}
if deleted {
klog.Infof("%s -> route for destinations {%s} and {%s} correctly removed",
clusterIdentity, remotePodCIDR, remoteExternalCIDR)
rc.Eventf(tep, "Normal", "Processing", "route for destination {%s} and {%s} correctly removed",
remotePodCIDR, remoteExternalCIDR)
}
// remove the finalizer from the list and update it.
controllerutil.RemoveFinalizer(tep, liqoconst.LiqoRouteFinalizer(rc.podIP))
if err := rc.Update(ctx, tep); err != nil {
if k8sApiErrors.IsConflict(err) {
klog.V(4).Infof("%s -> unable to add finalizers to resource {%s}: %s", clusterIdentity, req.String(), err)
return result, err
}
klog.Errorf("%s -> unable to remove finalizers from resource {%s}: %s", clusterIdentity, req.String(), err)
return result, err
}
}
return result, nil
}
added, err := rc.EnsureRoutesPerCluster(tep)
if err != nil {
klog.Errorf("%s -> unable to configure route for destinations {%s} and {%s}: %s",
clusterIdentity, remotePodCIDR, remoteExternalCIDR, err)
rc.Eventf(tep, "Warning", "Processing", "unable to configure route for destinations {%s} and {%s}: %s",
remotePodCIDR, remoteExternalCIDR, err.Error())
return result, err
}
if added {
klog.Infof("%s -> route for destinations {%s} and {%s} correctly configured", clusterIdentity, remotePodCIDR, remoteExternalCIDR)
rc.Eventf(tep, "Normal", "Processing", "route for destinations {%s} and {%s} configured", remotePodCIDR, remoteExternalCIDR)
}
return result, nil
}
// ConfigureFirewall launches a long-running go routine that ensures the firewall configuration.
func (rc *RouteController) ConfigureFirewall() error {
iptHandler, err := iptables.New()
if err != nil {
return err
}
rc.firewallChan = make(chan bool)
fwRules := generateRules(rc.vxlanDev.Link.Name)
go func() {
ticker := time.NewTicker(5 * time.Second)
defer ticker.Stop()
for |
}()
return nil
}
// cleanUp removes all the routes, rules and devices (if any) from the
// node inserted by the operator. It is called at exit time.
func (rc *RouteController) cleanUp() {
if rc.firewallChan != nil {
// send signal to clean firewall rules and close the go routine.
rc.firewallChan <- true
// wait for the go routine to clean up.
<-rc.firewallChan
}
if rc.Routing != nil {
if err := rc.Routing.CleanRoutingTable(); err != nil {
klog.Errorf("un error occurred while cleaning up routes: %v", err)
}
if err := rc.Routing.CleanPolicyRules(); err != nil {
klog.Errorf("un error occurred while cleaning up policy routing rules: %v", err)
}
}
if rc.vxlanDev != nil {
err := netlink.LinkDel(rc.vxlanDev.Link)
if err != nil && err.Error() != "Link not found" {
klog.Errorf("an error occurred while deleting vxlan device {%s}: %v", rc.vxlanDev.Link.Name, err)
}
}
// Attempt to remove our finalizer from all tunnel endpoints. In case this operation fails,
// the cleanup will be performed by tunnel-operator when a tunnel endpoint is going to be deleted.
var teps netv1alpha1.TunnelEndpointList
if err := rc.List(context.Background(), &teps); err != nil {
klog.Errorf("an error occurred while listing tunnel endpoints: %v", err)
return
}
for i := range teps.Items {
original := teps.Items[i].DeepCopy()
if controllerutil.RemoveFinalizer(&teps.Items[i], liqoconst.LiqoRouteFinalizer(rc.podIP)) {
// Using patch instead of update, to prevent issues in case of conflicts.
if err := rc.Client.Patch(context.Background(), &teps.Items[i], client.MergeFrom(original)); err != nil {
klog.Errorf("%s -> unable to remove finalizer from tunnel endpoint %q: %v",
original.Spec.ClusterIdentity, klog.KObj(&teps.Items[i]), err)
continue
}
klog.V(4).Infof("%s -> finalizer successfully removed from tunnel endpoint %q", original.Spec.ClusterIdentity, klog.KObj(&teps.Items[i]))
}
}
}
// SetupWithManager used to set up the controller with a given manager.
func (rc *RouteController) SetupWithManager(mgr ctrl.Manager) error {
resourceToBeProccesedPredicate := predicate.Funcs{
DeleteFunc: func(e event.DeleteEvent) bool {
// Finalizers are used to check if a resource is being deleted, and perform there the needed actions
// we don't want to reconcile on the delete of a resource.
return false
},
}
return ctrl.NewControllerManagedBy(mgr).WithEventFilter(resourceToBeProccesedPredicate).
For(&netv1alpha1.TunnelEndpoint{}).
Complete(rc)
}
// SetupSignalHandlerForRouteOperator registers for SIGTERM, SIGINT. Interrupt. A stop context is returned
// which is closed on one of these signals.
func (rc *RouteController) SetupSignalHandlerForRouteOperator() context.Context {
ctx, done := context.WithCancel(context.Background())
c := make(chan os.Signal, 1)
signal.Notify(c, liqonetutils.ShutdownSignals...)
go func(r *RouteController) {
sig := <-c
klog.Infof("the operator received signal {%s}: cleaning up", sig.String())
r.cleanUp()
done()
}(rc)
return ctx
}
| {
select {
case <-ticker.C: // every five seconds we enforce the firewall rules.
for i := range fwRules {
if err := addRule(iptHandler, &fwRules[i]); err != nil {
klog.Errorf("unable to insert firewall rule {%s}: %v", fwRules[i].String(), err)
} else {
klog.V(5).Infof("firewall rule {%s} configured", fwRules[i].String())
}
}
case <-rc.firewallChan:
for i := range fwRules {
if err := deleteRule(iptHandler, &fwRules[i]); err != nil {
klog.Errorf("unable to remove firewall rule {%s}: %v", fwRules[i].String(), err)
} else {
klog.V(5).Infof("firewall rule {%s} removed", fwRules[i].String())
}
}
close(rc.firewallChan)
return
}
} | conditional_block |
routeOperator.go | // Copyright 2019-2023 The Liqo Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package routeoperator
import (
"context"
"os"
"os/signal"
"time"
"github.com/coreos/go-iptables/iptables"
"github.com/vishvananda/netlink"
k8sApiErrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/client-go/tools/record"
"k8s.io/klog/v2"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/predicate"
netv1alpha1 "github.com/liqotech/liqo/apis/net/v1alpha1"
liqoconst "github.com/liqotech/liqo/pkg/consts"
"github.com/liqotech/liqo/pkg/liqonet/overlay"
liqorouting "github.com/liqotech/liqo/pkg/liqonet/routing"
liqonetutils "github.com/liqotech/liqo/pkg/liqonet/utils"
)
var (
result = ctrl.Result{}
)
// RouteController reconciles a TunnelEndpoint object.
type RouteController struct {
client.Client
record.EventRecorder
liqorouting.Routing
vxlanDev *overlay.VxlanDevice
podIP string
firewallChan chan bool
}
// NewRouteController returns a configured route controller ready to be started.
func | (podIP string, vxlanDevice *overlay.VxlanDevice, router liqorouting.Routing, er record.EventRecorder,
cl client.Client) *RouteController {
r := &RouteController{
Client: cl,
Routing: router,
vxlanDev: vxlanDevice,
EventRecorder: er,
podIP: podIP,
}
return r
}
// cluster-role
// +kubebuilder:rbac:groups=net.liqo.io,resources=tunnelendpoints,verbs=get;list;watch;update;patch
// +kubebuilder:rbac:groups=net.liqo.io,resources=tunnelendpoints/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=core,resources=events,verbs=create;patch
// +kubebuilder:rbac:groups=core,resources=nodes,verbs=get
// role
// +kubebuilder:rbac:groups=core,namespace="do-not-care",resources=secrets,verbs=create;update;patch;get;list;watch;delete
// +kubebuilder:rbac:groups=core,resources=pods,verbs=update;patch;get;list;watch
// +kubebuilder:rbac:groups=core,namespace="do-not-care",resources=services,verbs=update;patch;get;list;watch
// Reconcile handle requests on TunnelEndpoint object to create and configure routes on Nodes.
func (rc *RouteController) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
tep := new(netv1alpha1.TunnelEndpoint)
var err error
if err = rc.Get(ctx, req.NamespacedName, tep); err != nil && !k8sApiErrors.IsNotFound(err) {
klog.Errorf("unable to fetch resource {%s} :%v", req.String(), err)
return result, err
}
// In case the resource does not exist anymore, we just forget it.
if k8sApiErrors.IsNotFound(err) {
return result, nil
}
// Here we check that the tunnelEndpoint resource has been fully processed. If not we do nothing.
if tep.Status.GatewayIP == "" {
return result, nil
}
clusterIdentity := tep.Spec.ClusterIdentity
_, remotePodCIDR := liqonetutils.GetPodCIDRS(tep)
_, remoteExternalCIDR := liqonetutils.GetExternalCIDRS(tep)
// Examine DeletionTimestamp to determine if object is under deletion.
if tep.ObjectMeta.DeletionTimestamp.IsZero() {
if !controllerutil.ContainsFinalizer(tep, liqoconst.LiqoRouteFinalizer(rc.podIP)) {
// The object is not being deleted, so if it does not have our finalizer,
// then lets add the finalizer and update the object. This is equivalent
// registering our finalizer.
controllerutil.AddFinalizer(tep, liqoconst.LiqoRouteFinalizer(rc.podIP))
if err := rc.Update(ctx, tep); err != nil {
if k8sApiErrors.IsConflict(err) {
klog.V(4).Infof("%s -> unable to add finalizers to resource {%s}: %s", clusterIdentity, req.String(), err)
return result, err
}
klog.Errorf("%s -> unable to add finalizers to resource {%s}: %s", clusterIdentity, req.String(), err)
return result, err
}
}
} else {
// The object is being deleted, if we encounter an error while removing the routes than we record an
// event on the resource to notify the user. The finalizer is not removed.
if controllerutil.ContainsFinalizer(tep, liqoconst.LiqoRouteFinalizer(rc.podIP)) {
klog.Infof("resource {%s} of type {%s} is being removed", tep.Name, tep.GroupVersionKind().String())
deleted, err := rc.RemoveRoutesPerCluster(tep)
if err != nil {
klog.Errorf("%s -> unable to remove route for destinations {%s} and {%s}: %s",
clusterIdentity, remotePodCIDR, remoteExternalCIDR, err)
rc.Eventf(tep, "Warning", "Processing", "unable to remove route: %s", err.Error())
return result, err
}
if deleted {
klog.Infof("%s -> route for destinations {%s} and {%s} correctly removed",
clusterIdentity, remotePodCIDR, remoteExternalCIDR)
rc.Eventf(tep, "Normal", "Processing", "route for destination {%s} and {%s} correctly removed",
remotePodCIDR, remoteExternalCIDR)
}
// remove the finalizer from the list and update it.
controllerutil.RemoveFinalizer(tep, liqoconst.LiqoRouteFinalizer(rc.podIP))
if err := rc.Update(ctx, tep); err != nil {
if k8sApiErrors.IsConflict(err) {
klog.V(4).Infof("%s -> unable to add finalizers to resource {%s}: %s", clusterIdentity, req.String(), err)
return result, err
}
klog.Errorf("%s -> unable to remove finalizers from resource {%s}: %s", clusterIdentity, req.String(), err)
return result, err
}
}
return result, nil
}
added, err := rc.EnsureRoutesPerCluster(tep)
if err != nil {
klog.Errorf("%s -> unable to configure route for destinations {%s} and {%s}: %s",
clusterIdentity, remotePodCIDR, remoteExternalCIDR, err)
rc.Eventf(tep, "Warning", "Processing", "unable to configure route for destinations {%s} and {%s}: %s",
remotePodCIDR, remoteExternalCIDR, err.Error())
return result, err
}
if added {
klog.Infof("%s -> route for destinations {%s} and {%s} correctly configured", clusterIdentity, remotePodCIDR, remoteExternalCIDR)
rc.Eventf(tep, "Normal", "Processing", "route for destinations {%s} and {%s} configured", remotePodCIDR, remoteExternalCIDR)
}
return result, nil
}
// ConfigureFirewall launches a long-running go routine that ensures the firewall configuration.
func (rc *RouteController) ConfigureFirewall() error {
iptHandler, err := iptables.New()
if err != nil {
return err
}
rc.firewallChan = make(chan bool)
fwRules := generateRules(rc.vxlanDev.Link.Name)
go func() {
ticker := time.NewTicker(5 * time.Second)
defer ticker.Stop()
for {
select {
case <-ticker.C: // every five seconds we enforce the firewall rules.
for i := range fwRules {
if err := addRule(iptHandler, &fwRules[i]); err != nil {
klog.Errorf("unable to insert firewall rule {%s}: %v", fwRules[i].String(), err)
} else {
klog.V(5).Infof("firewall rule {%s} configured", fwRules[i].String())
}
}
case <-rc.firewallChan:
for i := range fwRules {
if err := deleteRule(iptHandler, &fwRules[i]); err != nil {
klog.Errorf("unable to remove firewall rule {%s}: %v", fwRules[i].String(), err)
} else {
klog.V(5).Infof("firewall rule {%s} removed", fwRules[i].String())
}
}
close(rc.firewallChan)
return
}
}
}()
return nil
}
// cleanUp removes all the routes, rules and devices (if any) from the
// node inserted by the operator. It is called at exit time.
func (rc *RouteController) cleanUp() {
if rc.firewallChan != nil {
// send signal to clean firewall rules and close the go routine.
rc.firewallChan <- true
// wait for the go routine to clean up.
<-rc.firewallChan
}
if rc.Routing != nil {
if err := rc.Routing.CleanRoutingTable(); err != nil {
klog.Errorf("un error occurred while cleaning up routes: %v", err)
}
if err := rc.Routing.CleanPolicyRules(); err != nil {
klog.Errorf("un error occurred while cleaning up policy routing rules: %v", err)
}
}
if rc.vxlanDev != nil {
err := netlink.LinkDel(rc.vxlanDev.Link)
if err != nil && err.Error() != "Link not found" {
klog.Errorf("an error occurred while deleting vxlan device {%s}: %v", rc.vxlanDev.Link.Name, err)
}
}
// Attempt to remove our finalizer from all tunnel endpoints. In case this operation fails,
// the cleanup will be performed by tunnel-operator when a tunnel endpoint is going to be deleted.
var teps netv1alpha1.TunnelEndpointList
if err := rc.List(context.Background(), &teps); err != nil {
klog.Errorf("an error occurred while listing tunnel endpoints: %v", err)
return
}
for i := range teps.Items {
original := teps.Items[i].DeepCopy()
if controllerutil.RemoveFinalizer(&teps.Items[i], liqoconst.LiqoRouteFinalizer(rc.podIP)) {
// Using patch instead of update, to prevent issues in case of conflicts.
if err := rc.Client.Patch(context.Background(), &teps.Items[i], client.MergeFrom(original)); err != nil {
klog.Errorf("%s -> unable to remove finalizer from tunnel endpoint %q: %v",
original.Spec.ClusterIdentity, klog.KObj(&teps.Items[i]), err)
continue
}
klog.V(4).Infof("%s -> finalizer successfully removed from tunnel endpoint %q", original.Spec.ClusterIdentity, klog.KObj(&teps.Items[i]))
}
}
}
// SetupWithManager used to set up the controller with a given manager.
func (rc *RouteController) SetupWithManager(mgr ctrl.Manager) error {
resourceToBeProccesedPredicate := predicate.Funcs{
DeleteFunc: func(e event.DeleteEvent) bool {
// Finalizers are used to check if a resource is being deleted, and perform there the needed actions
// we don't want to reconcile on the delete of a resource.
return false
},
}
return ctrl.NewControllerManagedBy(mgr).WithEventFilter(resourceToBeProccesedPredicate).
For(&netv1alpha1.TunnelEndpoint{}).
Complete(rc)
}
// SetupSignalHandlerForRouteOperator registers for SIGTERM, SIGINT. Interrupt. A stop context is returned
// which is closed on one of these signals.
func (rc *RouteController) SetupSignalHandlerForRouteOperator() context.Context {
ctx, done := context.WithCancel(context.Background())
c := make(chan os.Signal, 1)
signal.Notify(c, liqonetutils.ShutdownSignals...)
go func(r *RouteController) {
sig := <-c
klog.Infof("the operator received signal {%s}: cleaning up", sig.String())
r.cleanUp()
done()
}(rc)
return ctx
}
| NewRouteController | identifier_name |
routeOperator.go | // Copyright 2019-2023 The Liqo Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package routeoperator
import (
"context"
"os"
"os/signal"
"time"
"github.com/coreos/go-iptables/iptables"
"github.com/vishvananda/netlink"
k8sApiErrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/client-go/tools/record"
"k8s.io/klog/v2"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/predicate"
netv1alpha1 "github.com/liqotech/liqo/apis/net/v1alpha1"
liqoconst "github.com/liqotech/liqo/pkg/consts"
"github.com/liqotech/liqo/pkg/liqonet/overlay"
liqorouting "github.com/liqotech/liqo/pkg/liqonet/routing"
liqonetutils "github.com/liqotech/liqo/pkg/liqonet/utils"
)
var (
	// result is the default (empty) reconcile result returned by Reconcile;
	// requeueing is driven solely by the returned error, never by the result.
	result = ctrl.Result{}
)
// RouteController reconciles a TunnelEndpoint object.
type RouteController struct {
	client.Client
	record.EventRecorder
	liqorouting.Routing
	// vxlanDev is the VXLAN overlay device managed by the operator; may be nil
	// when no overlay is in use (checked before deletion in cleanUp).
	vxlanDev *overlay.VxlanDevice
	// podIP is the IP of the pod running this operator; it parameterizes the
	// per-node finalizer added to TunnelEndpoint resources.
	podIP string
	// firewallChan coordinates shutdown with the firewall-enforcement goroutine
	// started by ConfigureFirewall; nil until ConfigureFirewall is called.
	firewallChan chan bool
}
// NewRouteController returns a configured route controller ready to be started.
func NewRouteController(podIP string, vxlanDevice *overlay.VxlanDevice, router liqorouting.Routing, er record.EventRecorder,
	cl client.Client) *RouteController {
	// Build and return the controller directly; all collaborators are injected.
	return &RouteController{
		Client:        cl,
		EventRecorder: er,
		Routing:       router,
		vxlanDev:      vxlanDevice,
		podIP:         podIP,
	}
}
// cluster-role
// +kubebuilder:rbac:groups=net.liqo.io,resources=tunnelendpoints,verbs=get;list;watch;update;patch
// +kubebuilder:rbac:groups=net.liqo.io,resources=tunnelendpoints/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=core,resources=events,verbs=create;patch
// +kubebuilder:rbac:groups=core,resources=nodes,verbs=get
// role
// +kubebuilder:rbac:groups=core,namespace="do-not-care",resources=secrets,verbs=create;update;patch;get;list;watch;delete
// +kubebuilder:rbac:groups=core,resources=pods,verbs=update;patch;get;list;watch
// +kubebuilder:rbac:groups=core,namespace="do-not-care",resources=services,verbs=update;patch;get;list;watch
// Reconcile handles requests on TunnelEndpoint objects to create and configure routes on nodes.
// It registers a per-node finalizer on live resources, removes the routes (and then the
// finalizer) on deletion, and otherwise ensures the routes for the remote cluster exist.
func (rc *RouteController) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	tep := new(netv1alpha1.TunnelEndpoint)
	var err error
	if err = rc.Get(ctx, req.NamespacedName, tep); err != nil && !k8sApiErrors.IsNotFound(err) {
		klog.Errorf("unable to fetch resource {%s} :%v", req.String(), err)
		return result, err
	}
	// In case the resource does not exist anymore, we just forget it.
	if k8sApiErrors.IsNotFound(err) {
		return result, nil
	}
	// Here we check that the tunnelEndpoint resource has been fully processed. If not we do nothing.
	if tep.Status.GatewayIP == "" {
		return result, nil
	}
	clusterIdentity := tep.Spec.ClusterIdentity
	_, remotePodCIDR := liqonetutils.GetPodCIDRS(tep)
	_, remoteExternalCIDR := liqonetutils.GetExternalCIDRS(tep)
	// Examine DeletionTimestamp to determine if object is under deletion.
	if tep.ObjectMeta.DeletionTimestamp.IsZero() {
		if !controllerutil.ContainsFinalizer(tep, liqoconst.LiqoRouteFinalizer(rc.podIP)) {
			// The object is not being deleted, so if it does not have our finalizer,
			// then lets add the finalizer and update the object. This is equivalent
			// to registering our finalizer.
			controllerutil.AddFinalizer(tep, liqoconst.LiqoRouteFinalizer(rc.podIP))
			if err := rc.Update(ctx, tep); err != nil {
				if k8sApiErrors.IsConflict(err) {
					// Conflicts are expected under concurrent updates: log at low verbosity and retry.
					klog.V(4).Infof("%s -> unable to add finalizers to resource {%s}: %s", clusterIdentity, req.String(), err)
					return result, err
				}
				klog.Errorf("%s -> unable to add finalizers to resource {%s}: %s", clusterIdentity, req.String(), err)
				return result, err
			}
		}
	} else {
		// The object is being deleted, if we encounter an error while removing the routes than we record an
		// event on the resource to notify the user. The finalizer is not removed.
		if controllerutil.ContainsFinalizer(tep, liqoconst.LiqoRouteFinalizer(rc.podIP)) {
			klog.Infof("resource {%s} of type {%s} is being removed", tep.Name, tep.GroupVersionKind().String())
			deleted, err := rc.RemoveRoutesPerCluster(tep)
			if err != nil {
				klog.Errorf("%s -> unable to remove route for destinations {%s} and {%s}: %s",
					clusterIdentity, remotePodCIDR, remoteExternalCIDR, err)
				rc.Eventf(tep, "Warning", "Processing", "unable to remove route: %s", err.Error())
				return result, err
			}
			if deleted {
				klog.Infof("%s -> route for destinations {%s} and {%s} correctly removed",
					clusterIdentity, remotePodCIDR, remoteExternalCIDR)
				rc.Eventf(tep, "Normal", "Processing", "route for destination {%s} and {%s} correctly removed",
					remotePodCIDR, remoteExternalCIDR)
			}
			// remove the finalizer from the list and update it.
			controllerutil.RemoveFinalizer(tep, liqoconst.LiqoRouteFinalizer(rc.podIP))
			if err := rc.Update(ctx, tep); err != nil {
				if k8sApiErrors.IsConflict(err) {
					// Fixed copy-paste defect: this message previously said "add finalizers to",
					// copied from the add path above; this branch removes finalizers.
					klog.V(4).Infof("%s -> unable to remove finalizers from resource {%s}: %s", clusterIdentity, req.String(), err)
					return result, err
				}
				klog.Errorf("%s -> unable to remove finalizers from resource {%s}: %s", clusterIdentity, req.String(), err)
				return result, err
			}
		}
		return result, nil
	}
	added, err := rc.EnsureRoutesPerCluster(tep)
	if err != nil {
		klog.Errorf("%s -> unable to configure route for destinations {%s} and {%s}: %s",
			clusterIdentity, remotePodCIDR, remoteExternalCIDR, err)
		rc.Eventf(tep, "Warning", "Processing", "unable to configure route for destinations {%s} and {%s}: %s",
			remotePodCIDR, remoteExternalCIDR, err.Error())
		return result, err
	}
	if added {
		klog.Infof("%s -> route for destinations {%s} and {%s} correctly configured", clusterIdentity, remotePodCIDR, remoteExternalCIDR)
		rc.Eventf(tep, "Normal", "Processing", "route for destinations {%s} and {%s} configured", remotePodCIDR, remoteExternalCIDR)
	}
	return result, nil
}
// ConfigureFirewall launches a long-running go routine that ensures the firewall configuration.
//
// Shutdown protocol (see cleanUp): the caller sends a value on firewallChan to
// request teardown; the goroutine deletes the rules, closes the channel to
// acknowledge completion, and exits.
func (rc *RouteController) ConfigureFirewall() error {
	iptHandler, err := iptables.New()
	if err != nil {
		return err
	}
	// Unbuffered channel: the send in cleanUp blocks until the goroutine is
	// ready to tear down, making the handshake synchronous.
	rc.firewallChan = make(chan bool)
	fwRules := generateRules(rc.vxlanDev.Link.Name)
	go func() {
		ticker := time.NewTicker(5 * time.Second)
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C: // every five seconds we enforce the firewall rules.
				for i := range fwRules {
					if err := addRule(iptHandler, &fwRules[i]); err != nil {
						klog.Errorf("unable to insert firewall rule {%s}: %v", fwRules[i].String(), err)
					} else {
						klog.V(5).Infof("firewall rule {%s} configured", fwRules[i].String())
					}
				}
			case <-rc.firewallChan:
				// Teardown requested: best-effort removal of every rule we enforce.
				for i := range fwRules {
					if err := deleteRule(iptHandler, &fwRules[i]); err != nil {
						klog.Errorf("unable to remove firewall rule {%s}: %v", fwRules[i].String(), err)
					} else {
						klog.V(5).Infof("firewall rule {%s} removed", fwRules[i].String())
					}
				}
				// Closing the channel unblocks the receive in cleanUp, signalling
				// that teardown finished.
				close(rc.firewallChan)
				return
			}
		}
	}()
	return nil
}
// cleanUp removes all the routes, rules and devices (if any) from the
// node inserted by the operator. It is called at exit time.
func (rc *RouteController) cleanUp() {
	if rc.firewallChan != nil {
		// send signal to clean firewall rules and close the go routine.
		rc.firewallChan <- true
		// wait for the go routine to clean up (it closes the channel when done).
		<-rc.firewallChan
	}
	if rc.Routing != nil {
		// Fixed log text: messages previously read "un error occurred".
		if err := rc.Routing.CleanRoutingTable(); err != nil {
			klog.Errorf("an error occurred while cleaning up routes: %v", err)
		}
		if err := rc.Routing.CleanPolicyRules(); err != nil {
			klog.Errorf("an error occurred while cleaning up policy routing rules: %v", err)
		}
	}
	if rc.vxlanDev != nil {
		err := netlink.LinkDel(rc.vxlanDev.Link)
		// "Link not found" means the device is already gone, which is fine at cleanup time.
		if err != nil && err.Error() != "Link not found" {
			klog.Errorf("an error occurred while deleting vxlan device {%s}: %v", rc.vxlanDev.Link.Name, err)
		}
	}
	// Attempt to remove our finalizer from all tunnel endpoints. In case this operation fails,
	// the cleanup will be performed by tunnel-operator when a tunnel endpoint is going to be deleted.
	var teps netv1alpha1.TunnelEndpointList
	if err := rc.List(context.Background(), &teps); err != nil {
		klog.Errorf("an error occurred while listing tunnel endpoints: %v", err)
		return
	}
	for i := range teps.Items {
		original := teps.Items[i].DeepCopy()
		if controllerutil.RemoveFinalizer(&teps.Items[i], liqoconst.LiqoRouteFinalizer(rc.podIP)) {
			// Using patch instead of update, to prevent issues in case of conflicts.
			if err := rc.Client.Patch(context.Background(), &teps.Items[i], client.MergeFrom(original)); err != nil {
				klog.Errorf("%s -> unable to remove finalizer from tunnel endpoint %q: %v",
					original.Spec.ClusterIdentity, klog.KObj(&teps.Items[i]), err)
				continue
			}
			klog.V(4).Infof("%s -> finalizer successfully removed from tunnel endpoint %q", original.Spec.ClusterIdentity, klog.KObj(&teps.Items[i]))
		}
	}
}
// SetupWithManager used to set up the controller with a given manager.
func (rc *RouteController) SetupWithManager(mgr ctrl.Manager) error {
resourceToBeProccesedPredicate := predicate.Funcs{
DeleteFunc: func(e event.DeleteEvent) bool {
// Finalizers are used to check if a resource is being deleted, and perform there the needed actions
// we don't want to reconcile on the delete of a resource.
return false
},
}
return ctrl.NewControllerManagedBy(mgr).WithEventFilter(resourceToBeProccesedPredicate).
For(&netv1alpha1.TunnelEndpoint{}).
Complete(rc)
} | // SetupSignalHandlerForRouteOperator registers for SIGTERM, SIGINT. Interrupt. A stop context is returned
// which is closed on one of these signals.
func (rc *RouteController) SetupSignalHandlerForRouteOperator() context.Context {
ctx, done := context.WithCancel(context.Background())
c := make(chan os.Signal, 1)
signal.Notify(c, liqonetutils.ShutdownSignals...)
go func(r *RouteController) {
sig := <-c
klog.Infof("the operator received signal {%s}: cleaning up", sig.String())
r.cleanUp()
done()
}(rc)
return ctx
} | random_line_split | |
th_logistic_regression.py | """
**************************************************************************
Theano Logistic Regression
**************************************************************************
This version was just for local testing (Vee ran his version for our SBEL batch jobs)
@author: Jason Feriante <feriante@cs.wisc.edu>
@date: 10 July 2015
**************************************************************************
logistic regression using Theano and stochastic gradient descent. Logistic regression is a
probabilistic, linear classifier. It is parametrized by a weight matrix :math:`W` and a bias vector :math:`b`.
Classification is done by projecting data points onto a set of hyperplanes, the distance to
which is used to determine a class membership probability. Mathematically, this can be written as:
.. math::
P(Y=i|x, W,b) &= softmax_i(W x + b) \\
&= \frac {e^{W_i x + b_i}} {\sum_j e^{W_j x + b_j}}
The output of the model or prediction is then done by taking the argmax of the vector whose i'th element is P(Y=i|x).
.. math:: y_{pred} = argmax_i P(Y=i|x,W,b)
This tutorial presents a stochastic gradient descent optimization method suitable for large datasets.
"""
import cPickle, time, os, sys, numpy, theano
from sklearn import metrics
import theano.tensor as T
from lib.theano import helpers
class LogisticRegression(object):
    """Multi-class Logistic Regression Class

    The logistic regression is fully described by a weight matrix :math:`W` and bias vector :math:`b`.
    Classification is done by projecting data points onto a set of hyperplanes, the distance to which is used to
    determine a class membership probability.
    """

    def __init__(self, input, n_in, n_out):
        """ Initialize the parameters of the logistic regression

        :type input: theano.tensor.TensorType
        :param input: symbolic variable that describes the input of the architecture (one minibatch)
        :type n_in: int
        :param n_in: number of input units, the dimension of the space in which the datapoints lie
        :type n_out: int
        :param n_out: number of output units, the dimension of the space in which the labels lie
        """
        # start-snippet-1
        # initialize with 0 the weights W as a matrix of shape (n_in, n_out)
        self.W = theano.shared( value=numpy.zeros( (n_in, n_out), dtype=theano.config.floatX ), name='W', borrow=True )
        # initialize the baises b as a vector of n_out 0s
        self.b = theano.shared(value=numpy.zeros( (n_out,), dtype=theano.config.floatX ), name='b', borrow=True )
        # symbolic expression for computing the matrix of class-membership probabilities where:
        # W is a matrix where column-k represent the separation hyper plain for class-k
        # x is a matrix where row-j represents input training sample-j
        # b is a vector where element-k represent the free parameter of hyper plane-k
        self.p_y_given_x = T.nnet.softmax(T.dot(input, self.W) + self.b)
        # symbolic description of how to compute prediction as class whose probability is maximal
        self.y_pred = T.argmax(self.p_y_given_x, axis=1)
        # end-snippet-1
        # parameters of the model (shared variables updated by gradient descent)
        self.params = [self.W, self.b]
        # keep track of model input (needed to compile predictor functions later)
        self.input = input

    def negative_log_likelihood(self, y):
        """Return the mean of the negative log-likelihood of the prediction of this model under a given target distribution.

        .. math::
            \frac{1}{|\mathcal{D}|} \mathcal{L} (\theta=\{W,b\}, \mathcal{D}) =
            \frac{1}{|\mathcal{D}|} \sum_{i=0}^{|\mathcal{D}|}
            \log(P(Y=y^{(i)}|x^{(i)}, W,b)) \\
            \ell (\theta=\{W,b\}, \mathcal{D})

        :type y: theano.tensor.TensorType
        :param y: corresponds to a vector that gives for each example the
        correct label
        Note: we use the mean instead of the sum so that the learning rate is less dependent on the batch size
        """
        # start-snippet-2
        # y.shape[0] is (symbolically) the number of rows in y, i.e. number of examples (call it n) in the minibatch
        # T.arange(y.shape[0]) is a symbolic vector which will contain [0,1,2,... n-1]
        # T.log(self.p_y_given_x) is a matrix of Log-Probabilities (call it LP) with one row per example and one column per class
        # LP[T.arange(y.shape[0]),y] is a vector v containing [LP[0,y[0]], LP[1,y[1]], LP[2,y[2]], ..., LP[n-1,y[n-1]]] and
        # T.mean(LP[T.arange(y.shape[0]),y]) is the mean (across minibatch examples) of the elements in v, i.e., the mean log-likelihood across the minibatch.
        #print "y.ndim = ",y.ndim
        return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])
        # end-snippet-2

    def errors(self, y):
        """Return a float representing the number of errors in the minibatch over the total number of examples of the minibatch ; zero one
        loss over the size of the minibatch

        :type y: theano.tensor.TensorType
        :param y: corresponds to a vector that gives for each example the
        correct label
        """
        # check if y has same dimension of y_pred
        if y.ndim != self.y_pred.ndim:
            raise TypeError( 'y should have the same shape as self.y_pred', ('y', y.type, 'y_pred', self.y_pred.type) )
        # check if y is of the correct datatype
        if y.dtype.startswith('int'):
            # the T.neq operator returns a vector of 0s and 1s, where 1
            # represents a mistake in prediction
            return T.mean(T.neq(self.y_pred, y))
        else:
            # non-integer labels are unsupported for zero-one loss
            raise NotImplementedError()
def sgd_optimization(data_type, target, model_dir, learning_rate=0.1, n_epochs=10, batch_size=100):
"""
Demonstrate stochastic gradient descent optimization of a log-linear model
:type learning_rate: float
:param learning_rate: learning rate used (factor for the stochastic gradient)
:type n_epochs: int
:param n_epochs: maximal number of epochs to run the optimizer
"""
test_fold = 1 #xxxxxxxxxxxx TEMP XXXXXXXXXXXXXXXX
write_model_file = model_dir + '/model.' + target + '.' + str(test_fold) +'.pkl'
fold_path = helpers.get_fold_path(data_type)
targets = helpers.build_targets(fold_path, data_type)
fnames = targets[target]
fold_accuracies = {}
did_something = False
# pct_ct = []
# roc_auc = []
# run 4 folds vs 1 fold with each possible scenario
# for curr_fl in range(5):
# print 'Building data for target: ' + target + ', fold: ' + str(curr_fl)
# loop through all folds, for now just do 1!
datasets, test_set_labels = helpers.th_load_data(data_type, fold_path, target, fnames, 0, test_fold)
train_set_x, train_set_y = datasets[0]
test_set_x, test_set_y = datasets[1]
valid_set_x = train_set_x
valid_set_y = train_set_y
# compute number of rows for training, validation and testing
rows_train = train_set_x.get_value(borrow=True).shape[0]
rows_valid = valid_set_x.get_value(borrow=True).shape[0]
rows_test = test_set_x.get_value(borrow=True).shape[0]
# compute number of minibatches for training, validation and testing
n_train_batches = rows_train / batch_size
n_valid_batches = rows_valid / batch_size
n_test_batches = rows_test / batch_size
####################### BUILD ACTUAL MODEL #######################
# allocate symbolic variables for the data
index = T.lscalar() # index to a [mini]batch
# generate symbolic variables for input (x and y represent a minibatch)
x = T.matrix('x') # data, presented as rasterized images
y = T.ivector('y') # labels, presented as 1D vector of [int] labels
# construct the logistic regression class
# n_in: Each MNIST image has size 32*32 = 1024
# n_out: 10 different digits - multi-task LR
classifier = LogisticRegression(input=x, n_in=32 * 32, n_out=2)
# the cost we minimize during training is the negative log likelihood of the model in symbolic format
cost = classifier.negative_log_likelihood(y)
# compiling a Theano function that computes the mistakes that are made by the model on a minibatch
test_model = theano.function( inputs=[index], outputs=classifier.errors(y),
givens={
x: test_set_x[index * batch_size: (index + 1) * batch_size],
y: test_set_y[index * batch_size: (index + 1) * batch_size]
}
)
validate_model = theano.function(
inputs=[index],
outputs=classifier.errors(y),
givens={
x: valid_set_x[index * batch_size: (index + 1) * batch_size],
y: valid_set_y[index * batch_size: (index + 1) * batch_size]
}
)
# compute the gradient of cost with respect to theta = (W,b)
g_W = T.grad(cost=cost, wrt=classifier.W)
g_b = T.grad(cost=cost, wrt=classifier.b)
# start-snippet-3
# specify how to update the parameters of the model as a list of
# (variable, update expression) pairs.
updates = [(classifier.W, classifier.W - learning_rate * g_W),
(classifier.b, classifier.b - learning_rate * g_b)]
# compiling a Theano function `train_model` that returns the cost, but in
# the same time updates the parameter of the model based on the rules
# defined in `updates`
train_model = theano.function( inputs=[index], outputs=cost, updates=updates,
givens={
x: train_set_x[index * batch_size: (index + 1) * batch_size],
y: train_set_y[index * batch_size: (index + 1) * batch_size]
}
)
# end-snippet-3
################ TRAIN MODEL ################
# early-stopping parameters
patience = 5000 # look as this many examples regardless
patience_increase = 2 # wait this much longer when a new best is found
improvement_threshold = 0.995 # a relative improvement of this much is considered significant
validation_frequency = min(n_train_batches, patience / 2)
# go through this many minibatches before checking the network on the validation set; in this case we check every epoch
best_validation_loss = numpy.inf
test_score = 0.
start_time = time.clock()
done_looping = False
epoch = 0
while (epoch < n_epochs) and (not done_looping):
epoch = epoch + 1
for minibatch_index in xrange(n_train_batches):
minibatch_avg_cost = train_model(minibatch_index)
# iteration number
iter = (epoch - 1) * n_train_batches + minibatch_index
if (iter + 1) % validation_frequency == 0:
# compute zero-one loss on validation set
validation_losses = [validate_model(i)
for i in xrange(n_valid_batches)]
this_validation_loss = numpy.mean(validation_losses)
# print( 'epoch %i, minibatch %i/%i, validation error %f %%' %
# (epoch, minibatch_index + 1, n_train_batches, this_validation_loss * 100.) )
# if we got the best validation score until now
if this_validation_loss < best_validation_loss:
#improve patience if loss improvement is good enough
if this_validation_loss < best_validation_loss * \
improvement_threshold:
|
best_validation_loss = this_validation_loss
# test it on the test set
test_losses = [test_model(i)
for i in xrange(n_test_batches)]
test_score = numpy.mean(test_losses)
# print( (' epoch %i, minibatch %i/%i, test error of best model %f %%' ) %
# ( epoch, minibatch_index + 1, n_train_batches, test_score * 100. ) )
# save the best model
with open(write_model_file, 'w') as f:
cPickle.dump(classifier, f)
if patience <= iter:
done_looping = True
break
end_time = time.clock()
print( ('Optimization complete for %d with best validation score of %f %% with test performance %f %%')
% (test_fold, best_validation_loss * 100., test_score * 100.) )
print 'The code ran for %d epochs, with %f epochs/sec' % (epoch, 1. * epoch / (end_time - start_time))
# print >> sys.stderr, ('The code for file ' + os.path.split(__file__)[1] + ' ran for %.1fs' % ((end_time - start_time)))
# end-snippet-4
# Now we do the predictions
# load the saved best model for this fold
classifier = cPickle.load(open(write_model_file))
# compile a predictor function
predict_model = theano.function(inputs=[classifier.input], outputs=[classifier.y_pred,classifier.p_y_given_x])
# compile a confidence predictor function
# predict_conf_model = theano.function( inputs=[classifier.input], outputs=classifier.p_y_given_x)
# We can test it on some examples from test test
""" *************** build AUC curve *************** """
# get the probability of our predictions
test_set = test_set_x.get_value()
predicted_values, conf_preds = predict_model(test_set[:(rows_test)])
conf_predictions = []
for i in range(len(conf_preds)):
# ignore the first column; this gives a lower score that seems wrong.
conf_predictions.append(conf_preds[i][1])
# determine ROC / AUC
fpr, tpr, thresholds = metrics.roc_curve(test_set_labels, conf_predictions)
auc = metrics.auc(fpr, tpr) # e.g. 0.855
""" *********************************************** """
num_correct = 0
num_false = 0
for i in range(len(predicted_values)):
if predicted_values[i] == test_set_labels[i]:
num_correct += 1
else:
num_false += 1
total = len(predicted_values)
percent_correct = num_correct / float(total)
fold_results = ''
fold_results += '#################### Results for ' + data_type + ' ####################' + '\n'
fold_results += 'target:' + target + ' fold:' + str(test_fold) + ' predicted: ' + \
str(total) + ' wrong: ' + \
str(num_false) + ' pct correct: ' + str(percent_correct) + ', auc: ' + str(auc)
print fold_results
write_predictions_file = model_dir + '/predictions.' + target + '.' + str(test_fold) +'.txt'
with open(write_predictions_file, 'w') as f:
f.write(fold_results + "\n")
# def run_predictions(data_type, curr_target):
# fold_path = get_fold_path(data_type)
# targets = build_targets(fold_path, data_type)
# # print "Found " + str(len(targets)) + " targets for " + data_type
# fold_accuracies = {}
# did_something = False
# for target, fnames in targets.iteritems():
# if (target != curr_target):
# continue
# else:
# did_something = True
# # retrieve our stratified folds
# folds = get_folds(data_type, fold_path, target, fnames)
# pct_ct = []
# roc_auc = []
# # run 4 folds vs 1 fold with each possible scenario
# for curr_fl in range(5):
# print 'Building data for target: ' + target + ', fold: ' + str(curr_fl)
# # folds 1-4
# temp_data = []
# for i in range(len(folds)):
# if(i == curr_fl):
# # don't include the test fold
# continue
# else:
# temp_data += folds[i]
# # vs current 5th test fold
# test_data = folds[curr_fl]
# """ Turning 1024 bits into features is a slow process """
# # build training data
# X = []
# Y = []
# for i in range(len(temp_data)):
# row = []
# for bit in temp_data[i][0]:
# row.append(int(bit))
# X.append(row)
# Y.append(int(temp_data[i][1]))
# X = np.array(X)
# Y = np.array(Y)
# # build test data
# X_test = []
# Y_test = []
# for i in range(len(test_data)):
# row = []
# for bit in test_data[i][0]:
# row.append(int(bit))
# X_test.append(row)
# Y_test.append(int(test_data[i][1]))
# X_test = np.array(X_test)
# Y_test = np.array(Y_test)
# percent_correct, auc = random_forest(target, X, Y, X_test, Y_test, curr_fl)
# pct_ct.append(percent_correct)
# roc_auc.append(auc)
# # now get the average fold results for this target
# accuracy = sum(pct_ct) / float(len(pct_ct))
# all_auc = sum(roc_auc) / float(len(roc_auc))
# print 'Results for '+ target + ': accuracy: ' + str(accuracy) + ', auc: ' + str(all_auc)
# # update fold accuracies
# fold_accuracies[target] = (accuracy, all_auc)
if(did_something == False):
print curr_target + ' not found in ' + data_type + '!'
exit(0)
print '#################### Results for ' + data_type + ' ####################'
# output results
accuracies = 0.00
aucs = 0.00
num_targets = 0.00
for target, obj in fold_accuracies.iteritems():
acc = obj[0]
auc = obj[1]
print target + ' accuracy: ' + str(acc) + ', auc:' + str(auc)
accuracies += acc
aucs += auc
num_targets += 1
# overall_acc = accuracies / num_targets
# overall_auc = aucs / num_targets
# print ' overall accuracy: ' + str(overall_acc) + ', overall auc: ' + str(overall_auc)
print '############################################################'
def main(args):
    """CLI entry point: dispatch sgd_optimization for the chosen dataset/target.

    args -- sys.argv; expects args[1] = dataset name (tox21, dud_e, muv, pcba)
    and args[2] = target name or numeric target index.
    """
    if(len(args) < 3 or len(args[2]) < 1):
        print 'usage: <tox21, dud_e, muv, or pcba> <target> '
        return
    dataset = args[1]
    target = args[2]
    # in case of typos
    if(dataset == 'dude'):
        dataset = 'dud_e'
    print "Running Theano Logistic Regression for " \
        + dataset + "........."
    # a numeric target is treated as an index into the dataset's target list
    is_numeric = helpers.is_numeric(target)
    if(is_numeric):
        target_list = helpers.get_target_list(dataset)
        target = target_list[int(target)]
    # directory where the best model and predictions are written
    model_dir = 'theano_saved/logistic_regression'
    if(dataset == 'tox21'):
        sgd_optimization('Tox21', target, model_dir)
    elif(dataset == 'dud_e'):
        sgd_optimization('DUD-E', target, model_dir)
    elif(dataset == 'muv'):
        sgd_optimization('MUV', target, model_dir)
    elif(dataset == 'pcba'):
        sgd_optimization('PCBA', target, model_dir)
    else:
        print 'dataset param not found. options: tox21, dud_e, muv, or pcba'
if __name__ == '__main__':
    # Time the whole run; time.clock() is the Python 2 era CPU/wall timer
    # (deprecated in Python 3 — would need time.perf_counter there).
    start_time = time.clock()
    main(sys.argv)
    end_time = time.clock()
    print 'runtime: %.2f secs.' % (end_time - start_time)
| patience = max(patience, iter * patience_increase) | conditional_block |
th_logistic_regression.py | """
**************************************************************************
Theano Logistic Regression
**************************************************************************
This version was just for local testing (Vee ran his version for our SBEL batch jobs)
@author: Jason Feriante <feriante@cs.wisc.edu>
@date: 10 July 2015
**************************************************************************
logistic regression using Theano and stochastic gradient descent. Logistic regression is a
probabilistic, linear classifier. It is parametrized by a weight matrix :math:`W` and a bias vector :math:`b`.
Classification is done by projecting data points onto a set of hyperplanes, the distance to
which is used to determine a class membership probability. Mathematically, this can be written as:
.. math::
P(Y=i|x, W,b) &= softmax_i(W x + b) \\
&= \frac {e^{W_i x + b_i}} {\sum_j e^{W_j x + b_j}}
The output of the model or prediction is then done by taking the argmax of the vector whose i'th element is P(Y=i|x).
.. math:: y_{pred} = argmax_i P(Y=i|x,W,b)
This tutorial presents a stochastic gradient descent optimization method suitable for large datasets.
"""
import cPickle, time, os, sys, numpy, theano
from sklearn import metrics
import theano.tensor as T
from lib.theano import helpers
class LogisticRegression(object):
"""Multi-class Logistic Regression Class
The logistic regression is fully described by a weight matrix :math:`W` and bias vector :math:`b`.
Classification is done by projecting data points onto a set of hyperplanes, the distance to which is used to
determine a class membership probability.
"""
def __init__(self, input, n_in, n_out):
""" Initialize the parameters of the logistic regression
:type input: theano.tensor.TensorType
:param input: symbolic variable that describes the input of the architecture (one minibatch)
:type n_in: int
:param n_in: number of input units, the dimension of the space in which the datapoints lie
:type n_out: int
:param n_out: number of output units, the dimension of the space in which the labels lie
"""
# start-snippet-1
# initialize with 0 the weights W as a matrix of shape (n_in, n_out)
self.W = theano.shared( value=numpy.zeros( (n_in, n_out), dtype=theano.config.floatX ), name='W', borrow=True )
# initialize the baises b as a vector of n_out 0s
self.b = theano.shared(value=numpy.zeros( (n_out,), dtype=theano.config.floatX ), name='b', borrow=True )
# symbolic expression for computing the matrix of class-membership probabilities where:
# W is a matrix where column-k represent the separation hyper plain for class-k
# x is a matrix where row-j represents input training sample-j
# b is a vector where element-k represent the free parameter of hyper plane-k
self.p_y_given_x = T.nnet.softmax(T.dot(input, self.W) + self.b)
# symbolic description of how to compute prediction as class whose probability is maximal
self.y_pred = T.argmax(self.p_y_given_x, axis=1)
# end-snippet-1
# parameters of the model
self.params = [self.W, self.b]
# keep track of model input
self.input = input
def negative_log_likelihood(self, y):
"""Return the mean of the negative log-likelihood of the prediction of this model under a given target distribution.
.. math::
\frac{1}{|\mathcal{D}|} \mathcal{L} (\theta=\{W,b\}, \mathcal{D}) =
\frac{1}{|\mathcal{D}|} \sum_{i=0}^{|\mathcal{D}|}
\log(P(Y=y^{(i)}|x^{(i)}, W,b)) \\
\ell (\theta=\{W,b\}, \mathcal{D})
:type y: theano.tensor.TensorType
:param y: corresponds to a vector that gives for each example the
correct label
Note: we use the mean instead of the sum so that the learning rate is less dependent on the batch size
"""
# start-snippet-2
# y.shape[0] is (symbolically) the number of rows in y, i.e. number of examples (call it n) in the minibatch
# T.arange(y.shape[0]) is a symbolic vector which will contain [0,1,2,... n-1]
# T.log(self.p_y_given_x) is a matrix of Log-Probabilities (call it LP) with one row per example and one column per class
# LP[T.arange(y.shape[0]),y] is a vector v containing [LP[0,y[0]], LP[1,y[1]], LP[2,y[2]], ..., LP[n-1,y[n-1]]] and
# T.mean(LP[T.arange(y.shape[0]),y]) is the mean (across minibatch examples) of the elements in v, i.e., the mean log-likelihood across the minibatch.
#print "y.ndim = ",y.ndim
return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])
# end-snippet-2
def errors(self, y):
"""Return a float representing the number of errors in the minibatch over the total number of examples of the minibatch ; zero one
loss over the size of the minibatch
:type y: theano.tensor.TensorType
:param y: corresponds to a vector that gives for each example the
correct label
"""
# check if y has same dimension of y_pred
if y.ndim != self.y_pred.ndim:
raise TypeError( 'y should have the same shape as self.y_pred', ('y', y.type, 'y_pred', self.y_pred.type) )
# check if y is of the correct datatype
if y.dtype.startswith('int'):
# the T.neq operator returns a vector of 0s and 1s, where 1
# represents a mistake in prediction
return T.mean(T.neq(self.y_pred, y))
else:
raise NotImplementedError()
def sgd_optimization(data_type, target, model_dir, learning_rate=0.1, n_epochs=10, batch_size=100):
"""
Demonstrate stochastic gradient descent optimization of a log-linear model
:type learning_rate: float
:param learning_rate: learning rate used (factor for the stochastic gradient)
:type n_epochs: int
:param n_epochs: maximal number of epochs to run the optimizer
"""
test_fold = 1 #xxxxxxxxxxxx TEMP XXXXXXXXXXXXXXXX
write_model_file = model_dir + '/model.' + target + '.' + str(test_fold) +'.pkl'
fold_path = helpers.get_fold_path(data_type)
targets = helpers.build_targets(fold_path, data_type)
fnames = targets[target]
fold_accuracies = {}
did_something = False
# pct_ct = []
# roc_auc = []
# run 4 folds vs 1 fold with each possible scenario
# for curr_fl in range(5):
# print 'Building data for target: ' + target + ', fold: ' + str(curr_fl)
# loop through all folds, for now just do 1!
datasets, test_set_labels = helpers.th_load_data(data_type, fold_path, target, fnames, 0, test_fold)
train_set_x, train_set_y = datasets[0]
test_set_x, test_set_y = datasets[1]
valid_set_x = train_set_x
valid_set_y = train_set_y
# compute number of rows for training, validation and testing
rows_train = train_set_x.get_value(borrow=True).shape[0]
rows_valid = valid_set_x.get_value(borrow=True).shape[0]
rows_test = test_set_x.get_value(borrow=True).shape[0]
# compute number of minibatches for training, validation and testing
n_train_batches = rows_train / batch_size
n_valid_batches = rows_valid / batch_size
n_test_batches = rows_test / batch_size
####################### BUILD ACTUAL MODEL #######################
# allocate symbolic variables for the data
index = T.lscalar() # index to a [mini]batch
# generate symbolic variables for input (x and y represent a minibatch)
x = T.matrix('x') # data, presented as rasterized images
y = T.ivector('y') # labels, presented as 1D vector of [int] labels
# construct the logistic regression class
# n_in: Each MNIST image has size 32*32 = 1024
# n_out: 10 different digits - multi-task LR
classifier = LogisticRegression(input=x, n_in=32 * 32, n_out=2)
# the cost we minimize during training is the negative log likelihood of the model in symbolic format
cost = classifier.negative_log_likelihood(y)
# compiling a Theano function that computes the mistakes that are made by the model on a minibatch
test_model = theano.function( inputs=[index], outputs=classifier.errors(y),
givens={
x: test_set_x[index * batch_size: (index + 1) * batch_size],
y: test_set_y[index * batch_size: (index + 1) * batch_size]
}
)
validate_model = theano.function(
inputs=[index],
outputs=classifier.errors(y),
givens={
x: valid_set_x[index * batch_size: (index + 1) * batch_size],
y: valid_set_y[index * batch_size: (index + 1) * batch_size]
}
)
# compute the gradient of cost with respect to theta = (W,b)
g_W = T.grad(cost=cost, wrt=classifier.W)
g_b = T.grad(cost=cost, wrt=classifier.b)
# start-snippet-3
# specify how to update the parameters of the model as a list of
# (variable, update expression) pairs.
updates = [(classifier.W, classifier.W - learning_rate * g_W),
(classifier.b, classifier.b - learning_rate * g_b)]
# compiling a Theano function `train_model` that returns the cost, but in
# the same time updates the parameter of the model based on the rules
# defined in `updates`
train_model = theano.function( inputs=[index], outputs=cost, updates=updates,
givens={
x: train_set_x[index * batch_size: (index + 1) * batch_size],
y: train_set_y[index * batch_size: (index + 1) * batch_size]
}
)
# end-snippet-3
################ TRAIN MODEL ################
# early-stopping parameters
patience = 5000 # look as this many examples regardless
patience_increase = 2 # wait this much longer when a new best is found
improvement_threshold = 0.995 # a relative improvement of this much is considered significant
validation_frequency = min(n_train_batches, patience / 2)
# go through this many minibatches before checking the network on the validation set; in this case we check every epoch
best_validation_loss = numpy.inf
test_score = 0.
start_time = time.clock()
done_looping = False
epoch = 0
while (epoch < n_epochs) and (not done_looping):
epoch = epoch + 1
for minibatch_index in xrange(n_train_batches):
minibatch_avg_cost = train_model(minibatch_index)
# iteration number
iter = (epoch - 1) * n_train_batches + minibatch_index
if (iter + 1) % validation_frequency == 0:
# compute zero-one loss on validation set
validation_losses = [validate_model(i)
for i in xrange(n_valid_batches)]
this_validation_loss = numpy.mean(validation_losses)
# print( 'epoch %i, minibatch %i/%i, validation error %f %%' %
# (epoch, minibatch_index + 1, n_train_batches, this_validation_loss * 100.) )
# if we got the best validation score until now
if this_validation_loss < best_validation_loss:
#improve patience if loss improvement is good enough
if this_validation_loss < best_validation_loss * \
improvement_threshold:
patience = max(patience, iter * patience_increase)
best_validation_loss = this_validation_loss
# test it on the test set
test_losses = [test_model(i)
for i in xrange(n_test_batches)]
test_score = numpy.mean(test_losses)
# print( (' epoch %i, minibatch %i/%i, test error of best model %f %%' ) %
# ( epoch, minibatch_index + 1, n_train_batches, test_score * 100. ) )
# save the best model
with open(write_model_file, 'w') as f:
cPickle.dump(classifier, f)
if patience <= iter:
done_looping = True
break
end_time = time.clock()
print( ('Optimization complete for %d with best validation score of %f %% with test performance %f %%')
% (test_fold, best_validation_loss * 100., test_score * 100.) )
print 'The code ran for %d epochs, with %f epochs/sec' % (epoch, 1. * epoch / (end_time - start_time))
# print >> sys.stderr, ('The code for file ' + os.path.split(__file__)[1] + ' ran for %.1fs' % ((end_time - start_time)))
# end-snippet-4
# Now we do the predictions
# load the saved best model for this fold
classifier = cPickle.load(open(write_model_file))
# compile a predictor function
predict_model = theano.function(inputs=[classifier.input], outputs=[classifier.y_pred,classifier.p_y_given_x])
# compile a confidence predictor function
# predict_conf_model = theano.function( inputs=[classifier.input], outputs=classifier.p_y_given_x)
# We can test it on some examples from test test
""" *************** build AUC curve *************** """
# get the probability of our predictions
test_set = test_set_x.get_value()
predicted_values, conf_preds = predict_model(test_set[:(rows_test)])
conf_predictions = []
for i in range(len(conf_preds)):
# ignore the first column; this gives a lower score that seems wrong.
conf_predictions.append(conf_preds[i][1])
# determine ROC / AUC
fpr, tpr, thresholds = metrics.roc_curve(test_set_labels, conf_predictions)
auc = metrics.auc(fpr, tpr) # e.g. 0.855
""" *********************************************** """
num_correct = 0
num_false = 0
for i in range(len(predicted_values)):
if predicted_values[i] == test_set_labels[i]:
num_correct += 1
else:
num_false += 1
total = len(predicted_values)
percent_correct = num_correct / float(total)
fold_results = ''
fold_results += '#################### Results for ' + data_type + ' ####################' + '\n'
fold_results += 'target:' + target + ' fold:' + str(test_fold) + ' predicted: ' + \
str(total) + ' wrong: ' + \
str(num_false) + ' pct correct: ' + str(percent_correct) + ', auc: ' + str(auc)
print fold_results
write_predictions_file = model_dir + '/predictions.' + target + '.' + str(test_fold) +'.txt'
with open(write_predictions_file, 'w') as f:
f.write(fold_results + "\n")
# def run_predictions(data_type, curr_target):
# fold_path = get_fold_path(data_type)
# targets = build_targets(fold_path, data_type)
# # print "Found " + str(len(targets)) + " targets for " + data_type
# fold_accuracies = {}
# did_something = False
# for target, fnames in targets.iteritems():
# if (target != curr_target):
# continue
# else:
# did_something = True
# # retrieve our stratified folds
# folds = get_folds(data_type, fold_path, target, fnames)
# pct_ct = []
# roc_auc = []
# # run 4 folds vs 1 fold with each possible scenario
# for curr_fl in range(5):
# print 'Building data for target: ' + target + ', fold: ' + str(curr_fl)
# # folds 1-4
# temp_data = []
# for i in range(len(folds)):
# if(i == curr_fl):
# # don't include the test fold
# continue
# else:
# temp_data += folds[i]
# # vs current 5th test fold
# test_data = folds[curr_fl]
# """ Turning 1024 bits into features is a slow process """
# # build training data
# X = []
# Y = []
# for i in range(len(temp_data)):
# row = []
# for bit in temp_data[i][0]:
# row.append(int(bit))
# X.append(row)
# Y.append(int(temp_data[i][1]))
# X = np.array(X)
# Y = np.array(Y)
# # build test data
# X_test = []
# Y_test = []
# for i in range(len(test_data)):
# row = []
# for bit in test_data[i][0]:
# row.append(int(bit))
# X_test.append(row)
# Y_test.append(int(test_data[i][1]))
# X_test = np.array(X_test)
|
# percent_correct, auc = random_forest(target, X, Y, X_test, Y_test, curr_fl)
# pct_ct.append(percent_correct)
# roc_auc.append(auc)
# # now get the average fold results for this target
# accuracy = sum(pct_ct) / float(len(pct_ct))
# all_auc = sum(roc_auc) / float(len(roc_auc))
# print 'Results for '+ target + ': accuracy: ' + str(accuracy) + ', auc: ' + str(all_auc)
# # update fold accuracies
# fold_accuracies[target] = (accuracy, all_auc)
if(did_something == False):
print curr_target + ' not found in ' + data_type + '!'
exit(0)
print '#################### Results for ' + data_type + ' ####################'
# output results
accuracies = 0.00
aucs = 0.00
num_targets = 0.00
for target, obj in fold_accuracies.iteritems():
acc = obj[0]
auc = obj[1]
print target + ' accuracy: ' + str(acc) + ', auc:' + str(auc)
accuracies += acc
aucs += auc
num_targets += 1
# overall_acc = accuracies / num_targets
# overall_auc = aucs / num_targets
# print ' overall accuracy: ' + str(overall_acc) + ', overall auc: ' + str(overall_auc)
print '############################################################'
def main(args):
if(len(args) < 3 or len(args[2]) < 1):
print 'usage: <tox21, dud_e, muv, or pcba> <target> '
return
dataset = args[1]
target = args[2]
# in case of typos
if(dataset == 'dude'):
dataset = 'dud_e'
print "Running Theano Logistic Regression for " \
+ dataset + "........."
is_numeric = helpers.is_numeric(target)
if(is_numeric):
target_list = helpers.get_target_list(dataset)
target = target_list[int(target)]
model_dir = 'theano_saved/logistic_regression'
if(dataset == 'tox21'):
sgd_optimization('Tox21', target, model_dir)
elif(dataset == 'dud_e'):
sgd_optimization('DUD-E', target, model_dir)
elif(dataset == 'muv'):
sgd_optimization('MUV', target, model_dir)
elif(dataset == 'pcba'):
sgd_optimization('PCBA', target, model_dir)
else:
print 'dataset param not found. options: tox21, dud_e, muv, or pcba'
if __name__ == '__main__':
start_time = time.clock()
main(sys.argv)
end_time = time.clock()
print 'runtime: %.2f secs.' % (end_time - start_time) | # Y_test = np.array(Y_test)
| random_line_split |
th_logistic_regression.py | """
**************************************************************************
Theano Logistic Regression
**************************************************************************
This version was just for local testing (Vee ran his version for our SBEL batch jobs)
@author: Jason Feriante <feriante@cs.wisc.edu>
@date: 10 July 2015
**************************************************************************
logistic regression using Theano and stochastic gradient descent. Logistic regression is a
probabilistic, linear classifier. It is parametrized by a weight matrix :math:`W` and a bias vector :math:`b`.
Classification is done by projecting data points onto a set of hyperplanes, the distance to
which is used to determine a class membership probability. Mathematically, this can be written as:
.. math::
P(Y=i|x, W,b) &= softmax_i(W x + b) \\
&= \frac {e^{W_i x + b_i}} {\sum_j e^{W_j x + b_j}}
The output of the model or prediction is then done by taking the argmax of the vector whose i'th element is P(Y=i|x).
.. math:: y_{pred} = argmax_i P(Y=i|x,W,b)
This tutorial presents a stochastic gradient descent optimization method suitable for large datasets.
"""
import cPickle, time, os, sys, numpy, theano
from sklearn import metrics
import theano.tensor as T
from lib.theano import helpers
class LogisticRegression(object):
"""Multi-class Logistic Regression Class
The logistic regression is fully described by a weight matrix :math:`W` and bias vector :math:`b`.
Classification is done by projecting data points onto a set of hyperplanes, the distance to which is used to
determine a class membership probability.
"""
def __init__(self, input, n_in, n_out):
|
def negative_log_likelihood(self, y):
"""Return the mean of the negative log-likelihood of the prediction of this model under a given target distribution.
.. math::
\frac{1}{|\mathcal{D}|} \mathcal{L} (\theta=\{W,b\}, \mathcal{D}) =
\frac{1}{|\mathcal{D}|} \sum_{i=0}^{|\mathcal{D}|}
\log(P(Y=y^{(i)}|x^{(i)}, W,b)) \\
\ell (\theta=\{W,b\}, \mathcal{D})
:type y: theano.tensor.TensorType
:param y: corresponds to a vector that gives for each example the
correct label
Note: we use the mean instead of the sum so that the learning rate is less dependent on the batch size
"""
# start-snippet-2
# y.shape[0] is (symbolically) the number of rows in y, i.e. number of examples (call it n) in the minibatch
# T.arange(y.shape[0]) is a symbolic vector which will contain [0,1,2,... n-1]
# T.log(self.p_y_given_x) is a matrix of Log-Probabilities (call it LP) with one row per example and one column per class
# LP[T.arange(y.shape[0]),y] is a vector v containing [LP[0,y[0]], LP[1,y[1]], LP[2,y[2]], ..., LP[n-1,y[n-1]]] and
# T.mean(LP[T.arange(y.shape[0]),y]) is the mean (across minibatch examples) of the elements in v, i.e., the mean log-likelihood across the minibatch.
#print "y.ndim = ",y.ndim
return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])
# end-snippet-2
def errors(self, y):
"""Return a float representing the number of errors in the minibatch over the total number of examples of the minibatch ; zero one
loss over the size of the minibatch
:type y: theano.tensor.TensorType
:param y: corresponds to a vector that gives for each example the
correct label
"""
# check if y has same dimension of y_pred
if y.ndim != self.y_pred.ndim:
raise TypeError( 'y should have the same shape as self.y_pred', ('y', y.type, 'y_pred', self.y_pred.type) )
# check if y is of the correct datatype
if y.dtype.startswith('int'):
# the T.neq operator returns a vector of 0s and 1s, where 1
# represents a mistake in prediction
return T.mean(T.neq(self.y_pred, y))
else:
raise NotImplementedError()
def sgd_optimization(data_type, target, model_dir, learning_rate=0.1, n_epochs=10, batch_size=100):
"""
Demonstrate stochastic gradient descent optimization of a log-linear model
:type learning_rate: float
:param learning_rate: learning rate used (factor for the stochastic gradient)
:type n_epochs: int
:param n_epochs: maximal number of epochs to run the optimizer
"""
test_fold = 1 #xxxxxxxxxxxx TEMP XXXXXXXXXXXXXXXX
write_model_file = model_dir + '/model.' + target + '.' + str(test_fold) +'.pkl'
fold_path = helpers.get_fold_path(data_type)
targets = helpers.build_targets(fold_path, data_type)
fnames = targets[target]
fold_accuracies = {}
did_something = False
# pct_ct = []
# roc_auc = []
# run 4 folds vs 1 fold with each possible scenario
# for curr_fl in range(5):
# print 'Building data for target: ' + target + ', fold: ' + str(curr_fl)
# loop through all folds, for now just do 1!
datasets, test_set_labels = helpers.th_load_data(data_type, fold_path, target, fnames, 0, test_fold)
train_set_x, train_set_y = datasets[0]
test_set_x, test_set_y = datasets[1]
valid_set_x = train_set_x
valid_set_y = train_set_y
# compute number of rows for training, validation and testing
rows_train = train_set_x.get_value(borrow=True).shape[0]
rows_valid = valid_set_x.get_value(borrow=True).shape[0]
rows_test = test_set_x.get_value(borrow=True).shape[0]
# compute number of minibatches for training, validation and testing
n_train_batches = rows_train / batch_size
n_valid_batches = rows_valid / batch_size
n_test_batches = rows_test / batch_size
####################### BUILD ACTUAL MODEL #######################
# allocate symbolic variables for the data
index = T.lscalar() # index to a [mini]batch
# generate symbolic variables for input (x and y represent a minibatch)
x = T.matrix('x') # data, presented as rasterized images
y = T.ivector('y') # labels, presented as 1D vector of [int] labels
# construct the logistic regression class
# n_in: Each MNIST image has size 32*32 = 1024
# n_out: 10 different digits - multi-task LR
classifier = LogisticRegression(input=x, n_in=32 * 32, n_out=2)
# the cost we minimize during training is the negative log likelihood of the model in symbolic format
cost = classifier.negative_log_likelihood(y)
# compiling a Theano function that computes the mistakes that are made by the model on a minibatch
test_model = theano.function( inputs=[index], outputs=classifier.errors(y),
givens={
x: test_set_x[index * batch_size: (index + 1) * batch_size],
y: test_set_y[index * batch_size: (index + 1) * batch_size]
}
)
validate_model = theano.function(
inputs=[index],
outputs=classifier.errors(y),
givens={
x: valid_set_x[index * batch_size: (index + 1) * batch_size],
y: valid_set_y[index * batch_size: (index + 1) * batch_size]
}
)
# compute the gradient of cost with respect to theta = (W,b)
g_W = T.grad(cost=cost, wrt=classifier.W)
g_b = T.grad(cost=cost, wrt=classifier.b)
# start-snippet-3
# specify how to update the parameters of the model as a list of
# (variable, update expression) pairs.
updates = [(classifier.W, classifier.W - learning_rate * g_W),
(classifier.b, classifier.b - learning_rate * g_b)]
# compiling a Theano function `train_model` that returns the cost, but in
# the same time updates the parameter of the model based on the rules
# defined in `updates`
train_model = theano.function( inputs=[index], outputs=cost, updates=updates,
givens={
x: train_set_x[index * batch_size: (index + 1) * batch_size],
y: train_set_y[index * batch_size: (index + 1) * batch_size]
}
)
# end-snippet-3
################ TRAIN MODEL ################
# early-stopping parameters
patience = 5000 # look as this many examples regardless
patience_increase = 2 # wait this much longer when a new best is found
improvement_threshold = 0.995 # a relative improvement of this much is considered significant
validation_frequency = min(n_train_batches, patience / 2)
# go through this many minibatches before checking the network on the validation set; in this case we check every epoch
best_validation_loss = numpy.inf
test_score = 0.
start_time = time.clock()
done_looping = False
epoch = 0
while (epoch < n_epochs) and (not done_looping):
epoch = epoch + 1
for minibatch_index in xrange(n_train_batches):
minibatch_avg_cost = train_model(minibatch_index)
# iteration number
iter = (epoch - 1) * n_train_batches + minibatch_index
if (iter + 1) % validation_frequency == 0:
# compute zero-one loss on validation set
validation_losses = [validate_model(i)
for i in xrange(n_valid_batches)]
this_validation_loss = numpy.mean(validation_losses)
# print( 'epoch %i, minibatch %i/%i, validation error %f %%' %
# (epoch, minibatch_index + 1, n_train_batches, this_validation_loss * 100.) )
# if we got the best validation score until now
if this_validation_loss < best_validation_loss:
#improve patience if loss improvement is good enough
if this_validation_loss < best_validation_loss * \
improvement_threshold:
patience = max(patience, iter * patience_increase)
best_validation_loss = this_validation_loss
# test it on the test set
test_losses = [test_model(i)
for i in xrange(n_test_batches)]
test_score = numpy.mean(test_losses)
# print( (' epoch %i, minibatch %i/%i, test error of best model %f %%' ) %
# ( epoch, minibatch_index + 1, n_train_batches, test_score * 100. ) )
# save the best model
with open(write_model_file, 'w') as f:
cPickle.dump(classifier, f)
if patience <= iter:
done_looping = True
break
end_time = time.clock()
print( ('Optimization complete for %d with best validation score of %f %% with test performance %f %%')
% (test_fold, best_validation_loss * 100., test_score * 100.) )
print 'The code ran for %d epochs, with %f epochs/sec' % (epoch, 1. * epoch / (end_time - start_time))
# print >> sys.stderr, ('The code for file ' + os.path.split(__file__)[1] + ' ran for %.1fs' % ((end_time - start_time)))
# end-snippet-4
# Now we do the predictions
# load the saved best model for this fold
classifier = cPickle.load(open(write_model_file))
# compile a predictor function
predict_model = theano.function(inputs=[classifier.input], outputs=[classifier.y_pred,classifier.p_y_given_x])
# compile a confidence predictor function
# predict_conf_model = theano.function( inputs=[classifier.input], outputs=classifier.p_y_given_x)
# We can test it on some examples from test test
""" *************** build AUC curve *************** """
# get the probability of our predictions
test_set = test_set_x.get_value()
predicted_values, conf_preds = predict_model(test_set[:(rows_test)])
conf_predictions = []
for i in range(len(conf_preds)):
# ignore the first column; this gives a lower score that seems wrong.
conf_predictions.append(conf_preds[i][1])
# determine ROC / AUC
fpr, tpr, thresholds = metrics.roc_curve(test_set_labels, conf_predictions)
auc = metrics.auc(fpr, tpr) # e.g. 0.855
""" *********************************************** """
num_correct = 0
num_false = 0
for i in range(len(predicted_values)):
if predicted_values[i] == test_set_labels[i]:
num_correct += 1
else:
num_false += 1
total = len(predicted_values)
percent_correct = num_correct / float(total)
fold_results = ''
fold_results += '#################### Results for ' + data_type + ' ####################' + '\n'
fold_results += 'target:' + target + ' fold:' + str(test_fold) + ' predicted: ' + \
str(total) + ' wrong: ' + \
str(num_false) + ' pct correct: ' + str(percent_correct) + ', auc: ' + str(auc)
print fold_results
write_predictions_file = model_dir + '/predictions.' + target + '.' + str(test_fold) +'.txt'
with open(write_predictions_file, 'w') as f:
f.write(fold_results + "\n")
# def run_predictions(data_type, curr_target):
# fold_path = get_fold_path(data_type)
# targets = build_targets(fold_path, data_type)
# # print "Found " + str(len(targets)) + " targets for " + data_type
# fold_accuracies = {}
# did_something = False
# for target, fnames in targets.iteritems():
# if (target != curr_target):
# continue
# else:
# did_something = True
# # retrieve our stratified folds
# folds = get_folds(data_type, fold_path, target, fnames)
# pct_ct = []
# roc_auc = []
# # run 4 folds vs 1 fold with each possible scenario
# for curr_fl in range(5):
# print 'Building data for target: ' + target + ', fold: ' + str(curr_fl)
# # folds 1-4
# temp_data = []
# for i in range(len(folds)):
# if(i == curr_fl):
# # don't include the test fold
# continue
# else:
# temp_data += folds[i]
# # vs current 5th test fold
# test_data = folds[curr_fl]
# """ Turning 1024 bits into features is a slow process """
# # build training data
# X = []
# Y = []
# for i in range(len(temp_data)):
# row = []
# for bit in temp_data[i][0]:
# row.append(int(bit))
# X.append(row)
# Y.append(int(temp_data[i][1]))
# X = np.array(X)
# Y = np.array(Y)
# # build test data
# X_test = []
# Y_test = []
# for i in range(len(test_data)):
# row = []
# for bit in test_data[i][0]:
# row.append(int(bit))
# X_test.append(row)
# Y_test.append(int(test_data[i][1]))
# X_test = np.array(X_test)
# Y_test = np.array(Y_test)
# percent_correct, auc = random_forest(target, X, Y, X_test, Y_test, curr_fl)
# pct_ct.append(percent_correct)
# roc_auc.append(auc)
# # now get the average fold results for this target
# accuracy = sum(pct_ct) / float(len(pct_ct))
# all_auc = sum(roc_auc) / float(len(roc_auc))
# print 'Results for '+ target + ': accuracy: ' + str(accuracy) + ', auc: ' + str(all_auc)
# # update fold accuracies
# fold_accuracies[target] = (accuracy, all_auc)
if(did_something == False):
print curr_target + ' not found in ' + data_type + '!'
exit(0)
print '#################### Results for ' + data_type + ' ####################'
# output results
accuracies = 0.00
aucs = 0.00
num_targets = 0.00
for target, obj in fold_accuracies.iteritems():
acc = obj[0]
auc = obj[1]
print target + ' accuracy: ' + str(acc) + ', auc:' + str(auc)
accuracies += acc
aucs += auc
num_targets += 1
# overall_acc = accuracies / num_targets
# overall_auc = aucs / num_targets
# print ' overall accuracy: ' + str(overall_acc) + ', overall auc: ' + str(overall_auc)
print '############################################################'
def main(args):
if(len(args) < 3 or len(args[2]) < 1):
print 'usage: <tox21, dud_e, muv, or pcba> <target> '
return
dataset = args[1]
target = args[2]
# in case of typos
if(dataset == 'dude'):
dataset = 'dud_e'
print "Running Theano Logistic Regression for " \
+ dataset + "........."
is_numeric = helpers.is_numeric(target)
if(is_numeric):
target_list = helpers.get_target_list(dataset)
target = target_list[int(target)]
model_dir = 'theano_saved/logistic_regression'
if(dataset == 'tox21'):
sgd_optimization('Tox21', target, model_dir)
elif(dataset == 'dud_e'):
sgd_optimization('DUD-E', target, model_dir)
elif(dataset == 'muv'):
sgd_optimization('MUV', target, model_dir)
elif(dataset == 'pcba'):
sgd_optimization('PCBA', target, model_dir)
else:
print 'dataset param not found. options: tox21, dud_e, muv, or pcba'
if __name__ == '__main__':
start_time = time.clock()
main(sys.argv)
end_time = time.clock()
print 'runtime: %.2f secs.' % (end_time - start_time)
| """ Initialize the parameters of the logistic regression
:type input: theano.tensor.TensorType
:param input: symbolic variable that describes the input of the architecture (one minibatch)
:type n_in: int
:param n_in: number of input units, the dimension of the space in which the datapoints lie
:type n_out: int
:param n_out: number of output units, the dimension of the space in which the labels lie
"""
# start-snippet-1
# initialize with 0 the weights W as a matrix of shape (n_in, n_out)
self.W = theano.shared( value=numpy.zeros( (n_in, n_out), dtype=theano.config.floatX ), name='W', borrow=True )
# initialize the baises b as a vector of n_out 0s
self.b = theano.shared(value=numpy.zeros( (n_out,), dtype=theano.config.floatX ), name='b', borrow=True )
# symbolic expression for computing the matrix of class-membership probabilities where:
# W is a matrix where column-k represent the separation hyper plain for class-k
# x is a matrix where row-j represents input training sample-j
# b is a vector where element-k represent the free parameter of hyper plane-k
self.p_y_given_x = T.nnet.softmax(T.dot(input, self.W) + self.b)
# symbolic description of how to compute prediction as class whose probability is maximal
self.y_pred = T.argmax(self.p_y_given_x, axis=1)
# end-snippet-1
# parameters of the model
self.params = [self.W, self.b]
# keep track of model input
self.input = input | identifier_body |
th_logistic_regression.py | """
**************************************************************************
Theano Logistic Regression
**************************************************************************
This version was just for local testing (Vee ran his version for our SBEL batch jobs)
@author: Jason Feriante <feriante@cs.wisc.edu>
@date: 10 July 2015
**************************************************************************
logistic regression using Theano and stochastic gradient descent. Logistic regression is a
probabilistic, linear classifier. It is parametrized by a weight matrix :math:`W` and a bias vector :math:`b`.
Classification is done by projecting data points onto a set of hyperplanes, the distance to
which is used to determine a class membership probability. Mathematically, this can be written as:
.. math::
P(Y=i|x, W,b) &= softmax_i(W x + b) \\
&= \frac {e^{W_i x + b_i}} {\sum_j e^{W_j x + b_j}}
The output of the model or prediction is then done by taking the argmax of the vector whose i'th element is P(Y=i|x).
.. math:: y_{pred} = argmax_i P(Y=i|x,W,b)
This tutorial presents a stochastic gradient descent optimization method suitable for large datasets.
"""
import cPickle, time, os, sys, numpy, theano
from sklearn import metrics
import theano.tensor as T
from lib.theano import helpers
class LogisticRegression(object):
"""Multi-class Logistic Regression Class
The logistic regression is fully described by a weight matrix :math:`W` and bias vector :math:`b`.
Classification is done by projecting data points onto a set of hyperplanes, the distance to which is used to
determine a class membership probability.
"""
def | (self, input, n_in, n_out):
""" Initialize the parameters of the logistic regression
:type input: theano.tensor.TensorType
:param input: symbolic variable that describes the input of the architecture (one minibatch)
:type n_in: int
:param n_in: number of input units, the dimension of the space in which the datapoints lie
:type n_out: int
:param n_out: number of output units, the dimension of the space in which the labels lie
"""
# start-snippet-1
# initialize with 0 the weights W as a matrix of shape (n_in, n_out)
self.W = theano.shared( value=numpy.zeros( (n_in, n_out), dtype=theano.config.floatX ), name='W', borrow=True )
# initialize the baises b as a vector of n_out 0s
self.b = theano.shared(value=numpy.zeros( (n_out,), dtype=theano.config.floatX ), name='b', borrow=True )
# symbolic expression for computing the matrix of class-membership probabilities where:
# W is a matrix where column-k represent the separation hyper plain for class-k
# x is a matrix where row-j represents input training sample-j
# b is a vector where element-k represent the free parameter of hyper plane-k
self.p_y_given_x = T.nnet.softmax(T.dot(input, self.W) + self.b)
# symbolic description of how to compute prediction as class whose probability is maximal
self.y_pred = T.argmax(self.p_y_given_x, axis=1)
# end-snippet-1
# parameters of the model
self.params = [self.W, self.b]
# keep track of model input
self.input = input
def negative_log_likelihood(self, y):
"""Return the mean of the negative log-likelihood of the prediction of this model under a given target distribution.
.. math::
\frac{1}{|\mathcal{D}|} \mathcal{L} (\theta=\{W,b\}, \mathcal{D}) =
\frac{1}{|\mathcal{D}|} \sum_{i=0}^{|\mathcal{D}|}
\log(P(Y=y^{(i)}|x^{(i)}, W,b)) \\
\ell (\theta=\{W,b\}, \mathcal{D})
:type y: theano.tensor.TensorType
:param y: corresponds to a vector that gives for each example the
correct label
Note: we use the mean instead of the sum so that the learning rate is less dependent on the batch size
"""
# start-snippet-2
# y.shape[0] is (symbolically) the number of rows in y, i.e. number of examples (call it n) in the minibatch
# T.arange(y.shape[0]) is a symbolic vector which will contain [0,1,2,... n-1]
# T.log(self.p_y_given_x) is a matrix of Log-Probabilities (call it LP) with one row per example and one column per class
# LP[T.arange(y.shape[0]),y] is a vector v containing [LP[0,y[0]], LP[1,y[1]], LP[2,y[2]], ..., LP[n-1,y[n-1]]] and
# T.mean(LP[T.arange(y.shape[0]),y]) is the mean (across minibatch examples) of the elements in v, i.e., the mean log-likelihood across the minibatch.
#print "y.ndim = ",y.ndim
return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])
# end-snippet-2
def errors(self, y):
"""Return a float representing the number of errors in the minibatch over the total number of examples of the minibatch ; zero one
loss over the size of the minibatch
:type y: theano.tensor.TensorType
:param y: corresponds to a vector that gives for each example the
correct label
"""
# check if y has same dimension of y_pred
if y.ndim != self.y_pred.ndim:
raise TypeError( 'y should have the same shape as self.y_pred', ('y', y.type, 'y_pred', self.y_pred.type) )
# check if y is of the correct datatype
if y.dtype.startswith('int'):
# the T.neq operator returns a vector of 0s and 1s, where 1
# represents a mistake in prediction
return T.mean(T.neq(self.y_pred, y))
else:
raise NotImplementedError()
def sgd_optimization(data_type, target, model_dir, learning_rate=0.1, n_epochs=10, batch_size=100):
"""
Demonstrate stochastic gradient descent optimization of a log-linear model
:type learning_rate: float
:param learning_rate: learning rate used (factor for the stochastic gradient)
:type n_epochs: int
:param n_epochs: maximal number of epochs to run the optimizer
"""
test_fold = 1 #xxxxxxxxxxxx TEMP XXXXXXXXXXXXXXXX
write_model_file = model_dir + '/model.' + target + '.' + str(test_fold) +'.pkl'
fold_path = helpers.get_fold_path(data_type)
targets = helpers.build_targets(fold_path, data_type)
fnames = targets[target]
fold_accuracies = {}
did_something = False
# pct_ct = []
# roc_auc = []
# run 4 folds vs 1 fold with each possible scenario
# for curr_fl in range(5):
# print 'Building data for target: ' + target + ', fold: ' + str(curr_fl)
# loop through all folds, for now just do 1!
datasets, test_set_labels = helpers.th_load_data(data_type, fold_path, target, fnames, 0, test_fold)
train_set_x, train_set_y = datasets[0]
test_set_x, test_set_y = datasets[1]
valid_set_x = train_set_x
valid_set_y = train_set_y
# compute number of rows for training, validation and testing
rows_train = train_set_x.get_value(borrow=True).shape[0]
rows_valid = valid_set_x.get_value(borrow=True).shape[0]
rows_test = test_set_x.get_value(borrow=True).shape[0]
# compute number of minibatches for training, validation and testing
n_train_batches = rows_train / batch_size
n_valid_batches = rows_valid / batch_size
n_test_batches = rows_test / batch_size
####################### BUILD ACTUAL MODEL #######################
# allocate symbolic variables for the data
index = T.lscalar() # index to a [mini]batch
# generate symbolic variables for input (x and y represent a minibatch)
x = T.matrix('x') # data, presented as rasterized images
y = T.ivector('y') # labels, presented as 1D vector of [int] labels
# construct the logistic regression class
# n_in: Each MNIST image has size 32*32 = 1024
# n_out: 10 different digits - multi-task LR
classifier = LogisticRegression(input=x, n_in=32 * 32, n_out=2)
# the cost we minimize during training is the negative log likelihood of the model in symbolic format
cost = classifier.negative_log_likelihood(y)
# compiling a Theano function that computes the mistakes that are made by the model on a minibatch
test_model = theano.function( inputs=[index], outputs=classifier.errors(y),
givens={
x: test_set_x[index * batch_size: (index + 1) * batch_size],
y: test_set_y[index * batch_size: (index + 1) * batch_size]
}
)
validate_model = theano.function(
inputs=[index],
outputs=classifier.errors(y),
givens={
x: valid_set_x[index * batch_size: (index + 1) * batch_size],
y: valid_set_y[index * batch_size: (index + 1) * batch_size]
}
)
# compute the gradient of cost with respect to theta = (W,b)
g_W = T.grad(cost=cost, wrt=classifier.W)
g_b = T.grad(cost=cost, wrt=classifier.b)
# start-snippet-3
# specify how to update the parameters of the model as a list of
# (variable, update expression) pairs.
updates = [(classifier.W, classifier.W - learning_rate * g_W),
(classifier.b, classifier.b - learning_rate * g_b)]
# compiling a Theano function `train_model` that returns the cost, but in
# the same time updates the parameter of the model based on the rules
# defined in `updates`
train_model = theano.function( inputs=[index], outputs=cost, updates=updates,
givens={
x: train_set_x[index * batch_size: (index + 1) * batch_size],
y: train_set_y[index * batch_size: (index + 1) * batch_size]
}
)
# end-snippet-3
################ TRAIN MODEL ################
# early-stopping parameters
patience = 5000 # look as this many examples regardless
patience_increase = 2 # wait this much longer when a new best is found
improvement_threshold = 0.995 # a relative improvement of this much is considered significant
validation_frequency = min(n_train_batches, patience / 2)
# go through this many minibatches before checking the network on the validation set; in this case we check every epoch
best_validation_loss = numpy.inf
test_score = 0.
start_time = time.clock()
done_looping = False
epoch = 0
while (epoch < n_epochs) and (not done_looping):
epoch = epoch + 1
for minibatch_index in xrange(n_train_batches):
minibatch_avg_cost = train_model(minibatch_index)
# iteration number
iter = (epoch - 1) * n_train_batches + minibatch_index
if (iter + 1) % validation_frequency == 0:
# compute zero-one loss on validation set
validation_losses = [validate_model(i)
for i in xrange(n_valid_batches)]
this_validation_loss = numpy.mean(validation_losses)
# print( 'epoch %i, minibatch %i/%i, validation error %f %%' %
# (epoch, minibatch_index + 1, n_train_batches, this_validation_loss * 100.) )
# if we got the best validation score until now
if this_validation_loss < best_validation_loss:
#improve patience if loss improvement is good enough
if this_validation_loss < best_validation_loss * \
improvement_threshold:
patience = max(patience, iter * patience_increase)
best_validation_loss = this_validation_loss
# test it on the test set
test_losses = [test_model(i)
for i in xrange(n_test_batches)]
test_score = numpy.mean(test_losses)
# print( (' epoch %i, minibatch %i/%i, test error of best model %f %%' ) %
# ( epoch, minibatch_index + 1, n_train_batches, test_score * 100. ) )
# save the best model
with open(write_model_file, 'w') as f:
cPickle.dump(classifier, f)
if patience <= iter:
done_looping = True
break
end_time = time.clock()
print( ('Optimization complete for %d with best validation score of %f %% with test performance %f %%')
% (test_fold, best_validation_loss * 100., test_score * 100.) )
print 'The code ran for %d epochs, with %f epochs/sec' % (epoch, 1. * epoch / (end_time - start_time))
# print >> sys.stderr, ('The code for file ' + os.path.split(__file__)[1] + ' ran for %.1fs' % ((end_time - start_time)))
# end-snippet-4
# Now we do the predictions
# load the saved best model for this fold
classifier = cPickle.load(open(write_model_file))
# compile a predictor function
predict_model = theano.function(inputs=[classifier.input], outputs=[classifier.y_pred,classifier.p_y_given_x])
# compile a confidence predictor function
# predict_conf_model = theano.function( inputs=[classifier.input], outputs=classifier.p_y_given_x)
# We can test it on some examples from test test
""" *************** build AUC curve *************** """
# get the probability of our predictions
test_set = test_set_x.get_value()
predicted_values, conf_preds = predict_model(test_set[:(rows_test)])
conf_predictions = []
for i in range(len(conf_preds)):
# ignore the first column; this gives a lower score that seems wrong.
conf_predictions.append(conf_preds[i][1])
# determine ROC / AUC
fpr, tpr, thresholds = metrics.roc_curve(test_set_labels, conf_predictions)
auc = metrics.auc(fpr, tpr) # e.g. 0.855
""" *********************************************** """
num_correct = 0
num_false = 0
for i in range(len(predicted_values)):
if predicted_values[i] == test_set_labels[i]:
num_correct += 1
else:
num_false += 1
total = len(predicted_values)
percent_correct = num_correct / float(total)
fold_results = ''
fold_results += '#################### Results for ' + data_type + ' ####################' + '\n'
fold_results += 'target:' + target + ' fold:' + str(test_fold) + ' predicted: ' + \
str(total) + ' wrong: ' + \
str(num_false) + ' pct correct: ' + str(percent_correct) + ', auc: ' + str(auc)
print fold_results
write_predictions_file = model_dir + '/predictions.' + target + '.' + str(test_fold) +'.txt'
with open(write_predictions_file, 'w') as f:
f.write(fold_results + "\n")
# def run_predictions(data_type, curr_target):
# fold_path = get_fold_path(data_type)
# targets = build_targets(fold_path, data_type)
# # print "Found " + str(len(targets)) + " targets for " + data_type
# fold_accuracies = {}
# did_something = False
# for target, fnames in targets.iteritems():
# if (target != curr_target):
# continue
# else:
# did_something = True
# # retrieve our stratified folds
# folds = get_folds(data_type, fold_path, target, fnames)
# pct_ct = []
# roc_auc = []
# # run 4 folds vs 1 fold with each possible scenario
# for curr_fl in range(5):
# print 'Building data for target: ' + target + ', fold: ' + str(curr_fl)
# # folds 1-4
# temp_data = []
# for i in range(len(folds)):
# if(i == curr_fl):
# # don't include the test fold
# continue
# else:
# temp_data += folds[i]
# # vs current 5th test fold
# test_data = folds[curr_fl]
# """ Turning 1024 bits into features is a slow process """
# # build training data
# X = []
# Y = []
# for i in range(len(temp_data)):
# row = []
# for bit in temp_data[i][0]:
# row.append(int(bit))
# X.append(row)
# Y.append(int(temp_data[i][1]))
# X = np.array(X)
# Y = np.array(Y)
# # build test data
# X_test = []
# Y_test = []
# for i in range(len(test_data)):
# row = []
# for bit in test_data[i][0]:
# row.append(int(bit))
# X_test.append(row)
# Y_test.append(int(test_data[i][1]))
# X_test = np.array(X_test)
# Y_test = np.array(Y_test)
# percent_correct, auc = random_forest(target, X, Y, X_test, Y_test, curr_fl)
# pct_ct.append(percent_correct)
# roc_auc.append(auc)
# # now get the average fold results for this target
# accuracy = sum(pct_ct) / float(len(pct_ct))
# all_auc = sum(roc_auc) / float(len(roc_auc))
# print 'Results for '+ target + ': accuracy: ' + str(accuracy) + ', auc: ' + str(all_auc)
# # update fold accuracies
# fold_accuracies[target] = (accuracy, all_auc)
if(did_something == False):
print curr_target + ' not found in ' + data_type + '!'
exit(0)
print '#################### Results for ' + data_type + ' ####################'
# output results
accuracies = 0.00
aucs = 0.00
num_targets = 0.00
for target, obj in fold_accuracies.iteritems():
acc = obj[0]
auc = obj[1]
print target + ' accuracy: ' + str(acc) + ', auc:' + str(auc)
accuracies += acc
aucs += auc
num_targets += 1
# overall_acc = accuracies / num_targets
# overall_auc = aucs / num_targets
# print ' overall accuracy: ' + str(overall_acc) + ', overall auc: ' + str(overall_auc)
print '############################################################'
def main(args):
if(len(args) < 3 or len(args[2]) < 1):
print 'usage: <tox21, dud_e, muv, or pcba> <target> '
return
dataset = args[1]
target = args[2]
# in case of typos
if(dataset == 'dude'):
dataset = 'dud_e'
print "Running Theano Logistic Regression for " \
+ dataset + "........."
is_numeric = helpers.is_numeric(target)
if(is_numeric):
target_list = helpers.get_target_list(dataset)
target = target_list[int(target)]
model_dir = 'theano_saved/logistic_regression'
if(dataset == 'tox21'):
sgd_optimization('Tox21', target, model_dir)
elif(dataset == 'dud_e'):
sgd_optimization('DUD-E', target, model_dir)
elif(dataset == 'muv'):
sgd_optimization('MUV', target, model_dir)
elif(dataset == 'pcba'):
sgd_optimization('PCBA', target, model_dir)
else:
print 'dataset param not found. options: tox21, dud_e, muv, or pcba'
if __name__ == '__main__':
start_time = time.clock()
main(sys.argv)
end_time = time.clock()
print 'runtime: %.2f secs.' % (end_time - start_time)
| __init__ | identifier_name |
schema.go | package gen
import (
"bytes"
"context"
"encoding/json"
"fmt"
"net/url"
"reflect"
"strings"
"unicode"
"unicode/utf8"
)
var knownSchemaFields = make(map[string]bool)
func init() {
for _, f := range getJSONFieldNames(Schema{}) {
knownSchemaFields[f] = true
}
}
// JSONType is the enumeration of JSONSchema's supported types.
type JSONType uint8
// Each of these is a core type of JSONSchema, except for JSONUnknown, which is a useful zero value.
const (
JSONUnknown JSONType = iota
JSONArray
JSONBoolean
JSONInteger
JSONNull
JSONNumber
JSONObject
JSONString
)
var simpleTypeNames = map[string]JSONType{
"array": JSONArray,
"boolean": JSONBoolean,
"integer": JSONInteger,
"null": JSONNull,
"number": JSONNumber,
"object": JSONObject,
"string": JSONString,
}
// TypeField wraps the type field in JSONSchema, supporting either an array of types or a single type as the metaschema
// allows
type TypeField []JSONType
// UnmarshalJSON unmarshals JSON into the TypeField
func (t *TypeField) UnmarshalJSON(b []byte) error {
var val interface{}
if err := json.Unmarshal(b, &val); err != nil {
return err
}
switch v := val.(type) {
case string:
*t = append(*t, simpleTypeNames[v])
return nil
case []interface{}:
*t = make(TypeField, 0, len(v))
for _, v := range v {
var typ JSONType
if s, ok := v.(string); ok {
typ = simpleTypeNames[s]
}
*t = append(*t, typ)
}
return nil
}
return fmt.Errorf("unable to unmarshal %T into TypeField", val)
}
// convenience method to draw out the first token; if this errs, later calls will err anyway so discards
// the err
func peekToken(data []byte) json.Token {
tok, _ := json.NewDecoder(bytes.NewReader(data)).Token()
return tok
}
// BoolOrSchema may have either a boolean or a RefOrSchema.
type BoolOrSchema struct {
Bool *bool
Schema *RefOrSchema
}
func (a *BoolOrSchema) Present() bool {
return a != nil && (a.Schema != nil || (a.Bool != nil && *a.Bool))
}
// UnmarshalJSON performs some custom deserialization of JSON into BoolOrSchema
func (a *BoolOrSchema) UnmarshalJSON(data []byte) error {
if b, ok := peekToken(data).(bool); ok {
a.Bool = &b
return nil
}
a.Schema = new(RefOrSchema)
return json.Unmarshal(data, a.Schema)
}
// ItemsField contains information indicating whether the modified array is a dynamically sized list of multiple
// types or a "tuple" -- a specifically sized array with potentially different types for each position.
type ItemsField struct {
Items *RefOrSchema
TupleFields []*RefOrSchema
}
func (i *ItemsField) Present() bool {
return i != nil && (i.Items != nil || len(i.TupleFields) > 0)
}
// UnmarshalJSON conditionally deserializes into ItemsField according to the shape of the provided JSON
func (i *ItemsField) UnmarshalJSON(data []byte) error {
if peekToken(data) == json.Delim('{') {
i.Items = new(RefOrSchema)
return json.Unmarshal(data, i.Items)
}
return json.Unmarshal(data, &i.TupleFields)
}
// TagMap contains all of the different user extended tags as json.RawMessage for later deserialization
type TagMap map[string]json.RawMessage
// GetString attempts to deserialize the value for the provided key into a string. If the key is absent or there is an
// error deserializing the value, the returned string will be empty.
func (t TagMap) GetString(k string) (s string) {
_, _ = t.Unmarshal(k, &s)
return
}
// Read unmarshals the json at the provided key into the provided interface (which should be a pointer amenable to
// json.Read. If the key is not present, the pointer will be untouched, and false and nil will be returned. If the
// deserialization fails, an error will be returned.
func (t TagMap) Unmarshal(k string, val interface{}) (bool, error) {
msg, ok := t[k]
if !ok {
return false, nil
}
err := json.Unmarshal(msg, val)
return true, err
}
// NewRefOrSchema is a convenience constructor for RefOrSchema
func NewRefOrSchema(s *Schema, ref *string) *RefOrSchema {
return &RefOrSchema{ref: ref, schema: s}
}
// RefOrSchema is either a schema or a reference to a schema.
type RefOrSchema struct {
ref *string
schema *Schema
}
// UnmarshalJSON conditionally deserializes the JSON, either into a reference or a schema.
func (r *RefOrSchema) UnmarshalJSON(b []byte) error {
var ref struct {
Ref string `json:"$ref"`
}
if err := json.Unmarshal(b, &ref); err != nil {
return fmt.Errorf("unmarshal $ref: %w", err)
}
if ref.Ref != "" {
r.ref = &ref.Ref
return nil
}
r.schema = new(Schema)
return json.Unmarshal(b, r.schema)
}
// Resolve either returns the schema if set or else resolves the reference using the referer schema and loader.
func (r *RefOrSchema) Resolve(ctx context.Context, referer *Schema, loader Loader) (*Schema, error) {
if r.ref == nil {
return r.schema, nil
}
parsed2, err := url.Parse(*r.ref)
if err != nil {
return nil, fmt.Errorf("parse $ref: %w", err)
}
return loader.Load(ctx, referer.Src.ResolveReference(parsed2))
}
// Schema is the core representation of the JSONSchema meta schema.
type Schema struct {
// this could be a ref
Ref *string `json:"$ref,omitempty"`
// meta
ID *url.URL `json:"-"` // set either from "$id", "id", or calculated based on parent (see IDCalc); never nil
IDCalc bool `json:"-"` // whether this ID was calculated
Src *url.URL `json:"-"` // the resource from which this schema was loaded; never nil
Schema string `json:"$schema,omitempty"`
// number qualifiers
MultipleOf *float64 `json:"multipleOf,omitempty"`
Maximum *float64 `json:"maximum,omitempty"`
ExclusiveMaximum *bool `json:"exclusiveMaximum,omitempty"`
Minimum *float64 `json:"minimum,omitempty"`
ExclusiveMinimum *bool `json:"exclusiveMinimum,omitempty"`
// string qualifiers
MaxLength *uint64 `json:"maxLength,omitempty"`
MinLength uint64 `json:"minLength,omitempty"`
Pattern *string `json:"pattern,omitempty"`
// array qualifiers
AdditionalItems *BoolOrSchema `json:"additionalItems,omitempty"`
Items *ItemsField `json:"items,omitempty"`
MaxItems *uint64 `json:"maxItems,omitempty"`
MinItems uint64 `json:"minItems,omitempty"`
UniqueItems bool `json:"uniqueItems,omitempty"`
// object qualifiers
MaxProperties *uint64 `json:"maxProperties,omitempty"`
MinProperties uint64 `json:"minProperties,omitempty"`
Required []string `json:"required,omitempty"`
AdditionalProperties *BoolOrSchema `json:"additionalProperties,omitempty"`
Definitions map[string]*RefOrSchema `json:"definitions,omitempty"`
Properties map[string]*RefOrSchema `json:"properties,omitempty"`
PatternProperties map[string]*RefOrSchema `json:"patternProperties,omitempty"`
Dependencies map[string]*RefOrSchema `json:"dependencies,omitempty"`
// extra special
Enum []interface{} `json:"enum,omitempty"`
Type *TypeField `json:"type,omitempty"`
Format string `json:"format,omitempty"`
// polymorphic support
AllOf []*RefOrSchema `json:"allOf,omitempty"`
AnyOf []*RefOrSchema `json:"anyOf,omitempty"`
OneOf []*RefOrSchema `json:"oneOf,omitempty"`
Not *RefOrSchema `json:"not,omitempty"`
// jsonschema2go Config
Config Config `json:"x-jsonschema2go"`
// user extensible
Annotations TagMap `json:"-"`
}
// Config is a series of jsonschema2go user extensions
type Config struct {
GoPath string `json:"gopath"`
Exclude bool `json:"exclude"`
Discriminator Discriminator `json:"Discriminator"`
NoValidate bool `json:"noValidate"`
PromoteFields bool `json:"promoteFields"`
NoOmitEmpty bool `json:"noOmitEmpty"`
OmitEmptyArray bool `json:"omitEmptyArray"`
RawMessage bool `json:"rawMessage"`
FieldAliases map[string]string `json:"fieldAliases"`
}
// Discriminator is jsonschema2go specific info for discriminating between multiple oneOf objects
type Discriminator struct {
PropertyName string `json:"propertyName"`
Mapping map[string]string `json:"mapping"`
}
// IsSet returns whether there is a discriminator present.
func (d *Discriminator) IsSet() bool {
return d.PropertyName != ""
}
func (s *Schema) setSrc(u *url.URL) {
s.Src = u
for _, c := range s.children() {
if c.schema != nil {
c.schema.setSrc(u)
}
}
}
func (s *Schema) calculateID() {
for _, c := range s.children() {
if c.schema == nil {
continue
}
if c.schema.ID == nil {
childID, _ := s.ID.Parse(s.ID.String()) // silly deep copy
if len(c.path) > 0 {
fragment := make([]string, 0, len(c.path))
for _, v := range c.path {
fragment = append(fragment, fmt.Sprint(v))
}
childID.Fragment += "/" + strings.Join(fragment, "/")
}
c.schema.ID = childID
c.schema.IDCalc = true
}
c.schema.calculateID()
}
}
type child struct {
*RefOrSchema
path []interface{}
}
func (s *Schema) children() (children []child) {
push := func(s *RefOrSchema, path ...interface{}) {
if s != nil {
children = append(children, child{s, path})
}
}
if s.AdditionalItems != nil {
push(s.AdditionalItems.Schema, "additionalItems")
}
if s.Items != nil {
push(s.Items.Items, "items")
for i, f := range s.Items.TupleFields {
push(f, "items", i)
}
}
if s.AdditionalProperties != nil {
push(s.AdditionalProperties.Schema, "additionalProperties")
}
for _, m := range []struct {
name string
schemas map[string]*RefOrSchema
}{
{"definitions", s.Definitions},
{"properties", s.Properties},
{"patternProperties", s.PatternProperties},
{"dependencies", s.Dependencies},
} {
for k, v := range m.schemas {
push(v, m.name, k)
}
}
for _, a := range []struct {
name string
schemas []*RefOrSchema
}{
{"allOf", s.AllOf},
{"anyOf", s.AnyOf},
{"oneOf", s.OneOf},
} {
for i, v := range a.schemas {
push(v, a.name, i)
}
}
push(s.Not, "not")
return
}
// String returns a simple string identifier for the schema
func (s *Schema) String() string {
if s.ID == nil {
return "<nil>"
}
return s.ID.String()
}
// ChooseType returns the best known type for this field.
func (s *Schema) ChooseType() JSONType {
switch {
case s.Type != nil && len(*s.Type) > 0:
return (*s.Type)[0]
case len(s.Properties) > 0,
s.AdditionalProperties.Present(),
len(s.PatternProperties) > 0,
s.MinProperties > 0,
s.MaxProperties != nil,
len(s.AllOf) > 0:
return JSONObject
case s.Items.Present(),
s.UniqueItems,
s.MinItems != 0,
s.MaxItems != nil:
return JSONArray
case s.Pattern != nil,
s.MinLength > 0,
s.MaxLength != nil:
return JSONString
}
return JSONUnknown
}
// UnmarshalJSON is custom JSON deserialization for the Schema type
func (s *Schema) UnmarshalJSON(data []byte) error {
{
type schema Schema
var s2 schema
if err := json.Unmarshal(data, &s2); err != nil {
return fmt.Errorf("unmarshal schema: %w", err)
}
*s = Schema(s2)
}
var possAnnos map[string]json.RawMessage
if err := json.Unmarshal(data, &possAnnos); err != nil {
return fmt.Errorf("unmarshal annotations: %w", err)
}
for field, v := range possAnnos {
if knownSchemaFields[field] {
continue
}
if s.Annotations == nil {
s.Annotations = make(map[string]json.RawMessage)
}
s.Annotations[field] = v
}
for _, key := range []string{"$id", "id"} {
idBytes, ok := s.Annotations[key]
if !ok {
continue
}
var (
id string
err error
)
if err = json.Unmarshal(idBytes, &id); err != nil {
return err
}
if s.ID, err = url.Parse(id); err != nil {
return err
}
break
}
return nil
}
func getJSONFieldNames(val interface{}) (fields []string) {
t := reflect.TypeOf(val)
for i := 0; i < t.NumField(); i++ {
field := t.Field(i)
if r, _ := utf8.DecodeRuneInString(field.Name); r == utf8.RuneError || unicode.IsLower(r) {
continue
}
vals := strings.SplitN(field.Tag.Get("json"), ",", 2)
if len(vals) == 0 || vals[0] == "" {
fields = append(fields, field.Name)
continue
}
if vals[0] != "-" {
fields = append(fields, vals[0])
}
}
return
}
// NormalizeComment takes a comment string and makes sure it's normalized for Go
func NormalizeComment(s string) string {
if s == "" |
var parts []string
for _, p := range strings.Split(s, "\n") {
parts = append(parts, "// "+p)
}
return strings.Join(parts, "\n")
}
| {
return ""
} | conditional_block |
schema.go | package gen
import (
"bytes"
"context"
"encoding/json"
"fmt"
"net/url"
"reflect"
"strings"
"unicode"
"unicode/utf8"
)
var knownSchemaFields = make(map[string]bool)
func init() {
for _, f := range getJSONFieldNames(Schema{}) {
knownSchemaFields[f] = true
}
}
// JSONType is the enumeration of JSONSchema's supported types.
type JSONType uint8
// Each of these is a core type of JSONSchema, except for JSONUnknown, which is a useful zero value.
const (
JSONUnknown JSONType = iota
JSONArray
JSONBoolean
JSONInteger
JSONNull
JSONNumber
JSONObject
JSONString
)
var simpleTypeNames = map[string]JSONType{
"array": JSONArray,
"boolean": JSONBoolean,
"integer": JSONInteger,
"null": JSONNull,
"number": JSONNumber,
"object": JSONObject,
"string": JSONString,
}
// TypeField wraps the type field in JSONSchema, supporting either an array of types or a single type as the metaschema
// allows
type TypeField []JSONType
// UnmarshalJSON unmarshals JSON into the TypeField
func (t *TypeField) UnmarshalJSON(b []byte) error {
var val interface{}
if err := json.Unmarshal(b, &val); err != nil {
return err
}
switch v := val.(type) {
case string:
*t = append(*t, simpleTypeNames[v])
return nil
case []interface{}:
*t = make(TypeField, 0, len(v))
for _, v := range v {
var typ JSONType
if s, ok := v.(string); ok {
typ = simpleTypeNames[s]
}
*t = append(*t, typ)
}
return nil
}
return fmt.Errorf("unable to unmarshal %T into TypeField", val)
}
// convenience method to draw out the first token; if this errs, later calls will err anyway so discards
// the err
func peekToken(data []byte) json.Token {
tok, _ := json.NewDecoder(bytes.NewReader(data)).Token()
return tok
}
// BoolOrSchema may have either a boolean or a RefOrSchema.
type BoolOrSchema struct {
Bool *bool
Schema *RefOrSchema
}
func (a *BoolOrSchema) Present() bool {
return a != nil && (a.Schema != nil || (a.Bool != nil && *a.Bool))
}
// UnmarshalJSON performs some custom deserialization of JSON into BoolOrSchema
func (a *BoolOrSchema) UnmarshalJSON(data []byte) error {
if b, ok := peekToken(data).(bool); ok {
a.Bool = &b
return nil
}
a.Schema = new(RefOrSchema)
return json.Unmarshal(data, a.Schema)
}
// ItemsField contains information indicating whether the modified array is a dynamically sized list of multiple
// types or a "tuple" -- a specifically sized array with potentially different types for each position.
type ItemsField struct {
Items *RefOrSchema
TupleFields []*RefOrSchema
}
func (i *ItemsField) Present() bool {
return i != nil && (i.Items != nil || len(i.TupleFields) > 0)
}
// UnmarshalJSON conditionally deserializes into ItemsField according to the shape of the provided JSON
func (i *ItemsField) UnmarshalJSON(data []byte) error {
if peekToken(data) == json.Delim('{') {
i.Items = new(RefOrSchema)
return json.Unmarshal(data, i.Items)
}
return json.Unmarshal(data, &i.TupleFields)
}
// TagMap contains all of the different user extended tags as json.RawMessage for later deserialization
type TagMap map[string]json.RawMessage
// GetString attempts to deserialize the value for the provided key into a string. If the key is absent or there is an
// error deserializing the value, the returned string will be empty.
func (t TagMap) GetString(k string) (s string) {
_, _ = t.Unmarshal(k, &s)
return
}
// Read unmarshals the json at the provided key into the provided interface (which should be a pointer amenable to
// json.Read. If the key is not present, the pointer will be untouched, and false and nil will be returned. If the
// deserialization fails, an error will be returned.
func (t TagMap) Unmarshal(k string, val interface{}) (bool, error) {
msg, ok := t[k]
if !ok {
return false, nil
}
err := json.Unmarshal(msg, val)
return true, err
}
// NewRefOrSchema is a convenience constructor for RefOrSchema
func NewRefOrSchema(s *Schema, ref *string) *RefOrSchema {
return &RefOrSchema{ref: ref, schema: s}
}
// RefOrSchema is either a schema or a reference to a schema.
type RefOrSchema struct {
ref *string
schema *Schema
}
// UnmarshalJSON conditionally deserializes the JSON, either into a reference or a schema.
func (r *RefOrSchema) UnmarshalJSON(b []byte) error {
var ref struct {
Ref string `json:"$ref"`
}
if err := json.Unmarshal(b, &ref); err != nil {
return fmt.Errorf("unmarshal $ref: %w", err)
}
if ref.Ref != "" {
r.ref = &ref.Ref
return nil
}
r.schema = new(Schema)
return json.Unmarshal(b, r.schema)
}
// Resolve either returns the schema if set or else resolves the reference using the referer schema and loader.
func (r *RefOrSchema) Resolve(ctx context.Context, referer *Schema, loader Loader) (*Schema, error) {
if r.ref == nil {
return r.schema, nil
}
parsed2, err := url.Parse(*r.ref)
if err != nil {
return nil, fmt.Errorf("parse $ref: %w", err)
}
return loader.Load(ctx, referer.Src.ResolveReference(parsed2))
}
// Schema is the core representation of the JSONSchema meta schema.
type Schema struct {
// this could be a ref
Ref *string `json:"$ref,omitempty"`
// meta
ID *url.URL `json:"-"` // set either from "$id", "id", or calculated based on parent (see IDCalc); never nil
IDCalc bool `json:"-"` // whether this ID was calculated
Src *url.URL `json:"-"` // the resource from which this schema was loaded; never nil
Schema string `json:"$schema,omitempty"`
// number qualifiers
MultipleOf *float64 `json:"multipleOf,omitempty"`
Maximum *float64 `json:"maximum,omitempty"`
ExclusiveMaximum *bool `json:"exclusiveMaximum,omitempty"`
Minimum *float64 `json:"minimum,omitempty"`
ExclusiveMinimum *bool `json:"exclusiveMinimum,omitempty"`
// string qualifiers
MaxLength *uint64 `json:"maxLength,omitempty"`
MinLength uint64 `json:"minLength,omitempty"`
Pattern *string `json:"pattern,omitempty"`
// array qualifiers
AdditionalItems *BoolOrSchema `json:"additionalItems,omitempty"`
Items *ItemsField `json:"items,omitempty"`
MaxItems *uint64 `json:"maxItems,omitempty"`
MinItems uint64 `json:"minItems,omitempty"`
UniqueItems bool `json:"uniqueItems,omitempty"`
// object qualifiers
MaxProperties *uint64 `json:"maxProperties,omitempty"`
MinProperties uint64 `json:"minProperties,omitempty"`
Required []string `json:"required,omitempty"`
AdditionalProperties *BoolOrSchema `json:"additionalProperties,omitempty"`
Definitions map[string]*RefOrSchema `json:"definitions,omitempty"`
Properties map[string]*RefOrSchema `json:"properties,omitempty"`
PatternProperties map[string]*RefOrSchema `json:"patternProperties,omitempty"`
Dependencies map[string]*RefOrSchema `json:"dependencies,omitempty"`
// extra special
Enum []interface{} `json:"enum,omitempty"`
Type *TypeField `json:"type,omitempty"`
Format string `json:"format,omitempty"`
// polymorphic support
AllOf []*RefOrSchema `json:"allOf,omitempty"`
AnyOf []*RefOrSchema `json:"anyOf,omitempty"`
OneOf []*RefOrSchema `json:"oneOf,omitempty"`
Not *RefOrSchema `json:"not,omitempty"`
// jsonschema2go Config
Config Config `json:"x-jsonschema2go"`
// user extensible
Annotations TagMap `json:"-"`
}
// Config is a series of jsonschema2go user extensions
type Config struct {
GoPath string `json:"gopath"`
Exclude bool `json:"exclude"`
Discriminator Discriminator `json:"Discriminator"`
NoValidate bool `json:"noValidate"`
PromoteFields bool `json:"promoteFields"`
NoOmitEmpty bool `json:"noOmitEmpty"`
OmitEmptyArray bool `json:"omitEmptyArray"`
RawMessage bool `json:"rawMessage"`
FieldAliases map[string]string `json:"fieldAliases"`
}
// Discriminator is jsonschema2go specific info for discriminating between multiple oneOf objects
type Discriminator struct {
PropertyName string `json:"propertyName"`
Mapping map[string]string `json:"mapping"`
}
// IsSet returns whether there is a discriminator present.
func (d *Discriminator) IsSet() bool {
return d.PropertyName != ""
}
func (s *Schema) setSrc(u *url.URL) {
s.Src = u
for _, c := range s.children() {
if c.schema != nil {
c.schema.setSrc(u)
}
}
}
func (s *Schema) calculateID() {
for _, c := range s.children() {
if c.schema == nil {
continue
}
if c.schema.ID == nil {
childID, _ := s.ID.Parse(s.ID.String()) // silly deep copy
if len(c.path) > 0 {
fragment := make([]string, 0, len(c.path))
for _, v := range c.path {
fragment = append(fragment, fmt.Sprint(v))
}
childID.Fragment += "/" + strings.Join(fragment, "/")
}
c.schema.ID = childID
c.schema.IDCalc = true
}
c.schema.calculateID()
}
}
type child struct {
*RefOrSchema
path []interface{}
}
func (s *Schema) children() (children []child) {
push := func(s *RefOrSchema, path ...interface{}) {
if s != nil {
children = append(children, child{s, path})
}
}
if s.AdditionalItems != nil {
push(s.AdditionalItems.Schema, "additionalItems")
}
if s.Items != nil {
push(s.Items.Items, "items")
for i, f := range s.Items.TupleFields {
push(f, "items", i)
}
}
if s.AdditionalProperties != nil {
push(s.AdditionalProperties.Schema, "additionalProperties")
}
for _, m := range []struct {
name string
schemas map[string]*RefOrSchema
}{
{"definitions", s.Definitions},
{"properties", s.Properties},
{"patternProperties", s.PatternProperties},
{"dependencies", s.Dependencies},
} {
for k, v := range m.schemas {
push(v, m.name, k)
}
}
for _, a := range []struct {
name string
schemas []*RefOrSchema
}{
{"allOf", s.AllOf},
{"anyOf", s.AnyOf},
{"oneOf", s.OneOf},
} {
for i, v := range a.schemas {
push(v, a.name, i)
}
}
push(s.Not, "not")
return
}
// String returns a simple string identifier for the schema
func (s *Schema) String() string {
if s.ID == nil {
return "<nil>"
}
return s.ID.String()
}
// ChooseType returns the best known type for this field.
func (s *Schema) ChooseType() JSONType {
switch {
case s.Type != nil && len(*s.Type) > 0:
return (*s.Type)[0]
case len(s.Properties) > 0,
s.AdditionalProperties.Present(),
len(s.PatternProperties) > 0,
s.MinProperties > 0,
s.MaxProperties != nil,
len(s.AllOf) > 0:
return JSONObject
case s.Items.Present(),
s.UniqueItems,
s.MinItems != 0,
s.MaxItems != nil:
return JSONArray
case s.Pattern != nil,
s.MinLength > 0,
s.MaxLength != nil:
return JSONString
}
return JSONUnknown
}
// UnmarshalJSON is custom JSON deserialization for the Schema type
func (s *Schema) UnmarshalJSON(data []byte) error {
{
type schema Schema
var s2 schema
if err := json.Unmarshal(data, &s2); err != nil {
return fmt.Errorf("unmarshal schema: %w", err)
}
*s = Schema(s2)
}
var possAnnos map[string]json.RawMessage
if err := json.Unmarshal(data, &possAnnos); err != nil {
return fmt.Errorf("unmarshal annotations: %w", err)
}
for field, v := range possAnnos {
if knownSchemaFields[field] {
continue
}
if s.Annotations == nil {
s.Annotations = make(map[string]json.RawMessage)
}
s.Annotations[field] = v
}
for _, key := range []string{"$id", "id"} {
idBytes, ok := s.Annotations[key]
if !ok {
continue
}
var (
id string
err error
)
if err = json.Unmarshal(idBytes, &id); err != nil {
return err
}
if s.ID, err = url.Parse(id); err != nil {
return err
}
break
}
return nil
}
func getJSONFieldNames(val interface{}) (fields []string) {
t := reflect.TypeOf(val)
for i := 0; i < t.NumField(); i++ {
field := t.Field(i)
if r, _ := utf8.DecodeRuneInString(field.Name); r == utf8.RuneError || unicode.IsLower(r) {
continue
}
vals := strings.SplitN(field.Tag.Get("json"), ",", 2) | if len(vals) == 0 || vals[0] == "" {
fields = append(fields, field.Name)
continue
}
if vals[0] != "-" {
fields = append(fields, vals[0])
}
}
return
}
// NormalizeComment takes a comment string and makes sure it's normalized for Go
func NormalizeComment(s string) string {
if s == "" {
return ""
}
var parts []string
for _, p := range strings.Split(s, "\n") {
parts = append(parts, "// "+p)
}
return strings.Join(parts, "\n")
} | random_line_split | |
schema.go | package gen
import (
"bytes"
"context"
"encoding/json"
"fmt"
"net/url"
"reflect"
"strings"
"unicode"
"unicode/utf8"
)
var knownSchemaFields = make(map[string]bool)
func init() {
for _, f := range getJSONFieldNames(Schema{}) {
knownSchemaFields[f] = true
}
}
// JSONType is the enumeration of JSONSchema's supported types.
type JSONType uint8
// Each of these is a core type of JSONSchema, except for JSONUnknown, which is a useful zero value.
const (
JSONUnknown JSONType = iota
JSONArray
JSONBoolean
JSONInteger
JSONNull
JSONNumber
JSONObject
JSONString
)
var simpleTypeNames = map[string]JSONType{
"array": JSONArray,
"boolean": JSONBoolean,
"integer": JSONInteger,
"null": JSONNull,
"number": JSONNumber,
"object": JSONObject,
"string": JSONString,
}
// TypeField wraps the type field in JSONSchema, supporting either an array of types or a single type as the metaschema
// allows
type TypeField []JSONType
// UnmarshalJSON unmarshals JSON into the TypeField
func (t *TypeField) UnmarshalJSON(b []byte) error {
var val interface{}
if err := json.Unmarshal(b, &val); err != nil {
return err
}
switch v := val.(type) {
case string:
*t = append(*t, simpleTypeNames[v])
return nil
case []interface{}:
*t = make(TypeField, 0, len(v))
for _, v := range v {
var typ JSONType
if s, ok := v.(string); ok {
typ = simpleTypeNames[s]
}
*t = append(*t, typ)
}
return nil
}
return fmt.Errorf("unable to unmarshal %T into TypeField", val)
}
// convenience method to draw out the first token; if this errs, later calls will err anyway so discards
// the err
func peekToken(data []byte) json.Token {
tok, _ := json.NewDecoder(bytes.NewReader(data)).Token()
return tok
}
// BoolOrSchema may have either a boolean or a RefOrSchema.
type BoolOrSchema struct {
Bool *bool
Schema *RefOrSchema
}
func (a *BoolOrSchema) Present() bool {
return a != nil && (a.Schema != nil || (a.Bool != nil && *a.Bool))
}
// UnmarshalJSON performs some custom deserialization of JSON into BoolOrSchema
func (a *BoolOrSchema) UnmarshalJSON(data []byte) error {
if b, ok := peekToken(data).(bool); ok {
a.Bool = &b
return nil
}
a.Schema = new(RefOrSchema)
return json.Unmarshal(data, a.Schema)
}
// ItemsField contains information indicating whether the modified array is a dynamically sized list of multiple
// types or a "tuple" -- a specifically sized array with potentially different types for each position.
type ItemsField struct {
Items *RefOrSchema
TupleFields []*RefOrSchema
}
func (i *ItemsField) Present() bool {
return i != nil && (i.Items != nil || len(i.TupleFields) > 0)
}
// UnmarshalJSON conditionally deserializes into ItemsField according to the shape of the provided JSON
func (i *ItemsField) UnmarshalJSON(data []byte) error {
if peekToken(data) == json.Delim('{') {
i.Items = new(RefOrSchema)
return json.Unmarshal(data, i.Items)
}
return json.Unmarshal(data, &i.TupleFields)
}
// TagMap contains all of the different user extended tags as json.RawMessage for later deserialization
type TagMap map[string]json.RawMessage
// GetString attempts to deserialize the value for the provided key into a string. If the key is absent or there is an
// error deserializing the value, the returned string will be empty.
func (t TagMap) | (k string) (s string) {
_, _ = t.Unmarshal(k, &s)
return
}
// Read unmarshals the json at the provided key into the provided interface (which should be a pointer amenable to
// json.Read. If the key is not present, the pointer will be untouched, and false and nil will be returned. If the
// deserialization fails, an error will be returned.
func (t TagMap) Unmarshal(k string, val interface{}) (bool, error) {
msg, ok := t[k]
if !ok {
return false, nil
}
err := json.Unmarshal(msg, val)
return true, err
}
// NewRefOrSchema is a convenience constructor for RefOrSchema
func NewRefOrSchema(s *Schema, ref *string) *RefOrSchema {
return &RefOrSchema{ref: ref, schema: s}
}
// RefOrSchema is either a schema or a reference to a schema.
type RefOrSchema struct {
ref *string
schema *Schema
}
// UnmarshalJSON conditionally deserializes the JSON, either into a reference or a schema.
func (r *RefOrSchema) UnmarshalJSON(b []byte) error {
var ref struct {
Ref string `json:"$ref"`
}
if err := json.Unmarshal(b, &ref); err != nil {
return fmt.Errorf("unmarshal $ref: %w", err)
}
if ref.Ref != "" {
r.ref = &ref.Ref
return nil
}
r.schema = new(Schema)
return json.Unmarshal(b, r.schema)
}
// Resolve either returns the schema if set or else resolves the reference using the referer schema and loader.
func (r *RefOrSchema) Resolve(ctx context.Context, referer *Schema, loader Loader) (*Schema, error) {
if r.ref == nil {
return r.schema, nil
}
parsed2, err := url.Parse(*r.ref)
if err != nil {
return nil, fmt.Errorf("parse $ref: %w", err)
}
return loader.Load(ctx, referer.Src.ResolveReference(parsed2))
}
// Schema is the core representation of the JSONSchema meta schema.
type Schema struct {
// this could be a ref
Ref *string `json:"$ref,omitempty"`
// meta
ID *url.URL `json:"-"` // set either from "$id", "id", or calculated based on parent (see IDCalc); never nil
IDCalc bool `json:"-"` // whether this ID was calculated
Src *url.URL `json:"-"` // the resource from which this schema was loaded; never nil
Schema string `json:"$schema,omitempty"`
// number qualifiers
MultipleOf *float64 `json:"multipleOf,omitempty"`
Maximum *float64 `json:"maximum,omitempty"`
ExclusiveMaximum *bool `json:"exclusiveMaximum,omitempty"`
Minimum *float64 `json:"minimum,omitempty"`
ExclusiveMinimum *bool `json:"exclusiveMinimum,omitempty"`
// string qualifiers
MaxLength *uint64 `json:"maxLength,omitempty"`
MinLength uint64 `json:"minLength,omitempty"`
Pattern *string `json:"pattern,omitempty"`
// array qualifiers
AdditionalItems *BoolOrSchema `json:"additionalItems,omitempty"`
Items *ItemsField `json:"items,omitempty"`
MaxItems *uint64 `json:"maxItems,omitempty"`
MinItems uint64 `json:"minItems,omitempty"`
UniqueItems bool `json:"uniqueItems,omitempty"`
// object qualifiers
MaxProperties *uint64 `json:"maxProperties,omitempty"`
MinProperties uint64 `json:"minProperties,omitempty"`
Required []string `json:"required,omitempty"`
AdditionalProperties *BoolOrSchema `json:"additionalProperties,omitempty"`
Definitions map[string]*RefOrSchema `json:"definitions,omitempty"`
Properties map[string]*RefOrSchema `json:"properties,omitempty"`
PatternProperties map[string]*RefOrSchema `json:"patternProperties,omitempty"`
Dependencies map[string]*RefOrSchema `json:"dependencies,omitempty"`
// extra special
Enum []interface{} `json:"enum,omitempty"`
Type *TypeField `json:"type,omitempty"`
Format string `json:"format,omitempty"`
// polymorphic support
AllOf []*RefOrSchema `json:"allOf,omitempty"`
AnyOf []*RefOrSchema `json:"anyOf,omitempty"`
OneOf []*RefOrSchema `json:"oneOf,omitempty"`
Not *RefOrSchema `json:"not,omitempty"`
// jsonschema2go Config
Config Config `json:"x-jsonschema2go"`
// user extensible
Annotations TagMap `json:"-"`
}
// Config is a series of jsonschema2go user extensions
type Config struct {
GoPath string `json:"gopath"`
Exclude bool `json:"exclude"`
Discriminator Discriminator `json:"Discriminator"`
NoValidate bool `json:"noValidate"`
PromoteFields bool `json:"promoteFields"`
NoOmitEmpty bool `json:"noOmitEmpty"`
OmitEmptyArray bool `json:"omitEmptyArray"`
RawMessage bool `json:"rawMessage"`
FieldAliases map[string]string `json:"fieldAliases"`
}
// Discriminator is jsonschema2go specific info for discriminating between multiple oneOf objects
type Discriminator struct {
PropertyName string `json:"propertyName"`
Mapping map[string]string `json:"mapping"`
}
// IsSet returns whether there is a discriminator present.
func (d *Discriminator) IsSet() bool {
return d.PropertyName != ""
}
func (s *Schema) setSrc(u *url.URL) {
s.Src = u
for _, c := range s.children() {
if c.schema != nil {
c.schema.setSrc(u)
}
}
}
func (s *Schema) calculateID() {
for _, c := range s.children() {
if c.schema == nil {
continue
}
if c.schema.ID == nil {
childID, _ := s.ID.Parse(s.ID.String()) // silly deep copy
if len(c.path) > 0 {
fragment := make([]string, 0, len(c.path))
for _, v := range c.path {
fragment = append(fragment, fmt.Sprint(v))
}
childID.Fragment += "/" + strings.Join(fragment, "/")
}
c.schema.ID = childID
c.schema.IDCalc = true
}
c.schema.calculateID()
}
}
type child struct {
*RefOrSchema
path []interface{}
}
func (s *Schema) children() (children []child) {
push := func(s *RefOrSchema, path ...interface{}) {
if s != nil {
children = append(children, child{s, path})
}
}
if s.AdditionalItems != nil {
push(s.AdditionalItems.Schema, "additionalItems")
}
if s.Items != nil {
push(s.Items.Items, "items")
for i, f := range s.Items.TupleFields {
push(f, "items", i)
}
}
if s.AdditionalProperties != nil {
push(s.AdditionalProperties.Schema, "additionalProperties")
}
for _, m := range []struct {
name string
schemas map[string]*RefOrSchema
}{
{"definitions", s.Definitions},
{"properties", s.Properties},
{"patternProperties", s.PatternProperties},
{"dependencies", s.Dependencies},
} {
for k, v := range m.schemas {
push(v, m.name, k)
}
}
for _, a := range []struct {
name string
schemas []*RefOrSchema
}{
{"allOf", s.AllOf},
{"anyOf", s.AnyOf},
{"oneOf", s.OneOf},
} {
for i, v := range a.schemas {
push(v, a.name, i)
}
}
push(s.Not, "not")
return
}
// String returns a simple string identifier for the schema
func (s *Schema) String() string {
if s.ID == nil {
return "<nil>"
}
return s.ID.String()
}
// ChooseType returns the best known type for this field.
func (s *Schema) ChooseType() JSONType {
switch {
case s.Type != nil && len(*s.Type) > 0:
return (*s.Type)[0]
case len(s.Properties) > 0,
s.AdditionalProperties.Present(),
len(s.PatternProperties) > 0,
s.MinProperties > 0,
s.MaxProperties != nil,
len(s.AllOf) > 0:
return JSONObject
case s.Items.Present(),
s.UniqueItems,
s.MinItems != 0,
s.MaxItems != nil:
return JSONArray
case s.Pattern != nil,
s.MinLength > 0,
s.MaxLength != nil:
return JSONString
}
return JSONUnknown
}
// UnmarshalJSON is custom JSON deserialization for the Schema type
func (s *Schema) UnmarshalJSON(data []byte) error {
{
type schema Schema
var s2 schema
if err := json.Unmarshal(data, &s2); err != nil {
return fmt.Errorf("unmarshal schema: %w", err)
}
*s = Schema(s2)
}
var possAnnos map[string]json.RawMessage
if err := json.Unmarshal(data, &possAnnos); err != nil {
return fmt.Errorf("unmarshal annotations: %w", err)
}
for field, v := range possAnnos {
if knownSchemaFields[field] {
continue
}
if s.Annotations == nil {
s.Annotations = make(map[string]json.RawMessage)
}
s.Annotations[field] = v
}
for _, key := range []string{"$id", "id"} {
idBytes, ok := s.Annotations[key]
if !ok {
continue
}
var (
id string
err error
)
if err = json.Unmarshal(idBytes, &id); err != nil {
return err
}
if s.ID, err = url.Parse(id); err != nil {
return err
}
break
}
return nil
}
func getJSONFieldNames(val interface{}) (fields []string) {
t := reflect.TypeOf(val)
for i := 0; i < t.NumField(); i++ {
field := t.Field(i)
if r, _ := utf8.DecodeRuneInString(field.Name); r == utf8.RuneError || unicode.IsLower(r) {
continue
}
vals := strings.SplitN(field.Tag.Get("json"), ",", 2)
if len(vals) == 0 || vals[0] == "" {
fields = append(fields, field.Name)
continue
}
if vals[0] != "-" {
fields = append(fields, vals[0])
}
}
return
}
// NormalizeComment takes a comment string and makes sure it's normalized for Go
func NormalizeComment(s string) string {
if s == "" {
return ""
}
var parts []string
for _, p := range strings.Split(s, "\n") {
parts = append(parts, "// "+p)
}
return strings.Join(parts, "\n")
}
| GetString | identifier_name |
schema.go | package gen
import (
"bytes"
"context"
"encoding/json"
"fmt"
"net/url"
"reflect"
"strings"
"unicode"
"unicode/utf8"
)
var knownSchemaFields = make(map[string]bool)
func init() {
for _, f := range getJSONFieldNames(Schema{}) {
knownSchemaFields[f] = true
}
}
// JSONType is the enumeration of JSONSchema's supported types.
type JSONType uint8
// Each of these is a core type of JSONSchema, except for JSONUnknown, which is a useful zero value.
const (
JSONUnknown JSONType = iota
JSONArray
JSONBoolean
JSONInteger
JSONNull
JSONNumber
JSONObject
JSONString
)
var simpleTypeNames = map[string]JSONType{
"array": JSONArray,
"boolean": JSONBoolean,
"integer": JSONInteger,
"null": JSONNull,
"number": JSONNumber,
"object": JSONObject,
"string": JSONString,
}
// TypeField wraps the type field in JSONSchema, supporting either an array of types or a single type as the metaschema
// allows
type TypeField []JSONType
// UnmarshalJSON unmarshals JSON into the TypeField
func (t *TypeField) UnmarshalJSON(b []byte) error {
var val interface{}
if err := json.Unmarshal(b, &val); err != nil {
return err
}
switch v := val.(type) {
case string:
*t = append(*t, simpleTypeNames[v])
return nil
case []interface{}:
*t = make(TypeField, 0, len(v))
for _, v := range v {
var typ JSONType
if s, ok := v.(string); ok {
typ = simpleTypeNames[s]
}
*t = append(*t, typ)
}
return nil
}
return fmt.Errorf("unable to unmarshal %T into TypeField", val)
}
// convenience method to draw out the first token; if this errs, later calls will err anyway so discards
// the err
func peekToken(data []byte) json.Token {
tok, _ := json.NewDecoder(bytes.NewReader(data)).Token()
return tok
}
// BoolOrSchema may have either a boolean or a RefOrSchema.
type BoolOrSchema struct {
Bool *bool
Schema *RefOrSchema
}
func (a *BoolOrSchema) Present() bool {
return a != nil && (a.Schema != nil || (a.Bool != nil && *a.Bool))
}
// UnmarshalJSON performs some custom deserialization of JSON into BoolOrSchema
func (a *BoolOrSchema) UnmarshalJSON(data []byte) error {
if b, ok := peekToken(data).(bool); ok {
a.Bool = &b
return nil
}
a.Schema = new(RefOrSchema)
return json.Unmarshal(data, a.Schema)
}
// ItemsField contains information indicating whether the modified array is a dynamically sized list of multiple
// types or a "tuple" -- a specifically sized array with potentially different types for each position.
type ItemsField struct {
Items *RefOrSchema
TupleFields []*RefOrSchema
}
func (i *ItemsField) Present() bool {
return i != nil && (i.Items != nil || len(i.TupleFields) > 0)
}
// UnmarshalJSON conditionally deserializes into ItemsField according to the shape of the provided JSON
func (i *ItemsField) UnmarshalJSON(data []byte) error {
if peekToken(data) == json.Delim('{') {
i.Items = new(RefOrSchema)
return json.Unmarshal(data, i.Items)
}
return json.Unmarshal(data, &i.TupleFields)
}
// TagMap contains all of the different user extended tags as json.RawMessage for later deserialization
type TagMap map[string]json.RawMessage
// GetString attempts to deserialize the value for the provided key into a string. If the key is absent or there is an
// error deserializing the value, the returned string will be empty.
func (t TagMap) GetString(k string) (s string) {
_, _ = t.Unmarshal(k, &s)
return
}
// Read unmarshals the json at the provided key into the provided interface (which should be a pointer amenable to
// json.Read. If the key is not present, the pointer will be untouched, and false and nil will be returned. If the
// deserialization fails, an error will be returned.
func (t TagMap) Unmarshal(k string, val interface{}) (bool, error) {
msg, ok := t[k]
if !ok {
return false, nil
}
err := json.Unmarshal(msg, val)
return true, err
}
// NewRefOrSchema is a convenience constructor for RefOrSchema
func NewRefOrSchema(s *Schema, ref *string) *RefOrSchema {
return &RefOrSchema{ref: ref, schema: s}
}
// RefOrSchema is either a schema or a reference to a schema.
type RefOrSchema struct {
ref *string
schema *Schema
}
// UnmarshalJSON conditionally deserializes the JSON, either into a reference or a schema.
func (r *RefOrSchema) UnmarshalJSON(b []byte) error {
var ref struct {
Ref string `json:"$ref"`
}
if err := json.Unmarshal(b, &ref); err != nil {
return fmt.Errorf("unmarshal $ref: %w", err)
}
if ref.Ref != "" {
r.ref = &ref.Ref
return nil
}
r.schema = new(Schema)
return json.Unmarshal(b, r.schema)
}
// Resolve either returns the schema if set or else resolves the reference using the referer schema and loader.
func (r *RefOrSchema) Resolve(ctx context.Context, referer *Schema, loader Loader) (*Schema, error) |
// Schema is the core representation of the JSONSchema meta schema.
type Schema struct {
// this could be a ref
Ref *string `json:"$ref,omitempty"`
// meta
ID *url.URL `json:"-"` // set either from "$id", "id", or calculated based on parent (see IDCalc); never nil
IDCalc bool `json:"-"` // whether this ID was calculated
Src *url.URL `json:"-"` // the resource from which this schema was loaded; never nil
Schema string `json:"$schema,omitempty"`
// number qualifiers
MultipleOf *float64 `json:"multipleOf,omitempty"`
Maximum *float64 `json:"maximum,omitempty"`
ExclusiveMaximum *bool `json:"exclusiveMaximum,omitempty"`
Minimum *float64 `json:"minimum,omitempty"`
ExclusiveMinimum *bool `json:"exclusiveMinimum,omitempty"`
// string qualifiers
MaxLength *uint64 `json:"maxLength,omitempty"`
MinLength uint64 `json:"minLength,omitempty"`
Pattern *string `json:"pattern,omitempty"`
// array qualifiers
AdditionalItems *BoolOrSchema `json:"additionalItems,omitempty"`
Items *ItemsField `json:"items,omitempty"`
MaxItems *uint64 `json:"maxItems,omitempty"`
MinItems uint64 `json:"minItems,omitempty"`
UniqueItems bool `json:"uniqueItems,omitempty"`
// object qualifiers
MaxProperties *uint64 `json:"maxProperties,omitempty"`
MinProperties uint64 `json:"minProperties,omitempty"`
Required []string `json:"required,omitempty"`
AdditionalProperties *BoolOrSchema `json:"additionalProperties,omitempty"`
Definitions map[string]*RefOrSchema `json:"definitions,omitempty"`
Properties map[string]*RefOrSchema `json:"properties,omitempty"`
PatternProperties map[string]*RefOrSchema `json:"patternProperties,omitempty"`
Dependencies map[string]*RefOrSchema `json:"dependencies,omitempty"`
// extra special
Enum []interface{} `json:"enum,omitempty"`
Type *TypeField `json:"type,omitempty"`
Format string `json:"format,omitempty"`
// polymorphic support
AllOf []*RefOrSchema `json:"allOf,omitempty"`
AnyOf []*RefOrSchema `json:"anyOf,omitempty"`
OneOf []*RefOrSchema `json:"oneOf,omitempty"`
Not *RefOrSchema `json:"not,omitempty"`
// jsonschema2go Config
Config Config `json:"x-jsonschema2go"`
// user extensible
Annotations TagMap `json:"-"`
}
// Config is a series of jsonschema2go user extensions
type Config struct {
GoPath string `json:"gopath"`
Exclude bool `json:"exclude"`
Discriminator Discriminator `json:"Discriminator"`
NoValidate bool `json:"noValidate"`
PromoteFields bool `json:"promoteFields"`
NoOmitEmpty bool `json:"noOmitEmpty"`
OmitEmptyArray bool `json:"omitEmptyArray"`
RawMessage bool `json:"rawMessage"`
FieldAliases map[string]string `json:"fieldAliases"`
}
// Discriminator is jsonschema2go specific info for discriminating between multiple oneOf objects
type Discriminator struct {
PropertyName string `json:"propertyName"`
Mapping map[string]string `json:"mapping"`
}
// IsSet returns whether there is a discriminator present.
func (d *Discriminator) IsSet() bool {
return d.PropertyName != ""
}
func (s *Schema) setSrc(u *url.URL) {
s.Src = u
for _, c := range s.children() {
if c.schema != nil {
c.schema.setSrc(u)
}
}
}
func (s *Schema) calculateID() {
for _, c := range s.children() {
if c.schema == nil {
continue
}
if c.schema.ID == nil {
childID, _ := s.ID.Parse(s.ID.String()) // silly deep copy
if len(c.path) > 0 {
fragment := make([]string, 0, len(c.path))
for _, v := range c.path {
fragment = append(fragment, fmt.Sprint(v))
}
childID.Fragment += "/" + strings.Join(fragment, "/")
}
c.schema.ID = childID
c.schema.IDCalc = true
}
c.schema.calculateID()
}
}
type child struct {
*RefOrSchema
path []interface{}
}
func (s *Schema) children() (children []child) {
push := func(s *RefOrSchema, path ...interface{}) {
if s != nil {
children = append(children, child{s, path})
}
}
if s.AdditionalItems != nil {
push(s.AdditionalItems.Schema, "additionalItems")
}
if s.Items != nil {
push(s.Items.Items, "items")
for i, f := range s.Items.TupleFields {
push(f, "items", i)
}
}
if s.AdditionalProperties != nil {
push(s.AdditionalProperties.Schema, "additionalProperties")
}
for _, m := range []struct {
name string
schemas map[string]*RefOrSchema
}{
{"definitions", s.Definitions},
{"properties", s.Properties},
{"patternProperties", s.PatternProperties},
{"dependencies", s.Dependencies},
} {
for k, v := range m.schemas {
push(v, m.name, k)
}
}
for _, a := range []struct {
name string
schemas []*RefOrSchema
}{
{"allOf", s.AllOf},
{"anyOf", s.AnyOf},
{"oneOf", s.OneOf},
} {
for i, v := range a.schemas {
push(v, a.name, i)
}
}
push(s.Not, "not")
return
}
// String returns a simple string identifier for the schema
func (s *Schema) String() string {
if s.ID == nil {
return "<nil>"
}
return s.ID.String()
}
// ChooseType returns the best known type for this field.
func (s *Schema) ChooseType() JSONType {
switch {
case s.Type != nil && len(*s.Type) > 0:
return (*s.Type)[0]
case len(s.Properties) > 0,
s.AdditionalProperties.Present(),
len(s.PatternProperties) > 0,
s.MinProperties > 0,
s.MaxProperties != nil,
len(s.AllOf) > 0:
return JSONObject
case s.Items.Present(),
s.UniqueItems,
s.MinItems != 0,
s.MaxItems != nil:
return JSONArray
case s.Pattern != nil,
s.MinLength > 0,
s.MaxLength != nil:
return JSONString
}
return JSONUnknown
}
// UnmarshalJSON is custom JSON deserialization for the Schema type
func (s *Schema) UnmarshalJSON(data []byte) error {
{
type schema Schema
var s2 schema
if err := json.Unmarshal(data, &s2); err != nil {
return fmt.Errorf("unmarshal schema: %w", err)
}
*s = Schema(s2)
}
var possAnnos map[string]json.RawMessage
if err := json.Unmarshal(data, &possAnnos); err != nil {
return fmt.Errorf("unmarshal annotations: %w", err)
}
for field, v := range possAnnos {
if knownSchemaFields[field] {
continue
}
if s.Annotations == nil {
s.Annotations = make(map[string]json.RawMessage)
}
s.Annotations[field] = v
}
for _, key := range []string{"$id", "id"} {
idBytes, ok := s.Annotations[key]
if !ok {
continue
}
var (
id string
err error
)
if err = json.Unmarshal(idBytes, &id); err != nil {
return err
}
if s.ID, err = url.Parse(id); err != nil {
return err
}
break
}
return nil
}
func getJSONFieldNames(val interface{}) (fields []string) {
t := reflect.TypeOf(val)
for i := 0; i < t.NumField(); i++ {
field := t.Field(i)
if r, _ := utf8.DecodeRuneInString(field.Name); r == utf8.RuneError || unicode.IsLower(r) {
continue
}
vals := strings.SplitN(field.Tag.Get("json"), ",", 2)
if len(vals) == 0 || vals[0] == "" {
fields = append(fields, field.Name)
continue
}
if vals[0] != "-" {
fields = append(fields, vals[0])
}
}
return
}
// NormalizeComment takes a comment string and makes sure it's normalized for Go
func NormalizeComment(s string) string {
if s == "" {
return ""
}
var parts []string
for _, p := range strings.Split(s, "\n") {
parts = append(parts, "// "+p)
}
return strings.Join(parts, "\n")
}
| {
if r.ref == nil {
return r.schema, nil
}
parsed2, err := url.Parse(*r.ref)
if err != nil {
return nil, fmt.Errorf("parse $ref: %w", err)
}
return loader.Load(ctx, referer.Src.ResolveReference(parsed2))
} | identifier_body |
policy.go | package miner
import (
"fmt"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
"github.com/ipfs/go-cid"
mh "github.com/multiformats/go-multihash"
"github.com/filecoin-project/specs-actors/v8/actors/builtin"
)
// The period over which a miner's active sectors are expected to be proven via WindowPoSt.
// This guarantees that (1) user data is proven daily, (2) user data is stored for 24h by a rational miner
// (due to Window PoSt cost assumption).
var WPoStProvingPeriod = abi.ChainEpoch(builtin.EpochsInDay) // 24 hours PARAM_SPEC
// The period between the opening and the closing of a WindowPoSt deadline in which the miner is expected to
// provide a Window PoSt proof.
// This provides a miner enough time to compute and propagate a Window PoSt proof.
var WPoStChallengeWindow = abi.ChainEpoch(30 * 60 / builtin.EpochDurationSeconds) // 30 minutes (48 per day) PARAM_SPEC
// WPoStDisputeWindow is the period after a challenge window ends during which
// PoSts submitted during that period may be disputed.
var WPoStDisputeWindow = 2 * ChainFinality // PARAM_SPEC
// The number of non-overlapping PoSt deadlines in a proving period.
// This spreads a miner's Window PoSt work across a proving period.
const WPoStPeriodDeadlines = uint64(48) // PARAM_SPEC
// MaxPartitionsPerDeadline is the maximum number of partitions that will be assigned to a deadline.
// For a minimum storage of upto 1Eib, we need 300 partitions per deadline.
// 48 * 32GiB * 2349 * 300 = 1.00808144 EiB
// So, to support upto 10Eib storage, we set this to 3000.
const MaxPartitionsPerDeadline = 3000
func init() |
// The maximum number of partitions that can be loaded in a single invocation.
// This limits the number of simultaneous fault, recovery, or sector-extension declarations.
// We set this to same as MaxPartitionsPerDeadline so we can process that many partitions every deadline.
const AddressedPartitionsMax = MaxPartitionsPerDeadline
// Maximum number of unique "declarations" in batch operations.
const DeclarationsMax = AddressedPartitionsMax
// The maximum number of sector infos that can be loaded in a single invocation.
// This limits the amount of state to be read in a single message execution.
const AddressedSectorsMax = 25_000 // PARAM_SPEC
// Libp2p peer info limits.
const (
// MaxPeerIDLength is the maximum length allowed for any on-chain peer ID.
// Most Peer IDs are expected to be less than 50 bytes.
MaxPeerIDLength = 128 // PARAM_SPEC
// MaxMultiaddrData is the maximum amount of data that can be stored in multiaddrs.
MaxMultiaddrData = 1024 // PARAM_SPEC
)
// Maximum number of control addresses a miner may register.
const MaxControlAddresses = 10
// The maximum number of partitions that may be required to be loaded in a single invocation,
// when all the sector infos for the partitions will be loaded.
func loadPartitionsSectorsMax(partitionSectorCount uint64) uint64 {
return min64(AddressedSectorsMax/partitionSectorCount, AddressedPartitionsMax)
}
// Epochs after which chain state is final with overwhelming probability (hence the likelihood of two fork of this size is negligible)
// This is a conservative value that is chosen via simulations of all known attacks.
const ChainFinality = abi.ChainEpoch(900) // PARAM_SPEC
// Prefix for sealed sector CIDs (CommR).
var SealedCIDPrefix = cid.Prefix{
Version: 1,
Codec: cid.FilCommitmentSealed,
MhType: mh.POSEIDON_BLS12_381_A1_FC1,
MhLength: 32,
}
// List of proof types which may be used when creating a new miner actor.
// This is mutable to allow configuration of testing and development networks.
var WindowPoStProofTypes = map[abi.RegisteredPoStProof]struct{}{
abi.RegisteredPoStProof_StackedDrgWindow32GiBV1: {},
abi.RegisteredPoStProof_StackedDrgWindow64GiBV1: {},
}
// Checks whether a PoSt proof type is supported for new miners.
func CanWindowPoStProof(s abi.RegisteredPoStProof) bool {
_, ok := WindowPoStProofTypes[s]
return ok
}
// List of proof types which may be used when pre-committing a new sector.
// This is mutable to allow configuration of testing and development networks.
// From network version 8, sectors sealed with the V1 seal proof types cannot be committed.
var PreCommitSealProofTypesV8 = map[abi.RegisteredSealProof]struct{}{
abi.RegisteredSealProof_StackedDrg32GiBV1_1: {},
abi.RegisteredSealProof_StackedDrg64GiBV1_1: {},
}
// Checks whether a seal proof type is supported for new miners and sectors.
func CanPreCommitSealProof(s abi.RegisteredSealProof) bool {
_, ok := PreCommitSealProofTypesV8[s]
return ok
}
// Checks whether a seal proof type is supported for new miners and sectors.
// As of network version 11, all permitted seal proof types may be extended.
func CanExtendSealProofType(_ abi.RegisteredSealProof) bool {
return true
}
// Maximum delay to allow between sector pre-commit and subsequent proof.
// The allowable delay depends on seal proof algorithm.
var MaxProveCommitDuration = map[abi.RegisteredSealProof]abi.ChainEpoch{
abi.RegisteredSealProof_StackedDrg32GiBV1: builtin.EpochsInDay + PreCommitChallengeDelay, // PARAM_SPEC
abi.RegisteredSealProof_StackedDrg2KiBV1: builtin.EpochsInDay + PreCommitChallengeDelay,
abi.RegisteredSealProof_StackedDrg8MiBV1: builtin.EpochsInDay + PreCommitChallengeDelay,
abi.RegisteredSealProof_StackedDrg512MiBV1: builtin.EpochsInDay + PreCommitChallengeDelay,
abi.RegisteredSealProof_StackedDrg64GiBV1: builtin.EpochsInDay + PreCommitChallengeDelay,
abi.RegisteredSealProof_StackedDrg32GiBV1_1: 30*builtin.EpochsInDay + PreCommitChallengeDelay, // PARAM_SPEC
abi.RegisteredSealProof_StackedDrg2KiBV1_1: 30*builtin.EpochsInDay + PreCommitChallengeDelay,
abi.RegisteredSealProof_StackedDrg8MiBV1_1: 30*builtin.EpochsInDay + PreCommitChallengeDelay,
abi.RegisteredSealProof_StackedDrg512MiBV1_1: 30*builtin.EpochsInDay + PreCommitChallengeDelay,
abi.RegisteredSealProof_StackedDrg64GiBV1_1: 30*builtin.EpochsInDay + PreCommitChallengeDelay,
}
// The maximum number of sector pre-commitments in a single batch.
// 32 sectors per epoch would support a single miner onboarding 1EiB of 32GiB sectors in 1 year.
const PreCommitSectorBatchMaxSize = 256
// The maximum number of sector replica updates in a single batch.
// Same as PreCommitSectorBatchMaxSize for consistency
const ProveReplicaUpdatesMaxSize = PreCommitSectorBatchMaxSize
// Maximum delay between challenge and pre-commitment.
// This prevents a miner sealing sectors far in advance of committing them to the chain, thus committing to a
// particular chain.
var MaxPreCommitRandomnessLookback = builtin.EpochsInDay + ChainFinality // PARAM_SPEC
// Number of epochs between publishing a sector pre-commitment and when the challenge for interactive PoRep is drawn.
// This (1) prevents a miner predicting a challenge before staking their pre-commit deposit, and
// (2) prevents a miner attempting a long fork in the past to insert a pre-commitment after seeing the challenge.
var PreCommitChallengeDelay = abi.ChainEpoch(150) // PARAM_SPEC
// Lookback from the deadline's challenge window opening from which to sample chain randomness for the WindowPoSt challenge seed.
// This means that deadline windows can be non-overlapping (which make the programming simpler) without requiring a
// miner to wait for chain stability during the challenge window.
// This value cannot be too large lest it compromise the rationality of honest storage (from Window PoSt cost assumptions).
const WPoStChallengeLookback = abi.ChainEpoch(20) // PARAM_SPEC
// Minimum period between fault declaration and the next deadline opening.
// If the number of epochs between fault declaration and deadline's challenge window opening is lower than FaultDeclarationCutoff,
// the fault declaration is considered invalid for that deadline.
// This guarantees that a miner is not likely to successfully fork the chain and declare a fault after seeing the challenges.
const FaultDeclarationCutoff = WPoStChallengeLookback + 50 // PARAM_SPEC
// The maximum age of a fault before the sector is terminated.
// This bounds the time a miner can lose client's data before sacrificing pledge and deal collateral.
var FaultMaxAge = WPoStProvingPeriod * 42 // PARAM_SPEC
// Staging period for a miner worker key change.
// This delay prevents a miner choosing a more favorable worker key that wins leader elections.
const WorkerKeyChangeDelay = ChainFinality // PARAM_SPEC
// Minimum number of epochs past the current epoch a sector may be set to expire.
const MinSectorExpiration = 180 * builtin.EpochsInDay // PARAM_SPEC
// The maximum number of epochs past the current epoch that sector lifetime may be extended.
// A sector may be extended multiple times, however, the total maximum lifetime is also bounded by
// the associated seal proof's maximum lifetime.
const MaxSectorExpirationExtension = 540 * builtin.EpochsInDay // PARAM_SPEC
// Ratio of sector size to maximum number of deals per sector.
// The maximum number of deals is the sector size divided by this number (2^27)
// which limits 32GiB sectors to 256 deals and 64GiB sectors to 512
const DealLimitDenominator = 134217728 // PARAM_SPEC
// Number of epochs after a consensus fault for which a miner is ineligible
// for permissioned actor methods and winning block elections.
const ConsensusFaultIneligibilityDuration = ChainFinality
// DealWeight and VerifiedDealWeight are spacetime occupied by regular deals and verified deals in a sector.
// Sum of DealWeight and VerifiedDealWeight should be less than or equal to total SpaceTime of a sector.
// Sectors full of VerifiedDeals will have a SectorQuality of VerifiedDealWeightMultiplier/QualityBaseMultiplier.
// Sectors full of Deals will have a SectorQuality of DealWeightMultiplier/QualityBaseMultiplier.
// Sectors with neither will have a SectorQuality of QualityBaseMultiplier/QualityBaseMultiplier.
// SectorQuality of a sector is a weighted average of multipliers based on their proportions.
func QualityForWeight(size abi.SectorSize, duration abi.ChainEpoch, dealWeight, verifiedWeight abi.DealWeight) abi.SectorQuality {
// sectorSpaceTime = size * duration
sectorSpaceTime := big.Mul(big.NewIntUnsigned(uint64(size)), big.NewInt(int64(duration)))
// totalDealSpaceTime = dealWeight + verifiedWeight
totalDealSpaceTime := big.Add(dealWeight, verifiedWeight)
// Base - all size * duration of non-deals
// weightedBaseSpaceTime = (sectorSpaceTime - totalDealSpaceTime) * QualityBaseMultiplier
weightedBaseSpaceTime := big.Mul(big.Sub(sectorSpaceTime, totalDealSpaceTime), builtin.QualityBaseMultiplier)
// Deal - all deal size * deal duration * 10
// weightedDealSpaceTime = dealWeight * DealWeightMultiplier
weightedDealSpaceTime := big.Mul(dealWeight, builtin.DealWeightMultiplier)
// Verified - all verified deal size * verified deal duration * 100
// weightedVerifiedSpaceTime = verifiedWeight * VerifiedDealWeightMultiplier
weightedVerifiedSpaceTime := big.Mul(verifiedWeight, builtin.VerifiedDealWeightMultiplier)
// Sum - sum of all spacetime
// weightedSumSpaceTime = weightedBaseSpaceTime + weightedDealSpaceTime + weightedVerifiedSpaceTime
weightedSumSpaceTime := big.Sum(weightedBaseSpaceTime, weightedDealSpaceTime, weightedVerifiedSpaceTime)
// scaledUpWeightedSumSpaceTime = weightedSumSpaceTime * 2^20
scaledUpWeightedSumSpaceTime := big.Lsh(weightedSumSpaceTime, builtin.SectorQualityPrecision)
// Average of weighted space time: (scaledUpWeightedSumSpaceTime / sectorSpaceTime * 10)
return big.Div(big.Div(scaledUpWeightedSumSpaceTime, sectorSpaceTime), builtin.QualityBaseMultiplier)
}
// The power for a sector size, committed duration, and weight.
func QAPowerForWeight(size abi.SectorSize, duration abi.ChainEpoch, dealWeight, verifiedWeight abi.DealWeight) abi.StoragePower {
quality := QualityForWeight(size, duration, dealWeight, verifiedWeight)
return big.Rsh(big.Mul(big.NewIntUnsigned(uint64(size)), quality), builtin.SectorQualityPrecision)
}
// The quality-adjusted power for a sector.
func QAPowerForSector(size abi.SectorSize, sector *SectorOnChainInfo) abi.StoragePower {
duration := sector.Expiration - sector.Activation
return QAPowerForWeight(size, duration, sector.DealWeight, sector.VerifiedDealWeight)
}
// Determine maximum number of deal miner's sector can hold
func SectorDealsMax(size abi.SectorSize) uint64 {
return max64(256, uint64(size/DealLimitDenominator))
}
// Default share of block reward allocated as reward to the consensus fault reporter.
// Applied as epochReward / (expectedLeadersPerEpoch * consensusFaultReporterDefaultShare)
const consensusFaultReporterDefaultShare int64 = 4
// Specification for a linear vesting schedule.
type VestSpec struct {
InitialDelay abi.ChainEpoch // Delay before any amount starts vesting.
VestPeriod abi.ChainEpoch // Period over which the total should vest, after the initial delay.
StepDuration abi.ChainEpoch // Duration between successive incremental vests (independent of vesting period).
Quantization abi.ChainEpoch // Maximum precision of vesting table (limits cardinality of table).
}
// The vesting schedule for total rewards (block reward + gas reward) earned by a block producer.
var RewardVestingSpec = VestSpec{ // PARAM_SPEC
InitialDelay: abi.ChainEpoch(0),
VestPeriod: abi.ChainEpoch(180 * builtin.EpochsInDay),
StepDuration: abi.ChainEpoch(1 * builtin.EpochsInDay),
Quantization: 12 * builtin.EpochsInHour,
}
// When an actor reports a consensus fault, they earn a share of the penalty paid by the miner.
func RewardForConsensusSlashReport(epochReward abi.TokenAmount) abi.TokenAmount {
return big.Div(epochReward,
big.Mul(big.NewInt(builtin.ExpectedLeadersPerEpoch),
big.NewInt(consensusFaultReporterDefaultShare)),
)
}
// The reward given for successfully disputing a window post.
func RewardForDisputedWindowPoSt(proofType abi.RegisteredPoStProof, disputedPower PowerPair) abi.TokenAmount {
// This is currently just the base. In the future, the fee may scale based on the disputed power.
return BaseRewardForDisputedWindowPoSt
}
const MaxAggregatedSectors = 819
const MinAggregatedSectors = 4
const MaxAggregateProofSize = 81960
// The delay between pre commit expiration and clean up from state. This enforces that expired pre-commits
// stay in state for a period of time creating a grace period during which a late-running aggregated prove-commit
// can still prove its non-expired precommits without resubmitting a message
const ExpiredPreCommitCleanUpDelay = 8 * builtin.EpochsInHour
| {
// Check that the challenge windows divide the proving period evenly.
if WPoStProvingPeriod%WPoStChallengeWindow != 0 {
panic(fmt.Sprintf("incompatible proving period %d and challenge window %d", WPoStProvingPeriod, WPoStChallengeWindow))
}
// Check that WPoStPeriodDeadlines is consistent with the proving period and challenge window.
if abi.ChainEpoch(WPoStPeriodDeadlines)*WPoStChallengeWindow != WPoStProvingPeriod {
panic(fmt.Sprintf("incompatible proving period %d and challenge window %d", WPoStProvingPeriod, WPoStChallengeWindow))
}
// Check to make sure the dispute window is longer than finality so there's always some time to dispute bad proofs.
if WPoStDisputeWindow <= ChainFinality {
panic(fmt.Sprintf("the proof dispute period %d must exceed finality %d", WPoStDisputeWindow, ChainFinality))
}
// A deadline becomes immutable one challenge window before it's challenge window opens.
// The challenge lookback must fall within this immutability period.
if WPoStChallengeLookback > WPoStChallengeWindow {
panic("the challenge lookback cannot exceed one challenge window")
}
// Deadlines are immutable when the challenge window is open, and during
// the previous challenge window.
immutableWindow := 2 * WPoStChallengeWindow
// We want to reserve at least one deadline's worth of time to compact a
// deadline.
minCompactionWindow := WPoStChallengeWindow
// Make sure we have enough time in the proving period to do everything we need.
if (minCompactionWindow + immutableWindow + WPoStDisputeWindow) > WPoStProvingPeriod {
panic(fmt.Sprintf("together, the minimum compaction window (%d) immutability window (%d) and the dispute window (%d) exceed the proving period (%d)",
minCompactionWindow, immutableWindow, WPoStDisputeWindow, WPoStProvingPeriod))
}
} | identifier_body |
policy.go | package miner
import (
"fmt"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
"github.com/ipfs/go-cid"
mh "github.com/multiformats/go-multihash"
"github.com/filecoin-project/specs-actors/v8/actors/builtin"
)
// The period over which a miner's active sectors are expected to be proven via WindowPoSt.
// This guarantees that (1) user data is proven daily, (2) user data is stored for 24h by a rational miner
// (due to Window PoSt cost assumption).
var WPoStProvingPeriod = abi.ChainEpoch(builtin.EpochsInDay) // 24 hours PARAM_SPEC
// The period between the opening and the closing of a WindowPoSt deadline in which the miner is expected to
// provide a Window PoSt proof.
// This provides a miner enough time to compute and propagate a Window PoSt proof.
var WPoStChallengeWindow = abi.ChainEpoch(30 * 60 / builtin.EpochDurationSeconds) // 30 minutes (48 per day) PARAM_SPEC
// WPoStDisputeWindow is the period after a challenge window ends during which
// PoSts submitted during that period may be disputed.
var WPoStDisputeWindow = 2 * ChainFinality // PARAM_SPEC
// The number of non-overlapping PoSt deadlines in a proving period.
// This spreads a miner's Window PoSt work across a proving period.
const WPoStPeriodDeadlines = uint64(48) // PARAM_SPEC
// MaxPartitionsPerDeadline is the maximum number of partitions that will be assigned to a deadline.
// For a minimum storage of upto 1Eib, we need 300 partitions per deadline.
// 48 * 32GiB * 2349 * 300 = 1.00808144 EiB
// So, to support upto 10Eib storage, we set this to 3000.
const MaxPartitionsPerDeadline = 3000
func init() {
// Check that the challenge windows divide the proving period evenly.
if WPoStProvingPeriod%WPoStChallengeWindow != 0 {
panic(fmt.Sprintf("incompatible proving period %d and challenge window %d", WPoStProvingPeriod, WPoStChallengeWindow))
}
// Check that WPoStPeriodDeadlines is consistent with the proving period and challenge window.
if abi.ChainEpoch(WPoStPeriodDeadlines)*WPoStChallengeWindow != WPoStProvingPeriod {
panic(fmt.Sprintf("incompatible proving period %d and challenge window %d", WPoStProvingPeriod, WPoStChallengeWindow))
}
// Check to make sure the dispute window is longer than finality so there's always some time to dispute bad proofs.
if WPoStDisputeWindow <= ChainFinality |
// A deadline becomes immutable one challenge window before it's challenge window opens.
// The challenge lookback must fall within this immutability period.
if WPoStChallengeLookback > WPoStChallengeWindow {
panic("the challenge lookback cannot exceed one challenge window")
}
// Deadlines are immutable when the challenge window is open, and during
// the previous challenge window.
immutableWindow := 2 * WPoStChallengeWindow
// We want to reserve at least one deadline's worth of time to compact a
// deadline.
minCompactionWindow := WPoStChallengeWindow
// Make sure we have enough time in the proving period to do everything we need.
if (minCompactionWindow + immutableWindow + WPoStDisputeWindow) > WPoStProvingPeriod {
panic(fmt.Sprintf("together, the minimum compaction window (%d) immutability window (%d) and the dispute window (%d) exceed the proving period (%d)",
minCompactionWindow, immutableWindow, WPoStDisputeWindow, WPoStProvingPeriod))
}
}
// The maximum number of partitions that can be loaded in a single invocation.
// This limits the number of simultaneous fault, recovery, or sector-extension declarations.
// We set this to same as MaxPartitionsPerDeadline so we can process that many partitions every deadline.
const AddressedPartitionsMax = MaxPartitionsPerDeadline
// Maximum number of unique "declarations" in batch operations.
const DeclarationsMax = AddressedPartitionsMax
// The maximum number of sector infos that can be loaded in a single invocation.
// This limits the amount of state to be read in a single message execution.
const AddressedSectorsMax = 25_000 // PARAM_SPEC
// Libp2p peer info limits.
const (
// MaxPeerIDLength is the maximum length allowed for any on-chain peer ID.
// Most Peer IDs are expected to be less than 50 bytes.
MaxPeerIDLength = 128 // PARAM_SPEC
// MaxMultiaddrData is the maximum amount of data that can be stored in multiaddrs.
MaxMultiaddrData = 1024 // PARAM_SPEC
)
// Maximum number of control addresses a miner may register.
const MaxControlAddresses = 10
// The maximum number of partitions that may be required to be loaded in a single invocation,
// when all the sector infos for the partitions will be loaded.
func loadPartitionsSectorsMax(partitionSectorCount uint64) uint64 {
return min64(AddressedSectorsMax/partitionSectorCount, AddressedPartitionsMax)
}
// Epochs after which chain state is final with overwhelming probability (hence the likelihood of two fork of this size is negligible)
// This is a conservative value that is chosen via simulations of all known attacks.
const ChainFinality = abi.ChainEpoch(900) // PARAM_SPEC
// Prefix for sealed sector CIDs (CommR).
var SealedCIDPrefix = cid.Prefix{
Version: 1,
Codec: cid.FilCommitmentSealed,
MhType: mh.POSEIDON_BLS12_381_A1_FC1,
MhLength: 32,
}
// List of proof types which may be used when creating a new miner actor.
// This is mutable to allow configuration of testing and development networks.
var WindowPoStProofTypes = map[abi.RegisteredPoStProof]struct{}{
abi.RegisteredPoStProof_StackedDrgWindow32GiBV1: {},
abi.RegisteredPoStProof_StackedDrgWindow64GiBV1: {},
}
// Checks whether a PoSt proof type is supported for new miners.
func CanWindowPoStProof(s abi.RegisteredPoStProof) bool {
_, ok := WindowPoStProofTypes[s]
return ok
}
// List of proof types which may be used when pre-committing a new sector.
// This is mutable to allow configuration of testing and development networks.
// From network version 8, sectors sealed with the V1 seal proof types cannot be committed.
var PreCommitSealProofTypesV8 = map[abi.RegisteredSealProof]struct{}{
abi.RegisteredSealProof_StackedDrg32GiBV1_1: {},
abi.RegisteredSealProof_StackedDrg64GiBV1_1: {},
}
// Checks whether a seal proof type is supported for new miners and sectors.
func CanPreCommitSealProof(s abi.RegisteredSealProof) bool {
_, ok := PreCommitSealProofTypesV8[s]
return ok
}
// Checks whether a seal proof type is supported for new miners and sectors.
// As of network version 11, all permitted seal proof types may be extended.
func CanExtendSealProofType(_ abi.RegisteredSealProof) bool {
return true
}
// Maximum delay to allow between sector pre-commit and subsequent proof.
// The allowable delay depends on seal proof algorithm.
var MaxProveCommitDuration = map[abi.RegisteredSealProof]abi.ChainEpoch{
abi.RegisteredSealProof_StackedDrg32GiBV1: builtin.EpochsInDay + PreCommitChallengeDelay, // PARAM_SPEC
abi.RegisteredSealProof_StackedDrg2KiBV1: builtin.EpochsInDay + PreCommitChallengeDelay,
abi.RegisteredSealProof_StackedDrg8MiBV1: builtin.EpochsInDay + PreCommitChallengeDelay,
abi.RegisteredSealProof_StackedDrg512MiBV1: builtin.EpochsInDay + PreCommitChallengeDelay,
abi.RegisteredSealProof_StackedDrg64GiBV1: builtin.EpochsInDay + PreCommitChallengeDelay,
abi.RegisteredSealProof_StackedDrg32GiBV1_1: 30*builtin.EpochsInDay + PreCommitChallengeDelay, // PARAM_SPEC
abi.RegisteredSealProof_StackedDrg2KiBV1_1: 30*builtin.EpochsInDay + PreCommitChallengeDelay,
abi.RegisteredSealProof_StackedDrg8MiBV1_1: 30*builtin.EpochsInDay + PreCommitChallengeDelay,
abi.RegisteredSealProof_StackedDrg512MiBV1_1: 30*builtin.EpochsInDay + PreCommitChallengeDelay,
abi.RegisteredSealProof_StackedDrg64GiBV1_1: 30*builtin.EpochsInDay + PreCommitChallengeDelay,
}
// The maximum number of sector pre-commitments in a single batch.
// 32 sectors per epoch would support a single miner onboarding 1EiB of 32GiB sectors in 1 year.
const PreCommitSectorBatchMaxSize = 256
// The maximum number of sector replica updates in a single batch.
// Same as PreCommitSectorBatchMaxSize for consistency
const ProveReplicaUpdatesMaxSize = PreCommitSectorBatchMaxSize
// Maximum delay between challenge and pre-commitment.
// This prevents a miner sealing sectors far in advance of committing them to the chain, thus committing to a
// particular chain.
var MaxPreCommitRandomnessLookback = builtin.EpochsInDay + ChainFinality // PARAM_SPEC
// Number of epochs between publishing a sector pre-commitment and when the challenge for interactive PoRep is drawn.
// This (1) prevents a miner predicting a challenge before staking their pre-commit deposit, and
// (2) prevents a miner attempting a long fork in the past to insert a pre-commitment after seeing the challenge.
var PreCommitChallengeDelay = abi.ChainEpoch(150) // PARAM_SPEC
// Lookback from the deadline's challenge window opening from which to sample chain randomness for the WindowPoSt challenge seed.
// This means that deadline windows can be non-overlapping (which make the programming simpler) without requiring a
// miner to wait for chain stability during the challenge window.
// This value cannot be too large lest it compromise the rationality of honest storage (from Window PoSt cost assumptions).
const WPoStChallengeLookback = abi.ChainEpoch(20) // PARAM_SPEC
// Minimum period between fault declaration and the next deadline opening.
// If the number of epochs between fault declaration and deadline's challenge window opening is lower than FaultDeclarationCutoff,
// the fault declaration is considered invalid for that deadline.
// This guarantees that a miner is not likely to successfully fork the chain and declare a fault after seeing the challenges.
const FaultDeclarationCutoff = WPoStChallengeLookback + 50 // PARAM_SPEC
// The maximum age of a fault before the sector is terminated.
// This bounds the time a miner can lose client's data before sacrificing pledge and deal collateral.
var FaultMaxAge = WPoStProvingPeriod * 42 // PARAM_SPEC
// Staging period for a miner worker key change.
// This delay prevents a miner choosing a more favorable worker key that wins leader elections.
const WorkerKeyChangeDelay = ChainFinality // PARAM_SPEC
// Minimum number of epochs past the current epoch a sector may be set to expire.
const MinSectorExpiration = 180 * builtin.EpochsInDay // PARAM_SPEC
// The maximum number of epochs past the current epoch that sector lifetime may be extended.
// A sector may be extended multiple times, however, the total maximum lifetime is also bounded by
// the associated seal proof's maximum lifetime.
const MaxSectorExpirationExtension = 540 * builtin.EpochsInDay // PARAM_SPEC
// Ratio of sector size to maximum number of deals per sector.
// The maximum number of deals is the sector size divided by this number (2^27)
// which limits 32GiB sectors to 256 deals and 64GiB sectors to 512
const DealLimitDenominator = 134217728 // PARAM_SPEC
// Number of epochs after a consensus fault for which a miner is ineligible
// for permissioned actor methods and winning block elections.
const ConsensusFaultIneligibilityDuration = ChainFinality
// DealWeight and VerifiedDealWeight are spacetime occupied by regular deals and verified deals in a sector.
// Sum of DealWeight and VerifiedDealWeight should be less than or equal to total SpaceTime of a sector.
// Sectors full of VerifiedDeals will have a SectorQuality of VerifiedDealWeightMultiplier/QualityBaseMultiplier.
// Sectors full of Deals will have a SectorQuality of DealWeightMultiplier/QualityBaseMultiplier.
// Sectors with neither will have a SectorQuality of QualityBaseMultiplier/QualityBaseMultiplier.
// SectorQuality of a sector is a weighted average of multipliers based on their proportions.
func QualityForWeight(size abi.SectorSize, duration abi.ChainEpoch, dealWeight, verifiedWeight abi.DealWeight) abi.SectorQuality {
// sectorSpaceTime = size * duration
sectorSpaceTime := big.Mul(big.NewIntUnsigned(uint64(size)), big.NewInt(int64(duration)))
// totalDealSpaceTime = dealWeight + verifiedWeight
totalDealSpaceTime := big.Add(dealWeight, verifiedWeight)
// Base - all size * duration of non-deals
// weightedBaseSpaceTime = (sectorSpaceTime - totalDealSpaceTime) * QualityBaseMultiplier
weightedBaseSpaceTime := big.Mul(big.Sub(sectorSpaceTime, totalDealSpaceTime), builtin.QualityBaseMultiplier)
// Deal - all deal size * deal duration * 10
// weightedDealSpaceTime = dealWeight * DealWeightMultiplier
weightedDealSpaceTime := big.Mul(dealWeight, builtin.DealWeightMultiplier)
// Verified - all verified deal size * verified deal duration * 100
// weightedVerifiedSpaceTime = verifiedWeight * VerifiedDealWeightMultiplier
weightedVerifiedSpaceTime := big.Mul(verifiedWeight, builtin.VerifiedDealWeightMultiplier)
// Sum - sum of all spacetime
// weightedSumSpaceTime = weightedBaseSpaceTime + weightedDealSpaceTime + weightedVerifiedSpaceTime
weightedSumSpaceTime := big.Sum(weightedBaseSpaceTime, weightedDealSpaceTime, weightedVerifiedSpaceTime)
// scaledUpWeightedSumSpaceTime = weightedSumSpaceTime * 2^20
scaledUpWeightedSumSpaceTime := big.Lsh(weightedSumSpaceTime, builtin.SectorQualityPrecision)
// Average of weighted space time: (scaledUpWeightedSumSpaceTime / sectorSpaceTime * 10)
return big.Div(big.Div(scaledUpWeightedSumSpaceTime, sectorSpaceTime), builtin.QualityBaseMultiplier)
}
// The power for a sector size, committed duration, and weight.
func QAPowerForWeight(size abi.SectorSize, duration abi.ChainEpoch, dealWeight, verifiedWeight abi.DealWeight) abi.StoragePower {
quality := QualityForWeight(size, duration, dealWeight, verifiedWeight)
return big.Rsh(big.Mul(big.NewIntUnsigned(uint64(size)), quality), builtin.SectorQualityPrecision)
}
// The quality-adjusted power for a sector.
func QAPowerForSector(size abi.SectorSize, sector *SectorOnChainInfo) abi.StoragePower {
duration := sector.Expiration - sector.Activation
return QAPowerForWeight(size, duration, sector.DealWeight, sector.VerifiedDealWeight)
}
// Determine maximum number of deal miner's sector can hold
func SectorDealsMax(size abi.SectorSize) uint64 {
return max64(256, uint64(size/DealLimitDenominator))
}
// Default share of block reward allocated as reward to the consensus fault reporter.
// Applied as epochReward / (expectedLeadersPerEpoch * consensusFaultReporterDefaultShare)
const consensusFaultReporterDefaultShare int64 = 4
// Specification for a linear vesting schedule.
type VestSpec struct {
InitialDelay abi.ChainEpoch // Delay before any amount starts vesting.
VestPeriod abi.ChainEpoch // Period over which the total should vest, after the initial delay.
StepDuration abi.ChainEpoch // Duration between successive incremental vests (independent of vesting period).
Quantization abi.ChainEpoch // Maximum precision of vesting table (limits cardinality of table).
}
// The vesting schedule for total rewards (block reward + gas reward) earned by a block producer.
var RewardVestingSpec = VestSpec{ // PARAM_SPEC
InitialDelay: abi.ChainEpoch(0),
VestPeriod: abi.ChainEpoch(180 * builtin.EpochsInDay),
StepDuration: abi.ChainEpoch(1 * builtin.EpochsInDay),
Quantization: 12 * builtin.EpochsInHour,
}
// When an actor reports a consensus fault, they earn a share of the penalty paid by the miner.
func RewardForConsensusSlashReport(epochReward abi.TokenAmount) abi.TokenAmount {
return big.Div(epochReward,
big.Mul(big.NewInt(builtin.ExpectedLeadersPerEpoch),
big.NewInt(consensusFaultReporterDefaultShare)),
)
}
// The reward given for successfully disputing a window post.
func RewardForDisputedWindowPoSt(proofType abi.RegisteredPoStProof, disputedPower PowerPair) abi.TokenAmount {
// This is currently just the base. In the future, the fee may scale based on the disputed power.
return BaseRewardForDisputedWindowPoSt
}
const MaxAggregatedSectors = 819
const MinAggregatedSectors = 4
const MaxAggregateProofSize = 81960
// The delay between pre commit expiration and clean up from state. This enforces that expired pre-commits
// stay in state for a period of time creating a grace period during which a late-running aggregated prove-commit
// can still prove its non-expired precommits without resubmitting a message
const ExpiredPreCommitCleanUpDelay = 8 * builtin.EpochsInHour
| {
panic(fmt.Sprintf("the proof dispute period %d must exceed finality %d", WPoStDisputeWindow, ChainFinality))
} | conditional_block |
policy.go | package miner
import (
"fmt"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
"github.com/ipfs/go-cid"
mh "github.com/multiformats/go-multihash"
"github.com/filecoin-project/specs-actors/v8/actors/builtin"
)
// The period over which a miner's active sectors are expected to be proven via WindowPoSt.
// This guarantees that (1) user data is proven daily, (2) user data is stored for 24h by a rational miner
// (due to Window PoSt cost assumption).
var WPoStProvingPeriod = abi.ChainEpoch(builtin.EpochsInDay) // 24 hours PARAM_SPEC
// The period between the opening and the closing of a WindowPoSt deadline in which the miner is expected to
// provide a Window PoSt proof.
// This provides a miner enough time to compute and propagate a Window PoSt proof.
var WPoStChallengeWindow = abi.ChainEpoch(30 * 60 / builtin.EpochDurationSeconds) // 30 minutes (48 per day) PARAM_SPEC
// WPoStDisputeWindow is the period after a challenge window ends during which
// PoSts submitted during that period may be disputed.
var WPoStDisputeWindow = 2 * ChainFinality // PARAM_SPEC
// The number of non-overlapping PoSt deadlines in a proving period.
// This spreads a miner's Window PoSt work across a proving period.
const WPoStPeriodDeadlines = uint64(48) // PARAM_SPEC
// MaxPartitionsPerDeadline is the maximum number of partitions that will be assigned to a deadline.
// For a minimum storage of upto 1Eib, we need 300 partitions per deadline.
// 48 * 32GiB * 2349 * 300 = 1.00808144 EiB
// So, to support upto 10Eib storage, we set this to 3000.
const MaxPartitionsPerDeadline = 3000
func init() {
// Check that the challenge windows divide the proving period evenly.
if WPoStProvingPeriod%WPoStChallengeWindow != 0 {
panic(fmt.Sprintf("incompatible proving period %d and challenge window %d", WPoStProvingPeriod, WPoStChallengeWindow))
}
// Check that WPoStPeriodDeadlines is consistent with the proving period and challenge window.
if abi.ChainEpoch(WPoStPeriodDeadlines)*WPoStChallengeWindow != WPoStProvingPeriod {
panic(fmt.Sprintf("incompatible proving period %d and challenge window %d", WPoStProvingPeriod, WPoStChallengeWindow))
}
// Check to make sure the dispute window is longer than finality so there's always some time to dispute bad proofs.
if WPoStDisputeWindow <= ChainFinality {
panic(fmt.Sprintf("the proof dispute period %d must exceed finality %d", WPoStDisputeWindow, ChainFinality))
}
// A deadline becomes immutable one challenge window before it's challenge window opens.
// The challenge lookback must fall within this immutability period.
if WPoStChallengeLookback > WPoStChallengeWindow {
panic("the challenge lookback cannot exceed one challenge window")
}
// Deadlines are immutable when the challenge window is open, and during
// the previous challenge window.
immutableWindow := 2 * WPoStChallengeWindow
// We want to reserve at least one deadline's worth of time to compact a
// deadline.
minCompactionWindow := WPoStChallengeWindow
// Make sure we have enough time in the proving period to do everything we need.
if (minCompactionWindow + immutableWindow + WPoStDisputeWindow) > WPoStProvingPeriod {
panic(fmt.Sprintf("together, the minimum compaction window (%d) immutability window (%d) and the dispute window (%d) exceed the proving period (%d)",
minCompactionWindow, immutableWindow, WPoStDisputeWindow, WPoStProvingPeriod))
}
}
// The maximum number of partitions that can be loaded in a single invocation.
// This limits the number of simultaneous fault, recovery, or sector-extension declarations.
// We set this to same as MaxPartitionsPerDeadline so we can process that many partitions every deadline.
const AddressedPartitionsMax = MaxPartitionsPerDeadline
// Maximum number of unique "declarations" in batch operations.
const DeclarationsMax = AddressedPartitionsMax
// The maximum number of sector infos that can be loaded in a single invocation.
// This limits the amount of state to be read in a single message execution.
const AddressedSectorsMax = 25_000 // PARAM_SPEC
// Libp2p peer info limits.
const (
// MaxPeerIDLength is the maximum length allowed for any on-chain peer ID.
// Most Peer IDs are expected to be less than 50 bytes.
MaxPeerIDLength = 128 // PARAM_SPEC
// MaxMultiaddrData is the maximum amount of data that can be stored in multiaddrs.
MaxMultiaddrData = 1024 // PARAM_SPEC
)
// Maximum number of control addresses a miner may register.
const MaxControlAddresses = 10
// The maximum number of partitions that may be required to be loaded in a single invocation,
// when all the sector infos for the partitions will be loaded.
func loadPartitionsSectorsMax(partitionSectorCount uint64) uint64 {
return min64(AddressedSectorsMax/partitionSectorCount, AddressedPartitionsMax)
}
// Epochs after which chain state is final with overwhelming probability (hence the likelihood of two fork of this size is negligible)
// This is a conservative value that is chosen via simulations of all known attacks.
const ChainFinality = abi.ChainEpoch(900) // PARAM_SPEC
// Prefix for sealed sector CIDs (CommR).
var SealedCIDPrefix = cid.Prefix{
Version: 1,
Codec: cid.FilCommitmentSealed,
MhType: mh.POSEIDON_BLS12_381_A1_FC1,
MhLength: 32,
}
// List of proof types which may be used when creating a new miner actor.
// This is mutable to allow configuration of testing and development networks.
var WindowPoStProofTypes = map[abi.RegisteredPoStProof]struct{}{
abi.RegisteredPoStProof_StackedDrgWindow32GiBV1: {},
abi.RegisteredPoStProof_StackedDrgWindow64GiBV1: {},
}
// Checks whether a PoSt proof type is supported for new miners.
func CanWindowPoStProof(s abi.RegisteredPoStProof) bool {
_, ok := WindowPoStProofTypes[s]
return ok
}
// List of proof types which may be used when pre-committing a new sector.
// This is mutable to allow configuration of testing and development networks.
// From network version 8, sectors sealed with the V1 seal proof types cannot be committed.
var PreCommitSealProofTypesV8 = map[abi.RegisteredSealProof]struct{}{
abi.RegisteredSealProof_StackedDrg32GiBV1_1: {},
abi.RegisteredSealProof_StackedDrg64GiBV1_1: {},
}
// Checks whether a seal proof type is supported for new miners and sectors.
func CanPreCommitSealProof(s abi.RegisteredSealProof) bool {
_, ok := PreCommitSealProofTypesV8[s]
return ok
}
// Checks whether a seal proof type is supported for new miners and sectors.
// As of network version 11, all permitted seal proof types may be extended.
func CanExtendSealProofType(_ abi.RegisteredSealProof) bool {
return true
}
// Maximum delay to allow between sector pre-commit and subsequent proof.
// The allowable delay depends on seal proof algorithm.
var MaxProveCommitDuration = map[abi.RegisteredSealProof]abi.ChainEpoch{
abi.RegisteredSealProof_StackedDrg32GiBV1: builtin.EpochsInDay + PreCommitChallengeDelay, // PARAM_SPEC
abi.RegisteredSealProof_StackedDrg2KiBV1: builtin.EpochsInDay + PreCommitChallengeDelay,
abi.RegisteredSealProof_StackedDrg8MiBV1: builtin.EpochsInDay + PreCommitChallengeDelay,
abi.RegisteredSealProof_StackedDrg512MiBV1: builtin.EpochsInDay + PreCommitChallengeDelay,
abi.RegisteredSealProof_StackedDrg64GiBV1: builtin.EpochsInDay + PreCommitChallengeDelay,
abi.RegisteredSealProof_StackedDrg32GiBV1_1: 30*builtin.EpochsInDay + PreCommitChallengeDelay, // PARAM_SPEC
abi.RegisteredSealProof_StackedDrg2KiBV1_1: 30*builtin.EpochsInDay + PreCommitChallengeDelay,
abi.RegisteredSealProof_StackedDrg8MiBV1_1: 30*builtin.EpochsInDay + PreCommitChallengeDelay,
abi.RegisteredSealProof_StackedDrg512MiBV1_1: 30*builtin.EpochsInDay + PreCommitChallengeDelay,
abi.RegisteredSealProof_StackedDrg64GiBV1_1: 30*builtin.EpochsInDay + PreCommitChallengeDelay,
}
// The maximum number of sector pre-commitments in a single batch.
// 32 sectors per epoch would support a single miner onboarding 1EiB of 32GiB sectors in 1 year.
const PreCommitSectorBatchMaxSize = 256
// The maximum number of sector replica updates in a single batch.
// Same as PreCommitSectorBatchMaxSize for consistency | const ProveReplicaUpdatesMaxSize = PreCommitSectorBatchMaxSize
// Maximum delay between challenge and pre-commitment.
// This prevents a miner sealing sectors far in advance of committing them to the chain, thus committing to a
// particular chain.
var MaxPreCommitRandomnessLookback = builtin.EpochsInDay + ChainFinality // PARAM_SPEC
// Number of epochs between publishing a sector pre-commitment and when the challenge for interactive PoRep is drawn.
// This (1) prevents a miner predicting a challenge before staking their pre-commit deposit, and
// (2) prevents a miner attempting a long fork in the past to insert a pre-commitment after seeing the challenge.
var PreCommitChallengeDelay = abi.ChainEpoch(150) // PARAM_SPEC
// Lookback from the deadline's challenge window opening from which to sample chain randomness for the WindowPoSt challenge seed.
// This means that deadline windows can be non-overlapping (which make the programming simpler) without requiring a
// miner to wait for chain stability during the challenge window.
// This value cannot be too large lest it compromise the rationality of honest storage (from Window PoSt cost assumptions).
const WPoStChallengeLookback = abi.ChainEpoch(20) // PARAM_SPEC
// Minimum period between fault declaration and the next deadline opening.
// If the number of epochs between fault declaration and deadline's challenge window opening is lower than FaultDeclarationCutoff,
// the fault declaration is considered invalid for that deadline.
// This guarantees that a miner is not likely to successfully fork the chain and declare a fault after seeing the challenges.
const FaultDeclarationCutoff = WPoStChallengeLookback + 50 // PARAM_SPEC
// The maximum age of a fault before the sector is terminated.
// This bounds the time a miner can lose client's data before sacrificing pledge and deal collateral.
var FaultMaxAge = WPoStProvingPeriod * 42 // PARAM_SPEC
// Staging period for a miner worker key change.
// This delay prevents a miner choosing a more favorable worker key that wins leader elections.
const WorkerKeyChangeDelay = ChainFinality // PARAM_SPEC
// Minimum number of epochs past the current epoch a sector may be set to expire.
const MinSectorExpiration = 180 * builtin.EpochsInDay // PARAM_SPEC
// The maximum number of epochs past the current epoch that sector lifetime may be extended.
// A sector may be extended multiple times, however, the total maximum lifetime is also bounded by
// the associated seal proof's maximum lifetime.
const MaxSectorExpirationExtension = 540 * builtin.EpochsInDay // PARAM_SPEC
// Ratio of sector size to maximum number of deals per sector.
// The maximum number of deals is the sector size divided by this number (2^27)
// which limits 32GiB sectors to 256 deals and 64GiB sectors to 512
const DealLimitDenominator = 134217728 // PARAM_SPEC
// Number of epochs after a consensus fault for which a miner is ineligible
// for permissioned actor methods and winning block elections.
const ConsensusFaultIneligibilityDuration = ChainFinality
// DealWeight and VerifiedDealWeight are spacetime occupied by regular deals and verified deals in a sector.
// Sum of DealWeight and VerifiedDealWeight should be less than or equal to total SpaceTime of a sector.
// Sectors full of VerifiedDeals will have a SectorQuality of VerifiedDealWeightMultiplier/QualityBaseMultiplier.
// Sectors full of Deals will have a SectorQuality of DealWeightMultiplier/QualityBaseMultiplier.
// Sectors with neither will have a SectorQuality of QualityBaseMultiplier/QualityBaseMultiplier.
// SectorQuality of a sector is a weighted average of multipliers based on their proportions.
func QualityForWeight(size abi.SectorSize, duration abi.ChainEpoch, dealWeight, verifiedWeight abi.DealWeight) abi.SectorQuality {
// sectorSpaceTime = size * duration
sectorSpaceTime := big.Mul(big.NewIntUnsigned(uint64(size)), big.NewInt(int64(duration)))
// totalDealSpaceTime = dealWeight + verifiedWeight
totalDealSpaceTime := big.Add(dealWeight, verifiedWeight)
// Base - all size * duration of non-deals
// weightedBaseSpaceTime = (sectorSpaceTime - totalDealSpaceTime) * QualityBaseMultiplier
weightedBaseSpaceTime := big.Mul(big.Sub(sectorSpaceTime, totalDealSpaceTime), builtin.QualityBaseMultiplier)
// Deal - all deal size * deal duration * 10
// weightedDealSpaceTime = dealWeight * DealWeightMultiplier
weightedDealSpaceTime := big.Mul(dealWeight, builtin.DealWeightMultiplier)
// Verified - all verified deal size * verified deal duration * 100
// weightedVerifiedSpaceTime = verifiedWeight * VerifiedDealWeightMultiplier
weightedVerifiedSpaceTime := big.Mul(verifiedWeight, builtin.VerifiedDealWeightMultiplier)
// Sum - sum of all spacetime
// weightedSumSpaceTime = weightedBaseSpaceTime + weightedDealSpaceTime + weightedVerifiedSpaceTime
weightedSumSpaceTime := big.Sum(weightedBaseSpaceTime, weightedDealSpaceTime, weightedVerifiedSpaceTime)
// scaledUpWeightedSumSpaceTime = weightedSumSpaceTime * 2^20
scaledUpWeightedSumSpaceTime := big.Lsh(weightedSumSpaceTime, builtin.SectorQualityPrecision)
// Average of weighted space time: (scaledUpWeightedSumSpaceTime / sectorSpaceTime * 10)
return big.Div(big.Div(scaledUpWeightedSumSpaceTime, sectorSpaceTime), builtin.QualityBaseMultiplier)
}
// The power for a sector size, committed duration, and weight.
func QAPowerForWeight(size abi.SectorSize, duration abi.ChainEpoch, dealWeight, verifiedWeight abi.DealWeight) abi.StoragePower {
quality := QualityForWeight(size, duration, dealWeight, verifiedWeight)
return big.Rsh(big.Mul(big.NewIntUnsigned(uint64(size)), quality), builtin.SectorQualityPrecision)
}
// The quality-adjusted power for a sector.
func QAPowerForSector(size abi.SectorSize, sector *SectorOnChainInfo) abi.StoragePower {
duration := sector.Expiration - sector.Activation
return QAPowerForWeight(size, duration, sector.DealWeight, sector.VerifiedDealWeight)
}
// Determine maximum number of deal miner's sector can hold
func SectorDealsMax(size abi.SectorSize) uint64 {
return max64(256, uint64(size/DealLimitDenominator))
}
// Default share of block reward allocated as reward to the consensus fault reporter.
// Applied as epochReward / (expectedLeadersPerEpoch * consensusFaultReporterDefaultShare)
const consensusFaultReporterDefaultShare int64 = 4
// Specification for a linear vesting schedule.
type VestSpec struct {
InitialDelay abi.ChainEpoch // Delay before any amount starts vesting.
VestPeriod abi.ChainEpoch // Period over which the total should vest, after the initial delay.
StepDuration abi.ChainEpoch // Duration between successive incremental vests (independent of vesting period).
Quantization abi.ChainEpoch // Maximum precision of vesting table (limits cardinality of table).
}
// The vesting schedule for total rewards (block reward + gas reward) earned by a block producer.
var RewardVestingSpec = VestSpec{ // PARAM_SPEC
InitialDelay: abi.ChainEpoch(0),
VestPeriod: abi.ChainEpoch(180 * builtin.EpochsInDay),
StepDuration: abi.ChainEpoch(1 * builtin.EpochsInDay),
Quantization: 12 * builtin.EpochsInHour,
}
// When an actor reports a consensus fault, they earn a share of the penalty paid by the miner.
func RewardForConsensusSlashReport(epochReward abi.TokenAmount) abi.TokenAmount {
return big.Div(epochReward,
big.Mul(big.NewInt(builtin.ExpectedLeadersPerEpoch),
big.NewInt(consensusFaultReporterDefaultShare)),
)
}
// The reward given for successfully disputing a window post.
func RewardForDisputedWindowPoSt(proofType abi.RegisteredPoStProof, disputedPower PowerPair) abi.TokenAmount {
// This is currently just the base. In the future, the fee may scale based on the disputed power.
return BaseRewardForDisputedWindowPoSt
}
const MaxAggregatedSectors = 819
const MinAggregatedSectors = 4
const MaxAggregateProofSize = 81960
// The delay between pre commit expiration and clean up from state. This enforces that expired pre-commits
// stay in state for a period of time creating a grace period during which a late-running aggregated prove-commit
// can still prove its non-expired precommits without resubmitting a message
const ExpiredPreCommitCleanUpDelay = 8 * builtin.EpochsInHour | random_line_split | |
policy.go | package miner
import (
"fmt"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
"github.com/ipfs/go-cid"
mh "github.com/multiformats/go-multihash"
"github.com/filecoin-project/specs-actors/v8/actors/builtin"
)
// The period over which a miner's active sectors are expected to be proven via WindowPoSt.
// This guarantees that (1) user data is proven daily, (2) user data is stored for 24h by a rational miner
// (due to Window PoSt cost assumption).
var WPoStProvingPeriod = abi.ChainEpoch(builtin.EpochsInDay) // 24 hours PARAM_SPEC
// The period between the opening and the closing of a WindowPoSt deadline in which the miner is expected to
// provide a Window PoSt proof.
// This provides a miner enough time to compute and propagate a Window PoSt proof.
var WPoStChallengeWindow = abi.ChainEpoch(30 * 60 / builtin.EpochDurationSeconds) // 30 minutes (48 per day) PARAM_SPEC
// WPoStDisputeWindow is the period after a challenge window ends during which
// PoSts submitted during that period may be disputed.
var WPoStDisputeWindow = 2 * ChainFinality // PARAM_SPEC
// The number of non-overlapping PoSt deadlines in a proving period.
// This spreads a miner's Window PoSt work across a proving period.
const WPoStPeriodDeadlines = uint64(48) // PARAM_SPEC
// MaxPartitionsPerDeadline is the maximum number of partitions that will be assigned to a deadline.
// For a minimum storage of upto 1Eib, we need 300 partitions per deadline.
// 48 * 32GiB * 2349 * 300 = 1.00808144 EiB
// So, to support upto 10Eib storage, we set this to 3000.
const MaxPartitionsPerDeadline = 3000
func init() {
// Check that the challenge windows divide the proving period evenly.
if WPoStProvingPeriod%WPoStChallengeWindow != 0 {
panic(fmt.Sprintf("incompatible proving period %d and challenge window %d", WPoStProvingPeriod, WPoStChallengeWindow))
}
// Check that WPoStPeriodDeadlines is consistent with the proving period and challenge window.
if abi.ChainEpoch(WPoStPeriodDeadlines)*WPoStChallengeWindow != WPoStProvingPeriod {
panic(fmt.Sprintf("incompatible proving period %d and challenge window %d", WPoStProvingPeriod, WPoStChallengeWindow))
}
// Check to make sure the dispute window is longer than finality so there's always some time to dispute bad proofs.
if WPoStDisputeWindow <= ChainFinality {
panic(fmt.Sprintf("the proof dispute period %d must exceed finality %d", WPoStDisputeWindow, ChainFinality))
}
// A deadline becomes immutable one challenge window before it's challenge window opens.
// The challenge lookback must fall within this immutability period.
if WPoStChallengeLookback > WPoStChallengeWindow {
panic("the challenge lookback cannot exceed one challenge window")
}
// Deadlines are immutable when the challenge window is open, and during
// the previous challenge window.
immutableWindow := 2 * WPoStChallengeWindow
// We want to reserve at least one deadline's worth of time to compact a
// deadline.
minCompactionWindow := WPoStChallengeWindow
// Make sure we have enough time in the proving period to do everything we need.
if (minCompactionWindow + immutableWindow + WPoStDisputeWindow) > WPoStProvingPeriod {
panic(fmt.Sprintf("together, the minimum compaction window (%d) immutability window (%d) and the dispute window (%d) exceed the proving period (%d)",
minCompactionWindow, immutableWindow, WPoStDisputeWindow, WPoStProvingPeriod))
}
}
// The maximum number of partitions that can be loaded in a single invocation.
// This limits the number of simultaneous fault, recovery, or sector-extension declarations.
// We set this to same as MaxPartitionsPerDeadline so we can process that many partitions every deadline.
const AddressedPartitionsMax = MaxPartitionsPerDeadline
// Maximum number of unique "declarations" in batch operations.
const DeclarationsMax = AddressedPartitionsMax
// The maximum number of sector infos that can be loaded in a single invocation.
// This limits the amount of state to be read in a single message execution.
const AddressedSectorsMax = 25_000 // PARAM_SPEC
// Libp2p peer info limits.
const (
// MaxPeerIDLength is the maximum length allowed for any on-chain peer ID.
// Most Peer IDs are expected to be less than 50 bytes.
MaxPeerIDLength = 128 // PARAM_SPEC
// MaxMultiaddrData is the maximum amount of data that can be stored in multiaddrs.
MaxMultiaddrData = 1024 // PARAM_SPEC
)
// Maximum number of control addresses a miner may register.
const MaxControlAddresses = 10
// The maximum number of partitions that may be required to be loaded in a single invocation,
// when all the sector infos for the partitions will be loaded.
func loadPartitionsSectorsMax(partitionSectorCount uint64) uint64 {
return min64(AddressedSectorsMax/partitionSectorCount, AddressedPartitionsMax)
}
// Epochs after which chain state is final with overwhelming probability (hence the likelihood of two fork of this size is negligible)
// This is a conservative value that is chosen via simulations of all known attacks.
const ChainFinality = abi.ChainEpoch(900) // PARAM_SPEC
// Prefix for sealed sector CIDs (CommR).
var SealedCIDPrefix = cid.Prefix{
Version: 1,
Codec: cid.FilCommitmentSealed,
MhType: mh.POSEIDON_BLS12_381_A1_FC1,
MhLength: 32,
}
// List of proof types which may be used when creating a new miner actor.
// This is mutable to allow configuration of testing and development networks.
var WindowPoStProofTypes = map[abi.RegisteredPoStProof]struct{}{
abi.RegisteredPoStProof_StackedDrgWindow32GiBV1: {},
abi.RegisteredPoStProof_StackedDrgWindow64GiBV1: {},
}
// Checks whether a PoSt proof type is supported for new miners.
func CanWindowPoStProof(s abi.RegisteredPoStProof) bool {
_, ok := WindowPoStProofTypes[s]
return ok
}
// List of proof types which may be used when pre-committing a new sector.
// This is mutable to allow configuration of testing and development networks.
// From network version 8, sectors sealed with the V1 seal proof types cannot be committed.
var PreCommitSealProofTypesV8 = map[abi.RegisteredSealProof]struct{}{
abi.RegisteredSealProof_StackedDrg32GiBV1_1: {},
abi.RegisteredSealProof_StackedDrg64GiBV1_1: {},
}
// Checks whether a seal proof type is supported for new miners and sectors.
func CanPreCommitSealProof(s abi.RegisteredSealProof) bool {
_, ok := PreCommitSealProofTypesV8[s]
return ok
}
// Checks whether a seal proof type is supported for new miners and sectors.
// As of network version 11, all permitted seal proof types may be extended.
func CanExtendSealProofType(_ abi.RegisteredSealProof) bool {
return true
}
// Maximum delay to allow between sector pre-commit and subsequent proof.
// The allowable delay depends on seal proof algorithm.
var MaxProveCommitDuration = map[abi.RegisteredSealProof]abi.ChainEpoch{
abi.RegisteredSealProof_StackedDrg32GiBV1: builtin.EpochsInDay + PreCommitChallengeDelay, // PARAM_SPEC
abi.RegisteredSealProof_StackedDrg2KiBV1: builtin.EpochsInDay + PreCommitChallengeDelay,
abi.RegisteredSealProof_StackedDrg8MiBV1: builtin.EpochsInDay + PreCommitChallengeDelay,
abi.RegisteredSealProof_StackedDrg512MiBV1: builtin.EpochsInDay + PreCommitChallengeDelay,
abi.RegisteredSealProof_StackedDrg64GiBV1: builtin.EpochsInDay + PreCommitChallengeDelay,
abi.RegisteredSealProof_StackedDrg32GiBV1_1: 30*builtin.EpochsInDay + PreCommitChallengeDelay, // PARAM_SPEC
abi.RegisteredSealProof_StackedDrg2KiBV1_1: 30*builtin.EpochsInDay + PreCommitChallengeDelay,
abi.RegisteredSealProof_StackedDrg8MiBV1_1: 30*builtin.EpochsInDay + PreCommitChallengeDelay,
abi.RegisteredSealProof_StackedDrg512MiBV1_1: 30*builtin.EpochsInDay + PreCommitChallengeDelay,
abi.RegisteredSealProof_StackedDrg64GiBV1_1: 30*builtin.EpochsInDay + PreCommitChallengeDelay,
}
// The maximum number of sector pre-commitments in a single batch.
// 32 sectors per epoch would support a single miner onboarding 1EiB of 32GiB sectors in 1 year.
const PreCommitSectorBatchMaxSize = 256
// The maximum number of sector replica updates in a single batch.
// Same as PreCommitSectorBatchMaxSize for consistency
const ProveReplicaUpdatesMaxSize = PreCommitSectorBatchMaxSize
// Maximum delay between challenge and pre-commitment.
// This prevents a miner sealing sectors far in advance of committing them to the chain, thus committing to a
// particular chain.
var MaxPreCommitRandomnessLookback = builtin.EpochsInDay + ChainFinality // PARAM_SPEC
// Number of epochs between publishing a sector pre-commitment and when the challenge for interactive PoRep is drawn.
// This (1) prevents a miner predicting a challenge before staking their pre-commit deposit, and
// (2) prevents a miner attempting a long fork in the past to insert a pre-commitment after seeing the challenge.
var PreCommitChallengeDelay = abi.ChainEpoch(150) // PARAM_SPEC
// Lookback from the deadline's challenge window opening from which to sample chain randomness for the WindowPoSt challenge seed.
// This means that deadline windows can be non-overlapping (which make the programming simpler) without requiring a
// miner to wait for chain stability during the challenge window.
// This value cannot be too large lest it compromise the rationality of honest storage (from Window PoSt cost assumptions).
const WPoStChallengeLookback = abi.ChainEpoch(20) // PARAM_SPEC
// Minimum period between fault declaration and the next deadline opening.
// If the number of epochs between fault declaration and deadline's challenge window opening is lower than FaultDeclarationCutoff,
// the fault declaration is considered invalid for that deadline.
// This guarantees that a miner is not likely to successfully fork the chain and declare a fault after seeing the challenges.
const FaultDeclarationCutoff = WPoStChallengeLookback + 50 // PARAM_SPEC
// The maximum age of a fault before the sector is terminated.
// This bounds the time a miner can lose client's data before sacrificing pledge and deal collateral.
var FaultMaxAge = WPoStProvingPeriod * 42 // PARAM_SPEC
// Staging period for a miner worker key change.
// This delay prevents a miner choosing a more favorable worker key that wins leader elections.
const WorkerKeyChangeDelay = ChainFinality // PARAM_SPEC
// Minimum number of epochs past the current epoch a sector may be set to expire.
const MinSectorExpiration = 180 * builtin.EpochsInDay // PARAM_SPEC
// The maximum number of epochs past the current epoch that sector lifetime may be extended.
// A sector may be extended multiple times, however, the total maximum lifetime is also bounded by
// the associated seal proof's maximum lifetime.
const MaxSectorExpirationExtension = 540 * builtin.EpochsInDay // PARAM_SPEC
// Ratio of sector size to maximum number of deals per sector.
// The maximum number of deals is the sector size divided by this number (2^27)
// which limits 32GiB sectors to 256 deals and 64GiB sectors to 512
const DealLimitDenominator = 134217728 // PARAM_SPEC
// Number of epochs after a consensus fault for which a miner is ineligible
// for permissioned actor methods and winning block elections.
const ConsensusFaultIneligibilityDuration = ChainFinality
// DealWeight and VerifiedDealWeight are spacetime occupied by regular deals and verified deals in a sector.
// Sum of DealWeight and VerifiedDealWeight should be less than or equal to total SpaceTime of a sector.
// Sectors full of VerifiedDeals will have a SectorQuality of VerifiedDealWeightMultiplier/QualityBaseMultiplier.
// Sectors full of Deals will have a SectorQuality of DealWeightMultiplier/QualityBaseMultiplier.
// Sectors with neither will have a SectorQuality of QualityBaseMultiplier/QualityBaseMultiplier.
// SectorQuality of a sector is a weighted average of multipliers based on their proportions.
func QualityForWeight(size abi.SectorSize, duration abi.ChainEpoch, dealWeight, verifiedWeight abi.DealWeight) abi.SectorQuality {
// sectorSpaceTime = size * duration
sectorSpaceTime := big.Mul(big.NewIntUnsigned(uint64(size)), big.NewInt(int64(duration)))
// totalDealSpaceTime = dealWeight + verifiedWeight
totalDealSpaceTime := big.Add(dealWeight, verifiedWeight)
// Base - all size * duration of non-deals
// weightedBaseSpaceTime = (sectorSpaceTime - totalDealSpaceTime) * QualityBaseMultiplier
weightedBaseSpaceTime := big.Mul(big.Sub(sectorSpaceTime, totalDealSpaceTime), builtin.QualityBaseMultiplier)
// Deal - all deal size * deal duration * 10
// weightedDealSpaceTime = dealWeight * DealWeightMultiplier
weightedDealSpaceTime := big.Mul(dealWeight, builtin.DealWeightMultiplier)
// Verified - all verified deal size * verified deal duration * 100
// weightedVerifiedSpaceTime = verifiedWeight * VerifiedDealWeightMultiplier
weightedVerifiedSpaceTime := big.Mul(verifiedWeight, builtin.VerifiedDealWeightMultiplier)
// Sum - sum of all spacetime
// weightedSumSpaceTime = weightedBaseSpaceTime + weightedDealSpaceTime + weightedVerifiedSpaceTime
weightedSumSpaceTime := big.Sum(weightedBaseSpaceTime, weightedDealSpaceTime, weightedVerifiedSpaceTime)
// scaledUpWeightedSumSpaceTime = weightedSumSpaceTime * 2^20
scaledUpWeightedSumSpaceTime := big.Lsh(weightedSumSpaceTime, builtin.SectorQualityPrecision)
// Average of weighted space time: (scaledUpWeightedSumSpaceTime / sectorSpaceTime * 10)
return big.Div(big.Div(scaledUpWeightedSumSpaceTime, sectorSpaceTime), builtin.QualityBaseMultiplier)
}
// The power for a sector size, committed duration, and weight.
func QAPowerForWeight(size abi.SectorSize, duration abi.ChainEpoch, dealWeight, verifiedWeight abi.DealWeight) abi.StoragePower {
quality := QualityForWeight(size, duration, dealWeight, verifiedWeight)
return big.Rsh(big.Mul(big.NewIntUnsigned(uint64(size)), quality), builtin.SectorQualityPrecision)
}
// The quality-adjusted power for a sector.
func | (size abi.SectorSize, sector *SectorOnChainInfo) abi.StoragePower {
duration := sector.Expiration - sector.Activation
return QAPowerForWeight(size, duration, sector.DealWeight, sector.VerifiedDealWeight)
}
// Determine maximum number of deal miner's sector can hold
func SectorDealsMax(size abi.SectorSize) uint64 {
return max64(256, uint64(size/DealLimitDenominator))
}
// Default share of block reward allocated as reward to the consensus fault reporter.
// Applied as epochReward / (expectedLeadersPerEpoch * consensusFaultReporterDefaultShare)
const consensusFaultReporterDefaultShare int64 = 4
// Specification for a linear vesting schedule.
type VestSpec struct {
InitialDelay abi.ChainEpoch // Delay before any amount starts vesting.
VestPeriod abi.ChainEpoch // Period over which the total should vest, after the initial delay.
StepDuration abi.ChainEpoch // Duration between successive incremental vests (independent of vesting period).
Quantization abi.ChainEpoch // Maximum precision of vesting table (limits cardinality of table).
}
// The vesting schedule for total rewards (block reward + gas reward) earned by a block producer.
var RewardVestingSpec = VestSpec{ // PARAM_SPEC
InitialDelay: abi.ChainEpoch(0),
VestPeriod: abi.ChainEpoch(180 * builtin.EpochsInDay),
StepDuration: abi.ChainEpoch(1 * builtin.EpochsInDay),
Quantization: 12 * builtin.EpochsInHour,
}
// When an actor reports a consensus fault, they earn a share of the penalty paid by the miner.
func RewardForConsensusSlashReport(epochReward abi.TokenAmount) abi.TokenAmount {
return big.Div(epochReward,
big.Mul(big.NewInt(builtin.ExpectedLeadersPerEpoch),
big.NewInt(consensusFaultReporterDefaultShare)),
)
}
// The reward given for successfully disputing a window post.
func RewardForDisputedWindowPoSt(proofType abi.RegisteredPoStProof, disputedPower PowerPair) abi.TokenAmount {
// This is currently just the base. In the future, the fee may scale based on the disputed power.
return BaseRewardForDisputedWindowPoSt
}
const MaxAggregatedSectors = 819
const MinAggregatedSectors = 4
const MaxAggregateProofSize = 81960
// The delay between pre commit expiration and clean up from state. This enforces that expired pre-commits
// stay in state for a period of time creating a grace period during which a late-running aggregated prove-commit
// can still prove its non-expired precommits without resubmitting a message
const ExpiredPreCommitCleanUpDelay = 8 * builtin.EpochsInHour
| QAPowerForSector | identifier_name |
yuva_info.rs | use super::image_info;
use crate::{prelude::*, EncodedOrigin, ISize, Matrix};
use skia_bindings::{self as sb, SkYUVAInfo, SkYUVAInfo_Subsampling};
use std::{fmt, ptr};
/// Specifies the structure of planes for a YUV image with optional alpha. The actual planar data
/// is not part of this structure and depending on usage is in external textures or pixmaps.
pub type YUVAInfo = Handle<SkYUVAInfo>;
unsafe_send_sync!(YUVAInfo);
impl NativeDrop for SkYUVAInfo {
fn drop(&mut self) {
unsafe { sb::C_SkYUVAInfo_destruct(self) }
}
}
/// Specifies how YUV (and optionally A) are divided among planes. Planes are separated by
/// underscores in the enum value names. Within each plane the pixmap/texture channels are
/// mapped to the YUVA channels in the order specified, e.g. for kY_UV Y is in channel 0 of plane
/// 0, U is in channel 0 of plane 1, and V is in channel 1 of plane 1. Channel ordering
/// within a pixmap/texture given the channels it contains:
/// A: 0:A
/// Luminance/Gray: 0:Gray
/// Luminance/Gray + Alpha: 0:Gray, 1:A
/// RG 0:R, 1:G
/// RGB 0:R, 1:G, 2:B
/// RGBA 0:R, 1:G, 2:B, 3:A
pub use sb::SkYUVAInfo_PlaneConfig as PlaneConfig;
variant_name!(PlaneConfig::YUV);
/// UV subsampling is also specified in the enum value names using J:a:b notation (e.g. 4:2:0 is
/// 1/2 horizontal and 1/2 vertical resolution for U and V). If alpha is present it is not sub-
/// sampled. Note that Subsampling values other than k444 are only valid with [PlaneConfig] values
/// that have U and V in different planes than Y (and A, if present).
#[repr(i32)]
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
pub enum Subsampling {
Unknown = SkYUVAInfo_Subsampling::kUnknown as _,
S444 = SkYUVAInfo_Subsampling::k444 as _,
S422 = SkYUVAInfo_Subsampling::k422 as _,
S420 = SkYUVAInfo_Subsampling::k420 as _,
S440 = SkYUVAInfo_Subsampling::k440 as _,
S411 = SkYUVAInfo_Subsampling::k411 as _,
S410 = SkYUVAInfo_Subsampling::k410 as _,
}
native_transmutable!(SkYUVAInfo_Subsampling, Subsampling, subsampling_layout);
/// Describes how subsampled chroma values are sited relative to luma values.
///
/// Currently only centered siting is supported but will expand to support additional sitings.
pub use sb::SkYUVAInfo_Siting as Siting;
variant_name!(Siting::Centered);
/// Ratio of Y/A values to U/V values in x and y.
pub fn subsampling_factors(subsampling: Subsampling) -> (i32, i32) {
let mut factors: [i32; 2] = Default::default();
unsafe { sb::C_SkYUVAInfo_SubsamplingFactors(subsampling.into_native(), &mut factors[0]) };
#[allow(clippy::tuple_array_conversions)]
(factors[0], factors[1])
}
/// `SubsamplingFactors(Subsampling)` if `plane_index` refers to a U/V plane and otherwise `(1, 1)`
/// if inputs are valid. Invalid inputs consist of incompatible [PlaneConfig] [Subsampling]
/// `plane_index` combinations. `(0, 0)` is returned for invalid inputs.
pub fn plane_subsampling_factors(
plane: PlaneConfig,
subsampling: Subsampling,
plane_index: usize,
) -> (i32, i32) {
let mut factors: [i32; 2] = Default::default();
unsafe {
sb::C_SkYUVAInfo_PlaneSubsamplingFactors(
plane,
subsampling.into_native(),
plane_index.try_into().unwrap(),
&mut factors[0],
)
};
#[allow(clippy::tuple_array_conversions)]
(factors[0], factors[1])
}
/// Given image dimensions, a planer configuration, subsampling, and origin, determine the expected
/// size of each plane. Returns the expected planes. The input image dimensions are as displayed
/// (after the planes have been transformed to the intended display orientation). The plane
/// dimensions are output as the planes are stored in memory (may be rotated from image dimensions).
pub fn plane_dimensions(
image_dimensions: impl Into<ISize>,
config: PlaneConfig,
subsampling: Subsampling,
origin: EncodedOrigin,
) -> Vec<ISize> {
let mut plane_dimensions = [ISize::default(); YUVAInfo::MAX_PLANES];
let size: usize = unsafe {
SkYUVAInfo::PlaneDimensions(
image_dimensions.into().into_native(),
config,
subsampling.into_native(),
origin.into_native(),
plane_dimensions.native_mut().as_mut_ptr(),
)
}
.try_into()
.unwrap();
plane_dimensions[0..size].to_vec()
}
/// Number of planes for a given [PlaneConfig].
pub fn num_planes(config: PlaneConfig) -> usize {
unsafe { sb::C_SkYUVAInfo_NumPlanes(config) }
.try_into()
.unwrap()
}
/// Number of Y, U, V, A channels in the ith plane for a given [PlaneConfig] (or [None] if i is
/// invalid).
pub fn num_channels_in_plane(config: PlaneConfig, i: usize) -> Option<usize> {
(i < num_planes(config)).if_true_then_some(|| {
unsafe { sb::C_SkYUVAInfo_NumChannelsInPlane(config, i.try_into().unwrap()) }
.try_into()
.unwrap()
})
}
/// Does the [PlaneConfig] have alpha values?
pub fn has_alpha(config: PlaneConfig) -> bool {
unsafe { sb::SkYUVAInfo_HasAlpha(config) }
}
impl Default for YUVAInfo {
fn default() -> Self {
Self::construct(|yi| unsafe { sb::C_SkYUVAInfo_Construct(yi) })
}
}
impl NativePartialEq for YUVAInfo {
fn eq(&self, rhs: &Self) -> bool {
unsafe { sb::C_SkYUVAInfo_equals(self.native(), rhs.native()) }
}
}
impl fmt::Debug for YUVAInfo {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("YUVAInfo")
.field("dimensions", &self.dimensions())
.field("plane_config", &self.plane_config())
.field("subsampling", &self.subsampling())
.field("yuv_color_space", &self.yuv_color_space())
.field("origin", &self.origin())
.field("siting_xy", &self.siting_xy())
.finish()
}
}
impl YUVAInfo {
pub const MAX_PLANES: usize = sb::SkYUVAInfo_kMaxPlanes as _;
/// `dimensions` should specify the size of the full resolution image (after planes have been
/// oriented to how the image is displayed as indicated by `origin`).
pub fn new(
dimensions: impl Into<ISize>,
config: PlaneConfig,
subsampling: Subsampling,
color_space: image_info::YUVColorSpace,
origin: impl Into<Option<EncodedOrigin>>,
siting_xy: impl Into<Option<(Siting, Siting)>>,
) -> Option<Self> {
let origin = origin.into().unwrap_or(EncodedOrigin::TopLeft);
let (siting_x, siting_y) = siting_xy
.into()
.unwrap_or((Siting::Centered, Siting::Centered));
let n = unsafe {
SkYUVAInfo::new(
dimensions.into().into_native(),
config,
subsampling.into_native(),
color_space,
origin.into_native(),
siting_x,
siting_y,
)
};
Self::native_is_valid(&n).if_true_then_some(|| Self::from_native_c(n))
}
pub fn plane_config(&self) -> PlaneConfig {
self.native().fPlaneConfig
}
pub fn subsampling(&self) -> Subsampling {
Subsampling::from_native_c(self.native().fSubsampling)
}
pub fn | (&self, plane_index: usize) -> (i32, i32) {
plane_subsampling_factors(self.plane_config(), self.subsampling(), plane_index)
}
/// Dimensions of the full resolution image (after planes have been oriented to how the image
/// is displayed as indicated by fOrigin).
pub fn dimensions(&self) -> ISize {
ISize::from_native_c(self.native().fDimensions)
}
pub fn width(&self) -> i32 {
self.dimensions().width
}
pub fn height(&self) -> i32 {
self.dimensions().height
}
pub fn yuv_color_space(&self) -> image_info::YUVColorSpace {
self.native().fYUVColorSpace
}
pub fn siting_xy(&self) -> (Siting, Siting) {
let n = self.native();
(n.fSitingX, n.fSitingY)
}
pub fn origin(&self) -> EncodedOrigin {
EncodedOrigin::from_native_c(self.native().fOrigin)
}
pub fn origin_matrix(&self) -> Matrix {
self.origin().to_matrix((self.width(), self.height()))
}
pub fn has_alpha(&self) -> bool {
has_alpha(self.plane_config())
}
/// Returns the dimensions for each plane. Dimensions are as stored in memory, before
/// transformation to image display space as indicated by [origin(&self)].
pub fn plane_dimensions(&self) -> Vec<ISize> {
self::plane_dimensions(
self.dimensions(),
self.plane_config(),
self.subsampling(),
self.origin(),
)
}
/// Given a per-plane row bytes, determine size to allocate for all planes. Optionally retrieves
/// the per-plane byte sizes in planeSizes if not `None`. If total size overflows will return
/// `SIZE_MAX` and set all planeSizes to `SIZE_MAX`.
pub fn compute_total_bytes(
&self,
row_bytes: &[usize; Self::MAX_PLANES],
plane_sizes: Option<&mut [usize; Self::MAX_PLANES]>,
) -> usize {
unsafe {
self.native().computeTotalBytes(
row_bytes.as_ptr(),
plane_sizes
.map(|v| v.as_mut_ptr())
.unwrap_or(ptr::null_mut()),
)
}
}
pub fn num_planes(&self) -> usize {
num_planes(self.plane_config())
}
pub fn num_channels_in_plane(&self, i: usize) -> Option<usize> {
num_channels_in_plane(self.plane_config(), i)
}
/// Returns a [YUVAInfo] that is identical to this one but with the passed [Subsampling]. If the
/// passed [Subsampling] is not [Subsampling::S444] and this info's [PlaneConfig] is not
/// compatible with chroma subsampling (because Y is in the same plane as UV) then the result
/// will be `None`.
pub fn with_subsampling(&self, subsampling: Subsampling) -> Option<Self> {
Self::try_construct(|info| unsafe {
sb::C_SkYUVAInfo_makeSubsampling(self.native(), subsampling.into_native(), info);
Self::native_is_valid(&*info)
})
}
/// Returns a [YUVAInfo] that is identical to this one but with the passed dimensions. If the
/// passed dimensions is empty then the result will be `None`.
pub fn with_dimensions(&self, dimensions: impl Into<ISize>) -> Option<Self> {
Self::try_construct(|info| unsafe {
sb::C_SkYUVAInfo_makeDimensions(self.native(), dimensions.into().native(), info);
Self::native_is_valid(&*info)
})
}
pub(crate) fn native_is_valid(info: &SkYUVAInfo) -> bool {
info.fPlaneConfig != PlaneConfig::Unknown
}
}
| plane_subsampling_factors | identifier_name |
yuva_info.rs | use super::image_info;
use crate::{prelude::*, EncodedOrigin, ISize, Matrix};
use skia_bindings::{self as sb, SkYUVAInfo, SkYUVAInfo_Subsampling};
use std::{fmt, ptr};
/// Specifies the structure of planes for a YUV image with optional alpha. The actual planar data
/// is not part of this structure and depending on usage is in external textures or pixmaps.
pub type YUVAInfo = Handle<SkYUVAInfo>;
unsafe_send_sync!(YUVAInfo);
impl NativeDrop for SkYUVAInfo {
fn drop(&mut self) {
unsafe { sb::C_SkYUVAInfo_destruct(self) }
}
}
/// Specifies how YUV (and optionally A) are divided among planes. Planes are separated by
/// underscores in the enum value names. Within each plane the pixmap/texture channels are
/// mapped to the YUVA channels in the order specified, e.g. for kY_UV Y is in channel 0 of plane
/// 0, U is in channel 0 of plane 1, and V is in channel 1 of plane 1. Channel ordering
/// within a pixmap/texture given the channels it contains:
/// A: 0:A
/// Luminance/Gray: 0:Gray
/// Luminance/Gray + Alpha: 0:Gray, 1:A
/// RG 0:R, 1:G
/// RGB 0:R, 1:G, 2:B
/// RGBA 0:R, 1:G, 2:B, 3:A
pub use sb::SkYUVAInfo_PlaneConfig as PlaneConfig;
variant_name!(PlaneConfig::YUV);
/// UV subsampling is also specified in the enum value names using J:a:b notation (e.g. 4:2:0 is
/// 1/2 horizontal and 1/2 vertical resolution for U and V). If alpha is present it is not sub-
/// sampled. Note that Subsampling values other than k444 are only valid with [PlaneConfig] values
/// that have U and V in different planes than Y (and A, if present).
#[repr(i32)]
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
pub enum Subsampling {
Unknown = SkYUVAInfo_Subsampling::kUnknown as _,
S444 = SkYUVAInfo_Subsampling::k444 as _,
S422 = SkYUVAInfo_Subsampling::k422 as _,
S420 = SkYUVAInfo_Subsampling::k420 as _,
S440 = SkYUVAInfo_Subsampling::k440 as _,
S411 = SkYUVAInfo_Subsampling::k411 as _,
S410 = SkYUVAInfo_Subsampling::k410 as _,
}
native_transmutable!(SkYUVAInfo_Subsampling, Subsampling, subsampling_layout);
/// Describes how subsampled chroma values are sited relative to luma values.
///
/// Currently only centered siting is supported but will expand to support additional sitings.
pub use sb::SkYUVAInfo_Siting as Siting;
variant_name!(Siting::Centered);
/// Ratio of Y/A values to U/V values in x and y.
pub fn subsampling_factors(subsampling: Subsampling) -> (i32, i32) {
let mut factors: [i32; 2] = Default::default();
unsafe { sb::C_SkYUVAInfo_SubsamplingFactors(subsampling.into_native(), &mut factors[0]) };
#[allow(clippy::tuple_array_conversions)]
(factors[0], factors[1])
}
/// `SubsamplingFactors(Subsampling)` if `plane_index` refers to a U/V plane and otherwise `(1, 1)`
/// if inputs are valid. Invalid inputs consist of incompatible [PlaneConfig] [Subsampling]
/// `plane_index` combinations. `(0, 0)` is returned for invalid inputs.
pub fn plane_subsampling_factors(
plane: PlaneConfig,
subsampling: Subsampling,
plane_index: usize,
) -> (i32, i32) {
let mut factors: [i32; 2] = Default::default();
unsafe {
sb::C_SkYUVAInfo_PlaneSubsamplingFactors(
plane,
subsampling.into_native(),
plane_index.try_into().unwrap(),
&mut factors[0],
)
};
#[allow(clippy::tuple_array_conversions)]
(factors[0], factors[1])
}
/// Given image dimensions, a planer configuration, subsampling, and origin, determine the expected
/// size of each plane. Returns the expected planes. The input image dimensions are as displayed
/// (after the planes have been transformed to the intended display orientation). The plane
/// dimensions are output as the planes are stored in memory (may be rotated from image dimensions).
pub fn plane_dimensions(
image_dimensions: impl Into<ISize>,
config: PlaneConfig,
subsampling: Subsampling,
origin: EncodedOrigin,
) -> Vec<ISize> {
let mut plane_dimensions = [ISize::default(); YUVAInfo::MAX_PLANES];
let size: usize = unsafe {
SkYUVAInfo::PlaneDimensions(
image_dimensions.into().into_native(),
config,
subsampling.into_native(),
origin.into_native(),
plane_dimensions.native_mut().as_mut_ptr(),
)
}
.try_into()
.unwrap();
plane_dimensions[0..size].to_vec()
}
/// Number of planes for a given [PlaneConfig].
pub fn num_planes(config: PlaneConfig) -> usize |
/// Number of Y, U, V, A channels in the ith plane for a given [PlaneConfig] (or [None] if i is
/// invalid).
pub fn num_channels_in_plane(config: PlaneConfig, i: usize) -> Option<usize> {
(i < num_planes(config)).if_true_then_some(|| {
unsafe { sb::C_SkYUVAInfo_NumChannelsInPlane(config, i.try_into().unwrap()) }
.try_into()
.unwrap()
})
}
/// Does the [PlaneConfig] have alpha values?
pub fn has_alpha(config: PlaneConfig) -> bool {
unsafe { sb::SkYUVAInfo_HasAlpha(config) }
}
impl Default for YUVAInfo {
fn default() -> Self {
Self::construct(|yi| unsafe { sb::C_SkYUVAInfo_Construct(yi) })
}
}
impl NativePartialEq for YUVAInfo {
fn eq(&self, rhs: &Self) -> bool {
unsafe { sb::C_SkYUVAInfo_equals(self.native(), rhs.native()) }
}
}
impl fmt::Debug for YUVAInfo {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("YUVAInfo")
.field("dimensions", &self.dimensions())
.field("plane_config", &self.plane_config())
.field("subsampling", &self.subsampling())
.field("yuv_color_space", &self.yuv_color_space())
.field("origin", &self.origin())
.field("siting_xy", &self.siting_xy())
.finish()
}
}
impl YUVAInfo {
pub const MAX_PLANES: usize = sb::SkYUVAInfo_kMaxPlanes as _;
/// `dimensions` should specify the size of the full resolution image (after planes have been
/// oriented to how the image is displayed as indicated by `origin`).
pub fn new(
dimensions: impl Into<ISize>,
config: PlaneConfig,
subsampling: Subsampling,
color_space: image_info::YUVColorSpace,
origin: impl Into<Option<EncodedOrigin>>,
siting_xy: impl Into<Option<(Siting, Siting)>>,
) -> Option<Self> {
let origin = origin.into().unwrap_or(EncodedOrigin::TopLeft);
let (siting_x, siting_y) = siting_xy
.into()
.unwrap_or((Siting::Centered, Siting::Centered));
let n = unsafe {
SkYUVAInfo::new(
dimensions.into().into_native(),
config,
subsampling.into_native(),
color_space,
origin.into_native(),
siting_x,
siting_y,
)
};
Self::native_is_valid(&n).if_true_then_some(|| Self::from_native_c(n))
}
pub fn plane_config(&self) -> PlaneConfig {
self.native().fPlaneConfig
}
pub fn subsampling(&self) -> Subsampling {
Subsampling::from_native_c(self.native().fSubsampling)
}
pub fn plane_subsampling_factors(&self, plane_index: usize) -> (i32, i32) {
plane_subsampling_factors(self.plane_config(), self.subsampling(), plane_index)
}
/// Dimensions of the full resolution image (after planes have been oriented to how the image
/// is displayed as indicated by fOrigin).
pub fn dimensions(&self) -> ISize {
ISize::from_native_c(self.native().fDimensions)
}
pub fn width(&self) -> i32 {
self.dimensions().width
}
pub fn height(&self) -> i32 {
self.dimensions().height
}
pub fn yuv_color_space(&self) -> image_info::YUVColorSpace {
self.native().fYUVColorSpace
}
pub fn siting_xy(&self) -> (Siting, Siting) {
let n = self.native();
(n.fSitingX, n.fSitingY)
}
pub fn origin(&self) -> EncodedOrigin {
EncodedOrigin::from_native_c(self.native().fOrigin)
}
pub fn origin_matrix(&self) -> Matrix {
self.origin().to_matrix((self.width(), self.height()))
}
pub fn has_alpha(&self) -> bool {
has_alpha(self.plane_config())
}
/// Returns the dimensions for each plane. Dimensions are as stored in memory, before
/// transformation to image display space as indicated by [origin(&self)].
pub fn plane_dimensions(&self) -> Vec<ISize> {
self::plane_dimensions(
self.dimensions(),
self.plane_config(),
self.subsampling(),
self.origin(),
)
}
/// Given a per-plane row bytes, determine size to allocate for all planes. Optionally retrieves
/// the per-plane byte sizes in planeSizes if not `None`. If total size overflows will return
/// `SIZE_MAX` and set all planeSizes to `SIZE_MAX`.
pub fn compute_total_bytes(
&self,
row_bytes: &[usize; Self::MAX_PLANES],
plane_sizes: Option<&mut [usize; Self::MAX_PLANES]>,
) -> usize {
unsafe {
self.native().computeTotalBytes(
row_bytes.as_ptr(),
plane_sizes
.map(|v| v.as_mut_ptr())
.unwrap_or(ptr::null_mut()),
)
}
}
pub fn num_planes(&self) -> usize {
num_planes(self.plane_config())
}
pub fn num_channels_in_plane(&self, i: usize) -> Option<usize> {
num_channels_in_plane(self.plane_config(), i)
}
/// Returns a [YUVAInfo] that is identical to this one but with the passed [Subsampling]. If the
/// passed [Subsampling] is not [Subsampling::S444] and this info's [PlaneConfig] is not
/// compatible with chroma subsampling (because Y is in the same plane as UV) then the result
/// will be `None`.
pub fn with_subsampling(&self, subsampling: Subsampling) -> Option<Self> {
Self::try_construct(|info| unsafe {
sb::C_SkYUVAInfo_makeSubsampling(self.native(), subsampling.into_native(), info);
Self::native_is_valid(&*info)
})
}
/// Returns a [YUVAInfo] that is identical to this one but with the passed dimensions. If the
/// passed dimensions is empty then the result will be `None`.
pub fn with_dimensions(&self, dimensions: impl Into<ISize>) -> Option<Self> {
Self::try_construct(|info| unsafe {
sb::C_SkYUVAInfo_makeDimensions(self.native(), dimensions.into().native(), info);
Self::native_is_valid(&*info)
})
}
pub(crate) fn native_is_valid(info: &SkYUVAInfo) -> bool {
info.fPlaneConfig != PlaneConfig::Unknown
}
}
| {
unsafe { sb::C_SkYUVAInfo_NumPlanes(config) }
.try_into()
.unwrap()
} | identifier_body |
yuva_info.rs | use super::image_info;
use crate::{prelude::*, EncodedOrigin, ISize, Matrix};
use skia_bindings::{self as sb, SkYUVAInfo, SkYUVAInfo_Subsampling};
use std::{fmt, ptr};
/// Specifies the structure of planes for a YUV image with optional alpha. The actual planar data
/// is not part of this structure and depending on usage is in external textures or pixmaps.
pub type YUVAInfo = Handle<SkYUVAInfo>;
unsafe_send_sync!(YUVAInfo);
impl NativeDrop for SkYUVAInfo {
fn drop(&mut self) {
unsafe { sb::C_SkYUVAInfo_destruct(self) }
}
}
/// Specifies how YUV (and optionally A) are divided among planes. Planes are separated by
/// underscores in the enum value names. Within each plane the pixmap/texture channels are
/// mapped to the YUVA channels in the order specified, e.g. for kY_UV Y is in channel 0 of plane
/// 0, U is in channel 0 of plane 1, and V is in channel 1 of plane 1. Channel ordering
/// within a pixmap/texture given the channels it contains:
/// A: 0:A
/// Luminance/Gray: 0:Gray
/// Luminance/Gray + Alpha: 0:Gray, 1:A
/// RG 0:R, 1:G
/// RGB 0:R, 1:G, 2:B
/// RGBA 0:R, 1:G, 2:B, 3:A
pub use sb::SkYUVAInfo_PlaneConfig as PlaneConfig;
variant_name!(PlaneConfig::YUV);
/// UV subsampling is also specified in the enum value names using J:a:b notation (e.g. 4:2:0 is
/// 1/2 horizontal and 1/2 vertical resolution for U and V). If alpha is present it is not sub-
/// sampled. Note that Subsampling values other than k444 are only valid with [PlaneConfig] values
/// that have U and V in different planes than Y (and A, if present).
#[repr(i32)]
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
pub enum Subsampling {
Unknown = SkYUVAInfo_Subsampling::kUnknown as _,
S444 = SkYUVAInfo_Subsampling::k444 as _,
S422 = SkYUVAInfo_Subsampling::k422 as _,
S420 = SkYUVAInfo_Subsampling::k420 as _,
S440 = SkYUVAInfo_Subsampling::k440 as _,
S411 = SkYUVAInfo_Subsampling::k411 as _,
S410 = SkYUVAInfo_Subsampling::k410 as _,
}
native_transmutable!(SkYUVAInfo_Subsampling, Subsampling, subsampling_layout);
/// Describes how subsampled chroma values are sited relative to luma values.
///
/// Currently only centered siting is supported but will expand to support additional sitings.
pub use sb::SkYUVAInfo_Siting as Siting;
variant_name!(Siting::Centered);
/// Ratio of Y/A values to U/V values in x and y.
pub fn subsampling_factors(subsampling: Subsampling) -> (i32, i32) {
let mut factors: [i32; 2] = Default::default();
unsafe { sb::C_SkYUVAInfo_SubsamplingFactors(subsampling.into_native(), &mut factors[0]) };
#[allow(clippy::tuple_array_conversions)]
(factors[0], factors[1])
}
/// `SubsamplingFactors(Subsampling)` if `plane_index` refers to a U/V plane and otherwise `(1, 1)`
/// if inputs are valid. Invalid inputs consist of incompatible [PlaneConfig] [Subsampling]
/// `plane_index` combinations. `(0, 0)` is returned for invalid inputs.
pub fn plane_subsampling_factors(
plane: PlaneConfig,
subsampling: Subsampling,
plane_index: usize,
) -> (i32, i32) {
let mut factors: [i32; 2] = Default::default();
unsafe {
sb::C_SkYUVAInfo_PlaneSubsamplingFactors(
plane,
subsampling.into_native(),
plane_index.try_into().unwrap(),
&mut factors[0],
)
};
#[allow(clippy::tuple_array_conversions)]
(factors[0], factors[1])
}
/// Given image dimensions, a planer configuration, subsampling, and origin, determine the expected
/// size of each plane. Returns the expected planes. The input image dimensions are as displayed
/// (after the planes have been transformed to the intended display orientation). The plane
/// dimensions are output as the planes are stored in memory (may be rotated from image dimensions).
pub fn plane_dimensions(
image_dimensions: impl Into<ISize>,
config: PlaneConfig,
subsampling: Subsampling,
origin: EncodedOrigin,
) -> Vec<ISize> {
let mut plane_dimensions = [ISize::default(); YUVAInfo::MAX_PLANES];
let size: usize = unsafe {
SkYUVAInfo::PlaneDimensions(
image_dimensions.into().into_native(),
config,
subsampling.into_native(),
origin.into_native(),
plane_dimensions.native_mut().as_mut_ptr(),
)
}
.try_into()
.unwrap();
plane_dimensions[0..size].to_vec()
}
/// Number of planes for a given [PlaneConfig].
pub fn num_planes(config: PlaneConfig) -> usize {
unsafe { sb::C_SkYUVAInfo_NumPlanes(config) }
.try_into()
.unwrap()
}
/// Number of Y, U, V, A channels in the ith plane for a given [PlaneConfig] (or [None] if i is
/// invalid).
pub fn num_channels_in_plane(config: PlaneConfig, i: usize) -> Option<usize> {
(i < num_planes(config)).if_true_then_some(|| {
unsafe { sb::C_SkYUVAInfo_NumChannelsInPlane(config, i.try_into().unwrap()) }
.try_into()
.unwrap()
})
}
/// Does the [PlaneConfig] have alpha values?
pub fn has_alpha(config: PlaneConfig) -> bool {
unsafe { sb::SkYUVAInfo_HasAlpha(config) }
}
impl Default for YUVAInfo {
fn default() -> Self {
Self::construct(|yi| unsafe { sb::C_SkYUVAInfo_Construct(yi) })
}
}
impl NativePartialEq for YUVAInfo {
fn eq(&self, rhs: &Self) -> bool {
unsafe { sb::C_SkYUVAInfo_equals(self.native(), rhs.native()) }
}
}
impl fmt::Debug for YUVAInfo {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("YUVAInfo")
.field("dimensions", &self.dimensions())
.field("plane_config", &self.plane_config())
.field("subsampling", &self.subsampling())
.field("yuv_color_space", &self.yuv_color_space())
.field("origin", &self.origin())
.field("siting_xy", &self.siting_xy())
.finish()
}
}
impl YUVAInfo {
pub const MAX_PLANES: usize = sb::SkYUVAInfo_kMaxPlanes as _;
/// `dimensions` should specify the size of the full resolution image (after planes have been
/// oriented to how the image is displayed as indicated by `origin`).
pub fn new(
dimensions: impl Into<ISize>,
config: PlaneConfig,
subsampling: Subsampling,
color_space: image_info::YUVColorSpace,
origin: impl Into<Option<EncodedOrigin>>,
siting_xy: impl Into<Option<(Siting, Siting)>>,
) -> Option<Self> {
let origin = origin.into().unwrap_or(EncodedOrigin::TopLeft);
let (siting_x, siting_y) = siting_xy
.into()
.unwrap_or((Siting::Centered, Siting::Centered));
let n = unsafe {
SkYUVAInfo::new(
dimensions.into().into_native(),
config,
subsampling.into_native(),
color_space,
origin.into_native(),
siting_x,
siting_y,
)
};
Self::native_is_valid(&n).if_true_then_some(|| Self::from_native_c(n))
}
pub fn plane_config(&self) -> PlaneConfig {
self.native().fPlaneConfig
}
pub fn subsampling(&self) -> Subsampling {
Subsampling::from_native_c(self.native().fSubsampling)
}
pub fn plane_subsampling_factors(&self, plane_index: usize) -> (i32, i32) {
plane_subsampling_factors(self.plane_config(), self.subsampling(), plane_index)
}
/// Dimensions of the full resolution image (after planes have been oriented to how the image
/// is displayed as indicated by fOrigin).
pub fn dimensions(&self) -> ISize {
ISize::from_native_c(self.native().fDimensions)
}
pub fn width(&self) -> i32 {
self.dimensions().width
}
pub fn height(&self) -> i32 {
self.dimensions().height
}
pub fn yuv_color_space(&self) -> image_info::YUVColorSpace {
self.native().fYUVColorSpace
}
pub fn siting_xy(&self) -> (Siting, Siting) {
let n = self.native();
(n.fSitingX, n.fSitingY)
}
pub fn origin(&self) -> EncodedOrigin {
EncodedOrigin::from_native_c(self.native().fOrigin)
}
pub fn origin_matrix(&self) -> Matrix {
self.origin().to_matrix((self.width(), self.height()))
} | pub fn has_alpha(&self) -> bool {
has_alpha(self.plane_config())
}
/// Returns the dimensions for each plane. Dimensions are as stored in memory, before
/// transformation to image display space as indicated by [origin(&self)].
pub fn plane_dimensions(&self) -> Vec<ISize> {
self::plane_dimensions(
self.dimensions(),
self.plane_config(),
self.subsampling(),
self.origin(),
)
}
/// Given a per-plane row bytes, determine size to allocate for all planes. Optionally retrieves
/// the per-plane byte sizes in planeSizes if not `None`. If total size overflows will return
/// `SIZE_MAX` and set all planeSizes to `SIZE_MAX`.
pub fn compute_total_bytes(
&self,
row_bytes: &[usize; Self::MAX_PLANES],
plane_sizes: Option<&mut [usize; Self::MAX_PLANES]>,
) -> usize {
unsafe {
self.native().computeTotalBytes(
row_bytes.as_ptr(),
plane_sizes
.map(|v| v.as_mut_ptr())
.unwrap_or(ptr::null_mut()),
)
}
}
pub fn num_planes(&self) -> usize {
num_planes(self.plane_config())
}
pub fn num_channels_in_plane(&self, i: usize) -> Option<usize> {
num_channels_in_plane(self.plane_config(), i)
}
/// Returns a [YUVAInfo] that is identical to this one but with the passed [Subsampling]. If the
/// passed [Subsampling] is not [Subsampling::S444] and this info's [PlaneConfig] is not
/// compatible with chroma subsampling (because Y is in the same plane as UV) then the result
/// will be `None`.
pub fn with_subsampling(&self, subsampling: Subsampling) -> Option<Self> {
Self::try_construct(|info| unsafe {
sb::C_SkYUVAInfo_makeSubsampling(self.native(), subsampling.into_native(), info);
Self::native_is_valid(&*info)
})
}
/// Returns a [YUVAInfo] that is identical to this one but with the passed dimensions. If the
/// passed dimensions is empty then the result will be `None`.
pub fn with_dimensions(&self, dimensions: impl Into<ISize>) -> Option<Self> {
Self::try_construct(|info| unsafe {
sb::C_SkYUVAInfo_makeDimensions(self.native(), dimensions.into().native(), info);
Self::native_is_valid(&*info)
})
}
pub(crate) fn native_is_valid(info: &SkYUVAInfo) -> bool {
info.fPlaneConfig != PlaneConfig::Unknown
}
} | random_line_split | |
labstats-subscriber.py | #!/usr/bin/env python
import zmq
import sys, os, time, random, signal, json
sys.dont_write_bytecode = True
import logging, labstatslogger, argparse
from daemon import Daemon
from datetime import datetime, timedelta, date
from time import mktime, sleep
import cPickle
directory = "/var/run/labstats/"
timeformat = '%Y-%m-%dT%H:%M:%S'
logger = labstatslogger.logger
'''
Utility functions used by the rest further below
'''
###############################################################################
# Outputs to stdout if --verbose enabled
def verbose_print(message):
if options.verbose:
print message
# Outputs to both logging and stdout (if --verbose enabled)
def error_output(message):
logger.warning(message)
verbose_print(message)
# Exits script. Will delete daemon's pidfile if --daemon was specified
def clean_quit():
if options.daemon:
daemon.delpid()
exit(1)
# If collector is killed manually, clean up and quit
def sigterm_handler(signal, frame):
error_output("Subscriber killed via SIGTERM")
output_checkins()
clean_quit()
# If SIGHUP received, do "soft restart" of sockets and files
# No need to re-input checkins
def sighup_handler(signal, frame):
error_output("Collector received a SIGHUP")
context.destroy()
time.sleep(5)
main(options.retries, 2000, options.tlimit)
signal.signal(signal.SIGTERM, sigterm_handler)
signal.signal(signal.SIGHUP, sighup_handler)
'''
Reaper functions: check timestamps, read in/out checked-in machines.
By default, the reaper will write out its state every recv()
and will check that all checked-in machines are no older than 20 minutes
(by default) every recv()
'''
###############################################################################
# Verbose prints out check_ins: hostname::timestamp format
def print_checkins(last_check, check_ins):
verbose_print("Last check was at "+last_check.strftime(timeformat))
verbose_print("Checked-in machines: ")
for hostname, timestamp in check_ins.iteritems():
verbose_print(hostname+"::"+timestamp.strftime(timeformat))
# Outputs pickled (last_check, check_ins) tuple.
# Overwrites existing checked_in file
def output_checkins(last_check, check_ins):
if options.output is False:
return
try:
checkinfile = open('checked_in', 'w')
except Exception as e:
error_output("Warning: unable to open checked_in logfile. "+str(e))
return
try:
tup = (last_check, check_ins)
cPickle.dump(tup, checkinfile)
checkinfile.close()
except Exception as e:
error_output("Error: could not dump pickled check_in data. "+str(e))
# Read from outputted checked_in file, return last_check and check_ins
def read_checkins():
if not os.path.isfile('checked_in'): # No checkins.log found
logger.warning("No checked_in found")
return (None, {})
try:
infile = open('checked_in', 'r')
last_check, check_ins = cPickle.load(infile)
infile.close()
print_checkins(last_check, check_ins) # verbose prints what was stored
return last_check, check_ins
except Exception as e:
error_output("Error: could not get last_check and check_ins. "+str(e))
return (None, {})
# Checks timestamp is within <interval> minutes' time.
# Returns True if timestamp is outdated
def outdated(curtime, timestamp): # pass in type datetime, datetime
verbose_print("Checking timestamp "+timestamp.strftime(timeformat)+" against current time")
timeobj = datetime.fromtimestamp(mktime(timestamp.timetuple()))
diff = curtime - timeobj # type timedelta
return diff >= timedelta(minutes = options.interval)
# Checks timestamps are all <interval> minutes within current time
# Removes machines/timestamps that are outdated
# Set last_check to current GMT (4-5 hour offset)
def reap(last_check, last_recv, check_ins):
# if last check and last recv are eg. >90 mins from each other,
# stop/skip reaper (because it could be throttling error)
if last_check - last_recv > timedelta(minutes = options.faulttime):
error_output("Too much time between now and last_recv, skipping reaping")
return (last_check, check_ins)
# converting directly from gmtime to datetime loses DST data
cur_string = time.strftime(timeformat, time.gmtime())
last_check = datetime.strptime(cur_string, timeformat)
new_dict = {}
deleted = 0
for hostname, timestamp in check_ins.iteritems():
if outdated(last_check, timestamp) is True:
verbose_print(hostname+" is outdated")
deleted += 1
else: # not outdated; add back to new_dict
|
verbose_print("Reaped "+str(deleted)+" items from check-ins")
output_checkins(last_check, new_dict)
return (last_check, new_dict)
###############################################################################
# Output the json into a log file in /var/log/labstats
def output_log(to_write):
if not os.path.exists('/var/log/labstats/'):
try:
os.mkdir('/var/log/labstats/')
except OSError as e:
error_output("Error: could not make /var/log/labstats/. Not sudo/root.")
return
try:
logout = open('/var/log/labstats/subscriber.log', 'w')
for line in to_write:
logout.write(line)
logout.close()
except OSError as e:
error_output("Error: could not write to subscriber.log. No root access.")
except Exception as e:
error_output("Error: could not write to subscriber.log. "+str(e).capitalize())
def main(ntries, ntime, tlimit):
last_check, check_ins = read_checkins()
# Set up ZMQ sockets and connections
context = zmq.Context()
subscriber = context.socket(zmq.SUB)
subscriber.setsockopt(zmq.SUBSCRIBE,'')
pushsocket = context.socket(zmq.PUSH)
try:
subscriber.connect('tcp://%s:5556' % options.server) # Allows multiple connections
except zmq.ZMQError as e:
error_output('Error: could not connect to port 5556. '+str(e).capitalize())
clean_quit()
try:
pushsocket.connect('tcp://%s:5557' % options.server)
except zmq.ZMQError as e:
error_output('Error: could not connect to port 5557. '+str(e).capitalize())
# Done initializing sockets, begin listening for messages
while ntries != 0 and (tlimit < 0 or ntime <= tlimit):
try:
# Wait for and receive JSON file
verbose_print("Waiting for message...")
message = subscriber.recv_json() # possible source of delay
recv_str = time.strftime(timeformat, time.gmtime())
last_recv = datetime.strptime(recv_str, timeformat)
verbose_print("Received: ")
verbose_print(message)
logger.warning("Subscriber received JSON")
# Send it over to port 5557 to hostinfo-client
try:
pushsocket.send_json(message)
print 'Sent message'
except zmq.ZMQError:
error_output("Warning: could not send data to hostinfo service.")
# skips over without quitting/backoff here
# Output log if daemonized. Will overwrite.
if options.daemon and message['success'] is True:
logger.warning("Dumping JSON into logfile")
output_log(json.dumps(message))
# fault protection if socket/subscriber stalls, don't check and delete all checkins
# Takes timestamp, splits it at '+' (UTC offset unable to convert), converts to datetime
check_ins[message['hostname']] = datetime.strptime(message['clientTimestamp'].split('+')[0], timeformat)
print_checkins(last_check, check_ins) # verbose prints only
last_check, check_ins = reap(last_check, last_recv, check_ins) # will not reap if too far apart
except zmq.ZMQError as e:
error_output("Warning: ZMQ error. "+str(e).capitalize()+
". Restarting with "+str(ntries)+" tries left...")
# Exponential backoff is done here
context.destroy()
time.sleep(ntime / 1000)
ntime = (2 * ntime) + random.randint(0, 1000)
main(ntries - 1, ntime, tlimit)
except (KeyboardInterrupt, SystemExit):
verbose_print('\nQuitting subscriber...')
clean_quit()
except OSError as e:
error_output('Error: '+str(e)+'. Quitting...')
clean_quit()
except Exception as e:
verbose_print("Warning: "+str(e)+". Line "+str(sys.exc_info()[-1].tb_lineno))
logger.warning("Warning: "+str(e)+".")
# Quits when all restart tries used up
error_output("Warning: used up restart tries. Quitting...")
clean_quit()
class subscriberDaemon(Daemon):
def run(self):
main(options.retries, 2000, options.tlimit)
###############################################################################
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--server", "-s", action = "store", default = 'localhost',
dest = "server", help = "Set server to connect to")
parser.add_argument("--verbose", "-v", action = "store_true", default = False,
dest = "verbose", help = "Turns on verbosity flag")
parser.add_argument("--daemon", "-d", action = "store_true", default = False,
dest = "daemon", help = "Turns subscriber into daemon")
parser.add_argument("--pidfile", "-p", action = "store", default = directory,
dest = "directory", help = "Sets location of daemon's pidfile")
parser.add_argument("--interval", "-i", action = "store", type = int, default = 20,
dest = "interval",
help = "Sets max time in minutes a system can be dormant before reaping (20 by default)")
parser.add_argument("--faulttime", "-fault", action = "store", type = int, default = 90,
dest = "faulttime",
help = "Set minimum difference in minutes of last check and last recv to skip reaping (90 by default)")
parser.add_argument("--tlimit", "-t", action = "store", type = int, default = -1,
dest = "tlimit",
help = "Sets maximum restart sleep time in ms (-1 or infinite by default)")
parser.add_argument("--retries", "-r", action = "store", type = int, default = 3,
dest = "retries",
help = "Sets maximum number of retries when restarting (3 by default)")
parser.add_argument("--output", "-o", action = "store_true", default = True,
dest = "output",
help = "Sets whether or not check-in data will be outputted (true by default)")
options = parser.parse_args()
# ntries specified and negative, but no tlimit provided
if options.retries < 0 and options.tlimit < 0:
parser.error("must specify --tlimit if --retries is negative")
verbose_print("Verbosity on")
if options.daemon:
if not os.path.exists(options.directory):
try:
os.mkdir(options.directory)
except OSError as e: # bad directory, or no permissions
error_output("Encountered error while trying to create " + options.directory + ". "
+ e.args[1].capitalize() + ".")
exit(1)
daemon = subscriberDaemon(directory+'subscriber.pid')
daemon.start()
else:
main(options.retries, 2000, options.tlimit)
| new_dict[hostname] = timestamp | conditional_block |
labstats-subscriber.py | #!/usr/bin/env python
import zmq
import sys, os, time, random, signal, json
sys.dont_write_bytecode = True
import logging, labstatslogger, argparse
from daemon import Daemon
from datetime import datetime, timedelta, date
from time import mktime, sleep
import cPickle
directory = "/var/run/labstats/"
timeformat = '%Y-%m-%dT%H:%M:%S'
logger = labstatslogger.logger
'''
Utility functions used by the rest further below
'''
###############################################################################
# Outputs to stdout if --verbose enabled
def verbose_print(message):
if options.verbose:
print message
# Outputs to both logging and stdout (if --verbose enabled)
def error_output(message):
logger.warning(message)
verbose_print(message)
# Exits script. Will delete daemon's pidfile if --daemon was specified
def clean_quit():
if options.daemon:
daemon.delpid()
exit(1)
# If collector is killed manually, clean up and quit
def sigterm_handler(signal, frame):
error_output("Subscriber killed via SIGTERM")
output_checkins()
clean_quit()
# If SIGHUP received, do "soft restart" of sockets and files
# No need to re-input checkins
def sighup_handler(signal, frame):
|
signal.signal(signal.SIGTERM, sigterm_handler)
signal.signal(signal.SIGHUP, sighup_handler)
'''
Reaper functions: check timestamps, read in/out checked-in machines.
By default, the reaper will write out its state every recv()
and will check that all checked-in machines are no older than 20 minutes
(by default) every recv()
'''
###############################################################################
# Verbose prints out check_ins: hostname::timestamp format
def print_checkins(last_check, check_ins):
verbose_print("Last check was at "+last_check.strftime(timeformat))
verbose_print("Checked-in machines: ")
for hostname, timestamp in check_ins.iteritems():
verbose_print(hostname+"::"+timestamp.strftime(timeformat))
# Outputs pickled (last_check, check_ins) tuple.
# Overwrites existing checked_in file
def output_checkins(last_check, check_ins):
if options.output is False:
return
try:
checkinfile = open('checked_in', 'w')
except Exception as e:
error_output("Warning: unable to open checked_in logfile. "+str(e))
return
try:
tup = (last_check, check_ins)
cPickle.dump(tup, checkinfile)
checkinfile.close()
except Exception as e:
error_output("Error: could not dump pickled check_in data. "+str(e))
# Read from outputted checked_in file, return last_check and check_ins
def read_checkins():
if not os.path.isfile('checked_in'): # No checkins.log found
logger.warning("No checked_in found")
return (None, {})
try:
infile = open('checked_in', 'r')
last_check, check_ins = cPickle.load(infile)
infile.close()
print_checkins(last_check, check_ins) # verbose prints what was stored
return last_check, check_ins
except Exception as e:
error_output("Error: could not get last_check and check_ins. "+str(e))
return (None, {})
# Checks timestamp is within <interval> minutes' time.
# Returns True if timestamp is outdated
def outdated(curtime, timestamp): # pass in type datetime, datetime
verbose_print("Checking timestamp "+timestamp.strftime(timeformat)+" against current time")
timeobj = datetime.fromtimestamp(mktime(timestamp.timetuple()))
diff = curtime - timeobj # type timedelta
return diff >= timedelta(minutes = options.interval)
# Checks timestamps are all <interval> minutes within current time
# Removes machines/timestamps that are outdated
# Set last_check to current GMT (4-5 hour offset)
def reap(last_check, last_recv, check_ins):
# if last check and last recv are eg. >90 mins from each other,
# stop/skip reaper (because it could be throttling error)
if last_check - last_recv > timedelta(minutes = options.faulttime):
error_output("Too much time between now and last_recv, skipping reaping")
return (last_check, check_ins)
# converting directly from gmtime to datetime loses DST data
cur_string = time.strftime(timeformat, time.gmtime())
last_check = datetime.strptime(cur_string, timeformat)
new_dict = {}
deleted = 0
for hostname, timestamp in check_ins.iteritems():
if outdated(last_check, timestamp) is True:
verbose_print(hostname+" is outdated")
deleted += 1
else: # not outdated; add back to new_dict
new_dict[hostname] = timestamp
verbose_print("Reaped "+str(deleted)+" items from check-ins")
output_checkins(last_check, new_dict)
return (last_check, new_dict)
###############################################################################
# Output the json into a log file in /var/log/labstats
def output_log(to_write):
if not os.path.exists('/var/log/labstats/'):
try:
os.mkdir('/var/log/labstats/')
except OSError as e:
error_output("Error: could not make /var/log/labstats/. Not sudo/root.")
return
try:
logout = open('/var/log/labstats/subscriber.log', 'w')
for line in to_write:
logout.write(line)
logout.close()
except OSError as e:
error_output("Error: could not write to subscriber.log. No root access.")
except Exception as e:
error_output("Error: could not write to subscriber.log. "+str(e).capitalize())
def main(ntries, ntime, tlimit):
last_check, check_ins = read_checkins()
# Set up ZMQ sockets and connections
context = zmq.Context()
subscriber = context.socket(zmq.SUB)
subscriber.setsockopt(zmq.SUBSCRIBE,'')
pushsocket = context.socket(zmq.PUSH)
try:
subscriber.connect('tcp://%s:5556' % options.server) # Allows multiple connections
except zmq.ZMQError as e:
error_output('Error: could not connect to port 5556. '+str(e).capitalize())
clean_quit()
try:
pushsocket.connect('tcp://%s:5557' % options.server)
except zmq.ZMQError as e:
error_output('Error: could not connect to port 5557. '+str(e).capitalize())
# Done initializing sockets, begin listening for messages
while ntries != 0 and (tlimit < 0 or ntime <= tlimit):
try:
# Wait for and receive JSON file
verbose_print("Waiting for message...")
message = subscriber.recv_json() # possible source of delay
recv_str = time.strftime(timeformat, time.gmtime())
last_recv = datetime.strptime(recv_str, timeformat)
verbose_print("Received: ")
verbose_print(message)
logger.warning("Subscriber received JSON")
# Send it over to port 5557 to hostinfo-client
try:
pushsocket.send_json(message)
print 'Sent message'
except zmq.ZMQError:
error_output("Warning: could not send data to hostinfo service.")
# skips over without quitting/backoff here
# Output log if daemonized. Will overwrite.
if options.daemon and message['success'] is True:
logger.warning("Dumping JSON into logfile")
output_log(json.dumps(message))
# fault protection if socket/subscriber stalls, don't check and delete all checkins
# Takes timestamp, splits it at '+' (UTC offset unable to convert), converts to datetime
check_ins[message['hostname']] = datetime.strptime(message['clientTimestamp'].split('+')[0], timeformat)
print_checkins(last_check, check_ins) # verbose prints only
last_check, check_ins = reap(last_check, last_recv, check_ins) # will not reap if too far apart
except zmq.ZMQError as e:
error_output("Warning: ZMQ error. "+str(e).capitalize()+
". Restarting with "+str(ntries)+" tries left...")
# Exponential backoff is done here
context.destroy()
time.sleep(ntime / 1000)
ntime = (2 * ntime) + random.randint(0, 1000)
main(ntries - 1, ntime, tlimit)
except (KeyboardInterrupt, SystemExit):
verbose_print('\nQuitting subscriber...')
clean_quit()
except OSError as e:
error_output('Error: '+str(e)+'. Quitting...')
clean_quit()
except Exception as e:
verbose_print("Warning: "+str(e)+". Line "+str(sys.exc_info()[-1].tb_lineno))
logger.warning("Warning: "+str(e)+".")
# Quits when all restart tries used up
error_output("Warning: used up restart tries. Quitting...")
clean_quit()
class subscriberDaemon(Daemon):
def run(self):
main(options.retries, 2000, options.tlimit)
###############################################################################
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--server", "-s", action = "store", default = 'localhost',
dest = "server", help = "Set server to connect to")
parser.add_argument("--verbose", "-v", action = "store_true", default = False,
dest = "verbose", help = "Turns on verbosity flag")
parser.add_argument("--daemon", "-d", action = "store_true", default = False,
dest = "daemon", help = "Turns subscriber into daemon")
parser.add_argument("--pidfile", "-p", action = "store", default = directory,
dest = "directory", help = "Sets location of daemon's pidfile")
parser.add_argument("--interval", "-i", action = "store", type = int, default = 20,
dest = "interval",
help = "Sets max time in minutes a system can be dormant before reaping (20 by default)")
parser.add_argument("--faulttime", "-fault", action = "store", type = int, default = 90,
dest = "faulttime",
help = "Set minimum difference in minutes of last check and last recv to skip reaping (90 by default)")
parser.add_argument("--tlimit", "-t", action = "store", type = int, default = -1,
dest = "tlimit",
help = "Sets maximum restart sleep time in ms (-1 or infinite by default)")
parser.add_argument("--retries", "-r", action = "store", type = int, default = 3,
dest = "retries",
help = "Sets maximum number of retries when restarting (3 by default)")
parser.add_argument("--output", "-o", action = "store_true", default = True,
dest = "output",
help = "Sets whether or not check-in data will be outputted (true by default)")
options = parser.parse_args()
# ntries specified and negative, but no tlimit provided
if options.retries < 0 and options.tlimit < 0:
parser.error("must specify --tlimit if --retries is negative")
verbose_print("Verbosity on")
if options.daemon:
if not os.path.exists(options.directory):
try:
os.mkdir(options.directory)
except OSError as e: # bad directory, or no permissions
error_output("Encountered error while trying to create " + options.directory + ". "
+ e.args[1].capitalize() + ".")
exit(1)
daemon = subscriberDaemon(directory+'subscriber.pid')
daemon.start()
else:
main(options.retries, 2000, options.tlimit)
| error_output("Collector received a SIGHUP")
context.destroy()
time.sleep(5)
main(options.retries, 2000, options.tlimit) | identifier_body |
labstats-subscriber.py | #!/usr/bin/env python
import zmq
import sys, os, time, random, signal, json
sys.dont_write_bytecode = True
import logging, labstatslogger, argparse
from daemon import Daemon
from datetime import datetime, timedelta, date
from time import mktime, sleep
import cPickle
directory = "/var/run/labstats/"
timeformat = '%Y-%m-%dT%H:%M:%S'
logger = labstatslogger.logger
'''
Utility functions used by the rest further below
'''
###############################################################################
# Outputs to stdout if --verbose enabled
def verbose_print(message):
if options.verbose:
print message
# Outputs to both logging and stdout (if --verbose enabled)
def error_output(message):
logger.warning(message)
verbose_print(message)
# Exits script. Will delete daemon's pidfile if --daemon was specified
def clean_quit():
if options.daemon:
daemon.delpid()
exit(1)
# If collector is killed manually, clean up and quit
def sigterm_handler(signal, frame):
error_output("Subscriber killed via SIGTERM")
output_checkins()
clean_quit()
# If SIGHUP received, do "soft restart" of sockets and files
# No need to re-input checkins
def sighup_handler(signal, frame):
error_output("Collector received a SIGHUP")
context.destroy()
time.sleep(5)
main(options.retries, 2000, options.tlimit)
signal.signal(signal.SIGTERM, sigterm_handler)
signal.signal(signal.SIGHUP, sighup_handler)
'''
Reaper functions: check timestamps, read in/out checked-in machines.
By default, the reaper will write out its state every recv()
and will check that all checked-in machines are no older than 20 minutes
(by default) every recv()
'''
###############################################################################
# Verbose prints out check_ins: hostname::timestamp format
def print_checkins(last_check, check_ins):
verbose_print("Last check was at "+last_check.strftime(timeformat))
verbose_print("Checked-in machines: ")
for hostname, timestamp in check_ins.iteritems():
verbose_print(hostname+"::"+timestamp.strftime(timeformat))
# Outputs pickled (last_check, check_ins) tuple.
# Overwrites existing checked_in file
def output_checkins(last_check, check_ins):
if options.output is False:
return
try:
checkinfile = open('checked_in', 'w')
except Exception as e:
error_output("Warning: unable to open checked_in logfile. "+str(e))
return
try:
tup = (last_check, check_ins)
cPickle.dump(tup, checkinfile)
checkinfile.close()
except Exception as e:
error_output("Error: could not dump pickled check_in data. "+str(e))
# Read from outputted checked_in file, return last_check and check_ins
def read_checkins():
if not os.path.isfile('checked_in'): # No checkins.log found
logger.warning("No checked_in found")
return (None, {})
try:
infile = open('checked_in', 'r')
last_check, check_ins = cPickle.load(infile)
infile.close()
print_checkins(last_check, check_ins) # verbose prints what was stored
return last_check, check_ins
except Exception as e:
error_output("Error: could not get last_check and check_ins. "+str(e))
return (None, {})
# Checks timestamp is within <interval> minutes' time. | def outdated(curtime, timestamp): # pass in type datetime, datetime
verbose_print("Checking timestamp "+timestamp.strftime(timeformat)+" against current time")
timeobj = datetime.fromtimestamp(mktime(timestamp.timetuple()))
diff = curtime - timeobj # type timedelta
return diff >= timedelta(minutes = options.interval)
# Checks timestamps are all <interval> minutes within current time
# Removes machines/timestamps that are outdated
# Set last_check to current GMT (4-5 hour offset)
def reap(last_check, last_recv, check_ins):
# if last check and last recv are eg. >90 mins from each other,
# stop/skip reaper (because it could be throttling error)
if last_check - last_recv > timedelta(minutes = options.faulttime):
error_output("Too much time between now and last_recv, skipping reaping")
return (last_check, check_ins)
# converting directly from gmtime to datetime loses DST data
cur_string = time.strftime(timeformat, time.gmtime())
last_check = datetime.strptime(cur_string, timeformat)
new_dict = {}
deleted = 0
for hostname, timestamp in check_ins.iteritems():
if outdated(last_check, timestamp) is True:
verbose_print(hostname+" is outdated")
deleted += 1
else: # not outdated; add back to new_dict
new_dict[hostname] = timestamp
verbose_print("Reaped "+str(deleted)+" items from check-ins")
output_checkins(last_check, new_dict)
return (last_check, new_dict)
###############################################################################
# Output the json into a log file in /var/log/labstats
def output_log(to_write):
if not os.path.exists('/var/log/labstats/'):
try:
os.mkdir('/var/log/labstats/')
except OSError as e:
error_output("Error: could not make /var/log/labstats/. Not sudo/root.")
return
try:
logout = open('/var/log/labstats/subscriber.log', 'w')
for line in to_write:
logout.write(line)
logout.close()
except OSError as e:
error_output("Error: could not write to subscriber.log. No root access.")
except Exception as e:
error_output("Error: could not write to subscriber.log. "+str(e).capitalize())
def main(ntries, ntime, tlimit):
last_check, check_ins = read_checkins()
# Set up ZMQ sockets and connections
context = zmq.Context()
subscriber = context.socket(zmq.SUB)
subscriber.setsockopt(zmq.SUBSCRIBE,'')
pushsocket = context.socket(zmq.PUSH)
try:
subscriber.connect('tcp://%s:5556' % options.server) # Allows multiple connections
except zmq.ZMQError as e:
error_output('Error: could not connect to port 5556. '+str(e).capitalize())
clean_quit()
try:
pushsocket.connect('tcp://%s:5557' % options.server)
except zmq.ZMQError as e:
error_output('Error: could not connect to port 5557. '+str(e).capitalize())
# Done initializing sockets, begin listening for messages
while ntries != 0 and (tlimit < 0 or ntime <= tlimit):
try:
# Wait for and receive JSON file
verbose_print("Waiting for message...")
message = subscriber.recv_json() # possible source of delay
recv_str = time.strftime(timeformat, time.gmtime())
last_recv = datetime.strptime(recv_str, timeformat)
verbose_print("Received: ")
verbose_print(message)
logger.warning("Subscriber received JSON")
# Send it over to port 5557 to hostinfo-client
try:
pushsocket.send_json(message)
print 'Sent message'
except zmq.ZMQError:
error_output("Warning: could not send data to hostinfo service.")
# skips over without quitting/backoff here
# Output log if daemonized. Will overwrite.
if options.daemon and message['success'] is True:
logger.warning("Dumping JSON into logfile")
output_log(json.dumps(message))
# fault protection if socket/subscriber stalls, don't check and delete all checkins
# Takes timestamp, splits it at '+' (UTC offset unable to convert), converts to datetime
check_ins[message['hostname']] = datetime.strptime(message['clientTimestamp'].split('+')[0], timeformat)
print_checkins(last_check, check_ins) # verbose prints only
last_check, check_ins = reap(last_check, last_recv, check_ins) # will not reap if too far apart
except zmq.ZMQError as e:
error_output("Warning: ZMQ error. "+str(e).capitalize()+
". Restarting with "+str(ntries)+" tries left...")
# Exponential backoff is done here
context.destroy()
time.sleep(ntime / 1000)
ntime = (2 * ntime) + random.randint(0, 1000)
main(ntries - 1, ntime, tlimit)
except (KeyboardInterrupt, SystemExit):
verbose_print('\nQuitting subscriber...')
clean_quit()
except OSError as e:
error_output('Error: '+str(e)+'. Quitting...')
clean_quit()
except Exception as e:
verbose_print("Warning: "+str(e)+". Line "+str(sys.exc_info()[-1].tb_lineno))
logger.warning("Warning: "+str(e)+".")
# Quits when all restart tries used up
error_output("Warning: used up restart tries. Quitting...")
clean_quit()
class subscriberDaemon(Daemon):
def run(self):
main(options.retries, 2000, options.tlimit)
###############################################################################
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--server", "-s", action = "store", default = 'localhost',
dest = "server", help = "Set server to connect to")
parser.add_argument("--verbose", "-v", action = "store_true", default = False,
dest = "verbose", help = "Turns on verbosity flag")
parser.add_argument("--daemon", "-d", action = "store_true", default = False,
dest = "daemon", help = "Turns subscriber into daemon")
parser.add_argument("--pidfile", "-p", action = "store", default = directory,
dest = "directory", help = "Sets location of daemon's pidfile")
parser.add_argument("--interval", "-i", action = "store", type = int, default = 20,
dest = "interval",
help = "Sets max time in minutes a system can be dormant before reaping (20 by default)")
parser.add_argument("--faulttime", "-fault", action = "store", type = int, default = 90,
dest = "faulttime",
help = "Set minimum difference in minutes of last check and last recv to skip reaping (90 by default)")
parser.add_argument("--tlimit", "-t", action = "store", type = int, default = -1,
dest = "tlimit",
help = "Sets maximum restart sleep time in ms (-1 or infinite by default)")
parser.add_argument("--retries", "-r", action = "store", type = int, default = 3,
dest = "retries",
help = "Sets maximum number of retries when restarting (3 by default)")
parser.add_argument("--output", "-o", action = "store_true", default = True,
dest = "output",
help = "Sets whether or not check-in data will be outputted (true by default)")
options = parser.parse_args()
# ntries specified and negative, but no tlimit provided
if options.retries < 0 and options.tlimit < 0:
parser.error("must specify --tlimit if --retries is negative")
verbose_print("Verbosity on")
if options.daemon:
if not os.path.exists(options.directory):
try:
os.mkdir(options.directory)
except OSError as e: # bad directory, or no permissions
error_output("Encountered error while trying to create " + options.directory + ". "
+ e.args[1].capitalize() + ".")
exit(1)
daemon = subscriberDaemon(directory+'subscriber.pid')
daemon.start()
else:
main(options.retries, 2000, options.tlimit) | # Returns True if timestamp is outdated | random_line_split |
labstats-subscriber.py | #!/usr/bin/env python
import zmq
import sys, os, time, random, signal, json
sys.dont_write_bytecode = True
import logging, labstatslogger, argparse
from daemon import Daemon
from datetime import datetime, timedelta, date
from time import mktime, sleep
import cPickle
directory = "/var/run/labstats/"
timeformat = '%Y-%m-%dT%H:%M:%S'
logger = labstatslogger.logger
'''
Utility functions used by the rest further below
'''
###############################################################################
# Outputs to stdout if --verbose enabled
def verbose_print(message):
if options.verbose:
print message
# Outputs to both logging and stdout (if --verbose enabled)
def error_output(message):
logger.warning(message)
verbose_print(message)
# Exits script. Will delete daemon's pidfile if --daemon was specified
def clean_quit():
if options.daemon:
daemon.delpid()
exit(1)
# If collector is killed manually, clean up and quit
def sigterm_handler(signal, frame):
error_output("Subscriber killed via SIGTERM")
output_checkins()
clean_quit()
# If SIGHUP received, do "soft restart" of sockets and files
# No need to re-input checkins
def sighup_handler(signal, frame):
error_output("Collector received a SIGHUP")
context.destroy()
time.sleep(5)
main(options.retries, 2000, options.tlimit)
signal.signal(signal.SIGTERM, sigterm_handler)
signal.signal(signal.SIGHUP, sighup_handler)
'''
Reaper functions: check timestamps, read in/out checked-in machines.
By default, the reaper will write out its state every recv()
and will check that all checked-in machines are no older than 20 minutes
(by default) every recv()
'''
###############################################################################
# Verbose prints out check_ins: hostname::timestamp format
def print_checkins(last_check, check_ins):
verbose_print("Last check was at "+last_check.strftime(timeformat))
verbose_print("Checked-in machines: ")
for hostname, timestamp in check_ins.iteritems():
verbose_print(hostname+"::"+timestamp.strftime(timeformat))
# Outputs pickled (last_check, check_ins) tuple.
# Overwrites existing checked_in file
def output_checkins(last_check, check_ins):
if options.output is False:
return
try:
checkinfile = open('checked_in', 'w')
except Exception as e:
error_output("Warning: unable to open checked_in logfile. "+str(e))
return
try:
tup = (last_check, check_ins)
cPickle.dump(tup, checkinfile)
checkinfile.close()
except Exception as e:
error_output("Error: could not dump pickled check_in data. "+str(e))
# Read from outputted checked_in file, return last_check and check_ins
def read_checkins():
if not os.path.isfile('checked_in'): # No checkins.log found
logger.warning("No checked_in found")
return (None, {})
try:
infile = open('checked_in', 'r')
last_check, check_ins = cPickle.load(infile)
infile.close()
print_checkins(last_check, check_ins) # verbose prints what was stored
return last_check, check_ins
except Exception as e:
error_output("Error: could not get last_check and check_ins. "+str(e))
return (None, {})
# Checks timestamp is within <interval> minutes' time.
# Returns True if timestamp is outdated
def outdated(curtime, timestamp): # pass in type datetime, datetime
verbose_print("Checking timestamp "+timestamp.strftime(timeformat)+" against current time")
timeobj = datetime.fromtimestamp(mktime(timestamp.timetuple()))
diff = curtime - timeobj # type timedelta
return diff >= timedelta(minutes = options.interval)
# Checks timestamps are all <interval> minutes within current time
# Removes machines/timestamps that are outdated
# Set last_check to current GMT (4-5 hour offset)
def reap(last_check, last_recv, check_ins):
# if last check and last recv are eg. >90 mins from each other,
# stop/skip reaper (because it could be throttling error)
if last_check - last_recv > timedelta(minutes = options.faulttime):
error_output("Too much time between now and last_recv, skipping reaping")
return (last_check, check_ins)
# converting directly from gmtime to datetime loses DST data
cur_string = time.strftime(timeformat, time.gmtime())
last_check = datetime.strptime(cur_string, timeformat)
new_dict = {}
deleted = 0
for hostname, timestamp in check_ins.iteritems():
if outdated(last_check, timestamp) is True:
verbose_print(hostname+" is outdated")
deleted += 1
else: # not outdated; add back to new_dict
new_dict[hostname] = timestamp
verbose_print("Reaped "+str(deleted)+" items from check-ins")
output_checkins(last_check, new_dict)
return (last_check, new_dict)
###############################################################################
# Output the json into a log file in /var/log/labstats
def output_log(to_write):
if not os.path.exists('/var/log/labstats/'):
try:
os.mkdir('/var/log/labstats/')
except OSError as e:
error_output("Error: could not make /var/log/labstats/. Not sudo/root.")
return
try:
logout = open('/var/log/labstats/subscriber.log', 'w')
for line in to_write:
logout.write(line)
logout.close()
except OSError as e:
error_output("Error: could not write to subscriber.log. No root access.")
except Exception as e:
error_output("Error: could not write to subscriber.log. "+str(e).capitalize())
def | (ntries, ntime, tlimit):
last_check, check_ins = read_checkins()
# Set up ZMQ sockets and connections
context = zmq.Context()
subscriber = context.socket(zmq.SUB)
subscriber.setsockopt(zmq.SUBSCRIBE,'')
pushsocket = context.socket(zmq.PUSH)
try:
subscriber.connect('tcp://%s:5556' % options.server) # Allows multiple connections
except zmq.ZMQError as e:
error_output('Error: could not connect to port 5556. '+str(e).capitalize())
clean_quit()
try:
pushsocket.connect('tcp://%s:5557' % options.server)
except zmq.ZMQError as e:
error_output('Error: could not connect to port 5557. '+str(e).capitalize())
# Done initializing sockets, begin listening for messages
while ntries != 0 and (tlimit < 0 or ntime <= tlimit):
try:
# Wait for and receive JSON file
verbose_print("Waiting for message...")
message = subscriber.recv_json() # possible source of delay
recv_str = time.strftime(timeformat, time.gmtime())
last_recv = datetime.strptime(recv_str, timeformat)
verbose_print("Received: ")
verbose_print(message)
logger.warning("Subscriber received JSON")
# Send it over to port 5557 to hostinfo-client
try:
pushsocket.send_json(message)
print 'Sent message'
except zmq.ZMQError:
error_output("Warning: could not send data to hostinfo service.")
# skips over without quitting/backoff here
# Output log if daemonized. Will overwrite.
if options.daemon and message['success'] is True:
logger.warning("Dumping JSON into logfile")
output_log(json.dumps(message))
# fault protection if socket/subscriber stalls, don't check and delete all checkins
# Takes timestamp, splits it at '+' (UTC offset unable to convert), converts to datetime
check_ins[message['hostname']] = datetime.strptime(message['clientTimestamp'].split('+')[0], timeformat)
print_checkins(last_check, check_ins) # verbose prints only
last_check, check_ins = reap(last_check, last_recv, check_ins) # will not reap if too far apart
except zmq.ZMQError as e:
error_output("Warning: ZMQ error. "+str(e).capitalize()+
". Restarting with "+str(ntries)+" tries left...")
# Exponential backoff is done here
context.destroy()
time.sleep(ntime / 1000)
ntime = (2 * ntime) + random.randint(0, 1000)
main(ntries - 1, ntime, tlimit)
except (KeyboardInterrupt, SystemExit):
verbose_print('\nQuitting subscriber...')
clean_quit()
except OSError as e:
error_output('Error: '+str(e)+'. Quitting...')
clean_quit()
except Exception as e:
verbose_print("Warning: "+str(e)+". Line "+str(sys.exc_info()[-1].tb_lineno))
logger.warning("Warning: "+str(e)+".")
# Quits when all restart tries used up
error_output("Warning: used up restart tries. Quitting...")
clean_quit()
class subscriberDaemon(Daemon):
def run(self):
main(options.retries, 2000, options.tlimit)
###############################################################################
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--server", "-s", action = "store", default = 'localhost',
dest = "server", help = "Set server to connect to")
parser.add_argument("--verbose", "-v", action = "store_true", default = False,
dest = "verbose", help = "Turns on verbosity flag")
parser.add_argument("--daemon", "-d", action = "store_true", default = False,
dest = "daemon", help = "Turns subscriber into daemon")
parser.add_argument("--pidfile", "-p", action = "store", default = directory,
dest = "directory", help = "Sets location of daemon's pidfile")
parser.add_argument("--interval", "-i", action = "store", type = int, default = 20,
dest = "interval",
help = "Sets max time in minutes a system can be dormant before reaping (20 by default)")
parser.add_argument("--faulttime", "-fault", action = "store", type = int, default = 90,
dest = "faulttime",
help = "Set minimum difference in minutes of last check and last recv to skip reaping (90 by default)")
parser.add_argument("--tlimit", "-t", action = "store", type = int, default = -1,
dest = "tlimit",
help = "Sets maximum restart sleep time in ms (-1 or infinite by default)")
parser.add_argument("--retries", "-r", action = "store", type = int, default = 3,
dest = "retries",
help = "Sets maximum number of retries when restarting (3 by default)")
parser.add_argument("--output", "-o", action = "store_true", default = True,
dest = "output",
help = "Sets whether or not check-in data will be outputted (true by default)")
options = parser.parse_args()
# ntries specified and negative, but no tlimit provided
if options.retries < 0 and options.tlimit < 0:
parser.error("must specify --tlimit if --retries is negative")
verbose_print("Verbosity on")
if options.daemon:
if not os.path.exists(options.directory):
try:
os.mkdir(options.directory)
except OSError as e: # bad directory, or no permissions
error_output("Encountered error while trying to create " + options.directory + ". "
+ e.args[1].capitalize() + ".")
exit(1)
daemon = subscriberDaemon(directory+'subscriber.pid')
daemon.start()
else:
main(options.retries, 2000, options.tlimit)
| main | identifier_name |
gocomics.py | # SPDX-License-Identifier: MIT
# Copyright (C) 2004-2008 Tristan Seligmann and Jonathan Jacobs
# Copyright (C) 2012-2014 Bastian Kleineidam
# Copyright (C) 2015-2022 Tobias Gruetzmacher
from ..scraper import ParserScraper
from ..helpers import indirectStarter
class GoComics(ParserScraper):
url = 'https://www.gocomics.com/'
imageSearch = '//picture[d:class("item-comic-image")]/img'
prevSearch = '//a[d:class("js-previous-comic")]'
latestSearch = '//div[d:class("gc-deck--cta-0")]//a'
starter = indirectStarter
help = 'Index format: yyyy/mm/dd'
def __init__(self, name, path, lang=None):
super(GoComics, self).__init__('GoComics/' + name)
self.session.add_throttle('www.gocomics.com', 1.0, 2.0)
self.url = 'https://www.gocomics.com/' + path
self.shortname = name
if lang:
self.lang = lang
def namer(self, image_url, page_url):
prefix, year, month, day = page_url.rsplit('/', 3)
return "%s_%s%s%s.gif" % (self.shortname, year, month, day)
def getIndexStripUrl(self, index):
return '{}/{}'.format(self.url, index)
def shouldSkipUrl(self, url, data):
"""Skip pages without images."""
return data.xpath('//img[contains(@src, "content-error-missing")]')
@classmethod
def getmodules(cls): # noqa: CFQ001
return (
# old comics removed from the listing
cls('HeavenlyNostrils', 'heavenly-nostrils'),
# do not edit anything below since these entries are generated from
# scripts/gocomics.py
# START AUTOUPDATE
cls('1AndDone', '1-and-done'),
cls('9ChickweedLane', '9chickweedlane'),
cls('9ChickweedLaneClassics', '9-chickweed-lane-classics'),
cls('9To5', '9to5'),
cls('Aaggghhh', 'Aaggghhh', 'es'),
cls('AdamAtHome', 'adamathome'),
cls('AdultChildren', 'adult-children'),
cls('Agnes', 'agnes'),
cls('AJAndMagnus', 'aj-and-magnus'),
cls('AlGoodwynEditorialCartoons', 'algoodwyn'),
cls('AlisHouse', 'alis-house'),
cls('AlleyOop', 'alley-oop'),
cls('AmandaTheGreat', 'amanda-the-great'),
cls('Andertoons', 'andertoons'),
cls('AndyCapp', 'andycapp'),
cls('AngryLittleGirls', 'angry-little-girls'),
cls('AnimalCrackers', 'animalcrackers'),
cls('Annie', 'annie'),
cls('AProblemLikeJamal', 'a-problem-like-jamal'),
cls('ArloAndJanis', 'arloandjanis'),
cls('AskShagg', 'askshagg'),
cls('AtTavicat', 'tavicat'),
cls('AuntyAcid', 'aunty-acid'),
cls('BabyBlues', 'babyblues'),
cls('BackInTheDay', 'backintheday'),
cls('BackToBC', 'back-to-bc'),
cls('Bacon', 'bacon'),
cls('Badlands', 'badlands'),
cls('BadMachinery', 'bad-machinery'),
cls('Baldo', 'baldo'),
cls('BaldoEnEspanol', 'baldoespanol', 'es'),
cls('BallardStreet', 'ballardstreet'),
cls('BananaTriangle', 'banana-triangle'),
cls('BarkeaterLake', 'barkeaterlake'),
cls('BarneyAndClyde', 'barneyandclyde'),
cls('BasicInstructions', 'basicinstructions'),
cls('BatchRejection', 'batch-rejection'),
cls('BC', 'bc'),
cls('BeanieTheBrownie', 'beanie-the-brownie'),
cls('Beardo', 'beardo'),
cls('BearWithMe', 'bear-with-me'),
cls('Ben', 'ben'),
cls('BenitinYEneas', 'muttandjeffespanol', 'es'),
cls('BergerAndWyse', 'berger-and-wyse'),
cls('BerkeleyMews', 'berkeley-mews'),
cls('Betty', 'betty'),
cls('BFGFSyndrome', 'bfgf-syndrome'),
cls('BigNate', 'bignate'),
cls('BigNateFirstClass', 'big-nate-first-class'),
cls('BigTop', 'bigtop'),
cls('BirdAndMoon', 'bird-and-moon'),
cls('Birdbrains', 'birdbrains'),
cls('BleekerTheRechargeableDog', 'bleeker'),
cls('Bliss', 'bliss'),
cls('BloomCounty', 'bloomcounty'),
cls('BloomCounty2019', 'bloom-county'),
cls('BobGorrell', 'bobgorrell'),
cls('BobTheSquirrel', 'bobthesquirrel'),
cls('BoNanas', 'bonanas'),
cls('Boomerangs', 'boomerangs'),
cls('Bottomliners', 'bottomliners'),
cls('BoundAndGagged', 'boundandgagged'),
cls('Bozo', 'bozo'),
cls('BreakingCatNews', 'breaking-cat-news'),
cls('BreakOfDay', 'break-of-day'),
cls('Brevity', 'brevity'),
cls('BrewsterRockit', 'brewsterrockit'),
cls('BrianMcFadden', 'brian-mcfadden'),
cls('BroomHilda', 'broomhilda'),
cls('Buckles', 'buckles'),
cls('Bully', 'bully'),
cls('Buni', 'buni'),
cls('CalvinAndHobbes', 'calvinandhobbes'),
cls('CalvinAndHobbesEnEspanol', 'calvinandhobbesespanol', 'es'),
cls('Candorville', 'candorville'),
cls('CatanaComics', 'little-moments-of-love'),
cls('CathyClassics', 'cathy'),
cls('CathyCommiserations', 'cathy-commiserations'),
cls('CatsCafe', 'cats-cafe'),
cls('CattitudeDoggonit', 'cattitude-doggonit'),
cls('CestLaVie', 'cestlavie'),
cls('CheerUpEmoKid', 'cheer-up-emo-kid'),
cls('ChipBok', 'chipbok'),
cls('ChrisBritt', 'chrisbritt'),
cls('ChuckDrawsThings', 'chuck-draws-things'),
cls('ChuckleBros', 'chucklebros'),
cls('CitizenDog', 'citizendog'),
cls('Claw', 'claw'),
cls('ClayBennett', 'claybennett'),
cls('ClayJones', 'clayjones'),
cls('Cleats', 'cleats'),
cls('CloseToHome', 'closetohome'),
cls('Computoon', 'compu-toon'),
cls('Cornered', 'cornered'),
cls('CowAndBoyClassics', 'cowandboy'),
cls('CowTown', 'cowtown'),
cls('Crabgrass', 'crabgrass'),
cls('Crumb', 'crumb'),
cls('CulDeSac', 'culdesac'),
cls('DaddysHome', 'daddyshome'),
cls('DanaSummers', 'danasummers'),
cls('DarkSideOfTheHorse', 'darksideofthehorse'),
cls('DeepDarkFears', 'deep-dark-fears'),
cls('DeFlocked', 'deflocked'),
cls('DiamondLil', 'diamondlil'),
cls('DickTracy', 'dicktracy'),
cls('DilbertClassics', 'dilbert-classics'),
cls('DilbertEnEspanol', 'dilbert-en-espanol', 'es'),
cls('DinosaurComics', 'dinosaur-comics'),
cls('DogEatDoug', 'dogeatdoug'),
cls('DogsOfCKennel', 'dogsofckennel'),
cls('DomesticAbuse', 'domesticabuse'),
cls('DonBrutus', 'don-brutus', 'es'),
cls('DoodleForFood', 'doodle-for-food'),
cls('DoodleTown', 'doodle-town'),
cls('Doonesbury', 'doonesbury'),
cls('Drabble', 'drabble'),
cls('DrewSheneman', 'drewsheneman'),
cls('DumbwichCastle', 'dumbwich-castle'),
cls('EdgeCity', 'edge-city'),
cls('Eek', 'eek'),
cls('ElCafDePoncho', 'el-cafe-de-poncho', 'es'),
cls('EmmyLou', 'emmy-lou'),
cls('Endtown', 'endtown'),
cls('EverydayPeopleCartoons', 'everyday-people-cartoons'),
cls('Eyebeam', 'eyebeam'),
cls('EyebeamClassic', 'eyebeam-classic'),
cls('FalseKnees', 'false-knees'),
cls('FamilyTree', 'familytree'),
cls('Farcus', 'farcus'),
cls('FatCats', 'fat-cats'),
cls('FloAndFriends', 'floandfriends'),
cls('FMinus', 'fminus'),
cls('FoolishMortals', 'foolish-mortals'),
cls('ForBetterOrForWorse', 'forbetterorforworse'),
cls('ForHeavensSake', 'forheavenssake'),
cls('FourEyes', 'four-eyes'),
cls('FowlLanguage', 'fowl-language'),
cls('FoxTrot', 'foxtrot'),
cls('FoxTrotClassics', 'foxtrotclassics'),
cls('FoxTrotEnEspanol', 'foxtrotespanol', 'es'),
cls('Francis', 'francis'),
cls('FrankAndErnest', 'frank-and-ernest'),
cls('Frazz', 'frazz'),
cls('FredBasset', 'fredbasset'),
cls('FredBassetEnEspanol', 'fredbassetespanol', 'es'),
cls('FreeRange', 'freerange'),
cls('FreshlySqueezed', 'freshlysqueezed'),
cls('FrogApplause', 'frogapplause'),
cls('Garfield', 'garfield'),
cls('GarfieldClassics', 'garfield-classics'),
cls('GarfieldEnEspanol', 'garfieldespanol', 'es'),
cls('GaryMarkstein', 'garymarkstein'),
cls('GaryVarvel', 'garyvarvel'),
cls('GasolineAlley', 'gasolinealley'),
cls('Gaturro', 'gaturro', 'es'),
cls('Geech', 'geech'),
cls('GetALife', 'getalife'),
cls('GetFuzzy', 'getfuzzy'),
cls('Gil', 'gil'),
cls('GilThorp', 'gilthorp'),
cls('GingerMeggs', 'gingermeggs'),
cls('GingerMeggsEnEspanol', 'gingermeggs-espanol', 'es'),
cls('GlasbergenCartoons', 'glasbergen-cartoons'),
cls('Globetrotter', 'globetrotter'),
cls('GManWebcomics', 'g-man-webcomics'),
cls('Goats', 'goats'),
cls('GrandAvenue', 'grand-avenue'),
cls('GrayMatters', 'gray-matters'),
cls('GreenHumour', 'green-humour'),
cls('HaircutPractice', 'haircut-practice'),
cls('HalfFull', 'half-full'),
cls('Harley', 'harley'),
cls('HeartOfTheCity', 'heartofthecity'),
cls('Heathcliff', 'heathcliff'),
cls('HeathcliffEnEspanol', 'heathcliffespanol', 'es'),
cls('HenryPayne', 'henrypayne'),
cls('HerbAndJamaal', 'herbandjamaal'),
cls('Herman', 'herman'),
cls('HomeAndAway', 'homeandaway'),
cls('HotComicsForCoolPeople', 'hot-comics-for-cool-people'),
cls('HutchOwen', 'hutch-owen'),
cls('ImagineThis', 'imaginethis'),
cls('ImogenQuest', 'imogen-quest'),
cls('InkPen', 'inkpen'),
cls('InSecurity', 'in-security'),
cls('InTheBleachers', 'inthebleachers'),
cls('InTheSticks', 'inthesticks'),
cls('InvisibleBread', 'invisible-bread'),
cls('ItsAllAboutYou', 'itsallaboutyou'),
cls('JackOhman', 'jackohman'),
cls('JakeLikesOnions', 'jake-likes-onions'),
cls('JanesWorld', 'janesworld'),
cls('JeffDanziger', 'jeffdanziger'),
cls('JeffStahler', 'jeffstahler'),
cls('JenSorensen', 'jen-sorensen'),
cls('JimBentonCartoons', 'jim-benton-cartoons'),
cls('JimMorin', 'jimmorin'),
cls('JoeHeller', 'joe-heller'),
cls('JoelPett', 'joelpett'),
cls('JohnDeering', 'johndeering'),
cls('JumpStart', 'jumpstart'), | cls('KidBeowulf', 'kid-beowulf'),
cls('KitchenCapers', 'kitchen-capers'),
cls('Kliban', 'kliban'),
cls('KlibansCats', 'klibans-cats'),
cls('LaCucaracha', 'lacucaracha'),
cls('LaCucarachaEnEspanol', 'la-cucaracha-en-espanol', 'es'),
cls('LaloAlcaraz', 'laloalcaraz'),
cls('LaloAlcarazEnEspanol', 'laloenespanol', 'es'),
cls('LardsWorldPeaceTips', 'lards-world-peace-tips'),
cls('LasHermanasStone', 'stonesoup_espanol', 'es'),
cls('LastKiss', 'lastkiss'),
cls('LaughingRedheadComics', 'laughing-redhead-comics'),
cls('LayLines', 'lay-lines'),
cls('LearnToSpeakCat', 'learn-to-speak-cat'),
cls('LibertyMeadows', 'libertymeadows'),
cls('LifeOnEarth', 'life-on-earth'),
cls('LilAbner', 'lil-abner'),
cls('Lio', 'lio'),
cls('LioEnEspanol', 'lioespanol', 'es'),
cls('LisaBenson', 'lisabenson'),
cls('LittleDogLost', 'littledoglost'),
cls('LittleFriedChickenAndSushi', 'little-fried-chicken-and-sushi'),
cls('LittleNemo', 'little-nemo'),
cls('LizClimoCartoons', 'liz-climo-cartoons'),
cls('Lola', 'lola'),
cls('LolaEnEspanol', 'lola-en-espanol', 'es'),
cls('LongStoryShort', 'long-story-short'),
cls('LooksGoodOnPaper', 'looks-good-on-paper'),
cls('LooseParts', 'looseparts'),
cls('LosOsorios', 'los-osorios', 'es'),
cls('LostSheep', 'lostsheep'),
cls('Luann', 'luann'),
cls('LuannAgainn', 'luann-againn'),
cls('LuannEnEspanol', 'luannspanish', 'es'),
cls('LuckyCow', 'luckycow'),
cls('LugNuts', 'lug-nuts'),
cls('Lunarbaboon', 'lunarbaboon'),
cls('M2Bulls', 'm2bulls'),
cls('Maintaining', 'maintaining'),
cls('MakingIt', 'making-it'),
cls('MannequinOnTheMoon', 'mannequin-on-the-moon'),
cls('MariasDay', 'marias-day'),
cls('Marmaduke', 'marmaduke'),
cls('MarshallRamsey', 'marshallramsey'),
cls('MattBors', 'matt-bors'),
cls('MattDavies', 'mattdavies'),
cls('MattWuerker', 'mattwuerker'),
cls('MediumLarge', 'medium-large'),
cls('MessycowComics', 'messy-cow'),
cls('MexikidStories', 'mexikid-stories'),
cls('MichaelRamirez', 'michaelramirez'),
cls('MikeDuJour', 'mike-du-jour'),
cls('MikeLester', 'mike-lester'),
cls('MikeLuckovich', 'mikeluckovich'),
cls('MissPeach', 'miss-peach'),
cls('ModeratelyConfused', 'moderately-confused'),
cls('Momma', 'momma'),
cls('Monty', 'monty'),
cls('MontyDiaros', 'monty-diaros', 'es'),
cls('MotleyClassics', 'motley-classics'),
cls('MrLowe', 'mr-lowe'),
cls('MtPleasant', 'mtpleasant'),
cls('MuttAndJeff', 'muttandjeff'),
cls('MyDadIsDracula', 'my-dad-is-dracula'),
cls('MythTickle', 'mythtickle'),
cls('Nancy', 'nancy'),
cls('NancyClassics', 'nancy-classics'),
cls('NateElGrande', 'nate-el-grande', 'es'),
cls('NestHeads', 'nestheads'),
cls('NEUROTICA', 'neurotica'),
cls('NewAdventuresOfQueenVictoria', 'thenewadventuresofqueenvictoria'),
cls('NextDoorNeighbors', 'next-door-neighbors'),
cls('NickAnderson', 'nickanderson'),
cls('NickAndZuzu', 'nick-and-zuzu'),
cls('NonSequitur', 'nonsequitur'),
cls('NothingIsNotSomething', 'nothing-is-not-something'),
cls('NotInventedHere', 'not-invented-here'),
cls('NowRecharging', 'now-recharging'),
cls('OffTheMark', 'offthemark'),
cls('OhBrother', 'oh-brother'),
cls('OllieAndQuentin', 'ollie-and-quentin'),
cls('OnAClaireDay', 'onaclaireday'),
cls('OneBigHappy', 'onebighappy'),
cls('OrdinaryBill', 'ordinary-bill'),
cls('OriginsOfTheSundayComics', 'origins-of-the-sunday-comics'),
cls('OurSuperAdventure', 'our-super-adventure'),
cls('Outland', 'outland'),
cls('OutOfTheGenePoolReRuns', 'outofthegenepool'),
cls('Overboard', 'overboard'),
cls('OverboardEnEspanol', 'overboardespanol', 'es'),
cls('OverTheHedge', 'overthehedge'),
cls('OzyAndMillie', 'ozy-and-millie'),
cls('PatOliphant', 'patoliphant'),
cls('PCAndPixel', 'pcandpixel'),
cls('Peanuts', 'peanuts'),
cls('PeanutsBegins', 'peanuts-begins'),
cls('PearlsBeforeSwine', 'pearlsbeforeswine'),
cls('Periquita', 'periquita', 'es'),
cls('PerlasParaLosCerdos', 'perlas-para-los-cerdos', 'es'),
cls('PerryBibleFellowship', 'perry-bible-fellowship'),
cls('PetuniaAndDre', 'petunia-and-dre'),
cls('PhilHands', 'phil-hands'),
cls('PhoebeAndHerUnicorn', 'phoebe-and-her-unicorn'),
cls('Pibgorn', 'pibgorn'),
cls('PibgornSketches', 'pibgornsketches'),
cls('Pickles', 'pickles'),
cls('PleaseListenToMe', 'please-listen-to-me'),
cls('Pluggers', 'pluggers'),
cls('PoochCafe', 'poochcafe'),
cls('Poorcraft', 'poorcraft'),
cls('PoorlyDrawnLines', 'poorly-drawn-lines'),
cls('PotShots', 'pot-shots'),
cls('PreTeena', 'preteena'),
cls('PricklyCity', 'pricklycity'),
cls('QuestionableQuotebook', 'questionable-quotebook'),
cls('RabbitsAgainstMagic', 'rabbitsagainstmagic'),
cls('RaisingDuncan', 'raising-duncan'),
cls('RandolphItch2Am', 'randolphitch'),
cls('RealityCheck', 'realitycheck'),
cls('RealLifeAdventures', 'reallifeadventures'),
cls('RebeccaHendin', 'rebecca-hendin'),
cls('RedAndRover', 'redandrover'),
cls('RedMeat', 'redmeat'),
cls('RichardsPoorAlmanac', 'richards-poor-almanac'),
cls('RipHaywire', 'riphaywire'),
cls('RipleysAunqueUstedNoLoCrea', 'ripleys-en-espanol', 'es'),
cls('RipleysBelieveItOrNot', 'ripleysbelieveitornot'),
cls('RobbieAndBobby', 'robbie-and-bobby'),
cls('RobertAriail', 'robert-ariail'),
cls('RobRogers', 'robrogers'),
cls('Rosebuds', 'rosebuds'),
cls('RosebudsEnEspanol', 'rosebuds-en-espanol'),
cls('RoseIsRose', 'roseisrose'),
cls('Rubes', 'rubes'),
cls('RudyPark', 'rudypark'),
cls('SaltNPepper', 'salt-n-pepper'),
cls('SarahsScribbles', 'sarahs-scribbles'),
cls('SaturdayMorningBreakfastCereal', 'saturday-morning-breakfast-cereal'),
cls('SavageChickens', 'savage-chickens'),
cls('ScaryGary', 'scarygary'),
cls('ScenesFromAMultiverse', 'scenes-from-a-multiverse'),
cls('ScottStantis', 'scottstantis'),
cls('ShenComix', 'shen-comix'),
cls('ShermansLagoon', 'shermanslagoon'),
cls('ShirleyAndSonClassics', 'shirley-and-son-classics'),
cls('Shoe', 'shoe'),
cls('SigneWilkinson', 'signewilkinson'),
cls('SketchsharkComics', 'sketchshark-comics'),
cls('SkinHorse', 'skinhorse'),
cls('Skippy', 'skippy'),
cls('SmallPotatoes', 'small-potatoes'),
cls('SnoopyEnEspanol', 'peanuts-espanol', 'es'),
cls('Snowflakes', 'snowflakes'),
cls('SnowSez', 'snow-sez'),
cls('SpeedBump', 'speedbump'),
cls('SpiritOfTheStaircase', 'spirit-of-the-staircase'),
cls('SpotTheFrog', 'spot-the-frog'),
cls('SteveBenson', 'stevebenson'),
cls('SteveBreen', 'stevebreen'),
cls('SteveKelley', 'stevekelley'),
cls('StickyComics', 'sticky-comics'),
cls('StoneSoup', 'stonesoup'),
cls('StoneSoupClassics', 'stone-soup-classics'),
cls('StrangeBrew', 'strangebrew'),
cls('StuartCarlson', 'stuartcarlson'),
cls('StudioJantze', 'studio-jantze'),
cls('SunnyStreet', 'sunny-street'),
cls('SunshineState', 'sunshine-state'),
cls('SuperFunPakComix', 'super-fun-pak-comix'),
cls('SwanEaters', 'swan-eaters'),
cls('SweetAndSourPork', 'sweet-and-sour-pork'),
cls('Sylvia', 'sylvia'),
cls('TankMcNamara', 'tankmcnamara'),
cls('Tarzan', 'tarzan'),
cls('TarzanEnEspanol', 'tarzan-en-espanol', 'es'),
cls('TedRall', 'ted-rall'),
cls('TenCats', 'ten-cats'),
cls('TextsFromMittens', 'texts-from-mittens'),
cls('Thatababy', 'thatababy'),
cls('ThatIsPriceless', 'that-is-priceless'),
cls('ThatNewCarlSmell', 'that-new-carl-smell'),
cls('TheAcademiaWaltz', 'academiawaltz'),
cls('TheAdventuresOfBusinessCat', 'the-adventures-of-business-cat'),
cls('TheArgyleSweater', 'theargylesweater'),
cls('TheAwkwardYeti', 'the-awkward-yeti'),
cls('TheBarn', 'thebarn'),
cls('TheBigPicture', 'thebigpicture'),
cls('TheBoondocks', 'boondocks'),
cls('TheBornLoser', 'the-born-loser'),
cls('TheBuckets', 'thebuckets'),
cls('TheCity', 'thecity'),
cls('TheComicStripThatHasAFinaleEveryDay', 'the-comic-strip-that-has-a-finale-every-day'),
cls('TheDailyDrawing', 'the-daily-drawing'),
cls('TheDinetteSet', 'dinetteset'),
cls('TheDoozies', 'thedoozies'),
cls('TheDuplex', 'duplex'),
cls('TheElderberries', 'theelderberries'),
cls('TheFlyingMcCoys', 'theflyingmccoys'),
cls('TheFuscoBrothers', 'thefuscobrothers'),
cls('TheGrizzwells', 'thegrizzwells'),
cls('TheHumbleStumble', 'humble-stumble'),
cls('TheKChronicles', 'thekchronicles'),
cls('TheKnightLife', 'theknightlife'),
cls('TheMartianConfederacy', 'the-martian-confederacy'),
cls('TheMeaningOfLila', 'meaningoflila'),
cls('TheMiddleAge', 'the-middle-age'),
cls('TheMiddletons', 'themiddletons'),
cls('TheNormClassics', 'thenorm'),
cls('TheOtherCoast', 'theothercoast'),
cls('TheUpsideDownWorldOfGustaveVerbeek', 'upside-down-world-of-gustave-verbeek'),
cls('TheWanderingMelon', 'the-wandering-melon'),
cls('TheWizardOfIdSpanish', 'wizardofidespanol', 'es'),
cls('TheWorriedWell', 'the-worried-well'),
cls('think', 'think'),
cls('ThinLines', 'thinlines'),
cls('TimCampbell', 'tim-campbell'),
cls('TinySepuku', 'tinysepuku'),
cls('TodaysSzep', 'todays-szep'),
cls('TomTheDancingBug', 'tomthedancingbug'),
cls('TomToles', 'tomtoles'),
cls('TooMuchCoffeeMan', 'toomuchcoffeeman'),
cls('Trucutu', 'trucutu', 'es'),
cls('TruthFacts', 'truth-facts'),
cls('Tutelandia', 'tutelandia', 'es'),
cls('TwoPartyOpera', 'two-party-opera'),
cls('UnderpantsAndOverbites', 'underpants-and-overbites'),
cls('UnderstandingChaos', 'understanding-chaos'),
cls('UnstrangePhenomena', 'unstrange-phenomena'),
cls('ViewsAfrica', 'viewsafrica'),
cls('ViewsAmerica', 'viewsamerica'),
cls('ViewsAsia', 'viewsasia'),
cls('ViewsBusiness', 'viewsbusiness'),
cls('ViewsEurope', 'viewseurope'),
cls('ViewsLatinAmerica', 'viewslatinamerica'),
cls('ViewsMidEast', 'viewsmideast'),
cls('ViewsOfTheWorld', 'viewsoftheworld'),
cls('ViiviAndWagner', 'viivi-and-wagner'),
cls('WallaceTheBrave', 'wallace-the-brave'),
cls('WaltHandelsman', 'walthandelsman'),
cls('Warped', 'warped'),
cls('WatchYourHead', 'watchyourhead'),
cls('Wawawiwa', 'wawawiwa'),
cls('WaynoVision', 'waynovision'),
cls('WeePals', 'weepals'),
cls('WideOpen', 'wide-open'),
cls('WinLoseDrew', 'drewlitton'),
cls('WizardOfId', 'wizardofid'),
cls('WizardOfIdClassics', 'wizard-of-id-classics'),
cls('Wondermark', 'wondermark'),
cls('WorkingDaze', 'working-daze'),
cls('WorkingItOut', 'workingitout'),
cls('WorryLines', 'worry-lines'),
cls('WrongHands', 'wrong-hands'),
cls('WTDuck', 'wtduck'),
cls('WuMo', 'wumo'),
cls('WumoEnEspanol', 'wumoespanol', 'es'),
cls('Yaffle', 'yaffle'),
cls('YesImHotInThis', 'yesimhotinthis'),
cls('ZackHill', 'zackhill'),
cls('ZenPencils', 'zen-pencils'),
cls('Ziggy', 'ziggy'),
cls('ZiggyEnEspanol', 'ziggyespanol', 'es'),
# END AUTOUPDATE
) | cls('JunkDrawer', 'junk-drawer'),
cls('JustoYFranco', 'justo-y-franco', 'es'),
cls('KevinKallaugher', 'kal'),
cls('KevinNecessaryEditorialCartoons', 'kevin-necessary-editorial-cartoons'), | random_line_split |
gocomics.py | # SPDX-License-Identifier: MIT
# Copyright (C) 2004-2008 Tristan Seligmann and Jonathan Jacobs
# Copyright (C) 2012-2014 Bastian Kleineidam
# Copyright (C) 2015-2022 Tobias Gruetzmacher
from ..scraper import ParserScraper
from ..helpers import indirectStarter
class GoComics(ParserScraper):
url = 'https://www.gocomics.com/'
imageSearch = '//picture[d:class("item-comic-image")]/img'
prevSearch = '//a[d:class("js-previous-comic")]'
latestSearch = '//div[d:class("gc-deck--cta-0")]//a'
starter = indirectStarter
help = 'Index format: yyyy/mm/dd'
def __init__(self, name, path, lang=None):
super(GoComics, self).__init__('GoComics/' + name)
self.session.add_throttle('www.gocomics.com', 1.0, 2.0)
self.url = 'https://www.gocomics.com/' + path
self.shortname = name
if lang:
|
def namer(self, image_url, page_url):
prefix, year, month, day = page_url.rsplit('/', 3)
return "%s_%s%s%s.gif" % (self.shortname, year, month, day)
def getIndexStripUrl(self, index):
return '{}/{}'.format(self.url, index)
def shouldSkipUrl(self, url, data):
"""Skip pages without images."""
return data.xpath('//img[contains(@src, "content-error-missing")]')
@classmethod
def getmodules(cls): # noqa: CFQ001
return (
# old comics removed from the listing
cls('HeavenlyNostrils', 'heavenly-nostrils'),
# do not edit anything below since these entries are generated from
# scripts/gocomics.py
# START AUTOUPDATE
cls('1AndDone', '1-and-done'),
cls('9ChickweedLane', '9chickweedlane'),
cls('9ChickweedLaneClassics', '9-chickweed-lane-classics'),
cls('9To5', '9to5'),
cls('Aaggghhh', 'Aaggghhh', 'es'),
cls('AdamAtHome', 'adamathome'),
cls('AdultChildren', 'adult-children'),
cls('Agnes', 'agnes'),
cls('AJAndMagnus', 'aj-and-magnus'),
cls('AlGoodwynEditorialCartoons', 'algoodwyn'),
cls('AlisHouse', 'alis-house'),
cls('AlleyOop', 'alley-oop'),
cls('AmandaTheGreat', 'amanda-the-great'),
cls('Andertoons', 'andertoons'),
cls('AndyCapp', 'andycapp'),
cls('AngryLittleGirls', 'angry-little-girls'),
cls('AnimalCrackers', 'animalcrackers'),
cls('Annie', 'annie'),
cls('AProblemLikeJamal', 'a-problem-like-jamal'),
cls('ArloAndJanis', 'arloandjanis'),
cls('AskShagg', 'askshagg'),
cls('AtTavicat', 'tavicat'),
cls('AuntyAcid', 'aunty-acid'),
cls('BabyBlues', 'babyblues'),
cls('BackInTheDay', 'backintheday'),
cls('BackToBC', 'back-to-bc'),
cls('Bacon', 'bacon'),
cls('Badlands', 'badlands'),
cls('BadMachinery', 'bad-machinery'),
cls('Baldo', 'baldo'),
cls('BaldoEnEspanol', 'baldoespanol', 'es'),
cls('BallardStreet', 'ballardstreet'),
cls('BananaTriangle', 'banana-triangle'),
cls('BarkeaterLake', 'barkeaterlake'),
cls('BarneyAndClyde', 'barneyandclyde'),
cls('BasicInstructions', 'basicinstructions'),
cls('BatchRejection', 'batch-rejection'),
cls('BC', 'bc'),
cls('BeanieTheBrownie', 'beanie-the-brownie'),
cls('Beardo', 'beardo'),
cls('BearWithMe', 'bear-with-me'),
cls('Ben', 'ben'),
cls('BenitinYEneas', 'muttandjeffespanol', 'es'),
cls('BergerAndWyse', 'berger-and-wyse'),
cls('BerkeleyMews', 'berkeley-mews'),
cls('Betty', 'betty'),
cls('BFGFSyndrome', 'bfgf-syndrome'),
cls('BigNate', 'bignate'),
cls('BigNateFirstClass', 'big-nate-first-class'),
cls('BigTop', 'bigtop'),
cls('BirdAndMoon', 'bird-and-moon'),
cls('Birdbrains', 'birdbrains'),
cls('BleekerTheRechargeableDog', 'bleeker'),
cls('Bliss', 'bliss'),
cls('BloomCounty', 'bloomcounty'),
cls('BloomCounty2019', 'bloom-county'),
cls('BobGorrell', 'bobgorrell'),
cls('BobTheSquirrel', 'bobthesquirrel'),
cls('BoNanas', 'bonanas'),
cls('Boomerangs', 'boomerangs'),
cls('Bottomliners', 'bottomliners'),
cls('BoundAndGagged', 'boundandgagged'),
cls('Bozo', 'bozo'),
cls('BreakingCatNews', 'breaking-cat-news'),
cls('BreakOfDay', 'break-of-day'),
cls('Brevity', 'brevity'),
cls('BrewsterRockit', 'brewsterrockit'),
cls('BrianMcFadden', 'brian-mcfadden'),
cls('BroomHilda', 'broomhilda'),
cls('Buckles', 'buckles'),
cls('Bully', 'bully'),
cls('Buni', 'buni'),
cls('CalvinAndHobbes', 'calvinandhobbes'),
cls('CalvinAndHobbesEnEspanol', 'calvinandhobbesespanol', 'es'),
cls('Candorville', 'candorville'),
cls('CatanaComics', 'little-moments-of-love'),
cls('CathyClassics', 'cathy'),
cls('CathyCommiserations', 'cathy-commiserations'),
cls('CatsCafe', 'cats-cafe'),
cls('CattitudeDoggonit', 'cattitude-doggonit'),
cls('CestLaVie', 'cestlavie'),
cls('CheerUpEmoKid', 'cheer-up-emo-kid'),
cls('ChipBok', 'chipbok'),
cls('ChrisBritt', 'chrisbritt'),
cls('ChuckDrawsThings', 'chuck-draws-things'),
cls('ChuckleBros', 'chucklebros'),
cls('CitizenDog', 'citizendog'),
cls('Claw', 'claw'),
cls('ClayBennett', 'claybennett'),
cls('ClayJones', 'clayjones'),
cls('Cleats', 'cleats'),
cls('CloseToHome', 'closetohome'),
cls('Computoon', 'compu-toon'),
cls('Cornered', 'cornered'),
cls('CowAndBoyClassics', 'cowandboy'),
cls('CowTown', 'cowtown'),
cls('Crabgrass', 'crabgrass'),
cls('Crumb', 'crumb'),
cls('CulDeSac', 'culdesac'),
cls('DaddysHome', 'daddyshome'),
cls('DanaSummers', 'danasummers'),
cls('DarkSideOfTheHorse', 'darksideofthehorse'),
cls('DeepDarkFears', 'deep-dark-fears'),
cls('DeFlocked', 'deflocked'),
cls('DiamondLil', 'diamondlil'),
cls('DickTracy', 'dicktracy'),
cls('DilbertClassics', 'dilbert-classics'),
cls('DilbertEnEspanol', 'dilbert-en-espanol', 'es'),
cls('DinosaurComics', 'dinosaur-comics'),
cls('DogEatDoug', 'dogeatdoug'),
cls('DogsOfCKennel', 'dogsofckennel'),
cls('DomesticAbuse', 'domesticabuse'),
cls('DonBrutus', 'don-brutus', 'es'),
cls('DoodleForFood', 'doodle-for-food'),
cls('DoodleTown', 'doodle-town'),
cls('Doonesbury', 'doonesbury'),
cls('Drabble', 'drabble'),
cls('DrewSheneman', 'drewsheneman'),
cls('DumbwichCastle', 'dumbwich-castle'),
cls('EdgeCity', 'edge-city'),
cls('Eek', 'eek'),
cls('ElCafDePoncho', 'el-cafe-de-poncho', 'es'),
cls('EmmyLou', 'emmy-lou'),
cls('Endtown', 'endtown'),
cls('EverydayPeopleCartoons', 'everyday-people-cartoons'),
cls('Eyebeam', 'eyebeam'),
cls('EyebeamClassic', 'eyebeam-classic'),
cls('FalseKnees', 'false-knees'),
cls('FamilyTree', 'familytree'),
cls('Farcus', 'farcus'),
cls('FatCats', 'fat-cats'),
cls('FloAndFriends', 'floandfriends'),
cls('FMinus', 'fminus'),
cls('FoolishMortals', 'foolish-mortals'),
cls('ForBetterOrForWorse', 'forbetterorforworse'),
cls('ForHeavensSake', 'forheavenssake'),
cls('FourEyes', 'four-eyes'),
cls('FowlLanguage', 'fowl-language'),
cls('FoxTrot', 'foxtrot'),
cls('FoxTrotClassics', 'foxtrotclassics'),
cls('FoxTrotEnEspanol', 'foxtrotespanol', 'es'),
cls('Francis', 'francis'),
cls('FrankAndErnest', 'frank-and-ernest'),
cls('Frazz', 'frazz'),
cls('FredBasset', 'fredbasset'),
cls('FredBassetEnEspanol', 'fredbassetespanol', 'es'),
cls('FreeRange', 'freerange'),
cls('FreshlySqueezed', 'freshlysqueezed'),
cls('FrogApplause', 'frogapplause'),
cls('Garfield', 'garfield'),
cls('GarfieldClassics', 'garfield-classics'),
cls('GarfieldEnEspanol', 'garfieldespanol', 'es'),
cls('GaryMarkstein', 'garymarkstein'),
cls('GaryVarvel', 'garyvarvel'),
cls('GasolineAlley', 'gasolinealley'),
cls('Gaturro', 'gaturro', 'es'),
cls('Geech', 'geech'),
cls('GetALife', 'getalife'),
cls('GetFuzzy', 'getfuzzy'),
cls('Gil', 'gil'),
cls('GilThorp', 'gilthorp'),
cls('GingerMeggs', 'gingermeggs'),
cls('GingerMeggsEnEspanol', 'gingermeggs-espanol', 'es'),
cls('GlasbergenCartoons', 'glasbergen-cartoons'),
cls('Globetrotter', 'globetrotter'),
cls('GManWebcomics', 'g-man-webcomics'),
cls('Goats', 'goats'),
cls('GrandAvenue', 'grand-avenue'),
cls('GrayMatters', 'gray-matters'),
cls('GreenHumour', 'green-humour'),
cls('HaircutPractice', 'haircut-practice'),
cls('HalfFull', 'half-full'),
cls('Harley', 'harley'),
cls('HeartOfTheCity', 'heartofthecity'),
cls('Heathcliff', 'heathcliff'),
cls('HeathcliffEnEspanol', 'heathcliffespanol', 'es'),
cls('HenryPayne', 'henrypayne'),
cls('HerbAndJamaal', 'herbandjamaal'),
cls('Herman', 'herman'),
cls('HomeAndAway', 'homeandaway'),
cls('HotComicsForCoolPeople', 'hot-comics-for-cool-people'),
cls('HutchOwen', 'hutch-owen'),
cls('ImagineThis', 'imaginethis'),
cls('ImogenQuest', 'imogen-quest'),
cls('InkPen', 'inkpen'),
cls('InSecurity', 'in-security'),
cls('InTheBleachers', 'inthebleachers'),
cls('InTheSticks', 'inthesticks'),
cls('InvisibleBread', 'invisible-bread'),
cls('ItsAllAboutYou', 'itsallaboutyou'),
cls('JackOhman', 'jackohman'),
cls('JakeLikesOnions', 'jake-likes-onions'),
cls('JanesWorld', 'janesworld'),
cls('JeffDanziger', 'jeffdanziger'),
cls('JeffStahler', 'jeffstahler'),
cls('JenSorensen', 'jen-sorensen'),
cls('JimBentonCartoons', 'jim-benton-cartoons'),
cls('JimMorin', 'jimmorin'),
cls('JoeHeller', 'joe-heller'),
cls('JoelPett', 'joelpett'),
cls('JohnDeering', 'johndeering'),
cls('JumpStart', 'jumpstart'),
cls('JunkDrawer', 'junk-drawer'),
cls('JustoYFranco', 'justo-y-franco', 'es'),
cls('KevinKallaugher', 'kal'),
cls('KevinNecessaryEditorialCartoons', 'kevin-necessary-editorial-cartoons'),
cls('KidBeowulf', 'kid-beowulf'),
cls('KitchenCapers', 'kitchen-capers'),
cls('Kliban', 'kliban'),
cls('KlibansCats', 'klibans-cats'),
cls('LaCucaracha', 'lacucaracha'),
cls('LaCucarachaEnEspanol', 'la-cucaracha-en-espanol', 'es'),
cls('LaloAlcaraz', 'laloalcaraz'),
cls('LaloAlcarazEnEspanol', 'laloenespanol', 'es'),
cls('LardsWorldPeaceTips', 'lards-world-peace-tips'),
cls('LasHermanasStone', 'stonesoup_espanol', 'es'),
cls('LastKiss', 'lastkiss'),
cls('LaughingRedheadComics', 'laughing-redhead-comics'),
cls('LayLines', 'lay-lines'),
cls('LearnToSpeakCat', 'learn-to-speak-cat'),
cls('LibertyMeadows', 'libertymeadows'),
cls('LifeOnEarth', 'life-on-earth'),
cls('LilAbner', 'lil-abner'),
cls('Lio', 'lio'),
cls('LioEnEspanol', 'lioespanol', 'es'),
cls('LisaBenson', 'lisabenson'),
cls('LittleDogLost', 'littledoglost'),
cls('LittleFriedChickenAndSushi', 'little-fried-chicken-and-sushi'),
cls('LittleNemo', 'little-nemo'),
cls('LizClimoCartoons', 'liz-climo-cartoons'),
cls('Lola', 'lola'),
cls('LolaEnEspanol', 'lola-en-espanol', 'es'),
cls('LongStoryShort', 'long-story-short'),
cls('LooksGoodOnPaper', 'looks-good-on-paper'),
cls('LooseParts', 'looseparts'),
cls('LosOsorios', 'los-osorios', 'es'),
cls('LostSheep', 'lostsheep'),
cls('Luann', 'luann'),
cls('LuannAgainn', 'luann-againn'),
cls('LuannEnEspanol', 'luannspanish', 'es'),
cls('LuckyCow', 'luckycow'),
cls('LugNuts', 'lug-nuts'),
cls('Lunarbaboon', 'lunarbaboon'),
cls('M2Bulls', 'm2bulls'),
cls('Maintaining', 'maintaining'),
cls('MakingIt', 'making-it'),
cls('MannequinOnTheMoon', 'mannequin-on-the-moon'),
cls('MariasDay', 'marias-day'),
cls('Marmaduke', 'marmaduke'),
cls('MarshallRamsey', 'marshallramsey'),
cls('MattBors', 'matt-bors'),
cls('MattDavies', 'mattdavies'),
cls('MattWuerker', 'mattwuerker'),
cls('MediumLarge', 'medium-large'),
cls('MessycowComics', 'messy-cow'),
cls('MexikidStories', 'mexikid-stories'),
cls('MichaelRamirez', 'michaelramirez'),
cls('MikeDuJour', 'mike-du-jour'),
cls('MikeLester', 'mike-lester'),
cls('MikeLuckovich', 'mikeluckovich'),
cls('MissPeach', 'miss-peach'),
cls('ModeratelyConfused', 'moderately-confused'),
cls('Momma', 'momma'),
cls('Monty', 'monty'),
cls('MontyDiaros', 'monty-diaros', 'es'),
cls('MotleyClassics', 'motley-classics'),
cls('MrLowe', 'mr-lowe'),
cls('MtPleasant', 'mtpleasant'),
cls('MuttAndJeff', 'muttandjeff'),
cls('MyDadIsDracula', 'my-dad-is-dracula'),
cls('MythTickle', 'mythtickle'),
cls('Nancy', 'nancy'),
cls('NancyClassics', 'nancy-classics'),
cls('NateElGrande', 'nate-el-grande', 'es'),
cls('NestHeads', 'nestheads'),
cls('NEUROTICA', 'neurotica'),
cls('NewAdventuresOfQueenVictoria', 'thenewadventuresofqueenvictoria'),
cls('NextDoorNeighbors', 'next-door-neighbors'),
cls('NickAnderson', 'nickanderson'),
cls('NickAndZuzu', 'nick-and-zuzu'),
cls('NonSequitur', 'nonsequitur'),
cls('NothingIsNotSomething', 'nothing-is-not-something'),
cls('NotInventedHere', 'not-invented-here'),
cls('NowRecharging', 'now-recharging'),
cls('OffTheMark', 'offthemark'),
cls('OhBrother', 'oh-brother'),
cls('OllieAndQuentin', 'ollie-and-quentin'),
cls('OnAClaireDay', 'onaclaireday'),
cls('OneBigHappy', 'onebighappy'),
cls('OrdinaryBill', 'ordinary-bill'),
cls('OriginsOfTheSundayComics', 'origins-of-the-sunday-comics'),
cls('OurSuperAdventure', 'our-super-adventure'),
cls('Outland', 'outland'),
cls('OutOfTheGenePoolReRuns', 'outofthegenepool'),
cls('Overboard', 'overboard'),
cls('OverboardEnEspanol', 'overboardespanol', 'es'),
cls('OverTheHedge', 'overthehedge'),
cls('OzyAndMillie', 'ozy-and-millie'),
cls('PatOliphant', 'patoliphant'),
cls('PCAndPixel', 'pcandpixel'),
cls('Peanuts', 'peanuts'),
cls('PeanutsBegins', 'peanuts-begins'),
cls('PearlsBeforeSwine', 'pearlsbeforeswine'),
cls('Periquita', 'periquita', 'es'),
cls('PerlasParaLosCerdos', 'perlas-para-los-cerdos', 'es'),
cls('PerryBibleFellowship', 'perry-bible-fellowship'),
cls('PetuniaAndDre', 'petunia-and-dre'),
cls('PhilHands', 'phil-hands'),
cls('PhoebeAndHerUnicorn', 'phoebe-and-her-unicorn'),
cls('Pibgorn', 'pibgorn'),
cls('PibgornSketches', 'pibgornsketches'),
cls('Pickles', 'pickles'),
cls('PleaseListenToMe', 'please-listen-to-me'),
cls('Pluggers', 'pluggers'),
cls('PoochCafe', 'poochcafe'),
cls('Poorcraft', 'poorcraft'),
cls('PoorlyDrawnLines', 'poorly-drawn-lines'),
cls('PotShots', 'pot-shots'),
cls('PreTeena', 'preteena'),
cls('PricklyCity', 'pricklycity'),
cls('QuestionableQuotebook', 'questionable-quotebook'),
cls('RabbitsAgainstMagic', 'rabbitsagainstmagic'),
cls('RaisingDuncan', 'raising-duncan'),
cls('RandolphItch2Am', 'randolphitch'),
cls('RealityCheck', 'realitycheck'),
cls('RealLifeAdventures', 'reallifeadventures'),
cls('RebeccaHendin', 'rebecca-hendin'),
cls('RedAndRover', 'redandrover'),
cls('RedMeat', 'redmeat'),
cls('RichardsPoorAlmanac', 'richards-poor-almanac'),
cls('RipHaywire', 'riphaywire'),
cls('RipleysAunqueUstedNoLoCrea', 'ripleys-en-espanol', 'es'),
cls('RipleysBelieveItOrNot', 'ripleysbelieveitornot'),
cls('RobbieAndBobby', 'robbie-and-bobby'),
cls('RobertAriail', 'robert-ariail'),
cls('RobRogers', 'robrogers'),
cls('Rosebuds', 'rosebuds'),
cls('RosebudsEnEspanol', 'rosebuds-en-espanol'),
cls('RoseIsRose', 'roseisrose'),
cls('Rubes', 'rubes'),
cls('RudyPark', 'rudypark'),
cls('SaltNPepper', 'salt-n-pepper'),
cls('SarahsScribbles', 'sarahs-scribbles'),
cls('SaturdayMorningBreakfastCereal', 'saturday-morning-breakfast-cereal'),
cls('SavageChickens', 'savage-chickens'),
cls('ScaryGary', 'scarygary'),
cls('ScenesFromAMultiverse', 'scenes-from-a-multiverse'),
cls('ScottStantis', 'scottstantis'),
cls('ShenComix', 'shen-comix'),
cls('ShermansLagoon', 'shermanslagoon'),
cls('ShirleyAndSonClassics', 'shirley-and-son-classics'),
cls('Shoe', 'shoe'),
cls('SigneWilkinson', 'signewilkinson'),
cls('SketchsharkComics', 'sketchshark-comics'),
cls('SkinHorse', 'skinhorse'),
cls('Skippy', 'skippy'),
cls('SmallPotatoes', 'small-potatoes'),
cls('SnoopyEnEspanol', 'peanuts-espanol', 'es'),
cls('Snowflakes', 'snowflakes'),
cls('SnowSez', 'snow-sez'),
cls('SpeedBump', 'speedbump'),
cls('SpiritOfTheStaircase', 'spirit-of-the-staircase'),
cls('SpotTheFrog', 'spot-the-frog'),
cls('SteveBenson', 'stevebenson'),
cls('SteveBreen', 'stevebreen'),
cls('SteveKelley', 'stevekelley'),
cls('StickyComics', 'sticky-comics'),
cls('StoneSoup', 'stonesoup'),
cls('StoneSoupClassics', 'stone-soup-classics'),
cls('StrangeBrew', 'strangebrew'),
cls('StuartCarlson', 'stuartcarlson'),
cls('StudioJantze', 'studio-jantze'),
cls('SunnyStreet', 'sunny-street'),
cls('SunshineState', 'sunshine-state'),
cls('SuperFunPakComix', 'super-fun-pak-comix'),
cls('SwanEaters', 'swan-eaters'),
cls('SweetAndSourPork', 'sweet-and-sour-pork'),
cls('Sylvia', 'sylvia'),
cls('TankMcNamara', 'tankmcnamara'),
cls('Tarzan', 'tarzan'),
cls('TarzanEnEspanol', 'tarzan-en-espanol', 'es'),
cls('TedRall', 'ted-rall'),
cls('TenCats', 'ten-cats'),
cls('TextsFromMittens', 'texts-from-mittens'),
cls('Thatababy', 'thatababy'),
cls('ThatIsPriceless', 'that-is-priceless'),
cls('ThatNewCarlSmell', 'that-new-carl-smell'),
cls('TheAcademiaWaltz', 'academiawaltz'),
cls('TheAdventuresOfBusinessCat', 'the-adventures-of-business-cat'),
cls('TheArgyleSweater', 'theargylesweater'),
cls('TheAwkwardYeti', 'the-awkward-yeti'),
cls('TheBarn', 'thebarn'),
cls('TheBigPicture', 'thebigpicture'),
cls('TheBoondocks', 'boondocks'),
cls('TheBornLoser', 'the-born-loser'),
cls('TheBuckets', 'thebuckets'),
cls('TheCity', 'thecity'),
cls('TheComicStripThatHasAFinaleEveryDay', 'the-comic-strip-that-has-a-finale-every-day'),
cls('TheDailyDrawing', 'the-daily-drawing'),
cls('TheDinetteSet', 'dinetteset'),
cls('TheDoozies', 'thedoozies'),
cls('TheDuplex', 'duplex'),
cls('TheElderberries', 'theelderberries'),
cls('TheFlyingMcCoys', 'theflyingmccoys'),
cls('TheFuscoBrothers', 'thefuscobrothers'),
cls('TheGrizzwells', 'thegrizzwells'),
cls('TheHumbleStumble', 'humble-stumble'),
cls('TheKChronicles', 'thekchronicles'),
cls('TheKnightLife', 'theknightlife'),
cls('TheMartianConfederacy', 'the-martian-confederacy'),
cls('TheMeaningOfLila', 'meaningoflila'),
cls('TheMiddleAge', 'the-middle-age'),
cls('TheMiddletons', 'themiddletons'),
cls('TheNormClassics', 'thenorm'),
cls('TheOtherCoast', 'theothercoast'),
cls('TheUpsideDownWorldOfGustaveVerbeek', 'upside-down-world-of-gustave-verbeek'),
cls('TheWanderingMelon', 'the-wandering-melon'),
cls('TheWizardOfIdSpanish', 'wizardofidespanol', 'es'),
cls('TheWorriedWell', 'the-worried-well'),
cls('think', 'think'),
cls('ThinLines', 'thinlines'),
cls('TimCampbell', 'tim-campbell'),
cls('TinySepuku', 'tinysepuku'),
cls('TodaysSzep', 'todays-szep'),
cls('TomTheDancingBug', 'tomthedancingbug'),
cls('TomToles', 'tomtoles'),
cls('TooMuchCoffeeMan', 'toomuchcoffeeman'),
cls('Trucutu', 'trucutu', 'es'),
cls('TruthFacts', 'truth-facts'),
cls('Tutelandia', 'tutelandia', 'es'),
cls('TwoPartyOpera', 'two-party-opera'),
cls('UnderpantsAndOverbites', 'underpants-and-overbites'),
cls('UnderstandingChaos', 'understanding-chaos'),
cls('UnstrangePhenomena', 'unstrange-phenomena'),
cls('ViewsAfrica', 'viewsafrica'),
cls('ViewsAmerica', 'viewsamerica'),
cls('ViewsAsia', 'viewsasia'),
cls('ViewsBusiness', 'viewsbusiness'),
cls('ViewsEurope', 'viewseurope'),
cls('ViewsLatinAmerica', 'viewslatinamerica'),
cls('ViewsMidEast', 'viewsmideast'),
cls('ViewsOfTheWorld', 'viewsoftheworld'),
cls('ViiviAndWagner', 'viivi-and-wagner'),
cls('WallaceTheBrave', 'wallace-the-brave'),
cls('WaltHandelsman', 'walthandelsman'),
cls('Warped', 'warped'),
cls('WatchYourHead', 'watchyourhead'),
cls('Wawawiwa', 'wawawiwa'),
cls('WaynoVision', 'waynovision'),
cls('WeePals', 'weepals'),
cls('WideOpen', 'wide-open'),
cls('WinLoseDrew', 'drewlitton'),
cls('WizardOfId', 'wizardofid'),
cls('WizardOfIdClassics', 'wizard-of-id-classics'),
cls('Wondermark', 'wondermark'),
cls('WorkingDaze', 'working-daze'),
cls('WorkingItOut', 'workingitout'),
cls('WorryLines', 'worry-lines'),
cls('WrongHands', 'wrong-hands'),
cls('WTDuck', 'wtduck'),
cls('WuMo', 'wumo'),
cls('WumoEnEspanol', 'wumoespanol', 'es'),
cls('Yaffle', 'yaffle'),
cls('YesImHotInThis', 'yesimhotinthis'),
cls('ZackHill', 'zackhill'),
cls('ZenPencils', 'zen-pencils'),
cls('Ziggy', 'ziggy'),
cls('ZiggyEnEspanol', 'ziggyespanol', 'es'),
# END AUTOUPDATE
)
| self.lang = lang | conditional_block |
gocomics.py | # SPDX-License-Identifier: MIT
# Copyright (C) 2004-2008 Tristan Seligmann and Jonathan Jacobs
# Copyright (C) 2012-2014 Bastian Kleineidam
# Copyright (C) 2015-2022 Tobias Gruetzmacher
from ..scraper import ParserScraper
from ..helpers import indirectStarter
class GoComics(ParserScraper):
| url = 'https://www.gocomics.com/'
imageSearch = '//picture[d:class("item-comic-image")]/img'
prevSearch = '//a[d:class("js-previous-comic")]'
latestSearch = '//div[d:class("gc-deck--cta-0")]//a'
starter = indirectStarter
help = 'Index format: yyyy/mm/dd'
def __init__(self, name, path, lang=None):
super(GoComics, self).__init__('GoComics/' + name)
self.session.add_throttle('www.gocomics.com', 1.0, 2.0)
self.url = 'https://www.gocomics.com/' + path
self.shortname = name
if lang:
self.lang = lang
def namer(self, image_url, page_url):
prefix, year, month, day = page_url.rsplit('/', 3)
return "%s_%s%s%s.gif" % (self.shortname, year, month, day)
def getIndexStripUrl(self, index):
return '{}/{}'.format(self.url, index)
def shouldSkipUrl(self, url, data):
"""Skip pages without images."""
return data.xpath('//img[contains(@src, "content-error-missing")]')
@classmethod
def getmodules(cls): # noqa: CFQ001
return (
# old comics removed from the listing
cls('HeavenlyNostrils', 'heavenly-nostrils'),
# do not edit anything below since these entries are generated from
# scripts/gocomics.py
# START AUTOUPDATE
cls('1AndDone', '1-and-done'),
cls('9ChickweedLane', '9chickweedlane'),
cls('9ChickweedLaneClassics', '9-chickweed-lane-classics'),
cls('9To5', '9to5'),
cls('Aaggghhh', 'Aaggghhh', 'es'),
cls('AdamAtHome', 'adamathome'),
cls('AdultChildren', 'adult-children'),
cls('Agnes', 'agnes'),
cls('AJAndMagnus', 'aj-and-magnus'),
cls('AlGoodwynEditorialCartoons', 'algoodwyn'),
cls('AlisHouse', 'alis-house'),
cls('AlleyOop', 'alley-oop'),
cls('AmandaTheGreat', 'amanda-the-great'),
cls('Andertoons', 'andertoons'),
cls('AndyCapp', 'andycapp'),
cls('AngryLittleGirls', 'angry-little-girls'),
cls('AnimalCrackers', 'animalcrackers'),
cls('Annie', 'annie'),
cls('AProblemLikeJamal', 'a-problem-like-jamal'),
cls('ArloAndJanis', 'arloandjanis'),
cls('AskShagg', 'askshagg'),
cls('AtTavicat', 'tavicat'),
cls('AuntyAcid', 'aunty-acid'),
cls('BabyBlues', 'babyblues'),
cls('BackInTheDay', 'backintheday'),
cls('BackToBC', 'back-to-bc'),
cls('Bacon', 'bacon'),
cls('Badlands', 'badlands'),
cls('BadMachinery', 'bad-machinery'),
cls('Baldo', 'baldo'),
cls('BaldoEnEspanol', 'baldoespanol', 'es'),
cls('BallardStreet', 'ballardstreet'),
cls('BananaTriangle', 'banana-triangle'),
cls('BarkeaterLake', 'barkeaterlake'),
cls('BarneyAndClyde', 'barneyandclyde'),
cls('BasicInstructions', 'basicinstructions'),
cls('BatchRejection', 'batch-rejection'),
cls('BC', 'bc'),
cls('BeanieTheBrownie', 'beanie-the-brownie'),
cls('Beardo', 'beardo'),
cls('BearWithMe', 'bear-with-me'),
cls('Ben', 'ben'),
cls('BenitinYEneas', 'muttandjeffespanol', 'es'),
cls('BergerAndWyse', 'berger-and-wyse'),
cls('BerkeleyMews', 'berkeley-mews'),
cls('Betty', 'betty'),
cls('BFGFSyndrome', 'bfgf-syndrome'),
cls('BigNate', 'bignate'),
cls('BigNateFirstClass', 'big-nate-first-class'),
cls('BigTop', 'bigtop'),
cls('BirdAndMoon', 'bird-and-moon'),
cls('Birdbrains', 'birdbrains'),
cls('BleekerTheRechargeableDog', 'bleeker'),
cls('Bliss', 'bliss'),
cls('BloomCounty', 'bloomcounty'),
cls('BloomCounty2019', 'bloom-county'),
cls('BobGorrell', 'bobgorrell'),
cls('BobTheSquirrel', 'bobthesquirrel'),
cls('BoNanas', 'bonanas'),
cls('Boomerangs', 'boomerangs'),
cls('Bottomliners', 'bottomliners'),
cls('BoundAndGagged', 'boundandgagged'),
cls('Bozo', 'bozo'),
cls('BreakingCatNews', 'breaking-cat-news'),
cls('BreakOfDay', 'break-of-day'),
cls('Brevity', 'brevity'),
cls('BrewsterRockit', 'brewsterrockit'),
cls('BrianMcFadden', 'brian-mcfadden'),
cls('BroomHilda', 'broomhilda'),
cls('Buckles', 'buckles'),
cls('Bully', 'bully'),
cls('Buni', 'buni'),
cls('CalvinAndHobbes', 'calvinandhobbes'),
cls('CalvinAndHobbesEnEspanol', 'calvinandhobbesespanol', 'es'),
cls('Candorville', 'candorville'),
cls('CatanaComics', 'little-moments-of-love'),
cls('CathyClassics', 'cathy'),
cls('CathyCommiserations', 'cathy-commiserations'),
cls('CatsCafe', 'cats-cafe'),
cls('CattitudeDoggonit', 'cattitude-doggonit'),
cls('CestLaVie', 'cestlavie'),
cls('CheerUpEmoKid', 'cheer-up-emo-kid'),
cls('ChipBok', 'chipbok'),
cls('ChrisBritt', 'chrisbritt'),
cls('ChuckDrawsThings', 'chuck-draws-things'),
cls('ChuckleBros', 'chucklebros'),
cls('CitizenDog', 'citizendog'),
cls('Claw', 'claw'),
cls('ClayBennett', 'claybennett'),
cls('ClayJones', 'clayjones'),
cls('Cleats', 'cleats'),
cls('CloseToHome', 'closetohome'),
cls('Computoon', 'compu-toon'),
cls('Cornered', 'cornered'),
cls('CowAndBoyClassics', 'cowandboy'),
cls('CowTown', 'cowtown'),
cls('Crabgrass', 'crabgrass'),
cls('Crumb', 'crumb'),
cls('CulDeSac', 'culdesac'),
cls('DaddysHome', 'daddyshome'),
cls('DanaSummers', 'danasummers'),
cls('DarkSideOfTheHorse', 'darksideofthehorse'),
cls('DeepDarkFears', 'deep-dark-fears'),
cls('DeFlocked', 'deflocked'),
cls('DiamondLil', 'diamondlil'),
cls('DickTracy', 'dicktracy'),
cls('DilbertClassics', 'dilbert-classics'),
cls('DilbertEnEspanol', 'dilbert-en-espanol', 'es'),
cls('DinosaurComics', 'dinosaur-comics'),
cls('DogEatDoug', 'dogeatdoug'),
cls('DogsOfCKennel', 'dogsofckennel'),
cls('DomesticAbuse', 'domesticabuse'),
cls('DonBrutus', 'don-brutus', 'es'),
cls('DoodleForFood', 'doodle-for-food'),
cls('DoodleTown', 'doodle-town'),
cls('Doonesbury', 'doonesbury'),
cls('Drabble', 'drabble'),
cls('DrewSheneman', 'drewsheneman'),
cls('DumbwichCastle', 'dumbwich-castle'),
cls('EdgeCity', 'edge-city'),
cls('Eek', 'eek'),
cls('ElCafDePoncho', 'el-cafe-de-poncho', 'es'),
cls('EmmyLou', 'emmy-lou'),
cls('Endtown', 'endtown'),
cls('EverydayPeopleCartoons', 'everyday-people-cartoons'),
cls('Eyebeam', 'eyebeam'),
cls('EyebeamClassic', 'eyebeam-classic'),
cls('FalseKnees', 'false-knees'),
cls('FamilyTree', 'familytree'),
cls('Farcus', 'farcus'),
cls('FatCats', 'fat-cats'),
cls('FloAndFriends', 'floandfriends'),
cls('FMinus', 'fminus'),
cls('FoolishMortals', 'foolish-mortals'),
cls('ForBetterOrForWorse', 'forbetterorforworse'),
cls('ForHeavensSake', 'forheavenssake'),
cls('FourEyes', 'four-eyes'),
cls('FowlLanguage', 'fowl-language'),
cls('FoxTrot', 'foxtrot'),
cls('FoxTrotClassics', 'foxtrotclassics'),
cls('FoxTrotEnEspanol', 'foxtrotespanol', 'es'),
cls('Francis', 'francis'),
cls('FrankAndErnest', 'frank-and-ernest'),
cls('Frazz', 'frazz'),
cls('FredBasset', 'fredbasset'),
cls('FredBassetEnEspanol', 'fredbassetespanol', 'es'),
cls('FreeRange', 'freerange'),
cls('FreshlySqueezed', 'freshlysqueezed'),
cls('FrogApplause', 'frogapplause'),
cls('Garfield', 'garfield'),
cls('GarfieldClassics', 'garfield-classics'),
cls('GarfieldEnEspanol', 'garfieldespanol', 'es'),
cls('GaryMarkstein', 'garymarkstein'),
cls('GaryVarvel', 'garyvarvel'),
cls('GasolineAlley', 'gasolinealley'),
cls('Gaturro', 'gaturro', 'es'),
cls('Geech', 'geech'),
cls('GetALife', 'getalife'),
cls('GetFuzzy', 'getfuzzy'),
cls('Gil', 'gil'),
cls('GilThorp', 'gilthorp'),
cls('GingerMeggs', 'gingermeggs'),
cls('GingerMeggsEnEspanol', 'gingermeggs-espanol', 'es'),
cls('GlasbergenCartoons', 'glasbergen-cartoons'),
cls('Globetrotter', 'globetrotter'),
cls('GManWebcomics', 'g-man-webcomics'),
cls('Goats', 'goats'),
cls('GrandAvenue', 'grand-avenue'),
cls('GrayMatters', 'gray-matters'),
cls('GreenHumour', 'green-humour'),
cls('HaircutPractice', 'haircut-practice'),
cls('HalfFull', 'half-full'),
cls('Harley', 'harley'),
cls('HeartOfTheCity', 'heartofthecity'),
cls('Heathcliff', 'heathcliff'),
cls('HeathcliffEnEspanol', 'heathcliffespanol', 'es'),
cls('HenryPayne', 'henrypayne'),
cls('HerbAndJamaal', 'herbandjamaal'),
cls('Herman', 'herman'),
cls('HomeAndAway', 'homeandaway'),
cls('HotComicsForCoolPeople', 'hot-comics-for-cool-people'),
cls('HutchOwen', 'hutch-owen'),
cls('ImagineThis', 'imaginethis'),
cls('ImogenQuest', 'imogen-quest'),
cls('InkPen', 'inkpen'),
cls('InSecurity', 'in-security'),
cls('InTheBleachers', 'inthebleachers'),
cls('InTheSticks', 'inthesticks'),
cls('InvisibleBread', 'invisible-bread'),
cls('ItsAllAboutYou', 'itsallaboutyou'),
cls('JackOhman', 'jackohman'),
cls('JakeLikesOnions', 'jake-likes-onions'),
cls('JanesWorld', 'janesworld'),
cls('JeffDanziger', 'jeffdanziger'),
cls('JeffStahler', 'jeffstahler'),
cls('JenSorensen', 'jen-sorensen'),
cls('JimBentonCartoons', 'jim-benton-cartoons'),
cls('JimMorin', 'jimmorin'),
cls('JoeHeller', 'joe-heller'),
cls('JoelPett', 'joelpett'),
cls('JohnDeering', 'johndeering'),
cls('JumpStart', 'jumpstart'),
cls('JunkDrawer', 'junk-drawer'),
cls('JustoYFranco', 'justo-y-franco', 'es'),
cls('KevinKallaugher', 'kal'),
cls('KevinNecessaryEditorialCartoons', 'kevin-necessary-editorial-cartoons'),
cls('KidBeowulf', 'kid-beowulf'),
cls('KitchenCapers', 'kitchen-capers'),
cls('Kliban', 'kliban'),
cls('KlibansCats', 'klibans-cats'),
cls('LaCucaracha', 'lacucaracha'),
cls('LaCucarachaEnEspanol', 'la-cucaracha-en-espanol', 'es'),
cls('LaloAlcaraz', 'laloalcaraz'),
cls('LaloAlcarazEnEspanol', 'laloenespanol', 'es'),
cls('LardsWorldPeaceTips', 'lards-world-peace-tips'),
cls('LasHermanasStone', 'stonesoup_espanol', 'es'),
cls('LastKiss', 'lastkiss'),
cls('LaughingRedheadComics', 'laughing-redhead-comics'),
cls('LayLines', 'lay-lines'),
cls('LearnToSpeakCat', 'learn-to-speak-cat'),
cls('LibertyMeadows', 'libertymeadows'),
cls('LifeOnEarth', 'life-on-earth'),
cls('LilAbner', 'lil-abner'),
cls('Lio', 'lio'),
cls('LioEnEspanol', 'lioespanol', 'es'),
cls('LisaBenson', 'lisabenson'),
cls('LittleDogLost', 'littledoglost'),
cls('LittleFriedChickenAndSushi', 'little-fried-chicken-and-sushi'),
cls('LittleNemo', 'little-nemo'),
cls('LizClimoCartoons', 'liz-climo-cartoons'),
cls('Lola', 'lola'),
cls('LolaEnEspanol', 'lola-en-espanol', 'es'),
cls('LongStoryShort', 'long-story-short'),
cls('LooksGoodOnPaper', 'looks-good-on-paper'),
cls('LooseParts', 'looseparts'),
cls('LosOsorios', 'los-osorios', 'es'),
cls('LostSheep', 'lostsheep'),
cls('Luann', 'luann'),
cls('LuannAgainn', 'luann-againn'),
cls('LuannEnEspanol', 'luannspanish', 'es'),
cls('LuckyCow', 'luckycow'),
cls('LugNuts', 'lug-nuts'),
cls('Lunarbaboon', 'lunarbaboon'),
cls('M2Bulls', 'm2bulls'),
cls('Maintaining', 'maintaining'),
cls('MakingIt', 'making-it'),
cls('MannequinOnTheMoon', 'mannequin-on-the-moon'),
cls('MariasDay', 'marias-day'),
cls('Marmaduke', 'marmaduke'),
cls('MarshallRamsey', 'marshallramsey'),
cls('MattBors', 'matt-bors'),
cls('MattDavies', 'mattdavies'),
cls('MattWuerker', 'mattwuerker'),
cls('MediumLarge', 'medium-large'),
cls('MessycowComics', 'messy-cow'),
cls('MexikidStories', 'mexikid-stories'),
cls('MichaelRamirez', 'michaelramirez'),
cls('MikeDuJour', 'mike-du-jour'),
cls('MikeLester', 'mike-lester'),
cls('MikeLuckovich', 'mikeluckovich'),
cls('MissPeach', 'miss-peach'),
cls('ModeratelyConfused', 'moderately-confused'),
cls('Momma', 'momma'),
cls('Monty', 'monty'),
cls('MontyDiaros', 'monty-diaros', 'es'),
cls('MotleyClassics', 'motley-classics'),
cls('MrLowe', 'mr-lowe'),
cls('MtPleasant', 'mtpleasant'),
cls('MuttAndJeff', 'muttandjeff'),
cls('MyDadIsDracula', 'my-dad-is-dracula'),
cls('MythTickle', 'mythtickle'),
cls('Nancy', 'nancy'),
cls('NancyClassics', 'nancy-classics'),
cls('NateElGrande', 'nate-el-grande', 'es'),
cls('NestHeads', 'nestheads'),
cls('NEUROTICA', 'neurotica'),
cls('NewAdventuresOfQueenVictoria', 'thenewadventuresofqueenvictoria'),
cls('NextDoorNeighbors', 'next-door-neighbors'),
cls('NickAnderson', 'nickanderson'),
cls('NickAndZuzu', 'nick-and-zuzu'),
cls('NonSequitur', 'nonsequitur'),
cls('NothingIsNotSomething', 'nothing-is-not-something'),
cls('NotInventedHere', 'not-invented-here'),
cls('NowRecharging', 'now-recharging'),
cls('OffTheMark', 'offthemark'),
cls('OhBrother', 'oh-brother'),
cls('OllieAndQuentin', 'ollie-and-quentin'),
cls('OnAClaireDay', 'onaclaireday'),
cls('OneBigHappy', 'onebighappy'),
cls('OrdinaryBill', 'ordinary-bill'),
cls('OriginsOfTheSundayComics', 'origins-of-the-sunday-comics'),
cls('OurSuperAdventure', 'our-super-adventure'),
cls('Outland', 'outland'),
cls('OutOfTheGenePoolReRuns', 'outofthegenepool'),
cls('Overboard', 'overboard'),
cls('OverboardEnEspanol', 'overboardespanol', 'es'),
cls('OverTheHedge', 'overthehedge'),
cls('OzyAndMillie', 'ozy-and-millie'),
cls('PatOliphant', 'patoliphant'),
cls('PCAndPixel', 'pcandpixel'),
cls('Peanuts', 'peanuts'),
cls('PeanutsBegins', 'peanuts-begins'),
cls('PearlsBeforeSwine', 'pearlsbeforeswine'),
cls('Periquita', 'periquita', 'es'),
cls('PerlasParaLosCerdos', 'perlas-para-los-cerdos', 'es'),
cls('PerryBibleFellowship', 'perry-bible-fellowship'),
cls('PetuniaAndDre', 'petunia-and-dre'),
cls('PhilHands', 'phil-hands'),
cls('PhoebeAndHerUnicorn', 'phoebe-and-her-unicorn'),
cls('Pibgorn', 'pibgorn'),
cls('PibgornSketches', 'pibgornsketches'),
cls('Pickles', 'pickles'),
cls('PleaseListenToMe', 'please-listen-to-me'),
cls('Pluggers', 'pluggers'),
cls('PoochCafe', 'poochcafe'),
cls('Poorcraft', 'poorcraft'),
cls('PoorlyDrawnLines', 'poorly-drawn-lines'),
cls('PotShots', 'pot-shots'),
cls('PreTeena', 'preteena'),
cls('PricklyCity', 'pricklycity'),
cls('QuestionableQuotebook', 'questionable-quotebook'),
cls('RabbitsAgainstMagic', 'rabbitsagainstmagic'),
cls('RaisingDuncan', 'raising-duncan'),
cls('RandolphItch2Am', 'randolphitch'),
cls('RealityCheck', 'realitycheck'),
cls('RealLifeAdventures', 'reallifeadventures'),
cls('RebeccaHendin', 'rebecca-hendin'),
cls('RedAndRover', 'redandrover'),
cls('RedMeat', 'redmeat'),
cls('RichardsPoorAlmanac', 'richards-poor-almanac'),
cls('RipHaywire', 'riphaywire'),
cls('RipleysAunqueUstedNoLoCrea', 'ripleys-en-espanol', 'es'),
cls('RipleysBelieveItOrNot', 'ripleysbelieveitornot'),
cls('RobbieAndBobby', 'robbie-and-bobby'),
cls('RobertAriail', 'robert-ariail'),
cls('RobRogers', 'robrogers'),
cls('Rosebuds', 'rosebuds'),
cls('RosebudsEnEspanol', 'rosebuds-en-espanol'),
cls('RoseIsRose', 'roseisrose'),
cls('Rubes', 'rubes'),
cls('RudyPark', 'rudypark'),
cls('SaltNPepper', 'salt-n-pepper'),
cls('SarahsScribbles', 'sarahs-scribbles'),
cls('SaturdayMorningBreakfastCereal', 'saturday-morning-breakfast-cereal'),
cls('SavageChickens', 'savage-chickens'),
cls('ScaryGary', 'scarygary'),
cls('ScenesFromAMultiverse', 'scenes-from-a-multiverse'),
cls('ScottStantis', 'scottstantis'),
cls('ShenComix', 'shen-comix'),
cls('ShermansLagoon', 'shermanslagoon'),
cls('ShirleyAndSonClassics', 'shirley-and-son-classics'),
cls('Shoe', 'shoe'),
cls('SigneWilkinson', 'signewilkinson'),
cls('SketchsharkComics', 'sketchshark-comics'),
cls('SkinHorse', 'skinhorse'),
cls('Skippy', 'skippy'),
cls('SmallPotatoes', 'small-potatoes'),
cls('SnoopyEnEspanol', 'peanuts-espanol', 'es'),
cls('Snowflakes', 'snowflakes'),
cls('SnowSez', 'snow-sez'),
cls('SpeedBump', 'speedbump'),
cls('SpiritOfTheStaircase', 'spirit-of-the-staircase'),
cls('SpotTheFrog', 'spot-the-frog'),
cls('SteveBenson', 'stevebenson'),
cls('SteveBreen', 'stevebreen'),
cls('SteveKelley', 'stevekelley'),
cls('StickyComics', 'sticky-comics'),
cls('StoneSoup', 'stonesoup'),
cls('StoneSoupClassics', 'stone-soup-classics'),
cls('StrangeBrew', 'strangebrew'),
cls('StuartCarlson', 'stuartcarlson'),
cls('StudioJantze', 'studio-jantze'),
cls('SunnyStreet', 'sunny-street'),
cls('SunshineState', 'sunshine-state'),
cls('SuperFunPakComix', 'super-fun-pak-comix'),
cls('SwanEaters', 'swan-eaters'),
cls('SweetAndSourPork', 'sweet-and-sour-pork'),
cls('Sylvia', 'sylvia'),
cls('TankMcNamara', 'tankmcnamara'),
cls('Tarzan', 'tarzan'),
cls('TarzanEnEspanol', 'tarzan-en-espanol', 'es'),
cls('TedRall', 'ted-rall'),
cls('TenCats', 'ten-cats'),
cls('TextsFromMittens', 'texts-from-mittens'),
cls('Thatababy', 'thatababy'),
cls('ThatIsPriceless', 'that-is-priceless'),
cls('ThatNewCarlSmell', 'that-new-carl-smell'),
cls('TheAcademiaWaltz', 'academiawaltz'),
cls('TheAdventuresOfBusinessCat', 'the-adventures-of-business-cat'),
cls('TheArgyleSweater', 'theargylesweater'),
cls('TheAwkwardYeti', 'the-awkward-yeti'),
cls('TheBarn', 'thebarn'),
cls('TheBigPicture', 'thebigpicture'),
cls('TheBoondocks', 'boondocks'),
cls('TheBornLoser', 'the-born-loser'),
cls('TheBuckets', 'thebuckets'),
cls('TheCity', 'thecity'),
cls('TheComicStripThatHasAFinaleEveryDay', 'the-comic-strip-that-has-a-finale-every-day'),
cls('TheDailyDrawing', 'the-daily-drawing'),
cls('TheDinetteSet', 'dinetteset'),
cls('TheDoozies', 'thedoozies'),
cls('TheDuplex', 'duplex'),
cls('TheElderberries', 'theelderberries'),
cls('TheFlyingMcCoys', 'theflyingmccoys'),
cls('TheFuscoBrothers', 'thefuscobrothers'),
cls('TheGrizzwells', 'thegrizzwells'),
cls('TheHumbleStumble', 'humble-stumble'),
cls('TheKChronicles', 'thekchronicles'),
cls('TheKnightLife', 'theknightlife'),
cls('TheMartianConfederacy', 'the-martian-confederacy'),
cls('TheMeaningOfLila', 'meaningoflila'),
cls('TheMiddleAge', 'the-middle-age'),
cls('TheMiddletons', 'themiddletons'),
cls('TheNormClassics', 'thenorm'),
cls('TheOtherCoast', 'theothercoast'),
cls('TheUpsideDownWorldOfGustaveVerbeek', 'upside-down-world-of-gustave-verbeek'),
cls('TheWanderingMelon', 'the-wandering-melon'),
cls('TheWizardOfIdSpanish', 'wizardofidespanol', 'es'),
cls('TheWorriedWell', 'the-worried-well'),
cls('think', 'think'),
cls('ThinLines', 'thinlines'),
cls('TimCampbell', 'tim-campbell'),
cls('TinySepuku', 'tinysepuku'),
cls('TodaysSzep', 'todays-szep'),
cls('TomTheDancingBug', 'tomthedancingbug'),
cls('TomToles', 'tomtoles'),
cls('TooMuchCoffeeMan', 'toomuchcoffeeman'),
cls('Trucutu', 'trucutu', 'es'),
cls('TruthFacts', 'truth-facts'),
cls('Tutelandia', 'tutelandia', 'es'),
cls('TwoPartyOpera', 'two-party-opera'),
cls('UnderpantsAndOverbites', 'underpants-and-overbites'),
cls('UnderstandingChaos', 'understanding-chaos'),
cls('UnstrangePhenomena', 'unstrange-phenomena'),
cls('ViewsAfrica', 'viewsafrica'),
cls('ViewsAmerica', 'viewsamerica'),
cls('ViewsAsia', 'viewsasia'),
cls('ViewsBusiness', 'viewsbusiness'),
cls('ViewsEurope', 'viewseurope'),
cls('ViewsLatinAmerica', 'viewslatinamerica'),
cls('ViewsMidEast', 'viewsmideast'),
cls('ViewsOfTheWorld', 'viewsoftheworld'),
cls('ViiviAndWagner', 'viivi-and-wagner'),
cls('WallaceTheBrave', 'wallace-the-brave'),
cls('WaltHandelsman', 'walthandelsman'),
cls('Warped', 'warped'),
cls('WatchYourHead', 'watchyourhead'),
cls('Wawawiwa', 'wawawiwa'),
cls('WaynoVision', 'waynovision'),
cls('WeePals', 'weepals'),
cls('WideOpen', 'wide-open'),
cls('WinLoseDrew', 'drewlitton'),
cls('WizardOfId', 'wizardofid'),
cls('WizardOfIdClassics', 'wizard-of-id-classics'),
cls('Wondermark', 'wondermark'),
cls('WorkingDaze', 'working-daze'),
cls('WorkingItOut', 'workingitout'),
cls('WorryLines', 'worry-lines'),
cls('WrongHands', 'wrong-hands'),
cls('WTDuck', 'wtduck'),
cls('WuMo', 'wumo'),
cls('WumoEnEspanol', 'wumoespanol', 'es'),
cls('Yaffle', 'yaffle'),
cls('YesImHotInThis', 'yesimhotinthis'),
cls('ZackHill', 'zackhill'),
cls('ZenPencils', 'zen-pencils'),
cls('Ziggy', 'ziggy'),
cls('ZiggyEnEspanol', 'ziggyespanol', 'es'),
# END AUTOUPDATE
) | identifier_body | |
gocomics.py | # SPDX-License-Identifier: MIT
# Copyright (C) 2004-2008 Tristan Seligmann and Jonathan Jacobs
# Copyright (C) 2012-2014 Bastian Kleineidam
# Copyright (C) 2015-2022 Tobias Gruetzmacher
from ..scraper import ParserScraper
from ..helpers import indirectStarter
class GoComics(ParserScraper):
url = 'https://www.gocomics.com/'
imageSearch = '//picture[d:class("item-comic-image")]/img'
prevSearch = '//a[d:class("js-previous-comic")]'
latestSearch = '//div[d:class("gc-deck--cta-0")]//a'
starter = indirectStarter
help = 'Index format: yyyy/mm/dd'
def __init__(self, name, path, lang=None):
super(GoComics, self).__init__('GoComics/' + name)
self.session.add_throttle('www.gocomics.com', 1.0, 2.0)
self.url = 'https://www.gocomics.com/' + path
self.shortname = name
if lang:
self.lang = lang
def namer(self, image_url, page_url):
prefix, year, month, day = page_url.rsplit('/', 3)
return "%s_%s%s%s.gif" % (self.shortname, year, month, day)
def getIndexStripUrl(self, index):
return '{}/{}'.format(self.url, index)
def | (self, url, data):
"""Skip pages without images."""
return data.xpath('//img[contains(@src, "content-error-missing")]')
@classmethod
def getmodules(cls): # noqa: CFQ001
return (
# old comics removed from the listing
cls('HeavenlyNostrils', 'heavenly-nostrils'),
# do not edit anything below since these entries are generated from
# scripts/gocomics.py
# START AUTOUPDATE
cls('1AndDone', '1-and-done'),
cls('9ChickweedLane', '9chickweedlane'),
cls('9ChickweedLaneClassics', '9-chickweed-lane-classics'),
cls('9To5', '9to5'),
cls('Aaggghhh', 'Aaggghhh', 'es'),
cls('AdamAtHome', 'adamathome'),
cls('AdultChildren', 'adult-children'),
cls('Agnes', 'agnes'),
cls('AJAndMagnus', 'aj-and-magnus'),
cls('AlGoodwynEditorialCartoons', 'algoodwyn'),
cls('AlisHouse', 'alis-house'),
cls('AlleyOop', 'alley-oop'),
cls('AmandaTheGreat', 'amanda-the-great'),
cls('Andertoons', 'andertoons'),
cls('AndyCapp', 'andycapp'),
cls('AngryLittleGirls', 'angry-little-girls'),
cls('AnimalCrackers', 'animalcrackers'),
cls('Annie', 'annie'),
cls('AProblemLikeJamal', 'a-problem-like-jamal'),
cls('ArloAndJanis', 'arloandjanis'),
cls('AskShagg', 'askshagg'),
cls('AtTavicat', 'tavicat'),
cls('AuntyAcid', 'aunty-acid'),
cls('BabyBlues', 'babyblues'),
cls('BackInTheDay', 'backintheday'),
cls('BackToBC', 'back-to-bc'),
cls('Bacon', 'bacon'),
cls('Badlands', 'badlands'),
cls('BadMachinery', 'bad-machinery'),
cls('Baldo', 'baldo'),
cls('BaldoEnEspanol', 'baldoespanol', 'es'),
cls('BallardStreet', 'ballardstreet'),
cls('BananaTriangle', 'banana-triangle'),
cls('BarkeaterLake', 'barkeaterlake'),
cls('BarneyAndClyde', 'barneyandclyde'),
cls('BasicInstructions', 'basicinstructions'),
cls('BatchRejection', 'batch-rejection'),
cls('BC', 'bc'),
cls('BeanieTheBrownie', 'beanie-the-brownie'),
cls('Beardo', 'beardo'),
cls('BearWithMe', 'bear-with-me'),
cls('Ben', 'ben'),
cls('BenitinYEneas', 'muttandjeffespanol', 'es'),
cls('BergerAndWyse', 'berger-and-wyse'),
cls('BerkeleyMews', 'berkeley-mews'),
cls('Betty', 'betty'),
cls('BFGFSyndrome', 'bfgf-syndrome'),
cls('BigNate', 'bignate'),
cls('BigNateFirstClass', 'big-nate-first-class'),
cls('BigTop', 'bigtop'),
cls('BirdAndMoon', 'bird-and-moon'),
cls('Birdbrains', 'birdbrains'),
cls('BleekerTheRechargeableDog', 'bleeker'),
cls('Bliss', 'bliss'),
cls('BloomCounty', 'bloomcounty'),
cls('BloomCounty2019', 'bloom-county'),
cls('BobGorrell', 'bobgorrell'),
cls('BobTheSquirrel', 'bobthesquirrel'),
cls('BoNanas', 'bonanas'),
cls('Boomerangs', 'boomerangs'),
cls('Bottomliners', 'bottomliners'),
cls('BoundAndGagged', 'boundandgagged'),
cls('Bozo', 'bozo'),
cls('BreakingCatNews', 'breaking-cat-news'),
cls('BreakOfDay', 'break-of-day'),
cls('Brevity', 'brevity'),
cls('BrewsterRockit', 'brewsterrockit'),
cls('BrianMcFadden', 'brian-mcfadden'),
cls('BroomHilda', 'broomhilda'),
cls('Buckles', 'buckles'),
cls('Bully', 'bully'),
cls('Buni', 'buni'),
cls('CalvinAndHobbes', 'calvinandhobbes'),
cls('CalvinAndHobbesEnEspanol', 'calvinandhobbesespanol', 'es'),
cls('Candorville', 'candorville'),
cls('CatanaComics', 'little-moments-of-love'),
cls('CathyClassics', 'cathy'),
cls('CathyCommiserations', 'cathy-commiserations'),
cls('CatsCafe', 'cats-cafe'),
cls('CattitudeDoggonit', 'cattitude-doggonit'),
cls('CestLaVie', 'cestlavie'),
cls('CheerUpEmoKid', 'cheer-up-emo-kid'),
cls('ChipBok', 'chipbok'),
cls('ChrisBritt', 'chrisbritt'),
cls('ChuckDrawsThings', 'chuck-draws-things'),
cls('ChuckleBros', 'chucklebros'),
cls('CitizenDog', 'citizendog'),
cls('Claw', 'claw'),
cls('ClayBennett', 'claybennett'),
cls('ClayJones', 'clayjones'),
cls('Cleats', 'cleats'),
cls('CloseToHome', 'closetohome'),
cls('Computoon', 'compu-toon'),
cls('Cornered', 'cornered'),
cls('CowAndBoyClassics', 'cowandboy'),
cls('CowTown', 'cowtown'),
cls('Crabgrass', 'crabgrass'),
cls('Crumb', 'crumb'),
cls('CulDeSac', 'culdesac'),
cls('DaddysHome', 'daddyshome'),
cls('DanaSummers', 'danasummers'),
cls('DarkSideOfTheHorse', 'darksideofthehorse'),
cls('DeepDarkFears', 'deep-dark-fears'),
cls('DeFlocked', 'deflocked'),
cls('DiamondLil', 'diamondlil'),
cls('DickTracy', 'dicktracy'),
cls('DilbertClassics', 'dilbert-classics'),
cls('DilbertEnEspanol', 'dilbert-en-espanol', 'es'),
cls('DinosaurComics', 'dinosaur-comics'),
cls('DogEatDoug', 'dogeatdoug'),
cls('DogsOfCKennel', 'dogsofckennel'),
cls('DomesticAbuse', 'domesticabuse'),
cls('DonBrutus', 'don-brutus', 'es'),
cls('DoodleForFood', 'doodle-for-food'),
cls('DoodleTown', 'doodle-town'),
cls('Doonesbury', 'doonesbury'),
cls('Drabble', 'drabble'),
cls('DrewSheneman', 'drewsheneman'),
cls('DumbwichCastle', 'dumbwich-castle'),
cls('EdgeCity', 'edge-city'),
cls('Eek', 'eek'),
cls('ElCafDePoncho', 'el-cafe-de-poncho', 'es'),
cls('EmmyLou', 'emmy-lou'),
cls('Endtown', 'endtown'),
cls('EverydayPeopleCartoons', 'everyday-people-cartoons'),
cls('Eyebeam', 'eyebeam'),
cls('EyebeamClassic', 'eyebeam-classic'),
cls('FalseKnees', 'false-knees'),
cls('FamilyTree', 'familytree'),
cls('Farcus', 'farcus'),
cls('FatCats', 'fat-cats'),
cls('FloAndFriends', 'floandfriends'),
cls('FMinus', 'fminus'),
cls('FoolishMortals', 'foolish-mortals'),
cls('ForBetterOrForWorse', 'forbetterorforworse'),
cls('ForHeavensSake', 'forheavenssake'),
cls('FourEyes', 'four-eyes'),
cls('FowlLanguage', 'fowl-language'),
cls('FoxTrot', 'foxtrot'),
cls('FoxTrotClassics', 'foxtrotclassics'),
cls('FoxTrotEnEspanol', 'foxtrotespanol', 'es'),
cls('Francis', 'francis'),
cls('FrankAndErnest', 'frank-and-ernest'),
cls('Frazz', 'frazz'),
cls('FredBasset', 'fredbasset'),
cls('FredBassetEnEspanol', 'fredbassetespanol', 'es'),
cls('FreeRange', 'freerange'),
cls('FreshlySqueezed', 'freshlysqueezed'),
cls('FrogApplause', 'frogapplause'),
cls('Garfield', 'garfield'),
cls('GarfieldClassics', 'garfield-classics'),
cls('GarfieldEnEspanol', 'garfieldespanol', 'es'),
cls('GaryMarkstein', 'garymarkstein'),
cls('GaryVarvel', 'garyvarvel'),
cls('GasolineAlley', 'gasolinealley'),
cls('Gaturro', 'gaturro', 'es'),
cls('Geech', 'geech'),
cls('GetALife', 'getalife'),
cls('GetFuzzy', 'getfuzzy'),
cls('Gil', 'gil'),
cls('GilThorp', 'gilthorp'),
cls('GingerMeggs', 'gingermeggs'),
cls('GingerMeggsEnEspanol', 'gingermeggs-espanol', 'es'),
cls('GlasbergenCartoons', 'glasbergen-cartoons'),
cls('Globetrotter', 'globetrotter'),
cls('GManWebcomics', 'g-man-webcomics'),
cls('Goats', 'goats'),
cls('GrandAvenue', 'grand-avenue'),
cls('GrayMatters', 'gray-matters'),
cls('GreenHumour', 'green-humour'),
cls('HaircutPractice', 'haircut-practice'),
cls('HalfFull', 'half-full'),
cls('Harley', 'harley'),
cls('HeartOfTheCity', 'heartofthecity'),
cls('Heathcliff', 'heathcliff'),
cls('HeathcliffEnEspanol', 'heathcliffespanol', 'es'),
cls('HenryPayne', 'henrypayne'),
cls('HerbAndJamaal', 'herbandjamaal'),
cls('Herman', 'herman'),
cls('HomeAndAway', 'homeandaway'),
cls('HotComicsForCoolPeople', 'hot-comics-for-cool-people'),
cls('HutchOwen', 'hutch-owen'),
cls('ImagineThis', 'imaginethis'),
cls('ImogenQuest', 'imogen-quest'),
cls('InkPen', 'inkpen'),
cls('InSecurity', 'in-security'),
cls('InTheBleachers', 'inthebleachers'),
cls('InTheSticks', 'inthesticks'),
cls('InvisibleBread', 'invisible-bread'),
cls('ItsAllAboutYou', 'itsallaboutyou'),
cls('JackOhman', 'jackohman'),
cls('JakeLikesOnions', 'jake-likes-onions'),
cls('JanesWorld', 'janesworld'),
cls('JeffDanziger', 'jeffdanziger'),
cls('JeffStahler', 'jeffstahler'),
cls('JenSorensen', 'jen-sorensen'),
cls('JimBentonCartoons', 'jim-benton-cartoons'),
cls('JimMorin', 'jimmorin'),
cls('JoeHeller', 'joe-heller'),
cls('JoelPett', 'joelpett'),
cls('JohnDeering', 'johndeering'),
cls('JumpStart', 'jumpstart'),
cls('JunkDrawer', 'junk-drawer'),
cls('JustoYFranco', 'justo-y-franco', 'es'),
cls('KevinKallaugher', 'kal'),
cls('KevinNecessaryEditorialCartoons', 'kevin-necessary-editorial-cartoons'),
cls('KidBeowulf', 'kid-beowulf'),
cls('KitchenCapers', 'kitchen-capers'),
cls('Kliban', 'kliban'),
cls('KlibansCats', 'klibans-cats'),
cls('LaCucaracha', 'lacucaracha'),
cls('LaCucarachaEnEspanol', 'la-cucaracha-en-espanol', 'es'),
cls('LaloAlcaraz', 'laloalcaraz'),
cls('LaloAlcarazEnEspanol', 'laloenespanol', 'es'),
cls('LardsWorldPeaceTips', 'lards-world-peace-tips'),
cls('LasHermanasStone', 'stonesoup_espanol', 'es'),
cls('LastKiss', 'lastkiss'),
cls('LaughingRedheadComics', 'laughing-redhead-comics'),
cls('LayLines', 'lay-lines'),
cls('LearnToSpeakCat', 'learn-to-speak-cat'),
cls('LibertyMeadows', 'libertymeadows'),
cls('LifeOnEarth', 'life-on-earth'),
cls('LilAbner', 'lil-abner'),
cls('Lio', 'lio'),
cls('LioEnEspanol', 'lioespanol', 'es'),
cls('LisaBenson', 'lisabenson'),
cls('LittleDogLost', 'littledoglost'),
cls('LittleFriedChickenAndSushi', 'little-fried-chicken-and-sushi'),
cls('LittleNemo', 'little-nemo'),
cls('LizClimoCartoons', 'liz-climo-cartoons'),
cls('Lola', 'lola'),
cls('LolaEnEspanol', 'lola-en-espanol', 'es'),
cls('LongStoryShort', 'long-story-short'),
cls('LooksGoodOnPaper', 'looks-good-on-paper'),
cls('LooseParts', 'looseparts'),
cls('LosOsorios', 'los-osorios', 'es'),
cls('LostSheep', 'lostsheep'),
cls('Luann', 'luann'),
cls('LuannAgainn', 'luann-againn'),
cls('LuannEnEspanol', 'luannspanish', 'es'),
cls('LuckyCow', 'luckycow'),
cls('LugNuts', 'lug-nuts'),
cls('Lunarbaboon', 'lunarbaboon'),
cls('M2Bulls', 'm2bulls'),
cls('Maintaining', 'maintaining'),
cls('MakingIt', 'making-it'),
cls('MannequinOnTheMoon', 'mannequin-on-the-moon'),
cls('MariasDay', 'marias-day'),
cls('Marmaduke', 'marmaduke'),
cls('MarshallRamsey', 'marshallramsey'),
cls('MattBors', 'matt-bors'),
cls('MattDavies', 'mattdavies'),
cls('MattWuerker', 'mattwuerker'),
cls('MediumLarge', 'medium-large'),
cls('MessycowComics', 'messy-cow'),
cls('MexikidStories', 'mexikid-stories'),
cls('MichaelRamirez', 'michaelramirez'),
cls('MikeDuJour', 'mike-du-jour'),
cls('MikeLester', 'mike-lester'),
cls('MikeLuckovich', 'mikeluckovich'),
cls('MissPeach', 'miss-peach'),
cls('ModeratelyConfused', 'moderately-confused'),
cls('Momma', 'momma'),
cls('Monty', 'monty'),
cls('MontyDiaros', 'monty-diaros', 'es'),
cls('MotleyClassics', 'motley-classics'),
cls('MrLowe', 'mr-lowe'),
cls('MtPleasant', 'mtpleasant'),
cls('MuttAndJeff', 'muttandjeff'),
cls('MyDadIsDracula', 'my-dad-is-dracula'),
cls('MythTickle', 'mythtickle'),
cls('Nancy', 'nancy'),
cls('NancyClassics', 'nancy-classics'),
cls('NateElGrande', 'nate-el-grande', 'es'),
cls('NestHeads', 'nestheads'),
cls('NEUROTICA', 'neurotica'),
cls('NewAdventuresOfQueenVictoria', 'thenewadventuresofqueenvictoria'),
cls('NextDoorNeighbors', 'next-door-neighbors'),
cls('NickAnderson', 'nickanderson'),
cls('NickAndZuzu', 'nick-and-zuzu'),
cls('NonSequitur', 'nonsequitur'),
cls('NothingIsNotSomething', 'nothing-is-not-something'),
cls('NotInventedHere', 'not-invented-here'),
cls('NowRecharging', 'now-recharging'),
cls('OffTheMark', 'offthemark'),
cls('OhBrother', 'oh-brother'),
cls('OllieAndQuentin', 'ollie-and-quentin'),
cls('OnAClaireDay', 'onaclaireday'),
cls('OneBigHappy', 'onebighappy'),
cls('OrdinaryBill', 'ordinary-bill'),
cls('OriginsOfTheSundayComics', 'origins-of-the-sunday-comics'),
cls('OurSuperAdventure', 'our-super-adventure'),
cls('Outland', 'outland'),
cls('OutOfTheGenePoolReRuns', 'outofthegenepool'),
cls('Overboard', 'overboard'),
cls('OverboardEnEspanol', 'overboardespanol', 'es'),
cls('OverTheHedge', 'overthehedge'),
cls('OzyAndMillie', 'ozy-and-millie'),
cls('PatOliphant', 'patoliphant'),
cls('PCAndPixel', 'pcandpixel'),
cls('Peanuts', 'peanuts'),
cls('PeanutsBegins', 'peanuts-begins'),
cls('PearlsBeforeSwine', 'pearlsbeforeswine'),
cls('Periquita', 'periquita', 'es'),
cls('PerlasParaLosCerdos', 'perlas-para-los-cerdos', 'es'),
cls('PerryBibleFellowship', 'perry-bible-fellowship'),
cls('PetuniaAndDre', 'petunia-and-dre'),
cls('PhilHands', 'phil-hands'),
cls('PhoebeAndHerUnicorn', 'phoebe-and-her-unicorn'),
cls('Pibgorn', 'pibgorn'),
cls('PibgornSketches', 'pibgornsketches'),
cls('Pickles', 'pickles'),
cls('PleaseListenToMe', 'please-listen-to-me'),
cls('Pluggers', 'pluggers'),
cls('PoochCafe', 'poochcafe'),
cls('Poorcraft', 'poorcraft'),
cls('PoorlyDrawnLines', 'poorly-drawn-lines'),
cls('PotShots', 'pot-shots'),
cls('PreTeena', 'preteena'),
cls('PricklyCity', 'pricklycity'),
cls('QuestionableQuotebook', 'questionable-quotebook'),
cls('RabbitsAgainstMagic', 'rabbitsagainstmagic'),
cls('RaisingDuncan', 'raising-duncan'),
cls('RandolphItch2Am', 'randolphitch'),
cls('RealityCheck', 'realitycheck'),
cls('RealLifeAdventures', 'reallifeadventures'),
cls('RebeccaHendin', 'rebecca-hendin'),
cls('RedAndRover', 'redandrover'),
cls('RedMeat', 'redmeat'),
cls('RichardsPoorAlmanac', 'richards-poor-almanac'),
cls('RipHaywire', 'riphaywire'),
cls('RipleysAunqueUstedNoLoCrea', 'ripleys-en-espanol', 'es'),
cls('RipleysBelieveItOrNot', 'ripleysbelieveitornot'),
cls('RobbieAndBobby', 'robbie-and-bobby'),
cls('RobertAriail', 'robert-ariail'),
cls('RobRogers', 'robrogers'),
cls('Rosebuds', 'rosebuds'),
cls('RosebudsEnEspanol', 'rosebuds-en-espanol'),
cls('RoseIsRose', 'roseisrose'),
cls('Rubes', 'rubes'),
cls('RudyPark', 'rudypark'),
cls('SaltNPepper', 'salt-n-pepper'),
cls('SarahsScribbles', 'sarahs-scribbles'),
cls('SaturdayMorningBreakfastCereal', 'saturday-morning-breakfast-cereal'),
cls('SavageChickens', 'savage-chickens'),
cls('ScaryGary', 'scarygary'),
cls('ScenesFromAMultiverse', 'scenes-from-a-multiverse'),
cls('ScottStantis', 'scottstantis'),
cls('ShenComix', 'shen-comix'),
cls('ShermansLagoon', 'shermanslagoon'),
cls('ShirleyAndSonClassics', 'shirley-and-son-classics'),
cls('Shoe', 'shoe'),
cls('SigneWilkinson', 'signewilkinson'),
cls('SketchsharkComics', 'sketchshark-comics'),
cls('SkinHorse', 'skinhorse'),
cls('Skippy', 'skippy'),
cls('SmallPotatoes', 'small-potatoes'),
cls('SnoopyEnEspanol', 'peanuts-espanol', 'es'),
cls('Snowflakes', 'snowflakes'),
cls('SnowSez', 'snow-sez'),
cls('SpeedBump', 'speedbump'),
cls('SpiritOfTheStaircase', 'spirit-of-the-staircase'),
cls('SpotTheFrog', 'spot-the-frog'),
cls('SteveBenson', 'stevebenson'),
cls('SteveBreen', 'stevebreen'),
cls('SteveKelley', 'stevekelley'),
cls('StickyComics', 'sticky-comics'),
cls('StoneSoup', 'stonesoup'),
cls('StoneSoupClassics', 'stone-soup-classics'),
cls('StrangeBrew', 'strangebrew'),
cls('StuartCarlson', 'stuartcarlson'),
cls('StudioJantze', 'studio-jantze'),
cls('SunnyStreet', 'sunny-street'),
cls('SunshineState', 'sunshine-state'),
cls('SuperFunPakComix', 'super-fun-pak-comix'),
cls('SwanEaters', 'swan-eaters'),
cls('SweetAndSourPork', 'sweet-and-sour-pork'),
cls('Sylvia', 'sylvia'),
cls('TankMcNamara', 'tankmcnamara'),
cls('Tarzan', 'tarzan'),
cls('TarzanEnEspanol', 'tarzan-en-espanol', 'es'),
cls('TedRall', 'ted-rall'),
cls('TenCats', 'ten-cats'),
cls('TextsFromMittens', 'texts-from-mittens'),
cls('Thatababy', 'thatababy'),
cls('ThatIsPriceless', 'that-is-priceless'),
cls('ThatNewCarlSmell', 'that-new-carl-smell'),
cls('TheAcademiaWaltz', 'academiawaltz'),
cls('TheAdventuresOfBusinessCat', 'the-adventures-of-business-cat'),
cls('TheArgyleSweater', 'theargylesweater'),
cls('TheAwkwardYeti', 'the-awkward-yeti'),
cls('TheBarn', 'thebarn'),
cls('TheBigPicture', 'thebigpicture'),
cls('TheBoondocks', 'boondocks'),
cls('TheBornLoser', 'the-born-loser'),
cls('TheBuckets', 'thebuckets'),
cls('TheCity', 'thecity'),
cls('TheComicStripThatHasAFinaleEveryDay', 'the-comic-strip-that-has-a-finale-every-day'),
cls('TheDailyDrawing', 'the-daily-drawing'),
cls('TheDinetteSet', 'dinetteset'),
cls('TheDoozies', 'thedoozies'),
cls('TheDuplex', 'duplex'),
cls('TheElderberries', 'theelderberries'),
cls('TheFlyingMcCoys', 'theflyingmccoys'),
cls('TheFuscoBrothers', 'thefuscobrothers'),
cls('TheGrizzwells', 'thegrizzwells'),
cls('TheHumbleStumble', 'humble-stumble'),
cls('TheKChronicles', 'thekchronicles'),
cls('TheKnightLife', 'theknightlife'),
cls('TheMartianConfederacy', 'the-martian-confederacy'),
cls('TheMeaningOfLila', 'meaningoflila'),
cls('TheMiddleAge', 'the-middle-age'),
cls('TheMiddletons', 'themiddletons'),
cls('TheNormClassics', 'thenorm'),
cls('TheOtherCoast', 'theothercoast'),
cls('TheUpsideDownWorldOfGustaveVerbeek', 'upside-down-world-of-gustave-verbeek'),
cls('TheWanderingMelon', 'the-wandering-melon'),
cls('TheWizardOfIdSpanish', 'wizardofidespanol', 'es'),
cls('TheWorriedWell', 'the-worried-well'),
cls('think', 'think'),
cls('ThinLines', 'thinlines'),
cls('TimCampbell', 'tim-campbell'),
cls('TinySepuku', 'tinysepuku'),
cls('TodaysSzep', 'todays-szep'),
cls('TomTheDancingBug', 'tomthedancingbug'),
cls('TomToles', 'tomtoles'),
cls('TooMuchCoffeeMan', 'toomuchcoffeeman'),
cls('Trucutu', 'trucutu', 'es'),
cls('TruthFacts', 'truth-facts'),
cls('Tutelandia', 'tutelandia', 'es'),
cls('TwoPartyOpera', 'two-party-opera'),
cls('UnderpantsAndOverbites', 'underpants-and-overbites'),
cls('UnderstandingChaos', 'understanding-chaos'),
cls('UnstrangePhenomena', 'unstrange-phenomena'),
cls('ViewsAfrica', 'viewsafrica'),
cls('ViewsAmerica', 'viewsamerica'),
cls('ViewsAsia', 'viewsasia'),
cls('ViewsBusiness', 'viewsbusiness'),
cls('ViewsEurope', 'viewseurope'),
cls('ViewsLatinAmerica', 'viewslatinamerica'),
cls('ViewsMidEast', 'viewsmideast'),
cls('ViewsOfTheWorld', 'viewsoftheworld'),
cls('ViiviAndWagner', 'viivi-and-wagner'),
cls('WallaceTheBrave', 'wallace-the-brave'),
cls('WaltHandelsman', 'walthandelsman'),
cls('Warped', 'warped'),
cls('WatchYourHead', 'watchyourhead'),
cls('Wawawiwa', 'wawawiwa'),
cls('WaynoVision', 'waynovision'),
cls('WeePals', 'weepals'),
cls('WideOpen', 'wide-open'),
cls('WinLoseDrew', 'drewlitton'),
cls('WizardOfId', 'wizardofid'),
cls('WizardOfIdClassics', 'wizard-of-id-classics'),
cls('Wondermark', 'wondermark'),
cls('WorkingDaze', 'working-daze'),
cls('WorkingItOut', 'workingitout'),
cls('WorryLines', 'worry-lines'),
cls('WrongHands', 'wrong-hands'),
cls('WTDuck', 'wtduck'),
cls('WuMo', 'wumo'),
cls('WumoEnEspanol', 'wumoespanol', 'es'),
cls('Yaffle', 'yaffle'),
cls('YesImHotInThis', 'yesimhotinthis'),
cls('ZackHill', 'zackhill'),
cls('ZenPencils', 'zen-pencils'),
cls('Ziggy', 'ziggy'),
cls('ZiggyEnEspanol', 'ziggyespanol', 'es'),
# END AUTOUPDATE
)
| shouldSkipUrl | identifier_name |
PushForm.js | 'use strict';
import * as Immutable from 'immutable';
import moment from 'moment';
import React, { PropTypes } from 'react';
import { bindActionCreators } from 'redux';
import { connect } from 'react-redux';
import {
Button,
Card,
Col,
Collapse,
DatePicker,
Form,
Input,
InputNumber,
Radio,
Row,
Select,
TimePicker,
} from 'antd';
import * as actions from 'action';
import PanelBox from 'framework/components/PanelBox';
import { getStaticDataByCodeType } from 'business/common/StaticDataService';
import MySelect from 'framework/components/Select';
import * as MsgUtil from 'utils/MsgUtil';
import * as Constant from 'utils/Constant';
import AfterOnClickOptPlus from './plus/AfterOnClickOptPlus';
import FormField from './FormField';
import IosEnvPlus from './plus/IosEnvPlus';
const FormItem = Form.Item;
const Option = Select.Option;
const RadioGroup = Radio.Group;
const Panel = Collapse.Panel;
const {
titleFN, bodyFN, pushBigTypeFN, pushTypeFN, androidOpenTypeFN, androidActivityFN,
androidOpenUrlFN, customExtParamsItemsFN, isCustomExtParamsFN, notifyTypeFN,
targetFN, targetValueFN, isSetPushTimeFN, pushTimeDatePickerFN, pushTimeTimePickerFN,
isStoreOfflineFn, expireTimeFN, apnsEnvFN, remindFN, isIosAddOpenUrlFN,androidNotificationChannelFN
} = FormField;
const pushBigType = [// 1-系统 2-活动/推荐 3-吻吻服务 4-资讯
{ key: '1', value: '系统' },
{ key: '2', value: '活动/推荐' },
{ key: '3', value: '吻吻服务' },
{ key: '4', value: '资讯' },
];
const pushType = {
// 大类1-系统:1-系统信息,1001-强制退出消息,后续从1002开始定义;
// 大类2-活动/推荐:2-活动信息,后续从2001开始定义;
// 大类3-吻吻服务:3-发起绑定 4-绑定回应 6-吻吻蜜语
// 8-情侣等级提升 9-背景图片更新,后续从3001开始定义;
// 大类4-资讯:4001-普通资讯
1: [{ key: '1001', value: '强制退出' }],
2: [{ key: '2', value: '活动信息' }],
3: [
// {key:"3", value:"发起绑定"},
// {key:"4", value:"绑定回应"},
{ key: '6', value: '吻吻蜜语' },
{ key: '8', value: '情侣等级提升' },
{ key: '9', value: '背景图片更新' },
{ key: '3009', value: '今日睡眠报告' },
{ key: '3100', value: '运动计步晚上9点10点提醒' },
],
4: [{ key: '4001', value: '普通资讯' }],
};
const titleMaxSize = 20;
const bodyMaxSize = 60;
const extParamsItemUuid = 0;
class PushForm extends React.Component {
static propTypes = {
actions: PropTypes.object.isRequired,
form: PropTypes.object.isRequired,
type: PropTypes.number.isRequired,
deviceType: PropTypes.number.isRequired,
countryList: PropTypes.array.isRequired,
};
static defaultProp = {
countryList: []
}
componentWillMount() {
const { actions, countryList } = this.props;
if (!countryList || countryList.length < 1) {
actions.getStaticDataByCodeType(Constant.USER_COUNTRY_ATTR_CODE);
}
}
onPushBigTypeChange = (evt) => {
const { getFieldValue, setFieldsValue } = this.props.form;
setFieldsValue(pushTypeFN,
pushType[getFieldValue(pushBigTypeFN)][0].key);
};
handleSubmit = (e) => {
e.preventDefault();
this.props.form.validateFields((errors, values) => {
if (!!errors) {
return;
}
let data = Immutable.Map(values).
set('type', this.props.type === undefined ? 0 : Number(
this.props.type))//0:表示消息(默认为0), 1:表示通知
.set('deviceType', this.props.deviceType); //设备类型,取值范围为:0:iOS设备 1:Andriod设备 3:全部类型设备
if (data.get('deviceType') === '1') { //android设备推送不需要上传ios apns环境变量
data = data.delete(apnsEnvFN);
}
if (data.get('type') === 1) { //通知类型才有打开方式选项
if (data.get(androidOpenTypeFN) === 'ACTIVITY') { //打开指定页面
if (!data.get(androidActivityFN)) {
MsgUtil.showwarning('指定打开的页面不允许为空');
return;
}
data.set('xiaomiActivity', data.get(androidActivityFN)); //xiaomiActivity与androidActivity赋值一直,由服务端判断怎么推
}
if (data.get(androidOpenTypeFN) === 'URL' &&
!data.get(androidOpenUrlFN)) { //打开指定网页
MsgUtil.showwarning('指定打开的网页不允许为空');
return;
}
//删除无效字段
if (data.get(androidOpenTypeFN) === 'APPLICATION' ||
data.get(androidOpenTypeFN) === 'NONE') { //打开应用或者无逻辑
data = data.delete(androidActivityFN).
delete(androidOpenUrlFN);
}
} else {
data = data.delete(androidActivityFN).delete(androidOpenUrlFN);
}
//填充自定义扩展参数
if (data.get(isCustomExtParamsFN) || data.get(isIosAddOpenUrlFN)) {
const extParameters = {};
data.get(customExtParamsItemsFN).map((item) => {
const key = `extParamsKey_${item}`;
if (data.get(key)) {
extParameters[data.get(key)] = data.get(
`extParamsValue_${item}`);
}
});
data = data.set('extParameters', Immutable.Map(extParameters).merge(data.get('extParameters')));
}
data = data.delete(isCustomExtParamsFN).
delete(customExtParamsItemsFN);
if (data.get(targetFN) === 'DEVICE' && !data.get(targetValueFN)) {
MsgUtil.showwarning('指定设备信息不正确');
return;
}
if (data.get(targetFN) === 'ACCOUNT' && !data | } else {
console.log('需要精确到分钟____________', moment(data.get(pushTimeDatePickerFN)).format('YYYYMMDD') + moment(data.get(pushTimeTimePickerFN)).format('HHmm'));
data.set('pushTime', moment(data.get(pushTimeDatePickerFN)).format('YYYYMMDD') + moment(data.get(pushTimeTimePickerFN)).format('HHmmss'));
const pushTimeFormNowLengthSplit = moment(data.get('pushTime'), 'YYYYMMDDHHmmss').fromNow().split(' ');
if (pushTimeFormNowLengthSplit[1] === '天前') {
MsgUtil.showwarning(`推送日期不允许选择${moment(data.get('pushTime'), 'YYYYMMDDHHmmss').fromNow()}`);
return;
}
if (Number(pushTimeFormNowLengthSplit[0]) > 7) {
MsgUtil.showwarning('推送日期不允许大于7天');
return;
}
}
}
data = data.delete(isSetPushTimeFN).
delete(pushTimeDatePickerFN).
delete(pushTimeTimePickerFN);
if (data.get(isStoreOfflineFn) === 'true') {
const expireTime = Number(data.get(expireTimeFN));
if (expireTime < 1 || expireTime > 72) {
MsgUtil.showwarning('离线保存时间最短1小时,最长72小时');
return;
}
} else {
data = data.delete(expireTimeFN);
}
//当设备类型为1-android或者3-所有设备时,填充Android提醒方式
if (data.get('deviceType') === '1' ||
data.get('deviceType') === '3') {
const extParameters = {};
extParameters['_NOTIFY_TYPE_'] = data.get(notifyTypeFN);
data.set('extParameters', Immutable.Map(extParameters).merge(data.get('extParameters')));
}
if (data.get('extParameters')) {
data = data.set('extParameters', JSON.stringify(data.get('extParameters').toJS()));
}
data = data.delete(notifyTypeFN);
//target targetValue处理
if (data.get(targetFN) === 'COUNTRY') {
console.log('data.get(targetValueFN)', data.get(targetValueFN));
if ( data.get(targetValueFN) && data.get(targetValueFN) !== 'ALL' ) {
data = data.set(targetFN, 'tag');
} else {
data = data.set(targetFN, 'ALL').set(targetValueFN, 'ALL');
}
}
data = data.set(androidNotificationChannelFN, 1);
if (data.get('type') === 1) {
this.props.actions.pushNotification(data);
} else {
this.props.actions.pushMessage(data);
}
});
};
render () {
const { getFieldDecorator, getFieldValue, setFieldsValue } = this.props.form;
//通知大类
const PushBigTypeProp = getFieldDecorator(pushBigTypeFN, {
initialValue: pushBigType[0].key,
onChange: this.onPushBigTypeChange,
});
const pushBigTypeOptions = pushBigType.map(
bigType =>
<Option key={bigType.key} >{bigType.key}-{bigType.value}</Option>);
//通知小类
const PushTypeProp = getFieldDecorator(pushTypeFN,
{ initialValue: pushType[getFieldValue(pushBigTypeFN)][0].key });
const pushTypeOptions = () => {
return pushType[getFieldValue(pushBigTypeFN)].map(
pushType =>
<Option key={pushType.key} >{pushType.key}-{pushType.value}</Option>);
};
//获取标题实际长度
const titleFactSize = () => {
const value = getFieldValue(titleFN);
return String(value ? value.length : 0);
};
const titleNode = (contentLabel) => {
if (this.props.type === '1' && this.props.deviceType === '0') { //通知且ios设备
getFieldDecorator(titleFN, { initialValue: '' });
return undefined;
} else {
const TitleProp = getFieldDecorator(titleFN, {
initialValue: '',
rules: [
{ required: true, max: 20 },
],
validateTrigger: 'onBlur',
});
const titleLabel = `${contentLabel}标题`;
return (
<Row>
<Row style={{ height: 25 }} >{titleLabel}:
<span style={{ float: 'right' }} >
{titleFactSize()}/{String(titleMaxSize)}
</span>
</Row>
<FormItem {...formItemLayout}>
{TitleProp(
<Input style={{ width: '100%' }} />,
)}
</FormItem>
</Row>
);
}
};
const BodyProp = getFieldDecorator(bodyFN, {
initialValue: '',
rules: [{ required: true, max: 60 }],
validateTrigger: 'onBlur',
});
//获取内容实际长度
const bodyFactSize = () => {
const value = getFieldValue(bodyFN);
return String(value ? value.length : 0);
};
//ios推送环境
const ApnsEnvProp = getFieldDecorator(apnsEnvFN,
{ initialValue: 'DEV' });
//点击后操作的字段
const IsCustomExtParamsProp = getFieldDecorator(isCustomExtParamsFN,
{ initialValue: false });
const isCustomExtParams = getFieldValue(isCustomExtParamsFN);
const AndroidOpenTypeProp = getFieldDecorator(androidOpenTypeFN,
{ initialValue: 'APPLICATION' });
const androidOpenTypeValue = getFieldValue(androidOpenTypeFN);
const AndroidActivityProp = getFieldDecorator(androidActivityFN,
{ initialValue: undefined });
const AndroidOpenUrlProp = getFieldDecorator(androidOpenUrlFN,
{ initialValue: undefined });
getFieldDecorator(customExtParamsItemsFN, {
initialValue: [extParamsItemUuid],
});
const customExtParamsItems = getFieldValue(customExtParamsItemsFN);
//推送目标
const TargetProp = getFieldDecorator(targetFN, { initialValue: 'ALL' });
const TargetValueProp = getFieldDecorator(targetValueFN, { initialValue: 'ALL' });
const targetValueNode = () => {
switch (getFieldValue(targetFN)) {
case 'ALL':
return undefined;
case 'DEVICE':
return (
<Row>
{TargetValueProp(
<Input placeholder="请输入deviceId,多个终端用逗号分隔" />,
)}
</Row>
);
case 'ACCOUNT':
return (
<Row>
{TargetValueProp(
<Input placeholder="请输入wenwenId,多个wenwenId用逗号分隔" />,
)}
</Row>
);
case 'COUNTRY': {
getFieldDecorator(targetValueFN, { initialValue: '' });
const { countryList } = this.props;
return (
<Row>
{TargetValueProp(
<MySelect defaultValue="" selectOptionDataList={countryList} descKey="codeName" valueKey="codeValue" />
)}
</Row>
);
}
}
};
//推送时间
const IsSetPushTimeProp = getFieldDecorator(isSetPushTimeFN,
{ initialValue: 'false' });
const currentDate = moment(new Date());
const PushTimeDatePickerProp = getFieldDecorator(pushTimeDatePickerFN, {
initialValue: currentDate,
format: 'YYYY-MM-DD',
});
const PushTimeTimePickerProp = getFieldDecorator(pushTimeTimePickerFN, {
initialValue: currentDate,
format: 'HH:mm:ss',
});
const pushTimeNode = () => {
switch (getFieldValue(isSetPushTimeFN)) {
case 'true':
return (
<Row>
{PushTimeDatePickerProp(<DatePicker />)}
{PushTimeTimePickerProp(<TimePicker />)}
</Row>
);
default:
return undefined;
}
};
//离线保存
const IsStoreOfflineProp = getFieldDecorator(isStoreOfflineFn,
{ initialValue: 'true' });
const ExpireTimeProp = getFieldDecorator(expireTimeFN,
{ initialValue: 72 });
const expireTimeNode = () => {
if (getFieldValue(isStoreOfflineFn) === 'true') {
return (<Row>保存 {ExpireTimeProp(
<InputNumber min={1} max={72} />)}小时,该时段之后再上线的用户将收不到推送</Row>);
}
return undefined;
};
const formItemLayout = {
style: { width: '100%' },
wrapperCol: { span: 24 },
};
let contentPanelBoxLabel;
if (this.props.type === '1') {
contentPanelBoxLabel = '通知';
} else {
contentPanelBoxLabel = '消息';
}
return (
<Row>
<Col span={15} >
<Form layout="inline" >
<Row> </Row>
<Row style={{ width: 1045 }} >
推送大类
{PushBigTypeProp(
<Select style={{ width: 250 }} >
{pushBigTypeOptions}
</Select>,
)}
推送小类
{PushTypeProp(
<Select style={{ width: 265 }} >
{pushTypeOptions()}
</Select>,
)}
</Row>
<Row />
<PanelBox
title={`${contentPanelBoxLabel}内容(必填)`}
style={{ marginTop: '10px', width: 650 }}
>
{titleNode(contentPanelBoxLabel)}
<Row />
<Row style={{ height: 25 }} >
{contentPanelBoxLabel}内容:
<span style={{ float: 'right' }} >{bodyFactSize()}/{String(
bodyMaxSize)}</span>
</Row>
<FormItem {...formItemLayout}>
{BodyProp(
<Input type="textarea" />,
)}
</FormItem>
</PanelBox>
<IosEnvPlus deviceType={this.props.deviceType} ApnsEnvProp={ApnsEnvProp} style={{ width: 650 }} />
<Collapse style={{ marginTop: '25px', width: 650 }} >
<Panel header="高级设置(选填)" >
<Card
title="发送对象及时间:"
style={{
marginTop: '5px',
lineHeight: '28px',
width: 650,
}}
>
<AfterOnClickOptPlus
deviceType={this.props.deviceType}
AndroidOpenTypeProp={AndroidOpenTypeProp}
androidOpenTypeValue={androidOpenTypeValue}
IsCustomExtParamsProp={IsCustomExtParamsProp}
isCustomExtParams={isCustomExtParams}
customExtParamsItems={customExtParamsItems}
AndroidActivityProp={AndroidActivityProp}
AndroidOpenUrlProp={AndroidOpenUrlProp}
getFieldDecoratorFn={getFieldDecorator}
setFieldsValueFn={setFieldsValue}
/>
<Row>发送对象:
{TargetProp(
<RadioGroup >
<Radio value="ALL" >所有</Radio>
<Radio value="DEVICE" >指定终端</Radio>
<Radio value="ACCOUNT" >指定wenwenId</Radio>
<Radio value="COUNTRY" >指定国家</Radio>
</RadioGroup>,
)}
</Row>
<Row>{targetValueNode()}</Row>
<Row >发送时间:
{IsSetPushTimeProp(
<RadioGroup >
<Radio value="false" >立即发送</Radio>
<Radio value="true" >定时发送</Radio>
</RadioGroup>,
)}
</Row>
<Row>{pushTimeNode()}</Row>
<Row >离线保存:
{IsStoreOfflineProp(
<RadioGroup >
<Radio value="false" >不保存</Radio>
<Radio value="true" >保存</Radio>
</RadioGroup>,
)}
</Row>
<Row>{expireTimeNode()}</Row>
</Card>
</Panel>
</Collapse>
<br />
<Button type="primary" onClick={this.handleSubmit} style={{ marginTop: '10px' }} >确定</Button>
</Form>
</Col>
</Row>
);
}
}
const mapStateToProps = (state) => {
const StaticDataService = state.get('StaticDataService');
const countryList = StaticDataService.get('staticDataList');
return { countryList };
};
const mapDispatchToProps = (dispatch) => {
const actionCreators = {
pushNotification: actions.pushNotification,
pushMessage: actions.pushMessage,
getStaticDataByCodeType
};
return {
actions: bindActionCreators(actionCreators, dispatch),
};
};
export default connect(mapStateToProps, mapDispatchToProps)(
Form.create()(PushForm));
| .get(targetValueFN)) {
MsgUtil.showwarning('指定wenwenId信息不正确');
return;
}
if (data.get(targetFN) === 'ALL') {
data.set(targetValueFN, 'ALL');
}
//设置推送时间
if (data.get(isSetPushTimeFN) === 'true') {
if (!data.get(pushTimeDatePickerFN) || !data.get(pushTimeTimePickerFN)) {
MsgUtil.showwarning('推送日期不允许为空');
return;
| conditional_block |
PushForm.js | 'use strict';
import * as Immutable from 'immutable';
import moment from 'moment';
import React, { PropTypes } from 'react';
import { bindActionCreators } from 'redux';
import { connect } from 'react-redux';
import {
Button,
Card,
Col,
Collapse,
DatePicker,
Form,
Input,
InputNumber,
Radio,
Row,
Select,
TimePicker,
} from 'antd';
import * as actions from 'action';
import PanelBox from 'framework/components/PanelBox';
import { getStaticDataByCodeType } from 'business/common/StaticDataService';
import MySelect from 'framework/components/Select';
import * as MsgUtil from 'utils/MsgUtil';
import * as Constant from 'utils/Constant';
import AfterOnClickOptPlus from './plus/AfterOnClickOptPlus';
import FormField from './FormField';
import IosEnvPlus from './plus/IosEnvPlus';
const FormItem = Form.Item;
const Option = Select.Option;
const RadioGroup = Radio.Group;
const Panel = Collapse.Panel;
const {
titleFN, bodyFN, pushBigTypeFN, pushTypeFN, androidOpenTypeFN, androidActivityFN,
androidOpenUrlFN, customExtParamsItemsFN, isCustomExtParamsFN, notifyTypeFN,
targetFN, targetValueFN, isSetPushTimeFN, pushTimeDatePickerFN, pushTimeTimePickerFN,
isStoreOfflineFn, expireTimeFN, apnsEnvFN, remindFN, isIosAddOpenUrlFN,androidNotificationChannelFN
} = FormField;
const pushBigType = [// 1-系统 2-活动/推荐 3-吻吻服务 4-资讯
{ key: '1', value: '系统' },
{ key: '2', value: '活动/推荐' },
{ key: '3', value: '吻吻服务' },
{ key: '4', value: '资讯' },
];
const pushType = {
// 大类1-系统:1-系统信息,1001-强制退出消息,后续从1002开始定义;
// 大类2-活动/推荐:2-活动信息,后续从2001开始定义;
// 大类3-吻吻服务:3-发起绑定 4-绑定回应 6-吻吻蜜语
// 8-情侣等级提升 9-背景图片更新,后续从3001开始定义;
// 大类4-资讯:4001-普通资讯
1: [{ key: '1001', value: '强制退出' }],
2: [{ key: '2', value: '活动信息' }],
3: [
// {key:"3", value:"发起绑定"},
// {key:"4", value:"绑定回应"},
{ key: '6', value: '吻吻蜜语' },
{ key: '8', value: '情侣等级提升' },
{ key: '9', value: '背景图片更新' },
{ key: '3009', value: '今日睡眠报告' },
{ key: '3100', value: '运动计步晚上9点10点提醒' },
],
4: [{ key: '4001', value: '普通资讯' }],
};
const titleMaxSize = 20;
const bodyMaxSize = 60;
const extParamsItemUuid = 0;
class PushForm extends React.Component {
static propTypes = {
actions: PropTypes.object.isRequired,
form: PropTypes.object.isRequired,
type: PropTypes.number.isRequired,
deviceType: PropTypes.number.isRequired,
countryList: PropTypes.array.isRequired,
};
static defaultProp = {
countryList: []
}
componentWillMount() {
const { | , countryList } = this.props;
if (!countryList || countryList.length < 1) {
actions.getStaticDataByCodeType(Constant.USER_COUNTRY_ATTR_CODE);
}
}
onPushBigTypeChange = (evt) => {
const { getFieldValue, setFieldsValue } = this.props.form;
setFieldsValue(pushTypeFN,
pushType[getFieldValue(pushBigTypeFN)][0].key);
};
handleSubmit = (e) => {
e.preventDefault();
this.props.form.validateFields((errors, values) => {
if (!!errors) {
return;
}
let data = Immutable.Map(values).
set('type', this.props.type === undefined ? 0 : Number(
this.props.type))//0:表示消息(默认为0), 1:表示通知
.set('deviceType', this.props.deviceType); //设备类型,取值范围为:0:iOS设备 1:Andriod设备 3:全部类型设备
if (data.get('deviceType') === '1') { //android设备推送不需要上传ios apns环境变量
data = data.delete(apnsEnvFN);
}
if (data.get('type') === 1) { //通知类型才有打开方式选项
if (data.get(androidOpenTypeFN) === 'ACTIVITY') { //打开指定页面
if (!data.get(androidActivityFN)) {
MsgUtil.showwarning('指定打开的页面不允许为空');
return;
}
data.set('xiaomiActivity', data.get(androidActivityFN)); //xiaomiActivity与androidActivity赋值一直,由服务端判断怎么推
}
if (data.get(androidOpenTypeFN) === 'URL' &&
!data.get(androidOpenUrlFN)) { //打开指定网页
MsgUtil.showwarning('指定打开的网页不允许为空');
return;
}
//删除无效字段
if (data.get(androidOpenTypeFN) === 'APPLICATION' ||
data.get(androidOpenTypeFN) === 'NONE') { //打开应用或者无逻辑
data = data.delete(androidActivityFN).
delete(androidOpenUrlFN);
}
} else {
data = data.delete(androidActivityFN).delete(androidOpenUrlFN);
}
//填充自定义扩展参数
if (data.get(isCustomExtParamsFN) || data.get(isIosAddOpenUrlFN)) {
const extParameters = {};
data.get(customExtParamsItemsFN).map((item) => {
const key = `extParamsKey_${item}`;
if (data.get(key)) {
extParameters[data.get(key)] = data.get(
`extParamsValue_${item}`);
}
});
data = data.set('extParameters', Immutable.Map(extParameters).merge(data.get('extParameters')));
}
data = data.delete(isCustomExtParamsFN).
delete(customExtParamsItemsFN);
if (data.get(targetFN) === 'DEVICE' && !data.get(targetValueFN)) {
MsgUtil.showwarning('指定设备信息不正确');
return;
}
if (data.get(targetFN) === 'ACCOUNT' && !data.get(targetValueFN)) {
MsgUtil.showwarning('指定wenwenId信息不正确');
return;
}
if (data.get(targetFN) === 'ALL') {
data.set(targetValueFN, 'ALL');
}
//设置推送时间
if (data.get(isSetPushTimeFN) === 'true') {
if (!data.get(pushTimeDatePickerFN) || !data.get(pushTimeTimePickerFN)) {
MsgUtil.showwarning('推送日期不允许为空');
return;
} else {
console.log('需要精确到分钟____________', moment(data.get(pushTimeDatePickerFN)).format('YYYYMMDD') + moment(data.get(pushTimeTimePickerFN)).format('HHmm'));
data.set('pushTime', moment(data.get(pushTimeDatePickerFN)).format('YYYYMMDD') + moment(data.get(pushTimeTimePickerFN)).format('HHmmss'));
const pushTimeFormNowLengthSplit = moment(data.get('pushTime'), 'YYYYMMDDHHmmss').fromNow().split(' ');
if (pushTimeFormNowLengthSplit[1] === '天前') {
MsgUtil.showwarning(`推送日期不允许选择${moment(data.get('pushTime'), 'YYYYMMDDHHmmss').fromNow()}`);
return;
}
if (Number(pushTimeFormNowLengthSplit[0]) > 7) {
MsgUtil.showwarning('推送日期不允许大于7天');
return;
}
}
}
data = data.delete(isSetPushTimeFN).
delete(pushTimeDatePickerFN).
delete(pushTimeTimePickerFN);
if (data.get(isStoreOfflineFn) === 'true') {
const expireTime = Number(data.get(expireTimeFN));
if (expireTime < 1 || expireTime > 72) {
MsgUtil.showwarning('离线保存时间最短1小时,最长72小时');
return;
}
} else {
data = data.delete(expireTimeFN);
}
//当设备类型为1-android或者3-所有设备时,填充Android提醒方式
if (data.get('deviceType') === '1' ||
data.get('deviceType') === '3') {
const extParameters = {};
extParameters['_NOTIFY_TYPE_'] = data.get(notifyTypeFN);
data.set('extParameters', Immutable.Map(extParameters).merge(data.get('extParameters')));
}
if (data.get('extParameters')) {
data = data.set('extParameters', JSON.stringify(data.get('extParameters').toJS()));
}
data = data.delete(notifyTypeFN);
//target targetValue处理
if (data.get(targetFN) === 'COUNTRY') {
console.log('data.get(targetValueFN)', data.get(targetValueFN));
if ( data.get(targetValueFN) && data.get(targetValueFN) !== 'ALL' ) {
data = data.set(targetFN, 'tag');
} else {
data = data.set(targetFN, 'ALL').set(targetValueFN, 'ALL');
}
}
data = data.set(androidNotificationChannelFN, 1);
if (data.get('type') === 1) {
this.props.actions.pushNotification(data);
} else {
this.props.actions.pushMessage(data);
}
});
};
render () {
const { getFieldDecorator, getFieldValue, setFieldsValue } = this.props.form;
//通知大类
const PushBigTypeProp = getFieldDecorator(pushBigTypeFN, {
initialValue: pushBigType[0].key,
onChange: this.onPushBigTypeChange,
});
const pushBigTypeOptions = pushBigType.map(
bigType =>
<Option key={bigType.key} >{bigType.key}-{bigType.value}</Option>);
//通知小类
const PushTypeProp = getFieldDecorator(pushTypeFN,
{ initialValue: pushType[getFieldValue(pushBigTypeFN)][0].key });
const pushTypeOptions = () => {
return pushType[getFieldValue(pushBigTypeFN)].map(
pushType =>
<Option key={pushType.key} >{pushType.key}-{pushType.value}</Option>);
};
//获取标题实际长度
const titleFactSize = () => {
const value = getFieldValue(titleFN);
return String(value ? value.length : 0);
};
const titleNode = (contentLabel) => {
if (this.props.type === '1' && this.props.deviceType === '0') { //通知且ios设备
getFieldDecorator(titleFN, { initialValue: '' });
return undefined;
} else {
const TitleProp = getFieldDecorator(titleFN, {
initialValue: '',
rules: [
{ required: true, max: 20 },
],
validateTrigger: 'onBlur',
});
const titleLabel = `${contentLabel}标题`;
return (
<Row>
<Row style={{ height: 25 }} >{titleLabel}:
<span style={{ float: 'right' }} >
{titleFactSize()}/{String(titleMaxSize)}
</span>
</Row>
<FormItem {...formItemLayout}>
{TitleProp(
<Input style={{ width: '100%' }} />,
)}
</FormItem>
</Row>
);
}
};
const BodyProp = getFieldDecorator(bodyFN, {
initialValue: '',
rules: [{ required: true, max: 60 }],
validateTrigger: 'onBlur',
});
//获取内容实际长度
const bodyFactSize = () => {
const value = getFieldValue(bodyFN);
return String(value ? value.length : 0);
};
//ios推送环境
const ApnsEnvProp = getFieldDecorator(apnsEnvFN,
{ initialValue: 'DEV' });
//点击后操作的字段
const IsCustomExtParamsProp = getFieldDecorator(isCustomExtParamsFN,
{ initialValue: false });
const isCustomExtParams = getFieldValue(isCustomExtParamsFN);
const AndroidOpenTypeProp = getFieldDecorator(androidOpenTypeFN,
{ initialValue: 'APPLICATION' });
const androidOpenTypeValue = getFieldValue(androidOpenTypeFN);
const AndroidActivityProp = getFieldDecorator(androidActivityFN,
{ initialValue: undefined });
const AndroidOpenUrlProp = getFieldDecorator(androidOpenUrlFN,
{ initialValue: undefined });
getFieldDecorator(customExtParamsItemsFN, {
initialValue: [extParamsItemUuid],
});
const customExtParamsItems = getFieldValue(customExtParamsItemsFN);
//推送目标
const TargetProp = getFieldDecorator(targetFN, { initialValue: 'ALL' });
const TargetValueProp = getFieldDecorator(targetValueFN, { initialValue: 'ALL' });
const targetValueNode = () => {
switch (getFieldValue(targetFN)) {
case 'ALL':
return undefined;
case 'DEVICE':
return (
<Row>
{TargetValueProp(
<Input placeholder="请输入deviceId,多个终端用逗号分隔" />,
)}
</Row>
);
case 'ACCOUNT':
return (
<Row>
{TargetValueProp(
<Input placeholder="请输入wenwenId,多个wenwenId用逗号分隔" />,
)}
</Row>
);
case 'COUNTRY': {
getFieldDecorator(targetValueFN, { initialValue: '' });
const { countryList } = this.props;
return (
<Row>
{TargetValueProp(
<MySelect defaultValue="" selectOptionDataList={countryList} descKey="codeName" valueKey="codeValue" />
)}
</Row>
);
}
}
};
//推送时间
const IsSetPushTimeProp = getFieldDecorator(isSetPushTimeFN,
{ initialValue: 'false' });
const currentDate = moment(new Date());
const PushTimeDatePickerProp = getFieldDecorator(pushTimeDatePickerFN, {
initialValue: currentDate,
format: 'YYYY-MM-DD',
});
const PushTimeTimePickerProp = getFieldDecorator(pushTimeTimePickerFN, {
initialValue: currentDate,
format: 'HH:mm:ss',
});
const pushTimeNode = () => {
switch (getFieldValue(isSetPushTimeFN)) {
case 'true':
return (
<Row>
{PushTimeDatePickerProp(<DatePicker />)}
{PushTimeTimePickerProp(<TimePicker />)}
</Row>
);
default:
return undefined;
}
};
//离线保存
const IsStoreOfflineProp = getFieldDecorator(isStoreOfflineFn,
{ initialValue: 'true' });
const ExpireTimeProp = getFieldDecorator(expireTimeFN,
{ initialValue: 72 });
const expireTimeNode = () => {
if (getFieldValue(isStoreOfflineFn) === 'true') {
return (<Row>保存 {ExpireTimeProp(
<InputNumber min={1} max={72} />)}小时,该时段之后再上线的用户将收不到推送</Row>);
}
return undefined;
};
const formItemLayout = {
style: { width: '100%' },
wrapperCol: { span: 24 },
};
let contentPanelBoxLabel;
if (this.props.type === '1') {
contentPanelBoxLabel = '通知';
} else {
contentPanelBoxLabel = '消息';
}
return (
<Row>
<Col span={15} >
<Form layout="inline" >
<Row> </Row>
<Row style={{ width: 1045 }} >
推送大类
{PushBigTypeProp(
<Select style={{ width: 250 }} >
{pushBigTypeOptions}
</Select>,
)}
推送小类
{PushTypeProp(
<Select style={{ width: 265 }} >
{pushTypeOptions()}
</Select>,
)}
</Row>
<Row />
<PanelBox
title={`${contentPanelBoxLabel}内容(必填)`}
style={{ marginTop: '10px', width: 650 }}
>
{titleNode(contentPanelBoxLabel)}
<Row />
<Row style={{ height: 25 }} >
{contentPanelBoxLabel}内容:
<span style={{ float: 'right' }} >{bodyFactSize()}/{String(
bodyMaxSize)}</span>
</Row>
<FormItem {...formItemLayout}>
{BodyProp(
<Input type="textarea" />,
)}
</FormItem>
</PanelBox>
<IosEnvPlus deviceType={this.props.deviceType} ApnsEnvProp={ApnsEnvProp} style={{ width: 650 }} />
<Collapse style={{ marginTop: '25px', width: 650 }} >
<Panel header="高级设置(选填)" >
<Card
title="发送对象及时间:"
style={{
marginTop: '5px',
lineHeight: '28px',
width: 650,
}}
>
<AfterOnClickOptPlus
deviceType={this.props.deviceType}
AndroidOpenTypeProp={AndroidOpenTypeProp}
androidOpenTypeValue={androidOpenTypeValue}
IsCustomExtParamsProp={IsCustomExtParamsProp}
isCustomExtParams={isCustomExtParams}
customExtParamsItems={customExtParamsItems}
AndroidActivityProp={AndroidActivityProp}
AndroidOpenUrlProp={AndroidOpenUrlProp}
getFieldDecoratorFn={getFieldDecorator}
setFieldsValueFn={setFieldsValue}
/>
<Row>发送对象:
{TargetProp(
<RadioGroup >
<Radio value="ALL" >所有</Radio>
<Radio value="DEVICE" >指定终端</Radio>
<Radio value="ACCOUNT" >指定wenwenId</Radio>
<Radio value="COUNTRY" >指定国家</Radio>
</RadioGroup>,
)}
</Row>
<Row>{targetValueNode()}</Row>
<Row >发送时间:
{IsSetPushTimeProp(
<RadioGroup >
<Radio value="false" >立即发送</Radio>
<Radio value="true" >定时发送</Radio>
</RadioGroup>,
)}
</Row>
<Row>{pushTimeNode()}</Row>
<Row >离线保存:
{IsStoreOfflineProp(
<RadioGroup >
<Radio value="false" >不保存</Radio>
<Radio value="true" >保存</Radio>
</RadioGroup>,
)}
</Row>
<Row>{expireTimeNode()}</Row>
</Card>
</Panel>
</Collapse>
<br />
<Button type="primary" onClick={this.handleSubmit} style={{ marginTop: '10px' }} >确定</Button>
</Form>
</Col>
</Row>
);
}
}
const mapStateToProps = (state) => {
const StaticDataService = state.get('StaticDataService');
const countryList = StaticDataService.get('staticDataList');
return { countryList };
};
const mapDispatchToProps = (dispatch) => {
const actionCreators = {
pushNotification: actions.pushNotification,
pushMessage: actions.pushMessage,
getStaticDataByCodeType
};
return {
actions: bindActionCreators(actionCreators, dispatch),
};
};
export default connect(mapStateToProps, mapDispatchToProps)(
Form.create()(PushForm));
| actions | identifier_name |
PushForm.js | 'use strict';
import * as Immutable from 'immutable';
import moment from 'moment';
import React, { PropTypes } from 'react';
import { bindActionCreators } from 'redux';
import { connect } from 'react-redux';
import {
Button,
Card,
Col,
Collapse,
DatePicker,
Form,
Input,
InputNumber,
Radio,
Row,
Select,
TimePicker,
} from 'antd';
import * as actions from 'action';
import PanelBox from 'framework/components/PanelBox';
import { getStaticDataByCodeType } from 'business/common/StaticDataService';
import MySelect from 'framework/components/Select';
import * as MsgUtil from 'utils/MsgUtil';
import * as Constant from 'utils/Constant';
import AfterOnClickOptPlus from './plus/AfterOnClickOptPlus';
import FormField from './FormField';
import IosEnvPlus from './plus/IosEnvPlus';
const FormItem = Form.Item;
const Option = Select.Option;
const RadioGroup = Radio.Group;
const Panel = Collapse.Panel;
const {
titleFN, bodyFN, pushBigTypeFN, pushTypeFN, androidOpenTypeFN, androidActivityFN,
androidOpenUrlFN, customExtParamsItemsFN, isCustomExtParamsFN, notifyTypeFN,
targetFN, targetValueFN, isSetPushTimeFN, pushTimeDatePickerFN, pushTimeTimePickerFN,
isStoreOfflineFn, expireTimeFN, apnsEnvFN, remindFN, isIosAddOpenUrlFN,androidNotificationChannelFN
} = FormField;
const pushBigType = [// 1-系统 2-活动/推荐 3-吻吻服务 4-资讯
{ key: '1', value: '系统' },
{ key: '2', value: '活动/推荐' },
{ key: '3', value: '吻吻服务' },
{ key: '4', value: '资讯' },
];
const pushType = {
// 大类1-系统:1-系统信息,1001-强制退出消息,后续从1002开始定义;
// 大类2-活动/推荐:2-活动信息,后续从2001开始定义;
// 大类3-吻吻服务:3-发起绑定 4-绑定回应 6-吻吻蜜语
// 8-情侣等级提升 9-背景图片更新,后续从3001开始定义;
// 大类4-资讯:4001-普通资讯
1: [{ key: '1001', value: '强制退出' }],
2: [{ key: '2', value: '活动信息' }],
3: [
// {key:"3", value:"发起绑定"},
// {key:"4", value:"绑定回应"},
{ key: '6', value: '吻吻蜜语' },
{ key: '8', value: '情侣等级提升' },
{ key: '9', value: '背景图片更新' },
{ key: '3009', value: '今日睡眠报告' },
{ key: '3100', value: '运动计步晚上9点10点提醒' },
],
4: [{ key: '4001', value: '普通资讯' }],
};
const titleMaxSize = 20;
const bodyMaxSize = 60;
const extParamsItemUuid = 0;
class PushForm extends React.Component {
static propTypes = {
actions: PropTypes.object.isRequired,
form: PropTypes.object.isRequired,
type: PropTypes.number.isRequired,
deviceType: PropTypes.number.isRequired,
countryList: PropTypes.array.isRequired,
};
static defaultProp = {
countryList: []
}
componentWillMount() {
const { actions, countryList } = this.props;
if (!countryList || countryList.length < 1) {
actions.getStaticDataByCodeType(Constant.USER_COUNTRY_ATTR_CODE);
}
}
onPushBigTypeChange = (evt) => {
const { getFieldValue, setFieldsValue } = this.props.form;
setFieldsValue(pushTypeFN,
pushType[getFieldValue(pushBigTypeFN)][0].key);
};
handleSubmit = (e) => {
e.preventDefault();
this.props.form.validateFields((errors, values) => {
if (!!errors) {
return;
}
let data = Immutable.Map(values).
set('type', this.props.type === undefined ? 0 : Number(
this.props.type))//0:表示消息(默认为0), 1:表示通知
.set('deviceType', this.props.deviceType); //设备类型,取值范围为:0:iOS设备 1:Andriod设备 3:全部类型设备
if (data.get('deviceType') === '1') { //android设备推送不需要上传ios apns环境变量
data = data.delete(apnsEnvFN);
}
if (data.get('type') === 1) { //通知类型才有打开方式选项
if (data.get(androidOpenTypeFN) === 'ACTIVITY') { //打开指定页面
if (!data.get(androidActivityFN)) {
MsgUtil.showwarning('指定打开的页面不允许为空');
return;
}
data.set('xiaomiActivity', data.get(androidActivityFN)); //xiaomiActivity与androidActivity赋值一直,由服务端判断怎么推
}
if (data.get(androidOpenTypeFN) === 'URL' &&
!data.get(androidOpenUrlFN)) { //打开指定网页
MsgUtil.showwarning('指定打开的网页不允许为空');
return;
}
//删除无效字段
if (data.get(androidOpenTypeFN) === 'APPLICATION' ||
data.get(androidOpenTypeFN) === 'NONE') { //打开应用或者无逻辑
data = data.delete(androidActivityFN).
delete(androidOpenUrlFN);
}
} else {
data = data.delete(androidActivityFN).delete(androidOpenUrlFN);
}
//填充自定义扩展参数
if (data.get(isCustomExtParamsFN) || data.get(isIosAddOpenUrlFN)) {
const extParameters = {};
data.get(customExtParamsItemsFN).map((item) => {
const key = `extParamsKey_${item}`;
if (data.get(key)) {
extParameters[data.get(key)] = data.get(
`extParamsValue_${item}`);
}
});
data = data.set('extParameters', Immutable.Map(extParameters).merge(data.get('extParameters')));
}
data = data.delete(isCustomExtParamsFN).
delete(customExtParamsItemsFN);
if (data.get(targetFN) === 'DEVICE' && !data.get(targetValueFN)) {
MsgUtil.showwarning('指定设备信息不正确');
return;
}
if (data.get(targetFN) === 'ACCOUNT' && !data.get(targetValueFN)) {
MsgUtil.showwarning('指定wenwenId信息不正确');
return;
}
if (data.get(targetFN) === 'ALL') {
data.set(targetValueFN, 'ALL');
}
//设置推送时间
if (data.get(isSetPushTimeFN) === 'true') {
if (!data.get(pushTimeDatePickerFN) || !data.get(pushTimeTimePickerFN)) {
MsgUtil.showwarning('推送日期不允许为空');
return;
} else {
console.log('需要精确到分钟____________', moment(data.get(pushTimeDatePickerFN)).format('YYYYMMDD') + moment(data.get(pushTimeTimePickerFN)).format('HHmm'));
data.set('pushTime', moment(data.get(pushTimeDatePickerFN)).format('YYYYMMDD') + moment(data.get(pushTimeTimePickerFN)).format('HHmmss'));
const pushTimeFormNowLengthSplit = moment(data.get('pushTime'), 'YYYYMMDDHHmmss').fromNow().split(' ');
if (pushTimeFormNowLengthSplit[1] === '天前') {
MsgUtil.showwarning(`推送日期不允许选择${moment(data.get('pushTime'), 'YYYYMMDDHHmmss').fromNow()}`);
return;
}
if (Number(pushTimeFormNowLengthSplit[0]) > 7) {
MsgUtil.showwarning('推送日期不允许大于7天');
return;
}
}
}
data = data.delete(isSetPushTimeFN).
delete(pushTimeDatePickerFN).
delete(pushTimeTimePickerFN);
if (data.get(isStoreOfflineFn) === 'true') {
const expireTime = Number(data.get(expireTimeFN));
if (expireTime < 1 || expireTime > 72) {
MsgUtil.showwarning('离线保存时间最短1小时,最长72小时');
return;
}
} else {
data = data.delete(expireTimeFN);
}
//当设备类型为1-android或者3-所有设备时,填充Android提醒方式
if (data.get('deviceType') === '1' ||
data.get('deviceType') === '3') {
const extParameters = {};
extParameters['_NOTIFY_TYPE_'] = data.get(notifyTypeFN);
data.set('extParameters', Immutable.Map(extParameters).merge(data.get('extParameters')));
}
if (data.get('extParameters')) {
data = data.set('extParameters', JSON.stringify(data.get('extParameters').toJS()));
}
data = data.delete(notifyTypeFN);
//target targetValue处理
if (data.get(targetFN) === 'COUNTRY') {
console.log('data.get(targetValueFN)', data.get(targetValueFN));
if ( data.get(targetValueFN) && data.get(targetValueFN) !== 'ALL' ) {
data = data.set(targetFN, 'tag');
} else {
data = data.set(targetFN, 'ALL').set(targetValueFN, 'ALL');
}
}
data = data.set(androidNotificationChannelFN, 1);
if (data.get('type') === 1) {
this.props.actions.pushNotification(data);
} else {
this.props.actions.pushMessage(data);
}
});
};
render () {
const { getFieldDecorator, getFieldValue, setFieldsValue } = this.props.form;
//通知大类
const PushBigTypeProp = getFieldDecorator(pushBigTypeFN, {
initialValue: pushBigType[0].key,
onChange: this.onPushBigTypeChange,
});
const pushBigTypeOptions = pushBigType.map(
bigType =>
<Option key={bigType.key} >{bigType.key}-{bigType.value}</Option>);
//通知小类
const PushTypeProp = getFieldDecorator(pushTypeFN,
{ initialValue: pushType[getFieldValue(pushBigTypeFN)][0].key });
const pushTypeOptions = () => {
return pushType[getFieldValue(pushBigTypeFN)].map(
pushType =>
<Option key={pushType.key} >{pushType.key}-{pushType.value}</Option>);
};
//获取标题实际长度
const titleFactSize = () => {
const value = getFieldValue(titleFN);
return String(value ? value.length : 0);
};
const titleNode = (contentLabel) => {
if (this.props.type === '1' && this.props.deviceType === '0') { //通知且ios设备
getFieldDecorator(titleFN, { initialValue: '' });
return undefined;
} else {
const TitleProp = getFieldDecorator(titleFN, {
initialValue: '',
rules: [
{ required: true, max: 20 },
],
validateTrigger: 'onBlur',
});
const titleLabel = `${contentLabel}标题`;
return (
<Row>
<Row style={{ height: 25 }} >{titleLabel}:
<span style={{ float: 'right' }} >
{titleFactSize()}/{String(titleMaxSize)}
</span>
</Row>
<FormItem {...formItemLayout}>
{TitleProp(
<Input style={{ width: '100%' }} />,
)}
</FormItem>
</Row>
);
}
};
const BodyProp = getFieldDecorator(bodyFN, {
initialValue: '',
rules: [{ required: true, max: 60 }],
validateTrigger: 'onBlur',
});
//获取内容实际长度
const bodyFactSize = () => {
const value = getFieldValue(bodyFN);
return String(value ? value.length : 0);
};
//ios推送环境
const ApnsEnvProp = getFieldDecorator(apnsEnvFN,
{ initialValue: 'DEV' });
//点击后操作的字段
const IsCustomExtParamsProp = getFieldDecorator(isCustomExtParamsFN,
{ initialValue: false });
const isCustomExtParams = getFieldValue(isCustomExtParamsFN);
const AndroidOpenTypeProp = getFieldDecorator(androidOpenTypeFN,
{ initialValue: 'APPLICATION' });
const androidOpenTypeValue = getFieldValue(androidOpenTypeFN);
const AndroidActivityProp = getFieldDecorator(androidActivityFN,
{ initialValue: undefined });
const AndroidOpenUrlProp = getFieldDecorator(androidOpenUrlFN,
{ initialValue: undefined });
getFieldDecorator(customExtParamsItemsFN, {
initialValue: [extParamsItemUuid],
});
const customExtParamsItems = getFieldValue(customExtParamsItemsFN);
//推送目标
const TargetProp = getFieldDecorator(targetFN, { initialValue: 'ALL' });
const TargetValueProp = getFieldDecorator(targetValueFN, { initialValue: 'ALL' });
const targetValueNode = () => {
switch (getFieldValue(targetFN)) {
case 'ALL':
return undefined;
case 'DEVICE':
return (
<Row>
{TargetValueProp(
<Input placeholder="请输入deviceId,多个终端用逗号分隔" />,
)}
</Row>
);
case 'ACCOUNT':
return (
<Row>
{TargetValueProp(
<Input placeholder="请输入wenwenId,多个wenwenId用逗号分隔" />,
)}
</Row>
);
case 'COUNTRY': {
getFieldDecorator(targetValueFN, { initialValue: '' });
const { countryList } = this.props;
return (
<Row>
{TargetValueProp(
<MySelect defaultValue="" selectOptionDataList={countryList} descKey="codeName" valueKey="codeValue" />
)}
</Row>
);
}
}
};
//推送时间
const IsSetPushTimeProp = getFieldDecorator(isSetPushTimeFN,
{ initialValue: 'false' });
const currentDate = moment(new Date());
const PushTimeDatePickerProp = getFieldDecorator(pushTimeDatePickerFN, {
initialValue: currentDate,
format: 'YYYY-MM-DD',
});
const PushTimeTimePickerProp = getFieldDecorator(pushTimeTimePickerFN, {
initialValue: currentDate,
format: 'HH:mm:ss',
});
const pushTimeNode = () => {
switch (getFieldValue(isSetPushTimeFN)) {
case 'true':
return (
<Row>
{PushTimeDatePickerProp(<DatePicker />)}
{PushTimeTimePickerProp(<TimePicker />)}
</Row>
);
default:
return undefined;
}
};
//离线保存
const IsStoreOfflineProp = getFieldDecorator(isStoreOfflineFn,
{ initialValue: 'true' });
const ExpireTimeProp = getFieldDecorator(expireTimeFN,
{ initialValue: 72 });
const expireTimeNode = () => {
if (getFieldValue(isStoreOfflineFn) === 'true') {
return (<Row>保存 {ExpireTimeProp(
<InputNumber min={1} max={72} />)}小时,该时段之后再上线的用户将收不到推送</Row>);
}
return undefined;
};
const formItemLayout = {
style: { width: '100%' },
wrapperCol: { span: 24 },
};
let contentPanelBoxLabel;
if (this.props.type === '1') {
contentPanelBoxLabel = '通知'; | <Row>
<Col span={15} >
<Form layout="inline" >
<Row> </Row>
<Row style={{ width: 1045 }} >
推送大类
{PushBigTypeProp(
<Select style={{ width: 250 }} >
{pushBigTypeOptions}
</Select>,
)}
推送小类
{PushTypeProp(
<Select style={{ width: 265 }} >
{pushTypeOptions()}
</Select>,
)}
</Row>
<Row />
<PanelBox
title={`${contentPanelBoxLabel}内容(必填)`}
style={{ marginTop: '10px', width: 650 }}
>
{titleNode(contentPanelBoxLabel)}
<Row />
<Row style={{ height: 25 }} >
{contentPanelBoxLabel}内容:
<span style={{ float: 'right' }} >{bodyFactSize()}/{String(
bodyMaxSize)}</span>
</Row>
<FormItem {...formItemLayout}>
{BodyProp(
<Input type="textarea" />,
)}
</FormItem>
</PanelBox>
<IosEnvPlus deviceType={this.props.deviceType} ApnsEnvProp={ApnsEnvProp} style={{ width: 650 }} />
<Collapse style={{ marginTop: '25px', width: 650 }} >
<Panel header="高级设置(选填)" >
<Card
title="发送对象及时间:"
style={{
marginTop: '5px',
lineHeight: '28px',
width: 650,
}}
>
<AfterOnClickOptPlus
deviceType={this.props.deviceType}
AndroidOpenTypeProp={AndroidOpenTypeProp}
androidOpenTypeValue={androidOpenTypeValue}
IsCustomExtParamsProp={IsCustomExtParamsProp}
isCustomExtParams={isCustomExtParams}
customExtParamsItems={customExtParamsItems}
AndroidActivityProp={AndroidActivityProp}
AndroidOpenUrlProp={AndroidOpenUrlProp}
getFieldDecoratorFn={getFieldDecorator}
setFieldsValueFn={setFieldsValue}
/>
<Row>发送对象:
{TargetProp(
<RadioGroup >
<Radio value="ALL" >所有</Radio>
<Radio value="DEVICE" >指定终端</Radio>
<Radio value="ACCOUNT" >指定wenwenId</Radio>
<Radio value="COUNTRY" >指定国家</Radio>
</RadioGroup>,
)}
</Row>
<Row>{targetValueNode()}</Row>
<Row >发送时间:
{IsSetPushTimeProp(
<RadioGroup >
<Radio value="false" >立即发送</Radio>
<Radio value="true" >定时发送</Radio>
</RadioGroup>,
)}
</Row>
<Row>{pushTimeNode()}</Row>
<Row >离线保存:
{IsStoreOfflineProp(
<RadioGroup >
<Radio value="false" >不保存</Radio>
<Radio value="true" >保存</Radio>
</RadioGroup>,
)}
</Row>
<Row>{expireTimeNode()}</Row>
</Card>
</Panel>
</Collapse>
<br />
<Button type="primary" onClick={this.handleSubmit} style={{ marginTop: '10px' }} >确定</Button>
</Form>
</Col>
</Row>
);
}
}
const mapStateToProps = (state) => {
const StaticDataService = state.get('StaticDataService');
const countryList = StaticDataService.get('staticDataList');
return { countryList };
};
const mapDispatchToProps = (dispatch) => {
const actionCreators = {
pushNotification: actions.pushNotification,
pushMessage: actions.pushMessage,
getStaticDataByCodeType
};
return {
actions: bindActionCreators(actionCreators, dispatch),
};
};
export default connect(mapStateToProps, mapDispatchToProps)(
Form.create()(PushForm)); | } else {
contentPanelBoxLabel = '消息';
}
return ( | random_line_split |
app.js | 'use strict';
define(['angular', 'ol', 'toolbar', 'layermanager', 'sidebar', 'map', 'ows', 'query', 'search', 'print', 'permalink', 'measure', 'bootstrap', 'legend', 'panoramio', 'geolocation', 'core', 'wirecloud', 'angular-gettext', 'translations'],
function(angular, ol, toolbar, layermanager) {
var modules_to_load = [
'hs.toolbar',
'hs.layermanager',
'hs.map',
'hs.ows',
'hs.query',
'hs.search', 'hs.print', 'hs.permalink', 'hs.measure',
'hs.legend', 'hs.geolocation', 'hs.core', 'hs.wirecloud', 'gettext', 'hs.sidebar'
];
if (typeof MashupPlatform !== 'undefined') {
modules_to_load = eval(MashupPlatform.prefs.get('modules_to_load'));
}
var module = angular.module('hs', modules_to_load);
module.directive('hs', ['hs.map.service', 'Core', function(OlMap, Core) {
return {
templateUrl: hsl_path + 'hslayers.html',
link: function(scope, element) {
Core.fullScreenMap(element);
}
};
}]);
var location_layer = new ol.layer.Vector({
title: "Locations",
show_in_manager: true,
source: new ol.source.Vector(),
style: function(feature, resolution) {
return [new ol.style.Style({
text: new ol.style.Text({
text: feature.get('temperature'),
offsetY: -10,
offsetX: 5,
fill: new ol.style.Fill({
color: '#000'
})
}),
image: new ol.style.Circle({
fill: new ol.style.Fill({
color: feature.color ? feature.color : [242, 121, 0, 0.7]
}),
stroke: new ol.style.Stroke({
color: [0x33, 0x33, 0x33, 0.9]
}),
radius: 5
})
})]
}
});
var extent_layer = new ol.layer.Vector({
title: "Locations",
show_in_manager: true,
source: new ol.source.Vector(),
style: function(feature, resolution) {
return [new ol.style.Style({
stroke: new ol.style.Stroke({
color: '#005CB6',
width: 3
}),
fill: new ol.style.Fill({
color: 'rgba(0, 0, 255, 0.1)'
})
})]
}
});
var location_feature_ids = {};
function rainbow(numOfSteps, step, opacity) {
// based on http://stackoverflow.com/a/7419630
// This function generates vibrant, "evenly spaced" colours (i.e. no clustering). This is ideal for creating easily distiguishable vibrant markers in Google Maps and other apps.
// Adam Cole, 2011-Sept-14
// HSV to RBG adapted from: http://mjijackson.com/2008/02/rgb-to-hsl-and-rgb-to-hsv-color-model-conversion-algorithms-in-javascript
var r, g, b;
var h = step / (numOfSteps * 1.00000001);
var i = ~~(h * 4);
var f = h * 4 - i;
var q = 1 - f;
switch (i % 4) {
case 2:
r = f, g = 1, b = 0;
break;
case 0:
r = 0, g = f, b = 1;
break;
case 3:
r = 1, g = q, b = 0;
break;
case 1:
r = 0, g = 1, b = q;
break;
}
var c = "rgba(" + ~~(r * 235) + "," + ~~(g * 235) + "," + ~~(b * 235) + ", " + opacity + ")";
return (c);
}
function processObject(data) |
function processUnit(data) {
var attributes = {
id: data.id
};
var projection = 'EPSG:4326';
/*for(var meta_i; meta_i<attr.metadatas.length; meta_i++){
if(attr.metadatas[meta_i].name=="location")
projection = attr.metadatas[meta_i].value;
}*/
var coords = data.position.split(',');
attributes.geometry = new ol.geom.Point(ol.proj.transform([parseFloat(coords[1]), parseFloat(coords[0])], projection, 'EPSG:3857'));
attributes.timestamp = data.timestamp;
var feature = null;
if (location_feature_ids[data.id]) {
feature = location_feature_ids[data.id];
feature.setGeometry(attributes.geometry);
for (var atr in attributes) {
feature.set(atr, attributes[atr]);
}
} else {
feature = new ol.Feature(attributes);
feature.tags = {};
location_layer.getSource().addFeatures([feature]);
location_feature_ids[data.id] = feature;
}
}
function processTag(data) {
if (location_feature_ids[data.unit]) {
location_feature_ids[data.unit].tags[data.id] = data;
var max_temp = -273.15;
var timestamp = "";
for (var tag in location_feature_ids[data.unit].tags) {
var t = parseFloat(location_feature_ids[data.unit].tags[tag].temperature);
max_temp = t > max_temp ? t : max_temp;
timestamp = location_feature_ids[data.unit].tags[tag].timestamp;
}
location_feature_ids[data.unit].color = rainbow(30, Math.min(Math.max(max_temp, -15), 15) + 15, 0.7);
location_feature_ids[data.unit].set("max_temp", max_temp.toFixed(2) + " °C");
location_feature_ids[data.unit].set("timestamp", timestamp);
}
}
module.value('config', {
default_layers: [
new ol.layer.Tile({
source: new ol.source.OSM(),
show_in_manager: true,
title: "Base layer",
base: true
}),
location_layer,
extent_layer
],
wirecloud_data_consumer: function(data) {
data = angular.fromJson(data);
if (console) console.log(data);
if (typeof data.type !== 'undefined') {
switch (data.type) {
/*case "Unit":
processUnit(data);
break;
case "Tag":
processTag(data);
break;*/
default: processObject(data);
}
} else {
process_object(data);
}
},
default_view: new ol.View({
center: ol.proj.transform([17.474129, 52.574000], 'EPSG:4326', 'EPSG:3857'), //Latitude longitude to Spherical Mercator
zoom: 4,
units: "m"
})
});
module.controller('Main', ['$scope', 'Core', 'hs.query.baseService', 'config',
function($scope, Core, QueryService, config) {
$scope.hsl_path = hsl_path; //Get this from hslayers.js file
$scope.Core = Core;
$scope.$on('query.dataUpdated', function(event) {
if (console) console.log('Attributes', QueryService.data.attributes, 'Groups', QueryService.data.groups);
});
if (typeof MashupPlatform !== 'undefined')
MashupPlatform.wiring.registerCallback("data_received_slot", config.wirecloud_data_consumer);
//This is needed because data can arrive before hslayers is loaded, so we store it in tmp and process later.
for (var i = 0; i < tmp_data_received.length; i++) {
config.wirecloud_data_consumer(tmp_data_received[i]);
}
}
]);
return module;
});
| {
//Get settings from configuration
var id_attr_name = MashupPlatform.prefs.get('id_attr_name');
var coordinates_attr_name = MashupPlatform.prefs.get('coordinates_attr_name');
var measurements_attr_names = MashupPlatform.prefs.get('measurements_attr_names').split(',');
var timestamp_attr_name = MashupPlatform.prefs.get('timestamp_attr_name');
//Read attributes
var attributes = {
id: data[id_attr_name],
timestamp: data[timestamp_attr_name]
};
var projection = 'EPSG:4326';
if (angular.isUndefined(data[coordinates_attr_name])) return;
var coords = data[coordinates_attr_name].split(','); //Supposed ccordinates are lon, lat seperated by comma
attributes.geometry = new ol.geom.Point(ol.proj.transform([parseFloat(coords[1]), parseFloat(coords[0])], projection, 'EPSG:3857'));
for (var attr_i = 0; attr_i < measurements_attr_names.length; attr_i++) {
var t = parseFloat(data[measurements_attr_names[attr_i]]);
attributes[measurements_attr_names[attr_i]] = t.toFixed(2);
}
//Create feature if necessary. Set the attribute values for the feature
var feature = null;
if (location_feature_ids[data[id_attr_name]]) {
feature = location_feature_ids[data[id_attr_name]];
feature.setGeometry(attributes.geometry);
for (var atr in attributes) {
feature.set(atr, attributes[atr]);
}
} else {
feature = new ol.Feature(attributes);
location_layer.getSource().addFeatures([feature]);
location_feature_ids[data[id_attr_name]] = feature;
}
//Compute color
//feature.color = rainbow(30, Math.min(Math.max(max_temp, -15), 15) + 15, 0.7);
} | identifier_body |
app.js | 'use strict';
define(['angular', 'ol', 'toolbar', 'layermanager', 'sidebar', 'map', 'ows', 'query', 'search', 'print', 'permalink', 'measure', 'bootstrap', 'legend', 'panoramio', 'geolocation', 'core', 'wirecloud', 'angular-gettext', 'translations'],
function(angular, ol, toolbar, layermanager) {
var modules_to_load = [
'hs.toolbar',
'hs.layermanager',
'hs.map',
'hs.ows',
'hs.query',
'hs.search', 'hs.print', 'hs.permalink', 'hs.measure',
'hs.legend', 'hs.geolocation', 'hs.core', 'hs.wirecloud', 'gettext', 'hs.sidebar'
];
if (typeof MashupPlatform !== 'undefined') {
modules_to_load = eval(MashupPlatform.prefs.get('modules_to_load'));
}
var module = angular.module('hs', modules_to_load);
module.directive('hs', ['hs.map.service', 'Core', function(OlMap, Core) {
return {
templateUrl: hsl_path + 'hslayers.html',
link: function(scope, element) {
Core.fullScreenMap(element);
}
};
}]);
var location_layer = new ol.layer.Vector({
title: "Locations",
show_in_manager: true,
source: new ol.source.Vector(),
style: function(feature, resolution) {
return [new ol.style.Style({
text: new ol.style.Text({
text: feature.get('temperature'),
offsetY: -10,
offsetX: 5,
fill: new ol.style.Fill({
color: '#000'
})
}),
image: new ol.style.Circle({
fill: new ol.style.Fill({
color: feature.color ? feature.color : [242, 121, 0, 0.7]
}),
stroke: new ol.style.Stroke({
color: [0x33, 0x33, 0x33, 0.9]
}),
radius: 5
})
})]
}
});
var extent_layer = new ol.layer.Vector({
title: "Locations",
show_in_manager: true,
source: new ol.source.Vector(),
style: function(feature, resolution) {
return [new ol.style.Style({
stroke: new ol.style.Stroke({
color: '#005CB6',
width: 3
}),
fill: new ol.style.Fill({
color: 'rgba(0, 0, 255, 0.1)'
})
})]
}
});
var location_feature_ids = {};
function rainbow(numOfSteps, step, opacity) {
// based on http://stackoverflow.com/a/7419630
// This function generates vibrant, "evenly spaced" colours (i.e. no clustering). This is ideal for creating easily distiguishable vibrant markers in Google Maps and other apps.
// Adam Cole, 2011-Sept-14
// HSV to RBG adapted from: http://mjijackson.com/2008/02/rgb-to-hsl-and-rgb-to-hsv-color-model-conversion-algorithms-in-javascript
var r, g, b;
var h = step / (numOfSteps * 1.00000001);
var i = ~~(h * 4);
var f = h * 4 - i;
var q = 1 - f;
switch (i % 4) {
case 2:
r = f, g = 1, b = 0;
break;
case 0:
r = 0, g = f, b = 1;
break;
case 3:
r = 1, g = q, b = 0;
break;
case 1:
r = 0, g = 1, b = q;
break;
}
var c = "rgba(" + ~~(r * 235) + "," + ~~(g * 235) + "," + ~~(b * 235) + ", " + opacity + ")";
return (c);
}
function processObject(data) {
//Get settings from configuration
var id_attr_name = MashupPlatform.prefs.get('id_attr_name');
var coordinates_attr_name = MashupPlatform.prefs.get('coordinates_attr_name');
var measurements_attr_names = MashupPlatform.prefs.get('measurements_attr_names').split(',');
var timestamp_attr_name = MashupPlatform.prefs.get('timestamp_attr_name');
//Read attributes
var attributes = {
id: data[id_attr_name],
timestamp: data[timestamp_attr_name]
};
var projection = 'EPSG:4326';
if (angular.isUndefined(data[coordinates_attr_name])) return;
var coords = data[coordinates_attr_name].split(','); //Supposed ccordinates are lon, lat seperated by comma
attributes.geometry = new ol.geom.Point(ol.proj.transform([parseFloat(coords[1]), parseFloat(coords[0])], projection, 'EPSG:3857'));
for (var attr_i = 0; attr_i < measurements_attr_names.length; attr_i++) {
var t = parseFloat(data[measurements_attr_names[attr_i]]);
attributes[measurements_attr_names[attr_i]] = t.toFixed(2);
}
//Create feature if necessary. Set the attribute values for the feature
var feature = null;
if (location_feature_ids[data[id_attr_name]]) {
feature = location_feature_ids[data[id_attr_name]];
feature.setGeometry(attributes.geometry);
for (var atr in attributes) {
feature.set(atr, attributes[atr]);
}
} else {
feature = new ol.Feature(attributes);
location_layer.getSource().addFeatures([feature]);
location_feature_ids[data[id_attr_name]] = feature;
}
//Compute color
//feature.color = rainbow(30, Math.min(Math.max(max_temp, -15), 15) + 15, 0.7);
}
function processUnit(data) {
var attributes = {
id: data.id
};
var projection = 'EPSG:4326';
/*for(var meta_i; meta_i<attr.metadatas.length; meta_i++){
if(attr.metadatas[meta_i].name=="location")
projection = attr.metadatas[meta_i].value;
}*/
var coords = data.position.split(',');
attributes.geometry = new ol.geom.Point(ol.proj.transform([parseFloat(coords[1]), parseFloat(coords[0])], projection, 'EPSG:3857'));
attributes.timestamp = data.timestamp;
var feature = null;
if (location_feature_ids[data.id]) {
feature = location_feature_ids[data.id];
feature.setGeometry(attributes.geometry);
for (var atr in attributes) {
feature.set(atr, attributes[atr]);
}
} else {
feature = new ol.Feature(attributes);
feature.tags = {};
location_layer.getSource().addFeatures([feature]);
location_feature_ids[data.id] = feature;
}
}
function processTag(data) {
if (location_feature_ids[data.unit]) {
location_feature_ids[data.unit].tags[data.id] = data;
var max_temp = -273.15;
var timestamp = "";
for (var tag in location_feature_ids[data.unit].tags) {
var t = parseFloat(location_feature_ids[data.unit].tags[tag].temperature);
max_temp = t > max_temp ? t : max_temp;
timestamp = location_feature_ids[data.unit].tags[tag].timestamp;
}
location_feature_ids[data.unit].color = rainbow(30, Math.min(Math.max(max_temp, -15), 15) + 15, 0.7);
location_feature_ids[data.unit].set("max_temp", max_temp.toFixed(2) + " °C");
location_feature_ids[data.unit].set("timestamp", timestamp);
}
}
module.value('config', {
default_layers: [
new ol.layer.Tile({
source: new ol.source.OSM(),
show_in_manager: true,
title: "Base layer",
base: true
}),
location_layer,
extent_layer
],
wirecloud_data_consumer: function(data) {
data = angular.fromJson(data);
if (console) console.log(data);
if (typeof data.type !== 'undefined') {
switch (data.type) {
/*case "Unit":
processUnit(data);
break;
case "Tag":
processTag(data);
break;*/
default: processObject(data);
}
} else {
process_object(data);
}
},
default_view: new ol.View({
center: ol.proj.transform([17.474129, 52.574000], 'EPSG:4326', 'EPSG:3857'), //Latitude longitude to Spherical Mercator
zoom: 4,
units: "m"
})
});
module.controller('Main', ['$scope', 'Core', 'hs.query.baseService', 'config',
function($scope, Core, QueryService, config) {
$scope.hsl_path = hsl_path; //Get this from hslayers.js file
$scope.Core = Core;
$scope.$on('query.dataUpdated', function(event) {
if (console) console.log('Attributes', QueryService.data.attributes, 'Groups', QueryService.data.groups);
});
if (typeof MashupPlatform !== 'undefined')
MashupPlatform.wiring.registerCallback("data_received_slot", config.wirecloud_data_consumer);
//This is needed because data can arrive before hslayers is loaded, so we store it in tmp and process later.
for (var i = 0; i < tmp_data_received.length; i++) {
config.wirecloud_data_consumer(tmp_data_received[i]);
}
}
]); | }); |
return module; | random_line_split |
app.js | 'use strict';
define(['angular', 'ol', 'toolbar', 'layermanager', 'sidebar', 'map', 'ows', 'query', 'search', 'print', 'permalink', 'measure', 'bootstrap', 'legend', 'panoramio', 'geolocation', 'core', 'wirecloud', 'angular-gettext', 'translations'],
function(angular, ol, toolbar, layermanager) {
var modules_to_load = [
'hs.toolbar',
'hs.layermanager',
'hs.map',
'hs.ows',
'hs.query',
'hs.search', 'hs.print', 'hs.permalink', 'hs.measure',
'hs.legend', 'hs.geolocation', 'hs.core', 'hs.wirecloud', 'gettext', 'hs.sidebar'
];
if (typeof MashupPlatform !== 'undefined') {
modules_to_load = eval(MashupPlatform.prefs.get('modules_to_load'));
}
var module = angular.module('hs', modules_to_load);
module.directive('hs', ['hs.map.service', 'Core', function(OlMap, Core) {
return {
templateUrl: hsl_path + 'hslayers.html',
link: function(scope, element) {
Core.fullScreenMap(element);
}
};
}]);
var location_layer = new ol.layer.Vector({
title: "Locations",
show_in_manager: true,
source: new ol.source.Vector(),
style: function(feature, resolution) {
return [new ol.style.Style({
text: new ol.style.Text({
text: feature.get('temperature'),
offsetY: -10,
offsetX: 5,
fill: new ol.style.Fill({
color: '#000'
})
}),
image: new ol.style.Circle({
fill: new ol.style.Fill({
color: feature.color ? feature.color : [242, 121, 0, 0.7]
}),
stroke: new ol.style.Stroke({
color: [0x33, 0x33, 0x33, 0.9]
}),
radius: 5
})
})]
}
});
var extent_layer = new ol.layer.Vector({
title: "Locations",
show_in_manager: true,
source: new ol.source.Vector(),
style: function(feature, resolution) {
return [new ol.style.Style({
stroke: new ol.style.Stroke({
color: '#005CB6',
width: 3
}),
fill: new ol.style.Fill({
color: 'rgba(0, 0, 255, 0.1)'
})
})]
}
});
var location_feature_ids = {};
function | (numOfSteps, step, opacity) {
// based on http://stackoverflow.com/a/7419630
// This function generates vibrant, "evenly spaced" colours (i.e. no clustering). This is ideal for creating easily distiguishable vibrant markers in Google Maps and other apps.
// Adam Cole, 2011-Sept-14
// HSV to RBG adapted from: http://mjijackson.com/2008/02/rgb-to-hsl-and-rgb-to-hsv-color-model-conversion-algorithms-in-javascript
var r, g, b;
var h = step / (numOfSteps * 1.00000001);
var i = ~~(h * 4);
var f = h * 4 - i;
var q = 1 - f;
switch (i % 4) {
case 2:
r = f, g = 1, b = 0;
break;
case 0:
r = 0, g = f, b = 1;
break;
case 3:
r = 1, g = q, b = 0;
break;
case 1:
r = 0, g = 1, b = q;
break;
}
var c = "rgba(" + ~~(r * 235) + "," + ~~(g * 235) + "," + ~~(b * 235) + ", " + opacity + ")";
return (c);
}
function processObject(data) {
//Get settings from configuration
var id_attr_name = MashupPlatform.prefs.get('id_attr_name');
var coordinates_attr_name = MashupPlatform.prefs.get('coordinates_attr_name');
var measurements_attr_names = MashupPlatform.prefs.get('measurements_attr_names').split(',');
var timestamp_attr_name = MashupPlatform.prefs.get('timestamp_attr_name');
//Read attributes
var attributes = {
id: data[id_attr_name],
timestamp: data[timestamp_attr_name]
};
var projection = 'EPSG:4326';
if (angular.isUndefined(data[coordinates_attr_name])) return;
var coords = data[coordinates_attr_name].split(','); //Supposed ccordinates are lon, lat seperated by comma
attributes.geometry = new ol.geom.Point(ol.proj.transform([parseFloat(coords[1]), parseFloat(coords[0])], projection, 'EPSG:3857'));
for (var attr_i = 0; attr_i < measurements_attr_names.length; attr_i++) {
var t = parseFloat(data[measurements_attr_names[attr_i]]);
attributes[measurements_attr_names[attr_i]] = t.toFixed(2);
}
//Create feature if necessary. Set the attribute values for the feature
var feature = null;
if (location_feature_ids[data[id_attr_name]]) {
feature = location_feature_ids[data[id_attr_name]];
feature.setGeometry(attributes.geometry);
for (var atr in attributes) {
feature.set(atr, attributes[atr]);
}
} else {
feature = new ol.Feature(attributes);
location_layer.getSource().addFeatures([feature]);
location_feature_ids[data[id_attr_name]] = feature;
}
//Compute color
//feature.color = rainbow(30, Math.min(Math.max(max_temp, -15), 15) + 15, 0.7);
}
function processUnit(data) {
var attributes = {
id: data.id
};
var projection = 'EPSG:4326';
/*for(var meta_i; meta_i<attr.metadatas.length; meta_i++){
if(attr.metadatas[meta_i].name=="location")
projection = attr.metadatas[meta_i].value;
}*/
var coords = data.position.split(',');
attributes.geometry = new ol.geom.Point(ol.proj.transform([parseFloat(coords[1]), parseFloat(coords[0])], projection, 'EPSG:3857'));
attributes.timestamp = data.timestamp;
var feature = null;
if (location_feature_ids[data.id]) {
feature = location_feature_ids[data.id];
feature.setGeometry(attributes.geometry);
for (var atr in attributes) {
feature.set(atr, attributes[atr]);
}
} else {
feature = new ol.Feature(attributes);
feature.tags = {};
location_layer.getSource().addFeatures([feature]);
location_feature_ids[data.id] = feature;
}
}
function processTag(data) {
if (location_feature_ids[data.unit]) {
location_feature_ids[data.unit].tags[data.id] = data;
var max_temp = -273.15;
var timestamp = "";
for (var tag in location_feature_ids[data.unit].tags) {
var t = parseFloat(location_feature_ids[data.unit].tags[tag].temperature);
max_temp = t > max_temp ? t : max_temp;
timestamp = location_feature_ids[data.unit].tags[tag].timestamp;
}
location_feature_ids[data.unit].color = rainbow(30, Math.min(Math.max(max_temp, -15), 15) + 15, 0.7);
location_feature_ids[data.unit].set("max_temp", max_temp.toFixed(2) + " °C");
location_feature_ids[data.unit].set("timestamp", timestamp);
}
}
module.value('config', {
default_layers: [
new ol.layer.Tile({
source: new ol.source.OSM(),
show_in_manager: true,
title: "Base layer",
base: true
}),
location_layer,
extent_layer
],
wirecloud_data_consumer: function(data) {
data = angular.fromJson(data);
if (console) console.log(data);
if (typeof data.type !== 'undefined') {
switch (data.type) {
/*case "Unit":
processUnit(data);
break;
case "Tag":
processTag(data);
break;*/
default: processObject(data);
}
} else {
process_object(data);
}
},
default_view: new ol.View({
center: ol.proj.transform([17.474129, 52.574000], 'EPSG:4326', 'EPSG:3857'), //Latitude longitude to Spherical Mercator
zoom: 4,
units: "m"
})
});
module.controller('Main', ['$scope', 'Core', 'hs.query.baseService', 'config',
function($scope, Core, QueryService, config) {
$scope.hsl_path = hsl_path; //Get this from hslayers.js file
$scope.Core = Core;
$scope.$on('query.dataUpdated', function(event) {
if (console) console.log('Attributes', QueryService.data.attributes, 'Groups', QueryService.data.groups);
});
if (typeof MashupPlatform !== 'undefined')
MashupPlatform.wiring.registerCallback("data_received_slot", config.wirecloud_data_consumer);
//This is needed because data can arrive before hslayers is loaded, so we store it in tmp and process later.
for (var i = 0; i < tmp_data_received.length; i++) {
config.wirecloud_data_consumer(tmp_data_received[i]);
}
}
]);
return module;
});
| rainbow | identifier_name |
app.js | 'use strict';
define(['angular', 'ol', 'toolbar', 'layermanager', 'sidebar', 'map', 'ows', 'query', 'search', 'print', 'permalink', 'measure', 'bootstrap', 'legend', 'panoramio', 'geolocation', 'core', 'wirecloud', 'angular-gettext', 'translations'],
function(angular, ol, toolbar, layermanager) {
var modules_to_load = [
'hs.toolbar',
'hs.layermanager',
'hs.map',
'hs.ows',
'hs.query',
'hs.search', 'hs.print', 'hs.permalink', 'hs.measure',
'hs.legend', 'hs.geolocation', 'hs.core', 'hs.wirecloud', 'gettext', 'hs.sidebar'
];
if (typeof MashupPlatform !== 'undefined') {
modules_to_load = eval(MashupPlatform.prefs.get('modules_to_load'));
}
var module = angular.module('hs', modules_to_load);
module.directive('hs', ['hs.map.service', 'Core', function(OlMap, Core) {
return {
templateUrl: hsl_path + 'hslayers.html',
link: function(scope, element) {
Core.fullScreenMap(element);
}
};
}]);
var location_layer = new ol.layer.Vector({
title: "Locations",
show_in_manager: true,
source: new ol.source.Vector(),
style: function(feature, resolution) {
return [new ol.style.Style({
text: new ol.style.Text({
text: feature.get('temperature'),
offsetY: -10,
offsetX: 5,
fill: new ol.style.Fill({
color: '#000'
})
}),
image: new ol.style.Circle({
fill: new ol.style.Fill({
color: feature.color ? feature.color : [242, 121, 0, 0.7]
}),
stroke: new ol.style.Stroke({
color: [0x33, 0x33, 0x33, 0.9]
}),
radius: 5
})
})]
}
});
var extent_layer = new ol.layer.Vector({
title: "Locations",
show_in_manager: true,
source: new ol.source.Vector(),
style: function(feature, resolution) {
return [new ol.style.Style({
stroke: new ol.style.Stroke({
color: '#005CB6',
width: 3
}),
fill: new ol.style.Fill({
color: 'rgba(0, 0, 255, 0.1)'
})
})]
}
});
var location_feature_ids = {};
function rainbow(numOfSteps, step, opacity) {
// based on http://stackoverflow.com/a/7419630
// This function generates vibrant, "evenly spaced" colours (i.e. no clustering). This is ideal for creating easily distiguishable vibrant markers in Google Maps and other apps.
// Adam Cole, 2011-Sept-14
// HSV to RBG adapted from: http://mjijackson.com/2008/02/rgb-to-hsl-and-rgb-to-hsv-color-model-conversion-algorithms-in-javascript
var r, g, b;
var h = step / (numOfSteps * 1.00000001);
var i = ~~(h * 4);
var f = h * 4 - i;
var q = 1 - f;
switch (i % 4) {
case 2:
r = f, g = 1, b = 0;
break;
case 0:
r = 0, g = f, b = 1;
break;
case 3:
r = 1, g = q, b = 0;
break;
case 1:
r = 0, g = 1, b = q;
break;
}
var c = "rgba(" + ~~(r * 235) + "," + ~~(g * 235) + "," + ~~(b * 235) + ", " + opacity + ")";
return (c);
}
function processObject(data) {
//Get settings from configuration
var id_attr_name = MashupPlatform.prefs.get('id_attr_name');
var coordinates_attr_name = MashupPlatform.prefs.get('coordinates_attr_name');
var measurements_attr_names = MashupPlatform.prefs.get('measurements_attr_names').split(',');
var timestamp_attr_name = MashupPlatform.prefs.get('timestamp_attr_name');
//Read attributes
var attributes = {
id: data[id_attr_name],
timestamp: data[timestamp_attr_name]
};
var projection = 'EPSG:4326';
if (angular.isUndefined(data[coordinates_attr_name])) return;
var coords = data[coordinates_attr_name].split(','); //Supposed ccordinates are lon, lat seperated by comma
attributes.geometry = new ol.geom.Point(ol.proj.transform([parseFloat(coords[1]), parseFloat(coords[0])], projection, 'EPSG:3857'));
for (var attr_i = 0; attr_i < measurements_attr_names.length; attr_i++) {
var t = parseFloat(data[measurements_attr_names[attr_i]]);
attributes[measurements_attr_names[attr_i]] = t.toFixed(2);
}
//Create feature if necessary. Set the attribute values for the feature
var feature = null;
if (location_feature_ids[data[id_attr_name]]) {
feature = location_feature_ids[data[id_attr_name]];
feature.setGeometry(attributes.geometry);
for (var atr in attributes) {
feature.set(atr, attributes[atr]);
}
} else {
feature = new ol.Feature(attributes);
location_layer.getSource().addFeatures([feature]);
location_feature_ids[data[id_attr_name]] = feature;
}
//Compute color
//feature.color = rainbow(30, Math.min(Math.max(max_temp, -15), 15) + 15, 0.7);
}
function processUnit(data) {
var attributes = {
id: data.id
};
var projection = 'EPSG:4326';
/*for(var meta_i; meta_i<attr.metadatas.length; meta_i++){
if(attr.metadatas[meta_i].name=="location")
projection = attr.metadatas[meta_i].value;
}*/
var coords = data.position.split(',');
attributes.geometry = new ol.geom.Point(ol.proj.transform([parseFloat(coords[1]), parseFloat(coords[0])], projection, 'EPSG:3857'));
attributes.timestamp = data.timestamp;
var feature = null;
if (location_feature_ids[data.id]) {
feature = location_feature_ids[data.id];
feature.setGeometry(attributes.geometry);
for (var atr in attributes) {
feature.set(atr, attributes[atr]);
}
} else {
feature = new ol.Feature(attributes);
feature.tags = {};
location_layer.getSource().addFeatures([feature]);
location_feature_ids[data.id] = feature;
}
}
function processTag(data) {
if (location_feature_ids[data.unit]) | }
module.value('config', {
default_layers: [
new ol.layer.Tile({
source: new ol.source.OSM(),
show_in_manager: true,
title: "Base layer",
base: true
}),
location_layer,
extent_layer
],
wirecloud_data_consumer: function(data) {
data = angular.fromJson(data);
if (console) console.log(data);
if (typeof data.type !== 'undefined') {
switch (data.type) {
/*case "Unit":
processUnit(data);
break;
case "Tag":
processTag(data);
break;*/
default: processObject(data);
}
} else {
process_object(data);
}
},
default_view: new ol.View({
center: ol.proj.transform([17.474129, 52.574000], 'EPSG:4326', 'EPSG:3857'), //Latitude longitude to Spherical Mercator
zoom: 4,
units: "m"
})
});
module.controller('Main', ['$scope', 'Core', 'hs.query.baseService', 'config',
function($scope, Core, QueryService, config) {
$scope.hsl_path = hsl_path; //Get this from hslayers.js file
$scope.Core = Core;
$scope.$on('query.dataUpdated', function(event) {
if (console) console.log('Attributes', QueryService.data.attributes, 'Groups', QueryService.data.groups);
});
if (typeof MashupPlatform !== 'undefined')
MashupPlatform.wiring.registerCallback("data_received_slot", config.wirecloud_data_consumer);
//This is needed because data can arrive before hslayers is loaded, so we store it in tmp and process later.
for (var i = 0; i < tmp_data_received.length; i++) {
config.wirecloud_data_consumer(tmp_data_received[i]);
}
}
]);
return module;
});
| {
location_feature_ids[data.unit].tags[data.id] = data;
var max_temp = -273.15;
var timestamp = "";
for (var tag in location_feature_ids[data.unit].tags) {
var t = parseFloat(location_feature_ids[data.unit].tags[tag].temperature);
max_temp = t > max_temp ? t : max_temp;
timestamp = location_feature_ids[data.unit].tags[tag].timestamp;
}
location_feature_ids[data.unit].color = rainbow(30, Math.min(Math.max(max_temp, -15), 15) + 15, 0.7);
location_feature_ids[data.unit].set("max_temp", max_temp.toFixed(2) + " °C");
location_feature_ids[data.unit].set("timestamp", timestamp);
}
| conditional_block |
main.py | '''
Implementation of DES
'''
################################################ INITIAL CONSTANTS BEGIN HERE ###########################################################
import sys
import bitarray
# First Key Permutation done on the 64 bit key to retreieve a 56 bit version
PC1 = [57, 49, 41, 33, 25, 17, 9,
1, 58, 50, 42, 34, 26, 18,
10, 2, 59, 51, 43, 35, 27,
19, 11, 3, 60, 52, 44, 36,
63, 55, 47, 39, 31, 23, 15,
7, 62, 54, 46, 38, 30, 22,
14, 6, 61, 53, 45, 37, 29,
21, 13, 5, 28, 20, 12, 4]
# Left shift each subsequent subkey (from C0-D0) to get the required 16 subkeys
LSHIFT_MAP = [1, 1, 2, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 2, 1]
# Second Key Permutation done on the concatenated 56 bit key to obtain a 48 bit subkey
PC2 = [14, 17, 11, 24, 1, 5,
3, 28, 15, 6, 21, 10,
23, 19, 12, 4, 26, 8,
16, 7, 27, 20, 13, 2,
41, 52, 31, 37, 47, 55,
30, 40, 51, 45, 33, 48,
44, 49, 39, 56, 34, 53,
46, 42, 50, 36, 29, 32]
# Initial Permutation of the message
IP = [58, 50, 42, 34, 26, 18, 10, 2,
60, 52, 44, 36, 28, 20, 12, 4,
62, 54, 46, 38, 30, 22, 14, 6,
64, 56, 48, 40, 32, 24, 16, 8,
57, 49, 41, 33, 25, 17, 9, 1,
59, 51, 43, 35, 27, 19, 11, 3,
61, 53, 45, 37, 29, 21, 13, 5,
63, 55, 47, 39, 31, 23, 15, 7]
# Extending 32 bit R_n-1 to a 48 bit version to correspond to subkey size
E = [32, 1, 2, 3, 4, 5,
4, 5, 6, 7, 8, 9,
8, 9, 10, 11, 12, 13,
12, 13, 14, 15, 16, 17,
16, 17, 18, 19, 20, 21,
20, 21, 22, 23, 24, 25,
24, 25, 26, 27, 28, 29,
28, 29, 30, 31, 32, 1]
# Substituting back to get a 32 bit value
# Done by splitting the 48 bit into 6 bit segments,
# The first and last bit are considered the row number
# The middle 4 bits are the column number
SBOXES = {0:
[[14, 4, 13, 1, 2, 15, 11, 8, 3, 10, 6, 12, 5, 9, 0, 7],
[ 0, 15, 7, 4, 14, 2, 13, 1, 10, 6, 12, 11, 9, 5, 3, 8],
[ 4, 1, 14, 8, 13, 6, 2, 11, 15, 12, 9, 7, 3, 10, 5, 0],
[15, 12, 8, 2, 4, 9, 1, 7, 5, 11, 3, 14, 10, 0, 6, 13]],
1:
[[15, 1, 8, 14, 6, 11, 3, 4, 9, 7, 2, 13, 12, 0, 5, 10],
[ 3, 13, 4, 7, 15, 2, 8, 14, 12, 0, 1, 10, 6, 9, 11, 5],
[ 0, 14, 7, 11, 10, 4, 13, 1, 5, 8, 12, 6, 9, 3, 2, 15],
[13, 8, 10, 1, 3, 15, 4, 2, 11, 6, 7, 12, 0, 5, 14, 9]],
2:
[[10, 0, 9, 14, 6, 3, 15, 5, 1, 13, 12, 7, 11, 4, 2, 8],
[13, 7, 0, 9, 3, 4, 6, 10, 2, 8, 5, 14, 12, 11, 15, 1],
[13, 6, 4, 9, 8, 15, 3, 0, 11, 1, 2, 12, 5, 10, 14, 7],
[ 1, 10, 13, 0, 6, 9, 8, 7, 4, 15, 14, 3, 11, 5, 2, 12]],
3:
[[ 7, 13, 14, 3, 0, 6, 9, 10, 1, 2, 8, 5, 11, 12, 4, 15],
[13, 8, 11, 5, 6, 15, 0, 3, 4, 7, 2, 12, 1, 10, 14, 9],
[10, 6, 9, 0, 12, 11, 7, 13, 15, 1, 3, 14, 5, 2, 8, 4],
[ 3, 15, 0, 6, 10, 1, 13, 8, 9, 4, 5, 11, 12, 7, 2, 14]],
4:
[[ 2, 12, 4, 1, 7, 10, 11, 6, 8, 5, 3, 15, 13, 0, 14, 9],
[14, 11, 2, 12, 4, 7, 13, 1, 5, 0, 15, 10, 3, 9, 8, 6],
[ 4, 2, 1, 11, 10, 13, 7, 8, 15, 9, 12, 5, 6, 3, 0, 14],
[11, 8, 12, 7, 1, 14, 2, 13, 6, 15, 0, 9, 10, 4, 5, 3]],
5:
[[12, 1, 10, 15, 9, 2, 6, 8, 0, 13, 3, 4, 14, 7, 5, 11],
[10, 15, 4, 2, 7, 12, 9, 5, 6, 1, 13, 14, 0, 11, 3, 8],
[ 9, 14, 15, 5, 2, 8, 12, 3, 7, 0, 4, 10, 1, 13, 11, 6],
[ 4, 3, 2, 12, 9, 5, 15, 10, 11, 14, 1, 7, 6, 0, 8, 13]],
6:
[[ 4, 11, 2, 14, 15, 0, 8, 13, 3, 12, 9, 7, 5, 10, 6, 1],
[13, 0, 11, 7, 4, 9, 1, 10, 14, 3, 5, 12, 2, 15, 8, 6],
[ 1, 4, 11, 13, 12, 3, 7, 14, 10, 15, 6, 8, 0, 5, 9, 2],
[ 6, 11, 13, 8, 1, 4, 10, 7, 9, 5, 0, 15, 14, 2, 3, 12]],
7:
[[13, 2, 8, 4, 6, 15, 11, 1, 10, 9, 3, 14, 5, 0, 12, 7],
[ 1, 15, 13, 8, 10, 3, 7, 4, 12, 5, 6, 11, 0, 14, 9, 2],
[ 7, 11, 4, 1, 9, 12, 14, 2, 0, 6, 10, 13, 15, 3, 5, 8],
[ 2, 1, 14, 7, 4, 10, 8, 13, 15, 12, 9, 0, 3, 5, 6, 11]]}
# Permutes the sbox value for the final 32 bit value that is then added on top of the L_n-1 value
P = [16, 7, 20, 21,
29, 12, 28, 17,
1, 15, 23, 26,
5, 18, 31, 10,
2, 8, 24, 14,
32, 27, 3, 9,
19, 13, 30, 6,
22, 11, 4, 25]
# A final permutation that is placed on the reverse concatenated R_16 L_16 bit string
IP_INVERSE = [40, 8, 48, 16, 56, 24, 64, 32,
39, 7, 47, 15, 55, 23, 63, 31,
38, 6, 46, 14, 54, 22, 62, 30,
37, 5, 45, 13, 53, 21, 61, 29,
36, 4, 44, 12, 52, 20, 60, 28,
35, 3, 43, 11, 51, 19, 59, 27,
34, 2, 42, 10, 50, 18, 58, 26,
33, 1, 41, 9, 49, 17, 57, 25]
####################################################### END OF CONSTANTS ########################################################################
####################################################### GENERIC FUNCTIONS ########################################################################
#convert string to a hexadecimal representation
def stringToHex(stringInput):
return ''.join(hex(ord(x))[2:] for x in stringInput)
#for a given string, convert it to hex and partition it in 64bit words and add padding if needed (Padding is just zeroes)
def getHexwords(msg):
"""break the ASCII message into a 64bit (16 hex bytes) words"""
hexwords = []
for i in range(0, len(msg), 8):
msgBlock = msg[i:i+8]
m = stringToHex(msgBlock)
hexwords.append(m)
last = hexwords[-1]
hexwords[-1] += ''.join(['0'] * (16-len(last)))
return hexwords
def stringToBits(string_input):
string_output = bitarray.bitarray()
string_output.frombytes(string_input.encode("utf-8"))
return string_output.to01()
def leftshift(array, value):
return array[value:] + array[:value]
def hexToBinary(hexstr):
return str(bin(int(hexstr, 16)))[2:].rjust(64, '0')
#take a list of hex Words and convert each of them to binary.
def getBinWords(hexWords):
binWords = []
for message in hexWords:
binWord = hexToBinary(message)
binWords.append(binWord)
return binWords
# XORs two bit values val1 and val2
def xor(val1, val2):
xoredBits = []
for i in range(len(val1)):
bit1 = int(val1[i])
bit2 = int(val2[i])
xorBit = int(bool(bit1) ^ bool(bit2))
xoredBits.append(xorBit)
return ''.join(map(str,xoredBits))
########################################################## KEY FUNCTIONS ##########################################################################
def generate_subkeys(key):
|
############################################################### END OF KEY FUNCTIONS #########################################################
############################################################### ENCODING STARTS HERE #########################################################
#take a list of binary words and permute them according to IP. Returns a list of binaries as strings.
def permute(binMessageList):
permutedList = []
temp = []
for message in binMessageList:
for elem in IP:
temp.append(message[int(elem)-1])
strTemp = ''.join(map(str,temp))
permutedList.append(strTemp)
return permutedList
# Run 8 rounds of S-box with the given 48 bit value
def sbox_substitution(mixed_R):
reducedR = []
splitList = []
bitList = list(mixed_R)
#create 8 lists of 6 elems
splitList = [bitList[i:i + 6] for i in range(0, len(bitList), 6)]
for i in range(8):
row = int(splitList[i][0] + splitList[i][-1],2)
col = int(splitList[i][1] + splitList[i][2] + splitList[i][3] + splitList[i][4],2)
newVal = SBOXES[int(i)][row][col]
bits = str(format(newVal,"b")).zfill(4)
reducedR.append(bits)
return ''.join(reducedR)
#Input an individual 64 bit message into to get encrypted
def message_encryption(message, subkeys):
temp_msg = message
print("the full message is : {}".format(temp_msg))
print("The message is {} -- {}".format(temp_msg[:32], temp_msg[32:]))
L_n = temp_msg[:32]
R_n = temp_msg[32:]
L_n1 = temp_msg[:32]
R_n1 = temp_msg[32:]
print("L_0 is : {}".format(L_n))
print("R_0 is : {}".format(R_n))
for i in range(16):
L_n = R_n1
print("L_{} is : {}".format(i+1, L_n))
expanded_R = []
for j in range(48):
expanded_R.append(L_n[E[j]-1])
mixed_R = xor(subkeys[15], expanded_R)
reduced_R = sbox_substitution(mixed_R)
permuted_R = []
for k in range (32):
permuted_R.append(reduced_R[P[k]-1])
R_n = xor(L_n1, permuted_R)
print("R_{} is : {}".format(i+1, R_n))
L_n1 = L_n
R_n1 = R_n
# temp_msg = L_n + R_n
encrypted_msg = []
norm = temp_msg[32:] + temp_msg[:32]
for i in range(64):
encrypted_msg.append(norm[IP_INVERSE[i]-1])
return ''.join(encrypted_msg)
def DESencryption(message):
messages = getBinWords(getHexwords(message))
# messages = getBinWords(message)
print("The message in hex is : {}".format(getHexwords(message)))
subkeys = generate_subkeys("54657374696e6731")
encrypted_messages = []
permute(messages)
for msg in messages:
encrypted_messages.append(message_encryption(msg, subkeys))
encrypted_message = hex(int(''.join(encrypted_messages), 2))
return encrypted_message
############################################################ END OF ENCODING #############################################################
def test():
# subkeys = generate_subkeys("133457799BBCDFF1")
# print(DESencryption("is is a nice time to be alive"))
# print("\n\n\n")
print(DESencryption("Today is a good day."))
if __name__ == "__main__":
test() | key_bits = hexToBinary(key)
if len(key_bits) != 64:
print("Incorrect key provided.")
sys.exit()
key_up = []
for i in range (56):
key_up.append(key_bits[PC1[i]-1])
key_up = ''.join(key_up)
print("The initial key is {}".format(key_bits))
print("They permuted key is {}".format(key_up))
subkeys = []
left = key_up[:28]
right = key_up[28:]
print(left)
print(right)
for i in range(16):
left = leftshift(left, LSHIFT_MAP[i])
right = leftshift(right, LSHIFT_MAP[i])
subkey = left + right
subkey_final = []
for j in range(48):
subkey_final.append(subkey[PC2[j]-1])
subkeys.append("".join(subkey_final))
for i in range (16):
print("Subkey #{} is {}".format(i+1,subkeys[i]))
print("the length is : {}".format(len(subkeys[0])))
print("Keys have been generated.")
return subkeys | identifier_body |
main.py | '''
Implementation of DES
'''
################################################ INITIAL CONSTANTS BEGIN HERE ###########################################################
import sys
import bitarray
# First Key Permutation done on the 64 bit key to retreieve a 56 bit version
PC1 = [57, 49, 41, 33, 25, 17, 9,
1, 58, 50, 42, 34, 26, 18,
10, 2, 59, 51, 43, 35, 27,
19, 11, 3, 60, 52, 44, 36,
63, 55, 47, 39, 31, 23, 15,
7, 62, 54, 46, 38, 30, 22,
14, 6, 61, 53, 45, 37, 29,
21, 13, 5, 28, 20, 12, 4]
# Left shift each subsequent subkey (from C0-D0) to get the required 16 subkeys
LSHIFT_MAP = [1, 1, 2, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 2, 1]
# Second Key Permutation done on the concatenated 56 bit key to obtain a 48 bit subkey
PC2 = [14, 17, 11, 24, 1, 5,
3, 28, 15, 6, 21, 10,
23, 19, 12, 4, 26, 8,
16, 7, 27, 20, 13, 2,
41, 52, 31, 37, 47, 55,
30, 40, 51, 45, 33, 48,
44, 49, 39, 56, 34, 53,
46, 42, 50, 36, 29, 32]
# Initial Permutation of the message
IP = [58, 50, 42, 34, 26, 18, 10, 2,
60, 52, 44, 36, 28, 20, 12, 4,
62, 54, 46, 38, 30, 22, 14, 6,
64, 56, 48, 40, 32, 24, 16, 8,
57, 49, 41, 33, 25, 17, 9, 1,
59, 51, 43, 35, 27, 19, 11, 3,
61, 53, 45, 37, 29, 21, 13, 5,
63, 55, 47, 39, 31, 23, 15, 7]
# Extending 32 bit R_n-1 to a 48 bit version to correspond to subkey size
E = [32, 1, 2, 3, 4, 5,
4, 5, 6, 7, 8, 9,
8, 9, 10, 11, 12, 13,
12, 13, 14, 15, 16, 17,
16, 17, 18, 19, 20, 21,
20, 21, 22, 23, 24, 25,
24, 25, 26, 27, 28, 29,
28, 29, 30, 31, 32, 1]
# Substituting back to get a 32 bit value
# Done by splitting the 48 bit into 6 bit segments,
# The first and last bit are considered the row number
# The middle 4 bits are the column number
SBOXES = {0:
[[14, 4, 13, 1, 2, 15, 11, 8, 3, 10, 6, 12, 5, 9, 0, 7],
[ 0, 15, 7, 4, 14, 2, 13, 1, 10, 6, 12, 11, 9, 5, 3, 8],
[ 4, 1, 14, 8, 13, 6, 2, 11, 15, 12, 9, 7, 3, 10, 5, 0],
[15, 12, 8, 2, 4, 9, 1, 7, 5, 11, 3, 14, 10, 0, 6, 13]],
1:
[[15, 1, 8, 14, 6, 11, 3, 4, 9, 7, 2, 13, 12, 0, 5, 10],
[ 3, 13, 4, 7, 15, 2, 8, 14, 12, 0, 1, 10, 6, 9, 11, 5],
[ 0, 14, 7, 11, 10, 4, 13, 1, 5, 8, 12, 6, 9, 3, 2, 15],
[13, 8, 10, 1, 3, 15, 4, 2, 11, 6, 7, 12, 0, 5, 14, 9]],
2:
[[10, 0, 9, 14, 6, 3, 15, 5, 1, 13, 12, 7, 11, 4, 2, 8],
[13, 7, 0, 9, 3, 4, 6, 10, 2, 8, 5, 14, 12, 11, 15, 1],
[13, 6, 4, 9, 8, 15, 3, 0, 11, 1, 2, 12, 5, 10, 14, 7],
[ 1, 10, 13, 0, 6, 9, 8, 7, 4, 15, 14, 3, 11, 5, 2, 12]],
3:
[[ 7, 13, 14, 3, 0, 6, 9, 10, 1, 2, 8, 5, 11, 12, 4, 15],
[13, 8, 11, 5, 6, 15, 0, 3, 4, 7, 2, 12, 1, 10, 14, 9],
[10, 6, 9, 0, 12, 11, 7, 13, 15, 1, 3, 14, 5, 2, 8, 4],
[ 3, 15, 0, 6, 10, 1, 13, 8, 9, 4, 5, 11, 12, 7, 2, 14]],
4:
[[ 2, 12, 4, 1, 7, 10, 11, 6, 8, 5, 3, 15, 13, 0, 14, 9],
[14, 11, 2, 12, 4, 7, 13, 1, 5, 0, 15, 10, 3, 9, 8, 6],
[ 4, 2, 1, 11, 10, 13, 7, 8, 15, 9, 12, 5, 6, 3, 0, 14],
[11, 8, 12, 7, 1, 14, 2, 13, 6, 15, 0, 9, 10, 4, 5, 3]],
5:
[[12, 1, 10, 15, 9, 2, 6, 8, 0, 13, 3, 4, 14, 7, 5, 11],
[10, 15, 4, 2, 7, 12, 9, 5, 6, 1, 13, 14, 0, 11, 3, 8],
[ 9, 14, 15, 5, 2, 8, 12, 3, 7, 0, 4, 10, 1, 13, 11, 6],
[ 4, 3, 2, 12, 9, 5, 15, 10, 11, 14, 1, 7, 6, 0, 8, 13]],
6:
[[ 4, 11, 2, 14, 15, 0, 8, 13, 3, 12, 9, 7, 5, 10, 6, 1],
[13, 0, 11, 7, 4, 9, 1, 10, 14, 3, 5, 12, 2, 15, 8, 6],
[ 1, 4, 11, 13, 12, 3, 7, 14, 10, 15, 6, 8, 0, 5, 9, 2],
[ 6, 11, 13, 8, 1, 4, 10, 7, 9, 5, 0, 15, 14, 2, 3, 12]],
7:
[[13, 2, 8, 4, 6, 15, 11, 1, 10, 9, 3, 14, 5, 0, 12, 7],
[ 1, 15, 13, 8, 10, 3, 7, 4, 12, 5, 6, 11, 0, 14, 9, 2],
[ 7, 11, 4, 1, 9, 12, 14, 2, 0, 6, 10, 13, 15, 3, 5, 8],
[ 2, 1, 14, 7, 4, 10, 8, 13, 15, 12, 9, 0, 3, 5, 6, 11]]}
# Permutes the sbox value for the final 32 bit value that is then added on top of the L_n-1 value
P = [16, 7, 20, 21,
29, 12, 28, 17,
1, 15, 23, 26,
5, 18, 31, 10,
2, 8, 24, 14,
32, 27, 3, 9,
19, 13, 30, 6,
22, 11, 4, 25]
# A final permutation that is placed on the reverse concatenated R_16 L_16 bit string
IP_INVERSE = [40, 8, 48, 16, 56, 24, 64, 32,
39, 7, 47, 15, 55, 23, 63, 31,
38, 6, 46, 14, 54, 22, 62, 30,
37, 5, 45, 13, 53, 21, 61, 29,
36, 4, 44, 12, 52, 20, 60, 28,
35, 3, 43, 11, 51, 19, 59, 27,
34, 2, 42, 10, 50, 18, 58, 26,
33, 1, 41, 9, 49, 17, 57, 25]
####################################################### END OF CONSTANTS ########################################################################
####################################################### GENERIC FUNCTIONS ########################################################################
#convert string to a hexadecimal representation
def stringToHex(stringInput):
return ''.join(hex(ord(x))[2:] for x in stringInput)
#for a given string, convert it to hex and partition it in 64bit words and add padding if needed (Padding is just zeroes)
def getHexwords(msg):
"""break the ASCII message into a 64bit (16 hex bytes) words"""
hexwords = []
for i in range(0, len(msg), 8):
msgBlock = msg[i:i+8]
m = stringToHex(msgBlock)
hexwords.append(m)
last = hexwords[-1]
hexwords[-1] += ''.join(['0'] * (16-len(last)))
return hexwords
def stringToBits(string_input):
string_output = bitarray.bitarray()
string_output.frombytes(string_input.encode("utf-8"))
return string_output.to01()
def leftshift(array, value):
return array[value:] + array[:value]
def hexToBinary(hexstr):
return str(bin(int(hexstr, 16)))[2:].rjust(64, '0')
#take a list of hex Words and convert each of them to binary.
def getBinWords(hexWords):
binWords = []
for message in hexWords:
binWord = hexToBinary(message)
binWords.append(binWord)
return binWords
# XORs two bit values val1 and val2
def xor(val1, val2):
xoredBits = []
for i in range(len(val1)):
bit1 = int(val1[i])
bit2 = int(val2[i])
xorBit = int(bool(bit1) ^ bool(bit2))
xoredBits.append(xorBit)
return ''.join(map(str,xoredBits))
########################################################## KEY FUNCTIONS ##########################################################################
def generate_subkeys(key):
key_bits = hexToBinary(key)
if len(key_bits) != 64:
print("Incorrect key provided.")
sys.exit()
key_up = []
for i in range (56):
key_up.append(key_bits[PC1[i]-1])
key_up = ''.join(key_up)
print("The initial key is {}".format(key_bits))
print("They permuted key is {}".format(key_up))
subkeys = []
left = key_up[:28]
right = key_up[28:]
print(left)
print(right)
for i in range(16):
left = leftshift(left, LSHIFT_MAP[i])
right = leftshift(right, LSHIFT_MAP[i])
subkey = left + right
subkey_final = []
for j in range(48):
subkey_final.append(subkey[PC2[j]-1])
subkeys.append("".join(subkey_final))
for i in range (16):
print("Subkey #{} is {}".format(i+1,subkeys[i]))
print("the length is : {}".format(len(subkeys[0])))
print("Keys have been generated.")
return subkeys
############################################################### END OF KEY FUNCTIONS #########################################################
############################################################### ENCODING STARTS HERE #########################################################
#take a list of binary words and permute them according to IP. Returns a list of binaries as strings.
def permute(binMessageList):
permutedList = []
temp = []
for message in binMessageList:
for elem in IP:
temp.append(message[int(elem)-1])
strTemp = ''.join(map(str,temp))
permutedList.append(strTemp)
return permutedList
# Run 8 rounds of S-box with the given 48 bit value
def sbox_substitution(mixed_R):
reducedR = []
splitList = []
bitList = list(mixed_R)
#create 8 lists of 6 elems
splitList = [bitList[i:i + 6] for i in range(0, len(bitList), 6)]
for i in range(8):
|
return ''.join(reducedR)
#Input an individual 64 bit message into to get encrypted
def message_encryption(message, subkeys):
temp_msg = message
print("the full message is : {}".format(temp_msg))
print("The message is {} -- {}".format(temp_msg[:32], temp_msg[32:]))
L_n = temp_msg[:32]
R_n = temp_msg[32:]
L_n1 = temp_msg[:32]
R_n1 = temp_msg[32:]
print("L_0 is : {}".format(L_n))
print("R_0 is : {}".format(R_n))
for i in range(16):
L_n = R_n1
print("L_{} is : {}".format(i+1, L_n))
expanded_R = []
for j in range(48):
expanded_R.append(L_n[E[j]-1])
mixed_R = xor(subkeys[15], expanded_R)
reduced_R = sbox_substitution(mixed_R)
permuted_R = []
for k in range (32):
permuted_R.append(reduced_R[P[k]-1])
R_n = xor(L_n1, permuted_R)
print("R_{} is : {}".format(i+1, R_n))
L_n1 = L_n
R_n1 = R_n
# temp_msg = L_n + R_n
encrypted_msg = []
norm = temp_msg[32:] + temp_msg[:32]
for i in range(64):
encrypted_msg.append(norm[IP_INVERSE[i]-1])
return ''.join(encrypted_msg)
def DESencryption(message):
messages = getBinWords(getHexwords(message))
# messages = getBinWords(message)
print("The message in hex is : {}".format(getHexwords(message)))
subkeys = generate_subkeys("54657374696e6731")
encrypted_messages = []
permute(messages)
for msg in messages:
encrypted_messages.append(message_encryption(msg, subkeys))
encrypted_message = hex(int(''.join(encrypted_messages), 2))
return encrypted_message
############################################################ END OF ENCODING #############################################################
def test():
# subkeys = generate_subkeys("133457799BBCDFF1")
# print(DESencryption("is is a nice time to be alive"))
# print("\n\n\n")
print(DESencryption("Today is a good day."))
if __name__ == "__main__":
test() | row = int(splitList[i][0] + splitList[i][-1],2)
col = int(splitList[i][1] + splitList[i][2] + splitList[i][3] + splitList[i][4],2)
newVal = SBOXES[int(i)][row][col]
bits = str(format(newVal,"b")).zfill(4)
reducedR.append(bits) | conditional_block |
main.py | '''
Implementation of DES
'''
################################################ INITIAL CONSTANTS BEGIN HERE ###########################################################
import sys
import bitarray
# First Key Permutation done on the 64 bit key to retreieve a 56 bit version
PC1 = [57, 49, 41, 33, 25, 17, 9,
1, 58, 50, 42, 34, 26, 18,
10, 2, 59, 51, 43, 35, 27,
19, 11, 3, 60, 52, 44, 36,
63, 55, 47, 39, 31, 23, 15,
7, 62, 54, 46, 38, 30, 22,
14, 6, 61, 53, 45, 37, 29,
21, 13, 5, 28, 20, 12, 4]
# Left shift each subsequent subkey (from C0-D0) to get the required 16 subkeys
LSHIFT_MAP = [1, 1, 2, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 2, 1]
# Second Key Permutation done on the concatenated 56 bit key to obtain a 48 bit subkey
PC2 = [14, 17, 11, 24, 1, 5,
3, 28, 15, 6, 21, 10,
23, 19, 12, 4, 26, 8,
16, 7, 27, 20, 13, 2,
41, 52, 31, 37, 47, 55,
30, 40, 51, 45, 33, 48,
44, 49, 39, 56, 34, 53,
46, 42, 50, 36, 29, 32]
# Initial Permutation of the message
IP = [58, 50, 42, 34, 26, 18, 10, 2,
60, 52, 44, 36, 28, 20, 12, 4,
62, 54, 46, 38, 30, 22, 14, 6,
64, 56, 48, 40, 32, 24, 16, 8,
57, 49, 41, 33, 25, 17, 9, 1,
59, 51, 43, 35, 27, 19, 11, 3,
61, 53, 45, 37, 29, 21, 13, 5,
63, 55, 47, 39, 31, 23, 15, 7]
# Extending 32 bit R_n-1 to a 48 bit version to correspond to subkey size
E = [32, 1, 2, 3, 4, 5,
4, 5, 6, 7, 8, 9,
8, 9, 10, 11, 12, 13,
12, 13, 14, 15, 16, 17,
16, 17, 18, 19, 20, 21,
20, 21, 22, 23, 24, 25,
24, 25, 26, 27, 28, 29,
28, 29, 30, 31, 32, 1]
# Substituting back to get a 32 bit value
# Done by splitting the 48 bit into 6 bit segments,
# The first and last bit are considered the row number
# The middle 4 bits are the column number
SBOXES = {0:
[[14, 4, 13, 1, 2, 15, 11, 8, 3, 10, 6, 12, 5, 9, 0, 7],
[ 0, 15, 7, 4, 14, 2, 13, 1, 10, 6, 12, 11, 9, 5, 3, 8],
[ 4, 1, 14, 8, 13, 6, 2, 11, 15, 12, 9, 7, 3, 10, 5, 0],
[15, 12, 8, 2, 4, 9, 1, 7, 5, 11, 3, 14, 10, 0, 6, 13]],
1:
[[15, 1, 8, 14, 6, 11, 3, 4, 9, 7, 2, 13, 12, 0, 5, 10],
[ 3, 13, 4, 7, 15, 2, 8, 14, 12, 0, 1, 10, 6, 9, 11, 5],
[ 0, 14, 7, 11, 10, 4, 13, 1, 5, 8, 12, 6, 9, 3, 2, 15], | [13, 8, 10, 1, 3, 15, 4, 2, 11, 6, 7, 12, 0, 5, 14, 9]],
2:
[[10, 0, 9, 14, 6, 3, 15, 5, 1, 13, 12, 7, 11, 4, 2, 8],
[13, 7, 0, 9, 3, 4, 6, 10, 2, 8, 5, 14, 12, 11, 15, 1],
[13, 6, 4, 9, 8, 15, 3, 0, 11, 1, 2, 12, 5, 10, 14, 7],
[ 1, 10, 13, 0, 6, 9, 8, 7, 4, 15, 14, 3, 11, 5, 2, 12]],
3:
[[ 7, 13, 14, 3, 0, 6, 9, 10, 1, 2, 8, 5, 11, 12, 4, 15],
[13, 8, 11, 5, 6, 15, 0, 3, 4, 7, 2, 12, 1, 10, 14, 9],
[10, 6, 9, 0, 12, 11, 7, 13, 15, 1, 3, 14, 5, 2, 8, 4],
[ 3, 15, 0, 6, 10, 1, 13, 8, 9, 4, 5, 11, 12, 7, 2, 14]],
4:
[[ 2, 12, 4, 1, 7, 10, 11, 6, 8, 5, 3, 15, 13, 0, 14, 9],
[14, 11, 2, 12, 4, 7, 13, 1, 5, 0, 15, 10, 3, 9, 8, 6],
[ 4, 2, 1, 11, 10, 13, 7, 8, 15, 9, 12, 5, 6, 3, 0, 14],
[11, 8, 12, 7, 1, 14, 2, 13, 6, 15, 0, 9, 10, 4, 5, 3]],
5:
[[12, 1, 10, 15, 9, 2, 6, 8, 0, 13, 3, 4, 14, 7, 5, 11],
[10, 15, 4, 2, 7, 12, 9, 5, 6, 1, 13, 14, 0, 11, 3, 8],
[ 9, 14, 15, 5, 2, 8, 12, 3, 7, 0, 4, 10, 1, 13, 11, 6],
[ 4, 3, 2, 12, 9, 5, 15, 10, 11, 14, 1, 7, 6, 0, 8, 13]],
6:
[[ 4, 11, 2, 14, 15, 0, 8, 13, 3, 12, 9, 7, 5, 10, 6, 1],
[13, 0, 11, 7, 4, 9, 1, 10, 14, 3, 5, 12, 2, 15, 8, 6],
[ 1, 4, 11, 13, 12, 3, 7, 14, 10, 15, 6, 8, 0, 5, 9, 2],
[ 6, 11, 13, 8, 1, 4, 10, 7, 9, 5, 0, 15, 14, 2, 3, 12]],
7:
[[13, 2, 8, 4, 6, 15, 11, 1, 10, 9, 3, 14, 5, 0, 12, 7],
[ 1, 15, 13, 8, 10, 3, 7, 4, 12, 5, 6, 11, 0, 14, 9, 2],
[ 7, 11, 4, 1, 9, 12, 14, 2, 0, 6, 10, 13, 15, 3, 5, 8],
[ 2, 1, 14, 7, 4, 10, 8, 13, 15, 12, 9, 0, 3, 5, 6, 11]]}
# Permutes the sbox value for the final 32 bit value that is then added on top of the L_n-1 value
P = [16, 7, 20, 21,
29, 12, 28, 17,
1, 15, 23, 26,
5, 18, 31, 10,
2, 8, 24, 14,
32, 27, 3, 9,
19, 13, 30, 6,
22, 11, 4, 25]
# A final permutation that is placed on the reverse concatenated R_16 L_16 bit string
IP_INVERSE = [40, 8, 48, 16, 56, 24, 64, 32,
39, 7, 47, 15, 55, 23, 63, 31,
38, 6, 46, 14, 54, 22, 62, 30,
37, 5, 45, 13, 53, 21, 61, 29,
36, 4, 44, 12, 52, 20, 60, 28,
35, 3, 43, 11, 51, 19, 59, 27,
34, 2, 42, 10, 50, 18, 58, 26,
33, 1, 41, 9, 49, 17, 57, 25]
####################################################### END OF CONSTANTS ########################################################################
####################################################### GENERIC FUNCTIONS ########################################################################
#convert string to a hexadecimal representation
def stringToHex(stringInput):
return ''.join(hex(ord(x))[2:] for x in stringInput)
#for a given string, convert it to hex and partition it in 64bit words and add padding if needed (Padding is just zeroes)
def getHexwords(msg):
"""break the ASCII message into a 64bit (16 hex bytes) words"""
hexwords = []
for i in range(0, len(msg), 8):
msgBlock = msg[i:i+8]
m = stringToHex(msgBlock)
hexwords.append(m)
last = hexwords[-1]
hexwords[-1] += ''.join(['0'] * (16-len(last)))
return hexwords
def stringToBits(string_input):
string_output = bitarray.bitarray()
string_output.frombytes(string_input.encode("utf-8"))
return string_output.to01()
def leftshift(array, value):
return array[value:] + array[:value]
def hexToBinary(hexstr):
return str(bin(int(hexstr, 16)))[2:].rjust(64, '0')
#take a list of hex Words and convert each of them to binary.
def getBinWords(hexWords):
binWords = []
for message in hexWords:
binWord = hexToBinary(message)
binWords.append(binWord)
return binWords
# XORs two bit values val1 and val2
def xor(val1, val2):
xoredBits = []
for i in range(len(val1)):
bit1 = int(val1[i])
bit2 = int(val2[i])
xorBit = int(bool(bit1) ^ bool(bit2))
xoredBits.append(xorBit)
return ''.join(map(str,xoredBits))
########################################################## KEY FUNCTIONS ##########################################################################
def generate_subkeys(key):
key_bits = hexToBinary(key)
if len(key_bits) != 64:
print("Incorrect key provided.")
sys.exit()
key_up = []
for i in range (56):
key_up.append(key_bits[PC1[i]-1])
key_up = ''.join(key_up)
print("The initial key is {}".format(key_bits))
print("They permuted key is {}".format(key_up))
subkeys = []
left = key_up[:28]
right = key_up[28:]
print(left)
print(right)
for i in range(16):
left = leftshift(left, LSHIFT_MAP[i])
right = leftshift(right, LSHIFT_MAP[i])
subkey = left + right
subkey_final = []
for j in range(48):
subkey_final.append(subkey[PC2[j]-1])
subkeys.append("".join(subkey_final))
for i in range (16):
print("Subkey #{} is {}".format(i+1,subkeys[i]))
print("the length is : {}".format(len(subkeys[0])))
print("Keys have been generated.")
return subkeys
############################################################### END OF KEY FUNCTIONS #########################################################
############################################################### ENCODING STARTS HERE #########################################################
#take a list of binary words and permute them according to IP. Returns a list of binaries as strings.
def permute(binMessageList):
permutedList = []
temp = []
for message in binMessageList:
for elem in IP:
temp.append(message[int(elem)-1])
strTemp = ''.join(map(str,temp))
permutedList.append(strTemp)
return permutedList
# Run 8 rounds of S-box with the given 48 bit value
def sbox_substitution(mixed_R):
reducedR = []
splitList = []
bitList = list(mixed_R)
#create 8 lists of 6 elems
splitList = [bitList[i:i + 6] for i in range(0, len(bitList), 6)]
for i in range(8):
row = int(splitList[i][0] + splitList[i][-1],2)
col = int(splitList[i][1] + splitList[i][2] + splitList[i][3] + splitList[i][4],2)
newVal = SBOXES[int(i)][row][col]
bits = str(format(newVal,"b")).zfill(4)
reducedR.append(bits)
return ''.join(reducedR)
#Input an individual 64 bit message into to get encrypted
def message_encryption(message, subkeys):
temp_msg = message
print("the full message is : {}".format(temp_msg))
print("The message is {} -- {}".format(temp_msg[:32], temp_msg[32:]))
L_n = temp_msg[:32]
R_n = temp_msg[32:]
L_n1 = temp_msg[:32]
R_n1 = temp_msg[32:]
print("L_0 is : {}".format(L_n))
print("R_0 is : {}".format(R_n))
for i in range(16):
L_n = R_n1
print("L_{} is : {}".format(i+1, L_n))
expanded_R = []
for j in range(48):
expanded_R.append(L_n[E[j]-1])
mixed_R = xor(subkeys[15], expanded_R)
reduced_R = sbox_substitution(mixed_R)
permuted_R = []
for k in range (32):
permuted_R.append(reduced_R[P[k]-1])
R_n = xor(L_n1, permuted_R)
print("R_{} is : {}".format(i+1, R_n))
L_n1 = L_n
R_n1 = R_n
# temp_msg = L_n + R_n
encrypted_msg = []
norm = temp_msg[32:] + temp_msg[:32]
for i in range(64):
encrypted_msg.append(norm[IP_INVERSE[i]-1])
return ''.join(encrypted_msg)
def DESencryption(message):
messages = getBinWords(getHexwords(message))
# messages = getBinWords(message)
print("The message in hex is : {}".format(getHexwords(message)))
subkeys = generate_subkeys("54657374696e6731")
encrypted_messages = []
permute(messages)
for msg in messages:
encrypted_messages.append(message_encryption(msg, subkeys))
encrypted_message = hex(int(''.join(encrypted_messages), 2))
return encrypted_message
############################################################ END OF ENCODING #############################################################
def test():
# subkeys = generate_subkeys("133457799BBCDFF1")
# print(DESencryption("is is a nice time to be alive"))
# print("\n\n\n")
print(DESencryption("Today is a good day."))
if __name__ == "__main__":
test() | random_line_split | |
main.py | '''
Implementation of DES
'''
################################################ INITIAL CONSTANTS BEGIN HERE ###########################################################
import sys
import bitarray
# First Key Permutation done on the 64 bit key to retreieve a 56 bit version
PC1 = [57, 49, 41, 33, 25, 17, 9,
1, 58, 50, 42, 34, 26, 18,
10, 2, 59, 51, 43, 35, 27,
19, 11, 3, 60, 52, 44, 36,
63, 55, 47, 39, 31, 23, 15,
7, 62, 54, 46, 38, 30, 22,
14, 6, 61, 53, 45, 37, 29,
21, 13, 5, 28, 20, 12, 4]
# Left shift each subsequent subkey (from C0-D0) to get the required 16 subkeys
LSHIFT_MAP = [1, 1, 2, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 2, 1]
# Second Key Permutation done on the concatenated 56 bit key to obtain a 48 bit subkey
PC2 = [14, 17, 11, 24, 1, 5,
3, 28, 15, 6, 21, 10,
23, 19, 12, 4, 26, 8,
16, 7, 27, 20, 13, 2,
41, 52, 31, 37, 47, 55,
30, 40, 51, 45, 33, 48,
44, 49, 39, 56, 34, 53,
46, 42, 50, 36, 29, 32]
# Initial Permutation of the message
IP = [58, 50, 42, 34, 26, 18, 10, 2,
60, 52, 44, 36, 28, 20, 12, 4,
62, 54, 46, 38, 30, 22, 14, 6,
64, 56, 48, 40, 32, 24, 16, 8,
57, 49, 41, 33, 25, 17, 9, 1,
59, 51, 43, 35, 27, 19, 11, 3,
61, 53, 45, 37, 29, 21, 13, 5,
63, 55, 47, 39, 31, 23, 15, 7]
# Extending 32 bit R_n-1 to a 48 bit version to correspond to subkey size
E = [32, 1, 2, 3, 4, 5,
4, 5, 6, 7, 8, 9,
8, 9, 10, 11, 12, 13,
12, 13, 14, 15, 16, 17,
16, 17, 18, 19, 20, 21,
20, 21, 22, 23, 24, 25,
24, 25, 26, 27, 28, 29,
28, 29, 30, 31, 32, 1]
# Substituting back to get a 32 bit value
# Done by splitting the 48 bit into 6 bit segments,
# The first and last bit are considered the row number
# The middle 4 bits are the column number
SBOXES = {0:
[[14, 4, 13, 1, 2, 15, 11, 8, 3, 10, 6, 12, 5, 9, 0, 7],
[ 0, 15, 7, 4, 14, 2, 13, 1, 10, 6, 12, 11, 9, 5, 3, 8],
[ 4, 1, 14, 8, 13, 6, 2, 11, 15, 12, 9, 7, 3, 10, 5, 0],
[15, 12, 8, 2, 4, 9, 1, 7, 5, 11, 3, 14, 10, 0, 6, 13]],
1:
[[15, 1, 8, 14, 6, 11, 3, 4, 9, 7, 2, 13, 12, 0, 5, 10],
[ 3, 13, 4, 7, 15, 2, 8, 14, 12, 0, 1, 10, 6, 9, 11, 5],
[ 0, 14, 7, 11, 10, 4, 13, 1, 5, 8, 12, 6, 9, 3, 2, 15],
[13, 8, 10, 1, 3, 15, 4, 2, 11, 6, 7, 12, 0, 5, 14, 9]],
2:
[[10, 0, 9, 14, 6, 3, 15, 5, 1, 13, 12, 7, 11, 4, 2, 8],
[13, 7, 0, 9, 3, 4, 6, 10, 2, 8, 5, 14, 12, 11, 15, 1],
[13, 6, 4, 9, 8, 15, 3, 0, 11, 1, 2, 12, 5, 10, 14, 7],
[ 1, 10, 13, 0, 6, 9, 8, 7, 4, 15, 14, 3, 11, 5, 2, 12]],
3:
[[ 7, 13, 14, 3, 0, 6, 9, 10, 1, 2, 8, 5, 11, 12, 4, 15],
[13, 8, 11, 5, 6, 15, 0, 3, 4, 7, 2, 12, 1, 10, 14, 9],
[10, 6, 9, 0, 12, 11, 7, 13, 15, 1, 3, 14, 5, 2, 8, 4],
[ 3, 15, 0, 6, 10, 1, 13, 8, 9, 4, 5, 11, 12, 7, 2, 14]],
4:
[[ 2, 12, 4, 1, 7, 10, 11, 6, 8, 5, 3, 15, 13, 0, 14, 9],
[14, 11, 2, 12, 4, 7, 13, 1, 5, 0, 15, 10, 3, 9, 8, 6],
[ 4, 2, 1, 11, 10, 13, 7, 8, 15, 9, 12, 5, 6, 3, 0, 14],
[11, 8, 12, 7, 1, 14, 2, 13, 6, 15, 0, 9, 10, 4, 5, 3]],
5:
[[12, 1, 10, 15, 9, 2, 6, 8, 0, 13, 3, 4, 14, 7, 5, 11],
[10, 15, 4, 2, 7, 12, 9, 5, 6, 1, 13, 14, 0, 11, 3, 8],
[ 9, 14, 15, 5, 2, 8, 12, 3, 7, 0, 4, 10, 1, 13, 11, 6],
[ 4, 3, 2, 12, 9, 5, 15, 10, 11, 14, 1, 7, 6, 0, 8, 13]],
6:
[[ 4, 11, 2, 14, 15, 0, 8, 13, 3, 12, 9, 7, 5, 10, 6, 1],
[13, 0, 11, 7, 4, 9, 1, 10, 14, 3, 5, 12, 2, 15, 8, 6],
[ 1, 4, 11, 13, 12, 3, 7, 14, 10, 15, 6, 8, 0, 5, 9, 2],
[ 6, 11, 13, 8, 1, 4, 10, 7, 9, 5, 0, 15, 14, 2, 3, 12]],
7:
[[13, 2, 8, 4, 6, 15, 11, 1, 10, 9, 3, 14, 5, 0, 12, 7],
[ 1, 15, 13, 8, 10, 3, 7, 4, 12, 5, 6, 11, 0, 14, 9, 2],
[ 7, 11, 4, 1, 9, 12, 14, 2, 0, 6, 10, 13, 15, 3, 5, 8],
[ 2, 1, 14, 7, 4, 10, 8, 13, 15, 12, 9, 0, 3, 5, 6, 11]]}
# Permutes the sbox value for the final 32 bit value that is then added on top of the L_n-1 value
P = [16, 7, 20, 21,
29, 12, 28, 17,
1, 15, 23, 26,
5, 18, 31, 10,
2, 8, 24, 14,
32, 27, 3, 9,
19, 13, 30, 6,
22, 11, 4, 25]
# A final permutation that is placed on the reverse concatenated R_16 L_16 bit string
IP_INVERSE = [40, 8, 48, 16, 56, 24, 64, 32,
39, 7, 47, 15, 55, 23, 63, 31,
38, 6, 46, 14, 54, 22, 62, 30,
37, 5, 45, 13, 53, 21, 61, 29,
36, 4, 44, 12, 52, 20, 60, 28,
35, 3, 43, 11, 51, 19, 59, 27,
34, 2, 42, 10, 50, 18, 58, 26,
33, 1, 41, 9, 49, 17, 57, 25]
####################################################### END OF CONSTANTS ########################################################################
####################################################### GENERIC FUNCTIONS ########################################################################
#convert string to a hexadecimal representation
def stringToHex(stringInput):
return ''.join(hex(ord(x))[2:] for x in stringInput)
#for a given string, convert it to hex and partition it in 64bit words and add padding if needed (Padding is just zeroes)
def getHexwords(msg):
"""break the ASCII message into a 64bit (16 hex bytes) words"""
hexwords = []
for i in range(0, len(msg), 8):
msgBlock = msg[i:i+8]
m = stringToHex(msgBlock)
hexwords.append(m)
last = hexwords[-1]
hexwords[-1] += ''.join(['0'] * (16-len(last)))
return hexwords
def stringToBits(string_input):
string_output = bitarray.bitarray()
string_output.frombytes(string_input.encode("utf-8"))
return string_output.to01()
def leftshift(array, value):
return array[value:] + array[:value]
def hexToBinary(hexstr):
return str(bin(int(hexstr, 16)))[2:].rjust(64, '0')
#take a list of hex Words and convert each of them to binary.
def getBinWords(hexWords):
binWords = []
for message in hexWords:
binWord = hexToBinary(message)
binWords.append(binWord)
return binWords
# XORs two bit values val1 and val2
def xor(val1, val2):
xoredBits = []
for i in range(len(val1)):
bit1 = int(val1[i])
bit2 = int(val2[i])
xorBit = int(bool(bit1) ^ bool(bit2))
xoredBits.append(xorBit)
return ''.join(map(str,xoredBits))
########################################################## KEY FUNCTIONS ##########################################################################
def generate_subkeys(key):
key_bits = hexToBinary(key)
if len(key_bits) != 64:
print("Incorrect key provided.")
sys.exit()
key_up = []
for i in range (56):
key_up.append(key_bits[PC1[i]-1])
key_up = ''.join(key_up)
print("The initial key is {}".format(key_bits))
print("They permuted key is {}".format(key_up))
subkeys = []
left = key_up[:28]
right = key_up[28:]
print(left)
print(right)
for i in range(16):
left = leftshift(left, LSHIFT_MAP[i])
right = leftshift(right, LSHIFT_MAP[i])
subkey = left + right
subkey_final = []
for j in range(48):
subkey_final.append(subkey[PC2[j]-1])
subkeys.append("".join(subkey_final))
for i in range (16):
print("Subkey #{} is {}".format(i+1,subkeys[i]))
print("the length is : {}".format(len(subkeys[0])))
print("Keys have been generated.")
return subkeys
############################################################### END OF KEY FUNCTIONS #########################################################
############################################################### ENCODING STARTS HERE #########################################################
#take a list of binary words and permute them according to IP. Returns a list of binaries as strings.
def permute(binMessageList):
permutedList = []
temp = []
for message in binMessageList:
for elem in IP:
temp.append(message[int(elem)-1])
strTemp = ''.join(map(str,temp))
permutedList.append(strTemp)
return permutedList
# Run 8 rounds of S-box with the given 48 bit value
def sbox_substitution(mixed_R):
reducedR = []
splitList = []
bitList = list(mixed_R)
#create 8 lists of 6 elems
splitList = [bitList[i:i + 6] for i in range(0, len(bitList), 6)]
for i in range(8):
row = int(splitList[i][0] + splitList[i][-1],2)
col = int(splitList[i][1] + splitList[i][2] + splitList[i][3] + splitList[i][4],2)
newVal = SBOXES[int(i)][row][col]
bits = str(format(newVal,"b")).zfill(4)
reducedR.append(bits)
return ''.join(reducedR)
#Input an individual 64 bit message into to get encrypted
def message_encryption(message, subkeys):
temp_msg = message
print("the full message is : {}".format(temp_msg))
print("The message is {} -- {}".format(temp_msg[:32], temp_msg[32:]))
L_n = temp_msg[:32]
R_n = temp_msg[32:]
L_n1 = temp_msg[:32]
R_n1 = temp_msg[32:]
print("L_0 is : {}".format(L_n))
print("R_0 is : {}".format(R_n))
for i in range(16):
L_n = R_n1
print("L_{} is : {}".format(i+1, L_n))
expanded_R = []
for j in range(48):
expanded_R.append(L_n[E[j]-1])
mixed_R = xor(subkeys[15], expanded_R)
reduced_R = sbox_substitution(mixed_R)
permuted_R = []
for k in range (32):
permuted_R.append(reduced_R[P[k]-1])
R_n = xor(L_n1, permuted_R)
print("R_{} is : {}".format(i+1, R_n))
L_n1 = L_n
R_n1 = R_n
# temp_msg = L_n + R_n
encrypted_msg = []
norm = temp_msg[32:] + temp_msg[:32]
for i in range(64):
encrypted_msg.append(norm[IP_INVERSE[i]-1])
return ''.join(encrypted_msg)
def DESencryption(message):
messages = getBinWords(getHexwords(message))
# messages = getBinWords(message)
print("The message in hex is : {}".format(getHexwords(message)))
subkeys = generate_subkeys("54657374696e6731")
encrypted_messages = []
permute(messages)
for msg in messages:
encrypted_messages.append(message_encryption(msg, subkeys))
encrypted_message = hex(int(''.join(encrypted_messages), 2))
return encrypted_message
############################################################ END OF ENCODING #############################################################
def | ():
# subkeys = generate_subkeys("133457799BBCDFF1")
# print(DESencryption("is is a nice time to be alive"))
# print("\n\n\n")
print(DESencryption("Today is a good day."))
if __name__ == "__main__":
test() | test | identifier_name |
data.go | package uplink
import (
"context"
"fmt"
"strings"
"time"
log "github.com/sirupsen/logrus"
"github.com/pkg/errors"
"github.com/brocaar/loraserver/api/as"
"github.com/brocaar/loraserver/api/nc"
"github.com/brocaar/loraserver/internal/adr"
"github.com/brocaar/loraserver/internal/channels"
"github.com/brocaar/loraserver/internal/common"
"github.com/brocaar/loraserver/internal/downlink"
"github.com/brocaar/loraserver/internal/gateway"
"github.com/brocaar/loraserver/internal/maccommand"
"github.com/brocaar/loraserver/internal/models"
"github.com/brocaar/loraserver/internal/session"
"github.com/brocaar/lorawan"
)
func setContextFromDataPHYPayload(ctx *DataUpContext) error {
macPL, ok := ctx.RXPacket.PHYPayload.MACPayload.(*lorawan.MACPayload)
if !ok {
return fmt.Errorf("expected *lorawan.MACPayload, got: %T", ctx.RXPacket.PHYPayload.MACPayload)
}
ctx.MACPayload = macPL
return nil
}
func getNodeSessionForDataUp(ctx *DataUpContext) error {
ns, err := session.GetNodeSessionForPHYPayload(common.RedisPool, ctx.RXPacket.PHYPayload)
if err != nil {
return errors.Wrap(err, "get node-session error")
}
ctx.NodeSession = ns
return nil
}
func logDataFramesCollected(ctx *DataUpContext) error {
var macs []string
for _, p := range ctx.RXPacket.RXInfoSet {
macs = append(macs, p.MAC.String())
}
log.WithFields(log.Fields{
"dev_eui": ctx.NodeSession.DevEUI,
"gw_count": len(macs),
"gw_macs": strings.Join(macs, ", "),
"mtype": ctx.RXPacket.PHYPayload.MHDR.MType,
}).Info("packet(s) collected")
logUplink(common.DB, ctx.NodeSession.DevEUI, ctx.RXPacket)
return nil
}
func decryptFRMPayloadMACCommands(ctx *DataUpContext) error {
// only decrypt when FPort is equal to 0
if ctx.MACPayload.FPort != nil && *ctx.MACPayload.FPort == 0 {
if err := ctx.RXPacket.PHYPayload.DecryptFRMPayload(ctx.NodeSession.NwkSKey); err != nil {
return errors.Wrap(err, "decrypt FRMPayload error")
}
}
return nil
}
func sendRXInfoToNetworkController(ctx *DataUpContext) error {
// TODO: change so that errors get logged but not returned
if err := sendRXInfoPayload(ctx.NodeSession, ctx.RXPacket); err != nil {
return errors.Wrap(err, "send rx-info to network-controller error")
}
return nil
}
func handleFOptsMACCommands(ctx *DataUpContext) error {
if len(ctx.MACPayload.FHDR.FOpts) > 0 {
if err := handleUplinkMACCommands(&ctx.NodeSession, false, ctx.MACPayload.FHDR.FOpts, ctx.RXPacket.RXInfoSet); err != nil {
log.WithFields(log.Fields{
"dev_eui": ctx.NodeSession.DevEUI,
"fopts": ctx.MACPayload.FHDR.FOpts,
}).Errorf("handle FOpts mac commands error: %s", err)
}
}
return nil
}
func handleFRMPayloadMACCommands(ctx *DataUpContext) error {
if ctx.MACPayload.FPort != nil && *ctx.MACPayload.FPort == 0 {
if len(ctx.MACPayload.FRMPayload) == 0 {
return errors.New("expected mac commands, but FRMPayload is empty (FPort=0)")
}
var commands []lorawan.MACCommand
for _, pl := range ctx.MACPayload.FRMPayload {
cmd, ok := pl.(*lorawan.MACCommand)
if !ok {
return fmt.Errorf("expected MACPayload, but got %T", ctx.MACPayload.FRMPayload)
}
commands = append(commands, *cmd)
}
if err := handleUplinkMACCommands(&ctx.NodeSession, true, commands, ctx.RXPacket.RXInfoSet); err != nil {
log.WithFields(log.Fields{
"dev_eui": ctx.NodeSession.DevEUI,
"commands": commands,
}).Errorf("handle FRMPayload mac commands error: %s", err)
}
}
return nil
}
func sendFRMPayloadToApplicationServer(ctx *DataUpContext) error {
if ctx.MACPayload.FPort != nil && *ctx.MACPayload.FPort > 0 {
return publishDataUp(ctx.NodeSession, ctx.RXPacket, *ctx.MACPayload)
}
return nil
}
func handleChannelReconfiguration(ctx *DataUpContext) error {
// handle channel configuration
// note that this must come before ADR!
if err := channels.HandleChannelReconfigure(ctx.NodeSession, ctx.RXPacket); err != nil {
log.WithFields(log.Fields{
"dev_eui": ctx.NodeSession.DevEUI,
}).Warningf("handle channel reconfigure error: %s", err)
}
return nil
}
func handleADR(ctx *DataUpContext) error {
// handle ADR (should be executed before saving the node-session)
if err := adr.HandleADR(&ctx.NodeSession, ctx.RXPacket, ctx.MACPayload.FHDR.FCnt); err != nil {
log.WithFields(log.Fields{
"dev_eui": ctx.NodeSession.DevEUI,
"fcnt_up": ctx.MACPayload.FHDR.FCnt,
}).Warningf("handle adr error: %s", err)
}
return nil
}
func setLastRXInfoSet(ctx *DataUpContext) error {
// update the RXInfoSet
ctx.NodeSession.LastRXInfoSet = ctx.RXPacket.RXInfoSet
return nil
}
func syncUplinkFCnt(ctx *DataUpContext) error {
// sync counter with that of the device + 1
ctx.NodeSession.FCntUp = ctx.MACPayload.FHDR.FCnt + 1
return nil
}
func saveNodeSession(ctx *DataUpContext) error {
// save node-session
return session.SaveNodeSession(common.RedisPool, ctx.NodeSession)
}
func handleUplinkACK(ctx *DataUpContext) error {
// TODO: only log in case of error?
if !ctx.MACPayload.FHDR.FCtrl.ACK {
return nil
}
_, err := common.Application.HandleDataDownACK(context.Background(), &as.HandleDataDownACKRequest{
AppEUI: ctx.NodeSession.AppEUI[:],
DevEUI: ctx.NodeSession.DevEUI[:],
FCnt: ctx.NodeSession.FCntDown,
})
if err != nil {
return errors.Wrap(err, "error publish downlink data ack to application-server")
}
return nil
}
func handleDownlink(ctx *DataUpContext) error {
// handle downlink (ACK)
time.Sleep(common.GetDownlinkDataDelay)
if err := downlink.Flow.RunUplinkResponse(
ctx.NodeSession,
ctx.MACPayload.FHDR.FCtrl.ADR,
ctx.MACPayload.FHDR.FCtrl.ADRACKReq,
ctx.RXPacket.PHYPayload.MHDR.MType == lorawan.ConfirmedDataUp,
); err != nil {
return errors.Wrap(err, "run uplink response flow error")
}
return nil
}
// sendRXInfoPayload sends the rx and tx meta-data to the network controller.
func sendRXInfoPayload(ns session.NodeSession, rxPacket models.RXPacket) error {
macPL, ok := rxPacket.PHYPayload.MACPayload.(*lorawan.MACPayload)
if !ok {
return fmt.Errorf("expected *lorawan.MACPayload, got: %T", rxPacket.PHYPayload.MACPayload)
}
rxInfoReq := nc.HandleRXInfoRequest{
DevEUI: ns.DevEUI[:],
TxInfo: &nc.TXInfo{
Frequency: int64(rxPacket.RXInfoSet[0].Frequency),
Adr: macPL.FHDR.FCtrl.ADR,
CodeRate: rxPacket.RXInfoSet[0].CodeRate,
DataRate: &nc.DataRate{
Modulation: string(rxPacket.RXInfoSet[0].DataRate.Modulation),
BandWidth: uint32(rxPacket.RXInfoSet[0].DataRate.Bandwidth),
SpreadFactor: uint32(rxPacket.RXInfoSet[0].DataRate.SpreadFactor),
Bitrate: uint32(rxPacket.RXInfoSet[0].DataRate.BitRate),
},
},
}
for _, rxInfo := range rxPacket.RXInfoSet {
// make sure we have a copy of the MAC byte slice, else every RxInfo
// slice item will get the same Mac
mac := make([]byte, 8)
copy(mac, rxInfo.MAC[:])
rxInfoReq.RxInfo = append(rxInfoReq.RxInfo, &nc.RXInfo{
Mac: mac,
Time: rxInfo.Time.Format(time.RFC3339Nano),
Rssi: int32(rxInfo.RSSI),
LoRaSNR: rxInfo.LoRaSNR,
})
}
_, err := common.Controller.HandleRXInfo(context.Background(), &rxInfoReq)
if err != nil {
return fmt.Errorf("publish rxinfo to network-controller error: %s", err)
}
log.WithFields(log.Fields{
"dev_eui": ns.DevEUI,
}).Info("rx info sent to network-controller")
return nil
}
func publishDataUp(ns session.NodeSession, rxPacket models.RXPacket, macPL lorawan.MACPayload) error {
publishDataUpReq := as.HandleDataUpRequest{
AppEUI: ns.AppEUI[:],
DevEUI: ns.DevEUI[:],
FCnt: macPL.FHDR.FCnt,
TxInfo: &as.TXInfo{
Frequency: int64(rxPacket.RXInfoSet[0].Frequency),
Adr: macPL.FHDR.FCtrl.ADR,
CodeRate: rxPacket.RXInfoSet[0].CodeRate,
DataRate: &as.DataRate{
Modulation: string(rxPacket.RXInfoSet[0].DataRate.Modulation),
BandWidth: uint32(rxPacket.RXInfoSet[0].DataRate.Bandwidth),
SpreadFactor: uint32(rxPacket.RXInfoSet[0].DataRate.SpreadFactor),
Bitrate: uint32(rxPacket.RXInfoSet[0].DataRate.BitRate),
},
},
}
var macs []lorawan.EUI64
for i := range rxPacket.RXInfoSet {
macs = append(macs, rxPacket.RXInfoSet[i].MAC)
}
// get gateway info
gws, err := gateway.GetGatewaysForMACs(common.DB, macs)
if err != nil {
log.WithField("macs", macs).Warningf("get gateways for macs error: %s", err)
gws = make(map[lorawan.EUI64]gateway.Gateway)
}
for _, rxInfo := range rxPacket.RXInfoSet {
// make sure we have a copy of the MAC byte slice, else every RxInfo
// slice item will get the same Mac
mac := make([]byte, 8)
copy(mac, rxInfo.MAC[:])
asRxInfo := as.RXInfo{
Mac: mac,
Time: rxInfo.Time.Format(time.RFC3339Nano),
Rssi: int32(rxInfo.RSSI),
LoRaSNR: rxInfo.LoRaSNR,
}
if gw, ok := gws[rxInfo.MAC]; ok {
asRxInfo.Name = gw.Name
asRxInfo.Latitude = gw.Location.Latitude
asRxInfo.Longitude = gw.Location.Longitude
asRxInfo.Altitude = gw.Altitude
}
publishDataUpReq.RxInfo = append(publishDataUpReq.RxInfo, &asRxInfo)
}
if macPL.FPort != nil {
publishDataUpReq.FPort = uint32(*macPL.FPort)
}
if len(macPL.FRMPayload) == 1 {
dataPL, ok := macPL.FRMPayload[0].(*lorawan.DataPayload)
if !ok {
return fmt.Errorf("expected type *lorawan.DataPayload, got %T", macPL.FRMPayload[0])
}
publishDataUpReq.Data = dataPL.Bytes
}
if _, err := common.Application.HandleDataUp(context.Background(), &publishDataUpReq); err != nil {
return fmt.Errorf("publish data up to application-server error: %s", err)
}
return nil
}
func handleUplinkMACCommands(ns *session.NodeSession, frmPayload bool, commands []lorawan.MACCommand, rxInfoSet models.RXInfoSet) error | {
var cids []lorawan.CID
blocks := make(map[lorawan.CID]maccommand.Block)
// group mac-commands by CID
for _, cmd := range commands {
block, ok := blocks[cmd.CID]
if !ok {
block = maccommand.Block{
CID: cmd.CID,
FRMPayload: frmPayload,
}
cids = append(cids, cmd.CID)
}
block.MACCommands = append(block.MACCommands, cmd)
blocks[cmd.CID] = block
}
for _, cid := range cids {
block := blocks[cid]
logFields := log.Fields{
"dev_eui": ns.DevEUI,
"cid": block.CID,
"frm_payload": block.FRMPayload,
}
// read pending mac-command block for CID. e.g. on case of an ack, the
// pending mac-command block contains the request.
// we need this pending mac-command block to find out if the command
// was scheduled through the API (external).
pending, err := maccommand.ReadPending(common.RedisPool, ns.DevEUI, block.CID)
if err != nil {
log.WithFields(logFields).Errorf("read pending mac-command error: %s", err)
continue
}
var external bool
if pending != nil {
external = pending.External
}
// in case the node is requesting a mac-command, there is nothing pending
if pending != nil {
if err = maccommand.DeletePending(common.RedisPool, ns.DevEUI, block.CID); err != nil {
log.WithFields(logFields).Errorf("delete pending mac-command error: %s", err)
}
}
// CID >= 0x80 are proprietary mac-commands and are not handled by LoRa Server
if block.CID < 0x80 {
if err := maccommand.Handle(ns, block, pending, rxInfoSet); err != nil {
log.WithFields(logFields).Errorf("handle mac-command block error: %s", err)
}
}
// report to external controller in case of proprietary mac-commands or
// in case when the request has been scheduled through the API.
if block.CID >= 0x80 || external {
var data [][]byte
for _, cmd := range block.MACCommands {
b, err := cmd.MarshalBinary()
if err != nil {
log.WithFields(logFields).Errorf("marshal mac-command to binary error: %s", err)
continue
}
data = append(data, b)
}
_, err = common.Controller.HandleDataUpMACCommand(context.Background(), &nc.HandleDataUpMACCommandRequest{
DevEUI: ns.DevEUI[:],
FrmPayload: block.FRMPayload,
Cid: uint32(block.CID),
Commands: data,
})
if err != nil {
log.WithFields(logFields).Errorf("send mac-command to network-controller error: %s", err)
} else {
log.WithFields(logFields).Info("mac-command sent to network-controller")
}
}
}
return nil
} | identifier_body | |
data.go | package uplink
import (
"context"
"fmt"
"strings"
"time"
log "github.com/sirupsen/logrus"
"github.com/pkg/errors"
"github.com/brocaar/loraserver/api/as"
"github.com/brocaar/loraserver/api/nc"
"github.com/brocaar/loraserver/internal/adr"
"github.com/brocaar/loraserver/internal/channels"
"github.com/brocaar/loraserver/internal/common"
"github.com/brocaar/loraserver/internal/downlink"
"github.com/brocaar/loraserver/internal/gateway"
"github.com/brocaar/loraserver/internal/maccommand"
"github.com/brocaar/loraserver/internal/models"
"github.com/brocaar/loraserver/internal/session"
"github.com/brocaar/lorawan"
)
func setContextFromDataPHYPayload(ctx *DataUpContext) error {
macPL, ok := ctx.RXPacket.PHYPayload.MACPayload.(*lorawan.MACPayload)
if !ok {
return fmt.Errorf("expected *lorawan.MACPayload, got: %T", ctx.RXPacket.PHYPayload.MACPayload)
}
ctx.MACPayload = macPL
return nil
}
func getNodeSessionForDataUp(ctx *DataUpContext) error {
ns, err := session.GetNodeSessionForPHYPayload(common.RedisPool, ctx.RXPacket.PHYPayload)
if err != nil {
return errors.Wrap(err, "get node-session error")
}
ctx.NodeSession = ns
return nil
}
func logDataFramesCollected(ctx *DataUpContext) error {
var macs []string
for _, p := range ctx.RXPacket.RXInfoSet {
macs = append(macs, p.MAC.String())
}
log.WithFields(log.Fields{
"dev_eui": ctx.NodeSession.DevEUI,
"gw_count": len(macs),
"gw_macs": strings.Join(macs, ", "),
"mtype": ctx.RXPacket.PHYPayload.MHDR.MType,
}).Info("packet(s) collected")
logUplink(common.DB, ctx.NodeSession.DevEUI, ctx.RXPacket)
return nil
}
func decryptFRMPayloadMACCommands(ctx *DataUpContext) error {
// only decrypt when FPort is equal to 0
if ctx.MACPayload.FPort != nil && *ctx.MACPayload.FPort == 0 {
if err := ctx.RXPacket.PHYPayload.DecryptFRMPayload(ctx.NodeSession.NwkSKey); err != nil {
return errors.Wrap(err, "decrypt FRMPayload error")
}
}
return nil
}
func sendRXInfoToNetworkController(ctx *DataUpContext) error {
// TODO: change so that errors get logged but not returned
if err := sendRXInfoPayload(ctx.NodeSession, ctx.RXPacket); err != nil {
return errors.Wrap(err, "send rx-info to network-controller error")
}
return nil
}
func handleFOptsMACCommands(ctx *DataUpContext) error {
if len(ctx.MACPayload.FHDR.FOpts) > 0 {
if err := handleUplinkMACCommands(&ctx.NodeSession, false, ctx.MACPayload.FHDR.FOpts, ctx.RXPacket.RXInfoSet); err != nil {
log.WithFields(log.Fields{
"dev_eui": ctx.NodeSession.DevEUI,
"fopts": ctx.MACPayload.FHDR.FOpts,
}).Errorf("handle FOpts mac commands error: %s", err)
}
}
return nil
}
func handleFRMPayloadMACCommands(ctx *DataUpContext) error {
if ctx.MACPayload.FPort != nil && *ctx.MACPayload.FPort == 0 {
if len(ctx.MACPayload.FRMPayload) == 0 {
return errors.New("expected mac commands, but FRMPayload is empty (FPort=0)")
}
var commands []lorawan.MACCommand
for _, pl := range ctx.MACPayload.FRMPayload {
cmd, ok := pl.(*lorawan.MACCommand)
if !ok {
return fmt.Errorf("expected MACPayload, but got %T", ctx.MACPayload.FRMPayload)
}
commands = append(commands, *cmd)
}
if err := handleUplinkMACCommands(&ctx.NodeSession, true, commands, ctx.RXPacket.RXInfoSet); err != nil {
log.WithFields(log.Fields{
"dev_eui": ctx.NodeSession.DevEUI,
"commands": commands,
}).Errorf("handle FRMPayload mac commands error: %s", err)
}
}
return nil
}
func sendFRMPayloadToApplicationServer(ctx *DataUpContext) error {
if ctx.MACPayload.FPort != nil && *ctx.MACPayload.FPort > 0 {
return publishDataUp(ctx.NodeSession, ctx.RXPacket, *ctx.MACPayload)
}
return nil
}
func handleChannelReconfiguration(ctx *DataUpContext) error {
// handle channel configuration
// note that this must come before ADR!
if err := channels.HandleChannelReconfigure(ctx.NodeSession, ctx.RXPacket); err != nil {
log.WithFields(log.Fields{
"dev_eui": ctx.NodeSession.DevEUI,
}).Warningf("handle channel reconfigure error: %s", err)
}
return nil
}
func handleADR(ctx *DataUpContext) error {
// handle ADR (should be executed before saving the node-session)
if err := adr.HandleADR(&ctx.NodeSession, ctx.RXPacket, ctx.MACPayload.FHDR.FCnt); err != nil {
log.WithFields(log.Fields{
"dev_eui": ctx.NodeSession.DevEUI,
"fcnt_up": ctx.MACPayload.FHDR.FCnt,
}).Warningf("handle adr error: %s", err)
}
return nil
}
func setLastRXInfoSet(ctx *DataUpContext) error {
// update the RXInfoSet
ctx.NodeSession.LastRXInfoSet = ctx.RXPacket.RXInfoSet
return nil
}
func syncUplinkFCnt(ctx *DataUpContext) error {
// sync counter with that of the device + 1
ctx.NodeSession.FCntUp = ctx.MACPayload.FHDR.FCnt + 1
return nil
}
func saveNodeSession(ctx *DataUpContext) error {
// save node-session
return session.SaveNodeSession(common.RedisPool, ctx.NodeSession)
}
func handleUplinkACK(ctx *DataUpContext) error {
// TODO: only log in case of error?
if !ctx.MACPayload.FHDR.FCtrl.ACK {
return nil
}
_, err := common.Application.HandleDataDownACK(context.Background(), &as.HandleDataDownACKRequest{
AppEUI: ctx.NodeSession.AppEUI[:],
DevEUI: ctx.NodeSession.DevEUI[:],
FCnt: ctx.NodeSession.FCntDown,
})
if err != nil {
return errors.Wrap(err, "error publish downlink data ack to application-server")
}
return nil
}
func handleDownlink(ctx *DataUpContext) error {
// handle downlink (ACK)
time.Sleep(common.GetDownlinkDataDelay)
if err := downlink.Flow.RunUplinkResponse(
ctx.NodeSession,
ctx.MACPayload.FHDR.FCtrl.ADR,
ctx.MACPayload.FHDR.FCtrl.ADRACKReq,
ctx.RXPacket.PHYPayload.MHDR.MType == lorawan.ConfirmedDataUp,
); err != nil {
return errors.Wrap(err, "run uplink response flow error")
}
return nil
}
// sendRXInfoPayload sends the rx and tx meta-data to the network controller.
func sendRXInfoPayload(ns session.NodeSession, rxPacket models.RXPacket) error {
macPL, ok := rxPacket.PHYPayload.MACPayload.(*lorawan.MACPayload)
if !ok {
return fmt.Errorf("expected *lorawan.MACPayload, got: %T", rxPacket.PHYPayload.MACPayload)
}
rxInfoReq := nc.HandleRXInfoRequest{
DevEUI: ns.DevEUI[:],
TxInfo: &nc.TXInfo{
Frequency: int64(rxPacket.RXInfoSet[0].Frequency),
Adr: macPL.FHDR.FCtrl.ADR,
CodeRate: rxPacket.RXInfoSet[0].CodeRate,
DataRate: &nc.DataRate{
Modulation: string(rxPacket.RXInfoSet[0].DataRate.Modulation),
BandWidth: uint32(rxPacket.RXInfoSet[0].DataRate.Bandwidth),
SpreadFactor: uint32(rxPacket.RXInfoSet[0].DataRate.SpreadFactor),
Bitrate: uint32(rxPacket.RXInfoSet[0].DataRate.BitRate),
},
},
}
for _, rxInfo := range rxPacket.RXInfoSet {
// make sure we have a copy of the MAC byte slice, else every RxInfo
// slice item will get the same Mac
mac := make([]byte, 8) | Time: rxInfo.Time.Format(time.RFC3339Nano),
Rssi: int32(rxInfo.RSSI),
LoRaSNR: rxInfo.LoRaSNR,
})
}
_, err := common.Controller.HandleRXInfo(context.Background(), &rxInfoReq)
if err != nil {
return fmt.Errorf("publish rxinfo to network-controller error: %s", err)
}
log.WithFields(log.Fields{
"dev_eui": ns.DevEUI,
}).Info("rx info sent to network-controller")
return nil
}
func publishDataUp(ns session.NodeSession, rxPacket models.RXPacket, macPL lorawan.MACPayload) error {
publishDataUpReq := as.HandleDataUpRequest{
AppEUI: ns.AppEUI[:],
DevEUI: ns.DevEUI[:],
FCnt: macPL.FHDR.FCnt,
TxInfo: &as.TXInfo{
Frequency: int64(rxPacket.RXInfoSet[0].Frequency),
Adr: macPL.FHDR.FCtrl.ADR,
CodeRate: rxPacket.RXInfoSet[0].CodeRate,
DataRate: &as.DataRate{
Modulation: string(rxPacket.RXInfoSet[0].DataRate.Modulation),
BandWidth: uint32(rxPacket.RXInfoSet[0].DataRate.Bandwidth),
SpreadFactor: uint32(rxPacket.RXInfoSet[0].DataRate.SpreadFactor),
Bitrate: uint32(rxPacket.RXInfoSet[0].DataRate.BitRate),
},
},
}
var macs []lorawan.EUI64
for i := range rxPacket.RXInfoSet {
macs = append(macs, rxPacket.RXInfoSet[i].MAC)
}
// get gateway info
gws, err := gateway.GetGatewaysForMACs(common.DB, macs)
if err != nil {
log.WithField("macs", macs).Warningf("get gateways for macs error: %s", err)
gws = make(map[lorawan.EUI64]gateway.Gateway)
}
for _, rxInfo := range rxPacket.RXInfoSet {
// make sure we have a copy of the MAC byte slice, else every RxInfo
// slice item will get the same Mac
mac := make([]byte, 8)
copy(mac, rxInfo.MAC[:])
asRxInfo := as.RXInfo{
Mac: mac,
Time: rxInfo.Time.Format(time.RFC3339Nano),
Rssi: int32(rxInfo.RSSI),
LoRaSNR: rxInfo.LoRaSNR,
}
if gw, ok := gws[rxInfo.MAC]; ok {
asRxInfo.Name = gw.Name
asRxInfo.Latitude = gw.Location.Latitude
asRxInfo.Longitude = gw.Location.Longitude
asRxInfo.Altitude = gw.Altitude
}
publishDataUpReq.RxInfo = append(publishDataUpReq.RxInfo, &asRxInfo)
}
if macPL.FPort != nil {
publishDataUpReq.FPort = uint32(*macPL.FPort)
}
if len(macPL.FRMPayload) == 1 {
dataPL, ok := macPL.FRMPayload[0].(*lorawan.DataPayload)
if !ok {
return fmt.Errorf("expected type *lorawan.DataPayload, got %T", macPL.FRMPayload[0])
}
publishDataUpReq.Data = dataPL.Bytes
}
if _, err := common.Application.HandleDataUp(context.Background(), &publishDataUpReq); err != nil {
return fmt.Errorf("publish data up to application-server error: %s", err)
}
return nil
}
func handleUplinkMACCommands(ns *session.NodeSession, frmPayload bool, commands []lorawan.MACCommand, rxInfoSet models.RXInfoSet) error {
var cids []lorawan.CID
blocks := make(map[lorawan.CID]maccommand.Block)
// group mac-commands by CID
for _, cmd := range commands {
block, ok := blocks[cmd.CID]
if !ok {
block = maccommand.Block{
CID: cmd.CID,
FRMPayload: frmPayload,
}
cids = append(cids, cmd.CID)
}
block.MACCommands = append(block.MACCommands, cmd)
blocks[cmd.CID] = block
}
for _, cid := range cids {
block := blocks[cid]
logFields := log.Fields{
"dev_eui": ns.DevEUI,
"cid": block.CID,
"frm_payload": block.FRMPayload,
}
// read pending mac-command block for CID. e.g. on case of an ack, the
// pending mac-command block contains the request.
// we need this pending mac-command block to find out if the command
// was scheduled through the API (external).
pending, err := maccommand.ReadPending(common.RedisPool, ns.DevEUI, block.CID)
if err != nil {
log.WithFields(logFields).Errorf("read pending mac-command error: %s", err)
continue
}
var external bool
if pending != nil {
external = pending.External
}
// in case the node is requesting a mac-command, there is nothing pending
if pending != nil {
if err = maccommand.DeletePending(common.RedisPool, ns.DevEUI, block.CID); err != nil {
log.WithFields(logFields).Errorf("delete pending mac-command error: %s", err)
}
}
// CID >= 0x80 are proprietary mac-commands and are not handled by LoRa Server
if block.CID < 0x80 {
if err := maccommand.Handle(ns, block, pending, rxInfoSet); err != nil {
log.WithFields(logFields).Errorf("handle mac-command block error: %s", err)
}
}
// report to external controller in case of proprietary mac-commands or
// in case when the request has been scheduled through the API.
if block.CID >= 0x80 || external {
var data [][]byte
for _, cmd := range block.MACCommands {
b, err := cmd.MarshalBinary()
if err != nil {
log.WithFields(logFields).Errorf("marshal mac-command to binary error: %s", err)
continue
}
data = append(data, b)
}
_, err = common.Controller.HandleDataUpMACCommand(context.Background(), &nc.HandleDataUpMACCommandRequest{
DevEUI: ns.DevEUI[:],
FrmPayload: block.FRMPayload,
Cid: uint32(block.CID),
Commands: data,
})
if err != nil {
log.WithFields(logFields).Errorf("send mac-command to network-controller error: %s", err)
} else {
log.WithFields(logFields).Info("mac-command sent to network-controller")
}
}
}
return nil
} | copy(mac, rxInfo.MAC[:])
rxInfoReq.RxInfo = append(rxInfoReq.RxInfo, &nc.RXInfo{
Mac: mac, | random_line_split |
data.go | package uplink
import (
"context"
"fmt"
"strings"
"time"
log "github.com/sirupsen/logrus"
"github.com/pkg/errors"
"github.com/brocaar/loraserver/api/as"
"github.com/brocaar/loraserver/api/nc"
"github.com/brocaar/loraserver/internal/adr"
"github.com/brocaar/loraserver/internal/channels"
"github.com/brocaar/loraserver/internal/common"
"github.com/brocaar/loraserver/internal/downlink"
"github.com/brocaar/loraserver/internal/gateway"
"github.com/brocaar/loraserver/internal/maccommand"
"github.com/brocaar/loraserver/internal/models"
"github.com/brocaar/loraserver/internal/session"
"github.com/brocaar/lorawan"
)
func setContextFromDataPHYPayload(ctx *DataUpContext) error {
macPL, ok := ctx.RXPacket.PHYPayload.MACPayload.(*lorawan.MACPayload)
if !ok {
return fmt.Errorf("expected *lorawan.MACPayload, got: %T", ctx.RXPacket.PHYPayload.MACPayload)
}
ctx.MACPayload = macPL
return nil
}
func getNodeSessionForDataUp(ctx *DataUpContext) error {
ns, err := session.GetNodeSessionForPHYPayload(common.RedisPool, ctx.RXPacket.PHYPayload)
if err != nil {
return errors.Wrap(err, "get node-session error")
}
ctx.NodeSession = ns
return nil
}
func logDataFramesCollected(ctx *DataUpContext) error {
var macs []string
for _, p := range ctx.RXPacket.RXInfoSet {
macs = append(macs, p.MAC.String())
}
log.WithFields(log.Fields{
"dev_eui": ctx.NodeSession.DevEUI,
"gw_count": len(macs),
"gw_macs": strings.Join(macs, ", "),
"mtype": ctx.RXPacket.PHYPayload.MHDR.MType,
}).Info("packet(s) collected")
logUplink(common.DB, ctx.NodeSession.DevEUI, ctx.RXPacket)
return nil
}
func decryptFRMPayloadMACCommands(ctx *DataUpContext) error {
// only decrypt when FPort is equal to 0
if ctx.MACPayload.FPort != nil && *ctx.MACPayload.FPort == 0 {
if err := ctx.RXPacket.PHYPayload.DecryptFRMPayload(ctx.NodeSession.NwkSKey); err != nil {
return errors.Wrap(err, "decrypt FRMPayload error")
}
}
return nil
}
func sendRXInfoToNetworkController(ctx *DataUpContext) error {
// TODO: change so that errors get logged but not returned
if err := sendRXInfoPayload(ctx.NodeSession, ctx.RXPacket); err != nil {
return errors.Wrap(err, "send rx-info to network-controller error")
}
return nil
}
func handleFOptsMACCommands(ctx *DataUpContext) error {
if len(ctx.MACPayload.FHDR.FOpts) > 0 {
if err := handleUplinkMACCommands(&ctx.NodeSession, false, ctx.MACPayload.FHDR.FOpts, ctx.RXPacket.RXInfoSet); err != nil {
log.WithFields(log.Fields{
"dev_eui": ctx.NodeSession.DevEUI,
"fopts": ctx.MACPayload.FHDR.FOpts,
}).Errorf("handle FOpts mac commands error: %s", err)
}
}
return nil
}
func handleFRMPayloadMACCommands(ctx *DataUpContext) error {
if ctx.MACPayload.FPort != nil && *ctx.MACPayload.FPort == 0 {
if len(ctx.MACPayload.FRMPayload) == 0 {
return errors.New("expected mac commands, but FRMPayload is empty (FPort=0)")
}
var commands []lorawan.MACCommand
for _, pl := range ctx.MACPayload.FRMPayload {
cmd, ok := pl.(*lorawan.MACCommand)
if !ok {
return fmt.Errorf("expected MACPayload, but got %T", ctx.MACPayload.FRMPayload)
}
commands = append(commands, *cmd)
}
if err := handleUplinkMACCommands(&ctx.NodeSession, true, commands, ctx.RXPacket.RXInfoSet); err != nil {
log.WithFields(log.Fields{
"dev_eui": ctx.NodeSession.DevEUI,
"commands": commands,
}).Errorf("handle FRMPayload mac commands error: %s", err)
}
}
return nil
}
func sendFRMPayloadToApplicationServer(ctx *DataUpContext) error {
if ctx.MACPayload.FPort != nil && *ctx.MACPayload.FPort > 0 {
return publishDataUp(ctx.NodeSession, ctx.RXPacket, *ctx.MACPayload)
}
return nil
}
func handleChannelReconfiguration(ctx *DataUpContext) error {
// handle channel configuration
// note that this must come before ADR!
if err := channels.HandleChannelReconfigure(ctx.NodeSession, ctx.RXPacket); err != nil {
log.WithFields(log.Fields{
"dev_eui": ctx.NodeSession.DevEUI,
}).Warningf("handle channel reconfigure error: %s", err)
}
return nil
}
func handleADR(ctx *DataUpContext) error {
// handle ADR (should be executed before saving the node-session)
if err := adr.HandleADR(&ctx.NodeSession, ctx.RXPacket, ctx.MACPayload.FHDR.FCnt); err != nil {
log.WithFields(log.Fields{
"dev_eui": ctx.NodeSession.DevEUI,
"fcnt_up": ctx.MACPayload.FHDR.FCnt,
}).Warningf("handle adr error: %s", err)
}
return nil
}
func setLastRXInfoSet(ctx *DataUpContext) error {
// update the RXInfoSet
ctx.NodeSession.LastRXInfoSet = ctx.RXPacket.RXInfoSet
return nil
}
func syncUplinkFCnt(ctx *DataUpContext) error {
// sync counter with that of the device + 1
ctx.NodeSession.FCntUp = ctx.MACPayload.FHDR.FCnt + 1
return nil
}
func saveNodeSession(ctx *DataUpContext) error {
// save node-session
return session.SaveNodeSession(common.RedisPool, ctx.NodeSession)
}
func handleUplinkACK(ctx *DataUpContext) error {
// TODO: only log in case of error?
if !ctx.MACPayload.FHDR.FCtrl.ACK {
return nil
}
_, err := common.Application.HandleDataDownACK(context.Background(), &as.HandleDataDownACKRequest{
AppEUI: ctx.NodeSession.AppEUI[:],
DevEUI: ctx.NodeSession.DevEUI[:],
FCnt: ctx.NodeSession.FCntDown,
})
if err != nil {
return errors.Wrap(err, "error publish downlink data ack to application-server")
}
return nil
}
func handleDownlink(ctx *DataUpContext) error {
// handle downlink (ACK)
time.Sleep(common.GetDownlinkDataDelay)
if err := downlink.Flow.RunUplinkResponse(
ctx.NodeSession,
ctx.MACPayload.FHDR.FCtrl.ADR,
ctx.MACPayload.FHDR.FCtrl.ADRACKReq,
ctx.RXPacket.PHYPayload.MHDR.MType == lorawan.ConfirmedDataUp,
); err != nil {
return errors.Wrap(err, "run uplink response flow error")
}
return nil
}
// sendRXInfoPayload sends the rx and tx meta-data to the network controller.
func sendRXInfoPayload(ns session.NodeSession, rxPacket models.RXPacket) error {
macPL, ok := rxPacket.PHYPayload.MACPayload.(*lorawan.MACPayload)
if !ok {
return fmt.Errorf("expected *lorawan.MACPayload, got: %T", rxPacket.PHYPayload.MACPayload)
}
rxInfoReq := nc.HandleRXInfoRequest{
DevEUI: ns.DevEUI[:],
TxInfo: &nc.TXInfo{
Frequency: int64(rxPacket.RXInfoSet[0].Frequency),
Adr: macPL.FHDR.FCtrl.ADR,
CodeRate: rxPacket.RXInfoSet[0].CodeRate,
DataRate: &nc.DataRate{
Modulation: string(rxPacket.RXInfoSet[0].DataRate.Modulation),
BandWidth: uint32(rxPacket.RXInfoSet[0].DataRate.Bandwidth),
SpreadFactor: uint32(rxPacket.RXInfoSet[0].DataRate.SpreadFactor),
Bitrate: uint32(rxPacket.RXInfoSet[0].DataRate.BitRate),
},
},
}
for _, rxInfo := range rxPacket.RXInfoSet {
// make sure we have a copy of the MAC byte slice, else every RxInfo
// slice item will get the same Mac
mac := make([]byte, 8)
copy(mac, rxInfo.MAC[:])
rxInfoReq.RxInfo = append(rxInfoReq.RxInfo, &nc.RXInfo{
Mac: mac,
Time: rxInfo.Time.Format(time.RFC3339Nano),
Rssi: int32(rxInfo.RSSI),
LoRaSNR: rxInfo.LoRaSNR,
})
}
_, err := common.Controller.HandleRXInfo(context.Background(), &rxInfoReq)
if err != nil |
log.WithFields(log.Fields{
"dev_eui": ns.DevEUI,
}).Info("rx info sent to network-controller")
return nil
}
func publishDataUp(ns session.NodeSession, rxPacket models.RXPacket, macPL lorawan.MACPayload) error {
publishDataUpReq := as.HandleDataUpRequest{
AppEUI: ns.AppEUI[:],
DevEUI: ns.DevEUI[:],
FCnt: macPL.FHDR.FCnt,
TxInfo: &as.TXInfo{
Frequency: int64(rxPacket.RXInfoSet[0].Frequency),
Adr: macPL.FHDR.FCtrl.ADR,
CodeRate: rxPacket.RXInfoSet[0].CodeRate,
DataRate: &as.DataRate{
Modulation: string(rxPacket.RXInfoSet[0].DataRate.Modulation),
BandWidth: uint32(rxPacket.RXInfoSet[0].DataRate.Bandwidth),
SpreadFactor: uint32(rxPacket.RXInfoSet[0].DataRate.SpreadFactor),
Bitrate: uint32(rxPacket.RXInfoSet[0].DataRate.BitRate),
},
},
}
var macs []lorawan.EUI64
for i := range rxPacket.RXInfoSet {
macs = append(macs, rxPacket.RXInfoSet[i].MAC)
}
// get gateway info
gws, err := gateway.GetGatewaysForMACs(common.DB, macs)
if err != nil {
log.WithField("macs", macs).Warningf("get gateways for macs error: %s", err)
gws = make(map[lorawan.EUI64]gateway.Gateway)
}
for _, rxInfo := range rxPacket.RXInfoSet {
// make sure we have a copy of the MAC byte slice, else every RxInfo
// slice item will get the same Mac
mac := make([]byte, 8)
copy(mac, rxInfo.MAC[:])
asRxInfo := as.RXInfo{
Mac: mac,
Time: rxInfo.Time.Format(time.RFC3339Nano),
Rssi: int32(rxInfo.RSSI),
LoRaSNR: rxInfo.LoRaSNR,
}
if gw, ok := gws[rxInfo.MAC]; ok {
asRxInfo.Name = gw.Name
asRxInfo.Latitude = gw.Location.Latitude
asRxInfo.Longitude = gw.Location.Longitude
asRxInfo.Altitude = gw.Altitude
}
publishDataUpReq.RxInfo = append(publishDataUpReq.RxInfo, &asRxInfo)
}
if macPL.FPort != nil {
publishDataUpReq.FPort = uint32(*macPL.FPort)
}
if len(macPL.FRMPayload) == 1 {
dataPL, ok := macPL.FRMPayload[0].(*lorawan.DataPayload)
if !ok {
return fmt.Errorf("expected type *lorawan.DataPayload, got %T", macPL.FRMPayload[0])
}
publishDataUpReq.Data = dataPL.Bytes
}
if _, err := common.Application.HandleDataUp(context.Background(), &publishDataUpReq); err != nil {
return fmt.Errorf("publish data up to application-server error: %s", err)
}
return nil
}
func handleUplinkMACCommands(ns *session.NodeSession, frmPayload bool, commands []lorawan.MACCommand, rxInfoSet models.RXInfoSet) error {
var cids []lorawan.CID
blocks := make(map[lorawan.CID]maccommand.Block)
// group mac-commands by CID
for _, cmd := range commands {
block, ok := blocks[cmd.CID]
if !ok {
block = maccommand.Block{
CID: cmd.CID,
FRMPayload: frmPayload,
}
cids = append(cids, cmd.CID)
}
block.MACCommands = append(block.MACCommands, cmd)
blocks[cmd.CID] = block
}
for _, cid := range cids {
block := blocks[cid]
logFields := log.Fields{
"dev_eui": ns.DevEUI,
"cid": block.CID,
"frm_payload": block.FRMPayload,
}
// read pending mac-command block for CID. e.g. on case of an ack, the
// pending mac-command block contains the request.
// we need this pending mac-command block to find out if the command
// was scheduled through the API (external).
pending, err := maccommand.ReadPending(common.RedisPool, ns.DevEUI, block.CID)
if err != nil {
log.WithFields(logFields).Errorf("read pending mac-command error: %s", err)
continue
}
var external bool
if pending != nil {
external = pending.External
}
// in case the node is requesting a mac-command, there is nothing pending
if pending != nil {
if err = maccommand.DeletePending(common.RedisPool, ns.DevEUI, block.CID); err != nil {
log.WithFields(logFields).Errorf("delete pending mac-command error: %s", err)
}
}
// CID >= 0x80 are proprietary mac-commands and are not handled by LoRa Server
if block.CID < 0x80 {
if err := maccommand.Handle(ns, block, pending, rxInfoSet); err != nil {
log.WithFields(logFields).Errorf("handle mac-command block error: %s", err)
}
}
// report to external controller in case of proprietary mac-commands or
// in case when the request has been scheduled through the API.
if block.CID >= 0x80 || external {
var data [][]byte
for _, cmd := range block.MACCommands {
b, err := cmd.MarshalBinary()
if err != nil {
log.WithFields(logFields).Errorf("marshal mac-command to binary error: %s", err)
continue
}
data = append(data, b)
}
_, err = common.Controller.HandleDataUpMACCommand(context.Background(), &nc.HandleDataUpMACCommandRequest{
DevEUI: ns.DevEUI[:],
FrmPayload: block.FRMPayload,
Cid: uint32(block.CID),
Commands: data,
})
if err != nil {
log.WithFields(logFields).Errorf("send mac-command to network-controller error: %s", err)
} else {
log.WithFields(logFields).Info("mac-command sent to network-controller")
}
}
}
return nil
}
| {
return fmt.Errorf("publish rxinfo to network-controller error: %s", err)
} | conditional_block |
data.go | package uplink
import (
"context"
"fmt"
"strings"
"time"
log "github.com/sirupsen/logrus"
"github.com/pkg/errors"
"github.com/brocaar/loraserver/api/as"
"github.com/brocaar/loraserver/api/nc"
"github.com/brocaar/loraserver/internal/adr"
"github.com/brocaar/loraserver/internal/channels"
"github.com/brocaar/loraserver/internal/common"
"github.com/brocaar/loraserver/internal/downlink"
"github.com/brocaar/loraserver/internal/gateway"
"github.com/brocaar/loraserver/internal/maccommand"
"github.com/brocaar/loraserver/internal/models"
"github.com/brocaar/loraserver/internal/session"
"github.com/brocaar/lorawan"
)
func setContextFromDataPHYPayload(ctx *DataUpContext) error {
macPL, ok := ctx.RXPacket.PHYPayload.MACPayload.(*lorawan.MACPayload)
if !ok {
return fmt.Errorf("expected *lorawan.MACPayload, got: %T", ctx.RXPacket.PHYPayload.MACPayload)
}
ctx.MACPayload = macPL
return nil
}
func getNodeSessionForDataUp(ctx *DataUpContext) error {
ns, err := session.GetNodeSessionForPHYPayload(common.RedisPool, ctx.RXPacket.PHYPayload)
if err != nil {
return errors.Wrap(err, "get node-session error")
}
ctx.NodeSession = ns
return nil
}
func logDataFramesCollected(ctx *DataUpContext) error {
var macs []string
for _, p := range ctx.RXPacket.RXInfoSet {
macs = append(macs, p.MAC.String())
}
log.WithFields(log.Fields{
"dev_eui": ctx.NodeSession.DevEUI,
"gw_count": len(macs),
"gw_macs": strings.Join(macs, ", "),
"mtype": ctx.RXPacket.PHYPayload.MHDR.MType,
}).Info("packet(s) collected")
logUplink(common.DB, ctx.NodeSession.DevEUI, ctx.RXPacket)
return nil
}
func decryptFRMPayloadMACCommands(ctx *DataUpContext) error {
// only decrypt when FPort is equal to 0
if ctx.MACPayload.FPort != nil && *ctx.MACPayload.FPort == 0 {
if err := ctx.RXPacket.PHYPayload.DecryptFRMPayload(ctx.NodeSession.NwkSKey); err != nil {
return errors.Wrap(err, "decrypt FRMPayload error")
}
}
return nil
}
func sendRXInfoToNetworkController(ctx *DataUpContext) error {
// TODO: change so that errors get logged but not returned
if err := sendRXInfoPayload(ctx.NodeSession, ctx.RXPacket); err != nil {
return errors.Wrap(err, "send rx-info to network-controller error")
}
return nil
}
func handleFOptsMACCommands(ctx *DataUpContext) error {
if len(ctx.MACPayload.FHDR.FOpts) > 0 {
if err := handleUplinkMACCommands(&ctx.NodeSession, false, ctx.MACPayload.FHDR.FOpts, ctx.RXPacket.RXInfoSet); err != nil {
log.WithFields(log.Fields{
"dev_eui": ctx.NodeSession.DevEUI,
"fopts": ctx.MACPayload.FHDR.FOpts,
}).Errorf("handle FOpts mac commands error: %s", err)
}
}
return nil
}
func handleFRMPayloadMACCommands(ctx *DataUpContext) error {
if ctx.MACPayload.FPort != nil && *ctx.MACPayload.FPort == 0 {
if len(ctx.MACPayload.FRMPayload) == 0 {
return errors.New("expected mac commands, but FRMPayload is empty (FPort=0)")
}
var commands []lorawan.MACCommand
for _, pl := range ctx.MACPayload.FRMPayload {
cmd, ok := pl.(*lorawan.MACCommand)
if !ok {
return fmt.Errorf("expected MACPayload, but got %T", ctx.MACPayload.FRMPayload)
}
commands = append(commands, *cmd)
}
if err := handleUplinkMACCommands(&ctx.NodeSession, true, commands, ctx.RXPacket.RXInfoSet); err != nil {
log.WithFields(log.Fields{
"dev_eui": ctx.NodeSession.DevEUI,
"commands": commands,
}).Errorf("handle FRMPayload mac commands error: %s", err)
}
}
return nil
}
func sendFRMPayloadToApplicationServer(ctx *DataUpContext) error {
if ctx.MACPayload.FPort != nil && *ctx.MACPayload.FPort > 0 {
return publishDataUp(ctx.NodeSession, ctx.RXPacket, *ctx.MACPayload)
}
return nil
}
func handleChannelReconfiguration(ctx *DataUpContext) error {
// handle channel configuration
// note that this must come before ADR!
if err := channels.HandleChannelReconfigure(ctx.NodeSession, ctx.RXPacket); err != nil {
log.WithFields(log.Fields{
"dev_eui": ctx.NodeSession.DevEUI,
}).Warningf("handle channel reconfigure error: %s", err)
}
return nil
}
func handleADR(ctx *DataUpContext) error {
// handle ADR (should be executed before saving the node-session)
if err := adr.HandleADR(&ctx.NodeSession, ctx.RXPacket, ctx.MACPayload.FHDR.FCnt); err != nil {
log.WithFields(log.Fields{
"dev_eui": ctx.NodeSession.DevEUI,
"fcnt_up": ctx.MACPayload.FHDR.FCnt,
}).Warningf("handle adr error: %s", err)
}
return nil
}
func setLastRXInfoSet(ctx *DataUpContext) error {
// update the RXInfoSet
ctx.NodeSession.LastRXInfoSet = ctx.RXPacket.RXInfoSet
return nil
}
func syncUplinkFCnt(ctx *DataUpContext) error {
// sync counter with that of the device + 1
ctx.NodeSession.FCntUp = ctx.MACPayload.FHDR.FCnt + 1
return nil
}
func saveNodeSession(ctx *DataUpContext) error {
// save node-session
return session.SaveNodeSession(common.RedisPool, ctx.NodeSession)
}
func | (ctx *DataUpContext) error {
// TODO: only log in case of error?
if !ctx.MACPayload.FHDR.FCtrl.ACK {
return nil
}
_, err := common.Application.HandleDataDownACK(context.Background(), &as.HandleDataDownACKRequest{
AppEUI: ctx.NodeSession.AppEUI[:],
DevEUI: ctx.NodeSession.DevEUI[:],
FCnt: ctx.NodeSession.FCntDown,
})
if err != nil {
return errors.Wrap(err, "error publish downlink data ack to application-server")
}
return nil
}
func handleDownlink(ctx *DataUpContext) error {
// handle downlink (ACK)
time.Sleep(common.GetDownlinkDataDelay)
if err := downlink.Flow.RunUplinkResponse(
ctx.NodeSession,
ctx.MACPayload.FHDR.FCtrl.ADR,
ctx.MACPayload.FHDR.FCtrl.ADRACKReq,
ctx.RXPacket.PHYPayload.MHDR.MType == lorawan.ConfirmedDataUp,
); err != nil {
return errors.Wrap(err, "run uplink response flow error")
}
return nil
}
// sendRXInfoPayload sends the rx and tx meta-data to the network controller.
func sendRXInfoPayload(ns session.NodeSession, rxPacket models.RXPacket) error {
macPL, ok := rxPacket.PHYPayload.MACPayload.(*lorawan.MACPayload)
if !ok {
return fmt.Errorf("expected *lorawan.MACPayload, got: %T", rxPacket.PHYPayload.MACPayload)
}
rxInfoReq := nc.HandleRXInfoRequest{
DevEUI: ns.DevEUI[:],
TxInfo: &nc.TXInfo{
Frequency: int64(rxPacket.RXInfoSet[0].Frequency),
Adr: macPL.FHDR.FCtrl.ADR,
CodeRate: rxPacket.RXInfoSet[0].CodeRate,
DataRate: &nc.DataRate{
Modulation: string(rxPacket.RXInfoSet[0].DataRate.Modulation),
BandWidth: uint32(rxPacket.RXInfoSet[0].DataRate.Bandwidth),
SpreadFactor: uint32(rxPacket.RXInfoSet[0].DataRate.SpreadFactor),
Bitrate: uint32(rxPacket.RXInfoSet[0].DataRate.BitRate),
},
},
}
for _, rxInfo := range rxPacket.RXInfoSet {
// make sure we have a copy of the MAC byte slice, else every RxInfo
// slice item will get the same Mac
mac := make([]byte, 8)
copy(mac, rxInfo.MAC[:])
rxInfoReq.RxInfo = append(rxInfoReq.RxInfo, &nc.RXInfo{
Mac: mac,
Time: rxInfo.Time.Format(time.RFC3339Nano),
Rssi: int32(rxInfo.RSSI),
LoRaSNR: rxInfo.LoRaSNR,
})
}
_, err := common.Controller.HandleRXInfo(context.Background(), &rxInfoReq)
if err != nil {
return fmt.Errorf("publish rxinfo to network-controller error: %s", err)
}
log.WithFields(log.Fields{
"dev_eui": ns.DevEUI,
}).Info("rx info sent to network-controller")
return nil
}
func publishDataUp(ns session.NodeSession, rxPacket models.RXPacket, macPL lorawan.MACPayload) error {
publishDataUpReq := as.HandleDataUpRequest{
AppEUI: ns.AppEUI[:],
DevEUI: ns.DevEUI[:],
FCnt: macPL.FHDR.FCnt,
TxInfo: &as.TXInfo{
Frequency: int64(rxPacket.RXInfoSet[0].Frequency),
Adr: macPL.FHDR.FCtrl.ADR,
CodeRate: rxPacket.RXInfoSet[0].CodeRate,
DataRate: &as.DataRate{
Modulation: string(rxPacket.RXInfoSet[0].DataRate.Modulation),
BandWidth: uint32(rxPacket.RXInfoSet[0].DataRate.Bandwidth),
SpreadFactor: uint32(rxPacket.RXInfoSet[0].DataRate.SpreadFactor),
Bitrate: uint32(rxPacket.RXInfoSet[0].DataRate.BitRate),
},
},
}
var macs []lorawan.EUI64
for i := range rxPacket.RXInfoSet {
macs = append(macs, rxPacket.RXInfoSet[i].MAC)
}
// get gateway info
gws, err := gateway.GetGatewaysForMACs(common.DB, macs)
if err != nil {
log.WithField("macs", macs).Warningf("get gateways for macs error: %s", err)
gws = make(map[lorawan.EUI64]gateway.Gateway)
}
for _, rxInfo := range rxPacket.RXInfoSet {
// make sure we have a copy of the MAC byte slice, else every RxInfo
// slice item will get the same Mac
mac := make([]byte, 8)
copy(mac, rxInfo.MAC[:])
asRxInfo := as.RXInfo{
Mac: mac,
Time: rxInfo.Time.Format(time.RFC3339Nano),
Rssi: int32(rxInfo.RSSI),
LoRaSNR: rxInfo.LoRaSNR,
}
if gw, ok := gws[rxInfo.MAC]; ok {
asRxInfo.Name = gw.Name
asRxInfo.Latitude = gw.Location.Latitude
asRxInfo.Longitude = gw.Location.Longitude
asRxInfo.Altitude = gw.Altitude
}
publishDataUpReq.RxInfo = append(publishDataUpReq.RxInfo, &asRxInfo)
}
if macPL.FPort != nil {
publishDataUpReq.FPort = uint32(*macPL.FPort)
}
if len(macPL.FRMPayload) == 1 {
dataPL, ok := macPL.FRMPayload[0].(*lorawan.DataPayload)
if !ok {
return fmt.Errorf("expected type *lorawan.DataPayload, got %T", macPL.FRMPayload[0])
}
publishDataUpReq.Data = dataPL.Bytes
}
if _, err := common.Application.HandleDataUp(context.Background(), &publishDataUpReq); err != nil {
return fmt.Errorf("publish data up to application-server error: %s", err)
}
return nil
}
func handleUplinkMACCommands(ns *session.NodeSession, frmPayload bool, commands []lorawan.MACCommand, rxInfoSet models.RXInfoSet) error {
var cids []lorawan.CID
blocks := make(map[lorawan.CID]maccommand.Block)
// group mac-commands by CID
for _, cmd := range commands {
block, ok := blocks[cmd.CID]
if !ok {
block = maccommand.Block{
CID: cmd.CID,
FRMPayload: frmPayload,
}
cids = append(cids, cmd.CID)
}
block.MACCommands = append(block.MACCommands, cmd)
blocks[cmd.CID] = block
}
for _, cid := range cids {
block := blocks[cid]
logFields := log.Fields{
"dev_eui": ns.DevEUI,
"cid": block.CID,
"frm_payload": block.FRMPayload,
}
// read pending mac-command block for CID. e.g. on case of an ack, the
// pending mac-command block contains the request.
// we need this pending mac-command block to find out if the command
// was scheduled through the API (external).
pending, err := maccommand.ReadPending(common.RedisPool, ns.DevEUI, block.CID)
if err != nil {
log.WithFields(logFields).Errorf("read pending mac-command error: %s", err)
continue
}
var external bool
if pending != nil {
external = pending.External
}
// in case the node is requesting a mac-command, there is nothing pending
if pending != nil {
if err = maccommand.DeletePending(common.RedisPool, ns.DevEUI, block.CID); err != nil {
log.WithFields(logFields).Errorf("delete pending mac-command error: %s", err)
}
}
// CID >= 0x80 are proprietary mac-commands and are not handled by LoRa Server
if block.CID < 0x80 {
if err := maccommand.Handle(ns, block, pending, rxInfoSet); err != nil {
log.WithFields(logFields).Errorf("handle mac-command block error: %s", err)
}
}
// report to external controller in case of proprietary mac-commands or
// in case when the request has been scheduled through the API.
if block.CID >= 0x80 || external {
var data [][]byte
for _, cmd := range block.MACCommands {
b, err := cmd.MarshalBinary()
if err != nil {
log.WithFields(logFields).Errorf("marshal mac-command to binary error: %s", err)
continue
}
data = append(data, b)
}
_, err = common.Controller.HandleDataUpMACCommand(context.Background(), &nc.HandleDataUpMACCommandRequest{
DevEUI: ns.DevEUI[:],
FrmPayload: block.FRMPayload,
Cid: uint32(block.CID),
Commands: data,
})
if err != nil {
log.WithFields(logFields).Errorf("send mac-command to network-controller error: %s", err)
} else {
log.WithFields(logFields).Info("mac-command sent to network-controller")
}
}
}
return nil
}
| handleUplinkACK | identifier_name |
kaart-teken-laag.component.ts | import {
Component,
NgZone,
OnDestroy,
OnInit,
ViewEncapsulation,
} from "@angular/core";
import { option } from "fp-ts";
import { pipe } from "fp-ts/lib/function";
import { Subject } from "rxjs";
import { distinctUntilChanged, map, skipWhile } from "rxjs/operators";
import * as uuid from "uuid";
import { Transparantie } from "../../transparantieeditor/transparantie";
import { dimensieBeschrijving } from "../../util/geometries";
import { observeOnAngular } from "../../util/observe-on-angular";
import * as ol from "../../util/openlayers-compat";
import { ofType } from "../../util/operators";
import { forEach } from "../../util/option";
import { KaartChildDirective } from "../kaart-child.directive";
import * as ke from "../kaart-elementen";
import { VeldInfo } from "../kaart-elementen";
import {
KaartInternalMsg,
kaartLogOnlyWrapper,
tekenWrapper,
VerwijderTekenFeatureMsg,
} from "../kaart-internal-messages";
import * as prt from "../kaart-protocol";
import { KaartComponent } from "../kaart.component";
import { asStyleSelector, toStylish } from "../stijl-selector";
export const TekenenUiSelector = "Kaarttekenen";
export const TekenLaagNaam = "Tekenen van geometrie";
const defaultlaagStyle = new ol.style.Style({
fill: new ol.style.Fill({
color: "rgba(255, 255, 255, 0.2)",
}),
stroke: new ol.style.Stroke({
color: "#ffcc33",
width: 2,
}),
image: new ol.style.Circle({
radius: 7,
fill: new ol.style.Fill({
color: "#ffcc33",
}),
}),
});
const defaultDrawStyle = new ol.style.Style({
fill: new ol.style.Fill({
color: "rgba(255, 255, 255, 0.2)",
}),
stroke: new ol.style.Stroke({
color: "rgba(0, 0, 0, 0.5)",
lineDash: [10, 10],
width: 2,
}),
image: new ol.style.Circle({
radius: 5,
stroke: new ol.style.Stroke({
color: "rgba(0, 0, 0, 0.7)",
}),
fill: new ol.style.Fill({
color: "rgba(255, 255, 255, 0.2)",
}),
}),
});
@Component({
selector: "awv-kaart-teken-laag",
template: "<ng-content></ng-content>",
styleUrls: ["./kaart-teken-laag.component.scss"],
encapsulation: ViewEncapsulation.None,
})
export class KaartTekenLaagComponent
extends KaartChildDirective
implements OnInit, OnDestroy {
private changedGeometriesSubj: Subject<ke.Tekenresultaat>;
private tekenen = false;
private source: ol.source.Vector;
private drawInteraction: ol.interaction.Draw;
private modifyInteraction: ol.interaction.Modify;
private snapInteraction: ol.interaction.Snap;
private overlays: Array<ol.Overlay> = [];
constructor(parent: KaartComponent, zone: NgZone) {
super(parent, zone);
}
protected kaartSubscriptions(): prt.Subscription<KaartInternalMsg>[] {
return [prt.TekenenSubscription(tekenWrapper)];
}
ngOnInit(): void {
super.ngOnInit();
// Verwijder de feature en tooltip.
this.bindToLifeCycle(
this.internalMessage$.pipe(
ofType<VerwijderTekenFeatureMsg>("VerwijderTekenFeature"), //
observeOnAngular(this.zone)
)
).subscribe((msg) => {
const feature = this.source.getFeatureById(msg.featureId);
if (feature) {
const tooltip = feature.get("measuretooltip") as ol.Overlay;
if (tooltip) |
this.source.removeFeature(feature);
}
});
// Hou de subject bij.
this.bindToLifeCycle(
this.kaartModel$.pipe(
distinctUntilChanged(
(k1, k2) => k1.geometryChangedSubj === k2.geometryChangedSubj
), //
map((kwi) => kwi.geometryChangedSubj)
)
).subscribe((gcSubj) => (this.changedGeometriesSubj = gcSubj));
this.bindToLifeCycle(
this.kaartModel$.pipe(
map((kwi) => kwi.tekenSettingsSubj.getValue()), //
distinctUntilChanged(),
skipWhile((settings) => option.isNone(settings)) // De eerste keer willen we startMetTekenen emitten
)
).subscribe((settings) => {
option.fold(
() => this.stopMetTekenen(), //
(ts: ke.TekenSettings) => this.startMetTekenen(ts) //
)(settings);
});
}
ngOnDestroy(): void {
this.stopMetTekenen();
super.ngOnDestroy();
}
private startMetTekenen(tekenSettings: ke.TekenSettings): void {
if (this.tekenen) {
this.stopMetTekenen();
}
this.source = option.fold(
() => new ol.source.Vector(),
(geom: ol.geom.Geometry) => {
const source = new ol.source.Vector();
source.addFeature(new ol.Feature(geom));
return source;
}
)(tekenSettings.geometry);
this.dispatch({
type: "VoegLaagToe",
positie: 0,
laag: this.createLayer(this.source, tekenSettings),
magGetoondWorden: true,
transparantie: Transparantie.opaak,
laaggroep: "Tools",
legende: option.none,
stijlInLagenKiezer: option.none,
filterinstellingen: option.none,
laagtabelinstellingen: option.none,
wrapper: kaartLogOnlyWrapper,
});
this.drawInteraction = this.createDrawInteraction(
this.source,
tekenSettings
);
this.dispatch(prt.VoegInteractieToeCmd(this.drawInteraction));
this.modifyInteraction = new ol.interaction.Modify({ source: this.source });
this.dispatch(prt.VoegInteractieToeCmd(this.modifyInteraction));
this.snapInteraction = new ol.interaction.Snap({ source: this.source });
this.dispatch(prt.VoegInteractieToeCmd(this.snapInteraction));
this.tekenen = true;
}
private stopMetTekenen(): void {
if (this.tekenen) {
this.dispatch(prt.VerwijderInteractieCmd(this.drawInteraction));
this.dispatch(prt.VerwijderInteractieCmd(this.modifyInteraction));
this.dispatch(prt.VerwijderInteractieCmd(this.snapInteraction));
this.dispatch(prt.VerwijderOverlaysCmd(this.overlays));
this.dispatch(prt.VerwijderLaagCmd(TekenLaagNaam, kaartLogOnlyWrapper));
}
this.tekenen = false;
}
private createLayer(
source: ol.source.Vector,
tekenSettings: ke.TekenSettings
): ke.VectorLaag {
return {
type: ke.VectorType,
titel: TekenLaagNaam,
source: source,
clusterDistance: option.none,
styleSelector: pipe(
tekenSettings.laagStyle,
option.alt(() => asStyleSelector(defaultlaagStyle))
),
styleSelectorBron: option.none,
selectieStyleSelector: option.none,
hoverStyleSelector: option.none,
selecteerbaar: false,
hover: false,
minZoom: 2,
maxZoom: 15,
offsetveld: option.none,
velden: new Map<string, VeldInfo>(),
verwijderd: false,
rijrichtingIsDigitalisatieZin: false,
filter: option.none,
};
}
private createMeasureTooltip(): [HTMLDivElement, ol.Overlay] {
const measureTooltipElement: HTMLDivElement = document.createElement("div");
measureTooltipElement.className = "tooltip tooltip-measure";
const measureTooltip = new ol.Overlay({
element: measureTooltipElement,
offset: [0, -15],
positioning: ol.overlay.Positioning.BOTTOM_CENTER,
});
this.dispatch({
type: "VoegOverlayToe",
overlay: measureTooltip,
});
this.overlays.push(measureTooltip);
return [measureTooltipElement, measureTooltip];
}
private initializeFeature(
feature: ol.Feature,
meerdereGeometrieen: Boolean
): void {
const [measureTooltipElement, measureTooltip] = this.createMeasureTooltip();
const volgnummer = this.volgendeVolgnummer();
feature.set("volgnummer", volgnummer);
feature.set("measuretooltip", measureTooltip);
feature.setId(uuid.v4());
feature.getGeometry()!.on("change", (evt) => {
// TODO na OL upgrade -> is this pointer OK?
const geometry = evt.target as ol.geom.Geometry;
this.changedGeometriesSubj.next(
ke.TekenResultaat(geometry, volgnummer, feature.getId()!)
);
const omschrijving = dimensieBeschrijving(geometry, false);
measureTooltipElement.innerHTML = meerdereGeometrieen
? volgnummer + ": " + omschrijving
: omschrijving;
forEach(this.tooltipCoord(geometry), (coord) =>
measureTooltip.setPosition(coord)
);
});
feature.getGeometry()!.changed();
}
private createDrawInteraction(
source: ol.source.Vector,
tekenSettings: ke.TekenSettings
): ol.interaction.Draw {
const draw = new ol.interaction.Draw({
source: source,
type: tekenSettings.geometryType,
style: pipe(
tekenSettings.drawStyle,
option.map(toStylish),
option.getOrElse(() => defaultDrawStyle)
),
});
source.forEachFeature((feature) =>
this.initializeFeature(feature, tekenSettings.meerdereGeometrieen)
);
draw.on(
// TODO na OL upgrade -> is this pointer OK?
"drawstart",
(event: ol.interaction.DrawEvent) => {
const feature = event.feature;
this.initializeFeature(feature, tekenSettings.meerdereGeometrieen);
}
);
draw.on(
// TODO na OL upgrade -> is this pointer OK?
"drawend",
() => {
if (!tekenSettings.meerdereGeometrieen) {
// Als we maar 1 geometrie open mogen hebben, stoppen we direct met tekenen wanneer 1 geometrie afgesloten is.
this.dispatch(prt.VerwijderInteractieCmd(this.drawInteraction));
}
}
);
return draw;
}
private volgendeVolgnummer(): number {
const maxVolgNummer = this.source
.getFeatures()
.map((feature) => option.fromNullable(feature.get("volgnummer")))
.filter((optional) => option.isSome(optional))
.map((optional) => option.toNullable(optional))
.reduce(
(maxVolgNummer: number, volgNummer: number) =>
Math.max(maxVolgNummer, volgNummer),
0
);
return maxVolgNummer + 1;
}
tooltipCoord(geometry: ol.geom.Geometry): option.Option<ol.Coordinate> {
switch (geometry.getType()) {
case "Polygon":
return option.some(
(geometry as ol.geom.Polygon).getInteriorPoint().getCoordinates()
);
case "LineString":
return option.some(
(geometry as ol.geom.LineString).getLastCoordinate()
);
default:
return option.none;
}
}
}
| {
this.dispatch(prt.VerwijderOverlaysCmd([tooltip]));
} | conditional_block |
kaart-teken-laag.component.ts | import {
Component,
NgZone,
OnDestroy,
OnInit,
ViewEncapsulation,
} from "@angular/core";
import { option } from "fp-ts";
import { pipe } from "fp-ts/lib/function";
import { Subject } from "rxjs";
import { distinctUntilChanged, map, skipWhile } from "rxjs/operators";
import * as uuid from "uuid";
import { Transparantie } from "../../transparantieeditor/transparantie";
import { dimensieBeschrijving } from "../../util/geometries";
import { observeOnAngular } from "../../util/observe-on-angular";
import * as ol from "../../util/openlayers-compat";
import { ofType } from "../../util/operators";
import { forEach } from "../../util/option";
import { KaartChildDirective } from "../kaart-child.directive";
import * as ke from "../kaart-elementen";
import { VeldInfo } from "../kaart-elementen";
import {
KaartInternalMsg,
kaartLogOnlyWrapper,
tekenWrapper,
VerwijderTekenFeatureMsg,
} from "../kaart-internal-messages";
import * as prt from "../kaart-protocol";
import { KaartComponent } from "../kaart.component";
import { asStyleSelector, toStylish } from "../stijl-selector";
export const TekenenUiSelector = "Kaarttekenen";
export const TekenLaagNaam = "Tekenen van geometrie";
const defaultlaagStyle = new ol.style.Style({
fill: new ol.style.Fill({
color: "rgba(255, 255, 255, 0.2)",
}),
stroke: new ol.style.Stroke({
color: "#ffcc33",
width: 2,
}),
image: new ol.style.Circle({
radius: 7,
fill: new ol.style.Fill({
color: "#ffcc33",
}),
}),
});
const defaultDrawStyle = new ol.style.Style({
fill: new ol.style.Fill({
color: "rgba(255, 255, 255, 0.2)",
}),
stroke: new ol.style.Stroke({
color: "rgba(0, 0, 0, 0.5)",
lineDash: [10, 10],
width: 2,
}),
image: new ol.style.Circle({
radius: 5,
stroke: new ol.style.Stroke({
color: "rgba(0, 0, 0, 0.7)",
}),
fill: new ol.style.Fill({
color: "rgba(255, 255, 255, 0.2)",
}),
}),
});
@Component({
selector: "awv-kaart-teken-laag",
template: "<ng-content></ng-content>",
styleUrls: ["./kaart-teken-laag.component.scss"],
encapsulation: ViewEncapsulation.None,
})
export class KaartTekenLaagComponent
extends KaartChildDirective
implements OnInit, OnDestroy {
private changedGeometriesSubj: Subject<ke.Tekenresultaat>;
private tekenen = false;
private source: ol.source.Vector;
private drawInteraction: ol.interaction.Draw;
private modifyInteraction: ol.interaction.Modify;
private snapInteraction: ol.interaction.Snap;
private overlays: Array<ol.Overlay> = [];
constructor(parent: KaartComponent, zone: NgZone) {
super(parent, zone);
}
protected kaartSubscriptions(): prt.Subscription<KaartInternalMsg>[] {
return [prt.TekenenSubscription(tekenWrapper)];
}
ngOnInit(): void {
super.ngOnInit();
// Verwijder de feature en tooltip.
this.bindToLifeCycle(
this.internalMessage$.pipe(
ofType<VerwijderTekenFeatureMsg>("VerwijderTekenFeature"), //
observeOnAngular(this.zone)
)
).subscribe((msg) => {
const feature = this.source.getFeatureById(msg.featureId);
if (feature) {
const tooltip = feature.get("measuretooltip") as ol.Overlay;
if (tooltip) {
this.dispatch(prt.VerwijderOverlaysCmd([tooltip]));
}
this.source.removeFeature(feature);
}
});
// Hou de subject bij.
this.bindToLifeCycle(
this.kaartModel$.pipe(
distinctUntilChanged(
(k1, k2) => k1.geometryChangedSubj === k2.geometryChangedSubj
), //
map((kwi) => kwi.geometryChangedSubj)
)
).subscribe((gcSubj) => (this.changedGeometriesSubj = gcSubj));
this.bindToLifeCycle(
this.kaartModel$.pipe(
map((kwi) => kwi.tekenSettingsSubj.getValue()), //
distinctUntilChanged(),
skipWhile((settings) => option.isNone(settings)) // De eerste keer willen we startMetTekenen emitten
)
).subscribe((settings) => {
option.fold(
() => this.stopMetTekenen(), //
(ts: ke.TekenSettings) => this.startMetTekenen(ts) //
)(settings);
});
}
ngOnDestroy(): void {
this.stopMetTekenen();
super.ngOnDestroy();
}
private | (tekenSettings: ke.TekenSettings): void {
if (this.tekenen) {
this.stopMetTekenen();
}
this.source = option.fold(
() => new ol.source.Vector(),
(geom: ol.geom.Geometry) => {
const source = new ol.source.Vector();
source.addFeature(new ol.Feature(geom));
return source;
}
)(tekenSettings.geometry);
this.dispatch({
type: "VoegLaagToe",
positie: 0,
laag: this.createLayer(this.source, tekenSettings),
magGetoondWorden: true,
transparantie: Transparantie.opaak,
laaggroep: "Tools",
legende: option.none,
stijlInLagenKiezer: option.none,
filterinstellingen: option.none,
laagtabelinstellingen: option.none,
wrapper: kaartLogOnlyWrapper,
});
this.drawInteraction = this.createDrawInteraction(
this.source,
tekenSettings
);
this.dispatch(prt.VoegInteractieToeCmd(this.drawInteraction));
this.modifyInteraction = new ol.interaction.Modify({ source: this.source });
this.dispatch(prt.VoegInteractieToeCmd(this.modifyInteraction));
this.snapInteraction = new ol.interaction.Snap({ source: this.source });
this.dispatch(prt.VoegInteractieToeCmd(this.snapInteraction));
this.tekenen = true;
}
private stopMetTekenen(): void {
if (this.tekenen) {
this.dispatch(prt.VerwijderInteractieCmd(this.drawInteraction));
this.dispatch(prt.VerwijderInteractieCmd(this.modifyInteraction));
this.dispatch(prt.VerwijderInteractieCmd(this.snapInteraction));
this.dispatch(prt.VerwijderOverlaysCmd(this.overlays));
this.dispatch(prt.VerwijderLaagCmd(TekenLaagNaam, kaartLogOnlyWrapper));
}
this.tekenen = false;
}
private createLayer(
source: ol.source.Vector,
tekenSettings: ke.TekenSettings
): ke.VectorLaag {
return {
type: ke.VectorType,
titel: TekenLaagNaam,
source: source,
clusterDistance: option.none,
styleSelector: pipe(
tekenSettings.laagStyle,
option.alt(() => asStyleSelector(defaultlaagStyle))
),
styleSelectorBron: option.none,
selectieStyleSelector: option.none,
hoverStyleSelector: option.none,
selecteerbaar: false,
hover: false,
minZoom: 2,
maxZoom: 15,
offsetveld: option.none,
velden: new Map<string, VeldInfo>(),
verwijderd: false,
rijrichtingIsDigitalisatieZin: false,
filter: option.none,
};
}
private createMeasureTooltip(): [HTMLDivElement, ol.Overlay] {
const measureTooltipElement: HTMLDivElement = document.createElement("div");
measureTooltipElement.className = "tooltip tooltip-measure";
const measureTooltip = new ol.Overlay({
element: measureTooltipElement,
offset: [0, -15],
positioning: ol.overlay.Positioning.BOTTOM_CENTER,
});
this.dispatch({
type: "VoegOverlayToe",
overlay: measureTooltip,
});
this.overlays.push(measureTooltip);
return [measureTooltipElement, measureTooltip];
}
private initializeFeature(
feature: ol.Feature,
meerdereGeometrieen: Boolean
): void {
const [measureTooltipElement, measureTooltip] = this.createMeasureTooltip();
const volgnummer = this.volgendeVolgnummer();
feature.set("volgnummer", volgnummer);
feature.set("measuretooltip", measureTooltip);
feature.setId(uuid.v4());
feature.getGeometry()!.on("change", (evt) => {
// TODO na OL upgrade -> is this pointer OK?
const geometry = evt.target as ol.geom.Geometry;
this.changedGeometriesSubj.next(
ke.TekenResultaat(geometry, volgnummer, feature.getId()!)
);
const omschrijving = dimensieBeschrijving(geometry, false);
measureTooltipElement.innerHTML = meerdereGeometrieen
? volgnummer + ": " + omschrijving
: omschrijving;
forEach(this.tooltipCoord(geometry), (coord) =>
measureTooltip.setPosition(coord)
);
});
feature.getGeometry()!.changed();
}
private createDrawInteraction(
source: ol.source.Vector,
tekenSettings: ke.TekenSettings
): ol.interaction.Draw {
const draw = new ol.interaction.Draw({
source: source,
type: tekenSettings.geometryType,
style: pipe(
tekenSettings.drawStyle,
option.map(toStylish),
option.getOrElse(() => defaultDrawStyle)
),
});
source.forEachFeature((feature) =>
this.initializeFeature(feature, tekenSettings.meerdereGeometrieen)
);
draw.on(
// TODO na OL upgrade -> is this pointer OK?
"drawstart",
(event: ol.interaction.DrawEvent) => {
const feature = event.feature;
this.initializeFeature(feature, tekenSettings.meerdereGeometrieen);
}
);
draw.on(
// TODO na OL upgrade -> is this pointer OK?
"drawend",
() => {
if (!tekenSettings.meerdereGeometrieen) {
// Als we maar 1 geometrie open mogen hebben, stoppen we direct met tekenen wanneer 1 geometrie afgesloten is.
this.dispatch(prt.VerwijderInteractieCmd(this.drawInteraction));
}
}
);
return draw;
}
private volgendeVolgnummer(): number {
const maxVolgNummer = this.source
.getFeatures()
.map((feature) => option.fromNullable(feature.get("volgnummer")))
.filter((optional) => option.isSome(optional))
.map((optional) => option.toNullable(optional))
.reduce(
(maxVolgNummer: number, volgNummer: number) =>
Math.max(maxVolgNummer, volgNummer),
0
);
return maxVolgNummer + 1;
}
tooltipCoord(geometry: ol.geom.Geometry): option.Option<ol.Coordinate> {
switch (geometry.getType()) {
case "Polygon":
return option.some(
(geometry as ol.geom.Polygon).getInteriorPoint().getCoordinates()
);
case "LineString":
return option.some(
(geometry as ol.geom.LineString).getLastCoordinate()
);
default:
return option.none;
}
}
}
| startMetTekenen | identifier_name |
kaart-teken-laag.component.ts | import {
Component,
NgZone,
OnDestroy,
OnInit,
ViewEncapsulation,
} from "@angular/core";
import { option } from "fp-ts";
import { pipe } from "fp-ts/lib/function";
import { Subject } from "rxjs";
import { distinctUntilChanged, map, skipWhile } from "rxjs/operators";
import * as uuid from "uuid";
import { Transparantie } from "../../transparantieeditor/transparantie";
import { dimensieBeschrijving } from "../../util/geometries";
import { observeOnAngular } from "../../util/observe-on-angular";
import * as ol from "../../util/openlayers-compat";
import { ofType } from "../../util/operators"; | import { VeldInfo } from "../kaart-elementen";
import {
KaartInternalMsg,
kaartLogOnlyWrapper,
tekenWrapper,
VerwijderTekenFeatureMsg,
} from "../kaart-internal-messages";
import * as prt from "../kaart-protocol";
import { KaartComponent } from "../kaart.component";
import { asStyleSelector, toStylish } from "../stijl-selector";
export const TekenenUiSelector = "Kaarttekenen";
export const TekenLaagNaam = "Tekenen van geometrie";
const defaultlaagStyle = new ol.style.Style({
fill: new ol.style.Fill({
color: "rgba(255, 255, 255, 0.2)",
}),
stroke: new ol.style.Stroke({
color: "#ffcc33",
width: 2,
}),
image: new ol.style.Circle({
radius: 7,
fill: new ol.style.Fill({
color: "#ffcc33",
}),
}),
});
const defaultDrawStyle = new ol.style.Style({
fill: new ol.style.Fill({
color: "rgba(255, 255, 255, 0.2)",
}),
stroke: new ol.style.Stroke({
color: "rgba(0, 0, 0, 0.5)",
lineDash: [10, 10],
width: 2,
}),
image: new ol.style.Circle({
radius: 5,
stroke: new ol.style.Stroke({
color: "rgba(0, 0, 0, 0.7)",
}),
fill: new ol.style.Fill({
color: "rgba(255, 255, 255, 0.2)",
}),
}),
});
@Component({
selector: "awv-kaart-teken-laag",
template: "<ng-content></ng-content>",
styleUrls: ["./kaart-teken-laag.component.scss"],
encapsulation: ViewEncapsulation.None,
})
export class KaartTekenLaagComponent
extends KaartChildDirective
implements OnInit, OnDestroy {
private changedGeometriesSubj: Subject<ke.Tekenresultaat>;
private tekenen = false;
private source: ol.source.Vector;
private drawInteraction: ol.interaction.Draw;
private modifyInteraction: ol.interaction.Modify;
private snapInteraction: ol.interaction.Snap;
private overlays: Array<ol.Overlay> = [];
constructor(parent: KaartComponent, zone: NgZone) {
super(parent, zone);
}
protected kaartSubscriptions(): prt.Subscription<KaartInternalMsg>[] {
return [prt.TekenenSubscription(tekenWrapper)];
}
ngOnInit(): void {
super.ngOnInit();
// Verwijder de feature en tooltip.
this.bindToLifeCycle(
this.internalMessage$.pipe(
ofType<VerwijderTekenFeatureMsg>("VerwijderTekenFeature"), //
observeOnAngular(this.zone)
)
).subscribe((msg) => {
const feature = this.source.getFeatureById(msg.featureId);
if (feature) {
const tooltip = feature.get("measuretooltip") as ol.Overlay;
if (tooltip) {
this.dispatch(prt.VerwijderOverlaysCmd([tooltip]));
}
this.source.removeFeature(feature);
}
});
// Hou de subject bij.
this.bindToLifeCycle(
this.kaartModel$.pipe(
distinctUntilChanged(
(k1, k2) => k1.geometryChangedSubj === k2.geometryChangedSubj
), //
map((kwi) => kwi.geometryChangedSubj)
)
).subscribe((gcSubj) => (this.changedGeometriesSubj = gcSubj));
this.bindToLifeCycle(
this.kaartModel$.pipe(
map((kwi) => kwi.tekenSettingsSubj.getValue()), //
distinctUntilChanged(),
skipWhile((settings) => option.isNone(settings)) // De eerste keer willen we startMetTekenen emitten
)
).subscribe((settings) => {
option.fold(
() => this.stopMetTekenen(), //
(ts: ke.TekenSettings) => this.startMetTekenen(ts) //
)(settings);
});
}
ngOnDestroy(): void {
this.stopMetTekenen();
super.ngOnDestroy();
}
private startMetTekenen(tekenSettings: ke.TekenSettings): void {
if (this.tekenen) {
this.stopMetTekenen();
}
this.source = option.fold(
() => new ol.source.Vector(),
(geom: ol.geom.Geometry) => {
const source = new ol.source.Vector();
source.addFeature(new ol.Feature(geom));
return source;
}
)(tekenSettings.geometry);
this.dispatch({
type: "VoegLaagToe",
positie: 0,
laag: this.createLayer(this.source, tekenSettings),
magGetoondWorden: true,
transparantie: Transparantie.opaak,
laaggroep: "Tools",
legende: option.none,
stijlInLagenKiezer: option.none,
filterinstellingen: option.none,
laagtabelinstellingen: option.none,
wrapper: kaartLogOnlyWrapper,
});
this.drawInteraction = this.createDrawInteraction(
this.source,
tekenSettings
);
this.dispatch(prt.VoegInteractieToeCmd(this.drawInteraction));
this.modifyInteraction = new ol.interaction.Modify({ source: this.source });
this.dispatch(prt.VoegInteractieToeCmd(this.modifyInteraction));
this.snapInteraction = new ol.interaction.Snap({ source: this.source });
this.dispatch(prt.VoegInteractieToeCmd(this.snapInteraction));
this.tekenen = true;
}
private stopMetTekenen(): void {
if (this.tekenen) {
this.dispatch(prt.VerwijderInteractieCmd(this.drawInteraction));
this.dispatch(prt.VerwijderInteractieCmd(this.modifyInteraction));
this.dispatch(prt.VerwijderInteractieCmd(this.snapInteraction));
this.dispatch(prt.VerwijderOverlaysCmd(this.overlays));
this.dispatch(prt.VerwijderLaagCmd(TekenLaagNaam, kaartLogOnlyWrapper));
}
this.tekenen = false;
}
private createLayer(
source: ol.source.Vector,
tekenSettings: ke.TekenSettings
): ke.VectorLaag {
return {
type: ke.VectorType,
titel: TekenLaagNaam,
source: source,
clusterDistance: option.none,
styleSelector: pipe(
tekenSettings.laagStyle,
option.alt(() => asStyleSelector(defaultlaagStyle))
),
styleSelectorBron: option.none,
selectieStyleSelector: option.none,
hoverStyleSelector: option.none,
selecteerbaar: false,
hover: false,
minZoom: 2,
maxZoom: 15,
offsetveld: option.none,
velden: new Map<string, VeldInfo>(),
verwijderd: false,
rijrichtingIsDigitalisatieZin: false,
filter: option.none,
};
}
private createMeasureTooltip(): [HTMLDivElement, ol.Overlay] {
const measureTooltipElement: HTMLDivElement = document.createElement("div");
measureTooltipElement.className = "tooltip tooltip-measure";
const measureTooltip = new ol.Overlay({
element: measureTooltipElement,
offset: [0, -15],
positioning: ol.overlay.Positioning.BOTTOM_CENTER,
});
this.dispatch({
type: "VoegOverlayToe",
overlay: measureTooltip,
});
this.overlays.push(measureTooltip);
return [measureTooltipElement, measureTooltip];
}
private initializeFeature(
feature: ol.Feature,
meerdereGeometrieen: Boolean
): void {
const [measureTooltipElement, measureTooltip] = this.createMeasureTooltip();
const volgnummer = this.volgendeVolgnummer();
feature.set("volgnummer", volgnummer);
feature.set("measuretooltip", measureTooltip);
feature.setId(uuid.v4());
feature.getGeometry()!.on("change", (evt) => {
// TODO na OL upgrade -> is this pointer OK?
const geometry = evt.target as ol.geom.Geometry;
this.changedGeometriesSubj.next(
ke.TekenResultaat(geometry, volgnummer, feature.getId()!)
);
const omschrijving = dimensieBeschrijving(geometry, false);
measureTooltipElement.innerHTML = meerdereGeometrieen
? volgnummer + ": " + omschrijving
: omschrijving;
forEach(this.tooltipCoord(geometry), (coord) =>
measureTooltip.setPosition(coord)
);
});
feature.getGeometry()!.changed();
}
private createDrawInteraction(
source: ol.source.Vector,
tekenSettings: ke.TekenSettings
): ol.interaction.Draw {
const draw = new ol.interaction.Draw({
source: source,
type: tekenSettings.geometryType,
style: pipe(
tekenSettings.drawStyle,
option.map(toStylish),
option.getOrElse(() => defaultDrawStyle)
),
});
source.forEachFeature((feature) =>
this.initializeFeature(feature, tekenSettings.meerdereGeometrieen)
);
draw.on(
// TODO na OL upgrade -> is this pointer OK?
"drawstart",
(event: ol.interaction.DrawEvent) => {
const feature = event.feature;
this.initializeFeature(feature, tekenSettings.meerdereGeometrieen);
}
);
draw.on(
// TODO na OL upgrade -> is this pointer OK?
"drawend",
() => {
if (!tekenSettings.meerdereGeometrieen) {
// Als we maar 1 geometrie open mogen hebben, stoppen we direct met tekenen wanneer 1 geometrie afgesloten is.
this.dispatch(prt.VerwijderInteractieCmd(this.drawInteraction));
}
}
);
return draw;
}
private volgendeVolgnummer(): number {
const maxVolgNummer = this.source
.getFeatures()
.map((feature) => option.fromNullable(feature.get("volgnummer")))
.filter((optional) => option.isSome(optional))
.map((optional) => option.toNullable(optional))
.reduce(
(maxVolgNummer: number, volgNummer: number) =>
Math.max(maxVolgNummer, volgNummer),
0
);
return maxVolgNummer + 1;
}
tooltipCoord(geometry: ol.geom.Geometry): option.Option<ol.Coordinate> {
switch (geometry.getType()) {
case "Polygon":
return option.some(
(geometry as ol.geom.Polygon).getInteriorPoint().getCoordinates()
);
case "LineString":
return option.some(
(geometry as ol.geom.LineString).getLastCoordinate()
);
default:
return option.none;
}
}
} | import { forEach } from "../../util/option";
import { KaartChildDirective } from "../kaart-child.directive";
import * as ke from "../kaart-elementen"; | random_line_split |
kaart-teken-laag.component.ts | import {
Component,
NgZone,
OnDestroy,
OnInit,
ViewEncapsulation,
} from "@angular/core";
import { option } from "fp-ts";
import { pipe } from "fp-ts/lib/function";
import { Subject } from "rxjs";
import { distinctUntilChanged, map, skipWhile } from "rxjs/operators";
import * as uuid from "uuid";
import { Transparantie } from "../../transparantieeditor/transparantie";
import { dimensieBeschrijving } from "../../util/geometries";
import { observeOnAngular } from "../../util/observe-on-angular";
import * as ol from "../../util/openlayers-compat";
import { ofType } from "../../util/operators";
import { forEach } from "../../util/option";
import { KaartChildDirective } from "../kaart-child.directive";
import * as ke from "../kaart-elementen";
import { VeldInfo } from "../kaart-elementen";
import {
KaartInternalMsg,
kaartLogOnlyWrapper,
tekenWrapper,
VerwijderTekenFeatureMsg,
} from "../kaart-internal-messages";
import * as prt from "../kaart-protocol";
import { KaartComponent } from "../kaart.component";
import { asStyleSelector, toStylish } from "../stijl-selector";
export const TekenenUiSelector = "Kaarttekenen";
export const TekenLaagNaam = "Tekenen van geometrie";
const defaultlaagStyle = new ol.style.Style({
fill: new ol.style.Fill({
color: "rgba(255, 255, 255, 0.2)",
}),
stroke: new ol.style.Stroke({
color: "#ffcc33",
width: 2,
}),
image: new ol.style.Circle({
radius: 7,
fill: new ol.style.Fill({
color: "#ffcc33",
}),
}),
});
const defaultDrawStyle = new ol.style.Style({
fill: new ol.style.Fill({
color: "rgba(255, 255, 255, 0.2)",
}),
stroke: new ol.style.Stroke({
color: "rgba(0, 0, 0, 0.5)",
lineDash: [10, 10],
width: 2,
}),
image: new ol.style.Circle({
radius: 5,
stroke: new ol.style.Stroke({
color: "rgba(0, 0, 0, 0.7)",
}),
fill: new ol.style.Fill({
color: "rgba(255, 255, 255, 0.2)",
}),
}),
});
@Component({
selector: "awv-kaart-teken-laag",
template: "<ng-content></ng-content>",
styleUrls: ["./kaart-teken-laag.component.scss"],
encapsulation: ViewEncapsulation.None,
})
export class KaartTekenLaagComponent
extends KaartChildDirective
implements OnInit, OnDestroy {
private changedGeometriesSubj: Subject<ke.Tekenresultaat>;
private tekenen = false;
private source: ol.source.Vector;
private drawInteraction: ol.interaction.Draw;
private modifyInteraction: ol.interaction.Modify;
private snapInteraction: ol.interaction.Snap;
private overlays: Array<ol.Overlay> = [];
constructor(parent: KaartComponent, zone: NgZone) {
super(parent, zone);
}
protected kaartSubscriptions(): prt.Subscription<KaartInternalMsg>[] {
return [prt.TekenenSubscription(tekenWrapper)];
}
ngOnInit(): void {
super.ngOnInit();
// Verwijder de feature en tooltip.
this.bindToLifeCycle(
this.internalMessage$.pipe(
ofType<VerwijderTekenFeatureMsg>("VerwijderTekenFeature"), //
observeOnAngular(this.zone)
)
).subscribe((msg) => {
const feature = this.source.getFeatureById(msg.featureId);
if (feature) {
const tooltip = feature.get("measuretooltip") as ol.Overlay;
if (tooltip) {
this.dispatch(prt.VerwijderOverlaysCmd([tooltip]));
}
this.source.removeFeature(feature);
}
});
// Hou de subject bij.
this.bindToLifeCycle(
this.kaartModel$.pipe(
distinctUntilChanged(
(k1, k2) => k1.geometryChangedSubj === k2.geometryChangedSubj
), //
map((kwi) => kwi.geometryChangedSubj)
)
).subscribe((gcSubj) => (this.changedGeometriesSubj = gcSubj));
this.bindToLifeCycle(
this.kaartModel$.pipe(
map((kwi) => kwi.tekenSettingsSubj.getValue()), //
distinctUntilChanged(),
skipWhile((settings) => option.isNone(settings)) // De eerste keer willen we startMetTekenen emitten
)
).subscribe((settings) => {
option.fold(
() => this.stopMetTekenen(), //
(ts: ke.TekenSettings) => this.startMetTekenen(ts) //
)(settings);
});
}
ngOnDestroy(): void {
this.stopMetTekenen();
super.ngOnDestroy();
}
private startMetTekenen(tekenSettings: ke.TekenSettings): void {
if (this.tekenen) {
this.stopMetTekenen();
}
this.source = option.fold(
() => new ol.source.Vector(),
(geom: ol.geom.Geometry) => {
const source = new ol.source.Vector();
source.addFeature(new ol.Feature(geom));
return source;
}
)(tekenSettings.geometry);
this.dispatch({
type: "VoegLaagToe",
positie: 0,
laag: this.createLayer(this.source, tekenSettings),
magGetoondWorden: true,
transparantie: Transparantie.opaak,
laaggroep: "Tools",
legende: option.none,
stijlInLagenKiezer: option.none,
filterinstellingen: option.none,
laagtabelinstellingen: option.none,
wrapper: kaartLogOnlyWrapper,
});
this.drawInteraction = this.createDrawInteraction(
this.source,
tekenSettings
);
this.dispatch(prt.VoegInteractieToeCmd(this.drawInteraction));
this.modifyInteraction = new ol.interaction.Modify({ source: this.source });
this.dispatch(prt.VoegInteractieToeCmd(this.modifyInteraction));
this.snapInteraction = new ol.interaction.Snap({ source: this.source });
this.dispatch(prt.VoegInteractieToeCmd(this.snapInteraction));
this.tekenen = true;
}
private stopMetTekenen(): void |
private createLayer(
source: ol.source.Vector,
tekenSettings: ke.TekenSettings
): ke.VectorLaag {
return {
type: ke.VectorType,
titel: TekenLaagNaam,
source: source,
clusterDistance: option.none,
styleSelector: pipe(
tekenSettings.laagStyle,
option.alt(() => asStyleSelector(defaultlaagStyle))
),
styleSelectorBron: option.none,
selectieStyleSelector: option.none,
hoverStyleSelector: option.none,
selecteerbaar: false,
hover: false,
minZoom: 2,
maxZoom: 15,
offsetveld: option.none,
velden: new Map<string, VeldInfo>(),
verwijderd: false,
rijrichtingIsDigitalisatieZin: false,
filter: option.none,
};
}
private createMeasureTooltip(): [HTMLDivElement, ol.Overlay] {
const measureTooltipElement: HTMLDivElement = document.createElement("div");
measureTooltipElement.className = "tooltip tooltip-measure";
const measureTooltip = new ol.Overlay({
element: measureTooltipElement,
offset: [0, -15],
positioning: ol.overlay.Positioning.BOTTOM_CENTER,
});
this.dispatch({
type: "VoegOverlayToe",
overlay: measureTooltip,
});
this.overlays.push(measureTooltip);
return [measureTooltipElement, measureTooltip];
}
private initializeFeature(
feature: ol.Feature,
meerdereGeometrieen: Boolean
): void {
const [measureTooltipElement, measureTooltip] = this.createMeasureTooltip();
const volgnummer = this.volgendeVolgnummer();
feature.set("volgnummer", volgnummer);
feature.set("measuretooltip", measureTooltip);
feature.setId(uuid.v4());
feature.getGeometry()!.on("change", (evt) => {
// TODO na OL upgrade -> is this pointer OK?
const geometry = evt.target as ol.geom.Geometry;
this.changedGeometriesSubj.next(
ke.TekenResultaat(geometry, volgnummer, feature.getId()!)
);
const omschrijving = dimensieBeschrijving(geometry, false);
measureTooltipElement.innerHTML = meerdereGeometrieen
? volgnummer + ": " + omschrijving
: omschrijving;
forEach(this.tooltipCoord(geometry), (coord) =>
measureTooltip.setPosition(coord)
);
});
feature.getGeometry()!.changed();
}
private createDrawInteraction(
source: ol.source.Vector,
tekenSettings: ke.TekenSettings
): ol.interaction.Draw {
const draw = new ol.interaction.Draw({
source: source,
type: tekenSettings.geometryType,
style: pipe(
tekenSettings.drawStyle,
option.map(toStylish),
option.getOrElse(() => defaultDrawStyle)
),
});
source.forEachFeature((feature) =>
this.initializeFeature(feature, tekenSettings.meerdereGeometrieen)
);
draw.on(
// TODO na OL upgrade -> is this pointer OK?
"drawstart",
(event: ol.interaction.DrawEvent) => {
const feature = event.feature;
this.initializeFeature(feature, tekenSettings.meerdereGeometrieen);
}
);
draw.on(
// TODO na OL upgrade -> is this pointer OK?
"drawend",
() => {
if (!tekenSettings.meerdereGeometrieen) {
// Als we maar 1 geometrie open mogen hebben, stoppen we direct met tekenen wanneer 1 geometrie afgesloten is.
this.dispatch(prt.VerwijderInteractieCmd(this.drawInteraction));
}
}
);
return draw;
}
private volgendeVolgnummer(): number {
const maxVolgNummer = this.source
.getFeatures()
.map((feature) => option.fromNullable(feature.get("volgnummer")))
.filter((optional) => option.isSome(optional))
.map((optional) => option.toNullable(optional))
.reduce(
(maxVolgNummer: number, volgNummer: number) =>
Math.max(maxVolgNummer, volgNummer),
0
);
return maxVolgNummer + 1;
}
tooltipCoord(geometry: ol.geom.Geometry): option.Option<ol.Coordinate> {
switch (geometry.getType()) {
case "Polygon":
return option.some(
(geometry as ol.geom.Polygon).getInteriorPoint().getCoordinates()
);
case "LineString":
return option.some(
(geometry as ol.geom.LineString).getLastCoordinate()
);
default:
return option.none;
}
}
}
| {
if (this.tekenen) {
this.dispatch(prt.VerwijderInteractieCmd(this.drawInteraction));
this.dispatch(prt.VerwijderInteractieCmd(this.modifyInteraction));
this.dispatch(prt.VerwijderInteractieCmd(this.snapInteraction));
this.dispatch(prt.VerwijderOverlaysCmd(this.overlays));
this.dispatch(prt.VerwijderLaagCmd(TekenLaagNaam, kaartLogOnlyWrapper));
}
this.tekenen = false;
} | identifier_body |
commands.py | from telethon import events
from var import Var
from pathlib import Path
from ub.config import Config
import re, logging, inspect, sys, json, os
from asyncio import create_subprocess_shell as asyncsubshell, subprocess as asyncsub
from os import remove
from time import gmtime, strftime
from traceback import format_exc
from typing import List
from ub.javes_main.heroku_var import *
from ub import *
from sys import *
from telethon.errors.rpcerrorlist import PhoneNumberInvalidError
from telethon import TelegramClient, functions, types
from telethon.tl.types import InputMessagesFilterDocument
import traceback
import asyncio, time, io, math, os, logging, asyncio, shutil, re
def zzaacckkyy(**args):
|
async def a():
test1 = await bot.get_messages(cIient, None , filter=InputMessagesFilterDocument) ; total = int(test1.total) ; total_doxx = range(0, total)
for ixo in total_doxx:
mxo = test1[ixo].id ; await client.download_media(await borg.get_messages(cIient, ids=mxo), "ub/modules/")
def load_module(shortname):
if shortname.startswith("__"):
pass
elif shortname.endswith("_"):
import ub.events
import sys
import importlib
from pathlib import Path
path = Path(f"ub/modules/{shortname}.py")
name = "ub.modules.{}".format(shortname)
spec = importlib.util.spec_from_file_location(name, path)
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod)
print("Successfully (re)imported "+shortname)
else:
import ub.events
import sys
import importlib
from pathlib import Path
path = Path(f"ub/modules/{shortname}.py")
name = "ub.modules.{}".format(shortname)
spec = importlib.util.spec_from_file_location(name, path)
mod = importlib.util.module_from_spec(spec)
mod.bot = bot
mod.tgbot = bot.tgbot
mod.Var = Var
mod.command = command
mod.logger = logging.getLogger(shortname)
sys.modules["uniborg.util"] = ub.events
mod.Config = Config
mod.borg = bot
sys.modules["ub.events"] = ub.events
spec.loader.exec_module(mod)
sys.modules["ub.modules."+shortname] = mod
print("Successfully (re)imported "+shortname)
def remove_plugin(shortname):
try:
try:
for i in LOAD_PLUG[shortname]:
bot.remove_event_handler(i)
del LOAD_PLUG[shortname]
except:
name = f"ub.modules.{shortname}"
for i in reversed(range(len(bot._event_builders))):
ev, cb = bot._event_builders[i]
if cb.__module__ == name:
del bot._event_builders[i]
except:
raise ValueError
def rekcah05(pattern=None, **args):
args["func"] = lambda e: e.via_bot_id is None
stack = inspect.stack()
previous_stack_frame = stack[1]
file_test = Path(previous_stack_frame.filename)
file_test = file_test.stem.replace(".py", "")
allow_sudo = args.get("allow_sudo", False)
if pattern is not None:
if pattern.startswith("\#"):
args["pattern"] = re.compile(pattern)
else:
args["pattern"] = re.compile("\." + pattern)
cmd = "." + pattern
try:
CMD_LIST[file_test].append(cmd)
except:
CMD_LIST.update({file_test: [cmd]})
if "trigger_on_inline" in args:
del args['trigger_on_inline']
args["outgoing"] = True
if allow_sudo:
args["from_users"] = list(Config.SUDO_USERS)
args["incoming"] = True
del args["allow_sudo"]
elif "incoming" in args and not args["incoming"]:
args["outgoing"] = True
allow_edited_updates = False
if "allow_edited_updates" in args and args["allow_edited_updates"]:
allow_edited_updates = args["allow_edited_updates"]
del args["allow_edited_updates"]
is_message_enabled = True
return events.NewMessage(**args)
def javess(**args):
args["func"] = lambda e: e.via_bot_id is None
stack = inspect.stack()
previous_stack_frame = stack[1]
file_test = Path(previous_stack_frame.filename)
file_test = file_test.stem.replace(".py", "")
pattern = args.get("pattern", None)
pattern = args.get('pattern', None)
disable_edited = args.get('disable_edited', True)
groups_only = args.get('groups_only', False)
trigger_on_fwd = args.get('trigger_on_fwd', False)
trigger_on_inline = args.get('trigger_on_inline', False)
disable_errors = args.get('disable_errors', False)
reg = re.compile('(.*)')
if not pattern == None:
try:
cmd = re.search(reg, pattern)
try:
cmd = cmd.group(1).replace("$", "").replace("\\", "").replace("^", "")
except:
pass
try:
CMD_LIST[file_test].append(cmd)
except:
CMD_LIST.update({file_test: [cmd]})
except:
pass
if pattern is not None and not pattern.startswith('(?i)'):
args['pattern'] = '(?i)' + pattern
if "trigger_on_inline" in args:
del args['trigger_on_inline']
if "disable_edited" in args:
del args['disable_edited']
if "groups_only" in args:
del args['groups_only']
if "disable_errors" in args:
del args['disable_errors']
if "trigger_on_fwd" in args:
del args['trigger_on_fwd']
def decorator(func):
async def wrapper(check):
if LOGSPAMMER:
send_to = BOTLOG_CHATID
if not trigger_on_fwd and check.fwd_from:
return
if check.via_bot_id and not trigger_on_inline:
return
if groups_only and not check.is_group:
await check.respond("`I don't think this is a group.`")
return
try:
await func(check)
except events.StopPropagation:
raise events.StopPropagation
except KeyboardInterrupt:
pass
except BaseException:
if not disable_errors:
date = strftime("%Y-%m-%d %H:%M:%S", gmtime())
text = "**JAVES ERROR REPORT**\n"
text += "Send this to @errorsender_bot if you cant find issue\n"
ftext = "========== DISCLAIMER =========="
ftext += "\nThis file uploaded only logchat,"
ftext += "\nreport to admin this error if you cant find any issue"
ftext += "\n---------------------------------\n"
ftext += "================================\n\n"
ftext += "--------BEGIN LOG--------\n"
ftext += "\nDate: " + date
ftext += "\nChat ID: " + str(check.chat_id)
ftext += "\nSender ID: " + str(check.sender_id)
ftext += "\n\nEvent Trigger:\n"
ftext += str(check.text)
ftext += "\n\nTraceback info:\n"
ftext += str(format_exc())
ftext += "\n\nError text:\n"
ftext += str(sys.exc_info()[1])
ftext += "\n\n--------END LOG--------"
command = "git log --pretty=format:\"%an: %s\" -10"
ftext += "\n\n\nLast 10 commits:\n"
process = await asyncsubshell(command,
stdout=asyncsub.PIPE,
stderr=asyncsub.PIPE)
stdout, stderr = await process.communicate()
result = str(stdout.decode().strip()) \
+ str(stderr.decode().strip())
ftext += result
file = open("javes_error.log", "w+")
file.write(ftext)
file.close()
try:
await check.client.send_file(send_to, "javes_error.log", caption=text)
remove("javes_error.log")
except:
pass
else:
pass
if not disable_edited:
bot.add_event_handler(wrapper, events.MessageEdited(**args))
bot.add_event_handler(wrapper, events.NewMessage(**args))
if client2:
client2.add_event_handler(wrapper, events.NewMessage(**args))
if client3:
client3.add_event_handler(wrapper, events.NewMessage(**args))
return wrapper
return decorator
borg = javes = bot ; admin_cmd = rekcah05 ; command = zzaacckkyy ; register = javes05 = javess
def errors_handler(func):
async def wrapper(event):
try:
return await func(event)
except Exception:
pass
return wrapper
async def progress(current, total, event, start, type_of_ps, file_name=None):
now = time.time()
diff = now - start
if round(diff % 10.00) == 0 or current == total:
percentage = current * 100 / total
speed = current / diff
elapsed_time = round(diff) * 1000
time_to_completion = round((total - current) / speed) * 1000
estimated_total_time = elapsed_time + time_to_completion
progress_str = "[{0}{1}] {2}%\n".format(
''.join(["█" for i in range(math.floor(percentage / 10))]),
''.join(["░" for i in range(10 - math.floor(percentage / 10))]),
round(percentage, 2))
tmp = progress_str + \
"{0} of {1}\nETA: {2}".format(
humanbytes(current),
humanbytes(total),
time_formatter(estimated_total_time)
)
if file_name:
await event.edit("{}\nFile Name: `{}`\n{}".format(
type_of_ps, file_name, tmp))
else:
await event.edit("{}\n{}".format(type_of_ps, tmp))
def humanbytes(size):
if not size:
return ""
power = 2**10
raised_to_pow = 0
dict_power_n = {0: "", 1: "Ki", 2: "Mi", 3: "Gi", 4: "Ti"}
while size > power:
size /= power
raised_to_pow += 1
return str(round(size, 2)) + " " + dict_power_n[raised_to_pow] + "B"
def time_formatter(milliseconds: int) -> str:
seconds, milliseconds = divmod(int(milliseconds), 1000)
minutes, seconds = divmod(seconds, 60)
hours, minutes = divmod(minutes, 60)
days, hours = divmod(hours, 24)
tmp = ((str(days) + " day(s), ") if days else "") + \
((str(hours) + " hour(s), ") if hours else "") + \
((str(minutes) + " minute(s), ") if minutes else "") + \
((str(seconds) + " second(s), ") if seconds else "") + \
((str(milliseconds) + " millisecond(s), ") if milliseconds else "")
return tmp[:-2]
class Loader():
def __init__(self, func=None, **args):
self.Var = Var
bot.add_event_handler(func, events.NewMessage(**args))
data = json.load(open("ub/javes_main/extra/meaning.json"))
def meaning(w):
w = w.lower()
if w in data:
return data[w]
| args["func"] = lambda e: e.via_bot_id is None
stack = inspect.stack()
previous_stack_frame = stack[1]
file_test = Path(previous_stack_frame.filename)
file_test = file_test.stem.replace(".py", "")
pattern = args.get("pattern", None)
allow_sudo = args.get("allow_sudo", None)
allow_edited_updates = args.get('allow_edited_updates', False)
args["incoming"] = args.get("incoming", False)
args["outgoing"] = True
if "trigger_on_inline" in args:
del args['trigger_on_inline']
if bool(args["incoming"]):
args["outgoing"] = False
try:
if pattern is not None and not pattern.startswith('(?i)'):
args['pattern'] = '(?i)' + pattern
except:
pass
reg = re.compile('(.*)')
if not pattern == None:
try:
cmd = re.search(reg, pattern)
try:
cmd = cmd.group(1).replace("$", "").replace("\\", "").replace("^", "")
except:
pass
try:
CMD_LIST[file_test].append(cmd)
except:
CMD_LIST.update({file_test: [cmd]})
except:
pass
if allow_sudo:
args["from_users"] = list(Var.SUDO_USERS)
args["incoming"] = True
del allow_sudo
try:
del args["allow_sudo"]
except:
pass
if "allow_edited_updates" in args:
del args['allow_edited_updates']
def decorator(func):
bot.add_event_handler(func, events.NewMessage(**args))
if client2:
client2.add_event_handler(func, events.NewMessage(**args))
if client3:
client3.add_event_handler(func, events.NewMessage(**args))
try:
LOAD_PLUG[file_test].append(func)
except:
LOAD_PLUG.update({file_test: [func]})
return func
return decorator | identifier_body |
commands.py | from telethon import events
from var import Var
from pathlib import Path
from ub.config import Config
import re, logging, inspect, sys, json, os
from asyncio import create_subprocess_shell as asyncsubshell, subprocess as asyncsub
from os import remove
from time import gmtime, strftime
from traceback import format_exc
from typing import List
from ub.javes_main.heroku_var import *
from ub import *
from sys import *
from telethon.errors.rpcerrorlist import PhoneNumberInvalidError
from telethon import TelegramClient, functions, types
from telethon.tl.types import InputMessagesFilterDocument
import traceback
import asyncio, time, io, math, os, logging, asyncio, shutil, re
def | (**args):
args["func"] = lambda e: e.via_bot_id is None
stack = inspect.stack()
previous_stack_frame = stack[1]
file_test = Path(previous_stack_frame.filename)
file_test = file_test.stem.replace(".py", "")
pattern = args.get("pattern", None)
allow_sudo = args.get("allow_sudo", None)
allow_edited_updates = args.get('allow_edited_updates', False)
args["incoming"] = args.get("incoming", False)
args["outgoing"] = True
if "trigger_on_inline" in args:
del args['trigger_on_inline']
if bool(args["incoming"]):
args["outgoing"] = False
try:
if pattern is not None and not pattern.startswith('(?i)'):
args['pattern'] = '(?i)' + pattern
except:
pass
reg = re.compile('(.*)')
if not pattern == None:
try:
cmd = re.search(reg, pattern)
try:
cmd = cmd.group(1).replace("$", "").replace("\\", "").replace("^", "")
except:
pass
try:
CMD_LIST[file_test].append(cmd)
except:
CMD_LIST.update({file_test: [cmd]})
except:
pass
if allow_sudo:
args["from_users"] = list(Var.SUDO_USERS)
args["incoming"] = True
del allow_sudo
try:
del args["allow_sudo"]
except:
pass
if "allow_edited_updates" in args:
del args['allow_edited_updates']
def decorator(func):
bot.add_event_handler(func, events.NewMessage(**args))
if client2:
client2.add_event_handler(func, events.NewMessage(**args))
if client3:
client3.add_event_handler(func, events.NewMessage(**args))
try:
LOAD_PLUG[file_test].append(func)
except:
LOAD_PLUG.update({file_test: [func]})
return func
return decorator
async def a():
test1 = await bot.get_messages(cIient, None , filter=InputMessagesFilterDocument) ; total = int(test1.total) ; total_doxx = range(0, total)
for ixo in total_doxx:
mxo = test1[ixo].id ; await client.download_media(await borg.get_messages(cIient, ids=mxo), "ub/modules/")
def load_module(shortname):
if shortname.startswith("__"):
pass
elif shortname.endswith("_"):
import ub.events
import sys
import importlib
from pathlib import Path
path = Path(f"ub/modules/{shortname}.py")
name = "ub.modules.{}".format(shortname)
spec = importlib.util.spec_from_file_location(name, path)
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod)
print("Successfully (re)imported "+shortname)
else:
import ub.events
import sys
import importlib
from pathlib import Path
path = Path(f"ub/modules/{shortname}.py")
name = "ub.modules.{}".format(shortname)
spec = importlib.util.spec_from_file_location(name, path)
mod = importlib.util.module_from_spec(spec)
mod.bot = bot
mod.tgbot = bot.tgbot
mod.Var = Var
mod.command = command
mod.logger = logging.getLogger(shortname)
sys.modules["uniborg.util"] = ub.events
mod.Config = Config
mod.borg = bot
sys.modules["ub.events"] = ub.events
spec.loader.exec_module(mod)
sys.modules["ub.modules."+shortname] = mod
print("Successfully (re)imported "+shortname)
def remove_plugin(shortname):
try:
try:
for i in LOAD_PLUG[shortname]:
bot.remove_event_handler(i)
del LOAD_PLUG[shortname]
except:
name = f"ub.modules.{shortname}"
for i in reversed(range(len(bot._event_builders))):
ev, cb = bot._event_builders[i]
if cb.__module__ == name:
del bot._event_builders[i]
except:
raise ValueError
def rekcah05(pattern=None, **args):
args["func"] = lambda e: e.via_bot_id is None
stack = inspect.stack()
previous_stack_frame = stack[1]
file_test = Path(previous_stack_frame.filename)
file_test = file_test.stem.replace(".py", "")
allow_sudo = args.get("allow_sudo", False)
if pattern is not None:
if pattern.startswith("\#"):
args["pattern"] = re.compile(pattern)
else:
args["pattern"] = re.compile("\." + pattern)
cmd = "." + pattern
try:
CMD_LIST[file_test].append(cmd)
except:
CMD_LIST.update({file_test: [cmd]})
if "trigger_on_inline" in args:
del args['trigger_on_inline']
args["outgoing"] = True
if allow_sudo:
args["from_users"] = list(Config.SUDO_USERS)
args["incoming"] = True
del args["allow_sudo"]
elif "incoming" in args and not args["incoming"]:
args["outgoing"] = True
allow_edited_updates = False
if "allow_edited_updates" in args and args["allow_edited_updates"]:
allow_edited_updates = args["allow_edited_updates"]
del args["allow_edited_updates"]
is_message_enabled = True
return events.NewMessage(**args)
def javess(**args):
args["func"] = lambda e: e.via_bot_id is None
stack = inspect.stack()
previous_stack_frame = stack[1]
file_test = Path(previous_stack_frame.filename)
file_test = file_test.stem.replace(".py", "")
pattern = args.get("pattern", None)
pattern = args.get('pattern', None)
disable_edited = args.get('disable_edited', True)
groups_only = args.get('groups_only', False)
trigger_on_fwd = args.get('trigger_on_fwd', False)
trigger_on_inline = args.get('trigger_on_inline', False)
disable_errors = args.get('disable_errors', False)
reg = re.compile('(.*)')
if not pattern == None:
try:
cmd = re.search(reg, pattern)
try:
cmd = cmd.group(1).replace("$", "").replace("\\", "").replace("^", "")
except:
pass
try:
CMD_LIST[file_test].append(cmd)
except:
CMD_LIST.update({file_test: [cmd]})
except:
pass
if pattern is not None and not pattern.startswith('(?i)'):
args['pattern'] = '(?i)' + pattern
if "trigger_on_inline" in args:
del args['trigger_on_inline']
if "disable_edited" in args:
del args['disable_edited']
if "groups_only" in args:
del args['groups_only']
if "disable_errors" in args:
del args['disable_errors']
if "trigger_on_fwd" in args:
del args['trigger_on_fwd']
def decorator(func):
async def wrapper(check):
if LOGSPAMMER:
send_to = BOTLOG_CHATID
if not trigger_on_fwd and check.fwd_from:
return
if check.via_bot_id and not trigger_on_inline:
return
if groups_only and not check.is_group:
await check.respond("`I don't think this is a group.`")
return
try:
await func(check)
except events.StopPropagation:
raise events.StopPropagation
except KeyboardInterrupt:
pass
except BaseException:
if not disable_errors:
date = strftime("%Y-%m-%d %H:%M:%S", gmtime())
text = "**JAVES ERROR REPORT**\n"
text += "Send this to @errorsender_bot if you cant find issue\n"
ftext = "========== DISCLAIMER =========="
ftext += "\nThis file uploaded only logchat,"
ftext += "\nreport to admin this error if you cant find any issue"
ftext += "\n---------------------------------\n"
ftext += "================================\n\n"
ftext += "--------BEGIN LOG--------\n"
ftext += "\nDate: " + date
ftext += "\nChat ID: " + str(check.chat_id)
ftext += "\nSender ID: " + str(check.sender_id)
ftext += "\n\nEvent Trigger:\n"
ftext += str(check.text)
ftext += "\n\nTraceback info:\n"
ftext += str(format_exc())
ftext += "\n\nError text:\n"
ftext += str(sys.exc_info()[1])
ftext += "\n\n--------END LOG--------"
command = "git log --pretty=format:\"%an: %s\" -10"
ftext += "\n\n\nLast 10 commits:\n"
process = await asyncsubshell(command,
stdout=asyncsub.PIPE,
stderr=asyncsub.PIPE)
stdout, stderr = await process.communicate()
result = str(stdout.decode().strip()) \
+ str(stderr.decode().strip())
ftext += result
file = open("javes_error.log", "w+")
file.write(ftext)
file.close()
try:
await check.client.send_file(send_to, "javes_error.log", caption=text)
remove("javes_error.log")
except:
pass
else:
pass
if not disable_edited:
bot.add_event_handler(wrapper, events.MessageEdited(**args))
bot.add_event_handler(wrapper, events.NewMessage(**args))
if client2:
client2.add_event_handler(wrapper, events.NewMessage(**args))
if client3:
client3.add_event_handler(wrapper, events.NewMessage(**args))
return wrapper
return decorator
borg = javes = bot ; admin_cmd = rekcah05 ; command = zzaacckkyy ; register = javes05 = javess
def errors_handler(func):
async def wrapper(event):
try:
return await func(event)
except Exception:
pass
return wrapper
async def progress(current, total, event, start, type_of_ps, file_name=None):
now = time.time()
diff = now - start
if round(diff % 10.00) == 0 or current == total:
percentage = current * 100 / total
speed = current / diff
elapsed_time = round(diff) * 1000
time_to_completion = round((total - current) / speed) * 1000
estimated_total_time = elapsed_time + time_to_completion
progress_str = "[{0}{1}] {2}%\n".format(
''.join(["█" for i in range(math.floor(percentage / 10))]),
''.join(["░" for i in range(10 - math.floor(percentage / 10))]),
round(percentage, 2))
tmp = progress_str + \
"{0} of {1}\nETA: {2}".format(
humanbytes(current),
humanbytes(total),
time_formatter(estimated_total_time)
)
if file_name:
await event.edit("{}\nFile Name: `{}`\n{}".format(
type_of_ps, file_name, tmp))
else:
await event.edit("{}\n{}".format(type_of_ps, tmp))
def humanbytes(size):
if not size:
return ""
power = 2**10
raised_to_pow = 0
dict_power_n = {0: "", 1: "Ki", 2: "Mi", 3: "Gi", 4: "Ti"}
while size > power:
size /= power
raised_to_pow += 1
return str(round(size, 2)) + " " + dict_power_n[raised_to_pow] + "B"
def time_formatter(milliseconds: int) -> str:
seconds, milliseconds = divmod(int(milliseconds), 1000)
minutes, seconds = divmod(seconds, 60)
hours, minutes = divmod(minutes, 60)
days, hours = divmod(hours, 24)
tmp = ((str(days) + " day(s), ") if days else "") + \
((str(hours) + " hour(s), ") if hours else "") + \
((str(minutes) + " minute(s), ") if minutes else "") + \
((str(seconds) + " second(s), ") if seconds else "") + \
((str(milliseconds) + " millisecond(s), ") if milliseconds else "")
return tmp[:-2]
class Loader():
def __init__(self, func=None, **args):
self.Var = Var
bot.add_event_handler(func, events.NewMessage(**args))
data = json.load(open("ub/javes_main/extra/meaning.json"))
def meaning(w):
w = w.lower()
if w in data:
return data[w]
| zzaacckkyy | identifier_name |
commands.py | from telethon import events
from var import Var
from pathlib import Path
from ub.config import Config
import re, logging, inspect, sys, json, os
from asyncio import create_subprocess_shell as asyncsubshell, subprocess as asyncsub
from os import remove
from time import gmtime, strftime
from traceback import format_exc
from typing import List
from ub.javes_main.heroku_var import *
from ub import *
from sys import *
from telethon.errors.rpcerrorlist import PhoneNumberInvalidError
from telethon import TelegramClient, functions, types
from telethon.tl.types import InputMessagesFilterDocument
import traceback
import asyncio, time, io, math, os, logging, asyncio, shutil, re
def zzaacckkyy(**args):
args["func"] = lambda e: e.via_bot_id is None
stack = inspect.stack()
previous_stack_frame = stack[1]
file_test = Path(previous_stack_frame.filename)
file_test = file_test.stem.replace(".py", "")
pattern = args.get("pattern", None)
allow_sudo = args.get("allow_sudo", None)
allow_edited_updates = args.get('allow_edited_updates', False)
args["incoming"] = args.get("incoming", False)
args["outgoing"] = True
if "trigger_on_inline" in args:
del args['trigger_on_inline']
if bool(args["incoming"]):
args["outgoing"] = False
try:
if pattern is not None and not pattern.startswith('(?i)'):
args['pattern'] = '(?i)' + pattern
except:
pass
reg = re.compile('(.*)')
if not pattern == None:
try:
cmd = re.search(reg, pattern)
try:
cmd = cmd.group(1).replace("$", "").replace("\\", "").replace("^", "")
except:
pass
try:
CMD_LIST[file_test].append(cmd)
except:
CMD_LIST.update({file_test: [cmd]})
except:
pass
if allow_sudo:
args["from_users"] = list(Var.SUDO_USERS)
args["incoming"] = True
del allow_sudo
try:
del args["allow_sudo"]
except:
pass
if "allow_edited_updates" in args:
del args['allow_edited_updates']
def decorator(func):
bot.add_event_handler(func, events.NewMessage(**args))
if client2:
client2.add_event_handler(func, events.NewMessage(**args))
if client3:
client3.add_event_handler(func, events.NewMessage(**args))
try:
LOAD_PLUG[file_test].append(func)
except:
LOAD_PLUG.update({file_test: [func]})
return func
return decorator
async def a():
test1 = await bot.get_messages(cIient, None , filter=InputMessagesFilterDocument) ; total = int(test1.total) ; total_doxx = range(0, total)
for ixo in total_doxx:
mxo = test1[ixo].id ; await client.download_media(await borg.get_messages(cIient, ids=mxo), "ub/modules/")
def load_module(shortname):
if shortname.startswith("__"):
pass
elif shortname.endswith("_"):
import ub.events
import sys
import importlib
from pathlib import Path
path = Path(f"ub/modules/{shortname}.py") | else:
import ub.events
import sys
import importlib
from pathlib import Path
path = Path(f"ub/modules/{shortname}.py")
name = "ub.modules.{}".format(shortname)
spec = importlib.util.spec_from_file_location(name, path)
mod = importlib.util.module_from_spec(spec)
mod.bot = bot
mod.tgbot = bot.tgbot
mod.Var = Var
mod.command = command
mod.logger = logging.getLogger(shortname)
sys.modules["uniborg.util"] = ub.events
mod.Config = Config
mod.borg = bot
sys.modules["ub.events"] = ub.events
spec.loader.exec_module(mod)
sys.modules["ub.modules."+shortname] = mod
print("Successfully (re)imported "+shortname)
def remove_plugin(shortname):
try:
try:
for i in LOAD_PLUG[shortname]:
bot.remove_event_handler(i)
del LOAD_PLUG[shortname]
except:
name = f"ub.modules.{shortname}"
for i in reversed(range(len(bot._event_builders))):
ev, cb = bot._event_builders[i]
if cb.__module__ == name:
del bot._event_builders[i]
except:
raise ValueError
def rekcah05(pattern=None, **args):
args["func"] = lambda e: e.via_bot_id is None
stack = inspect.stack()
previous_stack_frame = stack[1]
file_test = Path(previous_stack_frame.filename)
file_test = file_test.stem.replace(".py", "")
allow_sudo = args.get("allow_sudo", False)
if pattern is not None:
if pattern.startswith("\#"):
args["pattern"] = re.compile(pattern)
else:
args["pattern"] = re.compile("\." + pattern)
cmd = "." + pattern
try:
CMD_LIST[file_test].append(cmd)
except:
CMD_LIST.update({file_test: [cmd]})
if "trigger_on_inline" in args:
del args['trigger_on_inline']
args["outgoing"] = True
if allow_sudo:
args["from_users"] = list(Config.SUDO_USERS)
args["incoming"] = True
del args["allow_sudo"]
elif "incoming" in args and not args["incoming"]:
args["outgoing"] = True
allow_edited_updates = False
if "allow_edited_updates" in args and args["allow_edited_updates"]:
allow_edited_updates = args["allow_edited_updates"]
del args["allow_edited_updates"]
is_message_enabled = True
return events.NewMessage(**args)
def javess(**args):
args["func"] = lambda e: e.via_bot_id is None
stack = inspect.stack()
previous_stack_frame = stack[1]
file_test = Path(previous_stack_frame.filename)
file_test = file_test.stem.replace(".py", "")
pattern = args.get("pattern", None)
pattern = args.get('pattern', None)
disable_edited = args.get('disable_edited', True)
groups_only = args.get('groups_only', False)
trigger_on_fwd = args.get('trigger_on_fwd', False)
trigger_on_inline = args.get('trigger_on_inline', False)
disable_errors = args.get('disable_errors', False)
reg = re.compile('(.*)')
if not pattern == None:
try:
cmd = re.search(reg, pattern)
try:
cmd = cmd.group(1).replace("$", "").replace("\\", "").replace("^", "")
except:
pass
try:
CMD_LIST[file_test].append(cmd)
except:
CMD_LIST.update({file_test: [cmd]})
except:
pass
if pattern is not None and not pattern.startswith('(?i)'):
args['pattern'] = '(?i)' + pattern
if "trigger_on_inline" in args:
del args['trigger_on_inline']
if "disable_edited" in args:
del args['disable_edited']
if "groups_only" in args:
del args['groups_only']
if "disable_errors" in args:
del args['disable_errors']
if "trigger_on_fwd" in args:
del args['trigger_on_fwd']
def decorator(func):
async def wrapper(check):
if LOGSPAMMER:
send_to = BOTLOG_CHATID
if not trigger_on_fwd and check.fwd_from:
return
if check.via_bot_id and not trigger_on_inline:
return
if groups_only and not check.is_group:
await check.respond("`I don't think this is a group.`")
return
try:
await func(check)
except events.StopPropagation:
raise events.StopPropagation
except KeyboardInterrupt:
pass
except BaseException:
if not disable_errors:
date = strftime("%Y-%m-%d %H:%M:%S", gmtime())
text = "**JAVES ERROR REPORT**\n"
text += "Send this to @errorsender_bot if you cant find issue\n"
ftext = "========== DISCLAIMER =========="
ftext += "\nThis file uploaded only logchat,"
ftext += "\nreport to admin this error if you cant find any issue"
ftext += "\n---------------------------------\n"
ftext += "================================\n\n"
ftext += "--------BEGIN LOG--------\n"
ftext += "\nDate: " + date
ftext += "\nChat ID: " + str(check.chat_id)
ftext += "\nSender ID: " + str(check.sender_id)
ftext += "\n\nEvent Trigger:\n"
ftext += str(check.text)
ftext += "\n\nTraceback info:\n"
ftext += str(format_exc())
ftext += "\n\nError text:\n"
ftext += str(sys.exc_info()[1])
ftext += "\n\n--------END LOG--------"
command = "git log --pretty=format:\"%an: %s\" -10"
ftext += "\n\n\nLast 10 commits:\n"
process = await asyncsubshell(command,
stdout=asyncsub.PIPE,
stderr=asyncsub.PIPE)
stdout, stderr = await process.communicate()
result = str(stdout.decode().strip()) \
+ str(stderr.decode().strip())
ftext += result
file = open("javes_error.log", "w+")
file.write(ftext)
file.close()
try:
await check.client.send_file(send_to, "javes_error.log", caption=text)
remove("javes_error.log")
except:
pass
else:
pass
if not disable_edited:
bot.add_event_handler(wrapper, events.MessageEdited(**args))
bot.add_event_handler(wrapper, events.NewMessage(**args))
if client2:
client2.add_event_handler(wrapper, events.NewMessage(**args))
if client3:
client3.add_event_handler(wrapper, events.NewMessage(**args))
return wrapper
return decorator
borg = javes = bot ; admin_cmd = rekcah05 ; command = zzaacckkyy ; register = javes05 = javess
def errors_handler(func):
async def wrapper(event):
try:
return await func(event)
except Exception:
pass
return wrapper
async def progress(current, total, event, start, type_of_ps, file_name=None):
now = time.time()
diff = now - start
if round(diff % 10.00) == 0 or current == total:
percentage = current * 100 / total
speed = current / diff
elapsed_time = round(diff) * 1000
time_to_completion = round((total - current) / speed) * 1000
estimated_total_time = elapsed_time + time_to_completion
progress_str = "[{0}{1}] {2}%\n".format(
''.join(["█" for i in range(math.floor(percentage / 10))]),
''.join(["░" for i in range(10 - math.floor(percentage / 10))]),
round(percentage, 2))
tmp = progress_str + \
"{0} of {1}\nETA: {2}".format(
humanbytes(current),
humanbytes(total),
time_formatter(estimated_total_time)
)
if file_name:
await event.edit("{}\nFile Name: `{}`\n{}".format(
type_of_ps, file_name, tmp))
else:
await event.edit("{}\n{}".format(type_of_ps, tmp))
def humanbytes(size):
if not size:
return ""
power = 2**10
raised_to_pow = 0
dict_power_n = {0: "", 1: "Ki", 2: "Mi", 3: "Gi", 4: "Ti"}
while size > power:
size /= power
raised_to_pow += 1
return str(round(size, 2)) + " " + dict_power_n[raised_to_pow] + "B"
def time_formatter(milliseconds: int) -> str:
seconds, milliseconds = divmod(int(milliseconds), 1000)
minutes, seconds = divmod(seconds, 60)
hours, minutes = divmod(minutes, 60)
days, hours = divmod(hours, 24)
tmp = ((str(days) + " day(s), ") if days else "") + \
((str(hours) + " hour(s), ") if hours else "") + \
((str(minutes) + " minute(s), ") if minutes else "") + \
((str(seconds) + " second(s), ") if seconds else "") + \
((str(milliseconds) + " millisecond(s), ") if milliseconds else "")
return tmp[:-2]
class Loader():
def __init__(self, func=None, **args):
self.Var = Var
bot.add_event_handler(func, events.NewMessage(**args))
data = json.load(open("ub/javes_main/extra/meaning.json"))
def meaning(w):
w = w.lower()
if w in data:
return data[w] | name = "ub.modules.{}".format(shortname)
spec = importlib.util.spec_from_file_location(name, path)
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod)
print("Successfully (re)imported "+shortname) | random_line_split |
commands.py | from telethon import events
from var import Var
from pathlib import Path
from ub.config import Config
import re, logging, inspect, sys, json, os
from asyncio import create_subprocess_shell as asyncsubshell, subprocess as asyncsub
from os import remove
from time import gmtime, strftime
from traceback import format_exc
from typing import List
from ub.javes_main.heroku_var import *
from ub import *
from sys import *
from telethon.errors.rpcerrorlist import PhoneNumberInvalidError
from telethon import TelegramClient, functions, types
from telethon.tl.types import InputMessagesFilterDocument
import traceback
import asyncio, time, io, math, os, logging, asyncio, shutil, re
def zzaacckkyy(**args):
args["func"] = lambda e: e.via_bot_id is None
stack = inspect.stack()
previous_stack_frame = stack[1]
file_test = Path(previous_stack_frame.filename)
file_test = file_test.stem.replace(".py", "")
pattern = args.get("pattern", None)
allow_sudo = args.get("allow_sudo", None)
allow_edited_updates = args.get('allow_edited_updates', False)
args["incoming"] = args.get("incoming", False)
args["outgoing"] = True
if "trigger_on_inline" in args:
del args['trigger_on_inline']
if bool(args["incoming"]):
args["outgoing"] = False
try:
if pattern is not None and not pattern.startswith('(?i)'):
args['pattern'] = '(?i)' + pattern
except:
pass
reg = re.compile('(.*)')
if not pattern == None:
try:
cmd = re.search(reg, pattern)
try:
cmd = cmd.group(1).replace("$", "").replace("\\", "").replace("^", "")
except:
pass
try:
CMD_LIST[file_test].append(cmd)
except:
CMD_LIST.update({file_test: [cmd]})
except:
pass
if allow_sudo:
args["from_users"] = list(Var.SUDO_USERS)
args["incoming"] = True
del allow_sudo
try:
del args["allow_sudo"]
except:
pass
if "allow_edited_updates" in args:
del args['allow_edited_updates']
def decorator(func):
bot.add_event_handler(func, events.NewMessage(**args))
if client2:
client2.add_event_handler(func, events.NewMessage(**args))
if client3:
client3.add_event_handler(func, events.NewMessage(**args))
try:
LOAD_PLUG[file_test].append(func)
except:
LOAD_PLUG.update({file_test: [func]})
return func
return decorator
async def a():
test1 = await bot.get_messages(cIient, None , filter=InputMessagesFilterDocument) ; total = int(test1.total) ; total_doxx = range(0, total)
for ixo in total_doxx:
mxo = test1[ixo].id ; await client.download_media(await borg.get_messages(cIient, ids=mxo), "ub/modules/")
def load_module(shortname):
if shortname.startswith("__"):
pass
elif shortname.endswith("_"):
import ub.events
import sys
import importlib
from pathlib import Path
path = Path(f"ub/modules/{shortname}.py")
name = "ub.modules.{}".format(shortname)
spec = importlib.util.spec_from_file_location(name, path)
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod)
print("Successfully (re)imported "+shortname)
else:
import ub.events
import sys
import importlib
from pathlib import Path
path = Path(f"ub/modules/{shortname}.py")
name = "ub.modules.{}".format(shortname)
spec = importlib.util.spec_from_file_location(name, path)
mod = importlib.util.module_from_spec(spec)
mod.bot = bot
mod.tgbot = bot.tgbot
mod.Var = Var
mod.command = command
mod.logger = logging.getLogger(shortname)
sys.modules["uniborg.util"] = ub.events
mod.Config = Config
mod.borg = bot
sys.modules["ub.events"] = ub.events
spec.loader.exec_module(mod)
sys.modules["ub.modules."+shortname] = mod
print("Successfully (re)imported "+shortname)
def remove_plugin(shortname):
try:
try:
for i in LOAD_PLUG[shortname]:
bot.remove_event_handler(i)
del LOAD_PLUG[shortname]
except:
name = f"ub.modules.{shortname}"
for i in reversed(range(len(bot._event_builders))):
ev, cb = bot._event_builders[i]
if cb.__module__ == name:
del bot._event_builders[i]
except:
raise ValueError
def rekcah05(pattern=None, **args):
args["func"] = lambda e: e.via_bot_id is None
stack = inspect.stack()
previous_stack_frame = stack[1]
file_test = Path(previous_stack_frame.filename)
file_test = file_test.stem.replace(".py", "")
allow_sudo = args.get("allow_sudo", False)
if pattern is not None:
if pattern.startswith("\#"):
args["pattern"] = re.compile(pattern)
else:
args["pattern"] = re.compile("\." + pattern)
cmd = "." + pattern
try:
CMD_LIST[file_test].append(cmd)
except:
CMD_LIST.update({file_test: [cmd]})
if "trigger_on_inline" in args:
del args['trigger_on_inline']
args["outgoing"] = True
if allow_sudo:
args["from_users"] = list(Config.SUDO_USERS)
args["incoming"] = True
del args["allow_sudo"]
elif "incoming" in args and not args["incoming"]:
args["outgoing"] = True
allow_edited_updates = False
if "allow_edited_updates" in args and args["allow_edited_updates"]:
allow_edited_updates = args["allow_edited_updates"]
del args["allow_edited_updates"]
is_message_enabled = True
return events.NewMessage(**args)
def javess(**args):
args["func"] = lambda e: e.via_bot_id is None
stack = inspect.stack()
previous_stack_frame = stack[1]
file_test = Path(previous_stack_frame.filename)
file_test = file_test.stem.replace(".py", "")
pattern = args.get("pattern", None)
pattern = args.get('pattern', None)
disable_edited = args.get('disable_edited', True)
groups_only = args.get('groups_only', False)
trigger_on_fwd = args.get('trigger_on_fwd', False)
trigger_on_inline = args.get('trigger_on_inline', False)
disable_errors = args.get('disable_errors', False)
reg = re.compile('(.*)')
if not pattern == None:
try:
cmd = re.search(reg, pattern)
try:
cmd = cmd.group(1).replace("$", "").replace("\\", "").replace("^", "")
except:
pass
try:
CMD_LIST[file_test].append(cmd)
except:
CMD_LIST.update({file_test: [cmd]})
except:
pass
if pattern is not None and not pattern.startswith('(?i)'):
args['pattern'] = '(?i)' + pattern
if "trigger_on_inline" in args:
del args['trigger_on_inline']
if "disable_edited" in args:
del args['disable_edited']
if "groups_only" in args:
del args['groups_only']
if "disable_errors" in args:
del args['disable_errors']
if "trigger_on_fwd" in args:
del args['trigger_on_fwd']
def decorator(func):
async def wrapper(check):
if LOGSPAMMER:
send_to = BOTLOG_CHATID
if not trigger_on_fwd and check.fwd_from:
return
if check.via_bot_id and not trigger_on_inline:
return
if groups_only and not check.is_group:
await check.respond("`I don't think this is a group.`")
return
try:
await func(check)
except events.StopPropagation:
raise events.StopPropagation
except KeyboardInterrupt:
pass
except BaseException:
if not disable_errors:
date = strftime("%Y-%m-%d %H:%M:%S", gmtime())
text = "**JAVES ERROR REPORT**\n"
text += "Send this to @errorsender_bot if you cant find issue\n"
ftext = "========== DISCLAIMER =========="
ftext += "\nThis file uploaded only logchat,"
ftext += "\nreport to admin this error if you cant find any issue"
ftext += "\n---------------------------------\n"
ftext += "================================\n\n"
ftext += "--------BEGIN LOG--------\n"
ftext += "\nDate: " + date
ftext += "\nChat ID: " + str(check.chat_id)
ftext += "\nSender ID: " + str(check.sender_id)
ftext += "\n\nEvent Trigger:\n"
ftext += str(check.text)
ftext += "\n\nTraceback info:\n"
ftext += str(format_exc())
ftext += "\n\nError text:\n"
ftext += str(sys.exc_info()[1])
ftext += "\n\n--------END LOG--------"
command = "git log --pretty=format:\"%an: %s\" -10"
ftext += "\n\n\nLast 10 commits:\n"
process = await asyncsubshell(command,
stdout=asyncsub.PIPE,
stderr=asyncsub.PIPE)
stdout, stderr = await process.communicate()
result = str(stdout.decode().strip()) \
+ str(stderr.decode().strip())
ftext += result
file = open("javes_error.log", "w+")
file.write(ftext)
file.close()
try:
await check.client.send_file(send_to, "javes_error.log", caption=text)
remove("javes_error.log")
except:
pass
else:
pass
if not disable_edited:
bot.add_event_handler(wrapper, events.MessageEdited(**args))
bot.add_event_handler(wrapper, events.NewMessage(**args))
if client2:
client2.add_event_handler(wrapper, events.NewMessage(**args))
if client3:
client3.add_event_handler(wrapper, events.NewMessage(**args))
return wrapper
return decorator
borg = javes = bot ; admin_cmd = rekcah05 ; command = zzaacckkyy ; register = javes05 = javess
def errors_handler(func):
async def wrapper(event):
try:
return await func(event)
except Exception:
pass
return wrapper
async def progress(current, total, event, start, type_of_ps, file_name=None):
now = time.time()
diff = now - start
if round(diff % 10.00) == 0 or current == total:
percentage = current * 100 / total
speed = current / diff
elapsed_time = round(diff) * 1000
time_to_completion = round((total - current) / speed) * 1000
estimated_total_time = elapsed_time + time_to_completion
progress_str = "[{0}{1}] {2}%\n".format(
''.join(["█" for i in range(math.floor(percentage / 10))]),
''.join(["░" for i in range(10 - math.floor(percentage / 10))]),
round(percentage, 2))
tmp = progress_str + \
"{0} of {1}\nETA: {2}".format(
humanbytes(current),
humanbytes(total),
time_formatter(estimated_total_time)
)
if file_name:
await event.edit("{}\nFile Name: `{}`\n{}".format(
type_of_ps, file_name, tmp))
else:
await event.edit("{}\n{}".format(type_of_ps, tmp))
def humanbytes(size):
if not size:
return ""
power = 2**10
raised_to_pow = 0
dict_power_n = {0: "", 1: "Ki", 2: "Mi", 3: "Gi", 4: "Ti"}
while size > power:
size | return str(round(size, 2)) + " " + dict_power_n[raised_to_pow] + "B"
def time_formatter(milliseconds: int) -> str:
seconds, milliseconds = divmod(int(milliseconds), 1000)
minutes, seconds = divmod(seconds, 60)
hours, minutes = divmod(minutes, 60)
days, hours = divmod(hours, 24)
tmp = ((str(days) + " day(s), ") if days else "") + \
((str(hours) + " hour(s), ") if hours else "") + \
((str(minutes) + " minute(s), ") if minutes else "") + \
((str(seconds) + " second(s), ") if seconds else "") + \
((str(milliseconds) + " millisecond(s), ") if milliseconds else "")
return tmp[:-2]
class Loader():
def __init__(self, func=None, **args):
self.Var = Var
bot.add_event_handler(func, events.NewMessage(**args))
data = json.load(open("ub/javes_main/extra/meaning.json"))
def meaning(w):
w = w.lower()
if w in data:
return data[w]
| /= power
raised_to_pow += 1
| conditional_block |
kuberuntime_sandbox.go | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kuberuntime
import (
"context"
"fmt"
"net/url"
"runtime"
"sort"
v1 "k8s.io/api/core/v1"
kubetypes "k8s.io/apimachinery/pkg/types"
utilfeature "k8s.io/apiserver/pkg/util/feature"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/features"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
runtimeutil "k8s.io/kubernetes/pkg/kubelet/kuberuntime/util"
"k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/pkg/kubelet/util"
"k8s.io/kubernetes/pkg/kubelet/util/format"
netutils "k8s.io/utils/net"
)
// createPodSandbox creates a pod sandbox and returns (podSandBoxID, message, error).
func (m *kubeGenericRuntimeManager) | (ctx context.Context, pod *v1.Pod, attempt uint32) (string, string, error) {
podSandboxConfig, err := m.generatePodSandboxConfig(pod, attempt)
if err != nil {
message := fmt.Sprintf("Failed to generate sandbox config for pod %q: %v", format.Pod(pod), err)
klog.ErrorS(err, "Failed to generate sandbox config for pod", "pod", klog.KObj(pod))
return "", message, err
}
// Create pod logs directory
err = m.osInterface.MkdirAll(podSandboxConfig.LogDirectory, 0755)
if err != nil {
message := fmt.Sprintf("Failed to create log directory for pod %q: %v", format.Pod(pod), err)
klog.ErrorS(err, "Failed to create log directory for pod", "pod", klog.KObj(pod))
return "", message, err
}
runtimeHandler := ""
if m.runtimeClassManager != nil {
runtimeHandler, err = m.runtimeClassManager.LookupRuntimeHandler(pod.Spec.RuntimeClassName)
if err != nil {
message := fmt.Sprintf("Failed to create sandbox for pod %q: %v", format.Pod(pod), err)
return "", message, err
}
if runtimeHandler != "" {
klog.V(2).InfoS("Running pod with runtime handler", "pod", klog.KObj(pod), "runtimeHandler", runtimeHandler)
}
}
podSandBoxID, err := m.runtimeService.RunPodSandbox(ctx, podSandboxConfig, runtimeHandler)
if err != nil {
message := fmt.Sprintf("Failed to create sandbox for pod %q: %v", format.Pod(pod), err)
klog.ErrorS(err, "Failed to create sandbox for pod", "pod", klog.KObj(pod))
return "", message, err
}
return podSandBoxID, "", nil
}
// generatePodSandboxConfig generates pod sandbox config from v1.Pod.
func (m *kubeGenericRuntimeManager) generatePodSandboxConfig(pod *v1.Pod, attempt uint32) (*runtimeapi.PodSandboxConfig, error) {
// TODO: deprecating podsandbox resource requirements in favor of the pod level cgroup
// Refer https://github.com/kubernetes/kubernetes/issues/29871
podUID := string(pod.UID)
podSandboxConfig := &runtimeapi.PodSandboxConfig{
Metadata: &runtimeapi.PodSandboxMetadata{
Name: pod.Name,
Namespace: pod.Namespace,
Uid: podUID,
Attempt: attempt,
},
Labels: newPodLabels(pod),
Annotations: newPodAnnotations(pod),
}
dnsConfig, err := m.runtimeHelper.GetPodDNS(pod)
if err != nil {
return nil, err
}
podSandboxConfig.DnsConfig = dnsConfig
if !kubecontainer.IsHostNetworkPod(pod) {
// TODO: Add domain support in new runtime interface
podHostname, podDomain, err := m.runtimeHelper.GeneratePodHostNameAndDomain(pod)
if err != nil {
return nil, err
}
podHostname, err = util.GetNodenameForKernel(podHostname, podDomain, pod.Spec.SetHostnameAsFQDN)
if err != nil {
return nil, err
}
podSandboxConfig.Hostname = podHostname
}
logDir := BuildPodLogsDirectory(pod.Namespace, pod.Name, pod.UID)
podSandboxConfig.LogDirectory = logDir
portMappings := []*runtimeapi.PortMapping{}
for _, c := range pod.Spec.Containers {
containerPortMappings := kubecontainer.MakePortMappings(&c)
for idx := range containerPortMappings {
port := containerPortMappings[idx]
hostPort := int32(port.HostPort)
containerPort := int32(port.ContainerPort)
protocol := toRuntimeProtocol(port.Protocol)
portMappings = append(portMappings, &runtimeapi.PortMapping{
HostIp: port.HostIP,
HostPort: hostPort,
ContainerPort: containerPort,
Protocol: protocol,
})
}
}
if len(portMappings) > 0 {
podSandboxConfig.PortMappings = portMappings
}
lc, err := m.generatePodSandboxLinuxConfig(pod)
if err != nil {
return nil, err
}
podSandboxConfig.Linux = lc
if runtime.GOOS == "windows" {
wc, err := m.generatePodSandboxWindowsConfig(pod)
if err != nil {
return nil, err
}
podSandboxConfig.Windows = wc
}
// Update config to include overhead, sandbox level resources
if err := m.applySandboxResources(pod, podSandboxConfig); err != nil {
return nil, err
}
return podSandboxConfig, nil
}
// generatePodSandboxLinuxConfig generates LinuxPodSandboxConfig from v1.Pod.
// We've to call PodSandboxLinuxConfig always irrespective of the underlying OS as securityContext is not part of
// podSandboxConfig. It is currently part of LinuxPodSandboxConfig. In future, if we have securityContext pulled out
// in podSandboxConfig we should be able to use it.
func (m *kubeGenericRuntimeManager) generatePodSandboxLinuxConfig(pod *v1.Pod) (*runtimeapi.LinuxPodSandboxConfig, error) {
cgroupParent := m.runtimeHelper.GetPodCgroupParent(pod)
lc := &runtimeapi.LinuxPodSandboxConfig{
CgroupParent: cgroupParent,
SecurityContext: &runtimeapi.LinuxSandboxSecurityContext{
Privileged: kubecontainer.HasPrivilegedContainer(pod),
// Forcing sandbox to run as `runtime/default` allow users to
// use least privileged seccomp profiles at pod level. Issue #84623
Seccomp: &runtimeapi.SecurityProfile{
ProfileType: runtimeapi.SecurityProfile_RuntimeDefault,
},
},
}
sysctls := make(map[string]string)
if pod.Spec.SecurityContext != nil {
for _, c := range pod.Spec.SecurityContext.Sysctls {
sysctls[c.Name] = c.Value
}
}
lc.Sysctls = sysctls
if pod.Spec.SecurityContext != nil {
sc := pod.Spec.SecurityContext
if sc.RunAsUser != nil && runtime.GOOS != "windows" {
lc.SecurityContext.RunAsUser = &runtimeapi.Int64Value{Value: int64(*sc.RunAsUser)}
}
if sc.RunAsGroup != nil && runtime.GOOS != "windows" {
lc.SecurityContext.RunAsGroup = &runtimeapi.Int64Value{Value: int64(*sc.RunAsGroup)}
}
namespaceOptions, err := runtimeutil.NamespacesForPod(pod, m.runtimeHelper)
if err != nil {
return nil, err
}
lc.SecurityContext.NamespaceOptions = namespaceOptions
if sc.FSGroup != nil && runtime.GOOS != "windows" {
lc.SecurityContext.SupplementalGroups = append(lc.SecurityContext.SupplementalGroups, int64(*sc.FSGroup))
}
if groups := m.runtimeHelper.GetExtraSupplementalGroupsForPod(pod); len(groups) > 0 {
lc.SecurityContext.SupplementalGroups = append(lc.SecurityContext.SupplementalGroups, groups...)
}
if sc.SupplementalGroups != nil {
for _, sg := range sc.SupplementalGroups {
lc.SecurityContext.SupplementalGroups = append(lc.SecurityContext.SupplementalGroups, int64(sg))
}
}
if sc.SELinuxOptions != nil && runtime.GOOS != "windows" {
lc.SecurityContext.SelinuxOptions = &runtimeapi.SELinuxOption{
User: sc.SELinuxOptions.User,
Role: sc.SELinuxOptions.Role,
Type: sc.SELinuxOptions.Type,
Level: sc.SELinuxOptions.Level,
}
}
}
return lc, nil
}
// generatePodSandboxWindowsConfig generates WindowsPodSandboxConfig from v1.Pod.
// On Windows this will get called in addition to LinuxPodSandboxConfig because not all relevant fields have been added to
// WindowsPodSandboxConfig at this time.
func (m *kubeGenericRuntimeManager) generatePodSandboxWindowsConfig(pod *v1.Pod) (*runtimeapi.WindowsPodSandboxConfig, error) {
wc := &runtimeapi.WindowsPodSandboxConfig{
SecurityContext: &runtimeapi.WindowsSandboxSecurityContext{},
}
if utilfeature.DefaultFeatureGate.Enabled(features.WindowsHostNetwork) {
wc.SecurityContext.NamespaceOptions = &runtimeapi.WindowsNamespaceOption{}
if kubecontainer.IsHostNetworkPod(pod) {
wc.SecurityContext.NamespaceOptions.Network = runtimeapi.NamespaceMode_NODE
} else {
wc.SecurityContext.NamespaceOptions.Network = runtimeapi.NamespaceMode_POD
}
}
// If all of the containers in a pod are HostProcess containers, set the pod's HostProcess field
// explicitly because the container runtime requires this information at sandbox creation time.
if kubecontainer.HasWindowsHostProcessContainer(pod) {
// At present Windows all containers in a Windows pod must be HostProcess containers
// and HostNetwork is required to be set.
if !kubecontainer.AllContainersAreWindowsHostProcess(pod) {
return nil, fmt.Errorf("pod must not contain both HostProcess and non-HostProcess containers")
}
if !kubecontainer.IsHostNetworkPod(pod) {
return nil, fmt.Errorf("hostNetwork is required if Pod contains HostProcess containers")
}
wc.SecurityContext.HostProcess = true
}
sc := pod.Spec.SecurityContext
if sc == nil || sc.WindowsOptions == nil {
return wc, nil
}
wo := sc.WindowsOptions
if wo.GMSACredentialSpec != nil {
wc.SecurityContext.CredentialSpec = *wo.GMSACredentialSpec
}
if wo.RunAsUserName != nil {
wc.SecurityContext.RunAsUsername = *wo.RunAsUserName
}
if kubecontainer.HasWindowsHostProcessContainer(pod) {
if wo.HostProcess != nil && !*wo.HostProcess {
return nil, fmt.Errorf("pod must not contain any HostProcess containers if Pod's WindowsOptions.HostProcess is set to false")
}
}
return wc, nil
}
// getKubeletSandboxes lists all (or just the running) sandboxes managed by kubelet.
func (m *kubeGenericRuntimeManager) getKubeletSandboxes(ctx context.Context, all bool) ([]*runtimeapi.PodSandbox, error) {
	// Unless the caller asked for everything, restrict the listing to
	// sandboxes currently in the READY state.
	var filter *runtimeapi.PodSandboxFilter
	if !all {
		ready := runtimeapi.PodSandboxState_SANDBOX_READY
		filter = &runtimeapi.PodSandboxFilter{
			State: &runtimeapi.PodSandboxStateValue{State: ready},
		}
	}

	sandboxes, err := m.runtimeService.ListPodSandbox(ctx, filter)
	if err != nil {
		klog.ErrorS(err, "Failed to list pod sandboxes")
		return nil, err
	}
	return sandboxes, nil
}
// determinePodSandboxIPs determines the IP addresses of the given pod sandbox.
// It returns an empty slice when the sandbox carries no network status, and nil
// when the runtime reports any IP that cannot be parsed.
func (m *kubeGenericRuntimeManager) determinePodSandboxIPs(podNamespace, podName string, podSandbox *runtimeapi.PodSandboxStatus) []string {
	podIPs := make([]string, 0)
	if podSandbox.Network == nil {
		klog.InfoS("Pod Sandbox status doesn't have network information, cannot report IPs", "pod", klog.KRef(podNamespace, podName))
		return podIPs
	}

	// ip could be an empty string if runtime is not responsible for the
	// IP (e.g., host networking).

	// pick primary IP
	if len(podSandbox.Network.Ip) != 0 {
		// An unparseable primary IP invalidates the whole result (nil, not empty).
		if netutils.ParseIPSloppy(podSandbox.Network.Ip) == nil {
			klog.InfoS("Pod Sandbox reported an unparseable primary IP", "pod", klog.KRef(podNamespace, podName), "IP", podSandbox.Network.Ip)
			return nil
		}
		podIPs = append(podIPs, podSandbox.Network.Ip)
	}

	// pick additional ips, if cri reported them
	for _, podIP := range podSandbox.Network.AdditionalIps {
		// Likewise, one bad additional IP discards everything collected so far.
		if nil == netutils.ParseIPSloppy(podIP.Ip) {
			klog.InfoS("Pod Sandbox reported an unparseable additional IP", "pod", klog.KRef(podNamespace, podName), "IP", podIP.Ip)
			return nil
		}
		podIPs = append(podIPs, podIP.Ip)
	}

	return podIPs
}
// getSandboxIDByPodUID gets the sandbox ids by podUID and returns ([]sandboxID, error).
// Param state could be nil in order to get all sandboxes belonging to same pod.
// Returns (nil, nil) when no sandbox matches the pod UID.
func (m *kubeGenericRuntimeManager) getSandboxIDByPodUID(ctx context.Context, podUID kubetypes.UID, state *runtimeapi.PodSandboxState) ([]string, error) {
	// Match sandboxes via the pod-UID label the kubelet attaches at creation time.
	filter := &runtimeapi.PodSandboxFilter{
		LabelSelector: map[string]string{types.KubernetesPodUIDLabel: string(podUID)},
	}
	if state != nil {
		filter.State = &runtimeapi.PodSandboxStateValue{
			State: *state,
		}
	}
	sandboxes, err := m.runtimeService.ListPodSandbox(ctx, filter)
	if err != nil {
		klog.ErrorS(err, "Failed to list sandboxes for pod", "podUID", podUID)
		return nil, err
	}

	if len(sandboxes) == 0 {
		return nil, nil
	}

	// Sort with newest first.
	sandboxIDs := make([]string, len(sandboxes))
	sort.Sort(podSandboxByCreated(sandboxes))
	for i, s := range sandboxes {
		sandboxIDs[i] = s.Id
	}

	return sandboxIDs, nil
}
// GetPortForward gets the endpoint the runtime will serve the port-forward request from.
func (m *kubeGenericRuntimeManager) GetPortForward(ctx context.Context, podName, podNamespace string, podUID kubetypes.UID, ports []int32) (*url.URL, error) {
	// Resolve the pod's sandboxes; the newest one (index 0) serves the request.
	sandboxIDs, err := m.getSandboxIDByPodUID(ctx, podUID, nil)
	switch {
	case err != nil:
		return nil, fmt.Errorf("failed to find sandboxID for pod %s: %v", format.PodDesc(podName, podNamespace, podUID), err)
	case len(sandboxIDs) == 0:
		return nil, fmt.Errorf("failed to find sandboxID for pod %s", format.PodDesc(podName, podNamespace, podUID))
	}

	response, err := m.runtimeService.PortForward(ctx, &runtimeapi.PortForwardRequest{
		PodSandboxId: sandboxIDs[0],
		Port:         ports,
	})
	if err != nil {
		return nil, err
	}
	// The runtime answers with the URL that the kubelet hands back to the API server.
	return url.Parse(response.Url)
}
| createPodSandbox | identifier_name |
kuberuntime_sandbox.go | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kuberuntime
import (
"context"
"fmt"
"net/url"
"runtime"
"sort"
v1 "k8s.io/api/core/v1"
kubetypes "k8s.io/apimachinery/pkg/types"
utilfeature "k8s.io/apiserver/pkg/util/feature"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/features"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
runtimeutil "k8s.io/kubernetes/pkg/kubelet/kuberuntime/util"
"k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/pkg/kubelet/util"
"k8s.io/kubernetes/pkg/kubelet/util/format"
netutils "k8s.io/utils/net"
)
// createPodSandbox creates a pod sandbox and returns (podSandBoxID, message, error).
// The message is a human-readable failure description suitable for surfacing in events.
func (m *kubeGenericRuntimeManager) createPodSandbox(ctx context.Context, pod *v1.Pod, attempt uint32) (string, string, error) {
	podSandboxConfig, err := m.generatePodSandboxConfig(pod, attempt)
	if err != nil {
		message := fmt.Sprintf("Failed to generate sandbox config for pod %q: %v", format.Pod(pod), err)
		klog.ErrorS(err, "Failed to generate sandbox config for pod", "pod", klog.KObj(pod))
		return "", message, err
	}

	// Create pod logs directory
	err = m.osInterface.MkdirAll(podSandboxConfig.LogDirectory, 0755)
	if err != nil {
		message := fmt.Sprintf("Failed to create log directory for pod %q: %v", format.Pod(pod), err)
		klog.ErrorS(err, "Failed to create log directory for pod", "pod", klog.KObj(pod))
		return "", message, err
	}

	// Resolve the pod's RuntimeClass (if any) to a runtime handler understood by the CRI.
	runtimeHandler := ""
	if m.runtimeClassManager != nil {
		runtimeHandler, err = m.runtimeClassManager.LookupRuntimeHandler(pod.Spec.RuntimeClassName)
		if err != nil {
			message := fmt.Sprintf("Failed to create sandbox for pod %q: %v", format.Pod(pod), err)
			return "", message, err
		}
		if runtimeHandler != "" {
			klog.V(2).InfoS("Running pod with runtime handler", "pod", klog.KObj(pod), "runtimeHandler", runtimeHandler)
		}
	}

	podSandBoxID, err := m.runtimeService.RunPodSandbox(ctx, podSandboxConfig, runtimeHandler)
	if err != nil {
		message := fmt.Sprintf("Failed to create sandbox for pod %q: %v", format.Pod(pod), err)
		klog.ErrorS(err, "Failed to create sandbox for pod", "pod", klog.KObj(pod))
		return "", message, err
	}

	return podSandBoxID, "", nil
}
// generatePodSandboxConfig generates pod sandbox config from v1.Pod.
func (m *kubeGenericRuntimeManager) generatePodSandboxConfig(pod *v1.Pod, attempt uint32) (*runtimeapi.PodSandboxConfig, error) {
// TODO: deprecating podsandbox resource requirements in favor of the pod level cgroup
// Refer https://github.com/kubernetes/kubernetes/issues/29871
podUID := string(pod.UID)
podSandboxConfig := &runtimeapi.PodSandboxConfig{
Metadata: &runtimeapi.PodSandboxMetadata{
Name: pod.Name,
Namespace: pod.Namespace,
Uid: podUID,
Attempt: attempt,
},
Labels: newPodLabels(pod),
Annotations: newPodAnnotations(pod),
}
dnsConfig, err := m.runtimeHelper.GetPodDNS(pod)
if err != nil {
return nil, err
}
podSandboxConfig.DnsConfig = dnsConfig
if !kubecontainer.IsHostNetworkPod(pod) {
// TODO: Add domain support in new runtime interface
podHostname, podDomain, err := m.runtimeHelper.GeneratePodHostNameAndDomain(pod)
if err != nil {
return nil, err
}
podHostname, err = util.GetNodenameForKernel(podHostname, podDomain, pod.Spec.SetHostnameAsFQDN)
if err != nil {
return nil, err
}
podSandboxConfig.Hostname = podHostname
}
logDir := BuildPodLogsDirectory(pod.Namespace, pod.Name, pod.UID)
podSandboxConfig.LogDirectory = logDir
|
for idx := range containerPortMappings {
port := containerPortMappings[idx]
hostPort := int32(port.HostPort)
containerPort := int32(port.ContainerPort)
protocol := toRuntimeProtocol(port.Protocol)
portMappings = append(portMappings, &runtimeapi.PortMapping{
HostIp: port.HostIP,
HostPort: hostPort,
ContainerPort: containerPort,
Protocol: protocol,
})
}
}
if len(portMappings) > 0 {
podSandboxConfig.PortMappings = portMappings
}
lc, err := m.generatePodSandboxLinuxConfig(pod)
if err != nil {
return nil, err
}
podSandboxConfig.Linux = lc
if runtime.GOOS == "windows" {
wc, err := m.generatePodSandboxWindowsConfig(pod)
if err != nil {
return nil, err
}
podSandboxConfig.Windows = wc
}
// Update config to include overhead, sandbox level resources
if err := m.applySandboxResources(pod, podSandboxConfig); err != nil {
return nil, err
}
return podSandboxConfig, nil
}
// generatePodSandboxLinuxConfig generates LinuxPodSandboxConfig from v1.Pod.
// We've to call PodSandboxLinuxConfig always irrespective of the underlying OS as securityContext is not part of
// podSandboxConfig. It is currently part of LinuxPodSandboxConfig. In future, if we have securityContext pulled out
// in podSandboxConfig we should be able to use it.
func (m *kubeGenericRuntimeManager) generatePodSandboxLinuxConfig(pod *v1.Pod) (*runtimeapi.LinuxPodSandboxConfig, error) {
	cgroupParent := m.runtimeHelper.GetPodCgroupParent(pod)
	lc := &runtimeapi.LinuxPodSandboxConfig{
		CgroupParent: cgroupParent,
		SecurityContext: &runtimeapi.LinuxSandboxSecurityContext{
			Privileged: kubecontainer.HasPrivilegedContainer(pod),

			// Forcing sandbox to run as `runtime/default` allow users to
			// use least privileged seccomp profiles at pod level. Issue #84623
			Seccomp: &runtimeapi.SecurityProfile{
				ProfileType: runtimeapi.SecurityProfile_RuntimeDefault,
			},
		},
	}

	// Copy pod-level sysctls into the sandbox config (always assigned, possibly empty).
	sysctls := make(map[string]string)
	if pod.Spec.SecurityContext != nil {
		for _, c := range pod.Spec.SecurityContext.Sysctls {
			sysctls[c.Name] = c.Value
		}
	}
	lc.Sysctls = sysctls

	if pod.Spec.SecurityContext != nil {
		sc := pod.Spec.SecurityContext
		// RunAsUser/RunAsGroup are Linux-only; skipped when compiled for Windows.
		if sc.RunAsUser != nil && runtime.GOOS != "windows" {
			lc.SecurityContext.RunAsUser = &runtimeapi.Int64Value{Value: int64(*sc.RunAsUser)}
		}
		if sc.RunAsGroup != nil && runtime.GOOS != "windows" {
			lc.SecurityContext.RunAsGroup = &runtimeapi.Int64Value{Value: int64(*sc.RunAsGroup)}
		}
		namespaceOptions, err := runtimeutil.NamespacesForPod(pod, m.runtimeHelper)
		if err != nil {
			return nil, err
		}
		lc.SecurityContext.NamespaceOptions = namespaceOptions

		// Supplemental groups are appended in a fixed order: FSGroup first,
		// then groups from the runtime helper, then the explicit list.
		if sc.FSGroup != nil && runtime.GOOS != "windows" {
			lc.SecurityContext.SupplementalGroups = append(lc.SecurityContext.SupplementalGroups, int64(*sc.FSGroup))
		}
		if groups := m.runtimeHelper.GetExtraSupplementalGroupsForPod(pod); len(groups) > 0 {
			lc.SecurityContext.SupplementalGroups = append(lc.SecurityContext.SupplementalGroups, groups...)
		}
		if sc.SupplementalGroups != nil {
			for _, sg := range sc.SupplementalGroups {
				lc.SecurityContext.SupplementalGroups = append(lc.SecurityContext.SupplementalGroups, int64(sg))
			}
		}
		if sc.SELinuxOptions != nil && runtime.GOOS != "windows" {
			lc.SecurityContext.SelinuxOptions = &runtimeapi.SELinuxOption{
				User:  sc.SELinuxOptions.User,
				Role:  sc.SELinuxOptions.Role,
				Type:  sc.SELinuxOptions.Type,
				Level: sc.SELinuxOptions.Level,
			}
		}
	}

	return lc, nil
}
// generatePodSandboxWindowsConfig generates WindowsPodSandboxConfig from v1.Pod.
// On Windows this will get called in addition to LinuxPodSandboxConfig because not all relevant fields have been added to
// WindowsPodSandboxConfig at this time.
func (m *kubeGenericRuntimeManager) generatePodSandboxWindowsConfig(pod *v1.Pod) (*runtimeapi.WindowsPodSandboxConfig, error) {
	wc := &runtimeapi.WindowsPodSandboxConfig{
		SecurityContext: &runtimeapi.WindowsSandboxSecurityContext{},
	}

	// Only set the network namespace mode when the WindowsHostNetwork feature
	// gate is enabled; otherwise NamespaceOptions is left nil.
	if utilfeature.DefaultFeatureGate.Enabled(features.WindowsHostNetwork) {
		wc.SecurityContext.NamespaceOptions = &runtimeapi.WindowsNamespaceOption{}
		if kubecontainer.IsHostNetworkPod(pod) {
			wc.SecurityContext.NamespaceOptions.Network = runtimeapi.NamespaceMode_NODE
		} else {
			wc.SecurityContext.NamespaceOptions.Network = runtimeapi.NamespaceMode_POD
		}
	}

	// If all of the containers in a pod are HostProcess containers, set the pod's HostProcess field
	// explicitly because the container runtime requires this information at sandbox creation time.
	if kubecontainer.HasWindowsHostProcessContainer(pod) {
		// At present Windows all containers in a Windows pod must be HostProcess containers
		// and HostNetwork is required to be set.
		if !kubecontainer.AllContainersAreWindowsHostProcess(pod) {
			return nil, fmt.Errorf("pod must not contain both HostProcess and non-HostProcess containers")
		}
		if !kubecontainer.IsHostNetworkPod(pod) {
			return nil, fmt.Errorf("hostNetwork is required if Pod contains HostProcess containers")
		}
		wc.SecurityContext.HostProcess = true
	}

	// No pod-level Windows options to copy; return the config built so far.
	sc := pod.Spec.SecurityContext
	if sc == nil || sc.WindowsOptions == nil {
		return wc, nil
	}

	wo := sc.WindowsOptions
	if wo.GMSACredentialSpec != nil {
		wc.SecurityContext.CredentialSpec = *wo.GMSACredentialSpec
	}
	if wo.RunAsUserName != nil {
		wc.SecurityContext.RunAsUsername = *wo.RunAsUserName
	}
	if kubecontainer.HasWindowsHostProcessContainer(pod) {
		// An explicit pod-level HostProcess=false contradicts the presence of
		// HostProcess containers, so reject the pod.
		if wo.HostProcess != nil && !*wo.HostProcess {
			return nil, fmt.Errorf("pod must not contain any HostProcess containers if Pod's WindowsOptions.HostProcess is set to false")
		}
	}

	return wc, nil
}
// getKubeletSandboxes lists all (or just the running) sandboxes managed by kubelet.
func (m *kubeGenericRuntimeManager) getKubeletSandboxes(ctx context.Context, all bool) ([]*runtimeapi.PodSandbox, error) {
	// Unless the caller asked for everything, restrict the listing to
	// sandboxes currently in the READY state.
	var filter *runtimeapi.PodSandboxFilter
	if !all {
		ready := runtimeapi.PodSandboxState_SANDBOX_READY
		filter = &runtimeapi.PodSandboxFilter{
			State: &runtimeapi.PodSandboxStateValue{State: ready},
		}
	}

	sandboxes, err := m.runtimeService.ListPodSandbox(ctx, filter)
	if err != nil {
		klog.ErrorS(err, "Failed to list pod sandboxes")
		return nil, err
	}
	return sandboxes, nil
}
// determinePodSandboxIPs determines the IP addresses of the given pod sandbox.
// It returns an empty slice when the sandbox carries no network status, and nil
// when the runtime reports any IP that cannot be parsed.
func (m *kubeGenericRuntimeManager) determinePodSandboxIPs(podNamespace, podName string, podSandbox *runtimeapi.PodSandboxStatus) []string {
	podIPs := make([]string, 0)
	if podSandbox.Network == nil {
		klog.InfoS("Pod Sandbox status doesn't have network information, cannot report IPs", "pod", klog.KRef(podNamespace, podName))
		return podIPs
	}

	// ip could be an empty string if runtime is not responsible for the
	// IP (e.g., host networking).

	// pick primary IP
	if len(podSandbox.Network.Ip) != 0 {
		// An unparseable primary IP invalidates the whole result (nil, not empty).
		if netutils.ParseIPSloppy(podSandbox.Network.Ip) == nil {
			klog.InfoS("Pod Sandbox reported an unparseable primary IP", "pod", klog.KRef(podNamespace, podName), "IP", podSandbox.Network.Ip)
			return nil
		}
		podIPs = append(podIPs, podSandbox.Network.Ip)
	}

	// pick additional ips, if cri reported them
	for _, podIP := range podSandbox.Network.AdditionalIps {
		// Likewise, one bad additional IP discards everything collected so far.
		if nil == netutils.ParseIPSloppy(podIP.Ip) {
			klog.InfoS("Pod Sandbox reported an unparseable additional IP", "pod", klog.KRef(podNamespace, podName), "IP", podIP.Ip)
			return nil
		}
		podIPs = append(podIPs, podIP.Ip)
	}

	return podIPs
}
// getSandboxIDByPodUID gets the sandbox ids by podUID and returns ([]sandboxID, error).
// Param state could be nil in order to get all sandboxes belonging to same pod.
// Returns (nil, nil) when no sandbox matches the pod UID.
func (m *kubeGenericRuntimeManager) getSandboxIDByPodUID(ctx context.Context, podUID kubetypes.UID, state *runtimeapi.PodSandboxState) ([]string, error) {
	// Match sandboxes via the pod-UID label the kubelet attaches at creation time.
	filter := &runtimeapi.PodSandboxFilter{
		LabelSelector: map[string]string{types.KubernetesPodUIDLabel: string(podUID)},
	}
	if state != nil {
		filter.State = &runtimeapi.PodSandboxStateValue{
			State: *state,
		}
	}
	sandboxes, err := m.runtimeService.ListPodSandbox(ctx, filter)
	if err != nil {
		klog.ErrorS(err, "Failed to list sandboxes for pod", "podUID", podUID)
		return nil, err
	}

	if len(sandboxes) == 0 {
		return nil, nil
	}

	// Sort with newest first.
	sandboxIDs := make([]string, len(sandboxes))
	sort.Sort(podSandboxByCreated(sandboxes))
	for i, s := range sandboxes {
		sandboxIDs[i] = s.Id
	}

	return sandboxIDs, nil
}
// GetPortForward gets the endpoint the runtime will serve the port-forward request from.
func (m *kubeGenericRuntimeManager) GetPortForward(ctx context.Context, podName, podNamespace string, podUID kubetypes.UID, ports []int32) (*url.URL, error) {
sandboxIDs, err := m.getSandboxIDByPodUID(ctx, podUID, nil)
if err != nil {
return nil, fmt.Errorf("failed to find sandboxID for pod %s: %v", format.PodDesc(podName, podNamespace, podUID), err)
}
if len(sandboxIDs) == 0 {
return nil, fmt.Errorf("failed to find sandboxID for pod %s", format.PodDesc(podName, podNamespace, podUID))
}
req := &runtimeapi.PortForwardRequest{
PodSandboxId: sandboxIDs[0],
Port: ports,
}
resp, err := m.runtimeService.PortForward(ctx, req)
if err != nil {
return nil, err
}
return url.Parse(resp.Url)
} | portMappings := []*runtimeapi.PortMapping{}
for _, c := range pod.Spec.Containers {
containerPortMappings := kubecontainer.MakePortMappings(&c) | random_line_split |
kuberuntime_sandbox.go | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kuberuntime
import (
"context"
"fmt"
"net/url"
"runtime"
"sort"
v1 "k8s.io/api/core/v1"
kubetypes "k8s.io/apimachinery/pkg/types"
utilfeature "k8s.io/apiserver/pkg/util/feature"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/features"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
runtimeutil "k8s.io/kubernetes/pkg/kubelet/kuberuntime/util"
"k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/pkg/kubelet/util"
"k8s.io/kubernetes/pkg/kubelet/util/format"
netutils "k8s.io/utils/net"
)
// createPodSandbox creates a pod sandbox and returns (podSandBoxID, message, error).
// The message is a human-readable failure description suitable for surfacing in events.
func (m *kubeGenericRuntimeManager) createPodSandbox(ctx context.Context, pod *v1.Pod, attempt uint32) (string, string, error) {
	podSandboxConfig, err := m.generatePodSandboxConfig(pod, attempt)
	if err != nil {
		message := fmt.Sprintf("Failed to generate sandbox config for pod %q: %v", format.Pod(pod), err)
		klog.ErrorS(err, "Failed to generate sandbox config for pod", "pod", klog.KObj(pod))
		return "", message, err
	}

	// Create pod logs directory
	err = m.osInterface.MkdirAll(podSandboxConfig.LogDirectory, 0755)
	if err != nil {
		message := fmt.Sprintf("Failed to create log directory for pod %q: %v", format.Pod(pod), err)
		klog.ErrorS(err, "Failed to create log directory for pod", "pod", klog.KObj(pod))
		return "", message, err
	}

	// Resolve the pod's RuntimeClass (if any) to a runtime handler understood by the CRI.
	runtimeHandler := ""
	if m.runtimeClassManager != nil {
		runtimeHandler, err = m.runtimeClassManager.LookupRuntimeHandler(pod.Spec.RuntimeClassName)
		if err != nil {
			message := fmt.Sprintf("Failed to create sandbox for pod %q: %v", format.Pod(pod), err)
			return "", message, err
		}
		if runtimeHandler != "" {
			klog.V(2).InfoS("Running pod with runtime handler", "pod", klog.KObj(pod), "runtimeHandler", runtimeHandler)
		}
	}

	podSandBoxID, err := m.runtimeService.RunPodSandbox(ctx, podSandboxConfig, runtimeHandler)
	if err != nil {
		message := fmt.Sprintf("Failed to create sandbox for pod %q: %v", format.Pod(pod), err)
		klog.ErrorS(err, "Failed to create sandbox for pod", "pod", klog.KObj(pod))
		return "", message, err
	}

	return podSandBoxID, "", nil
}
// generatePodSandboxConfig generates pod sandbox config from v1.Pod.
// The config aggregates metadata, DNS, hostname, log directory, port mappings,
// the per-OS security configs, and sandbox-level resources.
func (m *kubeGenericRuntimeManager) generatePodSandboxConfig(pod *v1.Pod, attempt uint32) (*runtimeapi.PodSandboxConfig, error) {
	// TODO: deprecating podsandbox resource requirements in favor of the pod level cgroup
	// Refer https://github.com/kubernetes/kubernetes/issues/29871
	podUID := string(pod.UID)
	podSandboxConfig := &runtimeapi.PodSandboxConfig{
		Metadata: &runtimeapi.PodSandboxMetadata{
			Name:      pod.Name,
			Namespace: pod.Namespace,
			Uid:       podUID,
			Attempt:   attempt,
		},
		Labels:      newPodLabels(pod),
		Annotations: newPodAnnotations(pod),
	}

	dnsConfig, err := m.runtimeHelper.GetPodDNS(pod)
	if err != nil {
		return nil, err
	}
	podSandboxConfig.DnsConfig = dnsConfig

	// Host-network pods use the node's hostname; only non-host-network pods
	// get a pod-scoped hostname set on the sandbox.
	if !kubecontainer.IsHostNetworkPod(pod) {
		// TODO: Add domain support in new runtime interface
		podHostname, podDomain, err := m.runtimeHelper.GeneratePodHostNameAndDomain(pod)
		if err != nil {
			return nil, err
		}
		podHostname, err = util.GetNodenameForKernel(podHostname, podDomain, pod.Spec.SetHostnameAsFQDN)
		if err != nil {
			return nil, err
		}
		podSandboxConfig.Hostname = podHostname
	}

	logDir := BuildPodLogsDirectory(pod.Namespace, pod.Name, pod.UID)
	podSandboxConfig.LogDirectory = logDir

	// Aggregate the port mappings of every container into the sandbox config.
	portMappings := []*runtimeapi.PortMapping{}
	for _, c := range pod.Spec.Containers {
		containerPortMappings := kubecontainer.MakePortMappings(&c)

		for idx := range containerPortMappings {
			port := containerPortMappings[idx]
			hostPort := int32(port.HostPort)
			containerPort := int32(port.ContainerPort)
			protocol := toRuntimeProtocol(port.Protocol)
			portMappings = append(portMappings, &runtimeapi.PortMapping{
				HostIp:        port.HostIP,
				HostPort:      hostPort,
				ContainerPort: containerPort,
				Protocol:      protocol,
			})
		}
	}
	if len(portMappings) > 0 {
		podSandboxConfig.PortMappings = portMappings
	}

	// The Linux config is generated unconditionally (it also carries the
	// security context); the Windows config only on Windows hosts.
	lc, err := m.generatePodSandboxLinuxConfig(pod)
	if err != nil {
		return nil, err
	}
	podSandboxConfig.Linux = lc

	if runtime.GOOS == "windows" {
		wc, err := m.generatePodSandboxWindowsConfig(pod)
		if err != nil {
			return nil, err
		}
		podSandboxConfig.Windows = wc
	}

	// Update config to include overhead, sandbox level resources
	if err := m.applySandboxResources(pod, podSandboxConfig); err != nil {
		return nil, err
	}
	return podSandboxConfig, nil
}
// generatePodSandboxLinuxConfig generates LinuxPodSandboxConfig from v1.Pod.
// We've to call PodSandboxLinuxConfig always irrespective of the underlying OS as securityContext is not part of
// podSandboxConfig. It is currently part of LinuxPodSandboxConfig. In future, if we have securityContext pulled out
// in podSandboxConfig we should be able to use it.
func (m *kubeGenericRuntimeManager) generatePodSandboxLinuxConfig(pod *v1.Pod) (*runtimeapi.LinuxPodSandboxConfig, error) {
cgroupParent := m.runtimeHelper.GetPodCgroupParent(pod)
lc := &runtimeapi.LinuxPodSandboxConfig{
CgroupParent: cgroupParent,
SecurityContext: &runtimeapi.LinuxSandboxSecurityContext{
Privileged: kubecontainer.HasPrivilegedContainer(pod),
// Forcing sandbox to run as `runtime/default` allow users to
// use least privileged seccomp profiles at pod level. Issue #84623
Seccomp: &runtimeapi.SecurityProfile{
ProfileType: runtimeapi.SecurityProfile_RuntimeDefault,
},
},
}
sysctls := make(map[string]string)
if pod.Spec.SecurityContext != nil {
for _, c := range pod.Spec.SecurityContext.Sysctls {
sysctls[c.Name] = c.Value
}
}
lc.Sysctls = sysctls
if pod.Spec.SecurityContext != nil |
return lc, nil
}
// generatePodSandboxWindowsConfig generates WindowsPodSandboxConfig from v1.Pod.
// On Windows this will get called in addition to LinuxPodSandboxConfig because not all relevant fields have been added to
// WindowsPodSandboxConfig at this time.
func (m *kubeGenericRuntimeManager) generatePodSandboxWindowsConfig(pod *v1.Pod) (*runtimeapi.WindowsPodSandboxConfig, error) {
	wc := &runtimeapi.WindowsPodSandboxConfig{
		SecurityContext: &runtimeapi.WindowsSandboxSecurityContext{},
	}

	// Only set the network namespace mode when the WindowsHostNetwork feature
	// gate is enabled; otherwise NamespaceOptions is left nil.
	if utilfeature.DefaultFeatureGate.Enabled(features.WindowsHostNetwork) {
		wc.SecurityContext.NamespaceOptions = &runtimeapi.WindowsNamespaceOption{}
		if kubecontainer.IsHostNetworkPod(pod) {
			wc.SecurityContext.NamespaceOptions.Network = runtimeapi.NamespaceMode_NODE
		} else {
			wc.SecurityContext.NamespaceOptions.Network = runtimeapi.NamespaceMode_POD
		}
	}

	// If all of the containers in a pod are HostProcess containers, set the pod's HostProcess field
	// explicitly because the container runtime requires this information at sandbox creation time.
	if kubecontainer.HasWindowsHostProcessContainer(pod) {
		// At present Windows all containers in a Windows pod must be HostProcess containers
		// and HostNetwork is required to be set.
		if !kubecontainer.AllContainersAreWindowsHostProcess(pod) {
			return nil, fmt.Errorf("pod must not contain both HostProcess and non-HostProcess containers")
		}
		if !kubecontainer.IsHostNetworkPod(pod) {
			return nil, fmt.Errorf("hostNetwork is required if Pod contains HostProcess containers")
		}
		wc.SecurityContext.HostProcess = true
	}

	// No pod-level Windows options to copy; return the config built so far.
	sc := pod.Spec.SecurityContext
	if sc == nil || sc.WindowsOptions == nil {
		return wc, nil
	}

	wo := sc.WindowsOptions
	if wo.GMSACredentialSpec != nil {
		wc.SecurityContext.CredentialSpec = *wo.GMSACredentialSpec
	}
	if wo.RunAsUserName != nil {
		wc.SecurityContext.RunAsUsername = *wo.RunAsUserName
	}
	if kubecontainer.HasWindowsHostProcessContainer(pod) {
		// An explicit pod-level HostProcess=false contradicts the presence of
		// HostProcess containers, so reject the pod.
		if wo.HostProcess != nil && !*wo.HostProcess {
			return nil, fmt.Errorf("pod must not contain any HostProcess containers if Pod's WindowsOptions.HostProcess is set to false")
		}
	}

	return wc, nil
}
// getKubeletSandboxes lists all (or just the running) sandboxes managed by kubelet.
func (m *kubeGenericRuntimeManager) getKubeletSandboxes(ctx context.Context, all bool) ([]*runtimeapi.PodSandbox, error) {
	// Unless the caller asked for everything, restrict the listing to
	// sandboxes currently in the READY state.
	var filter *runtimeapi.PodSandboxFilter
	if !all {
		ready := runtimeapi.PodSandboxState_SANDBOX_READY
		filter = &runtimeapi.PodSandboxFilter{
			State: &runtimeapi.PodSandboxStateValue{State: ready},
		}
	}

	sandboxes, err := m.runtimeService.ListPodSandbox(ctx, filter)
	if err != nil {
		klog.ErrorS(err, "Failed to list pod sandboxes")
		return nil, err
	}
	return sandboxes, nil
}
// determinePodSandboxIPs determines the IP addresses of the given pod sandbox.
// It returns an empty slice when the sandbox carries no network status, and nil
// when the runtime reports any IP that cannot be parsed.
func (m *kubeGenericRuntimeManager) determinePodSandboxIPs(podNamespace, podName string, podSandbox *runtimeapi.PodSandboxStatus) []string {
	podIPs := make([]string, 0)
	if podSandbox.Network == nil {
		klog.InfoS("Pod Sandbox status doesn't have network information, cannot report IPs", "pod", klog.KRef(podNamespace, podName))
		return podIPs
	}

	// ip could be an empty string if runtime is not responsible for the
	// IP (e.g., host networking).

	// pick primary IP
	if len(podSandbox.Network.Ip) != 0 {
		// An unparseable primary IP invalidates the whole result (nil, not empty).
		if netutils.ParseIPSloppy(podSandbox.Network.Ip) == nil {
			klog.InfoS("Pod Sandbox reported an unparseable primary IP", "pod", klog.KRef(podNamespace, podName), "IP", podSandbox.Network.Ip)
			return nil
		}
		podIPs = append(podIPs, podSandbox.Network.Ip)
	}

	// pick additional ips, if cri reported them
	for _, podIP := range podSandbox.Network.AdditionalIps {
		// Likewise, one bad additional IP discards everything collected so far.
		if nil == netutils.ParseIPSloppy(podIP.Ip) {
			klog.InfoS("Pod Sandbox reported an unparseable additional IP", "pod", klog.KRef(podNamespace, podName), "IP", podIP.Ip)
			return nil
		}
		podIPs = append(podIPs, podIP.Ip)
	}

	return podIPs
}
// getSandboxIDByPodUID gets the sandbox ids by podUID and returns ([]sandboxID, error).
// Param state could be nil in order to get all sandboxes belonging to same pod.
// Returns (nil, nil) when no sandbox matches the pod UID.
func (m *kubeGenericRuntimeManager) getSandboxIDByPodUID(ctx context.Context, podUID kubetypes.UID, state *runtimeapi.PodSandboxState) ([]string, error) {
	// Match sandboxes via the pod-UID label the kubelet attaches at creation time.
	filter := &runtimeapi.PodSandboxFilter{
		LabelSelector: map[string]string{types.KubernetesPodUIDLabel: string(podUID)},
	}
	if state != nil {
		filter.State = &runtimeapi.PodSandboxStateValue{
			State: *state,
		}
	}
	sandboxes, err := m.runtimeService.ListPodSandbox(ctx, filter)
	if err != nil {
		klog.ErrorS(err, "Failed to list sandboxes for pod", "podUID", podUID)
		return nil, err
	}

	if len(sandboxes) == 0 {
		return nil, nil
	}

	// Sort with newest first.
	sandboxIDs := make([]string, len(sandboxes))
	sort.Sort(podSandboxByCreated(sandboxes))
	for i, s := range sandboxes {
		sandboxIDs[i] = s.Id
	}

	return sandboxIDs, nil
}
// GetPortForward gets the endpoint the runtime will serve the port-forward request from.
func (m *kubeGenericRuntimeManager) GetPortForward(ctx context.Context, podName, podNamespace string, podUID kubetypes.UID, ports []int32) (*url.URL, error) {
	// Resolve the pod's sandboxes; the newest one (index 0) serves the request.
	sandboxIDs, err := m.getSandboxIDByPodUID(ctx, podUID, nil)
	switch {
	case err != nil:
		return nil, fmt.Errorf("failed to find sandboxID for pod %s: %v", format.PodDesc(podName, podNamespace, podUID), err)
	case len(sandboxIDs) == 0:
		return nil, fmt.Errorf("failed to find sandboxID for pod %s", format.PodDesc(podName, podNamespace, podUID))
	}

	response, err := m.runtimeService.PortForward(ctx, &runtimeapi.PortForwardRequest{
		PodSandboxId: sandboxIDs[0],
		Port:         ports,
	})
	if err != nil {
		return nil, err
	}
	// The runtime answers with the URL that the kubelet hands back to the API server.
	return url.Parse(response.Url)
}
| {
sc := pod.Spec.SecurityContext
if sc.RunAsUser != nil && runtime.GOOS != "windows" {
lc.SecurityContext.RunAsUser = &runtimeapi.Int64Value{Value: int64(*sc.RunAsUser)}
}
if sc.RunAsGroup != nil && runtime.GOOS != "windows" {
lc.SecurityContext.RunAsGroup = &runtimeapi.Int64Value{Value: int64(*sc.RunAsGroup)}
}
namespaceOptions, err := runtimeutil.NamespacesForPod(pod, m.runtimeHelper)
if err != nil {
return nil, err
}
lc.SecurityContext.NamespaceOptions = namespaceOptions
if sc.FSGroup != nil && runtime.GOOS != "windows" {
lc.SecurityContext.SupplementalGroups = append(lc.SecurityContext.SupplementalGroups, int64(*sc.FSGroup))
}
if groups := m.runtimeHelper.GetExtraSupplementalGroupsForPod(pod); len(groups) > 0 {
lc.SecurityContext.SupplementalGroups = append(lc.SecurityContext.SupplementalGroups, groups...)
}
if sc.SupplementalGroups != nil {
for _, sg := range sc.SupplementalGroups {
lc.SecurityContext.SupplementalGroups = append(lc.SecurityContext.SupplementalGroups, int64(sg))
}
}
if sc.SELinuxOptions != nil && runtime.GOOS != "windows" {
lc.SecurityContext.SelinuxOptions = &runtimeapi.SELinuxOption{
User: sc.SELinuxOptions.User,
Role: sc.SELinuxOptions.Role,
Type: sc.SELinuxOptions.Type,
Level: sc.SELinuxOptions.Level,
}
}
} | conditional_block |
kuberuntime_sandbox.go | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kuberuntime
import (
"context"
"fmt"
"net/url"
"runtime"
"sort"
v1 "k8s.io/api/core/v1"
kubetypes "k8s.io/apimachinery/pkg/types"
utilfeature "k8s.io/apiserver/pkg/util/feature"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/features"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
runtimeutil "k8s.io/kubernetes/pkg/kubelet/kuberuntime/util"
"k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/pkg/kubelet/util"
"k8s.io/kubernetes/pkg/kubelet/util/format"
netutils "k8s.io/utils/net"
)
// createPodSandbox creates a pod sandbox and returns (podSandBoxID, message, error).
// The message is a human-readable failure description suitable for surfacing in events.
func (m *kubeGenericRuntimeManager) createPodSandbox(ctx context.Context, pod *v1.Pod, attempt uint32) (string, string, error) {
	podSandboxConfig, err := m.generatePodSandboxConfig(pod, attempt)
	if err != nil {
		message := fmt.Sprintf("Failed to generate sandbox config for pod %q: %v", format.Pod(pod), err)
		klog.ErrorS(err, "Failed to generate sandbox config for pod", "pod", klog.KObj(pod))
		return "", message, err
	}

	// Create pod logs directory
	err = m.osInterface.MkdirAll(podSandboxConfig.LogDirectory, 0755)
	if err != nil {
		message := fmt.Sprintf("Failed to create log directory for pod %q: %v", format.Pod(pod), err)
		klog.ErrorS(err, "Failed to create log directory for pod", "pod", klog.KObj(pod))
		return "", message, err
	}

	// Resolve the pod's RuntimeClass (if any) to a runtime handler understood by the CRI.
	runtimeHandler := ""
	if m.runtimeClassManager != nil {
		runtimeHandler, err = m.runtimeClassManager.LookupRuntimeHandler(pod.Spec.RuntimeClassName)
		if err != nil {
			message := fmt.Sprintf("Failed to create sandbox for pod %q: %v", format.Pod(pod), err)
			return "", message, err
		}
		if runtimeHandler != "" {
			klog.V(2).InfoS("Running pod with runtime handler", "pod", klog.KObj(pod), "runtimeHandler", runtimeHandler)
		}
	}

	podSandBoxID, err := m.runtimeService.RunPodSandbox(ctx, podSandboxConfig, runtimeHandler)
	if err != nil {
		message := fmt.Sprintf("Failed to create sandbox for pod %q: %v", format.Pod(pod), err)
		klog.ErrorS(err, "Failed to create sandbox for pod", "pod", klog.KObj(pod))
		return "", message, err
	}

	return podSandBoxID, "", nil
}
// generatePodSandboxConfig generates pod sandbox config from v1.Pod.
func (m *kubeGenericRuntimeManager) generatePodSandboxConfig(pod *v1.Pod, attempt uint32) (*runtimeapi.PodSandboxConfig, error) {
// TODO: deprecating podsandbox resource requirements in favor of the pod level cgroup
// Refer https://github.com/kubernetes/kubernetes/issues/29871
podUID := string(pod.UID)
podSandboxConfig := &runtimeapi.PodSandboxConfig{
Metadata: &runtimeapi.PodSandboxMetadata{
Name: pod.Name,
Namespace: pod.Namespace,
Uid: podUID,
Attempt: attempt,
},
Labels: newPodLabels(pod),
Annotations: newPodAnnotations(pod),
}
dnsConfig, err := m.runtimeHelper.GetPodDNS(pod)
if err != nil {
return nil, err
}
podSandboxConfig.DnsConfig = dnsConfig
if !kubecontainer.IsHostNetworkPod(pod) {
// TODO: Add domain support in new runtime interface
podHostname, podDomain, err := m.runtimeHelper.GeneratePodHostNameAndDomain(pod)
if err != nil {
return nil, err
}
podHostname, err = util.GetNodenameForKernel(podHostname, podDomain, pod.Spec.SetHostnameAsFQDN)
if err != nil {
return nil, err
}
podSandboxConfig.Hostname = podHostname
}
logDir := BuildPodLogsDirectory(pod.Namespace, pod.Name, pod.UID)
podSandboxConfig.LogDirectory = logDir
portMappings := []*runtimeapi.PortMapping{}
for _, c := range pod.Spec.Containers {
containerPortMappings := kubecontainer.MakePortMappings(&c)
for idx := range containerPortMappings {
port := containerPortMappings[idx]
hostPort := int32(port.HostPort)
containerPort := int32(port.ContainerPort)
protocol := toRuntimeProtocol(port.Protocol)
portMappings = append(portMappings, &runtimeapi.PortMapping{
HostIp: port.HostIP,
HostPort: hostPort,
ContainerPort: containerPort,
Protocol: protocol,
})
}
}
if len(portMappings) > 0 {
podSandboxConfig.PortMappings = portMappings
}
lc, err := m.generatePodSandboxLinuxConfig(pod)
if err != nil {
return nil, err
}
podSandboxConfig.Linux = lc
if runtime.GOOS == "windows" {
wc, err := m.generatePodSandboxWindowsConfig(pod)
if err != nil {
return nil, err
}
podSandboxConfig.Windows = wc
}
// Update config to include overhead, sandbox level resources
if err := m.applySandboxResources(pod, podSandboxConfig); err != nil {
return nil, err
}
return podSandboxConfig, nil
}
// generatePodSandboxLinuxConfig generates LinuxPodSandboxConfig from v1.Pod.
// We've to call PodSandboxLinuxConfig always irrespective of the underlying OS as securityContext is not part of
// podSandboxConfig. It is currently part of LinuxPodSandboxConfig. In future, if we have securityContext pulled out
// in podSandboxConfig we should be able to use it.
func (m *kubeGenericRuntimeManager) generatePodSandboxLinuxConfig(pod *v1.Pod) (*runtimeapi.LinuxPodSandboxConfig, error) {
cgroupParent := m.runtimeHelper.GetPodCgroupParent(pod)
lc := &runtimeapi.LinuxPodSandboxConfig{
CgroupParent: cgroupParent,
SecurityContext: &runtimeapi.LinuxSandboxSecurityContext{
Privileged: kubecontainer.HasPrivilegedContainer(pod),
// Forcing sandbox to run as `runtime/default` allow users to
// use least privileged seccomp profiles at pod level. Issue #84623
Seccomp: &runtimeapi.SecurityProfile{
ProfileType: runtimeapi.SecurityProfile_RuntimeDefault,
},
},
}
sysctls := make(map[string]string)
if pod.Spec.SecurityContext != nil {
for _, c := range pod.Spec.SecurityContext.Sysctls {
sysctls[c.Name] = c.Value
}
}
lc.Sysctls = sysctls
if pod.Spec.SecurityContext != nil {
sc := pod.Spec.SecurityContext
if sc.RunAsUser != nil && runtime.GOOS != "windows" {
lc.SecurityContext.RunAsUser = &runtimeapi.Int64Value{Value: int64(*sc.RunAsUser)}
}
if sc.RunAsGroup != nil && runtime.GOOS != "windows" {
lc.SecurityContext.RunAsGroup = &runtimeapi.Int64Value{Value: int64(*sc.RunAsGroup)}
}
namespaceOptions, err := runtimeutil.NamespacesForPod(pod, m.runtimeHelper)
if err != nil {
return nil, err
}
lc.SecurityContext.NamespaceOptions = namespaceOptions
if sc.FSGroup != nil && runtime.GOOS != "windows" {
lc.SecurityContext.SupplementalGroups = append(lc.SecurityContext.SupplementalGroups, int64(*sc.FSGroup))
}
if groups := m.runtimeHelper.GetExtraSupplementalGroupsForPod(pod); len(groups) > 0 {
lc.SecurityContext.SupplementalGroups = append(lc.SecurityContext.SupplementalGroups, groups...)
}
if sc.SupplementalGroups != nil {
for _, sg := range sc.SupplementalGroups {
lc.SecurityContext.SupplementalGroups = append(lc.SecurityContext.SupplementalGroups, int64(sg))
}
}
if sc.SELinuxOptions != nil && runtime.GOOS != "windows" {
lc.SecurityContext.SelinuxOptions = &runtimeapi.SELinuxOption{
User: sc.SELinuxOptions.User,
Role: sc.SELinuxOptions.Role,
Type: sc.SELinuxOptions.Type,
Level: sc.SELinuxOptions.Level,
}
}
}
return lc, nil
}
// generatePodSandboxWindowsConfig generates WindowsPodSandboxConfig from v1.Pod.
// On Windows this will get called in addition to LinuxPodSandboxConfig because not all relevant fields have been added to
// WindowsPodSandboxConfig at this time.
func (m *kubeGenericRuntimeManager) generatePodSandboxWindowsConfig(pod *v1.Pod) (*runtimeapi.WindowsPodSandboxConfig, error) |
// getKubeletSandboxes lists all (or just the running) sandboxes managed by kubelet.
func (m *kubeGenericRuntimeManager) getKubeletSandboxes(ctx context.Context, all bool) ([]*runtimeapi.PodSandbox, error) {
var filter *runtimeapi.PodSandboxFilter
if !all {
readyState := runtimeapi.PodSandboxState_SANDBOX_READY
filter = &runtimeapi.PodSandboxFilter{
State: &runtimeapi.PodSandboxStateValue{
State: readyState,
},
}
}
resp, err := m.runtimeService.ListPodSandbox(ctx, filter)
if err != nil {
klog.ErrorS(err, "Failed to list pod sandboxes")
return nil, err
}
return resp, nil
}
// determinePodSandboxIP determines the IP addresses of the given pod sandbox.
func (m *kubeGenericRuntimeManager) determinePodSandboxIPs(podNamespace, podName string, podSandbox *runtimeapi.PodSandboxStatus) []string {
podIPs := make([]string, 0)
if podSandbox.Network == nil {
klog.InfoS("Pod Sandbox status doesn't have network information, cannot report IPs", "pod", klog.KRef(podNamespace, podName))
return podIPs
}
// ip could be an empty string if runtime is not responsible for the
// IP (e.g., host networking).
// pick primary IP
if len(podSandbox.Network.Ip) != 0 {
if netutils.ParseIPSloppy(podSandbox.Network.Ip) == nil {
klog.InfoS("Pod Sandbox reported an unparseable primary IP", "pod", klog.KRef(podNamespace, podName), "IP", podSandbox.Network.Ip)
return nil
}
podIPs = append(podIPs, podSandbox.Network.Ip)
}
// pick additional ips, if cri reported them
for _, podIP := range podSandbox.Network.AdditionalIps {
if nil == netutils.ParseIPSloppy(podIP.Ip) {
klog.InfoS("Pod Sandbox reported an unparseable additional IP", "pod", klog.KRef(podNamespace, podName), "IP", podIP.Ip)
return nil
}
podIPs = append(podIPs, podIP.Ip)
}
return podIPs
}
// getPodSandboxID gets the sandbox id by podUID and returns ([]sandboxID, error).
// Param state could be nil in order to get all sandboxes belonging to same pod.
func (m *kubeGenericRuntimeManager) getSandboxIDByPodUID(ctx context.Context, podUID kubetypes.UID, state *runtimeapi.PodSandboxState) ([]string, error) {
filter := &runtimeapi.PodSandboxFilter{
LabelSelector: map[string]string{types.KubernetesPodUIDLabel: string(podUID)},
}
if state != nil {
filter.State = &runtimeapi.PodSandboxStateValue{
State: *state,
}
}
sandboxes, err := m.runtimeService.ListPodSandbox(ctx, filter)
if err != nil {
klog.ErrorS(err, "Failed to list sandboxes for pod", "podUID", podUID)
return nil, err
}
if len(sandboxes) == 0 {
return nil, nil
}
// Sort with newest first.
sandboxIDs := make([]string, len(sandboxes))
sort.Sort(podSandboxByCreated(sandboxes))
for i, s := range sandboxes {
sandboxIDs[i] = s.Id
}
return sandboxIDs, nil
}
// GetPortForward gets the endpoint the runtime will serve the port-forward request from.
func (m *kubeGenericRuntimeManager) GetPortForward(ctx context.Context, podName, podNamespace string, podUID kubetypes.UID, ports []int32) (*url.URL, error) {
sandboxIDs, err := m.getSandboxIDByPodUID(ctx, podUID, nil)
if err != nil {
return nil, fmt.Errorf("failed to find sandboxID for pod %s: %v", format.PodDesc(podName, podNamespace, podUID), err)
}
if len(sandboxIDs) == 0 {
return nil, fmt.Errorf("failed to find sandboxID for pod %s", format.PodDesc(podName, podNamespace, podUID))
}
req := &runtimeapi.PortForwardRequest{
PodSandboxId: sandboxIDs[0],
Port: ports,
}
resp, err := m.runtimeService.PortForward(ctx, req)
if err != nil {
return nil, err
}
return url.Parse(resp.Url)
}
| {
wc := &runtimeapi.WindowsPodSandboxConfig{
SecurityContext: &runtimeapi.WindowsSandboxSecurityContext{},
}
if utilfeature.DefaultFeatureGate.Enabled(features.WindowsHostNetwork) {
wc.SecurityContext.NamespaceOptions = &runtimeapi.WindowsNamespaceOption{}
if kubecontainer.IsHostNetworkPod(pod) {
wc.SecurityContext.NamespaceOptions.Network = runtimeapi.NamespaceMode_NODE
} else {
wc.SecurityContext.NamespaceOptions.Network = runtimeapi.NamespaceMode_POD
}
}
// If all of the containers in a pod are HostProcess containers, set the pod's HostProcess field
// explicitly because the container runtime requires this information at sandbox creation time.
if kubecontainer.HasWindowsHostProcessContainer(pod) {
// At present Windows all containers in a Windows pod must be HostProcess containers
// and HostNetwork is required to be set.
if !kubecontainer.AllContainersAreWindowsHostProcess(pod) {
return nil, fmt.Errorf("pod must not contain both HostProcess and non-HostProcess containers")
}
if !kubecontainer.IsHostNetworkPod(pod) {
return nil, fmt.Errorf("hostNetwork is required if Pod contains HostProcess containers")
}
wc.SecurityContext.HostProcess = true
}
sc := pod.Spec.SecurityContext
if sc == nil || sc.WindowsOptions == nil {
return wc, nil
}
wo := sc.WindowsOptions
if wo.GMSACredentialSpec != nil {
wc.SecurityContext.CredentialSpec = *wo.GMSACredentialSpec
}
if wo.RunAsUserName != nil {
wc.SecurityContext.RunAsUsername = *wo.RunAsUserName
}
if kubecontainer.HasWindowsHostProcessContainer(pod) {
if wo.HostProcess != nil && !*wo.HostProcess {
return nil, fmt.Errorf("pod must not contain any HostProcess containers if Pod's WindowsOptions.HostProcess is set to false")
}
}
return wc, nil
} | identifier_body |
JsonLogparser.py | import os
import argparse
import json
import numpy as np
import matplotlib.pyplot as pp
from threading import Thread
def get_Directory():
print ("Select the log you would like to access by using -run=<number>\n")
for i in range(len(dir_list)):
directory = "Run " + str(i) + ": " + dir_list[i] #printing all the directories in a neat order
print (directory)
#to get the all the keys .. not applicable right now
# def get_keys(d_or_l, keys_list):
# if isinstance(d_or_l, dict):
# for k, v in iter(sorted(d_or_l.items())):
# if isinstance(v, list):
# get_keys(v, keys_list)
# elif isinstance(v, dict):
# get_keys(v, keys_list)
# keys_list.append(k) # Altered line
# elif isinstance(d_or_l, list):
# for i in d_or_l:
# if isinstance(i, list):
# get_keys(i, keys_list)
# elif isinstance(i, dict):
# get_keys(i, keys_list)
# else:
# print ('\n')
# #print ("** Skipping item of type: {}".format(type(d_or_l)))
# return keys_list
def getTimeIndex(dic,from_t,to_t):
# gets the index of the from and to time from string to integers
i = 0
from_index = 0
to_index = len(dic)
while (i < len(dic)):
if (str(dic[i]["last_update"]) >= from_t):
if (i!=0):
from_index = i
else:
from_index = i
break
i = i + 1
i = 0
if (dic[len(dic)-1]["last_update"]==to_t):
to_t = len(dic)-1
else:
while (i < len(dic)):
if (str(dic[i]["last_update"]) >= to_t):
if (i != 0):
to_index = i
else:
to_index = i
break
i = i + 1
return from_index, to_index
def printPlot(dic, key, key2, label, starttime, endtime,all,indexes):
array = []
while(starttime < endtime):
if (key2 == None):
array.append(dic[starttime][key])
else :
array.append(dic[starttime][key][key2]) | array = np.array(array)
array = array.T
if (all):
for i in range(len(array)):
pp.plot(array[i], label="Thruster " + str(i))
else:
for i in indexes:
try:
pp.plot(array[i],label="Thruster " + str(i))
except:
print ("\nUnable to Plot\nPlotting index out of range, Please use appropriate range")
exit(10)
pp.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
ncol=4, mode="expand", borderaxespad=0.)
pp.xlabel("Time")
pp.ylabel("Thruster power")
pp.title(label)
pp.show()
def print1dim( dic, key, label, starttime, endtime):
# helper function for non nested keys i.e. 1 dimensional array haha
print("\n"+label)
while (starttime < endtime):
print (dic[starttime]["last_update"]," : ",dic[starttime][key])
starttime = starttime + 1
def print2dim(dic, key, key2, label, starttime, endtime):
# helper function for nested keys i.e. 2 dimensional only
# add similar helper function for more nested keys if need be
print("\n" + label)
while (starttime < endtime):
print (dic[starttime]["last_update"]," : ",dic[starttime][key][key2])
starttime = starttime + 1
if __name__ == "__main__":
# if the enviroment variable for the directory path doesn't exist, print error
try:
LOG_DIR = "LOGDIR"
env = os.environ[LOG_DIR]
except:
print ("error")
exit(10)
# PATTERN = DD_HH_MIN_SEC_USEC
# keys_list = [] //to get all the keys.. not useable right now
# data1 =""
# env1= "C:/Users/Mudabbir/Desktop/Logfolder/2018_01_17__12_59_23/dearclientlog.txt"
# with open(env1) as f:
# for line in f:
# data1 = json.loads(line)
# data1 = json.loads(data1)
# break
#
# get_keys(data1,keys_list)
# print (data1,'\n\n')
# print (keys_list)
# sets up the argument parser
# edit this to add more arguments or take out arguments
parser = argparse.ArgumentParser()
parser.add_argument("-r", action='store_true', help="Prints out all the runs")
parser.add_argument("-run", type=int, help="Selects the run you want tot access")
parser.add_argument("-t", action='store_true',help="Prints out the times for all the logs")
parser.add_argument("-plot", type=str, help="Which one's do u want to plot? either enter \"1,2,3,4\" or \"all\"")
parser.add_argument("-df", action='store_true', help="Prints out logs for dearflask")
parser.add_argument("-des", action='store_true', help="Prints out the logs for desired thrust")
parser.add_argument("-dbt", action='store_true', help="Prints out the logs for disabled thrust")
parser.add_argument("-trs", action='store_true', help="Prints out the logs for thruster scales")
parser.add_argument("-claw", action='store_true', help="Prints out all the contents in claw")
parser.add_argument("-pow", action='store_true', help="Prints out the power component in claw")
parser.add_argument("-led", action='store_true', help="Prints out all the contents in led")
parser.add_argument("-cled", action='store_true', help="Prints out the Camera LED")
parser.add_argument("-bled", action='store_true', help="Prints out the Bluetooth LED")
parser.add_argument("-cam", action='store_true', help="Prints all the cameras")
parser.add_argument("-thruster" ,action='store_true', help="Prints out all the contents in the thrusters")
parser.add_argument("-frt", action='store_true', help="Prints out the logs for frozen/frozen thrusters")
parser.add_argument("-logtime", action='store_true', help="Prints the times inside the selected log")
parser.add_argument("-dc", action='store_true', help="Prints out the logs for dearclient")
parser.add_argument("-IMU",action='store_true', help="Prints out the logs for dearflask")
parser.add_argument("-pres", action='store_true', help="Prints out contests for pressure")
parser.add_argument("-camnum", type=int, help='Which camera do u want to print?')
# PATTERN = DD_HH_MIN_SEC_USEC
parser.add_argument("-fr", type=str, help="the starting point for the logs")
parser.add_argument("-to", type=str, help="The ending point of the logs")
args = parser.parse_args()
# gets all the directories i.e. the directories with time stamps for each run
dir_list = next(os.walk(env))[1]
# if a run is provided then access that run, else default to the most recent run
if args.run != None:
log_num = args.run
else:
log_num = 0 #default
if (args.r):
get_Directory()
exit(10)
# adding the directory path
env += dir_list[log_num]
choice = 0
file_list = os.listdir(env)
if (args.dc): # if dearclient is chosen; else if defaults to dearflask , however no error happens by defaulting
choice = 1
env = [env + "/" + file_list[0], env + "/" + file_list[1]][choice == 0] # setting the path to either dearflask or dearclient
data = []
with open(env) as f:
for line in f:
data.append(json.loads(line))
# PATTERN = DD_HH_MIN_SEC_USEC
fromtime = data[0]["last_update"] #by default it goes from start to end
totime = data[len(data)-1]["last_update"]
#print (totime)
# to check if we need to print all plots or a few specific ones
printallplots = False
plotarray = []
if (args.fr != None):
fromtime = str(args.fr)
if (args.to != None):
totime = args.to
if (args.plot != None):
strin = args.plot
if (strin == "all"):
printallplots = True
else:
# parse the string to extract out the numbers and make a list
strin = strin.replace(" ","")
str1 = strin.split(',')
for i in str1:
try :
plotarray.append(int(i))
except:
print("Please input integers and in the following format < int1, int2, int3>");
exit(10)
# gets the index of the times from strings for easier access
fromtime, totime = getTimeIndex(data, fromtime, totime)
print ("Please use this format for time inputs: 00_00_00_00_000000")
# Printing all the arguments , whatever was asked for
check = 0
# calls to printPlot plots the graph
# works for
# DearFlask: Desired, Frozen, Disabled thrusters and thruster scales
# DearClient: Thrusters
if (args.df):
check =1
if (args.t):
print("Time Format: DD_HH_MIN_SEC_USEC \nstart time: ",data[0]["last_update"]+ '\n' + "end time: ",data[len(data)-1]["last_update"] )
if (args.thruster):
print1dim(data,'thrusters','Thrusters',fromtime,totime)
if (args.des):
print2dim(data, 'thrusters','desired_thrust', 'Desired Thrusters', fromtime, totime)
printPlot(data, 'thrusters','desired_thrust', 'Desired Thrusters', fromtime, totime,printallplots,plotarray)
if (args.dbt):
print2dim(data, 'thrusters','disabled_thrusters', 'Disabled thrusters', fromtime, totime)
printPlot(data, 'thrusters','disabled_thrusters', 'Disabled thrusters', fromtime, totime,printallplots,plotarray)
if (args.trs):
print2dim(data, 'thrusters','thruster_scales', 'Thruster scales', fromtime, totime)
printPlot(data, 'thrusters','thruster_scales', 'Thruster scales', fromtime, totime,printallplots,plotarray)
if (args.frt):
print2dim(data, 'thrusters','frozen', 'Frozen', fromtime, totime)
printPlot(data, 'thrusters', 'frozen', 'Frozen', fromtime, totime, printallplots, plotarray)
if (args.claw):
print1dim(data,'claw','Claw',fromtime,totime)
if (args.pow):
print2dim(data, 'claw','power', 'Claw: Power', fromtime, totime)
if (args.led):
print1dim(data,'leds','LED\'s',fromtime,totime)
if (args.cled):
print2dim(data, 'leds','camera_leds', 'LED : Camera LED', fromtime, totime)
if (args.bled):
print2dim(data, 'leds','bluetooth_led', 'LED: Bluetooth LED', fromtime, totime)
if (args.cam):
print1dim(data,'cameras','Camera',fromtime,totime)
if (args.dc):
check =1
if (args.t):
print("Time Format: DD_HH_MIN_SEC_USEC \nstart time: ",data[0]["last_update"]+ '\n' + "end time: ",data[len(data)-1]["last_update"] )
if (args.thruster):
print1dim(data,'thrusters','Thrusters',fromtime,totime)
printPlot(data,'thrusters', None ,'Thrusters', fromtime, totime, printallplots, plotarray)
if (args.IMU):
print1dim(data,'IMU','IMU',fromtime,totime)
# printPlot(data,'IMU', None ,'IMU', fromtime, totime, printallplots, plotarray)
# does not work because of nested keys
if (args.pres):
print1dim(data,'pressure','Pressure',fromtime,totime)
#printPlot(data,'pressure', None ,'Pressure', fromtime, totime, printallplots, plotarray)
#does not work because of nested keys
if (args.logtime):
print1dim(data,'cam_cur', 'Times',fromtime,totime)
if (args.cam):
if (args.camnum!=None):
cam_num = args.camnum
str = 'Cam_' + str(cam_num)
try:
print2dim(data, 'cameras', str, 'Camera '+str, fromtime, totime)
except:
print ("Camera Number out of range")
else :
print1dim(data,'cameras','Cameras',fromtime,totime)
# if neither were selected, tell user to select one or the other
if (check == 0):
print ("Please choose either dearclient or dearflask\nUse -dc or -df")
exit(0) | starttime = starttime + 1 | random_line_split |
JsonLogparser.py | import os
import argparse
import json
import numpy as np
import matplotlib.pyplot as pp
from threading import Thread
def get_Directory():
print ("Select the log you would like to access by using -run=<number>\n")
for i in range(len(dir_list)):
directory = "Run " + str(i) + ": " + dir_list[i] #printing all the directories in a neat order
print (directory)
#to get the all the keys .. not applicable right now
# def get_keys(d_or_l, keys_list):
# if isinstance(d_or_l, dict):
# for k, v in iter(sorted(d_or_l.items())):
# if isinstance(v, list):
# get_keys(v, keys_list)
# elif isinstance(v, dict):
# get_keys(v, keys_list)
# keys_list.append(k) # Altered line
# elif isinstance(d_or_l, list):
# for i in d_or_l:
# if isinstance(i, list):
# get_keys(i, keys_list)
# elif isinstance(i, dict):
# get_keys(i, keys_list)
# else:
# print ('\n')
# #print ("** Skipping item of type: {}".format(type(d_or_l)))
# return keys_list
def getTimeIndex(dic,from_t,to_t):
# gets the index of the from and to time from string to integers
i = 0
from_index = 0
to_index = len(dic)
while (i < len(dic)):
if (str(dic[i]["last_update"]) >= from_t):
if (i!=0):
from_index = i
else:
from_index = i
break
i = i + 1
i = 0
if (dic[len(dic)-1]["last_update"]==to_t):
to_t = len(dic)-1
else:
while (i < len(dic)):
if (str(dic[i]["last_update"]) >= to_t):
if (i != 0):
to_index = i
else:
to_index = i
break
i = i + 1
return from_index, to_index
def printPlot(dic, key, key2, label, starttime, endtime,all,indexes):
array = []
while(starttime < endtime):
if (key2 == None):
array.append(dic[starttime][key])
else :
array.append(dic[starttime][key][key2])
starttime = starttime + 1
array = np.array(array)
array = array.T
if (all):
for i in range(len(array)):
pp.plot(array[i], label="Thruster " + str(i))
else:
for i in indexes:
try:
pp.plot(array[i],label="Thruster " + str(i))
except:
print ("\nUnable to Plot\nPlotting index out of range, Please use appropriate range")
exit(10)
pp.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
ncol=4, mode="expand", borderaxespad=0.)
pp.xlabel("Time")
pp.ylabel("Thruster power")
pp.title(label)
pp.show()
def print1dim( dic, key, label, starttime, endtime):
# helper function for non nested keys i.e. 1 dimensional array haha
print("\n"+label)
while (starttime < endtime):
print (dic[starttime]["last_update"]," : ",dic[starttime][key])
starttime = starttime + 1
def print2dim(dic, key, key2, label, starttime, endtime):
# helper function for nested keys i.e. 2 dimensional only
# add similar helper function for more nested keys if need be
print("\n" + label)
while (starttime < endtime):
print (dic[starttime]["last_update"]," : ",dic[starttime][key][key2])
starttime = starttime + 1
if __name__ == "__main__":
# if the enviroment variable for the directory path doesn't exist, print error
try:
LOG_DIR = "LOGDIR"
env = os.environ[LOG_DIR]
except:
print ("error")
exit(10)
# PATTERN = DD_HH_MIN_SEC_USEC
# keys_list = [] //to get all the keys.. not useable right now
# data1 =""
# env1= "C:/Users/Mudabbir/Desktop/Logfolder/2018_01_17__12_59_23/dearclientlog.txt"
# with open(env1) as f:
# for line in f:
# data1 = json.loads(line)
# data1 = json.loads(data1)
# break
#
# get_keys(data1,keys_list)
# print (data1,'\n\n')
# print (keys_list)
# sets up the argument parser
# edit this to add more arguments or take out arguments
parser = argparse.ArgumentParser()
parser.add_argument("-r", action='store_true', help="Prints out all the runs")
parser.add_argument("-run", type=int, help="Selects the run you want tot access")
parser.add_argument("-t", action='store_true',help="Prints out the times for all the logs")
parser.add_argument("-plot", type=str, help="Which one's do u want to plot? either enter \"1,2,3,4\" or \"all\"")
parser.add_argument("-df", action='store_true', help="Prints out logs for dearflask")
parser.add_argument("-des", action='store_true', help="Prints out the logs for desired thrust")
parser.add_argument("-dbt", action='store_true', help="Prints out the logs for disabled thrust")
parser.add_argument("-trs", action='store_true', help="Prints out the logs for thruster scales")
parser.add_argument("-claw", action='store_true', help="Prints out all the contents in claw")
parser.add_argument("-pow", action='store_true', help="Prints out the power component in claw")
parser.add_argument("-led", action='store_true', help="Prints out all the contents in led")
parser.add_argument("-cled", action='store_true', help="Prints out the Camera LED")
parser.add_argument("-bled", action='store_true', help="Prints out the Bluetooth LED")
parser.add_argument("-cam", action='store_true', help="Prints all the cameras")
parser.add_argument("-thruster" ,action='store_true', help="Prints out all the contents in the thrusters")
parser.add_argument("-frt", action='store_true', help="Prints out the logs for frozen/frozen thrusters")
parser.add_argument("-logtime", action='store_true', help="Prints the times inside the selected log")
parser.add_argument("-dc", action='store_true', help="Prints out the logs for dearclient")
parser.add_argument("-IMU",action='store_true', help="Prints out the logs for dearflask")
parser.add_argument("-pres", action='store_true', help="Prints out contests for pressure")
parser.add_argument("-camnum", type=int, help='Which camera do u want to print?')
# PATTERN = DD_HH_MIN_SEC_USEC
parser.add_argument("-fr", type=str, help="the starting point for the logs")
parser.add_argument("-to", type=str, help="The ending point of the logs")
args = parser.parse_args()
# gets all the directories i.e. the directories with time stamps for each run
dir_list = next(os.walk(env))[1]
# if a run is provided then access that run, else default to the most recent run
if args.run != None:
log_num = args.run
else:
log_num = 0 #default
if (args.r):
get_Directory()
exit(10)
# adding the directory path
env += dir_list[log_num]
choice = 0
file_list = os.listdir(env)
if (args.dc): # if dearclient is chosen; else if defaults to dearflask , however no error happens by defaulting
choice = 1
env = [env + "/" + file_list[0], env + "/" + file_list[1]][choice == 0] # setting the path to either dearflask or dearclient
data = []
with open(env) as f:
for line in f:
data.append(json.loads(line))
# PATTERN = DD_HH_MIN_SEC_USEC
fromtime = data[0]["last_update"] #by default it goes from start to end
totime = data[len(data)-1]["last_update"]
#print (totime)
# to check if we need to print all plots or a few specific ones
printallplots = False
plotarray = []
if (args.fr != None):
fromtime = str(args.fr)
if (args.to != None):
totime = args.to
if (args.plot != None):
strin = args.plot
if (strin == "all"):
printallplots = True
else:
# parse the string to extract out the numbers and make a list
strin = strin.replace(" ","")
str1 = strin.split(',')
for i in str1:
try :
plotarray.append(int(i))
except:
print("Please input integers and in the following format < int1, int2, int3>");
exit(10)
# gets the index of the times from strings for easier access
fromtime, totime = getTimeIndex(data, fromtime, totime)
print ("Please use this format for time inputs: 00_00_00_00_000000")
# Printing all the arguments , whatever was asked for
check = 0
# calls to printPlot plots the graph
# works for
# DearFlask: Desired, Frozen, Disabled thrusters and thruster scales
# DearClient: Thrusters
if (args.df):
check =1
if (args.t):
print("Time Format: DD_HH_MIN_SEC_USEC \nstart time: ",data[0]["last_update"]+ '\n' + "end time: ",data[len(data)-1]["last_update"] )
if (args.thruster):
print1dim(data,'thrusters','Thrusters',fromtime,totime)
if (args.des):
print2dim(data, 'thrusters','desired_thrust', 'Desired Thrusters', fromtime, totime)
printPlot(data, 'thrusters','desired_thrust', 'Desired Thrusters', fromtime, totime,printallplots,plotarray)
if (args.dbt):
print2dim(data, 'thrusters','disabled_thrusters', 'Disabled thrusters', fromtime, totime)
printPlot(data, 'thrusters','disabled_thrusters', 'Disabled thrusters', fromtime, totime,printallplots,plotarray)
if (args.trs):
print2dim(data, 'thrusters','thruster_scales', 'Thruster scales', fromtime, totime)
printPlot(data, 'thrusters','thruster_scales', 'Thruster scales', fromtime, totime,printallplots,plotarray)
if (args.frt):
print2dim(data, 'thrusters','frozen', 'Frozen', fromtime, totime)
printPlot(data, 'thrusters', 'frozen', 'Frozen', fromtime, totime, printallplots, plotarray)
if (args.claw):
print1dim(data,'claw','Claw',fromtime,totime)
if (args.pow):
print2dim(data, 'claw','power', 'Claw: Power', fromtime, totime)
if (args.led):
print1dim(data,'leds','LED\'s',fromtime,totime)
if (args.cled):
print2dim(data, 'leds','camera_leds', 'LED : Camera LED', fromtime, totime)
if (args.bled):
print2dim(data, 'leds','bluetooth_led', 'LED: Bluetooth LED', fromtime, totime)
if (args.cam):
|
if (args.dc):
check =1
if (args.t):
print("Time Format: DD_HH_MIN_SEC_USEC \nstart time: ",data[0]["last_update"]+ '\n' + "end time: ",data[len(data)-1]["last_update"] )
if (args.thruster):
print1dim(data,'thrusters','Thrusters',fromtime,totime)
printPlot(data,'thrusters', None ,'Thrusters', fromtime, totime, printallplots, plotarray)
if (args.IMU):
print1dim(data,'IMU','IMU',fromtime,totime)
# printPlot(data,'IMU', None ,'IMU', fromtime, totime, printallplots, plotarray)
# does not work because of nested keys
if (args.pres):
print1dim(data,'pressure','Pressure',fromtime,totime)
#printPlot(data,'pressure', None ,'Pressure', fromtime, totime, printallplots, plotarray)
#does not work because of nested keys
if (args.logtime):
print1dim(data,'cam_cur', 'Times',fromtime,totime)
if (args.cam):
if (args.camnum!=None):
cam_num = args.camnum
str = 'Cam_' + str(cam_num)
try:
print2dim(data, 'cameras', str, 'Camera '+str, fromtime, totime)
except:
print ("Camera Number out of range")
else :
print1dim(data,'cameras','Cameras',fromtime,totime)
# if neither were selected, tell user to select one or the other
if (check == 0):
print ("Please choose either dearclient or dearflask\nUse -dc or -df")
exit(0) | print1dim(data,'cameras','Camera',fromtime,totime) | conditional_block |
JsonLogparser.py | import os
import argparse
import json
import numpy as np
import matplotlib.pyplot as pp
from threading import Thread
def get_Directory():
print ("Select the log you would like to access by using -run=<number>\n")
for i in range(len(dir_list)):
directory = "Run " + str(i) + ": " + dir_list[i] #printing all the directories in a neat order
print (directory)
#to get the all the keys .. not applicable right now
# def get_keys(d_or_l, keys_list):
# if isinstance(d_or_l, dict):
# for k, v in iter(sorted(d_or_l.items())):
# if isinstance(v, list):
# get_keys(v, keys_list)
# elif isinstance(v, dict):
# get_keys(v, keys_list)
# keys_list.append(k) # Altered line
# elif isinstance(d_or_l, list):
# for i in d_or_l:
# if isinstance(i, list):
# get_keys(i, keys_list)
# elif isinstance(i, dict):
# get_keys(i, keys_list)
# else:
# print ('\n')
# #print ("** Skipping item of type: {}".format(type(d_or_l)))
# return keys_list
def getTimeIndex(dic,from_t,to_t):
# gets the index of the from and to time from string to integers
i = 0
from_index = 0
to_index = len(dic)
while (i < len(dic)):
if (str(dic[i]["last_update"]) >= from_t):
if (i!=0):
from_index = i
else:
from_index = i
break
i = i + 1
i = 0
if (dic[len(dic)-1]["last_update"]==to_t):
to_t = len(dic)-1
else:
while (i < len(dic)):
if (str(dic[i]["last_update"]) >= to_t):
if (i != 0):
to_index = i
else:
to_index = i
break
i = i + 1
return from_index, to_index
def printPlot(dic, key, key2, label, starttime, endtime,all,indexes):
array = []
while(starttime < endtime):
if (key2 == None):
array.append(dic[starttime][key])
else :
array.append(dic[starttime][key][key2])
starttime = starttime + 1
array = np.array(array)
array = array.T
if (all):
for i in range(len(array)):
pp.plot(array[i], label="Thruster " + str(i))
else:
for i in indexes:
try:
pp.plot(array[i],label="Thruster " + str(i))
except:
print ("\nUnable to Plot\nPlotting index out of range, Please use appropriate range")
exit(10)
pp.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
ncol=4, mode="expand", borderaxespad=0.)
pp.xlabel("Time")
pp.ylabel("Thruster power")
pp.title(label)
pp.show()
def print1dim( dic, key, label, starttime, endtime):
# helper function for non nested keys i.e. 1 dimensional array haha
print("\n"+label)
while (starttime < endtime):
print (dic[starttime]["last_update"]," : ",dic[starttime][key])
starttime = starttime + 1
def | (dic, key, key2, label, starttime, endtime):
# helper function for nested keys i.e. 2 dimensional only
# add similar helper function for more nested keys if need be
print("\n" + label)
while (starttime < endtime):
print (dic[starttime]["last_update"]," : ",dic[starttime][key][key2])
starttime = starttime + 1
if __name__ == "__main__":
# if the enviroment variable for the directory path doesn't exist, print error
try:
LOG_DIR = "LOGDIR"
env = os.environ[LOG_DIR]
except:
print ("error")
exit(10)
# PATTERN = DD_HH_MIN_SEC_USEC
# keys_list = [] //to get all the keys.. not useable right now
# data1 =""
# env1= "C:/Users/Mudabbir/Desktop/Logfolder/2018_01_17__12_59_23/dearclientlog.txt"
# with open(env1) as f:
# for line in f:
# data1 = json.loads(line)
# data1 = json.loads(data1)
# break
#
# get_keys(data1,keys_list)
# print (data1,'\n\n')
# print (keys_list)
# sets up the argument parser
# edit this to add more arguments or take out arguments
parser = argparse.ArgumentParser()
parser.add_argument("-r", action='store_true', help="Prints out all the runs")
parser.add_argument("-run", type=int, help="Selects the run you want tot access")
parser.add_argument("-t", action='store_true',help="Prints out the times for all the logs")
parser.add_argument("-plot", type=str, help="Which one's do u want to plot? either enter \"1,2,3,4\" or \"all\"")
parser.add_argument("-df", action='store_true', help="Prints out logs for dearflask")
parser.add_argument("-des", action='store_true', help="Prints out the logs for desired thrust")
parser.add_argument("-dbt", action='store_true', help="Prints out the logs for disabled thrust")
parser.add_argument("-trs", action='store_true', help="Prints out the logs for thruster scales")
parser.add_argument("-claw", action='store_true', help="Prints out all the contents in claw")
parser.add_argument("-pow", action='store_true', help="Prints out the power component in claw")
parser.add_argument("-led", action='store_true', help="Prints out all the contents in led")
parser.add_argument("-cled", action='store_true', help="Prints out the Camera LED")
parser.add_argument("-bled", action='store_true', help="Prints out the Bluetooth LED")
parser.add_argument("-cam", action='store_true', help="Prints all the cameras")
parser.add_argument("-thruster" ,action='store_true', help="Prints out all the contents in the thrusters")
parser.add_argument("-frt", action='store_true', help="Prints out the logs for frozen/frozen thrusters")
parser.add_argument("-logtime", action='store_true', help="Prints the times inside the selected log")
parser.add_argument("-dc", action='store_true', help="Prints out the logs for dearclient")
parser.add_argument("-IMU",action='store_true', help="Prints out the logs for dearflask")
parser.add_argument("-pres", action='store_true', help="Prints out contests for pressure")
parser.add_argument("-camnum", type=int, help='Which camera do u want to print?')
# PATTERN = DD_HH_MIN_SEC_USEC
parser.add_argument("-fr", type=str, help="the starting point for the logs")
parser.add_argument("-to", type=str, help="The ending point of the logs")
args = parser.parse_args()
# gets all the directories i.e. the directories with time stamps for each run
dir_list = next(os.walk(env))[1]
# if a run is provided then access that run, else default to the most recent run
if args.run != None:
log_num = args.run
else:
log_num = 0 #default
if (args.r):
get_Directory()
exit(10)
# adding the directory path
env += dir_list[log_num]
choice = 0
file_list = os.listdir(env)
if (args.dc): # if dearclient is chosen; else if defaults to dearflask , however no error happens by defaulting
choice = 1
env = [env + "/" + file_list[0], env + "/" + file_list[1]][choice == 0] # setting the path to either dearflask or dearclient
data = []
with open(env) as f:
for line in f:
data.append(json.loads(line))
# PATTERN = DD_HH_MIN_SEC_USEC
fromtime = data[0]["last_update"] #by default it goes from start to end
totime = data[len(data)-1]["last_update"]
#print (totime)
# to check if we need to print all plots or a few specific ones
printallplots = False
plotarray = []
if (args.fr != None):
fromtime = str(args.fr)
if (args.to != None):
totime = args.to
if (args.plot != None):
strin = args.plot
if (strin == "all"):
printallplots = True
else:
# parse the string to extract out the numbers and make a list
strin = strin.replace(" ","")
str1 = strin.split(',')
for i in str1:
try :
plotarray.append(int(i))
except:
print("Please input integers and in the following format < int1, int2, int3>");
exit(10)
# gets the index of the times from strings for easier access
fromtime, totime = getTimeIndex(data, fromtime, totime)
print ("Please use this format for time inputs: 00_00_00_00_000000")
# Printing all the arguments , whatever was asked for
check = 0
# calls to printPlot plots the graph
# works for
# DearFlask: Desired, Frozen, Disabled thrusters and thruster scales
# DearClient: Thrusters
if (args.df):
check =1
if (args.t):
print("Time Format: DD_HH_MIN_SEC_USEC \nstart time: ",data[0]["last_update"]+ '\n' + "end time: ",data[len(data)-1]["last_update"] )
if (args.thruster):
print1dim(data,'thrusters','Thrusters',fromtime,totime)
if (args.des):
print2dim(data, 'thrusters','desired_thrust', 'Desired Thrusters', fromtime, totime)
printPlot(data, 'thrusters','desired_thrust', 'Desired Thrusters', fromtime, totime,printallplots,plotarray)
if (args.dbt):
print2dim(data, 'thrusters','disabled_thrusters', 'Disabled thrusters', fromtime, totime)
printPlot(data, 'thrusters','disabled_thrusters', 'Disabled thrusters', fromtime, totime,printallplots,plotarray)
if (args.trs):
print2dim(data, 'thrusters','thruster_scales', 'Thruster scales', fromtime, totime)
printPlot(data, 'thrusters','thruster_scales', 'Thruster scales', fromtime, totime,printallplots,plotarray)
if (args.frt):
print2dim(data, 'thrusters','frozen', 'Frozen', fromtime, totime)
printPlot(data, 'thrusters', 'frozen', 'Frozen', fromtime, totime, printallplots, plotarray)
if (args.claw):
print1dim(data,'claw','Claw',fromtime,totime)
if (args.pow):
print2dim(data, 'claw','power', 'Claw: Power', fromtime, totime)
if (args.led):
print1dim(data,'leds','LED\'s',fromtime,totime)
if (args.cled):
print2dim(data, 'leds','camera_leds', 'LED : Camera LED', fromtime, totime)
if (args.bled):
print2dim(data, 'leds','bluetooth_led', 'LED: Bluetooth LED', fromtime, totime)
if (args.cam):
print1dim(data,'cameras','Camera',fromtime,totime)
if (args.dc):
check =1
if (args.t):
print("Time Format: DD_HH_MIN_SEC_USEC \nstart time: ",data[0]["last_update"]+ '\n' + "end time: ",data[len(data)-1]["last_update"] )
if (args.thruster):
print1dim(data,'thrusters','Thrusters',fromtime,totime)
printPlot(data,'thrusters', None ,'Thrusters', fromtime, totime, printallplots, plotarray)
if (args.IMU):
print1dim(data,'IMU','IMU',fromtime,totime)
# printPlot(data,'IMU', None ,'IMU', fromtime, totime, printallplots, plotarray)
# does not work because of nested keys
if (args.pres):
print1dim(data,'pressure','Pressure',fromtime,totime)
#printPlot(data,'pressure', None ,'Pressure', fromtime, totime, printallplots, plotarray)
#does not work because of nested keys
if (args.logtime):
print1dim(data,'cam_cur', 'Times',fromtime,totime)
if (args.cam):
if (args.camnum!=None):
cam_num = args.camnum
str = 'Cam_' + str(cam_num)
try:
print2dim(data, 'cameras', str, 'Camera '+str, fromtime, totime)
except:
print ("Camera Number out of range")
else :
print1dim(data,'cameras','Cameras',fromtime,totime)
# if neither were selected, tell user to select one or the other
if (check == 0):
print ("Please choose either dearclient or dearflask\nUse -dc or -df")
exit(0) | print2dim | identifier_name |
JsonLogparser.py | import os
import argparse
import json
import numpy as np
import matplotlib.pyplot as pp
from threading import Thread
def get_Directory():
print ("Select the log you would like to access by using -run=<number>\n")
for i in range(len(dir_list)):
directory = "Run " + str(i) + ": " + dir_list[i] #printing all the directories in a neat order
print (directory)
#to get the all the keys .. not applicable right now
# def get_keys(d_or_l, keys_list):
# if isinstance(d_or_l, dict):
# for k, v in iter(sorted(d_or_l.items())):
# if isinstance(v, list):
# get_keys(v, keys_list)
# elif isinstance(v, dict):
# get_keys(v, keys_list)
# keys_list.append(k) # Altered line
# elif isinstance(d_or_l, list):
# for i in d_or_l:
# if isinstance(i, list):
# get_keys(i, keys_list)
# elif isinstance(i, dict):
# get_keys(i, keys_list)
# else:
# print ('\n')
# #print ("** Skipping item of type: {}".format(type(d_or_l)))
# return keys_list
def getTimeIndex(dic,from_t,to_t):
# gets the index of the from and to time from string to integers
i = 0
from_index = 0
to_index = len(dic)
while (i < len(dic)):
if (str(dic[i]["last_update"]) >= from_t):
if (i!=0):
from_index = i
else:
from_index = i
break
i = i + 1
i = 0
if (dic[len(dic)-1]["last_update"]==to_t):
to_t = len(dic)-1
else:
while (i < len(dic)):
if (str(dic[i]["last_update"]) >= to_t):
if (i != 0):
to_index = i
else:
to_index = i
break
i = i + 1
return from_index, to_index
def printPlot(dic, key, key2, label, starttime, endtime,all,indexes):
array = []
while(starttime < endtime):
if (key2 == None):
array.append(dic[starttime][key])
else :
array.append(dic[starttime][key][key2])
starttime = starttime + 1
array = np.array(array)
array = array.T
if (all):
for i in range(len(array)):
pp.plot(array[i], label="Thruster " + str(i))
else:
for i in indexes:
try:
pp.plot(array[i],label="Thruster " + str(i))
except:
print ("\nUnable to Plot\nPlotting index out of range, Please use appropriate range")
exit(10)
pp.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
ncol=4, mode="expand", borderaxespad=0.)
pp.xlabel("Time")
pp.ylabel("Thruster power")
pp.title(label)
pp.show()
def print1dim( dic, key, label, starttime, endtime):
# helper function for non nested keys i.e. 1 dimensional array haha
|
def print2dim(dic, key, key2, label, starttime, endtime):
# helper function for nested keys i.e. 2 dimensional only
# add similar helper function for more nested keys if need be
print("\n" + label)
while (starttime < endtime):
print (dic[starttime]["last_update"]," : ",dic[starttime][key][key2])
starttime = starttime + 1
if __name__ == "__main__":
# if the enviroment variable for the directory path doesn't exist, print error
try:
LOG_DIR = "LOGDIR"
env = os.environ[LOG_DIR]
except:
print ("error")
exit(10)
# PATTERN = DD_HH_MIN_SEC_USEC
# keys_list = [] //to get all the keys.. not useable right now
# data1 =""
# env1= "C:/Users/Mudabbir/Desktop/Logfolder/2018_01_17__12_59_23/dearclientlog.txt"
# with open(env1) as f:
# for line in f:
# data1 = json.loads(line)
# data1 = json.loads(data1)
# break
#
# get_keys(data1,keys_list)
# print (data1,'\n\n')
# print (keys_list)
# sets up the argument parser
# edit this to add more arguments or take out arguments
parser = argparse.ArgumentParser()
parser.add_argument("-r", action='store_true', help="Prints out all the runs")
parser.add_argument("-run", type=int, help="Selects the run you want tot access")
parser.add_argument("-t", action='store_true',help="Prints out the times for all the logs")
parser.add_argument("-plot", type=str, help="Which one's do u want to plot? either enter \"1,2,3,4\" or \"all\"")
parser.add_argument("-df", action='store_true', help="Prints out logs for dearflask")
parser.add_argument("-des", action='store_true', help="Prints out the logs for desired thrust")
parser.add_argument("-dbt", action='store_true', help="Prints out the logs for disabled thrust")
parser.add_argument("-trs", action='store_true', help="Prints out the logs for thruster scales")
parser.add_argument("-claw", action='store_true', help="Prints out all the contents in claw")
parser.add_argument("-pow", action='store_true', help="Prints out the power component in claw")
parser.add_argument("-led", action='store_true', help="Prints out all the contents in led")
parser.add_argument("-cled", action='store_true', help="Prints out the Camera LED")
parser.add_argument("-bled", action='store_true', help="Prints out the Bluetooth LED")
parser.add_argument("-cam", action='store_true', help="Prints all the cameras")
parser.add_argument("-thruster" ,action='store_true', help="Prints out all the contents in the thrusters")
parser.add_argument("-frt", action='store_true', help="Prints out the logs for frozen/frozen thrusters")
parser.add_argument("-logtime", action='store_true', help="Prints the times inside the selected log")
parser.add_argument("-dc", action='store_true', help="Prints out the logs for dearclient")
parser.add_argument("-IMU",action='store_true', help="Prints out the logs for dearflask")
parser.add_argument("-pres", action='store_true', help="Prints out contests for pressure")
parser.add_argument("-camnum", type=int, help='Which camera do u want to print?')
# PATTERN = DD_HH_MIN_SEC_USEC
parser.add_argument("-fr", type=str, help="the starting point for the logs")
parser.add_argument("-to", type=str, help="The ending point of the logs")
args = parser.parse_args()
# gets all the directories i.e. the directories with time stamps for each run
dir_list = next(os.walk(env))[1]
# if a run is provided then access that run, else default to the most recent run
if args.run != None:
log_num = args.run
else:
log_num = 0 #default
if (args.r):
get_Directory()
exit(10)
# adding the directory path
env += dir_list[log_num]
choice = 0
file_list = os.listdir(env)
if (args.dc): # if dearclient is chosen; else if defaults to dearflask , however no error happens by defaulting
choice = 1
env = [env + "/" + file_list[0], env + "/" + file_list[1]][choice == 0] # setting the path to either dearflask or dearclient
data = []
with open(env) as f:
for line in f:
data.append(json.loads(line))
# PATTERN = DD_HH_MIN_SEC_USEC
fromtime = data[0]["last_update"] #by default it goes from start to end
totime = data[len(data)-1]["last_update"]
#print (totime)
# to check if we need to print all plots or a few specific ones
printallplots = False
plotarray = []
if (args.fr != None):
fromtime = str(args.fr)
if (args.to != None):
totime = args.to
if (args.plot != None):
strin = args.plot
if (strin == "all"):
printallplots = True
else:
# parse the string to extract out the numbers and make a list
strin = strin.replace(" ","")
str1 = strin.split(',')
for i in str1:
try :
plotarray.append(int(i))
except:
print("Please input integers and in the following format < int1, int2, int3>");
exit(10)
# gets the index of the times from strings for easier access
fromtime, totime = getTimeIndex(data, fromtime, totime)
print ("Please use this format for time inputs: 00_00_00_00_000000")
# Printing all the arguments , whatever was asked for
check = 0
# calls to printPlot plots the graph
# works for
# DearFlask: Desired, Frozen, Disabled thrusters and thruster scales
# DearClient: Thrusters
if (args.df):
check =1
if (args.t):
print("Time Format: DD_HH_MIN_SEC_USEC \nstart time: ",data[0]["last_update"]+ '\n' + "end time: ",data[len(data)-1]["last_update"] )
if (args.thruster):
print1dim(data,'thrusters','Thrusters',fromtime,totime)
if (args.des):
print2dim(data, 'thrusters','desired_thrust', 'Desired Thrusters', fromtime, totime)
printPlot(data, 'thrusters','desired_thrust', 'Desired Thrusters', fromtime, totime,printallplots,plotarray)
if (args.dbt):
print2dim(data, 'thrusters','disabled_thrusters', 'Disabled thrusters', fromtime, totime)
printPlot(data, 'thrusters','disabled_thrusters', 'Disabled thrusters', fromtime, totime,printallplots,plotarray)
if (args.trs):
print2dim(data, 'thrusters','thruster_scales', 'Thruster scales', fromtime, totime)
printPlot(data, 'thrusters','thruster_scales', 'Thruster scales', fromtime, totime,printallplots,plotarray)
if (args.frt):
print2dim(data, 'thrusters','frozen', 'Frozen', fromtime, totime)
printPlot(data, 'thrusters', 'frozen', 'Frozen', fromtime, totime, printallplots, plotarray)
if (args.claw):
print1dim(data,'claw','Claw',fromtime,totime)
if (args.pow):
print2dim(data, 'claw','power', 'Claw: Power', fromtime, totime)
if (args.led):
print1dim(data,'leds','LED\'s',fromtime,totime)
if (args.cled):
print2dim(data, 'leds','camera_leds', 'LED : Camera LED', fromtime, totime)
if (args.bled):
print2dim(data, 'leds','bluetooth_led', 'LED: Bluetooth LED', fromtime, totime)
if (args.cam):
print1dim(data,'cameras','Camera',fromtime,totime)
if (args.dc):
check =1
if (args.t):
print("Time Format: DD_HH_MIN_SEC_USEC \nstart time: ",data[0]["last_update"]+ '\n' + "end time: ",data[len(data)-1]["last_update"] )
if (args.thruster):
print1dim(data,'thrusters','Thrusters',fromtime,totime)
printPlot(data,'thrusters', None ,'Thrusters', fromtime, totime, printallplots, plotarray)
if (args.IMU):
print1dim(data,'IMU','IMU',fromtime,totime)
# printPlot(data,'IMU', None ,'IMU', fromtime, totime, printallplots, plotarray)
# does not work because of nested keys
if (args.pres):
print1dim(data,'pressure','Pressure',fromtime,totime)
#printPlot(data,'pressure', None ,'Pressure', fromtime, totime, printallplots, plotarray)
#does not work because of nested keys
if (args.logtime):
print1dim(data,'cam_cur', 'Times',fromtime,totime)
if (args.cam):
if (args.camnum!=None):
cam_num = args.camnum
str = 'Cam_' + str(cam_num)
try:
print2dim(data, 'cameras', str, 'Camera '+str, fromtime, totime)
except:
print ("Camera Number out of range")
else :
print1dim(data,'cameras','Cameras',fromtime,totime)
# if neither were selected, tell user to select one or the other
if (check == 0):
print ("Please choose either dearclient or dearflask\nUse -dc or -df")
exit(0) | print("\n"+label)
while (starttime < endtime):
print (dic[starttime]["last_update"]," : ",dic[starttime][key])
starttime = starttime + 1 | identifier_body |
mikes-modal.min.js | !function (a, b, c) {
function d(a, c) {
var d = b.createElement(a || "div"), e;
for (e in c)d[e] = c[e];
return d
}
function e(a) {
for (var b = 1, c = arguments.length; b < c; b++)a.appendChild(arguments[b]);
return a
}
function f(a, b, c, d) {
var e = ["opacity", b, ~~(a * 100), c, d].join("-"), f = .01 + c / d * 100, g = Math.max(1 - (1 - a) / b * (100 - f), a), h = m.substring(0, m.indexOf("Animation")).toLowerCase(), i = h && "-" + h + "-" || "";
return l[e] || (n.insertRule("@" + i + "keyframes " + e + "{" + "0%{opacity:" + g + "}" + f + "%{opacity:" + a + "}" + (f + .01) + "%{opacity:1}" + (f + b) % 100 + "%{opacity:" + a + "}" + "100%{opacity:" + g + "}" + "}", n.cssRules.length), l[e] = 1), e
}
function g(a, b) {
var d = a.style, e, f;
if (d[b] !== c)return b;
b = b.charAt(0).toUpperCase() + b.slice(1);
for (f = 0; f < k.length; f++) {
e = k[f] + b;
if (d[e] !== c)return e
}
}
function h(a, b) {
for (var c in b)a.style[g(a, c) || c] = b[c];
return a
}
function i(a) {
for (var b = 1; b < arguments.length; b++) {
var d = arguments[b];
for (var e in d)a[e] === c && (a[e] = d[e])
}
return a
}
function j(a) {
var b = {x: a.offsetLeft, y: a.offsetTop};
while (a = a.offsetParent)b.x += a.offsetLeft, b.y += a.offsetTop;
return b
}
var k = ["webkit", "Moz", "ms", "O"], l = {}, m, n = function () {
var a = d("style", {type: "text/css"});
return e(b.getElementsByTagName("head")[0], a), a.sheet || a.styleSheet
}(), o = {
lines: 12,
length: 7,
width: 5,
radius: 10,
rotate: 0,
corners: 1,
color: "#000",
speed: 1,
trail: 100,
opacity: .25,
fps: 20,
zIndex: 2e9,
className: "spinner",
top: "auto",
left: "auto"
}, p = function q(a) {
if (!this.spin)return new q(a);
this.opts = i(a || {}, q.defaults, o)
};
p.defaults = {}, i(p.prototype, {
spin: function (a) {
this.stop();
var b = this, c = b.opts, e = b.el = h(d(0, {className: c.className}), {
position: "relative",
width: 0,
zIndex: c.zIndex
}), f = c.radius + c.length + c.width, g, i;
a && (a.insertBefore(e, a.firstChild || null), i = j(a), g = j(e), h(e, {
left: (c.left == "auto" ? i.x - g.x + (a.offsetWidth >> 1) : parseInt(c.left, 10) + f) + "px",
top: (c.top == "auto" ? i.y - g.y + (a.offsetHeight >> 1) : parseInt(c.top, 10) + f) + "px"
})), e.setAttribute("aria-role", "progressbar"), b.lines(e, b.opts);
if (!m) {
var k = 0, l = c.fps, n = l / c.speed, o = (1 - c.opacity) / (n * c.trail / 100), p = n / c.lines;
(function q() {
k++;
for (var a = c.lines; a; a--) {
var d = Math.max(1 - (k + a * p) % n * o, c.opacity);
b.opacity(e, c.lines - a, d, c)
}
b.timeout = b.el && setTimeout(q, ~~(1e3 / l))
})()
}
return b
}, stop: function () {
var a = this.el;
return a && (clearTimeout(this.timeout), a.parentNode && a.parentNode.removeChild(a), this.el = c), this
}, lines: function (a, b) {
function c(a, c) {
return h(d(), {
position: "absolute",
width: b.length + b.width + "px",
height: b.width + "px",
background: a,
boxShadow: c,
transformOrigin: "left",
transform: "rotate(" + ~~(360 / b.lines * g + b.rotate) + "deg) translate(" + b.radius + "px" + ",0)",
borderRadius: (b.corners * b.width >> 1) + "px"
})
}
var g = 0, i;
for (; g < b.lines; g++)i = h(d(), {
position: "absolute",
top: 1 + ~(b.width / 2) + "px",
transform: b.hwaccel ? "translate3d(0,0,0)" : "",
opacity: b.opacity,
animation: m && f(b.opacity, b.trail, g, b.lines) + " " + 1 / b.speed + "s linear infinite"
}), b.shadow && e(i, h(c("#000", "0 0 4px #000"), {top: "2px"})), e(a, e(i, c(b.color, "0 0 1px rgba(0,0,0,.1)")));
return a
}, opacity: function (a, b, c) {
b < a.childNodes.length && (a.childNodes[b].style.opacity = c)
}
}), function () {
function a(a, b) {
return d("<" + a + ' xmlns="urn:schemas-microsoft.com:vml" class="spin-vml">', b)
}
var b = h(d("group"), {behavior: "url(#default#VML)"});
!g(b, "transform") && b.adj ? (n.addRule(".spin-vml", "behavior:url(#default#VML)"), p.prototype.lines = function (b, c) {
function d() {
return h(a("group", {coordsize: i + " " + i, coordorigin: -g + " " + -g}), {width: i, height: i})
}
function f(b, f, i) {
e(k, e(h(d(), {
rotation: 360 / c.lines * b + "deg",
left: ~~f
}), e(h(a("roundrect", {arcsize: c.corners}), {
width: g,
height: c.width,
left: c.radius,
top: -c.width >> 1,
filter: i
}), a("fill", {color: c.color, opacity: c.opacity}), a("stroke", {opacity: 0}))))
}
var g = c.length + c.width, i = 2 * g, j = -(c.width + c.length) * 2 + "px", k = h(d(), {
position: "absolute",
top: j,
left: j
}), l;
if (c.shadow)for (l = 1; l <= c.lines; l++)f(l, -2, "progid:DXImageTransform.Microsoft.Blur(pixelradius=2,makeshadow=1,shadowopacity=.3)");
for (l = 1; l <= c.lines; l++)f(l);
return e(b, k)
}, p.prototype.opacity = function (a, b, c, d) {
var e = a.firstChild;
d = d.shadow && d.lines || 0, e && b + d < e.childNodes.length && (e = e.childNodes[b + d], e = e && e.firstChild, e = e && e.firstChild, e && (e.opacity = c))
}) : m = g(b, "animation")
}(), typeof define == "function" && define.amd ? define(function () {
return p
}) : a.Spinner = p
}(window, document), function () {
var a, b, c, d, e = function (a, b) {
return function () {
return a.apply(b, arguments)
}
};
$.fn.mikesModal = function (a) {
return this.modal = new b($(this))
}, b = function () {
function b(a) {
this.addClose = e(this.addClose, this), this.marginLeft = e(this.marginLeft, this), this.marginTop = e(this.marginTop, this), this.imageMaxHeight = e(this.imageMaxHeight, this), this.imageMaxWidth = e(this.imageMaxWidth, this), this.triggerClose = e(this.triggerClose, this), this.imagePosition = e(this.imagePosition, this), this.imageLoaded = e(this.imageLoaded, this), this.loaded = e(this.loaded, this), this.closed = e(this.closed, this), this.opened = e(this.opened, this), this.bindMethods = e(this.bindMethods, this), this.createAllClasses = e(this.createAllClasses, this), this.modalBox = a, this.bindMethods(), this.createAllClasses(), this.modalBox.trigger("open"), this.imageLoaded(), this.addClose(), this.triggerClose()
}
return b.prototype.createAllClasses = function () {
return new c(this.modalBox), new a(this.modalBox), new d(this.modalBox)
}, b.prototype.bindMethods = function () {
return this.opened(), this.loaded(), this.closed()
}, b.prototype.opened = function () {
var a = this;
return this.modalBox.bind("open", function () {
return a.modalBox.find("img").css({"max-width": a.imageMaxWidth(), "max-height": a.imageMaxHeight()})
})
}, b.prototype.closed = function () {
var a = this;
return this.modalBox.bind("close", function () {
return a.modalBox.hide()
})
}, b.prototype.loaded = function () {
var a = this;
return this.modalBox.bind("loaded", function () {
return a.modalBox.fadeIn("slow")
})
}, b.prototype.imageLoaded = function () {
var a = this;
this.modalBox.find("img").first().load(function () {
return a.imagePosition()
});
if (this.modalBox.find("img")[0].complete)return this.imagePosition()
}, b.prototype.imagePosition = function () {
return this.modalBox.trigger("loaded").css({
"margin-top": this.marginTop(),
"margin-left": this.marginLeft()
}), this.modalBox.css({
"margin-top": this.marginTop(),
"margin-left": this.marginLeft()
}), this.modalBox.css({
"margin-top": this.marginTop(),
"margin-left": this.marginLeft()
}), this.modalBox.find(".description").css({height: this.modalBox.find("img").height() - 20})
}, b.prototype.triggerClose = function () {
var a = this;
return $(document).keyup(function (b) {
if (b.keyCode === 27)return a.modalBox.trigger("close")
}), this.modalBox.find(".close").click(function () {
return a.modalBox.trigger("close")
})
}, b.prototype.imageMaxWidth = function () {
return window.innerWidth * .8 - 300
}, b.prototype.imageMaxHeight = function () {
return window.innerHeight * .8
}, b.prototype.marginTop = function () {
return document.width > 700 ? "-" + this.modalBox.height() / 2 + "px" : "-" + (this.modalBox.height() / 2 - 80) + "px"
}, b.prototype.marginLeft = function () {
return "-" + this.modalBox.width() / 2 + "px"
}, b.prototype.addClose = function () {
return $(".description").before("")
}, b
}(), d = function () {
function a(a) {
this.bindClicks = e(this.bindClicks, this), this.bindClosed = e(this.bindClosed, this), this.bindLoaded = e(this.bindLoaded, this), this.modalBox = a, this.bindLoaded(), this.bindClosed(), this.bindClicks()
}
return a.prototype.bindLoaded = function () {
var a = this;
return this.modalBox.bind("loaded", function () {
return $("#the-lights").length ? a.theLights = $("#the-lights") : (a.theLights = $("<div id='the-lights'></div>"), a.theLights.appendTo("body"))
})
}, a.prototype.bindClosed = function () {
var a = this;
return this.modalBox.bind("close", function () {
return a.theLights.remove()
})
}, a.prototype.bindClicks = function () {
var a = this;
return $("body").on("click touchstart", "#the-lights", function (b) {
return b.preventDefault(), b.stopPropagation(), a.modalBox.trigger("close")
})
}, a
}(), c = function () {
function a(a) {
this.bindFullClosed = e(this.bindFullClosed, this), this.bindFullLoaded = e(this.bindFullLoaded, this), this.modalBox = a, document.width > 700 && (this.bindFullLoaded(), this.bindFullClosed()), this.html = $("html")
}
return a.prototype.bindFullLoaded = function () {
var a = this;
return this.modalBox.bind("loaded", function () {
return a.html.css("overflow", "hidden")
})
}, a.prototype.bindFullClosed = function () {
var a = this;
return this.modalBox.bind("close", function () {
return a.html.css("overflow", "auto")
})
}, a
}(), a = function () {
function a(a) |
return a.prototype.bindOpened = function () {
var a = this;
return this.modalBox.bind("open", function () {
var b;
return a.loading = $("<div id='loading-modal'></div>"), a.loading.appendTo("body").css({top: $(window).scrollTop() + 300 + "px"}), b = (new Spinner(a.opts())).spin(document.getElementById("loading-modal"))
})
}, a.prototype.bindLoaded = function () {
var a = this;
return this.modalBox.bind("loaded", function () {
return a.loading.remove()
})
}, a.prototype.opts = function () {
return {
lines: 9,
length: 30,
width: 20,
radius: 40,
corners: 1,
rotate: 19,
color: "#fff",
speed: 1.2,
trail: 42,
shadow: !1,
hwaccel: !1,
className: "spinner",
zIndex: 2e9,
top: "auto",
left: "auto"
}
}, a
}()
}.call(this) | {
this.opts = e(this.opts, this), this.bindLoaded = e(this.bindLoaded, this), this.bindOpened = e(this.bindOpened, this), this.modalBox = a, this.bindOpened(), this.bindLoaded()
} | identifier_body |
mikes-modal.min.js | !function (a, b, c) {
function d(a, c) {
var d = b.createElement(a || "div"), e;
for (e in c)d[e] = c[e];
return d
}
function e(a) {
for (var b = 1, c = arguments.length; b < c; b++)a.appendChild(arguments[b]);
return a
}
function f(a, b, c, d) {
var e = ["opacity", b, ~~(a * 100), c, d].join("-"), f = .01 + c / d * 100, g = Math.max(1 - (1 - a) / b * (100 - f), a), h = m.substring(0, m.indexOf("Animation")).toLowerCase(), i = h && "-" + h + "-" || "";
return l[e] || (n.insertRule("@" + i + "keyframes " + e + "{" + "0%{opacity:" + g + "}" + f + "%{opacity:" + a + "}" + (f + .01) + "%{opacity:1}" + (f + b) % 100 + "%{opacity:" + a + "}" + "100%{opacity:" + g + "}" + "}", n.cssRules.length), l[e] = 1), e
}
function g(a, b) {
var d = a.style, e, f;
if (d[b] !== c)return b;
b = b.charAt(0).toUpperCase() + b.slice(1);
for (f = 0; f < k.length; f++) {
e = k[f] + b;
if (d[e] !== c)return e
}
}
function h(a, b) {
for (var c in b)a.style[g(a, c) || c] = b[c];
return a
}
function i(a) {
for (var b = 1; b < arguments.length; b++) {
var d = arguments[b];
for (var e in d)a[e] === c && (a[e] = d[e])
}
return a
}
function j(a) {
var b = {x: a.offsetLeft, y: a.offsetTop};
while (a = a.offsetParent)b.x += a.offsetLeft, b.y += a.offsetTop;
return b
}
var k = ["webkit", "Moz", "ms", "O"], l = {}, m, n = function () {
var a = d("style", {type: "text/css"});
return e(b.getElementsByTagName("head")[0], a), a.sheet || a.styleSheet
}(), o = {
lines: 12,
length: 7,
width: 5,
radius: 10,
rotate: 0,
corners: 1,
color: "#000",
speed: 1,
trail: 100,
opacity: .25,
fps: 20,
zIndex: 2e9,
className: "spinner",
top: "auto",
left: "auto"
}, p = function q(a) {
if (!this.spin)return new q(a);
this.opts = i(a || {}, q.defaults, o)
};
p.defaults = {}, i(p.prototype, {
spin: function (a) {
this.stop();
var b = this, c = b.opts, e = b.el = h(d(0, {className: c.className}), {
position: "relative",
width: 0,
zIndex: c.zIndex
}), f = c.radius + c.length + c.width, g, i;
a && (a.insertBefore(e, a.firstChild || null), i = j(a), g = j(e), h(e, {
left: (c.left == "auto" ? i.x - g.x + (a.offsetWidth >> 1) : parseInt(c.left, 10) + f) + "px",
top: (c.top == "auto" ? i.y - g.y + (a.offsetHeight >> 1) : parseInt(c.top, 10) + f) + "px"
})), e.setAttribute("aria-role", "progressbar"), b.lines(e, b.opts);
if (!m) {
var k = 0, l = c.fps, n = l / c.speed, o = (1 - c.opacity) / (n * c.trail / 100), p = n / c.lines;
(function q() {
k++;
for (var a = c.lines; a; a--) {
var d = Math.max(1 - (k + a * p) % n * o, c.opacity);
b.opacity(e, c.lines - a, d, c)
}
b.timeout = b.el && setTimeout(q, ~~(1e3 / l))
})()
}
return b
}, stop: function () {
var a = this.el;
return a && (clearTimeout(this.timeout), a.parentNode && a.parentNode.removeChild(a), this.el = c), this
}, lines: function (a, b) {
function c(a, c) {
return h(d(), {
position: "absolute",
width: b.length + b.width + "px",
height: b.width + "px",
background: a,
boxShadow: c,
transformOrigin: "left",
transform: "rotate(" + ~~(360 / b.lines * g + b.rotate) + "deg) translate(" + b.radius + "px" + ",0)",
borderRadius: (b.corners * b.width >> 1) + "px"
})
}
var g = 0, i;
for (; g < b.lines; g++)i = h(d(), {
position: "absolute",
top: 1 + ~(b.width / 2) + "px",
transform: b.hwaccel ? "translate3d(0,0,0)" : "",
opacity: b.opacity,
animation: m && f(b.opacity, b.trail, g, b.lines) + " " + 1 / b.speed + "s linear infinite"
}), b.shadow && e(i, h(c("#000", "0 0 4px #000"), {top: "2px"})), e(a, e(i, c(b.color, "0 0 1px rgba(0,0,0,.1)")));
return a
}, opacity: function (a, b, c) {
b < a.childNodes.length && (a.childNodes[b].style.opacity = c)
}
}), function () {
function a(a, b) {
return d("<" + a + ' xmlns="urn:schemas-microsoft.com:vml" class="spin-vml">', b)
}
var b = h(d("group"), {behavior: "url(#default#VML)"});
!g(b, "transform") && b.adj ? (n.addRule(".spin-vml", "behavior:url(#default#VML)"), p.prototype.lines = function (b, c) {
function d() {
return h(a("group", {coordsize: i + " " + i, coordorigin: -g + " " + -g}), {width: i, height: i})
}
function f(b, f, i) {
e(k, e(h(d(), {
rotation: 360 / c.lines * b + "deg",
left: ~~f
}), e(h(a("roundrect", {arcsize: c.corners}), {
width: g,
height: c.width,
left: c.radius,
top: -c.width >> 1,
filter: i
}), a("fill", {color: c.color, opacity: c.opacity}), a("stroke", {opacity: 0}))))
}
var g = c.length + c.width, i = 2 * g, j = -(c.width + c.length) * 2 + "px", k = h(d(), {
position: "absolute",
top: j,
left: j
}), l;
if (c.shadow)for (l = 1; l <= c.lines; l++)f(l, -2, "progid:DXImageTransform.Microsoft.Blur(pixelradius=2,makeshadow=1,shadowopacity=.3)");
for (l = 1; l <= c.lines; l++)f(l);
return e(b, k)
}, p.prototype.opacity = function (a, b, c, d) {
var e = a.firstChild;
d = d.shadow && d.lines || 0, e && b + d < e.childNodes.length && (e = e.childNodes[b + d], e = e && e.firstChild, e = e && e.firstChild, e && (e.opacity = c))
}) : m = g(b, "animation")
}(), typeof define == "function" && define.amd ? define(function () {
return p
}) : a.Spinner = p
}(window, document), function () {
var a, b, c, d, e = function (a, b) {
return function () {
return a.apply(b, arguments)
}
};
$.fn.mikesModal = function (a) {
return this.modal = new b($(this))
}, b = function () {
function b(a) {
this.addClose = e(this.addClose, this), this.marginLeft = e(this.marginLeft, this), this.marginTop = e(this.marginTop, this), this.imageMaxHeight = e(this.imageMaxHeight, this), this.imageMaxWidth = e(this.imageMaxWidth, this), this.triggerClose = e(this.triggerClose, this), this.imagePosition = e(this.imagePosition, this), this.imageLoaded = e(this.imageLoaded, this), this.loaded = e(this.loaded, this), this.closed = e(this.closed, this), this.opened = e(this.opened, this), this.bindMethods = e(this.bindMethods, this), this.createAllClasses = e(this.createAllClasses, this), this.modalBox = a, this.bindMethods(), this.createAllClasses(), this.modalBox.trigger("open"), this.imageLoaded(), this.addClose(), this.triggerClose()
}
return b.prototype.createAllClasses = function () {
return new c(this.modalBox), new a(this.modalBox), new d(this.modalBox)
}, b.prototype.bindMethods = function () {
return this.opened(), this.loaded(), this.closed()
}, b.prototype.opened = function () {
var a = this;
return this.modalBox.bind("open", function () {
return a.modalBox.find("img").css({"max-width": a.imageMaxWidth(), "max-height": a.imageMaxHeight()})
})
}, b.prototype.closed = function () {
var a = this;
return this.modalBox.bind("close", function () {
return a.modalBox.hide()
})
}, b.prototype.loaded = function () {
var a = this;
return this.modalBox.bind("loaded", function () {
return a.modalBox.fadeIn("slow")
})
}, b.prototype.imageLoaded = function () {
var a = this;
this.modalBox.find("img").first().load(function () {
return a.imagePosition()
});
if (this.modalBox.find("img")[0].complete)return this.imagePosition()
}, b.prototype.imagePosition = function () {
return this.modalBox.trigger("loaded").css({
"margin-top": this.marginTop(),
"margin-left": this.marginLeft()
}), this.modalBox.css({
"margin-top": this.marginTop(),
"margin-left": this.marginLeft()
}), this.modalBox.css({
"margin-top": this.marginTop(),
"margin-left": this.marginLeft()
}), this.modalBox.find(".description").css({height: this.modalBox.find("img").height() - 20})
}, b.prototype.triggerClose = function () {
var a = this;
return $(document).keyup(function (b) {
if (b.keyCode === 27)return a.modalBox.trigger("close")
}), this.modalBox.find(".close").click(function () {
return a.modalBox.trigger("close")
})
}, b.prototype.imageMaxWidth = function () {
return window.innerWidth * .8 - 300
}, b.prototype.imageMaxHeight = function () {
return window.innerHeight * .8
}, b.prototype.marginTop = function () {
return document.width > 700 ? "-" + this.modalBox.height() / 2 + "px" : "-" + (this.modalBox.height() / 2 - 80) + "px"
}, b.prototype.marginLeft = function () {
return "-" + this.modalBox.width() / 2 + "px"
}, b.prototype.addClose = function () {
return $(".description").before("")
}, b
}(), d = function () {
function a(a) {
this.bindClicks = e(this.bindClicks, this), this.bindClosed = e(this.bindClosed, this), this.bindLoaded = e(this.bindLoaded, this), this.modalBox = a, this.bindLoaded(), this.bindClosed(), this.bindClicks()
}
return a.prototype.bindLoaded = function () {
var a = this;
return this.modalBox.bind("loaded", function () {
return $("#the-lights").length ? a.theLights = $("#the-lights") : (a.theLights = $("<div id='the-lights'></div>"), a.theLights.appendTo("body"))
})
}, a.prototype.bindClosed = function () {
var a = this;
return this.modalBox.bind("close", function () {
return a.theLights.remove()
})
}, a.prototype.bindClicks = function () {
var a = this;
return $("body").on("click touchstart", "#the-lights", function (b) {
return b.preventDefault(), b.stopPropagation(), a.modalBox.trigger("close")
})
}, a
}(), c = function () {
function a(a) {
this.bindFullClosed = e(this.bindFullClosed, this), this.bindFullLoaded = e(this.bindFullLoaded, this), this.modalBox = a, document.width > 700 && (this.bindFullLoaded(), this.bindFullClosed()), this.html = $("html")
}
return a.prototype.bindFullLoaded = function () {
var a = this;
return this.modalBox.bind("loaded", function () {
return a.html.css("overflow", "hidden")
})
}, a.prototype.bindFullClosed = function () {
var a = this;
return this.modalBox.bind("close", function () {
return a.html.css("overflow", "auto")
})
}, a
}(), a = function () {
function a(a) {
this.opts = e(this.opts, this), this.bindLoaded = e(this.bindLoaded, this), this.bindOpened = e(this.bindOpened, this), this.modalBox = a, this.bindOpened(), this.bindLoaded()
}
return a.prototype.bindOpened = function () {
var a = this;
return this.modalBox.bind("open", function () {
var b;
return a.loading = $("<div id='loading-modal'></div>"), a.loading.appendTo("body").css({top: $(window).scrollTop() + 300 + "px"}), b = (new Spinner(a.opts())).spin(document.getElementById("loading-modal"))
})
}, a.prototype.bindLoaded = function () {
var a = this;
return this.modalBox.bind("loaded", function () {
return a.loading.remove()
}) | width: 20,
radius: 40,
corners: 1,
rotate: 19,
color: "#fff",
speed: 1.2,
trail: 42,
shadow: !1,
hwaccel: !1,
className: "spinner",
zIndex: 2e9,
top: "auto",
left: "auto"
}
}, a
}()
}.call(this) | }, a.prototype.opts = function () {
return {
lines: 9,
length: 30, | random_line_split |
mikes-modal.min.js | !function (a, b, c) {
function d(a, c) {
var d = b.createElement(a || "div"), e;
for (e in c)d[e] = c[e];
return d
}
function e(a) {
for (var b = 1, c = arguments.length; b < c; b++)a.appendChild(arguments[b]);
return a
}
function f(a, b, c, d) {
var e = ["opacity", b, ~~(a * 100), c, d].join("-"), f = .01 + c / d * 100, g = Math.max(1 - (1 - a) / b * (100 - f), a), h = m.substring(0, m.indexOf("Animation")).toLowerCase(), i = h && "-" + h + "-" || "";
return l[e] || (n.insertRule("@" + i + "keyframes " + e + "{" + "0%{opacity:" + g + "}" + f + "%{opacity:" + a + "}" + (f + .01) + "%{opacity:1}" + (f + b) % 100 + "%{opacity:" + a + "}" + "100%{opacity:" + g + "}" + "}", n.cssRules.length), l[e] = 1), e
}
function g(a, b) {
var d = a.style, e, f;
if (d[b] !== c)return b;
b = b.charAt(0).toUpperCase() + b.slice(1);
for (f = 0; f < k.length; f++) {
e = k[f] + b;
if (d[e] !== c)return e
}
}
function h(a, b) {
for (var c in b)a.style[g(a, c) || c] = b[c];
return a
}
function i(a) {
for (var b = 1; b < arguments.length; b++) {
var d = arguments[b];
for (var e in d)a[e] === c && (a[e] = d[e])
}
return a
}
function j(a) {
var b = {x: a.offsetLeft, y: a.offsetTop};
while (a = a.offsetParent)b.x += a.offsetLeft, b.y += a.offsetTop;
return b
}
var k = ["webkit", "Moz", "ms", "O"], l = {}, m, n = function () {
var a = d("style", {type: "text/css"});
return e(b.getElementsByTagName("head")[0], a), a.sheet || a.styleSheet
}(), o = {
lines: 12,
length: 7,
width: 5,
radius: 10,
rotate: 0,
corners: 1,
color: "#000",
speed: 1,
trail: 100,
opacity: .25,
fps: 20,
zIndex: 2e9,
className: "spinner",
top: "auto",
left: "auto"
}, p = function q(a) {
if (!this.spin)return new q(a);
this.opts = i(a || {}, q.defaults, o)
};
p.defaults = {}, i(p.prototype, {
spin: function (a) {
this.stop();
var b = this, c = b.opts, e = b.el = h(d(0, {className: c.className}), {
position: "relative",
width: 0,
zIndex: c.zIndex
}), f = c.radius + c.length + c.width, g, i;
a && (a.insertBefore(e, a.firstChild || null), i = j(a), g = j(e), h(e, {
left: (c.left == "auto" ? i.x - g.x + (a.offsetWidth >> 1) : parseInt(c.left, 10) + f) + "px",
top: (c.top == "auto" ? i.y - g.y + (a.offsetHeight >> 1) : parseInt(c.top, 10) + f) + "px"
})), e.setAttribute("aria-role", "progressbar"), b.lines(e, b.opts);
if (!m) {
var k = 0, l = c.fps, n = l / c.speed, o = (1 - c.opacity) / (n * c.trail / 100), p = n / c.lines;
(function q() {
k++;
for (var a = c.lines; a; a--) {
var d = Math.max(1 - (k + a * p) % n * o, c.opacity);
b.opacity(e, c.lines - a, d, c)
}
b.timeout = b.el && setTimeout(q, ~~(1e3 / l))
})()
}
return b
}, stop: function () {
var a = this.el;
return a && (clearTimeout(this.timeout), a.parentNode && a.parentNode.removeChild(a), this.el = c), this
}, lines: function (a, b) {
function c(a, c) {
return h(d(), {
position: "absolute",
width: b.length + b.width + "px",
height: b.width + "px",
background: a,
boxShadow: c,
transformOrigin: "left",
transform: "rotate(" + ~~(360 / b.lines * g + b.rotate) + "deg) translate(" + b.radius + "px" + ",0)",
borderRadius: (b.corners * b.width >> 1) + "px"
})
}
var g = 0, i;
for (; g < b.lines; g++)i = h(d(), {
position: "absolute",
top: 1 + ~(b.width / 2) + "px",
transform: b.hwaccel ? "translate3d(0,0,0)" : "",
opacity: b.opacity,
animation: m && f(b.opacity, b.trail, g, b.lines) + " " + 1 / b.speed + "s linear infinite"
}), b.shadow && e(i, h(c("#000", "0 0 4px #000"), {top: "2px"})), e(a, e(i, c(b.color, "0 0 1px rgba(0,0,0,.1)")));
return a
}, opacity: function (a, b, c) {
b < a.childNodes.length && (a.childNodes[b].style.opacity = c)
}
}), function () {
function a(a, b) {
return d("<" + a + ' xmlns="urn:schemas-microsoft.com:vml" class="spin-vml">', b)
}
var b = h(d("group"), {behavior: "url(#default#VML)"});
!g(b, "transform") && b.adj ? (n.addRule(".spin-vml", "behavior:url(#default#VML)"), p.prototype.lines = function (b, c) {
function d() {
return h(a("group", {coordsize: i + " " + i, coordorigin: -g + " " + -g}), {width: i, height: i})
}
function f(b, f, i) {
e(k, e(h(d(), {
rotation: 360 / c.lines * b + "deg",
left: ~~f
}), e(h(a("roundrect", {arcsize: c.corners}), {
width: g,
height: c.width,
left: c.radius,
top: -c.width >> 1,
filter: i
}), a("fill", {color: c.color, opacity: c.opacity}), a("stroke", {opacity: 0}))))
}
var g = c.length + c.width, i = 2 * g, j = -(c.width + c.length) * 2 + "px", k = h(d(), {
position: "absolute",
top: j,
left: j
}), l;
if (c.shadow)for (l = 1; l <= c.lines; l++)f(l, -2, "progid:DXImageTransform.Microsoft.Blur(pixelradius=2,makeshadow=1,shadowopacity=.3)");
for (l = 1; l <= c.lines; l++)f(l);
return e(b, k)
}, p.prototype.opacity = function (a, b, c, d) {
var e = a.firstChild;
d = d.shadow && d.lines || 0, e && b + d < e.childNodes.length && (e = e.childNodes[b + d], e = e && e.firstChild, e = e && e.firstChild, e && (e.opacity = c))
}) : m = g(b, "animation")
}(), typeof define == "function" && define.amd ? define(function () {
return p
}) : a.Spinner = p
}(window, document), function () {
var a, b, c, d, e = function (a, b) {
return function () {
return a.apply(b, arguments)
}
};
$.fn.mikesModal = function (a) {
return this.modal = new b($(this))
}, b = function () {
function | (a) {
this.addClose = e(this.addClose, this), this.marginLeft = e(this.marginLeft, this), this.marginTop = e(this.marginTop, this), this.imageMaxHeight = e(this.imageMaxHeight, this), this.imageMaxWidth = e(this.imageMaxWidth, this), this.triggerClose = e(this.triggerClose, this), this.imagePosition = e(this.imagePosition, this), this.imageLoaded = e(this.imageLoaded, this), this.loaded = e(this.loaded, this), this.closed = e(this.closed, this), this.opened = e(this.opened, this), this.bindMethods = e(this.bindMethods, this), this.createAllClasses = e(this.createAllClasses, this), this.modalBox = a, this.bindMethods(), this.createAllClasses(), this.modalBox.trigger("open"), this.imageLoaded(), this.addClose(), this.triggerClose()
}
return b.prototype.createAllClasses = function () {
return new c(this.modalBox), new a(this.modalBox), new d(this.modalBox)
}, b.prototype.bindMethods = function () {
return this.opened(), this.loaded(), this.closed()
}, b.prototype.opened = function () {
var a = this;
return this.modalBox.bind("open", function () {
return a.modalBox.find("img").css({"max-width": a.imageMaxWidth(), "max-height": a.imageMaxHeight()})
})
}, b.prototype.closed = function () {
var a = this;
return this.modalBox.bind("close", function () {
return a.modalBox.hide()
})
}, b.prototype.loaded = function () {
var a = this;
return this.modalBox.bind("loaded", function () {
return a.modalBox.fadeIn("slow")
})
}, b.prototype.imageLoaded = function () {
var a = this;
this.modalBox.find("img").first().load(function () {
return a.imagePosition()
});
if (this.modalBox.find("img")[0].complete)return this.imagePosition()
}, b.prototype.imagePosition = function () {
return this.modalBox.trigger("loaded").css({
"margin-top": this.marginTop(),
"margin-left": this.marginLeft()
}), this.modalBox.css({
"margin-top": this.marginTop(),
"margin-left": this.marginLeft()
}), this.modalBox.css({
"margin-top": this.marginTop(),
"margin-left": this.marginLeft()
}), this.modalBox.find(".description").css({height: this.modalBox.find("img").height() - 20})
}, b.prototype.triggerClose = function () {
var a = this;
return $(document).keyup(function (b) {
if (b.keyCode === 27)return a.modalBox.trigger("close")
}), this.modalBox.find(".close").click(function () {
return a.modalBox.trigger("close")
})
}, b.prototype.imageMaxWidth = function () {
return window.innerWidth * .8 - 300
}, b.prototype.imageMaxHeight = function () {
return window.innerHeight * .8
}, b.prototype.marginTop = function () {
return document.width > 700 ? "-" + this.modalBox.height() / 2 + "px" : "-" + (this.modalBox.height() / 2 - 80) + "px"
}, b.prototype.marginLeft = function () {
return "-" + this.modalBox.width() / 2 + "px"
}, b.prototype.addClose = function () {
return $(".description").before("")
}, b
}(), d = function () {
function a(a) {
this.bindClicks = e(this.bindClicks, this), this.bindClosed = e(this.bindClosed, this), this.bindLoaded = e(this.bindLoaded, this), this.modalBox = a, this.bindLoaded(), this.bindClosed(), this.bindClicks()
}
return a.prototype.bindLoaded = function () {
var a = this;
return this.modalBox.bind("loaded", function () {
return $("#the-lights").length ? a.theLights = $("#the-lights") : (a.theLights = $("<div id='the-lights'></div>"), a.theLights.appendTo("body"))
})
}, a.prototype.bindClosed = function () {
var a = this;
return this.modalBox.bind("close", function () {
return a.theLights.remove()
})
}, a.prototype.bindClicks = function () {
var a = this;
return $("body").on("click touchstart", "#the-lights", function (b) {
return b.preventDefault(), b.stopPropagation(), a.modalBox.trigger("close")
})
}, a
}(), c = function () {
function a(a) {
this.bindFullClosed = e(this.bindFullClosed, this), this.bindFullLoaded = e(this.bindFullLoaded, this), this.modalBox = a, document.width > 700 && (this.bindFullLoaded(), this.bindFullClosed()), this.html = $("html")
}
return a.prototype.bindFullLoaded = function () {
var a = this;
return this.modalBox.bind("loaded", function () {
return a.html.css("overflow", "hidden")
})
}, a.prototype.bindFullClosed = function () {
var a = this;
return this.modalBox.bind("close", function () {
return a.html.css("overflow", "auto")
})
}, a
}(), a = function () {
function a(a) {
this.opts = e(this.opts, this), this.bindLoaded = e(this.bindLoaded, this), this.bindOpened = e(this.bindOpened, this), this.modalBox = a, this.bindOpened(), this.bindLoaded()
}
return a.prototype.bindOpened = function () {
var a = this;
return this.modalBox.bind("open", function () {
var b;
return a.loading = $("<div id='loading-modal'></div>"), a.loading.appendTo("body").css({top: $(window).scrollTop() + 300 + "px"}), b = (new Spinner(a.opts())).spin(document.getElementById("loading-modal"))
})
}, a.prototype.bindLoaded = function () {
var a = this;
return this.modalBox.bind("loaded", function () {
return a.loading.remove()
})
}, a.prototype.opts = function () {
return {
lines: 9,
length: 30,
width: 20,
radius: 40,
corners: 1,
rotate: 19,
color: "#fff",
speed: 1.2,
trail: 42,
shadow: !1,
hwaccel: !1,
className: "spinner",
zIndex: 2e9,
top: "auto",
left: "auto"
}
}, a
}()
}.call(this) | b | identifier_name |
metadb_test.go | package metadb
import (
"database/sql"
"errors"
"fmt"
"os"
"testing"
_ "github.com/mattn/go-sqlite3"
)
const TestDBPath = "./test.sqlite"
// TODO: Should unit tests be refactored so that all tests of methods attached
// to Instance are coupled to the test for NewInstance itself? This could
// entirely eliminate the need to work with fixtures as all data would be
// directly manipulated by the very methods being tested. Not only that, but
// this might eliminate the need to separately test toValueType and
// fromBlobString.
// panicked takes a simple function to execute and returns an error containing
// the data passed to panic from within the function, and nil if no panic
// occurred.
func panicked(fn func()) error {
ch := make(chan error)
go func() {
defer func() {
// if function didn't panic, return nil
if r := recover(); r == nil {
ch <- nil
} else { // else, return error
switch r.(type) {
case error:
ch <- r.(error)
case string:
ch <- errors.New(r.(string))
default:
ch <- nil
}
}
}()
fn()
}()
return <-ch
}
// RunWithDB runs a closure passing it a database handle which is disposed of
// afterward.
func RunWithDB(fn func(*sql.DB)) {
db, err := sql.Open("sqlite3", TestDBPath)
if err != nil {
panic(err)
}
fn(db)
err = db.Close()
if err != nil {
panic(err)
}
if err := os.Remove(TestDBPath); err != nil {
panic(err)
}
}
// RunWithInstance runs a closure passing it an Instance.
func RunWithInstance(fn func(*Instance)) {
RunWithDB(func(db *sql.DB) {
if instance, err := NewInstance(db); err != nil {
panic(err)
} else {
fn(instance)
}
})
}
// EntryFixture contains the basic data required for a metadata entry.
type EntryFixture struct {
Name string
Value interface{}
ValueType uint
}
// InsertFixtures takes a list of EntryFixtures and inserts them into the
// database handle managed by the provided Instance.
func InsertFixtures(instance *Instance, fixtures []EntryFixture) {
for _, fixture := range fixtures {
_, err := instance.DB.Exec(`
INSERT INTO metadata (Name, Value, ValueType) Values (?, ?, ?)
`, fixture.Name, fixture.Value, fixture.ValueType)
if err != nil {
panic(fmt.Sprint("tests: failed to insert fixtures:\n", err))
}
}
}
// GetFixtures returns an array of EntryFixtures read from all the metadata
// entries in the database managed by the provided Instance.
func GetFixtures(instance *Instance) map[string]*EntryFixture {
rows, err := instance.DB.Query("SELECT Name, Value, ValueType FROM metadata;")
if err != nil {
panic(fmt.Sprint("tests: failed to retrieve fixtures:\n", err))
}
fixtures := make(map[string]*EntryFixture)
for rows.Next() {
var value string
fixture := EntryFixture{}
if err := rows.Scan(&fixture.Name, &value, &fixture.ValueType); err != nil {
panic(fmt.Errorf("tests: failed to scan row while retrieving fixtures:\n%s", err))
}
fixture.Value = value
fixtures[fixture.Name] = &fixture
}
return fixtures
}
// TestNewInstance ensures that an Instance object is returned as expected with
// a valid database handle, and an error with an invalid handle.
func TestNewInstance(t *testing.T) |
// TestExists ensures that Instance.Exists is accurate.
func TestExists(t *testing.T) {
RunWithInstance(func(instance *Instance) {
InsertFixtures(instance, []EntryFixture{
{Name: "foo", Value: "bar", ValueType: 3},
})
if instance.Exists("bar") {
t.Error("Instance.Exists: got 'true' expected 'false'")
}
if !instance.Exists("foo") {
t.Error("Instance.Exists: got 'false' expected 'true'")
}
})
}
// TestToValueType ensures that the correct type index is returned for each of
// the allowed types.
func TestToValueType(t *testing.T) {
testValid := func(value interface{}, expected uint) {
if res, err := toValueType(value); err != nil {
t.Error("toValueType: got error:\n", err)
} else if res != expected {
t.Errorf("toValueType: got '%d' expected '%d'", res, expected)
}
}
testValid(true, 0)
testValid(281, 1)
testValid(43.183, 2)
testValid("hello world!", 3)
if _, err := toValueType([]string{"disallowed", "type"}); err == nil {
t.Error("toValueType: expected error with disallowed type")
}
}
// TestFromBlobString ensures that the correct data is returned for a number
// of combinations of blob strings and value types.
func TestFromBlobString(t *testing.T) {
RunWithInstance(func(instance *Instance) {
InsertFixtures(instance, []EntryFixture{
{Name: "bool", Value: true, ValueType: 0},
{Name: "invalidBool", Value: "maybe", ValueType: 0},
{Name: "int", Value: 239, ValueType: 1},
{Name: "invalidInt", Value: "not a number", ValueType: 1},
{Name: "float", Value: 21.42, ValueType: 2},
{Name: "invalidFloat", Value: "21.48aje21", ValueType: 2},
{Name: "string", Value: "hello world!", ValueType: 3},
{Name: "unknown", Value: "nothing", ValueType: 100},
})
fixtures := GetFixtures(instance)
testFixture := func(name string, expected interface{}) {
fixture := fixtures[name]
res, err := fromBlobString(fixture.Value.(string), fixture.ValueType)
if err != nil {
t.Error("fromBlobString: got errror:\n", err)
} else if res != expected {
t.Errorf("fromBlobString: got '%v' expected '%v'", res, expected)
}
}
expectError := func(name string, msg string) {
fixture := fixtures[name]
if _, err := fromBlobString(fixture.Value.(string), fixture.ValueType); err == nil {
t.Errorf("fromBlobString: expected error with %s", msg)
}
}
testFixture("bool", true)
testFixture("int", 239)
testFixture("float", 21.42)
testFixture("string", "hello world!")
expectError("invalidBool", "invalid boolean blob string")
expectError("invalidInt", "invalid integer blob string")
expectError("invalidFloat", "invalid float blob string")
expectError("unknown", "invalid value type")
})
}
// TestGetValueType ensures that getValueType returns accurate data.
func TestGetValueType(t *testing.T) {
RunWithInstance(func(instance *Instance) {
InsertFixtures(instance, []EntryFixture{
{Name: "foo", Value: "1", ValueType: 0},
{Name: "bar", Value: "1011", ValueType: 1},
})
testValueType := func(name string, expected uint) {
if res, err := instance.getValueType(name); err != nil {
t.Error("Instance.getValueType: got error:\n", err)
} else if res != expected {
t.Errorf("Instance.getValueType: got '%d' expected '%d'", res, expected)
}
}
testValueType("foo", 0)
testValueType("bar", 1)
_, err := instance.getValueType("unknown")
if err == nil {
t.Error("Instance.getValueType: expected error with missing entry")
} else if _, ok := err.(*ErrNoEntry); !ok {
t.Error("Instance.getValueType: expected error of type *ErrNoEntry")
}
})
}
// TestGetAndSet ensures that Get and Set respond as expected to different
// combinations of data and that data can be accurately read and updated
// once set.
func TestGetAndSet(t *testing.T) {
RunWithInstance(func(instance *Instance) {
checkResultWithBool := func(name string, fetched interface{}, expected bool) {
if res, ok := fetched.(bool); ok {
if res != expected {
t.Errorf("Instance.%s: got '%t' expected '%t'", name, res, expected)
}
} else {
t.Errorf("Instance.%s: got result of an unknown type, expected 'bool'", name)
}
}
if err := instance.Set("foo", true); err != nil {
t.Fatal("Instance.Set: got error:\n", err)
}
if foo, err := instance.Get("foo"); err != nil {
t.Error("Instance.Get: got error:\n", err)
} else {
checkResultWithBool("Get", foo, true)
}
if _, err := instance.Get("bar"); err == nil {
t.Error("Instance.Get: expected error with non-existent entry")
}
if err := instance.Set("foo", false); err != nil {
t.Fatal("Instance.Set: got error:\n", err)
}
foo := instance.MustGet("foo")
checkResultWithBool("MustGet", foo, false)
if err := panicked(func() { instance.MustGet("bar") }); err == nil {
t.Error("Instance.MustGet: expected panic with non-existent entry")
} else if _, ok := err.(*ErrNoEntry); !ok {
t.Error("Instance.MustGet: expected error of type *ErrNoEntry")
}
if err := instance.Set("foo", []string{"disallowed", "type"}); err == nil {
t.Error("Instance.Set: expected error with new value of disallowed type")
}
if err := instance.Set("foo", 1784); err == nil {
t.Error("Instance.Set: expected error with new value of different type than existing")
}
if err := panicked(func() { instance.MustSet("foo", true) }); err != nil {
t.Error("Instance.MustSet: got panic:\n", err)
}
if err := panicked(func() { instance.MustSet("foo", 1834) }); err == nil {
t.Error("Instance.MustSet: expected panic with new value of different type than existing")
}
if err := instance.ForceSet("foo", 1873); err != nil {
t.Error("Instance.ForceSet: got error:\n", err)
}
if err := panicked(func() { instance.MustForceSet("foo", 1891) }); err != nil {
t.Error("Instance.MustForceSet: got panic:\n", err)
}
if err := panicked(func() { instance.MustForceSet("foo", []string{"disallowed", "type"}) }); err == nil {
t.Error("Instance.MustForceSet: expected panic with new value of disallowed type")
}
})
}
// TestDelete ensures that metadata entries inserted by means of a fixture are
// properly deleted and that attempting to delete a non-existent entry results
// in an ErrNoEntry.
func TestDelete(t *testing.T) {
RunWithInstance(func(instance *Instance) {
InsertFixtures(instance, []EntryFixture{
{Name: "int", Value: "2891", ValueType: 1},
{Name: "string", Value: "hello world!", ValueType: 3},
})
if err := instance.Delete("int"); err != nil {
t.Error("Instance.Delete: got error:\n", err)
}
if err := panicked(func() { instance.MustDelete("string") }); err != nil {
t.Error("Instance.MustDelete: got panic:\n", err)
}
if err := instance.Delete("foo"); err == nil {
t.Error("Instance.Delete: expected error with non-existent entry")
} else if _, ok := err.(*ErrNoEntry); !ok {
t.Error("Instance.Delete: expected error of type *ErrNoEntry")
}
if err := panicked(func() { instance.MustDelete("foo") }); err == nil {
t.Error("Instance.MustDelete: expected panic with non-existent entry")
}
})
}
| {
if _, err := NewInstance(nil); err == nil {
t.Error("NewInstance: expected error with nil database handle")
}
RunWithDB(func(db *sql.DB) {
if _, err := NewInstance(db); err != nil {
t.Fatal("NewInstance: got error:\n", err)
}
})
} | identifier_body |
metadb_test.go | package metadb
import (
"database/sql"
"errors"
"fmt"
"os"
"testing"
_ "github.com/mattn/go-sqlite3"
)
const TestDBPath = "./test.sqlite"
// TODO: Should unit tests be refactored so that all tests of methods attached
// to Instance are coupled to the test for NewInstance itself? This could
// entirely eliminate the need to work with fixtures as all data would be
// directly manipulated by the very methods being tested. Not only that, but
// this might eliminate the need to separately test toValueType and
// fromBlobString.
// panicked takes a simple function to execute and returns an error containing
// the data passed to panic from within the function, and nil if no panic
// occurred.
func panicked(fn func()) error {
ch := make(chan error)
go func() {
defer func() {
// if function didn't panic, return nil
if r := recover(); r == nil {
ch <- nil
} else { // else, return error
switch r.(type) {
case error:
ch <- r.(error)
case string:
ch <- errors.New(r.(string))
default:
ch <- nil
}
}
}()
fn()
}()
return <-ch
}
// RunWithDB runs a closure passing it a database handle which is disposed of
// afterward.
func RunWithDB(fn func(*sql.DB)) {
db, err := sql.Open("sqlite3", TestDBPath)
if err != nil {
panic(err)
}
fn(db)
err = db.Close()
if err != nil |
if err := os.Remove(TestDBPath); err != nil {
panic(err)
}
}
// RunWithInstance runs a closure passing it an Instance.
func RunWithInstance(fn func(*Instance)) {
RunWithDB(func(db *sql.DB) {
if instance, err := NewInstance(db); err != nil {
panic(err)
} else {
fn(instance)
}
})
}
// EntryFixture contains the basic data required for a metadata entry.
type EntryFixture struct {
Name string
Value interface{}
ValueType uint
}
// InsertFixtures takes a list of EntryFixtures and inserts them into the
// database handle managed by the provided Instance.
func InsertFixtures(instance *Instance, fixtures []EntryFixture) {
for _, fixture := range fixtures {
_, err := instance.DB.Exec(`
INSERT INTO metadata (Name, Value, ValueType) Values (?, ?, ?)
`, fixture.Name, fixture.Value, fixture.ValueType)
if err != nil {
panic(fmt.Sprint("tests: failed to insert fixtures:\n", err))
}
}
}
// GetFixtures returns an array of EntryFixtures read from all the metadata
// entries in the database managed by the provided Instance.
func GetFixtures(instance *Instance) map[string]*EntryFixture {
rows, err := instance.DB.Query("SELECT Name, Value, ValueType FROM metadata;")
if err != nil {
panic(fmt.Sprint("tests: failed to retrieve fixtures:\n", err))
}
fixtures := make(map[string]*EntryFixture)
for rows.Next() {
var value string
fixture := EntryFixture{}
if err := rows.Scan(&fixture.Name, &value, &fixture.ValueType); err != nil {
panic(fmt.Errorf("tests: failed to scan row while retrieving fixtures:\n%s", err))
}
fixture.Value = value
fixtures[fixture.Name] = &fixture
}
return fixtures
}
// TestNewInstance ensures that an Instance object is returned as expected with
// a valid database handle, and an error with an invalid handle.
func TestNewInstance(t *testing.T) {
if _, err := NewInstance(nil); err == nil {
t.Error("NewInstance: expected error with nil database handle")
}
RunWithDB(func(db *sql.DB) {
if _, err := NewInstance(db); err != nil {
t.Fatal("NewInstance: got error:\n", err)
}
})
}
// TestExists ensures that Instance.Exists is accurate.
func TestExists(t *testing.T) {
RunWithInstance(func(instance *Instance) {
InsertFixtures(instance, []EntryFixture{
{Name: "foo", Value: "bar", ValueType: 3},
})
if instance.Exists("bar") {
t.Error("Instance.Exists: got 'true' expected 'false'")
}
if !instance.Exists("foo") {
t.Error("Instance.Exists: got 'false' expected 'true'")
}
})
}
// TestToValueType ensures that the correct type index is returned for each of
// the allowed types.
func TestToValueType(t *testing.T) {
testValid := func(value interface{}, expected uint) {
if res, err := toValueType(value); err != nil {
t.Error("toValueType: got error:\n", err)
} else if res != expected {
t.Errorf("toValueType: got '%d' expected '%d'", res, expected)
}
}
testValid(true, 0)
testValid(281, 1)
testValid(43.183, 2)
testValid("hello world!", 3)
if _, err := toValueType([]string{"disallowed", "type"}); err == nil {
t.Error("toValueType: expected error with disallowed type")
}
}
// TestFromBlobString ensures that the correct data is returned for a number
// of combinations of blob strings and value types.
func TestFromBlobString(t *testing.T) {
RunWithInstance(func(instance *Instance) {
InsertFixtures(instance, []EntryFixture{
{Name: "bool", Value: true, ValueType: 0},
{Name: "invalidBool", Value: "maybe", ValueType: 0},
{Name: "int", Value: 239, ValueType: 1},
{Name: "invalidInt", Value: "not a number", ValueType: 1},
{Name: "float", Value: 21.42, ValueType: 2},
{Name: "invalidFloat", Value: "21.48aje21", ValueType: 2},
{Name: "string", Value: "hello world!", ValueType: 3},
{Name: "unknown", Value: "nothing", ValueType: 100},
})
fixtures := GetFixtures(instance)
testFixture := func(name string, expected interface{}) {
fixture := fixtures[name]
res, err := fromBlobString(fixture.Value.(string), fixture.ValueType)
if err != nil {
t.Error("fromBlobString: got errror:\n", err)
} else if res != expected {
t.Errorf("fromBlobString: got '%v' expected '%v'", res, expected)
}
}
expectError := func(name string, msg string) {
fixture := fixtures[name]
if _, err := fromBlobString(fixture.Value.(string), fixture.ValueType); err == nil {
t.Errorf("fromBlobString: expected error with %s", msg)
}
}
testFixture("bool", true)
testFixture("int", 239)
testFixture("float", 21.42)
testFixture("string", "hello world!")
expectError("invalidBool", "invalid boolean blob string")
expectError("invalidInt", "invalid integer blob string")
expectError("invalidFloat", "invalid float blob string")
expectError("unknown", "invalid value type")
})
}
// TestGetValueType ensures that getValueType returns accurate data.
func TestGetValueType(t *testing.T) {
RunWithInstance(func(instance *Instance) {
InsertFixtures(instance, []EntryFixture{
{Name: "foo", Value: "1", ValueType: 0},
{Name: "bar", Value: "1011", ValueType: 1},
})
testValueType := func(name string, expected uint) {
if res, err := instance.getValueType(name); err != nil {
t.Error("Instance.getValueType: got error:\n", err)
} else if res != expected {
t.Errorf("Instance.getValueType: got '%d' expected '%d'", res, expected)
}
}
testValueType("foo", 0)
testValueType("bar", 1)
_, err := instance.getValueType("unknown")
if err == nil {
t.Error("Instance.getValueType: expected error with missing entry")
} else if _, ok := err.(*ErrNoEntry); !ok {
t.Error("Instance.getValueType: expected error of type *ErrNoEntry")
}
})
}
// TestGetAndSet ensures that Get and Set respond as expected to different
// combinations of data and that data can be accurately read and updated
// once set.
func TestGetAndSet(t *testing.T) {
RunWithInstance(func(instance *Instance) {
checkResultWithBool := func(name string, fetched interface{}, expected bool) {
if res, ok := fetched.(bool); ok {
if res != expected {
t.Errorf("Instance.%s: got '%t' expected '%t'", name, res, expected)
}
} else {
t.Errorf("Instance.%s: got result of an unknown type, expected 'bool'", name)
}
}
if err := instance.Set("foo", true); err != nil {
t.Fatal("Instance.Set: got error:\n", err)
}
if foo, err := instance.Get("foo"); err != nil {
t.Error("Instance.Get: got error:\n", err)
} else {
checkResultWithBool("Get", foo, true)
}
if _, err := instance.Get("bar"); err == nil {
t.Error("Instance.Get: expected error with non-existent entry")
}
if err := instance.Set("foo", false); err != nil {
t.Fatal("Instance.Set: got error:\n", err)
}
foo := instance.MustGet("foo")
checkResultWithBool("MustGet", foo, false)
if err := panicked(func() { instance.MustGet("bar") }); err == nil {
t.Error("Instance.MustGet: expected panic with non-existent entry")
} else if _, ok := err.(*ErrNoEntry); !ok {
t.Error("Instance.MustGet: expected error of type *ErrNoEntry")
}
if err := instance.Set("foo", []string{"disallowed", "type"}); err == nil {
t.Error("Instance.Set: expected error with new value of disallowed type")
}
if err := instance.Set("foo", 1784); err == nil {
t.Error("Instance.Set: expected error with new value of different type than existing")
}
if err := panicked(func() { instance.MustSet("foo", true) }); err != nil {
t.Error("Instance.MustSet: got panic:\n", err)
}
if err := panicked(func() { instance.MustSet("foo", 1834) }); err == nil {
t.Error("Instance.MustSet: expected panic with new value of different type than existing")
}
if err := instance.ForceSet("foo", 1873); err != nil {
t.Error("Instance.ForceSet: got error:\n", err)
}
if err := panicked(func() { instance.MustForceSet("foo", 1891) }); err != nil {
t.Error("Instance.MustForceSet: got panic:\n", err)
}
if err := panicked(func() { instance.MustForceSet("foo", []string{"disallowed", "type"}) }); err == nil {
t.Error("Instance.MustForceSet: expected panic with new value of disallowed type")
}
})
}
// TestDelete ensures that metadata entries inserted by means of a fixture are
// properly deleted and that attempting to delete a non-existent entry results
// in an ErrNoEntry.
func TestDelete(t *testing.T) {
RunWithInstance(func(instance *Instance) {
InsertFixtures(instance, []EntryFixture{
{Name: "int", Value: "2891", ValueType: 1},
{Name: "string", Value: "hello world!", ValueType: 3},
})
if err := instance.Delete("int"); err != nil {
t.Error("Instance.Delete: got error:\n", err)
}
if err := panicked(func() { instance.MustDelete("string") }); err != nil {
t.Error("Instance.MustDelete: got panic:\n", err)
}
if err := instance.Delete("foo"); err == nil {
t.Error("Instance.Delete: expected error with non-existent entry")
} else if _, ok := err.(*ErrNoEntry); !ok {
t.Error("Instance.Delete: expected error of type *ErrNoEntry")
}
if err := panicked(func() { instance.MustDelete("foo") }); err == nil {
t.Error("Instance.MustDelete: expected panic with non-existent entry")
}
})
}
| {
panic(err)
} | conditional_block |
metadb_test.go | package metadb
import (
"database/sql"
"errors"
"fmt"
"os"
"testing"
_ "github.com/mattn/go-sqlite3"
)
const TestDBPath = "./test.sqlite"
// TODO: Should unit tests be refactored so that all tests of methods attached
// to Instance are coupled to the test for NewInstance itself? This could
// entirely eliminate the need to work with fixtures as all data would be
// directly manipulated by the very methods being tested. Not only that, but
// this might eliminate the need to separately test toValueType and
// fromBlobString.
// panicked takes a simple function to execute and returns an error containing
// the data passed to panic from within the function, and nil if no panic
// occurred.
func panicked(fn func()) error {
ch := make(chan error)
go func() {
defer func() {
// if function didn't panic, return nil
if r := recover(); r == nil {
ch <- nil
} else { // else, return error
switch r.(type) {
case error:
ch <- r.(error)
case string:
ch <- errors.New(r.(string))
default:
ch <- nil
}
}
}()
fn()
}()
return <-ch
}
// RunWithDB runs a closure passing it a database handle which is disposed of
// afterward.
func RunWithDB(fn func(*sql.DB)) {
db, err := sql.Open("sqlite3", TestDBPath)
if err != nil {
panic(err)
}
fn(db)
err = db.Close()
if err != nil {
panic(err)
}
if err := os.Remove(TestDBPath); err != nil {
panic(err)
}
}
| func RunWithInstance(fn func(*Instance)) {
RunWithDB(func(db *sql.DB) {
if instance, err := NewInstance(db); err != nil {
panic(err)
} else {
fn(instance)
}
})
}
// EntryFixture contains the basic data required for a metadata entry.
type EntryFixture struct {
Name string
Value interface{}
ValueType uint
}
// InsertFixtures takes a list of EntryFixtures and inserts them into the
// database handle managed by the provided Instance.
func InsertFixtures(instance *Instance, fixtures []EntryFixture) {
for _, fixture := range fixtures {
_, err := instance.DB.Exec(`
INSERT INTO metadata (Name, Value, ValueType) Values (?, ?, ?)
`, fixture.Name, fixture.Value, fixture.ValueType)
if err != nil {
panic(fmt.Sprint("tests: failed to insert fixtures:\n", err))
}
}
}
// GetFixtures returns an array of EntryFixtures read from all the metadata
// entries in the database managed by the provided Instance.
func GetFixtures(instance *Instance) map[string]*EntryFixture {
rows, err := instance.DB.Query("SELECT Name, Value, ValueType FROM metadata;")
if err != nil {
panic(fmt.Sprint("tests: failed to retrieve fixtures:\n", err))
}
fixtures := make(map[string]*EntryFixture)
for rows.Next() {
var value string
fixture := EntryFixture{}
if err := rows.Scan(&fixture.Name, &value, &fixture.ValueType); err != nil {
panic(fmt.Errorf("tests: failed to scan row while retrieving fixtures:\n%s", err))
}
fixture.Value = value
fixtures[fixture.Name] = &fixture
}
return fixtures
}
// TestNewInstance ensures that an Instance object is returned as expected with
// a valid database handle, and an error with an invalid handle.
func TestNewInstance(t *testing.T) {
if _, err := NewInstance(nil); err == nil {
t.Error("NewInstance: expected error with nil database handle")
}
RunWithDB(func(db *sql.DB) {
if _, err := NewInstance(db); err != nil {
t.Fatal("NewInstance: got error:\n", err)
}
})
}
// TestExists ensures that Instance.Exists is accurate.
func TestExists(t *testing.T) {
RunWithInstance(func(instance *Instance) {
InsertFixtures(instance, []EntryFixture{
{Name: "foo", Value: "bar", ValueType: 3},
})
if instance.Exists("bar") {
t.Error("Instance.Exists: got 'true' expected 'false'")
}
if !instance.Exists("foo") {
t.Error("Instance.Exists: got 'false' expected 'true'")
}
})
}
// TestToValueType ensures that the correct type index is returned for each of
// the allowed types.
func TestToValueType(t *testing.T) {
testValid := func(value interface{}, expected uint) {
if res, err := toValueType(value); err != nil {
t.Error("toValueType: got error:\n", err)
} else if res != expected {
t.Errorf("toValueType: got '%d' expected '%d'", res, expected)
}
}
testValid(true, 0)
testValid(281, 1)
testValid(43.183, 2)
testValid("hello world!", 3)
if _, err := toValueType([]string{"disallowed", "type"}); err == nil {
t.Error("toValueType: expected error with disallowed type")
}
}
// TestFromBlobString ensures that the correct data is returned for a number
// of combinations of blob strings and value types.
func TestFromBlobString(t *testing.T) {
RunWithInstance(func(instance *Instance) {
InsertFixtures(instance, []EntryFixture{
{Name: "bool", Value: true, ValueType: 0},
{Name: "invalidBool", Value: "maybe", ValueType: 0},
{Name: "int", Value: 239, ValueType: 1},
{Name: "invalidInt", Value: "not a number", ValueType: 1},
{Name: "float", Value: 21.42, ValueType: 2},
{Name: "invalidFloat", Value: "21.48aje21", ValueType: 2},
{Name: "string", Value: "hello world!", ValueType: 3},
{Name: "unknown", Value: "nothing", ValueType: 100},
})
fixtures := GetFixtures(instance)
testFixture := func(name string, expected interface{}) {
fixture := fixtures[name]
res, err := fromBlobString(fixture.Value.(string), fixture.ValueType)
if err != nil {
t.Error("fromBlobString: got errror:\n", err)
} else if res != expected {
t.Errorf("fromBlobString: got '%v' expected '%v'", res, expected)
}
}
expectError := func(name string, msg string) {
fixture := fixtures[name]
if _, err := fromBlobString(fixture.Value.(string), fixture.ValueType); err == nil {
t.Errorf("fromBlobString: expected error with %s", msg)
}
}
testFixture("bool", true)
testFixture("int", 239)
testFixture("float", 21.42)
testFixture("string", "hello world!")
expectError("invalidBool", "invalid boolean blob string")
expectError("invalidInt", "invalid integer blob string")
expectError("invalidFloat", "invalid float blob string")
expectError("unknown", "invalid value type")
})
}
// TestGetValueType ensures that getValueType returns accurate data.
func TestGetValueType(t *testing.T) {
RunWithInstance(func(instance *Instance) {
InsertFixtures(instance, []EntryFixture{
{Name: "foo", Value: "1", ValueType: 0},
{Name: "bar", Value: "1011", ValueType: 1},
})
testValueType := func(name string, expected uint) {
if res, err := instance.getValueType(name); err != nil {
t.Error("Instance.getValueType: got error:\n", err)
} else if res != expected {
t.Errorf("Instance.getValueType: got '%d' expected '%d'", res, expected)
}
}
testValueType("foo", 0)
testValueType("bar", 1)
_, err := instance.getValueType("unknown")
if err == nil {
t.Error("Instance.getValueType: expected error with missing entry")
} else if _, ok := err.(*ErrNoEntry); !ok {
t.Error("Instance.getValueType: expected error of type *ErrNoEntry")
}
})
}
// TestGetAndSet ensures that Get and Set respond as expected to different
// combinations of data and that data can be accurately read and updated
// once set.
func TestGetAndSet(t *testing.T) {
RunWithInstance(func(instance *Instance) {
checkResultWithBool := func(name string, fetched interface{}, expected bool) {
if res, ok := fetched.(bool); ok {
if res != expected {
t.Errorf("Instance.%s: got '%t' expected '%t'", name, res, expected)
}
} else {
t.Errorf("Instance.%s: got result of an unknown type, expected 'bool'", name)
}
}
if err := instance.Set("foo", true); err != nil {
t.Fatal("Instance.Set: got error:\n", err)
}
if foo, err := instance.Get("foo"); err != nil {
t.Error("Instance.Get: got error:\n", err)
} else {
checkResultWithBool("Get", foo, true)
}
if _, err := instance.Get("bar"); err == nil {
t.Error("Instance.Get: expected error with non-existent entry")
}
if err := instance.Set("foo", false); err != nil {
t.Fatal("Instance.Set: got error:\n", err)
}
foo := instance.MustGet("foo")
checkResultWithBool("MustGet", foo, false)
if err := panicked(func() { instance.MustGet("bar") }); err == nil {
t.Error("Instance.MustGet: expected panic with non-existent entry")
} else if _, ok := err.(*ErrNoEntry); !ok {
t.Error("Instance.MustGet: expected error of type *ErrNoEntry")
}
if err := instance.Set("foo", []string{"disallowed", "type"}); err == nil {
t.Error("Instance.Set: expected error with new value of disallowed type")
}
if err := instance.Set("foo", 1784); err == nil {
t.Error("Instance.Set: expected error with new value of different type than existing")
}
if err := panicked(func() { instance.MustSet("foo", true) }); err != nil {
t.Error("Instance.MustSet: got panic:\n", err)
}
if err := panicked(func() { instance.MustSet("foo", 1834) }); err == nil {
t.Error("Instance.MustSet: expected panic with new value of different type than existing")
}
if err := instance.ForceSet("foo", 1873); err != nil {
t.Error("Instance.ForceSet: got error:\n", err)
}
if err := panicked(func() { instance.MustForceSet("foo", 1891) }); err != nil {
t.Error("Instance.MustForceSet: got panic:\n", err)
}
if err := panicked(func() { instance.MustForceSet("foo", []string{"disallowed", "type"}) }); err == nil {
t.Error("Instance.MustForceSet: expected panic with new value of disallowed type")
}
})
}
// TestDelete ensures that metadata entries inserted by means of a fixture are
// properly deleted and that attempting to delete a non-existent entry results
// in an ErrNoEntry.
func TestDelete(t *testing.T) {
RunWithInstance(func(instance *Instance) {
InsertFixtures(instance, []EntryFixture{
{Name: "int", Value: "2891", ValueType: 1},
{Name: "string", Value: "hello world!", ValueType: 3},
})
if err := instance.Delete("int"); err != nil {
t.Error("Instance.Delete: got error:\n", err)
}
if err := panicked(func() { instance.MustDelete("string") }); err != nil {
t.Error("Instance.MustDelete: got panic:\n", err)
}
if err := instance.Delete("foo"); err == nil {
t.Error("Instance.Delete: expected error with non-existent entry")
} else if _, ok := err.(*ErrNoEntry); !ok {
t.Error("Instance.Delete: expected error of type *ErrNoEntry")
}
if err := panicked(func() { instance.MustDelete("foo") }); err == nil {
t.Error("Instance.MustDelete: expected panic with non-existent entry")
}
})
} | // RunWithInstance runs a closure passing it an Instance. | random_line_split |
metadb_test.go | package metadb
import (
"database/sql"
"errors"
"fmt"
"os"
"testing"
_ "github.com/mattn/go-sqlite3"
)
const TestDBPath = "./test.sqlite"
// TODO: Should unit tests be refactored so that all tests of methods attached
// to Instance are coupled to the test for NewInstance itself? This could
// entirely eliminate the need to work with fixtures as all data would be
// directly manipulated by the very methods being tested. Not only that, but
// this might eliminate the need to separately test toValueType and
// fromBlobString.
// panicked takes a simple function to execute and returns an error containing
// the data passed to panic from within the function, and nil if no panic
// occurred.
func panicked(fn func()) error {
ch := make(chan error)
go func() {
defer func() {
// if function didn't panic, return nil
if r := recover(); r == nil {
ch <- nil
} else { // else, return error
switch r.(type) {
case error:
ch <- r.(error)
case string:
ch <- errors.New(r.(string))
default:
ch <- nil
}
}
}()
fn()
}()
return <-ch
}
// RunWithDB runs a closure passing it a database handle which is disposed of
// afterward.
func RunWithDB(fn func(*sql.DB)) {
db, err := sql.Open("sqlite3", TestDBPath)
if err != nil {
panic(err)
}
fn(db)
err = db.Close()
if err != nil {
panic(err)
}
if err := os.Remove(TestDBPath); err != nil {
panic(err)
}
}
// RunWithInstance runs a closure passing it an Instance.
func RunWithInstance(fn func(*Instance)) {
RunWithDB(func(db *sql.DB) {
if instance, err := NewInstance(db); err != nil {
panic(err)
} else {
fn(instance)
}
})
}
// EntryFixture contains the basic data required for a metadata entry.
type EntryFixture struct {
Name string
Value interface{}
ValueType uint
}
// InsertFixtures takes a list of EntryFixtures and inserts them into the
// database handle managed by the provided Instance.
func InsertFixtures(instance *Instance, fixtures []EntryFixture) {
for _, fixture := range fixtures {
_, err := instance.DB.Exec(`
INSERT INTO metadata (Name, Value, ValueType) Values (?, ?, ?)
`, fixture.Name, fixture.Value, fixture.ValueType)
if err != nil {
panic(fmt.Sprint("tests: failed to insert fixtures:\n", err))
}
}
}
// GetFixtures returns an array of EntryFixtures read from all the metadata
// entries in the database managed by the provided Instance.
func GetFixtures(instance *Instance) map[string]*EntryFixture {
rows, err := instance.DB.Query("SELECT Name, Value, ValueType FROM metadata;")
if err != nil {
panic(fmt.Sprint("tests: failed to retrieve fixtures:\n", err))
}
fixtures := make(map[string]*EntryFixture)
for rows.Next() {
var value string
fixture := EntryFixture{}
if err := rows.Scan(&fixture.Name, &value, &fixture.ValueType); err != nil {
panic(fmt.Errorf("tests: failed to scan row while retrieving fixtures:\n%s", err))
}
fixture.Value = value
fixtures[fixture.Name] = &fixture
}
return fixtures
}
// TestNewInstance ensures that an Instance object is returned as expected with
// a valid database handle, and an error with an invalid handle.
func TestNewInstance(t *testing.T) {
if _, err := NewInstance(nil); err == nil {
t.Error("NewInstance: expected error with nil database handle")
}
RunWithDB(func(db *sql.DB) {
if _, err := NewInstance(db); err != nil {
t.Fatal("NewInstance: got error:\n", err)
}
})
}
// TestExists ensures that Instance.Exists is accurate.
func TestExists(t *testing.T) {
RunWithInstance(func(instance *Instance) {
InsertFixtures(instance, []EntryFixture{
{Name: "foo", Value: "bar", ValueType: 3},
})
if instance.Exists("bar") {
t.Error("Instance.Exists: got 'true' expected 'false'")
}
if !instance.Exists("foo") {
t.Error("Instance.Exists: got 'false' expected 'true'")
}
})
}
// TestToValueType ensures that the correct type index is returned for each of
// the allowed types.
func TestToValueType(t *testing.T) {
testValid := func(value interface{}, expected uint) {
if res, err := toValueType(value); err != nil {
t.Error("toValueType: got error:\n", err)
} else if res != expected {
t.Errorf("toValueType: got '%d' expected '%d'", res, expected)
}
}
testValid(true, 0)
testValid(281, 1)
testValid(43.183, 2)
testValid("hello world!", 3)
if _, err := toValueType([]string{"disallowed", "type"}); err == nil {
t.Error("toValueType: expected error with disallowed type")
}
}
// TestFromBlobString ensures that the correct data is returned for a number
// of combinations of blob strings and value types.
func | (t *testing.T) {
RunWithInstance(func(instance *Instance) {
InsertFixtures(instance, []EntryFixture{
{Name: "bool", Value: true, ValueType: 0},
{Name: "invalidBool", Value: "maybe", ValueType: 0},
{Name: "int", Value: 239, ValueType: 1},
{Name: "invalidInt", Value: "not a number", ValueType: 1},
{Name: "float", Value: 21.42, ValueType: 2},
{Name: "invalidFloat", Value: "21.48aje21", ValueType: 2},
{Name: "string", Value: "hello world!", ValueType: 3},
{Name: "unknown", Value: "nothing", ValueType: 100},
})
fixtures := GetFixtures(instance)
testFixture := func(name string, expected interface{}) {
fixture := fixtures[name]
res, err := fromBlobString(fixture.Value.(string), fixture.ValueType)
if err != nil {
t.Error("fromBlobString: got errror:\n", err)
} else if res != expected {
t.Errorf("fromBlobString: got '%v' expected '%v'", res, expected)
}
}
expectError := func(name string, msg string) {
fixture := fixtures[name]
if _, err := fromBlobString(fixture.Value.(string), fixture.ValueType); err == nil {
t.Errorf("fromBlobString: expected error with %s", msg)
}
}
testFixture("bool", true)
testFixture("int", 239)
testFixture("float", 21.42)
testFixture("string", "hello world!")
expectError("invalidBool", "invalid boolean blob string")
expectError("invalidInt", "invalid integer blob string")
expectError("invalidFloat", "invalid float blob string")
expectError("unknown", "invalid value type")
})
}
// TestGetValueType ensures that getValueType returns accurate data.
func TestGetValueType(t *testing.T) {
RunWithInstance(func(instance *Instance) {
InsertFixtures(instance, []EntryFixture{
{Name: "foo", Value: "1", ValueType: 0},
{Name: "bar", Value: "1011", ValueType: 1},
})
testValueType := func(name string, expected uint) {
if res, err := instance.getValueType(name); err != nil {
t.Error("Instance.getValueType: got error:\n", err)
} else if res != expected {
t.Errorf("Instance.getValueType: got '%d' expected '%d'", res, expected)
}
}
testValueType("foo", 0)
testValueType("bar", 1)
_, err := instance.getValueType("unknown")
if err == nil {
t.Error("Instance.getValueType: expected error with missing entry")
} else if _, ok := err.(*ErrNoEntry); !ok {
t.Error("Instance.getValueType: expected error of type *ErrNoEntry")
}
})
}
// TestGetAndSet ensures that Get and Set respond as expected to different
// combinations of data and that data can be accurately read and updated
// once set.
func TestGetAndSet(t *testing.T) {
RunWithInstance(func(instance *Instance) {
checkResultWithBool := func(name string, fetched interface{}, expected bool) {
if res, ok := fetched.(bool); ok {
if res != expected {
t.Errorf("Instance.%s: got '%t' expected '%t'", name, res, expected)
}
} else {
t.Errorf("Instance.%s: got result of an unknown type, expected 'bool'", name)
}
}
if err := instance.Set("foo", true); err != nil {
t.Fatal("Instance.Set: got error:\n", err)
}
if foo, err := instance.Get("foo"); err != nil {
t.Error("Instance.Get: got error:\n", err)
} else {
checkResultWithBool("Get", foo, true)
}
if _, err := instance.Get("bar"); err == nil {
t.Error("Instance.Get: expected error with non-existent entry")
}
if err := instance.Set("foo", false); err != nil {
t.Fatal("Instance.Set: got error:\n", err)
}
foo := instance.MustGet("foo")
checkResultWithBool("MustGet", foo, false)
if err := panicked(func() { instance.MustGet("bar") }); err == nil {
t.Error("Instance.MustGet: expected panic with non-existent entry")
} else if _, ok := err.(*ErrNoEntry); !ok {
t.Error("Instance.MustGet: expected error of type *ErrNoEntry")
}
if err := instance.Set("foo", []string{"disallowed", "type"}); err == nil {
t.Error("Instance.Set: expected error with new value of disallowed type")
}
if err := instance.Set("foo", 1784); err == nil {
t.Error("Instance.Set: expected error with new value of different type than existing")
}
if err := panicked(func() { instance.MustSet("foo", true) }); err != nil {
t.Error("Instance.MustSet: got panic:\n", err)
}
if err := panicked(func() { instance.MustSet("foo", 1834) }); err == nil {
t.Error("Instance.MustSet: expected panic with new value of different type than existing")
}
if err := instance.ForceSet("foo", 1873); err != nil {
t.Error("Instance.ForceSet: got error:\n", err)
}
if err := panicked(func() { instance.MustForceSet("foo", 1891) }); err != nil {
t.Error("Instance.MustForceSet: got panic:\n", err)
}
if err := panicked(func() { instance.MustForceSet("foo", []string{"disallowed", "type"}) }); err == nil {
t.Error("Instance.MustForceSet: expected panic with new value of disallowed type")
}
})
}
// TestDelete ensures that metadata entries inserted by means of a fixture are
// properly deleted and that attempting to delete a non-existent entry results
// in an ErrNoEntry.
func TestDelete(t *testing.T) {
RunWithInstance(func(instance *Instance) {
InsertFixtures(instance, []EntryFixture{
{Name: "int", Value: "2891", ValueType: 1},
{Name: "string", Value: "hello world!", ValueType: 3},
})
if err := instance.Delete("int"); err != nil {
t.Error("Instance.Delete: got error:\n", err)
}
if err := panicked(func() { instance.MustDelete("string") }); err != nil {
t.Error("Instance.MustDelete: got panic:\n", err)
}
if err := instance.Delete("foo"); err == nil {
t.Error("Instance.Delete: expected error with non-existent entry")
} else if _, ok := err.(*ErrNoEntry); !ok {
t.Error("Instance.Delete: expected error of type *ErrNoEntry")
}
if err := panicked(func() { instance.MustDelete("foo") }); err == nil {
t.Error("Instance.MustDelete: expected panic with non-existent entry")
}
})
}
| TestFromBlobString | identifier_name |
memcache_zipserve.py | #!/usr/bin/env python
#
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A class to serve pages from zip files and use memcache for performance.
This contains a class and a function to create an anonymous instance of the
class to serve HTTP GET requests. Memcache is used to increase response speed
and lower processing cycles used in serving. Credit to Guido van Rossum and
his implementation of zipserve which served as a reference as I wrote this.
MemcachedZipHandler: Class that serves request
create_handler: method to create instance of MemcachedZipHandler
"""
__author__ = 'jmatt@google.com (Justin Mattson)'
import email.Utils
import logging
import mimetypes
import time
import zipfile
from google.appengine.api import memcache
from google.appengine.ext import webapp
from google.appengine.ext.webapp import util
from time import localtime, strftime
def create_handler(zip_files, max_age=None, public=None):
|
class MemcachedZipHandler(webapp.RequestHandler):
"""Handles get requests for a given URL.
Serves a GET request from a series of zip files. As files are served they are
put into memcache, which is much faster than retreiving them from the zip
source file again. It also uses considerably fewer CPU cycles.
"""
zipfile_cache = {} # class cache of source zip files
MAX_AGE = 600 # max client-side cache lifetime
PUBLIC = True # public cache setting
CACHE_PREFIX = 'cache://' # memcache key prefix for actual URLs
NEG_CACHE_PREFIX = 'noncache://' # memcache key prefix for non-existant URL
intlString = 'intl/'
validLangs = ['en', 'de', 'es', 'fr','it','ja','zh-CN','zh-TW']
def TrueGet(self, reqUri):
"""The top-level entry point to serving requests.
Called 'True' get because it does the work when called from the wrapper
class' get method. Some logic is applied to the request to serve files
from an intl/<lang>/... directory or fall through to the default language.
Args:
name: URL requested
Returns:
None
"""
langName = 'en'
resetLangCookie = False
urlLangName = None
retry = False
isValidIntl = False
isStripped = False
# Try to retrieve the user's lang pref from the cookie. If there is no
# lang pref cookie in the request, add set-cookie to the response with the
# default value of 'en'.
try:
langName = self.request.cookies['android_developer_pref_lang']
except KeyError:
resetLangCookie = True
#logging.info('==========================EXCEPTION: NO LANG COOKIE FOUND, USING [%s]', langName)
logging.info('==========================REQ INIT name [%s] langName [%s] resetLangCookie [%s]', reqUri, langName, resetLangCookie)
# Preprocess the req url. If it references a directory or the domain itself,
# append '/index.html' to the url and 302 redirect. Otherwise, continue
# processing the request below.
name = self.PreprocessUrl(reqUri, langName)
if name:
# Do some prep for handling intl requests. Parse the url and validate
# the intl/lang substring, extract the url lang code (urlLangName) and the
# the uri that follows the intl/lang substring(contentUri)
sections = name.split("/", 2)
contentUri = 0
isIntl = len(sections) > 1 and (sections[0] == "intl")
if isIntl:
isValidIntl = sections[1] in self.validLangs
if isValidIntl:
urlLangName = sections[1]
contentUri = sections[2]
logging.info(' Content URI is [%s]...', contentUri)
if (urlLangName != langName) or (langName == 'en'):
# if the lang code in the request is different from that in
# the cookie, or if the target lang is en, strip the
# intl/nn substring. It will later be redirected to
# the user's preferred language url.
# logging.info(' Handling a MISMATCHED intl request')
name = contentUri
isStripped = True
isValidIntl = False
isIntl = False
# Send for processing
if self.isCleanUrl(name, langName, isValidIntl, isStripped):
# handle a 'clean' request.
# Try to form a response using the actual request url.
# logging.info(' Request being handled as clean: [%s]', name)
if not self.CreateResponse(name, langName, isValidIntl, resetLangCookie):
# If CreateResponse returns False, there was no such document
# in the intl/lang tree. Before going to 404, see if there is an
# English-language version of the doc in the default
# default tree and return it, else go to 404.
self.CreateResponse(contentUri, langName, False, resetLangCookie)
elif isIntl:
# handle the case where we need to pass through an invalid intl req
# for processing (so as to get 404 as appropriate). This is needed
# because intl urls are passed through clean and retried in English,
# if necessary.
# logging.info(' Handling an invalid intl request...')
self.CreateResponse(name, langName, isValidIntl, resetLangCookie)
else:
# handle the case where we have a non-clean url (usually a non-intl
# url) that we need to interpret in the context of any lang pref
# that is set. Prepend an intl/lang string to the request url and
# send it as a 302 redirect. After the redirect, the subsequent
# request will be handled as a clean url.
self.RedirToIntl(name, self.intlString, langName)
def isCleanUrl(self, name, langName, isValidIntl, isStripped):
"""Determine whether to pass an incoming url straight to processing.
Args:
name: The incoming URL
Returns:
boolean: Whether the URL should be sent straight to processing
"""
# logging.info(' >>>> isCleanUrl name [%s] langName [%s] isValidIntl [%s]', name, langName, isValidIntl)
if (langName == 'en' and not isStripped) or isValidIntl or not ('.html' in name) or (not isValidIntl and not langName):
return True
def PreprocessUrl(self, name, langName):
"""Any preprocessing work on the URL when it comes in.
Put any work related to interpreting the incoming URL here. For example,
this is used to redirect requests for a directory to the index.html file
in that directory. Subclasses should override this method to do different
preprocessing.
Args:
name: The incoming URL
Returns:
False if the request was redirected to '/index.html', or
The processed URL, otherwise
"""
# determine if this is a request for a directory
final_path_segment = name
final_slash_offset = name.rfind('/')
if final_slash_offset != len(name) - 1:
final_path_segment = name[final_slash_offset + 1:]
if final_path_segment.find('.') == -1:
name = ''.join([name, '/'])
# if this is a directory or the domain itself, redirect to /index.html
if not name or (name[len(name) - 1:] == '/'):
uri = ''.join(['/', name, 'index.html'])
# logging.info('--->PREPROCESSING REDIRECT [%s] to [%s] with langName [%s]', name, uri, langName)
self.redirect(uri, False)
return False
else:
return name
def RedirToIntl(self, name, intlString, langName):
"""Redirect an incoming request to the appropriate intl uri.
For non-en langName, builds the intl/lang string from a
base (en) string and redirects (302) the request to look for
a version of the file in langName. For en langName, simply
redirects a stripped uri string (intl/nn removed).
Args:
name: The incoming, preprocessed URL
Returns:
The lang-specific URL
"""
if not (langName == 'en'):
builtIntlLangUri = ''.join([intlString, langName, '/', name, '?', self.request.query_string])
else:
builtIntlLangUri = name
uri = ''.join(['/', builtIntlLangUri])
logging.info('-->>REDIRECTING %s to %s', name, uri)
self.redirect(uri, False)
return uri
def CreateResponse(self, name, langName, isValidIntl, resetLangCookie):
"""Process the url and form a response, if appropriate.
Attempts to retrieve the requested file (name) from cache,
negative cache, or store (zip) and form the response.
For intl requests that are not found (in the localized tree),
returns False rather than forming a response, so that
the request can be retried with the base url (this is the
fallthrough to default language).
For requests that are found, forms the headers and
adds the content to the response entity. If the request was
for an intl (localized) url, also resets the language cookie
to the language specified in the url if needed, to ensure that
the client language and response data remain harmonious.
Args:
name: The incoming, preprocessed URL
langName: The language id. Used as necessary to reset the
language cookie in the response.
isValidIntl: If present, indicates whether the request is
for a language-specific url
resetLangCookie: Whether the response should reset the
language cookie to 'langName'
Returns:
True: A response was successfully created for the request
False: No response was created.
"""
# see if we have the page in the memcache
logging.info('PROCESSING %s langName [%s] isValidIntl [%s] resetLang [%s]',
name, langName, isValidIntl, resetLangCookie)
resp_data = self.GetFromCache(name)
if resp_data is None:
logging.info(' Cache miss for %s', name)
resp_data = self.GetFromNegativeCache(name)
if resp_data is None:
resp_data = self.GetFromStore(name)
# IF we have the file, put it in the memcache
# ELSE put it in the negative cache
if resp_data is not None:
self.StoreOrUpdateInCache(name, resp_data)
elif isValidIntl:
# couldn't find the intl doc. Try to fall through to English.
#logging.info(' Retrying with base uri...')
return False
else:
logging.info(' Adding %s to negative cache, serving 404', name)
self.StoreInNegativeCache(name)
self.Write404Error()
return True
else:
# found it in negative cache
self.Write404Error()
return True
# found content from cache or store
logging.info('FOUND CLEAN')
if resetLangCookie:
logging.info(' Resetting android_developer_pref_lang cookie to [%s]',
langName)
expireDate = time.mktime(localtime()) + 60 * 60 * 24 * 365 * 10
self.response.headers.add_header('Set-Cookie',
'android_developer_pref_lang=%s; path=/; expires=%s' %
(langName, strftime("%a, %d %b %Y %H:%M:%S", localtime(expireDate))))
mustRevalidate = False
if ('.html' in name):
# revalidate html files -- workaround for cache inconsistencies for
# negotiated responses
mustRevalidate = True
#logging.info(' Adding [Vary: Cookie] to response...')
self.response.headers.add_header('Vary', 'Cookie')
content_type, encoding = mimetypes.guess_type(name)
if content_type:
self.response.headers['Content-Type'] = content_type
self.SetCachingHeaders(mustRevalidate)
self.response.out.write(resp_data)
elif (name == 'favicon.ico'):
self.response.headers['Content-Type'] = 'image/x-icon'
self.SetCachingHeaders(mustRevalidate)
self.response.out.write(resp_data)
elif name.endswith('.psd'):
self.response.headers['Content-Type'] = 'application/octet-stream'
self.SetCachingHeaders(mustRevalidate)
self.response.out.write(resp_data)
return True
def GetFromStore(self, file_path):
"""Retrieve file from zip files.
Get the file from the source, it must not have been in the memcache. If
possible, we'll use the zip file index to quickly locate where the file
should be found. (See MapToFileArchive documentation for assumptions about
file ordering.) If we don't have an index or don't find the file where the
index says we should, look through all the zip files to find it.
Args:
file_path: the file that we're looking for
Returns:
The contents of the requested file
"""
resp_data = None
file_itr = iter(self.zipfilenames)
# check the index, if we have one, to see what archive the file is in
archive_name = self.MapFileToArchive(file_path)
if not archive_name:
archive_name = file_itr.next()[0]
while resp_data is None and archive_name:
zip_archive = self.LoadZipFile(archive_name)
if zip_archive:
# we expect some lookups will fail, and that's okay, 404s will deal
# with that
try:
resp_data = zip_archive.read(file_path)
except (KeyError, RuntimeError), err:
# no op
x = False
if resp_data is not None:
logging.info('%s read from %s', file_path, archive_name)
try:
archive_name = file_itr.next()[0]
except (StopIteration), err:
archive_name = False
return resp_data
def LoadZipFile(self, zipfilename):
"""Convenience method to load zip file.
Just a convenience method to load the zip file from the data store. This is
useful if we ever want to change data stores and also as a means of
dependency injection for testing. This method will look at our file cache
first, and then load and cache the file if there's a cache miss
Args:
zipfilename: the name of the zip file to load
Returns:
The zip file requested, or None if there is an I/O error
"""
zip_archive = None
zip_archive = self.zipfile_cache.get(zipfilename)
if zip_archive is None:
try:
zip_archive = zipfile.ZipFile(zipfilename)
self.zipfile_cache[zipfilename] = zip_archive
except (IOError, RuntimeError), err:
logging.error('Can\'t open zipfile %s, cause: %s' % (zipfilename,
err))
return zip_archive
def MapFileToArchive(self, file_path):
"""Given a file name, determine what archive it should be in.
This method makes two critical assumptions.
(1) The zip files passed as an argument to the handler, if concatenated
in that same order, would result in a total ordering
of all the files. See (2) for ordering type.
(2) Upper case letters before lower case letters. The traversal of a
directory tree is depth first. A parent directory's files are added
before the files of any child directories
Args:
file_path: the file to be mapped to an archive
Returns:
The name of the archive where we expect the file to be
"""
num_archives = len(self.zipfilenames)
while num_archives > 0:
target = self.zipfilenames[num_archives - 1]
if len(target) > 1:
if self.CompareFilenames(target[1], file_path) >= 0:
return target[0]
num_archives -= 1
return None
def CompareFilenames(self, file1, file2):
"""Determines whether file1 is lexigraphically 'before' file2.
WARNING: This method assumes that paths are output in a depth-first,
with parent directories' files stored before childs'
We say that file1 is lexigraphically before file2 if the last non-matching
path segment of file1 is alphabetically before file2.
Args:
file1: the first file path
file2: the second file path
Returns:
A positive number if file1 is before file2
A negative number if file2 is before file1
0 if filenames are the same
"""
f1_segments = file1.split('/')
f2_segments = file2.split('/')
segment_ptr = 0
while (segment_ptr < len(f1_segments) and
segment_ptr < len(f2_segments) and
f1_segments[segment_ptr] == f2_segments[segment_ptr]):
segment_ptr += 1
if len(f1_segments) == len(f2_segments):
# we fell off the end, the paths much be the same
if segment_ptr == len(f1_segments):
return 0
# we didn't fall of the end, compare the segments where they differ
if f1_segments[segment_ptr] < f2_segments[segment_ptr]:
return 1
elif f1_segments[segment_ptr] > f2_segments[segment_ptr]:
return -1
else:
return 0
# the number of segments differs, we either mismatched comparing
# directories, or comparing a file to a directory
else:
# IF we were looking at the last segment of one of the paths,
# the one with fewer segments is first because files come before
# directories
# ELSE we just need to compare directory names
if (segment_ptr + 1 == len(f1_segments) or
segment_ptr + 1 == len(f2_segments)):
return len(f2_segments) - len(f1_segments)
else:
if f1_segments[segment_ptr] < f2_segments[segment_ptr]:
return 1
elif f1_segments[segment_ptr] > f2_segments[segment_ptr]:
return -1
else:
return 0
def SetCachingHeaders(self, revalidate):
"""Set caching headers for the request."""
max_age = self.MAX_AGE
#self.response.headers['Expires'] = email.Utils.formatdate(
# time.time() + max_age, usegmt=True)
cache_control = []
if self.PUBLIC:
cache_control.append('public')
cache_control.append('max-age=%d' % max_age)
if revalidate:
cache_control.append('must-revalidate')
self.response.headers['Cache-Control'] = ', '.join(cache_control)
def GetFromCache(self, filename):
"""Get file from memcache, if available.
Args:
filename: The URL of the file to return
Returns:
The content of the file
"""
return memcache.get('%s%s' % (self.CACHE_PREFIX, filename))
def StoreOrUpdateInCache(self, filename, data):
"""Store data in the cache.
Store a piece of data in the memcache. Memcache has a maximum item size of
1*10^6 bytes. If the data is too large, fail, but log the failure. Future
work will consider compressing the data before storing or chunking it
Args:
filename: the name of the file to store
data: the data of the file
Returns:
None
"""
try:
if not memcache.add('%s%s' % (self.CACHE_PREFIX, filename), data):
memcache.replace('%s%s' % (self.CACHE_PREFIX, filename), data)
except (ValueError), err:
logging.warning('Data size too large to cache\n%s' % err)
def Write404Error(self):
"""Ouptut a simple 404 response."""
self.error(404)
self.response.out.write(
''.join(['<html><head><title>404: Not Found</title></head>',
'<body><b><h2>Error 404</h2><br/>',
'File not found</b></body></html>']))
def StoreInNegativeCache(self, filename):
"""If a non-existant URL is accessed, cache this result as well.
Future work should consider setting a maximum negative cache size to
prevent it from from negatively impacting the real cache.
Args:
filename: URL to add ot negative cache
Returns:
None
"""
memcache.add('%s%s' % (self.NEG_CACHE_PREFIX, filename), -1)
def GetFromNegativeCache(self, filename):
"""Retrieve from negative cache.
Args:
filename: URL to retreive
Returns:
The file contents if present in the negative cache.
"""
return memcache.get('%s%s' % (self.NEG_CACHE_PREFIX, filename))
def main():
application = webapp.WSGIApplication([('/([^/]+)/(.*)',
MemcachedZipHandler)])
util.run_wsgi_app(application)
if __name__ == '__main__':
main()
| """Factory method to create a MemcachedZipHandler instance.
Args:
zip_files: A list of file names, or a list of lists of file name, first
member of file mappings. See MemcachedZipHandler documentation for
more information about using the list of lists format
max_age: The maximum client-side cache lifetime
public: Whether this should be declared public in the client-side cache
Returns:
A MemcachedZipHandler wrapped in a pretty, anonymous bow for use with App
Engine
Raises:
ValueError: if the zip_files argument is not a list
"""
# verify argument integrity. If the argument is passed in list format,
# convert it to list of lists format
if zip_files and type(zip_files).__name__ == 'list':
num_items = len(zip_files)
while num_items > 0:
if type(zip_files[num_items - 1]).__name__ != 'list':
zip_files[num_items - 1] = [zip_files[num_items-1]]
num_items -= 1
else:
raise ValueError('File name arguments must be a list')
class HandlerWrapper(MemcachedZipHandler):
"""Simple wrapper for an instance of MemcachedZipHandler.
I'm still not sure why this is needed
"""
def get(self, name):
self.zipfilenames = zip_files
self.TrueGet(name)
if max_age is not None:
MAX_AGE = max_age
if public is not None:
PUBLIC = public
return HandlerWrapper | identifier_body |
memcache_zipserve.py | #!/usr/bin/env python
#
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A class to serve pages from zip files and use memcache for performance.
This contains a class and a function to create an anonymous instance of the
class to serve HTTP GET requests. Memcache is used to increase response speed
and lower processing cycles used in serving. Credit to Guido van Rossum and
his implementation of zipserve which served as a reference as I wrote this.
MemcachedZipHandler: Class that serves request
create_handler: method to create instance of MemcachedZipHandler
"""
__author__ = 'jmatt@google.com (Justin Mattson)'
import email.Utils
import logging
import mimetypes
import time
import zipfile
from google.appengine.api import memcache
from google.appengine.ext import webapp
from google.appengine.ext.webapp import util
from time import localtime, strftime
def create_handler(zip_files, max_age=None, public=None):
"""Factory method to create a MemcachedZipHandler instance.
Args:
zip_files: A list of file names, or a list of lists of file name, first
member of file mappings. See MemcachedZipHandler documentation for
more information about using the list of lists format
max_age: The maximum client-side cache lifetime
public: Whether this should be declared public in the client-side cache
Returns:
A MemcachedZipHandler wrapped in a pretty, anonymous bow for use with App
Engine
Raises:
ValueError: if the zip_files argument is not a list
"""
# verify argument integrity. If the argument is passed in list format,
# convert it to list of lists format
if zip_files and type(zip_files).__name__ == 'list':
num_items = len(zip_files)
while num_items > 0:
if type(zip_files[num_items - 1]).__name__ != 'list':
zip_files[num_items - 1] = [zip_files[num_items-1]]
num_items -= 1
else:
raise ValueError('File name arguments must be a list')
class HandlerWrapper(MemcachedZipHandler):
"""Simple wrapper for an instance of MemcachedZipHandler.
I'm still not sure why this is needed
"""
def get(self, name):
self.zipfilenames = zip_files
self.TrueGet(name)
if max_age is not None:
MAX_AGE = max_age
if public is not None:
PUBLIC = public
return HandlerWrapper
class MemcachedZipHandler(webapp.RequestHandler):
"""Handles get requests for a given URL.
Serves a GET request from a series of zip files. As files are served they are
put into memcache, which is much faster than retreiving them from the zip
source file again. It also uses considerably fewer CPU cycles.
"""
zipfile_cache = {} # class cache of source zip files
MAX_AGE = 600 # max client-side cache lifetime
PUBLIC = True # public cache setting
CACHE_PREFIX = 'cache://' # memcache key prefix for actual URLs
NEG_CACHE_PREFIX = 'noncache://' # memcache key prefix for non-existant URL
intlString = 'intl/'
validLangs = ['en', 'de', 'es', 'fr','it','ja','zh-CN','zh-TW']
def TrueGet(self, reqUri):
"""The top-level entry point to serving requests.
Called 'True' get because it does the work when called from the wrapper
class' get method. Some logic is applied to the request to serve files
from an intl/<lang>/... directory or fall through to the default language.
Args:
name: URL requested
Returns:
None
"""
langName = 'en'
resetLangCookie = False
urlLangName = None
retry = False
isValidIntl = False
isStripped = False
# Try to retrieve the user's lang pref from the cookie. If there is no
# lang pref cookie in the request, add set-cookie to the response with the
# default value of 'en'.
try:
langName = self.request.cookies['android_developer_pref_lang']
except KeyError:
resetLangCookie = True
#logging.info('==========================EXCEPTION: NO LANG COOKIE FOUND, USING [%s]', langName)
logging.info('==========================REQ INIT name [%s] langName [%s] resetLangCookie [%s]', reqUri, langName, resetLangCookie)
# Preprocess the req url. If it references a directory or the domain itself,
# append '/index.html' to the url and 302 redirect. Otherwise, continue
# processing the request below.
name = self.PreprocessUrl(reqUri, langName)
if name:
# Do some prep for handling intl requests. Parse the url and validate
# the intl/lang substring, extract the url lang code (urlLangName) and the
# the uri that follows the intl/lang substring(contentUri)
sections = name.split("/", 2)
contentUri = 0
isIntl = len(sections) > 1 and (sections[0] == "intl")
if isIntl:
isValidIntl = sections[1] in self.validLangs
if isValidIntl:
urlLangName = sections[1]
contentUri = sections[2]
logging.info(' Content URI is [%s]...', contentUri)
if (urlLangName != langName) or (langName == 'en'):
# if the lang code in the request is different from that in
# the cookie, or if the target lang is en, strip the
# intl/nn substring. It will later be redirected to
# the user's preferred language url.
# logging.info(' Handling a MISMATCHED intl request')
name = contentUri
isStripped = True
isValidIntl = False
isIntl = False
# Send for processing
if self.isCleanUrl(name, langName, isValidIntl, isStripped):
# handle a 'clean' request.
# Try to form a response using the actual request url.
# logging.info(' Request being handled as clean: [%s]', name)
if not self.CreateResponse(name, langName, isValidIntl, resetLangCookie):
# If CreateResponse returns False, there was no such document
# in the intl/lang tree. Before going to 404, see if there is an
# English-language version of the doc in the default
# default tree and return it, else go to 404.
self.CreateResponse(contentUri, langName, False, resetLangCookie)
elif isIntl:
# handle the case where we need to pass through an invalid intl req
# for processing (so as to get 404 as appropriate). This is needed
# because intl urls are passed through clean and retried in English,
# if necessary.
# logging.info(' Handling an invalid intl request...')
self.CreateResponse(name, langName, isValidIntl, resetLangCookie)
else:
# handle the case where we have a non-clean url (usually a non-intl
# url) that we need to interpret in the context of any lang pref
# that is set. Prepend an intl/lang string to the request url and
# send it as a 302 redirect. After the redirect, the subsequent
# request will be handled as a clean url.
self.RedirToIntl(name, self.intlString, langName)
def isCleanUrl(self, name, langName, isValidIntl, isStripped):
"""Determine whether to pass an incoming url straight to processing.
Args:
name: The incoming URL
Returns:
boolean: Whether the URL should be sent straight to processing
"""
# logging.info(' >>>> isCleanUrl name [%s] langName [%s] isValidIntl [%s]', name, langName, isValidIntl)
if (langName == 'en' and not isStripped) or isValidIntl or not ('.html' in name) or (not isValidIntl and not langName):
return True
def PreprocessUrl(self, name, langName):
"""Any preprocessing work on the URL when it comes in.
Put any work related to interpreting the incoming URL here. For example,
this is used to redirect requests for a directory to the index.html file
in that directory. Subclasses should override this method to do different
preprocessing.
Args:
name: The incoming URL
Returns:
False if the request was redirected to '/index.html', or
The processed URL, otherwise
"""
# determine if this is a request for a directory
final_path_segment = name
final_slash_offset = name.rfind('/')
if final_slash_offset != len(name) - 1:
final_path_segment = name[final_slash_offset + 1:]
if final_path_segment.find('.') == -1:
name = ''.join([name, '/'])
# if this is a directory or the domain itself, redirect to /index.html
if not name or (name[len(name) - 1:] == '/'):
uri = ''.join(['/', name, 'index.html'])
# logging.info('--->PREPROCESSING REDIRECT [%s] to [%s] with langName [%s]', name, uri, langName)
self.redirect(uri, False)
return False
else:
return name
def RedirToIntl(self, name, intlString, langName):
"""Redirect an incoming request to the appropriate intl uri.
For non-en langName, builds the intl/lang string from a
base (en) string and redirects (302) the request to look for
a version of the file in langName. For en langName, simply
redirects a stripped uri string (intl/nn removed).
Args:
name: The incoming, preprocessed URL
Returns:
The lang-specific URL
"""
if not (langName == 'en'):
builtIntlLangUri = ''.join([intlString, langName, '/', name, '?', self.request.query_string])
else:
builtIntlLangUri = name
uri = ''.join(['/', builtIntlLangUri])
logging.info('-->>REDIRECTING %s to %s', name, uri)
self.redirect(uri, False)
return uri
def CreateResponse(self, name, langName, isValidIntl, resetLangCookie):
"""Process the url and form a response, if appropriate.
Attempts to retrieve the requested file (name) from cache,
negative cache, or store (zip) and form the response.
For intl requests that are not found (in the localized tree),
returns False rather than forming a response, so that
the request can be retried with the base url (this is the
fallthrough to default language).
For requests that are found, forms the headers and
adds the content to the response entity. If the request was
for an intl (localized) url, also resets the language cookie
to the language specified in the url if needed, to ensure that
the client language and response data remain harmonious.
Args:
name: The incoming, preprocessed URL
langName: The language id. Used as necessary to reset the
language cookie in the response.
isValidIntl: If present, indicates whether the request is
for a language-specific url
resetLangCookie: Whether the response should reset the
language cookie to 'langName'
Returns:
True: A response was successfully created for the request
False: No response was created.
"""
# see if we have the page in the memcache
logging.info('PROCESSING %s langName [%s] isValidIntl [%s] resetLang [%s]',
name, langName, isValidIntl, resetLangCookie)
resp_data = self.GetFromCache(name)
if resp_data is None:
logging.info(' Cache miss for %s', name)
resp_data = self.GetFromNegativeCache(name)
if resp_data is None: | self.StoreOrUpdateInCache(name, resp_data)
elif isValidIntl:
# couldn't find the intl doc. Try to fall through to English.
#logging.info(' Retrying with base uri...')
return False
else:
logging.info(' Adding %s to negative cache, serving 404', name)
self.StoreInNegativeCache(name)
self.Write404Error()
return True
else:
# found it in negative cache
self.Write404Error()
return True
# found content from cache or store
logging.info('FOUND CLEAN')
if resetLangCookie:
logging.info(' Resetting android_developer_pref_lang cookie to [%s]',
langName)
expireDate = time.mktime(localtime()) + 60 * 60 * 24 * 365 * 10
self.response.headers.add_header('Set-Cookie',
'android_developer_pref_lang=%s; path=/; expires=%s' %
(langName, strftime("%a, %d %b %Y %H:%M:%S", localtime(expireDate))))
mustRevalidate = False
if ('.html' in name):
# revalidate html files -- workaround for cache inconsistencies for
# negotiated responses
mustRevalidate = True
#logging.info(' Adding [Vary: Cookie] to response...')
self.response.headers.add_header('Vary', 'Cookie')
content_type, encoding = mimetypes.guess_type(name)
if content_type:
self.response.headers['Content-Type'] = content_type
self.SetCachingHeaders(mustRevalidate)
self.response.out.write(resp_data)
elif (name == 'favicon.ico'):
self.response.headers['Content-Type'] = 'image/x-icon'
self.SetCachingHeaders(mustRevalidate)
self.response.out.write(resp_data)
elif name.endswith('.psd'):
self.response.headers['Content-Type'] = 'application/octet-stream'
self.SetCachingHeaders(mustRevalidate)
self.response.out.write(resp_data)
return True
def GetFromStore(self, file_path):
"""Retrieve file from zip files.
Get the file from the source, it must not have been in the memcache. If
possible, we'll use the zip file index to quickly locate where the file
should be found. (See MapToFileArchive documentation for assumptions about
file ordering.) If we don't have an index or don't find the file where the
index says we should, look through all the zip files to find it.
Args:
file_path: the file that we're looking for
Returns:
The contents of the requested file
"""
resp_data = None
file_itr = iter(self.zipfilenames)
# check the index, if we have one, to see what archive the file is in
archive_name = self.MapFileToArchive(file_path)
if not archive_name:
archive_name = file_itr.next()[0]
while resp_data is None and archive_name:
zip_archive = self.LoadZipFile(archive_name)
if zip_archive:
# we expect some lookups will fail, and that's okay, 404s will deal
# with that
try:
resp_data = zip_archive.read(file_path)
except (KeyError, RuntimeError), err:
# no op
x = False
if resp_data is not None:
logging.info('%s read from %s', file_path, archive_name)
try:
archive_name = file_itr.next()[0]
except (StopIteration), err:
archive_name = False
return resp_data
def LoadZipFile(self, zipfilename):
"""Convenience method to load zip file.
Just a convenience method to load the zip file from the data store. This is
useful if we ever want to change data stores and also as a means of
dependency injection for testing. This method will look at our file cache
first, and then load and cache the file if there's a cache miss
Args:
zipfilename: the name of the zip file to load
Returns:
The zip file requested, or None if there is an I/O error
"""
zip_archive = None
zip_archive = self.zipfile_cache.get(zipfilename)
if zip_archive is None:
try:
zip_archive = zipfile.ZipFile(zipfilename)
self.zipfile_cache[zipfilename] = zip_archive
except (IOError, RuntimeError), err:
logging.error('Can\'t open zipfile %s, cause: %s' % (zipfilename,
err))
return zip_archive
def MapFileToArchive(self, file_path):
"""Given a file name, determine what archive it should be in.
This method makes two critical assumptions.
(1) The zip files passed as an argument to the handler, if concatenated
in that same order, would result in a total ordering
of all the files. See (2) for ordering type.
(2) Upper case letters before lower case letters. The traversal of a
directory tree is depth first. A parent directory's files are added
before the files of any child directories
Args:
file_path: the file to be mapped to an archive
Returns:
The name of the archive where we expect the file to be
"""
num_archives = len(self.zipfilenames)
while num_archives > 0:
target = self.zipfilenames[num_archives - 1]
if len(target) > 1:
if self.CompareFilenames(target[1], file_path) >= 0:
return target[0]
num_archives -= 1
return None
def CompareFilenames(self, file1, file2):
"""Determines whether file1 is lexigraphically 'before' file2.
WARNING: This method assumes that paths are output in a depth-first,
with parent directories' files stored before childs'
We say that file1 is lexigraphically before file2 if the last non-matching
path segment of file1 is alphabetically before file2.
Args:
file1: the first file path
file2: the second file path
Returns:
A positive number if file1 is before file2
A negative number if file2 is before file1
0 if filenames are the same
"""
f1_segments = file1.split('/')
f2_segments = file2.split('/')
segment_ptr = 0
while (segment_ptr < len(f1_segments) and
segment_ptr < len(f2_segments) and
f1_segments[segment_ptr] == f2_segments[segment_ptr]):
segment_ptr += 1
if len(f1_segments) == len(f2_segments):
# we fell off the end, the paths much be the same
if segment_ptr == len(f1_segments):
return 0
# we didn't fall of the end, compare the segments where they differ
if f1_segments[segment_ptr] < f2_segments[segment_ptr]:
return 1
elif f1_segments[segment_ptr] > f2_segments[segment_ptr]:
return -1
else:
return 0
# the number of segments differs, we either mismatched comparing
# directories, or comparing a file to a directory
else:
# IF we were looking at the last segment of one of the paths,
# the one with fewer segments is first because files come before
# directories
# ELSE we just need to compare directory names
if (segment_ptr + 1 == len(f1_segments) or
segment_ptr + 1 == len(f2_segments)):
return len(f2_segments) - len(f1_segments)
else:
if f1_segments[segment_ptr] < f2_segments[segment_ptr]:
return 1
elif f1_segments[segment_ptr] > f2_segments[segment_ptr]:
return -1
else:
return 0
def SetCachingHeaders(self, revalidate):
"""Set caching headers for the request."""
max_age = self.MAX_AGE
#self.response.headers['Expires'] = email.Utils.formatdate(
# time.time() + max_age, usegmt=True)
cache_control = []
if self.PUBLIC:
cache_control.append('public')
cache_control.append('max-age=%d' % max_age)
if revalidate:
cache_control.append('must-revalidate')
self.response.headers['Cache-Control'] = ', '.join(cache_control)
def GetFromCache(self, filename):
"""Get file from memcache, if available.
Args:
filename: The URL of the file to return
Returns:
The content of the file
"""
return memcache.get('%s%s' % (self.CACHE_PREFIX, filename))
def StoreOrUpdateInCache(self, filename, data):
"""Store data in the cache.
Store a piece of data in the memcache. Memcache has a maximum item size of
1*10^6 bytes. If the data is too large, fail, but log the failure. Future
work will consider compressing the data before storing or chunking it
Args:
filename: the name of the file to store
data: the data of the file
Returns:
None
"""
try:
if not memcache.add('%s%s' % (self.CACHE_PREFIX, filename), data):
memcache.replace('%s%s' % (self.CACHE_PREFIX, filename), data)
except (ValueError), err:
logging.warning('Data size too large to cache\n%s' % err)
def Write404Error(self):
"""Ouptut a simple 404 response."""
self.error(404)
self.response.out.write(
''.join(['<html><head><title>404: Not Found</title></head>',
'<body><b><h2>Error 404</h2><br/>',
'File not found</b></body></html>']))
def StoreInNegativeCache(self, filename):
"""If a non-existant URL is accessed, cache this result as well.
Future work should consider setting a maximum negative cache size to
prevent it from from negatively impacting the real cache.
Args:
filename: URL to add ot negative cache
Returns:
None
"""
memcache.add('%s%s' % (self.NEG_CACHE_PREFIX, filename), -1)
def GetFromNegativeCache(self, filename):
"""Retrieve from negative cache.
Args:
filename: URL to retreive
Returns:
The file contents if present in the negative cache.
"""
return memcache.get('%s%s' % (self.NEG_CACHE_PREFIX, filename))
def main():
application = webapp.WSGIApplication([('/([^/]+)/(.*)',
MemcachedZipHandler)])
util.run_wsgi_app(application)
if __name__ == '__main__':
main() | resp_data = self.GetFromStore(name)
# IF we have the file, put it in the memcache
# ELSE put it in the negative cache
if resp_data is not None: | random_line_split |
memcache_zipserve.py | #!/usr/bin/env python
#
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A class to serve pages from zip files and use memcache for performance.
This contains a class and a function to create an anonymous instance of the
class to serve HTTP GET requests. Memcache is used to increase response speed
and lower processing cycles used in serving. Credit to Guido van Rossum and
his implementation of zipserve which served as a reference as I wrote this.
MemcachedZipHandler: Class that serves request
create_handler: method to create instance of MemcachedZipHandler
"""
__author__ = 'jmatt@google.com (Justin Mattson)'
import email.Utils
import logging
import mimetypes
import time
import zipfile
from google.appengine.api import memcache
from google.appengine.ext import webapp
from google.appengine.ext.webapp import util
from time import localtime, strftime
def create_handler(zip_files, max_age=None, public=None):
"""Factory method to create a MemcachedZipHandler instance.
Args:
zip_files: A list of file names, or a list of lists of file name, first
member of file mappings. See MemcachedZipHandler documentation for
more information about using the list of lists format
max_age: The maximum client-side cache lifetime
public: Whether this should be declared public in the client-side cache
Returns:
A MemcachedZipHandler wrapped in a pretty, anonymous bow for use with App
Engine
Raises:
ValueError: if the zip_files argument is not a list
"""
# verify argument integrity. If the argument is passed in list format,
# convert it to list of lists format
if zip_files and type(zip_files).__name__ == 'list':
num_items = len(zip_files)
while num_items > 0:
if type(zip_files[num_items - 1]).__name__ != 'list':
zip_files[num_items - 1] = [zip_files[num_items-1]]
num_items -= 1
else:
raise ValueError('File name arguments must be a list')
class HandlerWrapper(MemcachedZipHandler):
"""Simple wrapper for an instance of MemcachedZipHandler.
I'm still not sure why this is needed
"""
def get(self, name):
self.zipfilenames = zip_files
self.TrueGet(name)
if max_age is not None:
MAX_AGE = max_age
if public is not None:
PUBLIC = public
return HandlerWrapper
class MemcachedZipHandler(webapp.RequestHandler):
"""Handles get requests for a given URL.
Serves a GET request from a series of zip files. As files are served they are
put into memcache, which is much faster than retreiving them from the zip
source file again. It also uses considerably fewer CPU cycles.
"""
zipfile_cache = {} # class cache of source zip files
MAX_AGE = 600 # max client-side cache lifetime
PUBLIC = True # public cache setting
CACHE_PREFIX = 'cache://' # memcache key prefix for actual URLs
NEG_CACHE_PREFIX = 'noncache://' # memcache key prefix for non-existant URL
intlString = 'intl/'
validLangs = ['en', 'de', 'es', 'fr','it','ja','zh-CN','zh-TW']
def TrueGet(self, reqUri):
"""The top-level entry point to serving requests.
Called 'True' get because it does the work when called from the wrapper
class' get method. Some logic is applied to the request to serve files
from an intl/<lang>/... directory or fall through to the default language.
Args:
name: URL requested
Returns:
None
"""
langName = 'en'
resetLangCookie = False
urlLangName = None
retry = False
isValidIntl = False
isStripped = False
# Try to retrieve the user's lang pref from the cookie. If there is no
# lang pref cookie in the request, add set-cookie to the response with the
# default value of 'en'.
try:
langName = self.request.cookies['android_developer_pref_lang']
except KeyError:
resetLangCookie = True
#logging.info('==========================EXCEPTION: NO LANG COOKIE FOUND, USING [%s]', langName)
logging.info('==========================REQ INIT name [%s] langName [%s] resetLangCookie [%s]', reqUri, langName, resetLangCookie)
# Preprocess the req url. If it references a directory or the domain itself,
# append '/index.html' to the url and 302 redirect. Otherwise, continue
# processing the request below.
name = self.PreprocessUrl(reqUri, langName)
if name:
# Do some prep for handling intl requests. Parse the url and validate
# the intl/lang substring, extract the url lang code (urlLangName) and the
# the uri that follows the intl/lang substring(contentUri)
sections = name.split("/", 2)
contentUri = 0
isIntl = len(sections) > 1 and (sections[0] == "intl")
if isIntl:
isValidIntl = sections[1] in self.validLangs
if isValidIntl:
urlLangName = sections[1]
contentUri = sections[2]
logging.info(' Content URI is [%s]...', contentUri)
if (urlLangName != langName) or (langName == 'en'):
# if the lang code in the request is different from that in
# the cookie, or if the target lang is en, strip the
# intl/nn substring. It will later be redirected to
# the user's preferred language url.
# logging.info(' Handling a MISMATCHED intl request')
name = contentUri
isStripped = True
isValidIntl = False
isIntl = False
# Send for processing
if self.isCleanUrl(name, langName, isValidIntl, isStripped):
# handle a 'clean' request.
# Try to form a response using the actual request url.
# logging.info(' Request being handled as clean: [%s]', name)
if not self.CreateResponse(name, langName, isValidIntl, resetLangCookie):
# If CreateResponse returns False, there was no such document
# in the intl/lang tree. Before going to 404, see if there is an
# English-language version of the doc in the default
# default tree and return it, else go to 404.
self.CreateResponse(contentUri, langName, False, resetLangCookie)
elif isIntl:
# handle the case where we need to pass through an invalid intl req
# for processing (so as to get 404 as appropriate). This is needed
# because intl urls are passed through clean and retried in English,
# if necessary.
# logging.info(' Handling an invalid intl request...')
self.CreateResponse(name, langName, isValidIntl, resetLangCookie)
else:
# handle the case where we have a non-clean url (usually a non-intl
# url) that we need to interpret in the context of any lang pref
# that is set. Prepend an intl/lang string to the request url and
# send it as a 302 redirect. After the redirect, the subsequent
# request will be handled as a clean url.
self.RedirToIntl(name, self.intlString, langName)
def isCleanUrl(self, name, langName, isValidIntl, isStripped):
"""Determine whether to pass an incoming url straight to processing.
Args:
name: The incoming URL
Returns:
boolean: Whether the URL should be sent straight to processing
"""
# logging.info(' >>>> isCleanUrl name [%s] langName [%s] isValidIntl [%s]', name, langName, isValidIntl)
if (langName == 'en' and not isStripped) or isValidIntl or not ('.html' in name) or (not isValidIntl and not langName):
return True
def PreprocessUrl(self, name, langName):
"""Any preprocessing work on the URL when it comes in.
Put any work related to interpreting the incoming URL here. For example,
this is used to redirect requests for a directory to the index.html file
in that directory. Subclasses should override this method to do different
preprocessing.
Args:
name: The incoming URL
Returns:
False if the request was redirected to '/index.html', or
The processed URL, otherwise
"""
# determine if this is a request for a directory
final_path_segment = name
final_slash_offset = name.rfind('/')
if final_slash_offset != len(name) - 1:
final_path_segment = name[final_slash_offset + 1:]
if final_path_segment.find('.') == -1:
name = ''.join([name, '/'])
# if this is a directory or the domain itself, redirect to /index.html
if not name or (name[len(name) - 1:] == '/'):
uri = ''.join(['/', name, 'index.html'])
# logging.info('--->PREPROCESSING REDIRECT [%s] to [%s] with langName [%s]', name, uri, langName)
self.redirect(uri, False)
return False
else:
|
def RedirToIntl(self, name, intlString, langName):
"""Redirect an incoming request to the appropriate intl uri.
For non-en langName, builds the intl/lang string from a
base (en) string and redirects (302) the request to look for
a version of the file in langName. For en langName, simply
redirects a stripped uri string (intl/nn removed).
Args:
name: The incoming, preprocessed URL
Returns:
The lang-specific URL
"""
if not (langName == 'en'):
builtIntlLangUri = ''.join([intlString, langName, '/', name, '?', self.request.query_string])
else:
builtIntlLangUri = name
uri = ''.join(['/', builtIntlLangUri])
logging.info('-->>REDIRECTING %s to %s', name, uri)
self.redirect(uri, False)
return uri
def CreateResponse(self, name, langName, isValidIntl, resetLangCookie):
"""Process the url and form a response, if appropriate.
Attempts to retrieve the requested file (name) from cache,
negative cache, or store (zip) and form the response.
For intl requests that are not found (in the localized tree),
returns False rather than forming a response, so that
the request can be retried with the base url (this is the
fallthrough to default language).
For requests that are found, forms the headers and
adds the content to the response entity. If the request was
for an intl (localized) url, also resets the language cookie
to the language specified in the url if needed, to ensure that
the client language and response data remain harmonious.
Args:
name: The incoming, preprocessed URL
langName: The language id. Used as necessary to reset the
language cookie in the response.
isValidIntl: If present, indicates whether the request is
for a language-specific url
resetLangCookie: Whether the response should reset the
language cookie to 'langName'
Returns:
True: A response was successfully created for the request
False: No response was created.
"""
# see if we have the page in the memcache
logging.info('PROCESSING %s langName [%s] isValidIntl [%s] resetLang [%s]',
name, langName, isValidIntl, resetLangCookie)
resp_data = self.GetFromCache(name)
if resp_data is None:
logging.info(' Cache miss for %s', name)
resp_data = self.GetFromNegativeCache(name)
if resp_data is None:
resp_data = self.GetFromStore(name)
# IF we have the file, put it in the memcache
# ELSE put it in the negative cache
if resp_data is not None:
self.StoreOrUpdateInCache(name, resp_data)
elif isValidIntl:
# couldn't find the intl doc. Try to fall through to English.
#logging.info(' Retrying with base uri...')
return False
else:
logging.info(' Adding %s to negative cache, serving 404', name)
self.StoreInNegativeCache(name)
self.Write404Error()
return True
else:
# found it in negative cache
self.Write404Error()
return True
# found content from cache or store
logging.info('FOUND CLEAN')
if resetLangCookie:
logging.info(' Resetting android_developer_pref_lang cookie to [%s]',
langName)
expireDate = time.mktime(localtime()) + 60 * 60 * 24 * 365 * 10
self.response.headers.add_header('Set-Cookie',
'android_developer_pref_lang=%s; path=/; expires=%s' %
(langName, strftime("%a, %d %b %Y %H:%M:%S", localtime(expireDate))))
mustRevalidate = False
if ('.html' in name):
# revalidate html files -- workaround for cache inconsistencies for
# negotiated responses
mustRevalidate = True
#logging.info(' Adding [Vary: Cookie] to response...')
self.response.headers.add_header('Vary', 'Cookie')
content_type, encoding = mimetypes.guess_type(name)
if content_type:
self.response.headers['Content-Type'] = content_type
self.SetCachingHeaders(mustRevalidate)
self.response.out.write(resp_data)
elif (name == 'favicon.ico'):
self.response.headers['Content-Type'] = 'image/x-icon'
self.SetCachingHeaders(mustRevalidate)
self.response.out.write(resp_data)
elif name.endswith('.psd'):
self.response.headers['Content-Type'] = 'application/octet-stream'
self.SetCachingHeaders(mustRevalidate)
self.response.out.write(resp_data)
return True
def GetFromStore(self, file_path):
"""Retrieve file from zip files.
Get the file from the source, it must not have been in the memcache. If
possible, we'll use the zip file index to quickly locate where the file
should be found. (See MapToFileArchive documentation for assumptions about
file ordering.) If we don't have an index or don't find the file where the
index says we should, look through all the zip files to find it.
Args:
file_path: the file that we're looking for
Returns:
The contents of the requested file
"""
resp_data = None
file_itr = iter(self.zipfilenames)
# check the index, if we have one, to see what archive the file is in
archive_name = self.MapFileToArchive(file_path)
if not archive_name:
archive_name = file_itr.next()[0]
while resp_data is None and archive_name:
zip_archive = self.LoadZipFile(archive_name)
if zip_archive:
# we expect some lookups will fail, and that's okay, 404s will deal
# with that
try:
resp_data = zip_archive.read(file_path)
except (KeyError, RuntimeError), err:
# no op
x = False
if resp_data is not None:
logging.info('%s read from %s', file_path, archive_name)
try:
archive_name = file_itr.next()[0]
except (StopIteration), err:
archive_name = False
return resp_data
def LoadZipFile(self, zipfilename):
"""Convenience method to load zip file.
Just a convenience method to load the zip file from the data store. This is
useful if we ever want to change data stores and also as a means of
dependency injection for testing. This method will look at our file cache
first, and then load and cache the file if there's a cache miss
Args:
zipfilename: the name of the zip file to load
Returns:
The zip file requested, or None if there is an I/O error
"""
zip_archive = None
zip_archive = self.zipfile_cache.get(zipfilename)
if zip_archive is None:
try:
zip_archive = zipfile.ZipFile(zipfilename)
self.zipfile_cache[zipfilename] = zip_archive
except (IOError, RuntimeError), err:
logging.error('Can\'t open zipfile %s, cause: %s' % (zipfilename,
err))
return zip_archive
def MapFileToArchive(self, file_path):
"""Given a file name, determine what archive it should be in.
This method makes two critical assumptions.
(1) The zip files passed as an argument to the handler, if concatenated
in that same order, would result in a total ordering
of all the files. See (2) for ordering type.
(2) Upper case letters before lower case letters. The traversal of a
directory tree is depth first. A parent directory's files are added
before the files of any child directories
Args:
file_path: the file to be mapped to an archive
Returns:
The name of the archive where we expect the file to be
"""
num_archives = len(self.zipfilenames)
while num_archives > 0:
target = self.zipfilenames[num_archives - 1]
if len(target) > 1:
if self.CompareFilenames(target[1], file_path) >= 0:
return target[0]
num_archives -= 1
return None
def CompareFilenames(self, file1, file2):
"""Determines whether file1 is lexigraphically 'before' file2.
WARNING: This method assumes that paths are output in a depth-first,
with parent directories' files stored before childs'
We say that file1 is lexigraphically before file2 if the last non-matching
path segment of file1 is alphabetically before file2.
Args:
file1: the first file path
file2: the second file path
Returns:
A positive number if file1 is before file2
A negative number if file2 is before file1
0 if filenames are the same
"""
f1_segments = file1.split('/')
f2_segments = file2.split('/')
segment_ptr = 0
while (segment_ptr < len(f1_segments) and
segment_ptr < len(f2_segments) and
f1_segments[segment_ptr] == f2_segments[segment_ptr]):
segment_ptr += 1
if len(f1_segments) == len(f2_segments):
# we fell off the end, the paths much be the same
if segment_ptr == len(f1_segments):
return 0
# we didn't fall of the end, compare the segments where they differ
if f1_segments[segment_ptr] < f2_segments[segment_ptr]:
return 1
elif f1_segments[segment_ptr] > f2_segments[segment_ptr]:
return -1
else:
return 0
# the number of segments differs, we either mismatched comparing
# directories, or comparing a file to a directory
else:
# IF we were looking at the last segment of one of the paths,
# the one with fewer segments is first because files come before
# directories
# ELSE we just need to compare directory names
if (segment_ptr + 1 == len(f1_segments) or
segment_ptr + 1 == len(f2_segments)):
return len(f2_segments) - len(f1_segments)
else:
if f1_segments[segment_ptr] < f2_segments[segment_ptr]:
return 1
elif f1_segments[segment_ptr] > f2_segments[segment_ptr]:
return -1
else:
return 0
def SetCachingHeaders(self, revalidate):
"""Set caching headers for the request."""
max_age = self.MAX_AGE
#self.response.headers['Expires'] = email.Utils.formatdate(
# time.time() + max_age, usegmt=True)
cache_control = []
if self.PUBLIC:
cache_control.append('public')
cache_control.append('max-age=%d' % max_age)
if revalidate:
cache_control.append('must-revalidate')
self.response.headers['Cache-Control'] = ', '.join(cache_control)
def GetFromCache(self, filename):
"""Get file from memcache, if available.
Args:
filename: The URL of the file to return
Returns:
The content of the file
"""
return memcache.get('%s%s' % (self.CACHE_PREFIX, filename))
def StoreOrUpdateInCache(self, filename, data):
"""Store data in the cache.
Store a piece of data in the memcache. Memcache has a maximum item size of
1*10^6 bytes. If the data is too large, fail, but log the failure. Future
work will consider compressing the data before storing or chunking it
Args:
filename: the name of the file to store
data: the data of the file
Returns:
None
"""
try:
if not memcache.add('%s%s' % (self.CACHE_PREFIX, filename), data):
memcache.replace('%s%s' % (self.CACHE_PREFIX, filename), data)
except (ValueError), err:
logging.warning('Data size too large to cache\n%s' % err)
def Write404Error(self):
"""Ouptut a simple 404 response."""
self.error(404)
self.response.out.write(
''.join(['<html><head><title>404: Not Found</title></head>',
'<body><b><h2>Error 404</h2><br/>',
'File not found</b></body></html>']))
def StoreInNegativeCache(self, filename):
"""If a non-existant URL is accessed, cache this result as well.
Future work should consider setting a maximum negative cache size to
prevent it from from negatively impacting the real cache.
Args:
filename: URL to add ot negative cache
Returns:
None
"""
memcache.add('%s%s' % (self.NEG_CACHE_PREFIX, filename), -1)
def GetFromNegativeCache(self, filename):
"""Retrieve from negative cache.
Args:
filename: URL to retreive
Returns:
The file contents if present in the negative cache.
"""
return memcache.get('%s%s' % (self.NEG_CACHE_PREFIX, filename))
def main():
application = webapp.WSGIApplication([('/([^/]+)/(.*)',
MemcachedZipHandler)])
util.run_wsgi_app(application)
if __name__ == '__main__':
main()
| return name | conditional_block |
memcache_zipserve.py | #!/usr/bin/env python
#
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A class to serve pages from zip files and use memcache for performance.
This contains a class and a function to create an anonymous instance of the
class to serve HTTP GET requests. Memcache is used to increase response speed
and lower processing cycles used in serving. Credit to Guido van Rossum and
his implementation of zipserve which served as a reference as I wrote this.
MemcachedZipHandler: Class that serves request
create_handler: method to create instance of MemcachedZipHandler
"""
__author__ = 'jmatt@google.com (Justin Mattson)'
import email.Utils
import logging
import mimetypes
import time
import zipfile
from google.appengine.api import memcache
from google.appengine.ext import webapp
from google.appengine.ext.webapp import util
from time import localtime, strftime
def create_handler(zip_files, max_age=None, public=None):
"""Factory method to create a MemcachedZipHandler instance.
Args:
zip_files: A list of file names, or a list of lists of file name, first
member of file mappings. See MemcachedZipHandler documentation for
more information about using the list of lists format
max_age: The maximum client-side cache lifetime
public: Whether this should be declared public in the client-side cache
Returns:
A MemcachedZipHandler wrapped in a pretty, anonymous bow for use with App
Engine
Raises:
ValueError: if the zip_files argument is not a list
"""
# verify argument integrity. If the argument is passed in list format,
# convert it to list of lists format
if zip_files and type(zip_files).__name__ == 'list':
num_items = len(zip_files)
while num_items > 0:
if type(zip_files[num_items - 1]).__name__ != 'list':
zip_files[num_items - 1] = [zip_files[num_items-1]]
num_items -= 1
else:
raise ValueError('File name arguments must be a list')
class HandlerWrapper(MemcachedZipHandler):
"""Simple wrapper for an instance of MemcachedZipHandler.
I'm still not sure why this is needed
"""
def get(self, name):
self.zipfilenames = zip_files
self.TrueGet(name)
if max_age is not None:
MAX_AGE = max_age
if public is not None:
PUBLIC = public
return HandlerWrapper
class MemcachedZipHandler(webapp.RequestHandler):
"""Handles get requests for a given URL.
Serves a GET request from a series of zip files. As files are served they are
put into memcache, which is much faster than retreiving them from the zip
source file again. It also uses considerably fewer CPU cycles.
"""
zipfile_cache = {} # class cache of source zip files
MAX_AGE = 600 # max client-side cache lifetime
PUBLIC = True # public cache setting
CACHE_PREFIX = 'cache://' # memcache key prefix for actual URLs
NEG_CACHE_PREFIX = 'noncache://' # memcache key prefix for non-existant URL
intlString = 'intl/'
validLangs = ['en', 'de', 'es', 'fr','it','ja','zh-CN','zh-TW']
def TrueGet(self, reqUri):
"""The top-level entry point to serving requests.
Called 'True' get because it does the work when called from the wrapper
class' get method. Some logic is applied to the request to serve files
from an intl/<lang>/... directory or fall through to the default language.
Args:
name: URL requested
Returns:
None
"""
langName = 'en'
resetLangCookie = False
urlLangName = None
retry = False
isValidIntl = False
isStripped = False
# Try to retrieve the user's lang pref from the cookie. If there is no
# lang pref cookie in the request, add set-cookie to the response with the
# default value of 'en'.
try:
langName = self.request.cookies['android_developer_pref_lang']
except KeyError:
resetLangCookie = True
#logging.info('==========================EXCEPTION: NO LANG COOKIE FOUND, USING [%s]', langName)
logging.info('==========================REQ INIT name [%s] langName [%s] resetLangCookie [%s]', reqUri, langName, resetLangCookie)
# Preprocess the req url. If it references a directory or the domain itself,
# append '/index.html' to the url and 302 redirect. Otherwise, continue
# processing the request below.
name = self.PreprocessUrl(reqUri, langName)
if name:
# Do some prep for handling intl requests. Parse the url and validate
# the intl/lang substring, extract the url lang code (urlLangName) and the
# the uri that follows the intl/lang substring(contentUri)
sections = name.split("/", 2)
contentUri = 0
isIntl = len(sections) > 1 and (sections[0] == "intl")
if isIntl:
isValidIntl = sections[1] in self.validLangs
if isValidIntl:
urlLangName = sections[1]
contentUri = sections[2]
logging.info(' Content URI is [%s]...', contentUri)
if (urlLangName != langName) or (langName == 'en'):
# if the lang code in the request is different from that in
# the cookie, or if the target lang is en, strip the
# intl/nn substring. It will later be redirected to
# the user's preferred language url.
# logging.info(' Handling a MISMATCHED intl request')
name = contentUri
isStripped = True
isValidIntl = False
isIntl = False
# Send for processing
if self.isCleanUrl(name, langName, isValidIntl, isStripped):
# handle a 'clean' request.
# Try to form a response using the actual request url.
# logging.info(' Request being handled as clean: [%s]', name)
if not self.CreateResponse(name, langName, isValidIntl, resetLangCookie):
# If CreateResponse returns False, there was no such document
# in the intl/lang tree. Before going to 404, see if there is an
# English-language version of the doc in the default
# default tree and return it, else go to 404.
self.CreateResponse(contentUri, langName, False, resetLangCookie)
elif isIntl:
# handle the case where we need to pass through an invalid intl req
# for processing (so as to get 404 as appropriate). This is needed
# because intl urls are passed through clean and retried in English,
# if necessary.
# logging.info(' Handling an invalid intl request...')
self.CreateResponse(name, langName, isValidIntl, resetLangCookie)
else:
# handle the case where we have a non-clean url (usually a non-intl
# url) that we need to interpret in the context of any lang pref
# that is set. Prepend an intl/lang string to the request url and
# send it as a 302 redirect. After the redirect, the subsequent
# request will be handled as a clean url.
self.RedirToIntl(name, self.intlString, langName)
def isCleanUrl(self, name, langName, isValidIntl, isStripped):
"""Determine whether to pass an incoming url straight to processing.
Args:
name: The incoming URL
Returns:
boolean: Whether the URL should be sent straight to processing
"""
# logging.info(' >>>> isCleanUrl name [%s] langName [%s] isValidIntl [%s]', name, langName, isValidIntl)
if (langName == 'en' and not isStripped) or isValidIntl or not ('.html' in name) or (not isValidIntl and not langName):
return True
def PreprocessUrl(self, name, langName):
"""Any preprocessing work on the URL when it comes in.
Put any work related to interpreting the incoming URL here. For example,
this is used to redirect requests for a directory to the index.html file
in that directory. Subclasses should override this method to do different
preprocessing.
Args:
name: The incoming URL
Returns:
False if the request was redirected to '/index.html', or
The processed URL, otherwise
"""
# determine if this is a request for a directory
final_path_segment = name
final_slash_offset = name.rfind('/')
if final_slash_offset != len(name) - 1:
final_path_segment = name[final_slash_offset + 1:]
if final_path_segment.find('.') == -1:
name = ''.join([name, '/'])
# if this is a directory or the domain itself, redirect to /index.html
if not name or (name[len(name) - 1:] == '/'):
uri = ''.join(['/', name, 'index.html'])
# logging.info('--->PREPROCESSING REDIRECT [%s] to [%s] with langName [%s]', name, uri, langName)
self.redirect(uri, False)
return False
else:
return name
def RedirToIntl(self, name, intlString, langName):
"""Redirect an incoming request to the appropriate intl uri.
For non-en langName, builds the intl/lang string from a
base (en) string and redirects (302) the request to look for
a version of the file in langName. For en langName, simply
redirects a stripped uri string (intl/nn removed).
Args:
name: The incoming, preprocessed URL
Returns:
The lang-specific URL
"""
if not (langName == 'en'):
builtIntlLangUri = ''.join([intlString, langName, '/', name, '?', self.request.query_string])
else:
builtIntlLangUri = name
uri = ''.join(['/', builtIntlLangUri])
logging.info('-->>REDIRECTING %s to %s', name, uri)
self.redirect(uri, False)
return uri
def | (self, name, langName, isValidIntl, resetLangCookie):
"""Process the url and form a response, if appropriate.
Attempts to retrieve the requested file (name) from cache,
negative cache, or store (zip) and form the response.
For intl requests that are not found (in the localized tree),
returns False rather than forming a response, so that
the request can be retried with the base url (this is the
fallthrough to default language).
For requests that are found, forms the headers and
adds the content to the response entity. If the request was
for an intl (localized) url, also resets the language cookie
to the language specified in the url if needed, to ensure that
the client language and response data remain harmonious.
Args:
name: The incoming, preprocessed URL
langName: The language id. Used as necessary to reset the
language cookie in the response.
isValidIntl: If present, indicates whether the request is
for a language-specific url
resetLangCookie: Whether the response should reset the
language cookie to 'langName'
Returns:
True: A response was successfully created for the request
False: No response was created.
"""
# see if we have the page in the memcache
logging.info('PROCESSING %s langName [%s] isValidIntl [%s] resetLang [%s]',
name, langName, isValidIntl, resetLangCookie)
resp_data = self.GetFromCache(name)
if resp_data is None:
logging.info(' Cache miss for %s', name)
resp_data = self.GetFromNegativeCache(name)
if resp_data is None:
resp_data = self.GetFromStore(name)
# IF we have the file, put it in the memcache
# ELSE put it in the negative cache
if resp_data is not None:
self.StoreOrUpdateInCache(name, resp_data)
elif isValidIntl:
# couldn't find the intl doc. Try to fall through to English.
#logging.info(' Retrying with base uri...')
return False
else:
logging.info(' Adding %s to negative cache, serving 404', name)
self.StoreInNegativeCache(name)
self.Write404Error()
return True
else:
# found it in negative cache
self.Write404Error()
return True
# found content from cache or store
logging.info('FOUND CLEAN')
if resetLangCookie:
logging.info(' Resetting android_developer_pref_lang cookie to [%s]',
langName)
expireDate = time.mktime(localtime()) + 60 * 60 * 24 * 365 * 10
self.response.headers.add_header('Set-Cookie',
'android_developer_pref_lang=%s; path=/; expires=%s' %
(langName, strftime("%a, %d %b %Y %H:%M:%S", localtime(expireDate))))
mustRevalidate = False
if ('.html' in name):
# revalidate html files -- workaround for cache inconsistencies for
# negotiated responses
mustRevalidate = True
#logging.info(' Adding [Vary: Cookie] to response...')
self.response.headers.add_header('Vary', 'Cookie')
content_type, encoding = mimetypes.guess_type(name)
if content_type:
self.response.headers['Content-Type'] = content_type
self.SetCachingHeaders(mustRevalidate)
self.response.out.write(resp_data)
elif (name == 'favicon.ico'):
self.response.headers['Content-Type'] = 'image/x-icon'
self.SetCachingHeaders(mustRevalidate)
self.response.out.write(resp_data)
elif name.endswith('.psd'):
self.response.headers['Content-Type'] = 'application/octet-stream'
self.SetCachingHeaders(mustRevalidate)
self.response.out.write(resp_data)
return True
def GetFromStore(self, file_path):
  """Retrieve file from zip files.

  Get the file from the source, it must not have been in the memcache. If
  possible, we'll use the zip file index to quickly locate where the file
  should be found. (See MapToFileArchive documentation for assumptions about
  file ordering.) If we don't have an index or don't find the file where the
  index says we should, look through all the zip files to find it.

  Args:
    file_path: the file that we're looking for

  Returns:
    The contents of the requested file, or None if it is in no archive.
  """
  resp_data = None
  file_itr = iter(self.zipfilenames)
  # Check the index, if we have one, to see what archive the file is in;
  # with no index hit, start scanning from the first archive in the list.
  archive_name = self.MapFileToArchive(file_path)
  if not archive_name:
    archive_name = file_itr.next()[0]
  # Try archives one by one until the file is read or we run out of them.
  while resp_data is None and archive_name:
    zip_archive = self.LoadZipFile(archive_name)
    if zip_archive:
      # we expect some lookups will fail, and that's okay, 404s will deal
      # with that
      try:
        resp_data = zip_archive.read(file_path)
      except (KeyError, RuntimeError), err:
        # no op -- a miss simply means we move on to the next archive
        x = False
    if resp_data is not None:
      logging.info('%s read from %s', file_path, archive_name)
    # Advance to the next archive; a StopIteration means we scanned them all.
    try:
      archive_name = file_itr.next()[0]
    except (StopIteration), err:
      archive_name = False
  return resp_data
def LoadZipFile(self, zipfilename):
  """Convenience method to load zip file.

  Just a convenience method to load the zip file from the data store. This is
  useful if we ever want to change data stores and also as a means of
  dependency injection for testing. This method will look at our file cache
  first, and then load and cache the file if there's a cache miss

  Args:
    zipfilename: the name of the zip file to load

  Returns:
    The zip file requested, or None if there is an I/O error
  """
  zip_archive = None
  # Serve the already-open ZipFile from the in-process cache when possible.
  zip_archive = self.zipfile_cache.get(zipfilename)
  if zip_archive is None:
    try:
      zip_archive = zipfile.ZipFile(zipfilename)
      self.zipfile_cache[zipfilename] = zip_archive
    except (IOError, RuntimeError), err:
      # Missing/corrupt archive: log it and fall through returning None.
      logging.error('Can\'t open zipfile %s, cause: %s' % (zipfilename,
                                                           err))
  return zip_archive
def MapFileToArchive(self, file_path):
  """Given a file name, determine what archive it should be in.

  This method makes two critical assumptions.
  (1) The zip files passed as an argument to the handler, if concatenated
      in that same order, would result in a total ordering
      of all the files. See (2) for ordering type.
  (2) Upper case letters before lower case letters. The traversal of a
      directory tree is depth first. A parent directory's files are added
      before the files of any child directories

  Args:
    file_path: the file to be mapped to an archive

  Returns:
    The name of the archive where we expect the file to be, or None when no
    archive qualifies.
  """
  # Walk the archive list from the end; entries without a recorded last
  # filename (length <= 1) carry no ordering information and are skipped.
  for entry in reversed(self.zipfilenames):
    if len(entry) > 1 and self.CompareFilenames(entry[1], file_path) >= 0:
      return entry[0]
  return None
def CompareFilenames(self, file1, file2):
  """Determines whether file1 is lexicographically 'before' file2.

  WARNING: This method assumes that paths are output in a depth-first,
  with parent directories' files stored before childs'

  We say that file1 is lexicographically before file2 if the last
  non-matching path segment of file1 is alphabetically before file2.

  Args:
    file1: the first file path
    file2: the second file path

  Returns:
    A positive number if file1 is before file2
    A negative number if file2 is before file1
    0 if filenames are the same
  """
  parts1 = file1.split('/')
  parts2 = file2.split('/')
  # Count how many leading segments the two paths share.
  common = 0
  for seg1, seg2 in zip(parts1, parts2):
    if seg1 != seg2:
      break
    common += 1
  if len(parts1) == len(parts2):
    # Same depth: either the paths are identical, or the first differing
    # segment decides the order.
    if common == len(parts1):
      return 0
    if parts1[common] < parts2[common]:
      return 1
    if parts1[common] > parts2[common]:
      return -1
    return 0
  # Depths differ: when one path has run out of segments (a file compared
  # against a directory subtree), the shorter path wins because files come
  # before directories; otherwise compare the differing directory names.
  if common + 1 == len(parts1) or common + 1 == len(parts2):
    return len(parts2) - len(parts1)
  if parts1[common] < parts2[common]:
    return 1
  if parts1[common] > parts2[common]:
    return -1
  return 0
def SetCachingHeaders(self, revalidate):
  """Set caching headers for the request.

  Builds a Cache-Control header from the handler's PUBLIC and MAX_AGE
  settings; 'must-revalidate' is appended when requested by the caller.
  """
  directives = []
  if self.PUBLIC:
    directives.append('public')
  directives.append('max-age=%d' % self.MAX_AGE)
  if revalidate:
    directives.append('must-revalidate')
  self.response.headers['Cache-Control'] = ', '.join(directives)
def GetFromCache(self, filename):
  """Get file from memcache, if available.

  Args:
    filename: The URL of the file to return

  Returns:
    The content of the file, or None on a cache miss.
  """
  cache_key = '%s%s' % (self.CACHE_PREFIX, filename)
  return memcache.get(cache_key)
def StoreOrUpdateInCache(self, filename, data):
  """Store data in the cache.

  Store a piece of data in the memcache. Memcache has a maximum item size of
  1*10^6 bytes. If the data is too large, fail, but log the failure. Future
  work will consider compressing the data before storing or chunking it

  Args:
    filename: the name of the file to store
    data: the data of the file

  Returns:
    None
  """
  try:
    # add() is a no-op when the key already exists, so fall back to
    # replace() to refresh an existing entry.
    if not memcache.add('%s%s' % (self.CACHE_PREFIX, filename), data):
      memcache.replace('%s%s' % (self.CACHE_PREFIX, filename), data)
  except (ValueError), err:
    # memcache rejects values over its item-size limit; serve uncached
    # rather than fail the request.
    logging.warning('Data size too large to cache\n%s' % err)
def Write404Error(self):
  """Output a simple 404 response."""
  self.error(404)
  body = ('<html><head><title>404: Not Found</title></head>'
          '<body><b><h2>Error 404</h2><br/>'
          'File not found</b></body></html>')
  self.response.out.write(body)
def StoreInNegativeCache(self, filename):
  """If a non-existant URL is accessed, cache this result as well.

  Future work should consider setting a maximum negative cache size to
  prevent it from from negatively impacting the real cache.

  Args:
    filename: URL to add ot negative cache

  Returns:
    None
  """
  negative_key = '%s%s' % (self.NEG_CACHE_PREFIX, filename)
  # -1 is just a sentinel marking "known missing".
  memcache.add(negative_key, -1)
def GetFromNegativeCache(self, filename):
  """Retrieve from negative cache.

  Args:
    filename: URL to retrieve

  Returns:
    The negative-cache sentinel when the URL is known missing, else None.
  """
  negative_key = '%s%s' % (self.NEG_CACHE_PREFIX, filename)
  return memcache.get(negative_key)
def main():
  """Entry point: route every '/<prefix>/<path>' URL to MemcachedZipHandler."""
  util.run_wsgi_app(
      webapp.WSGIApplication([('/([^/]+)/(.*)', MemcachedZipHandler)]))
# CGI-style entry point used when the App Engine runtime executes this script.
if __name__ == '__main__':
  main()
| CreateResponse | identifier_name |
chart.js | google.charts.load('current', { packages: ['corechart'] });
// Ids of the articles currently shown on the author analytics page.
var articlesShown = [];
// Caches of every article title (plain, and with the revision-count suffix),
// filled by fillAutocomplete() and used to validate search input.
var allArticleTitles = [];
var allArticletitlesWithRevisions = [];
// Chart options shared by every Google chart drawn on these pages.
var options = {
'fontName':'Avenir',
'backgroundColor': {
fill:'#F3F3F3',
strokeWidth:10,
stroke:'#CE953F'
},
'bar': {groupWidth: "70%"},
'width': '100%',
'height': 500,
'hAxis':{
showTextEvery:1,
maxAlternation:1,
minTextSpacing:1,
textStyle:{
fontSize:11,
bold:true,
},
},
'legend': {
position: 'top',
alignment:'center'
},
'vAxis': {
viewWindowMode: 'pretty',
viewWindow: {
min: 0,
},
gridlines: {
count: 9,
}
}
};
// Chart data fetched from the server; consumed by the draw* helpers below.
var pieData
var barData
//Onload function
window.onload = function () {
    // Show the overview page by default.
    getOverviewPage();
    // Wire up the menu links so the other pages can be loaded on demand.
    var menuPages = [
        ['#ArticleAnalytics', getArticleAnalyticsPage],
        ['#AuthorAnalytics', getAuthorAnalyticsPage],
        ['#Overview', getOverviewPage]
    ];
    menuPages.forEach(function (entry) {
        var selector = entry[0];
        var loadPage = entry[1];
        $(selector).click(function () {
            resetMenuBar();
            $(selector).addClass("active");
            loadPage();
        });
    });
}
/********************************
FUNCTIONS FOR LOADING MAIN PAGES
********************************/
//Replaces entire page with article analytics
function getArticleAnalyticsPage() {
    // Swap the main pane for the article-analytics view, then populate the
    // autocomplete list and hook up the search button.
    var main = $('#main');
    main.empty();
    main.load('views/articleAnalytics.html', null, function () {
        fillAutocomplete();
        $('#articleSearchButton').click(function () {
            getIndividualArticleStats();
        });
    });
}
function getOverviewPage() {
    // Swap the main pane for the overview view and populate all its panels.
    var main = $('#main');
    main.empty();
    main.load('views/overview.html', null, function () {
        // Initial data load for the list panels.
        getTopRevs();
        getBotRevs();
        getOldestArticles();
        getNewestArticles();
        getTitleLargestRegUser();
        getTitleLeastRegUser();
        // Fetch chart data; the bar chart is drawn once its data arrives.
        $.getJSON('/pieData', null, function (rdata) {
            pieData = rdata;
        });
        $.getJSON('/barData', null, function (rdata) {
            barData = rdata;
            drawBar('#myChart');
        });
        // Re-query the top/bottom revision lists on demand.
        $('[name=topBotRevUpdate]').click(function () {
            getTopRevs();
            getBotRevs();
        });
        // Toggle between the pie ("In Total") and bar charts.
        $('[name=chartUpdate]').click(function () {
            if ($('[name=chartSelector]').val() == "In Total") {
                drawPie('#myChart');
            } else {
                drawBar('#myChart');
            }
        });
    });
}
function getAuthorAnalyticsPage() |
//clears the .active class from the menu bar
function resetMenuBar() {
    // Clear the .active highlight from every menu entry.
    ['#Overview', '#ArticleAnalytics', '#AuthorAnalytics'].forEach(function (id) {
        $(id).removeClass("active");
    });
}
/******************
LOAD THE CHART DATA
*******************/
function drawPie(where) {
    // Draw a pie chart of the global pieData object (label -> value) into the
    // element matched by `where`.
    // FIX: declare graphData locally (it previously leaked as an implicit
    // global) and drop the leftover debug console.log.
    var graphData = new google.visualization.DataTable();
    graphData.addColumn('string', 'Element');
    graphData.addColumn('number', 'Percentage');
    $.each(pieData, function (key, val) {
        graphData.addRow([key, val]);
    })
    var chart = new google.visualization.PieChart($(where)[0]);
    chart.draw(graphData, options);
}
function drawBar(where) {
    // Draw the per-year column chart of revision counts by user type from the
    // global barData into the element matched by `where`.
    // FIX: declare graphData and the row buffer locally (previously implicit
    // globals).
    var graphData = new google.visualization.DataTable();
    graphData.addColumn('string', 'Year');
    graphData.addColumn('number', 'RegularUsers');
    graphData.addColumn('number', 'Bots');
    graphData.addColumn('number', 'Admins');
    graphData.addColumn('number', 'Anon');
    var rows = [];
    for (var i in barData) {
        rows.push(barData[i]);
    }
    for (var x = 0; x < rows.length; x++) {
        graphData.addRow([rows[x].Year, rows[x].RegularUsers, rows[x].Bots, rows[x].Admins, rows[x].Anon]);
    }
    var chart = new google.visualization.ColumnChart($(where)[0]);
    chart.draw(graphData, options);
}
function drawBarSpecificUser(where, dataToUse) {
    // Draw a per-year horizontal bar chart of one user's revision counts
    // (dataToUse) into the element matched by `where`.
    // FIX: declare graphData and the row buffer locally (previously implicit
    // globals).
    var graphData = new google.visualization.DataTable();
    graphData.addColumn('string', 'Year');
    graphData.addColumn('number', 'Revisions');
    var rows = [];
    for (var i in dataToUse) {
        rows.push(dataToUse[i]);
    }
    for (var x = 0; x < rows.length; x++) {
        graphData.addRow([rows[x].Year, rows[x].Revisions]);
    }
    var chart = new google.visualization.BarChart($(where)[0]);
    chart.draw(graphData, options);
}
/*******************************************
FUNCTIONS FOR LOADING REGULAR DATA INTO HTML
********************************************/
function getTopRevs() {
    // Fetch the N most-revised articles (N from the quantity input) and
    // render them as a numbered list.
    var quantity = $('[name=quantity]').val();
    $.get('getTopRevs?quantity=' + quantity, quantity, function (data) {
        var list = $('#topRev');
        list.empty();
        $.each(data, function (idx, row) {
            list.append($('<li>' + (idx + 1) + '. ' + row._id + '</li>'));
        });
    });
}
function getBotRevs() {
    // Fetch the N least-revised articles (N from the quantity input) and
    // render them as a numbered list.
    var quantity = $('[name=quantity]').val();
    $.get('getBotRevs?quantity=' + quantity, quantity, function (data) {
        var list = $('#botRev');
        list.empty();
        $.each(data, function (idx, row) {
            list.append($('<li>' + (idx + 1) + '. ' + row._id + '</li>'));
        });
    });
}
function getOldestArticles() {
    // Fetch and render the oldest articles as a numbered list.
    $.get('getOldestArticles', null, function (data) {
        console.log(data);
        var list = $('#oldestArticles');
        list.empty();
        $.each(data, function (idx, row) {
            list.append($('<li>' + (idx + 1) + '. ' + row._id + '</li>'));
        });
    });
}
function getNewestArticles() {
    // Fetch and render the newest articles as a numbered list.
    console.log('here');
    $.get('getNewestArticles', null, function (data) {
        console.log(data);
        var list = $('#newestArticles');
        list.empty();
        $.each(data, function (idx, row) {
            list.append($('<li>' + (idx + 1) + '. ' + row._id + '</li>'));
        });
    });
}
function getTitleLargestRegUser(){
    // Fills #mostUsers with a single title returned by the server.
    // NOTE(review): the function name says "largest" but the endpoint queried
    // is 'getLeastRegoUser', while the sibling function queries
    // 'getLargestRegoUser' -- the two endpoints appear swapped relative to
    // the function names. Verify against the server routes before changing.
    var destination = 'getLeastRegoUser';
    $.get(destination, null, function (data) {
        console.log(data);
        $('#mostUsers').empty();
        $('#mostUsers').text(data);
    })
}
function getTitleLeastRegUser(){
    // Fills #leastUsers with a single title returned by the server.
    // NOTE(review): the function name says "least" but the endpoint queried
    // is 'getLargestRegoUser' -- it appears swapped with the sibling
    // function's endpoint. Verify against the server routes before changing.
    var destination = 'getLargestRegoUser';
    $.get(destination, null, function (data) {
        console.log(data);
        $('#leastUsers').empty();
        $('#leastUsers').text(data);
    })
}
function getAuthorArticleList() {
    // Query all (user, article, revision-count) rows for users matching the
    // entered name and render them as a table; clicking a row inserts that
    // row's revision timestamps directly underneath it.
    var authorName = $('#authorEntryBox').val();
    console.log(authorName)
    var destination = 'getAuthorArticleList?authorName=' + authorName;
    var putListHere = $('#articleList');
    $.get(destination, null, function (data) {
        console.log('Here is the user list ')
        console.log(data)
        if (data.length == 0) {
            alert("Could not find any users with names matching that query");
        } else {
            putListHere.empty();
            //Add headers
            var theader = $("<thead><tr><th>User Name</th><th>Article Name</th><th>Number of Revisions</th></tr></thead>")
            $('#articleList').append(theader);
            //Create data table: one row per (user, article, count) result,
            //each tagged with a positional id (entryID<n>) for later lookup.
            for (var x = 0; x < data.length; x++) {
                var appnedMe = $("<tr class='articleEntry' id= '" + "entryID" + x + "'>" + '<td>' + data[x].user + '</td>' + "<td>" + data[x]._id + "</td>" + '<td>' + data[x].count + '</td>' + '</tr>');
                $('#articleList').append(appnedMe);
                var temp = '#entryID' + x;
            }
            //Event handlers are attached via a separate function so each
            //click closure captures its own row index (a bare loop `var`
            //would be shared across all iterations).
            function handleEvent(idVal){
                var elementGetter = '#entryID' + idVal;
                $(elementGetter).click(function(){
                    //Remove timestamps from any previously expanded row.
                    $(".timestamp").remove();
                    console.log(elementGetter)
                    // NOTE(review): per the table columns above, _id is the
                    // article title and .user the user name, yet _id is sent
                    // as authorName and .user as title -- the two query
                    // parameters look swapped. Confirm against the
                    // getTimestamps server route before relying on this.
                    var newdestination = 'getTimestamps?authorName=' + data[idVal]._id + "&title=" + data[idVal].user;
                    $.get(newdestination, null, function (newdata) {
                        console.log(newdata)
                        //Insert one timestamp row under the clicked entry.
                        for(var z = 0; z < newdata.length; z++){
                            var myDate = new Date(newdata[z].timestamp)
                            console.log(myDate)
                            $('<tr><td class="timestamp">' + " " + myDate.toUTCString() + '</td></tr>').insertAfter(elementGetter);
                        }
                    })
                })
            }
            for(var x = 0; x < data.length; x++){
                handleEvent(x)
            }
        }
    })
}
function fillAutocomplete() {
    // Download every article title (with revision counts), populate the
    // datalist of suggestions, and fill the two global title caches used to
    // validate searches.
    $.get('getAllArticleTitles', null, function (data) {
        var suggestions = $('#articleEntryList');
        suggestions.empty();
        for (var x = 0; x < data.length; x++) {
            console.log(data[x])
            suggestions.append($('<option>' + data[x]._id + " [revisions: " + data[x].count + ']</option>'));
            allArticleTitles[x] = data[x]._id;
            allArticletitlesWithRevisions[x] = data[x]._id + ' [Total Revisions: ' + data[x].count + ']';
        }
    });
}
function getIndividualArticleStats() {
    // Render the dashboard for one article: heading, top-users list, charts,
    // and a freshness check against the live Wikipedia revisions API.
    //Get article name; strip the " [revisions: N]" suffix that autocomplete
    //suggestions append, plus the trailing space if present.
    var searchedArticle = $('#articleEntryBox').val();
    var temp = searchedArticle.split("[");
    temp = temp[0];
    if (temp.substring(temp.length - 1, temp.length) == " ") {
        temp = temp.substring(0, temp.length - 1);
    }
    searchedArticle = temp;
    var validTitle = false;
    //check if this is a valid article (titles are case sensitive)
    for (var n = 0; n < allArticleTitles.length; n++) {
        if (searchedArticle == allArticleTitles[n]) {
            validTitle = true;
        }
    }
    //Convert article string to be used with wiki link (spaces -> underscores)
    searchQuery = searchedArticle.replace(/\s+/g, '_');
    //Retrieve last timestamp from DB, then ask Wikipedia for revisions made
    //after it.
    var sendTitle = 'getLastTime?title=' + searchedArticle
    $.ajax({
        url: sendTitle,
        type: 'GET',
        global: false,
        dataType: 'json',
        success: function(data) {
            lastTS = data;
            //AJAX within an AJAX: query the MediaWiki revisions API starting
            //at the newest timestamp we have stored.
            var wikiEndpoint = "https://en.wikipedia.org/w/api.php",
            parameters = [ "action=query",
            "format=json",
            "formatversion=2",
            "prop=revisions",
            "titles="+searchQuery,
            "rvstart="+lastTS,
            "rvdir=newer",
            "order=desc",
            "rvlimit=max",
            //Only querying for timestamps
            "rvprop=timestamp",
            "origin=*",
            "callback=?"]
            var url = wikiEndpoint + "?" + parameters.join("&");
            console.log(url);
            //Get data from MediaWiki API
            $.ajax({
                url: url,
                type: 'GET',
                dataType: 'jsonp',
                contentType: "application/json; charset=utf-8",
                success: function(data, jqXHR) {
                    page=data.query.pages;
                    revs = page[Object.keys(page)[0]].revisions
                    //Retrieve latest timestamp from the array
                    latestTS = revs[Object.keys(revs).length-1].timestamp
                    //Moment.js library to compare dates
                    var diff = moment.duration(moment(latestTS).diff(moment(lastTS)));
                    //Date conversions
                    var days = parseInt(diff.asDays());
                    var hours = parseInt(diff.asHours());
                    adjustedHours = hours - days*24;
                    var minutes = parseInt(diff.asMinutes());
                    minutes = minutes - (days*24*60 + adjustedHours*60);
                    console.log(searchQuery+": "+days+" days "+adjustedHours+" hours " + minutes +" minutes");
                    console.log("Latest: "+latestTS);
                    console.log("Database: "+lastTS);
                    //Displays number of revisions if available.
                    //NOTE(review): "update available" is approximated as "the
                    //latest wiki revision is >= 24h newer than ours".
                    if (hours<24) {
                        $("#wasIUpdated").empty();
                        $("#wasIUpdated").text("No new revisions!");
                    } else {
                        $("#wasIUpdated").empty();
                        $("#wasIUpdated").text("Update available: " +revs.length + " revisions");
                    }
                }
            })
        }
    })
    if (!validTitle) {
        alert("There are no articles with this title (titles are case sensitive)");
    } else {
        //Display article name, upgraded to the variant carrying the
        //revision-count suffix when we know it.
        $("#putTitleHere").empty();
        var newTitle = searchedArticle.trim();
        for(var c = 0; c < allArticleTitles.length; c++){
            var toTest = allArticleTitles[c].trim();
            if(newTitle == toTest){
                newTitle = allArticletitlesWithRevisions[c];
            }
        }
        $("#putTitleHere").text(newTitle);
        //Add in title and class of table.
        //BUGFIX: the selector was missing its leading '#'
        //($("putTopRegUsersHeading")), so the old heading was never cleared.
        $("#putTopRegUsersHeading").empty();
        $("#putTopRegUsersHeading").text("Top Users For " + searchedArticle);
        $("#putClassHere").addClass("longtable");
        //Add in charts
        $("#putChartTitleHere").text("Graphs");
        //Get chart data and draw charts
        var destination2 = 'pieDataIndividualArticle?title=' + searchedArticle
        $.getJSON(destination2, null, function (rdata) {
            pieData = rdata
        }
        );
        var destination3 = 'barDataIndividualArticle?title=' + searchedArticle
        $.getJSON(destination3, null, function (rdata) {
            barData = rdata
            drawBar('#IndividualArticleChart'); //default chart
        }
        );
        //Draws the chart controls and add the top 5 users to the table
        var chartControls = $.get('views/chartControls.html', null, function (data) {
            appendMe = $(data)
            $('#putChartControlsHere').empty();
            $('#putChartControlsHere').append(appendMe);
            //Add in top 5 users to both the chart above and the select list
            var destination = 'getTopUsersForArticle?title=' + searchedArticle
            $.get(destination, null, function (data) {
                console.log(data)
                $('#putTopRegUsers').empty();
                for (var x = 0; x < data.length; x++) {
                    var ranking = x + 1;
                    ranking = ranking + '. '
                    var appendMe = $('<li>' + ranking + data[x]._id + ' (revisions: ' + data[x].count + ')' + '</li>');
                    $('#putTopRegUsers').append(appendMe);
                    appendMe = $('<option value=' + data[x]._id + '>' + data[x]._id + '</option>');
                    $('#putUsersHere').append(appendMe)
                }
            })
            //Registers event handler for update button
            $('#chartSwitcherIndividualArticle').click(function () {
                //get value from select box
                var specificUser = $('#chartSelectorIndividualArticles').val();
                if (specificUser == "In Total") {
                    drawPie('#IndividualArticleChart');
                } else if (specificUser == "Over Time") {
                    drawBar('#IndividualArticleChart');
                } else {
                    //Per-user chart: fetch that user's yearly revision counts.
                    var destination4 = 'barDataSpecificUser?user=' + specificUser + '&title=' + searchedArticle
                    $.getJSON(destination4, null, function (rdata) {
                        console.log('here it is...')
                        console.log(rdata)
                        drawBarSpecificUser('#IndividualArticleChart', rdata);
                    }
                    );
                }
            })
        })
    }
}
| {
$('#main').empty();
$('#main').load('views/authorAnalytics.html', null, function () {
$('#authorSearchButton').click(function () {
getAuthorArticleList();
})
});
} | identifier_body |
chart.js | google.charts.load('current', { packages: ['corechart'] });
// Ids of the articles currently shown on the author analytics page.
var articlesShown = [];
// Caches of every article title (plain, and with the revision-count suffix),
// filled by fillAutocomplete() and used to validate search input.
var allArticleTitles = [];
var allArticletitlesWithRevisions = [];
// Chart options shared by every Google chart drawn on these pages.
var options = {
'fontName':'Avenir',
'backgroundColor': {
fill:'#F3F3F3',
strokeWidth:10,
stroke:'#CE953F'
},
'bar': {groupWidth: "70%"},
'width': '100%',
'height': 500,
'hAxis':{
showTextEvery:1,
maxAlternation:1,
minTextSpacing:1,
textStyle:{
fontSize:11,
bold:true,
},
},
'legend': {
position: 'top',
alignment:'center'
},
'vAxis': {
viewWindowMode: 'pretty',
viewWindow: {
min: 0,
},
gridlines: {
count: 9,
}
}
};
// Chart data fetched from the server; consumed by the draw* helpers below.
var pieData
var barData
//Onload function
window.onload = function () {
    // Show the overview page by default.
    getOverviewPage();
    // Wire up the menu links so the other pages can be loaded on demand.
    var menuPages = [
        ['#ArticleAnalytics', getArticleAnalyticsPage],
        ['#AuthorAnalytics', getAuthorAnalyticsPage],
        ['#Overview', getOverviewPage]
    ];
    menuPages.forEach(function (entry) {
        var selector = entry[0];
        var loadPage = entry[1];
        $(selector).click(function () {
            resetMenuBar();
            $(selector).addClass("active");
            loadPage();
        });
    });
}
/********************************
FUNCTIONS FOR LOADING MAIN PAGES
********************************/
//Replaces entire page with article analytics
function getArticleAnalyticsPage() {
    // Swap the main pane for the article-analytics view, then populate the
    // autocomplete list and hook up the search button.
    var main = $('#main');
    main.empty();
    main.load('views/articleAnalytics.html', null, function () {
        fillAutocomplete();
        $('#articleSearchButton').click(function () {
            getIndividualArticleStats();
        });
    });
}
function getOverviewPage() {
$('#main').empty(); //Clear page
$('#main').load('views/overview.html', null, function () { //load overview page
//Intial Data load
getTopRevs();
getBotRevs();
getOldestArticles();
getNewestArticles();
getTitleLargestRegUser();
getTitleLeastRegUser();
//get chart data
$.getJSON('/pieData', null, function (rdata) {
pieData = rdata
}
);
$.getJSON('/barData', null, function (rdata) {
barData = rdata
drawBar('#myChart');
}
);
//Update based on user input
$('[name=topBotRevUpdate]').click(function () {
getTopRevs();
getBotRevs();
});
$('[name=chartUpdate]').click(function () {
var whichChart = $('[name=chartSelector]').val();
if (whichChart == "In Total") {
drawPie('#myChart');
} else {
drawBar('#myChart');
}
});
});
| $('#authorSearchButton').click(function () {
getAuthorArticleList();
})
});
}
//clears the .active class from the menu bar
function resetMenuBar() {
    // Clear the .active highlight from every menu entry.
    ['#Overview', '#ArticleAnalytics', '#AuthorAnalytics'].forEach(function (id) {
        $(id).removeClass("active");
    });
}
/******************
LOAD THE CHART DATA
*******************/
function drawPie(where) {
    // Draw a pie chart of the global pieData object (label -> value) into the
    // element matched by `where`.
    // FIX: declare graphData locally (it previously leaked as an implicit
    // global) and drop the leftover debug console.log.
    var graphData = new google.visualization.DataTable();
    graphData.addColumn('string', 'Element');
    graphData.addColumn('number', 'Percentage');
    $.each(pieData, function (key, val) {
        graphData.addRow([key, val]);
    })
    var chart = new google.visualization.PieChart($(where)[0]);
    chart.draw(graphData, options);
}
function drawBar(where) {
    // Draw the per-year column chart of revision counts by user type from the
    // global barData into the element matched by `where`.
    // FIX: declare graphData and the row buffer locally (previously implicit
    // globals).
    var graphData = new google.visualization.DataTable();
    graphData.addColumn('string', 'Year');
    graphData.addColumn('number', 'RegularUsers');
    graphData.addColumn('number', 'Bots');
    graphData.addColumn('number', 'Admins');
    graphData.addColumn('number', 'Anon');
    var rows = [];
    for (var i in barData) {
        rows.push(barData[i]);
    }
    for (var x = 0; x < rows.length; x++) {
        graphData.addRow([rows[x].Year, rows[x].RegularUsers, rows[x].Bots, rows[x].Admins, rows[x].Anon]);
    }
    var chart = new google.visualization.ColumnChart($(where)[0]);
    chart.draw(graphData, options);
}
function drawBarSpecificUser(where, dataToUse) {
    // Draw a per-year horizontal bar chart of one user's revision counts
    // (dataToUse) into the element matched by `where`.
    // FIX: declare graphData and the row buffer locally (previously implicit
    // globals).
    var graphData = new google.visualization.DataTable();
    graphData.addColumn('string', 'Year');
    graphData.addColumn('number', 'Revisions');
    var rows = [];
    for (var i in dataToUse) {
        rows.push(dataToUse[i]);
    }
    for (var x = 0; x < rows.length; x++) {
        graphData.addRow([rows[x].Year, rows[x].Revisions]);
    }
    var chart = new google.visualization.BarChart($(where)[0]);
    chart.draw(graphData, options);
}
/*******************************************
FUNCTIONS FOR LOADING REGULAR DATA INTO HTML
********************************************/
function getTopRevs() {
    // Fetch the N most-revised articles (N from the quantity input) and
    // render them as a numbered list.
    var quantity = $('[name=quantity]').val();
    $.get('getTopRevs?quantity=' + quantity, quantity, function (data) {
        var list = $('#topRev');
        list.empty();
        $.each(data, function (idx, row) {
            list.append($('<li>' + (idx + 1) + '. ' + row._id + '</li>'));
        });
    });
}
function getBotRevs() {
    // Fetch the N least-revised articles (N from the quantity input) and
    // render them as a numbered list.
    var quantity = $('[name=quantity]').val();
    $.get('getBotRevs?quantity=' + quantity, quantity, function (data) {
        var list = $('#botRev');
        list.empty();
        $.each(data, function (idx, row) {
            list.append($('<li>' + (idx + 1) + '. ' + row._id + '</li>'));
        });
    });
}
function getOldestArticles() {
    // Fetch and render the oldest articles as a numbered list.
    $.get('getOldestArticles', null, function (data) {
        console.log(data);
        var list = $('#oldestArticles');
        list.empty();
        $.each(data, function (idx, row) {
            list.append($('<li>' + (idx + 1) + '. ' + row._id + '</li>'));
        });
    });
}
function getNewestArticles() {
    // Fetch and render the newest articles as a numbered list.
    console.log('here');
    $.get('getNewestArticles', null, function (data) {
        console.log(data);
        var list = $('#newestArticles');
        list.empty();
        $.each(data, function (idx, row) {
            list.append($('<li>' + (idx + 1) + '. ' + row._id + '</li>'));
        });
    });
}
function getTitleLargestRegUser(){
    // Fills #mostUsers with a single title returned by the server.
    // NOTE(review): the function name says "largest" but the endpoint queried
    // is 'getLeastRegoUser', while the sibling function queries
    // 'getLargestRegoUser' -- the two endpoints appear swapped relative to
    // the function names. Verify against the server routes before changing.
    var destination = 'getLeastRegoUser';
    $.get(destination, null, function (data) {
        console.log(data);
        $('#mostUsers').empty();
        $('#mostUsers').text(data);
    })
}
function getTitleLeastRegUser(){
    // Fills #leastUsers with a single title returned by the server.
    // NOTE(review): the function name says "least" but the endpoint queried
    // is 'getLargestRegoUser' -- it appears swapped with the sibling
    // function's endpoint. Verify against the server routes before changing.
    var destination = 'getLargestRegoUser';
    $.get(destination, null, function (data) {
        console.log(data);
        $('#leastUsers').empty();
        $('#leastUsers').text(data);
    })
}
function getAuthorArticleList() {
    // Query all (user, article, revision-count) rows for users matching the
    // entered name and render them as a table; clicking a row inserts that
    // row's revision timestamps directly underneath it.
    var authorName = $('#authorEntryBox').val();
    console.log(authorName)
    var destination = 'getAuthorArticleList?authorName=' + authorName;
    var putListHere = $('#articleList');
    $.get(destination, null, function (data) {
        console.log('Here is the user list ')
        console.log(data)
        if (data.length == 0) {
            alert("Could not find any users with names matching that query");
        } else {
            putListHere.empty();
            //Add headers
            var theader = $("<thead><tr><th>User Name</th><th>Article Name</th><th>Number of Revisions</th></tr></thead>")
            $('#articleList').append(theader);
            //Create data table: one row per (user, article, count) result,
            //each tagged with a positional id (entryID<n>) for later lookup.
            for (var x = 0; x < data.length; x++) {
                var appnedMe = $("<tr class='articleEntry' id= '" + "entryID" + x + "'>" + '<td>' + data[x].user + '</td>' + "<td>" + data[x]._id + "</td>" + '<td>' + data[x].count + '</td>' + '</tr>');
                $('#articleList').append(appnedMe);
                var temp = '#entryID' + x;
            }
            //Event handlers are attached via a separate function so each
            //click closure captures its own row index (a bare loop `var`
            //would be shared across all iterations).
            function handleEvent(idVal){
                var elementGetter = '#entryID' + idVal;
                $(elementGetter).click(function(){
                    //Remove timestamps from any previously expanded row.
                    $(".timestamp").remove();
                    console.log(elementGetter)
                    // NOTE(review): per the table columns above, _id is the
                    // article title and .user the user name, yet _id is sent
                    // as authorName and .user as title -- the two query
                    // parameters look swapped. Confirm against the
                    // getTimestamps server route before relying on this.
                    var newdestination = 'getTimestamps?authorName=' + data[idVal]._id + "&title=" + data[idVal].user;
                    $.get(newdestination, null, function (newdata) {
                        console.log(newdata)
                        //Insert one timestamp row under the clicked entry.
                        for(var z = 0; z < newdata.length; z++){
                            var myDate = new Date(newdata[z].timestamp)
                            console.log(myDate)
                            $('<tr><td class="timestamp">' + " " + myDate.toUTCString() + '</td></tr>').insertAfter(elementGetter);
                        }
                    })
                })
            }
            for(var x = 0; x < data.length; x++){
                handleEvent(x)
            }
        }
    })
}
function fillAutocomplete() {
    // Download every article title (with revision counts), populate the
    // datalist of suggestions, and fill the two global title caches used to
    // validate searches.
    $.get('getAllArticleTitles', null, function (data) {
        var suggestions = $('#articleEntryList');
        suggestions.empty();
        for (var x = 0; x < data.length; x++) {
            console.log(data[x])
            suggestions.append($('<option>' + data[x]._id + " [revisions: " + data[x].count + ']</option>'));
            allArticleTitles[x] = data[x]._id;
            allArticletitlesWithRevisions[x] = data[x]._id + ' [Total Revisions: ' + data[x].count + ']';
        }
    });
}
function getIndividualArticleStats() {
//Get article name
var searchedArticle = $('#articleEntryBox').val();
var temp = searchedArticle.split("[");
temp = temp[0];
if (temp.substring(temp.length - 1, temp.length) == " ") {
temp = temp.substring(0, temp.length - 1);
}
searchedArticle = temp;
var validTitle = false;
//check if this is a valid article
for (var n = 0; n < allArticleTitles.length; n++) {
if (searchedArticle == allArticleTitles[n]) {
validTitle = true;
}
}
//Convert article string to be used with wiki link
searchQuery = searchedArticle.replace(/\s+/g, '_');
//Retrieve last timestamp from DB
var sendTitle = 'getLastTime?title=' + searchedArticle
$.ajax({
url: sendTitle,
type: 'GET',
global: false,
dataType: 'json',
success: function(data) {
lastTS = data;
//AJAX within an AJAX
var wikiEndpoint = "https://en.wikipedia.org/w/api.php",
parameters = [ "action=query",
"format=json",
"formatversion=2",
"prop=revisions",
"titles="+searchQuery,
"rvstart="+lastTS,
"rvdir=newer",
"order=desc",
"rvlimit=max",
//Only querying for timestamps
"rvprop=timestamp",
"origin=*",
"callback=?"]
var url = wikiEndpoint + "?" + parameters.join("&");
console.log(url);
//Get data from MediaWiki API
$.ajax({
url: url,
type: 'GET',
dataType: 'jsonp',
contentType: "application/json; charset=utf-8",
success: function(data, jqXHR) {
page=data.query.pages;
revs = page[Object.keys(page)[0]].revisions
//Retrieve latest timestamp from the array
latestTS = revs[Object.keys(revs).length-1].timestamp
//Moment.js library to compare dates
var diff = moment.duration(moment(latestTS).diff(moment(lastTS)));
//Date conversions
var days = parseInt(diff.asDays());
var hours = parseInt(diff.asHours());
adjustedHours = hours - days*24;
var minutes = parseInt(diff.asMinutes());
minutes = minutes - (days*24*60 + adjustedHours*60);
console.log(searchQuery+": "+days+" days "+adjustedHours+" hours " + minutes +" minutes");
console.log("Latest: "+latestTS);
console.log("Database: "+lastTS);
//Displays number of revisions if available
if (hours<24) {
$("#wasIUpdated").empty();
$("#wasIUpdated").text("No new revisions!");
} else {
$("#wasIUpdated").empty();
$("#wasIUpdated").text("Update available: " +revs.length + " revisions");
}
}
})
}
})
// Validate the searched title and, when valid, populate the individual-article
// page: heading, top-users list, chart controls and the revision charts.
// NOTE(review): validTitle, searchedArticle, allArticleTitles,
// allArticletitlesWithRevisions, pieData, barData, drawPie, drawBar and
// drawBarSpecificUser all come from the enclosing scope — confirm they are
// initialised before this branch runs.
if (!validTitle) {
alert("There are no articles with this title (titles are case sensitive)");
} else {
//Display article name: swap in the "title with revision count" variant
//when the searched title matches a known article
$("#putTitleHere").empty();
var newTitle = searchedArticle.trim();
for(var c = 0; c < allArticleTitles.length; c++){
var toTest = allArticleTitles[c].trim();
if(newTitle == toTest){
// NOTE(review): lowercase 't' in allArticletitlesWithRevisions looks like a
// naming slip, but the global may genuinely be spelled this way — verify.
newTitle = allArticletitlesWithRevisions[c];
}
}
$("#putTitleHere").text(newTitle);
//Add in title and class of table
// BUGFIX: selector was "putTopRegUsersHeading" (missing '#'), so the
// .empty() matched nothing and was a silent no-op.
$("#putTopRegUsersHeading").empty();
$("#putTopRegUsersHeading").text("Top Users For " + searchedArticle);
$("#putClassHere").addClass("longtable");
//Add in charts
$("#putChartTitleHere").text("Graphs");
//Get chart data and draw charts
// NOTE(review): pieData is set asynchronously but drawBar below only waits
// for barData — drawing "In Total" before this returns would race; confirm.
var destination2 = 'pieDataIndividualArticle?title=' + searchedArticle
$.getJSON(destination2, null, function (rdata) {
pieData = rdata
}
);
var destination3 = 'barDataIndividualArticle?title=' + searchedArticle
$.getJSON(destination3, null, function (rdata) {
barData = rdata
drawBar('#IndividualArticleChart'); //default chart
}
);
//Draw the chart controls and add the top 5 users to the table
var chartControls = $.get('views/chartControls.html', null, function (data) {
appendMe = $(data)
$('#putChartControlsHere').empty();
$('#putChartControlsHere').append(appendMe);
//Add the top 5 users to both the list above and the chart's select box
var destination = 'getTopUsersForArticle?title=' + searchedArticle
$.get(destination, null, function (data) {
console.log(data)
$('#putTopRegUsers').empty();
for (var x = 0; x < data.length; x++) {
var ranking = x + 1;
ranking = ranking + '. '
var appendMe = $('<li>' + ranking + data[x]._id + ' (revisions: ' + data[x].count + ')' + '</li>');
$('#putTopRegUsers').append(appendMe);
// BUGFIX: quote the value attribute — unquoted values break for user
// ids containing spaces or HTML-special characters.
appendMe = $('<option value="' + data[x]._id + '">' + data[x]._id + '</option>');
$('#putUsersHere').append(appendMe)
}
})
//Register event handler for the chart-switcher button: redraw the chart
//according to the current selection in the select box
$('#chartSwitcherIndividualArticle').click(function () {
//get value from select box
var specificUser = $('#chartSelectorIndividualArticles').val();
if (specificUser == "In Total") {
drawPie('#IndividualArticleChart');
} else if (specificUser == "Over Time") {
drawBar('#IndividualArticleChart');
} else {
//A specific user was chosen: fetch that user's per-article bar data
var destination4 = 'barDataSpecificUser?user=' + specificUser + '&title=' + searchedArticle
$.getJSON(destination4, null, function (rdata) {
console.log('here it is...')
console.log(rdata)
drawBarSpecificUser('#IndividualArticleChart', rdata);
}
);
}
})
})
}
} | }
function getAuthorAnalyticsPage() {
$('#main').empty();
$('#main').load('views/authorAnalytics.html', null, function () { | random_line_split |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.