file_name large_stringlengths 4 140 | prefix large_stringlengths 0 12.1k | suffix large_stringlengths 0 12k | middle large_stringlengths 0 7.51k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
mod.rs | {
pub(crate) path: PathBuf,
#[visit(skip)]
pub(crate) mapping: NodeMapping,
#[visit(skip)]
scene: Scene,
}
impl TypeUuidProvider for Model {
fn type_uuid() -> Uuid {
MODEL_RESOURCE_UUID
}
}
/// Type alias for model resources.
pub type ModelResource = Resource<Model>;
/// Extension trait for model resources.
pub trait ModelResourceExtension: Sized {
/// Tries to instantiate model from given resource.
fn instantiate_from(
model: ModelResource,
model_data: &Model,
handle: Handle<Node>,
dest_graph: &mut Graph,
) -> (Handle<Node>, NodeHandleMap);
/// Tries to instantiate model from given resource.
fn instantiate(&self, dest_scene: &mut Scene) -> Handle<Node>;
/// Instantiates a prefab and places it at specified position and orientation in global coordinates.
fn instantiate_at(
&self,
scene: &mut Scene,
position: Vector3<f32>,
orientation: UnitQuaternion<f32>,
) -> Handle<Node>;
/// Tries to retarget animations from given model resource to a node hierarchy starting
/// from `root` on a given scene.
///
/// Animation retargeting allows you to "transfer" animation from a model to a model
/// instance on a scene. Imagine you have a character that should have multiple animations
/// like idle, run, shoot, walk, etc. and you want to store each animation in a separate
/// file. Then when you creating a character on a level you want to have all possible
/// animations assigned to a character, this is where this function comes into play:
/// you just load a model of your character with skeleton, but without any animations,
/// then you load several "models" which have only skeleton with some animation (such
/// "models" can be considered as "animation" resources). After this you need to
/// instantiate model on your level and retarget all animations you need to that instance
/// from other "models". All you have after this is a handle to a model and bunch of
/// handles to specific animations. After this animations can be blended in any combinations
/// you need to. For example idle animation can be blended with walk animation when your
/// character starts walking.
///
/// # Notes
///
/// Most of the 3d model formats can contain only one animation, so in most cases
/// this function will return vector with only one animation.
fn retarget_animations_directly(&self, root: Handle<Node>, graph: &Graph) -> Vec<Animation>;
/// Tries to retarget animations from given model resource to a node hierarchy starting
/// from `root` on a given scene. Unlike [`Self::retarget_animations_directly`], it automatically
/// adds retargetted animations to the specified animation player in the hierarchy of given `root`.
///
/// # Panic
///
/// Panics if `dest_animation_player` is invalid handle, or the node does not have [`AnimationPlayer`]
/// component.
fn retarget_animations_to_player(
&self,
root: Handle<Node>,
dest_animation_player: Handle<Node>,
graph: &mut Graph,
) -> Vec<Handle<Animation>>;
/// Tries to retarget animations from given model resource to a node hierarchy starting
/// from `root` on a given scene. Unlike [`Self::retarget_animations_directly`], it automatically
/// adds retargetted animations to a first animation player in the hierarchy of given `root`.
///
/// # Panic
///
/// Panics if there's no animation player in the given hierarchy (descendant nodes of `root`).
fn retarget_animations(&self, root: Handle<Node>, graph: &mut Graph) -> Vec<Handle<Animation>>;
}
impl ModelResourceExtension for ModelResource {
fn instantiate_from(
model: ModelResource,
model_data: &Model,
handle: Handle<Node>,
dest_graph: &mut Graph,
) -> (Handle<Node>, NodeHandleMap) {
let (root, old_to_new) =
model_data
.scene
.graph
.copy_node(handle, dest_graph, &mut |_, _| true);
// Notify instantiated nodes about resource they were created from.
let mut stack = vec![root];
while let Some(node_handle) = stack.pop() {
let node = &mut dest_graph[node_handle];
node.resource = Some(model.clone());
// Reset resource instance root flag, this is needed because a node after instantiation cannot
// be a root anymore.
node.is_resource_instance_root = false;
// Reset inheritable properties, so property inheritance system will take properties
// from parent objects on resolve stage.
node.as_reflect_mut(&mut |node| mark_inheritable_properties_non_modified(node));
// Continue on children.
stack.extend_from_slice(node.children());
}
// Fill original handles to instances.
for (&old, &new) in old_to_new.inner().iter() {
dest_graph[new].original_handle_in_resource = old;
}
dest_graph.update_hierarchical_data_for_descendants(root);
(root, old_to_new)
}
fn instantiate(&self, dest_scene: &mut Scene) -> Handle<Node> {
let data = self.data_ref();
let instance_root = Self::instantiate_from(
self.clone(),
&data,
data.scene.graph.get_root(),
&mut dest_scene.graph,
)
.0;
dest_scene.graph[instance_root].is_resource_instance_root = true;
std::mem::drop(data);
instance_root
}
fn instantiate_at(
&self,
scene: &mut Scene,
position: Vector3<f32>,
orientation: UnitQuaternion<f32>,
) -> Handle<Node> {
let root = self.instantiate(scene);
scene.graph[root]
.local_transform_mut()
.set_position(position)
.set_rotation(orientation);
scene.graph.update_hierarchical_data_for_descendants(root);
root
}
fn retarget_animations_directly(&self, root: Handle<Node>, graph: &Graph) -> Vec<Animation> {
let mut retargetted_animations = Vec::new();
let data = self.data_ref();
for src_node_ref in data.scene.graph.linear_iter() {
if let Some(src_player) = src_node_ref.query_component_ref::<AnimationPlayer>() {
for src_anim in src_player.animations().iter() {
let mut anim_copy = src_anim.clone();
// Remap animation track nodes from resource to instance. This is required
// because we've made a plain copy and it has tracks with node handles mapped
// to nodes of internal scene.
for (i, ref_track) in src_anim.tracks().iter().enumerate() {
let ref_node = &data.scene.graph[ref_track.target()];
let track = &mut anim_copy.tracks_mut()[i];
// Find instantiated node that corresponds to node in resource
match graph.find_by_name(root, ref_node.name()) {
Some((instance_node, _)) => {
// One-to-one track mapping so there is [i] indexing.
track.set_target(instance_node);
}
None => {
track.set_target(Handle::NONE);
Log::writeln(
MessageKind::Error,
format!(
"Failed to retarget animation {:?} for node {}",
data.path(),
ref_node.name()
),
);
}
}
}
retargetted_animations.push(anim_copy);
}
}
}
retargetted_animations
}
fn retarget_animations_to_player(
&self,
root: Handle<Node>,
dest_animation_player: Handle<Node>,
graph: &mut Graph,
) -> Vec<Handle<Animation>> {
let mut animation_handles = Vec::new();
let animations = self.retarget_animations_directly(root, graph);
let dest_animation_player = graph[dest_animation_player]
.query_component_mut::<AnimationPlayer>()
.unwrap();
for animation in animations {
animation_handles.push(dest_animation_player.animations_mut().add(animation));
}
animation_handles
}
fn retarget_animations(&self, root: Handle<Node>, graph: &mut Graph) -> Vec<Handle<Animation>> {
if let Some((animation_player, _)) = graph.find(root, &mut |n| {
n.query_component_ref::<AnimationPlayer>().is_some()
}) {
self.retarget_animations_to_player(root, animation_player, graph)
} else {
Default::default()
}
}
}
impl ResourceData for Model {
fn path(&self) -> Cow<Path> {
Cow::Borrowed(&self.path)
}
fn set_path(&mut self, path: PathBuf) {
self.path = path;
}
fn as_any(&self) -> &dyn Any {
self
}
fn as_any_mut(&mut self) -> &mut dyn Any {
self
}
fn type_uuid(&self) -> Uuid {
<Self as TypeUuidProvider>::type_uuid()
}
}
impl Default for Model {
fn default() -> Self {
Self {
path: PathBuf::new(),
mapping: NodeMapping::UseNames,
scene: Scene::new(),
}
}
}
/// Defines a way of searching materials when loading a model resource from foreign file format such as FBX.
///
/// # Mot | Model | identifier_name | |
hypersonic.go | Bomb(myX, myY-1, bomb, turnLimit, numTurns+1, reach, bombs, floor) {return true}
// in danger, no where to go
return false
}
func willIDieHere(x int, y int, bombs []Bomb, floor [WIDTH][HEIGHT]int) bool {
for _, bomb := range bombs {
if canIEscapeThisBomb(x, y, bomb, bomb.time, 0, bomb.reach, bombs, floor) {
return false
}
}
return true
}
/**
* How many boxes are within bombing range of the given cell, are there items in those boxes, and can I get there?
**/
func scoreACell(x int, y int, myX int, myY int, bombsOnTheFloor []Bomb, myReach int, floor [WIDTH][HEIGHT]int) Cell {
if (myX != x || myY != y) { // I'm not already standing here
if !canIBeHere(x, y, 1, bombsOnTheFloor, floor) {return Cell{score: WALL_SCORE, distance: TOO_FAR}} // cannot move to here next turn
}
moves, maybe := canIGoToThere(myX, myY, myX, myY, x, y, SEARCH_DEPTH_LIMIT, bombsOnTheFloor, floor)
if !maybe {return Cell{score: WALL_SCORE, distance: TOO_FAR}} // cannot get here, even after multiple turns
if willIDieHere(x, y, bombsOnTheFloor, floor) {return Cell{score: DANGER_SCORE, distance: TOO_FAR}} // does not account for time left on the bomb, could optimize here rather than walling it off
score := 0
for i := 0; i < myReach; i++ {
if x+i < WIDTH && floor[x+i][y] >= BOX {score++}
if x-i > 0 && floor[x-i][y] >= BOX {score++}
if y+i < HEIGHT && floor[x][y+i] >= BOX {score++}
if y-i > 0 && floor[x][y-i] >= BOX {score++}
}
if floor[x][y] > BOX {score++} // there's an item in the box
return Cell{score: score, distance: moves}
}
func scoreTheFloor(myX int, myY int, bombsOnTheFloor []Bomb, myReach int, floor [WIDTH][HEIGHT]int) [WIDTH][HEIGHT]Cell{
scoreFloor := [WIDTH][HEIGHT]Cell{}
for i := 0; i < WIDTH; i++ {
for j := 0; j < HEIGHT; j++ {
scoreFloor[i][j] = scoreACell(i, j, myX, myY, bombsOnTheFloor, myReach, floor)
}
}
return scoreFloor
}
func canIGoToThere(x int, y int, myX int, myY int, xT int, yT int, moveLimit int, bombs []Bomb, floor [WIDTH][HEIGHT]int) (distance int, maybe bool) {
// fmt.Fprintln(os.Stderr, fmt.Sprintf("GO - x: %d, y: %d, m: %d", myX, myY, moves))
moves, minMoves := 0, TOO_FAR
yes, isPathFound := false, false
if moveLimit < 1 {return TOO_FAR, false}
// if it's not the cell that I'm already standing on, then ensure that I can stand on it when I get there
if (x != myX || y != myY) && !canIBeHere(myX, myY, 0, bombs, floor) {return TOO_FAR, false}
if myX == xT && myY == yT {return moves, true}
// try moving Right
moves, yes = canIGoToThere(x, y, myX+1, myY, xT, yT, moveLimit-1, bombs, floor)
if yes {
moves++
if moves < minMoves {minMoves = moves}
isPathFound = true
}
// try moving Left
moves, yes = canIGoToThere(x, y, myX-1, myY, xT, yT, moveLimit-1, bombs, floor)
if yes {
moves++
if moves < minMoves {minMoves = moves}
isPathFound = true
}
// try moving Down
moves, yes = canIGoToThere(x, y, myX, myY+1, xT, yT, moveLimit-1, bombs, floor)
if yes {
moves++
if moves < minMoves {minMoves = moves}
isPathFound = true
}
// try moving Up
moves, yes = canIGoToThere(x, y, myX, myY-1, xT, yT, moveLimit-1, bombs, floor)
if yes {
moves++
if moves < minMoves {minMoves = moves}
isPathFound = true
}
// all possibilities exhausted
return minMoves, isPathFound
}
func markBombs(bombs []Bomb, scoreFloor [WIDTH][HEIGHT]Cell) [WIDTH][HEIGHT]Cell {
var dangerScore int
for _, bomb := range bombs {
if bomb.time < 2 {
dangerScore = DANGER_SCORE
// on the bomb and it's exploding
scoreFloor[bomb.x][bomb.y].score = dangerScore
} else {
dangerScore = DANGER_SCORE / (bomb.time - 1) // treat the bomb's timer as if we'd already advanced to the next turn
}
// on the bomb
if scoreFloor[bomb.x][bomb.y].score > dangerScore {scoreFloor[bomb.x][bomb.y].score = WALL_SCORE}
// left of the bomb
for i := bomb.x; i >= bomb.x - bomb.reach; i-- {
if amIWithinTheBoundaries(i, 0) { // prevent array index out of bounds
if scoreFloor[i][bomb.y].score > dangerScore { // do not overwrite a score that's already even lower
scoreFloor[i][bomb.y].score = dangerScore
if scoreFloor[i][bomb.y].score == WALL_SCORE {break} // stop propagating the explosion in this direction, there is a blocker (wall or box)
}
}
}
// right of the bomb
for i := bomb.x; i <= bomb.x+bomb.reach; i++ {
if amIWithinTheBoundaries(i, 0) { // prevent array index out of bounds
if scoreFloor[i][bomb.y].score > dangerScore { // do not overwrite a score that's already even lower
scoreFloor[i][bomb.y].score = dangerScore
if scoreFloor[i][bomb.y].score == WALL_SCORE {break} // stop propagating the explosion in this direction, there is a blocker (wall or box)
}
}
}
// below the bomb
for i := bomb.y; i >= bomb.y - bomb.reach; i-- {
if amIWithinTheBoundaries(0, i) {
if scoreFloor[bomb.x][i].score > dangerScore {
scoreFloor[bomb.x][i].score = dangerScore
if scoreFloor[bomb.x][i].score == WALL_SCORE {break} // stop propagating the explosion in this direction, there is a blocker (wall or box)
}
}
}
// above the bomb
for i := bomb.y; i <= bomb.y+bomb.reach; i++ {
if amIWithinTheBoundaries(0, i) {
if scoreFloor[bomb.x][i].score > dangerScore {
scoreFloor[bomb.x][i].score = dangerScore
if scoreFloor[bomb.x][i].score == WALL_SCORE {break} // stop propagating the explosion in this direction, there is a blocker (wall or box)
}
}
}
}
return scoreFloor
}
func buildTheFloor(row string, y int, floor [WIDTH][HEIGHT]int) [WIDTH][HEIGHT]int {
width := len(row)
for x := 0; x < width; x++ {
if string(row[x]) == "." {
floor[x][y] = CELL
} else if string(row[x]) == "X" {
floor[x][y] = WALL
} else {
floor[x][y] = int(row[x] - '0')
}
}
return floor
}
func canIBeHere(x int, y int, timeElapsed int, bombs []Bomb, floor [WIDTH][HEIGHT]int) bool {
if !amIWithinTheBoundaries(x, y) {return false}
if floor[x][y] == WALL || floor[x][y] >= BOX || floor[x][y] == EXPLOSION {return false}
for _, bomb := range bombs {
if x == bomb.x && y == bomb.y {return false} // can't walk through bombs once they're placed
}
return true
}
func amIWithinTheBoundaries(x int, y int) bool | {
if x < 0 || x >= WIDTH || y < 0 || y >= HEIGHT {return false}
return true
} | identifier_body | |
hypersonic.go | ] >= BOX {score++}
}
if floor[x][y] > BOX {score++} // there's an item in the box
return Cell{score: score, distance: moves}
}
func scoreTheFloor(myX int, myY int, bombsOnTheFloor []Bomb, myReach int, floor [WIDTH][HEIGHT]int) [WIDTH][HEIGHT]Cell{
scoreFloor := [WIDTH][HEIGHT]Cell{}
for i := 0; i < WIDTH; i++ {
for j := 0; j < HEIGHT; j++ {
scoreFloor[i][j] = scoreACell(i, j, myX, myY, bombsOnTheFloor, myReach, floor)
}
}
return scoreFloor
}
func canIGoToThere(x int, y int, myX int, myY int, xT int, yT int, moveLimit int, bombs []Bomb, floor [WIDTH][HEIGHT]int) (distance int, maybe bool) {
// fmt.Fprintln(os.Stderr, fmt.Sprintf("GO - x: %d, y: %d, m: %d", myX, myY, moves))
moves, minMoves := 0, TOO_FAR
yes, isPathFound := false, false
if moveLimit < 1 {return TOO_FAR, false}
// if it's not the cell that I'm already standing on, then ensure that I can stand on it when I get there
if (x != myX || y != myY) && !canIBeHere(myX, myY, 0, bombs, floor) {return TOO_FAR, false}
if myX == xT && myY == yT {return moves, true}
// try moving Right
moves, yes = canIGoToThere(x, y, myX+1, myY, xT, yT, moveLimit-1, bombs, floor)
if yes {
moves++
if moves < minMoves {minMoves = moves}
isPathFound = true
}
// try moving Left
moves, yes = canIGoToThere(x, y, myX-1, myY, xT, yT, moveLimit-1, bombs, floor)
if yes {
moves++
if moves < minMoves {minMoves = moves}
isPathFound = true
}
// try moving Down
moves, yes = canIGoToThere(x, y, myX, myY+1, xT, yT, moveLimit-1, bombs, floor)
if yes {
moves++
if moves < minMoves {minMoves = moves}
isPathFound = true
}
// try moving Up
moves, yes = canIGoToThere(x, y, myX, myY-1, xT, yT, moveLimit-1, bombs, floor)
if yes {
moves++
if moves < minMoves {minMoves = moves}
isPathFound = true
}
// all possibilities exhausted
return minMoves, isPathFound
}
func markBombs(bombs []Bomb, scoreFloor [WIDTH][HEIGHT]Cell) [WIDTH][HEIGHT]Cell {
var dangerScore int
for _, bomb := range bombs {
if bomb.time < 2 {
dangerScore = DANGER_SCORE
// on the bomb and it's exploding
scoreFloor[bomb.x][bomb.y].score = dangerScore
} else {
dangerScore = DANGER_SCORE / (bomb.time - 1) // treat the bomb's timer as if we'd already advanced to the next turn
}
// on the bomb
if scoreFloor[bomb.x][bomb.y].score > dangerScore {scoreFloor[bomb.x][bomb.y].score = WALL_SCORE}
// left of the bomb
for i := bomb.x; i >= bomb.x - bomb.reach; i-- {
if amIWithinTheBoundaries(i, 0) { // prevent array index out of bounds
if scoreFloor[i][bomb.y].score > dangerScore { // do not overwrite a score that's already even lower
scoreFloor[i][bomb.y].score = dangerScore
if scoreFloor[i][bomb.y].score == WALL_SCORE {break} // stop propagating the explosion in this direction, there is a blocker (wall or box)
}
}
}
// right of the bomb
for i := bomb.x; i <= bomb.x+bomb.reach; i++ {
if amIWithinTheBoundaries(i, 0) { // prevent array index out of bounds
if scoreFloor[i][bomb.y].score > dangerScore { // do not overwrite a score that's already even lower
scoreFloor[i][bomb.y].score = dangerScore
if scoreFloor[i][bomb.y].score == WALL_SCORE {break} // stop propagating the explosion in this direction, there is a blocker (wall or box)
}
}
}
// below the bomb
for i := bomb.y; i >= bomb.y - bomb.reach; i-- {
if amIWithinTheBoundaries(0, i) {
if scoreFloor[bomb.x][i].score > dangerScore {
scoreFloor[bomb.x][i].score = dangerScore
if scoreFloor[bomb.x][i].score == WALL_SCORE {break} // stop propagating the explosion in this direction, there is a blocker (wall or box)
}
}
}
// above the bomb
for i := bomb.y; i <= bomb.y+bomb.reach; i++ {
if amIWithinTheBoundaries(0, i) {
if scoreFloor[bomb.x][i].score > dangerScore {
scoreFloor[bomb.x][i].score = dangerScore
if scoreFloor[bomb.x][i].score == WALL_SCORE {break} // stop propagating the explosion in this direction, there is a blocker (wall or box)
}
}
}
}
return scoreFloor
}
func buildTheFloor(row string, y int, floor [WIDTH][HEIGHT]int) [WIDTH][HEIGHT]int {
width := len(row)
for x := 0; x < width; x++ {
if string(row[x]) == "." {
floor[x][y] = CELL
} else if string(row[x]) == "X" {
floor[x][y] = WALL
} else {
floor[x][y] = int(row[x] - '0')
}
}
return floor
}
func canIBeHere(x int, y int, timeElapsed int, bombs []Bomb, floor [WIDTH][HEIGHT]int) bool {
if !amIWithinTheBoundaries(x, y) {return false}
if floor[x][y] == WALL || floor[x][y] >= BOX || floor[x][y] == EXPLOSION {return false}
for _, bomb := range bombs {
if x == bomb.x && y == bomb.y {return false} // can't walk through bombs once they're placed
}
return true
}
func amIWithinTheBoundaries(x int, y int) bool {
if x < 0 || x >= WIDTH || y < 0 || y >= HEIGHT {return false}
return true
}
// TODO: this does not account for walls & boxes, which block propagation of the explosion
func amIWithinTheBlastRadius(myX int, myY int, bomb Bomb) bool{
if myX > bomb.x + bomb.reach || myX < bomb.x - bomb.reach || myY > bomb.y + bomb.reach || myY < bomb.y - bomb.reach {return true}
return false
}
func transferExplosions(scoreFloor [WIDTH][HEIGHT]Cell, floor [WIDTH][HEIGHT]int) [WIDTH][HEIGHT]int {
for i := 0; i < WIDTH; i++ {
for j := 0; j < HEIGHT; j++ {
if scoreFloor[i][j].score == DANGER_SCORE {floor[i][j] = EXPLOSION}
}
}
return floor
}
func scoreFloorToString(floor [WIDTH][HEIGHT]Cell) string {
var buffer bytes.Buffer
var scoreStr, distanceStr string
for i := 0; i < HEIGHT; i++ {
for j := 0; j < WIDTH; j++ {
scoreStr = strconv.Itoa(floor[j][i].score)
distanceStr = strconv.Itoa(floor[j][i].distance)
buffer.WriteString("[")
for f := 0; f < 3 - len(scoreStr); f++ {buffer.WriteString(" ")}
buffer.WriteString(scoreStr)
buffer.WriteString(", ")
for f := 0; f < 3 - len(distanceStr); f++ {buffer.WriteString(" ")}
buffer.WriteString(distanceStr)
buffer.WriteString("]")
}
buffer.WriteString("\n")
}
return buffer.String()
}
func floorToString(floor [WIDTH][HEIGHT]int) string {
var buffer bytes.Buffer
var cell int
for i := 0; i < HEIGHT; i++ {
for j := 0; j < WIDTH; j++ {
cell = floor[j][i]
buffer.WriteString(" ")
if cell == BOX {buffer.WriteString("B")}
if cell == WALL {buffer.WriteString("W")}
if cell == CELL {buffer.WriteString("_")}
if cell == EXPLOSION | {buffer.WriteString("E")} | conditional_block | |
hypersonic.go | TOO_FAR}} // does not account for time left on the bomb, could optimize here rather than walling it off
score := 0
for i := 0; i < myReach; i++ {
if x+i < WIDTH && floor[x+i][y] >= BOX {score++}
if x-i > 0 && floor[x-i][y] >= BOX {score++}
if y+i < HEIGHT && floor[x][y+i] >= BOX {score++}
if y-i > 0 && floor[x][y-i] >= BOX {score++}
}
if floor[x][y] > BOX {score++} // there's an item in the box
return Cell{score: score, distance: moves}
}
func scoreTheFloor(myX int, myY int, bombsOnTheFloor []Bomb, myReach int, floor [WIDTH][HEIGHT]int) [WIDTH][HEIGHT]Cell{
scoreFloor := [WIDTH][HEIGHT]Cell{}
for i := 0; i < WIDTH; i++ {
for j := 0; j < HEIGHT; j++ {
scoreFloor[i][j] = scoreACell(i, j, myX, myY, bombsOnTheFloor, myReach, floor)
}
}
return scoreFloor
}
func canIGoToThere(x int, y int, myX int, myY int, xT int, yT int, moveLimit int, bombs []Bomb, floor [WIDTH][HEIGHT]int) (distance int, maybe bool) {
// fmt.Fprintln(os.Stderr, fmt.Sprintf("GO - x: %d, y: %d, m: %d", myX, myY, moves))
moves, minMoves := 0, TOO_FAR
yes, isPathFound := false, false
if moveLimit < 1 {return TOO_FAR, false}
// if it's not the cell that I'm already standing on, then ensure that I can stand on it when I get there
if (x != myX || y != myY) && !canIBeHere(myX, myY, 0, bombs, floor) {return TOO_FAR, false}
if myX == xT && myY == yT {return moves, true}
// try moving Right
moves, yes = canIGoToThere(x, y, myX+1, myY, xT, yT, moveLimit-1, bombs, floor)
if yes {
moves++
if moves < minMoves {minMoves = moves}
isPathFound = true
}
// try moving Left
moves, yes = canIGoToThere(x, y, myX-1, myY, xT, yT, moveLimit-1, bombs, floor)
if yes {
moves++
if moves < minMoves {minMoves = moves}
isPathFound = true
}
// try moving Down
moves, yes = canIGoToThere(x, y, myX, myY+1, xT, yT, moveLimit-1, bombs, floor)
if yes {
moves++
if moves < minMoves {minMoves = moves}
isPathFound = true
}
// try moving Up
moves, yes = canIGoToThere(x, y, myX, myY-1, xT, yT, moveLimit-1, bombs, floor)
if yes {
moves++
if moves < minMoves {minMoves = moves}
isPathFound = true
}
// all possibilities exhausted
return minMoves, isPathFound
}
func markBombs(bombs []Bomb, scoreFloor [WIDTH][HEIGHT]Cell) [WIDTH][HEIGHT]Cell {
var dangerScore int
for _, bomb := range bombs {
if bomb.time < 2 {
dangerScore = DANGER_SCORE
// on the bomb and it's exploding
scoreFloor[bomb.x][bomb.y].score = dangerScore
} else {
dangerScore = DANGER_SCORE / (bomb.time - 1) // treat the bomb's timer as if we'd already advanced to the next turn
}
// on the bomb
if scoreFloor[bomb.x][bomb.y].score > dangerScore {scoreFloor[bomb.x][bomb.y].score = WALL_SCORE}
// left of the bomb
for i := bomb.x; i >= bomb.x - bomb.reach; i-- {
if amIWithinTheBoundaries(i, 0) { // prevent array index out of bounds
if scoreFloor[i][bomb.y].score > dangerScore { // do not overwrite a score that's already even lower
scoreFloor[i][bomb.y].score = dangerScore
if scoreFloor[i][bomb.y].score == WALL_SCORE {break} // stop propagating the explosion in this direction, there is a blocker (wall or box)
}
}
}
// right of the bomb
for i := bomb.x; i <= bomb.x+bomb.reach; i++ {
if amIWithinTheBoundaries(i, 0) { // prevent array index out of bounds
if scoreFloor[i][bomb.y].score > dangerScore { // do not overwrite a score that's already even lower
scoreFloor[i][bomb.y].score = dangerScore
if scoreFloor[i][bomb.y].score == WALL_SCORE {break} // stop propagating the explosion in this direction, there is a blocker (wall or box)
}
}
}
// below the bomb
for i := bomb.y; i >= bomb.y - bomb.reach; i-- {
if amIWithinTheBoundaries(0, i) {
if scoreFloor[bomb.x][i].score > dangerScore {
scoreFloor[bomb.x][i].score = dangerScore
if scoreFloor[bomb.x][i].score == WALL_SCORE {break} // stop propagating the explosion in this direction, there is a blocker (wall or box)
}
}
}
// above the bomb
for i := bomb.y; i <= bomb.y+bomb.reach; i++ {
if amIWithinTheBoundaries(0, i) {
if scoreFloor[bomb.x][i].score > dangerScore {
scoreFloor[bomb.x][i].score = dangerScore
if scoreFloor[bomb.x][i].score == WALL_SCORE {break} // stop propagating the explosion in this direction, there is a blocker (wall or box)
}
}
}
}
return scoreFloor
}
func buildTheFloor(row string, y int, floor [WIDTH][HEIGHT]int) [WIDTH][HEIGHT]int {
width := len(row)
for x := 0; x < width; x++ {
if string(row[x]) == "." {
floor[x][y] = CELL
} else if string(row[x]) == "X" {
floor[x][y] = WALL
} else {
floor[x][y] = int(row[x] - '0')
}
}
return floor
}
func canIBeHere(x int, y int, timeElapsed int, bombs []Bomb, floor [WIDTH][HEIGHT]int) bool {
if !amIWithinTheBoundaries(x, y) {return false}
if floor[x][y] == WALL || floor[x][y] >= BOX || floor[x][y] == EXPLOSION {return false}
for _, bomb := range bombs {
if x == bomb.x && y == bomb.y {return false} // can't walk through bombs once they're placed
}
return true
}
func amIWithinTheBoundaries(x int, y int) bool {
if x < 0 || x >= WIDTH || y < 0 || y >= HEIGHT {return false}
return true
}
// TODO: this does not account for walls & boxes, which block propagation of the explosion
func amIWithinTheBlastRadius(myX int, myY int, bomb Bomb) bool{
if myX > bomb.x + bomb.reach || myX < bomb.x - bomb.reach || myY > bomb.y + bomb.reach || myY < bomb.y - bomb.reach {return true}
return false
}
func transferExplosions(scoreFloor [WIDTH][HEIGHT]Cell, floor [WIDTH][HEIGHT]int) [WIDTH][HEIGHT]int {
for i := 0; i < WIDTH; i++ {
for j := 0; j < HEIGHT; j++ {
if scoreFloor[i][j].score == DANGER_SCORE {floor[i][j] = EXPLOSION}
}
}
return floor
}
func scoreFloorToString(floor [WIDTH][HEIGHT]Cell) string {
var buffer bytes.Buffer
var scoreStr, distanceStr string
for i := 0; i < HEIGHT; i++ {
for j := 0; j < WIDTH; j++ {
scoreStr = strconv.Itoa(floor[j][i].score)
distanceStr = strconv.Itoa(floor[j][i].distance)
buffer.WriteString("[")
for f := 0; f < 3 - len(scoreStr); f++ {buffer.WriteString(" ")}
buffer.WriteString(scoreStr)
buffer.WriteString(", ")
for f := 0; f < 3 - len(distanceStr); f++ {buffer.WriteString(" ")} | buffer.WriteString(distanceStr)
buffer.WriteString("]")
}
buffer.WriteString("\n")
} | random_line_split | |
hypersonic.go | myX = x
myY = y
}
if entityType == BOMB { // don't bother going here (get x,y and affect their score somehow)
bombsOnTheFloor = append(bombsOnTheFloor,
Bomb{x: x, y: y, time: param1, reach: param2})
}
}
// fmt.Fprintln(os.Stderr, bombsOnTheFloor)
xT, yT := myX, myY
maxScore := Cell{score: WALL_SCORE, distance: TOO_FAR}
scoreFloor := scoreTheFloor(myX, myY, bombsOnTheFloor, myReach, floor)
scoreFloor = markBombs(bombsOnTheFloor, scoreFloor)
floor = transferExplosions(scoreFloor, floor)
for i := 0; i < WIDTH; i++ {
for j := 0; j < HEIGHT; j++ {
if scoreFloor[i][j].score > maxScore.score || (scoreFloor[i][j].score == maxScore.score && scoreFloor[i][j].distance < maxScore.distance) {
maxScore = scoreFloor[i][j]
xT = i
yT = j
}
}
}
// fmt.Fprintln(os.Stderr, fmt.Sprintf("MAX: %d, TARGET: %d, x: %d, y: %d", maxScore, scoreFloor[xT][yT], xT, yT))
// fmt.Fprintln(os.Stderr, scoreFloorToString(scoreFloor))
// fmt.Fprintln(os.Stderr, floorToString(floor))
// if canIBeHere(myX, myY, 0, bombsOnTheFloor, floor) && !canIBeHere(x, y, 1, bombsOnTheFloor, floor)
if myBombCount > 0 && myX == xT && myY == yT && canIEscapeThisBomb(myX, myY, Bomb{x: myX, y: myY, reach: myReach, time: MAX_BOMB_TIME}, MAX_BOMB_TIME, 0, myReach, bombsOnTheFloor, floor) { // drop bomb on current cell while moving toward target cell (could be equivalent)
fmt.Println(fmt.Sprintf("BOMB %d %d BUTT SOUP", xT, yT))
} else {
fmt.Println(fmt.Sprintf("MOVE %d %d (%d, %d) = %d", xT, yT, xT, yT, scoreFloor[xT][yT]))
}
turnCounter++
}
}
/**
*
**/
func canIEscapeThisBomb(myX int, myY int, bomb Bomb, turnLimit int, numTurns int, reach int, bombs []Bomb, floor [WIDTH][HEIGHT]int) bool {
// fmt.Fprintln(os.Stderr, fmt.Sprintf("MX: %d, MY: %d, BX: %d, BY: %d, t: %d, r: %d", myX, myY, bombX, bombY, numTurns, reach))
// Already safe on a diagonal from the bomb's cell, don't need to move
if myX != bomb.x && myY != bomb.y {return true}
// I'm lined up with the bomb, but out of its reach
// fmt.Fprintln(os.Stderr, fmt.Sprintf("myX: %d, myY: %d, BX: %d, BY: %d, turns: %d", myX, myY, bombX, bombY, numTurns))
if myX > bomb.x + reach || myX < bomb.x - reach || myY > bomb.y + reach || myY < bomb.y - reach {return true}
// fmt.Fprintln(os.Stderr, "MARKER 1")
// In danger, need to move, but there is no time left
if turnLimit - numTurns < 1 {return false}
// In danger, need to move, have some time left
if canIBeHere(myX+1, myY, numTurns+1, bombs, floor) && canIEscapeThisBomb(myX+1, myY, bomb, turnLimit, numTurns+1, reach, bombs, floor) {return true}
if canIBeHere(myX-1, myY, numTurns+1, bombs, floor) && canIEscapeThisBomb(myX-1, myY, bomb, turnLimit, numTurns+1, reach, bombs, floor) {return true}
if canIBeHere(myX, myY+1, numTurns+1, bombs, floor) && canIEscapeThisBomb(myX, myY+1, bomb, turnLimit, numTurns+1, reach, bombs, floor) {return true}
if canIBeHere(myX, myY-1, numTurns+1, bombs, floor) && canIEscapeThisBomb(myX, myY-1, bomb, turnLimit, numTurns+1, reach, bombs, floor) {return true}
// in danger, no where to go
return false
}
func willIDieHere(x int, y int, bombs []Bomb, floor [WIDTH][HEIGHT]int) bool {
for _, bomb := range bombs {
if canIEscapeThisBomb(x, y, bomb, bomb.time, 0, bomb.reach, bombs, floor) {
return false
}
}
return true
}
/**
* How many boxes are within bombing range of the given cell, are there items in those boxes, and can I get there?
**/
func scoreACell(x int, y int, myX int, myY int, bombsOnTheFloor []Bomb, myReach int, floor [WIDTH][HEIGHT]int) Cell {
if (myX != x || myY != y) { // I'm not already standing here
if !canIBeHere(x, y, 1, bombsOnTheFloor, floor) {return Cell{score: WALL_SCORE, distance: TOO_FAR}} // cannot move to here next turn
}
moves, maybe := canIGoToThere(myX, myY, myX, myY, x, y, SEARCH_DEPTH_LIMIT, bombsOnTheFloor, floor)
if !maybe {return Cell{score: WALL_SCORE, distance: TOO_FAR}} // cannot get here, even after multiple turns
if willIDieHere(x, y, bombsOnTheFloor, floor) {return Cell{score: DANGER_SCORE, distance: TOO_FAR}} // does not account for time left on the bomb, could optimize here rather than walling it off
score := 0
for i := 0; i < myReach; i++ {
if x+i < WIDTH && floor[x+i][y] >= BOX {score++}
if x-i > 0 && floor[x-i][y] >= BOX {score++}
if y+i < HEIGHT && floor[x][y+i] >= BOX {score++}
if y-i > 0 && floor[x][y-i] >= BOX {score++}
}
if floor[x][y] > BOX {score++} // there's an item in the box
return Cell{score: score, distance: moves}
}
func | (myX int, myY int, bombsOnTheFloor []Bomb, myReach int, floor [WIDTH][HEIGHT]int) [WIDTH][HEIGHT]Cell{
scoreFloor := [WIDTH][HEIGHT]Cell{}
for i := 0; i < WIDTH; i++ {
for j := 0; j < HEIGHT; j++ {
scoreFloor[i][j] = scoreACell(i, j, myX, myY, bombsOnTheFloor, myReach, floor)
}
}
return scoreFloor
}
func canIGoToThere(x int, y int, myX int, myY int, xT int, yT int, moveLimit int, bombs []Bomb, floor [WIDTH][HEIGHT]int) (distance int, maybe bool) {
// fmt.Fprintln(os.Stderr, fmt.Sprintf("GO - x: %d, y: %d, m: %d", myX, myY, moves))
moves, minMoves := 0, TOO_FAR
yes, isPathFound := false, false
if moveLimit < 1 {return TOO_FAR, false}
// if it's not the cell that I'm already standing on, then ensure that I can stand on it when I get there
if (x != myX || y != myY) && !canIBeHere(myX, myY, 0, bombs, floor) {return TOO_FAR, false}
if myX == xT && myY == yT {return moves, true}
// try moving Right
moves, yes = canIGoToThere(x, y, myX+1, myY, xT, yT, moveLimit-1, bombs, floor)
if yes {
moves++
if moves < minMoves {minMoves = moves}
isPathFound = true
}
// try moving Left
moves, yes = canIGoToThere(x, y, myX-1, myY, xT, yT, moveLimit-1, bombs, floor)
if yes {
moves++
if moves < minMoves {minMoves = moves}
isPathFound = true
}
// try moving Down
| scoreTheFloor | identifier_name |
util.py | _interface_mixin, self)
return tuple(base.__getitem__(i) for i in range(len(self)))
def items(self):
keys = self.keys()
values = self.values()
return tuple((keys[i], values[i]) for i in range(len(self)))
def __str__(self):
return self.__class__.__name__ + "(" + ", ".join("{}={}".format(k, repr(v)) for (k, v) in self.items()) + ")"
class Param(dict_interface_mixin, namedtuple("Param",
"number name value error is_const is_fixed has_limits "
"has_lower_limit has_upper_limit lower_limit upper_limit")):
"""Data object for a single Parameter."""
__slots__ = ()
class Params(list):
"""List of parameter data objects."""
def __init__(self, seq, merrors):
list.__init__(self, seq)
self.merrors = merrors
def _repr_html_(self):
return repr_html.params(self)
def __str__(self):
return repr_text.params(self)
def _repr_pretty_(self, p, cycle):
if cycle:
p.text("[...]")
else:
p.text(str(self))
class MError(dict_interface_mixin, namedtuple("MError",
"name is_valid lower upper lower_valid upper_valid at_lower_limit at_upper_limit "
"at_lower_max_fcn at_upper_max_fcn lower_new_min upper_new_min nfcn min")):
"""Minos result object."""
__slots__ = ()
def _repr_html_(self):
return repr_html.merror(self)
def __str__(self):
return repr_text.merror(self)
def _repr_pretty_(self, p, cycle):
if cycle:
p.text("MError(...)")
else:
p.text(str(self))
class MErrors(OrderedDict):
"""Dict from parameter name to Minos result object."""
def _repr_html_(self):
return "\n".join([x._repr_html_() for x in self.values()])
def __str__(self):
return "\n".join([str(x) for x in self.values()])
def _repr_pretty_(self, p, cycle):
if cycle:
p.text("MErrors(...)")
else:
p.text(str(self))
class FMin(dict_interface_mixin, namedtuple("FMin",
"fval edm tolerance nfcn ncalls up is_valid has_valid_parameters has_accurate_covar "
"has_posdef_covar has_made_posdef_covar hesse_failed has_covariance is_above_max_edm "
"has_reached_call_limit")):
"""Function minimum status object."""
__slots__ = ()
def _repr_html_(self):
return repr_html.fmin(self)
def __str__(self):
return repr_text.fmin(self)
def _repr_pretty_(self, p, cycle):
if cycle:
p.text("FMin(...)")
else:
p.text(str(self))
# MigradResult used to be a tuple, so we don't add the dict interface
class MigradResult(namedtuple("MigradResult", "fmin params")):
"""Holds the Migrad result."""
__slots__ = ()
def __str__(self):
return str(self.fmin) + "\n" + str(self.params)
def _repr_html_(self):
return self.fmin._repr_html_() + self.params._repr_html_()
def _repr_pretty_(self, p, cycle):
if cycle:
p.text("MigradResult(...)")
else:
p.text(str(self))
def arguments_from_docstring(doc):
"""Parse first line of docstring for argument name.
Docstring should be of the form ``min(iterable[, key=func])``.
It can also parse cython docstring of the form
``Minuit.migrad(self[, int ncall_me =10000, resume=True, int nsplit=1])``
"""
if doc is None:
raise RuntimeError('__doc__ is None')
doc = doc.lstrip()
# care only the firstline
# docstring can be long
line = doc.split('\n', 1)[0] # get the firstline
if line.startswith("('...',)"):
line = doc.split('\n', 2)[1] # get the second line
p = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
# 'min(iterable[, key=func])\n' -> 'iterable[, key=func]'
sig = p.search(line)
if sig is None:
return []
# iterable[, key=func]' -> ['iterable[' ,' key=func]']
sig = sig.groups()[0].split(',')
ret = []
for s in sig:
# get the last one after all space after =
# ex: int x= True
tmp = s.split('=')[0].split()[-1]
# clean up non _+alphanum character
tmp = ''.join([x for x in tmp if x.isalnum() or x == '_'])
ret.append(tmp)
# re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
# ret += self.docstring_kwd_re.findall(s)
ret = list(filter(lambda x: x != '', ret))
if len(ret) == 0:
raise RuntimeError('Your doc is unparsable\n' + doc)
return ret
def fc_or_c(f):
if hasattr(f, 'func_code'):
return f.func_code
else:
return f.__code__
def arguments_from_funccode(f):
"""Check f.funccode for arguments
"""
fc = fc_or_c(f)
vnames = fc.co_varnames
nargs = fc.co_argcount
# bound method and fake function will be None
args = vnames[1 if is_bound(f) else 0:nargs]
if not args:
raise RuntimeError('Function has variable number of arguments')
return list(args)
def arguments_from_call_funccode(f):
"""Check f.__call__.func_code for arguments
"""
fc = fc_or_c(f.__call__)
argcount = fc.co_argcount
args = list(fc.co_varnames[1:argcount])
if not args:
raise RuntimeError('Function has variable number of arguments')
return args
def is_bound(f):
"""Test whether ``f`` is a bound function.
"""
return getattr(f, '__self__', None) is not None
def dock_if_bound(f, v):
"""Dock off ``self`` if a bound function is passed.
"""
return v[1:] if is_bound(f) else v
def better_arg_spec(f, verbose=False):
"""Extract function signature.
..seealso::
:ref:`function-sig-label`
"""
# using funccode
try:
return arguments_from_funccode(f)
except Exception as e:
if verbose:
print(e) # TODO: this might not be such a good idea.
print("Extracting arguments from f.func_code/__code__ fails")
# using __call__ funccode
try:
return arguments_from_call_funccode(f)
except Exception as e:
if verbose:
print(e) # TODO: this might not be such a good idea.
print("Extracting arguments from f.__call__.func_code/__code__ fails")
# try:
# return list(inspect.getargspec(f.__call__)[0][1:])
# except Exception as e:
# if verbose:
# print(e)
# print("inspect.getargspec(f)[0] fails")
# try:
# return list(inspect.getargspec(f)[0])
# except Exception as e:
# if verbose:
# print(e)
# print("inspect.getargspec(f)[0] fails")
# now we are parsing __call__.__doc__
# we assume that __call__.__doc__ doesn't have self
# this is what cython gives
try:
t = arguments_from_docstring(f.__call__.__doc__)
if t[0] == 'self':
t = t[1:]
return t
except Exception as e:
if verbose:
print(e)
print("fail parsing __call__.__doc__")
# how about just __doc__
try:
t = arguments_from_docstring(f.__doc__)
if t[0] == 'self':
t = t[1:]
return t
except Exception as e:
if verbose:
print(e)
print("fail parsing __doc__")
raise TypeError("Unable to obtain function signature")
def describe(f, verbose=False):
"""Try to extract the function argument names.
.. seealso:: |
def fitarg_rename(fitarg, ren):
"""Rename variable names in ``fitarg`` with rename function.
::
#simple renaming
fitarg_rename({'x':1, 'limit_x':1, 'fix_x':1, 'error_x':1},
lambda pname: 'y' if pname=='x' else pname)
#{'y':1, 'limit_y':1, 'fix_y |
:ref:`function-sig-label`
"""
return better_arg_spec(f, verbose) | random_line_split |
util.py | _repr_html_(self):
return "\n".join([x._repr_html_() for x in self.values()])
def __str__(self):
return "\n".join([str(x) for x in self.values()])
def _repr_pretty_(self, p, cycle):
if cycle:
p.text("MErrors(...)")
else:
p.text(str(self))
class FMin(dict_interface_mixin, namedtuple("FMin",
"fval edm tolerance nfcn ncalls up is_valid has_valid_parameters has_accurate_covar "
"has_posdef_covar has_made_posdef_covar hesse_failed has_covariance is_above_max_edm "
"has_reached_call_limit")):
"""Function minimum status object."""
__slots__ = ()
def _repr_html_(self):
return repr_html.fmin(self)
def __str__(self):
return repr_text.fmin(self)
def _repr_pretty_(self, p, cycle):
if cycle:
p.text("FMin(...)")
else:
p.text(str(self))
# MigradResult used to be a tuple, so we don't add the dict interface
class MigradResult(namedtuple("MigradResult", "fmin params")):
"""Holds the Migrad result."""
__slots__ = ()
def __str__(self):
return str(self.fmin) + "\n" + str(self.params)
def _repr_html_(self):
return self.fmin._repr_html_() + self.params._repr_html_()
def _repr_pretty_(self, p, cycle):
if cycle:
p.text("MigradResult(...)")
else:
p.text(str(self))
def arguments_from_docstring(doc):
"""Parse first line of docstring for argument name.
Docstring should be of the form ``min(iterable[, key=func])``.
It can also parse cython docstring of the form
``Minuit.migrad(self[, int ncall_me =10000, resume=True, int nsplit=1])``
"""
if doc is None:
raise RuntimeError('__doc__ is None')
doc = doc.lstrip()
# care only the firstline
# docstring can be long
line = doc.split('\n', 1)[0] # get the firstline
if line.startswith("('...',)"):
line = doc.split('\n', 2)[1] # get the second line
p = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
# 'min(iterable[, key=func])\n' -> 'iterable[, key=func]'
sig = p.search(line)
if sig is None:
return []
# iterable[, key=func]' -> ['iterable[' ,' key=func]']
sig = sig.groups()[0].split(',')
ret = []
for s in sig:
# get the last one after all space after =
# ex: int x= True
tmp = s.split('=')[0].split()[-1]
# clean up non _+alphanum character
tmp = ''.join([x for x in tmp if x.isalnum() or x == '_'])
ret.append(tmp)
# re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
# ret += self.docstring_kwd_re.findall(s)
ret = list(filter(lambda x: x != '', ret))
if len(ret) == 0:
raise RuntimeError('Your doc is unparsable\n' + doc)
return ret
def fc_or_c(f):
if hasattr(f, 'func_code'):
return f.func_code
else:
return f.__code__
def arguments_from_funccode(f):
"""Check f.funccode for arguments
"""
fc = fc_or_c(f)
vnames = fc.co_varnames
nargs = fc.co_argcount
# bound method and fake function will be None
args = vnames[1 if is_bound(f) else 0:nargs]
if not args:
raise RuntimeError('Function has variable number of arguments')
return list(args)
def arguments_from_call_funccode(f):
"""Check f.__call__.func_code for arguments
"""
fc = fc_or_c(f.__call__)
argcount = fc.co_argcount
args = list(fc.co_varnames[1:argcount])
if not args:
raise RuntimeError('Function has variable number of arguments')
return args
def is_bound(f):
"""Test whether ``f`` is a bound function.
"""
return getattr(f, '__self__', None) is not None
def dock_if_bound(f, v):
"""Dock off ``self`` if a bound function is passed.
"""
return v[1:] if is_bound(f) else v
def better_arg_spec(f, verbose=False):
"""Extract function signature.
..seealso::
:ref:`function-sig-label`
"""
# using funccode
try:
return arguments_from_funccode(f)
except Exception as e:
if verbose:
print(e) # TODO: this might not be such a good idea.
print("Extracting arguments from f.func_code/__code__ fails")
# using __call__ funccode
try:
return arguments_from_call_funccode(f)
except Exception as e:
if verbose:
print(e) # TODO: this might not be such a good idea.
print("Extracting arguments from f.__call__.func_code/__code__ fails")
# try:
# return list(inspect.getargspec(f.__call__)[0][1:])
# except Exception as e:
# if verbose:
# print(e)
# print("inspect.getargspec(f)[0] fails")
# try:
# return list(inspect.getargspec(f)[0])
# except Exception as e:
# if verbose:
# print(e)
# print("inspect.getargspec(f)[0] fails")
# now we are parsing __call__.__doc__
# we assume that __call__.__doc__ doesn't have self
# this is what cython gives
try:
t = arguments_from_docstring(f.__call__.__doc__)
if t[0] == 'self':
t = t[1:]
return t
except Exception as e:
if verbose:
print(e)
print("fail parsing __call__.__doc__")
# how about just __doc__
try:
t = arguments_from_docstring(f.__doc__)
if t[0] == 'self':
t = t[1:]
return t
except Exception as e:
if verbose:
print(e)
print("fail parsing __doc__")
raise TypeError("Unable to obtain function signature")
def describe(f, verbose=False):
"""Try to extract the function argument names.
.. seealso::
:ref:`function-sig-label`
"""
return better_arg_spec(f, verbose)
def fitarg_rename(fitarg, ren):
"""Rename variable names in ``fitarg`` with rename function.
::
#simple renaming
fitarg_rename({'x':1, 'limit_x':1, 'fix_x':1, 'error_x':1},
lambda pname: 'y' if pname=='x' else pname)
#{'y':1, 'limit_y':1, 'fix_y':1, 'error_y':1},
#prefixing
figarg_rename({'x':1, 'limit_x':1, 'fix_x':1, 'error_x':1},
lambda pname: 'prefix_'+pname)
#{'prefix_x':1, 'limit_prefix_x':1, 'fix_prefix_x':1, 'error_prefix_x':1}
"""
tmp = ren
if isinstance(ren, str):
ren = lambda x: tmp + '_' + x
ret = {}
prefix = ['limit_', 'fix_', 'error_', ]
for k, v in fitarg.items():
vn = k
pf = ''
for p in prefix:
if k.startswith(p):
vn = k[len(p):]
pf = p
newvn = pf + ren(vn)
ret[newvn] = v
return ret
def true_param(p):
"""Check if ``p`` is a parameter name, not a limit/error/fix attributes.
"""
return (not p.startswith('limit_') and
not p.startswith('error_') and
not p.startswith('fix_'))
def param_name(p):
"""Extract parameter name from attributes.
Examples:
- ``fix_x`` -> ``x``
- ``error_x`` -> ``x``
- ``limit_x`` -> ``x``
"""
prefix = ['limit_', 'error_', 'fix_']
for prf in prefix:
if p.startswith(prf):
return p[len(prf):]
return p
def extract_iv(b):
"""Extract initial value from fitargs dictionary."""
return dict((k, v) for k, v in b.items() if true_param(k))
def extract_limit(b):
"""Extract limit from fitargs dictionary."""
return dict((k, v) for k, v in b.items() if k.startswith('limit_'))
def | extract_error | identifier_name | |
util.py | _parameters has_accurate_covar "
"has_posdef_covar has_made_posdef_covar hesse_failed has_covariance is_above_max_edm "
"has_reached_call_limit")):
"""Function minimum status object."""
__slots__ = ()
def _repr_html_(self):
return repr_html.fmin(self)
def __str__(self):
return repr_text.fmin(self)
def _repr_pretty_(self, p, cycle):
if cycle:
p.text("FMin(...)")
else:
p.text(str(self))
# MigradResult used to be a tuple, so we don't add the dict interface
class MigradResult(namedtuple("MigradResult", "fmin params")):
"""Holds the Migrad result."""
__slots__ = ()
def __str__(self):
return str(self.fmin) + "\n" + str(self.params)
def _repr_html_(self):
return self.fmin._repr_html_() + self.params._repr_html_()
def _repr_pretty_(self, p, cycle):
if cycle:
p.text("MigradResult(...)")
else:
p.text(str(self))
def arguments_from_docstring(doc):
"""Parse first line of docstring for argument name.
Docstring should be of the form ``min(iterable[, key=func])``.
It can also parse cython docstring of the form
``Minuit.migrad(self[, int ncall_me =10000, resume=True, int nsplit=1])``
"""
if doc is None:
raise RuntimeError('__doc__ is None')
doc = doc.lstrip()
# care only the firstline
# docstring can be long
line = doc.split('\n', 1)[0] # get the firstline
if line.startswith("('...',)"):
line = doc.split('\n', 2)[1] # get the second line
p = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
# 'min(iterable[, key=func])\n' -> 'iterable[, key=func]'
sig = p.search(line)
if sig is None:
return []
# iterable[, key=func]' -> ['iterable[' ,' key=func]']
sig = sig.groups()[0].split(',')
ret = []
for s in sig:
# get the last one after all space after =
# ex: int x= True
tmp = s.split('=')[0].split()[-1]
# clean up non _+alphanum character
tmp = ''.join([x for x in tmp if x.isalnum() or x == '_'])
ret.append(tmp)
# re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
# ret += self.docstring_kwd_re.findall(s)
ret = list(filter(lambda x: x != '', ret))
if len(ret) == 0:
raise RuntimeError('Your doc is unparsable\n' + doc)
return ret
def fc_or_c(f):
if hasattr(f, 'func_code'):
return f.func_code
else:
return f.__code__
def arguments_from_funccode(f):
"""Check f.funccode for arguments
"""
fc = fc_or_c(f)
vnames = fc.co_varnames
nargs = fc.co_argcount
# bound method and fake function will be None
args = vnames[1 if is_bound(f) else 0:nargs]
if not args:
raise RuntimeError('Function has variable number of arguments')
return list(args)
def arguments_from_call_funccode(f):
"""Check f.__call__.func_code for arguments
"""
fc = fc_or_c(f.__call__)
argcount = fc.co_argcount
args = list(fc.co_varnames[1:argcount])
if not args:
raise RuntimeError('Function has variable number of arguments')
return args
def is_bound(f):
"""Test whether ``f`` is a bound function.
"""
return getattr(f, '__self__', None) is not None
def dock_if_bound(f, v):
"""Dock off ``self`` if a bound function is passed.
"""
return v[1:] if is_bound(f) else v
def better_arg_spec(f, verbose=False):
"""Extract function signature.
..seealso::
:ref:`function-sig-label`
"""
# using funccode
try:
return arguments_from_funccode(f)
except Exception as e:
if verbose:
print(e) # TODO: this might not be such a good idea.
print("Extracting arguments from f.func_code/__code__ fails")
# using __call__ funccode
try:
return arguments_from_call_funccode(f)
except Exception as e:
if verbose:
print(e) # TODO: this might not be such a good idea.
print("Extracting arguments from f.__call__.func_code/__code__ fails")
# try:
# return list(inspect.getargspec(f.__call__)[0][1:])
# except Exception as e:
# if verbose:
# print(e)
# print("inspect.getargspec(f)[0] fails")
# try:
# return list(inspect.getargspec(f)[0])
# except Exception as e:
# if verbose:
# print(e)
# print("inspect.getargspec(f)[0] fails")
# now we are parsing __call__.__doc__
# we assume that __call__.__doc__ doesn't have self
# this is what cython gives
try:
t = arguments_from_docstring(f.__call__.__doc__)
if t[0] == 'self':
t = t[1:]
return t
except Exception as e:
if verbose:
print(e)
print("fail parsing __call__.__doc__")
# how about just __doc__
try:
t = arguments_from_docstring(f.__doc__)
if t[0] == 'self':
t = t[1:]
return t
except Exception as e:
if verbose:
print(e)
print("fail parsing __doc__")
raise TypeError("Unable to obtain function signature")
def describe(f, verbose=False):
"""Try to extract the function argument names.
.. seealso::
:ref:`function-sig-label`
"""
return better_arg_spec(f, verbose)
def fitarg_rename(fitarg, ren):
"""Rename variable names in ``fitarg`` with rename function.
::
#simple renaming
fitarg_rename({'x':1, 'limit_x':1, 'fix_x':1, 'error_x':1},
lambda pname: 'y' if pname=='x' else pname)
#{'y':1, 'limit_y':1, 'fix_y':1, 'error_y':1},
#prefixing
figarg_rename({'x':1, 'limit_x':1, 'fix_x':1, 'error_x':1},
lambda pname: 'prefix_'+pname)
#{'prefix_x':1, 'limit_prefix_x':1, 'fix_prefix_x':1, 'error_prefix_x':1}
"""
tmp = ren
if isinstance(ren, str):
ren = lambda x: tmp + '_' + x
ret = {}
prefix = ['limit_', 'fix_', 'error_', ]
for k, v in fitarg.items():
vn = k
pf = ''
for p in prefix:
if k.startswith(p):
vn = k[len(p):]
pf = p
newvn = pf + ren(vn)
ret[newvn] = v
return ret
def true_param(p):
"""Check if ``p`` is a parameter name, not a limit/error/fix attributes.
"""
return (not p.startswith('limit_') and
not p.startswith('error_') and
not p.startswith('fix_'))
def param_name(p):
"""Extract parameter name from attributes.
Examples:
- ``fix_x`` -> ``x``
- ``error_x`` -> ``x``
- ``limit_x`` -> ``x``
"""
prefix = ['limit_', 'error_', 'fix_']
for prf in prefix:
if p.startswith(prf):
return p[len(prf):]
return p
def extract_iv(b):
"""Extract initial value from fitargs dictionary."""
return dict((k, v) for k, v in b.items() if true_param(k))
def extract_limit(b):
"""Extract limit from fitargs dictionary."""
return dict((k, v) for k, v in b.items() if k.startswith('limit_'))
def extract_error(b):
"""Extract error from fitargs dictionary."""
return dict((k, v) for k, v in b.items() if k.startswith('error_'))
def extract_fix(b):
"""extract fix attribute from fitargs dictionary"""
return dict((k, v) for k, v in b.items() if k.startswith('fix_'))
def remove_var(b, exclude):
| """Exclude variable in exclude list from b."""
return dict((k, v) for k, v in b.items() if param_name(k) not in exclude) | identifier_body | |
util.py | ."""
def __init__(self, seq, merrors):
list.__init__(self, seq)
self.merrors = merrors
def _repr_html_(self):
return repr_html.params(self)
def __str__(self):
return repr_text.params(self)
def _repr_pretty_(self, p, cycle):
if cycle:
p.text("[...]")
else:
p.text(str(self))
class MError(dict_interface_mixin, namedtuple("MError",
"name is_valid lower upper lower_valid upper_valid at_lower_limit at_upper_limit "
"at_lower_max_fcn at_upper_max_fcn lower_new_min upper_new_min nfcn min")):
"""Minos result object."""
__slots__ = ()
def _repr_html_(self):
return repr_html.merror(self)
def __str__(self):
return repr_text.merror(self)
def _repr_pretty_(self, p, cycle):
if cycle:
p.text("MError(...)")
else:
p.text(str(self))
class MErrors(OrderedDict):
"""Dict from parameter name to Minos result object."""
def _repr_html_(self):
return "\n".join([x._repr_html_() for x in self.values()])
def __str__(self):
return "\n".join([str(x) for x in self.values()])
def _repr_pretty_(self, p, cycle):
if cycle:
p.text("MErrors(...)")
else:
p.text(str(self))
class FMin(dict_interface_mixin, namedtuple("FMin",
"fval edm tolerance nfcn ncalls up is_valid has_valid_parameters has_accurate_covar "
"has_posdef_covar has_made_posdef_covar hesse_failed has_covariance is_above_max_edm "
"has_reached_call_limit")):
"""Function minimum status object."""
__slots__ = ()
def _repr_html_(self):
return repr_html.fmin(self)
def __str__(self):
return repr_text.fmin(self)
def _repr_pretty_(self, p, cycle):
if cycle:
p.text("FMin(...)")
else:
p.text(str(self))
# MigradResult used to be a tuple, so we don't add the dict interface
class MigradResult(namedtuple("MigradResult", "fmin params")):
"""Holds the Migrad result."""
__slots__ = ()
def __str__(self):
return str(self.fmin) + "\n" + str(self.params)
def _repr_html_(self):
return self.fmin._repr_html_() + self.params._repr_html_()
def _repr_pretty_(self, p, cycle):
if cycle:
p.text("MigradResult(...)")
else:
p.text(str(self))
def arguments_from_docstring(doc):
"""Parse first line of docstring for argument name.
Docstring should be of the form ``min(iterable[, key=func])``.
It can also parse cython docstring of the form
``Minuit.migrad(self[, int ncall_me =10000, resume=True, int nsplit=1])``
"""
if doc is None:
raise RuntimeError('__doc__ is None')
doc = doc.lstrip()
# care only the firstline
# docstring can be long
line = doc.split('\n', 1)[0] # get the firstline
if line.startswith("('...',)"):
line = doc.split('\n', 2)[1] # get the second line
p = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
# 'min(iterable[, key=func])\n' -> 'iterable[, key=func]'
sig = p.search(line)
if sig is None:
return []
# iterable[, key=func]' -> ['iterable[' ,' key=func]']
sig = sig.groups()[0].split(',')
ret = []
for s in sig:
# get the last one after all space after =
# ex: int x= True
tmp = s.split('=')[0].split()[-1]
# clean up non _+alphanum character
tmp = ''.join([x for x in tmp if x.isalnum() or x == '_'])
ret.append(tmp)
# re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
# ret += self.docstring_kwd_re.findall(s)
ret = list(filter(lambda x: x != '', ret))
if len(ret) == 0:
raise RuntimeError('Your doc is unparsable\n' + doc)
return ret
def fc_or_c(f):
if hasattr(f, 'func_code'):
return f.func_code
else:
return f.__code__
def arguments_from_funccode(f):
"""Check f.funccode for arguments
"""
fc = fc_or_c(f)
vnames = fc.co_varnames
nargs = fc.co_argcount
# bound method and fake function will be None
args = vnames[1 if is_bound(f) else 0:nargs]
if not args:
raise RuntimeError('Function has variable number of arguments')
return list(args)
def arguments_from_call_funccode(f):
"""Check f.__call__.func_code for arguments
"""
fc = fc_or_c(f.__call__)
argcount = fc.co_argcount
args = list(fc.co_varnames[1:argcount])
if not args:
raise RuntimeError('Function has variable number of arguments')
return args
def is_bound(f):
"""Test whether ``f`` is a bound function.
"""
return getattr(f, '__self__', None) is not None
def dock_if_bound(f, v):
"""Dock off ``self`` if a bound function is passed.
"""
return v[1:] if is_bound(f) else v
def better_arg_spec(f, verbose=False):
"""Extract function signature.
..seealso::
:ref:`function-sig-label`
"""
# using funccode
try:
return arguments_from_funccode(f)
except Exception as e:
if verbose:
print(e) # TODO: this might not be such a good idea.
print("Extracting arguments from f.func_code/__code__ fails")
# using __call__ funccode
try:
return arguments_from_call_funccode(f)
except Exception as e:
if verbose:
print(e) # TODO: this might not be such a good idea.
print("Extracting arguments from f.__call__.func_code/__code__ fails")
# try:
# return list(inspect.getargspec(f.__call__)[0][1:])
# except Exception as e:
# if verbose:
# print(e)
# print("inspect.getargspec(f)[0] fails")
# try:
# return list(inspect.getargspec(f)[0])
# except Exception as e:
# if verbose:
# print(e)
# print("inspect.getargspec(f)[0] fails")
# now we are parsing __call__.__doc__
# we assume that __call__.__doc__ doesn't have self
# this is what cython gives
try:
t = arguments_from_docstring(f.__call__.__doc__)
if t[0] == 'self':
t = t[1:]
return t
except Exception as e:
if verbose:
print(e)
print("fail parsing __call__.__doc__")
# how about just __doc__
try:
t = arguments_from_docstring(f.__doc__)
if t[0] == 'self':
t = t[1:]
return t
except Exception as e:
if verbose:
print(e)
print("fail parsing __doc__")
raise TypeError("Unable to obtain function signature")
def describe(f, verbose=False):
"""Try to extract the function argument names.
.. seealso::
:ref:`function-sig-label`
"""
return better_arg_spec(f, verbose)
def fitarg_rename(fitarg, ren):
"""Rename variable names in ``fitarg`` with rename function.
::
#simple renaming
fitarg_rename({'x':1, 'limit_x':1, 'fix_x':1, 'error_x':1},
lambda pname: 'y' if pname=='x' else pname)
#{'y':1, 'limit_y':1, 'fix_y':1, 'error_y':1},
#prefixing
figarg_rename({'x':1, 'limit_x':1, 'fix_x':1, 'error_x':1},
lambda pname: 'prefix_'+pname)
#{'prefix_x':1, 'limit_prefix_x':1, 'fix_prefix_x':1, 'error_prefix_x':1}
"""
tmp = ren
if isinstance(ren, str):
ren = lambda x: tmp + '_' + x
ret = {}
prefix = ['limit_', 'fix_', 'error_', ]
for k, v in fitarg.items():
vn = k
pf = ''
for p in prefix:
| if k.startswith(p):
vn = k[len(p):]
pf = p | conditional_block | |
properties.ts | scaleType,
scalePadding,
scalePaddingInner,
domain: specifiedScale.domain,
domainMin: specifiedScale.domainMin,
domainMax: specifiedScale.domainMax,
markDef,
config,
hasNestedOffsetScale: channelHasNestedOffsetScale(encoding, channel),
hasSecondaryRangeChannel: !!encoding[getSecondaryRangeChannel(channel)]
})
: config.scale[property];
if (value !== undefined) {
localScaleCmpt.set(property, value, false);
}
}
}
}
}
export interface ScaleRuleParams {
model: Model;
channel: ScaleChannel;
fieldOrDatumDef: ScaleFieldDef<string, Type> | ScaleDatumDef;
hasNestedOffsetScale: boolean;
scaleType: ScaleType;
scalePadding: number | SignalRef;
scalePaddingInner: number | SignalRef;
domain: Domain;
domainMin: Scale['domainMin'];
domainMax: Scale['domainMax'];
markDef: MarkDef<Mark, SignalRef>;
config: Config<SignalRef>;
hasSecondaryRangeChannel: boolean;
}
export const scaleRules: {
[k in keyof Scale]?: (params: ScaleRuleParams) => Scale[k];
} = {
bins: ({model, fieldOrDatumDef}) => (isFieldDef(fieldOrDatumDef) ? bins(model, fieldOrDatumDef) : undefined),
interpolate: ({channel, fieldOrDatumDef}) => interpolate(channel, fieldOrDatumDef.type),
nice: ({scaleType, channel, domain, domainMin, domainMax, fieldOrDatumDef}) =>
nice(scaleType, channel, domain, domainMin, domainMax, fieldOrDatumDef),
padding: ({channel, scaleType, fieldOrDatumDef, markDef, config}) =>
padding(channel, scaleType, config.scale, fieldOrDatumDef, markDef, config.bar),
paddingInner: ({scalePadding, channel, markDef, scaleType, config, hasNestedOffsetScale}) =>
paddingInner(scalePadding, channel, markDef.type, scaleType, config.scale, hasNestedOffsetScale),
paddingOuter: ({scalePadding, channel, scaleType, scalePaddingInner, config, hasNestedOffsetScale}) =>
paddingOuter(scalePadding, channel, scaleType, scalePaddingInner, config.scale, hasNestedOffsetScale),
reverse: ({fieldOrDatumDef, scaleType, channel, config}) => {
const sort = isFieldDef(fieldOrDatumDef) ? fieldOrDatumDef.sort : undefined;
return reverse(scaleType, sort, channel, config.scale);
},
zero: ({channel, fieldOrDatumDef, domain, markDef, scaleType, config, hasSecondaryRangeChannel}) =>
zero(channel, fieldOrDatumDef, domain, markDef, scaleType, config.scale, hasSecondaryRangeChannel)
};
// This method is here rather than in range.ts to avoid circular dependency.
export function parseScaleRange(model: Model) {
if (isUnitModel(model)) {
parseUnitScaleRange(model);
} else {
parseNonUnitScaleProperty(model, 'range');
}
}
export function parseNonUnitScaleProperty(model: Model, property: keyof (Scale | ScaleComponentProps)) {
const localScaleComponents: ScaleComponentIndex = model.component.scales;
for (const child of model.children) {
if (property === 'range') {
parseScaleRange(child);
} else {
parseScaleProperty(child, property);
}
}
for (const channel of keys(localScaleComponents)) {
let valueWithExplicit: Explicit<any>;
for (const child of model.children) {
const childComponent = child.component.scales[channel];
if (childComponent) {
const childValueWithExplicit = childComponent.getWithExplicit(property);
valueWithExplicit = mergeValuesWithExplicit<VgScale, any>(
valueWithExplicit,
childValueWithExplicit,
property,
'scale',
tieBreakByComparing<VgScale, any>((v1, v2) => {
switch (property) {
case 'range':
// For step, prefer larger step
if (v1.step && v2.step) {
return v1.step - v2.step;
}
return 0;
// TODO: precedence rule for other properties
}
return 0;
})
);
}
}
localScaleComponents[channel].setWithExplicit(property, valueWithExplicit);
}
}
export function bins(model: Model, fieldDef: TypedFieldDef<string>) {
const bin = fieldDef.bin;
if (isBinning(bin)) {
const binSignal = getBinSignalName(model, fieldDef.field, bin);
return new SignalRefWrapper(() => {
return model.getSignalName(binSignal);
});
} else if (isBinned(bin) && isBinParams(bin) && bin.step !== undefined) {
// start and stop will be determined from the scale domain
return {
step: bin.step
};
}
return undefined;
}
export function interpolate(channel: ScaleChannel, type: Type): Scale['interpolate'] {
if (contains([COLOR, FILL, STROKE], channel) && type !== 'nominal') {
return 'hcl';
}
return undefined;
}
export function nice(
scaleType: ScaleType,
channel: ScaleChannel,
specifiedDomain: Domain,
domainMin: Scale['domainMin'],
domainMax: Scale['domainMax'],
fieldOrDatumDef: TypedFieldDef<string> | ScaleDatumDef
): boolean | TimeInterval {
if (
getFieldDef(fieldOrDatumDef)?.bin ||
isArray(specifiedDomain) ||
domainMax != null ||
domainMin != null ||
util.contains([ScaleType.TIME, ScaleType.UTC], scaleType)
) {
return undefined;
}
return isXorY(channel) ? true : undefined;
}
export function padding(
channel: ScaleChannel,
scaleType: ScaleType,
scaleConfig: ScaleConfig<SignalRef>,
fieldOrDatumDef: TypedFieldDef<string> | ScaleDatumDef,
markDef: MarkDef<Mark, SignalRef>,
barConfig: RectConfig<SignalRef>
) {
if (isXorY(channel)) {
if (isContinuousToContinuous(scaleType)) {
if (scaleConfig.continuousPadding !== undefined) {
return scaleConfig.continuousPadding;
}
const {type, orient} = markDef;
if (type === 'bar' && !(isFieldDef(fieldOrDatumDef) && (fieldOrDatumDef.bin || fieldOrDatumDef.timeUnit))) {
if ((orient === 'vertical' && channel === 'x') || (orient === 'horizontal' && channel === 'y')) {
return barConfig.continuousBandSize;
}
}
}
if (scaleType === ScaleType.POINT) {
return scaleConfig.pointPadding;
}
}
return undefined;
}
export function paddingInner(
paddingValue: number | SignalRef,
channel: ScaleChannel,
mark: Mark,
scaleType: ScaleType,
scaleConfig: ScaleConfig<SignalRef>,
hasNestedOffsetScale = false
) {
if (paddingValue !== undefined) {
// If user has already manually specified "padding", no need to add default paddingInner.
return undefined;
}
if (isXorY(channel)) {
// Padding is only set for X and Y by default.
// Basically it doesn't make sense to add padding for color and size.
// paddingOuter would only be called if it's a band scale, just return the default for bandScale.
const {bandPaddingInner, barBandPaddingInner, rectBandPaddingInner, bandWithNestedOffsetPaddingInner} = scaleConfig;
if (hasNestedOffsetScale) {
return bandWithNestedOffsetPaddingInner;
}
return getFirstDefined(bandPaddingInner, mark === 'bar' ? barBandPaddingInner : rectBandPaddingInner);
} else if (isXorYOffset(channel)) {
if (scaleType === ScaleType.BAND) {
return scaleConfig.offsetBandPaddingInner;
}
}
return undefined;
}
export function paddingOuter(
paddingValue: number | SignalRef,
channel: ScaleChannel,
scaleType: ScaleType,
paddingInnerValue: number | SignalRef,
scaleConfig: ScaleConfig<SignalRef>,
hasNestedOffsetScale = false
) {
if (paddingValue !== undefined) {
// If user has already manually specified "padding", no need to add default paddingOuter.
return undefined;
}
if (isXorY(channel)) | {
const {bandPaddingOuter, bandWithNestedOffsetPaddingOuter} = scaleConfig;
if (hasNestedOffsetScale) {
return bandWithNestedOffsetPaddingOuter;
}
// Padding is only set for X and Y by default.
// Basically it doesn't make sense to add padding for color and size.
if (scaleType === ScaleType.BAND) {
return getFirstDefined(
bandPaddingOuter,
/* By default, paddingOuter is paddingInner / 2. The reason is that
size (width/height) = step * (cardinality - paddingInner + 2 * paddingOuter).
and we want the width/height to be integer by default.
Note that step (by default) and cardinality are integers.) */
isSignalRef(paddingInnerValue) ? {signal: `${paddingInnerValue.signal}/2`} : paddingInnerValue / 2
);
}
} | conditional_block | |
properties.ts | (model: Model, property: Exclude<keyof (Scale | ScaleComponentProps), 'range'>) {
if (isUnitModel(model)) {
parseUnitScaleProperty(model, property);
} else {
parseNonUnitScaleProperty(model, property);
}
}
function parseUnitScaleProperty(model: UnitModel, property: Exclude<keyof (Scale | ScaleComponentProps), 'range'>) {
const localScaleComponents: ScaleComponentIndex = model.component.scales;
const {config, encoding, markDef, specifiedScales} = model;
for (const channel of keys(localScaleComponents)) {
const specifiedScale = specifiedScales[channel];
const localScaleCmpt = localScaleComponents[channel];
const mergedScaleCmpt = model.getScaleComponent(channel);
const fieldOrDatumDef = getFieldOrDatumDef(encoding[channel]) as ScaleFieldDef<string, Type> | ScaleDatumDef;
const specifiedValue = specifiedScale[property];
const scaleType = mergedScaleCmpt.get('type');
const scalePadding = mergedScaleCmpt.get('padding');
const scalePaddingInner = mergedScaleCmpt.get('paddingInner');
const supportedByScaleType = scaleTypeSupportProperty(scaleType, property);
const channelIncompatability = channelScalePropertyIncompatability(channel, property);
if (specifiedValue !== undefined) {
// If there is a specified value, check if it is compatible with scale type and channel
if (!supportedByScaleType) {
log.warn(log.message.scalePropertyNotWorkWithScaleType(scaleType, property, channel));
} else if (channelIncompatability) {
// channel
log.warn(channelIncompatability);
}
}
if (supportedByScaleType && channelIncompatability === undefined) {
if (specifiedValue !== undefined) {
const timeUnit = fieldOrDatumDef['timeUnit'];
const type = fieldOrDatumDef.type;
switch (property) {
// domainMax/Min to signal if the value is a datetime object
case 'domainMax':
case 'domainMin':
if (isDateTime(specifiedScale[property]) || type === 'temporal' || timeUnit) {
localScaleCmpt.set(property, {signal: valueExpr(specifiedScale[property], {type, timeUnit})}, true);
} else {
localScaleCmpt.set(property, specifiedScale[property] as any, true);
}
break;
default:
localScaleCmpt.copyKeyFromObject<Omit<ScaleComponentProps, 'range' | 'domainMin' | 'domainMax'>>(
property,
specifiedScale
);
}
} else {
const value =
property in scaleRules
? scaleRules[property]({
model,
channel,
fieldOrDatumDef,
scaleType,
scalePadding,
scalePaddingInner,
domain: specifiedScale.domain,
domainMin: specifiedScale.domainMin,
domainMax: specifiedScale.domainMax,
markDef,
config,
hasNestedOffsetScale: channelHasNestedOffsetScale(encoding, channel),
hasSecondaryRangeChannel: !!encoding[getSecondaryRangeChannel(channel)]
})
: config.scale[property];
if (value !== undefined) {
localScaleCmpt.set(property, value, false);
}
}
}
}
}
export interface ScaleRuleParams {
model: Model;
channel: ScaleChannel;
fieldOrDatumDef: ScaleFieldDef<string, Type> | ScaleDatumDef;
hasNestedOffsetScale: boolean;
scaleType: ScaleType;
scalePadding: number | SignalRef;
scalePaddingInner: number | SignalRef;
domain: Domain;
domainMin: Scale['domainMin'];
domainMax: Scale['domainMax'];
markDef: MarkDef<Mark, SignalRef>;
config: Config<SignalRef>;
hasSecondaryRangeChannel: boolean;
}
export const scaleRules: {
[k in keyof Scale]?: (params: ScaleRuleParams) => Scale[k];
} = {
bins: ({model, fieldOrDatumDef}) => (isFieldDef(fieldOrDatumDef) ? bins(model, fieldOrDatumDef) : undefined),
interpolate: ({channel, fieldOrDatumDef}) => interpolate(channel, fieldOrDatumDef.type),
nice: ({scaleType, channel, domain, domainMin, domainMax, fieldOrDatumDef}) =>
nice(scaleType, channel, domain, domainMin, domainMax, fieldOrDatumDef),
padding: ({channel, scaleType, fieldOrDatumDef, markDef, config}) =>
padding(channel, scaleType, config.scale, fieldOrDatumDef, markDef, config.bar),
paddingInner: ({scalePadding, channel, markDef, scaleType, config, hasNestedOffsetScale}) =>
paddingInner(scalePadding, channel, markDef.type, scaleType, config.scale, hasNestedOffsetScale),
paddingOuter: ({scalePadding, channel, scaleType, scalePaddingInner, config, hasNestedOffsetScale}) =>
paddingOuter(scalePadding, channel, scaleType, scalePaddingInner, config.scale, hasNestedOffsetScale),
reverse: ({fieldOrDatumDef, scaleType, channel, config}) => {
const sort = isFieldDef(fieldOrDatumDef) ? fieldOrDatumDef.sort : undefined;
return reverse(scaleType, sort, channel, config.scale);
},
zero: ({channel, fieldOrDatumDef, domain, markDef, scaleType, config, hasSecondaryRangeChannel}) =>
zero(channel, fieldOrDatumDef, domain, markDef, scaleType, config.scale, hasSecondaryRangeChannel)
};
// This method is here rather than in range.ts to avoid circular dependency.
export function parseScaleRange(model: Model) {
if (isUnitModel(model)) {
parseUnitScaleRange(model);
} else {
parseNonUnitScaleProperty(model, 'range');
}
}
export function parseNonUnitScaleProperty(model: Model, property: keyof (Scale | ScaleComponentProps)) {
const localScaleComponents: ScaleComponentIndex = model.component.scales;
for (const child of model.children) {
if (property === 'range') {
parseScaleRange(child);
} else {
parseScaleProperty(child, property);
}
}
for (const channel of keys(localScaleComponents)) {
let valueWithExplicit: Explicit<any>;
for (const child of model.children) {
const childComponent = child.component.scales[channel];
if (childComponent) {
const childValueWithExplicit = childComponent.getWithExplicit(property);
valueWithExplicit = mergeValuesWithExplicit<VgScale, any>(
valueWithExplicit,
childValueWithExplicit,
property,
'scale',
tieBreakByComparing<VgScale, any>((v1, v2) => {
switch (property) {
case 'range':
// For step, prefer larger step
if (v1.step && v2.step) {
return v1.step - v2.step;
}
return 0;
// TODO: precedence rule for other properties
}
return 0;
})
);
}
}
localScaleComponents[channel].setWithExplicit(property, valueWithExplicit);
}
}
export function bins(model: Model, fieldDef: TypedFieldDef<string>) {
const bin = fieldDef.bin;
if (isBinning(bin)) {
const binSignal = getBinSignalName(model, fieldDef.field, bin);
return new SignalRefWrapper(() => {
return model.getSignalName(binSignal);
});
} else if (isBinned(bin) && isBinParams(bin) && bin.step !== undefined) {
// start and stop will be determined from the scale domain
return {
step: bin.step
};
}
return undefined;
}
export function interpolate(channel: ScaleChannel, type: Type): Scale['interpolate'] {
if (contains([COLOR, FILL, STROKE], channel) && type !== 'nominal') {
return 'hcl';
}
return undefined;
}
export function nice(
scaleType: ScaleType,
channel: ScaleChannel,
specifiedDomain: Domain,
domainMin: Scale['domainMin'],
domainMax: Scale['domainMax'],
fieldOrDatumDef: TypedFieldDef<string> | ScaleDatumDef
): boolean | TimeInterval {
if (
getFieldDef(fieldOrDatumDef)?.bin ||
isArray(specifiedDomain) ||
domainMax != null ||
domainMin != null ||
util.contains([ScaleType.TIME, ScaleType.UTC], scaleType)
) {
return undefined;
}
return isXorY(channel) ? true : undefined;
}
export function padding(
channel: ScaleChannel,
scaleType: ScaleType,
scaleConfig: ScaleConfig<SignalRef>,
fieldOrDatumDef: TypedFieldDef<string> | ScaleDatumDef,
markDef: MarkDef<Mark, SignalRef>,
barConfig: RectConfig<SignalRef>
) {
if (isXorY(channel)) {
if (isContinuousToContinuous(scaleType)) {
if (scaleConfig.continuousPadding !== undefined) {
return scaleConfig.continuousPadding;
}
const {type, orient} = markDef;
if (type === 'bar' && !(isFieldDef(fieldOrDatumDef) && (fieldOrDatumDef.bin || fieldOrDatumDef.timeUnit))) {
if ((orient === 'vertical' && channel === 'x') || (orient === 'horizontal' && channel === 'y | parseScaleProperty | identifier_name | |
properties.ts | localScaleCmpt = localScaleComponents[channel];
const mergedScaleCmpt = model.getScaleComponent(channel);
const fieldOrDatumDef = getFieldOrDatumDef(encoding[channel]) as ScaleFieldDef<string, Type> | ScaleDatumDef;
const specifiedValue = specifiedScale[property];
const scaleType = mergedScaleCmpt.get('type');
const scalePadding = mergedScaleCmpt.get('padding');
const scalePaddingInner = mergedScaleCmpt.get('paddingInner');
const supportedByScaleType = scaleTypeSupportProperty(scaleType, property);
const channelIncompatability = channelScalePropertyIncompatability(channel, property);
if (specifiedValue !== undefined) {
// If there is a specified value, check if it is compatible with scale type and channel
if (!supportedByScaleType) {
log.warn(log.message.scalePropertyNotWorkWithScaleType(scaleType, property, channel));
} else if (channelIncompatability) {
// channel
log.warn(channelIncompatability);
}
}
if (supportedByScaleType && channelIncompatability === undefined) {
if (specifiedValue !== undefined) {
const timeUnit = fieldOrDatumDef['timeUnit'];
const type = fieldOrDatumDef.type;
switch (property) {
// domainMax/Min to signal if the value is a datetime object
case 'domainMax':
case 'domainMin':
if (isDateTime(specifiedScale[property]) || type === 'temporal' || timeUnit) {
localScaleCmpt.set(property, {signal: valueExpr(specifiedScale[property], {type, timeUnit})}, true);
} else {
localScaleCmpt.set(property, specifiedScale[property] as any, true);
}
break;
default:
localScaleCmpt.copyKeyFromObject<Omit<ScaleComponentProps, 'range' | 'domainMin' | 'domainMax'>>(
property,
specifiedScale
);
}
} else {
const value =
property in scaleRules
? scaleRules[property]({
model,
channel,
fieldOrDatumDef,
scaleType,
scalePadding,
scalePaddingInner,
domain: specifiedScale.domain,
domainMin: specifiedScale.domainMin,
domainMax: specifiedScale.domainMax,
markDef,
config,
hasNestedOffsetScale: channelHasNestedOffsetScale(encoding, channel),
hasSecondaryRangeChannel: !!encoding[getSecondaryRangeChannel(channel)]
})
: config.scale[property];
if (value !== undefined) {
localScaleCmpt.set(property, value, false);
}
}
}
}
}
export interface ScaleRuleParams {
model: Model;
channel: ScaleChannel;
fieldOrDatumDef: ScaleFieldDef<string, Type> | ScaleDatumDef;
hasNestedOffsetScale: boolean;
scaleType: ScaleType;
scalePadding: number | SignalRef;
scalePaddingInner: number | SignalRef;
domain: Domain;
domainMin: Scale['domainMin'];
domainMax: Scale['domainMax'];
markDef: MarkDef<Mark, SignalRef>;
config: Config<SignalRef>;
hasSecondaryRangeChannel: boolean;
}
export const scaleRules: {
[k in keyof Scale]?: (params: ScaleRuleParams) => Scale[k];
} = {
bins: ({model, fieldOrDatumDef}) => (isFieldDef(fieldOrDatumDef) ? bins(model, fieldOrDatumDef) : undefined),
interpolate: ({channel, fieldOrDatumDef}) => interpolate(channel, fieldOrDatumDef.type),
nice: ({scaleType, channel, domain, domainMin, domainMax, fieldOrDatumDef}) =>
nice(scaleType, channel, domain, domainMin, domainMax, fieldOrDatumDef),
padding: ({channel, scaleType, fieldOrDatumDef, markDef, config}) =>
padding(channel, scaleType, config.scale, fieldOrDatumDef, markDef, config.bar),
paddingInner: ({scalePadding, channel, markDef, scaleType, config, hasNestedOffsetScale}) =>
paddingInner(scalePadding, channel, markDef.type, scaleType, config.scale, hasNestedOffsetScale),
paddingOuter: ({scalePadding, channel, scaleType, scalePaddingInner, config, hasNestedOffsetScale}) =>
paddingOuter(scalePadding, channel, scaleType, scalePaddingInner, config.scale, hasNestedOffsetScale),
reverse: ({fieldOrDatumDef, scaleType, channel, config}) => {
const sort = isFieldDef(fieldOrDatumDef) ? fieldOrDatumDef.sort : undefined;
return reverse(scaleType, sort, channel, config.scale);
},
zero: ({channel, fieldOrDatumDef, domain, markDef, scaleType, config, hasSecondaryRangeChannel}) =>
zero(channel, fieldOrDatumDef, domain, markDef, scaleType, config.scale, hasSecondaryRangeChannel)
};
// This method is here rather than in range.ts to avoid circular dependency.
export function parseScaleRange(model: Model) {
if (isUnitModel(model)) {
parseUnitScaleRange(model);
} else {
parseNonUnitScaleProperty(model, 'range');
}
}
export function parseNonUnitScaleProperty(model: Model, property: keyof (Scale | ScaleComponentProps)) {
const localScaleComponents: ScaleComponentIndex = model.component.scales;
for (const child of model.children) {
if (property === 'range') {
parseScaleRange(child);
} else {
parseScaleProperty(child, property);
}
}
for (const channel of keys(localScaleComponents)) {
let valueWithExplicit: Explicit<any>;
for (const child of model.children) {
const childComponent = child.component.scales[channel];
if (childComponent) {
const childValueWithExplicit = childComponent.getWithExplicit(property);
valueWithExplicit = mergeValuesWithExplicit<VgScale, any>( | switch (property) {
case 'range':
// For step, prefer larger step
if (v1.step && v2.step) {
return v1.step - v2.step;
}
return 0;
// TODO: precedence rule for other properties
}
return 0;
})
);
}
}
localScaleComponents[channel].setWithExplicit(property, valueWithExplicit);
}
}
export function bins(model: Model, fieldDef: TypedFieldDef<string>) {
const bin = fieldDef.bin;
if (isBinning(bin)) {
const binSignal = getBinSignalName(model, fieldDef.field, bin);
return new SignalRefWrapper(() => {
return model.getSignalName(binSignal);
});
} else if (isBinned(bin) && isBinParams(bin) && bin.step !== undefined) {
// start and stop will be determined from the scale domain
return {
step: bin.step
};
}
return undefined;
}
export function interpolate(channel: ScaleChannel, type: Type): Scale['interpolate'] {
if (contains([COLOR, FILL, STROKE], channel) && type !== 'nominal') {
return 'hcl';
}
return undefined;
}
export function nice(
scaleType: ScaleType,
channel: ScaleChannel,
specifiedDomain: Domain,
domainMin: Scale['domainMin'],
domainMax: Scale['domainMax'],
fieldOrDatumDef: TypedFieldDef<string> | ScaleDatumDef
): boolean | TimeInterval {
if (
getFieldDef(fieldOrDatumDef)?.bin ||
isArray(specifiedDomain) ||
domainMax != null ||
domainMin != null ||
util.contains([ScaleType.TIME, ScaleType.UTC], scaleType)
) {
return undefined;
}
return isXorY(channel) ? true : undefined;
}
export function padding(
channel: ScaleChannel,
scaleType: ScaleType,
scaleConfig: ScaleConfig<SignalRef>,
fieldOrDatumDef: TypedFieldDef<string> | ScaleDatumDef,
markDef: MarkDef<Mark, SignalRef>,
barConfig: RectConfig<SignalRef>
) {
if (isXorY(channel)) {
if (isContinuousToContinuous(scaleType)) {
if (scaleConfig.continuousPadding !== undefined) {
return scaleConfig.continuousPadding;
}
const {type, orient} = markDef;
if (type === 'bar' && !(isFieldDef(fieldOrDatumDef) && (fieldOrDatumDef.bin || fieldOrDatumDef.timeUnit))) {
if ((orient === 'vertical' && channel === 'x') || (orient === 'horizontal' && channel === 'y')) {
return barConfig.continuousBandSize;
}
}
}
if (scaleType === ScaleType.POINT) {
return scaleConfig.pointPadding;
}
}
return undefined;
}
export function paddingInner(
paddingValue: number | SignalRef,
channel: ScaleChannel,
mark: Mark,
scaleType: ScaleType,
scaleConfig: ScaleConfig<SignalRef>,
hasNestedOffsetScale = false
) {
if (paddingValue !== undefined) {
// If user has already manually specified "padding", no need to add default paddingInner.
return undefined;
}
if (isXorY(channel)) {
// | valueWithExplicit,
childValueWithExplicit,
property,
'scale',
tieBreakByComparing<VgScale, any>((v1, v2) => { | random_line_split |
properties.ts | // If there is a specified value, check if it is compatible with scale type and channel
if (!supportedByScaleType) {
log.warn(log.message.scalePropertyNotWorkWithScaleType(scaleType, property, channel));
} else if (channelIncompatability) {
// channel
log.warn(channelIncompatability);
}
}
if (supportedByScaleType && channelIncompatability === undefined) {
if (specifiedValue !== undefined) {
const timeUnit = fieldOrDatumDef['timeUnit'];
const type = fieldOrDatumDef.type;
switch (property) {
// domainMax/Min to signal if the value is a datetime object
case 'domainMax':
case 'domainMin':
if (isDateTime(specifiedScale[property]) || type === 'temporal' || timeUnit) {
localScaleCmpt.set(property, {signal: valueExpr(specifiedScale[property], {type, timeUnit})}, true);
} else {
localScaleCmpt.set(property, specifiedScale[property] as any, true);
}
break;
default:
localScaleCmpt.copyKeyFromObject<Omit<ScaleComponentProps, 'range' | 'domainMin' | 'domainMax'>>(
property,
specifiedScale
);
}
} else {
const value =
property in scaleRules
? scaleRules[property]({
model,
channel,
fieldOrDatumDef,
scaleType,
scalePadding,
scalePaddingInner,
domain: specifiedScale.domain,
domainMin: specifiedScale.domainMin,
domainMax: specifiedScale.domainMax,
markDef,
config,
hasNestedOffsetScale: channelHasNestedOffsetScale(encoding, channel),
hasSecondaryRangeChannel: !!encoding[getSecondaryRangeChannel(channel)]
})
: config.scale[property];
if (value !== undefined) {
localScaleCmpt.set(property, value, false);
}
}
}
}
}
export interface ScaleRuleParams {
model: Model;
channel: ScaleChannel;
fieldOrDatumDef: ScaleFieldDef<string, Type> | ScaleDatumDef;
hasNestedOffsetScale: boolean;
scaleType: ScaleType;
scalePadding: number | SignalRef;
scalePaddingInner: number | SignalRef;
domain: Domain;
domainMin: Scale['domainMin'];
domainMax: Scale['domainMax'];
markDef: MarkDef<Mark, SignalRef>;
config: Config<SignalRef>;
hasSecondaryRangeChannel: boolean;
}
export const scaleRules: {
[k in keyof Scale]?: (params: ScaleRuleParams) => Scale[k];
} = {
bins: ({model, fieldOrDatumDef}) => (isFieldDef(fieldOrDatumDef) ? bins(model, fieldOrDatumDef) : undefined),
interpolate: ({channel, fieldOrDatumDef}) => interpolate(channel, fieldOrDatumDef.type),
nice: ({scaleType, channel, domain, domainMin, domainMax, fieldOrDatumDef}) =>
nice(scaleType, channel, domain, domainMin, domainMax, fieldOrDatumDef),
padding: ({channel, scaleType, fieldOrDatumDef, markDef, config}) =>
padding(channel, scaleType, config.scale, fieldOrDatumDef, markDef, config.bar),
paddingInner: ({scalePadding, channel, markDef, scaleType, config, hasNestedOffsetScale}) =>
paddingInner(scalePadding, channel, markDef.type, scaleType, config.scale, hasNestedOffsetScale),
paddingOuter: ({scalePadding, channel, scaleType, scalePaddingInner, config, hasNestedOffsetScale}) =>
paddingOuter(scalePadding, channel, scaleType, scalePaddingInner, config.scale, hasNestedOffsetScale),
reverse: ({fieldOrDatumDef, scaleType, channel, config}) => {
const sort = isFieldDef(fieldOrDatumDef) ? fieldOrDatumDef.sort : undefined;
return reverse(scaleType, sort, channel, config.scale);
},
zero: ({channel, fieldOrDatumDef, domain, markDef, scaleType, config, hasSecondaryRangeChannel}) =>
zero(channel, fieldOrDatumDef, domain, markDef, scaleType, config.scale, hasSecondaryRangeChannel)
};
// This method is here rather than in range.ts to avoid circular dependency.
export function parseScaleRange(model: Model) {
if (isUnitModel(model)) {
parseUnitScaleRange(model);
} else {
parseNonUnitScaleProperty(model, 'range');
}
}
export function parseNonUnitScaleProperty(model: Model, property: keyof (Scale | ScaleComponentProps)) {
const localScaleComponents: ScaleComponentIndex = model.component.scales;
for (const child of model.children) {
if (property === 'range') {
parseScaleRange(child);
} else {
parseScaleProperty(child, property);
}
}
for (const channel of keys(localScaleComponents)) {
let valueWithExplicit: Explicit<any>;
for (const child of model.children) {
const childComponent = child.component.scales[channel];
if (childComponent) {
const childValueWithExplicit = childComponent.getWithExplicit(property);
valueWithExplicit = mergeValuesWithExplicit<VgScale, any>(
valueWithExplicit,
childValueWithExplicit,
property,
'scale',
tieBreakByComparing<VgScale, any>((v1, v2) => {
switch (property) {
case 'range':
// For step, prefer larger step
if (v1.step && v2.step) {
return v1.step - v2.step;
}
return 0;
// TODO: precedence rule for other properties
}
return 0;
})
);
}
}
localScaleComponents[channel].setWithExplicit(property, valueWithExplicit);
}
}
export function bins(model: Model, fieldDef: TypedFieldDef<string>) {
const bin = fieldDef.bin;
if (isBinning(bin)) {
const binSignal = getBinSignalName(model, fieldDef.field, bin);
return new SignalRefWrapper(() => {
return model.getSignalName(binSignal);
});
} else if (isBinned(bin) && isBinParams(bin) && bin.step !== undefined) {
// start and stop will be determined from the scale domain
return {
step: bin.step
};
}
return undefined;
}
export function interpolate(channel: ScaleChannel, type: Type): Scale['interpolate'] {
if (contains([COLOR, FILL, STROKE], channel) && type !== 'nominal') {
return 'hcl';
}
return undefined;
}
export function nice(
scaleType: ScaleType,
channel: ScaleChannel,
specifiedDomain: Domain,
domainMin: Scale['domainMin'],
domainMax: Scale['domainMax'],
fieldOrDatumDef: TypedFieldDef<string> | ScaleDatumDef
): boolean | TimeInterval {
if (
getFieldDef(fieldOrDatumDef)?.bin ||
isArray(specifiedDomain) ||
domainMax != null ||
domainMin != null ||
util.contains([ScaleType.TIME, ScaleType.UTC], scaleType)
) {
return undefined;
}
return isXorY(channel) ? true : undefined;
}
export function padding(
channel: ScaleChannel,
scaleType: ScaleType,
scaleConfig: ScaleConfig<SignalRef>,
fieldOrDatumDef: TypedFieldDef<string> | ScaleDatumDef,
markDef: MarkDef<Mark, SignalRef>,
barConfig: RectConfig<SignalRef>
) {
if (isXorY(channel)) {
if (isContinuousToContinuous(scaleType)) {
if (scaleConfig.continuousPadding !== undefined) {
return scaleConfig.continuousPadding;
}
const {type, orient} = markDef;
if (type === 'bar' && !(isFieldDef(fieldOrDatumDef) && (fieldOrDatumDef.bin || fieldOrDatumDef.timeUnit))) {
if ((orient === 'vertical' && channel === 'x') || (orient === 'horizontal' && channel === 'y')) {
return barConfig.continuousBandSize;
}
}
}
if (scaleType === ScaleType.POINT) {
return scaleConfig.pointPadding;
}
}
return undefined;
}
export function paddingInner(
paddingValue: number | SignalRef,
channel: ScaleChannel,
mark: Mark,
scaleType: ScaleType,
scaleConfig: ScaleConfig<SignalRef>,
hasNestedOffsetScale = false
) | {
if (paddingValue !== undefined) {
// If user has already manually specified "padding", no need to add default paddingInner.
return undefined;
}
if (isXorY(channel)) {
// Padding is only set for X and Y by default.
// Basically it doesn't make sense to add padding for color and size.
// paddingOuter would only be called if it's a band scale, just return the default for bandScale.
const {bandPaddingInner, barBandPaddingInner, rectBandPaddingInner, bandWithNestedOffsetPaddingInner} = scaleConfig;
if (hasNestedOffsetScale) {
return bandWithNestedOffsetPaddingInner;
}
return getFirstDefined(bandPaddingInner, mark === 'bar' ? barBandPaddingInner : rectBandPaddingInner);
} else if (isXorYOffset(channel)) {
if (scaleType === ScaleType.BAND) { | identifier_body | |
io.rs | }
}
fn next(&mut self) -> Result<Option<H>, Error> {
let size = std::mem::size_of::<H>();
if self.queue.is_empty() {
let mut buf = vec![0; size * Self::BATCH_SIZE];
let from = self.file.seek(io::SeekFrom::Start(self.index))?;
match self.file.read_exact(&mut buf) {
Ok(()) => {}
Err(err) if err.kind() == io::ErrorKind::UnexpectedEof => |
Err(err) => return Err(err.into()),
}
self.index += buf.len() as u64;
let items = buf.len() / size;
let mut cursor = io::Cursor::new(buf);
let mut item = vec![0; size];
for _ in 0..items {
cursor.read_exact(&mut item)?;
let item = H::consensus_decode(&mut item.as_slice())?;
self.queue.push_back(item);
}
}
Ok(self.queue.pop_front())
}
}
/// An iterator over block headers in a file.
#[derive(Debug)]
pub struct Iter<H> {
height: Height,
file: FileReader<H>,
}
impl<H: Decodable> Iter<H> {
fn new(file: fs::File) -> Self {
Self {
file: FileReader::new(file),
height: 1,
}
}
}
impl<H: Decodable> Iterator for Iter<H> {
type Item = Result<(Height, H), Error>;
fn next(&mut self) -> Option<Self::Item> {
let height = self.height;
assert!(height > 0);
match self.file.next() {
// If we hit this branch, it's because we're trying to read passed the end
// of the file, which means there are no further headers remaining.
Err(Error::Io(err)) if err.kind() == io::ErrorKind::UnexpectedEof => None,
// If another kind of error occurs, we want to yield it to the caller, so
// that it can be propagated.
Err(err) => Some(Err(err)),
Ok(Some(header)) => {
self.height = height + 1;
Some(Ok((height, header)))
}
Ok(None) => None,
}
}
}
/// A `Store` backed by a single file.
#[derive(Debug)]
pub struct File<H> {
file: fs::File,
genesis: H,
}
impl<H> File<H> {
/// Open a new file store from the given path and genesis header.
pub fn open<P: AsRef<Path>>(path: P, genesis: H) -> io::Result<Self> {
fs::OpenOptions::new()
.create(true)
.read(true)
.append(true)
.open(path)
.map(|file| Self { file, genesis })
}
/// Create a new file store at the given path, with the provided genesis header.
pub fn create<P: AsRef<Path>>(path: P, genesis: H) -> Result<Self, Error> {
let file = fs::OpenOptions::new()
.create_new(true)
.read(true)
.append(true)
.open(path)?;
Ok(Self { file, genesis })
}
}
impl<H: 'static + Copy + Encodable + Decodable> Store for File<H> {
type Header = H;
/// Get the genesis block.
fn genesis(&self) -> H {
self.genesis
}
/// Append a block to the end of the file.
fn put<I: Iterator<Item = Self::Header>>(&mut self, headers: I) -> Result<Height, Error> {
self::put(&mut self.file, headers)
}
/// Get the block at the given height. Returns `io::ErrorKind::UnexpectedEof` if
/// the height is not found.
fn get(&self, height: Height) -> Result<H, Error> {
if let Some(ix) = height.checked_sub(1) {
// Clone so this function doesn't have to take a `&mut self`.
let mut file = self.file.try_clone()?;
get(&mut file, ix)
} else {
Ok(self.genesis)
}
}
/// Rollback the chain to the given height. Behavior is undefined if the given
/// height is not contained in the store.
fn rollback(&mut self, height: Height) -> Result<(), Error> {
let size = mem::size_of::<H>();
self.file
.set_len((height) * size as u64)
.map_err(Error::from)
}
/// Flush changes to disk.
fn sync(&mut self) -> Result<(), Error> {
self.file.sync_data().map_err(Error::from)
}
/// Iterate over all headers in the store.
fn iter(&self) -> Box<dyn Iterator<Item = Result<(Height, H), Error>>> {
// Clone so this function doesn't have to take a `&mut self`.
match self.file.try_clone() {
Ok(file) => Box::new(iter::once(Ok((0, self.genesis))).chain(Iter::new(file))),
Err(err) => Box::new(iter::once(Err(Error::Io(err)))),
}
}
/// Return the number of headers in the store.
fn len(&self) -> Result<usize, Error> {
let meta = self.file.metadata()?;
let len = meta.len();
let size = mem::size_of::<H>();
assert!(len <= usize::MAX as u64);
if len as usize % size != 0 {
return Err(Error::Corruption);
}
Ok(len as usize / size + 1)
}
/// Return the block height of the store.
fn height(&self) -> Result<Height, Error> {
self.len().map(|n| n as Height - 1)
}
/// Check the file store integrity.
fn check(&self) -> Result<(), Error> {
self.len().map(|_| ())
}
/// Attempt to heal data corruption.
fn heal(&self) -> Result<(), Error> {
let meta = self.file.metadata()?;
let len = meta.len();
let size = mem::size_of::<H>();
assert!(len <= usize::MAX as u64);
let extraneous = len as usize % size;
if extraneous != 0 {
self.file.set_len(len - extraneous as u64)?;
}
Ok(())
}
}
#[cfg(test)]
mod test {
use std::{io, iter};
use nakamoto_common::bitcoin::TxMerkleNode;
use nakamoto_common::bitcoin_hashes::Hash;
use nakamoto_common::block::BlockHash;
use super::{Error, File, Height, Store};
use crate::block::BlockHeader;
const HEADER_SIZE: usize = 80;
fn store(path: &str) -> File<BlockHeader> {
let tmp = tempfile::tempdir().unwrap();
let genesis = BlockHeader {
version: 1,
prev_blockhash: BlockHash::all_zeros(),
merkle_root: TxMerkleNode::all_zeros(),
bits: 0x2ffffff,
time: 39123818,
nonce: 0,
};
File::open(tmp.path().join(path), genesis).unwrap()
}
#[test]
fn test_put_get() {
let mut store = store("headers.db");
let header = BlockHeader {
version: 1,
prev_blockhash: store.genesis.block_hash(),
merkle_root: TxMerkleNode::all_zeros(),
bits: 0x2ffffff,
time: 1842918273,
nonce: 312143,
};
assert_eq!(
store.get(0).unwrap(),
store.genesis,
"when the store is empty, we can `get` the genesis"
);
assert!(
store.get(1).is_err(),
"when the store is empty, we can't get height `1`"
);
let height = store.put(iter::once(header)).unwrap();
store.sync().unwrap();
assert_eq!(height, 1);
assert_eq!(store.get(height).unwrap(), header);
}
#[test]
fn test_put_get_batch() {
let mut store = store("headers.db");
assert_eq!(store.len().unwrap(), 1);
let count = 32;
let header = BlockHeader {
version: 1,
prev_blockhash: store.genesis().block_hash(),
merkle_root: TxMerkleNode::all_zeros(),
bits: 0x2ffffff,
time: 1842918273,
nonce: 0,
};
let iter = (0..count).map(|i| BlockHeader { nonce: i, ..header });
let headers = iter.clone().collect::<Vec<_>>();
// Put all headers into the store and check that we can retrieve them.
{
let height = store.put(iter).unwrap();
assert_eq!(height, headers.len() as Height);
assert_eq!(store.len().unwrap(), headers.len() + 1); // Account for genesis.
for | {
self.file.seek(io::SeekFrom::Start(from))?;
let n = self.file.read_to_end(&mut buf)?;
buf.truncate(n);
} | conditional_block |
io.rs | }
}
fn next(&mut self) -> Result<Option<H>, Error> {
let size = std::mem::size_of::<H>();
if self.queue.is_empty() {
let mut buf = vec![0; size * Self::BATCH_SIZE];
let from = self.file.seek(io::SeekFrom::Start(self.index))?;
match self.file.read_exact(&mut buf) {
Ok(()) => {}
Err(err) if err.kind() == io::ErrorKind::UnexpectedEof => {
self.file.seek(io::SeekFrom::Start(from))?;
let n = self.file.read_to_end(&mut buf)?;
buf.truncate(n);
}
Err(err) => return Err(err.into()),
}
self.index += buf.len() as u64;
let items = buf.len() / size;
let mut cursor = io::Cursor::new(buf);
let mut item = vec![0; size];
| }
}
Ok(self.queue.pop_front())
}
}
/// An iterator over block headers in a file.
#[derive(Debug)]
pub struct Iter<H> {
height: Height,
file: FileReader<H>,
}
impl<H: Decodable> Iter<H> {
fn new(file: fs::File) -> Self {
Self {
file: FileReader::new(file),
height: 1,
}
}
}
impl<H: Decodable> Iterator for Iter<H> {
type Item = Result<(Height, H), Error>;
fn next(&mut self) -> Option<Self::Item> {
let height = self.height;
assert!(height > 0);
match self.file.next() {
// If we hit this branch, it's because we're trying to read passed the end
// of the file, which means there are no further headers remaining.
Err(Error::Io(err)) if err.kind() == io::ErrorKind::UnexpectedEof => None,
// If another kind of error occurs, we want to yield it to the caller, so
// that it can be propagated.
Err(err) => Some(Err(err)),
Ok(Some(header)) => {
self.height = height + 1;
Some(Ok((height, header)))
}
Ok(None) => None,
}
}
}
/// A `Store` backed by a single file.
#[derive(Debug)]
pub struct File<H> {
file: fs::File,
genesis: H,
}
impl<H> File<H> {
/// Open a new file store from the given path and genesis header.
pub fn open<P: AsRef<Path>>(path: P, genesis: H) -> io::Result<Self> {
fs::OpenOptions::new()
.create(true)
.read(true)
.append(true)
.open(path)
.map(|file| Self { file, genesis })
}
/// Create a new file store at the given path, with the provided genesis header.
pub fn create<P: AsRef<Path>>(path: P, genesis: H) -> Result<Self, Error> {
let file = fs::OpenOptions::new()
.create_new(true)
.read(true)
.append(true)
.open(path)?;
Ok(Self { file, genesis })
}
}
impl<H: 'static + Copy + Encodable + Decodable> Store for File<H> {
type Header = H;
/// Get the genesis block.
fn genesis(&self) -> H {
self.genesis
}
/// Append a block to the end of the file.
fn put<I: Iterator<Item = Self::Header>>(&mut self, headers: I) -> Result<Height, Error> {
self::put(&mut self.file, headers)
}
/// Get the block at the given height. Returns `io::ErrorKind::UnexpectedEof` if
/// the height is not found.
fn get(&self, height: Height) -> Result<H, Error> {
if let Some(ix) = height.checked_sub(1) {
// Clone so this function doesn't have to take a `&mut self`.
let mut file = self.file.try_clone()?;
get(&mut file, ix)
} else {
Ok(self.genesis)
}
}
/// Rollback the chain to the given height. Behavior is undefined if the given
/// height is not contained in the store.
fn rollback(&mut self, height: Height) -> Result<(), Error> {
let size = mem::size_of::<H>();
self.file
.set_len((height) * size as u64)
.map_err(Error::from)
}
/// Flush changes to disk.
fn sync(&mut self) -> Result<(), Error> {
self.file.sync_data().map_err(Error::from)
}
/// Iterate over all headers in the store.
fn iter(&self) -> Box<dyn Iterator<Item = Result<(Height, H), Error>>> {
// Clone so this function doesn't have to take a `&mut self`.
match self.file.try_clone() {
Ok(file) => Box::new(iter::once(Ok((0, self.genesis))).chain(Iter::new(file))),
Err(err) => Box::new(iter::once(Err(Error::Io(err)))),
}
}
/// Return the number of headers in the store.
fn len(&self) -> Result<usize, Error> {
let meta = self.file.metadata()?;
let len = meta.len();
let size = mem::size_of::<H>();
assert!(len <= usize::MAX as u64);
if len as usize % size != 0 {
return Err(Error::Corruption);
}
Ok(len as usize / size + 1)
}
/// Return the block height of the store.
fn height(&self) -> Result<Height, Error> {
self.len().map(|n| n as Height - 1)
}
/// Check the file store integrity.
fn check(&self) -> Result<(), Error> {
self.len().map(|_| ())
}
/// Attempt to heal data corruption.
fn heal(&self) -> Result<(), Error> {
let meta = self.file.metadata()?;
let len = meta.len();
let size = mem::size_of::<H>();
assert!(len <= usize::MAX as u64);
let extraneous = len as usize % size;
if extraneous != 0 {
self.file.set_len(len - extraneous as u64)?;
}
Ok(())
}
}
#[cfg(test)]
mod test {
use std::{io, iter};
use nakamoto_common::bitcoin::TxMerkleNode;
use nakamoto_common::bitcoin_hashes::Hash;
use nakamoto_common::block::BlockHash;
use super::{Error, File, Height, Store};
use crate::block::BlockHeader;
const HEADER_SIZE: usize = 80;
fn store(path: &str) -> File<BlockHeader> {
let tmp = tempfile::tempdir().unwrap();
let genesis = BlockHeader {
version: 1,
prev_blockhash: BlockHash::all_zeros(),
merkle_root: TxMerkleNode::all_zeros(),
bits: 0x2ffffff,
time: 39123818,
nonce: 0,
};
File::open(tmp.path().join(path), genesis).unwrap()
}
#[test]
fn test_put_get() {
let mut store = store("headers.db");
let header = BlockHeader {
version: 1,
prev_blockhash: store.genesis.block_hash(),
merkle_root: TxMerkleNode::all_zeros(),
bits: 0x2ffffff,
time: 1842918273,
nonce: 312143,
};
assert_eq!(
store.get(0).unwrap(),
store.genesis,
"when the store is empty, we can `get` the genesis"
);
assert!(
store.get(1).is_err(),
"when the store is empty, we can't get height `1`"
);
let height = store.put(iter::once(header)).unwrap();
store.sync().unwrap();
assert_eq!(height, 1);
assert_eq!(store.get(height).unwrap(), header);
}
#[test]
fn test_put_get_batch() {
let mut store = store("headers.db");
assert_eq!(store.len().unwrap(), 1);
let count = 32;
let header = BlockHeader {
version: 1,
prev_blockhash: store.genesis().block_hash(),
merkle_root: TxMerkleNode::all_zeros(),
bits: 0x2ffffff,
time: 1842918273,
nonce: 0,
};
let iter = (0..count).map(|i| BlockHeader { nonce: i, ..header });
let headers = iter.clone().collect::<Vec<_>>();
// Put all headers into the store and check that we can retrieve them.
{
let height = store.put(iter).unwrap();
assert_eq!(height, headers.len() as Height);
assert_eq!(store.len().unwrap(), headers.len() + 1); // Account for genesis.
for ( | for _ in 0..items {
cursor.read_exact(&mut item)?;
let item = H::consensus_decode(&mut item.as_slice())?;
self.queue.push_back(item); | random_line_split |
io.rs | }
}
fn next(&mut self) -> Result<Option<H>, Error> {
let size = std::mem::size_of::<H>();
if self.queue.is_empty() {
let mut buf = vec![0; size * Self::BATCH_SIZE];
let from = self.file.seek(io::SeekFrom::Start(self.index))?;
match self.file.read_exact(&mut buf) {
Ok(()) => {}
Err(err) if err.kind() == io::ErrorKind::UnexpectedEof => {
self.file.seek(io::SeekFrom::Start(from))?;
let n = self.file.read_to_end(&mut buf)?;
buf.truncate(n);
}
Err(err) => return Err(err.into()),
}
self.index += buf.len() as u64;
let items = buf.len() / size;
let mut cursor = io::Cursor::new(buf);
let mut item = vec![0; size];
for _ in 0..items {
cursor.read_exact(&mut item)?;
let item = H::consensus_decode(&mut item.as_slice())?;
self.queue.push_back(item);
}
}
Ok(self.queue.pop_front())
}
}
/// An iterator over block headers in a file.
#[derive(Debug)]
pub struct Iter<H> {
height: Height,
file: FileReader<H>,
}
impl<H: Decodable> Iter<H> {
fn new(file: fs::File) -> Self {
Self {
file: FileReader::new(file),
height: 1,
}
}
}
impl<H: Decodable> Iterator for Iter<H> {
type Item = Result<(Height, H), Error>;
fn | (&mut self) -> Option<Self::Item> {
let height = self.height;
assert!(height > 0);
match self.file.next() {
// If we hit this branch, it's because we're trying to read passed the end
// of the file, which means there are no further headers remaining.
Err(Error::Io(err)) if err.kind() == io::ErrorKind::UnexpectedEof => None,
// If another kind of error occurs, we want to yield it to the caller, so
// that it can be propagated.
Err(err) => Some(Err(err)),
Ok(Some(header)) => {
self.height = height + 1;
Some(Ok((height, header)))
}
Ok(None) => None,
}
}
}
/// A `Store` backed by a single file.
#[derive(Debug)]
pub struct File<H> {
file: fs::File,
genesis: H,
}
impl<H> File<H> {
/// Open a new file store from the given path and genesis header.
pub fn open<P: AsRef<Path>>(path: P, genesis: H) -> io::Result<Self> {
fs::OpenOptions::new()
.create(true)
.read(true)
.append(true)
.open(path)
.map(|file| Self { file, genesis })
}
/// Create a new file store at the given path, with the provided genesis header.
pub fn create<P: AsRef<Path>>(path: P, genesis: H) -> Result<Self, Error> {
let file = fs::OpenOptions::new()
.create_new(true)
.read(true)
.append(true)
.open(path)?;
Ok(Self { file, genesis })
}
}
impl<H: 'static + Copy + Encodable + Decodable> Store for File<H> {
type Header = H;
/// Get the genesis block.
fn genesis(&self) -> H {
self.genesis
}
/// Append a block to the end of the file.
fn put<I: Iterator<Item = Self::Header>>(&mut self, headers: I) -> Result<Height, Error> {
self::put(&mut self.file, headers)
}
/// Get the block at the given height. Returns `io::ErrorKind::UnexpectedEof` if
/// the height is not found.
fn get(&self, height: Height) -> Result<H, Error> {
if let Some(ix) = height.checked_sub(1) {
// Clone so this function doesn't have to take a `&mut self`.
let mut file = self.file.try_clone()?;
get(&mut file, ix)
} else {
Ok(self.genesis)
}
}
/// Rollback the chain to the given height. Behavior is undefined if the given
/// height is not contained in the store.
fn rollback(&mut self, height: Height) -> Result<(), Error> {
let size = mem::size_of::<H>();
self.file
.set_len((height) * size as u64)
.map_err(Error::from)
}
/// Flush changes to disk.
fn sync(&mut self) -> Result<(), Error> {
self.file.sync_data().map_err(Error::from)
}
/// Iterate over all headers in the store.
fn iter(&self) -> Box<dyn Iterator<Item = Result<(Height, H), Error>>> {
// Clone so this function doesn't have to take a `&mut self`.
match self.file.try_clone() {
Ok(file) => Box::new(iter::once(Ok((0, self.genesis))).chain(Iter::new(file))),
Err(err) => Box::new(iter::once(Err(Error::Io(err)))),
}
}
/// Return the number of headers in the store.
fn len(&self) -> Result<usize, Error> {
let meta = self.file.metadata()?;
let len = meta.len();
let size = mem::size_of::<H>();
assert!(len <= usize::MAX as u64);
if len as usize % size != 0 {
return Err(Error::Corruption);
}
Ok(len as usize / size + 1)
}
/// Return the block height of the store.
fn height(&self) -> Result<Height, Error> {
self.len().map(|n| n as Height - 1)
}
/// Check the file store integrity.
fn check(&self) -> Result<(), Error> {
self.len().map(|_| ())
}
/// Attempt to heal data corruption.
fn heal(&self) -> Result<(), Error> {
let meta = self.file.metadata()?;
let len = meta.len();
let size = mem::size_of::<H>();
assert!(len <= usize::MAX as u64);
let extraneous = len as usize % size;
if extraneous != 0 {
self.file.set_len(len - extraneous as u64)?;
}
Ok(())
}
}
#[cfg(test)]
mod test {
use std::{io, iter};
use nakamoto_common::bitcoin::TxMerkleNode;
use nakamoto_common::bitcoin_hashes::Hash;
use nakamoto_common::block::BlockHash;
use super::{Error, File, Height, Store};
use crate::block::BlockHeader;
const HEADER_SIZE: usize = 80;
fn store(path: &str) -> File<BlockHeader> {
let tmp = tempfile::tempdir().unwrap();
let genesis = BlockHeader {
version: 1,
prev_blockhash: BlockHash::all_zeros(),
merkle_root: TxMerkleNode::all_zeros(),
bits: 0x2ffffff,
time: 39123818,
nonce: 0,
};
File::open(tmp.path().join(path), genesis).unwrap()
}
#[test]
fn test_put_get() {
let mut store = store("headers.db");
let header = BlockHeader {
version: 1,
prev_blockhash: store.genesis.block_hash(),
merkle_root: TxMerkleNode::all_zeros(),
bits: 0x2ffffff,
time: 1842918273,
nonce: 312143,
};
assert_eq!(
store.get(0).unwrap(),
store.genesis,
"when the store is empty, we can `get` the genesis"
);
assert!(
store.get(1).is_err(),
"when the store is empty, we can't get height `1`"
);
let height = store.put(iter::once(header)).unwrap();
store.sync().unwrap();
assert_eq!(height, 1);
assert_eq!(store.get(height).unwrap(), header);
}
#[test]
fn test_put_get_batch() {
let mut store = store("headers.db");
assert_eq!(store.len().unwrap(), 1);
let count = 32;
let header = BlockHeader {
version: 1,
prev_blockhash: store.genesis().block_hash(),
merkle_root: TxMerkleNode::all_zeros(),
bits: 0x2ffffff,
time: 1842918273,
nonce: 0,
};
let iter = (0..count).map(|i| BlockHeader { nonce: i, ..header });
let headers = iter.clone().collect::<Vec<_>>();
// Put all headers into the store and check that we can retrieve them.
{
let height = store.put(iter).unwrap();
assert_eq!(height, headers.len() as Height);
assert_eq!(store.len().unwrap(), headers.len() + 1); // Account for genesis.
for | next | identifier_name |
tweet_utils.py | length of generated query strings,
depending on account type it might be 400 or 1000
:additional_query_parameters
:return :list[string] of generated query strings
'''
queries = []
query = keywords[0]
for keyword in keywords[1:]:
tmp_query = '{} OR "{}"'.format(query, keyword)
if len(tmp_query + additional_query_parameters) > max_query_lenght:
queries.append(f'{tmp_query} {additional_query_parameters}')
query = f'"{keyword}"'
continue
query = tmp_query
queries.append(f'{tmp_query} {additional_query_parameters}')
return queries
def splitQueries(declensionsDf,prefix,writeToFile = True):
'''
Function to take a DataFrame of keywords and
combine with OR operators to make a series of
queries under 1024 characters. Optionally write
the queries to a series of files
'''
n = 0
lastN = 0
nFile = 0
tempQ = ''
qs = []
print('Splitting queries')
if writeToFile:
path = input('Enter path stem (query_{:s}[_<n>.csv])'.format(prefix))
if path == '':
path = 'query_{:s}'.format(prefix)
cleanPath = 'n'
cleanPath = input('Clean existing query files? (y/N)').lower()
if cleanPath in ['','y']:
cleanPath = True
else:
cleanPath = False
if cleanPath:
print('Removing {:s}*'.format(path))
for file in glob.glob('{:s}*'.format(path)):
os.remove(file)
print('Shape:',declensionsDf.shape[0])
declensionsDf[0] = parseOperators(declensionsDf)
while n < declensionsDf.shape[0]:
tempQ = ' OR '.join(declensionsDf[0].values[lastN:n])
if len(tempQ) > 1024:
qs.append(' OR '.join(declensionsDf[0].values[lastN:n-1]))
if writeToFile:
print('Writing to file : ' + '{:s}_{:d}.csv'.format(path,nFile))
queryFileName = '{:s}_{:d}.csv'.format(path,nFile)
with open(queryFileName,'w') as outFile:
outFile.writelines(qs[-1])
print('\tLength written {:d}'.format(len(qs[-1])))
print()
lastN = n
n-=1
nFile+=1
n+=1
if nFile == 0:
# In case all keywords fit in one 1024 query string
qs.append(' OR '.join(declensionsDf[0].values[lastN:n-1]))
if writeToFile:
print('Writing to file : ' + '{:s}_{:d}.csv'.format(path,nFile))
queryFileName = '{:s}_{:d}.csv'.format(path,nFile)
with open(queryFileName,'w') as outFile:
outFile.writelines(qs[-1])
print('\tLength written {:d}'.format(len(qs[-1])))
print()
return qs
def makeComplexQuery(denoise = False):
'''
Function to create a query for input into
Twitter search API based on keywords read
from files.
Returns list of query strings @qs and
language prefix @prefix
'''
print('Making complex query...')
prefix = ''
while not prefix in ['az','hy']:
prefix = input('Which query (AZ/hy)?')
if prefix == '':
prefix = 'az'
print(prefix+' chosen')
print('Getting list of declined keywords...')
fileName = ''
fileName = input('Enter file path for keywords(default: {:s}_declensions.csv)'.format(prefix))
if fileName == '':
fileName = '{:s}_declensions.csv'.format(prefix)
print('Reading declined keywords file...')
declensionsDf = pd.read_csv(fileName,header=None,sep = '\t')
declensionsDf.iloc[:,0] = declensionsDf.iloc[:,0]
print('Got {:d} keywords'.format(declensionsDf.shape[0]))
if denoise:
declensionsDf = removeNoisyTerms(declensionsDf)
qs = splitQueries(declensionsDf,prefix)
return qs,prefix
def getTokens(df,drop = False):
'''
Convenience function to deal with the paging
information added into results returned
Returns @tokenDf and @df, with tokens and tweets
respectively
'''
if 'newest_id' in df.columns:
tokenDf = df[~pd.isna(df['newest_id'])]
if drop:
df = df[pd.isna(df['newest_id'])]
return tokenDf,df
else:
return pd.DataFrame(),df
def executeQueries(qs,prefix,startTime,search_args,period = '1 days',nResults = 100000,verbose = True, results_per_call= 100):
'''
Main routine to execute requests against search API
for each query string. Some logic required to make sure
each query backfills desired time period.
---------------------------------
Requires
@qs - list of query strings
@prefix - language codes
@startTime - datetime of latest date to grab
@period - time to backfill
@search_args - credentials object for API
Returns a list of DataFrames @dfs
'''
dfs = [pd.DataFrame()]*len(qs)
# Make one empty dataframe for each query
# We will append to each one
#nResults = 10000
for n,q in enumerate(qs):
print('Query {:d} of {:d}...'.format(n,len(qs)))
endTime = startTime + pd.to_timedelta(period)
query = gen_request_parameters(q, False, results_per_call=results_per_call,tweet_fields='text,author_id,id,created_at', start_time=startTime.isoformat()[0:10],end_time=endTime.isoformat()[0:10])
results = collect_results(query,max_tweets=nResults,result_stream_args=search_args)
# Grab first batch of tweets to see how close to backfilling we get
print('Grabbing first tweets')
if len(results) > 0:
# Check there is at least one match
tweets = results[:-1]
metadata = results[-1]
df = pd.DataFrame(data = tweets)
df.set_index(pd.to_datetime(df['created_at']),inplace=True)
tokenDf,df = getTokens(df)
# Get rid of the tokens for now
if verbose:
print('Got {:d} tweets'.format(df.shape[0]))
dfs[n] = dfs[n].append(df)
# Add the new tweets to the array
if verbose:
print('Takes us to',df.index[-1].isoformat()[0:-6])
breakOut = False
startTimeOffset = pd.to_timedelta('0 days')
# We need this flag to break the while loop
# for when the day ranges shift
while df.index[-1] > startTime:
# Keep grabbing tweets for this query
# Until entire date range is backfilled
print(df.index[-1])
print(startTime)
endTime = df.index[-1]
if (endTime - startTime).days == 0:
startTimeOffset = pd.to_timedelta('1 hours')
# Nudge the start date back by an hour
# To make sure that start is always before end
# Or API returns error
if verbose:
print('We need more tweets to look further back (to {:s})'.format(startTime.isoformat()[0:10]))
print('Querying with:')
print('startTime',(startTime - startTimeOffset).isoformat()[0:19])
print('endTime',endTime.isoformat()[0:19])
query = gen_request_parameters(q, results_per_call=results_per_call,tweet_fields='text,author_id,id,created_at', start_time=(startTime - startTimeOffset).isoformat()[0:10], end_time=endTime.to_pydatetime().strftime("%Y-%m-%d %H:%M"))
results = collect_results(query,max_tweets=nResults,result_stream_args=search_args)
# Grab 1k tweets first to see how far it goes
if len(results) > 0:
tweets = results[:-1]
metadata = results[-1]
df = pd.DataFrame(data = tweets)
df.set_index(pd.to_datetime(df['created_at']),inplace=True)
tokenDf,df = getTokens(df,drop = True)
# Get rid of the tokens for now
dfs[n] = dfs[n].append(df)
if verbose:
print('Takes us to',dfs[n].index[-1].isoformat())
print('{:d} tweets so far'.format(dfs[n].shape[0]))
print() | else:
print('No results....')
dfs[n] = dfs[n].append(pd.DataFrame())
breakOut = True
| random_line_split | |
tweet_utils.py | removeNoisyTerms(df,noisyTerms = ['veteran','truce']):
'''
Removes a set of noisy terms from DataFrame with
all declined keywords
Returns @df
'''
removeNoisy = input('Remove noisy terms? ({:s}) (Y/n)'.format(','.join(noisyTerms)))
if removeNoisy == 'n':
removeNoisy = False
print('Not removing')
else:
removeNoisy == True
print('Removing')
df = df[~df[0].isin(noisyTerms)]
return df
def splitQueriesSimple(keywords, max_query_lenght = 400, additional_query_parameters = ''):
'''
Simpler verstion to generate the query strings from list of a keywords
:param keywords: list[string] list of keywords
:param max_query_lenght: int the length of generated query strings,
depending on account type it might be 400 or 1000
:additional_query_parameters
:return :list[string] of generated query strings
'''
queries = []
query = keywords[0]
for keyword in keywords[1:]:
tmp_query = '{} OR "{}"'.format(query, keyword)
if len(tmp_query + additional_query_parameters) > max_query_lenght:
queries.append(f'{tmp_query} {additional_query_parameters}')
query = f'"{keyword}"'
continue
query = tmp_query
queries.append(f'{tmp_query} {additional_query_parameters}')
return queries
def splitQueries(declensionsDf,prefix,writ | ue):
'''
Function to take a DataFrame of keywords and
combine with OR operators to make a series of
queries under 1024 characters. Optionally write
the queries to a series of files
'''
n = 0
lastN = 0
nFile = 0
tempQ = ''
qs = []
print('Splitting queries')
if writeToFile:
path = input('Enter path stem (query_{:s}[_<n>.csv])'.format(prefix))
if path == '':
path = 'query_{:s}'.format(prefix)
cleanPath = 'n'
cleanPath = input('Clean existing query files? (y/N)').lower()
if cleanPath in ['','y']:
cleanPath = True
else:
cleanPath = False
if cleanPath:
print('Removing {:s}*'.format(path))
for file in glob.glob('{:s}*'.format(path)):
os.remove(file)
print('Shape:',declensionsDf.shape[0])
declensionsDf[0] = parseOperators(declensionsDf)
while n < declensionsDf.shape[0]:
tempQ = ' OR '.join(declensionsDf[0].values[lastN:n])
if len(tempQ) > 1024:
qs.append(' OR '.join(declensionsDf[0].values[lastN:n-1]))
if writeToFile:
print('Writing to file : ' + '{:s}_{:d}.csv'.format(path,nFile))
queryFileName = '{:s}_{:d}.csv'.format(path,nFile)
with open(queryFileName,'w') as outFile:
outFile.writelines(qs[-1])
print('\tLength written {:d}'.format(len(qs[-1])))
print()
lastN = n
n-=1
nFile+=1
n+=1
if nFile == 0:
# In case all keywords fit in one 1024 query string
qs.append(' OR '.join(declensionsDf[0].values[lastN:n-1]))
if writeToFile:
print('Writing to file : ' + '{:s}_{:d}.csv'.format(path,nFile))
queryFileName = '{:s}_{:d}.csv'.format(path,nFile)
with open(queryFileName,'w') as outFile:
outFile.writelines(qs[-1])
print('\tLength written {:d}'.format(len(qs[-1])))
print()
return qs
def makeComplexQuery(denoise = False):
'''
Function to create a query for input into
Twitter search API based on keywords read
from files.
Returns list of query strings @qs and
language prefix @prefix
'''
print('Making complex query...')
prefix = ''
while not prefix in ['az','hy']:
prefix = input('Which query (AZ/hy)?')
if prefix == '':
prefix = 'az'
print(prefix+' chosen')
print('Getting list of declined keywords...')
fileName = ''
fileName = input('Enter file path for keywords(default: {:s}_declensions.csv)'.format(prefix))
if fileName == '':
fileName = '{:s}_declensions.csv'.format(prefix)
print('Reading declined keywords file...')
declensionsDf = pd.read_csv(fileName,header=None,sep = '\t')
declensionsDf.iloc[:,0] = declensionsDf.iloc[:,0]
print('Got {:d} keywords'.format(declensionsDf.shape[0]))
if denoise:
declensionsDf = removeNoisyTerms(declensionsDf)
qs = splitQueries(declensionsDf,prefix)
return qs,prefix
def getTokens(df,drop = False):
'''
Convenience function to deal with the paging
information added into results returned
Returns @tokenDf and @df, with tokens and tweets
respectively
'''
if 'newest_id' in df.columns:
tokenDf = df[~pd.isna(df['newest_id'])]
if drop:
df = df[pd.isna(df['newest_id'])]
return tokenDf,df
else:
return pd.DataFrame(),df
def executeQueries(qs,prefix,startTime,search_args,period = '1 days',nResults = 100000,verbose = True, results_per_call= 100):
'''
Main routine to execute requests against search API
for each query string. Some logic required to make sure
each query backfills desired time period.
---------------------------------
Requires
@qs - list of query strings
@prefix - language codes
@startTime - datetime of latest date to grab
@period - time to backfill
@search_args - credentials object for API
Returns a list of DataFrames @dfs
'''
dfs = [pd.DataFrame()]*len(qs)
# Make one empty dataframe for each query
# We will append to each one
#nResults = 10000
for n,q in enumerate(qs):
print('Query {:d} of {:d}...'.format(n,len(qs)))
endTime = startTime + pd.to_timedelta(period)
query = gen_request_parameters(q, False, results_per_call=results_per_call,tweet_fields='text,author_id,id,created_at', start_time=startTime.isoformat()[0:10],end_time=endTime.isoformat()[0:10])
results = collect_results(query,max_tweets=nResults,result_stream_args=search_args)
# Grab first batch of tweets to see how close to backfilling we get
print('Grabbing first tweets')
if len(results) > 0:
# Check there is at least one match
tweets = results[:-1]
metadata = results[-1]
df = pd.DataFrame(data = tweets)
df.set_index(pd.to_datetime(df['created_at']),inplace=True)
tokenDf,df = getTokens(df)
# Get rid of the tokens for now
if verbose:
print('Got {:d} tweets'.format(df.shape[0]))
dfs[n] = dfs[n].append(df)
# Add the new tweets to the array
if verbose:
print('Takes us to',df.index[-1].isoformat()[0:-6])
breakOut = False
startTimeOffset = pd.to_timedelta('0 days')
# We need this flag to break the while loop
# for when the day ranges shift
while df.index[-1] > startTime:
# Keep grabbing tweets for this query
# Until entire date range is backfilled
print(df.index[-1])
print(startTime)
endTime = df.index[-1]
if (endTime - startTime).days == 0:
startTimeOffset = pd.to_timedelta('1 hours')
# Nudge the start date back by an hour
# To make sure that start is always before end
# Or API returns error
if verbose:
print('We need more tweets to look further back (to {:s})'.format(startTime.isoformat()[0:10]))
print('Querying with:')
print('startTime',(startTime - startTimeOffset).isoformat()[0:19])
print('endTime',endTime.isoformat()[0:19])
query = gen_request_parameters(q, results_per_call=results_per_call,tweet_fields='text,author_id,id,created_at', start_time=(startTime - startTimeOffset).isoformat()[0:10], end_time=endTime.to_pydatetime().strftime("%Y-%m-%d %H | eToFile = Tr | identifier_name |
tweet_utils.py |
declensionsDf[0] = parseOperators(declensionsDf)
while n < declensionsDf.shape[0]:
tempQ = ' OR '.join(declensionsDf[0].values[lastN:n])
if len(tempQ) > 1024:
qs.append(' OR '.join(declensionsDf[0].values[lastN:n-1]))
if writeToFile:
print('Writing to file : ' + '{:s}_{:d}.csv'.format(path,nFile))
queryFileName = '{:s}_{:d}.csv'.format(path,nFile)
with open(queryFileName,'w') as outFile:
outFile.writelines(qs[-1])
print('\tLength written {:d}'.format(len(qs[-1])))
print()
lastN = n
n-=1
nFile+=1
n+=1
if nFile == 0:
# In case all keywords fit in one 1024 query string
qs.append(' OR '.join(declensionsDf[0].values[lastN:n-1]))
if writeToFile:
print('Writing to file : ' + '{:s}_{:d}.csv'.format(path,nFile))
queryFileName = '{:s}_{:d}.csv'.format(path,nFile)
with open(queryFileName,'w') as outFile:
outFile.writelines(qs[-1])
print('\tLength written {:d}'.format(len(qs[-1])))
print()
return qs
def makeComplexQuery(denoise = False):
'''
Function to create a query for input into
Twitter search API based on keywords read
from files.
Returns list of query strings @qs and
language prefix @prefix
'''
print('Making complex query...')
prefix = ''
while not prefix in ['az','hy']:
prefix = input('Which query (AZ/hy)?')
if prefix == '':
prefix = 'az'
print(prefix+' chosen')
print('Getting list of declined keywords...')
fileName = ''
fileName = input('Enter file path for keywords(default: {:s}_declensions.csv)'.format(prefix))
if fileName == '':
fileName = '{:s}_declensions.csv'.format(prefix)
print('Reading declined keywords file...')
declensionsDf = pd.read_csv(fileName,header=None,sep = '\t')
declensionsDf.iloc[:,0] = declensionsDf.iloc[:,0]
print('Got {:d} keywords'.format(declensionsDf.shape[0]))
if denoise:
declensionsDf = removeNoisyTerms(declensionsDf)
qs = splitQueries(declensionsDf,prefix)
return qs,prefix
def getTokens(df,drop = False):
'''
Convenience function to deal with the paging
information added into results returned
Returns @tokenDf and @df, with tokens and tweets
respectively
'''
if 'newest_id' in df.columns:
tokenDf = df[~pd.isna(df['newest_id'])]
if drop:
df = df[pd.isna(df['newest_id'])]
return tokenDf,df
else:
return pd.DataFrame(),df
def executeQueries(qs,prefix,startTime,search_args,period = '1 days',nResults = 100000,verbose = True, results_per_call= 100):
'''
Main routine to execute requests against search API
for each query string. Some logic required to make sure
each query backfills desired time period.
---------------------------------
Requires
@qs - list of query strings
@prefix - language codes
@startTime - datetime of latest date to grab
@period - time to backfill
@search_args - credentials object for API
Returns a list of DataFrames @dfs
'''
dfs = [pd.DataFrame()]*len(qs)
# Make one empty dataframe for each query
# We will append to each one
#nResults = 10000
for n,q in enumerate(qs):
print('Query {:d} of {:d}...'.format(n,len(qs)))
endTime = startTime + pd.to_timedelta(period)
query = gen_request_parameters(q, False, results_per_call=results_per_call,tweet_fields='text,author_id,id,created_at', start_time=startTime.isoformat()[0:10],end_time=endTime.isoformat()[0:10])
results = collect_results(query,max_tweets=nResults,result_stream_args=search_args)
# Grab first batch of tweets to see how close to backfilling we get
print('Grabbing first tweets')
if len(results) > 0:
# Check there is at least one match
tweets = results[:-1]
metadata = results[-1]
df = pd.DataFrame(data = tweets)
df.set_index(pd.to_datetime(df['created_at']),inplace=True)
tokenDf,df = getTokens(df)
# Get rid of the tokens for now
if verbose:
print('Got {:d} tweets'.format(df.shape[0]))
dfs[n] = dfs[n].append(df)
# Add the new tweets to the array
if verbose:
print('Takes us to',df.index[-1].isoformat()[0:-6])
breakOut = False
startTimeOffset = pd.to_timedelta('0 days')
# We need this flag to break the while loop
# for when the day ranges shift
while df.index[-1] > startTime:
# Keep grabbing tweets for this query
# Until entire date range is backfilled
print(df.index[-1])
print(startTime)
endTime = df.index[-1]
if (endTime - startTime).days == 0:
startTimeOffset = pd.to_timedelta('1 hours')
# Nudge the start date back by an hour
# To make sure that start is always before end
# Or API returns error
if verbose:
print('We need more tweets to look further back (to {:s})'.format(startTime.isoformat()[0:10]))
print('Querying with:')
print('startTime',(startTime - startTimeOffset).isoformat()[0:19])
print('endTime',endTime.isoformat()[0:19])
query = gen_request_parameters(q, results_per_call=results_per_call,tweet_fields='text,author_id,id,created_at', start_time=(startTime - startTimeOffset).isoformat()[0:10], end_time=endTime.to_pydatetime().strftime("%Y-%m-%d %H:%M"))
results = collect_results(query,max_tweets=nResults,result_stream_args=search_args)
# Grab 1k tweets first to see how far it goes
if len(results) > 0:
tweets = results[:-1]
metadata = results[-1]
df = pd.DataFrame(data = tweets)
df.set_index(pd.to_datetime(df['created_at']),inplace=True)
tokenDf,df = getTokens(df,drop = True)
# Get rid of the tokens for now
dfs[n] = dfs[n].append(df)
if verbose:
print('Takes us to',dfs[n].index[-1].isoformat())
print('{:d} tweets so far'.format(dfs[n].shape[0]))
print()
else:
print('No results....')
dfs[n] = dfs[n].append(pd.DataFrame())
breakOut = True
if breakOut:
print('Breaking out...')
break
print('Now we are done')
print('Got {:d} tweets in total'.format(dfs[n].shape[0]))
print('Between:')
print(dfs[n].index[0])
print(dfs[n].index[-1])
print('+++++++\n')
else:
print('No results...\n+++++++\n')
dfs.append(pd.DataFrame())
return dfs
def countTerms(text,stopWords = None):
'''
Convenience function to count terms in
an iterable of text (pandas series, list etc)
Returns @c counter object
'''
c = collections.Counter()
text = text.astype(str)
text.apply(lambda x:c.update(x.lower().split()))
if stopWords:
for sw in stopWords:
del c[sw]
return c
def writeData(dfs,prefix):
'''
Write dataframes with results to file
'''
stem = input('Enter data file stem (data_{:s}[_<n>.csv])'.format(prefix))
if stem == '':
stem = 'data_{:s}_'.format(prefix)
for n,df in enumerate(dfs):
fileName = '{:s}{:d}.csv'.format(stem,n)
df.to_csv(fileName)
#print('Print to',fileName)
def getMatchingKeywords(t,qs):
'''
Returns a list of keywords tha | t match a string
'''
matches = []
tokens = t.lower().split()
for q in qs:
for kw in q.split(' OR '):
if kw in tokens:
#print('MATCHED',kw)
matches.append(kw)
return matches
def queryToList(q):
'''
Conv | identifier_body | |
tweet_utils.py | NoisyTerms(df,noisyTerms = ['veteran','truce']):
'''
Removes a set of noisy terms from DataFrame with
all declined keywords
Returns @df
'''
removeNoisy = input('Remove noisy terms? ({:s}) (Y/n)'.format(','.join(noisyTerms)))
if removeNoisy == 'n':
removeNoisy = False
print('Not removing')
else:
removeNoisy == True
print('Removing')
df = df[~df[0].isin(noisyTerms)]
return df
def splitQueriesSimple(keywords, max_query_lenght = 400, additional_query_parameters = ''):
'''
Simpler verstion to generate the query strings from list of a keywords
:param keywords: list[string] list of keywords
:param max_query_lenght: int the length of generated query strings,
depending on account type it might be 400 or 1000
:additional_query_parameters
:return :list[string] of generated query strings
'''
queries = []
query = keywords[0]
for keyword in keywords[1:]:
tmp_query = '{} OR "{}"'.format(query, keyword)
if len(tmp_query + additional_query_parameters) > max_query_lenght:
queries.append(f'{tmp_query} {additional_query_parameters}')
query = f'"{keyword}"'
continue
query = tmp_query
queries.append(f'{tmp_query} {additional_query_parameters}')
return queries
def splitQueries(declensionsDf,prefix,writeToFile = True):
'''
Function to take a DataFrame of keywords and
combine with OR operators to make a series of
queries under 1024 characters. Optionally write
the queries to a series of files
'''
n = 0
lastN = 0
nFile = 0
tempQ = ''
qs = []
print('Splitting queries')
if writeToFile:
path = input('Enter path stem (query_{:s}[_<n>.csv])'.format(prefix))
if path == '':
path = 'query_{:s}'.format(prefix)
cleanPath = 'n'
cleanPath = input('Clean existing query files? (y/N)').lower()
if cleanPath in ['','y']:
cleanPath = True
else:
cleanPath = False
if cleanPath:
print('Removing {:s}*'.format(path))
for file in glob.glob('{:s}*'.format(path)):
os.remove(file)
print('Shape:',declensionsDf.shape[0])
declensionsDf[0] = parseOperators(declensionsDf)
while n < declensionsDf.shape[0]:
tempQ = ' OR '.join(declensionsDf[0].values[lastN:n])
if len(tempQ) > 1024:
qs.append(' OR '.join(declensionsDf[0].values[lastN:n-1]))
if writeToFile:
print('Writing to file : ' + '{:s}_{:d}.csv'.format(path,nFile))
queryFileName = '{:s}_{:d}.csv'.format(path,nFile)
with open(queryFileName,'w') as outFile:
outFile.writelines(qs[-1])
print('\tLength written {:d}'.format(len(qs[-1])))
print()
lastN = n
n-=1
nFile+=1
n+=1
if nFile == 0:
# In case all keywords fit in one 1024 query string
qs.append(' OR '.join(declensionsDf[0].values[lastN:n-1]))
if writeToFile:
print('Writing to file : ' + '{:s}_{:d}.csv'.format(path,nFile))
queryFileName = '{:s}_{:d}.csv'.format(path,nFile)
with open(queryFileName,'w') as outFile:
outFile.writelines(qs[-1])
print('\tLength written {:d}'.format(len(qs[-1])))
print()
return qs
def makeComplexQuery(denoise = False):
'''
Function to create a query for input into
Twitter search API based on keywords read
from files.
Returns list of query strings @qs and
language prefix @prefix
'''
print('Making complex query...')
prefix = ''
while not prefix in ['az','hy']:
prefix = input('Which query (AZ/hy)?') | eclined keywords...')
fileName = ''
fileName = input('Enter file path for keywords(default: {:s}_declensions.csv)'.format(prefix))
if fileName == '':
fileName = '{:s}_declensions.csv'.format(prefix)
print('Reading declined keywords file...')
declensionsDf = pd.read_csv(fileName,header=None,sep = '\t')
declensionsDf.iloc[:,0] = declensionsDf.iloc[:,0]
print('Got {:d} keywords'.format(declensionsDf.shape[0]))
if denoise:
declensionsDf = removeNoisyTerms(declensionsDf)
qs = splitQueries(declensionsDf,prefix)
return qs,prefix
def getTokens(df,drop = False):
'''
Convenience function to deal with the paging
information added into results returned
Returns @tokenDf and @df, with tokens and tweets
respectively
'''
if 'newest_id' in df.columns:
tokenDf = df[~pd.isna(df['newest_id'])]
if drop:
df = df[pd.isna(df['newest_id'])]
return tokenDf,df
else:
return pd.DataFrame(),df
def executeQueries(qs,prefix,startTime,search_args,period = '1 days',nResults = 100000,verbose = True, results_per_call= 100):
'''
Main routine to execute requests against search API
for each query string. Some logic required to make sure
each query backfills desired time period.
---------------------------------
Requires
@qs - list of query strings
@prefix - language codes
@startTime - datetime of latest date to grab
@period - time to backfill
@search_args - credentials object for API
Returns a list of DataFrames @dfs
'''
dfs = [pd.DataFrame()]*len(qs)
# Make one empty dataframe for each query
# We will append to each one
#nResults = 10000
for n,q in enumerate(qs):
print('Query {:d} of {:d}...'.format(n,len(qs)))
endTime = startTime + pd.to_timedelta(period)
query = gen_request_parameters(q, False, results_per_call=results_per_call,tweet_fields='text,author_id,id,created_at', start_time=startTime.isoformat()[0:10],end_time=endTime.isoformat()[0:10])
results = collect_results(query,max_tweets=nResults,result_stream_args=search_args)
# Grab first batch of tweets to see how close to backfilling we get
print('Grabbing first tweets')
if len(results) > 0:
# Check there is at least one match
tweets = results[:-1]
metadata = results[-1]
df = pd.DataFrame(data = tweets)
df.set_index(pd.to_datetime(df['created_at']),inplace=True)
tokenDf,df = getTokens(df)
# Get rid of the tokens for now
if verbose:
print('Got {:d} tweets'.format(df.shape[0]))
dfs[n] = dfs[n].append(df)
# Add the new tweets to the array
if verbose:
print('Takes us to',df.index[-1].isoformat()[0:-6])
breakOut = False
startTimeOffset = pd.to_timedelta('0 days')
# We need this flag to break the while loop
# for when the day ranges shift
while df.index[-1] > startTime:
# Keep grabbing tweets for this query
# Until entire date range is backfilled
print(df.index[-1])
print(startTime)
endTime = df.index[-1]
if (endTime - startTime).days == 0:
startTimeOffset = pd.to_timedelta('1 hours')
# Nudge the start date back by an hour
# To make sure that start is always before end
# Or API returns error
if verbose:
print('We need more tweets to look further back (to {:s})'.format(startTime.isoformat()[0:10]))
print('Querying with:')
print('startTime',(startTime - startTimeOffset).isoformat()[0:19])
print('endTime',endTime.isoformat()[0:19])
query = gen_request_parameters(q, results_per_call=results_per_call,tweet_fields='text,author_id,id,created_at', start_time=(startTime - startTimeOffset).isoformat()[0:10], end_time=endTime.to_pydatetime().strftime("%Y-%m-%d % |
if prefix == '':
prefix = 'az'
print(prefix+' chosen')
print('Getting list of d | conditional_block |
dataset_data_particle.py | be unique
# for all data particles. Best practice is to access this variable using the accessor method:
# data_particle_type()
_data_particle_type = None
def __init__(self, raw_data,
port_timestamp=None,
internal_timestamp=None,
preferred_timestamp=None,
quality_flag=DataParticleValue.OK,
new_sequence=None):
| self.raw_data = raw_data
self._values = None
def __eq__(self, arg):
"""
Quick equality check for testing purposes. If they have the same raw
data, timestamp, they are the same enough for this particle
"""
allowed_diff = .000001
if self._data_particle_type != arg._data_particle_type:
log.debug('Data particle type does not match: %s %s', self._data_particle_type, arg._data_particle_type)
return False
if self.raw_data != arg.raw_data:
log.debug('Raw data does not match')
return False
t1 = self.contents[DataParticleKey.INTERNAL_TIMESTAMP]
t2 = arg.contents[DataParticleKey.INTERNAL_TIMESTAMP]
if (t1 is None) or (t2 is None):
tdiff = allowed_diff
else:
tdiff = abs(t1 - t2)
if tdiff > allowed_diff:
log.debug('Timestamp %s does not match %s', t1, t2)
return False
generated1 = json.loads(self.generate())
generated2 = json.loads(arg.generate())
missing, differing = self._compare(generated1, generated2, ignore_keys=[DataParticleKey.DRIVER_TIMESTAMP,
DataParticleKey.PREFERRED_TIMESTAMP])
if missing:
log.error('Key mismatch between particle dictionaries: %r', missing)
return False
if differing:
log.error('Value mismatch between particle dictionaries: %r', differing)
return True
@staticmethod
def _compare(d1, d2, ignore_keys=None):
ignore_keys = ignore_keys if ignore_keys else []
missing = set(d1).symmetric_difference(d2)
differing = {}
for k in d1:
if k in ignore_keys or k in missing:
continue
if d1[k] != d2[k]:
differing[k] = (d1[k], d2[k])
return missing, differing
def set_internal_timestamp(self, timestamp=None, unix_time=None):
"""
Set the internal timestamp
@param timestamp: NTP timestamp to set
@param unit_time: Unix time as returned from time.time()
@raise InstrumentParameterException if timestamp or unix_time not supplied
"""
if timestamp is None and unix_time is None:
raise InstrumentParameterException("timestamp or unix_time required")
if unix_time is not None:
timestamp = ntplib.system_to_ntp_time(unix_time)
# Do we want this to happen here or in down stream processes?
# if(not self._check_timestamp(timestamp)):
# raise InstrumentParameterException("invalid timestamp")
self.contents[DataParticleKey.INTERNAL_TIMESTAMP] = float(timestamp)
def set_port_timestamp(self, timestamp=None, unix_time=None):
"""
Set the port timestamp
@param timestamp: NTP timestamp to set
@param unix_time: Unix time as returned from time.time()
@raise InstrumentParameterException if timestamp or unix_time not supplied
"""
if timestamp is None and unix_time is None:
raise InstrumentParameterException("timestamp or unix_time required")
if unix_time is not None:
timestamp = ntplib.system_to_ntp_time(unix_time)
# Do we want this to happen here or in down stream processes?
if not self._check_timestamp(timestamp):
raise InstrumentParameterException("invalid timestamp")
self.contents[DataParticleKey.PORT_TIMESTAMP] = float(timestamp)
def set_value(self, id, value):
"""
Set a content value, restricted as necessary
@param id The ID of the value to set, should be from DataParticleKey
@param value The value to set
@raises ReadOnlyException If the parameter cannot be set
"""
if (id == DataParticleKey.INTERNAL_TIMESTAMP) and (self._check_timestamp(value)):
self.contents[DataParticleKey.INTERNAL_TIMESTAMP] = value
else:
raise ReadOnlyException("Parameter %s not able to be set to %s after object creation!" %
(id, value))
def get_value(self, id):
""" Return a stored value from contents
@param id The ID (from DataParticleKey) for the parameter to return
@raises NotImplementedException If there is an invalid id
"""
if DataParticleKey.has(id):
return self.contents[id]
else:
raise NotImplementedException("Value %s not available in particle!", id)
def get_value_from_values(self, value_id):
""" Return a stored value from values list
@param value_id The ID of the parameter to return
"""
if not self._values:
return None
values = [i for i in self._values if i[DataParticleKey.VALUE_ID] == value_id]
if not values:
return None
return values[0][DataParticleKey.VALUE]
def data_particle_type(self):
"""
Return the data particle type (aka stream name)
@raise: NotImplementedException if _data_particle_type is not set
"""
if self._data_particle_type is None:
raise NotImplementedException("_data_particle_type not initialized")
return self._data_particle_type
def generate_dict(self):
"""
Generate a simple dictionary of sensor data and timestamps, without
going to JSON. This is useful for the times when JSON is not needed to
go across an interface. There are times when particles are used
internally to a component/process/module/etc.
@retval A python dictionary with the proper timestamps and data values
@throws InstrumentDriverException if there is a problem wtih the inputs
"""
# verify preferred timestamp exists in the structure...
if not self._check_preferred_timestamps():
raise SampleException("Preferred timestamp not in particle!")
# build response structure
self._encoding_errors = []
if self._values is None:
self._values = self._build_parsed_values()
result = self._build_base_structure()
result[DataParticleKey.STREAM_NAME] = self.data_particle_type()
result[DataParticleKey.VALUES] = self._values
return result
def generate(self, sorted=False):
"""
Generates a JSON_parsed packet from a sample dictionary of sensor data and
associates a timestamp with it
@param sorted Returned sorted json dict, useful for testing, but slow,
so dont do it unless it is important
@return A JSON_raw string, properly structured with port agent time stamp
and driver timestamp
@throws InstrumentDriverException If there is a problem with the inputs
"""
json_result = json.dumps(self.generate_dict(), sort_keys=sorted)
return json_result
def _build_parsed_values(self):
"""
Build values of a parsed structure. Just the values are built so
so that a child class can override this class, but call it with
super() to get the base structure before modification
@return the values tag for this data structure ready to JSONify
@raises SampleException when parsed values can not be properly returned
"""
raise SampleException("Parsed values block not overridden")
def _build_base_structure(self):
"""
Build the base/header information for an output structure.
Follow on methods can then modify it by adding or editing values.
@return A fresh copy of a core structure to be exported
"""
result = dict(self.contents)
# clean out optional fields that were missing
if not self.contents[DataParticleKey.PORT_TIMESTAMP]:
del result[DataParticleKey.PORT_TIMESTAMP]
if not self.contents[DataParticleKey.INTERNAL_TIMESTAMP]:
del result[DataParticleKey.INTERNAL_TIMESTAMP]
return result
def _check_timestamp(self, timestamp):
"""
Check to make sure the timestamp is reasonable
@param timestamp An NTP4 formatted timestamp (64bit)
@return True if timestamp is okay or None, False otherwise
"""
if timestamp is None:
return True
if not isinstance(timestamp, float):
return False
# is it sufficiently in the future to be unreasonable?
if timestamp > ntplib.system_to_ntp_time(time.time() + (86400 * 365)):
return False
else:
return True
def _check_preferred_timestamps(self):
"""
Check to make sure the preferred timestamp indicated in the
particle is actually listed, possibly adjusting to 2nd best
if not there.
@throws Sample | """ Build a particle seeded with appropriate information
@param raw_data The raw data used in the particle
"""
if new_sequence is not None and not isinstance(new_sequence, bool):
raise TypeError("new_sequence is not a bool")
self.contents = {
DataParticleKey.PKT_FORMAT_ID: DataParticleValue.JSON_DATA,
DataParticleKey.PKT_VERSION: 1,
DataParticleKey.PORT_TIMESTAMP: port_timestamp,
DataParticleKey.INTERNAL_TIMESTAMP: internal_timestamp,
DataParticleKey.DRIVER_TIMESTAMP: ntplib.system_to_ntp_time(time.time()),
DataParticleKey.PREFERRED_TIMESTAMP: preferred_timestamp,
DataParticleKey.QUALITY_FLAG: quality_flag,
}
self._encoding_errors = []
if new_sequence is not None:
self.contents[DataParticleKey.NEW_SEQUENCE] = new_sequence
| identifier_body |
dataset_data_particle.py | be unique
# for all data particles. Best practice is to access this variable using the accessor method:
# data_particle_type()
_data_particle_type = None
def __init__(self, raw_data,
port_timestamp=None,
internal_timestamp=None,
preferred_timestamp=None,
quality_flag=DataParticleValue.OK,
new_sequence=None):
""" Build a particle seeded with appropriate information
@param raw_data The raw data used in the particle
"""
if new_sequence is not None and not isinstance(new_sequence, bool):
raise TypeError("new_sequence is not a bool")
self.contents = {
DataParticleKey.PKT_FORMAT_ID: DataParticleValue.JSON_DATA,
DataParticleKey.PKT_VERSION: 1,
DataParticleKey.PORT_TIMESTAMP: port_timestamp,
DataParticleKey.INTERNAL_TIMESTAMP: internal_timestamp,
DataParticleKey.DRIVER_TIMESTAMP: ntplib.system_to_ntp_time(time.time()),
DataParticleKey.PREFERRED_TIMESTAMP: preferred_timestamp,
DataParticleKey.QUALITY_FLAG: quality_flag,
}
self._encoding_errors = []
if new_sequence is not None:
self.contents[DataParticleKey.NEW_SEQUENCE] = new_sequence
self.raw_data = raw_data
self._values = None
def __eq__(self, arg):
"""
Quick equality check for testing purposes. If they have the same raw
data, timestamp, they are the same enough for this particle
"""
allowed_diff = .000001
if self._data_particle_type != arg._data_particle_type:
log.debug('Data particle type does not match: %s %s', self._data_particle_type, arg._data_particle_type)
return False
if self.raw_data != arg.raw_data:
log.debug('Raw data does not match')
return False
t1 = self.contents[DataParticleKey.INTERNAL_TIMESTAMP]
t2 = arg.contents[DataParticleKey.INTERNAL_TIMESTAMP]
if (t1 is None) or (t2 is None):
tdiff = allowed_diff
else:
tdiff = abs(t1 - t2)
if tdiff > allowed_diff:
log.debug('Timestamp %s does not match %s', t1, t2)
return False
generated1 = json.loads(self.generate())
generated2 = json.loads(arg.generate())
missing, differing = self._compare(generated1, generated2, ignore_keys=[DataParticleKey.DRIVER_TIMESTAMP,
DataParticleKey.PREFERRED_TIMESTAMP])
if missing:
log.error('Key mismatch between particle dictionaries: %r', missing)
return False
if differing:
log.error('Value mismatch between particle dictionaries: %r', differing)
return True
@staticmethod
def _compare(d1, d2, ignore_keys=None):
ignore_keys = ignore_keys if ignore_keys else []
missing = set(d1).symmetric_difference(d2)
differing = {}
for k in d1:
if k in ignore_keys or k in missing:
continue
if d1[k] != d2[k]:
differing[k] = (d1[k], d2[k])
return missing, differing
def set_internal_timestamp(self, timestamp=None, unix_time=None):
"""
Set the internal timestamp
@param timestamp: NTP timestamp to set
@param unit_time: Unix time as returned from time.time()
@raise InstrumentParameterException if timestamp or unix_time not supplied
"""
if timestamp is None and unix_time is None:
raise InstrumentParameterException("timestamp or unix_time required")
if unix_time is not None:
timestamp = ntplib.system_to_ntp_time(unix_time)
# Do we want this to happen here or in down stream processes?
# if(not self._check_timestamp(timestamp)):
# raise InstrumentParameterException("invalid timestamp")
self.contents[DataParticleKey.INTERNAL_TIMESTAMP] = float(timestamp)
def set_port_timestamp(self, timestamp=None, unix_time=None):
"""
Set the port timestamp
@param timestamp: NTP timestamp to set
@param unix_time: Unix time as returned from time.time()
@raise InstrumentParameterException if timestamp or unix_time not supplied
"""
if timestamp is None and unix_time is None:
raise InstrumentParameterException("timestamp or unix_time required")
if unix_time is not None:
timestamp = ntplib.system_to_ntp_time(unix_time)
# Do we want this to happen here or in down stream processes?
if not self._check_timestamp(timestamp):
raise InstrumentParameterException("invalid timestamp")
self.contents[DataParticleKey.PORT_TIMESTAMP] = float(timestamp)
def set_value(self, id, value):
"""
Set a content value, restricted as necessary
@param id The ID of the value to set, should be from DataParticleKey
@param value The value to set
@raises ReadOnlyException If the parameter cannot be set
"""
if (id == DataParticleKey.INTERNAL_TIMESTAMP) and (self._check_timestamp(value)):
self.contents[DataParticleKey.INTERNAL_TIMESTAMP] = value
else:
raise ReadOnlyException("Parameter %s not able to be set to %s after object creation!" %
(id, value))
def get_value(self, id):
""" Return a stored value from contents
@param id The ID (from DataParticleKey) for the parameter to return
@raises NotImplementedException If there is an invalid id
"""
if DataParticleKey.has(id):
return self.contents[id]
else:
raise NotImplementedException("Value %s not available in particle!", id)
def get_value_from_values(self, value_id):
""" Return a stored value from values list
@param value_id The ID of the parameter to return
"""
if not self._values:
return None
values = [i for i in self._values if i[DataParticleKey.VALUE_ID] == value_id]
if not values:
return None
return values[0][DataParticleKey.VALUE]
def data_particle_type(self):
"""
Return the data particle type (aka stream name)
@raise: NotImplementedException if _data_particle_type is not set
"""
if self._data_particle_type is None:
raise NotImplementedException("_data_particle_type not initialized")
return self._data_particle_type
def generate_dict(self):
"""
Generate a simple dictionary of sensor data and timestamps, without
going to JSON. This is useful for the times when JSON is not needed to
go across an interface. There are times when particles are used
internally to a component/process/module/etc.
@retval A python dictionary with the proper timestamps and data values
@throws InstrumentDriverException if there is a problem wtih the inputs
"""
# verify preferred timestamp exists in the structure...
if not self._check_preferred_timestamps():
raise SampleException("Preferred timestamp not in particle!")
# build response structure
self._encoding_errors = []
if self._values is None:
self._values = self._build_parsed_values()
result = self._build_base_structure()
result[DataParticleKey.STREAM_NAME] = self.data_particle_type()
result[DataParticleKey.VALUES] = self._values
return result
def generate(self, sorted=False):
"""
Generates a JSON_parsed packet from a sample dictionary of sensor data and
associates a timestamp with it
@param sorted Returned sorted json dict, useful for testing, but slow,
so dont do it unless it is important
@return A JSON_raw string, properly structured with port agent time stamp
and driver timestamp
@throws InstrumentDriverException If there is a problem with the inputs
"""
json_result = json.dumps(self.generate_dict(), sort_keys=sorted)
return json_result
def _build_parsed_values(self):
"""
Build values of a parsed structure. Just the values are built so
so that a child class can override this class, but call it with
super() to get the base structure before modification
@return the values tag for this data structure ready to JSONify
@raises SampleException when parsed values can not be properly returned
"""
raise SampleException("Parsed values block not overridden")
def _build_base_structure(self): | Build the base/header information for an output structure.
Follow on methods can then modify it by adding or editing values.
@return A fresh copy of a core structure to be exported
"""
result = dict(self.contents)
# clean out optional fields that were missing
if not self.contents[DataParticleKey.PORT_TIMESTAMP]:
del result[DataParticleKey.PORT_TIMESTAMP]
if not self.contents[DataParticleKey.INTERNAL_TIMESTAMP]:
del result[DataParticleKey.INTERNAL_TIMESTAMP]
return result
def _check_timestamp(self, timestamp):
"""
Check to make sure the timestamp is reasonable
@param timestamp An NTP4 formatted timestamp (64bit)
@return True if timestamp is okay or None, False otherwise
"""
if timestamp is None:
return True
if not isinstance(timestamp, float):
return False
# is it sufficiently in the future to be unreasonable?
if timestamp > ntplib.system_to_ntp_time(time.time() + (86400 * 365)):
return False
else:
return True
def _check_preferred_timestamps(self):
"""
Check to make sure the preferred timestamp indicated in the
particle is actually listed, possibly adjusting to 2nd best
if not there.
@throws Sample | """ | random_line_split |
dataset_data_particle.py | if unix_time is not None:
timestamp = ntplib.system_to_ntp_time(unix_time)
# Do we want this to happen here or in down stream processes?
if not self._check_timestamp(timestamp):
raise InstrumentParameterException("invalid timestamp")
self.contents[DataParticleKey.PORT_TIMESTAMP] = float(timestamp)
def set_value(self, id, value):
"""
Set a content value, restricted as necessary
@param id The ID of the value to set, should be from DataParticleKey
@param value The value to set
@raises ReadOnlyException If the parameter cannot be set
"""
if (id == DataParticleKey.INTERNAL_TIMESTAMP) and (self._check_timestamp(value)):
self.contents[DataParticleKey.INTERNAL_TIMESTAMP] = value
else:
raise ReadOnlyException("Parameter %s not able to be set to %s after object creation!" %
(id, value))
def get_value(self, id):
""" Return a stored value from contents
@param id The ID (from DataParticleKey) for the parameter to return
@raises NotImplementedException If there is an invalid id
"""
if DataParticleKey.has(id):
return self.contents[id]
else:
raise NotImplementedException("Value %s not available in particle!", id)
def get_value_from_values(self, value_id):
""" Return a stored value from values list
@param value_id The ID of the parameter to return
"""
if not self._values:
return None
values = [i for i in self._values if i[DataParticleKey.VALUE_ID] == value_id]
if not values:
return None
return values[0][DataParticleKey.VALUE]
def data_particle_type(self):
"""
Return the data particle type (aka stream name)
@raise: NotImplementedException if _data_particle_type is not set
"""
if self._data_particle_type is None:
raise NotImplementedException("_data_particle_type not initialized")
return self._data_particle_type
def generate_dict(self):
"""
Generate a simple dictionary of sensor data and timestamps, without
going to JSON. This is useful for the times when JSON is not needed to
go across an interface. There are times when particles are used
internally to a component/process/module/etc.
@retval A python dictionary with the proper timestamps and data values
@throws InstrumentDriverException if there is a problem wtih the inputs
"""
# verify preferred timestamp exists in the structure...
if not self._check_preferred_timestamps():
raise SampleException("Preferred timestamp not in particle!")
# build response structure
self._encoding_errors = []
if self._values is None:
self._values = self._build_parsed_values()
result = self._build_base_structure()
result[DataParticleKey.STREAM_NAME] = self.data_particle_type()
result[DataParticleKey.VALUES] = self._values
return result
def generate(self, sorted=False):
"""
Generates a JSON_parsed packet from a sample dictionary of sensor data and
associates a timestamp with it
@param sorted Returned sorted json dict, useful for testing, but slow,
so dont do it unless it is important
@return A JSON_raw string, properly structured with port agent time stamp
and driver timestamp
@throws InstrumentDriverException If there is a problem with the inputs
"""
json_result = json.dumps(self.generate_dict(), sort_keys=sorted)
return json_result
def _build_parsed_values(self):
"""
Build values of a parsed structure. Just the values are built so
so that a child class can override this class, but call it with
super() to get the base structure before modification
@return the values tag for this data structure ready to JSONify
@raises SampleException when parsed values can not be properly returned
"""
raise SampleException("Parsed values block not overridden")
def _build_base_structure(self):
"""
Build the base/header information for an output structure.
Follow on methods can then modify it by adding or editing values.
@return A fresh copy of a core structure to be exported
"""
result = dict(self.contents)
# clean out optional fields that were missing
if not self.contents[DataParticleKey.PORT_TIMESTAMP]:
del result[DataParticleKey.PORT_TIMESTAMP]
if not self.contents[DataParticleKey.INTERNAL_TIMESTAMP]:
del result[DataParticleKey.INTERNAL_TIMESTAMP]
return result
def _check_timestamp(self, timestamp):
"""
Check to make sure the timestamp is reasonable
@param timestamp An NTP4 formatted timestamp (64bit)
@return True if timestamp is okay or None, False otherwise
"""
if timestamp is None:
return True
if not isinstance(timestamp, float):
return False
# is it sufficiently in the future to be unreasonable?
if timestamp > ntplib.system_to_ntp_time(time.time() + (86400 * 365)):
return False
else:
return True
def _check_preferred_timestamps(self):
"""
Check to make sure the preferred timestamp indicated in the
particle is actually listed, possibly adjusting to 2nd best
if not there.
@throws SampleException When there is a problem with the preferred
timestamp in the sample.
"""
if self.contents[DataParticleKey.PREFERRED_TIMESTAMP] is None:
raise SampleException("Missing preferred timestamp, %s, in particle" %
self.contents[DataParticleKey.PREFERRED_TIMESTAMP])
# This should be handled downstream. Don't want to not publish data because
# the port agent stopped putting out timestamps
# if self.contents[self.contents[DataParticleKey.PREFERRED_TIMESTAMP]] == None:
# raise SampleException("Preferred timestamp, %s, is not defined" %
# self.contents[DataParticleKey.PREFERRED_TIMESTAMP])
return True
def _encode_value(self, name, value, encoding_function, value_range=None):
"""
Encode a value using the encoding function, if it fails store the error in a queue
:param value_range tuple containing min/max numerical values or min/max lengths
"""
encoded_val = None
# noinspection PyBroadException
# - custom encoding_function exceptions are not known a priori
try:
encoded_val = encoding_function(value)
except ValueError as e:
log.error('Unable to convert %s to %s.', encoded_val, encoding_function)
self._encoding_errors.append({name: value})
except Exception as e:
log.error('Data particle error encoding. Name: %s Value: %s, Encoding: %s', name, value, encoding_function)
self._encoding_errors.append({name: value})
# optional range checking
if value_range:
try:
vmin, vmax = value_range
except ValueError as e: # this only occurs as a programming error and should cause the parser to exit
log.exception('_encode_value must have exactly two values for tuple argument value_range')
raise ValueError(e)
if encoding_function in [int, float]:
if vmin and encoded_val < vmin:
log.error('Particle value (%s) below minimum threshold (%s < %s)', name, value, vmin)
self._encoding_errors.append({name: value})
elif vmax and encoded_val > vmax:
log.error('Particle value (%s) exceeds maximum threshold (%s > %s)', name, value, vmax)
self._encoding_errors.append({name: value})
elif hasattr(encoded_val, '__len__'):
try:
if vmin and len(encoded_val) < vmin:
log.error('Particle value (%s) length below minimum threshold (%s < %s)',
name, value, vmin)
self._encoding_errors.append({name: value})
elif vmax and len(encoded_val) > vmax:
log.error('Particle value (%s) length exceeds maximum threshold (%s > %s)',
name, value, vmax)
self._encoding_errors.append({name: value})
# in the unlikely event that a range was specified and the encoding object created a bogus len()
# we'll just ignore the range check
except TypeError:
log.warning('_encode_value received an encoding function (%s) that claimed to implement len() but '
'does not. Unable to apply range test to %s', encoding_function, name)
return {DataParticleKey.VALUE_ID: name,
DataParticleKey.VALUE: encoded_val}
def get_encoding_errors(self):
"""
Return the encoding errors list
"""
return self._encoding_errors
class RawDataParticleKey(BaseEnum):
PAYLOAD = "raw"
LENGTH = "length"
TYPE = "type"
CHECKSUM = "checksum"
class RawDataParticle(DataParticle):
"""
This class a common data particle for generating data particles of raw
data.
It essentially is a translation of the port agent packet
"""
_data_particle_type = CommonDataParticleType.RAW
def _build_parsed_values(self):
"""
Build a particle out of a port agent packet.
@returns A list that is ready to be added to the "values" tag before
the structure is JSONified
"""
port_agent_packet = self.raw_data
if not isinstance(port_agent_packet, dict):
raise SampleException("raw data not a dictionary")
for param in ["raw", "length", "type", "checksum"]:
if param not in port_agent_packet:
| raise SampleException("raw data not a complete port agent packet. missing %s" % param) | conditional_block | |
dataset_data_particle.py | be unique
# for all data particles. Best practice is to access this variable using the accessor method:
# data_particle_type()
_data_particle_type = None
def __init__(self, raw_data,
port_timestamp=None,
internal_timestamp=None,
preferred_timestamp=None,
quality_flag=DataParticleValue.OK,
new_sequence=None):
""" Build a particle seeded with appropriate information
@param raw_data The raw data used in the particle
"""
if new_sequence is not None and not isinstance(new_sequence, bool):
raise TypeError("new_sequence is not a bool")
self.contents = {
DataParticleKey.PKT_FORMAT_ID: DataParticleValue.JSON_DATA,
DataParticleKey.PKT_VERSION: 1,
DataParticleKey.PORT_TIMESTAMP: port_timestamp,
DataParticleKey.INTERNAL_TIMESTAMP: internal_timestamp,
DataParticleKey.DRIVER_TIMESTAMP: ntplib.system_to_ntp_time(time.time()),
DataParticleKey.PREFERRED_TIMESTAMP: preferred_timestamp,
DataParticleKey.QUALITY_FLAG: quality_flag,
}
self._encoding_errors = []
if new_sequence is not None:
self.contents[DataParticleKey.NEW_SEQUENCE] = new_sequence
self.raw_data = raw_data
self._values = None
def __eq__(self, arg):
"""
Quick equality check for testing purposes. If they have the same raw
data, timestamp, they are the same enough for this particle
"""
allowed_diff = .000001
if self._data_particle_type != arg._data_particle_type:
log.debug('Data particle type does not match: %s %s', self._data_particle_type, arg._data_particle_type)
return False
if self.raw_data != arg.raw_data:
log.debug('Raw data does not match')
return False
t1 = self.contents[DataParticleKey.INTERNAL_TIMESTAMP]
t2 = arg.contents[DataParticleKey.INTERNAL_TIMESTAMP]
if (t1 is None) or (t2 is None):
tdiff = allowed_diff
else:
tdiff = abs(t1 - t2)
if tdiff > allowed_diff:
log.debug('Timestamp %s does not match %s', t1, t2)
return False
generated1 = json.loads(self.generate())
generated2 = json.loads(arg.generate())
missing, differing = self._compare(generated1, generated2, ignore_keys=[DataParticleKey.DRIVER_TIMESTAMP,
DataParticleKey.PREFERRED_TIMESTAMP])
if missing:
log.error('Key mismatch between particle dictionaries: %r', missing)
return False
if differing:
log.error('Value mismatch between particle dictionaries: %r', differing)
return True
@staticmethod
def _compare(d1, d2, ignore_keys=None):
ignore_keys = ignore_keys if ignore_keys else []
missing = set(d1).symmetric_difference(d2)
differing = {}
for k in d1:
if k in ignore_keys or k in missing:
continue
if d1[k] != d2[k]:
differing[k] = (d1[k], d2[k])
return missing, differing
def | (self, timestamp=None, unix_time=None):
"""
Set the internal timestamp
@param timestamp: NTP timestamp to set
@param unit_time: Unix time as returned from time.time()
@raise InstrumentParameterException if timestamp or unix_time not supplied
"""
if timestamp is None and unix_time is None:
raise InstrumentParameterException("timestamp or unix_time required")
if unix_time is not None:
timestamp = ntplib.system_to_ntp_time(unix_time)
# Do we want this to happen here or in down stream processes?
# if(not self._check_timestamp(timestamp)):
# raise InstrumentParameterException("invalid timestamp")
self.contents[DataParticleKey.INTERNAL_TIMESTAMP] = float(timestamp)
def set_port_timestamp(self, timestamp=None, unix_time=None):
"""
Set the port timestamp
@param timestamp: NTP timestamp to set
@param unix_time: Unix time as returned from time.time()
@raise InstrumentParameterException if timestamp or unix_time not supplied
"""
if timestamp is None and unix_time is None:
raise InstrumentParameterException("timestamp or unix_time required")
if unix_time is not None:
timestamp = ntplib.system_to_ntp_time(unix_time)
# Do we want this to happen here or in down stream processes?
if not self._check_timestamp(timestamp):
raise InstrumentParameterException("invalid timestamp")
self.contents[DataParticleKey.PORT_TIMESTAMP] = float(timestamp)
def set_value(self, id, value):
"""
Set a content value, restricted as necessary
@param id The ID of the value to set, should be from DataParticleKey
@param value The value to set
@raises ReadOnlyException If the parameter cannot be set
"""
if (id == DataParticleKey.INTERNAL_TIMESTAMP) and (self._check_timestamp(value)):
self.contents[DataParticleKey.INTERNAL_TIMESTAMP] = value
else:
raise ReadOnlyException("Parameter %s not able to be set to %s after object creation!" %
(id, value))
def get_value(self, id):
""" Return a stored value from contents
@param id The ID (from DataParticleKey) for the parameter to return
@raises NotImplementedException If there is an invalid id
"""
if DataParticleKey.has(id):
return self.contents[id]
else:
raise NotImplementedException("Value %s not available in particle!", id)
def get_value_from_values(self, value_id):
""" Return a stored value from values list
@param value_id The ID of the parameter to return
"""
if not self._values:
return None
values = [i for i in self._values if i[DataParticleKey.VALUE_ID] == value_id]
if not values:
return None
return values[0][DataParticleKey.VALUE]
def data_particle_type(self):
"""
Return the data particle type (aka stream name)
@raise: NotImplementedException if _data_particle_type is not set
"""
if self._data_particle_type is None:
raise NotImplementedException("_data_particle_type not initialized")
return self._data_particle_type
def generate_dict(self):
"""
Generate a simple dictionary of sensor data and timestamps, without
going to JSON. This is useful for the times when JSON is not needed to
go across an interface. There are times when particles are used
internally to a component/process/module/etc.
@retval A python dictionary with the proper timestamps and data values
@throws InstrumentDriverException if there is a problem wtih the inputs
"""
# verify preferred timestamp exists in the structure...
if not self._check_preferred_timestamps():
raise SampleException("Preferred timestamp not in particle!")
# build response structure
self._encoding_errors = []
if self._values is None:
self._values = self._build_parsed_values()
result = self._build_base_structure()
result[DataParticleKey.STREAM_NAME] = self.data_particle_type()
result[DataParticleKey.VALUES] = self._values
return result
def generate(self, sorted=False):
"""
Generates a JSON_parsed packet from a sample dictionary of sensor data and
associates a timestamp with it
@param sorted Returned sorted json dict, useful for testing, but slow,
so dont do it unless it is important
@return A JSON_raw string, properly structured with port agent time stamp
and driver timestamp
@throws InstrumentDriverException If there is a problem with the inputs
"""
json_result = json.dumps(self.generate_dict(), sort_keys=sorted)
return json_result
def _build_parsed_values(self):
"""
Build values of a parsed structure. Just the values are built so
so that a child class can override this class, but call it with
super() to get the base structure before modification
@return the values tag for this data structure ready to JSONify
@raises SampleException when parsed values can not be properly returned
"""
raise SampleException("Parsed values block not overridden")
def _build_base_structure(self):
"""
Build the base/header information for an output structure.
Follow on methods can then modify it by adding or editing values.
@return A fresh copy of a core structure to be exported
"""
result = dict(self.contents)
# clean out optional fields that were missing
if not self.contents[DataParticleKey.PORT_TIMESTAMP]:
del result[DataParticleKey.PORT_TIMESTAMP]
if not self.contents[DataParticleKey.INTERNAL_TIMESTAMP]:
del result[DataParticleKey.INTERNAL_TIMESTAMP]
return result
def _check_timestamp(self, timestamp):
"""
Check to make sure the timestamp is reasonable
@param timestamp An NTP4 formatted timestamp (64bit)
@return True if timestamp is okay or None, False otherwise
"""
if timestamp is None:
return True
if not isinstance(timestamp, float):
return False
# is it sufficiently in the future to be unreasonable?
if timestamp > ntplib.system_to_ntp_time(time.time() + (86400 * 365)):
return False
else:
return True
def _check_preferred_timestamps(self):
"""
Check to make sure the preferred timestamp indicated in the
particle is actually listed, possibly adjusting to 2nd best
if not there.
@throws | set_internal_timestamp | identifier_name |
perception.py | Perception(RobotPart):
def __init__(self, robot_name, tf_listener):
super(Perception, self).__init__(robot_name=robot_name, tf_listener=tf_listener)
self._camera_lazy_sub = None
self._camera_cv = Condition()
self._camera_last_image = None
self._annotate_srv = self.create_service_client('/' + robot_name + '/face_recognition/annotate', Annotate)
self._recognize_srv = self.create_service_client('/' + robot_name + '/face_recognition/recognize', Recognize)
self._clear_srv = self.create_service_client('/' + robot_name + '/face_recognition/clear', Empty)
self._face_properties_srv = self.create_service_client('/' + robot_name + '/face_recognition/get_face_properties', GetFaceProperties)
self._projection_srv = self.create_service_client('/' + robot_name + '/top_kinect/project_2d_to_3d',
Project2DTo3D)
def close(self):
pass
def reset(self, timeout=0):
pass
def _image_cb(self, image):
self._camera_cv.acquire()
self._camera_last_image = image
self._camera_cv.notify()
self._camera_cv.release()
def get_image(self, timeout=5):
# lazy subscribe to the kinect
if not self._camera_lazy_sub:
# for test with tripod kinect
# self._camera_lazy_sub = rospy.Subscriber("/camera/rgb/image_rect_color", Image, self._image_cb)
# for the robot
rospy.loginfo("Creating subscriber")
self._camera_lazy_sub = rospy.Subscriber("/" + self.robot_name + "/top_kinect/rgb/image", Image, self._image_cb)
rospy.loginfo('lazy subscribe to %s', self._camera_lazy_sub.name)
rospy.loginfo("getting one image...")
self._camera_cv.acquire()
self._camera_last_image = None
for i in range(timeout):
if self._camera_last_image:
rospy.loginfo("len(self._camera_last_image): {}".format(len(self._camera_last_image.data)))
break
else:
rospy.loginfo("self._camera_last_image: {}".format(self._camera_last_image))
if rospy.is_shutdown():
return
self._camera_cv.wait(timeout=1)
else:
raise Exception('no image received from %s' % self._camera_lazy_sub.name)
image = self._camera_last_image
self._camera_cv.release()
rospy.loginfo("got %d bytes of image data", len(image.data))
return image
def project_roi(self, roi, frame_id=None):
""" Projects a region of interest of a depth image to a 3D Point. Hereto, a service is used
:param roi: sensor_msgs/RegionOfInterest
:param frame_id: if specified, the result is transformed into this frame id
:return: VectorStamped object
"""
response = self.project_rois(rois=[roi]).points[0]
# Convert to VectorStamped
result = VectorStamped(x=response.point.x, y=response.point.y, z=response.point.z,
frame_id=response.header.frame_id)
# If necessary, transform the point
if frame_id is not None:
print("Transforming roi to {}".format(frame_id))
result = result.projectToFrame(frame_id=frame_id, tf_listener=self.tf_listener)
# Return the result
return result
def project_rois(self, rois):
# Call the service with the provided Region of Interest
try:
points = self._projection_srv(rois=rois)
except rospy.ServiceException as e:
raise ValueError('project_roi failed', e)
else:
rospy.loginfo('project_rois response: %s', points)
return points
# OpenFace
def _get_faces(self, image=None):
if not image:
image = self.get_image()
try:
r = self._recognize_srv(image=image)
rospy.loginfo('found %d face(s) in the image', len(r.recognitions))
except rospy.ServiceException as e:
rospy.logerr(e.message)
r = RecognizeResponse()
return r
def learn_person(self, name='operator'):
HEIGHT_TRESHOLD = 88
WIDTH_TRESHOLD = 88
try:
image = self.get_image()
except:
rospy.logerr("Cannot get image")
return False
raw_recognitions = self._get_faces(image).recognitions
recognitions = [r for r in raw_recognitions if r.roi.height > HEIGHT_TRESHOLD and r.roi.width > WIDTH_TRESHOLD]
rospy.loginfo('found %d valid face(s)', len(recognitions))
if len(recognitions) != 1:
rospy.loginfo("Too many faces: {}".format(len(recognitions)))
return False
recognition = recognitions[0]
rospy.loginfo('annotating that face as %s', name)
try:
self._annotate_srv(image=image, annotations=[Annotation(label=name, roi=recognition.roi)])
except rospy.ServiceException as e:
rospy.logerr('annotate failed: {}'.format(e))
return False
return True
def detect_faces(self, image=None, stamp=False):
"""
Snap an image with the camera and return the recognized faces.
:param image: image to use for recognition
:type image: sensor_msgs/Image
:param stamp: Return recognitions and stamp
:type stamp: bool
:return: recognitions of the faces
:rtype: list[image_recognition_msgs/Recognition]
"""
if not image:
image = self.get_image()
if stamp:
|
else:
return self._get_faces(image).recognitions
@staticmethod
def get_best_face_recognition(recognitions, desired_label, probability_threshold=4.0):
"""
Returns the Recognition with the highest probability of having the desired_label.
Assumes that the probability distributions in Recognition are already sorted by probability (descending, highest first)
:param recognitions: The recognitions to select the best one with desired_label from
:type recognitions: list[image_recognition_msgs/Recognition]
:param desired_label: what label to look for in the recognitions
:type desired_label: str
:param probability_threshold: only accept recognitions with probability higher than threshold
:type probability_threshold: double
:return the best recognition matching the given desired_label
:rtype image_recognition_msgs/Recognition
"""
rospy.logdebug("get_best_face_recognition: recognitions = {}".format(recognitions))
# Only take detections with operator
# detections = []
# The old implementation took, for each recognition, the (label, prob) pairs where label==desired_label.
# Other pairs in the same distribution may have higher probability.
# When the best_recognition is picked, it picked the recognition where the probability for the desired_label is hhighest comapared to other recognitions. BUT: a recognitions highest probability may be for a different label
# because the selection only compares matching labels, not looking at the probability of non-matching pairs.
# For example: we have 2 recognitions.
# in recognition 1, A has 50%, desired_label has 30%, B has 20%.
# in recognition 2, B has 60%, desired_label has 35%, A has 5%.
# Then, recognition 2 has the highest probability for the desired_label and is thus picked.
# Because we take the [0]'th index of the distribution, that name is B
#
# Solution: because the probability distributions are sorted, just take the probability distribution where the desired label has the highest probability.
#for recog in recognitions:
# for cp in recog.categorical_distribution.probabilities:
# if cp.label == desired_label:
# detections.append((recog, cp.probability))
# Sort based on probability
#if detections:
# sorted_detections = sorted(detections, key=lambda det: det[1])
# best_detection = sorted_detections[0][0] # A CategoricalDistribution in a Recognition is already ordered, max prob is at [0]
#else:
# best_detection = None
rospy.loginfo("Probability threshold %.2f", probability_threshold)
for index, recog in enumerate(recognitions):
rospy.loginfo("{index}: {dist}".format(index=index,
dist=[(cp.label, "{:.2f}".format(cp.probability)) for cp in recog.categorical_distribution.probabilities]))
matching_recognitions = [recog for recog in recognitions if \
recog.categorical_distribution.probabilities and \
recog.categorical_distribution.probabilities[0].label == desired_label]
if matching_recognitions:
best_recognition = max(matching_recognitions, key=lambda recog: recog.categorical_distribution.probabilities[0].probability)
return best_recognition if best_recognition.categorical_distribution.probabilities[0].probability > probability_threshold else None
else:
return None # TODO: Maybe so something smart with selecting a recognition where the desired_label is not the most probable for a recognition?
def clear_face(self):
"""
| return self._get_faces(image).recognitions, image.header.stamp | conditional_block |
perception.py | =tf_listener)
self._camera_lazy_sub = None
self._camera_cv = Condition()
self._camera_last_image = None
self._annotate_srv = self.create_service_client('/' + robot_name + '/face_recognition/annotate', Annotate)
self._recognize_srv = self.create_service_client('/' + robot_name + '/face_recognition/recognize', Recognize)
self._clear_srv = self.create_service_client('/' + robot_name + '/face_recognition/clear', Empty)
self._face_properties_srv = self.create_service_client('/' + robot_name + '/face_recognition/get_face_properties', GetFaceProperties)
self._projection_srv = self.create_service_client('/' + robot_name + '/top_kinect/project_2d_to_3d',
Project2DTo3D)
def close(self):
pass
def reset(self, timeout=0):
pass
def _image_cb(self, image):
self._camera_cv.acquire()
self._camera_last_image = image
self._camera_cv.notify()
self._camera_cv.release()
def get_image(self, timeout=5):
# lazy subscribe to the kinect
if not self._camera_lazy_sub:
# for test with tripod kinect
# self._camera_lazy_sub = rospy.Subscriber("/camera/rgb/image_rect_color", Image, self._image_cb)
# for the robot
rospy.loginfo("Creating subscriber")
self._camera_lazy_sub = rospy.Subscriber("/" + self.robot_name + "/top_kinect/rgb/image", Image, self._image_cb)
rospy.loginfo('lazy subscribe to %s', self._camera_lazy_sub.name)
rospy.loginfo("getting one image...")
self._camera_cv.acquire()
self._camera_last_image = None
for i in range(timeout):
if self._camera_last_image:
rospy.loginfo("len(self._camera_last_image): {}".format(len(self._camera_last_image.data)))
break
else:
rospy.loginfo("self._camera_last_image: {}".format(self._camera_last_image))
if rospy.is_shutdown():
return
self._camera_cv.wait(timeout=1)
else:
raise Exception('no image received from %s' % self._camera_lazy_sub.name)
image = self._camera_last_image
self._camera_cv.release()
rospy.loginfo("got %d bytes of image data", len(image.data))
return image
def project_roi(self, roi, frame_id=None):
""" Projects a region of interest of a depth image to a 3D Point. Hereto, a service is used
:param roi: sensor_msgs/RegionOfInterest
:param frame_id: if specified, the result is transformed into this frame id
:return: VectorStamped object
"""
response = self.project_rois(rois=[roi]).points[0]
# Convert to VectorStamped
result = VectorStamped(x=response.point.x, y=response.point.y, z=response.point.z,
frame_id=response.header.frame_id)
# If necessary, transform the point
if frame_id is not None:
print("Transforming roi to {}".format(frame_id))
result = result.projectToFrame(frame_id=frame_id, tf_listener=self.tf_listener)
# Return the result
return result
def project_rois(self, rois):
# Call the service with the provided Region of Interest
try:
points = self._projection_srv(rois=rois)
except rospy.ServiceException as e:
raise ValueError('project_roi failed', e)
else:
rospy.loginfo('project_rois response: %s', points)
return points
# OpenFace
def _get_faces(self, image=None):
if not image:
image = self.get_image()
try:
r = self._recognize_srv(image=image)
rospy.loginfo('found %d face(s) in the image', len(r.recognitions))
except rospy.ServiceException as e:
rospy.logerr(e.message)
r = RecognizeResponse()
return r
def learn_person(self, name='operator'):
HEIGHT_TRESHOLD = 88
WIDTH_TRESHOLD = 88
try:
image = self.get_image()
except:
rospy.logerr("Cannot get image")
return False
raw_recognitions = self._get_faces(image).recognitions
recognitions = [r for r in raw_recognitions if r.roi.height > HEIGHT_TRESHOLD and r.roi.width > WIDTH_TRESHOLD]
rospy.loginfo('found %d valid face(s)', len(recognitions))
if len(recognitions) != 1:
rospy.loginfo("Too many faces: {}".format(len(recognitions)))
return False
recognition = recognitions[0]
rospy.loginfo('annotating that face as %s', name)
try:
self._annotate_srv(image=image, annotations=[Annotation(label=name, roi=recognition.roi)])
except rospy.ServiceException as e:
rospy.logerr('annotate failed: {}'.format(e))
return False
return True
def detect_faces(self, image=None, stamp=False):
"""
Snap an image with the camera and return the recognized faces.
:param image: image to use for recognition
:type image: sensor_msgs/Image
:param stamp: Return recognitions and stamp
:type stamp: bool
:return: recognitions of the faces
:rtype: list[image_recognition_msgs/Recognition]
"""
if not image:
image = self.get_image()
if stamp:
return self._get_faces(image).recognitions, image.header.stamp
else:
return self._get_faces(image).recognitions
@staticmethod
def get_best_face_recognition(recognitions, desired_label, probability_threshold=4.0):
"""
Returns the Recognition with the highest probability of having the desired_label.
Assumes that the probability distributions in Recognition are already sorted by probability (descending, highest first)
:param recognitions: The recognitions to select the best one with desired_label from
:type recognitions: list[image_recognition_msgs/Recognition]
:param desired_label: what label to look for in the recognitions
:type desired_label: str
:param probability_threshold: only accept recognitions with probability higher than threshold
:type probability_threshold: double
:return the best recognition matching the given desired_label
:rtype image_recognition_msgs/Recognition
"""
rospy.logdebug("get_best_face_recognition: recognitions = {}".format(recognitions))
# Only take detections with operator
# detections = []
# The old implementation took, for each recognition, the (label, prob) pairs where label==desired_label.
# Other pairs in the same distribution may have higher probability.
# When the best_recognition is picked, it picked the recognition where the probability for the desired_label is hhighest comapared to other recognitions. BUT: a recognitions highest probability may be for a different label
# because the selection only compares matching labels, not looking at the probability of non-matching pairs.
# For example: we have 2 recognitions.
# in recognition 1, A has 50%, desired_label has 30%, B has 20%.
# in recognition 2, B has 60%, desired_label has 35%, A has 5%.
# Then, recognition 2 has the highest probability for the desired_label and is thus picked.
# Because we take the [0]'th index of the distribution, that name is B
#
# Solution: because the probability distributions are sorted, just take the probability distribution where the desired label has the highest probability.
#for recog in recognitions:
# for cp in recog.categorical_distribution.probabilities:
# if cp.label == desired_label:
# detections.append((recog, cp.probability))
# Sort based on probability
#if detections:
# sorted_detections = sorted(detections, key=lambda det: det[1])
# best_detection = sorted_detections[0][0] # A CategoricalDistribution in a Recognition is already ordered, max prob is at [0]
#else:
# best_detection = None
rospy.loginfo("Probability threshold %.2f", probability_threshold)
for index, recog in enumerate(recognitions):
rospy.loginfo("{index}: {dist}".format(index=index,
dist=[(cp.label, "{:.2f}".format(cp.probability)) for cp in recog.categorical_distribution.probabilities]))
matching_recognitions = [recog for recog in recognitions if \
recog.categorical_distribution.probabilities and \
recog.categorical_distribution.probabilities[0].label == desired_label]
if matching_recognitions:
best_recognition = max(matching_recognitions, key=lambda recog: recog.categorical_distribution.probabilities[0].probability)
return best_recognition if best_recognition.categorical_distribution.probabilities[0].probability > probability_threshold else None
else:
return None # TODO: Maybe so something smart with selecting a recognition where the desired_label is not the most probable for a recognition?
def clear_face(self):
| """
clearing all faces from the OpenFace node.
:return: no return
"""
rospy.loginfo('clearing all learned faces')
self._clear_srv() | identifier_body | |
perception.py | Perception(RobotPart):
def __init__(self, robot_name, tf_listener):
super(Perception, self).__init__(robot_name=robot_name, tf_listener=tf_listener)
self._camera_lazy_sub = None
self._camera_cv = Condition()
self._camera_last_image = None
self._annotate_srv = self.create_service_client('/' + robot_name + '/face_recognition/annotate', Annotate)
self._recognize_srv = self.create_service_client('/' + robot_name + '/face_recognition/recognize', Recognize)
self._clear_srv = self.create_service_client('/' + robot_name + '/face_recognition/clear', Empty)
self._face_properties_srv = self.create_service_client('/' + robot_name + '/face_recognition/get_face_properties', GetFaceProperties)
self._projection_srv = self.create_service_client('/' + robot_name + '/top_kinect/project_2d_to_3d',
Project2DTo3D)
def close(self):
pass
def reset(self, timeout=0):
pass
def _image_cb(self, image):
self._camera_cv.acquire()
self._camera_last_image = image
self._camera_cv.notify()
self._camera_cv.release()
def get_image(self, timeout=5):
# lazy subscribe to the kinect
if not self._camera_lazy_sub:
# for test with tripod kinect
# self._camera_lazy_sub = rospy.Subscriber("/camera/rgb/image_rect_color", Image, self._image_cb)
# for the robot
rospy.loginfo("Creating subscriber")
self._camera_lazy_sub = rospy.Subscriber("/" + self.robot_name + "/top_kinect/rgb/image", Image, self._image_cb)
rospy.loginfo('lazy subscribe to %s', self._camera_lazy_sub.name)
rospy.loginfo("getting one image...")
self._camera_cv.acquire()
self._camera_last_image = None
for i in range(timeout):
if self._camera_last_image:
rospy.loginfo("len(self._camera_last_image): {}".format(len(self._camera_last_image.data)))
break
else:
rospy.loginfo("self._camera_last_image: {}".format(self._camera_last_image))
if rospy.is_shutdown():
return
self._camera_cv.wait(timeout=1)
else:
raise Exception('no image received from %s' % self._camera_lazy_sub.name)
image = self._camera_last_image
self._camera_cv.release()
rospy.loginfo("got %d bytes of image data", len(image.data))
return image
def project_roi(self, roi, frame_id=None):
""" Projects a region of interest of a depth image to a 3D Point. Hereto, a service is used
:param roi: sensor_msgs/RegionOfInterest
:param frame_id: if specified, the result is transformed into this frame id
:return: VectorStamped object
"""
response = self.project_rois(rois=[roi]).points[0]
# Convert to VectorStamped
result = VectorStamped(x=response.point.x, y=response.point.y, z=response.point.z,
frame_id=response.header.frame_id)
# If necessary, transform the point
if frame_id is not None:
print("Transforming roi to {}".format(frame_id))
result = result.projectToFrame(frame_id=frame_id, tf_listener=self.tf_listener)
# Return the result
return result
def project_rois(self, rois):
# Call the service with the provided Region of Interest
try:
points = self._projection_srv(rois=rois)
except rospy.ServiceException as e:
raise ValueError('project_roi failed', e)
else:
rospy.loginfo('project_rois response: %s', points)
return points
# OpenFace
def _get_faces(self, image=None):
if not image:
image = self.get_image()
try:
r = self._recognize_srv(image=image)
rospy.loginfo('found %d face(s) in the image', len(r.recognitions))
except rospy.ServiceException as e:
rospy.logerr(e.message)
r = RecognizeResponse()
return r
def learn_person(self, name='operator'):
HEIGHT_TRESHOLD = 88
WIDTH_TRESHOLD = 88
try:
image = self.get_image()
except:
rospy.logerr("Cannot get image")
return False
raw_recognitions = self._get_faces(image).recognitions
recognitions = [r for r in raw_recognitions if r.roi.height > HEIGHT_TRESHOLD and r.roi.width > WIDTH_TRESHOLD]
rospy.loginfo('found %d valid face(s)', len(recognitions))
if len(recognitions) != 1:
rospy.loginfo("Too many faces: {}".format(len(recognitions)))
return False
recognition = recognitions[0]
rospy.loginfo('annotating that face as %s', name)
try:
self._annotate_srv(image=image, annotations=[Annotation(label=name, roi=recognition.roi)])
except rospy.ServiceException as e:
rospy.logerr('annotate failed: {}'.format(e))
return False
return True
def detect_faces(self, image=None, stamp=False):
"""
Snap an image with the camera and return the recognized faces.
:param image: image to use for recognition
:type image: sensor_msgs/Image
:param stamp: Return recognitions and stamp
:type stamp: bool
:return: recognitions of the faces
:rtype: list[image_recognition_msgs/Recognition]
"""
if not image:
image = self.get_image()
if stamp:
return self._get_faces(image).recognitions, image.header.stamp
else:
return self._get_faces(image).recognitions
@staticmethod
def get_best_face_recognition(recognitions, desired_label, probability_threshold=4.0):
"""
Returns the Recognition with the highest probability of having the desired_label.
Assumes that the probability distributions in Recognition are already sorted by probability (descending, highest first)
:param recognitions: The recognitions to select the best one with desired_label from
:type recognitions: list[image_recognition_msgs/Recognition]
:param desired_label: what label to look for in the recognitions
:type desired_label: str
:param probability_threshold: only accept recognitions with probability higher than threshold
:type probability_threshold: double
:return the best recognition matching the given desired_label
:rtype image_recognition_msgs/Recognition
"""
rospy.logdebug("get_best_face_recognition: recognitions = {}".format(recognitions))
# Only take detections with operator
# detections = []
# The old implementation took, for each recognition, the (label, prob) pairs where label==desired_label.
# Other pairs in the same distribution may have higher probability.
# When the best_recognition is picked, it picked the recognition where the probability for the desired_label is hhighest comapared to other recognitions. BUT: a recognitions highest probability may be for a different label
# because the selection only compares matching labels, not looking at the probability of non-matching pairs.
# For example: we have 2 recognitions.
# in recognition 1, A has 50%, desired_label has 30%, B has 20%.
# in recognition 2, B has 60%, desired_label has 35%, A has 5%.
# Then, recognition 2 has the highest probability for the desired_label and is thus picked.
# Because we take the [0]'th index of the distribution, that name is B
#
# Solution: because the probability distributions are sorted, just take the probability distribution where the desired label has the highest probability.
#for recog in recognitions:
# for cp in recog.categorical_distribution.probabilities:
# if cp.label == desired_label:
# detections.append((recog, cp.probability))
# Sort based on probability
#if detections:
# sorted_detections = sorted(detections, key=lambda det: det[1])
# best_detection = sorted_detections[0][0] # A CategoricalDistribution in a Recognition is already ordered, max prob is at [0]
#else:
# best_detection = None
rospy.loginfo("Probability threshold %.2f", probability_threshold)
for index, recog in enumerate(recognitions):
rospy.loginfo("{index}: {dist}".format(index=index,
dist=[(cp.label, "{:.2f}".format(cp.probability)) for cp in recog.categorical_distribution.probabilities]))
matching_recognitions = [recog for recog in recognitions if \
recog.categorical_distribution.probabilities and \
recog.categorical_distribution.probabilities[0].label == desired_label]
if matching_recognitions:
best_recognition = max(matching_recognitions, key=lambda recog: recog.categorical_distribution.probabilities[0].probability) |
def clear_face(self):
| return best_recognition if best_recognition.categorical_distribution.probabilities[0].probability > probability_threshold else None
else:
return None # TODO: Maybe so something smart with selecting a recognition where the desired_label is not the most probable for a recognition? | random_line_split |
perception.py | Perception(RobotPart):
def __init__(self, robot_name, tf_listener):
super(Perception, self).__init__(robot_name=robot_name, tf_listener=tf_listener)
self._camera_lazy_sub = None
self._camera_cv = Condition()
self._camera_last_image = None
self._annotate_srv = self.create_service_client('/' + robot_name + '/face_recognition/annotate', Annotate)
self._recognize_srv = self.create_service_client('/' + robot_name + '/face_recognition/recognize', Recognize)
self._clear_srv = self.create_service_client('/' + robot_name + '/face_recognition/clear', Empty)
self._face_properties_srv = self.create_service_client('/' + robot_name + '/face_recognition/get_face_properties', GetFaceProperties)
self._projection_srv = self.create_service_client('/' + robot_name + '/top_kinect/project_2d_to_3d',
Project2DTo3D)
def close(self):
pass
def reset(self, timeout=0):
pass
def | (self, image):
self._camera_cv.acquire()
self._camera_last_image = image
self._camera_cv.notify()
self._camera_cv.release()
def get_image(self, timeout=5):
# lazy subscribe to the kinect
if not self._camera_lazy_sub:
# for test with tripod kinect
# self._camera_lazy_sub = rospy.Subscriber("/camera/rgb/image_rect_color", Image, self._image_cb)
# for the robot
rospy.loginfo("Creating subscriber")
self._camera_lazy_sub = rospy.Subscriber("/" + self.robot_name + "/top_kinect/rgb/image", Image, self._image_cb)
rospy.loginfo('lazy subscribe to %s', self._camera_lazy_sub.name)
rospy.loginfo("getting one image...")
self._camera_cv.acquire()
self._camera_last_image = None
for i in range(timeout):
if self._camera_last_image:
rospy.loginfo("len(self._camera_last_image): {}".format(len(self._camera_last_image.data)))
break
else:
rospy.loginfo("self._camera_last_image: {}".format(self._camera_last_image))
if rospy.is_shutdown():
return
self._camera_cv.wait(timeout=1)
else:
raise Exception('no image received from %s' % self._camera_lazy_sub.name)
image = self._camera_last_image
self._camera_cv.release()
rospy.loginfo("got %d bytes of image data", len(image.data))
return image
def project_roi(self, roi, frame_id=None):
""" Projects a region of interest of a depth image to a 3D Point. Hereto, a service is used
:param roi: sensor_msgs/RegionOfInterest
:param frame_id: if specified, the result is transformed into this frame id
:return: VectorStamped object
"""
response = self.project_rois(rois=[roi]).points[0]
# Convert to VectorStamped
result = VectorStamped(x=response.point.x, y=response.point.y, z=response.point.z,
frame_id=response.header.frame_id)
# If necessary, transform the point
if frame_id is not None:
print("Transforming roi to {}".format(frame_id))
result = result.projectToFrame(frame_id=frame_id, tf_listener=self.tf_listener)
# Return the result
return result
def project_rois(self, rois):
# Call the service with the provided Region of Interest
try:
points = self._projection_srv(rois=rois)
except rospy.ServiceException as e:
raise ValueError('project_roi failed', e)
else:
rospy.loginfo('project_rois response: %s', points)
return points
# OpenFace
def _get_faces(self, image=None):
if not image:
image = self.get_image()
try:
r = self._recognize_srv(image=image)
rospy.loginfo('found %d face(s) in the image', len(r.recognitions))
except rospy.ServiceException as e:
rospy.logerr(e.message)
r = RecognizeResponse()
return r
def learn_person(self, name='operator'):
HEIGHT_TRESHOLD = 88
WIDTH_TRESHOLD = 88
try:
image = self.get_image()
except:
rospy.logerr("Cannot get image")
return False
raw_recognitions = self._get_faces(image).recognitions
recognitions = [r for r in raw_recognitions if r.roi.height > HEIGHT_TRESHOLD and r.roi.width > WIDTH_TRESHOLD]
rospy.loginfo('found %d valid face(s)', len(recognitions))
if len(recognitions) != 1:
rospy.loginfo("Too many faces: {}".format(len(recognitions)))
return False
recognition = recognitions[0]
rospy.loginfo('annotating that face as %s', name)
try:
self._annotate_srv(image=image, annotations=[Annotation(label=name, roi=recognition.roi)])
except rospy.ServiceException as e:
rospy.logerr('annotate failed: {}'.format(e))
return False
return True
def detect_faces(self, image=None, stamp=False):
"""
Snap an image with the camera and return the recognized faces.
:param image: image to use for recognition
:type image: sensor_msgs/Image
:param stamp: Return recognitions and stamp
:type stamp: bool
:return: recognitions of the faces
:rtype: list[image_recognition_msgs/Recognition]
"""
if not image:
image = self.get_image()
if stamp:
return self._get_faces(image).recognitions, image.header.stamp
else:
return self._get_faces(image).recognitions
@staticmethod
def get_best_face_recognition(recognitions, desired_label, probability_threshold=4.0):
"""
Returns the Recognition with the highest probability of having the desired_label.
Assumes that the probability distributions in Recognition are already sorted by probability (descending, highest first)
:param recognitions: The recognitions to select the best one with desired_label from
:type recognitions: list[image_recognition_msgs/Recognition]
:param desired_label: what label to look for in the recognitions
:type desired_label: str
:param probability_threshold: only accept recognitions with probability higher than threshold
:type probability_threshold: double
:return the best recognition matching the given desired_label
:rtype image_recognition_msgs/Recognition
"""
rospy.logdebug("get_best_face_recognition: recognitions = {}".format(recognitions))
# Only take detections with operator
# detections = []
# The old implementation took, for each recognition, the (label, prob) pairs where label==desired_label.
# Other pairs in the same distribution may have higher probability.
# When the best_recognition is picked, it picked the recognition where the probability for the desired_label is hhighest comapared to other recognitions. BUT: a recognitions highest probability may be for a different label
# because the selection only compares matching labels, not looking at the probability of non-matching pairs.
# For example: we have 2 recognitions.
# in recognition 1, A has 50%, desired_label has 30%, B has 20%.
# in recognition 2, B has 60%, desired_label has 35%, A has 5%.
# Then, recognition 2 has the highest probability for the desired_label and is thus picked.
# Because we take the [0]'th index of the distribution, that name is B
#
# Solution: because the probability distributions are sorted, just take the probability distribution where the desired label has the highest probability.
#for recog in recognitions:
# for cp in recog.categorical_distribution.probabilities:
# if cp.label == desired_label:
# detections.append((recog, cp.probability))
# Sort based on probability
#if detections:
# sorted_detections = sorted(detections, key=lambda det: det[1])
# best_detection = sorted_detections[0][0] # A CategoricalDistribution in a Recognition is already ordered, max prob is at [0]
#else:
# best_detection = None
rospy.loginfo("Probability threshold %.2f", probability_threshold)
for index, recog in enumerate(recognitions):
rospy.loginfo("{index}: {dist}".format(index=index,
dist=[(cp.label, "{:.2f}".format(cp.probability)) for cp in recog.categorical_distribution.probabilities]))
matching_recognitions = [recog for recog in recognitions if \
recog.categorical_distribution.probabilities and \
recog.categorical_distribution.probabilities[0].label == desired_label]
if matching_recognitions:
best_recognition = max(matching_recognitions, key=lambda recog: recog.categorical_distribution.probabilities[0].probability)
return best_recognition if best_recognition.categorical_distribution.probabilities[0].probability > probability_threshold else None
else:
return None # TODO: Maybe so something smart with selecting a recognition where the desired_label is not the most probable for a recognition?
def clear_face(self):
"""
| _image_cb | identifier_name |
routeOperator.go | ile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
tep := new(netv1alpha1.TunnelEndpoint)
var err error
if err = rc.Get(ctx, req.NamespacedName, tep); err != nil && !k8sApiErrors.IsNotFound(err) {
klog.Errorf("unable to fetch resource {%s} :%v", req.String(), err)
return result, err
}
// In case the resource does not exist anymore, we just forget it.
if k8sApiErrors.IsNotFound(err) {
return result, nil
}
// Here we check that the tunnelEndpoint resource has been fully processed. If not we do nothing.
if tep.Status.GatewayIP == "" {
return result, nil
}
clusterIdentity := tep.Spec.ClusterIdentity
_, remotePodCIDR := liqonetutils.GetPodCIDRS(tep)
_, remoteExternalCIDR := liqonetutils.GetExternalCIDRS(tep)
// Examine DeletionTimestamp to determine if object is under deletion.
if tep.ObjectMeta.DeletionTimestamp.IsZero() {
if !controllerutil.ContainsFinalizer(tep, liqoconst.LiqoRouteFinalizer(rc.podIP)) {
// The object is not being deleted, so if it does not have our finalizer,
// then lets add the finalizer and update the object. This is equivalent
// registering our finalizer.
controllerutil.AddFinalizer(tep, liqoconst.LiqoRouteFinalizer(rc.podIP))
if err := rc.Update(ctx, tep); err != nil {
if k8sApiErrors.IsConflict(err) {
klog.V(4).Infof("%s -> unable to add finalizers to resource {%s}: %s", clusterIdentity, req.String(), err)
return result, err
}
klog.Errorf("%s -> unable to add finalizers to resource {%s}: %s", clusterIdentity, req.String(), err)
return result, err
}
}
} else {
// The object is being deleted, if we encounter an error while removing the routes than we record an
// event on the resource to notify the user. The finalizer is not removed.
if controllerutil.ContainsFinalizer(tep, liqoconst.LiqoRouteFinalizer(rc.podIP)) {
klog.Infof("resource {%s} of type {%s} is being removed", tep.Name, tep.GroupVersionKind().String())
deleted, err := rc.RemoveRoutesPerCluster(tep)
if err != nil {
klog.Errorf("%s -> unable to remove route for destinations {%s} and {%s}: %s",
clusterIdentity, remotePodCIDR, remoteExternalCIDR, err)
rc.Eventf(tep, "Warning", "Processing", "unable to remove route: %s", err.Error())
return result, err
}
if deleted {
klog.Infof("%s -> route for destinations {%s} and {%s} correctly removed",
clusterIdentity, remotePodCIDR, remoteExternalCIDR)
rc.Eventf(tep, "Normal", "Processing", "route for destination {%s} and {%s} correctly removed",
remotePodCIDR, remoteExternalCIDR)
}
// remove the finalizer from the list and update it.
controllerutil.RemoveFinalizer(tep, liqoconst.LiqoRouteFinalizer(rc.podIP))
if err := rc.Update(ctx, tep); err != nil {
if k8sApiErrors.IsConflict(err) {
klog.V(4).Infof("%s -> unable to add finalizers to resource {%s}: %s", clusterIdentity, req.String(), err)
return result, err
}
klog.Errorf("%s -> unable to remove finalizers from resource {%s}: %s", clusterIdentity, req.String(), err)
return result, err
}
}
return result, nil
}
added, err := rc.EnsureRoutesPerCluster(tep)
if err != nil {
klog.Errorf("%s -> unable to configure route for destinations {%s} and {%s}: %s",
clusterIdentity, remotePodCIDR, remoteExternalCIDR, err)
rc.Eventf(tep, "Warning", "Processing", "unable to configure route for destinations {%s} and {%s}: %s",
remotePodCIDR, remoteExternalCIDR, err.Error())
return result, err
}
if added {
klog.Infof("%s -> route for destinations {%s} and {%s} correctly configured", clusterIdentity, remotePodCIDR, remoteExternalCIDR)
rc.Eventf(tep, "Normal", "Processing", "route for destinations {%s} and {%s} configured", remotePodCIDR, remoteExternalCIDR)
}
return result, nil
}
// ConfigureFirewall launches a long-running go routine that ensures the firewall configuration.
func (rc *RouteController) ConfigureFirewall() error {
iptHandler, err := iptables.New()
if err != nil {
return err
}
rc.firewallChan = make(chan bool)
fwRules := generateRules(rc.vxlanDev.Link.Name)
go func() {
ticker := time.NewTicker(5 * time.Second)
defer ticker.Stop()
for {
select {
case <-ticker.C: // every five seconds we enforce the firewall rules.
for i := range fwRules {
if err := addRule(iptHandler, &fwRules[i]); err != nil {
klog.Errorf("unable to insert firewall rule {%s}: %v", fwRules[i].String(), err)
} else {
klog.V(5).Infof("firewall rule {%s} configured", fwRules[i].String())
}
}
case <-rc.firewallChan:
for i := range fwRules {
if err := deleteRule(iptHandler, &fwRules[i]); err != nil {
klog.Errorf("unable to remove firewall rule {%s}: %v", fwRules[i].String(), err)
} else {
klog.V(5).Infof("firewall rule {%s} removed", fwRules[i].String())
}
}
close(rc.firewallChan)
return
}
}
}()
return nil
}
// cleanUp removes all the routes, rules and devices (if any) from the
// node inserted by the operator. It is called at exit time.
func (rc *RouteController) cleanUp() {
if rc.firewallChan != nil {
// send signal to clean firewall rules and close the go routine.
rc.firewallChan <- true
// wait for the go routine to clean up.
<-rc.firewallChan
}
if rc.Routing != nil {
if err := rc.Routing.CleanRoutingTable(); err != nil {
klog.Errorf("un error occurred while cleaning up routes: %v", err)
}
if err := rc.Routing.CleanPolicyRules(); err != nil {
klog.Errorf("un error occurred while cleaning up policy routing rules: %v", err)
}
}
if rc.vxlanDev != nil {
err := netlink.LinkDel(rc.vxlanDev.Link)
if err != nil && err.Error() != "Link not found" {
klog.Errorf("an error occurred while deleting vxlan device {%s}: %v", rc.vxlanDev.Link.Name, err)
}
}
// Attempt to remove our finalizer from all tunnel endpoints. In case this operation fails,
// the cleanup will be performed by tunnel-operator when a tunnel endpoint is going to be deleted.
var teps netv1alpha1.TunnelEndpointList
if err := rc.List(context.Background(), &teps); err != nil {
klog.Errorf("an error occurred while listing tunnel endpoints: %v", err)
return
}
for i := range teps.Items {
original := teps.Items[i].DeepCopy()
if controllerutil.RemoveFinalizer(&teps.Items[i], liqoconst.LiqoRouteFinalizer(rc.podIP)) {
// Using patch instead of update, to prevent issues in case of conflicts.
if err := rc.Client.Patch(context.Background(), &teps.Items[i], client.MergeFrom(original)); err != nil {
klog.Errorf("%s -> unable to remove finalizer from tunnel endpoint %q: %v",
original.Spec.ClusterIdentity, klog.KObj(&teps.Items[i]), err)
continue
}
klog.V(4).Infof("%s -> finalizer successfully removed from tunnel endpoint %q", original.Spec.ClusterIdentity, klog.KObj(&teps.Items[i]))
}
}
}
// SetupWithManager used to set up the controller with a given manager.
func (rc *RouteController) SetupWithManager(mgr ctrl.Manager) error | {
resourceToBeProccesedPredicate := predicate.Funcs{
DeleteFunc: func(e event.DeleteEvent) bool {
// Finalizers are used to check if a resource is being deleted, and perform there the needed actions
// we don't want to reconcile on the delete of a resource.
return false
},
}
return ctrl.NewControllerManagedBy(mgr).WithEventFilter(resourceToBeProccesedPredicate).
For(&netv1alpha1.TunnelEndpoint{}).
Complete(rc)
} | identifier_body | |
routeOperator.go | "
netv1alpha1 "github.com/liqotech/liqo/apis/net/v1alpha1"
liqoconst "github.com/liqotech/liqo/pkg/consts"
"github.com/liqotech/liqo/pkg/liqonet/overlay"
liqorouting "github.com/liqotech/liqo/pkg/liqonet/routing"
liqonetutils "github.com/liqotech/liqo/pkg/liqonet/utils"
)
var (
result = ctrl.Result{}
)
// RouteController reconciles a TunnelEndpoint object.
type RouteController struct {
client.Client
record.EventRecorder
liqorouting.Routing
vxlanDev *overlay.VxlanDevice
podIP string
firewallChan chan bool
}
// NewRouteController returns a configured route controller ready to be started.
func NewRouteController(podIP string, vxlanDevice *overlay.VxlanDevice, router liqorouting.Routing, er record.EventRecorder,
cl client.Client) *RouteController {
r := &RouteController{
Client: cl,
Routing: router,
vxlanDev: vxlanDevice,
EventRecorder: er,
podIP: podIP,
}
return r
}
// cluster-role
// +kubebuilder:rbac:groups=net.liqo.io,resources=tunnelendpoints,verbs=get;list;watch;update;patch
// +kubebuilder:rbac:groups=net.liqo.io,resources=tunnelendpoints/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=core,resources=events,verbs=create;patch
// +kubebuilder:rbac:groups=core,resources=nodes,verbs=get
// role
// +kubebuilder:rbac:groups=core,namespace="do-not-care",resources=secrets,verbs=create;update;patch;get;list;watch;delete
// +kubebuilder:rbac:groups=core,resources=pods,verbs=update;patch;get;list;watch
// +kubebuilder:rbac:groups=core,namespace="do-not-care",resources=services,verbs=update;patch;get;list;watch
// Reconcile handle requests on TunnelEndpoint object to create and configure routes on Nodes.
func (rc *RouteController) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
tep := new(netv1alpha1.TunnelEndpoint)
var err error
if err = rc.Get(ctx, req.NamespacedName, tep); err != nil && !k8sApiErrors.IsNotFound(err) {
klog.Errorf("unable to fetch resource {%s} :%v", req.String(), err)
return result, err
}
// In case the resource does not exist anymore, we just forget it.
if k8sApiErrors.IsNotFound(err) {
return result, nil
}
// Here we check that the tunnelEndpoint resource has been fully processed. If not we do nothing.
if tep.Status.GatewayIP == "" {
return result, nil
}
clusterIdentity := tep.Spec.ClusterIdentity
_, remotePodCIDR := liqonetutils.GetPodCIDRS(tep)
_, remoteExternalCIDR := liqonetutils.GetExternalCIDRS(tep)
// Examine DeletionTimestamp to determine if object is under deletion.
if tep.ObjectMeta.DeletionTimestamp.IsZero() {
if !controllerutil.ContainsFinalizer(tep, liqoconst.LiqoRouteFinalizer(rc.podIP)) {
// The object is not being deleted, so if it does not have our finalizer,
// then lets add the finalizer and update the object. This is equivalent
// registering our finalizer.
controllerutil.AddFinalizer(tep, liqoconst.LiqoRouteFinalizer(rc.podIP))
if err := rc.Update(ctx, tep); err != nil {
if k8sApiErrors.IsConflict(err) {
klog.V(4).Infof("%s -> unable to add finalizers to resource {%s}: %s", clusterIdentity, req.String(), err)
return result, err
}
klog.Errorf("%s -> unable to add finalizers to resource {%s}: %s", clusterIdentity, req.String(), err)
return result, err
}
}
} else {
// The object is being deleted, if we encounter an error while removing the routes than we record an
// event on the resource to notify the user. The finalizer is not removed.
if controllerutil.ContainsFinalizer(tep, liqoconst.LiqoRouteFinalizer(rc.podIP)) {
klog.Infof("resource {%s} of type {%s} is being removed", tep.Name, tep.GroupVersionKind().String())
deleted, err := rc.RemoveRoutesPerCluster(tep)
if err != nil {
klog.Errorf("%s -> unable to remove route for destinations {%s} and {%s}: %s",
clusterIdentity, remotePodCIDR, remoteExternalCIDR, err)
rc.Eventf(tep, "Warning", "Processing", "unable to remove route: %s", err.Error())
return result, err
}
if deleted {
klog.Infof("%s -> route for destinations {%s} and {%s} correctly removed",
clusterIdentity, remotePodCIDR, remoteExternalCIDR)
rc.Eventf(tep, "Normal", "Processing", "route for destination {%s} and {%s} correctly removed",
remotePodCIDR, remoteExternalCIDR)
}
// remove the finalizer from the list and update it.
controllerutil.RemoveFinalizer(tep, liqoconst.LiqoRouteFinalizer(rc.podIP))
if err := rc.Update(ctx, tep); err != nil {
if k8sApiErrors.IsConflict(err) {
klog.V(4).Infof("%s -> unable to add finalizers to resource {%s}: %s", clusterIdentity, req.String(), err)
return result, err
}
klog.Errorf("%s -> unable to remove finalizers from resource {%s}: %s", clusterIdentity, req.String(), err)
return result, err
}
}
return result, nil
}
added, err := rc.EnsureRoutesPerCluster(tep)
if err != nil {
klog.Errorf("%s -> unable to configure route for destinations {%s} and {%s}: %s",
clusterIdentity, remotePodCIDR, remoteExternalCIDR, err)
rc.Eventf(tep, "Warning", "Processing", "unable to configure route for destinations {%s} and {%s}: %s",
remotePodCIDR, remoteExternalCIDR, err.Error())
return result, err
}
if added {
klog.Infof("%s -> route for destinations {%s} and {%s} correctly configured", clusterIdentity, remotePodCIDR, remoteExternalCIDR)
rc.Eventf(tep, "Normal", "Processing", "route for destinations {%s} and {%s} configured", remotePodCIDR, remoteExternalCIDR)
}
return result, nil
}
// ConfigureFirewall launches a long-running go routine that ensures the firewall configuration.
func (rc *RouteController) ConfigureFirewall() error {
iptHandler, err := iptables.New()
if err != nil {
return err
}
rc.firewallChan = make(chan bool)
fwRules := generateRules(rc.vxlanDev.Link.Name)
go func() {
ticker := time.NewTicker(5 * time.Second)
defer ticker.Stop()
for | }
}
}()
return nil
}
// cleanUp removes all the routes, rules and devices (if any) from the
// node inserted by the operator. It is called at exit time.
func (rc *RouteController) cleanUp() {
if rc.firewallChan != nil {
// send signal to clean firewall rules and close the go routine.
rc.firewallChan <- true
// wait for the go routine to clean up.
<-rc.firewallChan
}
if rc.Routing != nil {
if err := rc.Routing.CleanRoutingTable(); err != nil {
klog.Errorf("un error occurred while cleaning up routes: %v", err)
}
if err := rc.Routing.CleanPolicyRules(); err | {
select {
case <-ticker.C: // every five seconds we enforce the firewall rules.
for i := range fwRules {
if err := addRule(iptHandler, &fwRules[i]); err != nil {
klog.Errorf("unable to insert firewall rule {%s}: %v", fwRules[i].String(), err)
} else {
klog.V(5).Infof("firewall rule {%s} configured", fwRules[i].String())
}
}
case <-rc.firewallChan:
for i := range fwRules {
if err := deleteRule(iptHandler, &fwRules[i]); err != nil {
klog.Errorf("unable to remove firewall rule {%s}: %v", fwRules[i].String(), err)
} else {
klog.V(5).Infof("firewall rule {%s} removed", fwRules[i].String())
}
}
close(rc.firewallChan)
return | conditional_block |
routeOperator.go | "
netv1alpha1 "github.com/liqotech/liqo/apis/net/v1alpha1"
liqoconst "github.com/liqotech/liqo/pkg/consts"
"github.com/liqotech/liqo/pkg/liqonet/overlay"
liqorouting "github.com/liqotech/liqo/pkg/liqonet/routing"
liqonetutils "github.com/liqotech/liqo/pkg/liqonet/utils"
)
var (
result = ctrl.Result{}
)
// RouteController reconciles a TunnelEndpoint object.
type RouteController struct {
client.Client
record.EventRecorder
liqorouting.Routing
vxlanDev *overlay.VxlanDevice
podIP string
firewallChan chan bool
}
// NewRouteController returns a configured route controller ready to be started.
func | (podIP string, vxlanDevice *overlay.VxlanDevice, router liqorouting.Routing, er record.EventRecorder,
cl client.Client) *RouteController {
r := &RouteController{
Client: cl,
Routing: router,
vxlanDev: vxlanDevice,
EventRecorder: er,
podIP: podIP,
}
return r
}
// cluster-role
// +kubebuilder:rbac:groups=net.liqo.io,resources=tunnelendpoints,verbs=get;list;watch;update;patch
// +kubebuilder:rbac:groups=net.liqo.io,resources=tunnelendpoints/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=core,resources=events,verbs=create;patch
// +kubebuilder:rbac:groups=core,resources=nodes,verbs=get
// role
// +kubebuilder:rbac:groups=core,namespace="do-not-care",resources=secrets,verbs=create;update;patch;get;list;watch;delete
// +kubebuilder:rbac:groups=core,resources=pods,verbs=update;patch;get;list;watch
// +kubebuilder:rbac:groups=core,namespace="do-not-care",resources=services,verbs=update;patch;get;list;watch
// Reconcile handle requests on TunnelEndpoint object to create and configure routes on Nodes.
func (rc *RouteController) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
tep := new(netv1alpha1.TunnelEndpoint)
var err error
if err = rc.Get(ctx, req.NamespacedName, tep); err != nil && !k8sApiErrors.IsNotFound(err) {
klog.Errorf("unable to fetch resource {%s} :%v", req.String(), err)
return result, err
}
// In case the resource does not exist anymore, we just forget it.
if k8sApiErrors.IsNotFound(err) {
return result, nil
}
// Here we check that the tunnelEndpoint resource has been fully processed. If not we do nothing.
if tep.Status.GatewayIP == "" {
return result, nil
}
clusterIdentity := tep.Spec.ClusterIdentity
_, remotePodCIDR := liqonetutils.GetPodCIDRS(tep)
_, remoteExternalCIDR := liqonetutils.GetExternalCIDRS(tep)
// Examine DeletionTimestamp to determine if object is under deletion.
if tep.ObjectMeta.DeletionTimestamp.IsZero() {
if !controllerutil.ContainsFinalizer(tep, liqoconst.LiqoRouteFinalizer(rc.podIP)) {
// The object is not being deleted, so if it does not have our finalizer,
// then lets add the finalizer and update the object. This is equivalent
// registering our finalizer.
controllerutil.AddFinalizer(tep, liqoconst.LiqoRouteFinalizer(rc.podIP))
if err := rc.Update(ctx, tep); err != nil {
if k8sApiErrors.IsConflict(err) {
klog.V(4).Infof("%s -> unable to add finalizers to resource {%s}: %s", clusterIdentity, req.String(), err)
return result, err
}
klog.Errorf("%s -> unable to add finalizers to resource {%s}: %s", clusterIdentity, req.String(), err)
return result, err
}
}
} else {
// The object is being deleted, if we encounter an error while removing the routes than we record an
// event on the resource to notify the user. The finalizer is not removed.
if controllerutil.ContainsFinalizer(tep, liqoconst.LiqoRouteFinalizer(rc.podIP)) {
klog.Infof("resource {%s} of type {%s} is being removed", tep.Name, tep.GroupVersionKind().String())
deleted, err := rc.RemoveRoutesPerCluster(tep)
if err != nil {
klog.Errorf("%s -> unable to remove route for destinations {%s} and {%s}: %s",
clusterIdentity, remotePodCIDR, remoteExternalCIDR, err)
rc.Eventf(tep, "Warning", "Processing", "unable to remove route: %s", err.Error())
return result, err
}
if deleted {
klog.Infof("%s -> route for destinations {%s} and {%s} correctly removed",
clusterIdentity, remotePodCIDR, remoteExternalCIDR)
rc.Eventf(tep, "Normal", "Processing", "route for destination {%s} and {%s} correctly removed",
remotePodCIDR, remoteExternalCIDR)
}
// remove the finalizer from the list and update it.
controllerutil.RemoveFinalizer(tep, liqoconst.LiqoRouteFinalizer(rc.podIP))
if err := rc.Update(ctx, tep); err != nil {
if k8sApiErrors.IsConflict(err) {
klog.V(4).Infof("%s -> unable to add finalizers to resource {%s}: %s", clusterIdentity, req.String(), err)
return result, err
}
klog.Errorf("%s -> unable to remove finalizers from resource {%s}: %s", clusterIdentity, req.String(), err)
return result, err
}
}
return result, nil
}
added, err := rc.EnsureRoutesPerCluster(tep)
if err != nil {
klog.Errorf("%s -> unable to configure route for destinations {%s} and {%s}: %s",
clusterIdentity, remotePodCIDR, remoteExternalCIDR, err)
rc.Eventf(tep, "Warning", "Processing", "unable to configure route for destinations {%s} and {%s}: %s",
remotePodCIDR, remoteExternalCIDR, err.Error())
return result, err
}
if added {
klog.Infof("%s -> route for destinations {%s} and {%s} correctly configured", clusterIdentity, remotePodCIDR, remoteExternalCIDR)
rc.Eventf(tep, "Normal", "Processing", "route for destinations {%s} and {%s} configured", remotePodCIDR, remoteExternalCIDR)
}
return result, nil
}
// ConfigureFirewall launches a long-running go routine that ensures the firewall configuration.
func (rc *RouteController) ConfigureFirewall() error {
iptHandler, err := iptables.New()
if err != nil {
return err
}
rc.firewallChan = make(chan bool)
fwRules := generateRules(rc.vxlanDev.Link.Name)
go func() {
ticker := time.NewTicker(5 * time.Second)
defer ticker.Stop()
for {
select {
case <-ticker.C: // every five seconds we enforce the firewall rules.
for i := range fwRules {
if err := addRule(iptHandler, &fwRules[i]); err != nil {
klog.Errorf("unable to insert firewall rule {%s}: %v", fwRules[i].String(), err)
} else {
klog.V(5).Infof("firewall rule {%s} configured", fwRules[i].String())
}
}
case <-rc.firewallChan:
for i := range fwRules {
if err := deleteRule(iptHandler, &fwRules[i]); err != nil {
klog.Errorf("unable to remove firewall rule {%s}: %v", fwRules[i].String(), err)
} else {
klog.V(5).Infof("firewall rule {%s} removed", fwRules[i].String())
}
}
close(rc.firewallChan)
return
}
}
}()
return nil
}
// cleanUp removes all the routes, rules and devices (if any) from the
// node inserted by the operator. It is called at exit time.
func (rc *RouteController) cleanUp() {
if rc.firewallChan != nil {
// send signal to clean firewall rules and close the go routine.
rc.firewallChan <- true
// wait for the go routine to clean up.
<-rc.firewallChan
}
if rc.Routing != nil {
if err := rc.Routing.CleanRoutingTable(); err != nil {
klog.Errorf("un error occurred while cleaning up routes: %v", err)
}
if err := rc.Routing.CleanPolicyRules(); | NewRouteController | identifier_name |
routeOperator.go | concile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
tep := new(netv1alpha1.TunnelEndpoint)
var err error
if err = rc.Get(ctx, req.NamespacedName, tep); err != nil && !k8sApiErrors.IsNotFound(err) {
klog.Errorf("unable to fetch resource {%s} :%v", req.String(), err)
return result, err
}
// In case the resource does not exist anymore, we just forget it.
if k8sApiErrors.IsNotFound(err) {
return result, nil
}
// Here we check that the tunnelEndpoint resource has been fully processed. If not we do nothing.
if tep.Status.GatewayIP == "" {
return result, nil
}
clusterIdentity := tep.Spec.ClusterIdentity
_, remotePodCIDR := liqonetutils.GetPodCIDRS(tep)
_, remoteExternalCIDR := liqonetutils.GetExternalCIDRS(tep)
// Examine DeletionTimestamp to determine if object is under deletion.
if tep.ObjectMeta.DeletionTimestamp.IsZero() {
if !controllerutil.ContainsFinalizer(tep, liqoconst.LiqoRouteFinalizer(rc.podIP)) {
// The object is not being deleted, so if it does not have our finalizer,
// then lets add the finalizer and update the object. This is equivalent
// registering our finalizer.
controllerutil.AddFinalizer(tep, liqoconst.LiqoRouteFinalizer(rc.podIP))
if err := rc.Update(ctx, tep); err != nil {
if k8sApiErrors.IsConflict(err) {
klog.V(4).Infof("%s -> unable to add finalizers to resource {%s}: %s", clusterIdentity, req.String(), err)
return result, err
}
klog.Errorf("%s -> unable to add finalizers to resource {%s}: %s", clusterIdentity, req.String(), err)
return result, err
}
}
} else {
// The object is being deleted, if we encounter an error while removing the routes than we record an
// event on the resource to notify the user. The finalizer is not removed.
if controllerutil.ContainsFinalizer(tep, liqoconst.LiqoRouteFinalizer(rc.podIP)) {
klog.Infof("resource {%s} of type {%s} is being removed", tep.Name, tep.GroupVersionKind().String())
deleted, err := rc.RemoveRoutesPerCluster(tep)
if err != nil {
klog.Errorf("%s -> unable to remove route for destinations {%s} and {%s}: %s",
clusterIdentity, remotePodCIDR, remoteExternalCIDR, err)
rc.Eventf(tep, "Warning", "Processing", "unable to remove route: %s", err.Error())
return result, err
}
if deleted {
klog.Infof("%s -> route for destinations {%s} and {%s} correctly removed",
clusterIdentity, remotePodCIDR, remoteExternalCIDR)
rc.Eventf(tep, "Normal", "Processing", "route for destination {%s} and {%s} correctly removed",
remotePodCIDR, remoteExternalCIDR)
}
// remove the finalizer from the list and update it.
controllerutil.RemoveFinalizer(tep, liqoconst.LiqoRouteFinalizer(rc.podIP))
if err := rc.Update(ctx, tep); err != nil {
if k8sApiErrors.IsConflict(err) {
klog.V(4).Infof("%s -> unable to add finalizers to resource {%s}: %s", clusterIdentity, req.String(), err)
return result, err
}
klog.Errorf("%s -> unable to remove finalizers from resource {%s}: %s", clusterIdentity, req.String(), err)
return result, err
}
}
return result, nil
}
added, err := rc.EnsureRoutesPerCluster(tep)
if err != nil {
klog.Errorf("%s -> unable to configure route for destinations {%s} and {%s}: %s",
clusterIdentity, remotePodCIDR, remoteExternalCIDR, err)
rc.Eventf(tep, "Warning", "Processing", "unable to configure route for destinations {%s} and {%s}: %s",
remotePodCIDR, remoteExternalCIDR, err.Error())
return result, err
}
if added {
klog.Infof("%s -> route for destinations {%s} and {%s} correctly configured", clusterIdentity, remotePodCIDR, remoteExternalCIDR)
rc.Eventf(tep, "Normal", "Processing", "route for destinations {%s} and {%s} configured", remotePodCIDR, remoteExternalCIDR)
}
return result, nil
}
// ConfigureFirewall launches a long-running go routine that ensures the firewall configuration.
func (rc *RouteController) ConfigureFirewall() error {
iptHandler, err := iptables.New()
if err != nil {
return err
}
rc.firewallChan = make(chan bool)
fwRules := generateRules(rc.vxlanDev.Link.Name)
go func() {
ticker := time.NewTicker(5 * time.Second)
defer ticker.Stop()
for {
select {
case <-ticker.C: // every five seconds we enforce the firewall rules.
for i := range fwRules {
if err := addRule(iptHandler, &fwRules[i]); err != nil {
klog.Errorf("unable to insert firewall rule {%s}: %v", fwRules[i].String(), err)
} else {
klog.V(5).Infof("firewall rule {%s} configured", fwRules[i].String())
}
}
case <-rc.firewallChan:
for i := range fwRules {
if err := deleteRule(iptHandler, &fwRules[i]); err != nil {
klog.Errorf("unable to remove firewall rule {%s}: %v", fwRules[i].String(), err)
} else {
klog.V(5).Infof("firewall rule {%s} removed", fwRules[i].String())
}
}
close(rc.firewallChan)
return
}
}
}()
return nil
}
// cleanUp removes all the routes, rules and devices (if any) from the
// node inserted by the operator. It is called at exit time.
func (rc *RouteController) cleanUp() {
if rc.firewallChan != nil {
// send signal to clean firewall rules and close the go routine.
rc.firewallChan <- true
// wait for the go routine to clean up.
<-rc.firewallChan
}
if rc.Routing != nil {
if err := rc.Routing.CleanRoutingTable(); err != nil {
klog.Errorf("un error occurred while cleaning up routes: %v", err)
}
if err := rc.Routing.CleanPolicyRules(); err != nil {
klog.Errorf("un error occurred while cleaning up policy routing rules: %v", err)
}
}
if rc.vxlanDev != nil {
err := netlink.LinkDel(rc.vxlanDev.Link)
if err != nil && err.Error() != "Link not found" {
klog.Errorf("an error occurred while deleting vxlan device {%s}: %v", rc.vxlanDev.Link.Name, err)
}
}
// Attempt to remove our finalizer from all tunnel endpoints. In case this operation fails,
// the cleanup will be performed by tunnel-operator when a tunnel endpoint is going to be deleted.
var teps netv1alpha1.TunnelEndpointList
if err := rc.List(context.Background(), &teps); err != nil {
klog.Errorf("an error occurred while listing tunnel endpoints: %v", err)
return
}
for i := range teps.Items {
original := teps.Items[i].DeepCopy()
if controllerutil.RemoveFinalizer(&teps.Items[i], liqoconst.LiqoRouteFinalizer(rc.podIP)) {
// Using patch instead of update, to prevent issues in case of conflicts.
if err := rc.Client.Patch(context.Background(), &teps.Items[i], client.MergeFrom(original)); err != nil {
klog.Errorf("%s -> unable to remove finalizer from tunnel endpoint %q: %v",
original.Spec.ClusterIdentity, klog.KObj(&teps.Items[i]), err)
continue
}
klog.V(4).Infof("%s -> finalizer successfully removed from tunnel endpoint %q", original.Spec.ClusterIdentity, klog.KObj(&teps.Items[i]))
}
}
}
// SetupWithManager used to set up the controller with a given manager.
func (rc *RouteController) SetupWithManager(mgr ctrl.Manager) error {
resourceToBeProccesedPredicate := predicate.Funcs{
DeleteFunc: func(e event.DeleteEvent) bool {
// Finalizers are used to check if a resource is being deleted, and perform there the needed actions
// we don't want to reconcile on the delete of a resource.
return false
},
}
return ctrl.NewControllerManagedBy(mgr).WithEventFilter(resourceToBeProccesedPredicate).
For(&netv1alpha1.TunnelEndpoint{}).
Complete(rc)
} | random_line_split | ||
th_logistic_regression.py | \frac{1}{|\mathcal{D}|} \mathcal{L} (\theta=\{W,b\}, \mathcal{D}) =
\frac{1}{|\mathcal{D}|} \sum_{i=0}^{|\mathcal{D}|}
\log(P(Y=y^{(i)}|x^{(i)}, W,b)) \\
\ell (\theta=\{W,b\}, \mathcal{D})
:type y: theano.tensor.TensorType
:param y: corresponds to a vector that gives for each example the
correct label
Note: we use the mean instead of the sum so that the learning rate is less dependent on the batch size
"""
# start-snippet-2
# y.shape[0] is (symbolically) the number of rows in y, i.e. number of examples (call it n) in the minibatch
# T.arange(y.shape[0]) is a symbolic vector which will contain [0,1,2,... n-1]
# T.log(self.p_y_given_x) is a matrix of Log-Probabilities (call it LP) with one row per example and one column per class
# LP[T.arange(y.shape[0]),y] is a vector v containing [LP[0,y[0]], LP[1,y[1]], LP[2,y[2]], ..., LP[n-1,y[n-1]]] and
# T.mean(LP[T.arange(y.shape[0]),y]) is the mean (across minibatch examples) of the elements in v, i.e., the mean log-likelihood across the minibatch.
#print "y.ndim = ",y.ndim
return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])
# end-snippet-2
def errors(self, y):
"""Return a float representing the number of errors in the minibatch over the total number of examples of the minibatch ; zero one
loss over the size of the minibatch
:type y: theano.tensor.TensorType
:param y: corresponds to a vector that gives for each example the
correct label
"""
# check if y has same dimension of y_pred
if y.ndim != self.y_pred.ndim:
raise TypeError( 'y should have the same shape as self.y_pred', ('y', y.type, 'y_pred', self.y_pred.type) )
# check if y is of the correct datatype
if y.dtype.startswith('int'):
# the T.neq operator returns a vector of 0s and 1s, where 1
# represents a mistake in prediction
return T.mean(T.neq(self.y_pred, y))
else:
raise NotImplementedError()
def sgd_optimization(data_type, target, model_dir, learning_rate=0.1, n_epochs=10, batch_size=100):
"""
Demonstrate stochastic gradient descent optimization of a log-linear model
:type learning_rate: float
:param learning_rate: learning rate used (factor for the stochastic gradient)
:type n_epochs: int
:param n_epochs: maximal number of epochs to run the optimizer
"""
test_fold = 1 #xxxxxxxxxxxx TEMP XXXXXXXXXXXXXXXX
write_model_file = model_dir + '/model.' + target + '.' + str(test_fold) +'.pkl'
fold_path = helpers.get_fold_path(data_type)
targets = helpers.build_targets(fold_path, data_type)
fnames = targets[target]
fold_accuracies = {}
did_something = False
# pct_ct = []
# roc_auc = []
# run 4 folds vs 1 fold with each possible scenario
# for curr_fl in range(5):
# print 'Building data for target: ' + target + ', fold: ' + str(curr_fl)
# loop through all folds, for now just do 1!
datasets, test_set_labels = helpers.th_load_data(data_type, fold_path, target, fnames, 0, test_fold)
train_set_x, train_set_y = datasets[0]
test_set_x, test_set_y = datasets[1]
valid_set_x = train_set_x
valid_set_y = train_set_y
# compute number of rows for training, validation and testing
rows_train = train_set_x.get_value(borrow=True).shape[0]
rows_valid = valid_set_x.get_value(borrow=True).shape[0]
rows_test = test_set_x.get_value(borrow=True).shape[0]
# compute number of minibatches for training, validation and testing
n_train_batches = rows_train / batch_size
n_valid_batches = rows_valid / batch_size
n_test_batches = rows_test / batch_size
####################### BUILD ACTUAL MODEL #######################
# allocate symbolic variables for the data
index = T.lscalar() # index to a [mini]batch
# generate symbolic variables for input (x and y represent a minibatch)
x = T.matrix('x') # data, presented as rasterized images
y = T.ivector('y') # labels, presented as 1D vector of [int] labels
# construct the logistic regression class
# n_in: Each MNIST image has size 32*32 = 1024
# n_out: 10 different digits - multi-task LR
classifier = LogisticRegression(input=x, n_in=32 * 32, n_out=2)
# the cost we minimize during training is the negative log likelihood of the model in symbolic format
cost = classifier.negative_log_likelihood(y)
# compiling a Theano function that computes the mistakes that are made by the model on a minibatch
test_model = theano.function( inputs=[index], outputs=classifier.errors(y),
givens={
x: test_set_x[index * batch_size: (index + 1) * batch_size],
y: test_set_y[index * batch_size: (index + 1) * batch_size]
}
)
validate_model = theano.function(
inputs=[index],
outputs=classifier.errors(y),
givens={
x: valid_set_x[index * batch_size: (index + 1) * batch_size],
y: valid_set_y[index * batch_size: (index + 1) * batch_size]
}
)
# compute the gradient of cost with respect to theta = (W,b)
g_W = T.grad(cost=cost, wrt=classifier.W)
g_b = T.grad(cost=cost, wrt=classifier.b)
# start-snippet-3
# specify how to update the parameters of the model as a list of
# (variable, update expression) pairs.
updates = [(classifier.W, classifier.W - learning_rate * g_W),
(classifier.b, classifier.b - learning_rate * g_b)]
# compiling a Theano function `train_model` that returns the cost, but in
# the same time updates the parameter of the model based on the rules
# defined in `updates`
train_model = theano.function( inputs=[index], outputs=cost, updates=updates,
givens={
x: train_set_x[index * batch_size: (index + 1) * batch_size],
y: train_set_y[index * batch_size: (index + 1) * batch_size]
}
)
# end-snippet-3
################ TRAIN MODEL ################
# early-stopping parameters
patience = 5000 # look as this many examples regardless
patience_increase = 2 # wait this much longer when a new best is found
improvement_threshold = 0.995 # a relative improvement of this much is considered significant
validation_frequency = min(n_train_batches, patience / 2)
# go through this many minibatches before checking the network on the validation set; in this case we check every epoch
best_validation_loss = numpy.inf
test_score = 0.
start_time = time.clock()
done_looping = False
epoch = 0
while (epoch < n_epochs) and (not done_looping):
epoch = epoch + 1
for minibatch_index in xrange(n_train_batches):
minibatch_avg_cost = train_model(minibatch_index)
# iteration number
iter = (epoch - 1) * n_train_batches + minibatch_index
if (iter + 1) % validation_frequency == 0:
# compute zero-one loss on validation set
validation_losses = [validate_model(i)
for i in xrange(n_valid_batches)]
this_validation_loss = numpy.mean(validation_losses)
# print( 'epoch %i, minibatch %i/%i, validation error %f %%' %
# (epoch, minibatch_index + 1, n_train_batches, this_validation_loss * 100.) )
# if we got the best validation score until now
if this_validation_loss < best_validation_loss:
#improve patience if loss improvement is good enough
if this_validation_loss < best_validation_loss * \
improvement_threshold:
|
best_validation_loss = this_validation_loss
# test it on the test set
test_losses = [test_model(i)
for i in xrange(n_test_batches | patience = max(patience, iter * patience_increase) | conditional_block |
th_logistic_regression.py | = LogisticRegression(input=x, n_in=32 * 32, n_out=2)
# the cost we minimize during training is the negative log likelihood of the model in symbolic format
cost = classifier.negative_log_likelihood(y)
# compiling a Theano function that computes the mistakes that are made by the model on a minibatch
test_model = theano.function( inputs=[index], outputs=classifier.errors(y),
givens={
x: test_set_x[index * batch_size: (index + 1) * batch_size],
y: test_set_y[index * batch_size: (index + 1) * batch_size]
}
)
validate_model = theano.function(
inputs=[index],
outputs=classifier.errors(y),
givens={
x: valid_set_x[index * batch_size: (index + 1) * batch_size],
y: valid_set_y[index * batch_size: (index + 1) * batch_size]
}
)
# compute the gradient of cost with respect to theta = (W,b)
g_W = T.grad(cost=cost, wrt=classifier.W)
g_b = T.grad(cost=cost, wrt=classifier.b)
# start-snippet-3
# specify how to update the parameters of the model as a list of
# (variable, update expression) pairs.
updates = [(classifier.W, classifier.W - learning_rate * g_W),
(classifier.b, classifier.b - learning_rate * g_b)]
# compiling a Theano function `train_model` that returns the cost, but in
# the same time updates the parameter of the model based on the rules
# defined in `updates`
train_model = theano.function( inputs=[index], outputs=cost, updates=updates,
givens={
x: train_set_x[index * batch_size: (index + 1) * batch_size],
y: train_set_y[index * batch_size: (index + 1) * batch_size]
}
)
# end-snippet-3
################ TRAIN MODEL ################
# early-stopping parameters
patience = 5000 # look as this many examples regardless
patience_increase = 2 # wait this much longer when a new best is found
improvement_threshold = 0.995 # a relative improvement of this much is considered significant
validation_frequency = min(n_train_batches, patience / 2)
# go through this many minibatches before checking the network on the validation set; in this case we check every epoch
best_validation_loss = numpy.inf
test_score = 0.
start_time = time.clock()
done_looping = False
epoch = 0
while (epoch < n_epochs) and (not done_looping):
epoch = epoch + 1
for minibatch_index in xrange(n_train_batches):
minibatch_avg_cost = train_model(minibatch_index)
# iteration number
iter = (epoch - 1) * n_train_batches + minibatch_index
if (iter + 1) % validation_frequency == 0:
# compute zero-one loss on validation set
validation_losses = [validate_model(i)
for i in xrange(n_valid_batches)]
this_validation_loss = numpy.mean(validation_losses)
# print( 'epoch %i, minibatch %i/%i, validation error %f %%' %
# (epoch, minibatch_index + 1, n_train_batches, this_validation_loss * 100.) )
# if we got the best validation score until now
if this_validation_loss < best_validation_loss:
#improve patience if loss improvement is good enough
if this_validation_loss < best_validation_loss * \
improvement_threshold:
patience = max(patience, iter * patience_increase)
best_validation_loss = this_validation_loss
# test it on the test set
test_losses = [test_model(i)
for i in xrange(n_test_batches)]
test_score = numpy.mean(test_losses)
# print( (' epoch %i, minibatch %i/%i, test error of best model %f %%' ) %
# ( epoch, minibatch_index + 1, n_train_batches, test_score * 100. ) )
# save the best model
with open(write_model_file, 'w') as f:
cPickle.dump(classifier, f)
if patience <= iter:
done_looping = True
break
end_time = time.clock()
print( ('Optimization complete for %d with best validation score of %f %% with test performance %f %%')
% (test_fold, best_validation_loss * 100., test_score * 100.) )
print 'The code ran for %d epochs, with %f epochs/sec' % (epoch, 1. * epoch / (end_time - start_time))
# print >> sys.stderr, ('The code for file ' + os.path.split(__file__)[1] + ' ran for %.1fs' % ((end_time - start_time)))
# end-snippet-4
# Now we do the predictions
# load the saved best model for this fold
classifier = cPickle.load(open(write_model_file))
# compile a predictor function
predict_model = theano.function(inputs=[classifier.input], outputs=[classifier.y_pred,classifier.p_y_given_x])
# compile a confidence predictor function
# predict_conf_model = theano.function( inputs=[classifier.input], outputs=classifier.p_y_given_x)
# We can test it on some examples from test test
""" *************** build AUC curve *************** """
# get the probability of our predictions
test_set = test_set_x.get_value()
predicted_values, conf_preds = predict_model(test_set[:(rows_test)])
conf_predictions = []
for i in range(len(conf_preds)):
# ignore the first column; this gives a lower score that seems wrong.
conf_predictions.append(conf_preds[i][1])
# determine ROC / AUC
fpr, tpr, thresholds = metrics.roc_curve(test_set_labels, conf_predictions)
auc = metrics.auc(fpr, tpr) # e.g. 0.855
""" *********************************************** """
num_correct = 0
num_false = 0
for i in range(len(predicted_values)):
if predicted_values[i] == test_set_labels[i]:
num_correct += 1
else:
num_false += 1
total = len(predicted_values)
percent_correct = num_correct / float(total)
fold_results = ''
fold_results += '#################### Results for ' + data_type + ' ####################' + '\n'
fold_results += 'target:' + target + ' fold:' + str(test_fold) + ' predicted: ' + \
str(total) + ' wrong: ' + \
str(num_false) + ' pct correct: ' + str(percent_correct) + ', auc: ' + str(auc)
print fold_results
write_predictions_file = model_dir + '/predictions.' + target + '.' + str(test_fold) +'.txt'
with open(write_predictions_file, 'w') as f:
f.write(fold_results + "\n")
# def run_predictions(data_type, curr_target):
# fold_path = get_fold_path(data_type)
# targets = build_targets(fold_path, data_type)
# # print "Found " + str(len(targets)) + " targets for " + data_type
# fold_accuracies = {}
# did_something = False
# for target, fnames in targets.iteritems():
# if (target != curr_target):
# continue
# else:
# did_something = True
# # retrieve our stratified folds
# folds = get_folds(data_type, fold_path, target, fnames)
# pct_ct = []
# roc_auc = []
# # run 4 folds vs 1 fold with each possible scenario
# for curr_fl in range(5):
# print 'Building data for target: ' + target + ', fold: ' + str(curr_fl)
# # folds 1-4
# temp_data = []
# for i in range(len(folds)):
# if(i == curr_fl):
# # don't include the test fold
# continue
# else:
# temp_data += folds[i]
# # vs current 5th test fold
# test_data = folds[curr_fl]
# """ Turning 1024 bits into features is a slow process """
# # build training data
# X = []
# Y = []
# for i in range(len(temp_data)):
# row = []
# for bit in temp_data[i][0]:
# row.append(int(bit))
# X.append(row)
# Y.append(int(temp_data[i][1]))
# X = np.array(X)
# Y = np.array(Y)
# # build test data
# X_test = []
# Y_test = []
# for i in range(len(test_data)):
# row = []
# for bit in test_data[i][0]:
# row.append(int(bit))
# X_test.append(row)
# Y_test.append(int(test_data[i][1]))
# X_test = np.array(X_test)
| # Y_test = np.array(Y_test)
| random_line_split | |
th_logistic_regression.py | self.p_y_given_x = T.nnet.softmax(T.dot(input, self.W) + self.b)
# symbolic description of how to compute prediction as class whose probability is maximal
self.y_pred = T.argmax(self.p_y_given_x, axis=1)
# end-snippet-1
# parameters of the model
self.params = [self.W, self.b]
# keep track of model input
self.input = input
def negative_log_likelihood(self, y):
"""Return the mean of the negative log-likelihood of the prediction of this model under a given target distribution.
.. math::
\frac{1}{|\mathcal{D}|} \mathcal{L} (\theta=\{W,b\}, \mathcal{D}) =
\frac{1}{|\mathcal{D}|} \sum_{i=0}^{|\mathcal{D}|}
\log(P(Y=y^{(i)}|x^{(i)}, W,b)) \\
\ell (\theta=\{W,b\}, \mathcal{D})
:type y: theano.tensor.TensorType
:param y: corresponds to a vector that gives for each example the
correct label
Note: we use the mean instead of the sum so that the learning rate is less dependent on the batch size
"""
# start-snippet-2
# y.shape[0] is (symbolically) the number of rows in y, i.e. number of examples (call it n) in the minibatch
# T.arange(y.shape[0]) is a symbolic vector which will contain [0,1,2,... n-1]
# T.log(self.p_y_given_x) is a matrix of Log-Probabilities (call it LP) with one row per example and one column per class
# LP[T.arange(y.shape[0]),y] is a vector v containing [LP[0,y[0]], LP[1,y[1]], LP[2,y[2]], ..., LP[n-1,y[n-1]]] and
# T.mean(LP[T.arange(y.shape[0]),y]) is the mean (across minibatch examples) of the elements in v, i.e., the mean log-likelihood across the minibatch.
#print "y.ndim = ",y.ndim
return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])
# end-snippet-2
def errors(self, y):
"""Return a float representing the number of errors in the minibatch over the total number of examples of the minibatch ; zero one
loss over the size of the minibatch
:type y: theano.tensor.TensorType
:param y: corresponds to a vector that gives for each example the
correct label
"""
# check if y has same dimension of y_pred
if y.ndim != self.y_pred.ndim:
raise TypeError( 'y should have the same shape as self.y_pred', ('y', y.type, 'y_pred', self.y_pred.type) )
# check if y is of the correct datatype
if y.dtype.startswith('int'):
# the T.neq operator returns a vector of 0s and 1s, where 1
# represents a mistake in prediction
return T.mean(T.neq(self.y_pred, y))
else:
raise NotImplementedError()
def sgd_optimization(data_type, target, model_dir, learning_rate=0.1, n_epochs=10, batch_size=100):
"""
Demonstrate stochastic gradient descent optimization of a log-linear model
:type learning_rate: float
:param learning_rate: learning rate used (factor for the stochastic gradient)
:type n_epochs: int
:param n_epochs: maximal number of epochs to run the optimizer
"""
test_fold = 1 #xxxxxxxxxxxx TEMP XXXXXXXXXXXXXXXX
write_model_file = model_dir + '/model.' + target + '.' + str(test_fold) +'.pkl'
fold_path = helpers.get_fold_path(data_type)
targets = helpers.build_targets(fold_path, data_type)
fnames = targets[target]
fold_accuracies = {}
did_something = False
# pct_ct = []
# roc_auc = []
# run 4 folds vs 1 fold with each possible scenario
# for curr_fl in range(5):
# print 'Building data for target: ' + target + ', fold: ' + str(curr_fl)
# loop through all folds, for now just do 1!
datasets, test_set_labels = helpers.th_load_data(data_type, fold_path, target, fnames, 0, test_fold)
train_set_x, train_set_y = datasets[0]
test_set_x, test_set_y = datasets[1]
valid_set_x = train_set_x
valid_set_y = train_set_y
# compute number of rows for training, validation and testing
rows_train = train_set_x.get_value(borrow=True).shape[0]
rows_valid = valid_set_x.get_value(borrow=True).shape[0]
rows_test = test_set_x.get_value(borrow=True).shape[0]
# compute number of minibatches for training, validation and testing
n_train_batches = rows_train / batch_size
n_valid_batches = rows_valid / batch_size
n_test_batches = rows_test / batch_size
####################### BUILD ACTUAL MODEL #######################
# allocate symbolic variables for the data
index = T.lscalar() # index to a [mini]batch
# generate symbolic variables for input (x and y represent a minibatch)
x = T.matrix('x') # data, presented as rasterized images
y = T.ivector('y') # labels, presented as 1D vector of [int] labels
# construct the logistic regression class
# n_in: Each MNIST image has size 32*32 = 1024
# n_out: 10 different digits - multi-task LR
classifier = LogisticRegression(input=x, n_in=32 * 32, n_out=2)
# the cost we minimize during training is the negative log likelihood of the model in symbolic format
cost = classifier.negative_log_likelihood(y)
# compiling a Theano function that computes the mistakes that are made by the model on a minibatch
test_model = theano.function( inputs=[index], outputs=classifier.errors(y),
givens={
x: test_set_x[index * batch_size: (index + 1) * batch_size],
y: test_set_y[index * batch_size: (index + 1) * batch_size]
}
)
validate_model = theano.function(
inputs=[index],
outputs=classifier.errors(y),
givens={
x: valid_set_x[index * batch_size: (index + 1) * batch_size],
y: valid_set_y[index * batch_size: (index + 1) * batch_size]
}
)
# compute the gradient of cost with respect to theta = (W,b)
g_W = T.grad(cost=cost, wrt=classifier.W)
g_b = T.grad(cost=cost, wrt=classifier.b)
# start-snippet-3
# specify how to update the parameters of the model as a list of
# (variable, update expression) pairs.
updates = [(classifier.W, classifier.W - learning_rate * g_W),
(classifier.b, classifier.b - learning_rate * g_b)]
# compiling a Theano function `train_model` that returns the cost, but in
# the same time updates the parameter of the model based on the rules
# defined in `updates`
train_model = theano.function( inputs=[index], outputs=cost, updates=updates,
givens={
x: train_set_x[index * batch_size: (index + 1) * batch_size],
y: train_set_y[index * batch_size: (index + 1) * batch_size]
}
)
# end-snippet-3
################ TRAIN MODEL ################
# | """ Initialize the parameters of the logistic regression
:type input: theano.tensor.TensorType
:param input: symbolic variable that describes the input of the architecture (one minibatch)
:type n_in: int
:param n_in: number of input units, the dimension of the space in which the datapoints lie
:type n_out: int
:param n_out: number of output units, the dimension of the space in which the labels lie
"""
# start-snippet-1
# initialize with 0 the weights W as a matrix of shape (n_in, n_out)
self.W = theano.shared( value=numpy.zeros( (n_in, n_out), dtype=theano.config.floatX ), name='W', borrow=True )
# initialize the baises b as a vector of n_out 0s
self.b = theano.shared(value=numpy.zeros( (n_out,), dtype=theano.config.floatX ), name='b', borrow=True )
# symbolic expression for computing the matrix of class-membership probabilities where:
# W is a matrix where column-k represent the separation hyper plain for class-k
# x is a matrix where row-j represents input training sample-j
# b is a vector where element-k represent the free parameter of hyper plane-k
| identifier_body | |
th_logistic_regression.py | (self, input, n_in, n_out):
""" Initialize the parameters of the logistic regression
:type input: theano.tensor.TensorType
:param input: symbolic variable that describes the input of the architecture (one minibatch)
:type n_in: int
:param n_in: number of input units, the dimension of the space in which the datapoints lie
:type n_out: int
:param n_out: number of output units, the dimension of the space in which the labels lie
"""
# start-snippet-1
# initialize with 0 the weights W as a matrix of shape (n_in, n_out)
self.W = theano.shared( value=numpy.zeros( (n_in, n_out), dtype=theano.config.floatX ), name='W', borrow=True )
# initialize the baises b as a vector of n_out 0s
self.b = theano.shared(value=numpy.zeros( (n_out,), dtype=theano.config.floatX ), name='b', borrow=True )
# symbolic expression for computing the matrix of class-membership probabilities where:
# W is a matrix where column-k represent the separation hyper plain for class-k
# x is a matrix where row-j represents input training sample-j
# b is a vector where element-k represent the free parameter of hyper plane-k
self.p_y_given_x = T.nnet.softmax(T.dot(input, self.W) + self.b)
# symbolic description of how to compute prediction as class whose probability is maximal
self.y_pred = T.argmax(self.p_y_given_x, axis=1)
# end-snippet-1
# parameters of the model
self.params = [self.W, self.b]
# keep track of model input
self.input = input
def negative_log_likelihood(self, y):
"""Return the mean of the negative log-likelihood of the prediction of this model under a given target distribution.
.. math::
\frac{1}{|\mathcal{D}|} \mathcal{L} (\theta=\{W,b\}, \mathcal{D}) =
\frac{1}{|\mathcal{D}|} \sum_{i=0}^{|\mathcal{D}|}
\log(P(Y=y^{(i)}|x^{(i)}, W,b)) \\
\ell (\theta=\{W,b\}, \mathcal{D})
:type y: theano.tensor.TensorType
:param y: corresponds to a vector that gives for each example the
correct label
Note: we use the mean instead of the sum so that the learning rate is less dependent on the batch size
"""
# start-snippet-2
# y.shape[0] is (symbolically) the number of rows in y, i.e. number of examples (call it n) in the minibatch
# T.arange(y.shape[0]) is a symbolic vector which will contain [0,1,2,... n-1]
# T.log(self.p_y_given_x) is a matrix of Log-Probabilities (call it LP) with one row per example and one column per class
# LP[T.arange(y.shape[0]),y] is a vector v containing [LP[0,y[0]], LP[1,y[1]], LP[2,y[2]], ..., LP[n-1,y[n-1]]] and
# T.mean(LP[T.arange(y.shape[0]),y]) is the mean (across minibatch examples) of the elements in v, i.e., the mean log-likelihood across the minibatch.
#print "y.ndim = ",y.ndim
return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])
# end-snippet-2
def errors(self, y):
"""Return a float representing the number of errors in the minibatch over the total number of examples of the minibatch ; zero one
loss over the size of the minibatch
:type y: theano.tensor.TensorType
:param y: corresponds to a vector that gives for each example the
correct label
"""
# check if y has same dimension of y_pred
if y.ndim != self.y_pred.ndim:
raise TypeError( 'y should have the same shape as self.y_pred', ('y', y.type, 'y_pred', self.y_pred.type) )
# check if y is of the correct datatype
if y.dtype.startswith('int'):
# the T.neq operator returns a vector of 0s and 1s, where 1
# represents a mistake in prediction
return T.mean(T.neq(self.y_pred, y))
else:
raise NotImplementedError()
def sgd_optimization(data_type, target, model_dir, learning_rate=0.1, n_epochs=10, batch_size=100):
"""
Demonstrate stochastic gradient descent optimization of a log-linear model
:type learning_rate: float
:param learning_rate: learning rate used (factor for the stochastic gradient)
:type n_epochs: int
:param n_epochs: maximal number of epochs to run the optimizer
"""
test_fold = 1 #xxxxxxxxxxxx TEMP XXXXXXXXXXXXXXXX
write_model_file = model_dir + '/model.' + target + '.' + str(test_fold) +'.pkl'
fold_path = helpers.get_fold_path(data_type)
targets = helpers.build_targets(fold_path, data_type)
fnames = targets[target]
fold_accuracies = {}
did_something = False
# pct_ct = []
# roc_auc = []
# run 4 folds vs 1 fold with each possible scenario
# for curr_fl in range(5):
# print 'Building data for target: ' + target + ', fold: ' + str(curr_fl)
# loop through all folds, for now just do 1!
datasets, test_set_labels = helpers.th_load_data(data_type, fold_path, target, fnames, 0, test_fold)
train_set_x, train_set_y = datasets[0]
test_set_x, test_set_y = datasets[1]
valid_set_x = train_set_x
valid_set_y = train_set_y
# compute number of rows for training, validation and testing
rows_train = train_set_x.get_value(borrow=True).shape[0]
rows_valid = valid_set_x.get_value(borrow=True).shape[0]
rows_test = test_set_x.get_value(borrow=True).shape[0]
# compute number of minibatches for training, validation and testing
n_train_batches = rows_train / batch_size
n_valid_batches = rows_valid / batch_size
n_test_batches = rows_test / batch_size
####################### BUILD ACTUAL MODEL #######################
# allocate symbolic variables for the data
index = T.lscalar() # index to a [mini]batch
# generate symbolic variables for input (x and y represent a minibatch)
x = T.matrix('x') # data, presented as rasterized images
y = T.ivector('y') # labels, presented as 1D vector of [int] labels
# construct the logistic regression class
# n_in: Each MNIST image has size 32*32 = 1024
# n_out: 10 different digits - multi-task LR
classifier = LogisticRegression(input=x, n_in=32 * 32, n_out=2)
# the cost we minimize during training is the negative log likelihood of the model in symbolic format
cost = classifier.negative_log_likelihood(y)
# compiling a Theano function that computes the mistakes that are made by the model on a minibatch
test_model = theano.function( inputs=[index], outputs=classifier.errors(y),
givens={
x: test_set_x[index * batch_size: (index + 1) * batch_size],
y: test_set_y[index * batch_size: (index + 1) * batch_size]
}
)
validate_model = theano.function(
inputs=[index],
outputs=classifier.errors(y),
givens={
x: valid_set_x[index * batch_size: (index + 1) * batch_size],
y: valid_set_y[index * batch_size: (index + 1) * batch_size]
}
)
# compute the gradient of cost with respect to theta = (W,b)
g_W = T.grad(cost=cost, wrt=classifier.W)
g_b = T.grad(cost=cost, wrt=classifier.b)
# start-snippet-3
# specify how to update the parameters of the model as a list of
# (variable, update expression) pairs.
updates = [(classifier.W, classifier.W - learning_rate * g_W),
(classifier.b, classifier.b - learning_rate * g_b)]
# compiling a Theano function `train_model` that returns the cost, but in
# the same time updates the parameter of the model based on the rules
# defined in `updates`
train_model = theano.function( inputs=[index], outputs=cost, updates=updates,
givens={
x: train_set_x[index * batch_size: (index + 1) * batch_size],
y: train_set_y[index * batch_size: (index + 1) * batch_size]
}
)
# end | __init__ | identifier_name | |
schema.go | }
parsed2, err := url.Parse(*r.ref)
if err != nil {
return nil, fmt.Errorf("parse $ref: %w", err)
}
return loader.Load(ctx, referer.Src.ResolveReference(parsed2))
}
// Schema is the core representation of the JSONSchema meta schema.
type Schema struct {
// this could be a ref
Ref *string `json:"$ref,omitempty"`
// meta
ID *url.URL `json:"-"` // set either from "$id", "id", or calculated based on parent (see IDCalc); never nil
IDCalc bool `json:"-"` // whether this ID was calculated
Src *url.URL `json:"-"` // the resource from which this schema was loaded; never nil
Schema string `json:"$schema,omitempty"`
// number qualifiers
MultipleOf *float64 `json:"multipleOf,omitempty"`
Maximum *float64 `json:"maximum,omitempty"`
ExclusiveMaximum *bool `json:"exclusiveMaximum,omitempty"`
Minimum *float64 `json:"minimum,omitempty"`
ExclusiveMinimum *bool `json:"exclusiveMinimum,omitempty"`
// string qualifiers
MaxLength *uint64 `json:"maxLength,omitempty"`
MinLength uint64 `json:"minLength,omitempty"`
Pattern *string `json:"pattern,omitempty"`
// array qualifiers
AdditionalItems *BoolOrSchema `json:"additionalItems,omitempty"`
Items *ItemsField `json:"items,omitempty"`
MaxItems *uint64 `json:"maxItems,omitempty"`
MinItems uint64 `json:"minItems,omitempty"`
UniqueItems bool `json:"uniqueItems,omitempty"`
// object qualifiers
MaxProperties *uint64 `json:"maxProperties,omitempty"`
MinProperties uint64 `json:"minProperties,omitempty"`
Required []string `json:"required,omitempty"`
AdditionalProperties *BoolOrSchema `json:"additionalProperties,omitempty"`
Definitions map[string]*RefOrSchema `json:"definitions,omitempty"`
Properties map[string]*RefOrSchema `json:"properties,omitempty"`
PatternProperties map[string]*RefOrSchema `json:"patternProperties,omitempty"`
Dependencies map[string]*RefOrSchema `json:"dependencies,omitempty"`
// extra special
Enum []interface{} `json:"enum,omitempty"`
Type *TypeField `json:"type,omitempty"`
Format string `json:"format,omitempty"`
// polymorphic support
AllOf []*RefOrSchema `json:"allOf,omitempty"`
AnyOf []*RefOrSchema `json:"anyOf,omitempty"`
OneOf []*RefOrSchema `json:"oneOf,omitempty"`
Not *RefOrSchema `json:"not,omitempty"`
// jsonschema2go Config
Config Config `json:"x-jsonschema2go"`
// user extensible
Annotations TagMap `json:"-"`
}
// Config is a series of jsonschema2go user extensions
type Config struct {
GoPath string `json:"gopath"`
Exclude bool `json:"exclude"`
Discriminator Discriminator `json:"Discriminator"`
NoValidate bool `json:"noValidate"`
PromoteFields bool `json:"promoteFields"`
NoOmitEmpty bool `json:"noOmitEmpty"`
OmitEmptyArray bool `json:"omitEmptyArray"`
RawMessage bool `json:"rawMessage"`
FieldAliases map[string]string `json:"fieldAliases"`
}
// Discriminator is jsonschema2go specific info for discriminating between multiple oneOf objects
type Discriminator struct {
PropertyName string `json:"propertyName"`
Mapping map[string]string `json:"mapping"`
}
// IsSet returns whether there is a discriminator present.
func (d *Discriminator) IsSet() bool {
return d.PropertyName != ""
}
func (s *Schema) setSrc(u *url.URL) {
s.Src = u
for _, c := range s.children() {
if c.schema != nil {
c.schema.setSrc(u)
}
}
}
func (s *Schema) calculateID() {
for _, c := range s.children() {
if c.schema == nil {
continue
}
if c.schema.ID == nil {
childID, _ := s.ID.Parse(s.ID.String()) // silly deep copy
if len(c.path) > 0 {
fragment := make([]string, 0, len(c.path))
for _, v := range c.path {
fragment = append(fragment, fmt.Sprint(v))
}
childID.Fragment += "/" + strings.Join(fragment, "/")
}
c.schema.ID = childID
c.schema.IDCalc = true
}
c.schema.calculateID()
}
}
type child struct {
*RefOrSchema
path []interface{}
}
func (s *Schema) children() (children []child) {
push := func(s *RefOrSchema, path ...interface{}) {
if s != nil {
children = append(children, child{s, path})
}
}
if s.AdditionalItems != nil {
push(s.AdditionalItems.Schema, "additionalItems")
}
if s.Items != nil {
push(s.Items.Items, "items")
for i, f := range s.Items.TupleFields {
push(f, "items", i)
}
}
if s.AdditionalProperties != nil {
push(s.AdditionalProperties.Schema, "additionalProperties")
}
for _, m := range []struct {
name string
schemas map[string]*RefOrSchema
}{
{"definitions", s.Definitions},
{"properties", s.Properties},
{"patternProperties", s.PatternProperties},
{"dependencies", s.Dependencies},
} {
for k, v := range m.schemas {
push(v, m.name, k)
}
}
for _, a := range []struct {
name string
schemas []*RefOrSchema
}{
{"allOf", s.AllOf},
{"anyOf", s.AnyOf},
{"oneOf", s.OneOf},
} {
for i, v := range a.schemas {
push(v, a.name, i)
}
}
push(s.Not, "not")
return
}
// String returns a simple string identifier for the schema
func (s *Schema) String() string {
if s.ID == nil {
return "<nil>"
}
return s.ID.String()
}
// ChooseType returns the best known type for this field.
func (s *Schema) ChooseType() JSONType {
switch {
case s.Type != nil && len(*s.Type) > 0:
return (*s.Type)[0]
case len(s.Properties) > 0,
s.AdditionalProperties.Present(),
len(s.PatternProperties) > 0,
s.MinProperties > 0,
s.MaxProperties != nil,
len(s.AllOf) > 0:
return JSONObject
case s.Items.Present(),
s.UniqueItems,
s.MinItems != 0,
s.MaxItems != nil:
return JSONArray
case s.Pattern != nil,
s.MinLength > 0,
s.MaxLength != nil:
return JSONString
}
return JSONUnknown
}
// UnmarshalJSON is custom JSON deserialization for the Schema type
func (s *Schema) UnmarshalJSON(data []byte) error {
{
type schema Schema
var s2 schema
if err := json.Unmarshal(data, &s2); err != nil {
return fmt.Errorf("unmarshal schema: %w", err)
}
*s = Schema(s2)
}
var possAnnos map[string]json.RawMessage
if err := json.Unmarshal(data, &possAnnos); err != nil {
return fmt.Errorf("unmarshal annotations: %w", err)
}
for field, v := range possAnnos {
if knownSchemaFields[field] {
continue
}
if s.Annotations == nil {
s.Annotations = make(map[string]json.RawMessage)
}
s.Annotations[field] = v
}
for _, key := range []string{"$id", "id"} {
idBytes, ok := s.Annotations[key]
if !ok {
continue
}
var (
id string
err error
)
if err = json.Unmarshal(idBytes, &id); err != nil {
return err
}
if s.ID, err = url.Parse(id); err != nil {
return err
}
break
}
return nil
}
func getJSONFieldNames(val interface{}) (fields []string) {
t := reflect.TypeOf(val)
for i := 0; i < t.NumField(); i++ {
field := t.Field(i)
if r, _ := utf8.DecodeRuneInString(field.Name); r == utf8.RuneError || unicode.IsLower(r) {
continue
}
vals := strings.SplitN(field.Tag.Get("json"), ",", 2)
if len(vals) == 0 || vals[0] == "" {
fields = append(fields, field.Name)
continue
}
if vals[0] != "-" {
fields = append(fields, vals[0])
}
}
return
}
// NormalizeComment takes a comment string and makes sure it's normalized for Go
func NormalizeComment(s string) string {
if s == "" | {
return ""
} | conditional_block | |
schema.go | "" {
r.ref = &ref.Ref
return nil
}
r.schema = new(Schema)
return json.Unmarshal(b, r.schema)
}
// Resolve either returns the schema if set or else resolves the reference using the referer schema and loader.
func (r *RefOrSchema) Resolve(ctx context.Context, referer *Schema, loader Loader) (*Schema, error) {
if r.ref == nil {
return r.schema, nil
}
parsed2, err := url.Parse(*r.ref)
if err != nil {
return nil, fmt.Errorf("parse $ref: %w", err)
}
return loader.Load(ctx, referer.Src.ResolveReference(parsed2))
}
// Schema is the core representation of the JSONSchema meta schema.
type Schema struct {
// this could be a ref
Ref *string `json:"$ref,omitempty"`
// meta
ID *url.URL `json:"-"` // set either from "$id", "id", or calculated based on parent (see IDCalc); never nil
IDCalc bool `json:"-"` // whether this ID was calculated
Src *url.URL `json:"-"` // the resource from which this schema was loaded; never nil
Schema string `json:"$schema,omitempty"`
// number qualifiers
MultipleOf *float64 `json:"multipleOf,omitempty"`
Maximum *float64 `json:"maximum,omitempty"`
ExclusiveMaximum *bool `json:"exclusiveMaximum,omitempty"`
Minimum *float64 `json:"minimum,omitempty"`
ExclusiveMinimum *bool `json:"exclusiveMinimum,omitempty"`
// string qualifiers
MaxLength *uint64 `json:"maxLength,omitempty"`
MinLength uint64 `json:"minLength,omitempty"`
Pattern *string `json:"pattern,omitempty"`
// array qualifiers
AdditionalItems *BoolOrSchema `json:"additionalItems,omitempty"`
Items *ItemsField `json:"items,omitempty"`
MaxItems *uint64 `json:"maxItems,omitempty"`
MinItems uint64 `json:"minItems,omitempty"`
UniqueItems bool `json:"uniqueItems,omitempty"`
// object qualifiers
MaxProperties *uint64 `json:"maxProperties,omitempty"`
MinProperties uint64 `json:"minProperties,omitempty"`
Required []string `json:"required,omitempty"`
AdditionalProperties *BoolOrSchema `json:"additionalProperties,omitempty"`
Definitions map[string]*RefOrSchema `json:"definitions,omitempty"`
Properties map[string]*RefOrSchema `json:"properties,omitempty"`
PatternProperties map[string]*RefOrSchema `json:"patternProperties,omitempty"`
Dependencies map[string]*RefOrSchema `json:"dependencies,omitempty"`
// extra special
Enum []interface{} `json:"enum,omitempty"`
Type *TypeField `json:"type,omitempty"`
Format string `json:"format,omitempty"`
// polymorphic support
AllOf []*RefOrSchema `json:"allOf,omitempty"`
AnyOf []*RefOrSchema `json:"anyOf,omitempty"`
OneOf []*RefOrSchema `json:"oneOf,omitempty"`
Not *RefOrSchema `json:"not,omitempty"`
// jsonschema2go Config
Config Config `json:"x-jsonschema2go"`
// user extensible
Annotations TagMap `json:"-"`
}
// Config is a series of jsonschema2go user extensions
type Config struct {
GoPath string `json:"gopath"`
Exclude bool `json:"exclude"`
Discriminator Discriminator `json:"Discriminator"`
NoValidate bool `json:"noValidate"`
PromoteFields bool `json:"promoteFields"`
NoOmitEmpty bool `json:"noOmitEmpty"`
OmitEmptyArray bool `json:"omitEmptyArray"`
RawMessage bool `json:"rawMessage"`
FieldAliases map[string]string `json:"fieldAliases"`
}
// Discriminator is jsonschema2go specific info for discriminating between multiple oneOf objects
type Discriminator struct {
PropertyName string `json:"propertyName"`
Mapping map[string]string `json:"mapping"`
}
// IsSet returns whether there is a discriminator present.
func (d *Discriminator) IsSet() bool {
return d.PropertyName != ""
}
func (s *Schema) setSrc(u *url.URL) {
s.Src = u
for _, c := range s.children() {
if c.schema != nil {
c.schema.setSrc(u)
}
}
}
func (s *Schema) calculateID() {
for _, c := range s.children() {
if c.schema == nil {
continue
}
if c.schema.ID == nil {
childID, _ := s.ID.Parse(s.ID.String()) // silly deep copy
if len(c.path) > 0 {
fragment := make([]string, 0, len(c.path))
for _, v := range c.path {
fragment = append(fragment, fmt.Sprint(v))
}
childID.Fragment += "/" + strings.Join(fragment, "/")
}
c.schema.ID = childID
c.schema.IDCalc = true
}
c.schema.calculateID()
}
}
type child struct {
*RefOrSchema
path []interface{}
}
func (s *Schema) children() (children []child) {
push := func(s *RefOrSchema, path ...interface{}) {
if s != nil {
children = append(children, child{s, path})
}
}
if s.AdditionalItems != nil {
push(s.AdditionalItems.Schema, "additionalItems")
}
if s.Items != nil {
push(s.Items.Items, "items")
for i, f := range s.Items.TupleFields {
push(f, "items", i)
}
}
if s.AdditionalProperties != nil {
push(s.AdditionalProperties.Schema, "additionalProperties")
}
for _, m := range []struct {
name string
schemas map[string]*RefOrSchema
}{
{"definitions", s.Definitions},
{"properties", s.Properties},
{"patternProperties", s.PatternProperties},
{"dependencies", s.Dependencies},
} {
for k, v := range m.schemas {
push(v, m.name, k)
}
}
for _, a := range []struct {
name string
schemas []*RefOrSchema
}{
{"allOf", s.AllOf},
{"anyOf", s.AnyOf},
{"oneOf", s.OneOf},
} {
for i, v := range a.schemas {
push(v, a.name, i)
}
}
push(s.Not, "not")
return
}
// String returns a simple string identifier for the schema
func (s *Schema) String() string {
if s.ID == nil {
return "<nil>"
}
return s.ID.String()
}
// ChooseType returns the best known type for this field.
func (s *Schema) ChooseType() JSONType {
switch {
case s.Type != nil && len(*s.Type) > 0:
return (*s.Type)[0]
case len(s.Properties) > 0,
s.AdditionalProperties.Present(),
len(s.PatternProperties) > 0,
s.MinProperties > 0,
s.MaxProperties != nil,
len(s.AllOf) > 0:
return JSONObject
case s.Items.Present(),
s.UniqueItems,
s.MinItems != 0,
s.MaxItems != nil:
return JSONArray
case s.Pattern != nil,
s.MinLength > 0,
s.MaxLength != nil:
return JSONString
}
return JSONUnknown
}
// UnmarshalJSON is custom JSON deserialization for the Schema type
func (s *Schema) UnmarshalJSON(data []byte) error {
{
type schema Schema
var s2 schema
if err := json.Unmarshal(data, &s2); err != nil {
return fmt.Errorf("unmarshal schema: %w", err)
}
*s = Schema(s2)
}
var possAnnos map[string]json.RawMessage
if err := json.Unmarshal(data, &possAnnos); err != nil {
return fmt.Errorf("unmarshal annotations: %w", err)
}
for field, v := range possAnnos {
if knownSchemaFields[field] {
continue
}
if s.Annotations == nil {
s.Annotations = make(map[string]json.RawMessage)
}
s.Annotations[field] = v
}
for _, key := range []string{"$id", "id"} {
idBytes, ok := s.Annotations[key]
if !ok {
continue
}
var (
id string
err error
)
if err = json.Unmarshal(idBytes, &id); err != nil {
return err
}
if s.ID, err = url.Parse(id); err != nil {
return err
}
break
}
return nil
}
func getJSONFieldNames(val interface{}) (fields []string) {
t := reflect.TypeOf(val)
for i := 0; i < t.NumField(); i++ {
field := t.Field(i)
if r, _ := utf8.DecodeRuneInString(field.Name); r == utf8.RuneError || unicode.IsLower(r) {
continue
}
vals := strings.SplitN(field.Tag.Get("json"), ",", 2) | random_line_split | ||
schema.go | }
*t = append(*t, typ)
}
return nil
}
return fmt.Errorf("unable to unmarshal %T into TypeField", val)
}
// convenience method to draw out the first token; if this errs, later calls will err anyway so discards
// the err
func peekToken(data []byte) json.Token {
tok, _ := json.NewDecoder(bytes.NewReader(data)).Token()
return tok
}
// BoolOrSchema may have either a boolean or a RefOrSchema.
type BoolOrSchema struct {
Bool *bool
Schema *RefOrSchema
}
func (a *BoolOrSchema) Present() bool {
return a != nil && (a.Schema != nil || (a.Bool != nil && *a.Bool))
}
// UnmarshalJSON performs some custom deserialization of JSON into BoolOrSchema
func (a *BoolOrSchema) UnmarshalJSON(data []byte) error {
if b, ok := peekToken(data).(bool); ok {
a.Bool = &b
return nil
}
a.Schema = new(RefOrSchema)
return json.Unmarshal(data, a.Schema)
}
// ItemsField contains information indicating whether the modified array is a dynamically sized list of multiple
// types or a "tuple" -- a specifically sized array with potentially different types for each position.
type ItemsField struct {
Items *RefOrSchema
TupleFields []*RefOrSchema
}
func (i *ItemsField) Present() bool {
return i != nil && (i.Items != nil || len(i.TupleFields) > 0)
}
// UnmarshalJSON conditionally deserializes into ItemsField according to the shape of the provided JSON
func (i *ItemsField) UnmarshalJSON(data []byte) error {
if peekToken(data) == json.Delim('{') {
i.Items = new(RefOrSchema)
return json.Unmarshal(data, i.Items)
}
return json.Unmarshal(data, &i.TupleFields)
}
// TagMap contains all of the different user extended tags as json.RawMessage for later deserialization
type TagMap map[string]json.RawMessage
// GetString attempts to deserialize the value for the provided key into a string. If the key is absent or there is an
// error deserializing the value, the returned string will be empty.
func (t TagMap) | (k string) (s string) {
_, _ = t.Unmarshal(k, &s)
return
}
// Read unmarshals the json at the provided key into the provided interface (which should be a pointer amenable to
// json.Read. If the key is not present, the pointer will be untouched, and false and nil will be returned. If the
// deserialization fails, an error will be returned.
func (t TagMap) Unmarshal(k string, val interface{}) (bool, error) {
msg, ok := t[k]
if !ok {
return false, nil
}
err := json.Unmarshal(msg, val)
return true, err
}
// NewRefOrSchema is a convenience constructor for RefOrSchema
func NewRefOrSchema(s *Schema, ref *string) *RefOrSchema {
return &RefOrSchema{ref: ref, schema: s}
}
// RefOrSchema is either a schema or a reference to a schema.
type RefOrSchema struct {
ref *string
schema *Schema
}
// UnmarshalJSON conditionally deserializes the JSON, either into a reference or a schema.
func (r *RefOrSchema) UnmarshalJSON(b []byte) error {
var ref struct {
Ref string `json:"$ref"`
}
if err := json.Unmarshal(b, &ref); err != nil {
return fmt.Errorf("unmarshal $ref: %w", err)
}
if ref.Ref != "" {
r.ref = &ref.Ref
return nil
}
r.schema = new(Schema)
return json.Unmarshal(b, r.schema)
}
// Resolve either returns the schema if set or else resolves the reference using the referer schema and loader.
func (r *RefOrSchema) Resolve(ctx context.Context, referer *Schema, loader Loader) (*Schema, error) {
if r.ref == nil {
return r.schema, nil
}
parsed2, err := url.Parse(*r.ref)
if err != nil {
return nil, fmt.Errorf("parse $ref: %w", err)
}
return loader.Load(ctx, referer.Src.ResolveReference(parsed2))
}
// Schema is the core representation of the JSONSchema meta schema.
type Schema struct {
// this could be a ref
Ref *string `json:"$ref,omitempty"`
// meta
ID *url.URL `json:"-"` // set either from "$id", "id", or calculated based on parent (see IDCalc); never nil
IDCalc bool `json:"-"` // whether this ID was calculated
Src *url.URL `json:"-"` // the resource from which this schema was loaded; never nil
Schema string `json:"$schema,omitempty"`
// number qualifiers
MultipleOf *float64 `json:"multipleOf,omitempty"`
Maximum *float64 `json:"maximum,omitempty"`
ExclusiveMaximum *bool `json:"exclusiveMaximum,omitempty"`
Minimum *float64 `json:"minimum,omitempty"`
ExclusiveMinimum *bool `json:"exclusiveMinimum,omitempty"`
// string qualifiers
MaxLength *uint64 `json:"maxLength,omitempty"`
MinLength uint64 `json:"minLength,omitempty"`
Pattern *string `json:"pattern,omitempty"`
// array qualifiers
AdditionalItems *BoolOrSchema `json:"additionalItems,omitempty"`
Items *ItemsField `json:"items,omitempty"`
MaxItems *uint64 `json:"maxItems,omitempty"`
MinItems uint64 `json:"minItems,omitempty"`
UniqueItems bool `json:"uniqueItems,omitempty"`
// object qualifiers
MaxProperties *uint64 `json:"maxProperties,omitempty"`
MinProperties uint64 `json:"minProperties,omitempty"`
Required []string `json:"required,omitempty"`
AdditionalProperties *BoolOrSchema `json:"additionalProperties,omitempty"`
Definitions map[string]*RefOrSchema `json:"definitions,omitempty"`
Properties map[string]*RefOrSchema `json:"properties,omitempty"`
PatternProperties map[string]*RefOrSchema `json:"patternProperties,omitempty"`
Dependencies map[string]*RefOrSchema `json:"dependencies,omitempty"`
// extra special
Enum []interface{} `json:"enum,omitempty"`
Type *TypeField `json:"type,omitempty"`
Format string `json:"format,omitempty"`
// polymorphic support
AllOf []*RefOrSchema `json:"allOf,omitempty"`
AnyOf []*RefOrSchema `json:"anyOf,omitempty"`
OneOf []*RefOrSchema `json:"oneOf,omitempty"`
Not *RefOrSchema `json:"not,omitempty"`
// jsonschema2go Config
Config Config `json:"x-jsonschema2go"`
// user extensible
Annotations TagMap `json:"-"`
}
// Config is a series of jsonschema2go user extensions
type Config struct {
GoPath string `json:"gopath"`
Exclude bool `json:"exclude"`
Discriminator Discriminator `json:"Discriminator"`
NoValidate bool `json:"noValidate"`
PromoteFields bool `json:"promoteFields"`
NoOmitEmpty bool `json:"noOmitEmpty"`
OmitEmptyArray bool `json:"omitEmptyArray"`
RawMessage bool `json:"rawMessage"`
FieldAliases map[string]string `json:"fieldAliases"`
}
// Discriminator is jsonschema2go specific info for discriminating between multiple oneOf objects
type Discriminator struct {
PropertyName string `json:"propertyName"`
Mapping map[string]string `json:"mapping"`
}
// IsSet returns whether there is a discriminator present.
func (d *Discriminator) IsSet() bool {
return d.PropertyName != ""
}
func (s *Schema) setSrc(u *url.URL) {
s.Src = u
for _, c := range s.children() {
if c.schema != nil {
c.schema.setSrc(u)
}
}
}
func (s *Schema) calculateID() {
for _, c := range s.children() {
if c.schema == nil {
continue
}
if c.schema.ID == nil {
childID, _ := s.ID.Parse(s.ID.String()) // silly deep copy
if len(c.path) > 0 {
fragment := make([]string, 0, len(c.path))
for _, v := range c.path {
fragment = append(fragment, fmt.Sprint(v))
}
childID.Fragment += "/" + strings.Join(fragment, "/")
}
c.schema.ID = childID
c.schema.IDCalc = true
}
c.schema.calculateID()
}
}
type child struct {
*RefOrSchema
path []interface{}
}
func (s *Schema) children() (children []child) {
push := func(s *RefOrSchema, path ...interface{}) {
if s != nil {
children = append(children, child{s, path})
}
}
if s.AdditionalItems != nil {
push(s.AdditionalItems.Schema, "additionalItems")
}
if s.Items != nil {
push(s.Items.Items, "items")
for i, f := range s.Items.TupleFields {
push(f, "items", i)
}
| GetString | identifier_name |
schema.go | }
*t = append(*t, typ)
}
return nil
}
return fmt.Errorf("unable to unmarshal %T into TypeField", val)
}
// convenience method to draw out the first token; if this errs, later calls will err anyway so discards
// the err
func peekToken(data []byte) json.Token {
tok, _ := json.NewDecoder(bytes.NewReader(data)).Token()
return tok
}
// BoolOrSchema may have either a boolean or a RefOrSchema.
type BoolOrSchema struct {
Bool *bool
Schema *RefOrSchema
}
func (a *BoolOrSchema) Present() bool {
return a != nil && (a.Schema != nil || (a.Bool != nil && *a.Bool))
}
// UnmarshalJSON performs some custom deserialization of JSON into BoolOrSchema
func (a *BoolOrSchema) UnmarshalJSON(data []byte) error {
if b, ok := peekToken(data).(bool); ok {
a.Bool = &b
return nil
}
a.Schema = new(RefOrSchema)
return json.Unmarshal(data, a.Schema)
}
// ItemsField contains information indicating whether the modified array is a dynamically sized list of multiple
// types or a "tuple" -- a specifically sized array with potentially different types for each position.
type ItemsField struct {
Items *RefOrSchema
TupleFields []*RefOrSchema
}
func (i *ItemsField) Present() bool {
return i != nil && (i.Items != nil || len(i.TupleFields) > 0)
}
// UnmarshalJSON conditionally deserializes into ItemsField according to the shape of the provided JSON
func (i *ItemsField) UnmarshalJSON(data []byte) error {
if peekToken(data) == json.Delim('{') {
i.Items = new(RefOrSchema)
return json.Unmarshal(data, i.Items)
}
return json.Unmarshal(data, &i.TupleFields)
}
// TagMap contains all of the different user extended tags as json.RawMessage for later deserialization
type TagMap map[string]json.RawMessage
// GetString attempts to deserialize the value for the provided key into a string. If the key is absent or there is an
// error deserializing the value, the returned string will be empty.
func (t TagMap) GetString(k string) (s string) {
_, _ = t.Unmarshal(k, &s)
return
}
// Read unmarshals the json at the provided key into the provided interface (which should be a pointer amenable to
// json.Read. If the key is not present, the pointer will be untouched, and false and nil will be returned. If the
// deserialization fails, an error will be returned.
func (t TagMap) Unmarshal(k string, val interface{}) (bool, error) {
msg, ok := t[k]
if !ok {
return false, nil
}
err := json.Unmarshal(msg, val)
return true, err
}
// NewRefOrSchema is a convenience constructor for RefOrSchema
func NewRefOrSchema(s *Schema, ref *string) *RefOrSchema {
return &RefOrSchema{ref: ref, schema: s}
}
// RefOrSchema is either a schema or a reference to a schema.
type RefOrSchema struct {
ref *string
schema *Schema
}
// UnmarshalJSON conditionally deserializes the JSON, either into a reference or a schema.
func (r *RefOrSchema) UnmarshalJSON(b []byte) error {
var ref struct {
Ref string `json:"$ref"`
}
if err := json.Unmarshal(b, &ref); err != nil {
return fmt.Errorf("unmarshal $ref: %w", err)
}
if ref.Ref != "" {
r.ref = &ref.Ref
return nil
}
r.schema = new(Schema)
return json.Unmarshal(b, r.schema)
}
// Resolve either returns the schema if set or else resolves the reference using the referer schema and loader.
func (r *RefOrSchema) Resolve(ctx context.Context, referer *Schema, loader Loader) (*Schema, error) |
// Schema is the core representation of the JSONSchema meta schema.
type Schema struct {
// this could be a ref
Ref *string `json:"$ref,omitempty"`
// meta
ID *url.URL `json:"-"` // set either from "$id", "id", or calculated based on parent (see IDCalc); never nil
IDCalc bool `json:"-"` // whether this ID was calculated
Src *url.URL `json:"-"` // the resource from which this schema was loaded; never nil
Schema string `json:"$schema,omitempty"`
// number qualifiers
MultipleOf *float64 `json:"multipleOf,omitempty"`
Maximum *float64 `json:"maximum,omitempty"`
ExclusiveMaximum *bool `json:"exclusiveMaximum,omitempty"`
Minimum *float64 `json:"minimum,omitempty"`
ExclusiveMinimum *bool `json:"exclusiveMinimum,omitempty"`
// string qualifiers
MaxLength *uint64 `json:"maxLength,omitempty"`
MinLength uint64 `json:"minLength,omitempty"`
Pattern *string `json:"pattern,omitempty"`
// array qualifiers
AdditionalItems *BoolOrSchema `json:"additionalItems,omitempty"`
Items *ItemsField `json:"items,omitempty"`
MaxItems *uint64 `json:"maxItems,omitempty"`
MinItems uint64 `json:"minItems,omitempty"`
UniqueItems bool `json:"uniqueItems,omitempty"`
// object qualifiers
MaxProperties *uint64 `json:"maxProperties,omitempty"`
MinProperties uint64 `json:"minProperties,omitempty"`
Required []string `json:"required,omitempty"`
AdditionalProperties *BoolOrSchema `json:"additionalProperties,omitempty"`
Definitions map[string]*RefOrSchema `json:"definitions,omitempty"`
Properties map[string]*RefOrSchema `json:"properties,omitempty"`
PatternProperties map[string]*RefOrSchema `json:"patternProperties,omitempty"`
Dependencies map[string]*RefOrSchema `json:"dependencies,omitempty"`
// extra special
Enum []interface{} `json:"enum,omitempty"`
Type *TypeField `json:"type,omitempty"`
Format string `json:"format,omitempty"`
// polymorphic support
AllOf []*RefOrSchema `json:"allOf,omitempty"`
AnyOf []*RefOrSchema `json:"anyOf,omitempty"`
OneOf []*RefOrSchema `json:"oneOf,omitempty"`
Not *RefOrSchema `json:"not,omitempty"`
// jsonschema2go Config
Config Config `json:"x-jsonschema2go"`
// user extensible
Annotations TagMap `json:"-"`
}
// Config is a series of jsonschema2go user extensions
type Config struct {
GoPath string `json:"gopath"`
Exclude bool `json:"exclude"`
Discriminator Discriminator `json:"Discriminator"`
NoValidate bool `json:"noValidate"`
PromoteFields bool `json:"promoteFields"`
NoOmitEmpty bool `json:"noOmitEmpty"`
OmitEmptyArray bool `json:"omitEmptyArray"`
RawMessage bool `json:"rawMessage"`
FieldAliases map[string]string `json:"fieldAliases"`
}
// Discriminator is jsonschema2go specific info for discriminating between multiple oneOf objects
type Discriminator struct {
PropertyName string `json:"propertyName"`
Mapping map[string]string `json:"mapping"`
}
// IsSet returns whether there is a discriminator present.
func (d *Discriminator) IsSet() bool {
return d.PropertyName != ""
}
func (s *Schema) setSrc(u *url.URL) {
s.Src = u
for _, c := range s.children() {
if c.schema != nil {
c.schema.setSrc(u)
}
}
}
func (s *Schema) calculateID() {
for _, c := range s.children() {
if c.schema == nil {
continue
}
if c.schema.ID == nil {
childID, _ := s.ID.Parse(s.ID.String()) // silly deep copy
if len(c.path) > 0 {
fragment := make([]string, 0, len(c.path))
for _, v := range c.path {
fragment = append(fragment, fmt.Sprint(v))
}
childID.Fragment += "/" + strings.Join(fragment, "/")
}
c.schema.ID = childID
c.schema.IDCalc = true
}
c.schema.calculateID()
}
}
type child struct {
*RefOrSchema
path []interface{}
}
func (s *Schema) children() (children []child) {
push := func(s *RefOrSchema, path ...interface{}) {
if s != nil {
children = append(children, child{s, path})
}
}
if s.AdditionalItems != nil {
push(s.AdditionalItems.Schema, "additionalItems")
}
if s.Items != nil {
push(s.Items.Items, "items")
for i, f := range s.Items.TupleFields {
push(f, "items", i)
}
| {
if r.ref == nil {
return r.schema, nil
}
parsed2, err := url.Parse(*r.ref)
if err != nil {
return nil, fmt.Errorf("parse $ref: %w", err)
}
return loader.Load(ctx, referer.Src.ResolveReference(parsed2))
} | identifier_body |
policy.go |
// Deadlines are immutable when the challenge window is open, and during
// the previous challenge window.
immutableWindow := 2 * WPoStChallengeWindow
// We want to reserve at least one deadline's worth of time to compact a
// deadline.
minCompactionWindow := WPoStChallengeWindow
// Make sure we have enough time in the proving period to do everything we need.
if (minCompactionWindow + immutableWindow + WPoStDisputeWindow) > WPoStProvingPeriod {
panic(fmt.Sprintf("together, the minimum compaction window (%d) immutability window (%d) and the dispute window (%d) exceed the proving period (%d)",
minCompactionWindow, immutableWindow, WPoStDisputeWindow, WPoStProvingPeriod))
}
}
// The maximum number of partitions that can be loaded in a single invocation.
// This limits the number of simultaneous fault, recovery, or sector-extension declarations.
// We set this to same as MaxPartitionsPerDeadline so we can process that many partitions every deadline.
const AddressedPartitionsMax = MaxPartitionsPerDeadline
// Maximum number of unique "declarations" in batch operations.
const DeclarationsMax = AddressedPartitionsMax
// The maximum number of sector infos that can be loaded in a single invocation.
// This limits the amount of state to be read in a single message execution.
const AddressedSectorsMax = 25_000 // PARAM_SPEC
// Libp2p peer info limits.
const (
// MaxPeerIDLength is the maximum length allowed for any on-chain peer ID.
// Most Peer IDs are expected to be less than 50 bytes.
MaxPeerIDLength = 128 // PARAM_SPEC
// MaxMultiaddrData is the maximum amount of data that can be stored in multiaddrs.
MaxMultiaddrData = 1024 // PARAM_SPEC
)
// Maximum number of control addresses a miner may register.
const MaxControlAddresses = 10
// The maximum number of partitions that may be required to be loaded in a single invocation,
// when all the sector infos for the partitions will be loaded.
func loadPartitionsSectorsMax(partitionSectorCount uint64) uint64 {
return min64(AddressedSectorsMax/partitionSectorCount, AddressedPartitionsMax)
}
// Epochs after which chain state is final with overwhelming probability (hence the likelihood of two fork of this size is negligible)
// This is a conservative value that is chosen via simulations of all known attacks.
const ChainFinality = abi.ChainEpoch(900) // PARAM_SPEC
// Prefix for sealed sector CIDs (CommR).
var SealedCIDPrefix = cid.Prefix{
Version: 1,
Codec: cid.FilCommitmentSealed,
MhType: mh.POSEIDON_BLS12_381_A1_FC1,
MhLength: 32,
}
// List of proof types which may be used when creating a new miner actor.
// This is mutable to allow configuration of testing and development networks.
var WindowPoStProofTypes = map[abi.RegisteredPoStProof]struct{}{
abi.RegisteredPoStProof_StackedDrgWindow32GiBV1: {},
abi.RegisteredPoStProof_StackedDrgWindow64GiBV1: {},
}
// Checks whether a PoSt proof type is supported for new miners.
func CanWindowPoStProof(s abi.RegisteredPoStProof) bool {
_, ok := WindowPoStProofTypes[s]
return ok
}
// List of proof types which may be used when pre-committing a new sector.
// This is mutable to allow configuration of testing and development networks.
// From network version 8, sectors sealed with the V1 seal proof types cannot be committed.
var PreCommitSealProofTypesV8 = map[abi.RegisteredSealProof]struct{}{
abi.RegisteredSealProof_StackedDrg32GiBV1_1: {},
abi.RegisteredSealProof_StackedDrg64GiBV1_1: {},
}
// Checks whether a seal proof type is supported for new miners and sectors.
func CanPreCommitSealProof(s abi.RegisteredSealProof) bool {
_, ok := PreCommitSealProofTypesV8[s]
return ok
}
// Checks whether a seal proof type is supported for new miners and sectors.
// As of network version 11, all permitted seal proof types may be extended.
func CanExtendSealProofType(_ abi.RegisteredSealProof) bool {
return true
}
// Maximum delay to allow between sector pre-commit and subsequent proof.
// The allowable delay depends on seal proof algorithm.
var MaxProveCommitDuration = map[abi.RegisteredSealProof]abi.ChainEpoch{
abi.RegisteredSealProof_StackedDrg32GiBV1: builtin.EpochsInDay + PreCommitChallengeDelay, // PARAM_SPEC
abi.RegisteredSealProof_StackedDrg2KiBV1: builtin.EpochsInDay + PreCommitChallengeDelay,
abi.RegisteredSealProof_StackedDrg8MiBV1: builtin.EpochsInDay + PreCommitChallengeDelay,
abi.RegisteredSealProof_StackedDrg512MiBV1: builtin.EpochsInDay + PreCommitChallengeDelay,
abi.RegisteredSealProof_StackedDrg64GiBV1: builtin.EpochsInDay + PreCommitChallengeDelay,
abi.RegisteredSealProof_StackedDrg32GiBV1_1: 30*builtin.EpochsInDay + PreCommitChallengeDelay, // PARAM_SPEC
abi.RegisteredSealProof_StackedDrg2KiBV1_1: 30*builtin.EpochsInDay + PreCommitChallengeDelay,
abi.RegisteredSealProof_StackedDrg8MiBV1_1: 30*builtin.EpochsInDay + PreCommitChallengeDelay,
abi.RegisteredSealProof_StackedDrg512MiBV1_1: 30*builtin.EpochsInDay + PreCommitChallengeDelay,
abi.RegisteredSealProof_StackedDrg64GiBV1_1: 30*builtin.EpochsInDay + PreCommitChallengeDelay,
}
// The maximum number of sector pre-commitments in a single batch.
// 32 sectors per epoch would support a single miner onboarding 1EiB of 32GiB sectors in 1 year.
const PreCommitSectorBatchMaxSize = 256
// The maximum number of sector replica updates in a single batch.
// Same as PreCommitSectorBatchMaxSize for consistency
const ProveReplicaUpdatesMaxSize = PreCommitSectorBatchMaxSize
// Maximum delay between challenge and pre-commitment.
// This prevents a miner sealing sectors far in advance of committing them to the chain, thus committing to a
// particular chain.
var MaxPreCommitRandomnessLookback = builtin.EpochsInDay + ChainFinality // PARAM_SPEC
// Number of epochs between publishing a sector pre-commitment and when the challenge for interactive PoRep is drawn.
// This (1) prevents a miner predicting a challenge before staking their pre-commit deposit, and
// (2) prevents a miner attempting a long fork in the past to insert a pre-commitment after seeing the challenge.
var PreCommitChallengeDelay = abi.ChainEpoch(150) // PARAM_SPEC
// Lookback from the deadline's challenge window opening from which to sample chain randomness for the WindowPoSt challenge seed.
// This means that deadline windows can be non-overlapping (which make the programming simpler) without requiring a
// miner to wait for chain stability during the challenge window.
// This value cannot be too large lest it compromise the rationality of honest storage (from Window PoSt cost assumptions).
const WPoStChallengeLookback = abi.ChainEpoch(20) // PARAM_SPEC
// Minimum period between fault declaration and the next deadline opening.
// If the number of epochs between fault declaration and deadline's challenge window opening is lower than FaultDeclarationCutoff,
// the fault declaration is considered invalid for that deadline.
// This guarantees that a miner is not | {
// Check that the challenge windows divide the proving period evenly.
if WPoStProvingPeriod%WPoStChallengeWindow != 0 {
panic(fmt.Sprintf("incompatible proving period %d and challenge window %d", WPoStProvingPeriod, WPoStChallengeWindow))
}
// Check that WPoStPeriodDeadlines is consistent with the proving period and challenge window.
if abi.ChainEpoch(WPoStPeriodDeadlines)*WPoStChallengeWindow != WPoStProvingPeriod {
panic(fmt.Sprintf("incompatible proving period %d and challenge window %d", WPoStProvingPeriod, WPoStChallengeWindow))
}
// Check to make sure the dispute window is longer than finality so there's always some time to dispute bad proofs.
if WPoStDisputeWindow <= ChainFinality {
panic(fmt.Sprintf("the proof dispute period %d must exceed finality %d", WPoStDisputeWindow, ChainFinality))
}
// A deadline becomes immutable one challenge window before it's challenge window opens.
// The challenge lookback must fall within this immutability period.
if WPoStChallengeLookback > WPoStChallengeWindow {
panic("the challenge lookback cannot exceed one challenge window")
} | identifier_body | |
policy.go | ity so there's always some time to dispute bad proofs.
if WPoStDisputeWindow <= ChainFinality |
// A deadline becomes immutable one challenge window before it's challenge window opens.
// The challenge lookback must fall within this immutability period.
if WPoStChallengeLookback > WPoStChallengeWindow {
panic("the challenge lookback cannot exceed one challenge window")
}
// Deadlines are immutable when the challenge window is open, and during
// the previous challenge window.
immutableWindow := 2 * WPoStChallengeWindow
// We want to reserve at least one deadline's worth of time to compact a
// deadline.
minCompactionWindow := WPoStChallengeWindow
// Make sure we have enough time in the proving period to do everything we need.
if (minCompactionWindow + immutableWindow + WPoStDisputeWindow) > WPoStProvingPeriod {
panic(fmt.Sprintf("together, the minimum compaction window (%d) immutability window (%d) and the dispute window (%d) exceed the proving period (%d)",
minCompactionWindow, immutableWindow, WPoStDisputeWindow, WPoStProvingPeriod))
}
}
// The maximum number of partitions that can be loaded in a single invocation.
// This limits the number of simultaneous fault, recovery, or sector-extension declarations.
// We set this to same as MaxPartitionsPerDeadline so we can process that many partitions every deadline.
const AddressedPartitionsMax = MaxPartitionsPerDeadline
// Maximum number of unique "declarations" in batch operations.
const DeclarationsMax = AddressedPartitionsMax
// The maximum number of sector infos that can be loaded in a single invocation.
// This limits the amount of state to be read in a single message execution.
const AddressedSectorsMax = 25_000 // PARAM_SPEC
// Libp2p peer info limits.
const (
// MaxPeerIDLength is the maximum length allowed for any on-chain peer ID.
// Most Peer IDs are expected to be less than 50 bytes.
MaxPeerIDLength = 128 // PARAM_SPEC
// MaxMultiaddrData is the maximum amount of data that can be stored in multiaddrs.
MaxMultiaddrData = 1024 // PARAM_SPEC
)
// Maximum number of control addresses a miner may register.
const MaxControlAddresses = 10
// The maximum number of partitions that may be required to be loaded in a single invocation,
// when all the sector infos for the partitions will be loaded.
func loadPartitionsSectorsMax(partitionSectorCount uint64) uint64 {
return min64(AddressedSectorsMax/partitionSectorCount, AddressedPartitionsMax)
}
// Epochs after which chain state is final with overwhelming probability (hence the likelihood of two fork of this size is negligible)
// This is a conservative value that is chosen via simulations of all known attacks.
const ChainFinality = abi.ChainEpoch(900) // PARAM_SPEC
// Prefix for sealed sector CIDs (CommR).
var SealedCIDPrefix = cid.Prefix{
Version: 1,
Codec: cid.FilCommitmentSealed,
MhType: mh.POSEIDON_BLS12_381_A1_FC1,
MhLength: 32,
}
// List of proof types which may be used when creating a new miner actor.
// This is mutable to allow configuration of testing and development networks.
var WindowPoStProofTypes = map[abi.RegisteredPoStProof]struct{}{
abi.RegisteredPoStProof_StackedDrgWindow32GiBV1: {},
abi.RegisteredPoStProof_StackedDrgWindow64GiBV1: {},
}
// Checks whether a PoSt proof type is supported for new miners.
func CanWindowPoStProof(s abi.RegisteredPoStProof) bool {
_, ok := WindowPoStProofTypes[s]
return ok
}
// List of proof types which may be used when pre-committing a new sector.
// This is mutable to allow configuration of testing and development networks.
// From network version 8, sectors sealed with the V1 seal proof types cannot be committed.
var PreCommitSealProofTypesV8 = map[abi.RegisteredSealProof]struct{}{
abi.RegisteredSealProof_StackedDrg32GiBV1_1: {},
abi.RegisteredSealProof_StackedDrg64GiBV1_1: {},
}
// Checks whether a seal proof type is supported for new miners and sectors.
func CanPreCommitSealProof(s abi.RegisteredSealProof) bool {
_, ok := PreCommitSealProofTypesV8[s]
return ok
}
// Checks whether a seal proof type is supported for new miners and sectors.
// As of network version 11, all permitted seal proof types may be extended.
func CanExtendSealProofType(_ abi.RegisteredSealProof) bool {
return true
}
// Maximum delay to allow between sector pre-commit and subsequent proof.
// The allowable delay depends on seal proof algorithm.
var MaxProveCommitDuration = map[abi.RegisteredSealProof]abi.ChainEpoch{
abi.RegisteredSealProof_StackedDrg32GiBV1: builtin.EpochsInDay + PreCommitChallengeDelay, // PARAM_SPEC
abi.RegisteredSealProof_StackedDrg2KiBV1: builtin.EpochsInDay + PreCommitChallengeDelay,
abi.RegisteredSealProof_StackedDrg8MiBV1: builtin.EpochsInDay + PreCommitChallengeDelay,
abi.RegisteredSealProof_StackedDrg512MiBV1: builtin.EpochsInDay + PreCommitChallengeDelay,
abi.RegisteredSealProof_StackedDrg64GiBV1: builtin.EpochsInDay + PreCommitChallengeDelay,
abi.RegisteredSealProof_StackedDrg32GiBV1_1: 30*builtin.EpochsInDay + PreCommitChallengeDelay, // PARAM_SPEC
abi.RegisteredSealProof_StackedDrg2KiBV1_1: 30*builtin.EpochsInDay + PreCommitChallengeDelay,
abi.RegisteredSealProof_StackedDrg8MiBV1_1: 30*builtin.EpochsInDay + PreCommitChallengeDelay,
abi.RegisteredSealProof_StackedDrg512MiBV1_1: 30*builtin.EpochsInDay + PreCommitChallengeDelay,
abi.RegisteredSealProof_StackedDrg64GiBV1_1: 30*builtin.EpochsInDay + PreCommitChallengeDelay,
}
// The maximum number of sector pre-commitments in a single batch.
// 32 sectors per epoch would support a single miner onboarding 1EiB of 32GiB sectors in 1 year.
const PreCommitSectorBatchMaxSize = 256
// The maximum number of sector replica updates in a single batch.
// Same as PreCommitSectorBatchMaxSize for consistency
const ProveReplicaUpdatesMaxSize = PreCommitSectorBatchMaxSize
// Maximum delay between challenge and pre-commitment.
// This prevents a miner sealing sectors far in advance of committing them to the chain, thus committing to a
// particular chain.
var MaxPreCommitRandomnessLookback = builtin.EpochsInDay + ChainFinality // PARAM_SPEC
// Number of epochs between publishing a sector pre-commitment and when the challenge for interactive PoRep is drawn.
// This (1) prevents a miner predicting a challenge before staking their pre-commit deposit, and
// (2) prevents a miner attempting a long fork in the past to insert a pre-commitment after seeing the challenge.
var PreCommitChallengeDelay = abi.ChainEpoch(150) // PARAM_SPEC
// Lookback from the deadline's challenge window opening from which to sample chain randomness for the WindowPoSt challenge seed.
// This means that deadline windows can be non-overlapping (which make the programming simpler) without requiring a
// miner to wait for chain stability during the challenge window.
// This value cannot be too large lest it compromise the rationality of honest storage (from Window PoSt cost assumptions).
const WPoStChallengeLookback = abi.ChainEpoch(20) // PARAM_SPEC
// Minimum period between fault declaration and the next deadline opening.
// If the number of epochs between fault declaration and deadline's challenge window opening is lower than FaultDeclarationCutoff,
// the fault declaration is considered invalid for that deadline.
// This guarantees that a miner is not likely to successfully fork the chain and declare a fault after seeing the challenges.
const FaultDeclarationCutoff = WPoStChallengeLookback + 50 // PARAM_SPEC
// The maximum age of a fault before the sector is terminated.
// This bounds the time a miner can lose client's data before sacrificing pledge and deal collateral.
var FaultMaxAge = WPoStProvingPeriod * 42 // PARAM_SPEC
// Staging period for a miner worker key change.
// This delay prevents a miner choosing a more favorable worker key that wins leader elections.
const WorkerKeyChangeDelay = ChainFinality // PARAM_SPEC
// Minimum number of epochs past the current epoch a sector may be set to expire.
const MinSectorExpiration = 1 | {
panic(fmt.Sprintf("the proof dispute period %d must exceed finality %d", WPoStDisputeWindow, ChainFinality))
} | conditional_block |
policy.go | than finality so there's always some time to dispute bad proofs.
if WPoStDisputeWindow <= ChainFinality {
panic(fmt.Sprintf("the proof dispute period %d must exceed finality %d", WPoStDisputeWindow, ChainFinality))
}
// A deadline becomes immutable one challenge window before it's challenge window opens.
// The challenge lookback must fall within this immutability period.
if WPoStChallengeLookback > WPoStChallengeWindow {
panic("the challenge lookback cannot exceed one challenge window")
}
// Deadlines are immutable when the challenge window is open, and during
// the previous challenge window.
immutableWindow := 2 * WPoStChallengeWindow
// We want to reserve at least one deadline's worth of time to compact a
// deadline.
minCompactionWindow := WPoStChallengeWindow
// Make sure we have enough time in the proving period to do everything we need.
if (minCompactionWindow + immutableWindow + WPoStDisputeWindow) > WPoStProvingPeriod {
panic(fmt.Sprintf("together, the minimum compaction window (%d) immutability window (%d) and the dispute window (%d) exceed the proving period (%d)",
minCompactionWindow, immutableWindow, WPoStDisputeWindow, WPoStProvingPeriod))
}
}
// The maximum number of partitions that can be loaded in a single invocation.
// This limits the number of simultaneous fault, recovery, or sector-extension declarations.
// We set this to same as MaxPartitionsPerDeadline so we can process that many partitions every deadline.
const AddressedPartitionsMax = MaxPartitionsPerDeadline
// Maximum number of unique "declarations" in batch operations.
const DeclarationsMax = AddressedPartitionsMax
// The maximum number of sector infos that can be loaded in a single invocation.
// This limits the amount of state to be read in a single message execution.
const AddressedSectorsMax = 25_000 // PARAM_SPEC
// Libp2p peer info limits.
const (
// MaxPeerIDLength is the maximum length allowed for any on-chain peer ID.
// Most Peer IDs are expected to be less than 50 bytes.
MaxPeerIDLength = 128 // PARAM_SPEC
// MaxMultiaddrData is the maximum amount of data that can be stored in multiaddrs.
MaxMultiaddrData = 1024 // PARAM_SPEC
)
// Maximum number of control addresses a miner may register.
const MaxControlAddresses = 10
// The maximum number of partitions that may be required to be loaded in a single invocation,
// when all the sector infos for the partitions will be loaded.
func loadPartitionsSectorsMax(partitionSectorCount uint64) uint64 {
return min64(AddressedSectorsMax/partitionSectorCount, AddressedPartitionsMax)
}
// Epochs after which chain state is final with overwhelming probability (hence the likelihood of two fork of this size is negligible)
// This is a conservative value that is chosen via simulations of all known attacks.
const ChainFinality = abi.ChainEpoch(900) // PARAM_SPEC
// Prefix for sealed sector CIDs (CommR).
var SealedCIDPrefix = cid.Prefix{
Version: 1,
Codec: cid.FilCommitmentSealed,
MhType: mh.POSEIDON_BLS12_381_A1_FC1,
MhLength: 32,
}
// List of proof types which may be used when creating a new miner actor.
// This is mutable to allow configuration of testing and development networks.
var WindowPoStProofTypes = map[abi.RegisteredPoStProof]struct{}{
abi.RegisteredPoStProof_StackedDrgWindow32GiBV1: {},
abi.RegisteredPoStProof_StackedDrgWindow64GiBV1: {},
}
// Checks whether a PoSt proof type is supported for new miners.
func CanWindowPoStProof(s abi.RegisteredPoStProof) bool {
_, ok := WindowPoStProofTypes[s]
return ok
}
// List of proof types which may be used when pre-committing a new sector.
// This is mutable to allow configuration of testing and development networks.
// From network version 8, sectors sealed with the V1 seal proof types cannot be committed.
var PreCommitSealProofTypesV8 = map[abi.RegisteredSealProof]struct{}{
abi.RegisteredSealProof_StackedDrg32GiBV1_1: {},
abi.RegisteredSealProof_StackedDrg64GiBV1_1: {},
}
// Checks whether a seal proof type is supported for new miners and sectors.
func CanPreCommitSealProof(s abi.RegisteredSealProof) bool {
_, ok := PreCommitSealProofTypesV8[s]
return ok
}
// Checks whether a seal proof type is supported for new miners and sectors.
// As of network version 11, all permitted seal proof types may be extended.
func CanExtendSealProofType(_ abi.RegisteredSealProof) bool {
return true
}
// Maximum delay to allow between sector pre-commit and subsequent proof.
// The allowable delay depends on seal proof algorithm.
var MaxProveCommitDuration = map[abi.RegisteredSealProof]abi.ChainEpoch{
abi.RegisteredSealProof_StackedDrg32GiBV1: builtin.EpochsInDay + PreCommitChallengeDelay, // PARAM_SPEC
abi.RegisteredSealProof_StackedDrg2KiBV1: builtin.EpochsInDay + PreCommitChallengeDelay,
abi.RegisteredSealProof_StackedDrg8MiBV1: builtin.EpochsInDay + PreCommitChallengeDelay,
abi.RegisteredSealProof_StackedDrg512MiBV1: builtin.EpochsInDay + PreCommitChallengeDelay,
abi.RegisteredSealProof_StackedDrg64GiBV1: builtin.EpochsInDay + PreCommitChallengeDelay,
abi.RegisteredSealProof_StackedDrg32GiBV1_1: 30*builtin.EpochsInDay + PreCommitChallengeDelay, // PARAM_SPEC
abi.RegisteredSealProof_StackedDrg2KiBV1_1: 30*builtin.EpochsInDay + PreCommitChallengeDelay,
abi.RegisteredSealProof_StackedDrg8MiBV1_1: 30*builtin.EpochsInDay + PreCommitChallengeDelay,
abi.RegisteredSealProof_StackedDrg512MiBV1_1: 30*builtin.EpochsInDay + PreCommitChallengeDelay,
abi.RegisteredSealProof_StackedDrg64GiBV1_1: 30*builtin.EpochsInDay + PreCommitChallengeDelay,
}
// The maximum number of sector pre-commitments in a single batch.
// 32 sectors per epoch would support a single miner onboarding 1EiB of 32GiB sectors in 1 year.
const PreCommitSectorBatchMaxSize = 256
// The maximum number of sector replica updates in a single batch.
// Same as PreCommitSectorBatchMaxSize for consistency | const ProveReplicaUpdatesMaxSize = PreCommitSectorBatchMaxSize
// Maximum delay between challenge and pre-commitment.
// This prevents a miner sealing sectors far in advance of committing them to the chain, thus committing to a
// particular chain.
var MaxPreCommitRandomnessLookback = builtin.EpochsInDay + ChainFinality // PARAM_SPEC
// Number of epochs between publishing a sector pre-commitment and when the challenge for interactive PoRep is drawn.
// This (1) prevents a miner predicting a challenge before staking their pre-commit deposit, and
// (2) prevents a miner attempting a long fork in the past to insert a pre-commitment after seeing the challenge.
var PreCommitChallengeDelay = abi.ChainEpoch(150) // PARAM_SPEC
// Lookback from the deadline's challenge window opening from which to sample chain randomness for the WindowPoSt challenge seed.
// This means that deadline windows can be non-overlapping (which make the programming simpler) without requiring a
// miner to wait for chain stability during the challenge window.
// This value cannot be too large lest it compromise the rationality of honest storage (from Window PoSt cost assumptions).
const WPoStChallengeLookback = abi.ChainEpoch(20) // PARAM_SPEC
// Minimum period between fault declaration and the next deadline opening.
// If the number of epochs between fault declaration and deadline's challenge window opening is lower than FaultDeclarationCutoff,
// the fault declaration is considered invalid for that deadline.
// This guarantees that a miner is not likely to successfully fork the chain and declare a fault after seeing the challenges.
const FaultDeclarationCutoff = WPoStChallengeLookback + 50 // PARAM_SPEC
// The maximum age of a fault before the sector is terminated.
// This bounds the time a miner can lose client's data before sacrificing pledge and deal collateral.
var FaultMaxAge = WPoStProvingPeriod * 42 // PARAM_SPEC
// Staging period for a miner worker key change.
// This delay prevents a miner choosing a more favorable worker key that wins leader elections.
const WorkerKeyChangeDelay = ChainFinality // PARAM_SPEC
// Minimum number of epochs past the current epoch a sector may be set to expire.
const MinSectorExpiration = 18 | random_line_split | |
policy.go | {},
}
// Checks whether a PoSt proof type is supported for new miners.
func CanWindowPoStProof(s abi.RegisteredPoStProof) bool {
_, ok := WindowPoStProofTypes[s]
return ok
}
// List of proof types which may be used when pre-committing a new sector.
// This is mutable to allow configuration of testing and development networks.
// From network version 8, sectors sealed with the V1 seal proof types cannot be committed.
var PreCommitSealProofTypesV8 = map[abi.RegisteredSealProof]struct{}{
abi.RegisteredSealProof_StackedDrg32GiBV1_1: {},
abi.RegisteredSealProof_StackedDrg64GiBV1_1: {},
}
// Checks whether a seal proof type is supported for new miners and sectors.
func CanPreCommitSealProof(s abi.RegisteredSealProof) bool {
_, ok := PreCommitSealProofTypesV8[s]
return ok
}
// Checks whether a seal proof type is supported for new miners and sectors.
// As of network version 11, all permitted seal proof types may be extended.
func CanExtendSealProofType(_ abi.RegisteredSealProof) bool {
return true
}
// Maximum delay to allow between sector pre-commit and subsequent proof.
// The allowable delay depends on seal proof algorithm.
var MaxProveCommitDuration = map[abi.RegisteredSealProof]abi.ChainEpoch{
abi.RegisteredSealProof_StackedDrg32GiBV1: builtin.EpochsInDay + PreCommitChallengeDelay, // PARAM_SPEC
abi.RegisteredSealProof_StackedDrg2KiBV1: builtin.EpochsInDay + PreCommitChallengeDelay,
abi.RegisteredSealProof_StackedDrg8MiBV1: builtin.EpochsInDay + PreCommitChallengeDelay,
abi.RegisteredSealProof_StackedDrg512MiBV1: builtin.EpochsInDay + PreCommitChallengeDelay,
abi.RegisteredSealProof_StackedDrg64GiBV1: builtin.EpochsInDay + PreCommitChallengeDelay,
abi.RegisteredSealProof_StackedDrg32GiBV1_1: 30*builtin.EpochsInDay + PreCommitChallengeDelay, // PARAM_SPEC
abi.RegisteredSealProof_StackedDrg2KiBV1_1: 30*builtin.EpochsInDay + PreCommitChallengeDelay,
abi.RegisteredSealProof_StackedDrg8MiBV1_1: 30*builtin.EpochsInDay + PreCommitChallengeDelay,
abi.RegisteredSealProof_StackedDrg512MiBV1_1: 30*builtin.EpochsInDay + PreCommitChallengeDelay,
abi.RegisteredSealProof_StackedDrg64GiBV1_1: 30*builtin.EpochsInDay + PreCommitChallengeDelay,
}
// The maximum number of sector pre-commitments in a single batch.
// 32 sectors per epoch would support a single miner onboarding 1EiB of 32GiB sectors in 1 year.
const PreCommitSectorBatchMaxSize = 256
// The maximum number of sector replica updates in a single batch.
// Same as PreCommitSectorBatchMaxSize for consistency
const ProveReplicaUpdatesMaxSize = PreCommitSectorBatchMaxSize
// Maximum delay between challenge and pre-commitment.
// This prevents a miner sealing sectors far in advance of committing them to the chain, thus committing to a
// particular chain.
var MaxPreCommitRandomnessLookback = builtin.EpochsInDay + ChainFinality // PARAM_SPEC
// Number of epochs between publishing a sector pre-commitment and when the challenge for interactive PoRep is drawn.
// This (1) prevents a miner predicting a challenge before staking their pre-commit deposit, and
// (2) prevents a miner attempting a long fork in the past to insert a pre-commitment after seeing the challenge.
var PreCommitChallengeDelay = abi.ChainEpoch(150) // PARAM_SPEC
// Lookback from the deadline's challenge window opening from which to sample chain randomness for the WindowPoSt challenge seed.
// This means that deadline windows can be non-overlapping (which make the programming simpler) without requiring a
// miner to wait for chain stability during the challenge window.
// This value cannot be too large lest it compromise the rationality of honest storage (from Window PoSt cost assumptions).
const WPoStChallengeLookback = abi.ChainEpoch(20) // PARAM_SPEC
// Minimum period between fault declaration and the next deadline opening.
// If the number of epochs between fault declaration and deadline's challenge window opening is lower than FaultDeclarationCutoff,
// the fault declaration is considered invalid for that deadline.
// This guarantees that a miner is not likely to successfully fork the chain and declare a fault after seeing the challenges.
const FaultDeclarationCutoff = WPoStChallengeLookback + 50 // PARAM_SPEC
// The maximum age of a fault before the sector is terminated.
// This bounds the time a miner can lose client's data before sacrificing pledge and deal collateral.
var FaultMaxAge = WPoStProvingPeriod * 42 // PARAM_SPEC
// Staging period for a miner worker key change.
// This delay prevents a miner choosing a more favorable worker key that wins leader elections.
const WorkerKeyChangeDelay = ChainFinality // PARAM_SPEC
// Minimum number of epochs past the current epoch a sector may be set to expire.
const MinSectorExpiration = 180 * builtin.EpochsInDay // PARAM_SPEC
// The maximum number of epochs past the current epoch that sector lifetime may be extended.
// A sector may be extended multiple times, however, the total maximum lifetime is also bounded by
// the associated seal proof's maximum lifetime.
const MaxSectorExpirationExtension = 540 * builtin.EpochsInDay // PARAM_SPEC
// Ratio of sector size to maximum number of deals per sector.
// The maximum number of deals is the sector size divided by this number (2^27)
// which limits 32GiB sectors to 256 deals and 64GiB sectors to 512
const DealLimitDenominator = 134217728 // PARAM_SPEC
// Number of epochs after a consensus fault for which a miner is ineligible
// for permissioned actor methods and winning block elections.
const ConsensusFaultIneligibilityDuration = ChainFinality
// DealWeight and VerifiedDealWeight are spacetime occupied by regular deals and verified deals in a sector.
// Sum of DealWeight and VerifiedDealWeight should be less than or equal to total SpaceTime of a sector.
// Sectors full of VerifiedDeals will have a SectorQuality of VerifiedDealWeightMultiplier/QualityBaseMultiplier.
// Sectors full of Deals will have a SectorQuality of DealWeightMultiplier/QualityBaseMultiplier.
// Sectors with neither will have a SectorQuality of QualityBaseMultiplier/QualityBaseMultiplier.
// SectorQuality of a sector is a weighted average of multipliers based on their proportions.
func QualityForWeight(size abi.SectorSize, duration abi.ChainEpoch, dealWeight, verifiedWeight abi.DealWeight) abi.SectorQuality {
// sectorSpaceTime = size * duration
sectorSpaceTime := big.Mul(big.NewIntUnsigned(uint64(size)), big.NewInt(int64(duration)))
// totalDealSpaceTime = dealWeight + verifiedWeight
totalDealSpaceTime := big.Add(dealWeight, verifiedWeight)
// Base - all size * duration of non-deals
// weightedBaseSpaceTime = (sectorSpaceTime - totalDealSpaceTime) * QualityBaseMultiplier
weightedBaseSpaceTime := big.Mul(big.Sub(sectorSpaceTime, totalDealSpaceTime), builtin.QualityBaseMultiplier)
// Deal - all deal size * deal duration * 10
// weightedDealSpaceTime = dealWeight * DealWeightMultiplier
weightedDealSpaceTime := big.Mul(dealWeight, builtin.DealWeightMultiplier)
// Verified - all verified deal size * verified deal duration * 100
// weightedVerifiedSpaceTime = verifiedWeight * VerifiedDealWeightMultiplier
weightedVerifiedSpaceTime := big.Mul(verifiedWeight, builtin.VerifiedDealWeightMultiplier)
// Sum - sum of all spacetime
// weightedSumSpaceTime = weightedBaseSpaceTime + weightedDealSpaceTime + weightedVerifiedSpaceTime
weightedSumSpaceTime := big.Sum(weightedBaseSpaceTime, weightedDealSpaceTime, weightedVerifiedSpaceTime)
// scaledUpWeightedSumSpaceTime = weightedSumSpaceTime * 2^20
scaledUpWeightedSumSpaceTime := big.Lsh(weightedSumSpaceTime, builtin.SectorQualityPrecision)
// Average of weighted space time: (scaledUpWeightedSumSpaceTime / sectorSpaceTime * 10)
return big.Div(big.Div(scaledUpWeightedSumSpaceTime, sectorSpaceTime), builtin.QualityBaseMultiplier)
}
// The power for a sector size, committed duration, and weight.
func QAPowerForWeight(size abi.SectorSize, duration abi.ChainEpoch, dealWeight, verifiedWeight abi.DealWeight) abi.StoragePower {
quality := QualityForWeight(size, duration, dealWeight, verifiedWeight)
return big.Rsh(big.Mul(big.NewIntUnsigned(uint64(size)), quality), builtin.SectorQualityPrecision)
}
// The quality-adjusted power for a sector.
func | QAPowerForSector | identifier_name | |
yuva_info.rs | 1, and V is in channel 1 of plane 1. Channel ordering
/// within a pixmap/texture given the channels it contains:
/// A: 0:A
/// Luminance/Gray: 0:Gray
/// Luminance/Gray + Alpha: 0:Gray, 1:A
/// RG 0:R, 1:G
/// RGB 0:R, 1:G, 2:B
/// RGBA 0:R, 1:G, 2:B, 3:A
pub use sb::SkYUVAInfo_PlaneConfig as PlaneConfig;
variant_name!(PlaneConfig::YUV);
/// UV subsampling is also specified in the enum value names using J:a:b notation (e.g. 4:2:0 is
/// 1/2 horizontal and 1/2 vertical resolution for U and V). If alpha is present it is not sub-
/// sampled. Note that Subsampling values other than k444 are only valid with [PlaneConfig] values
/// that have U and V in different planes than Y (and A, if present).
#[repr(i32)]
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
pub enum Subsampling {
Unknown = SkYUVAInfo_Subsampling::kUnknown as _,
S444 = SkYUVAInfo_Subsampling::k444 as _,
S422 = SkYUVAInfo_Subsampling::k422 as _,
S420 = SkYUVAInfo_Subsampling::k420 as _,
S440 = SkYUVAInfo_Subsampling::k440 as _,
S411 = SkYUVAInfo_Subsampling::k411 as _,
S410 = SkYUVAInfo_Subsampling::k410 as _,
}
native_transmutable!(SkYUVAInfo_Subsampling, Subsampling, subsampling_layout);
/// Describes how subsampled chroma values are sited relative to luma values.
///
/// Currently only centered siting is supported but will expand to support additional sitings.
pub use sb::SkYUVAInfo_Siting as Siting;
variant_name!(Siting::Centered);
/// Ratio of Y/A values to U/V values in x and y.
pub fn subsampling_factors(subsampling: Subsampling) -> (i32, i32) {
let mut factors: [i32; 2] = Default::default();
unsafe { sb::C_SkYUVAInfo_SubsamplingFactors(subsampling.into_native(), &mut factors[0]) };
#[allow(clippy::tuple_array_conversions)]
(factors[0], factors[1])
}
/// `SubsamplingFactors(Subsampling)` if `plane_index` refers to a U/V plane and otherwise `(1, 1)`
/// if inputs are valid. Invalid inputs consist of incompatible [PlaneConfig] [Subsampling]
/// `plane_index` combinations. `(0, 0)` is returned for invalid inputs.
pub fn plane_subsampling_factors(
plane: PlaneConfig,
subsampling: Subsampling,
plane_index: usize,
) -> (i32, i32) {
let mut factors: [i32; 2] = Default::default();
unsafe {
sb::C_SkYUVAInfo_PlaneSubsamplingFactors(
plane,
subsampling.into_native(),
plane_index.try_into().unwrap(),
&mut factors[0],
)
};
#[allow(clippy::tuple_array_conversions)]
(factors[0], factors[1])
}
/// Given image dimensions, a planer configuration, subsampling, and origin, determine the expected
/// size of each plane. Returns the expected planes. The input image dimensions are as displayed
/// (after the planes have been transformed to the intended display orientation). The plane
/// dimensions are output as the planes are stored in memory (may be rotated from image dimensions).
pub fn plane_dimensions(
image_dimensions: impl Into<ISize>,
config: PlaneConfig,
subsampling: Subsampling,
origin: EncodedOrigin,
) -> Vec<ISize> {
let mut plane_dimensions = [ISize::default(); YUVAInfo::MAX_PLANES];
let size: usize = unsafe {
SkYUVAInfo::PlaneDimensions(
image_dimensions.into().into_native(),
config,
subsampling.into_native(),
origin.into_native(),
plane_dimensions.native_mut().as_mut_ptr(),
)
}
.try_into()
.unwrap();
plane_dimensions[0..size].to_vec()
}
/// Number of planes for a given [PlaneConfig].
pub fn num_planes(config: PlaneConfig) -> usize {
unsafe { sb::C_SkYUVAInfo_NumPlanes(config) }
.try_into()
.unwrap()
}
/// Number of Y, U, V, A channels in the ith plane for a given [PlaneConfig] (or [None] if i is
/// invalid).
pub fn num_channels_in_plane(config: PlaneConfig, i: usize) -> Option<usize> {
(i < num_planes(config)).if_true_then_some(|| {
unsafe { sb::C_SkYUVAInfo_NumChannelsInPlane(config, i.try_into().unwrap()) }
.try_into()
.unwrap()
})
}
/// Does the [PlaneConfig] have alpha values?
pub fn has_alpha(config: PlaneConfig) -> bool {
unsafe { sb::SkYUVAInfo_HasAlpha(config) }
}
impl Default for YUVAInfo {
fn default() -> Self {
Self::construct(|yi| unsafe { sb::C_SkYUVAInfo_Construct(yi) })
}
}
impl NativePartialEq for YUVAInfo {
fn eq(&self, rhs: &Self) -> bool {
unsafe { sb::C_SkYUVAInfo_equals(self.native(), rhs.native()) }
}
}
impl fmt::Debug for YUVAInfo {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("YUVAInfo")
.field("dimensions", &self.dimensions())
.field("plane_config", &self.plane_config())
.field("subsampling", &self.subsampling())
.field("yuv_color_space", &self.yuv_color_space())
.field("origin", &self.origin())
.field("siting_xy", &self.siting_xy())
.finish()
}
}
impl YUVAInfo {
pub const MAX_PLANES: usize = sb::SkYUVAInfo_kMaxPlanes as _;
/// `dimensions` should specify the size of the full resolution image (after planes have been
/// oriented to how the image is displayed as indicated by `origin`).
pub fn new(
dimensions: impl Into<ISize>,
config: PlaneConfig,
subsampling: Subsampling,
color_space: image_info::YUVColorSpace,
origin: impl Into<Option<EncodedOrigin>>,
siting_xy: impl Into<Option<(Siting, Siting)>>,
) -> Option<Self> {
let origin = origin.into().unwrap_or(EncodedOrigin::TopLeft);
let (siting_x, siting_y) = siting_xy
.into()
.unwrap_or((Siting::Centered, Siting::Centered));
let n = unsafe {
SkYUVAInfo::new(
dimensions.into().into_native(),
config,
subsampling.into_native(),
color_space,
origin.into_native(),
siting_x,
siting_y,
)
};
Self::native_is_valid(&n).if_true_then_some(|| Self::from_native_c(n))
}
pub fn plane_config(&self) -> PlaneConfig {
self.native().fPlaneConfig
}
pub fn subsampling(&self) -> Subsampling {
Subsampling::from_native_c(self.native().fSubsampling)
}
pub fn | (&self, plane_index: usize) -> (i32, i32) {
plane_subsampling_factors(self.plane_config(), self.subsampling(), plane_index)
}
/// Dimensions of the full resolution image (after planes have been oriented to how the image
/// is displayed as indicated by fOrigin).
pub fn dimensions(&self) -> ISize {
ISize::from_native_c(self.native().fDimensions)
}
pub fn width(&self) -> i32 {
self.dimensions().width
}
pub fn height(&self) -> i32 {
self.dimensions().height
}
pub fn yuv_color_space(&self) -> image_info::YUVColorSpace {
self.native().fYUVColorSpace
}
pub fn siting_xy(&self) -> (Siting, Siting) {
let n = self.native();
(n.fSitingX, n.fSitingY)
}
pub fn origin(&self) -> EncodedOrigin {
EncodedOrigin::from_native_c(self.native().fOrigin)
}
pub fn origin_matrix(&self) -> Matrix {
self.origin().to_matrix((self.width(), self.height()))
}
pub fn has_alpha(&self) -> bool {
has_alpha(self.plane_config())
}
/// Returns the dimensions for each plane. Dimensions are as stored in memory, before
/// transformation to image | plane_subsampling_factors | identifier_name |
yuva_info.rs | 1, and V is in channel 1 of plane 1. Channel ordering
/// within a pixmap/texture given the channels it contains:
/// A: 0:A
/// Luminance/Gray: 0:Gray
/// Luminance/Gray + Alpha: 0:Gray, 1:A
/// RG 0:R, 1:G
/// RGB 0:R, 1:G, 2:B
/// RGBA 0:R, 1:G, 2:B, 3:A
pub use sb::SkYUVAInfo_PlaneConfig as PlaneConfig;
variant_name!(PlaneConfig::YUV);
/// UV subsampling is also specified in the enum value names using J:a:b notation (e.g. 4:2:0 is
/// 1/2 horizontal and 1/2 vertical resolution for U and V). If alpha is present it is not sub-
/// sampled. Note that Subsampling values other than k444 are only valid with [PlaneConfig] values
/// that have U and V in different planes than Y (and A, if present).
#[repr(i32)]
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
pub enum Subsampling {
Unknown = SkYUVAInfo_Subsampling::kUnknown as _,
S444 = SkYUVAInfo_Subsampling::k444 as _,
S422 = SkYUVAInfo_Subsampling::k422 as _,
S420 = SkYUVAInfo_Subsampling::k420 as _,
S440 = SkYUVAInfo_Subsampling::k440 as _,
S411 = SkYUVAInfo_Subsampling::k411 as _,
S410 = SkYUVAInfo_Subsampling::k410 as _,
}
native_transmutable!(SkYUVAInfo_Subsampling, Subsampling, subsampling_layout);
/// Describes how subsampled chroma values are sited relative to luma values.
///
/// Currently only centered siting is supported but will expand to support additional sitings.
pub use sb::SkYUVAInfo_Siting as Siting;
variant_name!(Siting::Centered);
/// Ratio of Y/A values to U/V values in x and y.
pub fn subsampling_factors(subsampling: Subsampling) -> (i32, i32) {
let mut factors: [i32; 2] = Default::default();
unsafe { sb::C_SkYUVAInfo_SubsamplingFactors(subsampling.into_native(), &mut factors[0]) };
#[allow(clippy::tuple_array_conversions)]
(factors[0], factors[1])
}
/// `SubsamplingFactors(Subsampling)` if `plane_index` refers to a U/V plane and otherwise `(1, 1)`
/// if inputs are valid. Invalid inputs consist of incompatible [PlaneConfig] [Subsampling]
/// `plane_index` combinations. `(0, 0)` is returned for invalid inputs.
pub fn plane_subsampling_factors(
plane: PlaneConfig,
subsampling: Subsampling,
plane_index: usize,
) -> (i32, i32) {
let mut factors: [i32; 2] = Default::default();
unsafe {
sb::C_SkYUVAInfo_PlaneSubsamplingFactors(
plane,
subsampling.into_native(),
plane_index.try_into().unwrap(),
&mut factors[0],
)
};
#[allow(clippy::tuple_array_conversions)]
(factors[0], factors[1])
}
/// Given image dimensions, a planer configuration, subsampling, and origin, determine the expected
/// size of each plane. Returns the expected planes. The input image dimensions are as displayed
/// (after the planes have been transformed to the intended display orientation). The plane
/// dimensions are output as the planes are stored in memory (may be rotated from image dimensions).
pub fn plane_dimensions(
image_dimensions: impl Into<ISize>,
config: PlaneConfig,
subsampling: Subsampling,
origin: EncodedOrigin,
) -> Vec<ISize> {
let mut plane_dimensions = [ISize::default(); YUVAInfo::MAX_PLANES];
let size: usize = unsafe {
SkYUVAInfo::PlaneDimensions(
image_dimensions.into().into_native(),
config,
subsampling.into_native(),
origin.into_native(),
plane_dimensions.native_mut().as_mut_ptr(),
)
}
.try_into()
.unwrap();
plane_dimensions[0..size].to_vec()
}
/// Number of planes for a given [PlaneConfig].
pub fn num_planes(config: PlaneConfig) -> usize |
/// Number of Y, U, V, A channels in the ith plane for a given [PlaneConfig] (or [None] if i is
/// invalid).
pub fn num_channels_in_plane(config: PlaneConfig, i: usize) -> Option<usize> {
(i < num_planes(config)).if_true_then_some(|| {
unsafe { sb::C_SkYUVAInfo_NumChannelsInPlane(config, i.try_into().unwrap()) }
.try_into()
.unwrap()
})
}
/// Does the [PlaneConfig] have alpha values?
pub fn has_alpha(config: PlaneConfig) -> bool {
unsafe { sb::SkYUVAInfo_HasAlpha(config) }
}
impl Default for YUVAInfo {
fn default() -> Self {
Self::construct(|yi| unsafe { sb::C_SkYUVAInfo_Construct(yi) })
}
}
impl NativePartialEq for YUVAInfo {
fn eq(&self, rhs: &Self) -> bool {
unsafe { sb::C_SkYUVAInfo_equals(self.native(), rhs.native()) }
}
}
impl fmt::Debug for YUVAInfo {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("YUVAInfo")
.field("dimensions", &self.dimensions())
.field("plane_config", &self.plane_config())
.field("subsampling", &self.subsampling())
.field("yuv_color_space", &self.yuv_color_space())
.field("origin", &self.origin())
.field("siting_xy", &self.siting_xy())
.finish()
}
}
impl YUVAInfo {
pub const MAX_PLANES: usize = sb::SkYUVAInfo_kMaxPlanes as _;
/// `dimensions` should specify the size of the full resolution image (after planes have been
/// oriented to how the image is displayed as indicated by `origin`).
pub fn new(
dimensions: impl Into<ISize>,
config: PlaneConfig,
subsampling: Subsampling,
color_space: image_info::YUVColorSpace,
origin: impl Into<Option<EncodedOrigin>>,
siting_xy: impl Into<Option<(Siting, Siting)>>,
) -> Option<Self> {
let origin = origin.into().unwrap_or(EncodedOrigin::TopLeft);
let (siting_x, siting_y) = siting_xy
.into()
.unwrap_or((Siting::Centered, Siting::Centered));
let n = unsafe {
SkYUVAInfo::new(
dimensions.into().into_native(),
config,
subsampling.into_native(),
color_space,
origin.into_native(),
siting_x,
siting_y,
)
};
Self::native_is_valid(&n).if_true_then_some(|| Self::from_native_c(n))
}
pub fn plane_config(&self) -> PlaneConfig {
self.native().fPlaneConfig
}
pub fn subsampling(&self) -> Subsampling {
Subsampling::from_native_c(self.native().fSubsampling)
}
pub fn plane_subsampling_factors(&self, plane_index: usize) -> (i32, i32) {
plane_subsampling_factors(self.plane_config(), self.subsampling(), plane_index)
}
/// Dimensions of the full resolution image (after planes have been oriented to how the image
/// is displayed as indicated by fOrigin).
pub fn dimensions(&self) -> ISize {
ISize::from_native_c(self.native().fDimensions)
}
pub fn width(&self) -> i32 {
self.dimensions().width
}
pub fn height(&self) -> i32 {
self.dimensions().height
}
pub fn yuv_color_space(&self) -> image_info::YUVColorSpace {
self.native().fYUVColorSpace
}
pub fn siting_xy(&self) -> (Siting, Siting) {
let n = self.native();
(n.fSitingX, n.fSitingY)
}
pub fn origin(&self) -> EncodedOrigin {
EncodedOrigin::from_native_c(self.native().fOrigin)
}
pub fn origin_matrix(&self) -> Matrix {
self.origin().to_matrix((self.width(), self.height()))
}
pub fn has_alpha(&self) -> bool {
has_alpha(self.plane_config())
}
/// Returns the dimensions for each plane. Dimensions are as stored in memory, before
/// transformation to | {
unsafe { sb::C_SkYUVAInfo_NumPlanes(config) }
.try_into()
.unwrap()
} | identifier_body |
yuva_info.rs | 1, and V is in channel 1 of plane 1. Channel ordering
/// within a pixmap/texture given the channels it contains:
/// A: 0:A
/// Luminance/Gray: 0:Gray
/// Luminance/Gray + Alpha: 0:Gray, 1:A
/// RG 0:R, 1:G
/// RGB 0:R, 1:G, 2:B
/// RGBA 0:R, 1:G, 2:B, 3:A
pub use sb::SkYUVAInfo_PlaneConfig as PlaneConfig;
variant_name!(PlaneConfig::YUV);
/// UV subsampling is also specified in the enum value names using J:a:b notation (e.g. 4:2:0 is
/// 1/2 horizontal and 1/2 vertical resolution for U and V). If alpha is present it is not sub-
/// sampled. Note that Subsampling values other than k444 are only valid with [PlaneConfig] values
/// that have U and V in different planes than Y (and A, if present).
#[repr(i32)]
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
pub enum Subsampling {
Unknown = SkYUVAInfo_Subsampling::kUnknown as _,
S444 = SkYUVAInfo_Subsampling::k444 as _,
S422 = SkYUVAInfo_Subsampling::k422 as _,
S420 = SkYUVAInfo_Subsampling::k420 as _,
S440 = SkYUVAInfo_Subsampling::k440 as _,
S411 = SkYUVAInfo_Subsampling::k411 as _,
S410 = SkYUVAInfo_Subsampling::k410 as _,
}
native_transmutable!(SkYUVAInfo_Subsampling, Subsampling, subsampling_layout);
/// Describes how subsampled chroma values are sited relative to luma values.
///
/// Currently only centered siting is supported but will expand to support additional sitings.
pub use sb::SkYUVAInfo_Siting as Siting;
variant_name!(Siting::Centered);
/// Ratio of Y/A values to U/V values in x and y.
pub fn subsampling_factors(subsampling: Subsampling) -> (i32, i32) {
let mut factors: [i32; 2] = Default::default();
unsafe { sb::C_SkYUVAInfo_SubsamplingFactors(subsampling.into_native(), &mut factors[0]) };
#[allow(clippy::tuple_array_conversions)]
(factors[0], factors[1])
}
/// `SubsamplingFactors(Subsampling)` if `plane_index` refers to a U/V plane and otherwise `(1, 1)`
/// if inputs are valid. Invalid inputs consist of incompatible [PlaneConfig] [Subsampling]
/// `plane_index` combinations. `(0, 0)` is returned for invalid inputs.
pub fn plane_subsampling_factors(
plane: PlaneConfig,
subsampling: Subsampling,
plane_index: usize,
) -> (i32, i32) {
let mut factors: [i32; 2] = Default::default();
unsafe {
sb::C_SkYUVAInfo_PlaneSubsamplingFactors(
plane,
subsampling.into_native(),
plane_index.try_into().unwrap(),
&mut factors[0],
)
};
#[allow(clippy::tuple_array_conversions)]
(factors[0], factors[1])
}
/// Given image dimensions, a planer configuration, subsampling, and origin, determine the expected
/// size of each plane. Returns the expected planes. The input image dimensions are as displayed
/// (after the planes have been transformed to the intended display orientation). The plane
/// dimensions are output as the planes are stored in memory (may be rotated from image dimensions).
pub fn plane_dimensions(
image_dimensions: impl Into<ISize>,
config: PlaneConfig,
subsampling: Subsampling,
origin: EncodedOrigin,
) -> Vec<ISize> {
let mut plane_dimensions = [ISize::default(); YUVAInfo::MAX_PLANES];
let size: usize = unsafe {
SkYUVAInfo::PlaneDimensions(
image_dimensions.into().into_native(),
config,
subsampling.into_native(),
origin.into_native(),
plane_dimensions.native_mut().as_mut_ptr(),
)
}
.try_into()
.unwrap();
plane_dimensions[0..size].to_vec()
}
/// Number of planes for a given [PlaneConfig].
pub fn num_planes(config: PlaneConfig) -> usize {
unsafe { sb::C_SkYUVAInfo_NumPlanes(config) }
.try_into()
.unwrap()
}
/// Number of Y, U, V, A channels in the ith plane for a given [PlaneConfig] (or [None] if i is
/// invalid).
pub fn num_channels_in_plane(config: PlaneConfig, i: usize) -> Option<usize> {
(i < num_planes(config)).if_true_then_some(|| {
unsafe { sb::C_SkYUVAInfo_NumChannelsInPlane(config, i.try_into().unwrap()) }
.try_into()
.unwrap()
})
}
/// Does the [PlaneConfig] have alpha values?
pub fn has_alpha(config: PlaneConfig) -> bool {
unsafe { sb::SkYUVAInfo_HasAlpha(config) }
}
impl Default for YUVAInfo {
fn default() -> Self {
Self::construct(|yi| unsafe { sb::C_SkYUVAInfo_Construct(yi) })
}
}
impl NativePartialEq for YUVAInfo {
fn eq(&self, rhs: &Self) -> bool {
unsafe { sb::C_SkYUVAInfo_equals(self.native(), rhs.native()) }
}
}
impl fmt::Debug for YUVAInfo {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("YUVAInfo")
.field("dimensions", &self.dimensions())
.field("plane_config", &self.plane_config())
.field("subsampling", &self.subsampling())
.field("yuv_color_space", &self.yuv_color_space())
.field("origin", &self.origin())
.field("siting_xy", &self.siting_xy())
.finish()
}
}
impl YUVAInfo {
pub const MAX_PLANES: usize = sb::SkYUVAInfo_kMaxPlanes as _;
/// `dimensions` should specify the size of the full resolution image (after planes have been
/// oriented to how the image is displayed as indicated by `origin`).
pub fn new(
dimensions: impl Into<ISize>,
config: PlaneConfig,
subsampling: Subsampling,
color_space: image_info::YUVColorSpace,
origin: impl Into<Option<EncodedOrigin>>,
siting_xy: impl Into<Option<(Siting, Siting)>>,
) -> Option<Self> {
let origin = origin.into().unwrap_or(EncodedOrigin::TopLeft);
let (siting_x, siting_y) = siting_xy
.into()
.unwrap_or((Siting::Centered, Siting::Centered));
let n = unsafe {
SkYUVAInfo::new(
dimensions.into().into_native(),
config,
subsampling.into_native(),
color_space,
origin.into_native(),
siting_x,
siting_y,
)
};
Self::native_is_valid(&n).if_true_then_some(|| Self::from_native_c(n))
}
pub fn plane_config(&self) -> PlaneConfig {
self.native().fPlaneConfig
}
pub fn subsampling(&self) -> Subsampling {
Subsampling::from_native_c(self.native().fSubsampling)
}
pub fn plane_subsampling_factors(&self, plane_index: usize) -> (i32, i32) {
plane_subsampling_factors(self.plane_config(), self.subsampling(), plane_index)
}
/// Dimensions of the full resolution image (after planes have been oriented to how the image
/// is displayed as indicated by fOrigin).
pub fn dimensions(&self) -> ISize {
ISize::from_native_c(self.native().fDimensions)
}
pub fn width(&self) -> i32 {
self.dimensions().width
}
pub fn height(&self) -> i32 {
self.dimensions().height
}
pub fn yuv_color_space(&self) -> image_info::YUVColorSpace {
self.native().fYUVColorSpace
}
pub fn siting_xy(&self) -> (Siting, Siting) {
let n = self.native();
(n.fSitingX, n.fSitingY)
}
pub fn origin(&self) -> EncodedOrigin {
EncodedOrigin::from_native_c(self.native().fOrigin)
}
pub fn origin_matrix(&self) -> Matrix {
self.origin().to_matrix((self.width(), self.height()))
} | pub fn has_alpha(&self) -> bool {
has_alpha(self.plane_config())
}
/// Returns the dimensions for each plane. Dimensions are as stored in memory, before
/// transformation to image display | random_line_split | |
labstats-subscriber.py | Will delete daemon's pidfile if --daemon was specified
def clean_quit():
if options.daemon:
daemon.delpid()
exit(1)
# If collector is killed manually, clean up and quit
def sigterm_handler(signal, frame):
error_output("Subscriber killed via SIGTERM")
output_checkins()
clean_quit()
# If SIGHUP received, do "soft restart" of sockets and files
# No need to re-input checkins
def sighup_handler(signal, frame):
error_output("Collector received a SIGHUP")
context.destroy()
time.sleep(5)
main(options.retries, 2000, options.tlimit)
signal.signal(signal.SIGTERM, sigterm_handler)
signal.signal(signal.SIGHUP, sighup_handler)
'''
Reaper functions: check timestamps, read in/out checked-in machines.
By default, the reaper will write out its state every recv()
and will check that all checked-in machines are no older than 20 minutes
(by default) every recv()
'''
###############################################################################
# Verbose prints out check_ins: hostname::timestamp format
def print_checkins(last_check, check_ins):
verbose_print("Last check was at "+last_check.strftime(timeformat))
verbose_print("Checked-in machines: ")
for hostname, timestamp in check_ins.iteritems():
verbose_print(hostname+"::"+timestamp.strftime(timeformat))
# Outputs pickled (last_check, check_ins) tuple.
# Overwrites existing checked_in file
def output_checkins(last_check, check_ins):
if options.output is False:
return
try:
checkinfile = open('checked_in', 'w')
except Exception as e:
error_output("Warning: unable to open checked_in logfile. "+str(e))
return
try:
tup = (last_check, check_ins)
cPickle.dump(tup, checkinfile)
checkinfile.close()
except Exception as e:
error_output("Error: could not dump pickled check_in data. "+str(e))
# Read from outputted checked_in file, return last_check and check_ins
def read_checkins():
if not os.path.isfile('checked_in'): # No checkins.log found
logger.warning("No checked_in found")
return (None, {})
try:
infile = open('checked_in', 'r')
last_check, check_ins = cPickle.load(infile)
infile.close()
print_checkins(last_check, check_ins) # verbose prints what was stored
return last_check, check_ins
except Exception as e:
error_output("Error: could not get last_check and check_ins. "+str(e))
return (None, {})
# Checks timestamp is within <interval> minutes' time.
# Returns True if timestamp is outdated
def outdated(curtime, timestamp): # pass in type datetime, datetime
verbose_print("Checking timestamp "+timestamp.strftime(timeformat)+" against current time")
timeobj = datetime.fromtimestamp(mktime(timestamp.timetuple()))
diff = curtime - timeobj # type timedelta
return diff >= timedelta(minutes = options.interval)
# Checks timestamps are all <interval> minutes within current time
# Removes machines/timestamps that are outdated
# Set last_check to current GMT (4-5 hour offset)
def reap(last_check, last_recv, check_ins):
# if last check and last recv are eg. >90 mins from each other,
# stop/skip reaper (because it could be throttling error)
if last_check - last_recv > timedelta(minutes = options.faulttime):
error_output("Too much time between now and last_recv, skipping reaping")
return (last_check, check_ins)
# converting directly from gmtime to datetime loses DST data
cur_string = time.strftime(timeformat, time.gmtime())
last_check = datetime.strptime(cur_string, timeformat)
new_dict = {}
deleted = 0
for hostname, timestamp in check_ins.iteritems():
if outdated(last_check, timestamp) is True:
verbose_print(hostname+" is outdated")
deleted += 1
else: # not outdated; add back to new_dict
|
verbose_print("Reaped "+str(deleted)+" items from check-ins")
output_checkins(last_check, new_dict)
return (last_check, new_dict)
###############################################################################
# Output the json into a log file in /var/log/labstats
def output_log(to_write):
if not os.path.exists('/var/log/labstats/'):
try:
os.mkdir('/var/log/labstats/')
except OSError as e:
error_output("Error: could not make /var/log/labstats/. Not sudo/root.")
return
try:
logout = open('/var/log/labstats/subscriber.log', 'w')
for line in to_write:
logout.write(line)
logout.close()
except OSError as e:
error_output("Error: could not write to subscriber.log. No root access.")
except Exception as e:
error_output("Error: could not write to subscriber.log. "+str(e).capitalize())
def main(ntries, ntime, tlimit):
last_check, check_ins = read_checkins()
# Set up ZMQ sockets and connections
context = zmq.Context()
subscriber = context.socket(zmq.SUB)
subscriber.setsockopt(zmq.SUBSCRIBE,'')
pushsocket = context.socket(zmq.PUSH)
try:
subscriber.connect('tcp://%s:5556' % options.server) # Allows multiple connections
except zmq.ZMQError as e:
error_output('Error: could not connect to port 5556. '+str(e).capitalize())
clean_quit()
try:
pushsocket.connect('tcp://%s:5557' % options.server)
except zmq.ZMQError as e:
error_output('Error: could not connect to port 5557. '+str(e).capitalize())
# Done initializing sockets, begin listening for messages
while ntries != 0 and (tlimit < 0 or ntime <= tlimit):
try:
# Wait for and receive JSON file
verbose_print("Waiting for message...")
message = subscriber.recv_json() # possible source of delay
recv_str = time.strftime(timeformat, time.gmtime())
last_recv = datetime.strptime(recv_str, timeformat)
verbose_print("Received: ")
verbose_print(message)
logger.warning("Subscriber received JSON")
# Send it over to port 5557 to hostinfo-client
try:
pushsocket.send_json(message)
print 'Sent message'
except zmq.ZMQError:
error_output("Warning: could not send data to hostinfo service.")
# skips over without quitting/backoff here
# Output log if daemonized. Will overwrite.
if options.daemon and message['success'] is True:
logger.warning("Dumping JSON into logfile")
output_log(json.dumps(message))
# fault protection if socket/subscriber stalls, don't check and delete all checkins
# Takes timestamp, splits it at '+' (UTC offset unable to convert), converts to datetime
check_ins[message['hostname']] = datetime.strptime(message['clientTimestamp'].split('+')[0], timeformat)
print_checkins(last_check, check_ins) # verbose prints only
last_check, check_ins = reap(last_check, last_recv, check_ins) # will not reap if too far apart
except zmq.ZMQError as e:
error_output("Warning: ZMQ error. "+str(e).capitalize()+
". Restarting with "+str(ntries)+" tries left...")
# Exponential backoff is done here
context.destroy()
time.sleep(ntime / 1000)
ntime = (2 * ntime) + random.randint(0, 1000)
main(ntries - 1, ntime, tlimit)
except (KeyboardInterrupt, SystemExit):
verbose_print('\nQuitting subscriber...')
clean_quit()
except OSError as e:
error_output('Error: '+str(e)+'. Quitting...')
clean_quit()
except Exception as e:
verbose_print("Warning: "+str(e)+". Line "+str(sys.exc_info()[-1].tb_lineno))
logger.warning("Warning: "+str(e)+".")
# Quits when all restart tries used up
error_output("Warning: used up restart tries. Quitting...")
clean_quit()
class subscriberDaemon(Daemon):
def run(self):
main(options.retries, 2000, options.tlimit)
###############################################################################
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--server", "-s", action = "store", default = 'localhost',
dest = "server", help = "Set server to connect to")
parser.add_argument("--verbose", "-v", action = "store_true", default = False,
dest = "verbose", help = "Turns on verbosity flag")
parser.add_argument("--daemon", "-d", action = "store_true", default = False,
dest = "daemon", help = "Turns subscriber into daemon")
parser.add_argument("--pidfile", "-p", action = "store", default = directory,
dest = "directory", help = "Sets location of daemon's pidfile")
parser.add_argument("--interval", "-i", action = "store", type = int, default = 20,
dest = "interval",
help = "Sets max time in minutes a system can be dormant before reaping | new_dict[hostname] = timestamp | conditional_block |
labstats-subscriber.py | delete daemon's pidfile if --daemon was specified
def clean_quit():
if options.daemon:
daemon.delpid()
exit(1)
# If collector is killed manually, clean up and quit
def sigterm_handler(signal, frame):
error_output("Subscriber killed via SIGTERM")
output_checkins()
clean_quit()
# If SIGHUP received, do "soft restart" of sockets and files
# No need to re-input checkins
def sighup_handler(signal, frame):
|
signal.signal(signal.SIGTERM, sigterm_handler)
signal.signal(signal.SIGHUP, sighup_handler)
'''
Reaper functions: check timestamps, read in/out checked-in machines.
By default, the reaper will write out its state every recv()
and will check that all checked-in machines are no older than 20 minutes
(by default) every recv()
'''
###############################################################################
# Verbose prints out check_ins: hostname::timestamp format
def print_checkins(last_check, check_ins):
verbose_print("Last check was at "+last_check.strftime(timeformat))
verbose_print("Checked-in machines: ")
for hostname, timestamp in check_ins.iteritems():
verbose_print(hostname+"::"+timestamp.strftime(timeformat))
# Outputs pickled (last_check, check_ins) tuple.
# Overwrites existing checked_in file
def output_checkins(last_check, check_ins):
if options.output is False:
return
try:
checkinfile = open('checked_in', 'w')
except Exception as e:
error_output("Warning: unable to open checked_in logfile. "+str(e))
return
try:
tup = (last_check, check_ins)
cPickle.dump(tup, checkinfile)
checkinfile.close()
except Exception as e:
error_output("Error: could not dump pickled check_in data. "+str(e))
# Read from outputted checked_in file, return last_check and check_ins
def read_checkins():
if not os.path.isfile('checked_in'): # No checkins.log found
logger.warning("No checked_in found")
return (None, {})
try:
infile = open('checked_in', 'r')
last_check, check_ins = cPickle.load(infile)
infile.close()
print_checkins(last_check, check_ins) # verbose prints what was stored
return last_check, check_ins
except Exception as e:
error_output("Error: could not get last_check and check_ins. "+str(e))
return (None, {})
# Checks timestamp is within <interval> minutes' time.
# Returns True if timestamp is outdated
def outdated(curtime, timestamp): # pass in type datetime, datetime
verbose_print("Checking timestamp "+timestamp.strftime(timeformat)+" against current time")
timeobj = datetime.fromtimestamp(mktime(timestamp.timetuple()))
diff = curtime - timeobj # type timedelta
return diff >= timedelta(minutes = options.interval)
# Checks timestamps are all <interval> minutes within current time
# Removes machines/timestamps that are outdated
# Set last_check to current GMT (4-5 hour offset)
def reap(last_check, last_recv, check_ins):
# if last check and last recv are eg. >90 mins from each other,
# stop/skip reaper (because it could be throttling error)
if last_check - last_recv > timedelta(minutes = options.faulttime):
error_output("Too much time between now and last_recv, skipping reaping")
return (last_check, check_ins)
# converting directly from gmtime to datetime loses DST data
cur_string = time.strftime(timeformat, time.gmtime())
last_check = datetime.strptime(cur_string, timeformat)
new_dict = {}
deleted = 0
for hostname, timestamp in check_ins.iteritems():
if outdated(last_check, timestamp) is True:
verbose_print(hostname+" is outdated")
deleted += 1
else: # not outdated; add back to new_dict
new_dict[hostname] = timestamp
verbose_print("Reaped "+str(deleted)+" items from check-ins")
output_checkins(last_check, new_dict)
return (last_check, new_dict)
###############################################################################
# Output the json into a log file in /var/log/labstats
def output_log(to_write):
if not os.path.exists('/var/log/labstats/'):
try:
os.mkdir('/var/log/labstats/')
except OSError as e:
error_output("Error: could not make /var/log/labstats/. Not sudo/root.")
return
try:
logout = open('/var/log/labstats/subscriber.log', 'w')
for line in to_write:
logout.write(line)
logout.close()
except OSError as e:
error_output("Error: could not write to subscriber.log. No root access.")
except Exception as e:
error_output("Error: could not write to subscriber.log. "+str(e).capitalize())
def main(ntries, ntime, tlimit):
last_check, check_ins = read_checkins()
# Set up ZMQ sockets and connections
context = zmq.Context()
subscriber = context.socket(zmq.SUB)
subscriber.setsockopt(zmq.SUBSCRIBE,'')
pushsocket = context.socket(zmq.PUSH)
try:
subscriber.connect('tcp://%s:5556' % options.server) # Allows multiple connections
except zmq.ZMQError as e:
error_output('Error: could not connect to port 5556. '+str(e).capitalize())
clean_quit()
try:
pushsocket.connect('tcp://%s:5557' % options.server)
except zmq.ZMQError as e:
error_output('Error: could not connect to port 5557. '+str(e).capitalize())
# Done initializing sockets, begin listening for messages
while ntries != 0 and (tlimit < 0 or ntime <= tlimit):
try:
# Wait for and receive JSON file
verbose_print("Waiting for message...")
message = subscriber.recv_json() # possible source of delay
recv_str = time.strftime(timeformat, time.gmtime())
last_recv = datetime.strptime(recv_str, timeformat)
verbose_print("Received: ")
verbose_print(message)
logger.warning("Subscriber received JSON")
# Send it over to port 5557 to hostinfo-client
try:
pushsocket.send_json(message)
print 'Sent message'
except zmq.ZMQError:
error_output("Warning: could not send data to hostinfo service.")
# skips over without quitting/backoff here
# Output log if daemonized. Will overwrite.
if options.daemon and message['success'] is True:
logger.warning("Dumping JSON into logfile")
output_log(json.dumps(message))
# fault protection if socket/subscriber stalls, don't check and delete all checkins
# Takes timestamp, splits it at '+' (UTC offset unable to convert), converts to datetime
check_ins[message['hostname']] = datetime.strptime(message['clientTimestamp'].split('+')[0], timeformat)
print_checkins(last_check, check_ins) # verbose prints only
last_check, check_ins = reap(last_check, last_recv, check_ins) # will not reap if too far apart
except zmq.ZMQError as e:
error_output("Warning: ZMQ error. "+str(e).capitalize()+
". Restarting with "+str(ntries)+" tries left...")
# Exponential backoff is done here
context.destroy()
time.sleep(ntime / 1000)
ntime = (2 * ntime) + random.randint(0, 1000)
main(ntries - 1, ntime, tlimit)
except (KeyboardInterrupt, SystemExit):
verbose_print('\nQuitting subscriber...')
clean_quit()
except OSError as e:
error_output('Error: '+str(e)+'. Quitting...')
clean_quit()
except Exception as e:
verbose_print("Warning: "+str(e)+". Line "+str(sys.exc_info()[-1].tb_lineno))
logger.warning("Warning: "+str(e)+".")
# Quits when all restart tries used up
error_output("Warning: used up restart tries. Quitting...")
clean_quit()
class subscriberDaemon(Daemon):
def run(self):
main(options.retries, 2000, options.tlimit)
###############################################################################
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--server", "-s", action = "store", default = 'localhost',
dest = "server", help = "Set server to connect to")
parser.add_argument("--verbose", "-v", action = "store_true", default = False,
dest = "verbose", help = "Turns on verbosity flag")
parser.add_argument("--daemon", "-d", action = "store_true", default = False,
dest = "daemon", help = "Turns subscriber into daemon")
parser.add_argument("--pidfile", "-p", action = "store", default = directory,
dest = "directory", help = "Sets location of daemon's pidfile")
parser.add_argument("--interval", "-i", action = "store", type = int, default = 20,
dest = "interval",
help = "Sets max time in minutes a system can be dormant before reaping | error_output("Collector received a SIGHUP")
context.destroy()
time.sleep(5)
main(options.retries, 2000, options.tlimit) | identifier_body |
labstats-subscriber.py | Will delete daemon's pidfile if --daemon was specified
def clean_quit():
if options.daemon:
daemon.delpid()
exit(1)
# If collector is killed manually, clean up and quit
def sigterm_handler(signal, frame):
error_output("Subscriber killed via SIGTERM")
output_checkins()
clean_quit()
# If SIGHUP received, do "soft restart" of sockets and files
# No need to re-input checkins
def sighup_handler(signal, frame):
error_output("Collector received a SIGHUP")
context.destroy()
time.sleep(5)
main(options.retries, 2000, options.tlimit)
signal.signal(signal.SIGTERM, sigterm_handler)
signal.signal(signal.SIGHUP, sighup_handler)
'''
Reaper functions: check timestamps, read in/out checked-in machines.
By default, the reaper will write out its state every recv()
and will check that all checked-in machines are no older than 20 minutes
(by default) every recv()
'''
###############################################################################
# Verbose prints out check_ins: hostname::timestamp format
def print_checkins(last_check, check_ins):
verbose_print("Last check was at "+last_check.strftime(timeformat))
verbose_print("Checked-in machines: ")
for hostname, timestamp in check_ins.iteritems():
verbose_print(hostname+"::"+timestamp.strftime(timeformat))
# Outputs pickled (last_check, check_ins) tuple.
# Overwrites existing checked_in file
def output_checkins(last_check, check_ins):
if options.output is False:
return
try:
checkinfile = open('checked_in', 'w')
except Exception as e:
error_output("Warning: unable to open checked_in logfile. "+str(e))
return
try:
tup = (last_check, check_ins)
cPickle.dump(tup, checkinfile)
checkinfile.close()
except Exception as e:
error_output("Error: could not dump pickled check_in data. "+str(e))
# Read from outputted checked_in file, return last_check and check_ins
def read_checkins():
if not os.path.isfile('checked_in'): # No checkins.log found
logger.warning("No checked_in found")
return (None, {})
try:
infile = open('checked_in', 'r')
last_check, check_ins = cPickle.load(infile)
infile.close()
print_checkins(last_check, check_ins) # verbose prints what was stored
return last_check, check_ins
except Exception as e:
error_output("Error: could not get last_check and check_ins. "+str(e))
return (None, {})
# Checks timestamp is within <interval> minutes' time. | def outdated(curtime, timestamp): # pass in type datetime, datetime
verbose_print("Checking timestamp "+timestamp.strftime(timeformat)+" against current time")
timeobj = datetime.fromtimestamp(mktime(timestamp.timetuple()))
diff = curtime - timeobj # type timedelta
return diff >= timedelta(minutes = options.interval)
# Checks timestamps are all <interval> minutes within current time
# Removes machines/timestamps that are outdated
# Set last_check to current GMT (4-5 hour offset)
def reap(last_check, last_recv, check_ins):
# if last check and last recv are eg. >90 mins from each other,
# stop/skip reaper (because it could be throttling error)
if last_check - last_recv > timedelta(minutes = options.faulttime):
error_output("Too much time between now and last_recv, skipping reaping")
return (last_check, check_ins)
# converting directly from gmtime to datetime loses DST data
cur_string = time.strftime(timeformat, time.gmtime())
last_check = datetime.strptime(cur_string, timeformat)
new_dict = {}
deleted = 0
for hostname, timestamp in check_ins.iteritems():
if outdated(last_check, timestamp) is True:
verbose_print(hostname+" is outdated")
deleted += 1
else: # not outdated; add back to new_dict
new_dict[hostname] = timestamp
verbose_print("Reaped "+str(deleted)+" items from check-ins")
output_checkins(last_check, new_dict)
return (last_check, new_dict)
###############################################################################
# Output the json into a log file in /var/log/labstats
def output_log(to_write):
if not os.path.exists('/var/log/labstats/'):
try:
os.mkdir('/var/log/labstats/')
except OSError as e:
error_output("Error: could not make /var/log/labstats/. Not sudo/root.")
return
try:
logout = open('/var/log/labstats/subscriber.log', 'w')
for line in to_write:
logout.write(line)
logout.close()
except OSError as e:
error_output("Error: could not write to subscriber.log. No root access.")
except Exception as e:
error_output("Error: could not write to subscriber.log. "+str(e).capitalize())
def main(ntries, ntime, tlimit):
last_check, check_ins = read_checkins()
# Set up ZMQ sockets and connections
context = zmq.Context()
subscriber = context.socket(zmq.SUB)
subscriber.setsockopt(zmq.SUBSCRIBE,'')
pushsocket = context.socket(zmq.PUSH)
try:
subscriber.connect('tcp://%s:5556' % options.server) # Allows multiple connections
except zmq.ZMQError as e:
error_output('Error: could not connect to port 5556. '+str(e).capitalize())
clean_quit()
try:
pushsocket.connect('tcp://%s:5557' % options.server)
except zmq.ZMQError as e:
error_output('Error: could not connect to port 5557. '+str(e).capitalize())
# Done initializing sockets, begin listening for messages
while ntries != 0 and (tlimit < 0 or ntime <= tlimit):
try:
# Wait for and receive JSON file
verbose_print("Waiting for message...")
message = subscriber.recv_json() # possible source of delay
recv_str = time.strftime(timeformat, time.gmtime())
last_recv = datetime.strptime(recv_str, timeformat)
verbose_print("Received: ")
verbose_print(message)
logger.warning("Subscriber received JSON")
# Send it over to port 5557 to hostinfo-client
try:
pushsocket.send_json(message)
print 'Sent message'
except zmq.ZMQError:
error_output("Warning: could not send data to hostinfo service.")
# skips over without quitting/backoff here
# Output log if daemonized. Will overwrite.
if options.daemon and message['success'] is True:
logger.warning("Dumping JSON into logfile")
output_log(json.dumps(message))
# fault protection if socket/subscriber stalls, don't check and delete all checkins
# Takes timestamp, splits it at '+' (UTC offset unable to convert), converts to datetime
check_ins[message['hostname']] = datetime.strptime(message['clientTimestamp'].split('+')[0], timeformat)
print_checkins(last_check, check_ins) # verbose prints only
last_check, check_ins = reap(last_check, last_recv, check_ins) # will not reap if too far apart
except zmq.ZMQError as e:
error_output("Warning: ZMQ error. "+str(e).capitalize()+
". Restarting with "+str(ntries)+" tries left...")
# Exponential backoff is done here
context.destroy()
time.sleep(ntime / 1000)
ntime = (2 * ntime) + random.randint(0, 1000)
main(ntries - 1, ntime, tlimit)
except (KeyboardInterrupt, SystemExit):
verbose_print('\nQuitting subscriber...')
clean_quit()
except OSError as e:
error_output('Error: '+str(e)+'. Quitting...')
clean_quit()
except Exception as e:
verbose_print("Warning: "+str(e)+". Line "+str(sys.exc_info()[-1].tb_lineno))
logger.warning("Warning: "+str(e)+".")
# Quits when all restart tries used up
error_output("Warning: used up restart tries. Quitting...")
clean_quit()
class subscriberDaemon(Daemon):
def run(self):
main(options.retries, 2000, options.tlimit)
###############################################################################
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--server", "-s", action = "store", default = 'localhost',
dest = "server", help = "Set server to connect to")
parser.add_argument("--verbose", "-v", action = "store_true", default = False,
dest = "verbose", help = "Turns on verbosity flag")
parser.add_argument("--daemon", "-d", action = "store_true", default = False,
dest = "daemon", help = "Turns subscriber into daemon")
parser.add_argument("--pidfile", "-p", action = "store", default = directory,
dest = "directory", help = "Sets location of daemon's pidfile")
parser.add_argument("--interval", "-i", action = "store", type = int, default = 20,
dest = "interval",
help = "Sets max time in minutes a system can be dormant before reaping ( | # Returns True if timestamp is outdated | random_line_split |
labstats-subscriber.py | delete daemon's pidfile if --daemon was specified
def clean_quit():
if options.daemon:
daemon.delpid()
exit(1)
# If collector is killed manually, clean up and quit
def sigterm_handler(signal, frame):
error_output("Subscriber killed via SIGTERM")
output_checkins()
clean_quit()
# If SIGHUP received, do "soft restart" of sockets and files
# No need to re-input checkins
def sighup_handler(signal, frame):
error_output("Collector received a SIGHUP")
context.destroy()
time.sleep(5)
main(options.retries, 2000, options.tlimit)
signal.signal(signal.SIGTERM, sigterm_handler)
signal.signal(signal.SIGHUP, sighup_handler)
'''
Reaper functions: check timestamps, read in/out checked-in machines.
By default, the reaper will write out its state every recv()
and will check that all checked-in machines are no older than 20 minutes
(by default) every recv()
'''
###############################################################################
# Verbose prints out check_ins: hostname::timestamp format
def print_checkins(last_check, check_ins):
verbose_print("Last check was at "+last_check.strftime(timeformat))
verbose_print("Checked-in machines: ")
for hostname, timestamp in check_ins.iteritems():
verbose_print(hostname+"::"+timestamp.strftime(timeformat))
# Outputs pickled (last_check, check_ins) tuple.
# Overwrites existing checked_in file
def output_checkins(last_check, check_ins):
if options.output is False:
return
try:
checkinfile = open('checked_in', 'w')
except Exception as e:
error_output("Warning: unable to open checked_in logfile. "+str(e))
return
try:
tup = (last_check, check_ins)
cPickle.dump(tup, checkinfile)
checkinfile.close()
except Exception as e:
error_output("Error: could not dump pickled check_in data. "+str(e))
# Read from outputted checked_in file, return last_check and check_ins
def read_checkins():
if not os.path.isfile('checked_in'): # No checkins.log found
logger.warning("No checked_in found")
return (None, {})
try:
infile = open('checked_in', 'r')
last_check, check_ins = cPickle.load(infile)
infile.close()
print_checkins(last_check, check_ins) # verbose prints what was stored
return last_check, check_ins
except Exception as e:
error_output("Error: could not get last_check and check_ins. "+str(e))
return (None, {})
# Checks timestamp is within <interval> minutes' time.
# Returns True if timestamp is outdated
def outdated(curtime, timestamp): # pass in type datetime, datetime
verbose_print("Checking timestamp "+timestamp.strftime(timeformat)+" against current time")
timeobj = datetime.fromtimestamp(mktime(timestamp.timetuple()))
diff = curtime - timeobj # type timedelta
return diff >= timedelta(minutes = options.interval)
# Checks timestamps are all <interval> minutes within current time
# Removes machines/timestamps that are outdated
# Set last_check to current GMT (4-5 hour offset)
def reap(last_check, last_recv, check_ins):
# if last check and last recv are eg. >90 mins from each other,
# stop/skip reaper (because it could be throttling error)
if last_check - last_recv > timedelta(minutes = options.faulttime):
error_output("Too much time between now and last_recv, skipping reaping")
return (last_check, check_ins)
# converting directly from gmtime to datetime loses DST data
cur_string = time.strftime(timeformat, time.gmtime())
last_check = datetime.strptime(cur_string, timeformat)
new_dict = {}
deleted = 0
for hostname, timestamp in check_ins.iteritems():
if outdated(last_check, timestamp) is True:
verbose_print(hostname+" is outdated")
deleted += 1
else: # not outdated; add back to new_dict
new_dict[hostname] = timestamp
verbose_print("Reaped "+str(deleted)+" items from check-ins")
output_checkins(last_check, new_dict)
return (last_check, new_dict)
###############################################################################
# Output the json into a log file in /var/log/labstats
def output_log(to_write):
if not os.path.exists('/var/log/labstats/'):
try:
os.mkdir('/var/log/labstats/')
except OSError as e:
error_output("Error: could not make /var/log/labstats/. Not sudo/root.")
return
try:
logout = open('/var/log/labstats/subscriber.log', 'w')
for line in to_write:
logout.write(line)
logout.close()
except OSError as e:
error_output("Error: could not write to subscriber.log. No root access.")
except Exception as e:
error_output("Error: could not write to subscriber.log. "+str(e).capitalize())
def | (ntries, ntime, tlimit):
last_check, check_ins = read_checkins()
# Set up ZMQ sockets and connections
context = zmq.Context()
subscriber = context.socket(zmq.SUB)
subscriber.setsockopt(zmq.SUBSCRIBE,'')
pushsocket = context.socket(zmq.PUSH)
try:
subscriber.connect('tcp://%s:5556' % options.server) # Allows multiple connections
except zmq.ZMQError as e:
error_output('Error: could not connect to port 5556. '+str(e).capitalize())
clean_quit()
try:
pushsocket.connect('tcp://%s:5557' % options.server)
except zmq.ZMQError as e:
error_output('Error: could not connect to port 5557. '+str(e).capitalize())
# Done initializing sockets, begin listening for messages
while ntries != 0 and (tlimit < 0 or ntime <= tlimit):
try:
# Wait for and receive JSON file
verbose_print("Waiting for message...")
message = subscriber.recv_json() # possible source of delay
recv_str = time.strftime(timeformat, time.gmtime())
last_recv = datetime.strptime(recv_str, timeformat)
verbose_print("Received: ")
verbose_print(message)
logger.warning("Subscriber received JSON")
# Send it over to port 5557 to hostinfo-client
try:
pushsocket.send_json(message)
print 'Sent message'
except zmq.ZMQError:
error_output("Warning: could not send data to hostinfo service.")
# skips over without quitting/backoff here
# Output log if daemonized. Will overwrite.
if options.daemon and message['success'] is True:
logger.warning("Dumping JSON into logfile")
output_log(json.dumps(message))
# fault protection if socket/subscriber stalls, don't check and delete all checkins
# Takes timestamp, splits it at '+' (UTC offset unable to convert), converts to datetime
check_ins[message['hostname']] = datetime.strptime(message['clientTimestamp'].split('+')[0], timeformat)
print_checkins(last_check, check_ins) # verbose prints only
last_check, check_ins = reap(last_check, last_recv, check_ins) # will not reap if too far apart
except zmq.ZMQError as e:
error_output("Warning: ZMQ error. "+str(e).capitalize()+
". Restarting with "+str(ntries)+" tries left...")
# Exponential backoff is done here
context.destroy()
time.sleep(ntime / 1000)
ntime = (2 * ntime) + random.randint(0, 1000)
main(ntries - 1, ntime, tlimit)
except (KeyboardInterrupt, SystemExit):
verbose_print('\nQuitting subscriber...')
clean_quit()
except OSError as e:
error_output('Error: '+str(e)+'. Quitting...')
clean_quit()
except Exception as e:
verbose_print("Warning: "+str(e)+". Line "+str(sys.exc_info()[-1].tb_lineno))
logger.warning("Warning: "+str(e)+".")
# Quits when all restart tries used up
error_output("Warning: used up restart tries. Quitting...")
clean_quit()
class subscriberDaemon(Daemon):
def run(self):
main(options.retries, 2000, options.tlimit)
###############################################################################
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--server", "-s", action = "store", default = 'localhost',
dest = "server", help = "Set server to connect to")
parser.add_argument("--verbose", "-v", action = "store_true", default = False,
dest = "verbose", help = "Turns on verbosity flag")
parser.add_argument("--daemon", "-d", action = "store_true", default = False,
dest = "daemon", help = "Turns subscriber into daemon")
parser.add_argument("--pidfile", "-p", action = "store", default = directory,
dest = "directory", help = "Sets location of daemon's pidfile")
parser.add_argument("--interval", "-i", action = "store", type = int, default = 20,
dest = "interval",
help = "Sets max time in minutes a system can be dormant before reaping | main | identifier_name |
gocomics.py | fe'),
cls('CattitudeDoggonit', 'cattitude-doggonit'),
cls('CestLaVie', 'cestlavie'),
cls('CheerUpEmoKid', 'cheer-up-emo-kid'),
cls('ChipBok', 'chipbok'),
cls('ChrisBritt', 'chrisbritt'),
cls('ChuckDrawsThings', 'chuck-draws-things'),
cls('ChuckleBros', 'chucklebros'),
cls('CitizenDog', 'citizendog'),
cls('Claw', 'claw'),
cls('ClayBennett', 'claybennett'),
cls('ClayJones', 'clayjones'),
cls('Cleats', 'cleats'),
cls('CloseToHome', 'closetohome'),
cls('Computoon', 'compu-toon'),
cls('Cornered', 'cornered'),
cls('CowAndBoyClassics', 'cowandboy'),
cls('CowTown', 'cowtown'),
cls('Crabgrass', 'crabgrass'),
cls('Crumb', 'crumb'),
cls('CulDeSac', 'culdesac'),
cls('DaddysHome', 'daddyshome'),
cls('DanaSummers', 'danasummers'),
cls('DarkSideOfTheHorse', 'darksideofthehorse'),
cls('DeepDarkFears', 'deep-dark-fears'),
cls('DeFlocked', 'deflocked'),
cls('DiamondLil', 'diamondlil'),
cls('DickTracy', 'dicktracy'),
cls('DilbertClassics', 'dilbert-classics'),
cls('DilbertEnEspanol', 'dilbert-en-espanol', 'es'),
cls('DinosaurComics', 'dinosaur-comics'),
cls('DogEatDoug', 'dogeatdoug'),
cls('DogsOfCKennel', 'dogsofckennel'),
cls('DomesticAbuse', 'domesticabuse'),
cls('DonBrutus', 'don-brutus', 'es'),
cls('DoodleForFood', 'doodle-for-food'),
cls('DoodleTown', 'doodle-town'),
cls('Doonesbury', 'doonesbury'),
cls('Drabble', 'drabble'),
cls('DrewSheneman', 'drewsheneman'),
cls('DumbwichCastle', 'dumbwich-castle'),
cls('EdgeCity', 'edge-city'),
cls('Eek', 'eek'),
cls('ElCafDePoncho', 'el-cafe-de-poncho', 'es'),
cls('EmmyLou', 'emmy-lou'),
cls('Endtown', 'endtown'),
cls('EverydayPeopleCartoons', 'everyday-people-cartoons'),
cls('Eyebeam', 'eyebeam'),
cls('EyebeamClassic', 'eyebeam-classic'),
cls('FalseKnees', 'false-knees'),
cls('FamilyTree', 'familytree'),
cls('Farcus', 'farcus'),
cls('FatCats', 'fat-cats'),
cls('FloAndFriends', 'floandfriends'),
cls('FMinus', 'fminus'),
cls('FoolishMortals', 'foolish-mortals'),
cls('ForBetterOrForWorse', 'forbetterorforworse'),
cls('ForHeavensSake', 'forheavenssake'),
cls('FourEyes', 'four-eyes'),
cls('FowlLanguage', 'fowl-language'),
cls('FoxTrot', 'foxtrot'),
cls('FoxTrotClassics', 'foxtrotclassics'),
cls('FoxTrotEnEspanol', 'foxtrotespanol', 'es'),
cls('Francis', 'francis'),
cls('FrankAndErnest', 'frank-and-ernest'),
cls('Frazz', 'frazz'),
cls('FredBasset', 'fredbasset'),
cls('FredBassetEnEspanol', 'fredbassetespanol', 'es'),
cls('FreeRange', 'freerange'),
cls('FreshlySqueezed', 'freshlysqueezed'),
cls('FrogApplause', 'frogapplause'),
cls('Garfield', 'garfield'),
cls('GarfieldClassics', 'garfield-classics'),
cls('GarfieldEnEspanol', 'garfieldespanol', 'es'),
cls('GaryMarkstein', 'garymarkstein'),
cls('GaryVarvel', 'garyvarvel'),
cls('GasolineAlley', 'gasolinealley'),
cls('Gaturro', 'gaturro', 'es'),
cls('Geech', 'geech'),
cls('GetALife', 'getalife'),
cls('GetFuzzy', 'getfuzzy'),
cls('Gil', 'gil'),
cls('GilThorp', 'gilthorp'),
cls('GingerMeggs', 'gingermeggs'),
cls('GingerMeggsEnEspanol', 'gingermeggs-espanol', 'es'),
cls('GlasbergenCartoons', 'glasbergen-cartoons'),
cls('Globetrotter', 'globetrotter'),
cls('GManWebcomics', 'g-man-webcomics'),
cls('Goats', 'goats'),
cls('GrandAvenue', 'grand-avenue'),
cls('GrayMatters', 'gray-matters'),
cls('GreenHumour', 'green-humour'),
cls('HaircutPractice', 'haircut-practice'),
cls('HalfFull', 'half-full'),
cls('Harley', 'harley'),
cls('HeartOfTheCity', 'heartofthecity'),
cls('Heathcliff', 'heathcliff'),
cls('HeathcliffEnEspanol', 'heathcliffespanol', 'es'),
cls('HenryPayne', 'henrypayne'),
cls('HerbAndJamaal', 'herbandjamaal'),
cls('Herman', 'herman'),
cls('HomeAndAway', 'homeandaway'),
cls('HotComicsForCoolPeople', 'hot-comics-for-cool-people'),
cls('HutchOwen', 'hutch-owen'),
cls('ImagineThis', 'imaginethis'),
cls('ImogenQuest', 'imogen-quest'),
cls('InkPen', 'inkpen'),
cls('InSecurity', 'in-security'),
cls('InTheBleachers', 'inthebleachers'),
cls('InTheSticks', 'inthesticks'),
cls('InvisibleBread', 'invisible-bread'),
cls('ItsAllAboutYou', 'itsallaboutyou'),
cls('JackOhman', 'jackohman'),
cls('JakeLikesOnions', 'jake-likes-onions'),
cls('JanesWorld', 'janesworld'),
cls('JeffDanziger', 'jeffdanziger'),
cls('JeffStahler', 'jeffstahler'),
cls('JenSorensen', 'jen-sorensen'),
cls('JimBentonCartoons', 'jim-benton-cartoons'),
cls('JimMorin', 'jimmorin'),
cls('JoeHeller', 'joe-heller'),
cls('JoelPett', 'joelpett'),
cls('JohnDeering', 'johndeering'),
cls('JumpStart', 'jumpstart'), | cls('KidBeowulf', 'kid-beowulf'),
cls('KitchenCapers', 'kitchen-capers'),
cls('Kliban', 'kliban'),
cls('KlibansCats', 'klibans-cats'),
cls('LaCucaracha', 'lacucaracha'),
cls('LaCucarachaEnEspanol', 'la-cucaracha-en-espanol', 'es'),
cls('LaloAlcaraz', 'laloalcaraz'),
cls('LaloAlcarazEnEspanol', 'laloenespanol', 'es'),
cls('LardsWorldPeaceTips', 'lards-world-peace-tips'),
cls('LasHermanasStone', 'stonesoup_espanol', 'es'),
cls('LastKiss', 'lastkiss'),
cls('LaughingRedheadComics', 'laughing-redhead-comics'),
cls('LayLines', 'lay-lines'),
cls('LearnToSpeakCat', 'learn-to-speak-cat'),
cls('LibertyMeadows', 'libertymeadows'),
cls('LifeOnEarth', 'life-on-earth'),
cls('LilAbner', 'lil-abner'),
cls('Lio', 'lio'),
cls('LioEnEspanol', | cls('JunkDrawer', 'junk-drawer'),
cls('JustoYFranco', 'justo-y-franco', 'es'),
cls('KevinKallaugher', 'kal'),
cls('KevinNecessaryEditorialCartoons', 'kevin-necessary-editorial-cartoons'), | random_line_split |
gocomics.py |
def namer(self, image_url, page_url):
prefix, year, month, day = page_url.rsplit('/', 3)
return "%s_%s%s%s.gif" % (self.shortname, year, month, day)
def getIndexStripUrl(self, index):
return '{}/{}'.format(self.url, index)
def shouldSkipUrl(self, url, data):
"""Skip pages without images."""
return data.xpath('//img[contains(@src, "content-error-missing")]')
@classmethod
def getmodules(cls): # noqa: CFQ001
return (
# old comics removed from the listing
cls('HeavenlyNostrils', 'heavenly-nostrils'),
# do not edit anything below since these entries are generated from
# scripts/gocomics.py
# START AUTOUPDATE
cls('1AndDone', '1-and-done'),
cls('9ChickweedLane', '9chickweedlane'),
cls('9ChickweedLaneClassics', '9-chickweed-lane-classics'),
cls('9To5', '9to5'),
cls('Aaggghhh', 'Aaggghhh', 'es'),
cls('AdamAtHome', 'adamathome'),
cls('AdultChildren', 'adult-children'),
cls('Agnes', 'agnes'),
cls('AJAndMagnus', 'aj-and-magnus'),
cls('AlGoodwynEditorialCartoons', 'algoodwyn'),
cls('AlisHouse', 'alis-house'),
cls('AlleyOop', 'alley-oop'),
cls('AmandaTheGreat', 'amanda-the-great'),
cls('Andertoons', 'andertoons'),
cls('AndyCapp', 'andycapp'),
cls('AngryLittleGirls', 'angry-little-girls'),
cls('AnimalCrackers', 'animalcrackers'),
cls('Annie', 'annie'),
cls('AProblemLikeJamal', 'a-problem-like-jamal'),
cls('ArloAndJanis', 'arloandjanis'),
cls('AskShagg', 'askshagg'),
cls('AtTavicat', 'tavicat'),
cls('AuntyAcid', 'aunty-acid'),
cls('BabyBlues', 'babyblues'),
cls('BackInTheDay', 'backintheday'),
cls('BackToBC', 'back-to-bc'),
cls('Bacon', 'bacon'),
cls('Badlands', 'badlands'),
cls('BadMachinery', 'bad-machinery'),
cls('Baldo', 'baldo'),
cls('BaldoEnEspanol', 'baldoespanol', 'es'),
cls('BallardStreet', 'ballardstreet'),
cls('BananaTriangle', 'banana-triangle'),
cls('BarkeaterLake', 'barkeaterlake'),
cls('BarneyAndClyde', 'barneyandclyde'),
cls('BasicInstructions', 'basicinstructions'),
cls('BatchRejection', 'batch-rejection'),
cls('BC', 'bc'),
cls('BeanieTheBrownie', 'beanie-the-brownie'),
cls('Beardo', 'beardo'),
cls('BearWithMe', 'bear-with-me'),
cls('Ben', 'ben'),
cls('BenitinYEneas', 'muttandjeffespanol', 'es'),
cls('BergerAndWyse', 'berger-and-wyse'),
cls('BerkeleyMews', 'berkeley-mews'),
cls('Betty', 'betty'),
cls('BFGFSyndrome', 'bfgf-syndrome'),
cls('BigNate', 'bignate'),
cls('BigNateFirstClass', 'big-nate-first-class'),
cls('BigTop', 'bigtop'),
cls('BirdAndMoon', 'bird-and-moon'),
cls('Birdbrains', 'birdbrains'),
cls('BleekerTheRechargeableDog', 'bleeker'),
cls('Bliss', 'bliss'),
cls('BloomCounty', 'bloomcounty'),
cls('BloomCounty2019', 'bloom-county'),
cls('BobGorrell', 'bobgorrell'),
cls('BobTheSquirrel', 'bobthesquirrel'),
cls('BoNanas', 'bonanas'),
cls('Boomerangs', 'boomerangs'),
cls('Bottomliners', 'bottomliners'),
cls('BoundAndGagged', 'boundandgagged'),
cls('Bozo', 'bozo'),
cls('BreakingCatNews', 'breaking-cat-news'),
cls('BreakOfDay', 'break-of-day'),
cls('Brevity', 'brevity'),
cls('BrewsterRockit', 'brewsterrockit'),
cls('BrianMcFadden', 'brian-mcfadden'),
cls('BroomHilda', 'broomhilda'),
cls('Buckles', 'buckles'),
cls('Bully', 'bully'),
cls('Buni', 'buni'),
cls('CalvinAndHobbes', 'calvinandhobbes'),
cls('CalvinAndHobbesEnEspanol', 'calvinandhobbesespanol', 'es'),
cls('Candorville', 'candorville'),
cls('CatanaComics', 'little-moments-of-love'),
cls('CathyClassics', 'cathy'),
cls('CathyCommiserations', 'cathy-commiserations'),
cls('CatsCafe', 'cats-cafe'),
cls('CattitudeDoggonit', 'cattitude-doggonit'),
cls('CestLaVie', 'cestlavie'),
cls('CheerUpEmoKid', 'cheer-up-emo-kid'),
cls('ChipBok', 'chipbok'),
cls('ChrisBritt', 'chrisbritt'),
cls('ChuckDrawsThings', 'chuck-draws-things'),
cls('ChuckleBros', 'chucklebros'),
cls('CitizenDog', 'citizendog'),
cls('Claw', 'claw'),
cls('ClayBennett', 'claybennett'),
cls('ClayJones', 'clayjones'),
cls('Cleats', 'cleats'),
cls('CloseToHome', 'closetohome'),
cls('Computoon', 'compu-toon'),
cls('Cornered', 'cornered'),
cls('CowAndBoyClassics', 'cowandboy'),
cls('CowTown', 'cowtown'),
cls('Crabgrass', 'crabgrass'),
cls('Crumb', 'crumb'),
cls('CulDeSac', 'culdesac'),
cls('DaddysHome', 'daddyshome'),
cls('DanaSummers', 'danasummers'),
cls('DarkSideOfTheHorse', 'darksideofthehorse'),
cls('DeepDarkFears', 'deep-dark-fears'),
cls('DeFlocked', 'deflocked'),
cls('DiamondLil', 'diamondlil'),
cls('DickTracy', 'dicktracy'),
cls('DilbertClassics', 'dilbert-classics'),
cls('DilbertEnEspanol', 'dilbert-en-espanol', 'es'),
cls('DinosaurComics', 'dinosaur-comics'),
cls('DogEatDoug', 'dogeatdoug'),
cls('DogsOfCKennel', 'dogsofckennel'),
cls('DomesticAbuse', 'domesticabuse'),
cls('DonBrutus', 'don-brutus', 'es'),
cls('DoodleForFood', 'doodle-for-food'),
cls('DoodleTown', 'doodle-town'),
cls('Doonesbury', 'doonesbury'),
cls('Drabble', 'drabble'),
cls('DrewSheneman', 'drewsheneman'),
cls('DumbwichCastle', 'dumbwich-castle'),
cls('EdgeCity', 'edge-city'),
cls('Eek', 'eek'),
cls('ElCafDePoncho', 'el-cafe-de-poncho', 'es'),
cls('EmmyLou', 'emmy-lou'),
cls('Endtown', 'endtown'),
cls('EverydayPeopleCartoons', 'everyday-people-cartoons'),
cls('Eyebeam', 'eyebeam'),
cls('EyebeamClassic', 'eyebeam-classic'),
cls('FalseKnees', 'false-knees'),
cls('FamilyTree', 'familytree'),
cls('Farcus', 'farcus'),
cls('FatCats', 'fat-cats'),
cls('FloAndFriends', 'floandfriends'),
cls('FMinus', 'fminus'),
cls('FoolishMortals', 'foolish-mortals'),
cls('ForBetterOrForWorse', 'forbetterorforworse'),
cls('ForHeavensSake', 'forheavenssake'),
cls('FourEyes', 'four-eyes'),
cls('FowlLanguage', 'fowl-language'),
| self.lang = lang | conditional_block | |
gocomics.py | return '{}/{}'.format(self.url, index)
def shouldSkipUrl(self, url, data):
"""Skip pages without images."""
return data.xpath('//img[contains(@src, "content-error-missing")]')
@classmethod
def getmodules(cls): # noqa: CFQ001
return (
# old comics removed from the listing
cls('HeavenlyNostrils', 'heavenly-nostrils'),
# do not edit anything below since these entries are generated from
# scripts/gocomics.py
# START AUTOUPDATE
cls('1AndDone', '1-and-done'),
cls('9ChickweedLane', '9chickweedlane'),
cls('9ChickweedLaneClassics', '9-chickweed-lane-classics'),
cls('9To5', '9to5'),
cls('Aaggghhh', 'Aaggghhh', 'es'),
cls('AdamAtHome', 'adamathome'),
cls('AdultChildren', 'adult-children'),
cls('Agnes', 'agnes'),
cls('AJAndMagnus', 'aj-and-magnus'),
cls('AlGoodwynEditorialCartoons', 'algoodwyn'),
cls('AlisHouse', 'alis-house'),
cls('AlleyOop', 'alley-oop'),
cls('AmandaTheGreat', 'amanda-the-great'),
cls('Andertoons', 'andertoons'),
cls('AndyCapp', 'andycapp'),
cls('AngryLittleGirls', 'angry-little-girls'),
cls('AnimalCrackers', 'animalcrackers'),
cls('Annie', 'annie'),
cls('AProblemLikeJamal', 'a-problem-like-jamal'),
cls('ArloAndJanis', 'arloandjanis'),
cls('AskShagg', 'askshagg'),
cls('AtTavicat', 'tavicat'),
cls('AuntyAcid', 'aunty-acid'),
cls('BabyBlues', 'babyblues'),
cls('BackInTheDay', 'backintheday'),
cls('BackToBC', 'back-to-bc'),
cls('Bacon', 'bacon'),
cls('Badlands', 'badlands'),
cls('BadMachinery', 'bad-machinery'),
cls('Baldo', 'baldo'),
cls('BaldoEnEspanol', 'baldoespanol', 'es'),
cls('BallardStreet', 'ballardstreet'),
cls('BananaTriangle', 'banana-triangle'),
cls('BarkeaterLake', 'barkeaterlake'),
cls('BarneyAndClyde', 'barneyandclyde'),
cls('BasicInstructions', 'basicinstructions'),
cls('BatchRejection', 'batch-rejection'),
cls('BC', 'bc'),
cls('BeanieTheBrownie', 'beanie-the-brownie'),
cls('Beardo', 'beardo'),
cls('BearWithMe', 'bear-with-me'),
cls('Ben', 'ben'),
cls('BenitinYEneas', 'muttandjeffespanol', 'es'),
cls('BergerAndWyse', 'berger-and-wyse'),
cls('BerkeleyMews', 'berkeley-mews'),
cls('Betty', 'betty'),
cls('BFGFSyndrome', 'bfgf-syndrome'),
cls('BigNate', 'bignate'),
cls('BigNateFirstClass', 'big-nate-first-class'),
cls('BigTop', 'bigtop'),
cls('BirdAndMoon', 'bird-and-moon'),
cls('Birdbrains', 'birdbrains'),
cls('BleekerTheRechargeableDog', 'bleeker'),
cls('Bliss', 'bliss'),
cls('BloomCounty', 'bloomcounty'),
cls('BloomCounty2019', 'bloom-county'),
cls('BobGorrell', 'bobgorrell'),
cls('BobTheSquirrel', 'bobthesquirrel'),
cls('BoNanas', 'bonanas'),
cls('Boomerangs', 'boomerangs'),
cls('Bottomliners', 'bottomliners'),
cls('BoundAndGagged', 'boundandgagged'),
cls('Bozo', 'bozo'),
cls('BreakingCatNews', 'breaking-cat-news'),
cls('BreakOfDay', 'break-of-day'),
cls('Brevity', 'brevity'),
cls('BrewsterRockit', 'brewsterrockit'),
cls('BrianMcFadden', 'brian-mcfadden'),
cls('BroomHilda', 'broomhilda'),
cls('Buckles', 'buckles'),
cls('Bully', 'bully'),
cls('Buni', 'buni'),
cls('CalvinAndHobbes', 'calvinandhobbes'),
cls('CalvinAndHobbesEnEspanol', 'calvinandhobbesespanol', 'es'),
cls('Candorville', 'candorville'),
cls('CatanaComics', 'little-moments-of-love'),
cls('CathyClassics', 'cathy'),
cls('CathyCommiserations', 'cathy-commiserations'),
cls('CatsCafe', 'cats-cafe'),
cls('CattitudeDoggonit', 'cattitude-doggonit'),
cls('CestLaVie', 'cestlavie'),
cls('CheerUpEmoKid', 'cheer-up-emo-kid'),
cls('ChipBok', 'chipbok'),
cls('ChrisBritt', 'chrisbritt'),
cls('ChuckDrawsThings', 'chuck-draws-things'),
cls('ChuckleBros', 'chucklebros'),
cls('CitizenDog', 'citizendog'),
cls('Claw', 'claw'),
cls('ClayBennett', 'claybennett'),
cls('ClayJones', 'clayjones'),
cls('Cleats', 'cleats'),
cls('CloseToHome', 'closetohome'),
cls('Computoon', 'compu-toon'),
cls('Cornered', 'cornered'),
cls('CowAndBoyClassics', 'cowandboy'),
cls('CowTown', 'cowtown'),
cls('Crabgrass', 'crabgrass'),
cls('Crumb', 'crumb'),
cls('CulDeSac', 'culdesac'),
cls('DaddysHome', 'daddyshome'),
cls('DanaSummers', 'danasummers'),
cls('DarkSideOfTheHorse', 'darksideofthehorse'),
cls('DeepDarkFears', 'deep-dark-fears'),
cls('DeFlocked', 'deflocked'),
cls('DiamondLil', 'diamondlil'),
cls('DickTracy', 'dicktracy'),
cls('DilbertClassics', 'dilbert-classics'),
cls('DilbertEnEspanol', 'dilbert-en-espanol', 'es'),
cls('DinosaurComics', 'dinosaur-comics'),
cls('DogEatDoug', 'dogeatdoug'),
cls('DogsOfCKennel', 'dogsofckennel'),
cls('DomesticAbuse', 'domesticabuse'),
cls('DonBrutus', 'don-brutus', 'es'),
cls('DoodleForFood', 'doodle-for-food'),
cls('DoodleTown', 'doodle-town'),
cls('Doonesbury', 'doonesbury'),
cls('Drabble', 'drabble'),
cls('DrewSheneman', 'drewsheneman'),
cls('DumbwichCastle', 'dumbwich-castle'),
cls('EdgeCity', 'edge-city'),
cls('Eek', 'eek'),
cls('ElCafDePoncho', 'el-cafe-de-poncho', 'es'),
cls('EmmyLou', 'emmy-lou'),
cls('Endtown', 'endtown'),
cls('EverydayPeopleCartoons', 'everyday-people-cartoons'),
cls(' | url = 'https://www.gocomics.com/'
imageSearch = '//picture[d:class("item-comic-image")]/img'
prevSearch = '//a[d:class("js-previous-comic")]'
latestSearch = '//div[d:class("gc-deck--cta-0")]//a'
starter = indirectStarter
help = 'Index format: yyyy/mm/dd'
def __init__(self, name, path, lang=None):
super(GoComics, self).__init__('GoComics/' + name)
self.session.add_throttle('www.gocomics.com', 1.0, 2.0)
self.url = 'https://www.gocomics.com/' + path
self.shortname = name
if lang:
self.lang = lang
def namer(self, image_url, page_url):
prefix, year, month, day = page_url.rsplit('/', 3)
return "%s_%s%s%s.gif" % (self.shortname, year, month, day)
def getIndexStripUrl(self, index): | identifier_body | |
gocomics.py | (self, url, data):
"""Skip pages without images."""
return data.xpath('//img[contains(@src, "content-error-missing")]')
@classmethod
def getmodules(cls): # noqa: CFQ001
return (
# old comics removed from the listing
cls('HeavenlyNostrils', 'heavenly-nostrils'),
# do not edit anything below since these entries are generated from
# scripts/gocomics.py
# START AUTOUPDATE
cls('1AndDone', '1-and-done'),
cls('9ChickweedLane', '9chickweedlane'),
cls('9ChickweedLaneClassics', '9-chickweed-lane-classics'),
cls('9To5', '9to5'),
cls('Aaggghhh', 'Aaggghhh', 'es'),
cls('AdamAtHome', 'adamathome'),
cls('AdultChildren', 'adult-children'),
cls('Agnes', 'agnes'),
cls('AJAndMagnus', 'aj-and-magnus'),
cls('AlGoodwynEditorialCartoons', 'algoodwyn'),
cls('AlisHouse', 'alis-house'),
cls('AlleyOop', 'alley-oop'),
cls('AmandaTheGreat', 'amanda-the-great'),
cls('Andertoons', 'andertoons'),
cls('AndyCapp', 'andycapp'),
cls('AngryLittleGirls', 'angry-little-girls'),
cls('AnimalCrackers', 'animalcrackers'),
cls('Annie', 'annie'),
cls('AProblemLikeJamal', 'a-problem-like-jamal'),
cls('ArloAndJanis', 'arloandjanis'),
cls('AskShagg', 'askshagg'),
cls('AtTavicat', 'tavicat'),
cls('AuntyAcid', 'aunty-acid'),
cls('BabyBlues', 'babyblues'),
cls('BackInTheDay', 'backintheday'),
cls('BackToBC', 'back-to-bc'),
cls('Bacon', 'bacon'),
cls('Badlands', 'badlands'),
cls('BadMachinery', 'bad-machinery'),
cls('Baldo', 'baldo'),
cls('BaldoEnEspanol', 'baldoespanol', 'es'),
cls('BallardStreet', 'ballardstreet'),
cls('BananaTriangle', 'banana-triangle'),
cls('BarkeaterLake', 'barkeaterlake'),
cls('BarneyAndClyde', 'barneyandclyde'),
cls('BasicInstructions', 'basicinstructions'),
cls('BatchRejection', 'batch-rejection'),
cls('BC', 'bc'),
cls('BeanieTheBrownie', 'beanie-the-brownie'),
cls('Beardo', 'beardo'),
cls('BearWithMe', 'bear-with-me'),
cls('Ben', 'ben'),
cls('BenitinYEneas', 'muttandjeffespanol', 'es'),
cls('BergerAndWyse', 'berger-and-wyse'),
cls('BerkeleyMews', 'berkeley-mews'),
cls('Betty', 'betty'),
cls('BFGFSyndrome', 'bfgf-syndrome'),
cls('BigNate', 'bignate'),
cls('BigNateFirstClass', 'big-nate-first-class'),
cls('BigTop', 'bigtop'),
cls('BirdAndMoon', 'bird-and-moon'),
cls('Birdbrains', 'birdbrains'),
cls('BleekerTheRechargeableDog', 'bleeker'),
cls('Bliss', 'bliss'),
cls('BloomCounty', 'bloomcounty'),
cls('BloomCounty2019', 'bloom-county'),
cls('BobGorrell', 'bobgorrell'),
cls('BobTheSquirrel', 'bobthesquirrel'),
cls('BoNanas', 'bonanas'),
cls('Boomerangs', 'boomerangs'),
cls('Bottomliners', 'bottomliners'),
cls('BoundAndGagged', 'boundandgagged'),
cls('Bozo', 'bozo'),
cls('BreakingCatNews', 'breaking-cat-news'),
cls('BreakOfDay', 'break-of-day'),
cls('Brevity', 'brevity'),
cls('BrewsterRockit', 'brewsterrockit'),
cls('BrianMcFadden', 'brian-mcfadden'),
cls('BroomHilda', 'broomhilda'),
cls('Buckles', 'buckles'),
cls('Bully', 'bully'),
cls('Buni', 'buni'),
cls('CalvinAndHobbes', 'calvinandhobbes'),
cls('CalvinAndHobbesEnEspanol', 'calvinandhobbesespanol', 'es'),
cls('Candorville', 'candorville'),
cls('CatanaComics', 'little-moments-of-love'),
cls('CathyClassics', 'cathy'),
cls('CathyCommiserations', 'cathy-commiserations'),
cls('CatsCafe', 'cats-cafe'),
cls('CattitudeDoggonit', 'cattitude-doggonit'),
cls('CestLaVie', 'cestlavie'),
cls('CheerUpEmoKid', 'cheer-up-emo-kid'),
cls('ChipBok', 'chipbok'),
cls('ChrisBritt', 'chrisbritt'),
cls('ChuckDrawsThings', 'chuck-draws-things'),
cls('ChuckleBros', 'chucklebros'),
cls('CitizenDog', 'citizendog'),
cls('Claw', 'claw'),
cls('ClayBennett', 'claybennett'),
cls('ClayJones', 'clayjones'),
cls('Cleats', 'cleats'),
cls('CloseToHome', 'closetohome'),
cls('Computoon', 'compu-toon'),
cls('Cornered', 'cornered'),
cls('CowAndBoyClassics', 'cowandboy'),
cls('CowTown', 'cowtown'),
cls('Crabgrass', 'crabgrass'),
cls('Crumb', 'crumb'),
cls('CulDeSac', 'culdesac'),
cls('DaddysHome', 'daddyshome'),
cls('DanaSummers', 'danasummers'),
cls('DarkSideOfTheHorse', 'darksideofthehorse'),
cls('DeepDarkFears', 'deep-dark-fears'),
cls('DeFlocked', 'deflocked'),
cls('DiamondLil', 'diamondlil'),
cls('DickTracy', 'dicktracy'),
cls('DilbertClassics', 'dilbert-classics'),
cls('DilbertEnEspanol', 'dilbert-en-espanol', 'es'),
cls('DinosaurComics', 'dinosaur-comics'),
cls('DogEatDoug', 'dogeatdoug'),
cls('DogsOfCKennel', 'dogsofckennel'),
cls('DomesticAbuse', 'domesticabuse'),
cls('DonBrutus', 'don-brutus', 'es'),
cls('DoodleForFood', 'doodle-for-food'),
cls('DoodleTown', 'doodle-town'),
cls('Doonesbury', 'doonesbury'),
cls('Drabble', 'drabble'),
cls('DrewSheneman', 'drewsheneman'),
cls('DumbwichCastle', 'dumbwich-castle'),
cls('EdgeCity', 'edge-city'),
cls('Eek', 'eek'),
cls('ElCafDePoncho', 'el-cafe-de-poncho', 'es'),
cls('EmmyLou', 'emmy-lou'),
cls('Endtown', 'endtown'),
cls('EverydayPeopleCartoons', 'everyday-people-cartoons'),
cls('Eyebeam', 'eyebeam'),
cls('EyebeamClassic', 'eyebeam-classic'),
cls('FalseKnees', 'false-knees'),
cls('FamilyTree', 'familytree'),
cls('Farcus', 'farcus'),
cls('FatCats', 'fat-cats'),
cls('FloAndFriends', 'floandfriends'),
cls('FMinus', 'fminus'),
cls('FoolishMortals', 'foolish-mortals'),
cls('ForBetterOrForWorse', 'forbetterorforworse'),
cls('ForHeavensSake', 'forheavenssake'),
cls('FourEyes', 'four-eyes'),
cls('FowlLanguage', 'fowl-language'),
cls('FoxTrot', 'foxtrot'),
cls('FoxTrotClassics', 'foxtrotclassics'),
cls('FoxTrotEnEspanol', 'foxtrotespanol', 'es'),
cls('Francis', 'francis'),
cls('FrankAndErnest', 'frank-and-ernest'),
| shouldSkipUrl | identifier_name | |
PushForm.js | type: PropTypes.number.isRequired,
deviceType: PropTypes.number.isRequired,
countryList: PropTypes.array.isRequired,
};
static defaultProp = {
countryList: []
}
componentWillMount() {
const { actions, countryList } = this.props;
if (!countryList || countryList.length < 1) {
actions.getStaticDataByCodeType(Constant.USER_COUNTRY_ATTR_CODE);
}
}
onPushBigTypeChange = (evt) => {
const { getFieldValue, setFieldsValue } = this.props.form;
setFieldsValue(pushTypeFN,
pushType[getFieldValue(pushBigTypeFN)][0].key);
};
handleSubmit = (e) => {
e.preventDefault();
this.props.form.validateFields((errors, values) => {
if (!!errors) {
return;
}
let data = Immutable.Map(values).
set('type', this.props.type === undefined ? 0 : Number(
this.props.type))//0:表示消息(默认为0), 1:表示通知
.set('deviceType', this.props.deviceType); //设备类型,取值范围为:0:iOS设备 1:Andriod设备 3:全部类型设备
if (data.get('deviceType') === '1') { //android设备推送不需要上传ios apns环境变量
data = data.delete(apnsEnvFN);
}
if (data.get('type') === 1) { //通知类型才有打开方式选项
if (data.get(androidOpenTypeFN) === 'ACTIVITY') { //打开指定页面
if (!data.get(androidActivityFN)) {
MsgUtil.showwarning('指定打开的页面不允许为空');
return;
}
data.set('xiaomiActivity', data.get(androidActivityFN)); //xiaomiActivity与androidActivity赋值一直,由服务端判断怎么推
}
if (data.get(androidOpenTypeFN) === 'URL' &&
!data.get(androidOpenUrlFN)) { //打开指定网页
MsgUtil.showwarning('指定打开的网页不允许为空');
return;
}
//删除无效字段
if (data.get(androidOpenTypeFN) === 'APPLICATION' ||
data.get(androidOpenTypeFN) === 'NONE') { //打开应用或者无逻辑
data = data.delete(androidActivityFN).
delete(androidOpenUrlFN);
}
} else {
data = data.delete(androidActivityFN).delete(androidOpenUrlFN);
}
//填充自定义扩展参数
if (data.get(isCustomExtParamsFN) || data.get(isIosAddOpenUrlFN)) {
const extParameters = {};
data.get(customExtParamsItemsFN).map((item) => {
const key = `extParamsKey_${item}`;
if (data.get(key)) {
extParameters[data.get(key)] = data.get(
`extParamsValue_${item}`);
}
});
data = data.set('extParameters', Immutable.Map(extParameters).merge(data.get('extParameters')));
}
data = data.delete(isCustomExtParamsFN).
delete(customExtParamsItemsFN);
if (data.get(targetFN) === 'DEVICE' && !data.get(targetValueFN)) {
MsgUtil.showwarning('指定设备信息不正确');
return;
}
if (data.get(targetFN) === 'ACCOUNT' && !data | } else {
console.log('需要精确到分钟____________', moment(data.get(pushTimeDatePickerFN)).format('YYYYMMDD') + moment(data.get(pushTimeTimePickerFN)).format('HHmm'));
data.set('pushTime', moment(data.get(pushTimeDatePickerFN)).format('YYYYMMDD') + moment(data.get(pushTimeTimePickerFN)).format('HHmmss'));
const pushTimeFormNowLengthSplit = moment(data.get('pushTime'), 'YYYYMMDDHHmmss').fromNow().split(' ');
if (pushTimeFormNowLengthSplit[1] === '天前') {
MsgUtil.showwarning(`推送日期不允许选择${moment(data.get('pushTime'), 'YYYYMMDDHHmmss').fromNow()}`);
return;
}
if (Number(pushTimeFormNowLengthSplit[0]) > 7) {
MsgUtil.showwarning('推送日期不允许大于7天');
return;
}
}
}
data = data.delete(isSetPushTimeFN).
delete(pushTimeDatePickerFN).
delete(pushTimeTimePickerFN);
if (data.get(isStoreOfflineFn) === 'true') {
const expireTime = Number(data.get(expireTimeFN));
if (expireTime < 1 || expireTime > 72) {
MsgUtil.showwarning('离线保存时间最短1小时,最长72小时');
return;
}
} else {
data = data.delete(expireTimeFN);
}
//当设备类型为1-android或者3-所有设备时,填充Android提醒方式
if (data.get('deviceType') === '1' ||
data.get('deviceType') === '3') {
const extParameters = {};
extParameters['_NOTIFY_TYPE_'] = data.get(notifyTypeFN);
data.set('extParameters', Immutable.Map(extParameters).merge(data.get('extParameters')));
}
if (data.get('extParameters')) {
data = data.set('extParameters', JSON.stringify(data.get('extParameters').toJS()));
}
data = data.delete(notifyTypeFN);
//target targetValue处理
if (data.get(targetFN) === 'COUNTRY') {
console.log('data.get(targetValueFN)', data.get(targetValueFN));
if ( data.get(targetValueFN) && data.get(targetValueFN) !== 'ALL' ) {
data = data.set(targetFN, 'tag');
} else {
data = data.set(targetFN, 'ALL').set(targetValueFN, 'ALL');
}
}
data = data.set(androidNotificationChannelFN, 1);
if (data.get('type') === 1) {
this.props.actions.pushNotification(data);
} else {
this.props.actions.pushMessage(data);
}
});
};
render () {
const { getFieldDecorator, getFieldValue, setFieldsValue } = this.props.form;
//通知大类
const PushBigTypeProp = getFieldDecorator(pushBigTypeFN, {
initialValue: pushBigType[0].key,
onChange: this.onPushBigTypeChange,
});
const pushBigTypeOptions = pushBigType.map(
bigType =>
<Option key={bigType.key} >{bigType.key}-{bigType.value}</Option>);
//通知小类
const PushTypeProp = getFieldDecorator(pushTypeFN,
{ initialValue: pushType[getFieldValue(pushBigTypeFN)][0].key });
const pushTypeOptions = () => {
return pushType[getFieldValue(pushBigTypeFN)].map(
pushType =>
<Option key={pushType.key} >{pushType.key}-{pushType.value}</Option>);
};
//获取标题实际长度
const titleFactSize = () => {
const value = getFieldValue(titleFN);
return String(value ? value.length : 0);
};
const titleNode = (contentLabel) => {
if (this.props.type === '1' && this.props.deviceType === '0') { //通知且ios设备
getFieldDecorator(titleFN, { initialValue: '' });
return undefined;
} else {
const TitleProp = getFieldDecorator(titleFN, {
initialValue: '',
rules: [
{ required: true, max: 20 },
],
validateTrigger: 'onBlur',
});
const titleLabel = `${contentLabel}标题`;
return (
<Row>
<Row style={{ height: 25 }} >{titleLabel}:
<span style={{ float: 'right' }} >
{titleFactSize()}/{String(titleMaxSize)}
</span>
</Row>
<FormItem {...formItemLayout}>
{TitleProp(
<Input style={{ width: '100%' }} />,
)}
</FormItem>
</Row>
);
}
};
const BodyProp = getFieldDecorator(bodyFN, {
initialValue: '',
rules: [{ required: true, max: 60 }],
validateTrigger: 'onBlur',
});
//获取内容实际长度
const bodyFactSize = () => {
const value = getFieldValue(bodyFN);
return String(value ? value.length : 0);
};
//ios推送环境
const ApnsEnvProp = getFieldDecorator(apnsEnvFN,
{ initialValue: 'DEV' });
| .get(targetValueFN)) {
MsgUtil.showwarning('指定wenwenId信息不正确');
return;
}
if (data.get(targetFN) === 'ALL') {
data.set(targetValueFN, 'ALL');
}
//设置推送时间
if (data.get(isSetPushTimeFN) === 'true') {
if (!data.get(pushTimeDatePickerFN) || !data.get(pushTimeTimePickerFN)) {
MsgUtil.showwarning('推送日期不允许为空');
return;
| conditional_block |
PushForm.js | type: PropTypes.number.isRequired,
deviceType: PropTypes.number.isRequired,
countryList: PropTypes.array.isRequired,
};
static defaultProp = {
countryList: []
}
componentWillMount() {
const { | , countryList } = this.props;
if (!countryList || countryList.length < 1) {
actions.getStaticDataByCodeType(Constant.USER_COUNTRY_ATTR_CODE);
}
}
onPushBigTypeChange = (evt) => {
const { getFieldValue, setFieldsValue } = this.props.form;
setFieldsValue(pushTypeFN,
pushType[getFieldValue(pushBigTypeFN)][0].key);
};
handleSubmit = (e) => {
e.preventDefault();
this.props.form.validateFields((errors, values) => {
if (!!errors) {
return;
}
let data = Immutable.Map(values).
set('type', this.props.type === undefined ? 0 : Number(
this.props.type))//0:表示消息(默认为0), 1:表示通知
.set('deviceType', this.props.deviceType); //设备类型,取值范围为:0:iOS设备 1:Andriod设备 3:全部类型设备
if (data.get('deviceType') === '1') { //android设备推送不需要上传ios apns环境变量
data = data.delete(apnsEnvFN);
}
if (data.get('type') === 1) { //通知类型才有打开方式选项
if (data.get(androidOpenTypeFN) === 'ACTIVITY') { //打开指定页面
if (!data.get(androidActivityFN)) {
MsgUtil.showwarning('指定打开的页面不允许为空');
return;
}
data.set('xiaomiActivity', data.get(androidActivityFN)); //xiaomiActivity与androidActivity赋值一直,由服务端判断怎么推
}
if (data.get(androidOpenTypeFN) === 'URL' &&
!data.get(androidOpenUrlFN)) { //打开指定网页
MsgUtil.showwarning('指定打开的网页不允许为空');
return;
}
//删除无效字段
if (data.get(androidOpenTypeFN) === 'APPLICATION' ||
data.get(androidOpenTypeFN) === 'NONE') { //打开应用或者无逻辑
data = data.delete(androidActivityFN).
delete(androidOpenUrlFN);
}
} else {
data = data.delete(androidActivityFN).delete(androidOpenUrlFN);
}
//填充自定义扩展参数
if (data.get(isCustomExtParamsFN) || data.get(isIosAddOpenUrlFN)) {
const extParameters = {};
data.get(customExtParamsItemsFN).map((item) => {
const key = `extParamsKey_${item}`;
if (data.get(key)) {
extParameters[data.get(key)] = data.get(
`extParamsValue_${item}`);
}
});
data = data.set('extParameters', Immutable.Map(extParameters).merge(data.get('extParameters')));
}
data = data.delete(isCustomExtParamsFN).
delete(customExtParamsItemsFN);
if (data.get(targetFN) === 'DEVICE' && !data.get(targetValueFN)) {
MsgUtil.showwarning('指定设备信息不正确');
return;
}
if (data.get(targetFN) === 'ACCOUNT' && !data.get(targetValueFN)) {
MsgUtil.showwarning('指定wenwenId信息不正确');
return;
}
if (data.get(targetFN) === 'ALL') {
data.set(targetValueFN, 'ALL');
}
//设置推送时间
if (data.get(isSetPushTimeFN) === 'true') {
if (!data.get(pushTimeDatePickerFN) || !data.get(pushTimeTimePickerFN)) {
MsgUtil.showwarning('推送日期不允许为空');
return;
} else {
console.log('需要精确到分钟____________', moment(data.get(pushTimeDatePickerFN)).format('YYYYMMDD') + moment(data.get(pushTimeTimePickerFN)).format('HHmm'));
data.set('pushTime', moment(data.get(pushTimeDatePickerFN)).format('YYYYMMDD') + moment(data.get(pushTimeTimePickerFN)).format('HHmmss'));
const pushTimeFormNowLengthSplit = moment(data.get('pushTime'), 'YYYYMMDDHHmmss').fromNow().split(' ');
if (pushTimeFormNowLengthSplit[1] === '天前') {
MsgUtil.showwarning(`推送日期不允许选择${moment(data.get('pushTime'), 'YYYYMMDDHHmmss').fromNow()}`);
return;
}
if (Number(pushTimeFormNowLengthSplit[0]) > 7) {
MsgUtil.showwarning('推送日期不允许大于7天');
return;
}
}
}
data = data.delete(isSetPushTimeFN).
delete(pushTimeDatePickerFN).
delete(pushTimeTimePickerFN);
if (data.get(isStoreOfflineFn) === 'true') {
const expireTime = Number(data.get(expireTimeFN));
if (expireTime < 1 || expireTime > 72) {
MsgUtil.showwarning('离线保存时间最短1小时,最长72小时');
return;
}
} else {
data = data.delete(expireTimeFN);
}
//当设备类型为1-android或者3-所有设备时,填充Android提醒方式
if (data.get('deviceType') === '1' ||
data.get('deviceType') === '3') {
const extParameters = {};
extParameters['_NOTIFY_TYPE_'] = data.get(notifyTypeFN);
data.set('extParameters', Immutable.Map(extParameters).merge(data.get('extParameters')));
}
if (data.get('extParameters')) {
data = data.set('extParameters', JSON.stringify(data.get('extParameters').toJS()));
}
data = data.delete(notifyTypeFN);
//target targetValue处理
if (data.get(targetFN) === 'COUNTRY') {
console.log('data.get(targetValueFN)', data.get(targetValueFN));
if ( data.get(targetValueFN) && data.get(targetValueFN) !== 'ALL' ) {
data = data.set(targetFN, 'tag');
} else {
data = data.set(targetFN, 'ALL').set(targetValueFN, 'ALL');
}
}
data = data.set(androidNotificationChannelFN, 1);
if (data.get('type') === 1) {
this.props.actions.pushNotification(data);
} else {
this.props.actions.pushMessage(data);
}
});
};
render () {
const { getFieldDecorator, getFieldValue, setFieldsValue } = this.props.form;
//通知大类
const PushBigTypeProp = getFieldDecorator(pushBigTypeFN, {
initialValue: pushBigType[0].key,
onChange: this.onPushBigTypeChange,
});
const pushBigTypeOptions = pushBigType.map(
bigType =>
<Option key={bigType.key} >{bigType.key}-{bigType.value}</Option>);
//通知小类
const PushTypeProp = getFieldDecorator(pushTypeFN,
{ initialValue: pushType[getFieldValue(pushBigTypeFN)][0].key });
const pushTypeOptions = () => {
return pushType[getFieldValue(pushBigTypeFN)].map(
pushType =>
<Option key={pushType.key} >{pushType.key}-{pushType.value}</Option>);
};
//获取标题实际长度
const titleFactSize = () => {
const value = getFieldValue(titleFN);
return String(value ? value.length : 0);
};
const titleNode = (contentLabel) => {
if (this.props.type === '1' && this.props.deviceType === '0') { //通知且ios设备
getFieldDecorator(titleFN, { initialValue: '' });
return undefined;
} else {
const TitleProp = getFieldDecorator(titleFN, {
initialValue: '',
rules: [
{ required: true, max: 20 },
],
validateTrigger: 'onBlur',
});
const titleLabel = `${contentLabel}标题`;
return (
<Row>
<Row style={{ height: 25 }} >{titleLabel}:
<span style={{ float: 'right' }} >
{titleFactSize()}/{String(titleMaxSize)}
</span>
</Row>
<FormItem {...formItemLayout}>
{TitleProp(
<Input style={{ width: '100%' }} />,
)}
</FormItem>
</Row>
);
}
};
const BodyProp = getFieldDecorator(bodyFN, {
initialValue: '',
rules: [{ required: true, max: 60 }],
validateTrigger: 'onBlur',
});
//获取内容实际长度
const bodyFactSize = () => {
const value = getFieldValue(bodyFN);
return String(value ? value.length : 0);
};
//ios推送环境
const ApnsEnvProp = getFieldDecorator(apnsEnvFN,
{ initialValue: 'DEV' });
| actions | identifier_name |
PushForm.js | (!data.get(pushTimeDatePickerFN) || !data.get(pushTimeTimePickerFN)) {
MsgUtil.showwarning('推送日期不允许为空');
return;
} else {
console.log('需要精确到分钟____________', moment(data.get(pushTimeDatePickerFN)).format('YYYYMMDD') + moment(data.get(pushTimeTimePickerFN)).format('HHmm'));
data.set('pushTime', moment(data.get(pushTimeDatePickerFN)).format('YYYYMMDD') + moment(data.get(pushTimeTimePickerFN)).format('HHmmss'));
const pushTimeFormNowLengthSplit = moment(data.get('pushTime'), 'YYYYMMDDHHmmss').fromNow().split(' ');
if (pushTimeFormNowLengthSplit[1] === '天前') {
MsgUtil.showwarning(`推送日期不允许选择${moment(data.get('pushTime'), 'YYYYMMDDHHmmss').fromNow()}`);
return;
}
if (Number(pushTimeFormNowLengthSplit[0]) > 7) {
MsgUtil.showwarning('推送日期不允许大于7天');
return;
}
}
}
data = data.delete(isSetPushTimeFN).
delete(pushTimeDatePickerFN).
delete(pushTimeTimePickerFN);
if (data.get(isStoreOfflineFn) === 'true') {
const expireTime = Number(data.get(expireTimeFN));
if (expireTime < 1 || expireTime > 72) {
MsgUtil.showwarning('离线保存时间最短1小时,最长72小时');
return;
}
} else {
data = data.delete(expireTimeFN);
}
//当设备类型为1-android或者3-所有设备时,填充Android提醒方式
if (data.get('deviceType') === '1' ||
data.get('deviceType') === '3') {
const extParameters = {};
extParameters['_NOTIFY_TYPE_'] = data.get(notifyTypeFN);
data.set('extParameters', Immutable.Map(extParameters).merge(data.get('extParameters')));
}
if (data.get('extParameters')) {
data = data.set('extParameters', JSON.stringify(data.get('extParameters').toJS()));
}
data = data.delete(notifyTypeFN);
//target targetValue处理
if (data.get(targetFN) === 'COUNTRY') {
console.log('data.get(targetValueFN)', data.get(targetValueFN));
if ( data.get(targetValueFN) && data.get(targetValueFN) !== 'ALL' ) {
data = data.set(targetFN, 'tag');
} else {
data = data.set(targetFN, 'ALL').set(targetValueFN, 'ALL');
}
}
data = data.set(androidNotificationChannelFN, 1);
if (data.get('type') === 1) {
this.props.actions.pushNotification(data);
} else {
this.props.actions.pushMessage(data);
}
});
};
render () {
const { getFieldDecorator, getFieldValue, setFieldsValue } = this.props.form;
//通知大类
const PushBigTypeProp = getFieldDecorator(pushBigTypeFN, {
initialValue: pushBigType[0].key,
onChange: this.onPushBigTypeChange,
});
const pushBigTypeOptions = pushBigType.map(
bigType =>
<Option key={bigType.key} >{bigType.key}-{bigType.value}</Option>);
//通知小类
const PushTypeProp = getFieldDecorator(pushTypeFN,
{ initialValue: pushType[getFieldValue(pushBigTypeFN)][0].key });
const pushTypeOptions = () => {
return pushType[getFieldValue(pushBigTypeFN)].map(
pushType =>
<Option key={pushType.key} >{pushType.key}-{pushType.value}</Option>);
};
//获取标题实际长度
const titleFactSize = () => {
const value = getFieldValue(titleFN);
return String(value ? value.length : 0);
};
const titleNode = (contentLabel) => {
if (this.props.type === '1' && this.props.deviceType === '0') { //通知且ios设备
getFieldDecorator(titleFN, { initialValue: '' });
return undefined;
} else {
const TitleProp = getFieldDecorator(titleFN, {
initialValue: '',
rules: [
{ required: true, max: 20 },
],
validateTrigger: 'onBlur',
});
const titleLabel = `${contentLabel}标题`;
return (
<Row>
<Row style={{ height: 25 }} >{titleLabel}:
<span style={{ float: 'right' }} >
{titleFactSize()}/{String(titleMaxSize)}
</span>
</Row>
<FormItem {...formItemLayout}>
{TitleProp(
<Input style={{ width: '100%' }} />,
)}
</FormItem>
</Row>
);
}
};
const BodyProp = getFieldDecorator(bodyFN, {
initialValue: '',
rules: [{ required: true, max: 60 }],
validateTrigger: 'onBlur',
});
//获取内容实际长度
const bodyFactSize = () => {
const value = getFieldValue(bodyFN);
return String(value ? value.length : 0);
};
//ios推送环境
const ApnsEnvProp = getFieldDecorator(apnsEnvFN,
{ initialValue: 'DEV' });
//点击后操作的字段
const IsCustomExtParamsProp = getFieldDecorator(isCustomExtParamsFN,
{ initialValue: false });
const isCustomExtParams = getFieldValue(isCustomExtParamsFN);
const AndroidOpenTypeProp = getFieldDecorator(androidOpenTypeFN,
{ initialValue: 'APPLICATION' });
const androidOpenTypeValue = getFieldValue(androidOpenTypeFN);
const AndroidActivityProp = getFieldDecorator(androidActivityFN,
{ initialValue: undefined });
const AndroidOpenUrlProp = getFieldDecorator(androidOpenUrlFN,
{ initialValue: undefined });
getFieldDecorator(customExtParamsItemsFN, {
initialValue: [extParamsItemUuid],
});
const customExtParamsItems = getFieldValue(customExtParamsItemsFN);
//推送目标
const TargetProp = getFieldDecorator(targetFN, { initialValue: 'ALL' });
const TargetValueProp = getFieldDecorator(targetValueFN, { initialValue: 'ALL' });
const targetValueNode = () => {
switch (getFieldValue(targetFN)) {
case 'ALL':
return undefined;
case 'DEVICE':
return (
<Row>
{TargetValueProp(
<Input placeholder="请输入deviceId,多个终端用逗号分隔" />,
)}
</Row>
);
case 'ACCOUNT':
return (
<Row>
{TargetValueProp(
<Input placeholder="请输入wenwenId,多个wenwenId用逗号分隔" />,
)}
</Row>
);
case 'COUNTRY': {
getFieldDecorator(targetValueFN, { initialValue: '' });
const { countryList } = this.props;
return (
<Row>
{TargetValueProp(
<MySelect defaultValue="" selectOptionDataList={countryList} descKey="codeName" valueKey="codeValue" />
)}
</Row>
);
}
}
};
//推送时间
const IsSetPushTimeProp = getFieldDecorator(isSetPushTimeFN,
{ initialValue: 'false' });
const currentDate = moment(new Date());
const PushTimeDatePickerProp = getFieldDecorator(pushTimeDatePickerFN, {
initialValue: currentDate,
format: 'YYYY-MM-DD',
});
const PushTimeTimePickerProp = getFieldDecorator(pushTimeTimePickerFN, {
initialValue: currentDate,
format: 'HH:mm:ss',
});
const pushTimeNode = () => {
switch (getFieldValue(isSetPushTimeFN)) {
case 'true':
return (
<Row>
{PushTimeDatePickerProp(<DatePicker />)}
{PushTimeTimePickerProp(<TimePicker />)}
</Row>
);
default:
return undefined;
}
};
//离线保存
const IsStoreOfflineProp = getFieldDecorator(isStoreOfflineFn,
{ initialValue: 'true' });
const ExpireTimeProp = getFieldDecorator(expireTimeFN,
{ initialValue: 72 });
const expireTimeNode = () => {
if (getFieldValue(isStoreOfflineFn) === 'true') {
return (<Row>保存 {ExpireTimeProp(
<InputNumber min={1} max={72} />)}小时,该时段之后再上线的用户将收不到推送</Row>);
}
return undefined;
};
const formItemLayout = {
style: { width: '100%' },
wrapperCol: { span: 24 },
};
let contentPanelBoxLabel;
if (this.props.type === '1') {
contentPanelBoxLabel = '通知'; | } else {
contentPanelBoxLabel = '消息';
}
return ( | random_line_split | |
app.js | 'bootstrap', 'legend', 'panoramio', 'geolocation', 'core', 'wirecloud', 'angular-gettext', 'translations'],
function(angular, ol, toolbar, layermanager) {
var modules_to_load = [
'hs.toolbar',
'hs.layermanager',
'hs.map',
'hs.ows',
'hs.query',
'hs.search', 'hs.print', 'hs.permalink', 'hs.measure',
'hs.legend', 'hs.geolocation', 'hs.core', 'hs.wirecloud', 'gettext', 'hs.sidebar'
];
if (typeof MashupPlatform !== 'undefined') {
modules_to_load = eval(MashupPlatform.prefs.get('modules_to_load'));
}
var module = angular.module('hs', modules_to_load);
module.directive('hs', ['hs.map.service', 'Core', function(OlMap, Core) {
return {
templateUrl: hsl_path + 'hslayers.html',
link: function(scope, element) {
Core.fullScreenMap(element);
}
};
}]);
var location_layer = new ol.layer.Vector({
title: "Locations",
show_in_manager: true,
source: new ol.source.Vector(),
style: function(feature, resolution) {
return [new ol.style.Style({
text: new ol.style.Text({
text: feature.get('temperature'),
offsetY: -10,
offsetX: 5,
fill: new ol.style.Fill({
color: '#000'
})
}),
image: new ol.style.Circle({
fill: new ol.style.Fill({
color: feature.color ? feature.color : [242, 121, 0, 0.7]
}),
stroke: new ol.style.Stroke({
color: [0x33, 0x33, 0x33, 0.9]
}),
radius: 5
})
})]
}
});
var extent_layer = new ol.layer.Vector({
title: "Locations",
show_in_manager: true,
source: new ol.source.Vector(),
style: function(feature, resolution) {
return [new ol.style.Style({
stroke: new ol.style.Stroke({
color: '#005CB6',
width: 3
}),
fill: new ol.style.Fill({
color: 'rgba(0, 0, 255, 0.1)'
})
})]
}
});
var location_feature_ids = {};
function rainbow(numOfSteps, step, opacity) {
// based on http://stackoverflow.com/a/7419630
// This function generates vibrant, "evenly spaced" colours (i.e. no clustering). This is ideal for creating easily distiguishable vibrant markers in Google Maps and other apps.
// Adam Cole, 2011-Sept-14
// HSV to RBG adapted from: http://mjijackson.com/2008/02/rgb-to-hsl-and-rgb-to-hsv-color-model-conversion-algorithms-in-javascript
var r, g, b;
var h = step / (numOfSteps * 1.00000001);
var i = ~~(h * 4);
var f = h * 4 - i;
var q = 1 - f;
switch (i % 4) {
case 2:
r = f, g = 1, b = 0;
break;
case 0:
r = 0, g = f, b = 1;
break;
case 3:
r = 1, g = q, b = 0;
break;
case 1:
r = 0, g = 1, b = q;
break;
}
var c = "rgba(" + ~~(r * 235) + "," + ~~(g * 235) + "," + ~~(b * 235) + ", " + opacity + ")";
return (c);
}
function processObject(data) |
//Create feature if necessary. Set the attribute values for the feature
var feature = null;
if (location_feature_ids[data[id_attr_name]]) {
feature = location_feature_ids[data[id_attr_name]];
feature.setGeometry(attributes.geometry);
for (var atr in attributes) {
feature.set(atr, attributes[atr]);
}
} else {
feature = new ol.Feature(attributes);
location_layer.getSource().addFeatures([feature]);
location_feature_ids[data[id_attr_name]] = feature;
}
//Compute color
//feature.color = rainbow(30, Math.min(Math.max(max_temp, -15), 15) + 15, 0.7);
}
function processUnit(data) {
var attributes = {
id: data.id
};
var projection = 'EPSG:4326';
/*for(var meta_i; meta_i<attr.metadatas.length; meta_i++){
if(attr.metadatas[meta_i].name=="location")
projection = attr.metadatas[meta_i].value;
}*/
var coords = data.position.split(',');
attributes.geometry = new ol.geom.Point(ol.proj.transform([parseFloat(coords[1]), parseFloat(coords[0])], projection, 'EPSG:3857'));
attributes.timestamp = data.timestamp;
var feature = null;
if (location_feature_ids[data.id]) {
feature = location_feature_ids[data.id];
feature.setGeometry(attributes.geometry);
for (var atr in attributes) {
feature.set(atr, attributes[atr]);
}
} else {
feature = new ol.Feature(attributes);
feature.tags = {};
location_layer.getSource().addFeatures([feature]);
location_feature_ids[data.id] = feature;
}
}
function processTag(data) {
if (location_feature_ids[data.unit]) {
location_feature_ids[data.unit].tags[data.id] = data;
var max_temp = -273.15;
var timestamp = "";
for (var tag in location_feature_ids[data.unit].tags) {
var t = parseFloat(location_feature_ids[data.unit].tags[tag].temperature);
max_temp = t > max_temp ? t : max_temp;
timestamp = location_feature_ids[data.unit].tags[tag].timestamp;
}
location_feature_ids[data.unit].color = rainbow(30, Math.min(Math.max(max_temp, -15), 15) + 15, 0.7);
location_feature_ids[data.unit].set("max_temp", max_temp.toFixed(2) + " °C");
location_feature_ids[data.unit].set("timestamp", timestamp);
}
}
module.value('config', {
default_layers: [
new ol.layer.Tile({
source: new ol.source.OSM(),
show_in_manager: true,
title: "Base layer",
base: true
}),
location_layer,
extent_layer
],
wirecloud_data_consumer: function(data) {
data = angular.fromJson(data);
if (console) console.log(data);
if (typeof data.type !== 'undefined') {
switch (data.type) {
/*case "Unit":
processUnit(data);
break;
case "Tag":
processTag(data);
break;*/
default: processObject(data);
}
} else {
process_object(data);
}
},
default_view: new ol.View({
center: ol.proj.transform([17.474129, 52.574000], 'EPSG:4326', 'EPSG:3857'), //Latitude longitude to Spherical Mercator
zoom: 4,
units: "m"
})
});
module.controller('Main', ['$scope', 'Core', 'hs.query.baseService', 'config',
function($scope, Core, QueryService, config) {
$scope.hsl_path = hsl_path; //Get this from hslayers.js file
$scope.Core = Core;
$scope.$on('query.dataUpdated', function(event) {
if (console) console.log('Attributes', QueryService.data.attributes, 'Groups', QueryService.data.groups);
});
if | {
//Get settings from configuration
var id_attr_name = MashupPlatform.prefs.get('id_attr_name');
var coordinates_attr_name = MashupPlatform.prefs.get('coordinates_attr_name');
var measurements_attr_names = MashupPlatform.prefs.get('measurements_attr_names').split(',');
var timestamp_attr_name = MashupPlatform.prefs.get('timestamp_attr_name');
//Read attributes
var attributes = {
id: data[id_attr_name],
timestamp: data[timestamp_attr_name]
};
var projection = 'EPSG:4326';
if (angular.isUndefined(data[coordinates_attr_name])) return;
var coords = data[coordinates_attr_name].split(','); //Supposed ccordinates are lon, lat seperated by comma
attributes.geometry = new ol.geom.Point(ol.proj.transform([parseFloat(coords[1]), parseFloat(coords[0])], projection, 'EPSG:3857'));
for (var attr_i = 0; attr_i < measurements_attr_names.length; attr_i++) {
var t = parseFloat(data[measurements_attr_names[attr_i]]);
attributes[measurements_attr_names[attr_i]] = t.toFixed(2);
} | identifier_body |
app.js | 'hs.legend', 'hs.geolocation', 'hs.core', 'hs.wirecloud', 'gettext', 'hs.sidebar'
];
if (typeof MashupPlatform !== 'undefined') {
modules_to_load = eval(MashupPlatform.prefs.get('modules_to_load'));
}
var module = angular.module('hs', modules_to_load);
module.directive('hs', ['hs.map.service', 'Core', function(OlMap, Core) {
return {
templateUrl: hsl_path + 'hslayers.html',
link: function(scope, element) {
Core.fullScreenMap(element);
}
};
}]);
var location_layer = new ol.layer.Vector({
title: "Locations",
show_in_manager: true,
source: new ol.source.Vector(),
style: function(feature, resolution) {
return [new ol.style.Style({
text: new ol.style.Text({
text: feature.get('temperature'),
offsetY: -10,
offsetX: 5,
fill: new ol.style.Fill({
color: '#000'
})
}),
image: new ol.style.Circle({
fill: new ol.style.Fill({
color: feature.color ? feature.color : [242, 121, 0, 0.7]
}),
stroke: new ol.style.Stroke({
color: [0x33, 0x33, 0x33, 0.9]
}),
radius: 5
})
})]
}
});
var extent_layer = new ol.layer.Vector({
title: "Locations",
show_in_manager: true,
source: new ol.source.Vector(),
style: function(feature, resolution) {
return [new ol.style.Style({
stroke: new ol.style.Stroke({
color: '#005CB6',
width: 3
}),
fill: new ol.style.Fill({
color: 'rgba(0, 0, 255, 0.1)'
})
})]
}
});
var location_feature_ids = {};
function rainbow(numOfSteps, step, opacity) {
// based on http://stackoverflow.com/a/7419630
// This function generates vibrant, "evenly spaced" colours (i.e. no clustering). This is ideal for creating easily distiguishable vibrant markers in Google Maps and other apps.
// Adam Cole, 2011-Sept-14
// HSV to RBG adapted from: http://mjijackson.com/2008/02/rgb-to-hsl-and-rgb-to-hsv-color-model-conversion-algorithms-in-javascript
var r, g, b;
var h = step / (numOfSteps * 1.00000001);
var i = ~~(h * 4);
var f = h * 4 - i;
var q = 1 - f;
switch (i % 4) {
case 2:
r = f, g = 1, b = 0;
break;
case 0:
r = 0, g = f, b = 1;
break;
case 3:
r = 1, g = q, b = 0;
break;
case 1:
r = 0, g = 1, b = q;
break;
}
var c = "rgba(" + ~~(r * 235) + "," + ~~(g * 235) + "," + ~~(b * 235) + ", " + opacity + ")";
return (c);
}
function processObject(data) {
//Get settings from configuration
var id_attr_name = MashupPlatform.prefs.get('id_attr_name');
var coordinates_attr_name = MashupPlatform.prefs.get('coordinates_attr_name');
var measurements_attr_names = MashupPlatform.prefs.get('measurements_attr_names').split(',');
var timestamp_attr_name = MashupPlatform.prefs.get('timestamp_attr_name');
//Read attributes
var attributes = {
id: data[id_attr_name],
timestamp: data[timestamp_attr_name]
};
var projection = 'EPSG:4326';
if (angular.isUndefined(data[coordinates_attr_name])) return;
var coords = data[coordinates_attr_name].split(','); //Supposed ccordinates are lon, lat seperated by comma
attributes.geometry = new ol.geom.Point(ol.proj.transform([parseFloat(coords[1]), parseFloat(coords[0])], projection, 'EPSG:3857'));
for (var attr_i = 0; attr_i < measurements_attr_names.length; attr_i++) {
var t = parseFloat(data[measurements_attr_names[attr_i]]);
attributes[measurements_attr_names[attr_i]] = t.toFixed(2);
}
//Create feature if necessary. Set the attribute values for the feature
var feature = null;
if (location_feature_ids[data[id_attr_name]]) {
feature = location_feature_ids[data[id_attr_name]];
feature.setGeometry(attributes.geometry);
for (var atr in attributes) {
feature.set(atr, attributes[atr]);
}
} else {
feature = new ol.Feature(attributes);
location_layer.getSource().addFeatures([feature]);
location_feature_ids[data[id_attr_name]] = feature;
}
//Compute color
//feature.color = rainbow(30, Math.min(Math.max(max_temp, -15), 15) + 15, 0.7);
}
function processUnit(data) {
var attributes = {
id: data.id
};
var projection = 'EPSG:4326';
/*for(var meta_i; meta_i<attr.metadatas.length; meta_i++){
if(attr.metadatas[meta_i].name=="location")
projection = attr.metadatas[meta_i].value;
}*/
var coords = data.position.split(',');
attributes.geometry = new ol.geom.Point(ol.proj.transform([parseFloat(coords[1]), parseFloat(coords[0])], projection, 'EPSG:3857'));
attributes.timestamp = data.timestamp;
var feature = null;
if (location_feature_ids[data.id]) {
feature = location_feature_ids[data.id];
feature.setGeometry(attributes.geometry);
for (var atr in attributes) {
feature.set(atr, attributes[atr]);
}
} else {
feature = new ol.Feature(attributes);
feature.tags = {};
location_layer.getSource().addFeatures([feature]);
location_feature_ids[data.id] = feature;
}
}
function processTag(data) {
if (location_feature_ids[data.unit]) {
location_feature_ids[data.unit].tags[data.id] = data;
var max_temp = -273.15;
var timestamp = "";
for (var tag in location_feature_ids[data.unit].tags) {
var t = parseFloat(location_feature_ids[data.unit].tags[tag].temperature);
max_temp = t > max_temp ? t : max_temp;
timestamp = location_feature_ids[data.unit].tags[tag].timestamp;
}
location_feature_ids[data.unit].color = rainbow(30, Math.min(Math.max(max_temp, -15), 15) + 15, 0.7);
location_feature_ids[data.unit].set("max_temp", max_temp.toFixed(2) + " °C");
location_feature_ids[data.unit].set("timestamp", timestamp);
}
}
module.value('config', {
default_layers: [
new ol.layer.Tile({
source: new ol.source.OSM(),
show_in_manager: true,
title: "Base layer",
base: true
}),
location_layer,
extent_layer
],
wirecloud_data_consumer: function(data) {
data = angular.fromJson(data);
if (console) console.log(data);
if (typeof data.type !== 'undefined') {
switch (data.type) {
/*case "Unit":
processUnit(data);
break;
case "Tag":
processTag(data);
break;*/
default: processObject(data);
}
} else {
process_object(data);
}
},
default_view: new ol.View({
center: ol.proj.transform([17.474129, 52.574000], 'EPSG:4326', 'EPSG:3857'), //Latitude longitude to Spherical Mercator
zoom: 4,
units: "m"
})
});
module.controller('Main', ['$scope', 'Core', 'hs.query.baseService', 'config',
function($scope, Core, QueryService, config) {
$scope.hsl_path = hsl_path; //Get this from hslayers.js file
$scope.Core = Core;
$scope.$on('query.dataUpdated', function(event) {
if (console) console.log('Attributes', QueryService.data.attributes, 'Groups', QueryService.data.groups);
});
if (typeof MashupPlatform !== 'undefined')
MashupPlatform.wiring.registerCallback("data_received_slot", config.wirecloud_data_consumer);
//This is needed because data can arrive before hslayers is loaded, so we store it in tmp and process later.
for (var i = 0; i < tmp_data_received.length; i++) {
config.wirecloud_data_consumer(tmp_data_received[i]);
}
}
]); |
return module; | random_line_split | |
app.js | 'bootstrap', 'legend', 'panoramio', 'geolocation', 'core', 'wirecloud', 'angular-gettext', 'translations'],
function(angular, ol, toolbar, layermanager) {
var modules_to_load = [
'hs.toolbar',
'hs.layermanager',
'hs.map',
'hs.ows',
'hs.query',
'hs.search', 'hs.print', 'hs.permalink', 'hs.measure',
'hs.legend', 'hs.geolocation', 'hs.core', 'hs.wirecloud', 'gettext', 'hs.sidebar'
];
if (typeof MashupPlatform !== 'undefined') {
modules_to_load = eval(MashupPlatform.prefs.get('modules_to_load'));
}
var module = angular.module('hs', modules_to_load);
module.directive('hs', ['hs.map.service', 'Core', function(OlMap, Core) {
return {
templateUrl: hsl_path + 'hslayers.html',
link: function(scope, element) {
Core.fullScreenMap(element);
}
};
}]);
var location_layer = new ol.layer.Vector({
title: "Locations",
show_in_manager: true,
source: new ol.source.Vector(),
style: function(feature, resolution) {
return [new ol.style.Style({
text: new ol.style.Text({
text: feature.get('temperature'),
offsetY: -10,
offsetX: 5,
fill: new ol.style.Fill({
color: '#000'
})
}),
image: new ol.style.Circle({
fill: new ol.style.Fill({
color: feature.color ? feature.color : [242, 121, 0, 0.7]
}),
stroke: new ol.style.Stroke({
color: [0x33, 0x33, 0x33, 0.9]
}),
radius: 5
})
})]
}
});
var extent_layer = new ol.layer.Vector({
title: "Locations",
show_in_manager: true,
source: new ol.source.Vector(),
style: function(feature, resolution) {
return [new ol.style.Style({
stroke: new ol.style.Stroke({
color: '#005CB6',
width: 3
}),
fill: new ol.style.Fill({
color: 'rgba(0, 0, 255, 0.1)'
})
})]
}
});
var location_feature_ids = {};
function | (numOfSteps, step, opacity) {
// based on http://stackoverflow.com/a/7419630
// This function generates vibrant, "evenly spaced" colours (i.e. no clustering). This is ideal for creating easily distiguishable vibrant markers in Google Maps and other apps.
// Adam Cole, 2011-Sept-14
// HSV to RBG adapted from: http://mjijackson.com/2008/02/rgb-to-hsl-and-rgb-to-hsv-color-model-conversion-algorithms-in-javascript
var r, g, b;
var h = step / (numOfSteps * 1.00000001);
var i = ~~(h * 4);
var f = h * 4 - i;
var q = 1 - f;
switch (i % 4) {
case 2:
r = f, g = 1, b = 0;
break;
case 0:
r = 0, g = f, b = 1;
break;
case 3:
r = 1, g = q, b = 0;
break;
case 1:
r = 0, g = 1, b = q;
break;
}
var c = "rgba(" + ~~(r * 235) + "," + ~~(g * 235) + "," + ~~(b * 235) + ", " + opacity + ")";
return (c);
}
function processObject(data) {
//Get settings from configuration
var id_attr_name = MashupPlatform.prefs.get('id_attr_name');
var coordinates_attr_name = MashupPlatform.prefs.get('coordinates_attr_name');
var measurements_attr_names = MashupPlatform.prefs.get('measurements_attr_names').split(',');
var timestamp_attr_name = MashupPlatform.prefs.get('timestamp_attr_name');
//Read attributes
var attributes = {
id: data[id_attr_name],
timestamp: data[timestamp_attr_name]
};
var projection = 'EPSG:4326';
if (angular.isUndefined(data[coordinates_attr_name])) return;
var coords = data[coordinates_attr_name].split(','); //Supposed ccordinates are lon, lat seperated by comma
attributes.geometry = new ol.geom.Point(ol.proj.transform([parseFloat(coords[1]), parseFloat(coords[0])], projection, 'EPSG:3857'));
for (var attr_i = 0; attr_i < measurements_attr_names.length; attr_i++) {
var t = parseFloat(data[measurements_attr_names[attr_i]]);
attributes[measurements_attr_names[attr_i]] = t.toFixed(2);
}
//Create feature if necessary. Set the attribute values for the feature
var feature = null;
if (location_feature_ids[data[id_attr_name]]) {
feature = location_feature_ids[data[id_attr_name]];
feature.setGeometry(attributes.geometry);
for (var atr in attributes) {
feature.set(atr, attributes[atr]);
}
} else {
feature = new ol.Feature(attributes);
location_layer.getSource().addFeatures([feature]);
location_feature_ids[data[id_attr_name]] = feature;
}
//Compute color
//feature.color = rainbow(30, Math.min(Math.max(max_temp, -15), 15) + 15, 0.7);
}
function processUnit(data) {
var attributes = {
id: data.id
};
var projection = 'EPSG:4326';
/*for(var meta_i; meta_i<attr.metadatas.length; meta_i++){
if(attr.metadatas[meta_i].name=="location")
projection = attr.metadatas[meta_i].value;
}*/
var coords = data.position.split(',');
attributes.geometry = new ol.geom.Point(ol.proj.transform([parseFloat(coords[1]), parseFloat(coords[0])], projection, 'EPSG:3857'));
attributes.timestamp = data.timestamp;
var feature = null;
if (location_feature_ids[data.id]) {
feature = location_feature_ids[data.id];
feature.setGeometry(attributes.geometry);
for (var atr in attributes) {
feature.set(atr, attributes[atr]);
}
} else {
feature = new ol.Feature(attributes);
feature.tags = {};
location_layer.getSource().addFeatures([feature]);
location_feature_ids[data.id] = feature;
}
}
function processTag(data) {
if (location_feature_ids[data.unit]) {
location_feature_ids[data.unit].tags[data.id] = data;
var max_temp = -273.15;
var timestamp = "";
for (var tag in location_feature_ids[data.unit].tags) {
var t = parseFloat(location_feature_ids[data.unit].tags[tag].temperature);
max_temp = t > max_temp ? t : max_temp;
timestamp = location_feature_ids[data.unit].tags[tag].timestamp;
}
location_feature_ids[data.unit].color = rainbow(30, Math.min(Math.max(max_temp, -15), 15) + 15, 0.7);
location_feature_ids[data.unit].set("max_temp", max_temp.toFixed(2) + " °C");
location_feature_ids[data.unit].set("timestamp", timestamp);
}
}
module.value('config', {
default_layers: [
new ol.layer.Tile({
source: new ol.source.OSM(),
show_in_manager: true,
title: "Base layer",
base: true
}),
location_layer,
extent_layer
],
wirecloud_data_consumer: function(data) {
data = angular.fromJson(data);
if (console) console.log(data);
if (typeof data.type !== 'undefined') {
switch (data.type) {
/*case "Unit":
processUnit(data);
break;
case "Tag":
processTag(data);
break;*/
default: processObject(data);
}
} else {
process_object(data);
}
},
default_view: new ol.View({
center: ol.proj.transform([17.474129, 52.574000], 'EPSG:4326', 'EPSG:3857'), //Latitude longitude to Spherical Mercator
zoom: 4,
units: "m"
})
});
module.controller('Main', ['$scope', 'Core', 'hs.query.baseService', 'config',
function($scope, Core, QueryService, config) {
$scope.hsl_path = hsl_path; //Get this from hslayers.js file
$scope.Core = Core;
$scope.$on('query.dataUpdated', function(event) {
if (console) console.log('Attributes', QueryService.data.attributes, 'Groups', QueryService.data.groups);
});
if | rainbow | identifier_name |
app.js | 'bootstrap', 'legend', 'panoramio', 'geolocation', 'core', 'wirecloud', 'angular-gettext', 'translations'],
function(angular, ol, toolbar, layermanager) {
var modules_to_load = [
'hs.toolbar',
'hs.layermanager',
'hs.map',
'hs.ows',
'hs.query',
'hs.search', 'hs.print', 'hs.permalink', 'hs.measure',
'hs.legend', 'hs.geolocation', 'hs.core', 'hs.wirecloud', 'gettext', 'hs.sidebar'
];
if (typeof MashupPlatform !== 'undefined') {
modules_to_load = eval(MashupPlatform.prefs.get('modules_to_load'));
}
var module = angular.module('hs', modules_to_load);
module.directive('hs', ['hs.map.service', 'Core', function(OlMap, Core) {
return {
templateUrl: hsl_path + 'hslayers.html',
link: function(scope, element) {
Core.fullScreenMap(element);
}
};
}]);
var location_layer = new ol.layer.Vector({
title: "Locations",
show_in_manager: true,
source: new ol.source.Vector(),
style: function(feature, resolution) {
return [new ol.style.Style({
text: new ol.style.Text({
text: feature.get('temperature'),
offsetY: -10,
offsetX: 5,
fill: new ol.style.Fill({
color: '#000'
})
}),
image: new ol.style.Circle({
fill: new ol.style.Fill({
color: feature.color ? feature.color : [242, 121, 0, 0.7]
}),
stroke: new ol.style.Stroke({
color: [0x33, 0x33, 0x33, 0.9]
}),
radius: 5
})
})]
}
});
var extent_layer = new ol.layer.Vector({
title: "Locations",
show_in_manager: true,
source: new ol.source.Vector(),
style: function(feature, resolution) {
return [new ol.style.Style({
stroke: new ol.style.Stroke({
color: '#005CB6',
width: 3
}),
fill: new ol.style.Fill({
color: 'rgba(0, 0, 255, 0.1)'
})
})]
}
});
var location_feature_ids = {};
function rainbow(numOfSteps, step, opacity) {
// based on http://stackoverflow.com/a/7419630
// This function generates vibrant, "evenly spaced" colours (i.e. no clustering). This is ideal for creating easily distiguishable vibrant markers in Google Maps and other apps.
// Adam Cole, 2011-Sept-14
// HSV to RBG adapted from: http://mjijackson.com/2008/02/rgb-to-hsl-and-rgb-to-hsv-color-model-conversion-algorithms-in-javascript
var r, g, b;
var h = step / (numOfSteps * 1.00000001);
var i = ~~(h * 4);
var f = h * 4 - i;
var q = 1 - f;
switch (i % 4) {
case 2:
r = f, g = 1, b = 0;
break;
case 0:
r = 0, g = f, b = 1;
break;
case 3:
r = 1, g = q, b = 0;
break;
case 1:
r = 0, g = 1, b = q;
break;
}
var c = "rgba(" + ~~(r * 235) + "," + ~~(g * 235) + "," + ~~(b * 235) + ", " + opacity + ")";
return (c);
}
function processObject(data) {
//Get settings from configuration
var id_attr_name = MashupPlatform.prefs.get('id_attr_name');
var coordinates_attr_name = MashupPlatform.prefs.get('coordinates_attr_name');
var measurements_attr_names = MashupPlatform.prefs.get('measurements_attr_names').split(',');
var timestamp_attr_name = MashupPlatform.prefs.get('timestamp_attr_name');
//Read attributes
var attributes = {
id: data[id_attr_name],
timestamp: data[timestamp_attr_name]
};
var projection = 'EPSG:4326';
if (angular.isUndefined(data[coordinates_attr_name])) return;
var coords = data[coordinates_attr_name].split(','); //Supposed ccordinates are lon, lat seperated by comma
attributes.geometry = new ol.geom.Point(ol.proj.transform([parseFloat(coords[1]), parseFloat(coords[0])], projection, 'EPSG:3857'));
for (var attr_i = 0; attr_i < measurements_attr_names.length; attr_i++) {
var t = parseFloat(data[measurements_attr_names[attr_i]]);
attributes[measurements_attr_names[attr_i]] = t.toFixed(2);
}
//Create feature if necessary. Set the attribute values for the feature
var feature = null;
if (location_feature_ids[data[id_attr_name]]) {
feature = location_feature_ids[data[id_attr_name]];
feature.setGeometry(attributes.geometry);
for (var atr in attributes) {
feature.set(atr, attributes[atr]);
}
} else {
feature = new ol.Feature(attributes);
location_layer.getSource().addFeatures([feature]);
location_feature_ids[data[id_attr_name]] = feature;
}
//Compute color
//feature.color = rainbow(30, Math.min(Math.max(max_temp, -15), 15) + 15, 0.7);
}
function processUnit(data) {
var attributes = {
id: data.id
};
var projection = 'EPSG:4326';
/*for(var meta_i; meta_i<attr.metadatas.length; meta_i++){
if(attr.metadatas[meta_i].name=="location")
projection = attr.metadatas[meta_i].value;
}*/
var coords = data.position.split(',');
attributes.geometry = new ol.geom.Point(ol.proj.transform([parseFloat(coords[1]), parseFloat(coords[0])], projection, 'EPSG:3857'));
attributes.timestamp = data.timestamp;
var feature = null;
if (location_feature_ids[data.id]) {
feature = location_feature_ids[data.id];
feature.setGeometry(attributes.geometry);
for (var atr in attributes) {
feature.set(atr, attributes[atr]);
}
} else {
feature = new ol.Feature(attributes);
feature.tags = {};
location_layer.getSource().addFeatures([feature]);
location_feature_ids[data.id] = feature;
}
}
function processTag(data) {
if (location_feature_ids[data.unit]) | }
module.value('config', {
default_layers: [
new ol.layer.Tile({
source: new ol.source.OSM(),
show_in_manager: true,
title: "Base layer",
base: true
}),
location_layer,
extent_layer
],
wirecloud_data_consumer: function(data) {
data = angular.fromJson(data);
if (console) console.log(data);
if (typeof data.type !== 'undefined') {
switch (data.type) {
/*case "Unit":
processUnit(data);
break;
case "Tag":
processTag(data);
break;*/
default: processObject(data);
}
} else {
process_object(data);
}
},
default_view: new ol.View({
center: ol.proj.transform([17.474129, 52.574000], 'EPSG:4326', 'EPSG:3857'), //Latitude longitude to Spherical Mercator
zoom: 4,
units: "m"
})
});
module.controller('Main', ['$scope', 'Core', 'hs.query.baseService', 'config',
function($scope, Core, QueryService, config) {
$scope.hsl_path = hsl_path; //Get this from hslayers.js file
$scope.Core = Core;
$scope.$on('query.dataUpdated', function(event) {
if (console) console.log('Attributes', QueryService.data.attributes, 'Groups', QueryService.data.groups);
});
if ( | {
location_feature_ids[data.unit].tags[data.id] = data;
var max_temp = -273.15;
var timestamp = "";
for (var tag in location_feature_ids[data.unit].tags) {
var t = parseFloat(location_feature_ids[data.unit].tags[tag].temperature);
max_temp = t > max_temp ? t : max_temp;
timestamp = location_feature_ids[data.unit].tags[tag].timestamp;
}
location_feature_ids[data.unit].color = rainbow(30, Math.min(Math.max(max_temp, -15), 15) + 15, 0.7);
location_feature_ids[data.unit].set("max_temp", max_temp.toFixed(2) + " °C");
location_feature_ids[data.unit].set("timestamp", timestamp);
}
| conditional_block |
main.py | 7, 13, 1, 5, 0, 15, 10, 3, 9, 8, 6],
[ 4, 2, 1, 11, 10, 13, 7, 8, 15, 9, 12, 5, 6, 3, 0, 14],
[11, 8, 12, 7, 1, 14, 2, 13, 6, 15, 0, 9, 10, 4, 5, 3]],
5:
[[12, 1, 10, 15, 9, 2, 6, 8, 0, 13, 3, 4, 14, 7, 5, 11],
[10, 15, 4, 2, 7, 12, 9, 5, 6, 1, 13, 14, 0, 11, 3, 8],
[ 9, 14, 15, 5, 2, 8, 12, 3, 7, 0, 4, 10, 1, 13, 11, 6],
[ 4, 3, 2, 12, 9, 5, 15, 10, 11, 14, 1, 7, 6, 0, 8, 13]],
6:
[[ 4, 11, 2, 14, 15, 0, 8, 13, 3, 12, 9, 7, 5, 10, 6, 1],
[13, 0, 11, 7, 4, 9, 1, 10, 14, 3, 5, 12, 2, 15, 8, 6],
[ 1, 4, 11, 13, 12, 3, 7, 14, 10, 15, 6, 8, 0, 5, 9, 2],
[ 6, 11, 13, 8, 1, 4, 10, 7, 9, 5, 0, 15, 14, 2, 3, 12]],
7:
[[13, 2, 8, 4, 6, 15, 11, 1, 10, 9, 3, 14, 5, 0, 12, 7],
[ 1, 15, 13, 8, 10, 3, 7, 4, 12, 5, 6, 11, 0, 14, 9, 2],
[ 7, 11, 4, 1, 9, 12, 14, 2, 0, 6, 10, 13, 15, 3, 5, 8],
[ 2, 1, 14, 7, 4, 10, 8, 13, 15, 12, 9, 0, 3, 5, 6, 11]]}
# Permutes the sbox value for the final 32 bit value that is then added on top of the L_n-1 value
P = [16, 7, 20, 21,
29, 12, 28, 17,
1, 15, 23, 26,
5, 18, 31, 10,
2, 8, 24, 14,
32, 27, 3, 9,
19, 13, 30, 6,
22, 11, 4, 25]
# A final permutation that is placed on the reverse concatenated R_16 L_16 bit string
IP_INVERSE = [40, 8, 48, 16, 56, 24, 64, 32,
39, 7, 47, 15, 55, 23, 63, 31,
38, 6, 46, 14, 54, 22, 62, 30,
37, 5, 45, 13, 53, 21, 61, 29,
36, 4, 44, 12, 52, 20, 60, 28,
35, 3, 43, 11, 51, 19, 59, 27,
34, 2, 42, 10, 50, 18, 58, 26,
33, 1, 41, 9, 49, 17, 57, 25]
####################################################### END OF CONSTANTS ########################################################################
####################################################### GENERIC FUNCTIONS ########################################################################
#convert string to a hexadecimal representation
def stringToHex(stringInput):
return ''.join(hex(ord(x))[2:] for x in stringInput)
#for a given string, convert it to hex and partition it in 64bit words and add padding if needed (Padding is just zeroes)
def getHexwords(msg):
"""break the ASCII message into a 64bit (16 hex bytes) words"""
hexwords = []
for i in range(0, len(msg), 8):
msgBlock = msg[i:i+8]
m = stringToHex(msgBlock)
hexwords.append(m)
last = hexwords[-1]
hexwords[-1] += ''.join(['0'] * (16-len(last)))
return hexwords
def stringToBits(string_input):
string_output = bitarray.bitarray()
string_output.frombytes(string_input.encode("utf-8"))
return string_output.to01()
def leftshift(array, value):
return array[value:] + array[:value]
def hexToBinary(hexstr):
return str(bin(int(hexstr, 16)))[2:].rjust(64, '0')
#take a list of hex Words and convert each of them to binary.
def getBinWords(hexWords):
binWords = []
for message in hexWords:
binWord = hexToBinary(message)
binWords.append(binWord)
return binWords
# XORs two bit values val1 and val2
def xor(val1, val2):
xoredBits = []
for i in range(len(val1)):
bit1 = int(val1[i])
bit2 = int(val2[i])
xorBit = int(bool(bit1) ^ bool(bit2))
xoredBits.append(xorBit)
return ''.join(map(str,xoredBits))
########################################################## KEY FUNCTIONS ##########################################################################
def generate_subkeys(key):
| key_bits = hexToBinary(key)
if len(key_bits) != 64:
print("Incorrect key provided.")
sys.exit()
key_up = []
for i in range (56):
key_up.append(key_bits[PC1[i]-1])
key_up = ''.join(key_up)
print("The initial key is {}".format(key_bits))
print("They permuted key is {}".format(key_up))
subkeys = []
left = key_up[:28]
right = key_up[28:]
print(left)
print(right)
for i in range(16):
left = leftshift(left, LSHIFT_MAP[i]) | identifier_body | |
main.py | 11, 14, 1, 7, 6, 0, 8, 13]],
6:
[[ 4, 11, 2, 14, 15, 0, 8, 13, 3, 12, 9, 7, 5, 10, 6, 1],
[13, 0, 11, 7, 4, 9, 1, 10, 14, 3, 5, 12, 2, 15, 8, 6],
[ 1, 4, 11, 13, 12, 3, 7, 14, 10, 15, 6, 8, 0, 5, 9, 2],
[ 6, 11, 13, 8, 1, 4, 10, 7, 9, 5, 0, 15, 14, 2, 3, 12]],
7:
[[13, 2, 8, 4, 6, 15, 11, 1, 10, 9, 3, 14, 5, 0, 12, 7],
[ 1, 15, 13, 8, 10, 3, 7, 4, 12, 5, 6, 11, 0, 14, 9, 2],
[ 7, 11, 4, 1, 9, 12, 14, 2, 0, 6, 10, 13, 15, 3, 5, 8],
[ 2, 1, 14, 7, 4, 10, 8, 13, 15, 12, 9, 0, 3, 5, 6, 11]]}
# Permutes the sbox value for the final 32 bit value that is then added on top of the L_n-1 value
P = [16, 7, 20, 21,
29, 12, 28, 17,
1, 15, 23, 26,
5, 18, 31, 10,
2, 8, 24, 14,
32, 27, 3, 9,
19, 13, 30, 6,
22, 11, 4, 25]
# A final permutation that is placed on the reverse concatenated R_16 L_16 bit string
IP_INVERSE = [40, 8, 48, 16, 56, 24, 64, 32,
39, 7, 47, 15, 55, 23, 63, 31,
38, 6, 46, 14, 54, 22, 62, 30,
37, 5, 45, 13, 53, 21, 61, 29,
36, 4, 44, 12, 52, 20, 60, 28,
35, 3, 43, 11, 51, 19, 59, 27,
34, 2, 42, 10, 50, 18, 58, 26,
33, 1, 41, 9, 49, 17, 57, 25]
####################################################### END OF CONSTANTS ########################################################################
####################################################### GENERIC FUNCTIONS ########################################################################
#convert string to a hexadecimal representation
def stringToHex(stringInput):
return ''.join(hex(ord(x))[2:] for x in stringInput)
#for a given string, convert it to hex and partition it in 64bit words and add padding if needed (Padding is just zeroes)
def getHexwords(msg):
"""break the ASCII message into a 64bit (16 hex bytes) words"""
hexwords = []
for i in range(0, len(msg), 8):
msgBlock = msg[i:i+8]
m = stringToHex(msgBlock)
hexwords.append(m)
last = hexwords[-1]
hexwords[-1] += ''.join(['0'] * (16-len(last)))
return hexwords
def stringToBits(string_input):
string_output = bitarray.bitarray()
string_output.frombytes(string_input.encode("utf-8"))
return string_output.to01()
def leftshift(array, value):
return array[value:] + array[:value]
def hexToBinary(hexstr):
return str(bin(int(hexstr, 16)))[2:].rjust(64, '0')
#take a list of hex Words and convert each of them to binary.
def getBinWords(hexWords):
binWords = []
for message in hexWords:
binWord = hexToBinary(message)
binWords.append(binWord)
return binWords
# XORs two bit values val1 and val2
def xor(val1, val2):
xoredBits = []
for i in range(len(val1)):
bit1 = int(val1[i])
bit2 = int(val2[i])
xorBit = int(bool(bit1) ^ bool(bit2))
xoredBits.append(xorBit)
return ''.join(map(str,xoredBits))
########################################################## KEY FUNCTIONS ##########################################################################
def generate_subkeys(key):
key_bits = hexToBinary(key)
if len(key_bits) != 64:
print("Incorrect key provided.")
sys.exit()
key_up = []
for i in range (56):
key_up.append(key_bits[PC1[i]-1])
key_up = ''.join(key_up)
print("The initial key is {}".format(key_bits))
print("They permuted key is {}".format(key_up))
subkeys = []
left = key_up[:28]
right = key_up[28:]
print(left)
print(right)
for i in range(16):
left = leftshift(left, LSHIFT_MAP[i])
right = leftshift(right, LSHIFT_MAP[i])
subkey = left + right
subkey_final = []
for j in range(48):
subkey_final.append(subkey[PC2[j]-1])
subkeys.append("".join(subkey_final))
for i in range (16):
print("Subkey #{} is {}".format(i+1,subkeys[i]))
print("the length is : {}".format(len(subkeys[0])))
print("Keys have been generated.")
return subkeys
############################################################### END OF KEY FUNCTIONS #########################################################
############################################################### ENCODING STARTS HERE #########################################################
#take a list of binary words and permute them according to IP. Returns a list of binaries as strings.
def permute(binMessageList):
permutedList = []
temp = []
for message in binMessageList:
for elem in IP:
temp.append(message[int(elem)-1])
strTemp = ''.join(map(str,temp))
permutedList.append(strTemp)
return permutedList
# Run 8 rounds of S-box with the given 48 bit value
def sbox_substitution(mixed_R):
reducedR = []
splitList = []
bitList = list(mixed_R)
#create 8 lists of 6 elems
splitList = [bitList[i:i + 6] for i in range(0, len(bitList), 6)]
for i in range(8):
| row = int(splitList[i][0] + splitList[i][-1],2)
col = int(splitList[i][1] + splitList[i][2] + splitList[i][3] + splitList[i][4],2)
newVal = SBOXES[int(i)][row][col]
bits = str(format(newVal,"b")).zfill(4)
reducedR.append(bits) | conditional_block | |
main.py | 4, 5,
4, 5, 6, 7, 8, 9,
8, 9, 10, 11, 12, 13,
12, 13, 14, 15, 16, 17,
16, 17, 18, 19, 20, 21,
20, 21, 22, 23, 24, 25,
24, 25, 26, 27, 28, 29,
28, 29, 30, 31, 32, 1]
# Substituting back to get a 32 bit value
# Done by splitting the 48 bit into 6 bit segments,
# The first and last bit are considered the row number
# The middle 4 bits are the column number
SBOXES = {0:
[[14, 4, 13, 1, 2, 15, 11, 8, 3, 10, 6, 12, 5, 9, 0, 7],
[ 0, 15, 7, 4, 14, 2, 13, 1, 10, 6, 12, 11, 9, 5, 3, 8],
[ 4, 1, 14, 8, 13, 6, 2, 11, 15, 12, 9, 7, 3, 10, 5, 0],
[15, 12, 8, 2, 4, 9, 1, 7, 5, 11, 3, 14, 10, 0, 6, 13]],
1:
[[15, 1, 8, 14, 6, 11, 3, 4, 9, 7, 2, 13, 12, 0, 5, 10],
[ 3, 13, 4, 7, 15, 2, 8, 14, 12, 0, 1, 10, 6, 9, 11, 5],
[ 0, 14, 7, 11, 10, 4, 13, 1, 5, 8, 12, 6, 9, 3, 2, 15], | [13, 8, 10, 1, 3, 15, 4, 2, 11, 6, 7, 12, 0, 5, 14, 9]],
2:
[[10, 0, 9, 14, 6, 3, 15, 5, 1, 13, 12, 7, 11, 4, 2, 8],
[13, 7, 0, 9, 3, 4, 6, 10, 2, 8, 5, 14, 12, 11, 15, 1],
[13, 6, 4, 9, 8, 15, 3, 0, 11, 1, 2, 12, 5, 10, 14, 7],
[ 1, 10, 13, 0, 6, 9, 8, 7, 4, 15, 14, 3, 11, 5, 2, 12]],
3:
[[ 7, 13, 14, 3, 0, 6, 9, 10, 1, 2, 8, 5, 11, 12, 4, 15],
[13, 8, 11, 5, 6, 15, 0, 3, 4, 7, 2, 12, 1, 10, 14, 9],
[10, 6, 9, 0, 12, 11, 7, 13, 15, 1, 3, 14, 5, 2, 8, 4],
[ 3, 15, 0, 6, 10, 1, 13, 8, 9, 4, 5, 11, 12, 7, 2, 14]],
4:
[[ 2, 12, 4, 1, 7, 10, 11, 6, 8, 5, 3, 15, 13, 0, 14, 9],
[14, 11, 2, 12, 4, 7, 13, 1, 5, 0, 15, 10, 3, 9, 8, 6],
[ 4, 2, 1, 11, 10, 13, 7, 8, 15, 9, 12, 5, 6, 3, 0, 14],
[11, 8, 12, 7, 1, 14, 2, 13, 6, 15, 0, 9, 10, 4, 5, 3]],
5:
[[12, 1, 10, 15, 9, 2, 6, 8, 0, 13, 3, 4, 14, 7, 5, 11],
[10, 15, 4, 2, 7, 12, 9, 5, 6, 1, 13, 14, 0, 11, 3, 8],
[ 9, 14, 15, 5, 2, 8, 12, 3, 7, 0, 4, 10, 1, 13, 11, 6],
[ 4, 3, 2, 12, 9, 5, 15, 10, 11, 14, 1, 7, 6, 0, 8, 13]],
6:
[[ 4, 11, 2, 14, 15, 0, 8, 13, 3, 12, 9, 7, 5, 10, 6, 1],
[13, 0, 11, 7, 4, 9, 1, 10, 14, 3, 5, 12, 2, 15, 8, 6],
[ 1, 4, 11, 13, 12, 3, 7, 14, 10, 15, 6, 8, 0, 5, 9, 2],
[ | random_line_split | |
main.py | [ 2, 1, 14, 7, 4, 10, 8, 13, 15, 12, 9, 0, 3, 5, 6, 11]]}
# Permutes the sbox value for the final 32 bit value that is then added on top of the L_n-1 value
P = [16, 7, 20, 21,
29, 12, 28, 17,
1, 15, 23, 26,
5, 18, 31, 10,
2, 8, 24, 14,
32, 27, 3, 9,
19, 13, 30, 6,
22, 11, 4, 25]
# A final permutation that is placed on the reverse concatenated R_16 L_16 bit string
IP_INVERSE = [40, 8, 48, 16, 56, 24, 64, 32,
39, 7, 47, 15, 55, 23, 63, 31,
38, 6, 46, 14, 54, 22, 62, 30,
37, 5, 45, 13, 53, 21, 61, 29,
36, 4, 44, 12, 52, 20, 60, 28,
35, 3, 43, 11, 51, 19, 59, 27,
34, 2, 42, 10, 50, 18, 58, 26,
33, 1, 41, 9, 49, 17, 57, 25]
####################################################### END OF CONSTANTS ########################################################################
####################################################### GENERIC FUNCTIONS ########################################################################
#convert string to a hexadecimal representation
def stringToHex(stringInput):
return ''.join(hex(ord(x))[2:] for x in stringInput)
#for a given string, convert it to hex and partition it in 64bit words and add padding if needed (Padding is just zeroes)
def getHexwords(msg):
"""break the ASCII message into a 64bit (16 hex bytes) words"""
hexwords = []
for i in range(0, len(msg), 8):
msgBlock = msg[i:i+8]
m = stringToHex(msgBlock)
hexwords.append(m)
last = hexwords[-1]
hexwords[-1] += ''.join(['0'] * (16-len(last)))
return hexwords
def stringToBits(string_input):
string_output = bitarray.bitarray()
string_output.frombytes(string_input.encode("utf-8"))
return string_output.to01()
def leftshift(array, value):
return array[value:] + array[:value]
def hexToBinary(hexstr):
return str(bin(int(hexstr, 16)))[2:].rjust(64, '0')
#take a list of hex Words and convert each of them to binary.
def getBinWords(hexWords):
binWords = []
for message in hexWords:
binWord = hexToBinary(message)
binWords.append(binWord)
return binWords
# XORs two bit values val1 and val2
def xor(val1, val2):
xoredBits = []
for i in range(len(val1)):
bit1 = int(val1[i])
bit2 = int(val2[i])
xorBit = int(bool(bit1) ^ bool(bit2))
xoredBits.append(xorBit)
return ''.join(map(str,xoredBits))
########################################################## KEY FUNCTIONS ##########################################################################
def generate_subkeys(key):
key_bits = hexToBinary(key)
if len(key_bits) != 64:
print("Incorrect key provided.")
sys.exit()
key_up = []
for i in range (56):
key_up.append(key_bits[PC1[i]-1])
key_up = ''.join(key_up)
print("The initial key is {}".format(key_bits))
print("They permuted key is {}".format(key_up))
subkeys = []
left = key_up[:28]
right = key_up[28:]
print(left)
print(right)
for i in range(16):
left = leftshift(left, LSHIFT_MAP[i])
right = leftshift(right, LSHIFT_MAP[i])
subkey = left + right
subkey_final = []
for j in range(48):
subkey_final.append(subkey[PC2[j]-1])
subkeys.append("".join(subkey_final))
for i in range (16):
print("Subkey #{} is {}".format(i+1,subkeys[i]))
print("the length is : {}".format(len(subkeys[0])))
print("Keys have been generated.")
return subkeys
############################################################### END OF KEY FUNCTIONS #########################################################
############################################################### ENCODING STARTS HERE #########################################################
# Take a list of binary words and permute them according to IP.
# Returns a list of binaries as strings.
def permute(binMessageList):
    """Apply the DES initial permutation (IP) to each 64-bit binary word.

    :param binMessageList: list of 64-character bit strings
    :return: new list of IP-permuted 64-character bit strings

    Bug fixed: the scratch list used to build each permuted word was shared
    across iterations and never cleared, so every word after the first
    accumulated the bits of all preceding words.
    """
    permutedList = []
    for message in binMessageList:
        # IP positions are 1-based, hence the -1. A fresh string is built
        # per message (the old code reused one ever-growing list).
        permutedList.append(''.join(message[int(elem) - 1] for elem in IP))
    return permutedList
# Run 8 rounds of S-box substitution with the given 48-bit value.
def sbox_substitution(mixed_R):
    """Compress a 48-bit value to 32 bits via the eight DES S-boxes.

    Each 6-bit chunk selects one S-box entry: the outer bits (1st and 6th)
    form the row, the inner four bits form the column; the looked-up value
    is emitted as 4 bits.
    """
    reduced = []
    for box in range(8):
        chunk = mixed_R[6 * box: 6 * box + 6]
        row = int(chunk[0] + chunk[5], 2)
        col = int(''.join(chunk[1:5]), 2)
        reduced.append(format(SBOXES[box][row][col], '04b'))
    return ''.join(reduced)
# Input an individual 64-bit message to get encrypted.
def message_encryption(message, subkeys):
    """Encrypt one 64-bit (already IP-permuted) binary word with 16 DES rounds.

    :param message: 64-character bit string
    :param subkeys: list of 16 round subkeys (48-character bit strings)
    :return: 64-character encrypted bit string (after IP^-1)

    Bugs fixed relative to the original:
    * each round now uses its own round key ``subkeys[i]`` (the old code
      used ``subkeys[15]`` in every round),
    * the final inverse permutation is applied to R16 + L16 (the old code
      permuted the *untouched input message*, discarding all 16 rounds —
      the intended line had been commented out).
    """
    temp_msg = message
    print("the full message is : {}".format(temp_msg))
    print("The message is {} -- {}".format(temp_msg[:32], temp_msg[32:]))
    L_n = temp_msg[:32]
    R_n = temp_msg[32:]
    L_n1 = temp_msg[:32]
    R_n1 = temp_msg[32:]
    print("L_0 is : {}".format(L_n))
    print("R_0 is : {}".format(R_n))
    for i in range(16):
        # Feistel round: L_i = R_{i-1}; R_i = L_{i-1} XOR f(R_{i-1}, K_i).
        L_n = R_n1
        print("L_{} is : {}".format(i + 1, L_n))
        # Expansion E: 32 -> 48 bits (1-based positions).
        expanded_R = [L_n[E[j] - 1] for j in range(48)]
        # FIX: round key i instead of the hard-coded subkeys[15].
        mixed_R = xor(subkeys[i], expanded_R)
        reduced_R = sbox_substitution(mixed_R)
        # Straight permutation P: 32 -> 32 bits.
        permuted_R = [reduced_R[P[k] - 1] for k in range(32)]
        R_n = xor(L_n1, permuted_R)
        print("R_{} is : {}".format(i + 1, R_n))
        L_n1 = L_n
        R_n1 = R_n
    # FIX: the pre-output block is R16 followed by L16 (final swap), then
    # the inverse initial permutation IP^-1 is applied to it.
    norm = R_n + L_n
    return ''.join(norm[IP_INVERSE[i] - 1] for i in range(64))
def DESencryption(message):
    """DES-encrypt an ASCII message with the fixed demo key; return hex.

    Pipeline: text -> 64-bit hex words -> binary words -> initial
    permutation -> 16-round encryption per word -> concatenated bits as a
    '0x...'-prefixed hex string.

    :param message: plaintext string
    :return: hex string (``hex()`` output, '0x' prefix included)

    Bug fixed: the result of ``permute()`` was previously discarded, so the
    un-permuted words were encrypted; the IP-permuted words are now used.
    """
    messages = getBinWords(getHexwords(message))
    print("The message in hex is : {}".format(getHexwords(message)))
    # Fixed 64-bit demo key ("Testing1" in ASCII hex).
    subkeys = generate_subkeys("54657374696e6731")
    permuted = permute(messages)
    encrypted_messages = [message_encryption(msg, subkeys) for msg in permuted]
    return hex(int(''.join(encrypted_messages), 2))
############################################################ END OF ENCODING #############################################################
def | test | identifier_name | |
data.go | Payload.FPort != nil && *ctx.MACPayload.FPort > 0 {
return publishDataUp(ctx.NodeSession, ctx.RXPacket, *ctx.MACPayload)
}
return nil
}
// handleChannelReconfiguration runs pending channel reconfiguration for the
// node's session. Errors are logged as warnings and never abort the uplink
// flow (the function always returns nil).
func handleChannelReconfiguration(ctx *DataUpContext) error {
	// handle channel configuration
	// note that this must come before ADR!
	if err := channels.HandleChannelReconfigure(ctx.NodeSession, ctx.RXPacket); err != nil {
		log.WithFields(log.Fields{
			"dev_eui": ctx.NodeSession.DevEUI,
		}).Warningf("handle channel reconfigure error: %s", err)
	}
	return nil
}
// handleADR runs adaptive-data-rate handling for this uplink. It may mutate
// the node-session (passed by pointer), so it must execute before the
// session is persisted. ADR errors are logged as warnings and never abort
// the uplink flow (the function always returns nil).
func handleADR(ctx *DataUpContext) error {
	// handle ADR (should be executed before saving the node-session)
	if err := adr.HandleADR(&ctx.NodeSession, ctx.RXPacket, ctx.MACPayload.FHDR.FCnt); err != nil {
		log.WithFields(log.Fields{
			"dev_eui": ctx.NodeSession.DevEUI,
			"fcnt_up": ctx.MACPayload.FHDR.FCnt,
		}).Warningf("handle adr error: %s", err)
	}
	return nil
}
// setLastRXInfoSet stores this uplink's rx-info set on the node-session
// (presumably used later to pick a gateway for downlink — confirm against
// the downlink flow).
func setLastRXInfoSet(ctx *DataUpContext) error {
	// update the RXInfoSet
	ctx.NodeSession.LastRXInfoSet = ctx.RXPacket.RXInfoSet
	return nil
}
// syncUplinkFCnt sets the session's expected next uplink frame-counter to
// the counter of the received frame plus one.
func syncUplinkFCnt(ctx *DataUpContext) error {
	// sync counter with that of the device + 1
	ctx.NodeSession.FCntUp = ctx.MACPayload.FHDR.FCnt + 1
	return nil
}
// saveNodeSession persists the (possibly mutated) node-session to Redis.
func saveNodeSession(ctx *DataUpContext) error {
	// save node-session
	return session.SaveNodeSession(common.RedisPool, ctx.NodeSession)
}
// handleUplinkACK notifies the application-server that the device
// acknowledged a downlink. It is a no-op unless the uplink frame has its
// ACK bit set; the ACK is reported against the session's current downlink
// frame-counter (FCntDown).
func handleUplinkACK(ctx *DataUpContext) error {
	// TODO: only log in case of error?
	if !ctx.MACPayload.FHDR.FCtrl.ACK {
		return nil
	}
	_, err := common.Application.HandleDataDownACK(context.Background(), &as.HandleDataDownACKRequest{
		AppEUI: ctx.NodeSession.AppEUI[:],
		DevEUI: ctx.NodeSession.DevEUI[:],
		FCnt:   ctx.NodeSession.FCntDown,
	})
	if err != nil {
		return errors.Wrap(err, "error publish downlink data ack to application-server")
	}
	return nil
}
// handleDownlink runs the downlink response flow for this uplink (e.g. the
// ACK for a confirmed uplink) after sleeping the configured delay. Note the
// sleep blocks the calling goroutine for common.GetDownlinkDataDelay.
func handleDownlink(ctx *DataUpContext) error {
	// handle downlink (ACK)
	time.Sleep(common.GetDownlinkDataDelay)
	if err := downlink.Flow.RunUplinkResponse(
		ctx.NodeSession,
		ctx.MACPayload.FHDR.FCtrl.ADR,
		ctx.MACPayload.FHDR.FCtrl.ADRACKReq,
		// true when the device sent a confirmed uplink and expects an ACK
		ctx.RXPacket.PHYPayload.MHDR.MType == lorawan.ConfirmedDataUp,
	); err != nil {
		return errors.Wrap(err, "run uplink response flow error")
	}
	return nil
}
// sendRXInfoPayload sends the rx and tx meta-data to the network controller.
//
// It returns an error when the MAC payload has an unexpected type, when the
// rx-info set is empty, or when publishing to the network-controller fails.
func sendRXInfoPayload(ns session.NodeSession, rxPacket models.RXPacket) error {
	macPL, ok := rxPacket.PHYPayload.MACPayload.(*lorawan.MACPayload)
	if !ok {
		return fmt.Errorf("expected *lorawan.MACPayload, got: %T", rxPacket.PHYPayload.MACPayload)
	}
	// Robustness fix: the TXInfo below reads RXInfoSet[0] and would panic
	// on an empty set.
	if len(rxPacket.RXInfoSet) == 0 {
		return fmt.Errorf("rx-info set may not be empty")
	}
	rxInfoReq := nc.HandleRXInfoRequest{
		DevEUI: ns.DevEUI[:],
		TxInfo: &nc.TXInfo{
			Frequency: int64(rxPacket.RXInfoSet[0].Frequency),
			Adr:       macPL.FHDR.FCtrl.ADR,
			CodeRate:  rxPacket.RXInfoSet[0].CodeRate,
			DataRate: &nc.DataRate{
				Modulation:   string(rxPacket.RXInfoSet[0].DataRate.Modulation),
				BandWidth:    uint32(rxPacket.RXInfoSet[0].DataRate.Bandwidth),
				SpreadFactor: uint32(rxPacket.RXInfoSet[0].DataRate.SpreadFactor),
				Bitrate:      uint32(rxPacket.RXInfoSet[0].DataRate.BitRate),
			},
		},
	}
	for _, rxInfo := range rxPacket.RXInfoSet {
		// make sure we have a copy of the MAC byte slice, else every RxInfo
		// slice item will get the same Mac
		mac := make([]byte, 8)
		copy(mac, rxInfo.MAC[:])
		rxInfoReq.RxInfo = append(rxInfoReq.RxInfo, &nc.RXInfo{
			Mac:     mac,
			Time:    rxInfo.Time.Format(time.RFC3339Nano),
			Rssi:    int32(rxInfo.RSSI),
			LoRaSNR: rxInfo.LoRaSNR,
		})
	}
	_, err := common.Controller.HandleRXInfo(context.Background(), &rxInfoReq)
	if err != nil {
		return fmt.Errorf("publish rxinfo to network-controller error: %s", err)
	}
	log.WithFields(log.Fields{
		"dev_eui": ns.DevEUI,
	}).Info("rx info sent to network-controller")
	return nil
}
// publishDataUp forwards an uplink data frame to the application-server,
// enriched with tx meta-data, per-gateway rx meta-data (name/location when
// the gateway is known) and, when present, the FPort and the (single)
// FRMPayload data block.
//
// A gateway-lookup failure is only logged; the frame is still published.
func publishDataUp(ns session.NodeSession, rxPacket models.RXPacket, macPL lorawan.MACPayload) error {
	// Robustness fix: the TXInfo below reads RXInfoSet[0] and would panic
	// on an empty set.
	if len(rxPacket.RXInfoSet) == 0 {
		return fmt.Errorf("rx-info set may not be empty")
	}
	publishDataUpReq := as.HandleDataUpRequest{
		AppEUI: ns.AppEUI[:],
		DevEUI: ns.DevEUI[:],
		FCnt:   macPL.FHDR.FCnt,
		TxInfo: &as.TXInfo{
			Frequency: int64(rxPacket.RXInfoSet[0].Frequency),
			Adr:       macPL.FHDR.FCtrl.ADR,
			CodeRate:  rxPacket.RXInfoSet[0].CodeRate,
			DataRate: &as.DataRate{
				Modulation:   string(rxPacket.RXInfoSet[0].DataRate.Modulation),
				BandWidth:    uint32(rxPacket.RXInfoSet[0].DataRate.Bandwidth),
				SpreadFactor: uint32(rxPacket.RXInfoSet[0].DataRate.SpreadFactor),
				Bitrate:      uint32(rxPacket.RXInfoSet[0].DataRate.BitRate),
			},
		},
	}
	var macs []lorawan.EUI64
	for i := range rxPacket.RXInfoSet {
		macs = append(macs, rxPacket.RXInfoSet[i].MAC)
	}
	// get gateway info; on failure fall back to an empty map so rx-info is
	// still published (without gateway name/location).
	gws, err := gateway.GetGatewaysForMACs(common.DB, macs)
	if err != nil {
		log.WithField("macs", macs).Warningf("get gateways for macs error: %s", err)
		gws = make(map[lorawan.EUI64]gateway.Gateway)
	}
	for _, rxInfo := range rxPacket.RXInfoSet {
		// make sure we have a copy of the MAC byte slice, else every RxInfo
		// slice item will get the same Mac
		mac := make([]byte, 8)
		copy(mac, rxInfo.MAC[:])
		asRxInfo := as.RXInfo{
			Mac:     mac,
			Time:    rxInfo.Time.Format(time.RFC3339Nano),
			Rssi:    int32(rxInfo.RSSI),
			LoRaSNR: rxInfo.LoRaSNR,
		}
		if gw, ok := gws[rxInfo.MAC]; ok {
			asRxInfo.Name = gw.Name
			asRxInfo.Latitude = gw.Location.Latitude
			asRxInfo.Longitude = gw.Location.Longitude
			asRxInfo.Altitude = gw.Altitude
		}
		publishDataUpReq.RxInfo = append(publishDataUpReq.RxInfo, &asRxInfo)
	}
	if macPL.FPort != nil {
		publishDataUpReq.FPort = uint32(*macPL.FPort)
	}
	if len(macPL.FRMPayload) == 1 {
		dataPL, ok := macPL.FRMPayload[0].(*lorawan.DataPayload)
		if !ok {
			return fmt.Errorf("expected type *lorawan.DataPayload, got %T", macPL.FRMPayload[0])
		}
		publishDataUpReq.Data = dataPL.Bytes
	}
	if _, err := common.Application.HandleDataUp(context.Background(), &publishDataUpReq); err != nil {
		return fmt.Errorf("publish data up to application-server error: %s", err)
	}
	return nil
}
func handleUplinkMACCommands(ns *session.NodeSession, frmPayload bool, commands []lorawan.MACCommand, rxInfoSet models.RXInfoSet) error | {
var cids []lorawan.CID
blocks := make(map[lorawan.CID]maccommand.Block)
// group mac-commands by CID
for _, cmd := range commands {
block, ok := blocks[cmd.CID]
if !ok {
block = maccommand.Block{
CID: cmd.CID,
FRMPayload: frmPayload,
}
cids = append(cids, cmd.CID)
}
block.MACCommands = append(block.MACCommands, cmd)
blocks[cmd.CID] = block
}
for _, cid := range cids {
block := blocks[cid] | identifier_body | |
data.go | }
func decryptFRMPayloadMACCommands(ctx *DataUpContext) error {
// only decrypt when FPort is equal to 0
if ctx.MACPayload.FPort != nil && *ctx.MACPayload.FPort == 0 {
if err := ctx.RXPacket.PHYPayload.DecryptFRMPayload(ctx.NodeSession.NwkSKey); err != nil {
return errors.Wrap(err, "decrypt FRMPayload error")
}
}
return nil
}
func sendRXInfoToNetworkController(ctx *DataUpContext) error {
// TODO: change so that errors get logged but not returned
if err := sendRXInfoPayload(ctx.NodeSession, ctx.RXPacket); err != nil {
return errors.Wrap(err, "send rx-info to network-controller error")
}
return nil
}
func handleFOptsMACCommands(ctx *DataUpContext) error {
if len(ctx.MACPayload.FHDR.FOpts) > 0 {
if err := handleUplinkMACCommands(&ctx.NodeSession, false, ctx.MACPayload.FHDR.FOpts, ctx.RXPacket.RXInfoSet); err != nil {
log.WithFields(log.Fields{
"dev_eui": ctx.NodeSession.DevEUI,
"fopts": ctx.MACPayload.FHDR.FOpts,
}).Errorf("handle FOpts mac commands error: %s", err)
}
}
return nil
}
func handleFRMPayloadMACCommands(ctx *DataUpContext) error {
if ctx.MACPayload.FPort != nil && *ctx.MACPayload.FPort == 0 {
if len(ctx.MACPayload.FRMPayload) == 0 {
return errors.New("expected mac commands, but FRMPayload is empty (FPort=0)")
}
var commands []lorawan.MACCommand
for _, pl := range ctx.MACPayload.FRMPayload {
cmd, ok := pl.(*lorawan.MACCommand)
if !ok {
return fmt.Errorf("expected MACPayload, but got %T", ctx.MACPayload.FRMPayload)
}
commands = append(commands, *cmd)
}
if err := handleUplinkMACCommands(&ctx.NodeSession, true, commands, ctx.RXPacket.RXInfoSet); err != nil {
log.WithFields(log.Fields{
"dev_eui": ctx.NodeSession.DevEUI,
"commands": commands,
}).Errorf("handle FRMPayload mac commands error: %s", err)
}
}
return nil
}
func sendFRMPayloadToApplicationServer(ctx *DataUpContext) error {
if ctx.MACPayload.FPort != nil && *ctx.MACPayload.FPort > 0 {
return publishDataUp(ctx.NodeSession, ctx.RXPacket, *ctx.MACPayload)
}
return nil
}
func handleChannelReconfiguration(ctx *DataUpContext) error {
// handle channel configuration
// note that this must come before ADR!
if err := channels.HandleChannelReconfigure(ctx.NodeSession, ctx.RXPacket); err != nil {
log.WithFields(log.Fields{
"dev_eui": ctx.NodeSession.DevEUI,
}).Warningf("handle channel reconfigure error: %s", err)
}
return nil
}
func handleADR(ctx *DataUpContext) error {
// handle ADR (should be executed before saving the node-session)
if err := adr.HandleADR(&ctx.NodeSession, ctx.RXPacket, ctx.MACPayload.FHDR.FCnt); err != nil {
log.WithFields(log.Fields{
"dev_eui": ctx.NodeSession.DevEUI,
"fcnt_up": ctx.MACPayload.FHDR.FCnt,
}).Warningf("handle adr error: %s", err)
}
return nil
}
func setLastRXInfoSet(ctx *DataUpContext) error {
// update the RXInfoSet
ctx.NodeSession.LastRXInfoSet = ctx.RXPacket.RXInfoSet
return nil
}
func syncUplinkFCnt(ctx *DataUpContext) error {
// sync counter with that of the device + 1
ctx.NodeSession.FCntUp = ctx.MACPayload.FHDR.FCnt + 1
return nil
}
func saveNodeSession(ctx *DataUpContext) error {
// save node-session
return session.SaveNodeSession(common.RedisPool, ctx.NodeSession)
}
func handleUplinkACK(ctx *DataUpContext) error {
// TODO: only log in case of error?
if !ctx.MACPayload.FHDR.FCtrl.ACK {
return nil
}
_, err := common.Application.HandleDataDownACK(context.Background(), &as.HandleDataDownACKRequest{
AppEUI: ctx.NodeSession.AppEUI[:],
DevEUI: ctx.NodeSession.DevEUI[:],
FCnt: ctx.NodeSession.FCntDown,
})
if err != nil {
return errors.Wrap(err, "error publish downlink data ack to application-server")
}
return nil
}
func handleDownlink(ctx *DataUpContext) error {
// handle downlink (ACK)
time.Sleep(common.GetDownlinkDataDelay)
if err := downlink.Flow.RunUplinkResponse(
ctx.NodeSession,
ctx.MACPayload.FHDR.FCtrl.ADR,
ctx.MACPayload.FHDR.FCtrl.ADRACKReq,
ctx.RXPacket.PHYPayload.MHDR.MType == lorawan.ConfirmedDataUp,
); err != nil {
return errors.Wrap(err, "run uplink response flow error")
}
return nil
}
// sendRXInfoPayload sends the rx and tx meta-data to the network controller.
func sendRXInfoPayload(ns session.NodeSession, rxPacket models.RXPacket) error {
macPL, ok := rxPacket.PHYPayload.MACPayload.(*lorawan.MACPayload)
if !ok {
return fmt.Errorf("expected *lorawan.MACPayload, got: %T", rxPacket.PHYPayload.MACPayload)
}
rxInfoReq := nc.HandleRXInfoRequest{
DevEUI: ns.DevEUI[:],
TxInfo: &nc.TXInfo{
Frequency: int64(rxPacket.RXInfoSet[0].Frequency),
Adr: macPL.FHDR.FCtrl.ADR,
CodeRate: rxPacket.RXInfoSet[0].CodeRate,
DataRate: &nc.DataRate{
Modulation: string(rxPacket.RXInfoSet[0].DataRate.Modulation),
BandWidth: uint32(rxPacket.RXInfoSet[0].DataRate.Bandwidth),
SpreadFactor: uint32(rxPacket.RXInfoSet[0].DataRate.SpreadFactor),
Bitrate: uint32(rxPacket.RXInfoSet[0].DataRate.BitRate),
},
},
}
for _, rxInfo := range rxPacket.RXInfoSet {
// make sure we have a copy of the MAC byte slice, else every RxInfo
// slice item will get the same Mac
mac := make([]byte, 8) | Time: rxInfo.Time.Format(time.RFC3339Nano),
Rssi: int32(rxInfo.RSSI),
LoRaSNR: rxInfo.LoRaSNR,
})
}
_, err := common.Controller.HandleRXInfo(context.Background(), &rxInfoReq)
if err != nil {
return fmt.Errorf("publish rxinfo to network-controller error: %s", err)
}
log.WithFields(log.Fields{
"dev_eui": ns.DevEUI,
}).Info("rx info sent to network-controller")
return nil
}
func publishDataUp(ns session.NodeSession, rxPacket models.RXPacket, macPL lorawan.MACPayload) error {
publishDataUpReq := as.HandleDataUpRequest{
AppEUI: ns.AppEUI[:],
DevEUI: ns.DevEUI[:],
FCnt: macPL.FHDR.FCnt,
TxInfo: &as.TXInfo{
Frequency: int64(rxPacket.RXInfoSet[0].Frequency),
Adr: macPL.FHDR.FCtrl.ADR,
CodeRate: rxPacket.RXInfoSet[0].CodeRate,
DataRate: &as.DataRate{
Modulation: string(rxPacket.RXInfoSet[0].DataRate.Modulation),
BandWidth: uint32(rxPacket.RXInfoSet[0].DataRate.Bandwidth),
SpreadFactor: uint32(rxPacket.RXInfoSet[0].DataRate.SpreadFactor),
Bitrate: uint32(rxPacket.RXInfoSet[0].DataRate.BitRate),
},
},
}
var macs []lorawan.EUI64
for i := range rxPacket.RXInfoSet {
macs = append(macs, rxPacket.RXInfoSet[i].MAC)
}
// get gateway info
gws, err := gateway.GetGatewaysForMACs(common.DB, macs)
if err != nil {
log.WithField("macs", macs).Warningf("get gateways for macs error: %s", err)
gws = make | copy(mac, rxInfo.MAC[:])
rxInfoReq.RxInfo = append(rxInfoReq.RxInfo, &nc.RXInfo{
Mac: mac, | random_line_split |
data.go | }
func decryptFRMPayloadMACCommands(ctx *DataUpContext) error {
// only decrypt when FPort is equal to 0
if ctx.MACPayload.FPort != nil && *ctx.MACPayload.FPort == 0 {
if err := ctx.RXPacket.PHYPayload.DecryptFRMPayload(ctx.NodeSession.NwkSKey); err != nil {
return errors.Wrap(err, "decrypt FRMPayload error")
}
}
return nil
}
func sendRXInfoToNetworkController(ctx *DataUpContext) error {
// TODO: change so that errors get logged but not returned
if err := sendRXInfoPayload(ctx.NodeSession, ctx.RXPacket); err != nil {
return errors.Wrap(err, "send rx-info to network-controller error")
}
return nil
}
func handleFOptsMACCommands(ctx *DataUpContext) error {
if len(ctx.MACPayload.FHDR.FOpts) > 0 {
if err := handleUplinkMACCommands(&ctx.NodeSession, false, ctx.MACPayload.FHDR.FOpts, ctx.RXPacket.RXInfoSet); err != nil {
log.WithFields(log.Fields{
"dev_eui": ctx.NodeSession.DevEUI,
"fopts": ctx.MACPayload.FHDR.FOpts,
}).Errorf("handle FOpts mac commands error: %s", err)
}
}
return nil
}
func handleFRMPayloadMACCommands(ctx *DataUpContext) error {
if ctx.MACPayload.FPort != nil && *ctx.MACPayload.FPort == 0 {
if len(ctx.MACPayload.FRMPayload) == 0 {
return errors.New("expected mac commands, but FRMPayload is empty (FPort=0)")
}
var commands []lorawan.MACCommand
for _, pl := range ctx.MACPayload.FRMPayload {
cmd, ok := pl.(*lorawan.MACCommand)
if !ok {
return fmt.Errorf("expected MACPayload, but got %T", ctx.MACPayload.FRMPayload)
}
commands = append(commands, *cmd)
}
if err := handleUplinkMACCommands(&ctx.NodeSession, true, commands, ctx.RXPacket.RXInfoSet); err != nil {
log.WithFields(log.Fields{
"dev_eui": ctx.NodeSession.DevEUI,
"commands": commands,
}).Errorf("handle FRMPayload mac commands error: %s", err)
}
}
return nil
}
func sendFRMPayloadToApplicationServer(ctx *DataUpContext) error {
if ctx.MACPayload.FPort != nil && *ctx.MACPayload.FPort > 0 {
return publishDataUp(ctx.NodeSession, ctx.RXPacket, *ctx.MACPayload)
}
return nil
}
func handleChannelReconfiguration(ctx *DataUpContext) error {
// handle channel configuration
// note that this must come before ADR!
if err := channels.HandleChannelReconfigure(ctx.NodeSession, ctx.RXPacket); err != nil {
log.WithFields(log.Fields{
"dev_eui": ctx.NodeSession.DevEUI,
}).Warningf("handle channel reconfigure error: %s", err)
}
return nil
}
func handleADR(ctx *DataUpContext) error {
// handle ADR (should be executed before saving the node-session)
if err := adr.HandleADR(&ctx.NodeSession, ctx.RXPacket, ctx.MACPayload.FHDR.FCnt); err != nil {
log.WithFields(log.Fields{
"dev_eui": ctx.NodeSession.DevEUI,
"fcnt_up": ctx.MACPayload.FHDR.FCnt,
}).Warningf("handle adr error: %s", err)
}
return nil
}
func setLastRXInfoSet(ctx *DataUpContext) error {
// update the RXInfoSet
ctx.NodeSession.LastRXInfoSet = ctx.RXPacket.RXInfoSet
return nil
}
func syncUplinkFCnt(ctx *DataUpContext) error {
// sync counter with that of the device + 1
ctx.NodeSession.FCntUp = ctx.MACPayload.FHDR.FCnt + 1
return nil
}
func saveNodeSession(ctx *DataUpContext) error {
// save node-session
return session.SaveNodeSession(common.RedisPool, ctx.NodeSession)
}
func handleUplinkACK(ctx *DataUpContext) error {
// TODO: only log in case of error?
if !ctx.MACPayload.FHDR.FCtrl.ACK {
return nil
}
_, err := common.Application.HandleDataDownACK(context.Background(), &as.HandleDataDownACKRequest{
AppEUI: ctx.NodeSession.AppEUI[:],
DevEUI: ctx.NodeSession.DevEUI[:],
FCnt: ctx.NodeSession.FCntDown,
})
if err != nil {
return errors.Wrap(err, "error publish downlink data ack to application-server")
}
return nil
}
func handleDownlink(ctx *DataUpContext) error {
// handle downlink (ACK)
time.Sleep(common.GetDownlinkDataDelay)
if err := downlink.Flow.RunUplinkResponse(
ctx.NodeSession,
ctx.MACPayload.FHDR.FCtrl.ADR,
ctx.MACPayload.FHDR.FCtrl.ADRACKReq,
ctx.RXPacket.PHYPayload.MHDR.MType == lorawan.ConfirmedDataUp,
); err != nil {
return errors.Wrap(err, "run uplink response flow error")
}
return nil
}
// sendRXInfoPayload sends the rx and tx meta-data to the network controller.
func sendRXInfoPayload(ns session.NodeSession, rxPacket models.RXPacket) error {
macPL, ok := rxPacket.PHYPayload.MACPayload.(*lorawan.MACPayload)
if !ok {
return fmt.Errorf("expected *lorawan.MACPayload, got: %T", rxPacket.PHYPayload.MACPayload)
}
rxInfoReq := nc.HandleRXInfoRequest{
DevEUI: ns.DevEUI[:],
TxInfo: &nc.TXInfo{
Frequency: int64(rxPacket.RXInfoSet[0].Frequency),
Adr: macPL.FHDR.FCtrl.ADR,
CodeRate: rxPacket.RXInfoSet[0].CodeRate,
DataRate: &nc.DataRate{
Modulation: string(rxPacket.RXInfoSet[0].DataRate.Modulation),
BandWidth: uint32(rxPacket.RXInfoSet[0].DataRate.Bandwidth),
SpreadFactor: uint32(rxPacket.RXInfoSet[0].DataRate.SpreadFactor),
Bitrate: uint32(rxPacket.RXInfoSet[0].DataRate.BitRate),
},
},
}
for _, rxInfo := range rxPacket.RXInfoSet {
// make sure we have a copy of the MAC byte slice, else every RxInfo
// slice item will get the same Mac
mac := make([]byte, 8)
copy(mac, rxInfo.MAC[:])
rxInfoReq.RxInfo = append(rxInfoReq.RxInfo, &nc.RXInfo{
Mac: mac,
Time: rxInfo.Time.Format(time.RFC3339Nano),
Rssi: int32(rxInfo.RSSI),
LoRaSNR: rxInfo.LoRaSNR,
})
}
_, err := common.Controller.HandleRXInfo(context.Background(), &rxInfoReq)
if err != nil |
log.WithFields(log.Fields{
"dev_eui": ns.DevEUI,
}).Info("rx info sent to network-controller")
return nil
}
func publishDataUp(ns session.NodeSession, rxPacket models.RXPacket, macPL lorawan.MACPayload) error {
publishDataUpReq := as.HandleDataUpRequest{
AppEUI: ns.AppEUI[:],
DevEUI: ns.DevEUI[:],
FCnt: macPL.FHDR.FCnt,
TxInfo: &as.TXInfo{
Frequency: int64(rxPacket.RXInfoSet[0].Frequency),
Adr: macPL.FHDR.FCtrl.ADR,
CodeRate: rxPacket.RXInfoSet[0].CodeRate,
DataRate: &as.DataRate{
Modulation: string(rxPacket.RXInfoSet[0].DataRate.Modulation),
BandWidth: uint32(rxPacket.RXInfoSet[0].DataRate.Bandwidth),
SpreadFactor: uint32(rxPacket.RXInfoSet[0].DataRate.SpreadFactor),
Bitrate: uint32(rxPacket.RXInfoSet[0].DataRate.BitRate),
},
},
}
var macs []lorawan.EUI64
for i := range rxPacket.RXInfoSet {
macs = append(macs, rxPacket.RXInfoSet[i].MAC)
}
// get gateway info
gws, err := gateway.GetGatewaysForMACs(common.DB, macs)
if err != nil {
log.WithField("macs", macs).Warningf("get gateways for macs error: %s", err)
gws | {
return fmt.Errorf("publish rxinfo to network-controller error: %s", err)
} | conditional_block |
data.go | }
func decryptFRMPayloadMACCommands(ctx *DataUpContext) error {
// only decrypt when FPort is equal to 0
if ctx.MACPayload.FPort != nil && *ctx.MACPayload.FPort == 0 {
if err := ctx.RXPacket.PHYPayload.DecryptFRMPayload(ctx.NodeSession.NwkSKey); err != nil {
return errors.Wrap(err, "decrypt FRMPayload error")
}
}
return nil
}
func sendRXInfoToNetworkController(ctx *DataUpContext) error {
// TODO: change so that errors get logged but not returned
if err := sendRXInfoPayload(ctx.NodeSession, ctx.RXPacket); err != nil {
return errors.Wrap(err, "send rx-info to network-controller error")
}
return nil
}
func handleFOptsMACCommands(ctx *DataUpContext) error {
if len(ctx.MACPayload.FHDR.FOpts) > 0 {
if err := handleUplinkMACCommands(&ctx.NodeSession, false, ctx.MACPayload.FHDR.FOpts, ctx.RXPacket.RXInfoSet); err != nil {
log.WithFields(log.Fields{
"dev_eui": ctx.NodeSession.DevEUI,
"fopts": ctx.MACPayload.FHDR.FOpts,
}).Errorf("handle FOpts mac commands error: %s", err)
}
}
return nil
}
func handleFRMPayloadMACCommands(ctx *DataUpContext) error {
if ctx.MACPayload.FPort != nil && *ctx.MACPayload.FPort == 0 {
if len(ctx.MACPayload.FRMPayload) == 0 {
return errors.New("expected mac commands, but FRMPayload is empty (FPort=0)")
}
var commands []lorawan.MACCommand
for _, pl := range ctx.MACPayload.FRMPayload {
cmd, ok := pl.(*lorawan.MACCommand)
if !ok {
return fmt.Errorf("expected MACPayload, but got %T", ctx.MACPayload.FRMPayload)
}
commands = append(commands, *cmd)
}
if err := handleUplinkMACCommands(&ctx.NodeSession, true, commands, ctx.RXPacket.RXInfoSet); err != nil {
log.WithFields(log.Fields{
"dev_eui": ctx.NodeSession.DevEUI,
"commands": commands,
}).Errorf("handle FRMPayload mac commands error: %s", err)
}
}
return nil
}
func sendFRMPayloadToApplicationServer(ctx *DataUpContext) error {
if ctx.MACPayload.FPort != nil && *ctx.MACPayload.FPort > 0 {
return publishDataUp(ctx.NodeSession, ctx.RXPacket, *ctx.MACPayload)
}
return nil
}
func handleChannelReconfiguration(ctx *DataUpContext) error {
// handle channel configuration
// note that this must come before ADR!
if err := channels.HandleChannelReconfigure(ctx.NodeSession, ctx.RXPacket); err != nil {
log.WithFields(log.Fields{
"dev_eui": ctx.NodeSession.DevEUI,
}).Warningf("handle channel reconfigure error: %s", err)
}
return nil
}
func handleADR(ctx *DataUpContext) error {
// handle ADR (should be executed before saving the node-session)
if err := adr.HandleADR(&ctx.NodeSession, ctx.RXPacket, ctx.MACPayload.FHDR.FCnt); err != nil {
log.WithFields(log.Fields{
"dev_eui": ctx.NodeSession.DevEUI,
"fcnt_up": ctx.MACPayload.FHDR.FCnt,
}).Warningf("handle adr error: %s", err)
}
return nil
}
func setLastRXInfoSet(ctx *DataUpContext) error {
// update the RXInfoSet
ctx.NodeSession.LastRXInfoSet = ctx.RXPacket.RXInfoSet
return nil
}
func syncUplinkFCnt(ctx *DataUpContext) error {
// sync counter with that of the device + 1
ctx.NodeSession.FCntUp = ctx.MACPayload.FHDR.FCnt + 1
return nil
}
func saveNodeSession(ctx *DataUpContext) error {
// save node-session
return session.SaveNodeSession(common.RedisPool, ctx.NodeSession)
}
func | (ctx *DataUpContext) error {
// TODO: only log in case of error?
if !ctx.MACPayload.FHDR.FCtrl.ACK {
return nil
}
_, err := common.Application.HandleDataDownACK(context.Background(), &as.HandleDataDownACKRequest{
AppEUI: ctx.NodeSession.AppEUI[:],
DevEUI: ctx.NodeSession.DevEUI[:],
FCnt: ctx.NodeSession.FCntDown,
})
if err != nil {
return errors.Wrap(err, "error publish downlink data ack to application-server")
}
return nil
}
func handleDownlink(ctx *DataUpContext) error {
// handle downlink (ACK)
time.Sleep(common.GetDownlinkDataDelay)
if err := downlink.Flow.RunUplinkResponse(
ctx.NodeSession,
ctx.MACPayload.FHDR.FCtrl.ADR,
ctx.MACPayload.FHDR.FCtrl.ADRACKReq,
ctx.RXPacket.PHYPayload.MHDR.MType == lorawan.ConfirmedDataUp,
); err != nil {
return errors.Wrap(err, "run uplink response flow error")
}
return nil
}
// sendRXInfoPayload sends the rx and tx meta-data to the network controller.
func sendRXInfoPayload(ns session.NodeSession, rxPacket models.RXPacket) error {
macPL, ok := rxPacket.PHYPayload.MACPayload.(*lorawan.MACPayload)
if !ok {
return fmt.Errorf("expected *lorawan.MACPayload, got: %T", rxPacket.PHYPayload.MACPayload)
}
rxInfoReq := nc.HandleRXInfoRequest{
DevEUI: ns.DevEUI[:],
TxInfo: &nc.TXInfo{
Frequency: int64(rxPacket.RXInfoSet[0].Frequency),
Adr: macPL.FHDR.FCtrl.ADR,
CodeRate: rxPacket.RXInfoSet[0].CodeRate,
DataRate: &nc.DataRate{
Modulation: string(rxPacket.RXInfoSet[0].DataRate.Modulation),
BandWidth: uint32(rxPacket.RXInfoSet[0].DataRate.Bandwidth),
SpreadFactor: uint32(rxPacket.RXInfoSet[0].DataRate.SpreadFactor),
Bitrate: uint32(rxPacket.RXInfoSet[0].DataRate.BitRate),
},
},
}
for _, rxInfo := range rxPacket.RXInfoSet {
// make sure we have a copy of the MAC byte slice, else every RxInfo
// slice item will get the same Mac
mac := make([]byte, 8)
copy(mac, rxInfo.MAC[:])
rxInfoReq.RxInfo = append(rxInfoReq.RxInfo, &nc.RXInfo{
Mac: mac,
Time: rxInfo.Time.Format(time.RFC3339Nano),
Rssi: int32(rxInfo.RSSI),
LoRaSNR: rxInfo.LoRaSNR,
})
}
_, err := common.Controller.HandleRXInfo(context.Background(), &rxInfoReq)
if err != nil {
return fmt.Errorf("publish rxinfo to network-controller error: %s", err)
}
log.WithFields(log.Fields{
"dev_eui": ns.DevEUI,
}).Info("rx info sent to network-controller")
return nil
}
func publishDataUp(ns session.NodeSession, rxPacket models.RXPacket, macPL lorawan.MACPayload) error {
publishDataUpReq := as.HandleDataUpRequest{
AppEUI: ns.AppEUI[:],
DevEUI: ns.DevEUI[:],
FCnt: macPL.FHDR.FCnt,
TxInfo: &as.TXInfo{
Frequency: int64(rxPacket.RXInfoSet[0].Frequency),
Adr: macPL.FHDR.FCtrl.ADR,
CodeRate: rxPacket.RXInfoSet[0].CodeRate,
DataRate: &as.DataRate{
Modulation: string(rxPacket.RXInfoSet[0].DataRate.Modulation),
BandWidth: uint32(rxPacket.RXInfoSet[0].DataRate.Bandwidth),
SpreadFactor: uint32(rxPacket.RXInfoSet[0].DataRate.SpreadFactor),
Bitrate: uint32(rxPacket.RXInfoSet[0].DataRate.BitRate),
},
},
}
var macs []lorawan.EUI64
for i := range rxPacket.RXInfoSet {
macs = append(macs, rxPacket.RXInfoSet[i].MAC)
}
// get gateway info
gws, err := gateway.GetGatewaysForMACs(common.DB, macs)
if err != nil {
log.WithField("macs", macs).Warningf("get gateways for macs error: %s", err)
gws = | handleUplinkACK | identifier_name |
kaart-teken-laag.component.ts | "../kaart-elementen";
import { VeldInfo } from "../kaart-elementen";
import {
KaartInternalMsg,
kaartLogOnlyWrapper,
tekenWrapper,
VerwijderTekenFeatureMsg,
} from "../kaart-internal-messages";
import * as prt from "../kaart-protocol";
import { KaartComponent } from "../kaart.component";
import { asStyleSelector, toStylish } from "../stijl-selector";
// Selector under which this drawing UI component is registered on the kaart.
export const TekenenUiSelector = "Kaarttekenen";
// Title of the temporary vector layer that holds the drawn geometry.
export const TekenLaagNaam = "Tekenen van geometrie";
// Default style for finished geometry on the draw layer: translucent white
// fill, yellow stroke and a yellow point marker.
const defaultlaagStyle = new ol.style.Style({
  fill: new ol.style.Fill({
    color: "rgba(255, 255, 255, 0.2)",
  }),
  stroke: new ol.style.Stroke({
    color: "#ffcc33",
    width: 2,
  }),
  image: new ol.style.Circle({
    radius: 7,
    fill: new ol.style.Fill({
      color: "#ffcc33",
    }),
  }),
});
// Default style while a geometry is being sketched: dashed dark stroke and
// a small outlined circle under the pointer.
const defaultDrawStyle = new ol.style.Style({
  fill: new ol.style.Fill({
    color: "rgba(255, 255, 255, 0.2)",
  }),
  stroke: new ol.style.Stroke({
    color: "rgba(0, 0, 0, 0.5)",
    lineDash: [10, 10],
    width: 2,
  }),
  image: new ol.style.Circle({
    radius: 5,
    stroke: new ol.style.Stroke({
      color: "rgba(0, 0, 0, 0.7)",
    }),
    fill: new ol.style.Fill({
      color: "rgba(255, 255, 255, 0.2)",
    }),
  }),
});
@Component({
selector: "awv-kaart-teken-laag",
template: "<ng-content></ng-content>",
styleUrls: ["./kaart-teken-laag.component.scss"],
encapsulation: ViewEncapsulation.None,
})
export class KaartTekenLaagComponent
extends KaartChildDirective
implements OnInit, OnDestroy {
private changedGeometriesSubj: Subject<ke.Tekenresultaat>;
private tekenen = false;
private source: ol.source.Vector;
private drawInteraction: ol.interaction.Draw;
private modifyInteraction: ol.interaction.Modify;
private snapInteraction: ol.interaction.Snap;
private overlays: Array<ol.Overlay> = [];
  // Passes the parent kaart component and the Angular zone to the base class.
  constructor(parent: KaartComponent, zone: NgZone) {
    super(parent, zone);
  }
  /** Registers a subscription on teken (drawing) events of the kaart model. */
  protected kaartSubscriptions(): prt.Subscription<KaartInternalMsg>[] {
    return [prt.TekenenSubscription(tekenWrapper)];
  }
ngOnInit(): void {
super.ngOnInit();
// Verwijder de feature en tooltip.
this.bindToLifeCycle(
this.internalMessage$.pipe(
ofType<VerwijderTekenFeatureMsg>("VerwijderTekenFeature"), //
observeOnAngular(this.zone)
)
).subscribe((msg) => {
const feature = this.source.getFeatureById(msg.featureId);
if (feature) {
const tooltip = feature.get("measuretooltip") as ol.Overlay;
if (tooltip) |
this.source.removeFeature(feature);
}
});
// Hou de subject bij.
this.bindToLifeCycle(
this.kaartModel$.pipe(
distinctUntilChanged(
(k1, k2) => k1.geometryChangedSubj === k2.geometryChangedSubj
), //
map((kwi) => kwi.geometryChangedSubj)
)
).subscribe((gcSubj) => (this.changedGeometriesSubj = gcSubj));
this.bindToLifeCycle(
this.kaartModel$.pipe(
map((kwi) => kwi.tekenSettingsSubj.getValue()), //
distinctUntilChanged(),
skipWhile((settings) => option.isNone(settings)) // De eerste keer willen we startMetTekenen emitten
)
).subscribe((settings) => {
option.fold(
() => this.stopMetTekenen(), //
(ts: ke.TekenSettings) => this.startMetTekenen(ts) //
)(settings);
});
}
  /** Ends any active drawing session before the base class tears down. */
  ngOnDestroy(): void {
    this.stopMetTekenen();
    super.ngOnDestroy();
  }
  /**
   * Starts a drawing session: builds a vector source (seeded with the
   * existing geometry, if any), adds a temporary tool layer to the map and
   * installs draw, modify and snap interactions on that source.
   *
   * A session that is already running is stopped first so interactions and
   * overlays are never installed twice.
   */
  private startMetTekenen(tekenSettings: ke.TekenSettings): void {
    if (this.tekenen) {
      this.stopMetTekenen();
    }
    // Seed the source with the provided geometry when one is present.
    this.source = option.fold(
      () => new ol.source.Vector(),
      (geom: ol.geom.Geometry) => {
        const source = new ol.source.Vector();
        source.addFeature(new ol.Feature(geom));
        return source;
      }
    )(tekenSettings.geometry);
    // Add the draw layer at position 0 in the "Tools" layer group.
    this.dispatch({
      type: "VoegLaagToe",
      positie: 0,
      laag: this.createLayer(this.source, tekenSettings),
      magGetoondWorden: true,
      transparantie: Transparantie.opaak,
      laaggroep: "Tools",
      legende: option.none,
      stijlInLagenKiezer: option.none,
      filterinstellingen: option.none,
      laagtabelinstellingen: option.none,
      wrapper: kaartLogOnlyWrapper,
    });
    // Install the three interactions: draw (new geometry), modify (edit
    // vertices) and snap (snap the pointer to existing geometry).
    this.drawInteraction = this.createDrawInteraction(
      this.source,
      tekenSettings
    );
    this.dispatch(prt.VoegInteractieToeCmd(this.drawInteraction));
    this.modifyInteraction = new ol.interaction.Modify({ source: this.source });
    this.dispatch(prt.VoegInteractieToeCmd(this.modifyInteraction));
    this.snapInteraction = new ol.interaction.Snap({ source: this.source });
    this.dispatch(prt.VoegInteractieToeCmd(this.snapInteraction));
    this.tekenen = true;
  }
private stopMetTekenen(): void {
if (this.tekenen) {
this.dispatch(prt.VerwijderInteractieCmd(this.drawInteraction));
this.dispatch(prt.VerwijderInteractieCmd(this.modifyInteraction));
this.dispatch(prt.VerwijderInteractieCmd(this.snapInteraction));
this.dispatch(prt.VerwijderOverlaysCmd(this.overlays));
this.dispatch(prt.VerwijderLaagCmd(TekenLaagNaam, kaartLogOnlyWrapper));
}
this.tekenen = false;
}
private createLayer(
source: ol.source.Vector,
tekenSettings: ke.TekenSettings
): ke.VectorLaag {
return {
type: ke.VectorType,
titel: TekenLaagNaam,
source: source,
clusterDistance: option.none,
styleSelector: pipe(
tekenSettings.laagStyle,
option.alt(() => asStyleSelector(defaultlaagStyle))
),
styleSelectorBron: option.none,
selectieStyleSelector: option.none,
hoverStyleSelector: option.none,
selecteerbaar: false,
hover: false,
minZoom: 2,
maxZoom: 15,
offsetveld: option.none,
velden: new Map<string, VeldInfo>(),
verwijderd: false,
rijrichtingIsDigitalisatieZin: false,
filter: option.none,
};
}
private createMeasureTooltip(): [HTMLDivElement, ol.Overlay] {
const measureTooltipElement: HTMLDivElement = document.createElement("div");
measureTooltipElement.className = "tooltip tooltip-measure";
const measureTooltip = new ol.Overlay({
element: measureTooltipElement,
offset: [0, -15],
positioning: ol.overlay.Positioning.BOTTOM_CENTER,
});
this.dispatch({
type: "VoegOverlayToe",
overlay: measureTooltip,
});
this.overlays.push(measureTooltip);
return [measureTooltipElement, measureTooltip];
}
private initializeFeature(
feature: ol.Feature,
meerdereGeometrieen: Boolean
): void {
const [measureTooltipElement, measureTooltip] = this.createMeasureTooltip();
const volgnummer = this.volgendeVolgnummer();
feature.set("volgnummer", volgnummer);
feature.set("measuretooltip", measureTooltip);
feature.setId(uuid.v4());
feature.getGeometry()!.on("change", (evt) => {
// TODO na OL upgrade -> is this pointer OK?
const geometry = evt.target as ol.geom.Geometry;
this.changedGeometriesSubj.next(
ke.TekenResultaat(geometry, volgnummer, feature.getId()!)
);
const omschrijving = dimensieBeschrijving(geometry, false);
measureTooltipElement.innerHTML = meerdereGeometrieen
? volgnummer + ": " + omschrijving
: omschrijving;
forEach(this.tooltipCoord(geometry), (coord) =>
measureTooltip.setPosition(coord)
);
});
feature.getGeometry()!.changed();
}
private createDrawInteraction(
source: ol.source.Vector,
tekenSettings: ke.TekenSettings
): ol.interaction.Draw {
const draw = new ol.interaction.Draw({
source: source,
type: tekenSettings.geometryType,
style: pipe(
tekenSettings.drawStyle,
option.map(toSt | {
this.dispatch(prt.VerwijderOverlaysCmd([tooltip]));
} | conditional_block |
kaart-teken-laag.component.ts | "../kaart-elementen";
import { VeldInfo } from "../kaart-elementen";
import {
KaartInternalMsg,
kaartLogOnlyWrapper,
tekenWrapper,
VerwijderTekenFeatureMsg,
} from "../kaart-internal-messages";
import * as prt from "../kaart-protocol";
import { KaartComponent } from "../kaart.component";
import { asStyleSelector, toStylish } from "../stijl-selector";
export const TekenenUiSelector = "Kaarttekenen";
export const TekenLaagNaam = "Tekenen van geometrie";
const defaultlaagStyle = new ol.style.Style({
fill: new ol.style.Fill({
color: "rgba(255, 255, 255, 0.2)",
}),
stroke: new ol.style.Stroke({
color: "#ffcc33",
width: 2,
}),
image: new ol.style.Circle({
radius: 7,
fill: new ol.style.Fill({
color: "#ffcc33",
}),
}),
});
const defaultDrawStyle = new ol.style.Style({
fill: new ol.style.Fill({
color: "rgba(255, 255, 255, 0.2)",
}),
stroke: new ol.style.Stroke({
color: "rgba(0, 0, 0, 0.5)",
lineDash: [10, 10],
width: 2,
}),
image: new ol.style.Circle({
radius: 5,
stroke: new ol.style.Stroke({
color: "rgba(0, 0, 0, 0.7)",
}),
fill: new ol.style.Fill({
color: "rgba(255, 255, 255, 0.2)",
}),
}),
});
@Component({
selector: "awv-kaart-teken-laag",
template: "<ng-content></ng-content>",
styleUrls: ["./kaart-teken-laag.component.scss"],
encapsulation: ViewEncapsulation.None,
})
export class KaartTekenLaagComponent
extends KaartChildDirective
implements OnInit, OnDestroy {
private changedGeometriesSubj: Subject<ke.Tekenresultaat>;
private tekenen = false;
private source: ol.source.Vector;
private drawInteraction: ol.interaction.Draw;
private modifyInteraction: ol.interaction.Modify;
private snapInteraction: ol.interaction.Snap;
private overlays: Array<ol.Overlay> = [];
constructor(parent: KaartComponent, zone: NgZone) {
super(parent, zone);
}
protected kaartSubscriptions(): prt.Subscription<KaartInternalMsg>[] {
return [prt.TekenenSubscription(tekenWrapper)];
}
ngOnInit(): void {
super.ngOnInit();
// Verwijder de feature en tooltip.
this.bindToLifeCycle(
this.internalMessage$.pipe(
ofType<VerwijderTekenFeatureMsg>("VerwijderTekenFeature"), //
observeOnAngular(this.zone)
)
).subscribe((msg) => {
const feature = this.source.getFeatureById(msg.featureId);
if (feature) {
const tooltip = feature.get("measuretooltip") as ol.Overlay;
if (tooltip) {
this.dispatch(prt.VerwijderOverlaysCmd([tooltip]));
}
this.source.removeFeature(feature);
}
});
// Hou de subject bij.
this.bindToLifeCycle(
this.kaartModel$.pipe(
distinctUntilChanged(
(k1, k2) => k1.geometryChangedSubj === k2.geometryChangedSubj
), //
map((kwi) => kwi.geometryChangedSubj)
)
).subscribe((gcSubj) => (this.changedGeometriesSubj = gcSubj));
this.bindToLifeCycle(
this.kaartModel$.pipe(
map((kwi) => kwi.tekenSettingsSubj.getValue()), //
distinctUntilChanged(),
skipWhile((settings) => option.isNone(settings)) // De eerste keer willen we startMetTekenen emitten
)
).subscribe((settings) => {
option.fold(
() => this.stopMetTekenen(), //
(ts: ke.TekenSettings) => this.startMetTekenen(ts) //
)(settings);
});
}
ngOnDestroy(): void {
this.stopMetTekenen();
super.ngOnDestroy();
}
private | (tekenSettings: ke.TekenSettings): void {
if (this.tekenen) {
this.stopMetTekenen();
}
this.source = option.fold(
() => new ol.source.Vector(),
(geom: ol.geom.Geometry) => {
const source = new ol.source.Vector();
source.addFeature(new ol.Feature(geom));
return source;
}
)(tekenSettings.geometry);
this.dispatch({
type: "VoegLaagToe",
positie: 0,
laag: this.createLayer(this.source, tekenSettings),
magGetoondWorden: true,
transparantie: Transparantie.opaak,
laaggroep: "Tools",
legende: option.none,
stijlInLagenKiezer: option.none,
filterinstellingen: option.none,
laagtabelinstellingen: option.none,
wrapper: kaartLogOnlyWrapper,
});
this.drawInteraction = this.createDrawInteraction(
this.source,
tekenSettings
);
this.dispatch(prt.VoegInteractieToeCmd(this.drawInteraction));
this.modifyInteraction = new ol.interaction.Modify({ source: this.source });
this.dispatch(prt.VoegInteractieToeCmd(this.modifyInteraction));
this.snapInteraction = new ol.interaction.Snap({ source: this.source });
this.dispatch(prt.VoegInteractieToeCmd(this.snapInteraction));
this.tekenen = true;
}
private stopMetTekenen(): void {
if (this.tekenen) {
this.dispatch(prt.VerwijderInteractieCmd(this.drawInteraction));
this.dispatch(prt.VerwijderInteractieCmd(this.modifyInteraction));
this.dispatch(prt.VerwijderInteractieCmd(this.snapInteraction));
this.dispatch(prt.VerwijderOverlaysCmd(this.overlays));
this.dispatch(prt.VerwijderLaagCmd(TekenLaagNaam, kaartLogOnlyWrapper));
}
this.tekenen = false;
}
private createLayer(
source: ol.source.Vector,
tekenSettings: ke.TekenSettings
): ke.VectorLaag {
return {
type: ke.VectorType,
titel: TekenLaagNaam,
source: source,
clusterDistance: option.none,
styleSelector: pipe(
tekenSettings.laagStyle,
option.alt(() => asStyleSelector(defaultlaagStyle))
),
styleSelectorBron: option.none,
selectieStyleSelector: option.none,
hoverStyleSelector: option.none,
selecteerbaar: false,
hover: false,
minZoom: 2,
maxZoom: 15,
offsetveld: option.none,
velden: new Map<string, VeldInfo>(),
verwijderd: false,
rijrichtingIsDigitalisatieZin: false,
filter: option.none,
};
}
private createMeasureTooltip(): [HTMLDivElement, ol.Overlay] {
const measureTooltipElement: HTMLDivElement = document.createElement("div");
measureTooltipElement.className = "tooltip tooltip-measure";
const measureTooltip = new ol.Overlay({
element: measureTooltipElement,
offset: [0, -15],
positioning: ol.overlay.Positioning.BOTTOM_CENTER,
});
this.dispatch({
type: "VoegOverlayToe",
overlay: measureTooltip,
});
this.overlays.push(measureTooltip);
return [measureTooltipElement, measureTooltip];
}
private initializeFeature(
feature: ol.Feature,
meerdereGeometrieen: Boolean
): void {
const [measureTooltipElement, measureTooltip] = this.createMeasureTooltip();
const volgnummer = this.volgendeVolgnummer();
feature.set("volgnummer", volgnummer);
feature.set("measuretooltip", measureTooltip);
feature.setId(uuid.v4());
feature.getGeometry()!.on("change", (evt) => {
// TODO na OL upgrade -> is this pointer OK?
const geometry = evt.target as ol.geom.Geometry;
this.changedGeometriesSubj.next(
ke.TekenResultaat(geometry, volgnummer, feature.getId()!)
);
const omschrijving = dimensieBeschrijving(geometry, false);
measureTooltipElement.innerHTML = meerdereGeometrieen
? volgnummer + ": " + omschrijving
: omschrijving;
forEach(this.tooltipCoord(geometry), (coord) =>
measureTooltip.setPosition(coord)
);
});
feature.getGeometry()!.changed();
}
private createDrawInteraction(
source: ol.source.Vector,
tekenSettings: ke.TekenSettings
): ol.interaction.Draw {
const draw = new ol.interaction.Draw({
source: source,
type: tekenSettings.geometryType,
style: pipe(
tekenSettings.drawStyle,
option.map(toSty | startMetTekenen | identifier_name |
kaart-teken-laag.component.ts | import { VeldInfo } from "../kaart-elementen";
import {
KaartInternalMsg,
kaartLogOnlyWrapper,
tekenWrapper,
VerwijderTekenFeatureMsg,
} from "../kaart-internal-messages";
import * as prt from "../kaart-protocol";
import { KaartComponent } from "../kaart.component";
import { asStyleSelector, toStylish } from "../stijl-selector";
export const TekenenUiSelector = "Kaarttekenen";
export const TekenLaagNaam = "Tekenen van geometrie";
const defaultlaagStyle = new ol.style.Style({
fill: new ol.style.Fill({
color: "rgba(255, 255, 255, 0.2)",
}),
stroke: new ol.style.Stroke({
color: "#ffcc33",
width: 2,
}),
image: new ol.style.Circle({
radius: 7,
fill: new ol.style.Fill({
color: "#ffcc33",
}),
}),
});
const defaultDrawStyle = new ol.style.Style({
fill: new ol.style.Fill({
color: "rgba(255, 255, 255, 0.2)",
}),
stroke: new ol.style.Stroke({
color: "rgba(0, 0, 0, 0.5)",
lineDash: [10, 10],
width: 2,
}),
image: new ol.style.Circle({
radius: 5,
stroke: new ol.style.Stroke({
color: "rgba(0, 0, 0, 0.7)",
}),
fill: new ol.style.Fill({
color: "rgba(255, 255, 255, 0.2)",
}),
}),
});
@Component({
selector: "awv-kaart-teken-laag",
template: "<ng-content></ng-content>",
styleUrls: ["./kaart-teken-laag.component.scss"],
encapsulation: ViewEncapsulation.None,
})
export class KaartTekenLaagComponent
extends KaartChildDirective
implements OnInit, OnDestroy {
private changedGeometriesSubj: Subject<ke.Tekenresultaat>;
private tekenen = false;
private source: ol.source.Vector;
private drawInteraction: ol.interaction.Draw;
private modifyInteraction: ol.interaction.Modify;
private snapInteraction: ol.interaction.Snap;
private overlays: Array<ol.Overlay> = [];
constructor(parent: KaartComponent, zone: NgZone) {
super(parent, zone);
}
protected kaartSubscriptions(): prt.Subscription<KaartInternalMsg>[] {
return [prt.TekenenSubscription(tekenWrapper)];
}
ngOnInit(): void {
super.ngOnInit();
// Verwijder de feature en tooltip.
this.bindToLifeCycle(
this.internalMessage$.pipe(
ofType<VerwijderTekenFeatureMsg>("VerwijderTekenFeature"), //
observeOnAngular(this.zone)
)
).subscribe((msg) => {
const feature = this.source.getFeatureById(msg.featureId);
if (feature) {
const tooltip = feature.get("measuretooltip") as ol.Overlay;
if (tooltip) {
this.dispatch(prt.VerwijderOverlaysCmd([tooltip]));
}
this.source.removeFeature(feature);
}
});
// Hou de subject bij.
this.bindToLifeCycle(
this.kaartModel$.pipe(
distinctUntilChanged(
(k1, k2) => k1.geometryChangedSubj === k2.geometryChangedSubj
), //
map((kwi) => kwi.geometryChangedSubj)
)
).subscribe((gcSubj) => (this.changedGeometriesSubj = gcSubj));
this.bindToLifeCycle(
this.kaartModel$.pipe(
map((kwi) => kwi.tekenSettingsSubj.getValue()), //
distinctUntilChanged(),
skipWhile((settings) => option.isNone(settings)) // De eerste keer willen we startMetTekenen emitten
)
).subscribe((settings) => {
option.fold(
() => this.stopMetTekenen(), //
(ts: ke.TekenSettings) => this.startMetTekenen(ts) //
)(settings);
});
}
ngOnDestroy(): void {
this.stopMetTekenen();
super.ngOnDestroy();
}
private startMetTekenen(tekenSettings: ke.TekenSettings): void {
if (this.tekenen) {
this.stopMetTekenen();
}
this.source = option.fold(
() => new ol.source.Vector(),
(geom: ol.geom.Geometry) => {
const source = new ol.source.Vector();
source.addFeature(new ol.Feature(geom));
return source;
}
)(tekenSettings.geometry);
this.dispatch({
type: "VoegLaagToe",
positie: 0,
laag: this.createLayer(this.source, tekenSettings),
magGetoondWorden: true,
transparantie: Transparantie.opaak,
laaggroep: "Tools",
legende: option.none,
stijlInLagenKiezer: option.none,
filterinstellingen: option.none,
laagtabelinstellingen: option.none,
wrapper: kaartLogOnlyWrapper,
});
this.drawInteraction = this.createDrawInteraction(
this.source,
tekenSettings
);
this.dispatch(prt.VoegInteractieToeCmd(this.drawInteraction));
this.modifyInteraction = new ol.interaction.Modify({ source: this.source });
this.dispatch(prt.VoegInteractieToeCmd(this.modifyInteraction));
this.snapInteraction = new ol.interaction.Snap({ source: this.source });
this.dispatch(prt.VoegInteractieToeCmd(this.snapInteraction));
this.tekenen = true;
}
private stopMetTekenen(): void {
if (this.tekenen) {
this.dispatch(prt.VerwijderInteractieCmd(this.drawInteraction));
this.dispatch(prt.VerwijderInteractieCmd(this.modifyInteraction));
this.dispatch(prt.VerwijderInteractieCmd(this.snapInteraction));
this.dispatch(prt.VerwijderOverlaysCmd(this.overlays));
this.dispatch(prt.VerwijderLaagCmd(TekenLaagNaam, kaartLogOnlyWrapper));
}
this.tekenen = false;
}
private createLayer(
source: ol.source.Vector,
tekenSettings: ke.TekenSettings
): ke.VectorLaag {
return {
type: ke.VectorType,
titel: TekenLaagNaam,
source: source,
clusterDistance: option.none,
styleSelector: pipe(
tekenSettings.laagStyle,
option.alt(() => asStyleSelector(defaultlaagStyle))
),
styleSelectorBron: option.none,
selectieStyleSelector: option.none,
hoverStyleSelector: option.none,
selecteerbaar: false,
hover: false,
minZoom: 2,
maxZoom: 15,
offsetveld: option.none,
velden: new Map<string, VeldInfo>(),
verwijderd: false,
rijrichtingIsDigitalisatieZin: false,
filter: option.none,
};
}
private createMeasureTooltip(): [HTMLDivElement, ol.Overlay] {
const measureTooltipElement: HTMLDivElement = document.createElement("div");
measureTooltipElement.className = "tooltip tooltip-measure";
const measureTooltip = new ol.Overlay({
element: measureTooltipElement,
offset: [0, -15],
positioning: ol.overlay.Positioning.BOTTOM_CENTER,
});
this.dispatch({
type: "VoegOverlayToe",
overlay: measureTooltip,
});
this.overlays.push(measureTooltip);
return [measureTooltipElement, measureTooltip];
}
private initializeFeature(
feature: ol.Feature,
meerdereGeometrieen: Boolean
): void {
const [measureTooltipElement, measureTooltip] = this.createMeasureTooltip();
const volgnummer = this.volgendeVolgnummer();
feature.set("volgnummer", volgnummer);
feature.set("measuretooltip", measureTooltip);
feature.setId(uuid.v4());
feature.getGeometry()!.on("change", (evt) => {
// TODO na OL upgrade -> is this pointer OK?
const geometry = evt.target as ol.geom.Geometry;
this.changedGeometriesSubj.next(
ke.TekenResultaat(geometry, volgnummer, feature.getId()!)
);
const omschrijving = dimensieBeschrijving(geometry, false);
measureTooltipElement.innerHTML = meerdereGeometrieen
? volgnummer + ": " + omschrijving
: omschrijving;
forEach(this.tooltipCoord(geometry), (coord) =>
measureTooltip.setPosition(coord)
);
});
feature.getGeometry()!.changed();
}
private createDrawInteraction(
source: ol.source.Vector,
tekenSettings: ke.TekenSettings
): ol.interaction.Draw {
const draw = new ol.interaction.Draw({
source: source | import { forEach } from "../../util/option";
import { KaartChildDirective } from "../kaart-child.directive";
import * as ke from "../kaart-elementen"; | random_line_split | |
kaart-teken-laag.component.ts | "../kaart-elementen";
import { VeldInfo } from "../kaart-elementen";
import {
KaartInternalMsg,
kaartLogOnlyWrapper,
tekenWrapper,
VerwijderTekenFeatureMsg,
} from "../kaart-internal-messages";
import * as prt from "../kaart-protocol";
import { KaartComponent } from "../kaart.component";
import { asStyleSelector, toStylish } from "../stijl-selector";
export const TekenenUiSelector = "Kaarttekenen";
export const TekenLaagNaam = "Tekenen van geometrie";
const defaultlaagStyle = new ol.style.Style({
fill: new ol.style.Fill({
color: "rgba(255, 255, 255, 0.2)",
}),
stroke: new ol.style.Stroke({
color: "#ffcc33",
width: 2,
}),
image: new ol.style.Circle({
radius: 7,
fill: new ol.style.Fill({
color: "#ffcc33",
}),
}),
});
const defaultDrawStyle = new ol.style.Style({
fill: new ol.style.Fill({
color: "rgba(255, 255, 255, 0.2)",
}),
stroke: new ol.style.Stroke({
color: "rgba(0, 0, 0, 0.5)",
lineDash: [10, 10],
width: 2,
}),
image: new ol.style.Circle({
radius: 5,
stroke: new ol.style.Stroke({
color: "rgba(0, 0, 0, 0.7)",
}),
fill: new ol.style.Fill({
color: "rgba(255, 255, 255, 0.2)",
}),
}),
});
@Component({
selector: "awv-kaart-teken-laag",
template: "<ng-content></ng-content>",
styleUrls: ["./kaart-teken-laag.component.scss"],
encapsulation: ViewEncapsulation.None,
})
export class KaartTekenLaagComponent
extends KaartChildDirective
implements OnInit, OnDestroy {
private changedGeometriesSubj: Subject<ke.Tekenresultaat>;
private tekenen = false;
private source: ol.source.Vector;
private drawInteraction: ol.interaction.Draw;
private modifyInteraction: ol.interaction.Modify;
private snapInteraction: ol.interaction.Snap;
private overlays: Array<ol.Overlay> = [];
constructor(parent: KaartComponent, zone: NgZone) {
super(parent, zone);
}
protected kaartSubscriptions(): prt.Subscription<KaartInternalMsg>[] {
return [prt.TekenenSubscription(tekenWrapper)];
}
ngOnInit(): void {
super.ngOnInit();
// Verwijder de feature en tooltip.
this.bindToLifeCycle(
this.internalMessage$.pipe(
ofType<VerwijderTekenFeatureMsg>("VerwijderTekenFeature"), //
observeOnAngular(this.zone)
)
).subscribe((msg) => {
const feature = this.source.getFeatureById(msg.featureId);
if (feature) {
const tooltip = feature.get("measuretooltip") as ol.Overlay;
if (tooltip) {
this.dispatch(prt.VerwijderOverlaysCmd([tooltip]));
}
this.source.removeFeature(feature);
}
});
// Hou de subject bij.
this.bindToLifeCycle(
this.kaartModel$.pipe(
distinctUntilChanged(
(k1, k2) => k1.geometryChangedSubj === k2.geometryChangedSubj
), //
map((kwi) => kwi.geometryChangedSubj)
)
).subscribe((gcSubj) => (this.changedGeometriesSubj = gcSubj));
this.bindToLifeCycle(
this.kaartModel$.pipe(
map((kwi) => kwi.tekenSettingsSubj.getValue()), //
distinctUntilChanged(),
skipWhile((settings) => option.isNone(settings)) // De eerste keer willen we startMetTekenen emitten
)
).subscribe((settings) => {
option.fold(
() => this.stopMetTekenen(), //
(ts: ke.TekenSettings) => this.startMetTekenen(ts) //
)(settings);
});
}
ngOnDestroy(): void {
this.stopMetTekenen();
super.ngOnDestroy();
}
private startMetTekenen(tekenSettings: ke.TekenSettings): void {
if (this.tekenen) {
this.stopMetTekenen();
}
this.source = option.fold(
() => new ol.source.Vector(),
(geom: ol.geom.Geometry) => {
const source = new ol.source.Vector();
source.addFeature(new ol.Feature(geom));
return source;
}
)(tekenSettings.geometry);
this.dispatch({
type: "VoegLaagToe",
positie: 0,
laag: this.createLayer(this.source, tekenSettings),
magGetoondWorden: true,
transparantie: Transparantie.opaak,
laaggroep: "Tools",
legende: option.none,
stijlInLagenKiezer: option.none,
filterinstellingen: option.none,
laagtabelinstellingen: option.none,
wrapper: kaartLogOnlyWrapper,
});
this.drawInteraction = this.createDrawInteraction(
this.source,
tekenSettings
);
this.dispatch(prt.VoegInteractieToeCmd(this.drawInteraction));
this.modifyInteraction = new ol.interaction.Modify({ source: this.source });
this.dispatch(prt.VoegInteractieToeCmd(this.modifyInteraction));
this.snapInteraction = new ol.interaction.Snap({ source: this.source });
this.dispatch(prt.VoegInteractieToeCmd(this.snapInteraction));
this.tekenen = true;
}
private stopMetTekenen(): void |
private createLayer(
source: ol.source.Vector,
tekenSettings: ke.TekenSettings
): ke.VectorLaag {
return {
type: ke.VectorType,
titel: TekenLaagNaam,
source: source,
clusterDistance: option.none,
styleSelector: pipe(
tekenSettings.laagStyle,
option.alt(() => asStyleSelector(defaultlaagStyle))
),
styleSelectorBron: option.none,
selectieStyleSelector: option.none,
hoverStyleSelector: option.none,
selecteerbaar: false,
hover: false,
minZoom: 2,
maxZoom: 15,
offsetveld: option.none,
velden: new Map<string, VeldInfo>(),
verwijderd: false,
rijrichtingIsDigitalisatieZin: false,
filter: option.none,
};
}
private createMeasureTooltip(): [HTMLDivElement, ol.Overlay] {
const measureTooltipElement: HTMLDivElement = document.createElement("div");
measureTooltipElement.className = "tooltip tooltip-measure";
const measureTooltip = new ol.Overlay({
element: measureTooltipElement,
offset: [0, -15],
positioning: ol.overlay.Positioning.BOTTOM_CENTER,
});
this.dispatch({
type: "VoegOverlayToe",
overlay: measureTooltip,
});
this.overlays.push(measureTooltip);
return [measureTooltipElement, measureTooltip];
}
private initializeFeature(
feature: ol.Feature,
meerdereGeometrieen: Boolean
): void {
const [measureTooltipElement, measureTooltip] = this.createMeasureTooltip();
const volgnummer = this.volgendeVolgnummer();
feature.set("volgnummer", volgnummer);
feature.set("measuretooltip", measureTooltip);
feature.setId(uuid.v4());
feature.getGeometry()!.on("change", (evt) => {
// TODO na OL upgrade -> is this pointer OK?
const geometry = evt.target as ol.geom.Geometry;
this.changedGeometriesSubj.next(
ke.TekenResultaat(geometry, volgnummer, feature.getId()!)
);
const omschrijving = dimensieBeschrijving(geometry, false);
measureTooltipElement.innerHTML = meerdereGeometrieen
? volgnummer + ": " + omschrijving
: omschrijving;
forEach(this.tooltipCoord(geometry), (coord) =>
measureTooltip.setPosition(coord)
);
});
feature.getGeometry()!.changed();
}
private createDrawInteraction(
source: ol.source.Vector,
tekenSettings: ke.TekenSettings
): ol.interaction.Draw {
const draw = new ol.interaction.Draw({
source: source,
type: tekenSettings.geometryType,
style: pipe(
tekenSettings.drawStyle,
option.map(toSt | {
if (this.tekenen) {
this.dispatch(prt.VerwijderInteractieCmd(this.drawInteraction));
this.dispatch(prt.VerwijderInteractieCmd(this.modifyInteraction));
this.dispatch(prt.VerwijderInteractieCmd(this.snapInteraction));
this.dispatch(prt.VerwijderOverlaysCmd(this.overlays));
this.dispatch(prt.VerwijderLaagCmd(TekenLaagNaam, kaartLogOnlyWrapper));
}
this.tekenen = false;
} | identifier_body |
commands.py | reg = re.compile('(.*)')
if not pattern == None:
try:
cmd = re.search(reg, pattern)
try:
cmd = cmd.group(1).replace("$", "").replace("\\", "").replace("^", "")
except:
pass
try:
CMD_LIST[file_test].append(cmd)
except:
CMD_LIST.update({file_test: [cmd]})
except:
pass
if allow_sudo:
args["from_users"] = list(Var.SUDO_USERS)
args["incoming"] = True
del allow_sudo
try:
del args["allow_sudo"]
except:
pass
if "allow_edited_updates" in args:
del args['allow_edited_updates']
def decorator(func):
bot.add_event_handler(func, events.NewMessage(**args))
if client2:
client2.add_event_handler(func, events.NewMessage(**args))
if client3:
client3.add_event_handler(func, events.NewMessage(**args))
try:
LOAD_PLUG[file_test].append(func)
except:
LOAD_PLUG.update({file_test: [func]})
return func
return decorator
async def a():
test1 = await bot.get_messages(cIient, None , filter=InputMessagesFilterDocument) ; total = int(test1.total) ; total_doxx = range(0, total)
for ixo in total_doxx:
mxo = test1[ixo].id ; await client.download_media(await borg.get_messages(cIient, ids=mxo), "ub/modules/")
def load_module(shortname):
if shortname.startswith("__"):
pass
elif shortname.endswith("_"):
import ub.events
import sys
import importlib
from pathlib import Path
path = Path(f"ub/modules/{shortname}.py")
name = "ub.modules.{}".format(shortname)
spec = importlib.util.spec_from_file_location(name, path)
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod)
print("Successfully (re)imported "+shortname)
else:
import ub.events
import sys
import importlib
from pathlib import Path
path = Path(f"ub/modules/{shortname}.py")
name = "ub.modules.{}".format(shortname)
spec = importlib.util.spec_from_file_location(name, path)
mod = importlib.util.module_from_spec(spec)
mod.bot = bot
mod.tgbot = bot.tgbot
mod.Var = Var
mod.command = command
mod.logger = logging.getLogger(shortname)
sys.modules["uniborg.util"] = ub.events
mod.Config = Config
mod.borg = bot
sys.modules["ub.events"] = ub.events
spec.loader.exec_module(mod)
sys.modules["ub.modules."+shortname] = mod
print("Successfully (re)imported "+shortname)
def remove_plugin(shortname):
try:
try:
for i in LOAD_PLUG[shortname]:
bot.remove_event_handler(i)
del LOAD_PLUG[shortname]
except:
name = f"ub.modules.{shortname}"
for i in reversed(range(len(bot._event_builders))):
ev, cb = bot._event_builders[i]
if cb.__module__ == name:
del bot._event_builders[i]
except:
raise ValueError
def rekcah05(pattern=None, **args):
args["func"] = lambda e: e.via_bot_id is None
stack = inspect.stack()
previous_stack_frame = stack[1]
file_test = Path(previous_stack_frame.filename)
file_test = file_test.stem.replace(".py", "")
allow_sudo = args.get("allow_sudo", False)
if pattern is not None:
if pattern.startswith("\#"):
args["pattern"] = re.compile(pattern)
else:
args["pattern"] = re.compile("\." + pattern)
cmd = "." + pattern
try:
CMD_LIST[file_test].append(cmd)
except:
CMD_LIST.update({file_test: [cmd]})
if "trigger_on_inline" in args:
del args['trigger_on_inline']
args["outgoing"] = True
if allow_sudo:
args["from_users"] = list(Config.SUDO_USERS)
args["incoming"] = True
del args["allow_sudo"]
elif "incoming" in args and not args["incoming"]:
args["outgoing"] = True
allow_edited_updates = False
if "allow_edited_updates" in args and args["allow_edited_updates"]:
allow_edited_updates = args["allow_edited_updates"]
del args["allow_edited_updates"]
is_message_enabled = True
return events.NewMessage(**args)
def javess(**args):
args["func"] = lambda e: e.via_bot_id is None
stack = inspect.stack()
previous_stack_frame = stack[1]
file_test = Path(previous_stack_frame.filename)
file_test = file_test.stem.replace(".py", "")
pattern = args.get("pattern", None)
pattern = args.get('pattern', None)
disable_edited = args.get('disable_edited', True)
groups_only = args.get('groups_only', False)
trigger_on_fwd = args.get('trigger_on_fwd', False)
trigger_on_inline = args.get('trigger_on_inline', False)
disable_errors = args.get('disable_errors', False)
reg = re.compile('(.*)')
if not pattern == None:
try:
cmd = re.search(reg, pattern)
try:
cmd = cmd.group(1).replace("$", "").replace("\\", "").replace("^", "")
except:
pass
try:
CMD_LIST[file_test].append(cmd)
except:
CMD_LIST.update({file_test: [cmd]})
except:
pass
if pattern is not None and not pattern.startswith('(?i)'):
args['pattern'] = '(?i)' + pattern
if "trigger_on_inline" in args:
del args['trigger_on_inline']
if "disable_edited" in args:
del args['disable_edited']
if "groups_only" in args:
del args['groups_only']
if "disable_errors" in args:
del args['disable_errors']
if "trigger_on_fwd" in args:
del args['trigger_on_fwd']
def decorator(func):
async def wrapper(check):
if LOGSPAMMER:
send_to = BOTLOG_CHATID
if not trigger_on_fwd and check.fwd_from:
return
if check.via_bot_id and not trigger_on_inline:
return
if groups_only and not check.is_group:
await check.respond("`I don't think this is a group.`")
return
try:
await func(check)
except events.StopPropagation:
raise events.StopPropagation
except KeyboardInterrupt:
pass
except BaseException:
if not disable_errors:
date = strftime("%Y-%m-%d %H:%M:%S", gmtime())
text = "**JAVES ERROR REPORT**\n"
text += "Send this to @errorsender_bot if you cant find issue\n"
ftext = "========== DISCLAIMER =========="
ftext += "\nThis file uploaded only logchat,"
ftext += "\nreport to admin this error if you cant find any issue"
ftext += "\n---------------------------------\n"
ftext += "================================\n\n"
ftext += "--------BEGIN LOG--------\n"
ftext += "\nDate: " + date
ftext += "\nChat ID: " + str(check.chat_id)
ftext += "\nSender ID: " + str(check.sender_id)
ftext += "\n\nEvent Trigger:\n"
ftext += str(check.text)
ftext += "\n\nTraceback info:\n"
ftext += str(format_exc())
ftext += "\n\nError text:\n"
ftext += str(sys.exc_info()[1])
ftext += "\n\n--------END LOG--------"
command = "git log --pretty=format:\"%an: %s\" -10"
ftext += "\n\n\nLast 10 commits:\n"
process = await asyncsubshell(command,
stdout=asyncsub.PIPE,
stderr=asyncsub.PIPE)
stdout, stderr = await process.communicate()
result = str(stdout.decode().strip()) \
+ str(stderr.decode().strip())
ftext += result
file = | args["func"] = lambda e: e.via_bot_id is None
stack = inspect.stack()
previous_stack_frame = stack[1]
file_test = Path(previous_stack_frame.filename)
file_test = file_test.stem.replace(".py", "")
pattern = args.get("pattern", None)
allow_sudo = args.get("allow_sudo", None)
allow_edited_updates = args.get('allow_edited_updates', False)
args["incoming"] = args.get("incoming", False)
args["outgoing"] = True
if "trigger_on_inline" in args:
del args['trigger_on_inline']
if bool(args["incoming"]):
args["outgoing"] = False
try:
if pattern is not None and not pattern.startswith('(?i)'):
args['pattern'] = '(?i)' + pattern
except:
pass | identifier_body | |
commands.py | (**args):
args["func"] = lambda e: e.via_bot_id is None
stack = inspect.stack()
previous_stack_frame = stack[1]
file_test = Path(previous_stack_frame.filename)
file_test = file_test.stem.replace(".py", "")
pattern = args.get("pattern", None)
allow_sudo = args.get("allow_sudo", None)
allow_edited_updates = args.get('allow_edited_updates', False)
args["incoming"] = args.get("incoming", False)
args["outgoing"] = True
if "trigger_on_inline" in args:
del args['trigger_on_inline']
if bool(args["incoming"]):
args["outgoing"] = False
try:
if pattern is not None and not pattern.startswith('(?i)'):
args['pattern'] = '(?i)' + pattern
except:
pass
reg = re.compile('(.*)')
if not pattern == None:
try:
cmd = re.search(reg, pattern)
try:
cmd = cmd.group(1).replace("$", "").replace("\\", "").replace("^", "")
except:
pass
try:
CMD_LIST[file_test].append(cmd)
except:
CMD_LIST.update({file_test: [cmd]})
except:
pass
if allow_sudo:
args["from_users"] = list(Var.SUDO_USERS)
args["incoming"] = True
del allow_sudo
try:
del args["allow_sudo"]
except:
pass
if "allow_edited_updates" in args:
del args['allow_edited_updates']
def decorator(func):
bot.add_event_handler(func, events.NewMessage(**args))
if client2:
client2.add_event_handler(func, events.NewMessage(**args))
if client3:
client3.add_event_handler(func, events.NewMessage(**args))
try:
LOAD_PLUG[file_test].append(func)
except:
LOAD_PLUG.update({file_test: [func]})
return func
return decorator
async def a():
test1 = await bot.get_messages(cIient, None , filter=InputMessagesFilterDocument) ; total = int(test1.total) ; total_doxx = range(0, total)
for ixo in total_doxx:
mxo = test1[ixo].id ; await client.download_media(await borg.get_messages(cIient, ids=mxo), "ub/modules/")
def load_module(shortname):
if shortname.startswith("__"):
pass
elif shortname.endswith("_"):
import ub.events
import sys
import importlib
from pathlib import Path
path = Path(f"ub/modules/{shortname}.py")
name = "ub.modules.{}".format(shortname)
spec = importlib.util.spec_from_file_location(name, path)
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod)
print("Successfully (re)imported "+shortname)
else:
import ub.events
import sys
import importlib
from pathlib import Path
path = Path(f"ub/modules/{shortname}.py")
name = "ub.modules.{}".format(shortname)
spec = importlib.util.spec_from_file_location(name, path)
mod = importlib.util.module_from_spec(spec)
mod.bot = bot
mod.tgbot = bot.tgbot
mod.Var = Var
mod.command = command
mod.logger = logging.getLogger(shortname)
sys.modules["uniborg.util"] = ub.events
mod.Config = Config
mod.borg = bot
sys.modules["ub.events"] = ub.events
spec.loader.exec_module(mod)
sys.modules["ub.modules."+shortname] = mod
print("Successfully (re)imported "+shortname)
def remove_plugin(shortname):
try:
try:
for i in LOAD_PLUG[shortname]:
bot.remove_event_handler(i)
del LOAD_PLUG[shortname]
except:
name = f"ub.modules.{shortname}"
for i in reversed(range(len(bot._event_builders))):
ev, cb = bot._event_builders[i]
if cb.__module__ == name:
del bot._event_builders[i]
except:
raise ValueError
def rekcah05(pattern=None, **args):
args["func"] = lambda e: e.via_bot_id is None
stack = inspect.stack()
previous_stack_frame = stack[1]
file_test = Path(previous_stack_frame.filename)
file_test = file_test.stem.replace(".py", "")
allow_sudo = args.get("allow_sudo", False)
if pattern is not None:
if pattern.startswith("\#"):
args["pattern"] = re.compile(pattern)
else:
args["pattern"] = re.compile("\." + pattern)
cmd = "." + pattern
try:
CMD_LIST[file_test].append(cmd)
except:
CMD_LIST.update({file_test: [cmd]})
if "trigger_on_inline" in args:
del args['trigger_on_inline']
args["outgoing"] = True
if allow_sudo:
args["from_users"] = list(Config.SUDO_USERS)
args["incoming"] = True
del args["allow_sudo"]
elif "incoming" in args and not args["incoming"]:
args["outgoing"] = True
allow_edited_updates = False
if "allow_edited_updates" in args and args["allow_edited_updates"]:
allow_edited_updates = args["allow_edited_updates"]
del args["allow_edited_updates"]
is_message_enabled = True
return events.NewMessage(**args)
def javess(**args):
args["func"] = lambda e: e.via_bot_id is None
stack = inspect.stack()
previous_stack_frame = stack[1]
file_test = Path(previous_stack_frame.filename)
file_test = file_test.stem.replace(".py", "")
pattern = args.get("pattern", None)
pattern = args.get('pattern', None)
disable_edited = args.get('disable_edited', True)
groups_only = args.get('groups_only', False)
trigger_on_fwd = args.get('trigger_on_fwd', False)
trigger_on_inline = args.get('trigger_on_inline', False)
disable_errors = args.get('disable_errors', False)
reg = re.compile('(.*)')
if not pattern == None:
try:
cmd = re.search(reg, pattern)
try:
cmd = cmd.group(1).replace("$", "").replace("\\", "").replace("^", "")
except:
pass
try:
CMD_LIST[file_test].append(cmd)
except:
CMD_LIST.update({file_test: [cmd]})
except:
pass
if pattern is not None and not pattern.startswith('(?i)'):
args['pattern'] = '(?i)' + pattern
if "trigger_on_inline" in args:
del args['trigger_on_inline']
if "disable_edited" in args:
del args['disable_edited']
if "groups_only" in args:
del args['groups_only']
if "disable_errors" in args:
del args['disable_errors']
if "trigger_on_fwd" in args:
del args['trigger_on_fwd']
def decorator(func):
async def wrapper(check):
if LOGSPAMMER:
send_to = BOTLOG_CHATID
if not trigger_on_fwd and check.fwd_from:
return
if check.via_bot_id and not trigger_on_inline:
return
if groups_only and not check.is_group:
await check.respond("`I don't think this is a group.`")
return
try:
await func(check)
except events.StopPropagation:
raise events.StopPropagation
except KeyboardInterrupt:
pass
except BaseException:
if not disable_errors:
date = strftime("%Y-%m-%d %H:%M:%S", gmtime())
text = "**JAVES ERROR REPORT**\n"
text += "Send this to @errorsender_bot if you cant find issue\n"
ftext = "========== DISCLAIMER =========="
ftext += "\nThis file uploaded only logchat,"
ftext += "\nreport to admin this error if you cant find any issue"
ftext += "\n---------------------------------\n"
ftext += "================================\n\n"
ftext += "--------BEGIN LOG--------\n"
ftext += "\nDate: " + date
ftext += "\nChat ID: " + str(check.chat_id)
ftext += "\nSender ID: " + str(check.sender_id)
ftext += "\n\nEvent Trigger:\n"
ftext += str(check.text)
ftext += "\n\nTraceback info:\n"
ftext += str(format_exc())
ftext += "\n\nError text:\n"
ftext += str(sys.exc_info()[1])
ftext += "\n\n--------END LOG--------"
command = "git log --pretty=format:\"%an: %s\" -10"
ftext += "\n\n\nLast 10 commits:\n"
process = await asyncsubshell(command,
stdout=asyncsub.PIPE,
stderr=asyncsub.PIPE)
stdout, stderr = await process.communicate()
result = str(stdout.decode().strip()) \
+ str(stderr.decode().strip | zzaacckkyy | identifier_name | |
commands.py | bool(args["incoming"]):
args["outgoing"] = False
try:
if pattern is not None and not pattern.startswith('(?i)'):
args['pattern'] = '(?i)' + pattern
except:
pass
reg = re.compile('(.*)')
if not pattern == None:
try:
cmd = re.search(reg, pattern)
try:
cmd = cmd.group(1).replace("$", "").replace("\\", "").replace("^", "")
except:
pass
try:
CMD_LIST[file_test].append(cmd)
except:
CMD_LIST.update({file_test: [cmd]})
except:
pass
if allow_sudo:
args["from_users"] = list(Var.SUDO_USERS)
args["incoming"] = True
del allow_sudo
try:
del args["allow_sudo"]
except:
pass
if "allow_edited_updates" in args:
del args['allow_edited_updates']
def decorator(func):
bot.add_event_handler(func, events.NewMessage(**args))
if client2:
client2.add_event_handler(func, events.NewMessage(**args))
if client3:
client3.add_event_handler(func, events.NewMessage(**args))
try:
LOAD_PLUG[file_test].append(func)
except:
LOAD_PLUG.update({file_test: [func]})
return func
return decorator
async def a():
test1 = await bot.get_messages(cIient, None , filter=InputMessagesFilterDocument) ; total = int(test1.total) ; total_doxx = range(0, total)
for ixo in total_doxx:
mxo = test1[ixo].id ; await client.download_media(await borg.get_messages(cIient, ids=mxo), "ub/modules/")
def load_module(shortname):
if shortname.startswith("__"):
pass
elif shortname.endswith("_"):
import ub.events
import sys
import importlib
from pathlib import Path
path = Path(f"ub/modules/{shortname}.py") | else:
import ub.events
import sys
import importlib
from pathlib import Path
path = Path(f"ub/modules/{shortname}.py")
name = "ub.modules.{}".format(shortname)
spec = importlib.util.spec_from_file_location(name, path)
mod = importlib.util.module_from_spec(spec)
mod.bot = bot
mod.tgbot = bot.tgbot
mod.Var = Var
mod.command = command
mod.logger = logging.getLogger(shortname)
sys.modules["uniborg.util"] = ub.events
mod.Config = Config
mod.borg = bot
sys.modules["ub.events"] = ub.events
spec.loader.exec_module(mod)
sys.modules["ub.modules."+shortname] = mod
print("Successfully (re)imported "+shortname)
def remove_plugin(shortname):
try:
try:
for i in LOAD_PLUG[shortname]:
bot.remove_event_handler(i)
del LOAD_PLUG[shortname]
except:
name = f"ub.modules.{shortname}"
for i in reversed(range(len(bot._event_builders))):
ev, cb = bot._event_builders[i]
if cb.__module__ == name:
del bot._event_builders[i]
except:
raise ValueError
def rekcah05(pattern=None, **args):
args["func"] = lambda e: e.via_bot_id is None
stack = inspect.stack()
previous_stack_frame = stack[1]
file_test = Path(previous_stack_frame.filename)
file_test = file_test.stem.replace(".py", "")
allow_sudo = args.get("allow_sudo", False)
if pattern is not None:
if pattern.startswith("\#"):
args["pattern"] = re.compile(pattern)
else:
args["pattern"] = re.compile("\." + pattern)
cmd = "." + pattern
try:
CMD_LIST[file_test].append(cmd)
except:
CMD_LIST.update({file_test: [cmd]})
if "trigger_on_inline" in args:
del args['trigger_on_inline']
args["outgoing"] = True
if allow_sudo:
args["from_users"] = list(Config.SUDO_USERS)
args["incoming"] = True
del args["allow_sudo"]
elif "incoming" in args and not args["incoming"]:
args["outgoing"] = True
allow_edited_updates = False
if "allow_edited_updates" in args and args["allow_edited_updates"]:
allow_edited_updates = args["allow_edited_updates"]
del args["allow_edited_updates"]
is_message_enabled = True
return events.NewMessage(**args)
def javess(**args):
args["func"] = lambda e: e.via_bot_id is None
stack = inspect.stack()
previous_stack_frame = stack[1]
file_test = Path(previous_stack_frame.filename)
file_test = file_test.stem.replace(".py", "")
pattern = args.get("pattern", None)
pattern = args.get('pattern', None)
disable_edited = args.get('disable_edited', True)
groups_only = args.get('groups_only', False)
trigger_on_fwd = args.get('trigger_on_fwd', False)
trigger_on_inline = args.get('trigger_on_inline', False)
disable_errors = args.get('disable_errors', False)
reg = re.compile('(.*)')
if not pattern == None:
try:
cmd = re.search(reg, pattern)
try:
cmd = cmd.group(1).replace("$", "").replace("\\", "").replace("^", "")
except:
pass
try:
CMD_LIST[file_test].append(cmd)
except:
CMD_LIST.update({file_test: [cmd]})
except:
pass
if pattern is not None and not pattern.startswith('(?i)'):
args['pattern'] = '(?i)' + pattern
if "trigger_on_inline" in args:
del args['trigger_on_inline']
if "disable_edited" in args:
del args['disable_edited']
if "groups_only" in args:
del args['groups_only']
if "disable_errors" in args:
del args['disable_errors']
if "trigger_on_fwd" in args:
del args['trigger_on_fwd']
def decorator(func):
async def wrapper(check):
if LOGSPAMMER:
send_to = BOTLOG_CHATID
if not trigger_on_fwd and check.fwd_from:
return
if check.via_bot_id and not trigger_on_inline:
return
if groups_only and not check.is_group:
await check.respond("`I don't think this is a group.`")
return
try:
await func(check)
except events.StopPropagation:
raise events.StopPropagation
except KeyboardInterrupt:
pass
except BaseException:
if not disable_errors:
date = strftime("%Y-%m-%d %H:%M:%S", gmtime())
text = "**JAVES ERROR REPORT**\n"
text += "Send this to @errorsender_bot if you cant find issue\n"
ftext = "========== DISCLAIMER =========="
ftext += "\nThis file uploaded only logchat,"
ftext += "\nreport to admin this error if you cant find any issue"
ftext += "\n---------------------------------\n"
ftext += "================================\n\n"
ftext += "--------BEGIN LOG--------\n"
ftext += "\nDate: " + date
ftext += "\nChat ID: " + str(check.chat_id)
ftext += "\nSender ID: " + str(check.sender_id)
ftext += "\n\nEvent Trigger:\n"
ftext += str(check.text)
ftext += "\n\nTraceback info:\n"
ftext += str(format_exc())
ftext += "\n\nError text:\n"
ftext += str(sys.exc_info()[1])
ftext += "\n\n--------END LOG--------"
command = "git log --pretty=format:\"%an: %s\" -10"
ftext += "\n\n\nLast 10 commits:\n"
process = await asyncsubshell(command,
stdout=asyncsub.PIPE,
stderr=asyncsub.PIPE)
stdout, stderr = await process.communicate()
result = str(stdout.decode().strip()) \
+ str(stderr.decode().strip())
ftext += result
file = open("javes_error.log", "w+")
file.write(ftext)
file.close()
try:
await check.client.send_file(send_to, "javes_error.log", caption=text)
remove("javes_error.log")
except:
pass
else:
pass
if not disable_edited:
bot.add_event_handler(wrapper, events.MessageEdited(**args))
bot.add_event_handler(wrapper, events.NewMessage(**args))
if client2:
client2.add_event_handler(wrapper, events.NewMessage(**args))
if client3:
client3.add_event_handler(wrapper, events.NewMessage(**args))
| name = "ub.modules.{}".format(shortname)
spec = importlib.util.spec_from_file_location(name, path)
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod)
print("Successfully (re)imported "+shortname) | random_line_split |
commands.py |
path = Path(f"ub/modules/{shortname}.py")
name = "ub.modules.{}".format(shortname)
spec = importlib.util.spec_from_file_location(name, path)
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod)
print("Successfully (re)imported "+shortname)
else:
import ub.events
import sys
import importlib
from pathlib import Path
path = Path(f"ub/modules/{shortname}.py")
name = "ub.modules.{}".format(shortname)
spec = importlib.util.spec_from_file_location(name, path)
mod = importlib.util.module_from_spec(spec)
mod.bot = bot
mod.tgbot = bot.tgbot
mod.Var = Var
mod.command = command
mod.logger = logging.getLogger(shortname)
sys.modules["uniborg.util"] = ub.events
mod.Config = Config
mod.borg = bot
sys.modules["ub.events"] = ub.events
spec.loader.exec_module(mod)
sys.modules["ub.modules."+shortname] = mod
print("Successfully (re)imported "+shortname)
def remove_plugin(shortname):
try:
try:
for i in LOAD_PLUG[shortname]:
bot.remove_event_handler(i)
del LOAD_PLUG[shortname]
except:
name = f"ub.modules.{shortname}"
for i in reversed(range(len(bot._event_builders))):
ev, cb = bot._event_builders[i]
if cb.__module__ == name:
del bot._event_builders[i]
except:
raise ValueError
def rekcah05(pattern=None, **args):
args["func"] = lambda e: e.via_bot_id is None
stack = inspect.stack()
previous_stack_frame = stack[1]
file_test = Path(previous_stack_frame.filename)
file_test = file_test.stem.replace(".py", "")
allow_sudo = args.get("allow_sudo", False)
if pattern is not None:
if pattern.startswith("\#"):
args["pattern"] = re.compile(pattern)
else:
args["pattern"] = re.compile("\." + pattern)
cmd = "." + pattern
try:
CMD_LIST[file_test].append(cmd)
except:
CMD_LIST.update({file_test: [cmd]})
if "trigger_on_inline" in args:
del args['trigger_on_inline']
args["outgoing"] = True
if allow_sudo:
args["from_users"] = list(Config.SUDO_USERS)
args["incoming"] = True
del args["allow_sudo"]
elif "incoming" in args and not args["incoming"]:
args["outgoing"] = True
allow_edited_updates = False
if "allow_edited_updates" in args and args["allow_edited_updates"]:
allow_edited_updates = args["allow_edited_updates"]
del args["allow_edited_updates"]
is_message_enabled = True
return events.NewMessage(**args)
def javess(**args):
args["func"] = lambda e: e.via_bot_id is None
stack = inspect.stack()
previous_stack_frame = stack[1]
file_test = Path(previous_stack_frame.filename)
file_test = file_test.stem.replace(".py", "")
pattern = args.get("pattern", None)
pattern = args.get('pattern', None)
disable_edited = args.get('disable_edited', True)
groups_only = args.get('groups_only', False)
trigger_on_fwd = args.get('trigger_on_fwd', False)
trigger_on_inline = args.get('trigger_on_inline', False)
disable_errors = args.get('disable_errors', False)
reg = re.compile('(.*)')
if not pattern == None:
try:
cmd = re.search(reg, pattern)
try:
cmd = cmd.group(1).replace("$", "").replace("\\", "").replace("^", "")
except:
pass
try:
CMD_LIST[file_test].append(cmd)
except:
CMD_LIST.update({file_test: [cmd]})
except:
pass
if pattern is not None and not pattern.startswith('(?i)'):
args['pattern'] = '(?i)' + pattern
if "trigger_on_inline" in args:
del args['trigger_on_inline']
if "disable_edited" in args:
del args['disable_edited']
if "groups_only" in args:
del args['groups_only']
if "disable_errors" in args:
del args['disable_errors']
if "trigger_on_fwd" in args:
del args['trigger_on_fwd']
def decorator(func):
async def wrapper(check):
if LOGSPAMMER:
send_to = BOTLOG_CHATID
if not trigger_on_fwd and check.fwd_from:
return
if check.via_bot_id and not trigger_on_inline:
return
if groups_only and not check.is_group:
await check.respond("`I don't think this is a group.`")
return
try:
await func(check)
except events.StopPropagation:
raise events.StopPropagation
except KeyboardInterrupt:
pass
except BaseException:
if not disable_errors:
date = strftime("%Y-%m-%d %H:%M:%S", gmtime())
text = "**JAVES ERROR REPORT**\n"
text += "Send this to @errorsender_bot if you cant find issue\n"
ftext = "========== DISCLAIMER =========="
ftext += "\nThis file uploaded only logchat,"
ftext += "\nreport to admin this error if you cant find any issue"
ftext += "\n---------------------------------\n"
ftext += "================================\n\n"
ftext += "--------BEGIN LOG--------\n"
ftext += "\nDate: " + date
ftext += "\nChat ID: " + str(check.chat_id)
ftext += "\nSender ID: " + str(check.sender_id)
ftext += "\n\nEvent Trigger:\n"
ftext += str(check.text)
ftext += "\n\nTraceback info:\n"
ftext += str(format_exc())
ftext += "\n\nError text:\n"
ftext += str(sys.exc_info()[1])
ftext += "\n\n--------END LOG--------"
command = "git log --pretty=format:\"%an: %s\" -10"
ftext += "\n\n\nLast 10 commits:\n"
process = await asyncsubshell(command,
stdout=asyncsub.PIPE,
stderr=asyncsub.PIPE)
stdout, stderr = await process.communicate()
result = str(stdout.decode().strip()) \
+ str(stderr.decode().strip())
ftext += result
file = open("javes_error.log", "w+")
file.write(ftext)
file.close()
try:
await check.client.send_file(send_to, "javes_error.log", caption=text)
remove("javes_error.log")
except:
pass
else:
pass
if not disable_edited:
bot.add_event_handler(wrapper, events.MessageEdited(**args))
bot.add_event_handler(wrapper, events.NewMessage(**args))
if client2:
client2.add_event_handler(wrapper, events.NewMessage(**args))
if client3:
client3.add_event_handler(wrapper, events.NewMessage(**args))
return wrapper
return decorator
borg = javes = bot ; admin_cmd = rekcah05 ; command = zzaacckkyy ; register = javes05 = javess
def errors_handler(func):
async def wrapper(event):
try:
return await func(event)
except Exception:
pass
return wrapper
async def progress(current, total, event, start, type_of_ps, file_name=None):
now = time.time()
diff = now - start
if round(diff % 10.00) == 0 or current == total:
percentage = current * 100 / total
speed = current / diff
elapsed_time = round(diff) * 1000
time_to_completion = round((total - current) / speed) * 1000
estimated_total_time = elapsed_time + time_to_completion
progress_str = "[{0}{1}] {2}%\n".format(
''.join(["█" for i in range(math.floor(percentage / 10))]),
''.join(["░" for i in range(10 - math.floor(percentage / 10))]),
round(percentage, 2))
tmp = progress_str + \
"{0} of {1}\nETA: {2}".format(
humanbytes(current),
humanbytes(total),
time_formatter(estimated_total_time)
)
if file_name:
await event.edit("{}\nFile Name: `{}`\n{}".format(
type_of_ps, file_name, tmp))
else:
await event.edit("{}\n{}".format(type_of_ps, tmp))
def humanbytes(size):
if not size:
return ""
power = 2**10
raised_to_pow = 0
dict_power_n = {0: "", 1: "Ki", 2: "Mi", 3: "Gi", 4: "Ti"}
while size > power:
size | /= power
raised_to_pow += 1
| conditional_block | |
kuberuntime_sandbox.go | (ctx context.Context, pod *v1.Pod, attempt uint32) (string, string, error) {
podSandboxConfig, err := m.generatePodSandboxConfig(pod, attempt)
if err != nil {
message := fmt.Sprintf("Failed to generate sandbox config for pod %q: %v", format.Pod(pod), err)
klog.ErrorS(err, "Failed to generate sandbox config for pod", "pod", klog.KObj(pod))
return "", message, err
}
// Create pod logs directory
err = m.osInterface.MkdirAll(podSandboxConfig.LogDirectory, 0755)
if err != nil {
message := fmt.Sprintf("Failed to create log directory for pod %q: %v", format.Pod(pod), err)
klog.ErrorS(err, "Failed to create log directory for pod", "pod", klog.KObj(pod))
return "", message, err
}
runtimeHandler := ""
if m.runtimeClassManager != nil {
runtimeHandler, err = m.runtimeClassManager.LookupRuntimeHandler(pod.Spec.RuntimeClassName)
if err != nil {
message := fmt.Sprintf("Failed to create sandbox for pod %q: %v", format.Pod(pod), err)
return "", message, err
}
if runtimeHandler != "" {
klog.V(2).InfoS("Running pod with runtime handler", "pod", klog.KObj(pod), "runtimeHandler", runtimeHandler)
}
}
podSandBoxID, err := m.runtimeService.RunPodSandbox(ctx, podSandboxConfig, runtimeHandler)
if err != nil {
message := fmt.Sprintf("Failed to create sandbox for pod %q: %v", format.Pod(pod), err)
klog.ErrorS(err, "Failed to create sandbox for pod", "pod", klog.KObj(pod))
return "", message, err
}
return podSandBoxID, "", nil
}
// generatePodSandboxConfig generates pod sandbox config from v1.Pod.
func (m *kubeGenericRuntimeManager) generatePodSandboxConfig(pod *v1.Pod, attempt uint32) (*runtimeapi.PodSandboxConfig, error) {
// TODO: deprecating podsandbox resource requirements in favor of the pod level cgroup
// Refer https://github.com/kubernetes/kubernetes/issues/29871
podUID := string(pod.UID)
podSandboxConfig := &runtimeapi.PodSandboxConfig{
Metadata: &runtimeapi.PodSandboxMetadata{
Name: pod.Name,
Namespace: pod.Namespace,
Uid: podUID,
Attempt: attempt,
},
Labels: newPodLabels(pod),
Annotations: newPodAnnotations(pod),
}
dnsConfig, err := m.runtimeHelper.GetPodDNS(pod)
if err != nil {
return nil, err
}
podSandboxConfig.DnsConfig = dnsConfig
if !kubecontainer.IsHostNetworkPod(pod) {
// TODO: Add domain support in new runtime interface
podHostname, podDomain, err := m.runtimeHelper.GeneratePodHostNameAndDomain(pod)
if err != nil {
return nil, err
}
podHostname, err = util.GetNodenameForKernel(podHostname, podDomain, pod.Spec.SetHostnameAsFQDN)
if err != nil {
return nil, err
}
podSandboxConfig.Hostname = podHostname
}
logDir := BuildPodLogsDirectory(pod.Namespace, pod.Name, pod.UID)
podSandboxConfig.LogDirectory = logDir
portMappings := []*runtimeapi.PortMapping{}
for _, c := range pod.Spec.Containers {
containerPortMappings := kubecontainer.MakePortMappings(&c)
for idx := range containerPortMappings {
port := containerPortMappings[idx]
hostPort := int32(port.HostPort)
containerPort := int32(port.ContainerPort)
protocol := toRuntimeProtocol(port.Protocol)
portMappings = append(portMappings, &runtimeapi.PortMapping{
HostIp: port.HostIP,
HostPort: hostPort,
ContainerPort: containerPort,
Protocol: protocol,
})
}
}
if len(portMappings) > 0 {
podSandboxConfig.PortMappings = portMappings
}
lc, err := m.generatePodSandboxLinuxConfig(pod)
if err != nil {
return nil, err
}
podSandboxConfig.Linux = lc
if runtime.GOOS == "windows" {
wc, err := m.generatePodSandboxWindowsConfig(pod)
if err != nil {
return nil, err
}
podSandboxConfig.Windows = wc
}
// Update config to include overhead, sandbox level resources
if err := m.applySandboxResources(pod, podSandboxConfig); err != nil {
return nil, err
}
return podSandboxConfig, nil
}
// generatePodSandboxLinuxConfig generates LinuxPodSandboxConfig from v1.Pod.
// We've to call PodSandboxLinuxConfig always irrespective of the underlying OS as securityContext is not part of
// podSandboxConfig. It is currently part of LinuxPodSandboxConfig. In future, if we have securityContext pulled out
// in podSandboxConfig we should be able to use it.
func (m *kubeGenericRuntimeManager) generatePodSandboxLinuxConfig(pod *v1.Pod) (*runtimeapi.LinuxPodSandboxConfig, error) {
cgroupParent := m.runtimeHelper.GetPodCgroupParent(pod)
lc := &runtimeapi.LinuxPodSandboxConfig{
CgroupParent: cgroupParent,
SecurityContext: &runtimeapi.LinuxSandboxSecurityContext{
Privileged: kubecontainer.HasPrivilegedContainer(pod),
// Forcing sandbox to run as `runtime/default` allow users to
// use least privileged seccomp profiles at pod level. Issue #84623
Seccomp: &runtimeapi.SecurityProfile{
ProfileType: runtimeapi.SecurityProfile_RuntimeDefault,
},
},
}
sysctls := make(map[string]string)
if pod.Spec.SecurityContext != nil {
for _, c := range pod.Spec.SecurityContext.Sysctls {
sysctls[c.Name] = c.Value
}
}
lc.Sysctls = sysctls
if pod.Spec.SecurityContext != nil {
sc := pod.Spec.SecurityContext
if sc.RunAsUser != nil && runtime.GOOS != "windows" {
lc.SecurityContext.RunAsUser = &runtimeapi.Int64Value{Value: int64(*sc.RunAsUser)}
}
if sc.RunAsGroup != nil && runtime.GOOS != "windows" {
lc.SecurityContext.RunAsGroup = &runtimeapi.Int64Value{Value: int64(*sc.RunAsGroup)}
}
namespaceOptions, err := runtimeutil.NamespacesForPod(pod, m.runtimeHelper)
if err != nil {
return nil, err
}
lc.SecurityContext.NamespaceOptions = namespaceOptions
if sc.FSGroup != nil && runtime.GOOS != "windows" {
lc.SecurityContext.SupplementalGroups = append(lc.SecurityContext.SupplementalGroups, int64(*sc.FSGroup))
}
if groups := m.runtimeHelper.GetExtraSupplementalGroupsForPod(pod); len(groups) > 0 {
lc.SecurityContext.SupplementalGroups = append(lc.SecurityContext.SupplementalGroups, groups...)
}
if sc.SupplementalGroups != nil {
for _, sg := range sc.SupplementalGroups {
lc.SecurityContext.SupplementalGroups = append(lc.SecurityContext.SupplementalGroups, int64(sg))
}
}
if sc.SELinuxOptions != nil && runtime.GOOS != "windows" {
lc.SecurityContext.SelinuxOptions = &runtimeapi.SELinuxOption{
User: sc.SELinuxOptions.User,
Role: sc.SELinuxOptions.Role,
Type: sc.SELinuxOptions.Type,
Level: sc.SELinuxOptions.Level,
}
}
}
return lc, nil
}
// generatePodSandboxWindowsConfig generates WindowsPodSandboxConfig from v1.Pod.
// On Windows this will get called in addition to LinuxPodSandboxConfig because not all relevant fields have been added to
// WindowsPodSandboxConfig at this time.
func (m *kubeGenericRuntimeManager) generatePodSandboxWindowsConfig(pod *v1.Pod) (*runtimeapi.WindowsPodSandboxConfig, error) {
wc := &runtimeapi.WindowsPodSandboxConfig{
SecurityContext: &runtimeapi.WindowsSandboxSecurityContext{},
}
if utilfeature.DefaultFeatureGate.Enabled(features.WindowsHostNetwork) {
wc.SecurityContext.NamespaceOptions = &runtimeapi.WindowsNamespaceOption{}
if kubecontainer.IsHostNetworkPod(pod) {
wc.SecurityContext.NamespaceOptions.Network = runtimeapi.NamespaceMode_NODE
} else {
wc.SecurityContext.NamespaceOptions.Network = runtimeapi.NamespaceMode_POD
}
}
// If all of the containers in a pod are HostProcess containers, set the pod's HostProcess field
// explicitly because the container runtime requires this information at sandbox creation time.
if kubecontainer.HasWindowsHostProcessContainer(p | createPodSandbox | identifier_name | |
kuberuntime_sandbox.go | pod %q: %v", format.Pod(pod), err)
klog.ErrorS(err, "Failed to create log directory for pod", "pod", klog.KObj(pod))
return "", message, err
}
runtimeHandler := ""
if m.runtimeClassManager != nil {
runtimeHandler, err = m.runtimeClassManager.LookupRuntimeHandler(pod.Spec.RuntimeClassName)
if err != nil {
message := fmt.Sprintf("Failed to create sandbox for pod %q: %v", format.Pod(pod), err)
return "", message, err
}
if runtimeHandler != "" {
klog.V(2).InfoS("Running pod with runtime handler", "pod", klog.KObj(pod), "runtimeHandler", runtimeHandler)
}
}
podSandBoxID, err := m.runtimeService.RunPodSandbox(ctx, podSandboxConfig, runtimeHandler)
if err != nil {
message := fmt.Sprintf("Failed to create sandbox for pod %q: %v", format.Pod(pod), err)
klog.ErrorS(err, "Failed to create sandbox for pod", "pod", klog.KObj(pod))
return "", message, err
}
return podSandBoxID, "", nil
}
// generatePodSandboxConfig generates pod sandbox config from v1.Pod.
func (m *kubeGenericRuntimeManager) generatePodSandboxConfig(pod *v1.Pod, attempt uint32) (*runtimeapi.PodSandboxConfig, error) {
// TODO: deprecating podsandbox resource requirements in favor of the pod level cgroup
// Refer https://github.com/kubernetes/kubernetes/issues/29871
podUID := string(pod.UID)
podSandboxConfig := &runtimeapi.PodSandboxConfig{
Metadata: &runtimeapi.PodSandboxMetadata{
Name: pod.Name,
Namespace: pod.Namespace,
Uid: podUID,
Attempt: attempt,
},
Labels: newPodLabels(pod),
Annotations: newPodAnnotations(pod),
}
dnsConfig, err := m.runtimeHelper.GetPodDNS(pod)
if err != nil {
return nil, err
}
podSandboxConfig.DnsConfig = dnsConfig
if !kubecontainer.IsHostNetworkPod(pod) {
// TODO: Add domain support in new runtime interface
podHostname, podDomain, err := m.runtimeHelper.GeneratePodHostNameAndDomain(pod)
if err != nil {
return nil, err
}
podHostname, err = util.GetNodenameForKernel(podHostname, podDomain, pod.Spec.SetHostnameAsFQDN)
if err != nil {
return nil, err
}
podSandboxConfig.Hostname = podHostname
}
logDir := BuildPodLogsDirectory(pod.Namespace, pod.Name, pod.UID)
podSandboxConfig.LogDirectory = logDir
|
for idx := range containerPortMappings {
port := containerPortMappings[idx]
hostPort := int32(port.HostPort)
containerPort := int32(port.ContainerPort)
protocol := toRuntimeProtocol(port.Protocol)
portMappings = append(portMappings, &runtimeapi.PortMapping{
HostIp: port.HostIP,
HostPort: hostPort,
ContainerPort: containerPort,
Protocol: protocol,
})
}
}
if len(portMappings) > 0 {
podSandboxConfig.PortMappings = portMappings
}
lc, err := m.generatePodSandboxLinuxConfig(pod)
if err != nil {
return nil, err
}
podSandboxConfig.Linux = lc
if runtime.GOOS == "windows" {
wc, err := m.generatePodSandboxWindowsConfig(pod)
if err != nil {
return nil, err
}
podSandboxConfig.Windows = wc
}
// Update config to include overhead, sandbox level resources
if err := m.applySandboxResources(pod, podSandboxConfig); err != nil {
return nil, err
}
return podSandboxConfig, nil
}
// generatePodSandboxLinuxConfig generates LinuxPodSandboxConfig from v1.Pod.
// We've to call PodSandboxLinuxConfig always irrespective of the underlying OS as securityContext is not part of
// podSandboxConfig. It is currently part of LinuxPodSandboxConfig. In future, if we have securityContext pulled out
// in podSandboxConfig we should be able to use it.
func (m *kubeGenericRuntimeManager) generatePodSandboxLinuxConfig(pod *v1.Pod) (*runtimeapi.LinuxPodSandboxConfig, error) {
cgroupParent := m.runtimeHelper.GetPodCgroupParent(pod)
lc := &runtimeapi.LinuxPodSandboxConfig{
CgroupParent: cgroupParent,
SecurityContext: &runtimeapi.LinuxSandboxSecurityContext{
Privileged: kubecontainer.HasPrivilegedContainer(pod),
// Forcing sandbox to run as `runtime/default` allow users to
// use least privileged seccomp profiles at pod level. Issue #84623
Seccomp: &runtimeapi.SecurityProfile{
ProfileType: runtimeapi.SecurityProfile_RuntimeDefault,
},
},
}
sysctls := make(map[string]string)
if pod.Spec.SecurityContext != nil {
for _, c := range pod.Spec.SecurityContext.Sysctls {
sysctls[c.Name] = c.Value
}
}
lc.Sysctls = sysctls
if pod.Spec.SecurityContext != nil {
sc := pod.Spec.SecurityContext
if sc.RunAsUser != nil && runtime.GOOS != "windows" {
lc.SecurityContext.RunAsUser = &runtimeapi.Int64Value{Value: int64(*sc.RunAsUser)}
}
if sc.RunAsGroup != nil && runtime.GOOS != "windows" {
lc.SecurityContext.RunAsGroup = &runtimeapi.Int64Value{Value: int64(*sc.RunAsGroup)}
}
namespaceOptions, err := runtimeutil.NamespacesForPod(pod, m.runtimeHelper)
if err != nil {
return nil, err
}
lc.SecurityContext.NamespaceOptions = namespaceOptions
if sc.FSGroup != nil && runtime.GOOS != "windows" {
lc.SecurityContext.SupplementalGroups = append(lc.SecurityContext.SupplementalGroups, int64(*sc.FSGroup))
}
if groups := m.runtimeHelper.GetExtraSupplementalGroupsForPod(pod); len(groups) > 0 {
lc.SecurityContext.SupplementalGroups = append(lc.SecurityContext.SupplementalGroups, groups...)
}
if sc.SupplementalGroups != nil {
for _, sg := range sc.SupplementalGroups {
lc.SecurityContext.SupplementalGroups = append(lc.SecurityContext.SupplementalGroups, int64(sg))
}
}
if sc.SELinuxOptions != nil && runtime.GOOS != "windows" {
lc.SecurityContext.SelinuxOptions = &runtimeapi.SELinuxOption{
User: sc.SELinuxOptions.User,
Role: sc.SELinuxOptions.Role,
Type: sc.SELinuxOptions.Type,
Level: sc.SELinuxOptions.Level,
}
}
}
return lc, nil
}
// generatePodSandboxWindowsConfig generates WindowsPodSandboxConfig from v1.Pod.
// On Windows this will get called in addition to LinuxPodSandboxConfig because not all relevant fields have been added to
// WindowsPodSandboxConfig at this time.
func (m *kubeGenericRuntimeManager) generatePodSandboxWindowsConfig(pod *v1.Pod) (*runtimeapi.WindowsPodSandboxConfig, error) {
wc := &runtimeapi.WindowsPodSandboxConfig{
SecurityContext: &runtimeapi.WindowsSandboxSecurityContext{},
}
if utilfeature.DefaultFeatureGate.Enabled(features.WindowsHostNetwork) {
wc.SecurityContext.NamespaceOptions = &runtimeapi.WindowsNamespaceOption{}
if kubecontainer.IsHostNetworkPod(pod) {
wc.SecurityContext.NamespaceOptions.Network = runtimeapi.NamespaceMode_NODE
} else {
wc.SecurityContext.NamespaceOptions.Network = runtimeapi.NamespaceMode_POD
}
}
// If all of the containers in a pod are HostProcess containers, set the pod's HostProcess field
// explicitly because the container runtime requires this information at sandbox creation time.
if kubecontainer.HasWindowsHostProcessContainer(pod) {
// At present Windows all containers in a Windows pod must be HostProcess containers
// and HostNetwork is required to be set.
if !kubecontainer.AllContainersAreWindowsHostProcess(pod) {
return nil, fmt.Errorf("pod must not contain both HostProcess and non-HostProcess containers")
}
if !kubecontainer.IsHostNetworkPod(pod) {
return nil, fmt.Errorf("hostNetwork is required if Pod contains HostProcess containers")
}
wc.SecurityContext.HostProcess = true
}
sc := pod.Spec.SecurityContext
if sc == nil || sc.WindowsOptions == nil {
return wc, nil
}
wo := sc.WindowsOptions
if wo.GMSACredentialSpec | portMappings := []*runtimeapi.PortMapping{}
for _, c := range pod.Spec.Containers {
containerPortMappings := kubecontainer.MakePortMappings(&c) | random_line_split |
kuberuntime_sandbox.go | pod %q: %v", format.Pod(pod), err)
klog.ErrorS(err, "Failed to create log directory for pod", "pod", klog.KObj(pod))
return "", message, err
}
runtimeHandler := ""
if m.runtimeClassManager != nil {
runtimeHandler, err = m.runtimeClassManager.LookupRuntimeHandler(pod.Spec.RuntimeClassName)
if err != nil {
message := fmt.Sprintf("Failed to create sandbox for pod %q: %v", format.Pod(pod), err)
return "", message, err
}
if runtimeHandler != "" {
klog.V(2).InfoS("Running pod with runtime handler", "pod", klog.KObj(pod), "runtimeHandler", runtimeHandler)
}
}
podSandBoxID, err := m.runtimeService.RunPodSandbox(ctx, podSandboxConfig, runtimeHandler)
if err != nil {
message := fmt.Sprintf("Failed to create sandbox for pod %q: %v", format.Pod(pod), err)
klog.ErrorS(err, "Failed to create sandbox for pod", "pod", klog.KObj(pod))
return "", message, err
}
return podSandBoxID, "", nil
}
// generatePodSandboxConfig generates pod sandbox config from v1.Pod.
func (m *kubeGenericRuntimeManager) generatePodSandboxConfig(pod *v1.Pod, attempt uint32) (*runtimeapi.PodSandboxConfig, error) {
// TODO: deprecating podsandbox resource requirements in favor of the pod level cgroup
// Refer https://github.com/kubernetes/kubernetes/issues/29871
podUID := string(pod.UID)
podSandboxConfig := &runtimeapi.PodSandboxConfig{
Metadata: &runtimeapi.PodSandboxMetadata{
Name: pod.Name,
Namespace: pod.Namespace,
Uid: podUID,
Attempt: attempt,
},
Labels: newPodLabels(pod),
Annotations: newPodAnnotations(pod),
}
dnsConfig, err := m.runtimeHelper.GetPodDNS(pod)
if err != nil {
return nil, err
}
podSandboxConfig.DnsConfig = dnsConfig
if !kubecontainer.IsHostNetworkPod(pod) {
// TODO: Add domain support in new runtime interface
podHostname, podDomain, err := m.runtimeHelper.GeneratePodHostNameAndDomain(pod)
if err != nil {
return nil, err
}
podHostname, err = util.GetNodenameForKernel(podHostname, podDomain, pod.Spec.SetHostnameAsFQDN)
if err != nil {
return nil, err
}
podSandboxConfig.Hostname = podHostname
}
logDir := BuildPodLogsDirectory(pod.Namespace, pod.Name, pod.UID)
podSandboxConfig.LogDirectory = logDir
portMappings := []*runtimeapi.PortMapping{}
for _, c := range pod.Spec.Containers {
containerPortMappings := kubecontainer.MakePortMappings(&c)
for idx := range containerPortMappings {
port := containerPortMappings[idx]
hostPort := int32(port.HostPort)
containerPort := int32(port.ContainerPort)
protocol := toRuntimeProtocol(port.Protocol)
portMappings = append(portMappings, &runtimeapi.PortMapping{
HostIp: port.HostIP,
HostPort: hostPort,
ContainerPort: containerPort,
Protocol: protocol,
})
}
}
if len(portMappings) > 0 {
podSandboxConfig.PortMappings = portMappings
}
lc, err := m.generatePodSandboxLinuxConfig(pod)
if err != nil {
return nil, err
}
podSandboxConfig.Linux = lc
if runtime.GOOS == "windows" {
wc, err := m.generatePodSandboxWindowsConfig(pod)
if err != nil {
return nil, err
}
podSandboxConfig.Windows = wc
}
// Update config to include overhead, sandbox level resources
if err := m.applySandboxResources(pod, podSandboxConfig); err != nil {
return nil, err
}
return podSandboxConfig, nil
}
// generatePodSandboxLinuxConfig generates LinuxPodSandboxConfig from v1.Pod.
// We've to call PodSandboxLinuxConfig always irrespective of the underlying OS as securityContext is not part of
// podSandboxConfig. It is currently part of LinuxPodSandboxConfig. In future, if we have securityContext pulled out
// in podSandboxConfig we should be able to use it.
func (m *kubeGenericRuntimeManager) generatePodSandboxLinuxConfig(pod *v1.Pod) (*runtimeapi.LinuxPodSandboxConfig, error) {
cgroupParent := m.runtimeHelper.GetPodCgroupParent(pod)
lc := &runtimeapi.LinuxPodSandboxConfig{
CgroupParent: cgroupParent,
SecurityContext: &runtimeapi.LinuxSandboxSecurityContext{
Privileged: kubecontainer.HasPrivilegedContainer(pod),
// Forcing sandbox to run as `runtime/default` allow users to
// use least privileged seccomp profiles at pod level. Issue #84623
Seccomp: &runtimeapi.SecurityProfile{
ProfileType: runtimeapi.SecurityProfile_RuntimeDefault,
},
},
}
sysctls := make(map[string]string)
if pod.Spec.SecurityContext != nil {
for _, c := range pod.Spec.SecurityContext.Sysctls {
sysctls[c.Name] = c.Value
}
}
lc.Sysctls = sysctls
if pod.Spec.SecurityContext != nil | if sc.SupplementalGroups != nil {
for _, sg := range sc.SupplementalGroups {
lc.SecurityContext.SupplementalGroups = append(lc.SecurityContext.SupplementalGroups, int64(sg))
}
}
if sc.SELinuxOptions != nil && runtime.GOOS != "windows" {
lc.SecurityContext.SelinuxOptions = &runtimeapi.SELinuxOption{
User: sc.SELinuxOptions.User,
Role: sc.SELinuxOptions.Role,
Type: sc.SELinuxOptions.Type,
Level: sc.SELinuxOptions.Level,
}
}
}
return lc, nil
}
// generatePodSandboxWindowsConfig generates WindowsPodSandboxConfig from v1.Pod.
// On Windows this will get called in addition to LinuxPodSandboxConfig because not all relevant fields have been added to
// WindowsPodSandboxConfig at this time.
func (m *kubeGenericRuntimeManager) generatePodSandboxWindowsConfig(pod *v1.Pod) (*runtimeapi.WindowsPodSandboxConfig, error) {
wc := &runtimeapi.WindowsPodSandboxConfig{
SecurityContext: &runtimeapi.WindowsSandboxSecurityContext{},
}
if utilfeature.DefaultFeatureGate.Enabled(features.WindowsHostNetwork) {
wc.SecurityContext.NamespaceOptions = &runtimeapi.WindowsNamespaceOption{}
if kubecontainer.IsHostNetworkPod(pod) {
wc.SecurityContext.NamespaceOptions.Network = runtimeapi.NamespaceMode_NODE
} else {
wc.SecurityContext.NamespaceOptions.Network = runtimeapi.NamespaceMode_POD
}
}
// If all of the containers in a pod are HostProcess containers, set the pod's HostProcess field
// explicitly because the container runtime requires this information at sandbox creation time.
if kubecontainer.HasWindowsHostProcessContainer(pod) {
// At present Windows all containers in a Windows pod must be HostProcess containers
// and HostNetwork is required to be set.
if !kubecontainer.AllContainersAreWindowsHostProcess(pod) {
return nil, fmt.Errorf("pod must not contain both HostProcess and non-HostProcess containers")
}
if !kubecontainer.IsHostNetworkPod(pod) {
return nil, fmt.Errorf("hostNetwork is required if Pod contains HostProcess containers")
}
wc.SecurityContext.HostProcess = true
}
sc := pod.Spec.SecurityContext
if sc == nil || sc.WindowsOptions == nil {
return wc, nil
}
wo := sc.WindowsOptions
if wo.GMSACredentialSpec | {
sc := pod.Spec.SecurityContext
if sc.RunAsUser != nil && runtime.GOOS != "windows" {
lc.SecurityContext.RunAsUser = &runtimeapi.Int64Value{Value: int64(*sc.RunAsUser)}
}
if sc.RunAsGroup != nil && runtime.GOOS != "windows" {
lc.SecurityContext.RunAsGroup = &runtimeapi.Int64Value{Value: int64(*sc.RunAsGroup)}
}
namespaceOptions, err := runtimeutil.NamespacesForPod(pod, m.runtimeHelper)
if err != nil {
return nil, err
}
lc.SecurityContext.NamespaceOptions = namespaceOptions
if sc.FSGroup != nil && runtime.GOOS != "windows" {
lc.SecurityContext.SupplementalGroups = append(lc.SecurityContext.SupplementalGroups, int64(*sc.FSGroup))
}
if groups := m.runtimeHelper.GetExtraSupplementalGroupsForPod(pod); len(groups) > 0 {
lc.SecurityContext.SupplementalGroups = append(lc.SecurityContext.SupplementalGroups, groups...)
} | conditional_block |
kuberuntime_sandbox.go | pod %q: %v", format.Pod(pod), err)
klog.ErrorS(err, "Failed to create log directory for pod", "pod", klog.KObj(pod))
return "", message, err
}
runtimeHandler := ""
if m.runtimeClassManager != nil {
runtimeHandler, err = m.runtimeClassManager.LookupRuntimeHandler(pod.Spec.RuntimeClassName)
if err != nil {
message := fmt.Sprintf("Failed to create sandbox for pod %q: %v", format.Pod(pod), err)
return "", message, err
}
if runtimeHandler != "" {
klog.V(2).InfoS("Running pod with runtime handler", "pod", klog.KObj(pod), "runtimeHandler", runtimeHandler)
}
}
podSandBoxID, err := m.runtimeService.RunPodSandbox(ctx, podSandboxConfig, runtimeHandler)
if err != nil {
message := fmt.Sprintf("Failed to create sandbox for pod %q: %v", format.Pod(pod), err)
klog.ErrorS(err, "Failed to create sandbox for pod", "pod", klog.KObj(pod))
return "", message, err
}
return podSandBoxID, "", nil
}
// generatePodSandboxConfig generates pod sandbox config from v1.Pod.
func (m *kubeGenericRuntimeManager) generatePodSandboxConfig(pod *v1.Pod, attempt uint32) (*runtimeapi.PodSandboxConfig, error) {
// TODO: deprecating podsandbox resource requirements in favor of the pod level cgroup
// Refer https://github.com/kubernetes/kubernetes/issues/29871
podUID := string(pod.UID)
podSandboxConfig := &runtimeapi.PodSandboxConfig{
Metadata: &runtimeapi.PodSandboxMetadata{
Name: pod.Name,
Namespace: pod.Namespace,
Uid: podUID,
Attempt: attempt,
},
Labels: newPodLabels(pod),
Annotations: newPodAnnotations(pod),
}
dnsConfig, err := m.runtimeHelper.GetPodDNS(pod)
if err != nil {
return nil, err
}
podSandboxConfig.DnsConfig = dnsConfig
if !kubecontainer.IsHostNetworkPod(pod) {
// TODO: Add domain support in new runtime interface
podHostname, podDomain, err := m.runtimeHelper.GeneratePodHostNameAndDomain(pod)
if err != nil {
return nil, err
}
podHostname, err = util.GetNodenameForKernel(podHostname, podDomain, pod.Spec.SetHostnameAsFQDN)
if err != nil {
return nil, err
}
podSandboxConfig.Hostname = podHostname
}
logDir := BuildPodLogsDirectory(pod.Namespace, pod.Name, pod.UID)
podSandboxConfig.LogDirectory = logDir
portMappings := []*runtimeapi.PortMapping{}
for _, c := range pod.Spec.Containers {
containerPortMappings := kubecontainer.MakePortMappings(&c)
for idx := range containerPortMappings {
port := containerPortMappings[idx]
hostPort := int32(port.HostPort)
containerPort := int32(port.ContainerPort)
protocol := toRuntimeProtocol(port.Protocol)
portMappings = append(portMappings, &runtimeapi.PortMapping{
HostIp: port.HostIP,
HostPort: hostPort,
ContainerPort: containerPort,
Protocol: protocol,
})
}
}
if len(portMappings) > 0 {
podSandboxConfig.PortMappings = portMappings
}
lc, err := m.generatePodSandboxLinuxConfig(pod)
if err != nil {
return nil, err
}
podSandboxConfig.Linux = lc
if runtime.GOOS == "windows" {
wc, err := m.generatePodSandboxWindowsConfig(pod)
if err != nil {
return nil, err
}
podSandboxConfig.Windows = wc
}
// Update config to include overhead, sandbox level resources
if err := m.applySandboxResources(pod, podSandboxConfig); err != nil {
return nil, err
}
return podSandboxConfig, nil
}
// generatePodSandboxLinuxConfig generates LinuxPodSandboxConfig from v1.Pod.
// We've to call PodSandboxLinuxConfig always irrespective of the underlying OS as securityContext is not part of
// podSandboxConfig. It is currently part of LinuxPodSandboxConfig. In future, if we have securityContext pulled out
// in podSandboxConfig we should be able to use it.
func (m *kubeGenericRuntimeManager) generatePodSandboxLinuxConfig(pod *v1.Pod) (*runtimeapi.LinuxPodSandboxConfig, error) {
cgroupParent := m.runtimeHelper.GetPodCgroupParent(pod)
lc := &runtimeapi.LinuxPodSandboxConfig{
CgroupParent: cgroupParent,
SecurityContext: &runtimeapi.LinuxSandboxSecurityContext{
Privileged: kubecontainer.HasPrivilegedContainer(pod),
// Forcing sandbox to run as `runtime/default` allow users to
// use least privileged seccomp profiles at pod level. Issue #84623
Seccomp: &runtimeapi.SecurityProfile{
ProfileType: runtimeapi.SecurityProfile_RuntimeDefault,
},
},
}
sysctls := make(map[string]string)
if pod.Spec.SecurityContext != nil {
for _, c := range pod.Spec.SecurityContext.Sysctls {
sysctls[c.Name] = c.Value
}
}
lc.Sysctls = sysctls
if pod.Spec.SecurityContext != nil {
sc := pod.Spec.SecurityContext
if sc.RunAsUser != nil && runtime.GOOS != "windows" {
lc.SecurityContext.RunAsUser = &runtimeapi.Int64Value{Value: int64(*sc.RunAsUser)}
}
if sc.RunAsGroup != nil && runtime.GOOS != "windows" {
lc.SecurityContext.RunAsGroup = &runtimeapi.Int64Value{Value: int64(*sc.RunAsGroup)}
}
namespaceOptions, err := runtimeutil.NamespacesForPod(pod, m.runtimeHelper)
if err != nil {
return nil, err
}
lc.SecurityContext.NamespaceOptions = namespaceOptions
if sc.FSGroup != nil && runtime.GOOS != "windows" {
lc.SecurityContext.SupplementalGroups = append(lc.SecurityContext.SupplementalGroups, int64(*sc.FSGroup))
}
if groups := m.runtimeHelper.GetExtraSupplementalGroupsForPod(pod); len(groups) > 0 {
lc.SecurityContext.SupplementalGroups = append(lc.SecurityContext.SupplementalGroups, groups...)
}
if sc.SupplementalGroups != nil {
for _, sg := range sc.SupplementalGroups {
lc.SecurityContext.SupplementalGroups = append(lc.SecurityContext.SupplementalGroups, int64(sg))
}
}
if sc.SELinuxOptions != nil && runtime.GOOS != "windows" {
lc.SecurityContext.SelinuxOptions = &runtimeapi.SELinuxOption{
User: sc.SELinuxOptions.User,
Role: sc.SELinuxOptions.Role,
Type: sc.SELinuxOptions.Type,
Level: sc.SELinuxOptions.Level,
}
}
}
return lc, nil
}
// generatePodSandboxWindowsConfig generates WindowsPodSandboxConfig from v1.Pod.
// On Windows this will get called in addition to LinuxPodSandboxConfig because not all relevant fields have been added to
// WindowsPodSandboxConfig at this time.
func (m *kubeGenericRuntimeManager) generatePodSandboxWindowsConfig(pod *v1.Pod) (*runtimeapi.WindowsPodSandboxConfig, error) | return nil, fmt.Errorf("pod must not contain both HostProcess and non-HostProcess containers")
}
if !kubecontainer.IsHostNetworkPod(pod) {
return nil, fmt.Errorf("hostNetwork is required if Pod contains HostProcess containers")
}
wc.SecurityContext.HostProcess = true
}
sc := pod.Spec.SecurityContext
if sc == nil || sc.WindowsOptions == nil {
return wc, nil
}
wo := sc.WindowsOptions
if wo.GMSACredentialSpec | {
wc := &runtimeapi.WindowsPodSandboxConfig{
SecurityContext: &runtimeapi.WindowsSandboxSecurityContext{},
}
if utilfeature.DefaultFeatureGate.Enabled(features.WindowsHostNetwork) {
wc.SecurityContext.NamespaceOptions = &runtimeapi.WindowsNamespaceOption{}
if kubecontainer.IsHostNetworkPod(pod) {
wc.SecurityContext.NamespaceOptions.Network = runtimeapi.NamespaceMode_NODE
} else {
wc.SecurityContext.NamespaceOptions.Network = runtimeapi.NamespaceMode_POD
}
}
// If all of the containers in a pod are HostProcess containers, set the pod's HostProcess field
// explicitly because the container runtime requires this information at sandbox creation time.
if kubecontainer.HasWindowsHostProcessContainer(pod) {
// At present Windows all containers in a Windows pod must be HostProcess containers
// and HostNetwork is required to be set.
if !kubecontainer.AllContainersAreWindowsHostProcess(pod) { | identifier_body |
JsonLogparser.py | )):
if (str(dic[i]["last_update"]) >= from_t):
if (i!=0):
from_index = i
else:
from_index = i
break
i = i + 1
i = 0
if (dic[len(dic)-1]["last_update"]==to_t):
to_t = len(dic)-1
else:
while (i < len(dic)):
if (str(dic[i]["last_update"]) >= to_t):
if (i != 0):
to_index = i
else:
to_index = i
break
i = i + 1
return from_index, to_index
def printPlot(dic, key, key2, label, starttime, endtime,all,indexes):
array = []
while(starttime < endtime):
if (key2 == None):
array.append(dic[starttime][key])
else :
array.append(dic[starttime][key][key2]) | array = np.array(array)
array = array.T
if (all):
for i in range(len(array)):
pp.plot(array[i], label="Thruster " + str(i))
else:
for i in indexes:
try:
pp.plot(array[i],label="Thruster " + str(i))
except:
print ("\nUnable to Plot\nPlotting index out of range, Please use appropriate range")
exit(10)
pp.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
ncol=4, mode="expand", borderaxespad=0.)
pp.xlabel("Time")
pp.ylabel("Thruster power")
pp.title(label)
pp.show()
def print1dim( dic, key, label, starttime, endtime):
# helper function for non nested keys i.e. 1 dimensional array haha
print("\n"+label)
while (starttime < endtime):
print (dic[starttime]["last_update"]," : ",dic[starttime][key])
starttime = starttime + 1
def print2dim(dic, key, key2, label, starttime, endtime):
# helper function for nested keys i.e. 2 dimensional only
# add similar helper function for more nested keys if need be
print("\n" + label)
while (starttime < endtime):
print (dic[starttime]["last_update"]," : ",dic[starttime][key][key2])
starttime = starttime + 1
if __name__ == "__main__":
# if the enviroment variable for the directory path doesn't exist, print error
try:
LOG_DIR = "LOGDIR"
env = os.environ[LOG_DIR]
except:
print ("error")
exit(10)
# PATTERN = DD_HH_MIN_SEC_USEC
# keys_list = [] //to get all the keys.. not useable right now
# data1 =""
# env1= "C:/Users/Mudabbir/Desktop/Logfolder/2018_01_17__12_59_23/dearclientlog.txt"
# with open(env1) as f:
# for line in f:
# data1 = json.loads(line)
# data1 = json.loads(data1)
# break
#
# get_keys(data1,keys_list)
# print (data1,'\n\n')
# print (keys_list)
# sets up the argument parser
# edit this to add more arguments or take out arguments
parser = argparse.ArgumentParser()
parser.add_argument("-r", action='store_true', help="Prints out all the runs")
parser.add_argument("-run", type=int, help="Selects the run you want tot access")
parser.add_argument("-t", action='store_true',help="Prints out the times for all the logs")
parser.add_argument("-plot", type=str, help="Which one's do u want to plot? either enter \"1,2,3,4\" or \"all\"")
parser.add_argument("-df", action='store_true', help="Prints out logs for dearflask")
parser.add_argument("-des", action='store_true', help="Prints out the logs for desired thrust")
parser.add_argument("-dbt", action='store_true', help="Prints out the logs for disabled thrust")
parser.add_argument("-trs", action='store_true', help="Prints out the logs for thruster scales")
parser.add_argument("-claw", action='store_true', help="Prints out all the contents in claw")
parser.add_argument("-pow", action='store_true', help="Prints out the power component in claw")
parser.add_argument("-led", action='store_true', help="Prints out all the contents in led")
parser.add_argument("-cled", action='store_true', help="Prints out the Camera LED")
parser.add_argument("-bled", action='store_true', help="Prints out the Bluetooth LED")
parser.add_argument("-cam", action='store_true', help="Prints all the cameras")
parser.add_argument("-thruster" ,action='store_true', help="Prints out all the contents in the thrusters")
parser.add_argument("-frt", action='store_true', help="Prints out the logs for frozen/frozen thrusters")
parser.add_argument("-logtime", action='store_true', help="Prints the times inside the selected log")
parser.add_argument("-dc", action='store_true', help="Prints out the logs for dearclient")
parser.add_argument("-IMU",action='store_true', help="Prints out the logs for dearflask")
parser.add_argument("-pres", action='store_true', help="Prints out contests for pressure")
parser.add_argument("-camnum", type=int, help='Which camera do u want to print?')
# PATTERN = DD_HH_MIN_SEC_USEC
parser.add_argument("-fr", type=str, help="the starting point for the logs")
parser.add_argument("-to", type=str, help="The ending point of the logs")
args = parser.parse_args()
# gets all the directories i.e. the directories with time stamps for each run
dir_list = next(os.walk(env))[1]
# if a run is provided then access that run, else default to the most recent run
if args.run != None:
log_num = args.run
else:
log_num = 0 #default
if (args.r):
get_Directory()
exit(10)
# adding the directory path
env += dir_list[log_num]
choice = 0
file_list = os.listdir(env)
if (args.dc): # if dearclient is chosen; else if defaults to dearflask , however no error happens by defaulting
choice = 1
env = [env + "/" + file_list[0], env + "/" + file_list[1]][choice == 0] # setting the path to either dearflask or dearclient
data = []
with open(env) as f:
for line in f:
data.append(json.loads(line))
# PATTERN = DD_HH_MIN_SEC_USEC
fromtime = data[0]["last_update"] #by default it goes from start to end
totime = data[len(data)-1]["last_update"]
#print (totime)
# to check if we need to print all plots or a few specific ones
printallplots = False
plotarray = []
if (args.fr != None):
fromtime = str(args.fr)
if (args.to != None):
totime = args.to
if (args.plot != None):
strin = args.plot
if (strin == "all"):
printallplots = True
else:
# parse the string to extract out the numbers and make a list
strin = strin.replace(" ","")
str1 = strin.split(',')
for i in str1:
try :
plotarray.append(int(i))
except:
print("Please input integers and in the following format < int1, int2, int3>");
exit(10)
# gets the index of the times from strings for easier access
fromtime, totime = getTimeIndex(data, fromtime, totime)
print ("Please use this format for time inputs: 00_00_00_00_000000")
# Printing all the arguments , whatever was asked for
check = 0
# calls to printPlot plots the graph
# works for
# DearFlask: Desired, Frozen, Disabled thrusters and thruster scales
# DearClient: Thrusters
if (args.df):
check =1
if (args.t):
print("Time Format: DD_HH_MIN_SEC_USEC \nstart time: ",data[0]["last_update"]+ '\n' + "end time: ",data[len(data)-1]["last_update"] )
if (args.thruster):
print1dim(data,'thrusters','Thrusters',fromtime,totime)
if (args.des):
print2dim(data, 'thrusters','desired_thrust', 'Desired Thrusters', fromtime, totime)
printPlot(data, 'thrusters',' | starttime = starttime + 1 | random_line_split |
JsonLogparser.py | pp.show()
def print1dim( dic, key, label, starttime, endtime):
# helper function for non nested keys i.e. 1 dimensional array haha
print("\n"+label)
while (starttime < endtime):
print (dic[starttime]["last_update"]," : ",dic[starttime][key])
starttime = starttime + 1
def print2dim(dic, key, key2, label, starttime, endtime):
# helper function for nested keys i.e. 2 dimensional only
# add similar helper function for more nested keys if need be
print("\n" + label)
while (starttime < endtime):
print (dic[starttime]["last_update"]," : ",dic[starttime][key][key2])
starttime = starttime + 1
if __name__ == "__main__":
# if the enviroment variable for the directory path doesn't exist, print error
try:
LOG_DIR = "LOGDIR"
env = os.environ[LOG_DIR]
except:
print ("error")
exit(10)
# PATTERN = DD_HH_MIN_SEC_USEC
# keys_list = [] //to get all the keys.. not useable right now
# data1 =""
# env1= "C:/Users/Mudabbir/Desktop/Logfolder/2018_01_17__12_59_23/dearclientlog.txt"
# with open(env1) as f:
# for line in f:
# data1 = json.loads(line)
# data1 = json.loads(data1)
# break
#
# get_keys(data1,keys_list)
# print (data1,'\n\n')
# print (keys_list)
# sets up the argument parser
# edit this to add more arguments or take out arguments
parser = argparse.ArgumentParser()
parser.add_argument("-r", action='store_true', help="Prints out all the runs")
parser.add_argument("-run", type=int, help="Selects the run you want tot access")
parser.add_argument("-t", action='store_true',help="Prints out the times for all the logs")
parser.add_argument("-plot", type=str, help="Which one's do u want to plot? either enter \"1,2,3,4\" or \"all\"")
parser.add_argument("-df", action='store_true', help="Prints out logs for dearflask")
parser.add_argument("-des", action='store_true', help="Prints out the logs for desired thrust")
parser.add_argument("-dbt", action='store_true', help="Prints out the logs for disabled thrust")
parser.add_argument("-trs", action='store_true', help="Prints out the logs for thruster scales")
parser.add_argument("-claw", action='store_true', help="Prints out all the contents in claw")
parser.add_argument("-pow", action='store_true', help="Prints out the power component in claw")
parser.add_argument("-led", action='store_true', help="Prints out all the contents in led")
parser.add_argument("-cled", action='store_true', help="Prints out the Camera LED")
parser.add_argument("-bled", action='store_true', help="Prints out the Bluetooth LED")
parser.add_argument("-cam", action='store_true', help="Prints all the cameras")
parser.add_argument("-thruster" ,action='store_true', help="Prints out all the contents in the thrusters")
parser.add_argument("-frt", action='store_true', help="Prints out the logs for frozen/frozen thrusters")
parser.add_argument("-logtime", action='store_true', help="Prints the times inside the selected log")
parser.add_argument("-dc", action='store_true', help="Prints out the logs for dearclient")
parser.add_argument("-IMU",action='store_true', help="Prints out the logs for dearflask")
parser.add_argument("-pres", action='store_true', help="Prints out contests for pressure")
parser.add_argument("-camnum", type=int, help='Which camera do u want to print?')
# PATTERN = DD_HH_MIN_SEC_USEC
parser.add_argument("-fr", type=str, help="the starting point for the logs")
parser.add_argument("-to", type=str, help="The ending point of the logs")
args = parser.parse_args()
# gets all the directories i.e. the directories with time stamps for each run
dir_list = next(os.walk(env))[1]
# if a run is provided then access that run, else default to the most recent run
if args.run != None:
log_num = args.run
else:
log_num = 0 #default
if (args.r):
get_Directory()
exit(10)
# adding the directory path
env += dir_list[log_num]
choice = 0
file_list = os.listdir(env)
if (args.dc): # if dearclient is chosen; else if defaults to dearflask , however no error happens by defaulting
choice = 1
env = [env + "/" + file_list[0], env + "/" + file_list[1]][choice == 0] # setting the path to either dearflask or dearclient
data = []
with open(env) as f:
for line in f:
data.append(json.loads(line))
# PATTERN = DD_HH_MIN_SEC_USEC
fromtime = data[0]["last_update"] #by default it goes from start to end
totime = data[len(data)-1]["last_update"]
#print (totime)
# to check if we need to print all plots or a few specific ones
printallplots = False
plotarray = []
if (args.fr != None):
fromtime = str(args.fr)
if (args.to != None):
totime = args.to
if (args.plot != None):
strin = args.plot
if (strin == "all"):
printallplots = True
else:
# parse the string to extract out the numbers and make a list
strin = strin.replace(" ","")
str1 = strin.split(',')
for i in str1:
try :
plotarray.append(int(i))
except:
print("Please input integers and in the following format < int1, int2, int3>");
exit(10)
# gets the index of the times from strings for easier access
fromtime, totime = getTimeIndex(data, fromtime, totime)
print ("Please use this format for time inputs: 00_00_00_00_000000")
# Printing all the arguments , whatever was asked for
check = 0
# calls to printPlot plots the graph
# works for
# DearFlask: Desired, Frozen, Disabled thrusters and thruster scales
# DearClient: Thrusters
if (args.df):
check =1
if (args.t):
print("Time Format: DD_HH_MIN_SEC_USEC \nstart time: ",data[0]["last_update"]+ '\n' + "end time: ",data[len(data)-1]["last_update"] )
if (args.thruster):
print1dim(data,'thrusters','Thrusters',fromtime,totime)
if (args.des):
print2dim(data, 'thrusters','desired_thrust', 'Desired Thrusters', fromtime, totime)
printPlot(data, 'thrusters','desired_thrust', 'Desired Thrusters', fromtime, totime,printallplots,plotarray)
if (args.dbt):
print2dim(data, 'thrusters','disabled_thrusters', 'Disabled thrusters', fromtime, totime)
printPlot(data, 'thrusters','disabled_thrusters', 'Disabled thrusters', fromtime, totime,printallplots,plotarray)
if (args.trs):
print2dim(data, 'thrusters','thruster_scales', 'Thruster scales', fromtime, totime)
printPlot(data, 'thrusters','thruster_scales', 'Thruster scales', fromtime, totime,printallplots,plotarray)
if (args.frt):
print2dim(data, 'thrusters','frozen', 'Frozen', fromtime, totime)
printPlot(data, 'thrusters', 'frozen', 'Frozen', fromtime, totime, printallplots, plotarray)
if (args.claw):
print1dim(data,'claw','Claw',fromtime,totime)
if (args.pow):
print2dim(data, 'claw','power', 'Claw: Power', fromtime, totime)
if (args.led):
print1dim(data,'leds','LED\'s',fromtime,totime)
if (args.cled):
print2dim(data, 'leds','camera_leds', 'LED : Camera LED', fromtime, totime)
if (args.bled):
print2dim(data, 'leds','bluetooth_led', 'LED: Bluetooth LED', fromtime, totime)
if (args.cam):
| print1dim(data,'cameras','Camera',fromtime,totime) | conditional_block | |
JsonLogparser.py | )):
if (str(dic[i]["last_update"]) >= from_t):
if (i!=0):
from_index = i
else:
from_index = i
break
i = i + 1
i = 0
if (dic[len(dic)-1]["last_update"]==to_t):
to_t = len(dic)-1
else:
while (i < len(dic)):
if (str(dic[i]["last_update"]) >= to_t):
if (i != 0):
to_index = i
else:
to_index = i
break
i = i + 1
return from_index, to_index
def printPlot(dic, key, key2, label, starttime, endtime,all,indexes):
array = []
while(starttime < endtime):
if (key2 == None):
array.append(dic[starttime][key])
else :
array.append(dic[starttime][key][key2])
starttime = starttime + 1
array = np.array(array)
array = array.T
if (all):
for i in range(len(array)):
pp.plot(array[i], label="Thruster " + str(i))
else:
for i in indexes:
try:
pp.plot(array[i],label="Thruster " + str(i))
except:
print ("\nUnable to Plot\nPlotting index out of range, Please use appropriate range")
exit(10)
pp.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
ncol=4, mode="expand", borderaxespad=0.)
pp.xlabel("Time")
pp.ylabel("Thruster power")
pp.title(label)
pp.show()
def print1dim( dic, key, label, starttime, endtime):
# helper function for non nested keys i.e. 1 dimensional array haha
print("\n"+label)
while (starttime < endtime):
print (dic[starttime]["last_update"]," : ",dic[starttime][key])
starttime = starttime + 1
def | (dic, key, key2, label, starttime, endtime):
# helper function for nested keys i.e. 2 dimensional only
# add similar helper function for more nested keys if need be
print("\n" + label)
while (starttime < endtime):
print (dic[starttime]["last_update"]," : ",dic[starttime][key][key2])
starttime = starttime + 1
if __name__ == "__main__":
# if the enviroment variable for the directory path doesn't exist, print error
try:
LOG_DIR = "LOGDIR"
env = os.environ[LOG_DIR]
except:
print ("error")
exit(10)
# PATTERN = DD_HH_MIN_SEC_USEC
# keys_list = [] //to get all the keys.. not useable right now
# data1 =""
# env1= "C:/Users/Mudabbir/Desktop/Logfolder/2018_01_17__12_59_23/dearclientlog.txt"
# with open(env1) as f:
# for line in f:
# data1 = json.loads(line)
# data1 = json.loads(data1)
# break
#
# get_keys(data1,keys_list)
# print (data1,'\n\n')
# print (keys_list)
# sets up the argument parser
# edit this to add more arguments or take out arguments
parser = argparse.ArgumentParser()
parser.add_argument("-r", action='store_true', help="Prints out all the runs")
parser.add_argument("-run", type=int, help="Selects the run you want tot access")
parser.add_argument("-t", action='store_true',help="Prints out the times for all the logs")
parser.add_argument("-plot", type=str, help="Which one's do u want to plot? either enter \"1,2,3,4\" or \"all\"")
parser.add_argument("-df", action='store_true', help="Prints out logs for dearflask")
parser.add_argument("-des", action='store_true', help="Prints out the logs for desired thrust")
parser.add_argument("-dbt", action='store_true', help="Prints out the logs for disabled thrust")
parser.add_argument("-trs", action='store_true', help="Prints out the logs for thruster scales")
parser.add_argument("-claw", action='store_true', help="Prints out all the contents in claw")
parser.add_argument("-pow", action='store_true', help="Prints out the power component in claw")
parser.add_argument("-led", action='store_true', help="Prints out all the contents in led")
parser.add_argument("-cled", action='store_true', help="Prints out the Camera LED")
parser.add_argument("-bled", action='store_true', help="Prints out the Bluetooth LED")
parser.add_argument("-cam", action='store_true', help="Prints all the cameras")
parser.add_argument("-thruster" ,action='store_true', help="Prints out all the contents in the thrusters")
parser.add_argument("-frt", action='store_true', help="Prints out the logs for frozen/frozen thrusters")
parser.add_argument("-logtime", action='store_true', help="Prints the times inside the selected log")
parser.add_argument("-dc", action='store_true', help="Prints out the logs for dearclient")
parser.add_argument("-IMU",action='store_true', help="Prints out the logs for dearflask")
parser.add_argument("-pres", action='store_true', help="Prints out contests for pressure")
parser.add_argument("-camnum", type=int, help='Which camera do u want to print?')
# PATTERN = DD_HH_MIN_SEC_USEC
parser.add_argument("-fr", type=str, help="the starting point for the logs")
parser.add_argument("-to", type=str, help="The ending point of the logs")
args = parser.parse_args()
# gets all the directories i.e. the directories with time stamps for each run
dir_list = next(os.walk(env))[1]
# if a run is provided then access that run, else default to the most recent run
if args.run != None:
log_num = args.run
else:
log_num = 0 #default
if (args.r):
get_Directory()
exit(10)
# adding the directory path
env += dir_list[log_num]
choice = 0
file_list = os.listdir(env)
if (args.dc): # if dearclient is chosen; else if defaults to dearflask , however no error happens by defaulting
choice = 1
env = [env + "/" + file_list[0], env + "/" + file_list[1]][choice == 0] # setting the path to either dearflask or dearclient
data = []
with open(env) as f:
for line in f:
data.append(json.loads(line))
# PATTERN = DD_HH_MIN_SEC_USEC
fromtime = data[0]["last_update"] #by default it goes from start to end
totime = data[len(data)-1]["last_update"]
#print (totime)
# to check if we need to print all plots or a few specific ones
printallplots = False
plotarray = []
if (args.fr != None):
fromtime = str(args.fr)
if (args.to != None):
totime = args.to
if (args.plot != None):
strin = args.plot
if (strin == "all"):
printallplots = True
else:
# parse the string to extract out the numbers and make a list
strin = strin.replace(" ","")
str1 = strin.split(',')
for i in str1:
try :
plotarray.append(int(i))
except:
print("Please input integers and in the following format < int1, int2, int3>");
exit(10)
# gets the index of the times from strings for easier access
fromtime, totime = getTimeIndex(data, fromtime, totime)
print ("Please use this format for time inputs: 00_00_00_00_000000")
# Printing all the arguments , whatever was asked for
check = 0
# calls to printPlot plots the graph
# works for
# DearFlask: Desired, Frozen, Disabled thrusters and thruster scales
# DearClient: Thrusters
if (args.df):
check =1
if (args.t):
print("Time Format: DD_HH_MIN_SEC_USEC \nstart time: ",data[0]["last_update"]+ '\n' + "end time: ",data[len(data)-1]["last_update"] )
if (args.thruster):
print1dim(data,'thrusters','Thrusters',fromtime,totime)
if (args.des):
print2dim(data, 'thrusters','desired_thrust', 'Desired Thrusters', fromtime, totime)
printPlot(data, 'thr | print2dim | identifier_name |
JsonLogparser.py | )):
if (str(dic[i]["last_update"]) >= from_t):
if (i!=0):
from_index = i
else:
from_index = i
break
i = i + 1
i = 0
if (dic[len(dic)-1]["last_update"]==to_t):
to_t = len(dic)-1
else:
while (i < len(dic)):
if (str(dic[i]["last_update"]) >= to_t):
if (i != 0):
to_index = i
else:
to_index = i
break
i = i + 1
return from_index, to_index
def printPlot(dic, key, key2, label, starttime, endtime,all,indexes):
array = []
while(starttime < endtime):
if (key2 == None):
array.append(dic[starttime][key])
else :
array.append(dic[starttime][key][key2])
starttime = starttime + 1
array = np.array(array)
array = array.T
if (all):
for i in range(len(array)):
pp.plot(array[i], label="Thruster " + str(i))
else:
for i in indexes:
try:
pp.plot(array[i],label="Thruster " + str(i))
except:
print ("\nUnable to Plot\nPlotting index out of range, Please use appropriate range")
exit(10)
pp.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
ncol=4, mode="expand", borderaxespad=0.)
pp.xlabel("Time")
pp.ylabel("Thruster power")
pp.title(label)
pp.show()
def print1dim( dic, key, label, starttime, endtime):
# helper function for non nested keys i.e. 1 dimensional array haha
|
def print2dim(dic, key, key2, label, starttime, endtime):
# helper function for nested keys i.e. 2 dimensional only
# add similar helper function for more nested keys if need be
print("\n" + label)
while (starttime < endtime):
print (dic[starttime]["last_update"]," : ",dic[starttime][key][key2])
starttime = starttime + 1
if __name__ == "__main__":
# if the enviroment variable for the directory path doesn't exist, print error
try:
LOG_DIR = "LOGDIR"
env = os.environ[LOG_DIR]
except:
print ("error")
exit(10)
# PATTERN = DD_HH_MIN_SEC_USEC
# keys_list = [] //to get all the keys.. not useable right now
# data1 =""
# env1= "C:/Users/Mudabbir/Desktop/Logfolder/2018_01_17__12_59_23/dearclientlog.txt"
# with open(env1) as f:
# for line in f:
# data1 = json.loads(line)
# data1 = json.loads(data1)
# break
#
# get_keys(data1,keys_list)
# print (data1,'\n\n')
# print (keys_list)
# sets up the argument parser
# edit this to add more arguments or take out arguments
parser = argparse.ArgumentParser()
parser.add_argument("-r", action='store_true', help="Prints out all the runs")
parser.add_argument("-run", type=int, help="Selects the run you want tot access")
parser.add_argument("-t", action='store_true',help="Prints out the times for all the logs")
parser.add_argument("-plot", type=str, help="Which one's do u want to plot? either enter \"1,2,3,4\" or \"all\"")
parser.add_argument("-df", action='store_true', help="Prints out logs for dearflask")
parser.add_argument("-des", action='store_true', help="Prints out the logs for desired thrust")
parser.add_argument("-dbt", action='store_true', help="Prints out the logs for disabled thrust")
parser.add_argument("-trs", action='store_true', help="Prints out the logs for thruster scales")
parser.add_argument("-claw", action='store_true', help="Prints out all the contents in claw")
parser.add_argument("-pow", action='store_true', help="Prints out the power component in claw")
parser.add_argument("-led", action='store_true', help="Prints out all the contents in led")
parser.add_argument("-cled", action='store_true', help="Prints out the Camera LED")
parser.add_argument("-bled", action='store_true', help="Prints out the Bluetooth LED")
parser.add_argument("-cam", action='store_true', help="Prints all the cameras")
parser.add_argument("-thruster" ,action='store_true', help="Prints out all the contents in the thrusters")
parser.add_argument("-frt", action='store_true', help="Prints out the logs for frozen/frozen thrusters")
parser.add_argument("-logtime", action='store_true', help="Prints the times inside the selected log")
parser.add_argument("-dc", action='store_true', help="Prints out the logs for dearclient")
parser.add_argument("-IMU",action='store_true', help="Prints out the logs for dearflask")
parser.add_argument("-pres", action='store_true', help="Prints out contests for pressure")
parser.add_argument("-camnum", type=int, help='Which camera do u want to print?')
# PATTERN = DD_HH_MIN_SEC_USEC
parser.add_argument("-fr", type=str, help="the starting point for the logs")
parser.add_argument("-to", type=str, help="The ending point of the logs")
args = parser.parse_args()
# gets all the directories i.e. the directories with time stamps for each run
dir_list = next(os.walk(env))[1]
# if a run is provided then access that run, else default to the most recent run
if args.run != None:
log_num = args.run
else:
log_num = 0 #default
if (args.r):
get_Directory()
exit(10)
# adding the directory path
env += dir_list[log_num]
choice = 0
file_list = os.listdir(env)
if (args.dc): # if dearclient is chosen; else if defaults to dearflask , however no error happens by defaulting
choice = 1
env = [env + "/" + file_list[0], env + "/" + file_list[1]][choice == 0] # setting the path to either dearflask or dearclient
data = []
with open(env) as f:
for line in f:
data.append(json.loads(line))
# PATTERN = DD_HH_MIN_SEC_USEC
fromtime = data[0]["last_update"] #by default it goes from start to end
totime = data[len(data)-1]["last_update"]
#print (totime)
# to check if we need to print all plots or a few specific ones
printallplots = False
plotarray = []
if (args.fr != None):
fromtime = str(args.fr)
if (args.to != None):
totime = args.to
if (args.plot != None):
strin = args.plot
if (strin == "all"):
printallplots = True
else:
# parse the string to extract out the numbers and make a list
strin = strin.replace(" ","")
str1 = strin.split(',')
for i in str1:
try :
plotarray.append(int(i))
except:
print("Please input integers and in the following format < int1, int2, int3>");
exit(10)
# gets the index of the times from strings for easier access
fromtime, totime = getTimeIndex(data, fromtime, totime)
print ("Please use this format for time inputs: 00_00_00_00_000000")
# Printing all the arguments , whatever was asked for
check = 0
# calls to printPlot plots the graph
# works for
# DearFlask: Desired, Frozen, Disabled thrusters and thruster scales
# DearClient: Thrusters
if (args.df):
check =1
if (args.t):
print("Time Format: DD_HH_MIN_SEC_USEC \nstart time: ",data[0]["last_update"]+ '\n' + "end time: ",data[len(data)-1]["last_update"] )
if (args.thruster):
print1dim(data,'thrusters','Thrusters',fromtime,totime)
if (args.des):
print2dim(data, 'thrusters','desired_thrust', 'Desired Thrusters', fromtime, totime)
printPlot(data, 'thrusters | print("\n"+label)
while (starttime < endtime):
print (dic[starttime]["last_update"]," : ",dic[starttime][key])
starttime = starttime + 1 | identifier_body |
mikes-modal.min.js | : (b.corners * b.width >> 1) + "px"
})
}
var g = 0, i;
for (; g < b.lines; g++)i = h(d(), {
position: "absolute",
top: 1 + ~(b.width / 2) + "px",
transform: b.hwaccel ? "translate3d(0,0,0)" : "",
opacity: b.opacity,
animation: m && f(b.opacity, b.trail, g, b.lines) + " " + 1 / b.speed + "s linear infinite"
}), b.shadow && e(i, h(c("#000", "0 0 4px #000"), {top: "2px"})), e(a, e(i, c(b.color, "0 0 1px rgba(0,0,0,.1)")));
return a
}, opacity: function (a, b, c) {
b < a.childNodes.length && (a.childNodes[b].style.opacity = c)
}
}), function () {
function a(a, b) {
return d("<" + a + ' xmlns="urn:schemas-microsoft.com:vml" class="spin-vml">', b)
}
var b = h(d("group"), {behavior: "url(#default#VML)"});
!g(b, "transform") && b.adj ? (n.addRule(".spin-vml", "behavior:url(#default#VML)"), p.prototype.lines = function (b, c) {
function d() {
return h(a("group", {coordsize: i + " " + i, coordorigin: -g + " " + -g}), {width: i, height: i})
}
function f(b, f, i) {
e(k, e(h(d(), {
rotation: 360 / c.lines * b + "deg",
left: ~~f
}), e(h(a("roundrect", {arcsize: c.corners}), {
width: g,
height: c.width,
left: c.radius,
top: -c.width >> 1,
filter: i
}), a("fill", {color: c.color, opacity: c.opacity}), a("stroke", {opacity: 0}))))
}
var g = c.length + c.width, i = 2 * g, j = -(c.width + c.length) * 2 + "px", k = h(d(), {
position: "absolute",
top: j,
left: j
}), l;
if (c.shadow)for (l = 1; l <= c.lines; l++)f(l, -2, "progid:DXImageTransform.Microsoft.Blur(pixelradius=2,makeshadow=1,shadowopacity=.3)");
for (l = 1; l <= c.lines; l++)f(l);
return e(b, k)
}, p.prototype.opacity = function (a, b, c, d) {
var e = a.firstChild;
d = d.shadow && d.lines || 0, e && b + d < e.childNodes.length && (e = e.childNodes[b + d], e = e && e.firstChild, e = e && e.firstChild, e && (e.opacity = c))
}) : m = g(b, "animation")
}(), typeof define == "function" && define.amd ? define(function () {
return p
}) : a.Spinner = p
}(window, document), function () {
var a, b, c, d, e = function (a, b) {
return function () {
return a.apply(b, arguments)
}
};
$.fn.mikesModal = function (a) {
return this.modal = new b($(this))
}, b = function () {
function b(a) {
this.addClose = e(this.addClose, this), this.marginLeft = e(this.marginLeft, this), this.marginTop = e(this.marginTop, this), this.imageMaxHeight = e(this.imageMaxHeight, this), this.imageMaxWidth = e(this.imageMaxWidth, this), this.triggerClose = e(this.triggerClose, this), this.imagePosition = e(this.imagePosition, this), this.imageLoaded = e(this.imageLoaded, this), this.loaded = e(this.loaded, this), this.closed = e(this.closed, this), this.opened = e(this.opened, this), this.bindMethods = e(this.bindMethods, this), this.createAllClasses = e(this.createAllClasses, this), this.modalBox = a, this.bindMethods(), this.createAllClasses(), this.modalBox.trigger("open"), this.imageLoaded(), this.addClose(), this.triggerClose()
}
return b.prototype.createAllClasses = function () {
return new c(this.modalBox), new a(this.modalBox), new d(this.modalBox)
}, b.prototype.bindMethods = function () {
return this.opened(), this.loaded(), this.closed()
}, b.prototype.opened = function () {
var a = this;
return this.modalBox.bind("open", function () {
return a.modalBox.find("img").css({"max-width": a.imageMaxWidth(), "max-height": a.imageMaxHeight()})
})
}, b.prototype.closed = function () {
var a = this;
return this.modalBox.bind("close", function () {
return a.modalBox.hide()
})
}, b.prototype.loaded = function () {
var a = this;
return this.modalBox.bind("loaded", function () {
return a.modalBox.fadeIn("slow")
})
}, b.prototype.imageLoaded = function () {
var a = this;
this.modalBox.find("img").first().load(function () {
return a.imagePosition()
});
if (this.modalBox.find("img")[0].complete)return this.imagePosition()
}, b.prototype.imagePosition = function () {
return this.modalBox.trigger("loaded").css({
"margin-top": this.marginTop(),
"margin-left": this.marginLeft()
}), this.modalBox.css({
"margin-top": this.marginTop(),
"margin-left": this.marginLeft()
}), this.modalBox.css({
"margin-top": this.marginTop(),
"margin-left": this.marginLeft()
}), this.modalBox.find(".description").css({height: this.modalBox.find("img").height() - 20})
}, b.prototype.triggerClose = function () {
var a = this;
return $(document).keyup(function (b) {
if (b.keyCode === 27)return a.modalBox.trigger("close")
}), this.modalBox.find(".close").click(function () {
return a.modalBox.trigger("close")
})
}, b.prototype.imageMaxWidth = function () {
return window.innerWidth * .8 - 300
}, b.prototype.imageMaxHeight = function () {
return window.innerHeight * .8
}, b.prototype.marginTop = function () {
return document.width > 700 ? "-" + this.modalBox.height() / 2 + "px" : "-" + (this.modalBox.height() / 2 - 80) + "px"
}, b.prototype.marginLeft = function () {
return "-" + this.modalBox.width() / 2 + "px"
}, b.prototype.addClose = function () {
return $(".description").before("")
}, b
}(), d = function () {
function a(a) {
this.bindClicks = e(this.bindClicks, this), this.bindClosed = e(this.bindClosed, this), this.bindLoaded = e(this.bindLoaded, this), this.modalBox = a, this.bindLoaded(), this.bindClosed(), this.bindClicks()
}
return a.prototype.bindLoaded = function () {
var a = this;
return this.modalBox.bind("loaded", function () {
return $("#the-lights").length ? a.theLights = $("#the-lights") : (a.theLights = $("<div id='the-lights'></div>"), a.theLights.appendTo("body"))
})
}, a.prototype.bindClosed = function () {
var a = this;
return this.modalBox.bind("close", function () {
return a.theLights.remove()
})
}, a.prototype.bindClicks = function () {
var a = this;
return $("body").on("click touchstart", "#the-lights", function (b) {
return b.preventDefault(), b.stopPropagation(), a.modalBox.trigger("close")
})
}, a
}(), c = function () {
function a(a) {
this.bindFullClosed = e(this.bindFullClosed, this), this.bindFullLoaded = e(this.bindFullLoaded, this), this.modalBox = a, document.width > 700 && (this.bindFullLoaded(), this.bindFullClosed()), this.html = $("html")
}
return a.prototype.bindFullLoaded = function () {
var a = this;
return this.modalBox.bind("loaded", function () {
return a.html.css("overflow", "hidden")
})
}, a.prototype.bindFullClosed = function () {
var a = this;
return this.modalBox.bind("close", function () {
return a.html.css("overflow", "auto")
})
}, a
}(), a = function () {
function a(a) | {
this.opts = e(this.opts, this), this.bindLoaded = e(this.bindLoaded, this), this.bindOpened = e(this.bindOpened, this), this.modalBox = a, this.bindOpened(), this.bindLoaded()
} | identifier_body | |
mikes-modal.min.js | #000"), {top: "2px"})), e(a, e(i, c(b.color, "0 0 1px rgba(0,0,0,.1)")));
return a
}, opacity: function (a, b, c) {
b < a.childNodes.length && (a.childNodes[b].style.opacity = c)
}
}), function () {
function a(a, b) {
return d("<" + a + ' xmlns="urn:schemas-microsoft.com:vml" class="spin-vml">', b)
}
var b = h(d("group"), {behavior: "url(#default#VML)"});
!g(b, "transform") && b.adj ? (n.addRule(".spin-vml", "behavior:url(#default#VML)"), p.prototype.lines = function (b, c) {
function d() {
return h(a("group", {coordsize: i + " " + i, coordorigin: -g + " " + -g}), {width: i, height: i})
}
function f(b, f, i) {
e(k, e(h(d(), {
rotation: 360 / c.lines * b + "deg",
left: ~~f
}), e(h(a("roundrect", {arcsize: c.corners}), {
width: g,
height: c.width,
left: c.radius,
top: -c.width >> 1,
filter: i
}), a("fill", {color: c.color, opacity: c.opacity}), a("stroke", {opacity: 0}))))
}
var g = c.length + c.width, i = 2 * g, j = -(c.width + c.length) * 2 + "px", k = h(d(), {
position: "absolute",
top: j,
left: j
}), l;
if (c.shadow)for (l = 1; l <= c.lines; l++)f(l, -2, "progid:DXImageTransform.Microsoft.Blur(pixelradius=2,makeshadow=1,shadowopacity=.3)");
for (l = 1; l <= c.lines; l++)f(l);
return e(b, k)
}, p.prototype.opacity = function (a, b, c, d) {
var e = a.firstChild;
d = d.shadow && d.lines || 0, e && b + d < e.childNodes.length && (e = e.childNodes[b + d], e = e && e.firstChild, e = e && e.firstChild, e && (e.opacity = c))
}) : m = g(b, "animation")
}(), typeof define == "function" && define.amd ? define(function () {
return p
}) : a.Spinner = p
}(window, document), function () {
var a, b, c, d, e = function (a, b) {
return function () {
return a.apply(b, arguments)
}
};
$.fn.mikesModal = function (a) {
return this.modal = new b($(this))
}, b = function () {
function b(a) {
this.addClose = e(this.addClose, this), this.marginLeft = e(this.marginLeft, this), this.marginTop = e(this.marginTop, this), this.imageMaxHeight = e(this.imageMaxHeight, this), this.imageMaxWidth = e(this.imageMaxWidth, this), this.triggerClose = e(this.triggerClose, this), this.imagePosition = e(this.imagePosition, this), this.imageLoaded = e(this.imageLoaded, this), this.loaded = e(this.loaded, this), this.closed = e(this.closed, this), this.opened = e(this.opened, this), this.bindMethods = e(this.bindMethods, this), this.createAllClasses = e(this.createAllClasses, this), this.modalBox = a, this.bindMethods(), this.createAllClasses(), this.modalBox.trigger("open"), this.imageLoaded(), this.addClose(), this.triggerClose()
}
return b.prototype.createAllClasses = function () {
return new c(this.modalBox), new a(this.modalBox), new d(this.modalBox)
}, b.prototype.bindMethods = function () {
return this.opened(), this.loaded(), this.closed()
}, b.prototype.opened = function () {
var a = this;
return this.modalBox.bind("open", function () {
return a.modalBox.find("img").css({"max-width": a.imageMaxWidth(), "max-height": a.imageMaxHeight()})
})
}, b.prototype.closed = function () {
var a = this;
return this.modalBox.bind("close", function () {
return a.modalBox.hide()
})
}, b.prototype.loaded = function () {
var a = this;
return this.modalBox.bind("loaded", function () {
return a.modalBox.fadeIn("slow")
})
}, b.prototype.imageLoaded = function () {
var a = this;
this.modalBox.find("img").first().load(function () {
return a.imagePosition()
});
if (this.modalBox.find("img")[0].complete)return this.imagePosition()
}, b.prototype.imagePosition = function () {
return this.modalBox.trigger("loaded").css({
"margin-top": this.marginTop(),
"margin-left": this.marginLeft()
}), this.modalBox.css({
"margin-top": this.marginTop(),
"margin-left": this.marginLeft()
}), this.modalBox.css({
"margin-top": this.marginTop(),
"margin-left": this.marginLeft()
}), this.modalBox.find(".description").css({height: this.modalBox.find("img").height() - 20})
}, b.prototype.triggerClose = function () {
var a = this;
return $(document).keyup(function (b) {
if (b.keyCode === 27)return a.modalBox.trigger("close")
}), this.modalBox.find(".close").click(function () {
return a.modalBox.trigger("close")
})
}, b.prototype.imageMaxWidth = function () {
return window.innerWidth * .8 - 300
}, b.prototype.imageMaxHeight = function () {
return window.innerHeight * .8
}, b.prototype.marginTop = function () {
return document.width > 700 ? "-" + this.modalBox.height() / 2 + "px" : "-" + (this.modalBox.height() / 2 - 80) + "px"
}, b.prototype.marginLeft = function () {
return "-" + this.modalBox.width() / 2 + "px"
}, b.prototype.addClose = function () {
return $(".description").before("")
}, b
}(), d = function () {
function a(a) {
this.bindClicks = e(this.bindClicks, this), this.bindClosed = e(this.bindClosed, this), this.bindLoaded = e(this.bindLoaded, this), this.modalBox = a, this.bindLoaded(), this.bindClosed(), this.bindClicks()
}
return a.prototype.bindLoaded = function () {
var a = this;
return this.modalBox.bind("loaded", function () {
return $("#the-lights").length ? a.theLights = $("#the-lights") : (a.theLights = $("<div id='the-lights'></div>"), a.theLights.appendTo("body"))
})
}, a.prototype.bindClosed = function () {
var a = this;
return this.modalBox.bind("close", function () {
return a.theLights.remove()
})
}, a.prototype.bindClicks = function () {
var a = this;
return $("body").on("click touchstart", "#the-lights", function (b) {
return b.preventDefault(), b.stopPropagation(), a.modalBox.trigger("close")
})
}, a
}(), c = function () {
function a(a) {
this.bindFullClosed = e(this.bindFullClosed, this), this.bindFullLoaded = e(this.bindFullLoaded, this), this.modalBox = a, document.width > 700 && (this.bindFullLoaded(), this.bindFullClosed()), this.html = $("html")
}
return a.prototype.bindFullLoaded = function () {
var a = this;
return this.modalBox.bind("loaded", function () {
return a.html.css("overflow", "hidden")
})
}, a.prototype.bindFullClosed = function () {
var a = this;
return this.modalBox.bind("close", function () {
return a.html.css("overflow", "auto")
})
}, a
}(), a = function () {
function a(a) {
this.opts = e(this.opts, this), this.bindLoaded = e(this.bindLoaded, this), this.bindOpened = e(this.bindOpened, this), this.modalBox = a, this.bindOpened(), this.bindLoaded()
}
return a.prototype.bindOpened = function () {
var a = this;
return this.modalBox.bind("open", function () {
var b;
return a.loading = $("<div id='loading-modal'></div>"), a.loading.appendTo("body").css({top: $(window).scrollTop() + 300 + "px"}), b = (new Spinner(a.opts())).spin(document.getElementById("loading-modal"))
})
}, a.prototype.bindLoaded = function () {
var a = this;
return this.modalBox.bind("loaded", function () {
return a.loading.remove()
}) | }, a.prototype.opts = function () {
return {
lines: 9,
length: 30, | random_line_split | |
mikes-modal.min.js | ++) {
var d = arguments[b];
for (var e in d)a[e] === c && (a[e] = d[e])
}
return a
}
function j(a) {
var b = {x: a.offsetLeft, y: a.offsetTop};
while (a = a.offsetParent)b.x += a.offsetLeft, b.y += a.offsetTop;
return b
}
var k = ["webkit", "Moz", "ms", "O"], l = {}, m, n = function () {
var a = d("style", {type: "text/css"});
return e(b.getElementsByTagName("head")[0], a), a.sheet || a.styleSheet
}(), o = {
lines: 12,
length: 7,
width: 5,
radius: 10,
rotate: 0,
corners: 1,
color: "#000",
speed: 1,
trail: 100,
opacity: .25,
fps: 20,
zIndex: 2e9,
className: "spinner",
top: "auto",
left: "auto"
}, p = function q(a) {
if (!this.spin)return new q(a);
this.opts = i(a || {}, q.defaults, o)
};
p.defaults = {}, i(p.prototype, {
spin: function (a) {
this.stop();
var b = this, c = b.opts, e = b.el = h(d(0, {className: c.className}), {
position: "relative",
width: 0,
zIndex: c.zIndex
}), f = c.radius + c.length + c.width, g, i;
a && (a.insertBefore(e, a.firstChild || null), i = j(a), g = j(e), h(e, {
left: (c.left == "auto" ? i.x - g.x + (a.offsetWidth >> 1) : parseInt(c.left, 10) + f) + "px",
top: (c.top == "auto" ? i.y - g.y + (a.offsetHeight >> 1) : parseInt(c.top, 10) + f) + "px"
})), e.setAttribute("aria-role", "progressbar"), b.lines(e, b.opts);
if (!m) {
var k = 0, l = c.fps, n = l / c.speed, o = (1 - c.opacity) / (n * c.trail / 100), p = n / c.lines;
(function q() {
k++;
for (var a = c.lines; a; a--) {
var d = Math.max(1 - (k + a * p) % n * o, c.opacity);
b.opacity(e, c.lines - a, d, c)
}
b.timeout = b.el && setTimeout(q, ~~(1e3 / l))
})()
}
return b
}, stop: function () {
var a = this.el;
return a && (clearTimeout(this.timeout), a.parentNode && a.parentNode.removeChild(a), this.el = c), this
}, lines: function (a, b) {
function c(a, c) {
return h(d(), {
position: "absolute",
width: b.length + b.width + "px",
height: b.width + "px",
background: a,
boxShadow: c,
transformOrigin: "left",
transform: "rotate(" + ~~(360 / b.lines * g + b.rotate) + "deg) translate(" + b.radius + "px" + ",0)",
borderRadius: (b.corners * b.width >> 1) + "px"
})
}
var g = 0, i;
for (; g < b.lines; g++)i = h(d(), {
position: "absolute",
top: 1 + ~(b.width / 2) + "px",
transform: b.hwaccel ? "translate3d(0,0,0)" : "",
opacity: b.opacity,
animation: m && f(b.opacity, b.trail, g, b.lines) + " " + 1 / b.speed + "s linear infinite"
}), b.shadow && e(i, h(c("#000", "0 0 4px #000"), {top: "2px"})), e(a, e(i, c(b.color, "0 0 1px rgba(0,0,0,.1)")));
return a
}, opacity: function (a, b, c) {
b < a.childNodes.length && (a.childNodes[b].style.opacity = c)
}
}), function () {
function a(a, b) {
return d("<" + a + ' xmlns="urn:schemas-microsoft.com:vml" class="spin-vml">', b)
}
var b = h(d("group"), {behavior: "url(#default#VML)"});
!g(b, "transform") && b.adj ? (n.addRule(".spin-vml", "behavior:url(#default#VML)"), p.prototype.lines = function (b, c) {
function d() {
return h(a("group", {coordsize: i + " " + i, coordorigin: -g + " " + -g}), {width: i, height: i})
}
function f(b, f, i) {
e(k, e(h(d(), {
rotation: 360 / c.lines * b + "deg",
left: ~~f
}), e(h(a("roundrect", {arcsize: c.corners}), {
width: g,
height: c.width,
left: c.radius,
top: -c.width >> 1,
filter: i
}), a("fill", {color: c.color, opacity: c.opacity}), a("stroke", {opacity: 0}))))
}
var g = c.length + c.width, i = 2 * g, j = -(c.width + c.length) * 2 + "px", k = h(d(), {
position: "absolute",
top: j,
left: j
}), l;
if (c.shadow)for (l = 1; l <= c.lines; l++)f(l, -2, "progid:DXImageTransform.Microsoft.Blur(pixelradius=2,makeshadow=1,shadowopacity=.3)");
for (l = 1; l <= c.lines; l++)f(l);
return e(b, k)
}, p.prototype.opacity = function (a, b, c, d) {
var e = a.firstChild;
d = d.shadow && d.lines || 0, e && b + d < e.childNodes.length && (e = e.childNodes[b + d], e = e && e.firstChild, e = e && e.firstChild, e && (e.opacity = c))
}) : m = g(b, "animation")
}(), typeof define == "function" && define.amd ? define(function () {
return p
}) : a.Spinner = p
}(window, document), function () {
var a, b, c, d, e = function (a, b) {
return function () {
return a.apply(b, arguments)
}
};
$.fn.mikesModal = function (a) {
return this.modal = new b($(this))
}, b = function () {
function | (a) {
this.addClose = e(this.addClose, this), this.marginLeft = e(this.marginLeft, this), this.marginTop = e(this.marginTop, this), this.imageMaxHeight = e(this.imageMaxHeight, this), this.imageMaxWidth = e(this.imageMaxWidth, this), this.triggerClose = e(this.triggerClose, this), this.imagePosition = e(this.imagePosition, this), this.imageLoaded = e(this.imageLoaded, this), this.loaded = e(this.loaded, this), this.closed = e(this.closed, this), this.opened = e(this.opened, this), this.bindMethods = e(this.bindMethods, this), this.createAllClasses = e(this.createAllClasses, this), this.modalBox = a, this.bindMethods(), this.createAllClasses(), this.modalBox.trigger("open"), this.imageLoaded(), this.addClose(), this.triggerClose()
}
return b.prototype.createAllClasses = function () {
return new c(this.modalBox), new a(this.modalBox), new d(this.modalBox)
}, b.prototype.bindMethods = function () {
return this.opened(), this.loaded(), this.closed()
}, b.prototype.opened = function () {
var a = this;
return this.modalBox.bind("open", function () {
return a.modalBox.find("img").css({"max-width": a.imageMaxWidth(), "max-height": a.imageMaxHeight()})
})
}, b.prototype.closed = function () {
var a = this;
return this.modalBox.bind("close", function () {
return a.modalBox.hide()
})
}, b.prototype.loaded = function () {
var a = this;
return this.modalBox.bind("loaded", function () {
return a.modalBox.fadeIn("slow")
})
}, b.prototype.imageLoaded = function () {
var a = this;
this.modalBox.find("img").first().load(function () {
return a.imagePosition()
});
if (this.modalBox.find("img")[0].complete)return this.imagePosition()
| b | identifier_name |
metadb_test.go | ch <- errors.New(r.(string))
default:
ch <- nil
}
}
}()
fn()
}()
return <-ch
}
// RunWithDB runs a closure passing it a database handle which is disposed of
// afterward.
func RunWithDB(fn func(*sql.DB)) {
db, err := sql.Open("sqlite3", TestDBPath)
if err != nil {
panic(err)
}
fn(db)
err = db.Close()
if err != nil {
panic(err)
}
if err := os.Remove(TestDBPath); err != nil {
panic(err)
}
}
// RunWithInstance runs a closure passing it an Instance.
func RunWithInstance(fn func(*Instance)) {
RunWithDB(func(db *sql.DB) {
if instance, err := NewInstance(db); err != nil {
panic(err)
} else {
fn(instance)
}
})
}
// EntryFixture contains the basic data required for a metadata entry.
type EntryFixture struct {
Name string
Value interface{}
ValueType uint
}
// InsertFixtures takes a list of EntryFixtures and inserts them into the
// database handle managed by the provided Instance.
func InsertFixtures(instance *Instance, fixtures []EntryFixture) {
for _, fixture := range fixtures {
_, err := instance.DB.Exec(`
INSERT INTO metadata (Name, Value, ValueType) Values (?, ?, ?)
`, fixture.Name, fixture.Value, fixture.ValueType)
if err != nil {
panic(fmt.Sprint("tests: failed to insert fixtures:\n", err))
}
}
}
// GetFixtures returns an array of EntryFixtures read from all the metadata
// entries in the database managed by the provided Instance.
func GetFixtures(instance *Instance) map[string]*EntryFixture {
rows, err := instance.DB.Query("SELECT Name, Value, ValueType FROM metadata;")
if err != nil {
panic(fmt.Sprint("tests: failed to retrieve fixtures:\n", err))
}
fixtures := make(map[string]*EntryFixture)
for rows.Next() {
var value string
fixture := EntryFixture{}
if err := rows.Scan(&fixture.Name, &value, &fixture.ValueType); err != nil {
panic(fmt.Errorf("tests: failed to scan row while retrieving fixtures:\n%s", err))
}
fixture.Value = value
fixtures[fixture.Name] = &fixture
}
return fixtures
}
// TestNewInstance ensures that an Instance object is returned as expected with
// a valid database handle, and an error with an invalid handle.
func TestNewInstance(t *testing.T) |
// TestExists ensures that Instance.Exists is accurate.
func TestExists(t *testing.T) {
RunWithInstance(func(instance *Instance) {
InsertFixtures(instance, []EntryFixture{
{Name: "foo", Value: "bar", ValueType: 3},
})
if instance.Exists("bar") {
t.Error("Instance.Exists: got 'true' expected 'false'")
}
if !instance.Exists("foo") {
t.Error("Instance.Exists: got 'false' expected 'true'")
}
})
}
// TestToValueType ensures that the correct type index is returned for each of
// the allowed types.
func TestToValueType(t *testing.T) {
testValid := func(value interface{}, expected uint) {
if res, err := toValueType(value); err != nil {
t.Error("toValueType: got error:\n", err)
} else if res != expected {
t.Errorf("toValueType: got '%d' expected '%d'", res, expected)
}
}
testValid(true, 0)
testValid(281, 1)
testValid(43.183, 2)
testValid("hello world!", 3)
if _, err := toValueType([]string{"disallowed", "type"}); err == nil {
t.Error("toValueType: expected error with disallowed type")
}
}
// TestFromBlobString ensures that the correct data is returned for a number
// of combinations of blob strings and value types.
func TestFromBlobString(t *testing.T) {
RunWithInstance(func(instance *Instance) {
InsertFixtures(instance, []EntryFixture{
{Name: "bool", Value: true, ValueType: 0},
{Name: "invalidBool", Value: "maybe", ValueType: 0},
{Name: "int", Value: 239, ValueType: 1},
{Name: "invalidInt", Value: "not a number", ValueType: 1},
{Name: "float", Value: 21.42, ValueType: 2},
{Name: "invalidFloat", Value: "21.48aje21", ValueType: 2},
{Name: "string", Value: "hello world!", ValueType: 3},
{Name: "unknown", Value: "nothing", ValueType: 100},
})
fixtures := GetFixtures(instance)
testFixture := func(name string, expected interface{}) {
fixture := fixtures[name]
res, err := fromBlobString(fixture.Value.(string), fixture.ValueType)
if err != nil {
t.Error("fromBlobString: got errror:\n", err)
} else if res != expected {
t.Errorf("fromBlobString: got '%v' expected '%v'", res, expected)
}
}
expectError := func(name string, msg string) {
fixture := fixtures[name]
if _, err := fromBlobString(fixture.Value.(string), fixture.ValueType); err == nil {
t.Errorf("fromBlobString: expected error with %s", msg)
}
}
testFixture("bool", true)
testFixture("int", 239)
testFixture("float", 21.42)
testFixture("string", "hello world!")
expectError("invalidBool", "invalid boolean blob string")
expectError("invalidInt", "invalid integer blob string")
expectError("invalidFloat", "invalid float blob string")
expectError("unknown", "invalid value type")
})
}
// TestGetValueType ensures that getValueType returns accurate data.
func TestGetValueType(t *testing.T) {
RunWithInstance(func(instance *Instance) {
InsertFixtures(instance, []EntryFixture{
{Name: "foo", Value: "1", ValueType: 0},
{Name: "bar", Value: "1011", ValueType: 1},
})
testValueType := func(name string, expected uint) {
if res, err := instance.getValueType(name); err != nil {
t.Error("Instance.getValueType: got error:\n", err)
} else if res != expected {
t.Errorf("Instance.getValueType: got '%d' expected '%d'", res, expected)
}
}
testValueType("foo", 0)
testValueType("bar", 1)
_, err := instance.getValueType("unknown")
if err == nil {
t.Error("Instance.getValueType: expected error with missing entry")
} else if _, ok := err.(*ErrNoEntry); !ok {
t.Error("Instance.getValueType: expected error of type *ErrNoEntry")
}
})
}
// TestGetAndSet ensures that Get and Set respond as expected to different
// combinations of data and that data can be accurately read and updated
// once set.
func TestGetAndSet(t *testing.T) {
RunWithInstance(func(instance *Instance) {
checkResultWithBool := func(name string, fetched interface{}, expected bool) {
if res, ok := fetched.(bool); ok {
if res != expected {
t.Errorf("Instance.%s: got '%t' expected '%t'", name, res, expected)
}
} else {
t.Errorf("Instance.%s: got result of an unknown type, expected 'bool'", name)
}
}
if err := instance.Set("foo", true); err != nil {
t.Fatal("Instance.Set: got error:\n", err)
}
if foo, err := instance.Get("foo"); err != nil {
t.Error("Instance.Get: got error:\n", err)
} else {
checkResultWithBool("Get", foo, true)
}
if _, err := instance.Get("bar"); err == nil {
t.Error("Instance.Get: expected error with non-existent entry")
}
if err := instance.Set("foo", false); err != nil {
t.Fatal("Instance.Set: got error:\n", err)
}
foo := instance.MustGet("foo")
checkResultWithBool("MustGet", foo, false)
if err := panicked(func() { instance.MustGet("bar") }); err == nil {
t.Error("Instance.MustGet: expected panic with non-existent entry")
} else if _, ok := err.(*ErrNoEntry); !ok {
t.Error("Instance.MustGet: expected error of type *ErrNoEntry")
}
if err := instance.Set("foo", []string | {
if _, err := NewInstance(nil); err == nil {
t.Error("NewInstance: expected error with nil database handle")
}
RunWithDB(func(db *sql.DB) {
if _, err := NewInstance(db); err != nil {
t.Fatal("NewInstance: got error:\n", err)
}
})
} | identifier_body |
metadb_test.go | ch <- errors.New(r.(string))
default:
ch <- nil
}
}
}()
fn()
}()
return <-ch
}
// RunWithDB runs a closure passing it a database handle which is disposed of
// afterward.
func RunWithDB(fn func(*sql.DB)) {
db, err := sql.Open("sqlite3", TestDBPath)
if err != nil {
panic(err)
}
fn(db)
err = db.Close()
if err != nil |
if err := os.Remove(TestDBPath); err != nil {
panic(err)
}
}
// RunWithInstance runs a closure passing it an Instance.
func RunWithInstance(fn func(*Instance)) {
RunWithDB(func(db *sql.DB) {
if instance, err := NewInstance(db); err != nil {
panic(err)
} else {
fn(instance)
}
})
}
// EntryFixture contains the basic data required for a metadata entry.
type EntryFixture struct {
Name string
Value interface{}
ValueType uint
}
// InsertFixtures takes a list of EntryFixtures and inserts them into the
// database handle managed by the provided Instance.
func InsertFixtures(instance *Instance, fixtures []EntryFixture) {
for _, fixture := range fixtures {
_, err := instance.DB.Exec(`
INSERT INTO metadata (Name, Value, ValueType) Values (?, ?, ?)
`, fixture.Name, fixture.Value, fixture.ValueType)
if err != nil {
panic(fmt.Sprint("tests: failed to insert fixtures:\n", err))
}
}
}
// GetFixtures returns an array of EntryFixtures read from all the metadata
// entries in the database managed by the provided Instance.
func GetFixtures(instance *Instance) map[string]*EntryFixture {
rows, err := instance.DB.Query("SELECT Name, Value, ValueType FROM metadata;")
if err != nil {
panic(fmt.Sprint("tests: failed to retrieve fixtures:\n", err))
}
fixtures := make(map[string]*EntryFixture)
for rows.Next() {
var value string
fixture := EntryFixture{}
if err := rows.Scan(&fixture.Name, &value, &fixture.ValueType); err != nil {
panic(fmt.Errorf("tests: failed to scan row while retrieving fixtures:\n%s", err))
}
fixture.Value = value
fixtures[fixture.Name] = &fixture
}
return fixtures
}
// TestNewInstance ensures that an Instance object is returned as expected with
// a valid database handle, and an error with an invalid handle.
func TestNewInstance(t *testing.T) {
if _, err := NewInstance(nil); err == nil {
t.Error("NewInstance: expected error with nil database handle")
}
RunWithDB(func(db *sql.DB) {
if _, err := NewInstance(db); err != nil {
t.Fatal("NewInstance: got error:\n", err)
}
})
}
// TestExists ensures that Instance.Exists is accurate.
func TestExists(t *testing.T) {
RunWithInstance(func(instance *Instance) {
InsertFixtures(instance, []EntryFixture{
{Name: "foo", Value: "bar", ValueType: 3},
})
if instance.Exists("bar") {
t.Error("Instance.Exists: got 'true' expected 'false'")
}
if !instance.Exists("foo") {
t.Error("Instance.Exists: got 'false' expected 'true'")
}
})
}
// TestToValueType ensures that the correct type index is returned for each of
// the allowed types.
func TestToValueType(t *testing.T) {
testValid := func(value interface{}, expected uint) {
if res, err := toValueType(value); err != nil {
t.Error("toValueType: got error:\n", err)
} else if res != expected {
t.Errorf("toValueType: got '%d' expected '%d'", res, expected)
}
}
testValid(true, 0)
testValid(281, 1)
testValid(43.183, 2)
testValid("hello world!", 3)
if _, err := toValueType([]string{"disallowed", "type"}); err == nil {
t.Error("toValueType: expected error with disallowed type")
}
}
// TestFromBlobString ensures that the correct data is returned for a number
// of combinations of blob strings and value types.
func TestFromBlobString(t *testing.T) {
RunWithInstance(func(instance *Instance) {
InsertFixtures(instance, []EntryFixture{
{Name: "bool", Value: true, ValueType: 0},
{Name: "invalidBool", Value: "maybe", ValueType: 0},
{Name: "int", Value: 239, ValueType: 1},
{Name: "invalidInt", Value: "not a number", ValueType: 1},
{Name: "float", Value: 21.42, ValueType: 2},
{Name: "invalidFloat", Value: "21.48aje21", ValueType: 2},
{Name: "string", Value: "hello world!", ValueType: 3},
{Name: "unknown", Value: "nothing", ValueType: 100},
})
fixtures := GetFixtures(instance)
testFixture := func(name string, expected interface{}) {
fixture := fixtures[name]
res, err := fromBlobString(fixture.Value.(string), fixture.ValueType)
if err != nil {
t.Error("fromBlobString: got errror:\n", err)
} else if res != expected {
t.Errorf("fromBlobString: got '%v' expected '%v'", res, expected)
}
}
expectError := func(name string, msg string) {
fixture := fixtures[name]
if _, err := fromBlobString(fixture.Value.(string), fixture.ValueType); err == nil {
t.Errorf("fromBlobString: expected error with %s", msg)
}
}
testFixture("bool", true)
testFixture("int", 239)
testFixture("float", 21.42)
testFixture("string", "hello world!")
expectError("invalidBool", "invalid boolean blob string")
expectError("invalidInt", "invalid integer blob string")
expectError("invalidFloat", "invalid float blob string")
expectError("unknown", "invalid value type")
})
}
// TestGetValueType ensures that getValueType returns accurate data.
func TestGetValueType(t *testing.T) {
RunWithInstance(func(instance *Instance) {
InsertFixtures(instance, []EntryFixture{
{Name: "foo", Value: "1", ValueType: 0},
{Name: "bar", Value: "1011", ValueType: 1},
})
testValueType := func(name string, expected uint) {
if res, err := instance.getValueType(name); err != nil {
t.Error("Instance.getValueType: got error:\n", err)
} else if res != expected {
t.Errorf("Instance.getValueType: got '%d' expected '%d'", res, expected)
}
}
testValueType("foo", 0)
testValueType("bar", 1)
_, err := instance.getValueType("unknown")
if err == nil {
t.Error("Instance.getValueType: expected error with missing entry")
} else if _, ok := err.(*ErrNoEntry); !ok {
t.Error("Instance.getValueType: expected error of type *ErrNoEntry")
}
})
}
// TestGetAndSet ensures that Get and Set respond as expected to different
// combinations of data and that data can be accurately read and updated
// once set.
func TestGetAndSet(t *testing.T) {
RunWithInstance(func(instance *Instance) {
checkResultWithBool := func(name string, fetched interface{}, expected bool) {
if res, ok := fetched.(bool); ok {
if res != expected {
t.Errorf("Instance.%s: got '%t' expected '%t'", name, res, expected)
}
} else {
t.Errorf("Instance.%s: got result of an unknown type, expected 'bool'", name)
}
}
if err := instance.Set("foo", true); err != nil {
t.Fatal("Instance.Set: got error:\n", err)
}
if foo, err := instance.Get("foo"); err != nil {
t.Error("Instance.Get: got error:\n", err)
} else {
checkResultWithBool("Get", foo, true)
}
if _, err := instance.Get("bar"); err == nil {
t.Error("Instance.Get: expected error with non-existent entry")
}
if err := instance.Set("foo", false); err != nil {
t.Fatal("Instance.Set: got error:\n", err)
}
foo := instance.MustGet("foo")
checkResultWithBool("MustGet", foo, false)
if err := panicked(func() { instance.MustGet("bar") }); err == nil {
t.Error("Instance.MustGet: expected panic with non-existent entry")
} else if _, ok := err.(*ErrNoEntry); !ok {
t.Error("Instance.MustGet: expected error of type *ErrNoEntry")
}
if err := instance.Set("foo", []string | {
panic(err)
} | conditional_block |
metadb_test.go | :
ch <- errors.New(r.(string))
default:
ch <- nil
}
}
}()
fn()
}()
return <-ch
}
// RunWithDB runs a closure passing it a database handle which is disposed of
// afterward.
func RunWithDB(fn func(*sql.DB)) {
db, err := sql.Open("sqlite3", TestDBPath)
if err != nil {
panic(err)
}
fn(db)
err = db.Close()
if err != nil {
panic(err)
}
if err := os.Remove(TestDBPath); err != nil {
panic(err)
}
}
| func RunWithInstance(fn func(*Instance)) {
RunWithDB(func(db *sql.DB) {
if instance, err := NewInstance(db); err != nil {
panic(err)
} else {
fn(instance)
}
})
}
// EntryFixture contains the basic data required for a metadata entry.
type EntryFixture struct {
Name string
Value interface{}
ValueType uint
}
// InsertFixtures takes a list of EntryFixtures and inserts them into the
// database handle managed by the provided Instance.
func InsertFixtures(instance *Instance, fixtures []EntryFixture) {
for _, fixture := range fixtures {
_, err := instance.DB.Exec(`
INSERT INTO metadata (Name, Value, ValueType) Values (?, ?, ?)
`, fixture.Name, fixture.Value, fixture.ValueType)
if err != nil {
panic(fmt.Sprint("tests: failed to insert fixtures:\n", err))
}
}
}
// GetFixtures returns an array of EntryFixtures read from all the metadata
// entries in the database managed by the provided Instance.
func GetFixtures(instance *Instance) map[string]*EntryFixture {
rows, err := instance.DB.Query("SELECT Name, Value, ValueType FROM metadata;")
if err != nil {
panic(fmt.Sprint("tests: failed to retrieve fixtures:\n", err))
}
fixtures := make(map[string]*EntryFixture)
for rows.Next() {
var value string
fixture := EntryFixture{}
if err := rows.Scan(&fixture.Name, &value, &fixture.ValueType); err != nil {
panic(fmt.Errorf("tests: failed to scan row while retrieving fixtures:\n%s", err))
}
fixture.Value = value
fixtures[fixture.Name] = &fixture
}
return fixtures
}
// TestNewInstance ensures that an Instance object is returned as expected with
// a valid database handle, and an error with an invalid handle.
func TestNewInstance(t *testing.T) {
if _, err := NewInstance(nil); err == nil {
t.Error("NewInstance: expected error with nil database handle")
}
RunWithDB(func(db *sql.DB) {
if _, err := NewInstance(db); err != nil {
t.Fatal("NewInstance: got error:\n", err)
}
})
}
// TestExists ensures that Instance.Exists is accurate.
func TestExists(t *testing.T) {
RunWithInstance(func(instance *Instance) {
InsertFixtures(instance, []EntryFixture{
{Name: "foo", Value: "bar", ValueType: 3},
})
if instance.Exists("bar") {
t.Error("Instance.Exists: got 'true' expected 'false'")
}
if !instance.Exists("foo") {
t.Error("Instance.Exists: got 'false' expected 'true'")
}
})
}
// TestToValueType ensures that the correct type index is returned for each of
// the allowed types.
func TestToValueType(t *testing.T) {
testValid := func(value interface{}, expected uint) {
if res, err := toValueType(value); err != nil {
t.Error("toValueType: got error:\n", err)
} else if res != expected {
t.Errorf("toValueType: got '%d' expected '%d'", res, expected)
}
}
testValid(true, 0)
testValid(281, 1)
testValid(43.183, 2)
testValid("hello world!", 3)
if _, err := toValueType([]string{"disallowed", "type"}); err == nil {
t.Error("toValueType: expected error with disallowed type")
}
}
// TestFromBlobString ensures that the correct data is returned for a number
// of combinations of blob strings and value types.
func TestFromBlobString(t *testing.T) {
RunWithInstance(func(instance *Instance) {
InsertFixtures(instance, []EntryFixture{
{Name: "bool", Value: true, ValueType: 0},
{Name: "invalidBool", Value: "maybe", ValueType: 0},
{Name: "int", Value: 239, ValueType: 1},
{Name: "invalidInt", Value: "not a number", ValueType: 1},
{Name: "float", Value: 21.42, ValueType: 2},
{Name: "invalidFloat", Value: "21.48aje21", ValueType: 2},
{Name: "string", Value: "hello world!", ValueType: 3},
{Name: "unknown", Value: "nothing", ValueType: 100},
})
fixtures := GetFixtures(instance)
testFixture := func(name string, expected interface{}) {
fixture := fixtures[name]
res, err := fromBlobString(fixture.Value.(string), fixture.ValueType)
if err != nil {
t.Error("fromBlobString: got errror:\n", err)
} else if res != expected {
t.Errorf("fromBlobString: got '%v' expected '%v'", res, expected)
}
}
expectError := func(name string, msg string) {
fixture := fixtures[name]
if _, err := fromBlobString(fixture.Value.(string), fixture.ValueType); err == nil {
t.Errorf("fromBlobString: expected error with %s", msg)
}
}
testFixture("bool", true)
testFixture("int", 239)
testFixture("float", 21.42)
testFixture("string", "hello world!")
expectError("invalidBool", "invalid boolean blob string")
expectError("invalidInt", "invalid integer blob string")
expectError("invalidFloat", "invalid float blob string")
expectError("unknown", "invalid value type")
})
}
// TestGetValueType ensures that getValueType returns accurate data.
func TestGetValueType(t *testing.T) {
RunWithInstance(func(instance *Instance) {
InsertFixtures(instance, []EntryFixture{
{Name: "foo", Value: "1", ValueType: 0},
{Name: "bar", Value: "1011", ValueType: 1},
})
testValueType := func(name string, expected uint) {
if res, err := instance.getValueType(name); err != nil {
t.Error("Instance.getValueType: got error:\n", err)
} else if res != expected {
t.Errorf("Instance.getValueType: got '%d' expected '%d'", res, expected)
}
}
testValueType("foo", 0)
testValueType("bar", 1)
_, err := instance.getValueType("unknown")
if err == nil {
t.Error("Instance.getValueType: expected error with missing entry")
} else if _, ok := err.(*ErrNoEntry); !ok {
t.Error("Instance.getValueType: expected error of type *ErrNoEntry")
}
})
}
// TestGetAndSet ensures that Get and Set respond as expected to different
// combinations of data and that data can be accurately read and updated
// once set.
func TestGetAndSet(t *testing.T) {
RunWithInstance(func(instance *Instance) {
checkResultWithBool := func(name string, fetched interface{}, expected bool) {
if res, ok := fetched.(bool); ok {
if res != expected {
t.Errorf("Instance.%s: got '%t' expected '%t'", name, res, expected)
}
} else {
t.Errorf("Instance.%s: got result of an unknown type, expected 'bool'", name)
}
}
if err := instance.Set("foo", true); err != nil {
t.Fatal("Instance.Set: got error:\n", err)
}
if foo, err := instance.Get("foo"); err != nil {
t.Error("Instance.Get: got error:\n", err)
} else {
checkResultWithBool("Get", foo, true)
}
if _, err := instance.Get("bar"); err == nil {
t.Error("Instance.Get: expected error with non-existent entry")
}
if err := instance.Set("foo", false); err != nil {
t.Fatal("Instance.Set: got error:\n", err)
}
foo := instance.MustGet("foo")
checkResultWithBool("MustGet", foo, false)
if err := panicked(func() { instance.MustGet("bar") }); err == nil {
t.Error("Instance.MustGet: expected panic with non-existent entry")
} else if _, ok := err.(*ErrNoEntry); !ok {
t.Error("Instance.MustGet: expected error of type *ErrNoEntry")
}
if err := instance.Set("foo", []string{" | // RunWithInstance runs a closure passing it an Instance. | random_line_split |
metadb_test.go | ch <- errors.New(r.(string))
default:
ch <- nil
}
}
}()
fn()
}()
return <-ch
}
// RunWithDB runs a closure passing it a database handle which is disposed of
// afterward.
func RunWithDB(fn func(*sql.DB)) {
db, err := sql.Open("sqlite3", TestDBPath)
if err != nil {
panic(err)
}
fn(db)
err = db.Close()
if err != nil {
panic(err)
}
if err := os.Remove(TestDBPath); err != nil {
panic(err)
}
}
// RunWithInstance runs a closure passing it an Instance.
func RunWithInstance(fn func(*Instance)) {
RunWithDB(func(db *sql.DB) {
if instance, err := NewInstance(db); err != nil {
panic(err)
} else {
fn(instance)
}
})
}
// EntryFixture contains the basic data required for a metadata entry.
type EntryFixture struct {
Name string
Value interface{}
ValueType uint
}
// InsertFixtures takes a list of EntryFixtures and inserts them into the
// database handle managed by the provided Instance.
func InsertFixtures(instance *Instance, fixtures []EntryFixture) {
for _, fixture := range fixtures {
_, err := instance.DB.Exec(`
INSERT INTO metadata (Name, Value, ValueType) Values (?, ?, ?)
`, fixture.Name, fixture.Value, fixture.ValueType)
if err != nil {
panic(fmt.Sprint("tests: failed to insert fixtures:\n", err))
}
}
}
// GetFixtures returns an array of EntryFixtures read from all the metadata
// entries in the database managed by the provided Instance.
func GetFixtures(instance *Instance) map[string]*EntryFixture {
rows, err := instance.DB.Query("SELECT Name, Value, ValueType FROM metadata;")
if err != nil {
panic(fmt.Sprint("tests: failed to retrieve fixtures:\n", err))
}
fixtures := make(map[string]*EntryFixture)
for rows.Next() {
var value string
fixture := EntryFixture{}
if err := rows.Scan(&fixture.Name, &value, &fixture.ValueType); err != nil {
panic(fmt.Errorf("tests: failed to scan row while retrieving fixtures:\n%s", err))
}
fixture.Value = value
fixtures[fixture.Name] = &fixture
}
return fixtures
}
// TestNewInstance ensures that an Instance object is returned as expected with
// a valid database handle, and an error with an invalid handle.
func TestNewInstance(t *testing.T) {
if _, err := NewInstance(nil); err == nil {
t.Error("NewInstance: expected error with nil database handle")
}
RunWithDB(func(db *sql.DB) {
if _, err := NewInstance(db); err != nil {
t.Fatal("NewInstance: got error:\n", err)
}
})
}
// TestExists ensures that Instance.Exists is accurate.
func TestExists(t *testing.T) {
RunWithInstance(func(instance *Instance) {
InsertFixtures(instance, []EntryFixture{
{Name: "foo", Value: "bar", ValueType: 3},
})
if instance.Exists("bar") {
t.Error("Instance.Exists: got 'true' expected 'false'")
}
if !instance.Exists("foo") {
t.Error("Instance.Exists: got 'false' expected 'true'")
}
})
}
// TestToValueType ensures that the correct type index is returned for each of
// the allowed types.
func TestToValueType(t *testing.T) {
testValid := func(value interface{}, expected uint) {
if res, err := toValueType(value); err != nil {
t.Error("toValueType: got error:\n", err)
} else if res != expected {
t.Errorf("toValueType: got '%d' expected '%d'", res, expected)
}
}
testValid(true, 0)
testValid(281, 1)
testValid(43.183, 2)
testValid("hello world!", 3)
if _, err := toValueType([]string{"disallowed", "type"}); err == nil {
t.Error("toValueType: expected error with disallowed type")
}
}
// TestFromBlobString ensures that the correct data is returned for a number
// of combinations of blob strings and value types.
func | (t *testing.T) {
RunWithInstance(func(instance *Instance) {
InsertFixtures(instance, []EntryFixture{
{Name: "bool", Value: true, ValueType: 0},
{Name: "invalidBool", Value: "maybe", ValueType: 0},
{Name: "int", Value: 239, ValueType: 1},
{Name: "invalidInt", Value: "not a number", ValueType: 1},
{Name: "float", Value: 21.42, ValueType: 2},
{Name: "invalidFloat", Value: "21.48aje21", ValueType: 2},
{Name: "string", Value: "hello world!", ValueType: 3},
{Name: "unknown", Value: "nothing", ValueType: 100},
})
fixtures := GetFixtures(instance)
testFixture := func(name string, expected interface{}) {
fixture := fixtures[name]
res, err := fromBlobString(fixture.Value.(string), fixture.ValueType)
if err != nil {
t.Error("fromBlobString: got errror:\n", err)
} else if res != expected {
t.Errorf("fromBlobString: got '%v' expected '%v'", res, expected)
}
}
expectError := func(name string, msg string) {
fixture := fixtures[name]
if _, err := fromBlobString(fixture.Value.(string), fixture.ValueType); err == nil {
t.Errorf("fromBlobString: expected error with %s", msg)
}
}
testFixture("bool", true)
testFixture("int", 239)
testFixture("float", 21.42)
testFixture("string", "hello world!")
expectError("invalidBool", "invalid boolean blob string")
expectError("invalidInt", "invalid integer blob string")
expectError("invalidFloat", "invalid float blob string")
expectError("unknown", "invalid value type")
})
}
// TestGetValueType ensures that getValueType returns accurate data.
func TestGetValueType(t *testing.T) {
RunWithInstance(func(instance *Instance) {
InsertFixtures(instance, []EntryFixture{
{Name: "foo", Value: "1", ValueType: 0},
{Name: "bar", Value: "1011", ValueType: 1},
})
testValueType := func(name string, expected uint) {
if res, err := instance.getValueType(name); err != nil {
t.Error("Instance.getValueType: got error:\n", err)
} else if res != expected {
t.Errorf("Instance.getValueType: got '%d' expected '%d'", res, expected)
}
}
testValueType("foo", 0)
testValueType("bar", 1)
_, err := instance.getValueType("unknown")
if err == nil {
t.Error("Instance.getValueType: expected error with missing entry")
} else if _, ok := err.(*ErrNoEntry); !ok {
t.Error("Instance.getValueType: expected error of type *ErrNoEntry")
}
})
}
// TestGetAndSet ensures that Get and Set respond as expected to different
// combinations of data and that data can be accurately read and updated
// once set.
func TestGetAndSet(t *testing.T) {
RunWithInstance(func(instance *Instance) {
checkResultWithBool := func(name string, fetched interface{}, expected bool) {
if res, ok := fetched.(bool); ok {
if res != expected {
t.Errorf("Instance.%s: got '%t' expected '%t'", name, res, expected)
}
} else {
t.Errorf("Instance.%s: got result of an unknown type, expected 'bool'", name)
}
}
if err := instance.Set("foo", true); err != nil {
t.Fatal("Instance.Set: got error:\n", err)
}
if foo, err := instance.Get("foo"); err != nil {
t.Error("Instance.Get: got error:\n", err)
} else {
checkResultWithBool("Get", foo, true)
}
if _, err := instance.Get("bar"); err == nil {
t.Error("Instance.Get: expected error with non-existent entry")
}
if err := instance.Set("foo", false); err != nil {
t.Fatal("Instance.Set: got error:\n", err)
}
foo := instance.MustGet("foo")
checkResultWithBool("MustGet", foo, false)
if err := panicked(func() { instance.MustGet("bar") }); err == nil {
t.Error("Instance.MustGet: expected panic with non-existent entry")
} else if _, ok := err.(*ErrNoEntry); !ok {
t.Error("Instance.MustGet: expected error of type *ErrNoEntry")
}
if err := instance.Set("foo", []string{" | TestFromBlobString | identifier_name |
memcache_zipserve.py | if type(zip_files[num_items - 1]).__name__ != 'list':
zip_files[num_items - 1] = [zip_files[num_items-1]]
num_items -= 1
else:
raise ValueError('File name arguments must be a list')
class HandlerWrapper(MemcachedZipHandler):
"""Simple wrapper for an instance of MemcachedZipHandler.
I'm still not sure why this is needed
"""
def get(self, name):
self.zipfilenames = zip_files
self.TrueGet(name)
if max_age is not None:
MAX_AGE = max_age
if public is not None:
PUBLIC = public
return HandlerWrapper
class MemcachedZipHandler(webapp.RequestHandler):
"""Handles get requests for a given URL.
Serves a GET request from a series of zip files. As files are served they are
put into memcache, which is much faster than retreiving them from the zip
source file again. It also uses considerably fewer CPU cycles.
"""
zipfile_cache = {} # class cache of source zip files
MAX_AGE = 600 # max client-side cache lifetime
PUBLIC = True # public cache setting
CACHE_PREFIX = 'cache://' # memcache key prefix for actual URLs
NEG_CACHE_PREFIX = 'noncache://' # memcache key prefix for non-existant URL
intlString = 'intl/'
validLangs = ['en', 'de', 'es', 'fr','it','ja','zh-CN','zh-TW']
def TrueGet(self, reqUri):
"""The top-level entry point to serving requests.
Called 'True' get because it does the work when called from the wrapper
class' get method. Some logic is applied to the request to serve files
from an intl/<lang>/... directory or fall through to the default language.
Args:
name: URL requested
Returns:
None
"""
langName = 'en'
resetLangCookie = False
urlLangName = None
retry = False
isValidIntl = False
isStripped = False
# Try to retrieve the user's lang pref from the cookie. If there is no
# lang pref cookie in the request, add set-cookie to the response with the
# default value of 'en'.
try:
langName = self.request.cookies['android_developer_pref_lang']
except KeyError:
resetLangCookie = True
#logging.info('==========================EXCEPTION: NO LANG COOKIE FOUND, USING [%s]', langName)
logging.info('==========================REQ INIT name [%s] langName [%s] resetLangCookie [%s]', reqUri, langName, resetLangCookie)
# Preprocess the req url. If it references a directory or the domain itself,
# append '/index.html' to the url and 302 redirect. Otherwise, continue
# processing the request below.
name = self.PreprocessUrl(reqUri, langName)
if name:
# Do some prep for handling intl requests. Parse the url and validate
# the intl/lang substring, extract the url lang code (urlLangName) and the
# the uri that follows the intl/lang substring(contentUri)
sections = name.split("/", 2)
contentUri = 0
isIntl = len(sections) > 1 and (sections[0] == "intl")
if isIntl:
isValidIntl = sections[1] in self.validLangs
if isValidIntl:
urlLangName = sections[1]
contentUri = sections[2]
logging.info(' Content URI is [%s]...', contentUri)
if (urlLangName != langName) or (langName == 'en'):
# if the lang code in the request is different from that in
# the cookie, or if the target lang is en, strip the
# intl/nn substring. It will later be redirected to
# the user's preferred language url.
# logging.info(' Handling a MISMATCHED intl request')
name = contentUri
isStripped = True
isValidIntl = False
isIntl = False
# Send for processing
if self.isCleanUrl(name, langName, isValidIntl, isStripped):
# handle a 'clean' request.
# Try to form a response using the actual request url.
# logging.info(' Request being handled as clean: [%s]', name)
if not self.CreateResponse(name, langName, isValidIntl, resetLangCookie):
# If CreateResponse returns False, there was no such document
# in the intl/lang tree. Before going to 404, see if there is an
# English-language version of the doc in the default
# default tree and return it, else go to 404.
self.CreateResponse(contentUri, langName, False, resetLangCookie)
elif isIntl:
# handle the case where we need to pass through an invalid intl req
# for processing (so as to get 404 as appropriate). This is needed
# because intl urls are passed through clean and retried in English,
# if necessary.
# logging.info(' Handling an invalid intl request...')
self.CreateResponse(name, langName, isValidIntl, resetLangCookie)
else:
# handle the case where we have a non-clean url (usually a non-intl
# url) that we need to interpret in the context of any lang pref
# that is set. Prepend an intl/lang string to the request url and
# send it as a 302 redirect. After the redirect, the subsequent
# request will be handled as a clean url.
self.RedirToIntl(name, self.intlString, langName)
def isCleanUrl(self, name, langName, isValidIntl, isStripped):
"""Determine whether to pass an incoming url straight to processing.
Args:
name: The incoming URL
Returns:
boolean: Whether the URL should be sent straight to processing
"""
# logging.info(' >>>> isCleanUrl name [%s] langName [%s] isValidIntl [%s]', name, langName, isValidIntl)
if (langName == 'en' and not isStripped) or isValidIntl or not ('.html' in name) or (not isValidIntl and not langName):
return True
def PreprocessUrl(self, name, langName):
"""Any preprocessing work on the URL when it comes in.
Put any work related to interpreting the incoming URL here. For example,
this is used to redirect requests for a directory to the index.html file
in that directory. Subclasses should override this method to do different
preprocessing.
Args:
name: The incoming URL
Returns:
False if the request was redirected to '/index.html', or
The processed URL, otherwise
"""
# determine if this is a request for a directory
final_path_segment = name
final_slash_offset = name.rfind('/')
if final_slash_offset != len(name) - 1:
final_path_segment = name[final_slash_offset + 1:]
if final_path_segment.find('.') == -1:
name = ''.join([name, '/'])
# if this is a directory or the domain itself, redirect to /index.html
if not name or (name[len(name) - 1:] == '/'):
uri = ''.join(['/', name, 'index.html'])
# logging.info('--->PREPROCESSING REDIRECT [%s] to [%s] with langName [%s]', name, uri, langName)
self.redirect(uri, False)
return False
else:
return name
def RedirToIntl(self, name, intlString, langName):
"""Redirect an incoming request to the appropriate intl uri.
For non-en langName, builds the intl/lang string from a
base (en) string and redirects (302) the request to look for
a version of the file in langName. For en langName, simply
redirects a stripped uri string (intl/nn removed).
Args:
name: The incoming, preprocessed URL
Returns:
The lang-specific URL
"""
if not (langName == 'en'):
builtIntlLangUri = ''.join([intlString, langName, '/', name, '?', self.request.query_string])
else:
builtIntlLangUri = name
uri = ''.join(['/', builtIntlLangUri])
logging.info('-->> | """Factory method to create a MemcachedZipHandler instance.
Args:
zip_files: A list of file names, or a list of lists of file name, first
member of file mappings. See MemcachedZipHandler documentation for
more information about using the list of lists format
max_age: The maximum client-side cache lifetime
public: Whether this should be declared public in the client-side cache
Returns:
A MemcachedZipHandler wrapped in a pretty, anonymous bow for use with App
Engine
Raises:
ValueError: if the zip_files argument is not a list
"""
# verify argument integrity. If the argument is passed in list format,
# convert it to list of lists format
if zip_files and type(zip_files).__name__ == 'list':
num_items = len(zip_files)
while num_items > 0: | identifier_body | |
memcache_zipserve.py | urlLangName = None
retry = False
isValidIntl = False
isStripped = False
# Try to retrieve the user's lang pref from the cookie. If there is no
# lang pref cookie in the request, add set-cookie to the response with the
# default value of 'en'.
try:
langName = self.request.cookies['android_developer_pref_lang']
except KeyError:
resetLangCookie = True
#logging.info('==========================EXCEPTION: NO LANG COOKIE FOUND, USING [%s]', langName)
logging.info('==========================REQ INIT name [%s] langName [%s] resetLangCookie [%s]', reqUri, langName, resetLangCookie)
# Preprocess the req url. If it references a directory or the domain itself,
# append '/index.html' to the url and 302 redirect. Otherwise, continue
# processing the request below.
name = self.PreprocessUrl(reqUri, langName)
if name:
# Do some prep for handling intl requests. Parse the url and validate
# the intl/lang substring, extract the url lang code (urlLangName) and the
# the uri that follows the intl/lang substring(contentUri)
sections = name.split("/", 2)
contentUri = 0
isIntl = len(sections) > 1 and (sections[0] == "intl")
if isIntl:
isValidIntl = sections[1] in self.validLangs
if isValidIntl:
urlLangName = sections[1]
contentUri = sections[2]
logging.info(' Content URI is [%s]...', contentUri)
if (urlLangName != langName) or (langName == 'en'):
# if the lang code in the request is different from that in
# the cookie, or if the target lang is en, strip the
# intl/nn substring. It will later be redirected to
# the user's preferred language url.
# logging.info(' Handling a MISMATCHED intl request')
name = contentUri
isStripped = True
isValidIntl = False
isIntl = False
# Send for processing
if self.isCleanUrl(name, langName, isValidIntl, isStripped):
# handle a 'clean' request.
# Try to form a response using the actual request url.
# logging.info(' Request being handled as clean: [%s]', name)
if not self.CreateResponse(name, langName, isValidIntl, resetLangCookie):
# If CreateResponse returns False, there was no such document
# in the intl/lang tree. Before going to 404, see if there is an
# English-language version of the doc in the default
# default tree and return it, else go to 404.
self.CreateResponse(contentUri, langName, False, resetLangCookie)
elif isIntl:
# handle the case where we need to pass through an invalid intl req
# for processing (so as to get 404 as appropriate). This is needed
# because intl urls are passed through clean and retried in English,
# if necessary.
# logging.info(' Handling an invalid intl request...')
self.CreateResponse(name, langName, isValidIntl, resetLangCookie)
else:
# handle the case where we have a non-clean url (usually a non-intl
# url) that we need to interpret in the context of any lang pref
# that is set. Prepend an intl/lang string to the request url and
# send it as a 302 redirect. After the redirect, the subsequent
# request will be handled as a clean url.
self.RedirToIntl(name, self.intlString, langName)
def isCleanUrl(self, name, langName, isValidIntl, isStripped):
"""Determine whether to pass an incoming url straight to processing.
Args:
name: The incoming URL
Returns:
boolean: Whether the URL should be sent straight to processing
"""
# logging.info(' >>>> isCleanUrl name [%s] langName [%s] isValidIntl [%s]', name, langName, isValidIntl)
if (langName == 'en' and not isStripped) or isValidIntl or not ('.html' in name) or (not isValidIntl and not langName):
return True
def PreprocessUrl(self, name, langName):
"""Any preprocessing work on the URL when it comes in.
Put any work related to interpreting the incoming URL here. For example,
this is used to redirect requests for a directory to the index.html file
in that directory. Subclasses should override this method to do different
preprocessing.
Args:
name: The incoming URL
Returns:
False if the request was redirected to '/index.html', or
The processed URL, otherwise
"""
# determine if this is a request for a directory
final_path_segment = name
final_slash_offset = name.rfind('/')
if final_slash_offset != len(name) - 1:
final_path_segment = name[final_slash_offset + 1:]
if final_path_segment.find('.') == -1:
name = ''.join([name, '/'])
# if this is a directory or the domain itself, redirect to /index.html
if not name or (name[len(name) - 1:] == '/'):
uri = ''.join(['/', name, 'index.html'])
# logging.info('--->PREPROCESSING REDIRECT [%s] to [%s] with langName [%s]', name, uri, langName)
self.redirect(uri, False)
return False
else:
return name
def RedirToIntl(self, name, intlString, langName):
"""Redirect an incoming request to the appropriate intl uri.
For non-en langName, builds the intl/lang string from a
base (en) string and redirects (302) the request to look for
a version of the file in langName. For en langName, simply
redirects a stripped uri string (intl/nn removed).
Args:
name: The incoming, preprocessed URL
Returns:
The lang-specific URL
"""
if not (langName == 'en'):
builtIntlLangUri = ''.join([intlString, langName, '/', name, '?', self.request.query_string])
else:
builtIntlLangUri = name
uri = ''.join(['/', builtIntlLangUri])
logging.info('-->>REDIRECTING %s to %s', name, uri)
self.redirect(uri, False)
return uri
def CreateResponse(self, name, langName, isValidIntl, resetLangCookie):
"""Process the url and form a response, if appropriate.
Attempts to retrieve the requested file (name) from cache,
negative cache, or store (zip) and form the response.
For intl requests that are not found (in the localized tree),
returns False rather than forming a response, so that
the request can be retried with the base url (this is the
fallthrough to default language).
For requests that are found, forms the headers and
adds the content to the response entity. If the request was
for an intl (localized) url, also resets the language cookie
to the language specified in the url if needed, to ensure that
the client language and response data remain harmonious.
Args:
name: The incoming, preprocessed URL
langName: The language id. Used as necessary to reset the
language cookie in the response.
isValidIntl: If present, indicates whether the request is
for a language-specific url
resetLangCookie: Whether the response should reset the
language cookie to 'langName'
Returns:
True: A response was successfully created for the request
False: No response was created.
"""
# see if we have the page in the memcache
logging.info('PROCESSING %s langName [%s] isValidIntl [%s] resetLang [%s]',
name, langName, isValidIntl, resetLangCookie)
resp_data = self.GetFromCache(name)
if resp_data is None:
logging.info(' Cache miss for %s', name)
resp_data = self.GetFromNegativeCache(name)
if resp_data is None: | self.StoreOrUpdateInCache(name, resp_data)
elif isValidIntl:
# couldn't find the intl doc. Try to fall through to English.
#logging.info(' Retrying with base uri...')
return False
else:
logging.info(' Adding %s to negative cache, serving 404', name)
self.StoreInNegativeCache(name)
self.Write404Error()
return True
else:
# found it in negative cache
self.Write404Error()
return True
# found content from cache or store
logging.info('FOUND CLEAN')
if resetLangCookie:
logging.info(' Resetting android_developer_pref_lang cookie to [%s]',
langName)
expireDate = time.mktime(localtime()) + 60 * 60 * 24 * | resp_data = self.GetFromStore(name)
# IF we have the file, put it in the memcache
# ELSE put it in the negative cache
if resp_data is not None: | random_line_split |
memcache_zipserve.py | urlLangName = None
retry = False
isValidIntl = False
isStripped = False
# Try to retrieve the user's lang pref from the cookie. If there is no
# lang pref cookie in the request, add set-cookie to the response with the
# default value of 'en'.
try:
langName = self.request.cookies['android_developer_pref_lang']
except KeyError:
resetLangCookie = True
#logging.info('==========================EXCEPTION: NO LANG COOKIE FOUND, USING [%s]', langName)
logging.info('==========================REQ INIT name [%s] langName [%s] resetLangCookie [%s]', reqUri, langName, resetLangCookie)
# Preprocess the req url. If it references a directory or the domain itself,
# append '/index.html' to the url and 302 redirect. Otherwise, continue
# processing the request below.
name = self.PreprocessUrl(reqUri, langName)
if name:
# Do some prep for handling intl requests. Parse the url and validate
# the intl/lang substring, extract the url lang code (urlLangName) and the
# the uri that follows the intl/lang substring(contentUri)
sections = name.split("/", 2)
contentUri = 0
isIntl = len(sections) > 1 and (sections[0] == "intl")
if isIntl:
isValidIntl = sections[1] in self.validLangs
if isValidIntl:
urlLangName = sections[1]
contentUri = sections[2]
logging.info(' Content URI is [%s]...', contentUri)
if (urlLangName != langName) or (langName == 'en'):
# if the lang code in the request is different from that in
# the cookie, or if the target lang is en, strip the
# intl/nn substring. It will later be redirected to
# the user's preferred language url.
# logging.info(' Handling a MISMATCHED intl request')
name = contentUri
isStripped = True
isValidIntl = False
isIntl = False
# Send for processing
if self.isCleanUrl(name, langName, isValidIntl, isStripped):
# handle a 'clean' request.
# Try to form a response using the actual request url.
# logging.info(' Request being handled as clean: [%s]', name)
if not self.CreateResponse(name, langName, isValidIntl, resetLangCookie):
# If CreateResponse returns False, there was no such document
# in the intl/lang tree. Before going to 404, see if there is an
# English-language version of the doc in the default
# default tree and return it, else go to 404.
self.CreateResponse(contentUri, langName, False, resetLangCookie)
elif isIntl:
# handle the case where we need to pass through an invalid intl req
# for processing (so as to get 404 as appropriate). This is needed
# because intl urls are passed through clean and retried in English,
# if necessary.
# logging.info(' Handling an invalid intl request...')
self.CreateResponse(name, langName, isValidIntl, resetLangCookie)
else:
# handle the case where we have a non-clean url (usually a non-intl
# url) that we need to interpret in the context of any lang pref
# that is set. Prepend an intl/lang string to the request url and
# send it as a 302 redirect. After the redirect, the subsequent
# request will be handled as a clean url.
self.RedirToIntl(name, self.intlString, langName)
def isCleanUrl(self, name, langName, isValidIntl, isStripped):
"""Determine whether to pass an incoming url straight to processing.
Args:
name: The incoming URL
Returns:
boolean: Whether the URL should be sent straight to processing
"""
# logging.info(' >>>> isCleanUrl name [%s] langName [%s] isValidIntl [%s]', name, langName, isValidIntl)
if (langName == 'en' and not isStripped) or isValidIntl or not ('.html' in name) or (not isValidIntl and not langName):
return True
def PreprocessUrl(self, name, langName):
"""Any preprocessing work on the URL when it comes in.
Put any work related to interpreting the incoming URL here. For example,
this is used to redirect requests for a directory to the index.html file
in that directory. Subclasses should override this method to do different
preprocessing.
Args:
name: The incoming URL
Returns:
False if the request was redirected to '/index.html', or
The processed URL, otherwise
"""
# determine if this is a request for a directory
final_path_segment = name
final_slash_offset = name.rfind('/')
if final_slash_offset != len(name) - 1:
final_path_segment = name[final_slash_offset + 1:]
if final_path_segment.find('.') == -1:
name = ''.join([name, '/'])
# if this is a directory or the domain itself, redirect to /index.html
if not name or (name[len(name) - 1:] == '/'):
uri = ''.join(['/', name, 'index.html'])
# logging.info('--->PREPROCESSING REDIRECT [%s] to [%s] with langName [%s]', name, uri, langName)
self.redirect(uri, False)
return False
else:
|
def RedirToIntl(self, name, intlString, langName):
"""Redirect an incoming request to the appropriate intl uri.
For non-en langName, builds the intl/lang string from a
base (en) string and redirects (302) the request to look for
a version of the file in langName. For en langName, simply
redirects a stripped uri string (intl/nn removed).
Args:
name: The incoming, preprocessed URL
Returns:
The lang-specific URL
"""
if not (langName == 'en'):
builtIntlLangUri = ''.join([intlString, langName, '/', name, '?', self.request.query_string])
else:
builtIntlLangUri = name
uri = ''.join(['/', builtIntlLangUri])
logging.info('-->>REDIRECTING %s to %s', name, uri)
self.redirect(uri, False)
return uri
def CreateResponse(self, name, langName, isValidIntl, resetLangCookie):
"""Process the url and form a response, if appropriate.
Attempts to retrieve the requested file (name) from cache,
negative cache, or store (zip) and form the response.
For intl requests that are not found (in the localized tree),
returns False rather than forming a response, so that
the request can be retried with the base url (this is the
fallthrough to default language).
For requests that are found, forms the headers and
adds the content to the response entity. If the request was
for an intl (localized) url, also resets the language cookie
to the language specified in the url if needed, to ensure that
the client language and response data remain harmonious.
Args:
name: The incoming, preprocessed URL
langName: The language id. Used as necessary to reset the
language cookie in the response.
isValidIntl: If present, indicates whether the request is
for a language-specific url
resetLangCookie: Whether the response should reset the
language cookie to 'langName'
Returns:
True: A response was successfully created for the request
False: No response was created.
"""
# see if we have the page in the memcache
logging.info('PROCESSING %s langName [%s] isValidIntl [%s] resetLang [%s]',
name, langName, isValidIntl, resetLangCookie)
resp_data = self.GetFromCache(name)
if resp_data is None:
logging.info(' Cache miss for %s', name)
resp_data = self.GetFromNegativeCache(name)
if resp_data is None:
resp_data = self.GetFromStore(name)
# IF we have the file, put it in the memcache
# ELSE put it in the negative cache
if resp_data is not None:
self.StoreOrUpdateInCache(name, resp_data)
elif isValidIntl:
# couldn't find the intl doc. Try to fall through to English.
#logging.info(' Retrying with base uri...')
return False
else:
logging.info(' Adding %s to negative cache, serving 404', name)
self.StoreInNegativeCache(name)
self.Write404Error()
return True
else:
# found it in negative cache
self.Write404Error()
return True
# found content from cache or store
logging.info('FOUND CLEAN')
if resetLangCookie:
logging.info(' Resetting android_developer_pref_lang cookie to [%s]',
langName)
expireDate = time.mktime(localtime()) + 60 * 60 * 24 * | return name | conditional_block |
memcache_zipserve.py | urlLangName = None
retry = False
isValidIntl = False
isStripped = False
# Try to retrieve the user's lang pref from the cookie. If there is no
# lang pref cookie in the request, add set-cookie to the response with the
# default value of 'en'.
try:
langName = self.request.cookies['android_developer_pref_lang']
except KeyError:
resetLangCookie = True
#logging.info('==========================EXCEPTION: NO LANG COOKIE FOUND, USING [%s]', langName)
logging.info('==========================REQ INIT name [%s] langName [%s] resetLangCookie [%s]', reqUri, langName, resetLangCookie)
# Preprocess the req url. If it references a directory or the domain itself,
# append '/index.html' to the url and 302 redirect. Otherwise, continue
# processing the request below.
name = self.PreprocessUrl(reqUri, langName)
if name:
# Do some prep for handling intl requests. Parse the url and validate
# the intl/lang substring, extract the url lang code (urlLangName) and the
# the uri that follows the intl/lang substring(contentUri)
sections = name.split("/", 2)
contentUri = 0
isIntl = len(sections) > 1 and (sections[0] == "intl")
if isIntl:
isValidIntl = sections[1] in self.validLangs
if isValidIntl:
urlLangName = sections[1]
contentUri = sections[2]
logging.info(' Content URI is [%s]...', contentUri)
if (urlLangName != langName) or (langName == 'en'):
# if the lang code in the request is different from that in
# the cookie, or if the target lang is en, strip the
# intl/nn substring. It will later be redirected to
# the user's preferred language url.
# logging.info(' Handling a MISMATCHED intl request')
name = contentUri
isStripped = True
isValidIntl = False
isIntl = False
# Send for processing
if self.isCleanUrl(name, langName, isValidIntl, isStripped):
# handle a 'clean' request.
# Try to form a response using the actual request url.
# logging.info(' Request being handled as clean: [%s]', name)
if not self.CreateResponse(name, langName, isValidIntl, resetLangCookie):
# If CreateResponse returns False, there was no such document
# in the intl/lang tree. Before going to 404, see if there is an
# English-language version of the doc in the default
# default tree and return it, else go to 404.
self.CreateResponse(contentUri, langName, False, resetLangCookie)
elif isIntl:
# handle the case where we need to pass through an invalid intl req
# for processing (so as to get 404 as appropriate). This is needed
# because intl urls are passed through clean and retried in English,
# if necessary.
# logging.info(' Handling an invalid intl request...')
self.CreateResponse(name, langName, isValidIntl, resetLangCookie)
else:
# handle the case where we have a non-clean url (usually a non-intl
# url) that we need to interpret in the context of any lang pref
# that is set. Prepend an intl/lang string to the request url and
# send it as a 302 redirect. After the redirect, the subsequent
# request will be handled as a clean url.
self.RedirToIntl(name, self.intlString, langName)
def isCleanUrl(self, name, langName, isValidIntl, isStripped):
"""Determine whether to pass an incoming url straight to processing.
Args:
name: The incoming URL
Returns:
boolean: Whether the URL should be sent straight to processing
"""
# logging.info(' >>>> isCleanUrl name [%s] langName [%s] isValidIntl [%s]', name, langName, isValidIntl)
if (langName == 'en' and not isStripped) or isValidIntl or not ('.html' in name) or (not isValidIntl and not langName):
return True
def PreprocessUrl(self, name, langName):
"""Any preprocessing work on the URL when it comes in.
Put any work related to interpreting the incoming URL here. For example,
this is used to redirect requests for a directory to the index.html file
in that directory. Subclasses should override this method to do different
preprocessing.
Args:
name: The incoming URL
Returns:
False if the request was redirected to '/index.html', or
The processed URL, otherwise
"""
# determine if this is a request for a directory
final_path_segment = name
final_slash_offset = name.rfind('/')
if final_slash_offset != len(name) - 1:
final_path_segment = name[final_slash_offset + 1:]
if final_path_segment.find('.') == -1:
name = ''.join([name, '/'])
# if this is a directory or the domain itself, redirect to /index.html
if not name or (name[len(name) - 1:] == '/'):
uri = ''.join(['/', name, 'index.html'])
# logging.info('--->PREPROCESSING REDIRECT [%s] to [%s] with langName [%s]', name, uri, langName)
self.redirect(uri, False)
return False
else:
return name
def RedirToIntl(self, name, intlString, langName):
"""Redirect an incoming request to the appropriate intl uri.
For non-en langName, builds the intl/lang string from a
base (en) string and redirects (302) the request to look for
a version of the file in langName. For en langName, simply
redirects a stripped uri string (intl/nn removed).
Args:
name: The incoming, preprocessed URL
Returns:
The lang-specific URL
"""
if not (langName == 'en'):
builtIntlLangUri = ''.join([intlString, langName, '/', name, '?', self.request.query_string])
else:
builtIntlLangUri = name
uri = ''.join(['/', builtIntlLangUri])
logging.info('-->>REDIRECTING %s to %s', name, uri)
self.redirect(uri, False)
return uri
def | (self, name, langName, isValidIntl, resetLangCookie):
"""Process the url and form a response, if appropriate.
Attempts to retrieve the requested file (name) from cache,
negative cache, or store (zip) and form the response.
For intl requests that are not found (in the localized tree),
returns False rather than forming a response, so that
the request can be retried with the base url (this is the
fallthrough to default language).
For requests that are found, forms the headers and
adds the content to the response entity. If the request was
for an intl (localized) url, also resets the language cookie
to the language specified in the url if needed, to ensure that
the client language and response data remain harmonious.
Args:
name: The incoming, preprocessed URL
langName: The language id. Used as necessary to reset the
language cookie in the response.
isValidIntl: If present, indicates whether the request is
for a language-specific url
resetLangCookie: Whether the response should reset the
language cookie to 'langName'
Returns:
True: A response was successfully created for the request
False: No response was created.
"""
# see if we have the page in the memcache
logging.info('PROCESSING %s langName [%s] isValidIntl [%s] resetLang [%s]',
name, langName, isValidIntl, resetLangCookie)
resp_data = self.GetFromCache(name)
if resp_data is None:
logging.info(' Cache miss for %s', name)
resp_data = self.GetFromNegativeCache(name)
if resp_data is None:
resp_data = self.GetFromStore(name)
# IF we have the file, put it in the memcache
# ELSE put it in the negative cache
if resp_data is not None:
self.StoreOrUpdateInCache(name, resp_data)
elif isValidIntl:
# couldn't find the intl doc. Try to fall through to English.
#logging.info(' Retrying with base uri...')
return False
else:
logging.info(' Adding %s to negative cache, serving 404', name)
self.StoreInNegativeCache(name)
self.Write404Error()
return True
else:
# found it in negative cache
self.Write404Error()
return True
# found content from cache or store
logging.info('FOUND CLEAN')
if resetLangCookie:
logging.info(' Resetting android_developer_pref_lang cookie to [%s]',
langName)
expireDate = time.mktime(localtime()) + 60 * 60 * 24 * | CreateResponse | identifier_name |
chart.js | Update]').click(function () {
getTopRevs();
getBotRevs();
});
$('[name=chartUpdate]').click(function () {
var whichChart = $('[name=chartSelector]').val();
if (whichChart == "In Total") {
drawPie('#myChart');
} else {
drawBar('#myChart');
}
});
});
}
function getAuthorAnalyticsPage() |
//clears the .active class from the menu bar
function resetMenuBar() {
$('#Overview').removeClass("active");
$('#ArticleAnalytics').removeClass("active");
$('#AuthorAnalytics').removeClass("active");
}
/******************
LOAD THE CHART DATA
*******************/
function drawPie(where) {
console.log(where)
graphData = new google.visualization.DataTable();
graphData.addColumn('string', 'Element');
graphData.addColumn('number', 'Percentage');
$.each(pieData, function (key, val) {
graphData.addRow([key, val]);
})
var chart = new google.visualization.PieChart($(where)[0]);
chart.draw(graphData, options);
}
function drawBar(where) {
graphData = new google.visualization.DataTable();
graphData.addColumn('string', 'Year');
graphData.addColumn('number', 'RegularUsers');
graphData.addColumn('number', 'Bots');
graphData.addColumn('number', 'Admins');
graphData.addColumn('number', 'Anon');
var test = [];
for (var i in barData) {
test.push(barData[i])
}
// console.log(test);
for (var x = 0; x < test.length; x++) {
graphData.addRow([test[x].Year, test[x].RegularUsers, test[x].Bots, test[x].Admins, test[x].Anon]);
}
var chart = new google.visualization.ColumnChart($(where)[0]);
chart.draw(graphData, options);
}
function drawBarSpecificUser(where, dataToUse) {
graphData = new google.visualization.DataTable();
graphData.addColumn('string', 'Year');
graphData.addColumn('number', 'Revisions');
var test = [];
for (var i in dataToUse) {
test.push(dataToUse[i])
}
// console.log(test);
for (var x = 0; x < test.length; x++) {
graphData.addRow([test[x].Year, test[x].Revisions]);
}
var chart = new google.visualization.BarChart($(where)[0]);
chart.draw(graphData, options);
}
/*******************************************
FUNCTIONS FOR LOADING REGULAR DATA INTO HTML
********************************************/
function getTopRevs() {
var quantity = $('[name=quantity]').val();
var destination = 'getTopRevs?quantity=' + quantity;
$.get(destination, quantity, function (data) {
$('#topRev').empty();
for (var x = 0; x < data.length; x++) {
var num = x + 1;
num = num + '. ';
var appendMe = $('<li>' + num + data[x]._id + '</li>');
$('#topRev').append(appendMe);
}
})
}
function getBotRevs() {
var quantity = $('[name=quantity]').val();
var destination = 'getBotRevs?quantity=' + quantity;
$.get(destination, quantity, function (data) {
$('#botRev').empty();
for (var x = 0; x < data.length; x++) {
var num = x + 1;
num = num + '. ';
var appendMe = $('<li>' + num + data[x]._id + '</li>');
$('#botRev').append(appendMe);
}
})
}
function getOldestArticles() {
var destination = 'getOldestArticles';
$.get(destination, null, function (data) {
console.log(data);
$('#oldestArticles').empty();
for (var x = 0; x < data.length; x++) {
var num = x + 1;
num = num + '. ';
var appendMe = $('<li>' + num + data[x]._id + '</li>');
$('#oldestArticles').append(appendMe);
}
})
}
function getNewestArticles() {
var destination = 'getNewestArticles';
console.log('here');
$.get(destination, null, function (data) {
console.log(data);
$('#newestArticles').empty();
for (var x = 0; x < data.length; x++) {
var num = x + 1;
num = num + '. ';
var appendMe = $('<li>' + num + data[x]._id + '</li>');
$('#newestArticles').append(appendMe);
}
})
}
function getTitleLargestRegUser(){
var destination = 'getLeastRegoUser';
$.get(destination, null, function (data) {
console.log(data);
$('#mostUsers').empty();
$('#mostUsers').text(data);
})
}
function getTitleLeastRegUser(){
var destination = 'getLargestRegoUser';
$.get(destination, null, function (data) {
console.log(data);
$('#leastUsers').empty();
$('#leastUsers').text(data);
})
}
function getAuthorArticleList() {
var authorName = $('#authorEntryBox').val();
console.log(authorName)
var destination = 'getAuthorArticleList?authorName=' + authorName;
var putListHere = $('#articleList');
$.get(destination, null, function (data) {
console.log('Here is the user list ')
console.log(data)
if (data.length == 0) {
alert("Could not find any users with names matching that query");
} else {
// var heading = $('<thead><tr>' + '<th>' + 'Article Name' + '</th>' + '<th>' + 'Number of Revisions' + '</th>' + '</tr></thead><tbody>')
// $('#articleList').append(heading);
// for (var x = 0; x < data.length; x++) {
// var test = "<tr onclick='getTimestamps()' id= '" + "ArticleNameIs" + data[x]._id + "'>" + "<td>" + data[x]._id + "</td>" + '<td>' + data[x].count + '</td>' + '</tr>';
// var appendMe = $("<tr onclick='getTimestamps()' id= '" + "ArticleNameIs" + data[x]._id + "'>" + "<td>" + data[x]._id + "</td>" + '<td>' + data[x].count + '</td>' + '</tr>');
// console.log(test)
// $('#articleList').append(appendMe);
// }
// var ending = $('</tbody>');
// $('#articleList').append(ending);
putListHere.empty();
//Add headers
var theader = $("<thead><tr><th>User Name</th><th>Article Name</th><th>Number of Revisions</th></tr></thead>")
$('#articleList').append(theader);
//Create data table
for (var x = 0; x < data.length; x++) {
var appnedMe = $("<tr class='articleEntry' id= '" + "entryID" + x + "'>" + '<td>' + data[x].user + '</td>' + "<td>" + data[x]._id + "</td>" + '<td>' + data[x].count + '</td>' + '</tr>');
$('#articleList').append(appnedMe);
var temp = '#entryID' + x;
// $(temp).click(function(x){ //Get timestamps
// console.log(x)
// })
}
//Create event handler seperately
function handleEvent(idVal){
var elementGetter = '#entryID' + idVal;
$(elementGetter).click(function(){
$(".timestamp").remove();
console.log(elementGetter)
var newdestination = 'getTimestamps?authorName=' + data[idVal]._id + "&title=" + data[idVal].user;
$.get(newdestination, null, function (newdata) {
console.log(newdata)
for(var z = 0; z < newdata.length; z++){
var myDate = new Date(newdata[z].timestamp)
console.log(myDate)
$('<tr><td class="timestamp">' + " " + myDate.toUTCString() + '</td></tr>').insertAfter(elementGetter);
}
})
})
}
for(var x = 0; x < data.length; x++){
handleEvent(x)
}
}
})
}
function fillAutocomplete() {
var destination = 'getAllArticleTitles'
$.get(destination, null, function (data) {
$('#articleEntryList').empty();
for (var x = 0; x < data.length; x++) {
console.log(data[x])
var appendMe = $('<option>' + data[x]._id + " [revisions: " + data[x].count + ']</option>')
$('#articleEntryList').append(appendMe);
allArticleTitles[x] = data[x]._id;
allArt | {
$('#main').empty();
$('#main').load('views/authorAnalytics.html', null, function () {
$('#authorSearchButton').click(function () {
getAuthorArticleList();
})
});
} | identifier_body |
chart.js | Update]').click(function () {
getTopRevs();
getBotRevs();
});
$('[name=chartUpdate]').click(function () {
var whichChart = $('[name=chartSelector]').val();
if (whichChart == "In Total") {
drawPie('#myChart');
} else {
drawBar('#myChart');
}
});
});
| $('#authorSearchButton').click(function () {
getAuthorArticleList();
})
});
}
//clears the .active class from the menu bar
function resetMenuBar() {
$('#Overview').removeClass("active");
$('#ArticleAnalytics').removeClass("active");
$('#AuthorAnalytics').removeClass("active");
}
/******************
LOAD THE CHART DATA
*******************/
function drawPie(where) {
console.log(where)
graphData = new google.visualization.DataTable();
graphData.addColumn('string', 'Element');
graphData.addColumn('number', 'Percentage');
$.each(pieData, function (key, val) {
graphData.addRow([key, val]);
})
var chart = new google.visualization.PieChart($(where)[0]);
chart.draw(graphData, options);
}
function drawBar(where) {
graphData = new google.visualization.DataTable();
graphData.addColumn('string', 'Year');
graphData.addColumn('number', 'RegularUsers');
graphData.addColumn('number', 'Bots');
graphData.addColumn('number', 'Admins');
graphData.addColumn('number', 'Anon');
var test = [];
for (var i in barData) {
test.push(barData[i])
}
// console.log(test);
for (var x = 0; x < test.length; x++) {
graphData.addRow([test[x].Year, test[x].RegularUsers, test[x].Bots, test[x].Admins, test[x].Anon]);
}
var chart = new google.visualization.ColumnChart($(where)[0]);
chart.draw(graphData, options);
}
function drawBarSpecificUser(where, dataToUse) {
graphData = new google.visualization.DataTable();
graphData.addColumn('string', 'Year');
graphData.addColumn('number', 'Revisions');
var test = [];
for (var i in dataToUse) {
test.push(dataToUse[i])
}
// console.log(test);
for (var x = 0; x < test.length; x++) {
graphData.addRow([test[x].Year, test[x].Revisions]);
}
var chart = new google.visualization.BarChart($(where)[0]);
chart.draw(graphData, options);
}
/*******************************************
FUNCTIONS FOR LOADING REGULAR DATA INTO HTML
********************************************/
function getTopRevs() {
var quantity = $('[name=quantity]').val();
var destination = 'getTopRevs?quantity=' + quantity;
$.get(destination, quantity, function (data) {
$('#topRev').empty();
for (var x = 0; x < data.length; x++) {
var num = x + 1;
num = num + '. ';
var appendMe = $('<li>' + num + data[x]._id + '</li>');
$('#topRev').append(appendMe);
}
})
}
function getBotRevs() {
var quantity = $('[name=quantity]').val();
var destination = 'getBotRevs?quantity=' + quantity;
$.get(destination, quantity, function (data) {
$('#botRev').empty();
for (var x = 0; x < data.length; x++) {
var num = x + 1;
num = num + '. ';
var appendMe = $('<li>' + num + data[x]._id + '</li>');
$('#botRev').append(appendMe);
}
})
}
function getOldestArticles() {
var destination = 'getOldestArticles';
$.get(destination, null, function (data) {
console.log(data);
$('#oldestArticles').empty();
for (var x = 0; x < data.length; x++) {
var num = x + 1;
num = num + '. ';
var appendMe = $('<li>' + num + data[x]._id + '</li>');
$('#oldestArticles').append(appendMe);
}
})
}
function getNewestArticles() {
var destination = 'getNewestArticles';
console.log('here');
$.get(destination, null, function (data) {
console.log(data);
$('#newestArticles').empty();
for (var x = 0; x < data.length; x++) {
var num = x + 1;
num = num + '. ';
var appendMe = $('<li>' + num + data[x]._id + '</li>');
$('#newestArticles').append(appendMe);
}
})
}
function getTitleLargestRegUser(){
var destination = 'getLeastRegoUser';
$.get(destination, null, function (data) {
console.log(data);
$('#mostUsers').empty();
$('#mostUsers').text(data);
})
}
function getTitleLeastRegUser(){
var destination = 'getLargestRegoUser';
$.get(destination, null, function (data) {
console.log(data);
$('#leastUsers').empty();
$('#leastUsers').text(data);
})
}
function getAuthorArticleList() {
var authorName = $('#authorEntryBox').val();
console.log(authorName)
var destination = 'getAuthorArticleList?authorName=' + authorName;
var putListHere = $('#articleList');
$.get(destination, null, function (data) {
console.log('Here is the user list ')
console.log(data)
if (data.length == 0) {
alert("Could not find any users with names matching that query");
} else {
// var heading = $('<thead><tr>' + '<th>' + 'Article Name' + '</th>' + '<th>' + 'Number of Revisions' + '</th>' + '</tr></thead><tbody>')
// $('#articleList').append(heading);
// for (var x = 0; x < data.length; x++) {
// var test = "<tr onclick='getTimestamps()' id= '" + "ArticleNameIs" + data[x]._id + "'>" + "<td>" + data[x]._id + "</td>" + '<td>' + data[x].count + '</td>' + '</tr>';
// var appendMe = $("<tr onclick='getTimestamps()' id= '" + "ArticleNameIs" + data[x]._id + "'>" + "<td>" + data[x]._id + "</td>" + '<td>' + data[x].count + '</td>' + '</tr>');
// console.log(test)
// $('#articleList').append(appendMe);
// }
// var ending = $('</tbody>');
// $('#articleList').append(ending);
putListHere.empty();
//Add headers
var theader = $("<thead><tr><th>User Name</th><th>Article Name</th><th>Number of Revisions</th></tr></thead>")
$('#articleList').append(theader);
//Create data table
for (var x = 0; x < data.length; x++) {
var appnedMe = $("<tr class='articleEntry' id= '" + "entryID" + x + "'>" + '<td>' + data[x].user + '</td>' + "<td>" + data[x]._id + "</td>" + '<td>' + data[x].count + '</td>' + '</tr>');
$('#articleList').append(appnedMe);
var temp = '#entryID' + x;
// $(temp).click(function(x){ //Get timestamps
// console.log(x)
// })
}
//Create event handler seperately
function handleEvent(idVal){
var elementGetter = '#entryID' + idVal;
$(elementGetter).click(function(){
$(".timestamp").remove();
console.log(elementGetter)
var newdestination = 'getTimestamps?authorName=' + data[idVal]._id + "&title=" + data[idVal].user;
$.get(newdestination, null, function (newdata) {
console.log(newdata)
for(var z = 0; z < newdata.length; z++){
var myDate = new Date(newdata[z].timestamp)
console.log(myDate)
$('<tr><td class="timestamp">' + " " + myDate.toUTCString() + '</td></tr>').insertAfter(elementGetter);
}
})
})
}
for(var x = 0; x < data.length; x++){
handleEvent(x)
}
}
})
}
function fillAutocomplete() {
var destination = 'getAllArticleTitles'
$.get(destination, null, function (data) {
$('#articleEntryList').empty();
for (var x = 0; x < data.length; x++) {
console.log(data[x])
var appendMe = $('<option>' + data[x]._id + " [revisions: " + data[x].count + ']</option>')
$('#articleEntryList').append(appendMe);
allArticleTitles[x] = data[x]._id;
allArticlet | }
function getAuthorAnalyticsPage() {
$('#main').empty();
$('#main').load('views/authorAnalytics.html', null, function () { | random_line_split |
chart.js | Update]').click(function () {
getTopRevs();
getBotRevs();
});
$('[name=chartUpdate]').click(function () {
var whichChart = $('[name=chartSelector]').val();
if (whichChart == "In Total") {
drawPie('#myChart');
} else {
drawBar('#myChart');
}
});
});
}
function getAuthorAnalyticsPage() {
$('#main').empty();
$('#main').load('views/authorAnalytics.html', null, function () {
$('#authorSearchButton').click(function () {
getAuthorArticleList();
})
});
}
//clears the .active class from the menu bar
function resetMenuBar() {
$('#Overview').removeClass("active");
$('#ArticleAnalytics').removeClass("active");
$('#AuthorAnalytics').removeClass("active");
}
/******************
LOAD THE CHART DATA
*******************/
function drawPie(where) {
console.log(where)
graphData = new google.visualization.DataTable();
graphData.addColumn('string', 'Element');
graphData.addColumn('number', 'Percentage');
$.each(pieData, function (key, val) {
graphData.addRow([key, val]);
})
var chart = new google.visualization.PieChart($(where)[0]);
chart.draw(graphData, options);
}
function drawBar(where) {
graphData = new google.visualization.DataTable();
graphData.addColumn('string', 'Year');
graphData.addColumn('number', 'RegularUsers');
graphData.addColumn('number', 'Bots');
graphData.addColumn('number', 'Admins');
graphData.addColumn('number', 'Anon');
var test = [];
for (var i in barData) {
test.push(barData[i])
}
// console.log(test);
for (var x = 0; x < test.length; x++) {
graphData.addRow([test[x].Year, test[x].RegularUsers, test[x].Bots, test[x].Admins, test[x].Anon]);
}
var chart = new google.visualization.ColumnChart($(where)[0]);
chart.draw(graphData, options);
}
function drawBarSpecificUser(where, dataToUse) {
graphData = new google.visualization.DataTable();
graphData.addColumn('string', 'Year');
graphData.addColumn('number', 'Revisions');
var test = [];
for (var i in dataToUse) {
test.push(dataToUse[i])
}
// console.log(test);
for (var x = 0; x < test.length; x++) {
graphData.addRow([test[x].Year, test[x].Revisions]);
}
var chart = new google.visualization.BarChart($(where)[0]);
chart.draw(graphData, options);
}
/*******************************************
FUNCTIONS FOR LOADING REGULAR DATA INTO HTML
********************************************/
function getTopRevs() {
var quantity = $('[name=quantity]').val();
var destination = 'getTopRevs?quantity=' + quantity;
$.get(destination, quantity, function (data) {
$('#topRev').empty();
for (var x = 0; x < data.length; x++) {
var num = x + 1;
num = num + '. ';
var appendMe = $('<li>' + num + data[x]._id + '</li>');
$('#topRev').append(appendMe);
}
})
}
function getBotRevs() {
var quantity = $('[name=quantity]').val();
var destination = 'getBotRevs?quantity=' + quantity;
$.get(destination, quantity, function (data) {
$('#botRev').empty();
for (var x = 0; x < data.length; x++) {
var num = x + 1;
num = num + '. ';
var appendMe = $('<li>' + num + data[x]._id + '</li>');
$('#botRev').append(appendMe);
}
})
}
function getOldestArticles() {
var destination = 'getOldestArticles';
$.get(destination, null, function (data) {
console.log(data);
$('#oldestArticles').empty();
for (var x = 0; x < data.length; x++) {
var num = x + 1;
num = num + '. ';
var appendMe = $('<li>' + num + data[x]._id + '</li>');
$('#oldestArticles').append(appendMe);
}
})
}
function | () {
var destination = 'getNewestArticles';
console.log('here');
$.get(destination, null, function (data) {
console.log(data);
$('#newestArticles').empty();
for (var x = 0; x < data.length; x++) {
var num = x + 1;
num = num + '. ';
var appendMe = $('<li>' + num + data[x]._id + '</li>');
$('#newestArticles').append(appendMe);
}
})
}
function getTitleLargestRegUser(){
var destination = 'getLeastRegoUser';
$.get(destination, null, function (data) {
console.log(data);
$('#mostUsers').empty();
$('#mostUsers').text(data);
})
}
function getTitleLeastRegUser(){
var destination = 'getLargestRegoUser';
$.get(destination, null, function (data) {
console.log(data);
$('#leastUsers').empty();
$('#leastUsers').text(data);
})
}
function getAuthorArticleList() {
var authorName = $('#authorEntryBox').val();
console.log(authorName)
var destination = 'getAuthorArticleList?authorName=' + authorName;
var putListHere = $('#articleList');
$.get(destination, null, function (data) {
console.log('Here is the user list ')
console.log(data)
if (data.length == 0) {
alert("Could not find any users with names matching that query");
} else {
// var heading = $('<thead><tr>' + '<th>' + 'Article Name' + '</th>' + '<th>' + 'Number of Revisions' + '</th>' + '</tr></thead><tbody>')
// $('#articleList').append(heading);
// for (var x = 0; x < data.length; x++) {
// var test = "<tr onclick='getTimestamps()' id= '" + "ArticleNameIs" + data[x]._id + "'>" + "<td>" + data[x]._id + "</td>" + '<td>' + data[x].count + '</td>' + '</tr>';
// var appendMe = $("<tr onclick='getTimestamps()' id= '" + "ArticleNameIs" + data[x]._id + "'>" + "<td>" + data[x]._id + "</td>" + '<td>' + data[x].count + '</td>' + '</tr>');
// console.log(test)
// $('#articleList').append(appendMe);
// }
// var ending = $('</tbody>');
// $('#articleList').append(ending);
putListHere.empty();
//Add headers
var theader = $("<thead><tr><th>User Name</th><th>Article Name</th><th>Number of Revisions</th></tr></thead>")
$('#articleList').append(theader);
//Create data table
for (var x = 0; x < data.length; x++) {
var appnedMe = $("<tr class='articleEntry' id= '" + "entryID" + x + "'>" + '<td>' + data[x].user + '</td>' + "<td>" + data[x]._id + "</td>" + '<td>' + data[x].count + '</td>' + '</tr>');
$('#articleList').append(appnedMe);
var temp = '#entryID' + x;
// $(temp).click(function(x){ //Get timestamps
// console.log(x)
// })
}
//Create event handler seperately
function handleEvent(idVal){
var elementGetter = '#entryID' + idVal;
$(elementGetter).click(function(){
$(".timestamp").remove();
console.log(elementGetter)
var newdestination = 'getTimestamps?authorName=' + data[idVal]._id + "&title=" + data[idVal].user;
$.get(newdestination, null, function (newdata) {
console.log(newdata)
for(var z = 0; z < newdata.length; z++){
var myDate = new Date(newdata[z].timestamp)
console.log(myDate)
$('<tr><td class="timestamp">' + " " + myDate.toUTCString() + '</td></tr>').insertAfter(elementGetter);
}
})
})
}
for(var x = 0; x < data.length; x++){
handleEvent(x)
}
}
})
}
function fillAutocomplete() {
var destination = 'getAllArticleTitles'
$.get(destination, null, function (data) {
$('#articleEntryList').empty();
for (var x = 0; x < data.length; x++) {
console.log(data[x])
var appendMe = $('<option>' + data[x]._id + " [revisions: " + data[x].count + ']</option>')
$('#articleEntryList').append(appendMe);
allArticleTitles[x] = data[x]._id;
allArtic | getNewestArticles | identifier_name |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.