file_name large_stringlengths 4 140 | prefix large_stringlengths 0 39k | suffix large_stringlengths 0 36.1k | middle large_stringlengths 0 29.4k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
texture.rs | use enum_dispatch::enum_dispatch;
use log::error;
use crate::textures::dots::DotsTexture;
use crate::textures::constant::ConstantTexture;
use crate::textures::scaled::ScaleTexture;
use crate::core::interaction::SurfaceInteraction;
use crate::textures::imagemap::{ ImageTextureFloat, ImageTextureRGB};
use crate::textures::mix::MixTexture;
use crate::textures::biler::BilerTexture;
use crate::textures::uv::UVTexture;
use crate::textures::marble::MarbleTexture;
use crate::textures::wrinkled::WrinkledTexture;
use crate::textures::fbm::FBmTexture;
use crate::textures::windy::WindyTexture;
use crate::textures::checkerboard::{Checkerboard3DTexture, Checkerboard2DTexture};
use crate::core::geometry::vector::{Vector2f, Vector3f};
use crate::core::geometry::point::{Point2f, Point3f};
use crate::core::pbrt::{Float, INV_PI, INV2_PI, PI, lerp, clamp, log2};
use crate::core::transform::Transform;
use crate::core::geometry::geometry::{spherical_theta, spherical_phi};
use crate::core::spectrum::{Spectrum, RGBSpectrum, SampledSpectrum};
use std::ops::{Mul, Add, AddAssign, Div};
use crate::core::mipmap::Clampable;
use crate::core::paramset::TextureParams;
const NOISE_PERM_SIZE: usize = 256;
const NOISE_PERM: [usize; 2 * NOISE_PERM_SIZE] = [
151, 160, 137, 91, 90, 15, 131, 13, 201, 95, 96, 53, 194, 233, 7, 225, 140,
36, 103, 30, 69, 142,
// Remainder of the noise permutation table
8, 99, 37, 240, 21, 10, 23, 190, 6, 148, 247, 120, 234, 75, 0, 26, 197, 62,
94, 252, 219, 203, 117, 35, 11, 32, 57, 177, 33, 88, 237, 149, 56, 87, 174,
20, 125, 136, 171, 168, 68, 175, 74, 165, 71, 134, 139, 48, 27, 166, 77,
146, 158, 231, 83, 111, 229, 122, 60, 211, 133, 230, 220, 105, 92, 41, 55,
46, 245, 40, 244, 102, 143, 54, 65, 25, 63, 161, 1, 216, 80, 73, 209, 76,
132, 187, 208, 89, 18, 169, 200, 196, 135, 130, 116, 188, 159, 86, 164, 100,
109, 198, 173, 186, 3, 64, 52, 217, 226, 250, 124, 123, 5, 202, 38, 147,
118, 126, 255, 82, 85, 212, 207, 206, 59, 227, 47, 16, 58, 17, 182, 189, 28,
42, 223, 183, 170, 213, 119, 248, 152, 2, 44, 154, 163, 70, 221, 153, 101,
155, 167, 43, 172, 9, 129, 22, 39, 253, 19, 98, 108, 110, 79, 113, 224, 232,
178, 185, 112, 104, 218, 246, 97, 228, 251, 34, 242, 193, 238, 210, 144, 12,
191, 179, 162, 241, 81, 51, 145, 235, 249, 14, 239, 107, 49, 192, 214, 31,
181, 199, 106, 157, 184, 84, 204, 176, 115, 121, 50, 45, 127, 4, 150, 254,
138, 236, 205, 93, 222, 114, 67, 29, 24, 72, 243, 141, 128, 195, 78, 66,
215, 61, 156, 180, 151, 160, 137, 91, 90, 15, 131, 13, 201, 95, 96, 53, 194,
233, 7, 225, 140, 36, 103, 30, 69, 142, 8, 99, 37, 240, 21, 10, 23, 190, 6,
148, 247, 120, 234, 75, 0, 26, 197, 62, 94, 252, 219, 203, 117, 35, 11, 32,
57, 177, 33, 88, 237, 149, 56, 87, 174, 20, 125, 136, 171, 168, 68, 175, 74,
165, 71, 134, 139, 48, 27, 166, 77, 146, 158, 231, 83, 111, 229, 122, 60,
211, 133, 230, 220, 105, 92, 41, 55, 46, 245, 40, 244, 102, 143, 54, 65, 25,
63, 161, 1, 216, 80, 73, 209, 76, 132, 187, 208, 89, 18, 169, 200, 196, 135,
130, 116, 188, 159, 86, 164, 100, 109, 198, 173, 186, 3, 64, 52, 217, 226,
250, 124, 123, 5, 202, 38, 147, 118, 126, 255, 82, 85, 212, 207, 206, 59,
227, 47, 16, 58, 17, 182, 189, 28, 42, 223, 183, 170, 213, 119, 248, 152, 2,
44, 154, 163, 70, 221, 153, 101, 155, 167, 43, 172, 9, 129, 22, 39, 253, 19,
98, 108, 110, 79, 113, 224, 232, 178, 185, 112, 104, 218, 246, 97, 228, 251,
34, 242, 193, 238, 210, 144, 12, 191, 179, 162, 241, 81, 51, 145, 235, 249,
14, 239, 107, 49, 192, 214, 31, 181, 199, 106, 157, 184, 84, 204, 176, 115,
121, 50, 45, 127, 4, 150, 254, 138, 236, 205, 93, 222, 114, 67, 29, 24, 72,
243, 141, 128, 195, 78, 66, 215, 61, 156, 180
];
pub type TextureFloat = Textures<Float, Float>;
pub type TextureSpec = Textures<Spectrum, Spectrum>;
#[enum_dispatch]
pub trait Texture<T2> {
fn evaluate(&self, s: &SurfaceInteraction) -> T2;
}
// All Texture generic types must implement these traits
pub trait SpectrumT<T>:
Copy +
Send +
Sync +
num::Zero +
Clampable +
AddAssign +
From<Float> +
From<SampledSpectrum> +
From<RGBSpectrum> +
Mul<T, Output = T> +
Mul<Float, Output = T> +
Div<Float, Output = T> +
Add<T, Output = T>{}
// Implementations for valid Texture generic types
impl SpectrumT<Float> for Float{}
impl SpectrumT<RGBSpectrum> for RGBSpectrum{}
impl SpectrumT<SampledSpectrum> for SampledSpectrum{}
#[enum_dispatch(Texture<T2>)]
pub enum Textures<T1, T2>
where T1: SpectrumT<T1> + Mul<T2, Output = T2>,
T2: SpectrumT<T2> + From<T1>
{
MarbleTexture,
UVTexture,
FBmTexture,
WrinkledTexture,
WindyTexture,
MixTexture(MixTexture<T2>),
BilerTexture(BilerTexture<T2>),
ScaleTexture(ScaleTexture<T1, T2>),
DotsTexture(DotsTexture<T2>),
ImageTextureFloat(ImageTextureFloat),
ImageTextureRGB(ImageTextureRGB),
ConstantTexture(ConstantTexture<T2>),
Checkerboard2DTexture(Checkerboard2DTexture<T2>),
Checkerboard3DTexture(Checkerboard3DTexture<T2>)
}
#[enum_dispatch]
pub trait TextureMapping2D {
fn map(&self, si: &SurfaceInteraction,
dstdx: &mut Vector2f, dstdy: &mut Vector2f) -> Point2f;
}
#[enum_dispatch(TextureMapping2D)]
pub enum TextureMapping2Ds {
UVMapping2D,
PlannarMapping2D,
SphericalMapping2D,
CylindricalMapping2D
}
pub struct UVMapping2D {
su: Float,
sv: Float,
du: Float,
dv: Float,
}
impl UVMapping2D {
pub fn new(su: Float, sv: Float, du: Float, dv: Float) -> Self {
Self { su, sv, du, dv }
}
}
impl TextureMapping2D for UVMapping2D {
fn map(&self, si: &SurfaceInteraction,
dstdx: &mut Vector2f, dstdy: &mut Vector2f) -> Point2f {
// Compute texture differentials for sphere (u, v) mapping
*dstdx = Vector2f::new(self.su * si.dudx.get() , self.sv * si.dvdx.get());
*dstdy = Vector2f::new(self.su * si.dudy.get(), self.sv * si.dvdy.get());
Point2f::new(self.su * si.uv[0] + self.du, self.sv * si.uv[1] + self.dv)
}
}
impl Default for UVMapping2D {
fn default() -> Self {
Self {
su: 1.0,
sv: 1.0,
du: 0.0,
dv: 0.0
}
}
}
pub struct SphericalMapping2D {
world_to_texture: Transform
}
impl SphericalMapping2D {
pub fn new(wtt: &Transform) -> Self {
Self { world_to_texture: *wtt }
}
fn sphere(&self, p: &Point3f) -> Point2f {
let vec = (
self.world_to_texture.transform_point(p) -
Point3f::new(0.0, 0.0, 0.0))
.normalize();
let theta = spherical_theta(&vec);
let phi = spherical_phi(&vec);
Point2f::new(theta * INV_PI, phi * INV2_PI)
}
}
impl TextureMapping2D for SphericalMapping2D {
fn map(&self, si: &SurfaceInteraction, dstdx: &mut Vector2f,
dstdy: &mut Vector2f) -> Point2f {
let st = self.sphere(&si.p);
// Compute texture coordinate differentials for sphere (u, v) mapping
let delta = 0.1;
let st_deltax = self.sphere(&(si.p + si.dpdx.get() * delta));
*dstdx = (st_deltax - st) / delta;
let st_deltay = self.sphere(&(si.p + si.dpdy.get() * delta));
*dstdy = (st_deltay - st) / delta;
// Handle sphere mapping discontinuity for coordinate differentials
if dstdx[1] > 0.5 { dstdx[1] = 1.0 - dstdx[1]; }
else if (*dstdx)[1] < -0.5 { (*dstdx)[1] = -((*dstdx)[1] + 1.0); }
if dstdy[1] > 0.5 { dstdy[1] = 1.0 - dstdy[1]; }
else if dstdy[1] < -0.5 { dstdy[1] = -(dstdy[1] + 1.0); }
st
}
}
pub struct CylindricalMapping2D {
world_to_texture: Transform
}
impl CylindricalMapping2D {
pub fn new(wtt: &Transform) -> Self {
Self { world_to_texture: *wtt }
}
fn cylinder(&self, p: &Point3f) -> Point2f {
let vec = (
self.world_to_texture.transform_point(p) -
Point3f::new(0.0, 0.0, 0.0))
.normalize();
Point2f::new(PI + vec.y.atan2(vec.x) * INV2_PI, vec.z)
}
}
impl TextureMapping2D for CylindricalMapping2D {
fn map(&self, si: &SurfaceInteraction,
dstdx: &mut Vector2f, dstdy: &mut Vector2f) -> Point2f {
let st = self.cylinder(&si.p);
// Compute texture coordinate differentials for cylinder (u, v) mapping
let delta = 0.1;
let st_deltax = self.cylinder(&(si.p + si.dpdx.get() * delta));
*dstdx = (st_deltax - st) / delta;
let st_deltay = self.cylinder(&(si.p + si.dpdy.get() * delta));
*dstdy = (st_deltay - st) / delta;
// Handle sphere mapping discontinuity for coordinate differentials
if dstdx[1] > 0.5 { dstdx[1] = 1.0 - dstdx[1]; }
else if (*dstdx)[1] < -0.5 { (*dstdx)[1] = -((*dstdx)[1] + 1.0); }
if dstdy[1] > 0.5 { dstdy[1] = 1.0 - dstdy[1]; }
else if dstdy[1] < -0.5 { dstdy[1] = -(dstdy[1] + 1.0); }
st
}
}
pub struct PlannarMapping2D {
vs: Vector3f,
vt: Vector3f,
ds: Float,
dt: Float
}
impl PlannarMapping2D {
pub fn new(vs: &Vector3f, vt: &Vector3f,
ds: Float, dt: Float) -> Self {
Self {
ds,
dt,
vs: *vs,
vt: *vt
}
}
}
impl TextureMapping2D for PlannarMapping2D {
fn map(&self, si: &SurfaceInteraction, dstdx: &mut Vector2f,
dstdy: &mut Vector2f) -> Point2f {
let vec = Vector3f::from(si.p);
*dstdx = Vector2f::new(si.dpdx.get().dot(&self.vs), si.dpdx.get().dot(&self.vt));
*dstdy = Vector2f::new(si.dpdy.get().dot(&self.vs), si.dpdy.get().dot(&self.vt));
Point2f::new(self.ds + vec.dot(&self.vs), self.dt + vec.dot(&self.vt))
}
}
#[enum_dispatch]
pub trait TextureMapping3D {
fn map(&self, si: &SurfaceInteraction, dpdx: &mut Vector3f,
dpdy: &mut Vector3f) -> Point3f;
}
#[enum_dispatch(TextureMapping3D)]
pub enum TextureMapping3Ds {
IdentityMapping3D
}
pub struct IdentityMapping3D {
world_to_texture: Transform
}
impl IdentityMapping3D {
pub fn new(w2t: &Transform) -> Self {
Self { world_to_texture: *w2t }
}
}
impl TextureMapping3D for IdentityMapping3D {
fn map(&self, si: &SurfaceInteraction, dpdx: &mut Vector3f,
dpdy: &mut Vector3f) -> Point3f {
*dpdx = self.world_to_texture.transform_vector(&si.dpdx.get());
*dpdy = self.world_to_texture.transform_vector(&si.dpdy.get());
self.world_to_texture.transform_point(&si.p)
}
}
pub fn lanczos(mut x: Float, tau: Float) -> Float {
x = x.abs();
if x < 1.0e-5 { return 1.0; }
if x > 1.0 { return 0.0; }
x *= PI;
let s = (x * tau).sin() / ( x * tau);
let lanc = x.sin() / x;
s * lanc
}
pub fn noise(x: Float, y: Float, z: Float) -> Float {
let mut ix = x.floor() as usize;
let mut iy = y.floor() as usize;
let mut iz = z.floor() as usize;
let dx = x - ix as Float;
let dy = y - iy as Float;
let dz = z - iz as Float;
// Compute gradient weights
ix &= NOISE_PERM_SIZE - 1;
iy &= NOISE_PERM_SIZE - 1;
iz &= NOISE_PERM_SIZE - 1;
let w000 = grad(ix, iy, iz, dx, dy, dz);
let w100 = grad(ix + 1, iy, iz, dx - 1.0, dy, dz);
let w010 = grad(ix, iy + 1, iz, dx, dy - 1.0, dz);
let w110 = grad(ix + 1, iy + 1, iz, dx - 1.0, dy - 1.0, dz);
let w001 = grad(ix, iy, iz + 1, dx, dy, dz - 1.0);
let w101 = grad(ix + 1, iy, iz + 1, dx - 1.0, dy, dz - 1.0);
let w011 = grad(ix, iy + 1, iz + 1, dx, dy - 1.0, dz - 1.0);
let w111 = grad(ix + 1, iy + 1, iz + 1, dx - 1.0, dy - 1.0, dz - 1.0);
// Compute trilinear interpolation of weights
let wx = noise_weight(dx);
let wy = noise_weight(dy);
let wz = noise_weight(dz);
let x00 = lerp(wx, w000, w100);
let x10 = lerp(wx, w010, w110);
let x01 = lerp(wx, w001, w101);
let x11 = lerp(wx, w011, w111);
let y0 = lerp(wy, x00, x10);
let y1 = lerp(wy, x01, x11);
lerp(wz, y0, y1)
}
pub fn noisep(p: Point3f) -> Float {
noise(p.x, p.y, p.z)
}
fn grad(x: usize, y: usize, z: usize, dx: Float, dy: Float, dz: Float) -> Float {
let mut h = NOISE_PERM[NOISE_PERM[NOISE_PERM[x] + y] + z];
h &= 15;
let u = if h < 8 || h == 12 || h == 13 { dx } else { dy };
let v = if h < 4 || h == 12 || h == 13 { dy } else { dz };
(if (h & 1) != 0 { -u } else { u }) + (if (h & 2) != 0 { -v } else { v })
}
fn noise_weight(t: Float) -> Float {
let t3 = t * t * t;
let t4 = t3 * t;
6.0 * t4 * t - 15.0 * t4 + 10.0 * t3
}
pub fn fbm(
p: &Point3f, dpdx: &Vector3f, dpdy: &Vector3f,
omega: Float, max_octaves: usize) -> Float {
// Compute number of octaves for antialiased FBm
let len2 = dpdx.length_squared().max(dpdy.length_squared());
let n = clamp(-1.0 - 0.5 * log2(len2), 0.0, max_octaves as Float);
let nint = n.floor() as usize;
// Compute sum of octaves of noise for fbm
let (mut sum, mut lambda, mut o) = (0.0, 1.0, 1.0);
for _i in 0..nint {
sum += o * noisep(*p * lambda);
lambda *= 1.99;
o *= omega;
}
let npartial = n - nint as Float;
sum += o * smooth_step(0.3, 0.7, npartial) * noisep(*p * lambda);
sum
}
pub fn turbulence(
p: &Point3f, dpdx: &Vector3f, dpdy: &Vector3f,
omega: Float, max_octaves: usize) -> Float {
// Compute number of octaves for antialiased FBm
let len2 = dpdx.length_squared().max(dpdy.length_squared());
let n = clamp(-1.0 - 0.5 * len2.log2(), 0.0, max_octaves as Float);
let nint = n.floor() as usize;
// Compute sum of octaves of noise for turbulence
let (mut sum, mut lambda, mut o) = (0.0, 1.0, 1.0);
for _i in 0..nint {
sum += o + noisep(*p * lambda).abs();
lambda *= 1.99;
o *= omega;
}
// Account for contributions of clamped octaves in turbulence
let npartial = n - nint as Float;
sum += o + lerp( |
for _i in nint..max_octaves {
sum += o * 0.2;
o *= omega;
}
sum
}
fn smooth_step(min: Float, max: Float, value: Float) -> Float {
let v = clamp((value - min) / (max - min), 0.0, 1.0);
v * v * (-2.0 * v + 3.0)
}
pub fn get_mapping2d(t2w: &Transform, tp: &mut TextureParams) -> TextureMapping2Ds {
let ty = tp.find_string("mapping", "uv");
match ty.as_str() {
"uv" => {
let su = tp.find_float("uscale", 1.0);
let sv = tp.find_float("vscale", 1.0);
let du = tp.find_float("udelta", 0.0);
let dv = tp.find_float("vdelta", 0.0);
UVMapping2D::new(su, sv, du, dv).into()
},
"planar" => {
let vs = tp.find_vector3f("v1", Vector3f::new(1.0, 0.0, 0.0));
let vt = tp.find_vector3f("v2", Vector3f::new(0.0, 1.0, 0.0));
let ds = tp.find_float("udelta", 0.0);
let dt = tp.find_float("vdelta", 0.0);
PlannarMapping2D::new(&vs, &vt, ds, dt).into()
}
"spherical" => SphericalMapping2D::new(&Transform::inverse(t2w)).into(),
"cylindrical" => CylindricalMapping2D::new(&Transform::inverse(t2w)).into(),
_ => {
error!("2D texture mapping \"{}\" unknown", ty);
UVMapping2D::new(1.0, 1.0, 0.0, 0.0).into()
}
}
} | smooth_step(0.3, 0.7, npartial),
0.2,
noisep(*p * lambda).abs()); | random_line_split |
texture.rs | use enum_dispatch::enum_dispatch;
use log::error;
use crate::textures::dots::DotsTexture;
use crate::textures::constant::ConstantTexture;
use crate::textures::scaled::ScaleTexture;
use crate::core::interaction::SurfaceInteraction;
use crate::textures::imagemap::{ ImageTextureFloat, ImageTextureRGB};
use crate::textures::mix::MixTexture;
use crate::textures::biler::BilerTexture;
use crate::textures::uv::UVTexture;
use crate::textures::marble::MarbleTexture;
use crate::textures::wrinkled::WrinkledTexture;
use crate::textures::fbm::FBmTexture;
use crate::textures::windy::WindyTexture;
use crate::textures::checkerboard::{Checkerboard3DTexture, Checkerboard2DTexture};
use crate::core::geometry::vector::{Vector2f, Vector3f};
use crate::core::geometry::point::{Point2f, Point3f};
use crate::core::pbrt::{Float, INV_PI, INV2_PI, PI, lerp, clamp, log2};
use crate::core::transform::Transform;
use crate::core::geometry::geometry::{spherical_theta, spherical_phi};
use crate::core::spectrum::{Spectrum, RGBSpectrum, SampledSpectrum};
use std::ops::{Mul, Add, AddAssign, Div};
use crate::core::mipmap::Clampable;
use crate::core::paramset::TextureParams;
const NOISE_PERM_SIZE: usize = 256;
const NOISE_PERM: [usize; 2 * NOISE_PERM_SIZE] = [
151, 160, 137, 91, 90, 15, 131, 13, 201, 95, 96, 53, 194, 233, 7, 225, 140,
36, 103, 30, 69, 142,
// Remainder of the noise permutation table
8, 99, 37, 240, 21, 10, 23, 190, 6, 148, 247, 120, 234, 75, 0, 26, 197, 62,
94, 252, 219, 203, 117, 35, 11, 32, 57, 177, 33, 88, 237, 149, 56, 87, 174,
20, 125, 136, 171, 168, 68, 175, 74, 165, 71, 134, 139, 48, 27, 166, 77,
146, 158, 231, 83, 111, 229, 122, 60, 211, 133, 230, 220, 105, 92, 41, 55,
46, 245, 40, 244, 102, 143, 54, 65, 25, 63, 161, 1, 216, 80, 73, 209, 76,
132, 187, 208, 89, 18, 169, 200, 196, 135, 130, 116, 188, 159, 86, 164, 100,
109, 198, 173, 186, 3, 64, 52, 217, 226, 250, 124, 123, 5, 202, 38, 147,
118, 126, 255, 82, 85, 212, 207, 206, 59, 227, 47, 16, 58, 17, 182, 189, 28,
42, 223, 183, 170, 213, 119, 248, 152, 2, 44, 154, 163, 70, 221, 153, 101,
155, 167, 43, 172, 9, 129, 22, 39, 253, 19, 98, 108, 110, 79, 113, 224, 232,
178, 185, 112, 104, 218, 246, 97, 228, 251, 34, 242, 193, 238, 210, 144, 12,
191, 179, 162, 241, 81, 51, 145, 235, 249, 14, 239, 107, 49, 192, 214, 31,
181, 199, 106, 157, 184, 84, 204, 176, 115, 121, 50, 45, 127, 4, 150, 254,
138, 236, 205, 93, 222, 114, 67, 29, 24, 72, 243, 141, 128, 195, 78, 66,
215, 61, 156, 180, 151, 160, 137, 91, 90, 15, 131, 13, 201, 95, 96, 53, 194,
233, 7, 225, 140, 36, 103, 30, 69, 142, 8, 99, 37, 240, 21, 10, 23, 190, 6,
148, 247, 120, 234, 75, 0, 26, 197, 62, 94, 252, 219, 203, 117, 35, 11, 32,
57, 177, 33, 88, 237, 149, 56, 87, 174, 20, 125, 136, 171, 168, 68, 175, 74,
165, 71, 134, 139, 48, 27, 166, 77, 146, 158, 231, 83, 111, 229, 122, 60,
211, 133, 230, 220, 105, 92, 41, 55, 46, 245, 40, 244, 102, 143, 54, 65, 25,
63, 161, 1, 216, 80, 73, 209, 76, 132, 187, 208, 89, 18, 169, 200, 196, 135,
130, 116, 188, 159, 86, 164, 100, 109, 198, 173, 186, 3, 64, 52, 217, 226,
250, 124, 123, 5, 202, 38, 147, 118, 126, 255, 82, 85, 212, 207, 206, 59,
227, 47, 16, 58, 17, 182, 189, 28, 42, 223, 183, 170, 213, 119, 248, 152, 2,
44, 154, 163, 70, 221, 153, 101, 155, 167, 43, 172, 9, 129, 22, 39, 253, 19,
98, 108, 110, 79, 113, 224, 232, 178, 185, 112, 104, 218, 246, 97, 228, 251,
34, 242, 193, 238, 210, 144, 12, 191, 179, 162, 241, 81, 51, 145, 235, 249,
14, 239, 107, 49, 192, 214, 31, 181, 199, 106, 157, 184, 84, 204, 176, 115,
121, 50, 45, 127, 4, 150, 254, 138, 236, 205, 93, 222, 114, 67, 29, 24, 72,
243, 141, 128, 195, 78, 66, 215, 61, 156, 180
];
pub type TextureFloat = Textures<Float, Float>;
pub type TextureSpec = Textures<Spectrum, Spectrum>;
#[enum_dispatch]
pub trait Texture<T2> {
fn evaluate(&self, s: &SurfaceInteraction) -> T2;
}
// All Texture generic types must implement these traits
pub trait SpectrumT<T>:
Copy +
Send +
Sync +
num::Zero +
Clampable +
AddAssign +
From<Float> +
From<SampledSpectrum> +
From<RGBSpectrum> +
Mul<T, Output = T> +
Mul<Float, Output = T> +
Div<Float, Output = T> +
Add<T, Output = T>{}
// Implementations for valid Texture generic types
impl SpectrumT<Float> for Float{}
impl SpectrumT<RGBSpectrum> for RGBSpectrum{}
impl SpectrumT<SampledSpectrum> for SampledSpectrum{}
#[enum_dispatch(Texture<T2>)]
pub enum Textures<T1, T2>
where T1: SpectrumT<T1> + Mul<T2, Output = T2>,
T2: SpectrumT<T2> + From<T1>
{
MarbleTexture,
UVTexture,
FBmTexture,
WrinkledTexture,
WindyTexture,
MixTexture(MixTexture<T2>),
BilerTexture(BilerTexture<T2>),
ScaleTexture(ScaleTexture<T1, T2>),
DotsTexture(DotsTexture<T2>),
ImageTextureFloat(ImageTextureFloat),
ImageTextureRGB(ImageTextureRGB),
ConstantTexture(ConstantTexture<T2>),
Checkerboard2DTexture(Checkerboard2DTexture<T2>),
Checkerboard3DTexture(Checkerboard3DTexture<T2>)
}
#[enum_dispatch]
pub trait TextureMapping2D {
fn map(&self, si: &SurfaceInteraction,
dstdx: &mut Vector2f, dstdy: &mut Vector2f) -> Point2f;
}
#[enum_dispatch(TextureMapping2D)]
pub enum TextureMapping2Ds {
UVMapping2D,
PlannarMapping2D,
SphericalMapping2D,
CylindricalMapping2D
}
pub struct UVMapping2D {
su: Float,
sv: Float,
du: Float,
dv: Float,
}
impl UVMapping2D {
pub fn new(su: Float, sv: Float, du: Float, dv: Float) -> Self {
Self { su, sv, du, dv }
}
}
impl TextureMapping2D for UVMapping2D {
fn map(&self, si: &SurfaceInteraction,
dstdx: &mut Vector2f, dstdy: &mut Vector2f) -> Point2f {
// Compute texture differentials for sphere (u, v) mapping
*dstdx = Vector2f::new(self.su * si.dudx.get() , self.sv * si.dvdx.get());
*dstdy = Vector2f::new(self.su * si.dudy.get(), self.sv * si.dvdy.get());
Point2f::new(self.su * si.uv[0] + self.du, self.sv * si.uv[1] + self.dv)
}
}
impl Default for UVMapping2D {
fn default() -> Self {
Self {
su: 1.0,
sv: 1.0,
du: 0.0,
dv: 0.0
}
}
}
pub struct SphericalMapping2D {
world_to_texture: Transform
}
impl SphericalMapping2D {
pub fn new(wtt: &Transform) -> Self {
Self { world_to_texture: *wtt }
}
fn sphere(&self, p: &Point3f) -> Point2f {
let vec = (
self.world_to_texture.transform_point(p) -
Point3f::new(0.0, 0.0, 0.0))
.normalize();
let theta = spherical_theta(&vec);
let phi = spherical_phi(&vec);
Point2f::new(theta * INV_PI, phi * INV2_PI)
}
}
impl TextureMapping2D for SphericalMapping2D {
fn map(&self, si: &SurfaceInteraction, dstdx: &mut Vector2f,
dstdy: &mut Vector2f) -> Point2f {
let st = self.sphere(&si.p);
// Compute texture coordinate differentials for sphere (u, v) mapping
let delta = 0.1;
let st_deltax = self.sphere(&(si.p + si.dpdx.get() * delta));
*dstdx = (st_deltax - st) / delta;
let st_deltay = self.sphere(&(si.p + si.dpdy.get() * delta));
*dstdy = (st_deltay - st) / delta;
// Handle sphere mapping discontinuity for coordinate differentials
if dstdx[1] > 0.5 { dstdx[1] = 1.0 - dstdx[1]; }
else if (*dstdx)[1] < -0.5 { (*dstdx)[1] = -((*dstdx)[1] + 1.0); }
if dstdy[1] > 0.5 { dstdy[1] = 1.0 - dstdy[1]; }
else if dstdy[1] < -0.5 { dstdy[1] = -(dstdy[1] + 1.0); }
st
}
}
pub struct CylindricalMapping2D {
world_to_texture: Transform
}
impl CylindricalMapping2D {
pub fn new(wtt: &Transform) -> Self {
Self { world_to_texture: *wtt }
}
fn cylinder(&self, p: &Point3f) -> Point2f {
let vec = (
self.world_to_texture.transform_point(p) -
Point3f::new(0.0, 0.0, 0.0))
.normalize();
Point2f::new(PI + vec.y.atan2(vec.x) * INV2_PI, vec.z)
}
}
impl TextureMapping2D for CylindricalMapping2D {
fn map(&self, si: &SurfaceInteraction,
dstdx: &mut Vector2f, dstdy: &mut Vector2f) -> Point2f {
let st = self.cylinder(&si.p);
// Compute texture coordinate differentials for cylinder (u, v) mapping
let delta = 0.1;
let st_deltax = self.cylinder(&(si.p + si.dpdx.get() * delta));
*dstdx = (st_deltax - st) / delta;
let st_deltay = self.cylinder(&(si.p + si.dpdy.get() * delta));
*dstdy = (st_deltay - st) / delta;
// Handle sphere mapping discontinuity for coordinate differentials
if dstdx[1] > 0.5 { dstdx[1] = 1.0 - dstdx[1]; }
else if (*dstdx)[1] < -0.5 { (*dstdx)[1] = -((*dstdx)[1] + 1.0); }
if dstdy[1] > 0.5 { dstdy[1] = 1.0 - dstdy[1]; }
else if dstdy[1] < -0.5 { dstdy[1] = -(dstdy[1] + 1.0); }
st
}
}
pub struct PlannarMapping2D {
vs: Vector3f,
vt: Vector3f,
ds: Float,
dt: Float
}
impl PlannarMapping2D {
pub fn | (vs: &Vector3f, vt: &Vector3f,
ds: Float, dt: Float) -> Self {
Self {
ds,
dt,
vs: *vs,
vt: *vt
}
}
}
impl TextureMapping2D for PlannarMapping2D {
fn map(&self, si: &SurfaceInteraction, dstdx: &mut Vector2f,
dstdy: &mut Vector2f) -> Point2f {
let vec = Vector3f::from(si.p);
*dstdx = Vector2f::new(si.dpdx.get().dot(&self.vs), si.dpdx.get().dot(&self.vt));
*dstdy = Vector2f::new(si.dpdy.get().dot(&self.vs), si.dpdy.get().dot(&self.vt));
Point2f::new(self.ds + vec.dot(&self.vs), self.dt + vec.dot(&self.vt))
}
}
#[enum_dispatch]
pub trait TextureMapping3D {
fn map(&self, si: &SurfaceInteraction, dpdx: &mut Vector3f,
dpdy: &mut Vector3f) -> Point3f;
}
#[enum_dispatch(TextureMapping3D)]
pub enum TextureMapping3Ds {
IdentityMapping3D
}
pub struct IdentityMapping3D {
world_to_texture: Transform
}
impl IdentityMapping3D {
pub fn new(w2t: &Transform) -> Self {
Self { world_to_texture: *w2t }
}
}
impl TextureMapping3D for IdentityMapping3D {
fn map(&self, si: &SurfaceInteraction, dpdx: &mut Vector3f,
dpdy: &mut Vector3f) -> Point3f {
*dpdx = self.world_to_texture.transform_vector(&si.dpdx.get());
*dpdy = self.world_to_texture.transform_vector(&si.dpdy.get());
self.world_to_texture.transform_point(&si.p)
}
}
pub fn lanczos(mut x: Float, tau: Float) -> Float {
x = x.abs();
if x < 1.0e-5 { return 1.0; }
if x > 1.0 { return 0.0; }
x *= PI;
let s = (x * tau).sin() / ( x * tau);
let lanc = x.sin() / x;
s * lanc
}
pub fn noise(x: Float, y: Float, z: Float) -> Float {
let mut ix = x.floor() as usize;
let mut iy = y.floor() as usize;
let mut iz = z.floor() as usize;
let dx = x - ix as Float;
let dy = y - iy as Float;
let dz = z - iz as Float;
// Compute gradient weights
ix &= NOISE_PERM_SIZE - 1;
iy &= NOISE_PERM_SIZE - 1;
iz &= NOISE_PERM_SIZE - 1;
let w000 = grad(ix, iy, iz, dx, dy, dz);
let w100 = grad(ix + 1, iy, iz, dx - 1.0, dy, dz);
let w010 = grad(ix, iy + 1, iz, dx, dy - 1.0, dz);
let w110 = grad(ix + 1, iy + 1, iz, dx - 1.0, dy - 1.0, dz);
let w001 = grad(ix, iy, iz + 1, dx, dy, dz - 1.0);
let w101 = grad(ix + 1, iy, iz + 1, dx - 1.0, dy, dz - 1.0);
let w011 = grad(ix, iy + 1, iz + 1, dx, dy - 1.0, dz - 1.0);
let w111 = grad(ix + 1, iy + 1, iz + 1, dx - 1.0, dy - 1.0, dz - 1.0);
// Compute trilinear interpolation of weights
let wx = noise_weight(dx);
let wy = noise_weight(dy);
let wz = noise_weight(dz);
let x00 = lerp(wx, w000, w100);
let x10 = lerp(wx, w010, w110);
let x01 = lerp(wx, w001, w101);
let x11 = lerp(wx, w011, w111);
let y0 = lerp(wy, x00, x10);
let y1 = lerp(wy, x01, x11);
lerp(wz, y0, y1)
}
pub fn noisep(p: Point3f) -> Float {
noise(p.x, p.y, p.z)
}
fn grad(x: usize, y: usize, z: usize, dx: Float, dy: Float, dz: Float) -> Float {
let mut h = NOISE_PERM[NOISE_PERM[NOISE_PERM[x] + y] + z];
h &= 15;
let u = if h < 8 || h == 12 || h == 13 { dx } else { dy };
let v = if h < 4 || h == 12 || h == 13 { dy } else { dz };
(if (h & 1) != 0 { -u } else { u }) + (if (h & 2) != 0 { -v } else { v })
}
fn noise_weight(t: Float) -> Float {
let t3 = t * t * t;
let t4 = t3 * t;
6.0 * t4 * t - 15.0 * t4 + 10.0 * t3
}
pub fn fbm(
p: &Point3f, dpdx: &Vector3f, dpdy: &Vector3f,
omega: Float, max_octaves: usize) -> Float {
// Compute number of octaves for antialiased FBm
let len2 = dpdx.length_squared().max(dpdy.length_squared());
let n = clamp(-1.0 - 0.5 * log2(len2), 0.0, max_octaves as Float);
let nint = n.floor() as usize;
// Compute sum of octaves of noise for fbm
let (mut sum, mut lambda, mut o) = (0.0, 1.0, 1.0);
for _i in 0..nint {
sum += o * noisep(*p * lambda);
lambda *= 1.99;
o *= omega;
}
let npartial = n - nint as Float;
sum += o * smooth_step(0.3, 0.7, npartial) * noisep(*p * lambda);
sum
}
pub fn turbulence(
p: &Point3f, dpdx: &Vector3f, dpdy: &Vector3f,
omega: Float, max_octaves: usize) -> Float {
// Compute number of octaves for antialiased FBm
let len2 = dpdx.length_squared().max(dpdy.length_squared());
let n = clamp(-1.0 - 0.5 * len2.log2(), 0.0, max_octaves as Float);
let nint = n.floor() as usize;
// Compute sum of octaves of noise for turbulence
let (mut sum, mut lambda, mut o) = (0.0, 1.0, 1.0);
for _i in 0..nint {
sum += o + noisep(*p * lambda).abs();
lambda *= 1.99;
o *= omega;
}
// Account for contributions of clamped octaves in turbulence
let npartial = n - nint as Float;
sum += o + lerp(
smooth_step(0.3, 0.7, npartial),
0.2,
noisep(*p * lambda).abs());
for _i in nint..max_octaves {
sum += o * 0.2;
o *= omega;
}
sum
}
fn smooth_step(min: Float, max: Float, value: Float) -> Float {
let v = clamp((value - min) / (max - min), 0.0, 1.0);
v * v * (-2.0 * v + 3.0)
}
pub fn get_mapping2d(t2w: &Transform, tp: &mut TextureParams) -> TextureMapping2Ds {
let ty = tp.find_string("mapping", "uv");
match ty.as_str() {
"uv" => {
let su = tp.find_float("uscale", 1.0);
let sv = tp.find_float("vscale", 1.0);
let du = tp.find_float("udelta", 0.0);
let dv = tp.find_float("vdelta", 0.0);
UVMapping2D::new(su, sv, du, dv).into()
},
"planar" => {
let vs = tp.find_vector3f("v1", Vector3f::new(1.0, 0.0, 0.0));
let vt = tp.find_vector3f("v2", Vector3f::new(0.0, 1.0, 0.0));
let ds = tp.find_float("udelta", 0.0);
let dt = tp.find_float("vdelta", 0.0);
PlannarMapping2D::new(&vs, &vt, ds, dt).into()
}
"spherical" => SphericalMapping2D::new(&Transform::inverse(t2w)).into(),
"cylindrical" => CylindricalMapping2D::new(&Transform::inverse(t2w)).into(),
_ => {
error!("2D texture mapping \"{}\" unknown", ty);
UVMapping2D::new(1.0, 1.0, 0.0, 0.0).into()
}
}
} | new | identifier_name |
texture.rs | use enum_dispatch::enum_dispatch;
use log::error;
use crate::textures::dots::DotsTexture;
use crate::textures::constant::ConstantTexture;
use crate::textures::scaled::ScaleTexture;
use crate::core::interaction::SurfaceInteraction;
use crate::textures::imagemap::{ ImageTextureFloat, ImageTextureRGB};
use crate::textures::mix::MixTexture;
use crate::textures::biler::BilerTexture;
use crate::textures::uv::UVTexture;
use crate::textures::marble::MarbleTexture;
use crate::textures::wrinkled::WrinkledTexture;
use crate::textures::fbm::FBmTexture;
use crate::textures::windy::WindyTexture;
use crate::textures::checkerboard::{Checkerboard3DTexture, Checkerboard2DTexture};
use crate::core::geometry::vector::{Vector2f, Vector3f};
use crate::core::geometry::point::{Point2f, Point3f};
use crate::core::pbrt::{Float, INV_PI, INV2_PI, PI, lerp, clamp, log2};
use crate::core::transform::Transform;
use crate::core::geometry::geometry::{spherical_theta, spherical_phi};
use crate::core::spectrum::{Spectrum, RGBSpectrum, SampledSpectrum};
use std::ops::{Mul, Add, AddAssign, Div};
use crate::core::mipmap::Clampable;
use crate::core::paramset::TextureParams;
const NOISE_PERM_SIZE: usize = 256;
const NOISE_PERM: [usize; 2 * NOISE_PERM_SIZE] = [
151, 160, 137, 91, 90, 15, 131, 13, 201, 95, 96, 53, 194, 233, 7, 225, 140,
36, 103, 30, 69, 142,
// Remainder of the noise permutation table
8, 99, 37, 240, 21, 10, 23, 190, 6, 148, 247, 120, 234, 75, 0, 26, 197, 62,
94, 252, 219, 203, 117, 35, 11, 32, 57, 177, 33, 88, 237, 149, 56, 87, 174,
20, 125, 136, 171, 168, 68, 175, 74, 165, 71, 134, 139, 48, 27, 166, 77,
146, 158, 231, 83, 111, 229, 122, 60, 211, 133, 230, 220, 105, 92, 41, 55,
46, 245, 40, 244, 102, 143, 54, 65, 25, 63, 161, 1, 216, 80, 73, 209, 76,
132, 187, 208, 89, 18, 169, 200, 196, 135, 130, 116, 188, 159, 86, 164, 100,
109, 198, 173, 186, 3, 64, 52, 217, 226, 250, 124, 123, 5, 202, 38, 147,
118, 126, 255, 82, 85, 212, 207, 206, 59, 227, 47, 16, 58, 17, 182, 189, 28,
42, 223, 183, 170, 213, 119, 248, 152, 2, 44, 154, 163, 70, 221, 153, 101,
155, 167, 43, 172, 9, 129, 22, 39, 253, 19, 98, 108, 110, 79, 113, 224, 232,
178, 185, 112, 104, 218, 246, 97, 228, 251, 34, 242, 193, 238, 210, 144, 12,
191, 179, 162, 241, 81, 51, 145, 235, 249, 14, 239, 107, 49, 192, 214, 31,
181, 199, 106, 157, 184, 84, 204, 176, 115, 121, 50, 45, 127, 4, 150, 254,
138, 236, 205, 93, 222, 114, 67, 29, 24, 72, 243, 141, 128, 195, 78, 66,
215, 61, 156, 180, 151, 160, 137, 91, 90, 15, 131, 13, 201, 95, 96, 53, 194,
233, 7, 225, 140, 36, 103, 30, 69, 142, 8, 99, 37, 240, 21, 10, 23, 190, 6,
148, 247, 120, 234, 75, 0, 26, 197, 62, 94, 252, 219, 203, 117, 35, 11, 32,
57, 177, 33, 88, 237, 149, 56, 87, 174, 20, 125, 136, 171, 168, 68, 175, 74,
165, 71, 134, 139, 48, 27, 166, 77, 146, 158, 231, 83, 111, 229, 122, 60,
211, 133, 230, 220, 105, 92, 41, 55, 46, 245, 40, 244, 102, 143, 54, 65, 25,
63, 161, 1, 216, 80, 73, 209, 76, 132, 187, 208, 89, 18, 169, 200, 196, 135,
130, 116, 188, 159, 86, 164, 100, 109, 198, 173, 186, 3, 64, 52, 217, 226,
250, 124, 123, 5, 202, 38, 147, 118, 126, 255, 82, 85, 212, 207, 206, 59,
227, 47, 16, 58, 17, 182, 189, 28, 42, 223, 183, 170, 213, 119, 248, 152, 2,
44, 154, 163, 70, 221, 153, 101, 155, 167, 43, 172, 9, 129, 22, 39, 253, 19,
98, 108, 110, 79, 113, 224, 232, 178, 185, 112, 104, 218, 246, 97, 228, 251,
34, 242, 193, 238, 210, 144, 12, 191, 179, 162, 241, 81, 51, 145, 235, 249,
14, 239, 107, 49, 192, 214, 31, 181, 199, 106, 157, 184, 84, 204, 176, 115,
121, 50, 45, 127, 4, 150, 254, 138, 236, 205, 93, 222, 114, 67, 29, 24, 72,
243, 141, 128, 195, 78, 66, 215, 61, 156, 180
];
pub type TextureFloat = Textures<Float, Float>;
pub type TextureSpec = Textures<Spectrum, Spectrum>;
#[enum_dispatch]
pub trait Texture<T2> {
fn evaluate(&self, s: &SurfaceInteraction) -> T2;
}
// All Texture generic types must implement these traits
pub trait SpectrumT<T>:
Copy +
Send +
Sync +
num::Zero +
Clampable +
AddAssign +
From<Float> +
From<SampledSpectrum> +
From<RGBSpectrum> +
Mul<T, Output = T> +
Mul<Float, Output = T> +
Div<Float, Output = T> +
Add<T, Output = T>{}
// Implementations for valid Texture generic types
impl SpectrumT<Float> for Float{}
impl SpectrumT<RGBSpectrum> for RGBSpectrum{}
impl SpectrumT<SampledSpectrum> for SampledSpectrum{}
#[enum_dispatch(Texture<T2>)]
pub enum Textures<T1, T2>
where T1: SpectrumT<T1> + Mul<T2, Output = T2>,
T2: SpectrumT<T2> + From<T1>
{
MarbleTexture,
UVTexture,
FBmTexture,
WrinkledTexture,
WindyTexture,
MixTexture(MixTexture<T2>),
BilerTexture(BilerTexture<T2>),
ScaleTexture(ScaleTexture<T1, T2>),
DotsTexture(DotsTexture<T2>),
ImageTextureFloat(ImageTextureFloat),
ImageTextureRGB(ImageTextureRGB),
ConstantTexture(ConstantTexture<T2>),
Checkerboard2DTexture(Checkerboard2DTexture<T2>),
Checkerboard3DTexture(Checkerboard3DTexture<T2>)
}
#[enum_dispatch]
pub trait TextureMapping2D {
fn map(&self, si: &SurfaceInteraction,
dstdx: &mut Vector2f, dstdy: &mut Vector2f) -> Point2f;
}
#[enum_dispatch(TextureMapping2D)]
pub enum TextureMapping2Ds {
UVMapping2D,
PlannarMapping2D,
SphericalMapping2D,
CylindricalMapping2D
}
pub struct UVMapping2D {
su: Float,
sv: Float,
du: Float,
dv: Float,
}
impl UVMapping2D {
pub fn new(su: Float, sv: Float, du: Float, dv: Float) -> Self |
}
impl TextureMapping2D for UVMapping2D {
fn map(&self, si: &SurfaceInteraction,
dstdx: &mut Vector2f, dstdy: &mut Vector2f) -> Point2f {
// Compute texture differentials for sphere (u, v) mapping
*dstdx = Vector2f::new(self.su * si.dudx.get() , self.sv * si.dvdx.get());
*dstdy = Vector2f::new(self.su * si.dudy.get(), self.sv * si.dvdy.get());
Point2f::new(self.su * si.uv[0] + self.du, self.sv * si.uv[1] + self.dv)
}
}
impl Default for UVMapping2D {
fn default() -> Self {
Self {
su: 1.0,
sv: 1.0,
du: 0.0,
dv: 0.0
}
}
}
pub struct SphericalMapping2D {
world_to_texture: Transform
}
impl SphericalMapping2D {
pub fn new(wtt: &Transform) -> Self {
Self { world_to_texture: *wtt }
}
fn sphere(&self, p: &Point3f) -> Point2f {
let vec = (
self.world_to_texture.transform_point(p) -
Point3f::new(0.0, 0.0, 0.0))
.normalize();
let theta = spherical_theta(&vec);
let phi = spherical_phi(&vec);
Point2f::new(theta * INV_PI, phi * INV2_PI)
}
}
impl TextureMapping2D for SphericalMapping2D {
fn map(&self, si: &SurfaceInteraction, dstdx: &mut Vector2f,
dstdy: &mut Vector2f) -> Point2f {
let st = self.sphere(&si.p);
// Compute texture coordinate differentials for sphere (u, v) mapping
let delta = 0.1;
let st_deltax = self.sphere(&(si.p + si.dpdx.get() * delta));
*dstdx = (st_deltax - st) / delta;
let st_deltay = self.sphere(&(si.p + si.dpdy.get() * delta));
*dstdy = (st_deltay - st) / delta;
// Handle sphere mapping discontinuity for coordinate differentials
if dstdx[1] > 0.5 { dstdx[1] = 1.0 - dstdx[1]; }
else if (*dstdx)[1] < -0.5 { (*dstdx)[1] = -((*dstdx)[1] + 1.0); }
if dstdy[1] > 0.5 { dstdy[1] = 1.0 - dstdy[1]; }
else if dstdy[1] < -0.5 { dstdy[1] = -(dstdy[1] + 1.0); }
st
}
}
pub struct CylindricalMapping2D {
world_to_texture: Transform
}
impl CylindricalMapping2D {
pub fn new(wtt: &Transform) -> Self {
Self { world_to_texture: *wtt }
}
fn cylinder(&self, p: &Point3f) -> Point2f {
let vec = (
self.world_to_texture.transform_point(p) -
Point3f::new(0.0, 0.0, 0.0))
.normalize();
Point2f::new(PI + vec.y.atan2(vec.x) * INV2_PI, vec.z)
}
}
impl TextureMapping2D for CylindricalMapping2D {
fn map(&self, si: &SurfaceInteraction,
dstdx: &mut Vector2f, dstdy: &mut Vector2f) -> Point2f {
let st = self.cylinder(&si.p);
// Compute texture coordinate differentials for cylinder (u, v) mapping
let delta = 0.1;
let st_deltax = self.cylinder(&(si.p + si.dpdx.get() * delta));
*dstdx = (st_deltax - st) / delta;
let st_deltay = self.cylinder(&(si.p + si.dpdy.get() * delta));
*dstdy = (st_deltay - st) / delta;
// Handle sphere mapping discontinuity for coordinate differentials
if dstdx[1] > 0.5 { dstdx[1] = 1.0 - dstdx[1]; }
else if (*dstdx)[1] < -0.5 { (*dstdx)[1] = -((*dstdx)[1] + 1.0); }
if dstdy[1] > 0.5 { dstdy[1] = 1.0 - dstdy[1]; }
else if dstdy[1] < -0.5 { dstdy[1] = -(dstdy[1] + 1.0); }
st
}
}
pub struct PlannarMapping2D {
vs: Vector3f,
vt: Vector3f,
ds: Float,
dt: Float
}
impl PlannarMapping2D {
pub fn new(vs: &Vector3f, vt: &Vector3f,
ds: Float, dt: Float) -> Self {
Self {
ds,
dt,
vs: *vs,
vt: *vt
}
}
}
impl TextureMapping2D for PlannarMapping2D {
fn map(&self, si: &SurfaceInteraction, dstdx: &mut Vector2f,
dstdy: &mut Vector2f) -> Point2f {
let vec = Vector3f::from(si.p);
*dstdx = Vector2f::new(si.dpdx.get().dot(&self.vs), si.dpdx.get().dot(&self.vt));
*dstdy = Vector2f::new(si.dpdy.get().dot(&self.vs), si.dpdy.get().dot(&self.vt));
Point2f::new(self.ds + vec.dot(&self.vs), self.dt + vec.dot(&self.vt))
}
}
#[enum_dispatch]
pub trait TextureMapping3D {
fn map(&self, si: &SurfaceInteraction, dpdx: &mut Vector3f,
dpdy: &mut Vector3f) -> Point3f;
}
#[enum_dispatch(TextureMapping3D)]
pub enum TextureMapping3Ds {
IdentityMapping3D
}
pub struct IdentityMapping3D {
world_to_texture: Transform
}
impl IdentityMapping3D {
pub fn new(w2t: &Transform) -> Self {
Self { world_to_texture: *w2t }
}
}
impl TextureMapping3D for IdentityMapping3D {
fn map(&self, si: &SurfaceInteraction, dpdx: &mut Vector3f,
dpdy: &mut Vector3f) -> Point3f {
*dpdx = self.world_to_texture.transform_vector(&si.dpdx.get());
*dpdy = self.world_to_texture.transform_vector(&si.dpdy.get());
self.world_to_texture.transform_point(&si.p)
}
}
pub fn lanczos(mut x: Float, tau: Float) -> Float {
x = x.abs();
if x < 1.0e-5 { return 1.0; }
if x > 1.0 { return 0.0; }
x *= PI;
let s = (x * tau).sin() / ( x * tau);
let lanc = x.sin() / x;
s * lanc
}
pub fn noise(x: Float, y: Float, z: Float) -> Float {
let mut ix = x.floor() as usize;
let mut iy = y.floor() as usize;
let mut iz = z.floor() as usize;
let dx = x - ix as Float;
let dy = y - iy as Float;
let dz = z - iz as Float;
// Compute gradient weights
ix &= NOISE_PERM_SIZE - 1;
iy &= NOISE_PERM_SIZE - 1;
iz &= NOISE_PERM_SIZE - 1;
let w000 = grad(ix, iy, iz, dx, dy, dz);
let w100 = grad(ix + 1, iy, iz, dx - 1.0, dy, dz);
let w010 = grad(ix, iy + 1, iz, dx, dy - 1.0, dz);
let w110 = grad(ix + 1, iy + 1, iz, dx - 1.0, dy - 1.0, dz);
let w001 = grad(ix, iy, iz + 1, dx, dy, dz - 1.0);
let w101 = grad(ix + 1, iy, iz + 1, dx - 1.0, dy, dz - 1.0);
let w011 = grad(ix, iy + 1, iz + 1, dx, dy - 1.0, dz - 1.0);
let w111 = grad(ix + 1, iy + 1, iz + 1, dx - 1.0, dy - 1.0, dz - 1.0);
// Compute trilinear interpolation of weights
let wx = noise_weight(dx);
let wy = noise_weight(dy);
let wz = noise_weight(dz);
let x00 = lerp(wx, w000, w100);
let x10 = lerp(wx, w010, w110);
let x01 = lerp(wx, w001, w101);
let x11 = lerp(wx, w011, w111);
let y0 = lerp(wy, x00, x10);
let y1 = lerp(wy, x01, x11);
lerp(wz, y0, y1)
}
pub fn noisep(p: Point3f) -> Float {
noise(p.x, p.y, p.z)
}
fn grad(x: usize, y: usize, z: usize, dx: Float, dy: Float, dz: Float) -> Float {
let mut h = NOISE_PERM[NOISE_PERM[NOISE_PERM[x] + y] + z];
h &= 15;
let u = if h < 8 || h == 12 || h == 13 { dx } else { dy };
let v = if h < 4 || h == 12 || h == 13 { dy } else { dz };
(if (h & 1) != 0 { -u } else { u }) + (if (h & 2) != 0 { -v } else { v })
}
fn noise_weight(t: Float) -> Float {
let t3 = t * t * t;
let t4 = t3 * t;
6.0 * t4 * t - 15.0 * t4 + 10.0 * t3
}
pub fn fbm(
p: &Point3f, dpdx: &Vector3f, dpdy: &Vector3f,
omega: Float, max_octaves: usize) -> Float {
// Compute number of octaves for antialiased FBm
let len2 = dpdx.length_squared().max(dpdy.length_squared());
let n = clamp(-1.0 - 0.5 * log2(len2), 0.0, max_octaves as Float);
let nint = n.floor() as usize;
// Compute sum of octaves of noise for fbm
let (mut sum, mut lambda, mut o) = (0.0, 1.0, 1.0);
for _i in 0..nint {
sum += o * noisep(*p * lambda);
lambda *= 1.99;
o *= omega;
}
let npartial = n - nint as Float;
sum += o * smooth_step(0.3, 0.7, npartial) * noisep(*p * lambda);
sum
}
pub fn turbulence(
p: &Point3f, dpdx: &Vector3f, dpdy: &Vector3f,
omega: Float, max_octaves: usize) -> Float {
// Compute number of octaves for antialiased FBm
let len2 = dpdx.length_squared().max(dpdy.length_squared());
let n = clamp(-1.0 - 0.5 * len2.log2(), 0.0, max_octaves as Float);
let nint = n.floor() as usize;
// Compute sum of octaves of noise for turbulence
let (mut sum, mut lambda, mut o) = (0.0, 1.0, 1.0);
for _i in 0..nint {
sum += o + noisep(*p * lambda).abs();
lambda *= 1.99;
o *= omega;
}
// Account for contributions of clamped octaves in turbulence
let npartial = n - nint as Float;
sum += o + lerp(
smooth_step(0.3, 0.7, npartial),
0.2,
noisep(*p * lambda).abs());
for _i in nint..max_octaves {
sum += o * 0.2;
o *= omega;
}
sum
}
fn smooth_step(min: Float, max: Float, value: Float) -> Float {
let v = clamp((value - min) / (max - min), 0.0, 1.0);
v * v * (-2.0 * v + 3.0)
}
pub fn get_mapping2d(t2w: &Transform, tp: &mut TextureParams) -> TextureMapping2Ds {
let ty = tp.find_string("mapping", "uv");
match ty.as_str() {
"uv" => {
let su = tp.find_float("uscale", 1.0);
let sv = tp.find_float("vscale", 1.0);
let du = tp.find_float("udelta", 0.0);
let dv = tp.find_float("vdelta", 0.0);
UVMapping2D::new(su, sv, du, dv).into()
},
"planar" => {
let vs = tp.find_vector3f("v1", Vector3f::new(1.0, 0.0, 0.0));
let vt = tp.find_vector3f("v2", Vector3f::new(0.0, 1.0, 0.0));
let ds = tp.find_float("udelta", 0.0);
let dt = tp.find_float("vdelta", 0.0);
PlannarMapping2D::new(&vs, &vt, ds, dt).into()
}
"spherical" => SphericalMapping2D::new(&Transform::inverse(t2w)).into(),
"cylindrical" => CylindricalMapping2D::new(&Transform::inverse(t2w)).into(),
_ => {
error!("2D texture mapping \"{}\" unknown", ty);
UVMapping2D::new(1.0, 1.0, 0.0, 0.0).into()
}
}
} | {
Self { su, sv, du, dv }
} | identifier_body |
batches.rs | #[macro_use]
extern crate clap;
use clap::{Command, Arg, ArgAction};
use rusqlite as rs;
use std::path::Path;
use std::error::Error;
use std::vec::Vec;
use std::ffi::OsString;
use std::collections::HashSet;
use chrono::Local;
use chrono::DateTime;
use chrono::Datelike;
use glob::glob;
use count_write::CountWrite;
use zstd::stream::copy_decode;
use zstd::stream::write::Encoder;
use zstd::stream::read::Decoder;
use tar::Builder;
use tar::Header;
use std::io::{SeekFrom, Seek};
use std::fs::File;
use std::fs::OpenOptions;
use std::ffi::OsStr;
fn main() {
let matches = Command::new("Karmator maintance batch")
.version(crate_version!())
.author(crate_authors!("\n"))
.about("Handles the maintance work for karmator")
.subcommand(
Command::new("runs")
.about("Detect runs of votes")
.arg(
Arg::new("min")
.short('m')
.help("Min count of runs before outputting")
.default_value("20")
.value_parser(value_parser!(u32).range(1..)),
)
.arg(
Arg::new("delete")
.long("delete")
.help("Delete the runs detected")
.action(ArgAction::SetTrue),
)
.arg(
Arg::new("FILE")
.help("Database file to operate on")
.required(true),
),
)
.subcommand(
Command::new("prune")
.about("Prune and pack up old backups")
.arg(
Arg::new("delete")
.long("delete")
.help("Delete the old files")
.action(ArgAction::SetTrue),
)
.arg(
Arg::new("skip")
.long("skip")
.help("Skip compacting old files")
.action(ArgAction::SetTrue),
)
.arg(
Arg::new("BACKUPS")
.help("Backup directory to prune")
.required(true),
),
)
.get_matches();
match matches.subcommand() {
Some(("runs", m)) => {
let filename = m.get_one::<String>("FILE").unwrap();
let min = m.get_one::<u32>("min").unwrap();
let delete = m.contains_id("delete");
run(filename, *min, delete)
},
Some(("prune", m)) => {
let directory = m.get_one::<String>("BACKUPS").unwrap();
let delete = m.contains_id("delete");
let skip = m.contains_id("skip");
prune(directory, delete, skip)
}
_ => {
println!("meh do --help yourself");
Ok(())
},
}.unwrap();
}
#[derive(Debug, Clone)]
struct Vote {
id: i32,
by_whom_name: String,
for_what_name: String,
amount: i8,
}
impl PartialEq for Vote {
fn eq(&self, other: &Self) -> bool {
(self.by_whom_name == other.by_whom_name)
&& (self.for_what_name == other.for_what_name)
&& (self.amount == other.amount)
}
}
#[derive(Debug)]
struct RunVal {
oldest_id: i32,
newest_id: i32,
by_whom_name: String,
for_what_name: String,
amount: i8,
count: u32,
}
fn get_run_val(srv: &Vote, pv: &Vote, count: u32) -> RunVal {
RunVal {
oldest_id: srv.id,
newest_id: pv.id,
by_whom_name: srv.by_whom_name.clone(),
for_what_name: srv.for_what_name.clone(),
amount: srv.amount,
count: count,
}
}
fn str_amount(amount: i8) -> &'static str {
match amount {
-1 => "Down",
0 => "Side",
1 => "Up",
_ => panic!("invalid amount"),
}
}
fn run(filename: &str, min: u32, delete: bool) -> Result<(), Box<dyn Error>> {
let conn =
rs::Connection::open_with_flags(Path::new(filename), rs::OpenFlags::SQLITE_OPEN_READ_WRITE)
.expect(&format!("Connection error: {}", filename));
let mut stmt = conn.prepare("SELECT id, by_whom_name, for_what_name, amount FROM votes")?;
let vote_iter = stmt.query_map(rs::params![], |row| {
Ok(Vote {
id: row.get(0)?,
by_whom_name: row.get(1)?,
for_what_name: row.get(2)?,
amount: row.get(3)?,
})
})?;
// Time to compute the run
let mut runs = Vec::new();
let mut start_run_vote = None;
let mut prev_vote = None;
let mut count = 0;
for rvote in vote_iter {
let vote = rvote?;
match (&start_run_vote, &prev_vote) {
(None, None) => {
start_run_vote = Some(vote.clone());
prev_vote = Some(vote);
count = 1; // Run of 1
}
(Some(srv), Some(pv)) => {
if pv == &vote {
// Current vote + prev vote are the same, inc prev vote
prev_vote = Some(vote);
count += 1;
} else {
// Current vote != prev vote, record the run, and reset
runs.push(get_run_val(srv, pv, count));
start_run_vote = Some(vote.clone());
prev_vote = Some(vote);
count = 1; // Run of 1
}
}
(_, _) => panic!("Shouldn't happen"),
};
}
// Record the last run
runs.push(get_run_val(
&start_run_vote.unwrap(),
&prev_vote.unwrap(),
count,
));
if delete {
// Scan and delete the offenders
let mut stmt = conn.prepare("DELETE FROM votes WHERE id >= ? and id <= ?")?;
for r in &runs {
if r.count > min {
let deleted = stmt.execute(rs::params![r.oldest_id, r.newest_id])?;
if (r.count as usize) != deleted {
panic!("Expected: {} to be deleted, got {}", r.count, deleted);
}
}
}
} else {
// Now we can scan for anything that > min and print them
println!(
"{: >8}, {: >8}, {: >14.14}, {: >14.14}, {: >6}, {: >5}",
"start_id", "end_id", "by_whom_name", "for_what_name", "amount", "count"
);
for r in &runs {
if r.count > min {
println!(
"{: >8}, {: >8}, {: >14.14}, {: >14.14}, {: >6}, {: >5}",
r.oldest_id,
r.newest_id,
r.by_whom_name,
r.for_what_name,
str_amount(r.amount),
r.count
);
}
}
}
Ok(())
}
fn prune(directory: &str, delete: bool, skip: bool) -> Result<(), Box<dyn Error>> {
let now: DateTime<Local> = Local::now();
let year = now.year();
let month = now.month();
// Fetch a set of all of the files
let all_files = collect_glob(directory, "/db-backup-????-??-??.sqlite.zst");
// Fetch a set of all of the file in the current month+year
let current_month_year = collect_glob(directory, &format!("/db-backup-{}-{:02}-??.sqlite.zst", year, month));
// Fetch a set of all of the file that is in previous year + first of the month
let previous_first_month = collect_glob(directory, &format!("/db-backup-{}-??-01.sqlite.zst", year - 1));
// Fetch a set of all of the file that is in current year + first of the month
let current_first_month = collect_glob(directory, &format!("/db-backup-{}-??-01.sqlite.zst", year));
// Calculate the initial set of files to prune
let mut delete_files = delete_set(&all_files, vec![¤t_month_year, &previous_first_month, ¤t_first_month]);
// Compact pfm + cfm into their years
if skip {
println!("Compacting: Skipped");
} else {
if previous_first_month.len() == 12 {
let tarfile = format!("{}/db-backup-{}.tar.zst", directory, year-1);
print_compact(&previous_first_month, &tarfile)?;
delete_files.extend(previous_first_month.iter().map(|e| e.clone()));
}
if current_first_month.len() == 12 {
let tarfile = format!("{}/db-backup-{}.tar.zst", directory, year);
print_compact(¤t_first_month, &tarfile)?;
delete_files.extend(current_first_month.iter().map(|e| e.clone()));
}
}
// List the files we are going to delete
print_delete(&delete_files, delete)?;
Ok(())
}
fn collect_glob(directory: &str, glob_str: &str) -> HashSet<OsString> {
glob(&(directory.to_string() + glob_str)).unwrap()
.flatten()
.map(|e| e.into_os_string())
.collect::<HashSet<OsString>>()
}
fn delete_set(all: &HashSet<OsString>, keep: Vec<&HashSet<OsString>>) -> HashSet<OsString> {
let mut delete = all.clone();
for hs in keep {
let out = delete.difference(&hs).map(|e| e.clone()).collect();
delete = out;
}
delete
}
fn | (compact: &HashSet<OsString>, filename: &str) -> Result<(), Box<dyn Error>> {
let tarfile = OpenOptions::new()
.write(true)
.create_new(true)
.open(filename);
let mut tar = Builder::new(Encoder::new(tarfile?, 21)?.auto_finish());
for f in compact.iter() {
let mut file = File::open(f)?;
let filename = Path::new(f).file_name().unwrap();
let filesize = {
let mut count = CountWrite::from(std::io::sink());
copy_decode(&file, &mut count)?;
count.count()
};
let mut header = Header::new_gnu();
header.set_path(filename)?;
header.set_size(filesize);
header.set_cksum();
file.seek(SeekFrom::Start(0))?;
tar.append(
&header,
Decoder::new(std::fs::File::open(f)?)?
)?;
}
tar.finish()?;
Ok(())
}
fn print_compact(to_compact: &HashSet<OsString>, tarfile: &str) -> Result<(), Box<dyn Error>> {
println!("Compacting: {}", tarfile);
let mut print = to_compact.iter()
.map(|e| Path::new(e).file_name().unwrap())
.collect::<Vec<&OsStr>>();
print.sort();
for i in print.iter() {
println!("\t{:?}", i);
}
compact(&to_compact, &tarfile)?;
Ok(())
}
fn print_delete(to_delete: &HashSet<OsString>, delete: bool) -> Result<(), Box<dyn Error>> {
println!("Deleting:");
let mut print = to_delete.iter().collect::<Vec<&OsString>>();
print.sort();
for i in print.iter() {
let path = Path::new(i);
println!("\t{:?}", path.file_name().unwrap());
if delete {
std::fs::remove_file(path)?;
}
}
Ok(())
}
| compact | identifier_name |
batches.rs | #[macro_use]
extern crate clap;
use clap::{Command, Arg, ArgAction};
use rusqlite as rs;
use std::path::Path;
use std::error::Error;
use std::vec::Vec;
use std::ffi::OsString;
use std::collections::HashSet;
use chrono::Local;
use chrono::DateTime;
use chrono::Datelike;
use glob::glob;
use count_write::CountWrite;
use zstd::stream::copy_decode;
use zstd::stream::write::Encoder;
use zstd::stream::read::Decoder;
use tar::Builder;
use tar::Header;
use std::io::{SeekFrom, Seek};
use std::fs::File;
use std::fs::OpenOptions;
use std::ffi::OsStr;
fn main() {
let matches = Command::new("Karmator maintance batch")
.version(crate_version!())
.author(crate_authors!("\n"))
.about("Handles the maintance work for karmator")
.subcommand(
Command::new("runs")
.about("Detect runs of votes")
.arg(
Arg::new("min")
.short('m')
.help("Min count of runs before outputting")
.default_value("20")
.value_parser(value_parser!(u32).range(1..)),
)
.arg(
Arg::new("delete")
.long("delete")
.help("Delete the runs detected")
.action(ArgAction::SetTrue),
)
.arg(
Arg::new("FILE")
.help("Database file to operate on")
.required(true),
),
)
.subcommand(
Command::new("prune")
.about("Prune and pack up old backups")
.arg(
Arg::new("delete")
.long("delete")
.help("Delete the old files")
.action(ArgAction::SetTrue),
)
.arg(
Arg::new("skip")
.long("skip")
.help("Skip compacting old files")
.action(ArgAction::SetTrue),
)
.arg(
Arg::new("BACKUPS")
.help("Backup directory to prune")
.required(true),
),
)
.get_matches();
match matches.subcommand() {
Some(("runs", m)) => {
let filename = m.get_one::<String>("FILE").unwrap();
let min = m.get_one::<u32>("min").unwrap();
let delete = m.contains_id("delete");
run(filename, *min, delete)
},
Some(("prune", m)) => {
let directory = m.get_one::<String>("BACKUPS").unwrap();
let delete = m.contains_id("delete");
let skip = m.contains_id("skip");
prune(directory, delete, skip)
}
_ => {
println!("meh do --help yourself");
Ok(())
},
}.unwrap();
}
#[derive(Debug, Clone)]
struct Vote {
id: i32,
by_whom_name: String,
for_what_name: String,
amount: i8,
}
impl PartialEq for Vote {
fn eq(&self, other: &Self) -> bool {
(self.by_whom_name == other.by_whom_name)
&& (self.for_what_name == other.for_what_name)
&& (self.amount == other.amount)
}
}
#[derive(Debug)]
struct RunVal {
oldest_id: i32,
newest_id: i32,
by_whom_name: String,
for_what_name: String,
amount: i8,
count: u32,
}
fn get_run_val(srv: &Vote, pv: &Vote, count: u32) -> RunVal {
RunVal {
oldest_id: srv.id,
newest_id: pv.id,
by_whom_name: srv.by_whom_name.clone(),
for_what_name: srv.for_what_name.clone(),
amount: srv.amount,
count: count,
}
}
fn str_amount(amount: i8) -> &'static str {
match amount {
-1 => "Down",
0 => "Side",
1 => "Up",
_ => panic!("invalid amount"),
}
}
fn run(filename: &str, min: u32, delete: bool) -> Result<(), Box<dyn Error>> {
let conn =
rs::Connection::open_with_flags(Path::new(filename), rs::OpenFlags::SQLITE_OPEN_READ_WRITE)
.expect(&format!("Connection error: {}", filename));
let mut stmt = conn.prepare("SELECT id, by_whom_name, for_what_name, amount FROM votes")?;
let vote_iter = stmt.query_map(rs::params![], |row| {
Ok(Vote {
id: row.get(0)?,
by_whom_name: row.get(1)?,
for_what_name: row.get(2)?,
amount: row.get(3)?,
})
})?;
// Time to compute the run
let mut runs = Vec::new();
let mut start_run_vote = None;
let mut prev_vote = None;
let mut count = 0;
for rvote in vote_iter {
let vote = rvote?;
match (&start_run_vote, &prev_vote) {
(None, None) => {
start_run_vote = Some(vote.clone());
prev_vote = Some(vote);
count = 1; // Run of 1
}
(Some(srv), Some(pv)) => {
if pv == &vote {
// Current vote + prev vote are the same, inc prev vote
prev_vote = Some(vote);
count += 1;
} else {
// Current vote != prev vote, record the run, and reset
runs.push(get_run_val(srv, pv, count));
start_run_vote = Some(vote.clone());
prev_vote = Some(vote);
count = 1; // Run of 1
}
}
(_, _) => panic!("Shouldn't happen"),
};
}
// Record the last run
runs.push(get_run_val(
&start_run_vote.unwrap(),
&prev_vote.unwrap(),
count,
));
if delete {
// Scan and delete the offenders
let mut stmt = conn.prepare("DELETE FROM votes WHERE id >= ? and id <= ?")?;
for r in &runs {
if r.count > min {
let deleted = stmt.execute(rs::params![r.oldest_id, r.newest_id])?;
if (r.count as usize) != deleted {
panic!("Expected: {} to be deleted, got {}", r.count, deleted);
}
}
}
} else {
// Now we can scan for anything that > min and print them
println!(
"{: >8}, {: >8}, {: >14.14}, {: >14.14}, {: >6}, {: >5}",
"start_id", "end_id", "by_whom_name", "for_what_name", "amount", "count"
);
for r in &runs {
if r.count > min {
println!(
"{: >8}, {: >8}, {: >14.14}, {: >14.14}, {: >6}, {: >5}",
r.oldest_id,
r.newest_id,
r.by_whom_name,
r.for_what_name,
str_amount(r.amount),
r.count
);
}
}
}
Ok(())
}
fn prune(directory: &str, delete: bool, skip: bool) -> Result<(), Box<dyn Error>> {
let now: DateTime<Local> = Local::now();
let year = now.year();
let month = now.month();
// Fetch a set of all of the files
let all_files = collect_glob(directory, "/db-backup-????-??-??.sqlite.zst");
// Fetch a set of all of the file in the current month+year
let current_month_year = collect_glob(directory, &format!("/db-backup-{}-{:02}-??.sqlite.zst", year, month));
// Fetch a set of all of the file that is in previous year + first of the month
let previous_first_month = collect_glob(directory, &format!("/db-backup-{}-??-01.sqlite.zst", year - 1));
// Fetch a set of all of the file that is in current year + first of the month
let current_first_month = collect_glob(directory, &format!("/db-backup-{}-??-01.sqlite.zst", year));
// Calculate the initial set of files to prune
let mut delete_files = delete_set(&all_files, vec![¤t_month_year, &previous_first_month, ¤t_first_month]);
// Compact pfm + cfm into their years
if skip {
println!("Compacting: Skipped");
} else {
if previous_first_month.len() == 12 {
let tarfile = format!("{}/db-backup-{}.tar.zst", directory, year-1);
print_compact(&previous_first_month, &tarfile)?;
delete_files.extend(previous_first_month.iter().map(|e| e.clone()));
}
if current_first_month.len() == 12 {
let tarfile = format!("{}/db-backup-{}.tar.zst", directory, year);
print_compact(¤t_first_month, &tarfile)?;
delete_files.extend(current_first_month.iter().map(|e| e.clone()));
}
}
// List the files we are going to delete
print_delete(&delete_files, delete)?;
Ok(())
}
fn collect_glob(directory: &str, glob_str: &str) -> HashSet<OsString> |
fn delete_set(all: &HashSet<OsString>, keep: Vec<&HashSet<OsString>>) -> HashSet<OsString> {
let mut delete = all.clone();
for hs in keep {
let out = delete.difference(&hs).map(|e| e.clone()).collect();
delete = out;
}
delete
}
fn compact(compact: &HashSet<OsString>, filename: &str) -> Result<(), Box<dyn Error>> {
let tarfile = OpenOptions::new()
.write(true)
.create_new(true)
.open(filename);
let mut tar = Builder::new(Encoder::new(tarfile?, 21)?.auto_finish());
for f in compact.iter() {
let mut file = File::open(f)?;
let filename = Path::new(f).file_name().unwrap();
let filesize = {
let mut count = CountWrite::from(std::io::sink());
copy_decode(&file, &mut count)?;
count.count()
};
let mut header = Header::new_gnu();
header.set_path(filename)?;
header.set_size(filesize);
header.set_cksum();
file.seek(SeekFrom::Start(0))?;
tar.append(
&header,
Decoder::new(std::fs::File::open(f)?)?
)?;
}
tar.finish()?;
Ok(())
}
fn print_compact(to_compact: &HashSet<OsString>, tarfile: &str) -> Result<(), Box<dyn Error>> {
println!("Compacting: {}", tarfile);
let mut print = to_compact.iter()
.map(|e| Path::new(e).file_name().unwrap())
.collect::<Vec<&OsStr>>();
print.sort();
for i in print.iter() {
println!("\t{:?}", i);
}
compact(&to_compact, &tarfile)?;
Ok(())
}
fn print_delete(to_delete: &HashSet<OsString>, delete: bool) -> Result<(), Box<dyn Error>> {
println!("Deleting:");
let mut print = to_delete.iter().collect::<Vec<&OsString>>();
print.sort();
for i in print.iter() {
let path = Path::new(i);
println!("\t{:?}", path.file_name().unwrap());
if delete {
std::fs::remove_file(path)?;
}
}
Ok(())
}
| {
glob(&(directory.to_string() + glob_str)).unwrap()
.flatten()
.map(|e| e.into_os_string())
.collect::<HashSet<OsString>>()
} | identifier_body |
batches.rs | #[macro_use]
extern crate clap;
use clap::{Command, Arg, ArgAction};
use rusqlite as rs;
use std::path::Path;
use std::error::Error;
use std::vec::Vec;
use std::ffi::OsString;
use std::collections::HashSet;
use chrono::Local;
use chrono::DateTime;
use chrono::Datelike;
use glob::glob;
use count_write::CountWrite;
use zstd::stream::copy_decode;
use zstd::stream::write::Encoder;
use zstd::stream::read::Decoder;
use tar::Builder;
use tar::Header;
use std::io::{SeekFrom, Seek};
use std::fs::File;
use std::fs::OpenOptions;
use std::ffi::OsStr;
fn main() {
let matches = Command::new("Karmator maintance batch")
.version(crate_version!())
.author(crate_authors!("\n"))
.about("Handles the maintance work for karmator")
.subcommand(
Command::new("runs")
.about("Detect runs of votes")
.arg(
Arg::new("min")
.short('m')
.help("Min count of runs before outputting")
.default_value("20")
.value_parser(value_parser!(u32).range(1..)),
)
.arg(
Arg::new("delete")
.long("delete")
.help("Delete the runs detected")
.action(ArgAction::SetTrue),
)
.arg(
Arg::new("FILE")
.help("Database file to operate on")
.required(true),
),
)
.subcommand(
Command::new("prune")
.about("Prune and pack up old backups")
.arg(
Arg::new("delete")
.long("delete")
.help("Delete the old files")
.action(ArgAction::SetTrue),
)
.arg(
Arg::new("skip")
.long("skip")
.help("Skip compacting old files")
.action(ArgAction::SetTrue),
)
.arg(
Arg::new("BACKUPS")
.help("Backup directory to prune")
.required(true),
),
)
.get_matches();
match matches.subcommand() {
Some(("runs", m)) => { | let delete = m.contains_id("delete");
run(filename, *min, delete)
},
Some(("prune", m)) => {
let directory = m.get_one::<String>("BACKUPS").unwrap();
let delete = m.contains_id("delete");
let skip = m.contains_id("skip");
prune(directory, delete, skip)
}
_ => {
println!("meh do --help yourself");
Ok(())
},
}.unwrap();
}
#[derive(Debug, Clone)]
struct Vote {
id: i32,
by_whom_name: String,
for_what_name: String,
amount: i8,
}
impl PartialEq for Vote {
fn eq(&self, other: &Self) -> bool {
(self.by_whom_name == other.by_whom_name)
&& (self.for_what_name == other.for_what_name)
&& (self.amount == other.amount)
}
}
#[derive(Debug)]
struct RunVal {
oldest_id: i32,
newest_id: i32,
by_whom_name: String,
for_what_name: String,
amount: i8,
count: u32,
}
fn get_run_val(srv: &Vote, pv: &Vote, count: u32) -> RunVal {
RunVal {
oldest_id: srv.id,
newest_id: pv.id,
by_whom_name: srv.by_whom_name.clone(),
for_what_name: srv.for_what_name.clone(),
amount: srv.amount,
count: count,
}
}
fn str_amount(amount: i8) -> &'static str {
match amount {
-1 => "Down",
0 => "Side",
1 => "Up",
_ => panic!("invalid amount"),
}
}
fn run(filename: &str, min: u32, delete: bool) -> Result<(), Box<dyn Error>> {
let conn =
rs::Connection::open_with_flags(Path::new(filename), rs::OpenFlags::SQLITE_OPEN_READ_WRITE)
.expect(&format!("Connection error: {}", filename));
let mut stmt = conn.prepare("SELECT id, by_whom_name, for_what_name, amount FROM votes")?;
let vote_iter = stmt.query_map(rs::params![], |row| {
Ok(Vote {
id: row.get(0)?,
by_whom_name: row.get(1)?,
for_what_name: row.get(2)?,
amount: row.get(3)?,
})
})?;
// Time to compute the run
let mut runs = Vec::new();
let mut start_run_vote = None;
let mut prev_vote = None;
let mut count = 0;
for rvote in vote_iter {
let vote = rvote?;
match (&start_run_vote, &prev_vote) {
(None, None) => {
start_run_vote = Some(vote.clone());
prev_vote = Some(vote);
count = 1; // Run of 1
}
(Some(srv), Some(pv)) => {
if pv == &vote {
// Current vote + prev vote are the same, inc prev vote
prev_vote = Some(vote);
count += 1;
} else {
// Current vote != prev vote, record the run, and reset
runs.push(get_run_val(srv, pv, count));
start_run_vote = Some(vote.clone());
prev_vote = Some(vote);
count = 1; // Run of 1
}
}
(_, _) => panic!("Shouldn't happen"),
};
}
// Record the last run
runs.push(get_run_val(
&start_run_vote.unwrap(),
&prev_vote.unwrap(),
count,
));
if delete {
// Scan and delete the offenders
let mut stmt = conn.prepare("DELETE FROM votes WHERE id >= ? and id <= ?")?;
for r in &runs {
if r.count > min {
let deleted = stmt.execute(rs::params![r.oldest_id, r.newest_id])?;
if (r.count as usize) != deleted {
panic!("Expected: {} to be deleted, got {}", r.count, deleted);
}
}
}
} else {
// Now we can scan for anything that > min and print them
println!(
"{: >8}, {: >8}, {: >14.14}, {: >14.14}, {: >6}, {: >5}",
"start_id", "end_id", "by_whom_name", "for_what_name", "amount", "count"
);
for r in &runs {
if r.count > min {
println!(
"{: >8}, {: >8}, {: >14.14}, {: >14.14}, {: >6}, {: >5}",
r.oldest_id,
r.newest_id,
r.by_whom_name,
r.for_what_name,
str_amount(r.amount),
r.count
);
}
}
}
Ok(())
}
fn prune(directory: &str, delete: bool, skip: bool) -> Result<(), Box<dyn Error>> {
let now: DateTime<Local> = Local::now();
let year = now.year();
let month = now.month();
// Fetch a set of all of the files
let all_files = collect_glob(directory, "/db-backup-????-??-??.sqlite.zst");
// Fetch a set of all of the file in the current month+year
let current_month_year = collect_glob(directory, &format!("/db-backup-{}-{:02}-??.sqlite.zst", year, month));
// Fetch a set of all of the file that is in previous year + first of the month
let previous_first_month = collect_glob(directory, &format!("/db-backup-{}-??-01.sqlite.zst", year - 1));
// Fetch a set of all of the file that is in current year + first of the month
let current_first_month = collect_glob(directory, &format!("/db-backup-{}-??-01.sqlite.zst", year));
// Calculate the initial set of files to prune
let mut delete_files = delete_set(&all_files, vec![¤t_month_year, &previous_first_month, ¤t_first_month]);
// Compact pfm + cfm into their years
if skip {
println!("Compacting: Skipped");
} else {
if previous_first_month.len() == 12 {
let tarfile = format!("{}/db-backup-{}.tar.zst", directory, year-1);
print_compact(&previous_first_month, &tarfile)?;
delete_files.extend(previous_first_month.iter().map(|e| e.clone()));
}
if current_first_month.len() == 12 {
let tarfile = format!("{}/db-backup-{}.tar.zst", directory, year);
print_compact(¤t_first_month, &tarfile)?;
delete_files.extend(current_first_month.iter().map(|e| e.clone()));
}
}
// List the files we are going to delete
print_delete(&delete_files, delete)?;
Ok(())
}
fn collect_glob(directory: &str, glob_str: &str) -> HashSet<OsString> {
glob(&(directory.to_string() + glob_str)).unwrap()
.flatten()
.map(|e| e.into_os_string())
.collect::<HashSet<OsString>>()
}
fn delete_set(all: &HashSet<OsString>, keep: Vec<&HashSet<OsString>>) -> HashSet<OsString> {
let mut delete = all.clone();
for hs in keep {
let out = delete.difference(&hs).map(|e| e.clone()).collect();
delete = out;
}
delete
}
fn compact(compact: &HashSet<OsString>, filename: &str) -> Result<(), Box<dyn Error>> {
let tarfile = OpenOptions::new()
.write(true)
.create_new(true)
.open(filename);
let mut tar = Builder::new(Encoder::new(tarfile?, 21)?.auto_finish());
for f in compact.iter() {
let mut file = File::open(f)?;
let filename = Path::new(f).file_name().unwrap();
let filesize = {
let mut count = CountWrite::from(std::io::sink());
copy_decode(&file, &mut count)?;
count.count()
};
let mut header = Header::new_gnu();
header.set_path(filename)?;
header.set_size(filesize);
header.set_cksum();
file.seek(SeekFrom::Start(0))?;
tar.append(
&header,
Decoder::new(std::fs::File::open(f)?)?
)?;
}
tar.finish()?;
Ok(())
}
fn print_compact(to_compact: &HashSet<OsString>, tarfile: &str) -> Result<(), Box<dyn Error>> {
println!("Compacting: {}", tarfile);
let mut print = to_compact.iter()
.map(|e| Path::new(e).file_name().unwrap())
.collect::<Vec<&OsStr>>();
print.sort();
for i in print.iter() {
println!("\t{:?}", i);
}
compact(&to_compact, &tarfile)?;
Ok(())
}
fn print_delete(to_delete: &HashSet<OsString>, delete: bool) -> Result<(), Box<dyn Error>> {
println!("Deleting:");
let mut print = to_delete.iter().collect::<Vec<&OsString>>();
print.sort();
for i in print.iter() {
let path = Path::new(i);
println!("\t{:?}", path.file_name().unwrap());
if delete {
std::fs::remove_file(path)?;
}
}
Ok(())
} | let filename = m.get_one::<String>("FILE").unwrap();
let min = m.get_one::<u32>("min").unwrap(); | random_line_split |
batches.rs | #[macro_use]
extern crate clap;
use clap::{Command, Arg, ArgAction};
use rusqlite as rs;
use std::path::Path;
use std::error::Error;
use std::vec::Vec;
use std::ffi::OsString;
use std::collections::HashSet;
use chrono::Local;
use chrono::DateTime;
use chrono::Datelike;
use glob::glob;
use count_write::CountWrite;
use zstd::stream::copy_decode;
use zstd::stream::write::Encoder;
use zstd::stream::read::Decoder;
use tar::Builder;
use tar::Header;
use std::io::{SeekFrom, Seek};
use std::fs::File;
use std::fs::OpenOptions;
use std::ffi::OsStr;
fn main() {
let matches = Command::new("Karmator maintance batch")
.version(crate_version!())
.author(crate_authors!("\n"))
.about("Handles the maintance work for karmator")
.subcommand(
Command::new("runs")
.about("Detect runs of votes")
.arg(
Arg::new("min")
.short('m')
.help("Min count of runs before outputting")
.default_value("20")
.value_parser(value_parser!(u32).range(1..)),
)
.arg(
Arg::new("delete")
.long("delete")
.help("Delete the runs detected")
.action(ArgAction::SetTrue),
)
.arg(
Arg::new("FILE")
.help("Database file to operate on")
.required(true),
),
)
.subcommand(
Command::new("prune")
.about("Prune and pack up old backups")
.arg(
Arg::new("delete")
.long("delete")
.help("Delete the old files")
.action(ArgAction::SetTrue),
)
.arg(
Arg::new("skip")
.long("skip")
.help("Skip compacting old files")
.action(ArgAction::SetTrue),
)
.arg(
Arg::new("BACKUPS")
.help("Backup directory to prune")
.required(true),
),
)
.get_matches();
match matches.subcommand() {
Some(("runs", m)) => {
let filename = m.get_one::<String>("FILE").unwrap();
let min = m.get_one::<u32>("min").unwrap();
let delete = m.contains_id("delete");
run(filename, *min, delete)
},
Some(("prune", m)) => {
let directory = m.get_one::<String>("BACKUPS").unwrap();
let delete = m.contains_id("delete");
let skip = m.contains_id("skip");
prune(directory, delete, skip)
}
_ => {
println!("meh do --help yourself");
Ok(())
},
}.unwrap();
}
#[derive(Debug, Clone)]
struct Vote {
id: i32,
by_whom_name: String,
for_what_name: String,
amount: i8,
}
impl PartialEq for Vote {
fn eq(&self, other: &Self) -> bool {
(self.by_whom_name == other.by_whom_name)
&& (self.for_what_name == other.for_what_name)
&& (self.amount == other.amount)
}
}
#[derive(Debug)]
struct RunVal {
oldest_id: i32,
newest_id: i32,
by_whom_name: String,
for_what_name: String,
amount: i8,
count: u32,
}
fn get_run_val(srv: &Vote, pv: &Vote, count: u32) -> RunVal {
RunVal {
oldest_id: srv.id,
newest_id: pv.id,
by_whom_name: srv.by_whom_name.clone(),
for_what_name: srv.for_what_name.clone(),
amount: srv.amount,
count: count,
}
}
fn str_amount(amount: i8) -> &'static str {
match amount {
-1 => "Down",
0 => "Side",
1 => "Up",
_ => panic!("invalid amount"),
}
}
fn run(filename: &str, min: u32, delete: bool) -> Result<(), Box<dyn Error>> {
let conn =
rs::Connection::open_with_flags(Path::new(filename), rs::OpenFlags::SQLITE_OPEN_READ_WRITE)
.expect(&format!("Connection error: {}", filename));
let mut stmt = conn.prepare("SELECT id, by_whom_name, for_what_name, amount FROM votes")?;
let vote_iter = stmt.query_map(rs::params![], |row| {
Ok(Vote {
id: row.get(0)?,
by_whom_name: row.get(1)?,
for_what_name: row.get(2)?,
amount: row.get(3)?,
})
})?;
// Time to compute the run
let mut runs = Vec::new();
let mut start_run_vote = None;
let mut prev_vote = None;
let mut count = 0;
for rvote in vote_iter {
let vote = rvote?;
match (&start_run_vote, &prev_vote) {
(None, None) => {
start_run_vote = Some(vote.clone());
prev_vote = Some(vote);
count = 1; // Run of 1
}
(Some(srv), Some(pv)) => {
if pv == &vote {
// Current vote + prev vote are the same, inc prev vote
prev_vote = Some(vote);
count += 1;
} else {
// Current vote != prev vote, record the run, and reset
runs.push(get_run_val(srv, pv, count));
start_run_vote = Some(vote.clone());
prev_vote = Some(vote);
count = 1; // Run of 1
}
}
(_, _) => panic!("Shouldn't happen"),
};
}
// Record the last run
runs.push(get_run_val(
&start_run_vote.unwrap(),
&prev_vote.unwrap(),
count,
));
if delete {
// Scan and delete the offenders
let mut stmt = conn.prepare("DELETE FROM votes WHERE id >= ? and id <= ?")?;
for r in &runs {
if r.count > min {
let deleted = stmt.execute(rs::params![r.oldest_id, r.newest_id])?;
if (r.count as usize) != deleted {
panic!("Expected: {} to be deleted, got {}", r.count, deleted);
}
}
}
} else {
// Now we can scan for anything that > min and print them
println!(
"{: >8}, {: >8}, {: >14.14}, {: >14.14}, {: >6}, {: >5}",
"start_id", "end_id", "by_whom_name", "for_what_name", "amount", "count"
);
for r in &runs {
if r.count > min {
println!(
"{: >8}, {: >8}, {: >14.14}, {: >14.14}, {: >6}, {: >5}",
r.oldest_id,
r.newest_id,
r.by_whom_name,
r.for_what_name,
str_amount(r.amount),
r.count
);
}
}
}
Ok(())
}
fn prune(directory: &str, delete: bool, skip: bool) -> Result<(), Box<dyn Error>> {
let now: DateTime<Local> = Local::now();
let year = now.year();
let month = now.month();
// Fetch a set of all of the files
let all_files = collect_glob(directory, "/db-backup-????-??-??.sqlite.zst");
// Fetch a set of all of the file in the current month+year
let current_month_year = collect_glob(directory, &format!("/db-backup-{}-{:02}-??.sqlite.zst", year, month));
// Fetch a set of all of the file that is in previous year + first of the month
let previous_first_month = collect_glob(directory, &format!("/db-backup-{}-??-01.sqlite.zst", year - 1));
// Fetch a set of all of the file that is in current year + first of the month
let current_first_month = collect_glob(directory, &format!("/db-backup-{}-??-01.sqlite.zst", year));
// Calculate the initial set of files to prune
let mut delete_files = delete_set(&all_files, vec![¤t_month_year, &previous_first_month, ¤t_first_month]);
// Compact pfm + cfm into their years
if skip {
println!("Compacting: Skipped");
} else {
if previous_first_month.len() == 12 {
let tarfile = format!("{}/db-backup-{}.tar.zst", directory, year-1);
print_compact(&previous_first_month, &tarfile)?;
delete_files.extend(previous_first_month.iter().map(|e| e.clone()));
}
if current_first_month.len() == 12 {
let tarfile = format!("{}/db-backup-{}.tar.zst", directory, year);
print_compact(¤t_first_month, &tarfile)?;
delete_files.extend(current_first_month.iter().map(|e| e.clone()));
}
}
// List the files we are going to delete
print_delete(&delete_files, delete)?;
Ok(())
}
fn collect_glob(directory: &str, glob_str: &str) -> HashSet<OsString> {
glob(&(directory.to_string() + glob_str)).unwrap()
.flatten()
.map(|e| e.into_os_string())
.collect::<HashSet<OsString>>()
}
fn delete_set(all: &HashSet<OsString>, keep: Vec<&HashSet<OsString>>) -> HashSet<OsString> {
let mut delete = all.clone();
for hs in keep {
let out = delete.difference(&hs).map(|e| e.clone()).collect();
delete = out;
}
delete
}
fn compact(compact: &HashSet<OsString>, filename: &str) -> Result<(), Box<dyn Error>> {
let tarfile = OpenOptions::new()
.write(true)
.create_new(true)
.open(filename);
let mut tar = Builder::new(Encoder::new(tarfile?, 21)?.auto_finish());
for f in compact.iter() {
let mut file = File::open(f)?;
let filename = Path::new(f).file_name().unwrap();
let filesize = {
let mut count = CountWrite::from(std::io::sink());
copy_decode(&file, &mut count)?;
count.count()
};
let mut header = Header::new_gnu();
header.set_path(filename)?;
header.set_size(filesize);
header.set_cksum();
file.seek(SeekFrom::Start(0))?;
tar.append(
&header,
Decoder::new(std::fs::File::open(f)?)?
)?;
}
tar.finish()?;
Ok(())
}
fn print_compact(to_compact: &HashSet<OsString>, tarfile: &str) -> Result<(), Box<dyn Error>> {
println!("Compacting: {}", tarfile);
let mut print = to_compact.iter()
.map(|e| Path::new(e).file_name().unwrap())
.collect::<Vec<&OsStr>>();
print.sort();
for i in print.iter() {
println!("\t{:?}", i);
}
compact(&to_compact, &tarfile)?;
Ok(())
}
fn print_delete(to_delete: &HashSet<OsString>, delete: bool) -> Result<(), Box<dyn Error>> {
println!("Deleting:");
let mut print = to_delete.iter().collect::<Vec<&OsString>>();
print.sort();
for i in print.iter() {
let path = Path::new(i);
println!("\t{:?}", path.file_name().unwrap());
if delete |
}
Ok(())
}
| {
std::fs::remove_file(path)?;
} | conditional_block |
reconcile_keyspaces.go | /*
Copyright 2019 PlanetScale Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vitesscluster
import (
"context"
"sort"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
planetscalev2 "planetscale.dev/vitess-operator/pkg/apis/planetscale/v2"
"planetscale.dev/vitess-operator/pkg/operator/lockserver"
"planetscale.dev/vitess-operator/pkg/operator/reconciler"
"planetscale.dev/vitess-operator/pkg/operator/rollout"
"planetscale.dev/vitess-operator/pkg/operator/update"
"planetscale.dev/vitess-operator/pkg/operator/vitesskeyspace"
)
func (r *ReconcileVitessCluster) reconcileKeyspaces(ctx context.Context, vt *planetscalev2.VitessCluster) error {
labels := map[string]string{
planetscalev2.ClusterLabel: vt.Name,
}
// Generate keys (object names) for all desired keyspaces.
// Keep a map back from generated names to the keyspace specs.
// Oh boy it's awkward right now that the k8s client calls object names keys.
keys := make([]client.ObjectKey, 0, len(vt.Spec.Keyspaces))
keyspaceMap := make(map[client.ObjectKey]*planetscalev2.VitessKeyspaceTemplate, len(vt.Spec.Keyspaces))
for i := range vt.Spec.Keyspaces {
keyspace := &vt.Spec.Keyspaces[i]
key := client.ObjectKey{Namespace: vt.Namespace, Name: vitesskeyspace.Name(vt.Name, keyspace.Name)}
keys = append(keys, key)
keyspaceMap[key] = keyspace
// Initialize a status entry for every desired keyspace, so it will be
// listed even if we end up not having anything to report about it.
vt.Status.Keyspaces[keyspace.Name] = planetscalev2.NewVitessClusterKeyspaceStatus(keyspace)
}
return r.reconciler.ReconcileObjectSet(ctx, vt, keys, labels, reconciler.Strategy{
Kind: &planetscalev2.VitessKeyspace{},
New: func(key client.ObjectKey) runtime.Object {
return newVitessKeyspace(key, vt, labels, keyspaceMap[key])
},
UpdateInPlace: func(key client.ObjectKey, obj runtime.Object) {
newObj := obj.(*planetscalev2.VitessKeyspace)
if *vt.Spec.UpdateStrategy.Type == planetscalev2.ImmediateVitessClusterUpdateStrategyType {
updateVitessKeyspace(key, newObj, vt, labels, keyspaceMap[key])
return
}
updateVitessKeyspaceInPlace(key, newObj, vt, labels, keyspaceMap[key])
},
UpdateRollingInPlace: func(key client.ObjectKey, obj runtime.Object) {
newObj := obj.(*planetscalev2.VitessKeyspace)
if *vt.Spec.UpdateStrategy.Type == planetscalev2.ImmediateVitessClusterUpdateStrategyType {
// In this case we should use UpdateInPlace for all updates.
return
}
updateVitessKeyspace(key, newObj, vt, labels, keyspaceMap[key])
},
Status: func(key client.ObjectKey, obj runtime.Object) {
curObj := obj.(*planetscalev2.VitessKeyspace)
status := vt.Status.Keyspaces[curObj.Spec.Name]
status.PendingChanges = curObj.Annotations[rollout.ScheduledAnnotation]
status.Shards = int32(len(curObj.Status.Shards))
status.ReadyShards = 0
status.UpdatedShards = 0
status.Tablets = 0
status.ReadyTablets = 0
status.UpdatedTablets = 0
cells := map[string]struct{}{}
for _, shard := range curObj.Status.Shards {
if shard.ReadyTablets == shard.DesiredTablets {
status.ReadyShards++
}
if shard.UpdatedTablets == shard.Tablets {
status.UpdatedShards++
}
status.Tablets += shard.Tablets
status.ReadyTablets += shard.ReadyTablets
status.UpdatedTablets += shard.UpdatedTablets
for _, cell := range shard.Cells {
cells[cell] = struct{}{}
}
}
for cell := range cells {
status.Cells = append(status.Cells, cell)
}
sort.Strings(status.Cells)
vt.Status.Keyspaces[curObj.Spec.Name] = status
},
OrphanStatus: func(key client.ObjectKey, obj runtime.Object, orphanStatus *planetscalev2.OrphanStatus) {
curObj := obj.(*planetscalev2.VitessKeyspace)
vt.Status.OrphanedKeyspaces[curObj.Spec.Name] = *orphanStatus
},
PrepareForTurndown: func(key client.ObjectKey, obj runtime.Object) *planetscalev2.OrphanStatus {
curObj := obj.(*planetscalev2.VitessKeyspace)
// Make sure it's ok to delete this keyspace.
// The user may specify to skip turndown safety checks.
if curObj.Spec.TurndownPolicy == planetscalev2.VitessKeyspaceTurndownPolicyImmediate {
return nil
}
// Otherwise, we err on the safe side since losing a keyspace accidentally is very disruptive.
if curObj.Status.Idle == corev1.ConditionTrue {
// The keyspace is not depoyed in any cells.
return nil
}
// The keyspace is either not idle (Idle=False),
// or we can't be sure whether it's idle (Idle=Unknown).
return planetscalev2.NewOrphanStatus("NotIdle", "The keyspace can't be turned down because it's not idle. You must remove all tablet pools before removing the keyspace.")
},
})
}
// newVitessKeyspace expands a complete VitessKeyspace from a VitessKeyspaceTemplate.
//
// A VitessKeyspace consists of both user-configured parts, which come from VitessKeyspaceTemplate,
// plus auto-filled data that we propagate into each VitessKeyspace from here.
// This allows VitessKeyspace to do its job without looking at any other objects,
// and also lets us control when global changes roll out to each keyspace.
func newVitessKeyspace(key client.ObjectKey, vt *planetscalev2.VitessCluster, parentLabels map[string]string, keyspace *planetscalev2.VitessKeyspaceTemplate) *planetscalev2.VitessKeyspace {
template := keyspace.DeepCopy()
images := planetscalev2.VitessKeyspaceImages{}
planetscalev2.DefaultVitessKeyspaceImages(&images, &vt.Spec.Images)
// Copy parent labels map and add keyspace-specific label.
labels := make(map[string]string, len(parentLabels)+1)
for k, v := range parentLabels {
labels[k] = v
}
labels[planetscalev2.KeyspaceLabel] = keyspace.Name
var backupLocations []planetscalev2.VitessBackupLocation
var backupEngine planetscalev2.VitessBackupEngine
if vt.Spec.Backup != nil {
backupLocations = vt.Spec.Backup.Locations
backupEngine = vt.Spec.Backup.Engine
}
return &planetscalev2.VitessKeyspace{
ObjectMeta: metav1.ObjectMeta{
Namespace: key.Namespace,
Name: key.Name,
Labels: labels,
Annotations: keyspace.Annotations,
},
Spec: planetscalev2.VitessKeyspaceSpec{
VitessKeyspaceTemplate: *template,
GlobalLockserver: *lockserver.GlobalConnectionParams(&vt.Spec.GlobalLockserver, vt.Namespace, vt.Name),
Images: images,
ImagePullPolicies: vt.Spec.ImagePullPolicies,
ImagePullSecrets: vt.Spec.ImagePullSecrets,
ZoneMap: vt.Spec.ZoneMap(),
BackupLocations: backupLocations,
BackupEngine: backupEngine,
ExtraVitessFlags: vt.Spec.ExtraVitessFlags,
TopologyReconciliation: vt.Spec.TopologyReconciliation,
UpdateStrategy: vt.Spec.UpdateStrategy,
},
}
}
func updateVitessKeyspace(key client.ObjectKey, vtk *planetscalev2.VitessKeyspace, vt *planetscalev2.VitessCluster, parentLabels map[string]string, keyspace *planetscalev2.VitessKeyspaceTemplate) {
newKeyspace := newVitessKeyspace(key, vt, parentLabels, keyspace)
// Update labels, but ignore existing ones we don't set.
update.Labels(&vtk.Labels, newKeyspace.Labels)
// Add or remove annotations requested in vts.Spec.Annotations.
// This must be done before we update vtk.Spec.
updateVitessKeyspaceAnnotations(vtk, newKeyspace)
// For now, everything in Spec is safe to update.
vtk.Spec = newKeyspace.Spec
}
func | (key client.ObjectKey, vtk *planetscalev2.VitessKeyspace, vt *planetscalev2.VitessCluster, parentLabels map[string]string, keyspace *planetscalev2.VitessKeyspaceTemplate) {
newKeyspace := newVitessKeyspace(key, vt, parentLabels, keyspace)
// Update labels, but ignore existing ones we don't set.
update.Labels(&vtk.Labels, newKeyspace.Labels)
// Switching update strategies should always take effect immediately.
vtk.Spec.UpdateStrategy = newKeyspace.Spec.UpdateStrategy
// Update disk size immediately if specified to.
if *vtk.Spec.UpdateStrategy.Type == planetscalev2.ExternalVitessClusterUpdateStrategyType {
if vtk.Spec.UpdateStrategy.External.ResourceChangesAllowed(corev1.ResourceStorage) {
update.KeyspaceDiskSize(&vtk.Spec.VitessKeyspaceTemplate, &newKeyspace.Spec.VitessKeyspaceTemplate)
}
}
// Add or remove partitionings as needed, but don't immediately reconfigure
// partitionings that already exist.
update.PartitioningSet(&vtk.Spec.Partitionings, newKeyspace.Spec.Partitionings)
// Only update things that are safe to roll out immediately.
vtk.Spec.TurndownPolicy = newKeyspace.Spec.TurndownPolicy
// Add or remove annotations requested in vtk.Spec.Annotations.
updateVitessKeyspaceAnnotations(vtk, newKeyspace)
}
func updateVitessKeyspaceAnnotations(vtk *planetscalev2.VitessKeyspace, newKeyspace *planetscalev2.VitessKeyspace) {
differentAnnotations := differentKeys(vtk.Spec.Annotations, newKeyspace.Spec.Annotations)
for _, annotation := range differentAnnotations {
delete(vtk.Annotations, annotation)
}
// Update annotations we set.
update.Annotations(&vtk.Annotations, newKeyspace.Annotations)
vtk.Spec.Annotations = newKeyspace.Spec.Annotations
}
// differentKeys returns keys from an older map instance that are no longer in a newer map instance.
func differentKeys(oldMap, newMap map[string]string) []string {
var differentKeys []string
for k := range oldMap {
if _, exist := newMap[k]; !exist {
differentKeys = append(differentKeys, k)
}
}
return differentKeys
}
| updateVitessKeyspaceInPlace | identifier_name |
reconcile_keyspaces.go | /*
Copyright 2019 PlanetScale Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vitesscluster
import (
"context"
"sort"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
planetscalev2 "planetscale.dev/vitess-operator/pkg/apis/planetscale/v2"
"planetscale.dev/vitess-operator/pkg/operator/lockserver"
"planetscale.dev/vitess-operator/pkg/operator/reconciler"
"planetscale.dev/vitess-operator/pkg/operator/rollout"
"planetscale.dev/vitess-operator/pkg/operator/update"
"planetscale.dev/vitess-operator/pkg/operator/vitesskeyspace"
)
func (r *ReconcileVitessCluster) reconcileKeyspaces(ctx context.Context, vt *planetscalev2.VitessCluster) error {
labels := map[string]string{
planetscalev2.ClusterLabel: vt.Name,
}
// Generate keys (object names) for all desired keyspaces.
// Keep a map back from generated names to the keyspace specs.
// Oh boy it's awkward right now that the k8s client calls object names keys.
keys := make([]client.ObjectKey, 0, len(vt.Spec.Keyspaces))
keyspaceMap := make(map[client.ObjectKey]*planetscalev2.VitessKeyspaceTemplate, len(vt.Spec.Keyspaces))
for i := range vt.Spec.Keyspaces {
keyspace := &vt.Spec.Keyspaces[i]
key := client.ObjectKey{Namespace: vt.Namespace, Name: vitesskeyspace.Name(vt.Name, keyspace.Name)}
keys = append(keys, key)
keyspaceMap[key] = keyspace
// Initialize a status entry for every desired keyspace, so it will be
// listed even if we end up not having anything to report about it.
vt.Status.Keyspaces[keyspace.Name] = planetscalev2.NewVitessClusterKeyspaceStatus(keyspace)
}
return r.reconciler.ReconcileObjectSet(ctx, vt, keys, labels, reconciler.Strategy{
Kind: &planetscalev2.VitessKeyspace{},
New: func(key client.ObjectKey) runtime.Object {
return newVitessKeyspace(key, vt, labels, keyspaceMap[key])
},
UpdateInPlace: func(key client.ObjectKey, obj runtime.Object) {
newObj := obj.(*planetscalev2.VitessKeyspace)
if *vt.Spec.UpdateStrategy.Type == planetscalev2.ImmediateVitessClusterUpdateStrategyType {
updateVitessKeyspace(key, newObj, vt, labels, keyspaceMap[key])
return
}
updateVitessKeyspaceInPlace(key, newObj, vt, labels, keyspaceMap[key])
},
UpdateRollingInPlace: func(key client.ObjectKey, obj runtime.Object) {
newObj := obj.(*planetscalev2.VitessKeyspace)
if *vt.Spec.UpdateStrategy.Type == planetscalev2.ImmediateVitessClusterUpdateStrategyType {
// In this case we should use UpdateInPlace for all updates.
return
}
updateVitessKeyspace(key, newObj, vt, labels, keyspaceMap[key])
},
Status: func(key client.ObjectKey, obj runtime.Object) {
curObj := obj.(*planetscalev2.VitessKeyspace)
status := vt.Status.Keyspaces[curObj.Spec.Name]
status.PendingChanges = curObj.Annotations[rollout.ScheduledAnnotation]
status.Shards = int32(len(curObj.Status.Shards))
status.ReadyShards = 0
status.UpdatedShards = 0
status.Tablets = 0
status.ReadyTablets = 0
status.UpdatedTablets = 0
cells := map[string]struct{}{}
for _, shard := range curObj.Status.Shards {
if shard.ReadyTablets == shard.DesiredTablets {
status.ReadyShards++
}
if shard.UpdatedTablets == shard.Tablets {
status.UpdatedShards++
}
status.Tablets += shard.Tablets
status.ReadyTablets += shard.ReadyTablets
status.UpdatedTablets += shard.UpdatedTablets
for _, cell := range shard.Cells {
cells[cell] = struct{}{}
}
}
for cell := range cells |
sort.Strings(status.Cells)
vt.Status.Keyspaces[curObj.Spec.Name] = status
},
OrphanStatus: func(key client.ObjectKey, obj runtime.Object, orphanStatus *planetscalev2.OrphanStatus) {
curObj := obj.(*planetscalev2.VitessKeyspace)
vt.Status.OrphanedKeyspaces[curObj.Spec.Name] = *orphanStatus
},
PrepareForTurndown: func(key client.ObjectKey, obj runtime.Object) *planetscalev2.OrphanStatus {
curObj := obj.(*planetscalev2.VitessKeyspace)
// Make sure it's ok to delete this keyspace.
// The user may specify to skip turndown safety checks.
if curObj.Spec.TurndownPolicy == planetscalev2.VitessKeyspaceTurndownPolicyImmediate {
return nil
}
// Otherwise, we err on the safe side since losing a keyspace accidentally is very disruptive.
if curObj.Status.Idle == corev1.ConditionTrue {
// The keyspace is not depoyed in any cells.
return nil
}
// The keyspace is either not idle (Idle=False),
// or we can't be sure whether it's idle (Idle=Unknown).
return planetscalev2.NewOrphanStatus("NotIdle", "The keyspace can't be turned down because it's not idle. You must remove all tablet pools before removing the keyspace.")
},
})
}
// newVitessKeyspace expands a complete VitessKeyspace from a VitessKeyspaceTemplate.
//
// A VitessKeyspace consists of both user-configured parts, which come from VitessKeyspaceTemplate,
// plus auto-filled data that we propagate into each VitessKeyspace from here.
// This allows VitessKeyspace to do its job without looking at any other objects,
// and also lets us control when global changes roll out to each keyspace.
func newVitessKeyspace(key client.ObjectKey, vt *planetscalev2.VitessCluster, parentLabels map[string]string, keyspace *planetscalev2.VitessKeyspaceTemplate) *planetscalev2.VitessKeyspace {
template := keyspace.DeepCopy()
images := planetscalev2.VitessKeyspaceImages{}
planetscalev2.DefaultVitessKeyspaceImages(&images, &vt.Spec.Images)
// Copy parent labels map and add keyspace-specific label.
labels := make(map[string]string, len(parentLabels)+1)
for k, v := range parentLabels {
labels[k] = v
}
labels[planetscalev2.KeyspaceLabel] = keyspace.Name
var backupLocations []planetscalev2.VitessBackupLocation
var backupEngine planetscalev2.VitessBackupEngine
if vt.Spec.Backup != nil {
backupLocations = vt.Spec.Backup.Locations
backupEngine = vt.Spec.Backup.Engine
}
return &planetscalev2.VitessKeyspace{
ObjectMeta: metav1.ObjectMeta{
Namespace: key.Namespace,
Name: key.Name,
Labels: labels,
Annotations: keyspace.Annotations,
},
Spec: planetscalev2.VitessKeyspaceSpec{
VitessKeyspaceTemplate: *template,
GlobalLockserver: *lockserver.GlobalConnectionParams(&vt.Spec.GlobalLockserver, vt.Namespace, vt.Name),
Images: images,
ImagePullPolicies: vt.Spec.ImagePullPolicies,
ImagePullSecrets: vt.Spec.ImagePullSecrets,
ZoneMap: vt.Spec.ZoneMap(),
BackupLocations: backupLocations,
BackupEngine: backupEngine,
ExtraVitessFlags: vt.Spec.ExtraVitessFlags,
TopologyReconciliation: vt.Spec.TopologyReconciliation,
UpdateStrategy: vt.Spec.UpdateStrategy,
},
}
}
func updateVitessKeyspace(key client.ObjectKey, vtk *planetscalev2.VitessKeyspace, vt *planetscalev2.VitessCluster, parentLabels map[string]string, keyspace *planetscalev2.VitessKeyspaceTemplate) {
newKeyspace := newVitessKeyspace(key, vt, parentLabels, keyspace)
// Update labels, but ignore existing ones we don't set.
update.Labels(&vtk.Labels, newKeyspace.Labels)
// Add or remove annotations requested in vts.Spec.Annotations.
// This must be done before we update vtk.Spec.
updateVitessKeyspaceAnnotations(vtk, newKeyspace)
// For now, everything in Spec is safe to update.
vtk.Spec = newKeyspace.Spec
}
func updateVitessKeyspaceInPlace(key client.ObjectKey, vtk *planetscalev2.VitessKeyspace, vt *planetscalev2.VitessCluster, parentLabels map[string]string, keyspace *planetscalev2.VitessKeyspaceTemplate) {
newKeyspace := newVitessKeyspace(key, vt, parentLabels, keyspace)
// Update labels, but ignore existing ones we don't set.
update.Labels(&vtk.Labels, newKeyspace.Labels)
// Switching update strategies should always take effect immediately.
vtk.Spec.UpdateStrategy = newKeyspace.Spec.UpdateStrategy
// Update disk size immediately if specified to.
if *vtk.Spec.UpdateStrategy.Type == planetscalev2.ExternalVitessClusterUpdateStrategyType {
if vtk.Spec.UpdateStrategy.External.ResourceChangesAllowed(corev1.ResourceStorage) {
update.KeyspaceDiskSize(&vtk.Spec.VitessKeyspaceTemplate, &newKeyspace.Spec.VitessKeyspaceTemplate)
}
}
// Add or remove partitionings as needed, but don't immediately reconfigure
// partitionings that already exist.
update.PartitioningSet(&vtk.Spec.Partitionings, newKeyspace.Spec.Partitionings)
// Only update things that are safe to roll out immediately.
vtk.Spec.TurndownPolicy = newKeyspace.Spec.TurndownPolicy
// Add or remove annotations requested in vtk.Spec.Annotations.
updateVitessKeyspaceAnnotations(vtk, newKeyspace)
}
func updateVitessKeyspaceAnnotations(vtk *planetscalev2.VitessKeyspace, newKeyspace *planetscalev2.VitessKeyspace) {
differentAnnotations := differentKeys(vtk.Spec.Annotations, newKeyspace.Spec.Annotations)
for _, annotation := range differentAnnotations {
delete(vtk.Annotations, annotation)
}
// Update annotations we set.
update.Annotations(&vtk.Annotations, newKeyspace.Annotations)
vtk.Spec.Annotations = newKeyspace.Spec.Annotations
}
// differentKeys returns keys from an older map instance that are no longer in a newer map instance.
func differentKeys(oldMap, newMap map[string]string) []string {
var differentKeys []string
for k := range oldMap {
if _, exist := newMap[k]; !exist {
differentKeys = append(differentKeys, k)
}
}
return differentKeys
}
| {
status.Cells = append(status.Cells, cell)
} | conditional_block |
reconcile_keyspaces.go | /*
Copyright 2019 PlanetScale Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vitesscluster
import (
"context"
"sort"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
planetscalev2 "planetscale.dev/vitess-operator/pkg/apis/planetscale/v2"
"planetscale.dev/vitess-operator/pkg/operator/lockserver"
"planetscale.dev/vitess-operator/pkg/operator/reconciler"
"planetscale.dev/vitess-operator/pkg/operator/rollout"
"planetscale.dev/vitess-operator/pkg/operator/update"
"planetscale.dev/vitess-operator/pkg/operator/vitesskeyspace"
)
func (r *ReconcileVitessCluster) reconcileKeyspaces(ctx context.Context, vt *planetscalev2.VitessCluster) error {
labels := map[string]string{
planetscalev2.ClusterLabel: vt.Name,
}
// Generate keys (object names) for all desired keyspaces.
// Keep a map back from generated names to the keyspace specs.
// Oh boy it's awkward right now that the k8s client calls object names keys.
keys := make([]client.ObjectKey, 0, len(vt.Spec.Keyspaces))
keyspaceMap := make(map[client.ObjectKey]*planetscalev2.VitessKeyspaceTemplate, len(vt.Spec.Keyspaces))
for i := range vt.Spec.Keyspaces {
keyspace := &vt.Spec.Keyspaces[i]
key := client.ObjectKey{Namespace: vt.Namespace, Name: vitesskeyspace.Name(vt.Name, keyspace.Name)}
keys = append(keys, key)
keyspaceMap[key] = keyspace
// Initialize a status entry for every desired keyspace, so it will be
// listed even if we end up not having anything to report about it.
vt.Status.Keyspaces[keyspace.Name] = planetscalev2.NewVitessClusterKeyspaceStatus(keyspace)
}
return r.reconciler.ReconcileObjectSet(ctx, vt, keys, labels, reconciler.Strategy{
Kind: &planetscalev2.VitessKeyspace{},
New: func(key client.ObjectKey) runtime.Object {
return newVitessKeyspace(key, vt, labels, keyspaceMap[key])
},
UpdateInPlace: func(key client.ObjectKey, obj runtime.Object) {
newObj := obj.(*planetscalev2.VitessKeyspace)
if *vt.Spec.UpdateStrategy.Type == planetscalev2.ImmediateVitessClusterUpdateStrategyType {
updateVitessKeyspace(key, newObj, vt, labels, keyspaceMap[key]) | newObj := obj.(*planetscalev2.VitessKeyspace)
if *vt.Spec.UpdateStrategy.Type == planetscalev2.ImmediateVitessClusterUpdateStrategyType {
// In this case we should use UpdateInPlace for all updates.
return
}
updateVitessKeyspace(key, newObj, vt, labels, keyspaceMap[key])
},
Status: func(key client.ObjectKey, obj runtime.Object) {
curObj := obj.(*planetscalev2.VitessKeyspace)
status := vt.Status.Keyspaces[curObj.Spec.Name]
status.PendingChanges = curObj.Annotations[rollout.ScheduledAnnotation]
status.Shards = int32(len(curObj.Status.Shards))
status.ReadyShards = 0
status.UpdatedShards = 0
status.Tablets = 0
status.ReadyTablets = 0
status.UpdatedTablets = 0
cells := map[string]struct{}{}
for _, shard := range curObj.Status.Shards {
if shard.ReadyTablets == shard.DesiredTablets {
status.ReadyShards++
}
if shard.UpdatedTablets == shard.Tablets {
status.UpdatedShards++
}
status.Tablets += shard.Tablets
status.ReadyTablets += shard.ReadyTablets
status.UpdatedTablets += shard.UpdatedTablets
for _, cell := range shard.Cells {
cells[cell] = struct{}{}
}
}
for cell := range cells {
status.Cells = append(status.Cells, cell)
}
sort.Strings(status.Cells)
vt.Status.Keyspaces[curObj.Spec.Name] = status
},
OrphanStatus: func(key client.ObjectKey, obj runtime.Object, orphanStatus *planetscalev2.OrphanStatus) {
curObj := obj.(*planetscalev2.VitessKeyspace)
vt.Status.OrphanedKeyspaces[curObj.Spec.Name] = *orphanStatus
},
PrepareForTurndown: func(key client.ObjectKey, obj runtime.Object) *planetscalev2.OrphanStatus {
curObj := obj.(*planetscalev2.VitessKeyspace)
// Make sure it's ok to delete this keyspace.
// The user may specify to skip turndown safety checks.
if curObj.Spec.TurndownPolicy == planetscalev2.VitessKeyspaceTurndownPolicyImmediate {
return nil
}
// Otherwise, we err on the safe side since losing a keyspace accidentally is very disruptive.
if curObj.Status.Idle == corev1.ConditionTrue {
// The keyspace is not depoyed in any cells.
return nil
}
// The keyspace is either not idle (Idle=False),
// or we can't be sure whether it's idle (Idle=Unknown).
return planetscalev2.NewOrphanStatus("NotIdle", "The keyspace can't be turned down because it's not idle. You must remove all tablet pools before removing the keyspace.")
},
})
}
// newVitessKeyspace expands a complete VitessKeyspace from a VitessKeyspaceTemplate.
//
// A VitessKeyspace consists of both user-configured parts, which come from VitessKeyspaceTemplate,
// plus auto-filled data that we propagate into each VitessKeyspace from here.
// This allows VitessKeyspace to do its job without looking at any other objects,
// and also lets us control when global changes roll out to each keyspace.
func newVitessKeyspace(key client.ObjectKey, vt *planetscalev2.VitessCluster, parentLabels map[string]string, keyspace *planetscalev2.VitessKeyspaceTemplate) *planetscalev2.VitessKeyspace {
// Deep-copy the template so mutations of the result never alias vt.Spec.
template := keyspace.DeepCopy()
images := planetscalev2.VitessKeyspaceImages{}
planetscalev2.DefaultVitessKeyspaceImages(&images, &vt.Spec.Images)
// Copy parent labels map and add keyspace-specific label.
labels := make(map[string]string, len(parentLabels)+1)
for k, v := range parentLabels {
labels[k] = v
}
labels[planetscalev2.KeyspaceLabel] = keyspace.Name
// Backup settings are optional; keep zero values when vt.Spec.Backup is nil.
var backupLocations []planetscalev2.VitessBackupLocation
var backupEngine planetscalev2.VitessBackupEngine
if vt.Spec.Backup != nil {
backupLocations = vt.Spec.Backup.Locations
backupEngine = vt.Spec.Backup.Engine
}
return &planetscalev2.VitessKeyspace{
ObjectMeta: metav1.ObjectMeta{
Namespace: key.Namespace,
Name: key.Name,
Labels: labels,
Annotations: keyspace.Annotations,
},
Spec: planetscalev2.VitessKeyspaceSpec{
VitessKeyspaceTemplate: *template,
GlobalLockserver: *lockserver.GlobalConnectionParams(&vt.Spec.GlobalLockserver, vt.Namespace, vt.Name),
Images: images,
ImagePullPolicies: vt.Spec.ImagePullPolicies,
ImagePullSecrets: vt.Spec.ImagePullSecrets,
ZoneMap: vt.Spec.ZoneMap(),
BackupLocations: backupLocations,
BackupEngine: backupEngine,
ExtraVitessFlags: vt.Spec.ExtraVitessFlags,
TopologyReconciliation: vt.Spec.TopologyReconciliation,
UpdateStrategy: vt.Spec.UpdateStrategy,
},
}
}
// updateVitessKeyspace applies the full desired spec to an existing
// VitessKeyspace object.
func updateVitessKeyspace(key client.ObjectKey, vtk *planetscalev2.VitessKeyspace, vt *planetscalev2.VitessCluster, parentLabels map[string]string, keyspace *planetscalev2.VitessKeyspaceTemplate) {
newKeyspace := newVitessKeyspace(key, vt, parentLabels, keyspace)
// Update labels, but ignore existing ones we don't set.
update.Labels(&vtk.Labels, newKeyspace.Labels)
// Add or remove annotations requested in vtk.Spec.Annotations.
// This must be done before we update vtk.Spec.
updateVitessKeyspaceAnnotations(vtk, newKeyspace)
// For now, everything in Spec is safe to update.
vtk.Spec = newKeyspace.Spec
}
// updateVitessKeyspaceInPlace applies only the subset of spec changes that is
// safe to roll out immediately.
func updateVitessKeyspaceInPlace(key client.ObjectKey, vtk *planetscalev2.VitessKeyspace, vt *planetscalev2.VitessCluster, parentLabels map[string]string, keyspace *planetscalev2.VitessKeyspaceTemplate) {
newKeyspace := newVitessKeyspace(key, vt, parentLabels, keyspace)
// Update labels, but ignore existing ones we don't set.
update.Labels(&vtk.Labels, newKeyspace.Labels)
// Switching update strategies should always take effect immediately.
vtk.Spec.UpdateStrategy = newKeyspace.Spec.UpdateStrategy
// Update disk size immediately if specified to.
// NOTE(review): dereferences UpdateStrategy.Type and assumes
// UpdateStrategy.External is non-nil for the External strategy — presumably
// guaranteed by upstream defaulting/validation; confirm.
if *vtk.Spec.UpdateStrategy.Type == planetscalev2.ExternalVitessClusterUpdateStrategyType {
if vtk.Spec.UpdateStrategy.External.ResourceChangesAllowed(corev1.ResourceStorage) {
update.KeyspaceDiskSize(&vtk.Spec.VitessKeyspaceTemplate, &newKeyspace.Spec.VitessKeyspaceTemplate)
}
}
// Add or remove partitionings as needed, but don't immediately reconfigure
// partitionings that already exist.
update.PartitioningSet(&vtk.Spec.Partitionings, newKeyspace.Spec.Partitionings)
// Only update things that are safe to roll out immediately.
vtk.Spec.TurndownPolicy = newKeyspace.Spec.TurndownPolicy
// Add or remove annotations requested in vtk.Spec.Annotations.
updateVitessKeyspaceAnnotations(vtk, newKeyspace)
}
// updateVitessKeyspaceAnnotations syncs operator-managed annotations on vtk:
// annotations present in the old requested set but absent from the new one
// are deleted, then the new set is applied and recorded in vtk.Spec.Annotations.
func updateVitessKeyspaceAnnotations(vtk *planetscalev2.VitessKeyspace, newKeyspace *planetscalev2.VitessKeyspace) {
differentAnnotations := differentKeys(vtk.Spec.Annotations, newKeyspace.Spec.Annotations)
for _, annotation := range differentAnnotations {
delete(vtk.Annotations, annotation)
}
// Update annotations we set.
update.Annotations(&vtk.Annotations, newKeyspace.Annotations)
vtk.Spec.Annotations = newKeyspace.Spec.Annotations
}
// differentKeys returns the keys from an older map instance that are no
// longer present in a newer map instance. The result order is unspecified
// (map iteration order); it returns nil when every old key still exists.
func differentKeys(oldMap, newMap map[string]string) []string {
	// Renamed from "differentKeys" to avoid shadowing the function name;
	// also removes stray dataset-separator tokens that broke the syntax.
	var removed []string
	for k := range oldMap {
		if _, exists := newMap[k]; !exists {
			removed = append(removed, k)
		}
	}
	return removed
}
}
updateVitessKeyspaceInPlace(key, newObj, vt, labels, keyspaceMap[key])
},
UpdateRollingInPlace: func(key client.ObjectKey, obj runtime.Object) { | random_line_split |
reconcile_keyspaces.go | /*
Copyright 2019 PlanetScale Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vitesscluster
import (
"context"
"sort"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
planetscalev2 "planetscale.dev/vitess-operator/pkg/apis/planetscale/v2"
"planetscale.dev/vitess-operator/pkg/operator/lockserver"
"planetscale.dev/vitess-operator/pkg/operator/reconciler"
"planetscale.dev/vitess-operator/pkg/operator/rollout"
"planetscale.dev/vitess-operator/pkg/operator/update"
"planetscale.dev/vitess-operator/pkg/operator/vitesskeyspace"
)
// reconcileKeyspaces ensures the set of VitessKeyspace child objects matches
// the keyspaces declared in vt.Spec.Keyspaces, and aggregates their statuses
// back into vt.Status.
func (r *ReconcileVitessCluster) reconcileKeyspaces(ctx context.Context, vt *planetscalev2.VitessCluster) error {
labels := map[string]string{
planetscalev2.ClusterLabel: vt.Name,
}
// Generate keys (object names) for all desired keyspaces.
// Keep a map back from generated names to the keyspace specs.
// Oh boy it's awkward right now that the k8s client calls object names keys.
keys := make([]client.ObjectKey, 0, len(vt.Spec.Keyspaces))
keyspaceMap := make(map[client.ObjectKey]*planetscalev2.VitessKeyspaceTemplate, len(vt.Spec.Keyspaces))
for i := range vt.Spec.Keyspaces {
keyspace := &vt.Spec.Keyspaces[i]
key := client.ObjectKey{Namespace: vt.Namespace, Name: vitesskeyspace.Name(vt.Name, keyspace.Name)}
keys = append(keys, key)
keyspaceMap[key] = keyspace
// Initialize a status entry for every desired keyspace, so it will be
// listed even if we end up not having anything to report about it.
vt.Status.Keyspaces[keyspace.Name] = planetscalev2.NewVitessClusterKeyspaceStatus(keyspace)
}
return r.reconciler.ReconcileObjectSet(ctx, vt, keys, labels, reconciler.Strategy{
Kind: &planetscalev2.VitessKeyspace{},
New: func(key client.ObjectKey) runtime.Object {
return newVitessKeyspace(key, vt, labels, keyspaceMap[key])
},
UpdateInPlace: func(key client.ObjectKey, obj runtime.Object) {
newObj := obj.(*planetscalev2.VitessKeyspace)
// With the Immediate strategy, apply the full spec in place;
// otherwise apply only the immediate-safe subset.
if *vt.Spec.UpdateStrategy.Type == planetscalev2.ImmediateVitessClusterUpdateStrategyType {
updateVitessKeyspace(key, newObj, vt, labels, keyspaceMap[key])
return
}
updateVitessKeyspaceInPlace(key, newObj, vt, labels, keyspaceMap[key])
},
UpdateRollingInPlace: func(key client.ObjectKey, obj runtime.Object) {
newObj := obj.(*planetscalev2.VitessKeyspace)
if *vt.Spec.UpdateStrategy.Type == planetscalev2.ImmediateVitessClusterUpdateStrategyType {
// In this case we should use UpdateInPlace for all updates.
return
}
updateVitessKeyspace(key, newObj, vt, labels, keyspaceMap[key])
},
Status: func(key client.ObjectKey, obj runtime.Object) {
curObj := obj.(*planetscalev2.VitessKeyspace)
status := vt.Status.Keyspaces[curObj.Spec.Name]
status.PendingChanges = curObj.Annotations[rollout.ScheduledAnnotation]
status.Shards = int32(len(curObj.Status.Shards))
// Reset aggregate counters before re-summing over all shards.
status.ReadyShards = 0
status.UpdatedShards = 0
status.Tablets = 0
status.ReadyTablets = 0
status.UpdatedTablets = 0
cells := map[string]struct{}{}
for _, shard := range curObj.Status.Shards {
if shard.ReadyTablets == shard.DesiredTablets {
status.ReadyShards++
}
if shard.UpdatedTablets == shard.Tablets {
status.UpdatedShards++
}
status.Tablets += shard.Tablets
status.ReadyTablets += shard.ReadyTablets
status.UpdatedTablets += shard.UpdatedTablets
for _, cell := range shard.Cells {
cells[cell] = struct{}{}
}
}
// Report the union of cells across all shards, sorted for stable output.
for cell := range cells {
status.Cells = append(status.Cells, cell)
}
sort.Strings(status.Cells)
vt.Status.Keyspaces[curObj.Spec.Name] = status
},
OrphanStatus: func(key client.ObjectKey, obj runtime.Object, orphanStatus *planetscalev2.OrphanStatus) {
curObj := obj.(*planetscalev2.VitessKeyspace)
vt.Status.OrphanedKeyspaces[curObj.Spec.Name] = *orphanStatus
},
PrepareForTurndown: func(key client.ObjectKey, obj runtime.Object) *planetscalev2.OrphanStatus {
curObj := obj.(*planetscalev2.VitessKeyspace)
// Make sure it's ok to delete this keyspace.
// The user may specify to skip turndown safety checks.
if curObj.Spec.TurndownPolicy == planetscalev2.VitessKeyspaceTurndownPolicyImmediate {
return nil
}
// Otherwise, we err on the safe side since losing a keyspace accidentally is very disruptive.
if curObj.Status.Idle == corev1.ConditionTrue {
// The keyspace is not deployed in any cells.
return nil
}
// The keyspace is either not idle (Idle=False),
// or we can't be sure whether it's idle (Idle=Unknown).
return planetscalev2.NewOrphanStatus("NotIdle", "The keyspace can't be turned down because it's not idle. You must remove all tablet pools before removing the keyspace.")
},
})
}
// newVitessKeyspace expands a complete VitessKeyspace from a VitessKeyspaceTemplate.
//
// A VitessKeyspace consists of both user-configured parts, which come from VitessKeyspaceTemplate,
// plus auto-filled data that we propagate into each VitessKeyspace from here.
// This allows VitessKeyspace to do its job without looking at any other objects,
// and also lets us control when global changes roll out to each keyspace.
func newVitessKeyspace(key client.ObjectKey, vt *planetscalev2.VitessCluster, parentLabels map[string]string, keyspace *planetscalev2.VitessKeyspaceTemplate) *planetscalev2.VitessKeyspace {
// Deep-copy the template so mutations of the result never alias vt.Spec.
template := keyspace.DeepCopy()
images := planetscalev2.VitessKeyspaceImages{}
planetscalev2.DefaultVitessKeyspaceImages(&images, &vt.Spec.Images)
// Copy parent labels map and add keyspace-specific label.
labels := make(map[string]string, len(parentLabels)+1)
for k, v := range parentLabels {
labels[k] = v
}
labels[planetscalev2.KeyspaceLabel] = keyspace.Name
// Backup settings are optional; keep zero values when vt.Spec.Backup is nil.
var backupLocations []planetscalev2.VitessBackupLocation
var backupEngine planetscalev2.VitessBackupEngine
if vt.Spec.Backup != nil {
backupLocations = vt.Spec.Backup.Locations
backupEngine = vt.Spec.Backup.Engine
}
return &planetscalev2.VitessKeyspace{
ObjectMeta: metav1.ObjectMeta{
Namespace: key.Namespace,
Name: key.Name,
Labels: labels,
Annotations: keyspace.Annotations,
},
Spec: planetscalev2.VitessKeyspaceSpec{
VitessKeyspaceTemplate: *template,
GlobalLockserver: *lockserver.GlobalConnectionParams(&vt.Spec.GlobalLockserver, vt.Namespace, vt.Name),
Images: images,
ImagePullPolicies: vt.Spec.ImagePullPolicies,
ImagePullSecrets: vt.Spec.ImagePullSecrets,
ZoneMap: vt.Spec.ZoneMap(),
BackupLocations: backupLocations,
BackupEngine: backupEngine,
ExtraVitessFlags: vt.Spec.ExtraVitessFlags,
TopologyReconciliation: vt.Spec.TopologyReconciliation,
UpdateStrategy: vt.Spec.UpdateStrategy,
},
}
}
// updateVitessKeyspace applies the full desired spec to an existing
// VitessKeyspace object.
func updateVitessKeyspace(key client.ObjectKey, vtk *planetscalev2.VitessKeyspace, vt *planetscalev2.VitessCluster, parentLabels map[string]string, keyspace *planetscalev2.VitessKeyspaceTemplate) {
newKeyspace := newVitessKeyspace(key, vt, parentLabels, keyspace)
// Update labels, but ignore existing ones we don't set.
update.Labels(&vtk.Labels, newKeyspace.Labels)
// Add or remove annotations requested in vtk.Spec.Annotations.
// This must be done before we update vtk.Spec.
updateVitessKeyspaceAnnotations(vtk, newKeyspace)
// For now, everything in Spec is safe to update.
vtk.Spec = newKeyspace.Spec
}
// updateVitessKeyspaceInPlace applies only the subset of spec changes that is
// safe to roll out immediately.
func updateVitessKeyspaceInPlace(key client.ObjectKey, vtk *planetscalev2.VitessKeyspace, vt *planetscalev2.VitessCluster, parentLabels map[string]string, keyspace *planetscalev2.VitessKeyspaceTemplate) {
newKeyspace := newVitessKeyspace(key, vt, parentLabels, keyspace)
// Update labels, but ignore existing ones we don't set.
update.Labels(&vtk.Labels, newKeyspace.Labels)
// Switching update strategies should always take effect immediately.
vtk.Spec.UpdateStrategy = newKeyspace.Spec.UpdateStrategy
// Update disk size immediately if specified to.
// NOTE(review): dereferences UpdateStrategy.Type and assumes
// UpdateStrategy.External is non-nil for the External strategy — presumably
// guaranteed by upstream defaulting/validation; confirm.
if *vtk.Spec.UpdateStrategy.Type == planetscalev2.ExternalVitessClusterUpdateStrategyType {
if vtk.Spec.UpdateStrategy.External.ResourceChangesAllowed(corev1.ResourceStorage) {
update.KeyspaceDiskSize(&vtk.Spec.VitessKeyspaceTemplate, &newKeyspace.Spec.VitessKeyspaceTemplate)
}
}
// Add or remove partitionings as needed, but don't immediately reconfigure
// partitionings that already exist.
update.PartitioningSet(&vtk.Spec.Partitionings, newKeyspace.Spec.Partitionings)
// Only update things that are safe to roll out immediately.
vtk.Spec.TurndownPolicy = newKeyspace.Spec.TurndownPolicy
// Add or remove annotations requested in vtk.Spec.Annotations.
updateVitessKeyspaceAnnotations(vtk, newKeyspace)
}
// updateVitessKeyspaceAnnotations syncs operator-managed annotations on vtk:
// annotations present in the old requested set but absent from the new one
// are deleted, then the new set is applied and recorded in vtk.Spec.Annotations.
func updateVitessKeyspaceAnnotations(vtk *planetscalev2.VitessKeyspace, newKeyspace *planetscalev2.VitessKeyspace) {
differentAnnotations := differentKeys(vtk.Spec.Annotations, newKeyspace.Spec.Annotations)
for _, annotation := range differentAnnotations {
delete(vtk.Annotations, annotation)
}
// Update annotations we set.
update.Annotations(&vtk.Annotations, newKeyspace.Annotations)
vtk.Spec.Annotations = newKeyspace.Spec.Annotations
}
// differentKeys returns the keys from an older map instance that are no
// longer present in a newer map instance. The result order is unspecified
// (map iteration order); it returns nil when every old key still exists.
func differentKeys(oldMap, newMap map[string]string) []string {
	// Renamed the accumulator from "differentKeys" to avoid shadowing the
	// function name; also removes stray dataset-separator tokens ("|",
	// "identifier_body") that broke the syntax of this definition.
	var removed []string
	for k := range oldMap {
		if _, exists := newMap[k]; !exists {
			removed = append(removed, k)
		}
	}
	return removed
}
elasticsearch_metadata.go | package storage
import (
"context"
"crypto/tls"
"encoding/json"
"fmt"
"io/ioutil"
"log"
"net/http"
"strconv"
"strings"
"sync"
"time"
"github.com/elastic/go-elasticsearch/v6"
"github.com/elastic/go-elasticsearch/v6/esapi"
"github.com/graphite-ng/carbon-relay-ng/cfg"
"github.com/lestrrat-go/strftime"
"github.com/prometheus/client_golang/prometheus"
"go.uber.org/zap"
)
const (
namespace = "elasticsearch"
default_metrics_metadata_index = "biggraphite"
default_index_date_format = "%Y-%m-%d"
directories_index_suffix = "directories"
metrics_index_suffix = "metrics"
metricsMapping = `
{
"_doc": {
"properties": {
"depth": {
"type": "long"
},
"name": {
"type": "keyword",
"ignore_above": 1024
},
"uuid": {
"type": "keyword"
},
"config": {
"type": "object"
}
},
"dynamic_templates": [
{
"strings_as_keywords": {
"match": "p*",
"match_mapping_type": "string",
"mapping": {
"type": "keyword",
"ignore_above": 256,
"ignore_malformed": true
}
}
}
]
}
}
`
dirMapping = `
{
"_doc": {
"properties": {
"depth": {
"type": "long"
},
"name": {
"type": "keyword",
"ignore_above": 1024
},
"uuid": {
"type": "keyword"
},
"parent": {
"type": "keyword"
}
},
"dynamic_templates": [
{
"strings_as_keywords": {
"match": "p*",
"match_mapping_type": "string",
"mapping": {
"type": "keyword",
"ignore_above": 256,
"ignore_malformed": true
}
}
}
]
}
}
`
documentType = "_doc"
)
// BgMetadataElasticSearchConnector buffers metric/directory metadata
// documents and bulk-writes them to ElasticSearch, exporting Prometheus
// metrics about the writes.
type BgMetadataElasticSearchConnector struct {
client ElasticSearchClient
UpdatedDocuments *prometheus.CounterVec
HTTPErrors *prometheus.CounterVec
WriteDurationMs prometheus.Histogram
DocumentBuildDurationMs prometheus.Histogram
RequestSize prometheus.Histogram
// KnownIndices caches index names whose mapping has already been created.
KnownIndices map[string]bool
// bulkBuffer accumulates documents until BulkSize is reached; guarded by mux.
bulkBuffer []ElasticSearchDocument
BulkSize uint
mux sync.Mutex
MaxRetry uint
// currentIndex is the metric index targeted by the in-flight bulk write.
IndexName, currentIndex string
IndexDateFmt string //strftime fmt string
DirectoriesIndexAlias, MetricsIndexAlias string
logger *zap.Logger
}
// ElasticSearchClient abstracts the transport used by esapi requests,
// allowing the real go-elasticsearch client to be swapped out in tests.
type ElasticSearchClient interface {
Perform(*http.Request) (*http.Response, error)
}
// newBgMetadataElasticSearchConnector is the internal constructor for
// BgMetadataElasticSearchConnector; it wires up and registers the Prometheus
// metrics and applies index-name defaults.
func newBgMetadataElasticSearchConnector(elasticSearchClient ElasticSearchClient, registry prometheus.Registerer, bulkSize, maxRetry uint, indexName, IndexDateFmt string) *BgMetadataElasticSearchConnector {
var esc = BgMetadataElasticSearchConnector{
client: elasticSearchClient,
BulkSize: bulkSize,
bulkBuffer: make([]ElasticSearchDocument, 0, bulkSize),
MaxRetry: maxRetry,
IndexName: indexName,
IndexDateFmt: IndexDateFmt,
UpdatedDocuments: prometheus.NewCounterVec(prometheus.CounterOpts{
Namespace: namespace,
Name: "updated_documents",
Help: "total number of documents updated in ElasticSearch splited between metrics and directories",
}, []string{"status", "type"}),
HTTPErrors: prometheus.NewCounterVec(prometheus.CounterOpts{
Namespace: namespace,
Name: "http_errors",
Help: "total number of http errors encountered partitionned by status code",
}, []string{"code"}),
WriteDurationMs: prometheus.NewHistogram(prometheus.HistogramOpts{
Namespace: namespace,
Name: "write_duration_ms",
Help: "time spent writing to ElasticSearch based on `took` field of response ",
Buckets: []float64{250, 500, 750, 1000, 1500, 2000, 5000, 10000}}),
RequestSize: prometheus.NewHistogram(prometheus.HistogramOpts{
Namespace: namespace,
Name: "write_request_size_bytes",
Help: "Size of batch create requests performed on elasticsearch",
Buckets: []float64{10000, 100000, 1000000, 5000000, 10000000, 20000000, 50000000}}),
DocumentBuildDurationMs: prometheus.NewHistogram(prometheus.HistogramOpts{
Namespace: namespace,
Name: "document_build_duration_ms",
Help: "time spent building an ElasticSearch document",
Buckets: []float64{1, 5, 10, 50, 100, 250, 500, 750, 1000, 2000}}),
logger: zap.L(),
}
// Registration errors are deliberately ignored (e.g. duplicate registration
// against a shared registry).
_ = registry.Register(esc.UpdatedDocuments)
_ = registry.Register(esc.WriteDurationMs)
_ = registry.Register(esc.DocumentBuildDurationMs)
_ = registry.Register(esc.HTTPErrors)
_ = registry.Register(esc.RequestSize)
if esc.IndexName == "" {
esc.IndexName = default_metrics_metadata_index
}
// Without a date format, use static aliases; otherwise index names are
// derived per date in getIndicesNames.
if esc.IndexDateFmt == "" {
esc.DirectoriesIndexAlias = fmt.Sprintf("%s_%s", esc.IndexName, directories_index_suffix)
esc.MetricsIndexAlias = fmt.Sprintf("%s_%s", esc.IndexName, metrics_index_suffix)
}
esc.KnownIndices = map[string]bool{}
return &esc
}
// createElasticSearchClient builds a go-elasticsearch client for the given
// servers/credentials and verifies connectivity with an Info request.
//
// Errors are returned to the caller instead of calling log.Fatalf here:
// a helper that already returns (client, error) should not terminate the
// process itself (NewBgMetadataElasticSearchConnectorWithDefaults still
// aborts on a returned error, so the end behavior is unchanged).
func createElasticSearchClient(servers []string, username, password string, allow_insecure_tls bool) (*elasticsearch.Client, error) {
	cfg := elasticsearch.Config{
		Addresses: servers,
		Username:  username,
		Password:  password,
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{
				// Honours the operator's explicit opt-in to self-signed certs.
				InsecureSkipVerify: allow_insecure_tls,
			},
		},
	}
	es, err := elasticsearch.NewClient(cfg)
	if err != nil {
		return nil, fmt.Errorf("error creating the ElasticSearch client: %w", err)
	}
	if _, err = es.Info(); err != nil {
		return nil, fmt.Errorf("error getting ElasticSearch information response: %w", err)
	}
	return es, nil
}
// NewBgMetadataElasticSearchConnectorWithDefaults is the public constructor
// of BgMetadataElasticSearchConnector. It aborts the process when the
// ElasticSearch client cannot be created or reached.
func NewBgMetadataElasticSearchConnectorWithDefaults(cfg *cfg.BgMetadataESConfig) *BgMetadataElasticSearchConnector {
es, err := createElasticSearchClient(cfg.StorageServers, cfg.Username, cfg.Password, cfg.AllowInsecureTLS)
if err != nil {
log.Fatalf("Could not create ElasticSearch connector: %v", err)
}
return newBgMetadataElasticSearchConnector(es, prometheus.DefaultRegisterer, cfg.BulkSize, cfg.MaxRetry, cfg.IndexName, cfg.IndexDateFmt)
}
// Close is a no-op.
// NOTE(review): a partially filled bulk buffer is not flushed here, so
// buffered documents appear to be dropped on shutdown — confirm whether a
// final sendAndClearBuffer is intended.
func (esc *BgMetadataElasticSearchConnector) Close() {
}
// createIndicesAndMapping creates the metric and directory indices and
// installs their document mappings.
//
// Fixes over the previous version: the error from the index-create request
// was ignored and then overwritten, and no response body was ever closed
// (leaking HTTP connections). Transport errors are now returned; a non-OK
// HTTP status on index creation is still tolerated, preserving the previous
// behavior (the index typically already exists).
func (esc *BgMetadataElasticSearchConnector) createIndicesAndMapping(metricIndexName, directoryIndexName string) error {
	indices := []struct{ name, mapping string }{{metricIndexName, metricsMapping}, {directoryIndexName, dirMapping}}
	for _, index := range indices {
		indexCreateRequest := esapi.IndicesCreateRequest{Index: index.name}
		res, err := indexCreateRequest.Do(context.Background(), esc.client)
		if err != nil {
			return fmt.Errorf("Could not create ElasticSearch index: %w", err)
		}
		// Non-OK status (e.g. index already exists) is intentionally ignored.
		res.Body.Close()
		esc.logger.Info("using index", zap.String("name", index.name))
		r := strings.NewReader(index.mapping)
		request := esapi.IndicesPutMappingRequest{Index: []string{index.name}, Body: r, DocumentType: documentType}
		res, err = request.Do(context.Background(), esc.client)
		if err != nil {
			return fmt.Errorf("Could not set ElasticSearch mapping: %w", err)
		}
		if res.StatusCode != http.StatusOK {
			errorMessage, _ := ioutil.ReadAll(res.Body)
			res.Body.Close()
			return fmt.Errorf("Could not set ElasticSearch mapping (status %d, error: %s)", res.StatusCode, errorMessage)
		}
		res.Body.Close()
	}
	return nil
}
// UpdateMetricMetadata queues the metric document in the shared buffer; a
// bulk update is triggered once the buffer reaches full capacity.
// Thread-safe (delegates to addDocumentToBuff, which locks esc.mux).
func (esc *BgMetadataElasticSearchConnector) UpdateMetricMetadata(metric *Metric) error {
return esc.addDocumentToBuff(metric)
}
// addDocumentToBuff appends doc to the bulk buffer under esc.mux and flushes
// it via sendAndClearBuffer once the buffer reaches capacity.
//
// The flush error is now propagated to the caller instead of being silently
// discarded — the function already declared an error return, and
// sendAndClearBuffer reports real write failures.
func (esc *BgMetadataElasticSearchConnector) addDocumentToBuff(doc ElasticSearchDocument) error {
	esc.mux.Lock()
	defer esc.mux.Unlock()
	esc.bulkBuffer = append(esc.bulkBuffer, doc)
	if len(esc.bulkBuffer) == cap(esc.bulkBuffer) {
		return esc.sendAndClearBuffer()
	}
	return nil
}
// sendAndClearBuffer bulk-writes the buffered documents to ElasticSearch,
// retrying up to MaxRetry times, and always clears the buffer afterwards
// (documents are dropped rather than retried forever). Caller must hold
// esc.mux.
//
// Fix: the early failure path called WithLabelValues("failure") with a
// single label, but UpdatedDocuments is declared with two labels
// ("status", "type") — prometheus' WithLabelValues panics on a label-count
// mismatch. It now passes ("failure", "any") like the other failure paths.
func (esc *BgMetadataElasticSearchConnector) sendAndClearBuffer() error {
	defer esc.clearBuffer()
	metricIndex, directoryIndex, err := esc.getIndices()
	var errorMessage []byte
	var statusCode int
	if err != nil {
		esc.UpdatedDocuments.WithLabelValues("failure", "any").Add(float64(len(esc.bulkBuffer)))
		return fmt.Errorf("Could not get index: %w", err)
	}
	timeBeforeBuild := time.Now()
	requestBody := BuildElasticSearchDocumentMulti(metricIndex, directoryIndex, esc.bulkBuffer)
	esc.DocumentBuildDurationMs.Observe(float64(time.Since(timeBeforeBuild).Milliseconds()))
	esc.RequestSize.Observe(float64(len(requestBody)))
	for attempt := uint(0); attempt <= esc.MaxRetry; attempt++ {
		res, err := esc.bulkUpdate(requestBody)
		if err != nil {
			// esapi returns a nil body in case of error
			esc.UpdatedDocuments.WithLabelValues("failure", "any").Add(float64(len(esc.bulkBuffer)))
			return fmt.Errorf("Could not write to index: %w", err)
		}
		if !res.IsError() {
			esc.updateInternalMetrics(res)
			res.Body.Close()
			return nil
		} else {
			esc.HTTPErrors.WithLabelValues(strconv.Itoa(res.StatusCode)).Inc()
			statusCode = res.StatusCode
			errorMessage, _ = ioutil.ReadAll(res.Body)
			res.Body.Close()
		}
	}
	esc.UpdatedDocuments.WithLabelValues("failure", "any").Add(float64(len(esc.bulkBuffer)))
	return fmt.Errorf("Could not write to index (status %d, error: %s)", statusCode, errorMessage)
}
// updateInternalMetrics parses the bulk response body and increments the
// write-duration and created-document metrics. A malformed response is
// logged and otherwise ignored: the deferred recover catches the type
// assertions below failing on unexpected JSON shapes.
// NOTE(review): err.(error) assumes the recovered value implements error —
// a non-error panic value would itself panic inside the deferred func;
// confirm.
func (esc *BgMetadataElasticSearchConnector) updateInternalMetrics(res *esapi.Response) {
defer func() {
if err := recover(); err != nil {
esc.logger.Warn("malformed bulk response", zap.Error(err.(error)))
}
}()
var mapResp map[string]interface{}
// Decode error is ignored here; a nil/partial map is caught by the
// recover above when the assertions fail.
json.NewDecoder(res.Body).Decode(&mapResp)
esc.WriteDurationMs.Observe(mapResp["took"].(float64))
for _, item := range mapResp["items"].([]interface{}) {
mapCreate := item.(map[string]interface{})["create"].(map[string]interface{})
// protected by esc.Mux currentIndex may not change while looping
if int(mapCreate["status"].(float64)) == http.StatusCreated {
// Documents in the current metric index count as metrics; all other
// created documents are counted as directories.
if strings.HasPrefix(mapCreate["_index"].(string), esc.currentIndex) {
esc.UpdatedDocuments.WithLabelValues("created", "metric").Inc()
} else {
esc.UpdatedDocuments.WithLabelValues("created", "directory").Inc()
}
}
}
}
func (esc *BgMetadataElasticSearchConnector) clearBuffer() error {
esc.bulkBuffer = esc.bulkBuffer[:0]
return nil | }
// bulkUpdate submits the given bulk payload to ElasticSearch and hands the
// raw esapi response back for the caller to inspect and close.
func (esc *BgMetadataElasticSearchConnector) bulkUpdate(body string) (*esapi.Response, error) {
	request := esapi.BulkRequest{
		Body:         strings.NewReader(body),
		DocumentType: documentType,
	}
	return request.Do(context.Background(), esc.client)
}
// getIndices resolves the current metric/directory index names and lazily
// creates the indices (with mappings) the first time a given name is seen.
// Caller must hold esc.mux (writes esc.currentIndex and esc.KnownIndices).
func (esc *BgMetadataElasticSearchConnector) getIndices() (string, string, error) {
metricIndexName, directoryIndexName := esc.getIndicesNames()
esc.currentIndex = metricIndexName
_, isKnownIndex := esc.KnownIndices[metricIndexName]
// no need to test both
if !isKnownIndex {
err := esc.createIndicesAndMapping(metricIndexName, directoryIndexName)
if err != nil {
return "", "", err
}
esc.KnownIndices[metricIndexName] = true
}
return metricIndexName, directoryIndexName, nil
}
// InsertDirectory queues a directory document in the bulk buffer.
// The buffering/flush error is now propagated instead of being discarded,
// matching UpdateMetricMetadata (the error return already existed).
func (esc *BgMetadataElasticSearchConnector) InsertDirectory(dir *MetricDirectory) error {
	return esc.addDocumentToBuff(dir)
}
// SelectDirectory unused, no need in ES
// returns an error to signal that parent dir does not exist
// NOTE(review): the error is intentionally non-nil but carries an empty
// message; callers presumably only test err != nil — confirm.
func (esc *BgMetadataElasticSearchConnector) SelectDirectory(dir string) (string, error) {
return dir, fmt.Errorf("")
}
func (esc *BgMetadataElasticSearchConnector) getIndicesNames() (metricIndexName, directoryIndexName string) {
if len(esc.DirectoriesIndexAlias) > 0 && len(esc.MetricsIndexAlias) > 0 {
return esc.MetricsIndexAlias, esc.DirectoriesIndexAlias
}
now := time.Now().UTC()
date, err := strftime.Format(esc.IndexDateFmt, now)
if err != nil {
log.Fatalf("Index date format invalid strftime format: %s", err)
}
metricIndexName = fmt.Sprintf("%s_%s_%s", esc.IndexName, metrics_index_suffix, date)
directoryIndexName = fmt.Sprintf("%s_%s_%s", esc.IndexName, directories_index_suffix, date)
return
} | random_line_split | |
elasticsearch_metadata.go | package storage
import (
"context"
"crypto/tls"
"encoding/json"
"fmt"
"io/ioutil"
"log"
"net/http"
"strconv"
"strings"
"sync"
"time"
"github.com/elastic/go-elasticsearch/v6"
"github.com/elastic/go-elasticsearch/v6/esapi"
"github.com/graphite-ng/carbon-relay-ng/cfg"
"github.com/lestrrat-go/strftime"
"github.com/prometheus/client_golang/prometheus"
"go.uber.org/zap"
)
const (
namespace = "elasticsearch"
default_metrics_metadata_index = "biggraphite"
default_index_date_format = "%Y-%m-%d"
directories_index_suffix = "directories"
metrics_index_suffix = "metrics"
metricsMapping = `
{
"_doc": {
"properties": {
"depth": {
"type": "long"
},
"name": {
"type": "keyword",
"ignore_above": 1024
},
"uuid": {
"type": "keyword"
},
"config": {
"type": "object"
}
},
"dynamic_templates": [
{
"strings_as_keywords": {
"match": "p*",
"match_mapping_type": "string",
"mapping": {
"type": "keyword",
"ignore_above": 256,
"ignore_malformed": true
}
}
}
]
}
}
`
dirMapping = `
{
"_doc": {
"properties": {
"depth": {
"type": "long"
},
"name": {
"type": "keyword",
"ignore_above": 1024
},
"uuid": {
"type": "keyword"
},
"parent": {
"type": "keyword"
}
},
"dynamic_templates": [
{
"strings_as_keywords": {
"match": "p*",
"match_mapping_type": "string",
"mapping": {
"type": "keyword",
"ignore_above": 256,
"ignore_malformed": true
}
}
}
]
}
}
`
documentType = "_doc"
)
// BgMetadataElasticSearchConnector buffers metric/directory metadata
// documents and bulk-writes them to ElasticSearch, exporting Prometheus
// metrics about the writes.
type BgMetadataElasticSearchConnector struct {
client ElasticSearchClient
UpdatedDocuments *prometheus.CounterVec
HTTPErrors *prometheus.CounterVec
WriteDurationMs prometheus.Histogram
DocumentBuildDurationMs prometheus.Histogram
RequestSize prometheus.Histogram
// KnownIndices caches index names whose mapping has already been created.
KnownIndices map[string]bool
// bulkBuffer accumulates documents until BulkSize is reached; guarded by mux.
bulkBuffer []ElasticSearchDocument
BulkSize uint
mux sync.Mutex
MaxRetry uint
// currentIndex is the metric index targeted by the in-flight bulk write.
IndexName, currentIndex string
IndexDateFmt string //strftime fmt string
DirectoriesIndexAlias, MetricsIndexAlias string
logger *zap.Logger
}
// ElasticSearchClient abstracts the transport used by esapi requests,
// allowing the real go-elasticsearch client to be swapped out in tests.
type ElasticSearchClient interface {
Perform(*http.Request) (*http.Response, error)
}
// newBgMetadataElasticSearchConnector is the internal constructor for
// BgMetadataElasticSearchConnector; it wires up and registers the Prometheus
// metrics and applies index-name defaults.
func newBgMetadataElasticSearchConnector(elasticSearchClient ElasticSearchClient, registry prometheus.Registerer, bulkSize, maxRetry uint, indexName, IndexDateFmt string) *BgMetadataElasticSearchConnector {
var esc = BgMetadataElasticSearchConnector{
client: elasticSearchClient,
BulkSize: bulkSize,
bulkBuffer: make([]ElasticSearchDocument, 0, bulkSize),
MaxRetry: maxRetry,
IndexName: indexName,
IndexDateFmt: IndexDateFmt,
UpdatedDocuments: prometheus.NewCounterVec(prometheus.CounterOpts{
Namespace: namespace,
Name: "updated_documents",
Help: "total number of documents updated in ElasticSearch splited between metrics and directories",
}, []string{"status", "type"}),
HTTPErrors: prometheus.NewCounterVec(prometheus.CounterOpts{
Namespace: namespace,
Name: "http_errors",
Help: "total number of http errors encountered partitionned by status code",
}, []string{"code"}),
WriteDurationMs: prometheus.NewHistogram(prometheus.HistogramOpts{
Namespace: namespace,
Name: "write_duration_ms",
Help: "time spent writing to ElasticSearch based on `took` field of response ",
Buckets: []float64{250, 500, 750, 1000, 1500, 2000, 5000, 10000}}),
RequestSize: prometheus.NewHistogram(prometheus.HistogramOpts{
Namespace: namespace,
Name: "write_request_size_bytes",
Help: "Size of batch create requests performed on elasticsearch",
Buckets: []float64{10000, 100000, 1000000, 5000000, 10000000, 20000000, 50000000}}),
DocumentBuildDurationMs: prometheus.NewHistogram(prometheus.HistogramOpts{
Namespace: namespace,
Name: "document_build_duration_ms",
Help: "time spent building an ElasticSearch document",
Buckets: []float64{1, 5, 10, 50, 100, 250, 500, 750, 1000, 2000}}),
logger: zap.L(),
}
// Registration errors are deliberately ignored (e.g. duplicate registration
// against a shared registry).
_ = registry.Register(esc.UpdatedDocuments)
_ = registry.Register(esc.WriteDurationMs)
_ = registry.Register(esc.DocumentBuildDurationMs)
_ = registry.Register(esc.HTTPErrors)
_ = registry.Register(esc.RequestSize)
if esc.IndexName == "" {
esc.IndexName = default_metrics_metadata_index
}
// Without a date format, use static aliases; otherwise index names are
// derived per date in getIndicesNames.
if esc.IndexDateFmt == "" {
esc.DirectoriesIndexAlias = fmt.Sprintf("%s_%s", esc.IndexName, directories_index_suffix)
esc.MetricsIndexAlias = fmt.Sprintf("%s_%s", esc.IndexName, metrics_index_suffix)
}
esc.KnownIndices = map[string]bool{}
return &esc
}
func createElasticSearchClient(servers []string, username, password string, allow_insecure_tls bool) (*elasticsearch.Client, error) |
// NewBgMetadataElasticSearchConnectorWithDefaults is the public constructor
// of BgMetadataElasticSearchConnector. It aborts the process when the
// ElasticSearch client cannot be created or reached.
func NewBgMetadataElasticSearchConnectorWithDefaults(cfg *cfg.BgMetadataESConfig) *BgMetadataElasticSearchConnector {
es, err := createElasticSearchClient(cfg.StorageServers, cfg.Username, cfg.Password, cfg.AllowInsecureTLS)
if err != nil {
log.Fatalf("Could not create ElasticSearch connector: %v", err)
}
return newBgMetadataElasticSearchConnector(es, prometheus.DefaultRegisterer, cfg.BulkSize, cfg.MaxRetry, cfg.IndexName, cfg.IndexDateFmt)
}
func (esc *BgMetadataElasticSearchConnector) Close() {
}
func (esc *BgMetadataElasticSearchConnector) createIndicesAndMapping(metricIndexName, directoryIndexName string) error {
indices := []struct{ name, mapping string }{{metricIndexName, metricsMapping}, {directoryIndexName, dirMapping}}
for _, index := range indices {
indexCreateRequest := esapi.IndicesCreateRequest{Index: index.name}
res, err := indexCreateRequest.Do(context.Background(), esc.client)
esc.logger.Info("using index", zap.String("name", index.name))
// extract TODO error deserialize
r := strings.NewReader(index.mapping)
request := esapi.IndicesPutMappingRequest{Index: []string{index.name}, Body: r, DocumentType: documentType}
res, err = request.Do(context.Background(), esc.client)
if err != nil {
return fmt.Errorf("Could not set ElasticSearch mapping: %w", err)
}
if res.StatusCode != http.StatusOK {
errorMessage, _ := ioutil.ReadAll(res.Body)
return fmt.Errorf("Could not set ElasticSearch mapping (status %d, error: %s)", res.StatusCode, errorMessage)
}
}
return nil
}
// UpdateMetricMetadata stores the metric in a buffer, will bulkupdate when at full cap
// threadsafe
func (esc *BgMetadataElasticSearchConnector) UpdateMetricMetadata(metric *Metric) error {
return esc.addDocumentToBuff(metric)
}
func (esc *BgMetadataElasticSearchConnector) addDocumentToBuff(doc ElasticSearchDocument) error {
esc.mux.Lock()
defer esc.mux.Unlock()
esc.bulkBuffer = append(esc.bulkBuffer, doc)
if len(esc.bulkBuffer) == cap(esc.bulkBuffer) {
esc.sendAndClearBuffer()
}
return nil
}
func (esc *BgMetadataElasticSearchConnector) sendAndClearBuffer() error {
defer esc.clearBuffer()
metricIndex, directoryIndex, err := esc.getIndices()
var errorMessage []byte
var statusCode int
if err != nil {
esc.UpdatedDocuments.WithLabelValues("failure").Add(float64(len(esc.bulkBuffer)))
return fmt.Errorf("Could not get index: %w", err)
}
timeBeforeBuild := time.Now()
requestBody := BuildElasticSearchDocumentMulti(metricIndex, directoryIndex, esc.bulkBuffer)
esc.DocumentBuildDurationMs.Observe(float64(time.Since(timeBeforeBuild).Milliseconds()))
esc.RequestSize.Observe(float64(len(requestBody)))
for attempt := uint(0); attempt <= esc.MaxRetry; attempt++ {
res, err := esc.bulkUpdate(requestBody)
if err != nil {
// esapi resturns a nil body in case of error
esc.UpdatedDocuments.WithLabelValues("failure", "any").Add(float64(len(esc.bulkBuffer)))
return fmt.Errorf("Could not write to index: %w", err)
}
if !res.IsError() {
esc.updateInternalMetrics(res)
res.Body.Close()
return nil
} else {
esc.HTTPErrors.WithLabelValues(strconv.Itoa(res.StatusCode)).Inc()
statusCode = res.StatusCode
errorMessage, _ = ioutil.ReadAll(res.Body)
res.Body.Close()
}
}
esc.UpdatedDocuments.WithLabelValues("failure", "any").Add(float64(len(esc.bulkBuffer)))
return fmt.Errorf("Could not write to index (status %d, error: %s)", statusCode, errorMessage)
}
// updateInternalMetrics increments BGMetadataConnector's metrics,
func (esc *BgMetadataElasticSearchConnector) updateInternalMetrics(res *esapi.Response) {
defer func() {
if err := recover(); err != nil {
esc.logger.Warn("malformed bulk response", zap.Error(err.(error)))
}
}()
var mapResp map[string]interface{}
json.NewDecoder(res.Body).Decode(&mapResp)
esc.WriteDurationMs.Observe(mapResp["took"].(float64))
for _, item := range mapResp["items"].([]interface{}) {
mapCreate := item.(map[string]interface{})["create"].(map[string]interface{})
// protected by esc.Mux currentIndex may not change while looping
if int(mapCreate["status"].(float64)) == http.StatusCreated {
if strings.HasPrefix(mapCreate["_index"].(string), esc.currentIndex) {
esc.UpdatedDocuments.WithLabelValues("created", "metric").Inc()
} else {
esc.UpdatedDocuments.WithLabelValues("created", "directory").Inc()
}
}
}
}
func (esc *BgMetadataElasticSearchConnector) clearBuffer() error {
esc.bulkBuffer = esc.bulkBuffer[:0]
return nil
}
func (esc *BgMetadataElasticSearchConnector) bulkUpdate(body string) (*esapi.Response, error) {
req := esapi.BulkRequest{
Body: strings.NewReader(body),
DocumentType: documentType,
}
res, err := req.Do(context.Background(), esc.client)
return res, err
}
func (esc *BgMetadataElasticSearchConnector) getIndices() (string, string, error) {
metricIndexName, directoryIndexName := esc.getIndicesNames()
esc.currentIndex = metricIndexName
_, isKnownIndex := esc.KnownIndices[metricIndexName]
// no need to test both
if !isKnownIndex {
err := esc.createIndicesAndMapping(metricIndexName, directoryIndexName)
if err != nil {
return "", "", err
}
esc.KnownIndices[metricIndexName] = true
}
return metricIndexName, directoryIndexName, nil
}
// InsertDirectory will add directory to the bulkBuffer
func (esc *BgMetadataElasticSearchConnector) InsertDirectory(dir *MetricDirectory) error {
esc.addDocumentToBuff(dir)
return nil
}
// SelectDirectory unused, no need in ES
// returns an error to signal that parent dir does not exist
func (esc *BgMetadataElasticSearchConnector) SelectDirectory(dir string) (string, error) {
return dir, fmt.Errorf("")
}
func (esc *BgMetadataElasticSearchConnector) getIndicesNames() (metricIndexName, directoryIndexName string) {
if len(esc.DirectoriesIndexAlias) > 0 && len(esc.MetricsIndexAlias) > 0 {
return esc.MetricsIndexAlias, esc.DirectoriesIndexAlias
}
now := time.Now().UTC()
date, err := strftime.Format(esc.IndexDateFmt, now)
if err != nil {
log.Fatalf("Index date format invalid strftime format: %s", err)
}
metricIndexName = fmt.Sprintf("%s_%s_%s", esc.IndexName, metrics_index_suffix, date)
directoryIndexName = fmt.Sprintf("%s_%s_%s", esc.IndexName, directories_index_suffix, date)
return
}
| {
cfg := elasticsearch.Config{
Addresses: servers,
Username: username,
Password: password,
Transport: &http.Transport{
TLSClientConfig: &tls.Config{
InsecureSkipVerify: allow_insecure_tls,
},
},
}
es, err := elasticsearch.NewClient(cfg)
if err != nil {
log.Fatalf("Error creating the ElasticSearch client: %s", err)
}
_, err = es.Info()
if err != nil {
log.Fatalf("Error getting ElasticSearch information response: %s", err)
}
return es, err
} | identifier_body |
elasticsearch_metadata.go | package storage
import (
"context"
"crypto/tls"
"encoding/json"
"fmt"
"io/ioutil"
"log"
"net/http"
"strconv"
"strings"
"sync"
"time"
"github.com/elastic/go-elasticsearch/v6"
"github.com/elastic/go-elasticsearch/v6/esapi"
"github.com/graphite-ng/carbon-relay-ng/cfg"
"github.com/lestrrat-go/strftime"
"github.com/prometheus/client_golang/prometheus"
"go.uber.org/zap"
)
const (
namespace = "elasticsearch"
default_metrics_metadata_index = "biggraphite"
default_index_date_format = "%Y-%m-%d"
directories_index_suffix = "directories"
metrics_index_suffix = "metrics"
metricsMapping = `
{
"_doc": {
"properties": {
"depth": {
"type": "long"
},
"name": {
"type": "keyword",
"ignore_above": 1024
},
"uuid": {
"type": "keyword"
},
"config": {
"type": "object"
}
},
"dynamic_templates": [
{
"strings_as_keywords": {
"match": "p*",
"match_mapping_type": "string",
"mapping": {
"type": "keyword",
"ignore_above": 256,
"ignore_malformed": true
}
}
}
]
}
}
`
dirMapping = `
{
"_doc": {
"properties": {
"depth": {
"type": "long"
},
"name": {
"type": "keyword",
"ignore_above": 1024
},
"uuid": {
"type": "keyword"
},
"parent": {
"type": "keyword"
}
},
"dynamic_templates": [
{
"strings_as_keywords": {
"match": "p*",
"match_mapping_type": "string",
"mapping": {
"type": "keyword",
"ignore_above": 256,
"ignore_malformed": true
}
}
}
]
}
}
`
documentType = "_doc"
)
type BgMetadataElasticSearchConnector struct {
client ElasticSearchClient
UpdatedDocuments *prometheus.CounterVec
HTTPErrors *prometheus.CounterVec
WriteDurationMs prometheus.Histogram
DocumentBuildDurationMs prometheus.Histogram
RequestSize prometheus.Histogram
KnownIndices map[string]bool
bulkBuffer []ElasticSearchDocument
BulkSize uint
mux sync.Mutex
MaxRetry uint
IndexName, currentIndex string
IndexDateFmt string //strftime fmt string
DirectoriesIndexAlias, MetricsIndexAlias string
logger *zap.Logger
}
type ElasticSearchClient interface {
Perform(*http.Request) (*http.Response, error)
}
// NewBgMetadataElasticSearchConnector : contructor for BgMetadataElasticSearchConnector
func newBgMetadataElasticSearchConnector(elasticSearchClient ElasticSearchClient, registry prometheus.Registerer, bulkSize, maxRetry uint, indexName, IndexDateFmt string) *BgMetadataElasticSearchConnector {
var esc = BgMetadataElasticSearchConnector{
client: elasticSearchClient,
BulkSize: bulkSize,
bulkBuffer: make([]ElasticSearchDocument, 0, bulkSize),
MaxRetry: maxRetry,
IndexName: indexName,
IndexDateFmt: IndexDateFmt,
UpdatedDocuments: prometheus.NewCounterVec(prometheus.CounterOpts{
Namespace: namespace,
Name: "updated_documents",
Help: "total number of documents updated in ElasticSearch splited between metrics and directories",
}, []string{"status", "type"}),
HTTPErrors: prometheus.NewCounterVec(prometheus.CounterOpts{
Namespace: namespace,
Name: "http_errors",
Help: "total number of http errors encountered partitionned by status code",
}, []string{"code"}),
WriteDurationMs: prometheus.NewHistogram(prometheus.HistogramOpts{
Namespace: namespace,
Name: "write_duration_ms",
Help: "time spent writing to ElasticSearch based on `took` field of response ",
Buckets: []float64{250, 500, 750, 1000, 1500, 2000, 5000, 10000}}),
RequestSize: prometheus.NewHistogram(prometheus.HistogramOpts{
Namespace: namespace,
Name: "write_request_size_bytes",
Help: "Size of batch create requests performed on elasticsearch",
Buckets: []float64{10000, 100000, 1000000, 5000000, 10000000, 20000000, 50000000}}),
DocumentBuildDurationMs: prometheus.NewHistogram(prometheus.HistogramOpts{
Namespace: namespace,
Name: "document_build_duration_ms",
Help: "time spent building an ElasticSearch document",
Buckets: []float64{1, 5, 10, 50, 100, 250, 500, 750, 1000, 2000}}),
logger: zap.L(),
}
_ = registry.Register(esc.UpdatedDocuments)
_ = registry.Register(esc.WriteDurationMs)
_ = registry.Register(esc.DocumentBuildDurationMs)
_ = registry.Register(esc.HTTPErrors)
_ = registry.Register(esc.RequestSize)
if esc.IndexName == "" {
esc.IndexName = default_metrics_metadata_index
}
if esc.IndexDateFmt == "" {
esc.DirectoriesIndexAlias = fmt.Sprintf("%s_%s", esc.IndexName, directories_index_suffix)
esc.MetricsIndexAlias = fmt.Sprintf("%s_%s", esc.IndexName, metrics_index_suffix)
}
esc.KnownIndices = map[string]bool{}
return &esc
}
func createElasticSearchClient(servers []string, username, password string, allow_insecure_tls bool) (*elasticsearch.Client, error) {
cfg := elasticsearch.Config{
Addresses: servers,
Username: username,
Password: password,
Transport: &http.Transport{
TLSClientConfig: &tls.Config{
InsecureSkipVerify: allow_insecure_tls,
},
},
}
es, err := elasticsearch.NewClient(cfg)
if err != nil {
log.Fatalf("Error creating the ElasticSearch client: %s", err)
}
_, err = es.Info()
if err != nil {
log.Fatalf("Error getting ElasticSearch information response: %s", err)
}
return es, err
}
// NewBgMetadataElasticSearchConnectorWithDefaults is the public contructor of BgMetadataElasticSearchConnector
func NewBgMetadataElasticSearchConnectorWithDefaults(cfg *cfg.BgMetadataESConfig) *BgMetadataElasticSearchConnector {
es, err := createElasticSearchClient(cfg.StorageServers, cfg.Username, cfg.Password, cfg.AllowInsecureTLS)
if err != nil {
log.Fatalf("Could not create ElasticSearch connector: %v", err)
}
return newBgMetadataElasticSearchConnector(es, prometheus.DefaultRegisterer, cfg.BulkSize, cfg.MaxRetry, cfg.IndexName, cfg.IndexDateFmt)
}
func (esc *BgMetadataElasticSearchConnector) Close() {
}
func (esc *BgMetadataElasticSearchConnector) createIndicesAndMapping(metricIndexName, directoryIndexName string) error {
indices := []struct{ name, mapping string }{{metricIndexName, metricsMapping}, {directoryIndexName, dirMapping}}
for _, index := range indices {
indexCreateRequest := esapi.IndicesCreateRequest{Index: index.name}
res, err := indexCreateRequest.Do(context.Background(), esc.client)
esc.logger.Info("using index", zap.String("name", index.name))
// extract TODO error deserialize
r := strings.NewReader(index.mapping)
request := esapi.IndicesPutMappingRequest{Index: []string{index.name}, Body: r, DocumentType: documentType}
res, err = request.Do(context.Background(), esc.client)
if err != nil {
return fmt.Errorf("Could not set ElasticSearch mapping: %w", err)
}
if res.StatusCode != http.StatusOK {
errorMessage, _ := ioutil.ReadAll(res.Body)
return fmt.Errorf("Could not set ElasticSearch mapping (status %d, error: %s)", res.StatusCode, errorMessage)
}
}
return nil
}
// UpdateMetricMetadata stores the metric in a buffer, will bulkupdate when at full cap
// threadsafe
func (esc *BgMetadataElasticSearchConnector) UpdateMetricMetadata(metric *Metric) error {
return esc.addDocumentToBuff(metric)
}
func (esc *BgMetadataElasticSearchConnector) | (doc ElasticSearchDocument) error {
esc.mux.Lock()
defer esc.mux.Unlock()
esc.bulkBuffer = append(esc.bulkBuffer, doc)
if len(esc.bulkBuffer) == cap(esc.bulkBuffer) {
esc.sendAndClearBuffer()
}
return nil
}
func (esc *BgMetadataElasticSearchConnector) sendAndClearBuffer() error {
defer esc.clearBuffer()
metricIndex, directoryIndex, err := esc.getIndices()
var errorMessage []byte
var statusCode int
if err != nil {
esc.UpdatedDocuments.WithLabelValues("failure").Add(float64(len(esc.bulkBuffer)))
return fmt.Errorf("Could not get index: %w", err)
}
timeBeforeBuild := time.Now()
requestBody := BuildElasticSearchDocumentMulti(metricIndex, directoryIndex, esc.bulkBuffer)
esc.DocumentBuildDurationMs.Observe(float64(time.Since(timeBeforeBuild).Milliseconds()))
esc.RequestSize.Observe(float64(len(requestBody)))
for attempt := uint(0); attempt <= esc.MaxRetry; attempt++ {
res, err := esc.bulkUpdate(requestBody)
if err != nil {
// esapi resturns a nil body in case of error
esc.UpdatedDocuments.WithLabelValues("failure", "any").Add(float64(len(esc.bulkBuffer)))
return fmt.Errorf("Could not write to index: %w", err)
}
if !res.IsError() {
esc.updateInternalMetrics(res)
res.Body.Close()
return nil
} else {
esc.HTTPErrors.WithLabelValues(strconv.Itoa(res.StatusCode)).Inc()
statusCode = res.StatusCode
errorMessage, _ = ioutil.ReadAll(res.Body)
res.Body.Close()
}
}
esc.UpdatedDocuments.WithLabelValues("failure", "any").Add(float64(len(esc.bulkBuffer)))
return fmt.Errorf("Could not write to index (status %d, error: %s)", statusCode, errorMessage)
}
// updateInternalMetrics increments BGMetadataConnector's metrics,
func (esc *BgMetadataElasticSearchConnector) updateInternalMetrics(res *esapi.Response) {
defer func() {
if err := recover(); err != nil {
esc.logger.Warn("malformed bulk response", zap.Error(err.(error)))
}
}()
var mapResp map[string]interface{}
json.NewDecoder(res.Body).Decode(&mapResp)
esc.WriteDurationMs.Observe(mapResp["took"].(float64))
for _, item := range mapResp["items"].([]interface{}) {
mapCreate := item.(map[string]interface{})["create"].(map[string]interface{})
// protected by esc.Mux currentIndex may not change while looping
if int(mapCreate["status"].(float64)) == http.StatusCreated {
if strings.HasPrefix(mapCreate["_index"].(string), esc.currentIndex) {
esc.UpdatedDocuments.WithLabelValues("created", "metric").Inc()
} else {
esc.UpdatedDocuments.WithLabelValues("created", "directory").Inc()
}
}
}
}
func (esc *BgMetadataElasticSearchConnector) clearBuffer() error {
esc.bulkBuffer = esc.bulkBuffer[:0]
return nil
}
func (esc *BgMetadataElasticSearchConnector) bulkUpdate(body string) (*esapi.Response, error) {
req := esapi.BulkRequest{
Body: strings.NewReader(body),
DocumentType: documentType,
}
res, err := req.Do(context.Background(), esc.client)
return res, err
}
func (esc *BgMetadataElasticSearchConnector) getIndices() (string, string, error) {
metricIndexName, directoryIndexName := esc.getIndicesNames()
esc.currentIndex = metricIndexName
_, isKnownIndex := esc.KnownIndices[metricIndexName]
// no need to test both
if !isKnownIndex {
err := esc.createIndicesAndMapping(metricIndexName, directoryIndexName)
if err != nil {
return "", "", err
}
esc.KnownIndices[metricIndexName] = true
}
return metricIndexName, directoryIndexName, nil
}
// InsertDirectory will add directory to the bulkBuffer
func (esc *BgMetadataElasticSearchConnector) InsertDirectory(dir *MetricDirectory) error {
esc.addDocumentToBuff(dir)
return nil
}
// SelectDirectory unused, no need in ES
// returns an error to signal that parent dir does not exist
func (esc *BgMetadataElasticSearchConnector) SelectDirectory(dir string) (string, error) {
return dir, fmt.Errorf("")
}
func (esc *BgMetadataElasticSearchConnector) getIndicesNames() (metricIndexName, directoryIndexName string) {
if len(esc.DirectoriesIndexAlias) > 0 && len(esc.MetricsIndexAlias) > 0 {
return esc.MetricsIndexAlias, esc.DirectoriesIndexAlias
}
now := time.Now().UTC()
date, err := strftime.Format(esc.IndexDateFmt, now)
if err != nil {
log.Fatalf("Index date format invalid strftime format: %s", err)
}
metricIndexName = fmt.Sprintf("%s_%s_%s", esc.IndexName, metrics_index_suffix, date)
directoryIndexName = fmt.Sprintf("%s_%s_%s", esc.IndexName, directories_index_suffix, date)
return
}
| addDocumentToBuff | identifier_name |
elasticsearch_metadata.go | package storage
import (
"context"
"crypto/tls"
"encoding/json"
"fmt"
"io/ioutil"
"log"
"net/http"
"strconv"
"strings"
"sync"
"time"
"github.com/elastic/go-elasticsearch/v6"
"github.com/elastic/go-elasticsearch/v6/esapi"
"github.com/graphite-ng/carbon-relay-ng/cfg"
"github.com/lestrrat-go/strftime"
"github.com/prometheus/client_golang/prometheus"
"go.uber.org/zap"
)
const (
namespace = "elasticsearch"
default_metrics_metadata_index = "biggraphite"
default_index_date_format = "%Y-%m-%d"
directories_index_suffix = "directories"
metrics_index_suffix = "metrics"
metricsMapping = `
{
"_doc": {
"properties": {
"depth": {
"type": "long"
},
"name": {
"type": "keyword",
"ignore_above": 1024
},
"uuid": {
"type": "keyword"
},
"config": {
"type": "object"
}
},
"dynamic_templates": [
{
"strings_as_keywords": {
"match": "p*",
"match_mapping_type": "string",
"mapping": {
"type": "keyword",
"ignore_above": 256,
"ignore_malformed": true
}
}
}
]
}
}
`
dirMapping = `
{
"_doc": {
"properties": {
"depth": {
"type": "long"
},
"name": {
"type": "keyword",
"ignore_above": 1024
},
"uuid": {
"type": "keyword"
},
"parent": {
"type": "keyword"
}
},
"dynamic_templates": [
{
"strings_as_keywords": {
"match": "p*",
"match_mapping_type": "string",
"mapping": {
"type": "keyword",
"ignore_above": 256,
"ignore_malformed": true
}
}
}
]
}
}
`
documentType = "_doc"
)
type BgMetadataElasticSearchConnector struct {
client ElasticSearchClient
UpdatedDocuments *prometheus.CounterVec
HTTPErrors *prometheus.CounterVec
WriteDurationMs prometheus.Histogram
DocumentBuildDurationMs prometheus.Histogram
RequestSize prometheus.Histogram
KnownIndices map[string]bool
bulkBuffer []ElasticSearchDocument
BulkSize uint
mux sync.Mutex
MaxRetry uint
IndexName, currentIndex string
IndexDateFmt string //strftime fmt string
DirectoriesIndexAlias, MetricsIndexAlias string
logger *zap.Logger
}
type ElasticSearchClient interface {
Perform(*http.Request) (*http.Response, error)
}
// NewBgMetadataElasticSearchConnector : contructor for BgMetadataElasticSearchConnector
func newBgMetadataElasticSearchConnector(elasticSearchClient ElasticSearchClient, registry prometheus.Registerer, bulkSize, maxRetry uint, indexName, IndexDateFmt string) *BgMetadataElasticSearchConnector {
var esc = BgMetadataElasticSearchConnector{
client: elasticSearchClient,
BulkSize: bulkSize,
bulkBuffer: make([]ElasticSearchDocument, 0, bulkSize),
MaxRetry: maxRetry,
IndexName: indexName,
IndexDateFmt: IndexDateFmt,
UpdatedDocuments: prometheus.NewCounterVec(prometheus.CounterOpts{
Namespace: namespace,
Name: "updated_documents",
Help: "total number of documents updated in ElasticSearch splited between metrics and directories",
}, []string{"status", "type"}),
HTTPErrors: prometheus.NewCounterVec(prometheus.CounterOpts{
Namespace: namespace,
Name: "http_errors",
Help: "total number of http errors encountered partitionned by status code",
}, []string{"code"}),
WriteDurationMs: prometheus.NewHistogram(prometheus.HistogramOpts{
Namespace: namespace,
Name: "write_duration_ms",
Help: "time spent writing to ElasticSearch based on `took` field of response ",
Buckets: []float64{250, 500, 750, 1000, 1500, 2000, 5000, 10000}}),
RequestSize: prometheus.NewHistogram(prometheus.HistogramOpts{
Namespace: namespace,
Name: "write_request_size_bytes",
Help: "Size of batch create requests performed on elasticsearch",
Buckets: []float64{10000, 100000, 1000000, 5000000, 10000000, 20000000, 50000000}}),
DocumentBuildDurationMs: prometheus.NewHistogram(prometheus.HistogramOpts{
Namespace: namespace,
Name: "document_build_duration_ms",
Help: "time spent building an ElasticSearch document",
Buckets: []float64{1, 5, 10, 50, 100, 250, 500, 750, 1000, 2000}}),
logger: zap.L(),
}
_ = registry.Register(esc.UpdatedDocuments)
_ = registry.Register(esc.WriteDurationMs)
_ = registry.Register(esc.DocumentBuildDurationMs)
_ = registry.Register(esc.HTTPErrors)
_ = registry.Register(esc.RequestSize)
if esc.IndexName == "" {
esc.IndexName = default_metrics_metadata_index
}
if esc.IndexDateFmt == "" {
esc.DirectoriesIndexAlias = fmt.Sprintf("%s_%s", esc.IndexName, directories_index_suffix)
esc.MetricsIndexAlias = fmt.Sprintf("%s_%s", esc.IndexName, metrics_index_suffix)
}
esc.KnownIndices = map[string]bool{}
return &esc
}
func createElasticSearchClient(servers []string, username, password string, allow_insecure_tls bool) (*elasticsearch.Client, error) {
cfg := elasticsearch.Config{
Addresses: servers,
Username: username,
Password: password,
Transport: &http.Transport{
TLSClientConfig: &tls.Config{
InsecureSkipVerify: allow_insecure_tls,
},
},
}
es, err := elasticsearch.NewClient(cfg)
if err != nil {
log.Fatalf("Error creating the ElasticSearch client: %s", err)
}
_, err = es.Info()
if err != nil |
return es, err
}
// NewBgMetadataElasticSearchConnectorWithDefaults is the public contructor of BgMetadataElasticSearchConnector
func NewBgMetadataElasticSearchConnectorWithDefaults(cfg *cfg.BgMetadataESConfig) *BgMetadataElasticSearchConnector {
es, err := createElasticSearchClient(cfg.StorageServers, cfg.Username, cfg.Password, cfg.AllowInsecureTLS)
if err != nil {
log.Fatalf("Could not create ElasticSearch connector: %v", err)
}
return newBgMetadataElasticSearchConnector(es, prometheus.DefaultRegisterer, cfg.BulkSize, cfg.MaxRetry, cfg.IndexName, cfg.IndexDateFmt)
}
func (esc *BgMetadataElasticSearchConnector) Close() {
}
func (esc *BgMetadataElasticSearchConnector) createIndicesAndMapping(metricIndexName, directoryIndexName string) error {
indices := []struct{ name, mapping string }{{metricIndexName, metricsMapping}, {directoryIndexName, dirMapping}}
for _, index := range indices {
indexCreateRequest := esapi.IndicesCreateRequest{Index: index.name}
res, err := indexCreateRequest.Do(context.Background(), esc.client)
esc.logger.Info("using index", zap.String("name", index.name))
// extract TODO error deserialize
r := strings.NewReader(index.mapping)
request := esapi.IndicesPutMappingRequest{Index: []string{index.name}, Body: r, DocumentType: documentType}
res, err = request.Do(context.Background(), esc.client)
if err != nil {
return fmt.Errorf("Could not set ElasticSearch mapping: %w", err)
}
if res.StatusCode != http.StatusOK {
errorMessage, _ := ioutil.ReadAll(res.Body)
return fmt.Errorf("Could not set ElasticSearch mapping (status %d, error: %s)", res.StatusCode, errorMessage)
}
}
return nil
}
// UpdateMetricMetadata stores the metric in a buffer, will bulkupdate when at full cap
// threadsafe
func (esc *BgMetadataElasticSearchConnector) UpdateMetricMetadata(metric *Metric) error {
return esc.addDocumentToBuff(metric)
}
func (esc *BgMetadataElasticSearchConnector) addDocumentToBuff(doc ElasticSearchDocument) error {
esc.mux.Lock()
defer esc.mux.Unlock()
esc.bulkBuffer = append(esc.bulkBuffer, doc)
if len(esc.bulkBuffer) == cap(esc.bulkBuffer) {
esc.sendAndClearBuffer()
}
return nil
}
func (esc *BgMetadataElasticSearchConnector) sendAndClearBuffer() error {
defer esc.clearBuffer()
metricIndex, directoryIndex, err := esc.getIndices()
var errorMessage []byte
var statusCode int
if err != nil {
esc.UpdatedDocuments.WithLabelValues("failure").Add(float64(len(esc.bulkBuffer)))
return fmt.Errorf("Could not get index: %w", err)
}
timeBeforeBuild := time.Now()
requestBody := BuildElasticSearchDocumentMulti(metricIndex, directoryIndex, esc.bulkBuffer)
esc.DocumentBuildDurationMs.Observe(float64(time.Since(timeBeforeBuild).Milliseconds()))
esc.RequestSize.Observe(float64(len(requestBody)))
for attempt := uint(0); attempt <= esc.MaxRetry; attempt++ {
res, err := esc.bulkUpdate(requestBody)
if err != nil {
// esapi resturns a nil body in case of error
esc.UpdatedDocuments.WithLabelValues("failure", "any").Add(float64(len(esc.bulkBuffer)))
return fmt.Errorf("Could not write to index: %w", err)
}
if !res.IsError() {
esc.updateInternalMetrics(res)
res.Body.Close()
return nil
} else {
esc.HTTPErrors.WithLabelValues(strconv.Itoa(res.StatusCode)).Inc()
statusCode = res.StatusCode
errorMessage, _ = ioutil.ReadAll(res.Body)
res.Body.Close()
}
}
esc.UpdatedDocuments.WithLabelValues("failure", "any").Add(float64(len(esc.bulkBuffer)))
return fmt.Errorf("Could not write to index (status %d, error: %s)", statusCode, errorMessage)
}
// updateInternalMetrics increments BGMetadataConnector's metrics,
func (esc *BgMetadataElasticSearchConnector) updateInternalMetrics(res *esapi.Response) {
defer func() {
if err := recover(); err != nil {
esc.logger.Warn("malformed bulk response", zap.Error(err.(error)))
}
}()
var mapResp map[string]interface{}
json.NewDecoder(res.Body).Decode(&mapResp)
esc.WriteDurationMs.Observe(mapResp["took"].(float64))
for _, item := range mapResp["items"].([]interface{}) {
mapCreate := item.(map[string]interface{})["create"].(map[string]interface{})
// protected by esc.Mux currentIndex may not change while looping
if int(mapCreate["status"].(float64)) == http.StatusCreated {
if strings.HasPrefix(mapCreate["_index"].(string), esc.currentIndex) {
esc.UpdatedDocuments.WithLabelValues("created", "metric").Inc()
} else {
esc.UpdatedDocuments.WithLabelValues("created", "directory").Inc()
}
}
}
}
func (esc *BgMetadataElasticSearchConnector) clearBuffer() error {
esc.bulkBuffer = esc.bulkBuffer[:0]
return nil
}
func (esc *BgMetadataElasticSearchConnector) bulkUpdate(body string) (*esapi.Response, error) {
req := esapi.BulkRequest{
Body: strings.NewReader(body),
DocumentType: documentType,
}
res, err := req.Do(context.Background(), esc.client)
return res, err
}
func (esc *BgMetadataElasticSearchConnector) getIndices() (string, string, error) {
metricIndexName, directoryIndexName := esc.getIndicesNames()
esc.currentIndex = metricIndexName
_, isKnownIndex := esc.KnownIndices[metricIndexName]
// no need to test both
if !isKnownIndex {
err := esc.createIndicesAndMapping(metricIndexName, directoryIndexName)
if err != nil {
return "", "", err
}
esc.KnownIndices[metricIndexName] = true
}
return metricIndexName, directoryIndexName, nil
}
// InsertDirectory will add directory to the bulkBuffer
func (esc *BgMetadataElasticSearchConnector) InsertDirectory(dir *MetricDirectory) error {
esc.addDocumentToBuff(dir)
return nil
}
// SelectDirectory unused, no need in ES
// returns an error to signal that parent dir does not exist
func (esc *BgMetadataElasticSearchConnector) SelectDirectory(dir string) (string, error) {
return dir, fmt.Errorf("")
}
func (esc *BgMetadataElasticSearchConnector) getIndicesNames() (metricIndexName, directoryIndexName string) {
if len(esc.DirectoriesIndexAlias) > 0 && len(esc.MetricsIndexAlias) > 0 {
return esc.MetricsIndexAlias, esc.DirectoriesIndexAlias
}
now := time.Now().UTC()
date, err := strftime.Format(esc.IndexDateFmt, now)
if err != nil {
log.Fatalf("Index date format invalid strftime format: %s", err)
}
metricIndexName = fmt.Sprintf("%s_%s_%s", esc.IndexName, metrics_index_suffix, date)
directoryIndexName = fmt.Sprintf("%s_%s_%s", esc.IndexName, directories_index_suffix, date)
return
}
| {
log.Fatalf("Error getting ElasticSearch information response: %s", err)
} | conditional_block |
report_utils.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# pyre-strict
from collections import defaultdict
from logging import Logger
from typing import Any, Dict, List, Optional
import numpy as np
import pandas as pd
import plotly.graph_objects as go
from ax.core.experiment import Experiment
from ax.core.metric import Metric
from ax.core.multi_type_experiment import MultiTypeExperiment
from ax.core.objective import MultiObjective, ScalarizedObjective
from ax.core.search_space import SearchSpace
from ax.core.trial import BaseTrial, Trial
from ax.modelbridge import ModelBridge
from ax.modelbridge.cross_validation import cross_validate
from ax.modelbridge.generation_strategy import GenerationStrategy
from ax.plot.contour import interact_contour_plotly
from ax.plot.diagnostic import interact_cross_validation_plotly
from ax.plot.slice import plot_slice_plotly
from ax.plot.trace import optimization_trace_single_method_plotly
from ax.utils.common.logger import get_logger
from ax.utils.common.typeutils import checked_cast, not_none
logger: Logger = get_logger(__name__)
# pyre-ignore[11]: Annotation `go.Figure` is not defined as a type.
def _get_cross_validation_plot(model: ModelBridge) -> go.Figure:
cv = cross_validate(model)
return interact_cross_validation_plotly(cv)
def _get_objective_trace_plot(
experiment: Experiment,
metric_name: str,
model_transitions: List[int], | y=best_objectives,
title="Best objective found vs. # of iterations",
ylabel=metric_name,
model_transitions=model_transitions,
optimization_direction=optimization_direction,
plot_trial_points=True,
)
def _get_objective_v_param_plot(
search_space: SearchSpace,
model: ModelBridge,
metric_name: str,
trials: Dict[int, BaseTrial],
) -> Optional[go.Figure]:
range_params = list(search_space.range_parameters.keys())
if len(range_params) == 1:
# individual parameter slice plot
output_slice_plot = plot_slice_plotly(
model=not_none(model),
param_name=range_params[0],
metric_name=metric_name,
generator_runs_dict={
str(t.index): not_none(checked_cast(Trial, t).generator_run)
for t in trials.values()
},
)
return output_slice_plot
if len(range_params) > 1:
# contour plot
output_contour_plot = interact_contour_plotly(
model=not_none(model),
metric_name=metric_name,
)
return output_contour_plot
# if search space contains no range params
logger.warning(
"_get_objective_v_param_plot requires a search space with at least one "
"RangeParameter. Returning None."
)
return None
def _get_suffix(input_str: str, delim: str = ".", n_chunks: int = 1) -> str:
return delim.join(input_str.split(delim)[-n_chunks:])
def _get_shortest_unique_suffix_dict(
input_str_list: List[str], delim: str = "."
) -> Dict[str, str]:
"""Maps a list of strings to their shortest unique suffixes
Maps all original strings to the smallest number of chunks, as specified by
delim, that are not a suffix of any other original string. If the original
string was a suffix of another string, map it to its unaltered self.
Args:
input_str_list: a list of strings to create the suffix mapping for
delim: the delimiter used to split up the strings into meaningful chunks
Returns:
dict: A dict with the original strings as keys and their abbreviations as
values
"""
# all input strings must be unique
assert len(input_str_list) == len(set(input_str_list))
if delim == "":
raise ValueError("delim must be a non-empty string.")
suffix_dict = defaultdict(list)
# initialize suffix_dict with last chunk
for istr in input_str_list:
suffix_dict[_get_suffix(istr, delim=delim, n_chunks=1)].append(istr)
max_chunks = max(len(istr.split(delim)) for istr in input_str_list)
if max_chunks == 1:
return {istr: istr for istr in input_str_list}
# the upper range of this loop is `max_chunks + 2` because:
# - `i` needs to take the value of `max_chunks`, hence one +1
# - the contents of the loop are run one more time to check if `all_unique`,
# hence the other +1
for i in range(2, max_chunks + 2):
new_dict = defaultdict(list)
all_unique = True
for suffix, suffix_str_list in suffix_dict.items():
if len(suffix_str_list) > 1:
all_unique = False
for istr in suffix_str_list:
new_dict[_get_suffix(istr, delim=delim, n_chunks=i)].append(istr)
else:
new_dict[suffix] = suffix_str_list
if all_unique:
if len(set(input_str_list)) != len(suffix_dict.keys()):
break
return {
suffix_str_list[0]: suffix
for suffix, suffix_str_list in suffix_dict.items()
}
suffix_dict = new_dict
# If this function has not yet exited, some input strings still share a suffix.
# This is not expected, but in this case, the function will return the identity
# mapping, i.e., a dict with the original strings as both keys and values.
logger.warning(
"Something went wrong. Returning dictionary with original strings as keys and "
"values."
)
return {istr: istr for istr in input_str_list}
def get_standard_plots(
experiment: Experiment, generation_strategy: Optional[GenerationStrategy]
) -> List[go.Figure]:
"""Extract standard plots for single-objective optimization.
Extracts a list of plots from an Experiment and GenerationStrategy of general
interest to an Ax user. Currently not supported are
- TODO: multi-objective optimization
- TODO: ChoiceParameter plots
Args:
- experiment: the Experiment from which to obtain standard plots.
- generation_strategy: the GenerationStrategy used to suggest trial parameters
in experiment
Returns:
- a plot of objective value vs. trial index, to show experiment progression
- a plot of objective value vs. range parameter values, only included if the
model associated with generation_strategy can create predictions. This
consists of:
- a plot_slice plot if the search space contains one range parameter
- an interact_contour plot if the search space contains multiple
range parameters
"""
objective = not_none(experiment.optimization_config).objective
if isinstance(objective, MultiObjective):
logger.warning(
"get_standard_plots does not currently support MultiObjective "
"optimization experiments. Returning an empty list."
)
return []
if isinstance(objective, ScalarizedObjective):
logger.warning(
"get_standard_plots does not currently support ScalarizedObjective "
"optimization experiments. Returning an empty list."
)
return []
if experiment.fetch_data().df.empty:
logger.info(f"Experiment {experiment} does not yet have data, nothing to plot.")
return []
output_plot_list = []
output_plot_list.append(
_get_objective_trace_plot(
experiment=experiment,
metric_name=not_none(experiment.optimization_config).objective.metric.name,
# TODO: Adjust `model_transitions` to case where custom trials are present
# and generation strategy does not start right away.
model_transitions=not_none(generation_strategy).model_transitions
if generation_strategy is not None
else [],
optimization_direction=(
"minimize"
if not_none(experiment.optimization_config).objective.minimize
else "maximize"
),
)
)
# Objective vs. parameter plot requires a `Model`, so add it only if model
# is alrady available. In cases where initially custom trials are attached,
# model might not yet be set on the generation strategy.
if generation_strategy and generation_strategy.model:
model = not_none(not_none(generation_strategy).model)
try:
output_plot_list.append(
_get_objective_v_param_plot(
search_space=experiment.search_space,
model=model,
metric_name=not_none(
experiment.optimization_config
).objective.metric.name,
trials=experiment.trials,
)
)
output_plot_list.append(_get_cross_validation_plot(model))
except NotImplementedError:
# Model does not implement `predict` method.
pass
return [plot for plot in output_plot_list if plot is not None]
def exp_to_df(
exp: Experiment,
metrics: Optional[List[Metric]] = None,
run_metadata_fields: Optional[List[str]] = None,
trial_properties_fields: Optional[List[str]] = None,
**kwargs: Any,
) -> pd.DataFrame:
"""Transforms an experiment to a DataFrame. Only supports Experiment and
SimpleExperiment.
Transforms an Experiment into a dataframe with rows keyed by trial_index
and arm_name, metrics pivoted into one row.
Args:
exp: An Experiment that may have pending trials.
metrics: Override list of metrics to return. Return all metrics if None.
run_metadata_fields: fields to extract from trial.run_metadata for trial
in experiment.trials. If there are multiple arms per trial, these
fields will be replicated across the arms of a trial.
trial_properties_fields: fields to extract from trial._properties for trial
in experiment.trials. If there are multiple arms per trial, these fields
will be replicated across the arms of a trial. Output columns names will be
prepended with "trial_properties_".
**kwargs: Custom named arguments, useful for passing complex
objects from call-site to the `fetch_data` callback.
Returns:
DataFrame: A dataframe of inputs, metadata and metrics by trial and arm. If
no trials are available, returns an empty dataframe. If no metric ouputs are
available, returns a dataframe of inputs and metadata.
"""
def prep_return(
df: pd.DataFrame, drop_col: str, sort_by: List[str]
) -> pd.DataFrame:
return not_none(not_none(df.drop(drop_col, axis=1)).sort_values(sort_by))
# Accept Experiment and SimpleExperiment
if isinstance(exp, MultiTypeExperiment):
raise ValueError("Cannot transform MultiTypeExperiments to DataFrames.")
key_components = ["trial_index", "arm_name"]
# Get each trial-arm with parameters
arms_df = pd.DataFrame()
for trial_index, trial in exp.trials.items():
for arm in trial.arms:
arms_df = arms_df.append(
{"arm_name": arm.name, "trial_index": trial_index, **arm.parameters},
ignore_index=True,
)
# Fetch results; in case arms_df is empty, return empty results (legacy behavior)
results = exp.fetch_data(metrics, **kwargs).df
if len(arms_df.index) == 0:
if len(results.index) != 0:
raise ValueError(
"exp.fetch_data().df returned more rows than there are experimental "
"arms. This is an inconsistent experimental state. Please report to "
"Ax support."
)
return results
# Create key column from key_components
arms_df["trial_index"] = arms_df["trial_index"].astype(int)
key_col = "-".join(key_components)
key_vals = arms_df[key_components[0]].astype("str") + arms_df[
key_components[1]
].astype("str")
arms_df[key_col] = key_vals
# Add trial status
trials = exp.trials.items()
trial_to_status = {index: trial.status.name for index, trial in trials}
arms_df["trial_status"] = [
trial_to_status[trial_index] for trial_index in arms_df.trial_index
]
# Add generator_run model keys
arms_df["generator_model"] = [
# This accounts for the generic case that generator_runs is a list of arbitrary
# length. If all elements are `None`, this yields an empty string. Repeated
# generator models within a trial are condensed via a set comprehension.
", ".join(
{
not_none(generator_run._model_key)
for generator_run in exp.trials[trial_index].generator_runs
if generator_run._model_key is not None
}
)
if trial_index in exp.trials
else ""
for trial_index in arms_df.trial_index
]
# replace all unknown generator_models (denoted by empty strings) with "Unknown"
arms_df["generator_model"] = [
"Unknown" if generator_model == "" else generator_model
for generator_model in arms_df["generator_model"]
]
# Add any trial properties fields to arms_df
if trial_properties_fields is not None:
if not (
isinstance(trial_properties_fields, list)
and all(isinstance(field, str) for field in trial_properties_fields)
):
raise ValueError(
"trial_properties_fields must be List[str] or None. "
f"Got {trial_properties_fields}"
)
# add trial._properties fields
for field in trial_properties_fields:
trial_to_properties_field = {
index: (
trial._properties[field] if field in trial._properties else None
)
for index, trial in trials
}
if any(trial_to_properties_field.values()): # field present for any trial
if not all(
trial_to_properties_field.values()
): # not present for all trials
logger.warning(
f"Field {field} missing for some trials' properties. "
"Returning None when missing."
)
arms_df["trial_properties_" + field] = [
trial_to_properties_field[key] for key in arms_df.trial_index
]
else:
logger.warning(
f"Field {field} missing for all trials' properties. "
"Not appending column."
)
# Add any run_metadata fields to arms_df
if run_metadata_fields is not None:
if not (
isinstance(run_metadata_fields, list)
and all(isinstance(field, str) for field in run_metadata_fields)
):
raise ValueError(
"run_metadata_fields must be List[str] or None. "
f"Got {run_metadata_fields}"
)
# add run_metadata fields
for field in run_metadata_fields:
trial_to_metadata_field = {
index: (
trial.run_metadata[field] if field in trial.run_metadata else None
)
for index, trial in trials
}
if any(trial_to_metadata_field.values()): # field present for any trial
if not all(
trial_to_metadata_field.values()
): # not present for all trials
logger.warning(
f"Field {field} missing for some trials' run_metadata. "
"Returning None when missing."
)
arms_df[field] = [
trial_to_metadata_field[key] for key in arms_df.trial_index
]
else:
logger.warning(
f"Field {field} missing for all trials' run_metadata. "
"Not appending column."
)
if len(results.index) == 0:
logger.info(
f"No results present for the specified metrics `{metrics}`. "
"Returning arm parameters and metadata only."
)
exp_df = arms_df
elif not all(col in results.columns for col in key_components):
logger.warn(
f"At least one of key columns `{key_components}` not present in results df "
f"`{results}`. Returning arm parameters and metadata only."
)
exp_df = arms_df
else:
# prepare results for merge
key_vals = results[key_components[0]].astype("str") + results[
key_components[1]
].astype("str")
results[key_col] = key_vals
metric_vals = results.pivot(
index=key_col, columns="metric_name", values="mean"
).reset_index()
# dedupe results by key_components
metadata = results[key_components + [key_col]].drop_duplicates()
metrics_df = pd.merge(metric_vals, metadata, on=key_col)
# merge and return
exp_df = pd.merge(
metrics_df, arms_df, on=key_components + [key_col], how="outer"
)
return prep_return(df=exp_df, drop_col=key_col, sort_by=["arm_name"])
def get_best_trial(
exp: Experiment,
additional_metrics: Optional[List[Metric]] = None,
run_metadata_fields: Optional[List[str]] = None,
**kwargs: Any,
) -> Optional[pd.DataFrame]:
"""Finds the optimal trial given an experiment, based on raw objective value.
Returns a 1-row dataframe. Should match the row of ``exp_to_df`` with the best
raw objective value, given the same arguments.
Args:
exp: An Experiment that may have pending trials.
additional_metrics: List of metrics to return in addition to the objective
metric. Return all metrics if None.
run_metadata_fields: fields to extract from trial.run_metadata for trial
in experiment.trials. If there are multiple arms per trial, these
fields will be replicated across the arms of a trial.
**kwargs: Custom named arguments, useful for passing complex
objects from call-site to the `fetch_data` callback.
Returns:
DataFrame: A dataframe of inputs and metrics of the optimal trial.
"""
objective = not_none(exp.optimization_config).objective
if isinstance(objective, MultiObjective):
logger.warning(
"No best trial is available for `MultiObjective` optimization. "
"Returning None for best trial."
)
return None
if isinstance(objective, ScalarizedObjective):
logger.warning(
"No best trial is available for `ScalarizedObjective` optimization. "
"Returning None for best trial."
)
return None
if (additional_metrics is not None) and (
objective.metric not in additional_metrics
):
additional_metrics.append(objective.metric)
trials_df = exp_to_df(
exp=exp,
metrics=additional_metrics,
run_metadata_fields=run_metadata_fields,
**kwargs,
)
if len(trials_df.index) == 0:
logger.warning("`exp_to_df` returned 0 trials. Returning None for best trial.")
return None
metric_name = objective.metric.name
minimize = objective.minimize
if metric_name not in trials_df.columns:
logger.warning(
f"`exp_to_df` did not have data for metric {metric_name}. "
"Returning None for best trial."
)
return None
metric_optimum = (
trials_df[metric_name].min() if minimize else trials_df[metric_name].max()
)
return pd.DataFrame(trials_df[trials_df[metric_name] == metric_optimum].head(1)) | optimization_direction: Optional[str] = None,
) -> Optional[go.Figure]:
best_objectives = np.array([experiment.fetch_data().df["mean"]])
return optimization_trace_single_method_plotly( | random_line_split |
report_utils.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# pyre-strict
from collections import defaultdict
from logging import Logger
from typing import Any, Dict, List, Optional
import numpy as np
import pandas as pd
import plotly.graph_objects as go
from ax.core.experiment import Experiment
from ax.core.metric import Metric
from ax.core.multi_type_experiment import MultiTypeExperiment
from ax.core.objective import MultiObjective, ScalarizedObjective
from ax.core.search_space import SearchSpace
from ax.core.trial import BaseTrial, Trial
from ax.modelbridge import ModelBridge
from ax.modelbridge.cross_validation import cross_validate
from ax.modelbridge.generation_strategy import GenerationStrategy
from ax.plot.contour import interact_contour_plotly
from ax.plot.diagnostic import interact_cross_validation_plotly
from ax.plot.slice import plot_slice_plotly
from ax.plot.trace import optimization_trace_single_method_plotly
from ax.utils.common.logger import get_logger
from ax.utils.common.typeutils import checked_cast, not_none
logger: Logger = get_logger(__name__)
# pyre-ignore[11]: Annotation `go.Figure` is not defined as a type.
def _get_cross_validation_plot(model: ModelBridge) -> go.Figure:
cv = cross_validate(model)
return interact_cross_validation_plotly(cv)
def _get_objective_trace_plot(
experiment: Experiment,
metric_name: str,
model_transitions: List[int],
optimization_direction: Optional[str] = None,
) -> Optional[go.Figure]:
best_objectives = np.array([experiment.fetch_data().df["mean"]])
return optimization_trace_single_method_plotly(
y=best_objectives,
title="Best objective found vs. # of iterations",
ylabel=metric_name,
model_transitions=model_transitions,
optimization_direction=optimization_direction,
plot_trial_points=True,
)
def _get_objective_v_param_plot(
search_space: SearchSpace,
model: ModelBridge,
metric_name: str,
trials: Dict[int, BaseTrial],
) -> Optional[go.Figure]:
range_params = list(search_space.range_parameters.keys())
if len(range_params) == 1:
# individual parameter slice plot
output_slice_plot = plot_slice_plotly(
model=not_none(model),
param_name=range_params[0],
metric_name=metric_name,
generator_runs_dict={
str(t.index): not_none(checked_cast(Trial, t).generator_run)
for t in trials.values()
},
)
return output_slice_plot
if len(range_params) > 1:
# contour plot
output_contour_plot = interact_contour_plotly(
model=not_none(model),
metric_name=metric_name,
)
return output_contour_plot
# if search space contains no range params
logger.warning(
"_get_objective_v_param_plot requires a search space with at least one "
"RangeParameter. Returning None."
)
return None
def _get_suffix(input_str: str, delim: str = ".", n_chunks: int = 1) -> str:
return delim.join(input_str.split(delim)[-n_chunks:])
def _get_shortest_unique_suffix_dict(
input_str_list: List[str], delim: str = "."
) -> Dict[str, str]:
"""Maps a list of strings to their shortest unique suffixes
Maps all original strings to the smallest number of chunks, as specified by
delim, that are not a suffix of any other original string. If the original
string was a suffix of another string, map it to its unaltered self.
Args:
input_str_list: a list of strings to create the suffix mapping for
delim: the delimiter used to split up the strings into meaningful chunks
Returns:
dict: A dict with the original strings as keys and their abbreviations as
values
"""
# all input strings must be unique
assert len(input_str_list) == len(set(input_str_list))
if delim == "":
raise ValueError("delim must be a non-empty string.")
suffix_dict = defaultdict(list)
# initialize suffix_dict with last chunk
for istr in input_str_list:
suffix_dict[_get_suffix(istr, delim=delim, n_chunks=1)].append(istr)
max_chunks = max(len(istr.split(delim)) for istr in input_str_list)
if max_chunks == 1:
return {istr: istr for istr in input_str_list}
# the upper range of this loop is `max_chunks + 2` because:
# - `i` needs to take the value of `max_chunks`, hence one +1
# - the contents of the loop are run one more time to check if `all_unique`,
# hence the other +1
for i in range(2, max_chunks + 2):
new_dict = defaultdict(list)
all_unique = True
for suffix, suffix_str_list in suffix_dict.items():
if len(suffix_str_list) > 1:
all_unique = False
for istr in suffix_str_list:
new_dict[_get_suffix(istr, delim=delim, n_chunks=i)].append(istr)
else:
new_dict[suffix] = suffix_str_list
if all_unique:
if len(set(input_str_list)) != len(suffix_dict.keys()):
break
return {
suffix_str_list[0]: suffix
for suffix, suffix_str_list in suffix_dict.items()
}
suffix_dict = new_dict
# If this function has not yet exited, some input strings still share a suffix.
# This is not expected, but in this case, the function will return the identity
# mapping, i.e., a dict with the original strings as both keys and values.
logger.warning(
"Something went wrong. Returning dictionary with original strings as keys and "
"values."
)
return {istr: istr for istr in input_str_list}
def get_standard_plots(
experiment: Experiment, generation_strategy: Optional[GenerationStrategy]
) -> List[go.Figure]:
"""Extract standard plots for single-objective optimization.
Extracts a list of plots from an Experiment and GenerationStrategy of general
interest to an Ax user. Currently not supported are
- TODO: multi-objective optimization
- TODO: ChoiceParameter plots
Args:
- experiment: the Experiment from which to obtain standard plots.
- generation_strategy: the GenerationStrategy used to suggest trial parameters
in experiment
Returns:
- a plot of objective value vs. trial index, to show experiment progression
- a plot of objective value vs. range parameter values, only included if the
model associated with generation_strategy can create predictions. This
consists of:
- a plot_slice plot if the search space contains one range parameter
- an interact_contour plot if the search space contains multiple
range parameters
"""
objective = not_none(experiment.optimization_config).objective
if isinstance(objective, MultiObjective):
logger.warning(
"get_standard_plots does not currently support MultiObjective "
"optimization experiments. Returning an empty list."
)
return []
if isinstance(objective, ScalarizedObjective):
logger.warning(
"get_standard_plots does not currently support ScalarizedObjective "
"optimization experiments. Returning an empty list."
)
return []
if experiment.fetch_data().df.empty:
logger.info(f"Experiment {experiment} does not yet have data, nothing to plot.")
return []
output_plot_list = []
output_plot_list.append(
_get_objective_trace_plot(
experiment=experiment,
metric_name=not_none(experiment.optimization_config).objective.metric.name,
# TODO: Adjust `model_transitions` to case where custom trials are present
# and generation strategy does not start right away.
model_transitions=not_none(generation_strategy).model_transitions
if generation_strategy is not None
else [],
optimization_direction=(
"minimize"
if not_none(experiment.optimization_config).objective.minimize
else "maximize"
),
)
)
# Objective vs. parameter plot requires a `Model`, so add it only if model
# is alrady available. In cases where initially custom trials are attached,
# model might not yet be set on the generation strategy.
if generation_strategy and generation_strategy.model:
model = not_none(not_none(generation_strategy).model)
try:
output_plot_list.append(
_get_objective_v_param_plot(
search_space=experiment.search_space,
model=model,
metric_name=not_none(
experiment.optimization_config
).objective.metric.name,
trials=experiment.trials,
)
)
output_plot_list.append(_get_cross_validation_plot(model))
except NotImplementedError:
# Model does not implement `predict` method.
pass
return [plot for plot in output_plot_list if plot is not None]
def exp_to_df(
exp: Experiment,
metrics: Optional[List[Metric]] = None,
run_metadata_fields: Optional[List[str]] = None,
trial_properties_fields: Optional[List[str]] = None,
**kwargs: Any,
) -> pd.DataFrame:
"""Transforms an experiment to a DataFrame. Only supports Experiment and
SimpleExperiment.
Transforms an Experiment into a dataframe with rows keyed by trial_index
and arm_name, metrics pivoted into one row.
Args:
exp: An Experiment that may have pending trials.
metrics: Override list of metrics to return. Return all metrics if None.
run_metadata_fields: fields to extract from trial.run_metadata for trial
in experiment.trials. If there are multiple arms per trial, these
fields will be replicated across the arms of a trial.
trial_properties_fields: fields to extract from trial._properties for trial
in experiment.trials. If there are multiple arms per trial, these fields
will be replicated across the arms of a trial. Output columns names will be
prepended with "trial_properties_".
**kwargs: Custom named arguments, useful for passing complex
objects from call-site to the `fetch_data` callback.
Returns:
DataFrame: A dataframe of inputs, metadata and metrics by trial and arm. If
no trials are available, returns an empty dataframe. If no metric ouputs are
available, returns a dataframe of inputs and metadata.
"""
def prep_return(
df: pd.DataFrame, drop_col: str, sort_by: List[str]
) -> pd.DataFrame:
return not_none(not_none(df.drop(drop_col, axis=1)).sort_values(sort_by))
# Accept Experiment and SimpleExperiment
if isinstance(exp, MultiTypeExperiment):
raise ValueError("Cannot transform MultiTypeExperiments to DataFrames.")
key_components = ["trial_index", "arm_name"]
# Get each trial-arm with parameters
arms_df = pd.DataFrame()
for trial_index, trial in exp.trials.items():
for arm in trial.arms:
arms_df = arms_df.append(
{"arm_name": arm.name, "trial_index": trial_index, **arm.parameters},
ignore_index=True,
)
# Fetch results; in case arms_df is empty, return empty results (legacy behavior)
results = exp.fetch_data(metrics, **kwargs).df
if len(arms_df.index) == 0:
if len(results.index) != 0:
raise ValueError(
"exp.fetch_data().df returned more rows than there are experimental "
"arms. This is an inconsistent experimental state. Please report to "
"Ax support."
)
return results
# Create key column from key_components
arms_df["trial_index"] = arms_df["trial_index"].astype(int)
key_col = "-".join(key_components)
key_vals = arms_df[key_components[0]].astype("str") + arms_df[
key_components[1]
].astype("str")
arms_df[key_col] = key_vals
# Add trial status
trials = exp.trials.items()
trial_to_status = {index: trial.status.name for index, trial in trials}
arms_df["trial_status"] = [
trial_to_status[trial_index] for trial_index in arms_df.trial_index
]
# Add generator_run model keys
arms_df["generator_model"] = [
# This accounts for the generic case that generator_runs is a list of arbitrary
# length. If all elements are `None`, this yields an empty string. Repeated
# generator models within a trial are condensed via a set comprehension.
", ".join(
{
not_none(generator_run._model_key)
for generator_run in exp.trials[trial_index].generator_runs
if generator_run._model_key is not None
}
)
if trial_index in exp.trials
else ""
for trial_index in arms_df.trial_index
]
# replace all unknown generator_models (denoted by empty strings) with "Unknown"
arms_df["generator_model"] = [
"Unknown" if generator_model == "" else generator_model
for generator_model in arms_df["generator_model"]
]
# Add any trial properties fields to arms_df
if trial_properties_fields is not None:
|
# Add any run_metadata fields to arms_df
if run_metadata_fields is not None:
if not (
isinstance(run_metadata_fields, list)
and all(isinstance(field, str) for field in run_metadata_fields)
):
raise ValueError(
"run_metadata_fields must be List[str] or None. "
f"Got {run_metadata_fields}"
)
# add run_metadata fields
for field in run_metadata_fields:
trial_to_metadata_field = {
index: (
trial.run_metadata[field] if field in trial.run_metadata else None
)
for index, trial in trials
}
if any(trial_to_metadata_field.values()): # field present for any trial
if not all(
trial_to_metadata_field.values()
): # not present for all trials
logger.warning(
f"Field {field} missing for some trials' run_metadata. "
"Returning None when missing."
)
arms_df[field] = [
trial_to_metadata_field[key] for key in arms_df.trial_index
]
else:
logger.warning(
f"Field {field} missing for all trials' run_metadata. "
"Not appending column."
)
if len(results.index) == 0:
logger.info(
f"No results present for the specified metrics `{metrics}`. "
"Returning arm parameters and metadata only."
)
exp_df = arms_df
elif not all(col in results.columns for col in key_components):
logger.warn(
f"At least one of key columns `{key_components}` not present in results df "
f"`{results}`. Returning arm parameters and metadata only."
)
exp_df = arms_df
else:
# prepare results for merge
key_vals = results[key_components[0]].astype("str") + results[
key_components[1]
].astype("str")
results[key_col] = key_vals
metric_vals = results.pivot(
index=key_col, columns="metric_name", values="mean"
).reset_index()
# dedupe results by key_components
metadata = results[key_components + [key_col]].drop_duplicates()
metrics_df = pd.merge(metric_vals, metadata, on=key_col)
# merge and return
exp_df = pd.merge(
metrics_df, arms_df, on=key_components + [key_col], how="outer"
)
return prep_return(df=exp_df, drop_col=key_col, sort_by=["arm_name"])
def get_best_trial(
exp: Experiment,
additional_metrics: Optional[List[Metric]] = None,
run_metadata_fields: Optional[List[str]] = None,
**kwargs: Any,
) -> Optional[pd.DataFrame]:
"""Finds the optimal trial given an experiment, based on raw objective value.
Returns a 1-row dataframe. Should match the row of ``exp_to_df`` with the best
raw objective value, given the same arguments.
Args:
exp: An Experiment that may have pending trials.
additional_metrics: List of metrics to return in addition to the objective
metric. Return all metrics if None.
run_metadata_fields: fields to extract from trial.run_metadata for trial
in experiment.trials. If there are multiple arms per trial, these
fields will be replicated across the arms of a trial.
**kwargs: Custom named arguments, useful for passing complex
objects from call-site to the `fetch_data` callback.
Returns:
DataFrame: A dataframe of inputs and metrics of the optimal trial.
"""
objective = not_none(exp.optimization_config).objective
if isinstance(objective, MultiObjective):
logger.warning(
"No best trial is available for `MultiObjective` optimization. "
"Returning None for best trial."
)
return None
if isinstance(objective, ScalarizedObjective):
logger.warning(
"No best trial is available for `ScalarizedObjective` optimization. "
"Returning None for best trial."
)
return None
if (additional_metrics is not None) and (
objective.metric not in additional_metrics
):
additional_metrics.append(objective.metric)
trials_df = exp_to_df(
exp=exp,
metrics=additional_metrics,
run_metadata_fields=run_metadata_fields,
**kwargs,
)
if len(trials_df.index) == 0:
logger.warning("`exp_to_df` returned 0 trials. Returning None for best trial.")
return None
metric_name = objective.metric.name
minimize = objective.minimize
if metric_name not in trials_df.columns:
logger.warning(
f"`exp_to_df` did not have data for metric {metric_name}. "
"Returning None for best trial."
)
return None
metric_optimum = (
trials_df[metric_name].min() if minimize else trials_df[metric_name].max()
)
return pd.DataFrame(trials_df[trials_df[metric_name] == metric_optimum].head(1))
| if not (
isinstance(trial_properties_fields, list)
and all(isinstance(field, str) for field in trial_properties_fields)
):
raise ValueError(
"trial_properties_fields must be List[str] or None. "
f"Got {trial_properties_fields}"
)
# add trial._properties fields
for field in trial_properties_fields:
trial_to_properties_field = {
index: (
trial._properties[field] if field in trial._properties else None
)
for index, trial in trials
}
if any(trial_to_properties_field.values()): # field present for any trial
if not all(
trial_to_properties_field.values()
): # not present for all trials
logger.warning(
f"Field {field} missing for some trials' properties. "
"Returning None when missing."
)
arms_df["trial_properties_" + field] = [
trial_to_properties_field[key] for key in arms_df.trial_index
]
else:
logger.warning(
f"Field {field} missing for all trials' properties. "
"Not appending column."
) | conditional_block |
report_utils.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# pyre-strict
from collections import defaultdict
from logging import Logger
from typing import Any, Dict, List, Optional
import numpy as np
import pandas as pd
import plotly.graph_objects as go
from ax.core.experiment import Experiment
from ax.core.metric import Metric
from ax.core.multi_type_experiment import MultiTypeExperiment
from ax.core.objective import MultiObjective, ScalarizedObjective
from ax.core.search_space import SearchSpace
from ax.core.trial import BaseTrial, Trial
from ax.modelbridge import ModelBridge
from ax.modelbridge.cross_validation import cross_validate
from ax.modelbridge.generation_strategy import GenerationStrategy
from ax.plot.contour import interact_contour_plotly
from ax.plot.diagnostic import interact_cross_validation_plotly
from ax.plot.slice import plot_slice_plotly
from ax.plot.trace import optimization_trace_single_method_plotly
from ax.utils.common.logger import get_logger
from ax.utils.common.typeutils import checked_cast, not_none
logger: Logger = get_logger(__name__)
# pyre-ignore[11]: Annotation `go.Figure` is not defined as a type.
def _get_cross_validation_plot(model: ModelBridge) -> go.Figure:
cv = cross_validate(model)
return interact_cross_validation_plotly(cv)
def _get_objective_trace_plot(
experiment: Experiment,
metric_name: str,
model_transitions: List[int],
optimization_direction: Optional[str] = None,
) -> Optional[go.Figure]:
best_objectives = np.array([experiment.fetch_data().df["mean"]])
return optimization_trace_single_method_plotly(
y=best_objectives,
title="Best objective found vs. # of iterations",
ylabel=metric_name,
model_transitions=model_transitions,
optimization_direction=optimization_direction,
plot_trial_points=True,
)
def _get_objective_v_param_plot(
search_space: SearchSpace,
model: ModelBridge,
metric_name: str,
trials: Dict[int, BaseTrial],
) -> Optional[go.Figure]:
range_params = list(search_space.range_parameters.keys())
if len(range_params) == 1:
# individual parameter slice plot
output_slice_plot = plot_slice_plotly(
model=not_none(model),
param_name=range_params[0],
metric_name=metric_name,
generator_runs_dict={
str(t.index): not_none(checked_cast(Trial, t).generator_run)
for t in trials.values()
},
)
return output_slice_plot
if len(range_params) > 1:
# contour plot
output_contour_plot = interact_contour_plotly(
model=not_none(model),
metric_name=metric_name,
)
return output_contour_plot
# if search space contains no range params
logger.warning(
"_get_objective_v_param_plot requires a search space with at least one "
"RangeParameter. Returning None."
)
return None
def _get_suffix(input_str: str, delim: str = ".", n_chunks: int = 1) -> str:
return delim.join(input_str.split(delim)[-n_chunks:])
def _get_shortest_unique_suffix_dict(
input_str_list: List[str], delim: str = "."
) -> Dict[str, str]:
"""Maps a list of strings to their shortest unique suffixes
Maps all original strings to the smallest number of chunks, as specified by
delim, that are not a suffix of any other original string. If the original
string was a suffix of another string, map it to its unaltered self.
Args:
input_str_list: a list of strings to create the suffix mapping for
delim: the delimiter used to split up the strings into meaningful chunks
Returns:
dict: A dict with the original strings as keys and their abbreviations as
values
"""
# all input strings must be unique
assert len(input_str_list) == len(set(input_str_list))
if delim == "":
raise ValueError("delim must be a non-empty string.")
suffix_dict = defaultdict(list)
# initialize suffix_dict with last chunk
for istr in input_str_list:
suffix_dict[_get_suffix(istr, delim=delim, n_chunks=1)].append(istr)
max_chunks = max(len(istr.split(delim)) for istr in input_str_list)
if max_chunks == 1:
return {istr: istr for istr in input_str_list}
# the upper range of this loop is `max_chunks + 2` because:
# - `i` needs to take the value of `max_chunks`, hence one +1
# - the contents of the loop are run one more time to check if `all_unique`,
# hence the other +1
for i in range(2, max_chunks + 2):
new_dict = defaultdict(list)
all_unique = True
for suffix, suffix_str_list in suffix_dict.items():
if len(suffix_str_list) > 1:
all_unique = False
for istr in suffix_str_list:
new_dict[_get_suffix(istr, delim=delim, n_chunks=i)].append(istr)
else:
new_dict[suffix] = suffix_str_list
if all_unique:
if len(set(input_str_list)) != len(suffix_dict.keys()):
break
return {
suffix_str_list[0]: suffix
for suffix, suffix_str_list in suffix_dict.items()
}
suffix_dict = new_dict
# If this function has not yet exited, some input strings still share a suffix.
# This is not expected, but in this case, the function will return the identity
# mapping, i.e., a dict with the original strings as both keys and values.
logger.warning(
"Something went wrong. Returning dictionary with original strings as keys and "
"values."
)
return {istr: istr for istr in input_str_list}
def get_standard_plots(
experiment: Experiment, generation_strategy: Optional[GenerationStrategy]
) -> List[go.Figure]:
"""Extract standard plots for single-objective optimization.
Extracts a list of plots from an Experiment and GenerationStrategy of general
interest to an Ax user. Currently not supported are
- TODO: multi-objective optimization
- TODO: ChoiceParameter plots
Args:
- experiment: the Experiment from which to obtain standard plots.
- generation_strategy: the GenerationStrategy used to suggest trial parameters
in experiment
Returns:
- a plot of objective value vs. trial index, to show experiment progression
- a plot of objective value vs. range parameter values, only included if the
model associated with generation_strategy can create predictions. This
consists of:
- a plot_slice plot if the search space contains one range parameter
- an interact_contour plot if the search space contains multiple
range parameters
"""
objective = not_none(experiment.optimization_config).objective
if isinstance(objective, MultiObjective):
logger.warning(
"get_standard_plots does not currently support MultiObjective "
"optimization experiments. Returning an empty list."
)
return []
if isinstance(objective, ScalarizedObjective):
logger.warning(
"get_standard_plots does not currently support ScalarizedObjective "
"optimization experiments. Returning an empty list."
)
return []
if experiment.fetch_data().df.empty:
logger.info(f"Experiment {experiment} does not yet have data, nothing to plot.")
return []
output_plot_list = []
output_plot_list.append(
_get_objective_trace_plot(
experiment=experiment,
metric_name=not_none(experiment.optimization_config).objective.metric.name,
# TODO: Adjust `model_transitions` to case where custom trials are present
# and generation strategy does not start right away.
model_transitions=not_none(generation_strategy).model_transitions
if generation_strategy is not None
else [],
optimization_direction=(
"minimize"
if not_none(experiment.optimization_config).objective.minimize
else "maximize"
),
)
)
# Objective vs. parameter plot requires a `Model`, so add it only if model
# is alrady available. In cases where initially custom trials are attached,
# model might not yet be set on the generation strategy.
if generation_strategy and generation_strategy.model:
model = not_none(not_none(generation_strategy).model)
try:
output_plot_list.append(
_get_objective_v_param_plot(
search_space=experiment.search_space,
model=model,
metric_name=not_none(
experiment.optimization_config
).objective.metric.name,
trials=experiment.trials,
)
)
output_plot_list.append(_get_cross_validation_plot(model))
except NotImplementedError:
# Model does not implement `predict` method.
pass
return [plot for plot in output_plot_list if plot is not None]
def exp_to_df(
exp: Experiment,
metrics: Optional[List[Metric]] = None,
run_metadata_fields: Optional[List[str]] = None,
trial_properties_fields: Optional[List[str]] = None,
**kwargs: Any,
) -> pd.DataFrame:
"""Transforms an experiment to a DataFrame. Only supports Experiment and
SimpleExperiment.
Transforms an Experiment into a dataframe with rows keyed by trial_index
and arm_name, metrics pivoted into one row.
Args:
exp: An Experiment that may have pending trials.
metrics: Override list of metrics to return. Return all metrics if None.
run_metadata_fields: fields to extract from trial.run_metadata for trial
in experiment.trials. If there are multiple arms per trial, these
fields will be replicated across the arms of a trial.
trial_properties_fields: fields to extract from trial._properties for trial
in experiment.trials. If there are multiple arms per trial, these fields
will be replicated across the arms of a trial. Output columns names will be
prepended with "trial_properties_".
**kwargs: Custom named arguments, useful for passing complex
objects from call-site to the `fetch_data` callback.
Returns:
DataFrame: A dataframe of inputs, metadata and metrics by trial and arm. If
no trials are available, returns an empty dataframe. If no metric ouputs are
available, returns a dataframe of inputs and metadata.
"""
def prep_return(
df: pd.DataFrame, drop_col: str, sort_by: List[str]
) -> pd.DataFrame:
|
# Accept Experiment and SimpleExperiment
if isinstance(exp, MultiTypeExperiment):
raise ValueError("Cannot transform MultiTypeExperiments to DataFrames.")
key_components = ["trial_index", "arm_name"]
# Get each trial-arm with parameters
arms_df = pd.DataFrame()
for trial_index, trial in exp.trials.items():
for arm in trial.arms:
arms_df = arms_df.append(
{"arm_name": arm.name, "trial_index": trial_index, **arm.parameters},
ignore_index=True,
)
# Fetch results; in case arms_df is empty, return empty results (legacy behavior)
results = exp.fetch_data(metrics, **kwargs).df
if len(arms_df.index) == 0:
if len(results.index) != 0:
raise ValueError(
"exp.fetch_data().df returned more rows than there are experimental "
"arms. This is an inconsistent experimental state. Please report to "
"Ax support."
)
return results
# Create key column from key_components
arms_df["trial_index"] = arms_df["trial_index"].astype(int)
key_col = "-".join(key_components)
key_vals = arms_df[key_components[0]].astype("str") + arms_df[
key_components[1]
].astype("str")
arms_df[key_col] = key_vals
# Add trial status
trials = exp.trials.items()
trial_to_status = {index: trial.status.name for index, trial in trials}
arms_df["trial_status"] = [
trial_to_status[trial_index] for trial_index in arms_df.trial_index
]
# Add generator_run model keys
arms_df["generator_model"] = [
# This accounts for the generic case that generator_runs is a list of arbitrary
# length. If all elements are `None`, this yields an empty string. Repeated
# generator models within a trial are condensed via a set comprehension.
", ".join(
{
not_none(generator_run._model_key)
for generator_run in exp.trials[trial_index].generator_runs
if generator_run._model_key is not None
}
)
if trial_index in exp.trials
else ""
for trial_index in arms_df.trial_index
]
# replace all unknown generator_models (denoted by empty strings) with "Unknown"
arms_df["generator_model"] = [
"Unknown" if generator_model == "" else generator_model
for generator_model in arms_df["generator_model"]
]
# Add any trial properties fields to arms_df
if trial_properties_fields is not None:
if not (
isinstance(trial_properties_fields, list)
and all(isinstance(field, str) for field in trial_properties_fields)
):
raise ValueError(
"trial_properties_fields must be List[str] or None. "
f"Got {trial_properties_fields}"
)
# add trial._properties fields
for field in trial_properties_fields:
trial_to_properties_field = {
index: (
trial._properties[field] if field in trial._properties else None
)
for index, trial in trials
}
if any(trial_to_properties_field.values()): # field present for any trial
if not all(
trial_to_properties_field.values()
): # not present for all trials
logger.warning(
f"Field {field} missing for some trials' properties. "
"Returning None when missing."
)
arms_df["trial_properties_" + field] = [
trial_to_properties_field[key] for key in arms_df.trial_index
]
else:
logger.warning(
f"Field {field} missing for all trials' properties. "
"Not appending column."
)
# Add any run_metadata fields to arms_df
if run_metadata_fields is not None:
if not (
isinstance(run_metadata_fields, list)
and all(isinstance(field, str) for field in run_metadata_fields)
):
raise ValueError(
"run_metadata_fields must be List[str] or None. "
f"Got {run_metadata_fields}"
)
# add run_metadata fields
for field in run_metadata_fields:
trial_to_metadata_field = {
index: (
trial.run_metadata[field] if field in trial.run_metadata else None
)
for index, trial in trials
}
if any(trial_to_metadata_field.values()): # field present for any trial
if not all(
trial_to_metadata_field.values()
): # not present for all trials
logger.warning(
f"Field {field} missing for some trials' run_metadata. "
"Returning None when missing."
)
arms_df[field] = [
trial_to_metadata_field[key] for key in arms_df.trial_index
]
else:
logger.warning(
f"Field {field} missing for all trials' run_metadata. "
"Not appending column."
)
if len(results.index) == 0:
logger.info(
f"No results present for the specified metrics `{metrics}`. "
"Returning arm parameters and metadata only."
)
exp_df = arms_df
elif not all(col in results.columns for col in key_components):
logger.warn(
f"At least one of key columns `{key_components}` not present in results df "
f"`{results}`. Returning arm parameters and metadata only."
)
exp_df = arms_df
else:
# prepare results for merge
key_vals = results[key_components[0]].astype("str") + results[
key_components[1]
].astype("str")
results[key_col] = key_vals
metric_vals = results.pivot(
index=key_col, columns="metric_name", values="mean"
).reset_index()
# dedupe results by key_components
metadata = results[key_components + [key_col]].drop_duplicates()
metrics_df = pd.merge(metric_vals, metadata, on=key_col)
# merge and return
exp_df = pd.merge(
metrics_df, arms_df, on=key_components + [key_col], how="outer"
)
return prep_return(df=exp_df, drop_col=key_col, sort_by=["arm_name"])
def get_best_trial(
exp: Experiment,
additional_metrics: Optional[List[Metric]] = None,
run_metadata_fields: Optional[List[str]] = None,
**kwargs: Any,
) -> Optional[pd.DataFrame]:
"""Finds the optimal trial given an experiment, based on raw objective value.
Returns a 1-row dataframe. Should match the row of ``exp_to_df`` with the best
raw objective value, given the same arguments.
Args:
exp: An Experiment that may have pending trials.
additional_metrics: List of metrics to return in addition to the objective
metric. Return all metrics if None.
run_metadata_fields: fields to extract from trial.run_metadata for trial
in experiment.trials. If there are multiple arms per trial, these
fields will be replicated across the arms of a trial.
**kwargs: Custom named arguments, useful for passing complex
objects from call-site to the `fetch_data` callback.
Returns:
DataFrame: A dataframe of inputs and metrics of the optimal trial.
"""
objective = not_none(exp.optimization_config).objective
if isinstance(objective, MultiObjective):
logger.warning(
"No best trial is available for `MultiObjective` optimization. "
"Returning None for best trial."
)
return None
if isinstance(objective, ScalarizedObjective):
logger.warning(
"No best trial is available for `ScalarizedObjective` optimization. "
"Returning None for best trial."
)
return None
if (additional_metrics is not None) and (
objective.metric not in additional_metrics
):
additional_metrics.append(objective.metric)
trials_df = exp_to_df(
exp=exp,
metrics=additional_metrics,
run_metadata_fields=run_metadata_fields,
**kwargs,
)
if len(trials_df.index) == 0:
logger.warning("`exp_to_df` returned 0 trials. Returning None for best trial.")
return None
metric_name = objective.metric.name
minimize = objective.minimize
if metric_name not in trials_df.columns:
logger.warning(
f"`exp_to_df` did not have data for metric {metric_name}. "
"Returning None for best trial."
)
return None
metric_optimum = (
trials_df[metric_name].min() if minimize else trials_df[metric_name].max()
)
return pd.DataFrame(trials_df[trials_df[metric_name] == metric_optimum].head(1))
| return not_none(not_none(df.drop(drop_col, axis=1)).sort_values(sort_by)) | identifier_body |
report_utils.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# pyre-strict
from collections import defaultdict
from logging import Logger
from typing import Any, Dict, List, Optional
import numpy as np
import pandas as pd
import plotly.graph_objects as go
from ax.core.experiment import Experiment
from ax.core.metric import Metric
from ax.core.multi_type_experiment import MultiTypeExperiment
from ax.core.objective import MultiObjective, ScalarizedObjective
from ax.core.search_space import SearchSpace
from ax.core.trial import BaseTrial, Trial
from ax.modelbridge import ModelBridge
from ax.modelbridge.cross_validation import cross_validate
from ax.modelbridge.generation_strategy import GenerationStrategy
from ax.plot.contour import interact_contour_plotly
from ax.plot.diagnostic import interact_cross_validation_plotly
from ax.plot.slice import plot_slice_plotly
from ax.plot.trace import optimization_trace_single_method_plotly
from ax.utils.common.logger import get_logger
from ax.utils.common.typeutils import checked_cast, not_none
logger: Logger = get_logger(__name__)
# pyre-ignore[11]: Annotation `go.Figure` is not defined as a type.
def _get_cross_validation_plot(model: ModelBridge) -> go.Figure:
cv = cross_validate(model)
return interact_cross_validation_plotly(cv)
def _get_objective_trace_plot(
experiment: Experiment,
metric_name: str,
model_transitions: List[int],
optimization_direction: Optional[str] = None,
) -> Optional[go.Figure]:
best_objectives = np.array([experiment.fetch_data().df["mean"]])
return optimization_trace_single_method_plotly(
y=best_objectives,
title="Best objective found vs. # of iterations",
ylabel=metric_name,
model_transitions=model_transitions,
optimization_direction=optimization_direction,
plot_trial_points=True,
)
def _get_objective_v_param_plot(
search_space: SearchSpace,
model: ModelBridge,
metric_name: str,
trials: Dict[int, BaseTrial],
) -> Optional[go.Figure]:
range_params = list(search_space.range_parameters.keys())
if len(range_params) == 1:
# individual parameter slice plot
output_slice_plot = plot_slice_plotly(
model=not_none(model),
param_name=range_params[0],
metric_name=metric_name,
generator_runs_dict={
str(t.index): not_none(checked_cast(Trial, t).generator_run)
for t in trials.values()
},
)
return output_slice_plot
if len(range_params) > 1:
# contour plot
output_contour_plot = interact_contour_plotly(
model=not_none(model),
metric_name=metric_name,
)
return output_contour_plot
# if search space contains no range params
logger.warning(
"_get_objective_v_param_plot requires a search space with at least one "
"RangeParameter. Returning None."
)
return None
def _get_suffix(input_str: str, delim: str = ".", n_chunks: int = 1) -> str:
return delim.join(input_str.split(delim)[-n_chunks:])
def _get_shortest_unique_suffix_dict(
input_str_list: List[str], delim: str = "."
) -> Dict[str, str]:
"""Maps a list of strings to their shortest unique suffixes
Maps all original strings to the smallest number of chunks, as specified by
delim, that are not a suffix of any other original string. If the original
string was a suffix of another string, map it to its unaltered self.
Args:
input_str_list: a list of strings to create the suffix mapping for
delim: the delimiter used to split up the strings into meaningful chunks
Returns:
dict: A dict with the original strings as keys and their abbreviations as
values
"""
# all input strings must be unique
assert len(input_str_list) == len(set(input_str_list))
if delim == "":
raise ValueError("delim must be a non-empty string.")
suffix_dict = defaultdict(list)
# initialize suffix_dict with last chunk
for istr in input_str_list:
suffix_dict[_get_suffix(istr, delim=delim, n_chunks=1)].append(istr)
max_chunks = max(len(istr.split(delim)) for istr in input_str_list)
if max_chunks == 1:
return {istr: istr for istr in input_str_list}
# the upper range of this loop is `max_chunks + 2` because:
# - `i` needs to take the value of `max_chunks`, hence one +1
# - the contents of the loop are run one more time to check if `all_unique`,
# hence the other +1
for i in range(2, max_chunks + 2):
new_dict = defaultdict(list)
all_unique = True
for suffix, suffix_str_list in suffix_dict.items():
if len(suffix_str_list) > 1:
all_unique = False
for istr in suffix_str_list:
new_dict[_get_suffix(istr, delim=delim, n_chunks=i)].append(istr)
else:
new_dict[suffix] = suffix_str_list
if all_unique:
if len(set(input_str_list)) != len(suffix_dict.keys()):
break
return {
suffix_str_list[0]: suffix
for suffix, suffix_str_list in suffix_dict.items()
}
suffix_dict = new_dict
# If this function has not yet exited, some input strings still share a suffix.
# This is not expected, but in this case, the function will return the identity
# mapping, i.e., a dict with the original strings as both keys and values.
logger.warning(
"Something went wrong. Returning dictionary with original strings as keys and "
"values."
)
return {istr: istr for istr in input_str_list}
def get_standard_plots(
experiment: Experiment, generation_strategy: Optional[GenerationStrategy]
) -> List[go.Figure]:
"""Extract standard plots for single-objective optimization.
Extracts a list of plots from an Experiment and GenerationStrategy of general
interest to an Ax user. Currently not supported are
- TODO: multi-objective optimization
- TODO: ChoiceParameter plots
Args:
- experiment: the Experiment from which to obtain standard plots.
- generation_strategy: the GenerationStrategy used to suggest trial parameters
in experiment
Returns:
- a plot of objective value vs. trial index, to show experiment progression
- a plot of objective value vs. range parameter values, only included if the
model associated with generation_strategy can create predictions. This
consists of:
- a plot_slice plot if the search space contains one range parameter
- an interact_contour plot if the search space contains multiple
range parameters
"""
objective = not_none(experiment.optimization_config).objective
if isinstance(objective, MultiObjective):
logger.warning(
"get_standard_plots does not currently support MultiObjective "
"optimization experiments. Returning an empty list."
)
return []
if isinstance(objective, ScalarizedObjective):
logger.warning(
"get_standard_plots does not currently support ScalarizedObjective "
"optimization experiments. Returning an empty list."
)
return []
if experiment.fetch_data().df.empty:
logger.info(f"Experiment {experiment} does not yet have data, nothing to plot.")
return []
output_plot_list = []
output_plot_list.append(
_get_objective_trace_plot(
experiment=experiment,
metric_name=not_none(experiment.optimization_config).objective.metric.name,
# TODO: Adjust `model_transitions` to case where custom trials are present
# and generation strategy does not start right away.
model_transitions=not_none(generation_strategy).model_transitions
if generation_strategy is not None
else [],
optimization_direction=(
"minimize"
if not_none(experiment.optimization_config).objective.minimize
else "maximize"
),
)
)
# Objective vs. parameter plot requires a `Model`, so add it only if model
# is alrady available. In cases where initially custom trials are attached,
# model might not yet be set on the generation strategy.
if generation_strategy and generation_strategy.model:
model = not_none(not_none(generation_strategy).model)
try:
output_plot_list.append(
_get_objective_v_param_plot(
search_space=experiment.search_space,
model=model,
metric_name=not_none(
experiment.optimization_config
).objective.metric.name,
trials=experiment.trials,
)
)
output_plot_list.append(_get_cross_validation_plot(model))
except NotImplementedError:
# Model does not implement `predict` method.
pass
return [plot for plot in output_plot_list if plot is not None]
def exp_to_df(
exp: Experiment,
metrics: Optional[List[Metric]] = None,
run_metadata_fields: Optional[List[str]] = None,
trial_properties_fields: Optional[List[str]] = None,
**kwargs: Any,
) -> pd.DataFrame:
"""Transforms an experiment to a DataFrame. Only supports Experiment and
SimpleExperiment.
Transforms an Experiment into a dataframe with rows keyed by trial_index
and arm_name, metrics pivoted into one row.
Args:
exp: An Experiment that may have pending trials.
metrics: Override list of metrics to return. Return all metrics if None.
run_metadata_fields: fields to extract from trial.run_metadata for trial
in experiment.trials. If there are multiple arms per trial, these
fields will be replicated across the arms of a trial.
trial_properties_fields: fields to extract from trial._properties for trial
in experiment.trials. If there are multiple arms per trial, these fields
will be replicated across the arms of a trial. Output columns names will be
prepended with "trial_properties_".
**kwargs: Custom named arguments, useful for passing complex
objects from call-site to the `fetch_data` callback.
Returns:
DataFrame: A dataframe of inputs, metadata and metrics by trial and arm. If
no trials are available, returns an empty dataframe. If no metric ouputs are
available, returns a dataframe of inputs and metadata.
"""
def | (
df: pd.DataFrame, drop_col: str, sort_by: List[str]
) -> pd.DataFrame:
return not_none(not_none(df.drop(drop_col, axis=1)).sort_values(sort_by))
# Accept Experiment and SimpleExperiment
if isinstance(exp, MultiTypeExperiment):
raise ValueError("Cannot transform MultiTypeExperiments to DataFrames.")
key_components = ["trial_index", "arm_name"]
# Get each trial-arm with parameters
arms_df = pd.DataFrame()
for trial_index, trial in exp.trials.items():
for arm in trial.arms:
arms_df = arms_df.append(
{"arm_name": arm.name, "trial_index": trial_index, **arm.parameters},
ignore_index=True,
)
# Fetch results; in case arms_df is empty, return empty results (legacy behavior)
results = exp.fetch_data(metrics, **kwargs).df
if len(arms_df.index) == 0:
if len(results.index) != 0:
raise ValueError(
"exp.fetch_data().df returned more rows than there are experimental "
"arms. This is an inconsistent experimental state. Please report to "
"Ax support."
)
return results
# Create key column from key_components
arms_df["trial_index"] = arms_df["trial_index"].astype(int)
key_col = "-".join(key_components)
key_vals = arms_df[key_components[0]].astype("str") + arms_df[
key_components[1]
].astype("str")
arms_df[key_col] = key_vals
# Add trial status
trials = exp.trials.items()
trial_to_status = {index: trial.status.name for index, trial in trials}
arms_df["trial_status"] = [
trial_to_status[trial_index] for trial_index in arms_df.trial_index
]
# Add generator_run model keys
arms_df["generator_model"] = [
# This accounts for the generic case that generator_runs is a list of arbitrary
# length. If all elements are `None`, this yields an empty string. Repeated
# generator models within a trial are condensed via a set comprehension.
", ".join(
{
not_none(generator_run._model_key)
for generator_run in exp.trials[trial_index].generator_runs
if generator_run._model_key is not None
}
)
if trial_index in exp.trials
else ""
for trial_index in arms_df.trial_index
]
# replace all unknown generator_models (denoted by empty strings) with "Unknown"
arms_df["generator_model"] = [
"Unknown" if generator_model == "" else generator_model
for generator_model in arms_df["generator_model"]
]
# Add any trial properties fields to arms_df
if trial_properties_fields is not None:
if not (
isinstance(trial_properties_fields, list)
and all(isinstance(field, str) for field in trial_properties_fields)
):
raise ValueError(
"trial_properties_fields must be List[str] or None. "
f"Got {trial_properties_fields}"
)
# add trial._properties fields
for field in trial_properties_fields:
trial_to_properties_field = {
index: (
trial._properties[field] if field in trial._properties else None
)
for index, trial in trials
}
if any(trial_to_properties_field.values()): # field present for any trial
if not all(
trial_to_properties_field.values()
): # not present for all trials
logger.warning(
f"Field {field} missing for some trials' properties. "
"Returning None when missing."
)
arms_df["trial_properties_" + field] = [
trial_to_properties_field[key] for key in arms_df.trial_index
]
else:
logger.warning(
f"Field {field} missing for all trials' properties. "
"Not appending column."
)
# Add any run_metadata fields to arms_df
if run_metadata_fields is not None:
if not (
isinstance(run_metadata_fields, list)
and all(isinstance(field, str) for field in run_metadata_fields)
):
raise ValueError(
"run_metadata_fields must be List[str] or None. "
f"Got {run_metadata_fields}"
)
# add run_metadata fields
for field in run_metadata_fields:
trial_to_metadata_field = {
index: (
trial.run_metadata[field] if field in trial.run_metadata else None
)
for index, trial in trials
}
if any(trial_to_metadata_field.values()): # field present for any trial
if not all(
trial_to_metadata_field.values()
): # not present for all trials
logger.warning(
f"Field {field} missing for some trials' run_metadata. "
"Returning None when missing."
)
arms_df[field] = [
trial_to_metadata_field[key] for key in arms_df.trial_index
]
else:
logger.warning(
f"Field {field} missing for all trials' run_metadata. "
"Not appending column."
)
if len(results.index) == 0:
logger.info(
f"No results present for the specified metrics `{metrics}`. "
"Returning arm parameters and metadata only."
)
exp_df = arms_df
elif not all(col in results.columns for col in key_components):
logger.warn(
f"At least one of key columns `{key_components}` not present in results df "
f"`{results}`. Returning arm parameters and metadata only."
)
exp_df = arms_df
else:
# prepare results for merge
key_vals = results[key_components[0]].astype("str") + results[
key_components[1]
].astype("str")
results[key_col] = key_vals
metric_vals = results.pivot(
index=key_col, columns="metric_name", values="mean"
).reset_index()
# dedupe results by key_components
metadata = results[key_components + [key_col]].drop_duplicates()
metrics_df = pd.merge(metric_vals, metadata, on=key_col)
# merge and return
exp_df = pd.merge(
metrics_df, arms_df, on=key_components + [key_col], how="outer"
)
return prep_return(df=exp_df, drop_col=key_col, sort_by=["arm_name"])
def get_best_trial(
exp: Experiment,
additional_metrics: Optional[List[Metric]] = None,
run_metadata_fields: Optional[List[str]] = None,
**kwargs: Any,
) -> Optional[pd.DataFrame]:
"""Finds the optimal trial given an experiment, based on raw objective value.
Returns a 1-row dataframe. Should match the row of ``exp_to_df`` with the best
raw objective value, given the same arguments.
Args:
exp: An Experiment that may have pending trials.
additional_metrics: List of metrics to return in addition to the objective
metric. Return all metrics if None.
run_metadata_fields: fields to extract from trial.run_metadata for trial
in experiment.trials. If there are multiple arms per trial, these
fields will be replicated across the arms of a trial.
**kwargs: Custom named arguments, useful for passing complex
objects from call-site to the `fetch_data` callback.
Returns:
DataFrame: A dataframe of inputs and metrics of the optimal trial.
"""
objective = not_none(exp.optimization_config).objective
if isinstance(objective, MultiObjective):
logger.warning(
"No best trial is available for `MultiObjective` optimization. "
"Returning None for best trial."
)
return None
if isinstance(objective, ScalarizedObjective):
logger.warning(
"No best trial is available for `ScalarizedObjective` optimization. "
"Returning None for best trial."
)
return None
if (additional_metrics is not None) and (
objective.metric not in additional_metrics
):
additional_metrics.append(objective.metric)
trials_df = exp_to_df(
exp=exp,
metrics=additional_metrics,
run_metadata_fields=run_metadata_fields,
**kwargs,
)
if len(trials_df.index) == 0:
logger.warning("`exp_to_df` returned 0 trials. Returning None for best trial.")
return None
metric_name = objective.metric.name
minimize = objective.minimize
if metric_name not in trials_df.columns:
logger.warning(
f"`exp_to_df` did not have data for metric {metric_name}. "
"Returning None for best trial."
)
return None
metric_optimum = (
trials_df[metric_name].min() if minimize else trials_df[metric_name].max()
)
return pd.DataFrame(trials_df[trials_df[metric_name] == metric_optimum].head(1))
| prep_return | identifier_name |
getlola.py | # -*- coding:UTF-8 -*-
import cx_Oracle
import datetime
import os
# 设置字符集与oracle一致,不然insert中文乱码
os.environ['NLS_LANG'] = 'AMERICAN_AMERICA.ZHS16GBK'
print('====beging...')
# 获取需要判断的数据信息
startTime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M')
print('===========================',startTime,'============================')
DB_UserName = ""
DB_UserPwd = ""
DB_ConnectStr = ""
def Oracle_Query(SqlStr, debug=0):
"Execute oracle query, and return data_list"
conn = cx_Oracle.connect(DB_UserName, DB_UserPwd, DB_ConnectStr)
data_list = []
cursor = conn.cursor()
try:
cursor.execute(SqlStr)
while 1:
rs = cursor.fetchone()
if rs == None:
break
data_list.append(rs)
if debug:
fieldnames = []
for field in cursor.description:
fieldnames.append(field[0])
print(fieldnames)
print(data_list)
print("Query success!")
except:
print("Exec sql failed: %s" % SqlStr)
finally:
cursor.close()
# conn.close()
return data_list
def Oracle_Exec(SqlStr):
    """Execute a DML/DDL statement and commit it.

    :param SqlStr: full SQL text (values are interpolated by callers with
                   %s -- beware SQL injection with untrusted input)
    :return: True when the statement committed; False on failure (the
             transaction is rolled back and the error is printed)
    """
    conn = cx_Oracle.connect(DB_UserName, DB_UserPwd, DB_ConnectStr)
    cursor = conn.cursor()
    try:
        cursor.execute(SqlStr)
        conn.commit()
        return True
    except Exception as exc:
        conn.rollback()
        # Show the reason; the original bare `except:` swallowed it and
        # also caught KeyboardInterrupt/SystemExit.
        print("Exec sql failed: %s" % SqlStr)
        print(exc)
        return False
    finally:
        cursor.close()
        conn.close()  # fix: the connection was leaked (close was commented out)
# 判断坐标是否再坐标圈内
def is_pt_in_poly(aLon, aLat, pointList):
    """Even-odd (ray casting) point-in-polygon test.

    A horizontal ray is cast from the point towards decreasing longitude;
    the point is inside when the ray crosses an odd number of polygon edges.

    :param aLon: longitude of the point (number or numeric string)
    :param aLat: latitude of the point (number or numeric string)
    :param pointList: polygon vertices as [(lon, lat), ...] in consistent
                      clockwise or counter-clockwise order
    :return: True when the point lies inside the polygon, else False;
             degenerate inputs with fewer than 3 vertices are always False
    """
    count = len(pointList)
    if count < 3:
        return False
    # Convert the query point once instead of on every loop iteration.
    lon = float(aLon)
    lat = float(aLat)
    crossings = 0
    for i in range(count):
        lon1 = float(pointList[i][0])
        lat1 = float(pointList[i][1])
        # Wrap around so the last vertex connects back to the first; this
        # replaces the original unreachable try/except IndexError guard.
        lon2 = float(pointList[(i + 1) % count][0])
        lat2 = float(pointList[(i + 1) % count][1])
        # Half-open latitude interval so a vertex shared by two edges is
        # counted exactly once.
        if (lat1 <= lat < lat2) or (lat2 <= lat < lat1):
            # Longitude where this edge crosses the point's latitude
            # (lat1 != lat2 is guaranteed by the condition above).
            cross_lon = lon1 - ((lon1 - lon2) * (lat1 - lat)) / (lat1 - lat2)
            if cross_lon < lon:
                crossings += 1
    return crossings % 2 != 0
# 格式化从excel中得到的内容
# def get_file_row(row,file_name):
# wb_1 = openpyxl.load_workbook('%s' % file_name)
# ws_1 = wb_1.active
# colC = ws_1['%s' % row]
# list_1 = [x for x in colC]
# list_2 = []
# for i in list_1:
# list_2.append(i.value)
# return list_2
# 查询获得基站的名称、经度、纬度
def get_station_lo_la():
    """Fetch every base station's name, longitude and latitude.

    :return: list of (b_station_name, longitude, latitude) tuples
    """
    return Oracle_Query(
        "SELECT A.B_STATION_NAME,A.LONGITUDE,A.LATITUDE FROM B_BASE_STATION_INFO A"
    )
# 查询获得网格的id、名称、网格坐标圈 指定市
def get_point_2(org_id, orgLevel):
    """Fetch grid code, name and polygon CLOB for one organisation subtree.

    Walks the organisation tree downwards from org_id (CONNECT BY) and keeps
    only active ('00A') orgs of the requested level that also have an active
    map polygon.

    :param org_id: root organisation id (typically a city)
    :param orgLevel: org level to keep, e.g. '4' or '5'
    :return: list of (org_code, org_name, point_clob) tuples

    NOTE(review): values are spliced into the SQL with %s -- safe only for
    trusted ids; bind variables would be preferable.
    """
    sql_sec_orgid_orgname_point = "select r.org_code,r.org_name,r.point " \
                                  "from (" \
                                  "select a.org_code,a.org_name,a.p_org_id,a.org_level,b.point " \
                                  "from s_orgnization a,p_map_draw b " \
                                  "where a.org_id=b.org_id " \
                                  "and b.state='00A' " \
                                  "and a.state='00A' " \
                                  "and a.org_id in ( " \
                                  "select org_id from s_orgnization start with org_id='%s' " \
                                  "connect by prior org_id=p_org_id)) r,s_orgnization o " \
                                  "where r.p_org_id=o.org_id " \
                                  "and r.org_level='%s'" % (str(org_id), str(orgLevel))
    list_orgid_orgname_point = Oracle_Query(sql_sec_orgid_orgname_point)
    return list_orgid_orgname_point
# 查询获得网格的id、名称、网格坐标圈
def get_point():
    """Fetch id, name and polygon CLOB for every level-5 grid.

    The LEFT JOIN means the point column may be None for grids without a
    map polygon (expand_orgidnamepoint skips those rows).

    :return: list of (org_id, org_name, point_clob_or_None) tuples
    """
    sql_sec_orgid_orgname_point = "select a.org_id,a.org_name,b.point from s_orgnization a " \
                                  "left join p_map_draw b on b.org_id = a.org_id" \
                                  " where a.org_level='5'"
    list_orgid_orgname_point = Oracle_Query(sql_sec_orgid_orgname_point)
    return list_orgid_orgname_point
# 格式化cblob字段
def expand_list(tList):
    """Parse a polygon CLOB into a list of [lon, lat] string pairs.

    The CLOB holds ';'-separated vertices, each vertex being 'lon,lat'.

    :param tList: object with a .read() method returning the raw text
                  (e.g. a cx_Oracle LOB)
    :return: list of lists of strings, e.g. [['121.4', '31.2'], ...]
    """
    # Comprehension replaces the original manual counter loop that mutated
    # the list while iterating it.
    return [vertex.split(',') for vertex in tList.read().split(';')]
# 修改基站grid_id bak表
def update_station_grid_id(gridId, bStationName):
    """Write a station's grid assignment into the _bak table.

    :param gridId: grid id to store
    :param bStationName: station name used as the row key
    :return: True when the UPDATE committed, False otherwise
    """
    sqlUpdateGridId = "update b_base_station_info_bak a " \
                      "set a.grid_id='%s' " \
                      "where a.b_station_name='%s'" % (str(gridId), bStationName)
    updateResult = Oracle_Exec(sqlUpdateGridId)
    return updateResult
# 对比两个b_station_name的grid_id是否相同
def judge_station_name(stationName):
    """Compare a station's grid id in the _bak table with the mid table.

    :param stationName: station name looked up in both tables
    :return: False when both tables agree (no update needed); otherwise the
             mid-table result, a list like [(grid_id,)], whose first value
             is the grid id that should be written back
    """
    sqlGridIdFromInfo = " select grid_id from b_base_station_info_bak where b_station_name = '%s'" % stationName
    sqlGridIdFromMid = "select grid_id from B_STATION_GRID_MID where b_station_name = '%s'" % stationName
    gridIdFromInfo = Oracle_Query(sqlGridIdFromInfo)
    gridIdFromMid = Oracle_Query(sqlGridIdFromMid)
    # Both queries return lists of 1-tuples, so this compares full result sets.
    if gridIdFromInfo == gridIdFromMid:
        return False
    else:
        return gridIdFromMid
# 格式化orgIdNamePoint
def expand_orgidnamepoint(orgIdNamePoint):
    """Expand each grid row's polygon CLOB in place.

    Rows are (org_id, org_name, point_clob); rows whose point column is None
    are left untouched, others become mutable lists with the CLOB replaced
    by the vertex list parsed by expand_list().

    Bug fix: the original tracked the index with a manual counter that was
    NOT incremented for skipped rows (the `continue` jumped over `flag += 1`),
    so every row after a None polygon was written back to the wrong slot.
    enumerate() keeps index and row in lock-step.

    :param orgIdNamePoint: list of grid rows, modified in place
    :return: the same list, for convenience
    """
    for idx, row in enumerate(orgIdNamePoint):
        if row[2] is not None:
            expanded = list(row)
            expanded[2] = expand_list(row[2])
            orgIdNamePoint[idx] = expanded
    return orgIdNamePoint
# 获取数据入中间表
def in_station_mid_table(stationLoLa, orgIdNamePoint):
    """Match each base station to the first grid polygon containing it and
    record the match in b_station_grid_mid.

    :param stationLoLa: iterable of (station_name, longitude, latitude)
    :param orgIdNamePoint: iterable of (org_id, org_name, polygon_points)
    :return: None
    """
    for station_name, station_lo, station_la in stationLoLa:  # station coordinates
        for ord_id, org_name, org_point in orgIdNamePoint:  # grid id, name and polygon
            judge_result = is_pt_in_poly(station_lo, station_la, org_point)
            if judge_result:
                sql_insert_b_station_grid_mid = "insert into b_station_grid_mid (org_name,grid_id,b_station_name) " \
                                                "values ('%s','%s','%s')" % (org_name, ord_id, station_name)
                Oracle_Exec(sql_insert_b_station_grid_mid)
                break
# 对照中间表修改正式表中的所有数据
def updata_station_all():
    """Reconcile the production _bak table against B_STATION_GRID_MID:
    any station whose grid id differs gets the mid-table value written back.
    """
    rows = Oracle_Query("select b_station_name from B_STATION_GRID_MID")
    for row in rows:
        station_name = row[0]
        grid_id = judge_station_name(station_name)
        # False means the two tables already agree -- nothing to do.
        if not grid_id:
            continue
        print(station_name)
        print(grid_id)
        update_station_grid_id(grid_id[0][0], station_name)
# 获取小区的名称以及经纬度
def get_cell_lo_la(cityId, dayID):
    """Fetch id, longitude and latitude for every cell of one city and day.

    :param cityId: city id the cells belong to
    :param dayID: data partition day id
    :return: list of (cell_id, longitude, latitude) tuples; rows with a
             NULL coordinate are filtered out by the query
    """
    sqlGetCellIdLoLa = "SELECT A.CELL_ID,A.LONGITUDE,A.LATITUDE " \
                       "FROM B_SUBDISTRICT_INFO A " \
                       "WHERE CITY_ID='%s' " \
                       "AND A.LONGITUDE IS NOT NULL " \
                       "AND A.LATITUDE IS NOT NULL " \
                       "AND A.DAY_ID='%s'" % (str(cityId), str(dayID))
    listCellLoLa = Oracle_Query(sqlGetCellIdLoLa)
    return listCellLoLa
# 判断小区的结果并录入中间表
def in_cell_mid_table(cellLoLa, orgIdNamePoint, orgLevel, dayId, cityId):
    """Match each cell to the first grid polygon containing it and insert
    the match into b_cell_mid.

    :param cellLoLa: iterable of (cell_id, longitude, latitude)
    :param orgIdNamePoint: iterable of (org_id, org_name, polygon_points)
    :param orgLevel: grid level recorded with each match
    :param dayId: day id recorded with each match
    :param cityId: city id recorded with each match
    :return: None
    """
    for cellId, cellLo, cellLa in cellLoLa:  # cell coordinates
        for ord_id, org_name, org_point in orgIdNamePoint:  # grid id, name and polygon
            judge_result = is_pt_in_poly(cellLo, cellLa, org_point)
            if judge_result:
                sql_insert_b_cell_mid = "insert into b_cell_mid (day_id,org_name,grid_id,city_id,org_level,cell_id) " \
                                        "values ('%s','%s','%s','%s','%s','%s')" % (
                    str(dayId), org_name, ord_id, str(cityId), str(orgLevel), cellId)
                Oracle_Exec(sql_insert_b_cell_mid)
                break
# 判断小区的结果并录入中间表
def in_cell_mid_table_grid_id(cellLoLa, orgIdNamePoint_5, orgIdNamePoint_4, orgLevel_5, orgLevel_4,dayId, cityId):
"""
:param cellLoLa: 小区的坐标信息
:param orgIdNamePoint: 网格的坐标范围信息
:param orgLevel: 网格等级
:return: 无返回值
"""
for cellId, cellLo, cellLa in cellLoLa: # 获取基站的经纬度
flag = 0
for ord_id, org_name, org_point in orgIdNamePoint_5: # 获取网格的相关内容 (id、name、point list)
judge_result = is_pt_in_poly(cellLo, cellLa, org_point)
if judge_result:
sql_insert_b_cell_mid = "insert into b_cell_ | _id,city_id,org_level,cell_id) " \
"values ('%s','%s','%s','%s','%s','%s')" % (
str(dayId), org_name, ord_id, str(cityId), str(orgLevel_5), cellId)
Oracle_Exec(sql_insert_b_cell_mid)
flag = 1
break
if flag == 0:
for ord_id, org_name, org_point in orgIdNamePoint_4: # 获取网格的相关内容 (id、name、point list)
judge_result = is_pt_in_poly(cellLo, cellLa, org_point)
if judge_result:
sql_insert_b_cell_mid = "insert into b_cell_mid (day_id,org_name,grid_id,city_id,org_level,cell_id) " \
"values ('%s','%s','%s','%s','%s','%s')" % (
str(dayId), org_name, ord_id, str(cityId), str(orgLevel_4), cellId)
Oracle_Exec(sql_insert_b_cell_mid)
break
# 对比两个cell_id的region_id是否相同
def judge_cell_id_region(cellId):
    """Compare a cell's region in B_SUBDISTRICT_INFO with its grid id in
    B_CELL_MID.

    :param cellId: cell id looked up in both tables
    :return: False when both result sets are equal (no update needed);
             otherwise the mid-table result, a list like [(grid_id,)]

    NOTE(review): the `region` column is compared against `grid_id`; this
    looks intentional (region stores the grid id) -- confirm with schema.
    """
    sqlGridIdFromInfo = " select region from B_SUBDISTRICT_INFO where cell_id = '%s'" % cellId
    sqlGridIdFromMid = "select grid_id from B_CELL_MID where cell_id = '%s'" % cellId
    gridIdFromInfo = Oracle_Query(sqlGridIdFromInfo)
    gridIdFromMid = Oracle_Query(sqlGridIdFromMid)
    if gridIdFromInfo == gridIdFromMid:
        return False
    else:
        return gridIdFromMid
# 修改基站grid_id bak表
def update_cell_region(gridId, cellId):
    """Write a cell's region (grid id) into B_SUBDISTRICT_INFO.

    :param gridId: grid id to store in the region column
    :param cellId: cell id used as the row key
    :return: True when the UPDATE committed, False otherwise
    """
    sqlUpdateGridId = "update B_SUBDISTRICT_INFO a " \
                      "set a.region='%s' " \
                      "where a.cell_id='%s'" % (str(gridId), cellId)
    updateResult = Oracle_Exec(sqlUpdateGridId)
    return updateResult
# 对照中间表修改正式表中的所有数据 update小区region_id
def updata_cell_region_all():
    """Reconcile B_SUBDISTRICT_INFO.region against B_CELL_MID: any cell
    whose region differs gets the mid-table grid id written back.
    """
    for row in Oracle_Query("select cell_id from B_CELL_MID"):
        cell_id = row[0]
        grid_id = judge_cell_id_region(cell_id)
        # False means the two tables already agree -- nothing to do.
        if not grid_id:
            continue
        print(cell_id)
        print(grid_id)
        update_cell_region(grid_id[0][0], cell_id)
| mid (day_id,org_name,grid | identifier_name |
getlola.py | # -*- coding:UTF-8 -*-
import cx_Oracle
import datetime
import os
# 设置字符集与oracle一致,不然insert中文乱码
os.environ['NLS_LANG'] = 'AMERICAN_AMERICA.ZHS16GBK'
print('====beging...')
# 获取需要判断的数据信息
startTime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M')
print('===========================',startTime,'============================')
DB_UserName = ""
DB_UserPwd = ""
DB_ConnectStr = ""
def Oracle_Query(SqlStr, debug=0):
"Execute oracle query, and return data_list"
conn = cx_Oracle.connect(DB_UserName, DB_UserPwd, DB_ConnectStr)
data_list = []
cursor = conn.cursor()
try:
cursor.execute(SqlStr)
while 1:
rs = cursor.fetchone()
if rs == None:
break
data_list.append(rs)
if debug:
fieldnames = []
for field in cursor.description:
fieldnames.append(field[0])
print(fieldnames)
print(data_list)
print("Query success!")
except:
print("Exec sql failed: %s" % SqlStr)
finally:
cursor.close()
# conn.close()
return data_list
def Oracle_Exec(SqlStr):
"Execute oracle command"
conn = cx_Oracle.connect(DB_UserName, DB_UserPwd, DB_ConnectStr)
cursor = conn.cursor()
try:
cursor.execute(SqlStr)
conn.commit()
return True
except:
conn.rollback()
print("Exec sql failed: %s" % SqlStr)
return False
finally:
cursor.close()
# conn.close()
# 判断坐标是否再坐标圈内
def is_pt_in_poly(aLon, aLat, pointList):
'''''
:param aLon: double 经度
:param aLat: double 纬度
:param pointList: list [(lon, lat)...] 多边形点的顺序需根据顺时针或逆时针,不能乱
'''
iSum = 0
iCount = len(pointList)
if (iCount < 3):
return False
# print("iCount = " + str(iCount))
for i in range(iCount):
pLon1 = pointList[i][0]
pLat1 = pointList[i][1]
if (i == iCount - 1):
pLon2 = pointList[0][0]
pLat2 = pointList[0][1]
else:
# print(i+1)
try:
pLon2 = pointList[i + 1][0]
pLat2 = pointList[i + 1][1]
except IndexError:
break
###转换数值类型
pLon1 = float(pLon1)
pLat1 = float(pLat1)
pLon2 = float(pLon2)
pLat2 = float(pLat2)
aLat = float(aLat)
aLon = float(aLon)
if ((aLat >= pLat1) and (aLat < pLat2)) or ((aLat >= pLat2) and (aLat < pLat1)):
if (abs(pLat1 - pLat2) > 0):
pLon = pLon1 - ((pLon1 - pLon2) * (pLat1 - aLat)) / (pLat1 - pLat2)
if (pLon < aLon):
iSum += 1
if (iSum % 2 != 0):
return True
else:
return False
# 格式化从excel中得到的内容
# def get_file_row(row,file_name):
# wb_1 = openpyxl.load_workbook('%s' % file_name)
# ws_1 = wb_1.active
# colC = ws_1['%s' % row]
# list_1 = [x for x in colC]
# list_2 = []
# for i in list_1:
# list_2.append(i.value)
# return list_2
# 查询获得基站的名称、经度、纬度
def get_station_lo_la():
sql_sec_name_lo_la = "SELECT A.B_STATION_NAME,A.LONGITUDE,A.LATITUDE FROM B_BASE_STATION_INFO A"
list_station_lo_la = Oracle_Query(sql_sec_name_lo_la)
return list_station_lo_la
# 查询获得网格的id、名称、网格坐标圈 指定市
def get_point_2(org_id, orgLevel):
sql_sec_orgid_orgname_point = "select r.org_code,r.org_name,r.point " \
"from (" \
"select a.org_code,a.org_name,a.p_org_id,a.org_level,b.point " \
"from s_orgnization a,p_map_draw b " \
"where a.org_id=b.org_id " \
"and b.state='00A' " \
"and a.state='00A' " \
"and a.org_id in ( " \
"select org_id from s_orgnization start with org_id='%s' " \
"connect by prior org_id=p_org_id)) r,s_orgnization o " \
"where r.p_org_id=o.org_id " \
"and r.org_level='%s'" % (str(org_id), str(orgLevel))
list_orgid_orgname_point = Oracle_Query(sql_sec_orgid_orgname_point)
return list_orgid_orgname_point
# 查询获得网格的id、名称、网格坐标圈
def get_point():
sql_sec_orgid_orgname_point = "select a.org_id,a.org_name,b.point from s_orgnization a " \
"left join p_map_draw b on b.org_id = a.org_id" \
" where a.org_level='5'"
list_orgid_orgname_point = Oracle_Query(sql_sec_orgid_orgname_point)
return list_orgid_orgname_point
# 格式化cblob字段
def expand_list(tList):
mList = tList.read().split(";")
flag = 0
for i in mList:
mList[flag] = mList[flag].split(',')
flag += 1
return mList
# 修改基站grid_id bak表
def update_station_grid_id(gridId, bStationName):
sqlUpdateGridId = "update b_base_station_info_bak a " \
"set a.grid_id='%s' " \
"where a.b_station_name='%s'" % (str(gridId), bStationName)
updateResult = Oracle_Exec(sqlUpdateGridId)
return updateResult
# 对比两个b_station_name的grid_id是否相同
def judge_station_name(stationName):
sqlGridIdFromInfo = " select grid_id from b_base_station_info_bak where b_station_name = '%s'" % stationName
sqlGridIdFromMid = "select grid_id from B_STATION_GRID_MID where b_station_name = '%s'" % stationName
gridIdFromInfo = Oracle_Query(sqlGridIdFromInfo)
gridIdFromMid = Oracle_Query(sqlGridIdFromMid)
if gridIdFromInfo == gridIdFromMid:
return False
else:
return gridIdFromMid
# 格式化orgIdNamePoint
def expand_orgidnamepoint(orgIdNamePoint):
flag = 0
for i in orgIdNamePoint:
if i[2] is not None:
orgIdNamePoint[flag] = list(orgIdNamePoint[flag])
orgIdNamePoint[flag][2] = expand_list(i[2])
else:
continue
flag += 1
return orgIdNamePoint
# 获取数据入中间表
def in_station_mid_table(stationLoLa, orgIdNamePoint):
for station_name, station_lo, station_la in stationLoLa: # 获取基站的经纬度
for ord_id, org_name, org_point in orgIdNamePoint: # 获取网格的相关内容 (id、name、point list)
judge_result = is_pt_in_poly(station_lo, station_la, org_point)
if judge_result:
sql_insert_b_station_grid_mid = "insert into b_station_grid_mid (org_name,grid_id,b_station_name) " \
"values ('%s','%s','%s')" % (org_name, ord_id, station_name)
Oracle_Exec(sql_insert_b_station_grid_mid)
break
# 对照中间表修改正式表中的所有数据
def updata_station_all():
sqlSecStationNameFromMid = "select b_station_name from B_STATION_GRID_MID"
stationNameList = Oracle_Query(sqlSecStationNameFromMid)
for stationNameTup in stationNameList:
gridId = judge_station_name(stationNameTup[0])
if gridId:
print(stationNameTup[0])
print(gridId)
update_station_grid_id(gridId[0][0], stationNameTup[0])
else:
continue
# 获取小区的名称以及经纬度
def get_cell_lo_la(cityId, dayID):
"""
:param cityId: 小区所在的城市ID
:return:
"""
sqlGetCellIdLoLa = "SELECT A.CELL_ID,A.LONGITUDE,A.LATITUDE " \
"FROM B_SUBDISTRICT_INFO A " \
"WHERE CITY_ID='%s' " \
"AND A.LONGITUDE IS NOT NULL " \
"AND A.LATITUDE IS NOT NULL " \
"AND A.DAY_ID='%s'" % (str(cityId), str(dayID))
listCellLoLa = Oracle_Query(sqlGetCellIdLoLa)
return listCellLoLa
# 判断小区的结果并录入中间表
def in_cell_mid_table(cellLoLa, orgIdNamePoint, orgLevel, dayId, cityId):
"""
:param cellLoLa: 小区的坐标信息
:param orgIdNamePoint: 网格的坐标范围信息
:param orgLevel: 网格等级
:return: 无返回值
"""
for cellId, cellLo, cellLa in cellLoLa: # 获取基站的经纬度
for ord_id, org_name, org_point in orgIdNamePoint: # 获取网格的相关内容 (id、name、point list) | judge_result = is_pt_in_poly(cellLo, cellLa, org_point)
if judge_result:
sql_insert_b_cell_mid = "insert into b_cell_mid (day_id,org_name,grid_id,city_id,org_level,cell_id) " \
"values ('%s','%s','%s','%s','%s','%s')" % (
str(dayId), org_name, ord_id, str(cityId), str(orgLevel), cellId)
Oracle_Exec(sql_insert_b_cell_mid)
break
# 判断小区的结果并录入中间表
def in_cell_mid_table_grid_id(cellLoLa, orgIdNamePoint_5, orgIdNamePoint_4, orgLevel_5, orgLevel_4,dayId, cityId):
"""
:param cellLoLa: 小区的坐标信息
:param orgIdNamePoint: 网格的坐标范围信息
:param orgLevel: 网格等级
:return: 无返回值
"""
for cellId, cellLo, cellLa in cellLoLa: # 获取基站的经纬度
flag = 0
for ord_id, org_name, org_point in orgIdNamePoint_5: # 获取网格的相关内容 (id、name、point list)
judge_result = is_pt_in_poly(cellLo, cellLa, org_point)
if judge_result:
sql_insert_b_cell_mid = "insert into b_cell_mid (day_id,org_name,grid_id,city_id,org_level,cell_id) " \
"values ('%s','%s','%s','%s','%s','%s')" % (
str(dayId), org_name, ord_id, str(cityId), str(orgLevel_5), cellId)
Oracle_Exec(sql_insert_b_cell_mid)
flag = 1
break
if flag == 0:
for ord_id, org_name, org_point in orgIdNamePoint_4: # 获取网格的相关内容 (id、name、point list)
judge_result = is_pt_in_poly(cellLo, cellLa, org_point)
if judge_result:
sql_insert_b_cell_mid = "insert into b_cell_mid (day_id,org_name,grid_id,city_id,org_level,cell_id) " \
"values ('%s','%s','%s','%s','%s','%s')" % (
str(dayId), org_name, ord_id, str(cityId), str(orgLevel_4), cellId)
Oracle_Exec(sql_insert_b_cell_mid)
break
# 对比两个cell_id的region_id是否相同
def judge_cell_id_region(cellId):
sqlGridIdFromInfo = " select region from B_SUBDISTRICT_INFO where cell_id = '%s'" % cellId
sqlGridIdFromMid = "select grid_id from B_CELL_MID where cell_id = '%s'" % cellId
gridIdFromInfo = Oracle_Query(sqlGridIdFromInfo)
gridIdFromMid = Oracle_Query(sqlGridIdFromMid)
if gridIdFromInfo == gridIdFromMid:
return False
else:
return gridIdFromMid
# 修改基站grid_id bak表
def update_cell_region(gridId, cellId):
sqlUpdateGridId = "update B_SUBDISTRICT_INFO a " \
"set a.region='%s' " \
"where a.cell_id='%s'" % (str(gridId), cellId)
updateResult = Oracle_Exec(sqlUpdateGridId)
return updateResult
# 对照中间表修改正式表中的所有数据 update小区region_id
def updata_cell_region_all():
sqlSecCellIdFromMid = "select cell_id from B_CELL_MID"
cellIdList = Oracle_Query(sqlSecCellIdFromMid)
for cellIdTup in cellIdList:
gridId = judge_cell_id_region(cellIdTup[0])
if gridId:
print(cellIdTup[0])
print(gridId)
update_cell_region(gridId[0][0], cellIdTup[0])
else:
continue | random_line_split | |
getlola.py | # -*- coding:UTF-8 -*-
import cx_Oracle
import datetime
import os
# 设置字符集与oracle一致,不然insert中文乱码
os.environ['NLS_LANG'] = 'AMERICAN_AMERICA.ZHS16GBK'
print('====beging...')
# 获取需要判断的数据信息
startTime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M')
print('===========================',startTime,'============================')
DB_UserName = ""
DB_UserPwd = ""
DB_ConnectStr = ""
def Oracle_Query(SqlStr, debug=0):
"Execute oracle query, and return data_list"
conn = cx_Oracle.connect(DB_UserName, DB_UserPwd, DB_ConnectStr)
data_list = []
cursor = conn.cursor()
try:
cursor.execute(SqlStr)
while 1:
rs = cursor.fetchone()
if rs == None:
break
data_list.append(rs)
if debug:
fieldnames = []
for field in cursor.description:
fieldnames.append(field[0])
print(fieldnames)
print(data_list)
print("Query success!")
except:
print("Exec sql failed: %s" % SqlStr)
finally:
cursor.close()
# conn.close()
return data_list
def Oracle_Exec(SqlStr):
"Execute oracle command"
conn = cx_Oracle.connect(DB_UserName, DB_UserPwd, DB_ConnectStr)
cursor = conn.cursor()
try:
cursor.execute(SqlStr)
conn.commit()
return True
except:
conn.rollback()
print("Exec sql failed: %s" % SqlStr)
return False
finally:
cursor.close()
# conn.close()
# 判断坐标是否再坐标圈内
def is_pt_in_poly(aLon, aLat, pointList):
'''''
:param aLon: double 经度
:param aLat: double 纬度
:param pointList: list [(lon, lat)...] 多边形点的顺序需根据顺时针或逆时针,不能乱
'''
iSum = 0
iCount = len(pointList)
if (iCount < 3):
return False
# print("iCount = " + str(iCount))
for i in range(iCount):
pLon1 = pointList[i][0]
pLat1 = pointList[i][1]
if (i == iCount - 1):
pLon2 = pointList[0][0]
pLat2 = pointList[0][1]
else:
# print(i+1)
try:
pLon2 = pointList[i + 1][0]
pLat2 = pointList[i + 1][1]
except IndexError:
| loat(pLat2)
aLat = float(aLat)
aLon = float(aLon)
if ((aLat >= pLat1) and (aLat < pLat2)) or ((aLat >= pLat2) and (aLat < pLat1)):
if (abs(pLat1 - pLat2) > 0):
pLon = pLon1 - ((pLon1 - pLon2) * (pLat1 - aLat)) / (pLat1 - pLat2)
if (pLon < aLon):
iSum += 1
if (iSum % 2 != 0):
return True
else:
return False
# 格式化从excel中得到的内容
# def get_file_row(row,file_name):
# wb_1 = openpyxl.load_workbook('%s' % file_name)
# ws_1 = wb_1.active
# colC = ws_1['%s' % row]
# list_1 = [x for x in colC]
# list_2 = []
# for i in list_1:
# list_2.append(i.value)
# return list_2
# 查询获得基站的名称、经度、纬度
def get_station_lo_la():
sql_sec_name_lo_la = "SELECT A.B_STATION_NAME,A.LONGITUDE,A.LATITUDE FROM B_BASE_STATION_INFO A"
list_station_lo_la = Oracle_Query(sql_sec_name_lo_la)
return list_station_lo_la
# 查询获得网格的id、名称、网格坐标圈 指定市
def get_point_2(org_id, orgLevel):
sql_sec_orgid_orgname_point = "select r.org_code,r.org_name,r.point " \
"from (" \
"select a.org_code,a.org_name,a.p_org_id,a.org_level,b.point " \
"from s_orgnization a,p_map_draw b " \
"where a.org_id=b.org_id " \
"and b.state='00A' " \
"and a.state='00A' " \
"and a.org_id in ( " \
"select org_id from s_orgnization start with org_id='%s' " \
"connect by prior org_id=p_org_id)) r,s_orgnization o " \
"where r.p_org_id=o.org_id " \
"and r.org_level='%s'" % (str(org_id), str(orgLevel))
list_orgid_orgname_point = Oracle_Query(sql_sec_orgid_orgname_point)
return list_orgid_orgname_point
# 查询获得网格的id、名称、网格坐标圈
def get_point():
sql_sec_orgid_orgname_point = "select a.org_id,a.org_name,b.point from s_orgnization a " \
"left join p_map_draw b on b.org_id = a.org_id" \
" where a.org_level='5'"
list_orgid_orgname_point = Oracle_Query(sql_sec_orgid_orgname_point)
return list_orgid_orgname_point
# 格式化cblob字段
def expand_list(tList):
mList = tList.read().split(";")
flag = 0
for i in mList:
mList[flag] = mList[flag].split(',')
flag += 1
return mList
# 修改基站grid_id bak表
def update_station_grid_id(gridId, bStationName):
sqlUpdateGridId = "update b_base_station_info_bak a " \
"set a.grid_id='%s' " \
"where a.b_station_name='%s'" % (str(gridId), bStationName)
updateResult = Oracle_Exec(sqlUpdateGridId)
return updateResult
# 对比两个b_station_name的grid_id是否相同
def judge_station_name(stationName):
sqlGridIdFromInfo = " select grid_id from b_base_station_info_bak where b_station_name = '%s'" % stationName
sqlGridIdFromMid = "select grid_id from B_STATION_GRID_MID where b_station_name = '%s'" % stationName
gridIdFromInfo = Oracle_Query(sqlGridIdFromInfo)
gridIdFromMid = Oracle_Query(sqlGridIdFromMid)
if gridIdFromInfo == gridIdFromMid:
return False
else:
return gridIdFromMid
# 格式化orgIdNamePoint
def expand_orgidnamepoint(orgIdNamePoint):
flag = 0
for i in orgIdNamePoint:
if i[2] is not None:
orgIdNamePoint[flag] = list(orgIdNamePoint[flag])
orgIdNamePoint[flag][2] = expand_list(i[2])
else:
continue
flag += 1
return orgIdNamePoint
# 获取数据入中间表
def in_station_mid_table(stationLoLa, orgIdNamePoint):
for station_name, station_lo, station_la in stationLoLa: # 获取基站的经纬度
for ord_id, org_name, org_point in orgIdNamePoint: # 获取网格的相关内容 (id、name、point list)
judge_result = is_pt_in_poly(station_lo, station_la, org_point)
if judge_result:
sql_insert_b_station_grid_mid = "insert into b_station_grid_mid (org_name,grid_id,b_station_name) " \
"values ('%s','%s','%s')" % (org_name, ord_id, station_name)
Oracle_Exec(sql_insert_b_station_grid_mid)
break
# 对照中间表修改正式表中的所有数据
def updata_station_all():
sqlSecStationNameFromMid = "select b_station_name from B_STATION_GRID_MID"
stationNameList = Oracle_Query(sqlSecStationNameFromMid)
for stationNameTup in stationNameList:
gridId = judge_station_name(stationNameTup[0])
if gridId:
print(stationNameTup[0])
print(gridId)
update_station_grid_id(gridId[0][0], stationNameTup[0])
else:
continue
# 获取小区的名称以及经纬度
def get_cell_lo_la(cityId, dayID):
"""
:param cityId: 小区所在的城市ID
:return:
"""
sqlGetCellIdLoLa = "SELECT A.CELL_ID,A.LONGITUDE,A.LATITUDE " \
"FROM B_SUBDISTRICT_INFO A " \
"WHERE CITY_ID='%s' " \
"AND A.LONGITUDE IS NOT NULL " \
"AND A.LATITUDE IS NOT NULL " \
"AND A.DAY_ID='%s'" % (str(cityId), str(dayID))
listCellLoLa = Oracle_Query(sqlGetCellIdLoLa)
return listCellLoLa
# 判断小区的结果并录入中间表
def in_cell_mid_table(cellLoLa, orgIdNamePoint, orgLevel, dayId, cityId):
"""
:param cellLoLa: 小区的坐标信息
:param orgIdNamePoint: 网格的坐标范围信息
:param orgLevel: 网格等级
:return: 无返回值
"""
for cellId, cellLo, cellLa in cellLoLa: # 获取基站的经纬度
for ord_id, org_name, org_point in orgIdNamePoint: # 获取网格的相关内容 (id、name、point list)
judge_result = is_pt_in_poly(cellLo, cellLa, org_point)
if judge_result:
sql_insert_b_cell_mid = "insert into b_cell_mid (day_id,org_name,grid_id,city_id,org_level,cell_id) " \
"values ('%s','%s','%s','%s','%s','%s')" % (
str(dayId), org_name, ord_id, str(cityId), str(orgLevel), cellId)
Oracle_Exec(sql_insert_b_cell_mid)
break
# 判断小区的结果并录入中间表
def in_cell_mid_table_grid_id(cellLoLa, orgIdNamePoint_5, orgIdNamePoint_4, orgLevel_5, orgLevel_4,dayId, cityId):
    """Assign each cell to a grid and record the match in b_cell_mid.

    Level-5 grids are tried first; only when no level-5 polygon contains
    the cell do we fall back to the coarser level-4 grids.

    :param cellLoLa: iterable of (cell_id, longitude, latitude)
    :param orgIdNamePoint_5: level-5 grid rows (org_id, org_name, polygon)
    :param orgIdNamePoint_4: level-4 grid rows (org_id, org_name, polygon)
    :param orgLevel_5: level value stored for level-5 matches
    :param orgLevel_4: level value stored for level-4 matches
    :param dayId: day id stored with each row
    :param cityId: city id stored with each row
    :return: None
    """
    insert_tpl = "insert into b_cell_mid (day_id,org_name,grid_id,city_id,org_level,cell_id) " \
                 "values ('%s','%s','%s','%s','%s','%s')"
    for cell_id, cell_lon, cell_lat in cellLoLa:
        # First pass: level-5 grids. The for/else replaces the original
        # manual `flag` variable -- the else runs only when no break fired.
        for grid_id, grid_name, polygon in orgIdNamePoint_5:
            if is_pt_in_poly(cell_lon, cell_lat, polygon):
                Oracle_Exec(insert_tpl % (str(dayId), grid_name, grid_id,
                                          str(cityId), str(orgLevel_5), cell_id))
                break
        else:
            # No level-5 hit: fall back to the level-4 grids.
            for grid_id, grid_name, polygon in orgIdNamePoint_4:
                if is_pt_in_poly(cell_lon, cell_lat, polygon):
                    Oracle_Exec(insert_tpl % (str(dayId), grid_name, grid_id,
                                              str(cityId), str(orgLevel_4), cell_id))
                    break
# 对比两个cell_id的region_id是否相同
def judge_cell_id_region(cellId):
sqlGridIdFromInfo = " select region from B_SUBDISTRICT_INFO where cell_id = '%s'" % cellId
sqlGridIdFromMid = "select grid_id from B_CELL_MID where cell_id = '%s'" % cellId
gridIdFromInfo = Oracle_Query(sqlGridIdFromInfo)
gridIdFromMid = Oracle_Query(sqlGridIdFromMid)
if gridIdFromInfo == gridIdFromMid:
return False
else:
return gridIdFromMid
# 修改基站grid_id bak表
def update_cell_region(gridId, cellId):
sqlUpdateGridId = "update B_SUBDISTRICT_INFO a " \
"set a.region='%s' " \
"where a.cell_id='%s'" % (str(gridId), cellId)
updateResult = Oracle_Exec(sqlUpdateGridId)
return updateResult
# 对照中间表修改正式表中的所有数据 update小区region_id
def updata_cell_region_all():
sqlSecCellIdFromMid = "select cell_id from B_CELL_MID"
cellIdList = Oracle_Query(sqlSecCellIdFromMid)
for cellIdTup in cellIdList:
gridId = judge_cell_id_region(cellIdTup[0])
if gridId:
print(cellIdTup[0])
print(gridId)
update_cell_region(gridId[0][0], cellIdTup[0])
else:
continue
| break
###转换数值类型
pLon1 = float(pLon1)
pLat1 = float(pLat1)
pLon2 = float(pLon2)
pLat2 = f | conditional_block |
getlola.py | # -*- coding:UTF-8 -*-
import cx_Oracle
import datetime
import os
# 设置字符集与oracle一致,不然insert中文乱码
os.environ['NLS_LANG'] = 'AMERICAN_AMERICA.ZHS16GBK'
print('====beging...')
# 获取需要判断的数据信息
startTime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M')
print('===========================',startTime,'============================')
DB_UserName = ""
DB_UserPwd = ""
DB_ConnectStr = ""
def Oracle_Query(SqlStr, debug=0):
"Execute oracle query, and return data_list"
conn = cx_Oracle.connect(DB_UserName, DB_UserPwd, DB_ConnectStr)
data_list = []
cursor = conn.cursor()
try:
cursor.execute(SqlStr)
while 1:
rs = cursor.fetchone()
if rs == None:
break
data_list.append(rs)
if debug:
fieldnames = []
for field in cursor.description:
fieldnames.append(field[0])
print(fieldnames)
print(data_list)
print("Query success!")
except:
print("Exec sql failed: %s" % SqlStr)
finally:
cursor.close()
# conn.close()
return data_list
def Oracle_Exec(SqlStr):
"Execute oracle command"
conn = cx_Oracle.connect(DB_UserName, DB_UserPwd, DB_ConnectStr)
cursor = conn.cursor()
try:
cursor.execute(SqlStr)
conn.commit()
return True
except:
conn.rollback()
print("Exec sql failed: %s" % SqlStr)
return False
finally:
cursor.close()
# conn.close()
# 判断坐标是否再坐标圈内
def is_pt_in_poly(aLon, aLat, pointList):
'''''
:param aLon: double 经度
:param aLat: double 纬度
:param pointList: list [(lon, lat)...] 多边形点的顺序需根据顺时针或逆时针,不能乱
'''
iSum = 0
iCount = len(pointList)
if (iCount < 3):
return False
# print("iCount = " + str(iCount))
for i in range(iCount):
pLon1 = pointList[i][0]
pLat1 = pointList[i][1]
if (i == iCount - 1):
pLon2 = pointList[0][0]
pLat2 = pointList[0][1]
else:
# print(i+1)
try:
pLon2 = pointList[i + 1][0]
pLat2 = pointList[i + 1][1]
except IndexError:
break
###转换数值类型
pLon1 = float(pLon1)
pLat1 = float(pLat1)
pLon2 = float(pLon2)
pLat2 = float(pLat2)
aLat = float(aLat)
aLon = float(aLon)
if ((aLat >= pLat1) and (aLat < pLat2)) or ((aLat >= pLat2) and (aLat < pLat1)):
if (abs(pLat1 - pLat2) > 0):
pLon = pLon1 - ((pLon1 - pLon2) * (pLat1 - aLat)) / (pLat1 - pLat2)
if (pLon < aLon):
iSum += 1
if (iSum % 2 != 0):
return True
else:
return False
# 格式化从excel中得到的内容
# def get_file_row(row,file_name):
# wb_1 = openpyxl.load_workbook('%s' % file_name)
# ws_1 = wb_1.active
# colC = ws_1['%s' % row]
# list_1 = [x for x in colC]
# list_2 = []
# for i in list_1:
# list_2.append(i.value)
# return list_2
# 查询获得基站的名称、经度、纬度
def get_station_lo_la():
sql_sec_name_lo_la = "SELECT A.B_STATION_NAME,A.LONGITUDE,A.LATITUDE FROM B_BASE_STATION_INFO A"
list_station_lo_la = Oracle_Query(sql_sec_name_lo_la)
return list_station_lo_la
# 查询获得网格的id、名称、网格坐标圈 指定市
def get_point_2(org_id, orgLevel):
sql_sec_orgid_orgname_point = "select r.org_code,r.org_name,r.point " \
"from (" \
"select a.org_code,a.org_name,a.p_org_id,a.org_level,b.point " \
"from s_orgnization a,p_map_draw b " \
"where a.org_id=b.org_id " \
"and b.state='00A' " \
"and a.state='00A' " \
"and a.org_id in ( " \
"select org_id from s_orgnization start with org_id='%s' " \
"connect by prior org_id=p_org_id)) r,s_orgnization o " \
"where r.p_org_id=o.org_id " \
"and r.org_level='%s'" % (str(org_id), str(orgLevel))
list_orgid_orgname_point = Oracle_Query(sql_sec_orgid_orgname_point)
return list_orgid_orgname_point
# 查询获得网格的id、名称、网格坐标圈
def get_point():
sql_sec_orgid_orgname_point = "select a.org_id,a.org_name,b.point from s_orgnization a " \
"left join p_map_draw b on b.org_id = a.org_id" \
" where a.org_level='5'"
list_orgid_orgname_point = Oracle_Query(sql_sec_orgid_orgname_point)
return list_orgid_orgname_point
# 格式化cblob字段
def expand_list(tList):
mList = tList.read().split(";")
flag = 0
for i in mList:
mList[flag] = mList[flag].split(',')
flag += 1
return mList
# 修改基站grid_id bak表
def update_station_grid_id(gridId, bStationName):
sqlUpdateGridId = "update b_base_station_info_bak a " \
"set a.grid_id='%s' " \
"where a.b_station_name='%s'" % (str(gridId), bStationName)
updateResult = Oracle_Exec(sqlUpdateGridId)
return updateResult
# 对比两个b_station_name的grid_id是否相同
def judge_station_name(stationName):
sqlGridIdFromInfo = " select grid_id from b_base_station_info_bak where b_station_name = '%s'" % stationName
sqlGridIdFromMid = "select grid_id from B_STATION_GRID_MID where b_station_name = '%s'" % stationName
gridIdFromInfo = Oracle_Query(sqlGridIdFromInfo)
gridIdFromMid = Oracle_Query(sqlGridIdFromMid)
if gridIdFromInfo == gridIdFromMid:
return False
else:
return gridIdFromMid
# 格式化orgIdNamePoint
def expand_orgidnamepoint(orgIdNamePoint):
flag = 0
for i in orgIdNamePoint:
if i[2] is not None:
orgIdNamePoint[flag] = list(orgIdNamePoint[flag])
orgIdNamePoint[flag][2] = expand_list(i[2])
else:
continue
flag += 1
return orgIdNamePoint
# 获取数据入中间表
def in_station_mid_table(stationLoLa, orgIdNamePoint):
for station_name, station_lo, station_la in stationLoLa: # 获取基站的经纬度
for ord_id, org_name, org_point in orgIdNamePoint: # 获取网格的相关内容 (id、name、point list)
judge_result = is_pt_in_poly(station_lo, station_la, org_point)
if judge_result:
sql_insert_b_station_grid_mid = "insert into b_station_grid_mid (org_name,grid_id,b_station_name) " \
"values ('%s','%s','%s')" % (org_name, ord_id, station_name)
Oracle_Exec(sql_insert_b_station_grid_mid)
break
# 对照中间表修改正式表中的所有数据
def updata_station_all():
sqlSecStationNameFromMid = "select b_station_name from B_STATION_GRID_MID"
stationNameList = Oracle_Query(sqlSecStationNameFromMid)
for stationNameTup in stationNameList:
gridId = judge_station_name(stationNameTup[0])
if gridId:
print(stationNameTup[0])
print(gridId)
update_station_grid_id(gridId[0][0], stationNameTup[0])
else:
continue
# 获取小区的名称以及经纬度
def get_cell_lo_la(cityId, dayID):
"""
:param cityId: 小区所在的城市ID
:return:
"""
sqlGetCellIdLoLa = "SELECT A.CELL_ID,A.LONGITUDE,A.LATITUDE " \
"FROM B_SUBDISTRICT_INFO A " \
"WHERE CITY_ID='%s' " \
"AND A.LONGITUDE IS NOT NULL " \
"AND A.LATITUDE IS NOT NULL " \
"AND A.DAY_ID='%s'" % (str(cityId), str(dayID))
listCellLoLa = Oracle_Query(sqlGetCellIdLoLa)
return listCellLoLa
# 判断小区的结果并录入中间表
def in_cell_mid_table(cellLoLa, orgIdNamePoint, orgLevel, dayId, cityId):
"""
:param cellLoLa: 小区的坐标信息
:param orgIdNamePoint: 网格的坐标范围信息
:param orgLevel: 网格等级
:return: 无返回值
"""
for cellId, cellLo, cellLa in cellLoLa: # 获取基站的经纬度
for ord_id, org_name, org_point in orgIdNamePoint: # 获取网格的相关内容 (id、name、point list)
judge_result = is_pt_in_poly(cellLo, cellLa, org_point)
if judge_result:
sql_insert_b_cell_mid = "insert into b_cell_mid (day_id,org_name,grid_id,city_id,org_level,cell_id) " \
"values ('%s','%s','%s','%s','%s','%s')" % (
str(dayId), org_name, ord_id, str(cityId), str(orgLevel), cellId)
Oracle_Exec(sql_insert_b_cell_mid)
break
# 判断小区的结果并录入中间表
def in_cell_mid_table_grid_id(cellLoLa, orgIdNamePoint_5, orgIdNamePoint_4, orgLevel_5, orgLevel_4,dayId, cityId):
"""
:param cellLoLa: 小区的坐标信息
:param orgIdNamePoint: 网格的坐标范围信息
:param orgLevel: 网格等级
:return: 无返回值
"""
for cellId, cellLo, cellLa in cellLoLa: # 获取基站的经纬度
flag = 0
for ord_id, org_name, org_point in orgIdNamePoint_5: # 获取网格的相关内容 (id、name、point list)
judge_result = is_pt_in_poly(cellLo, cellLa, org_point)
if judge_result:
sql_insert_b_cell_mid = "insert into b_cell_mid (day_id,org_name,grid_id,city_id,org_level,cell_id) " \
"values ('%s','%s','%s','%s','%s','%s')" % (
str(dayId), org_name, ord_id, str(cityId), str(orgLevel_5), cellId)
Oracle_Exec(sql_insert_b_cell_mid)
flag = 1
break
if flag == 0:
for ord_id, org_name, org_point in orgIdNamePoint_4: # 获取网格的相关内容 (id、name、point list)
judge_result = is_pt_in_poly(cellLo, cellLa, org_point)
if judge_result:
sql_insert_b_cell_mid = "insert into b_cell_mid (day_id,org_name,grid_id,city_id,org_level,cell_id) " \
"values ('%s','%s','%s','%s','%s','%s')" % (
str(dayId), org_name, ord_id, str(cityId), str(orgLevel_4), cellId)
Oracle_Exec(sql_insert_b_cell_mid)
break
# 对比两个cell_id的region_id是否相同
def judge_cell_id_region(cellId):
sqlGridIdFromInfo = " select region from B_SUBDISTRICT_INFO where cell_id = '%s'" % cellId
sqlGridIdFromMid = "select grid_id from B_CELL_MID where cell_id = '%s'" % cellId
gridIdFromInfo = Oracle_Query(sqlGridIdFromInfo)
gridIdFromMid = Oracle_Query(sqlGridIdFromMid)
if gridIdFromInfo == gridIdFromMid:
return False
else:
return gridIdFromMid
# 修改基站grid_id bak表
def update_cell_region(gridId, cellId):
sqlUpdateGridId = "update B_SUBDISTRICT_INFO a " \
"set a.region='%s' " \
"where a.cell_id='%s'" % (str(gridId), cellId)
updateResult = Oracle_Exec(sqlUpdateGridId)
return updateResult
# | ], cellIdTup[0])
else:
continue
| 对照中间表修改正式表中的所有数据 update小区region_id
def updata_cell_region_all():
sqlSecCellIdFromMid = "select cell_id from B_CELL_MID"
cellIdList = Oracle_Query(sqlSecCellIdFromMid)
for cellIdTup in cellIdList:
gridId = judge_cell_id_region(cellIdTup[0])
if gridId:
print(cellIdTup[0])
print(gridId)
update_cell_region(gridId[0][0 | identifier_body |
report_internal_faculty_cv.py | # required for PDF generation
from __future__ import unicode_literals
from io import BytesIO
from reportlab.lib.pagesizes import letter, A4, cm
from reportlab.platypus import BaseDocTemplate, Frame, Paragraph, LongTable, TableStyle, PageTemplate
from reportlab.platypus.flowables import PageBreak
from reportlab.lib import colors
from reportlab.lib.enums import TA_CENTER
from django.http import HttpResponse
from cscm.views.FooterDocTemplate import FooterDocTemplate
import datetime
from cscm.views.internal_styles import *
from cscm.helpers.loadconfigs import get_config
from cscm.helpers.functions import *
# models
from cscm.models import CourseLogEntry
from cscm.models import Course, Instructor
from csip.models import InstructorProfile
# Forms imports
from django.core.context_processors import csrf
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django import forms
from django.template import RequestContext
from django.contrib.auth.decorators import login_required
import copy
# =============================================================================================
@login_required
def report_internal_faculty_cv(request):
class InternalFacultyCVForm(forms.Form):
if request.user.is_superuser:
instructor = forms.ModelMultipleChoiceField(queryset=Instructor.objects.all())
else:
instructor = forms.ModelMultipleChoiceField(queryset=Instructor.objects.filter(owner=request.user)) | c = RequestContext(request)
c.update(csrf(request))
# if 'course_name' in request.GET and request.GET['course_name']:
if request.method == 'POST':
# form submitted
form = InternalFacultyCVForm(request.POST)
form.is_valid()
instructor = form.cleaned_data['instructor']
instructor = instructor[0]
inner_response = report_internal_faculty_cv_pdf(request, instructor)
http_response = HttpResponse(inner_response, c)
escaped_name = str(instructor.name).replace(' ', '_')
this_year = datetime.datetime.now().strftime("%Y")
filename = "faculty_cv_" + escaped_name + "-" + this_year + ".pdf"
http_response['Content-Disposition'] = 'attachment;filename="' + filename + '"'
return http_response
else:
# form not yet submitted ... display it
form = InternalFacultyCVForm()
return render_to_response('internal_faculty_cv.html' , {
'form': form
}, c)
# ============= PDF GEN
def report_internal_faculty_cv_pdf(request, instructor):
def make_table(data, widths, style=[]):
table = LongTable(data, colWidths=widths)
table.setStyle(TableStyle(style))
return table
response = HttpResponse(mimetype='application/pdf')
buffer = BytesIO()
org = Internal()
styleN, styleB, styleH, styleSmaller = org.getTextStyles()
styleBC = copy.copy(styleB)
styleBC.alignment = TA_CENTER
width, height = A4
doc = FooterDocTemplate(buffer, pagesize=(height, width))
frame = org.getFrame(doc)
template = PageTemplate(id='test', frames=frame, onPage=org.get_header_footer(doccode="NCEAC.DOC.008", pagesize=(width, height)))
doc.addPageTemplates([template])
# Our main content holder
elements = []
i = instructor
ip = i.instructorprofile
percent_time_teaching = ip.percent_time_teaching
# title page
data = [[Paragraph('Name', styleB), i.name],
[Paragraph('Academic Rank', styleB), ip.designation],
[Paragraph('Administrative Responsibility', styleB), ip.admin_responsibility],
[Paragraph('Date of Original Appointment', styleB), ip.joining_date],
]
ts = [ ('INNERGRID', (0, 0), (-1, -1), 0.15, colors.black),
('BOX', (0, 0), (-1, -1), 0.25, colors.black),
('VALIGN', (0, 0), (-1, -1), 'MIDDLE'),
]
elements.append(make_table(data, widths=[6 * cm, 20 * cm], style=ts))
# elements.append(PageBreak())
# Education
ieds = i.instructoreducation_set.all().order_by('-year')
data = [[Paragraph('Degrees', styleB),
Paragraph('Degree', styleB),
Paragraph('Field', styleB),
Paragraph('Institution', styleB),
Paragraph('Date', styleB),
]]
for ied in ieds:
data.append(['',
Paragraph(ied.degree, styleN),
Paragraph(ied.field, styleN),
Paragraph(ied.university, styleN),
Paragraph(ied.year, styleN),
])
ts = [ ('INNERGRID', (0, 0), (-1, -1), 0.15, colors.black),
('BOX', (0, 0), (-1, -1), 0.25, colors.black),
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('SPAN', (0, 0), (0, -1))
]
elements.append(make_table(data, widths=[6 * cm, 4.5 * cm, 4.5 * cm, 7 * cm, 4 * cm], style=ts))
# events
ievs = i.instructoreventparticpation_set.all().order_by('-start_date')
counter = 1
cat_header = Paragraph('Conferences, workshops, and professional development programs participated during the past five years', styleB)
data = []
for iev in ievs:
iev_string = str(counter) + '. ' + iev.title + '. Role: ' + iev.role + ' (' + str(iev.duration) + ' at ' + str(iev.venue) + ')'
data.append([cat_header,
Paragraph(iev_string, styleN),
Paragraph(str(iev.start_date.year), styleN),
])
cat_header = ''
counter += 1
ts = [ ('INNERGRID', (0, 0), (-1, -1), 0.15, colors.black),
('BOX', (0, 0), (-1, -1), 0.25, colors.black),
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('SPAN', (0, 0), (0, -1))
]
elements.append(make_table(data, widths=[6 * cm, 16 * cm, 4 * cm], style=ts))
# Consultancies
icons = i.instructorconsultancy_set.all().order_by('-date')
counter = 1
cat_header = Paragraph('Consulting activities during the last five years', styleB)
data = []
for icon in icons:
icon_string = str(counter) + '. <b>' + icon.organization + '</b>. ' + icon.description
data.append([cat_header,
Paragraph(icon_string, styleN),
Paragraph(str(icon.date.year), styleN),
])
cat_header = ''
counter += 1
ts = [ ('INNERGRID', (0, 0), (-1, -1), 0.15, colors.black),
('BOX', (0, 0), (-1, -1), 0.25, colors.black),
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('SPAN', (0, 0), (0, -1))
]
elements.append(make_table(data, widths=[6 * cm, 16 * cm, 4 * cm], style=ts))
# Publications
ipbs = i.instructorpublication_set.all().order_by('-id')
counter = 1
cat_header = Paragraph('Principal publications during the last five years (give in standard bibliogrpahic format)', styleB)
data = []
for ipb in ipbs:
pub_string = str(counter) + '. ' + str(ipb)
data.append([cat_header,
Paragraph(pub_string, styleN),
Paragraph('date', styleN),
])
cat_header = ''
counter += 1
ts = [ ('INNERGRID', (0, 0), (-1, -1), 0.15, colors.black),
('BOX', (0, 0), (-1, -1), 0.25, colors.black),
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('SPAN', (0, 0), (0, -1))
]
elements.append(make_table(data, widths=[6 * cm, 16 * cm, 4 * cm], style=ts))
# Other activities
ioas = i.instructorotheractivity_set.all().order_by('-date')
counter = 1
cat_header = Paragraph('Other scholarly activities during the last five years (grants, sabbaticals, software development, etc.)', styleB)
data = []
for ioa in ioas:
pub_string = str(counter) + '. ' + str(ioa.title) + '. ' + str(ioa.description)
data.append([cat_header,
Paragraph(pub_string, styleN),
Paragraph(str(ioa.date), styleN),
])
cat_header = ''
counter += 1
ts = [ ('INNERGRID', (0, 0), (-1, -1), 0.15, colors.black),
('BOX', (0, 0), (-1, -1), 0.25, colors.black),
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('SPAN', (0, 0), (0, -1))
]
elements.append(make_table(data, widths=[6 * cm, 16 * cm, 4 * cm], style=ts))
# courses during last two years
ics = i.course_set.all().order_by('-year')
data = [[Paragraph('Courses taught during this and last academic year', styleB),
Paragraph('Year', styleB),
Paragraph('Semester', styleB),
Paragraph('Course Code', styleB),
Paragraph('Course Title', styleB),
]]
for ic in ics:
data.append(['',
str(ic.year),
str(ic.semester),
str(ic.course_code),
str(ic.course_name)
])
ts = [ ('INNERGRID', (0, 0), (-1, -1), 0.15, colors.black),
('BOX', (0, 0), (-1, -1), 0.25, colors.black),
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('SPAN', (0, 0), (0, -1))
]
elements.append(make_table(data, widths=[6 * cm, 3 * cm, 3 * cm, 3 * cm, 11 * cm], style=ts))
# Percentage of time given to teaching
data = [[Paragraph('State percentage of your full-time work dedicated to teaching in the computing program under evaluation', styleB)
, str(percent_time_teaching) + '%']]
ts = [ ('INNERGRID', (0, 0), (-1, -1), 0.15, colors.black),
('BOX', (0, 0), (-1, -1), 0.25, colors.black),
('VALIGN', (0, 0), (-1, -1), 'MIDDLE'),
]
elements.append(make_table(data, widths=[6 * cm, 20 * cm], style=ts))
# END OF REPORT. NOW BUILD
doc.build(elements)
# OUTPUT FILE
# doc.save()
pdf = buffer.getvalue()
buffer.close()
response.write(pdf)
return response | random_line_split | |
report_internal_faculty_cv.py | # required for PDF generation
from __future__ import unicode_literals
from io import BytesIO
from reportlab.lib.pagesizes import letter, A4, cm
from reportlab.platypus import BaseDocTemplate, Frame, Paragraph, LongTable, TableStyle, PageTemplate
from reportlab.platypus.flowables import PageBreak
from reportlab.lib import colors
from reportlab.lib.enums import TA_CENTER
from django.http import HttpResponse
from cscm.views.FooterDocTemplate import FooterDocTemplate
import datetime
from cscm.views.internal_styles import *
from cscm.helpers.loadconfigs import get_config
from cscm.helpers.functions import *
# models
from cscm.models import CourseLogEntry
from cscm.models import Course, Instructor
from csip.models import InstructorProfile
# Forms imports
from django.core.context_processors import csrf
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django import forms
from django.template import RequestContext
from django.contrib.auth.decorators import login_required
import copy
# =============================================================================================
@login_required
def report_internal_faculty_cv(request):
class InternalFacultyCVForm(forms.Form):
if request.user.is_superuser:
instructor = forms.ModelMultipleChoiceField(queryset=Instructor.objects.all())
else:
instructor = forms.ModelMultipleChoiceField(queryset=Instructor.objects.filter(owner=request.user))
c = RequestContext(request)
c.update(csrf(request))
# if 'course_name' in request.GET and request.GET['course_name']:
if request.method == 'POST':
# form submitted
form = InternalFacultyCVForm(request.POST)
form.is_valid()
instructor = form.cleaned_data['instructor']
instructor = instructor[0]
inner_response = report_internal_faculty_cv_pdf(request, instructor)
http_response = HttpResponse(inner_response, c)
escaped_name = str(instructor.name).replace(' ', '_')
this_year = datetime.datetime.now().strftime("%Y")
filename = "faculty_cv_" + escaped_name + "-" + this_year + ".pdf"
http_response['Content-Disposition'] = 'attachment;filename="' + filename + '"'
return http_response
else:
# form not yet submitted ... display it
form = InternalFacultyCVForm()
return render_to_response('internal_faculty_cv.html' , {
'form': form
}, c)
# ============= PDF GEN
def report_internal_faculty_cv_pdf(request, instructor):
def make_table(data, widths, style=[]):
table = LongTable(data, colWidths=widths)
table.setStyle(TableStyle(style))
return table
response = HttpResponse(mimetype='application/pdf')
buffer = BytesIO()
org = Internal()
styleN, styleB, styleH, styleSmaller = org.getTextStyles()
styleBC = copy.copy(styleB)
styleBC.alignment = TA_CENTER
width, height = A4
doc = FooterDocTemplate(buffer, pagesize=(height, width))
frame = org.getFrame(doc)
template = PageTemplate(id='test', frames=frame, onPage=org.get_header_footer(doccode="NCEAC.DOC.008", pagesize=(width, height)))
doc.addPageTemplates([template])
# Our main content holder
elements = []
i = instructor
ip = i.instructorprofile
percent_time_teaching = ip.percent_time_teaching
# title page
data = [[Paragraph('Name', styleB), i.name],
[Paragraph('Academic Rank', styleB), ip.designation],
[Paragraph('Administrative Responsibility', styleB), ip.admin_responsibility],
[Paragraph('Date of Original Appointment', styleB), ip.joining_date],
]
ts = [ ('INNERGRID', (0, 0), (-1, -1), 0.15, colors.black),
('BOX', (0, 0), (-1, -1), 0.25, colors.black),
('VALIGN', (0, 0), (-1, -1), 'MIDDLE'),
]
elements.append(make_table(data, widths=[6 * cm, 20 * cm], style=ts))
# elements.append(PageBreak())
# Education
ieds = i.instructoreducation_set.all().order_by('-year')
data = [[Paragraph('Degrees', styleB),
Paragraph('Degree', styleB),
Paragraph('Field', styleB),
Paragraph('Institution', styleB),
Paragraph('Date', styleB),
]]
for ied in ieds:
data.append(['',
Paragraph(ied.degree, styleN),
Paragraph(ied.field, styleN),
Paragraph(ied.university, styleN),
Paragraph(ied.year, styleN),
])
ts = [ ('INNERGRID', (0, 0), (-1, -1), 0.15, colors.black),
('BOX', (0, 0), (-1, -1), 0.25, colors.black),
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('SPAN', (0, 0), (0, -1))
]
elements.append(make_table(data, widths=[6 * cm, 4.5 * cm, 4.5 * cm, 7 * cm, 4 * cm], style=ts))
# events
ievs = i.instructoreventparticpation_set.all().order_by('-start_date')
counter = 1
cat_header = Paragraph('Conferences, workshops, and professional development programs participated during the past five years', styleB)
data = []
for iev in ievs:
iev_string = str(counter) + '. ' + iev.title + '. Role: ' + iev.role + ' (' + str(iev.duration) + ' at ' + str(iev.venue) + ')'
data.append([cat_header,
Paragraph(iev_string, styleN),
Paragraph(str(iev.start_date.year), styleN),
])
cat_header = ''
counter += 1
ts = [ ('INNERGRID', (0, 0), (-1, -1), 0.15, colors.black),
('BOX', (0, 0), (-1, -1), 0.25, colors.black),
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('SPAN', (0, 0), (0, -1))
]
elements.append(make_table(data, widths=[6 * cm, 16 * cm, 4 * cm], style=ts))
# Consultancies
icons = i.instructorconsultancy_set.all().order_by('-date')
counter = 1
cat_header = Paragraph('Consulting activities during the last five years', styleB)
data = []
for icon in icons:
|
ts = [ ('INNERGRID', (0, 0), (-1, -1), 0.15, colors.black),
('BOX', (0, 0), (-1, -1), 0.25, colors.black),
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('SPAN', (0, 0), (0, -1))
]
elements.append(make_table(data, widths=[6 * cm, 16 * cm, 4 * cm], style=ts))
# Publications
ipbs = i.instructorpublication_set.all().order_by('-id')
counter = 1
cat_header = Paragraph('Principal publications during the last five years (give in standard bibliogrpahic format)', styleB)
data = []
for ipb in ipbs:
pub_string = str(counter) + '. ' + str(ipb)
data.append([cat_header,
Paragraph(pub_string, styleN),
Paragraph('date', styleN),
])
cat_header = ''
counter += 1
ts = [ ('INNERGRID', (0, 0), (-1, -1), 0.15, colors.black),
('BOX', (0, 0), (-1, -1), 0.25, colors.black),
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('SPAN', (0, 0), (0, -1))
]
elements.append(make_table(data, widths=[6 * cm, 16 * cm, 4 * cm], style=ts))
# Other activities
ioas = i.instructorotheractivity_set.all().order_by('-date')
counter = 1
cat_header = Paragraph('Other scholarly activities during the last five years (grants, sabbaticals, software development, etc.)', styleB)
data = []
for ioa in ioas:
pub_string = str(counter) + '. ' + str(ioa.title) + '. ' + str(ioa.description)
data.append([cat_header,
Paragraph(pub_string, styleN),
Paragraph(str(ioa.date), styleN),
])
cat_header = ''
counter += 1
ts = [ ('INNERGRID', (0, 0), (-1, -1), 0.15, colors.black),
('BOX', (0, 0), (-1, -1), 0.25, colors.black),
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('SPAN', (0, 0), (0, -1))
]
elements.append(make_table(data, widths=[6 * cm, 16 * cm, 4 * cm], style=ts))
# courses during last two years
ics = i.course_set.all().order_by('-year')
data = [[Paragraph('Courses taught during this and last academic year', styleB),
Paragraph('Year', styleB),
Paragraph('Semester', styleB),
Paragraph('Course Code', styleB),
Paragraph('Course Title', styleB),
]]
for ic in ics:
data.append(['',
str(ic.year),
str(ic.semester),
str(ic.course_code),
str(ic.course_name)
])
ts = [ ('INNERGRID', (0, 0), (-1, -1), 0.15, colors.black),
('BOX', (0, 0), (-1, -1), 0.25, colors.black),
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('SPAN', (0, 0), (0, -1))
]
elements.append(make_table(data, widths=[6 * cm, 3 * cm, 3 * cm, 3 * cm, 11 * cm], style=ts))
# Percentage of time given to teaching
data = [[Paragraph('State percentage of your full-time work dedicated to teaching in the computing program under evaluation', styleB)
, str(percent_time_teaching) + '%']]
ts = [ ('INNERGRID', (0, 0), (-1, -1), 0.15, colors.black),
('BOX', (0, 0), (-1, -1), 0.25, colors.black),
('VALIGN', (0, 0), (-1, -1), 'MIDDLE'),
]
elements.append(make_table(data, widths=[6 * cm, 20 * cm], style=ts))
# END OF REPORT. NOW BUILD
doc.build(elements)
# OUTPUT FILE
# doc.save()
pdf = buffer.getvalue()
buffer.close()
response.write(pdf)
return response
| icon_string = str(counter) + '. <b>' + icon.organization + '</b>. ' + icon.description
data.append([cat_header,
Paragraph(icon_string, styleN),
Paragraph(str(icon.date.year), styleN),
])
cat_header = ''
counter += 1 | conditional_block |
report_internal_faculty_cv.py | # required for PDF generation
from __future__ import unicode_literals
from io import BytesIO
from reportlab.lib.pagesizes import letter, A4, cm
from reportlab.platypus import BaseDocTemplate, Frame, Paragraph, LongTable, TableStyle, PageTemplate
from reportlab.platypus.flowables import PageBreak
from reportlab.lib import colors
from reportlab.lib.enums import TA_CENTER
from django.http import HttpResponse
from cscm.views.FooterDocTemplate import FooterDocTemplate
import datetime
from cscm.views.internal_styles import *
from cscm.helpers.loadconfigs import get_config
from cscm.helpers.functions import *
# models
from cscm.models import CourseLogEntry
from cscm.models import Course, Instructor
from csip.models import InstructorProfile
# Forms imports
from django.core.context_processors import csrf
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django import forms
from django.template import RequestContext
from django.contrib.auth.decorators import login_required
import copy
# =============================================================================================
@login_required
def report_internal_faculty_cv(request):
class InternalFacultyCVForm(forms.Form):
|
c = RequestContext(request)
c.update(csrf(request))
# if 'course_name' in request.GET and request.GET['course_name']:
if request.method == 'POST':
# form submitted
form = InternalFacultyCVForm(request.POST)
form.is_valid()
instructor = form.cleaned_data['instructor']
instructor = instructor[0]
inner_response = report_internal_faculty_cv_pdf(request, instructor)
http_response = HttpResponse(inner_response, c)
escaped_name = str(instructor.name).replace(' ', '_')
this_year = datetime.datetime.now().strftime("%Y")
filename = "faculty_cv_" + escaped_name + "-" + this_year + ".pdf"
http_response['Content-Disposition'] = 'attachment;filename="' + filename + '"'
return http_response
else:
# form not yet submitted ... display it
form = InternalFacultyCVForm()
return render_to_response('internal_faculty_cv.html' , {
'form': form
}, c)
# ============= PDF GEN
def report_internal_faculty_cv_pdf(request, instructor):
def make_table(data, widths, style=[]):
table = LongTable(data, colWidths=widths)
table.setStyle(TableStyle(style))
return table
response = HttpResponse(mimetype='application/pdf')
buffer = BytesIO()
org = Internal()
styleN, styleB, styleH, styleSmaller = org.getTextStyles()
styleBC = copy.copy(styleB)
styleBC.alignment = TA_CENTER
width, height = A4
doc = FooterDocTemplate(buffer, pagesize=(height, width))
frame = org.getFrame(doc)
template = PageTemplate(id='test', frames=frame, onPage=org.get_header_footer(doccode="NCEAC.DOC.008", pagesize=(width, height)))
doc.addPageTemplates([template])
# Our main content holder
elements = []
i = instructor
ip = i.instructorprofile
percent_time_teaching = ip.percent_time_teaching
# title page
data = [[Paragraph('Name', styleB), i.name],
[Paragraph('Academic Rank', styleB), ip.designation],
[Paragraph('Administrative Responsibility', styleB), ip.admin_responsibility],
[Paragraph('Date of Original Appointment', styleB), ip.joining_date],
]
ts = [ ('INNERGRID', (0, 0), (-1, -1), 0.15, colors.black),
('BOX', (0, 0), (-1, -1), 0.25, colors.black),
('VALIGN', (0, 0), (-1, -1), 'MIDDLE'),
]
elements.append(make_table(data, widths=[6 * cm, 20 * cm], style=ts))
# elements.append(PageBreak())
# Education
ieds = i.instructoreducation_set.all().order_by('-year')
data = [[Paragraph('Degrees', styleB),
Paragraph('Degree', styleB),
Paragraph('Field', styleB),
Paragraph('Institution', styleB),
Paragraph('Date', styleB),
]]
for ied in ieds:
data.append(['',
Paragraph(ied.degree, styleN),
Paragraph(ied.field, styleN),
Paragraph(ied.university, styleN),
Paragraph(ied.year, styleN),
])
ts = [ ('INNERGRID', (0, 0), (-1, -1), 0.15, colors.black),
('BOX', (0, 0), (-1, -1), 0.25, colors.black),
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('SPAN', (0, 0), (0, -1))
]
elements.append(make_table(data, widths=[6 * cm, 4.5 * cm, 4.5 * cm, 7 * cm, 4 * cm], style=ts))
# events
ievs = i.instructoreventparticpation_set.all().order_by('-start_date')
counter = 1
cat_header = Paragraph('Conferences, workshops, and professional development programs participated during the past five years', styleB)
data = []
for iev in ievs:
iev_string = str(counter) + '. ' + iev.title + '. Role: ' + iev.role + ' (' + str(iev.duration) + ' at ' + str(iev.venue) + ')'
data.append([cat_header,
Paragraph(iev_string, styleN),
Paragraph(str(iev.start_date.year), styleN),
])
cat_header = ''
counter += 1
ts = [ ('INNERGRID', (0, 0), (-1, -1), 0.15, colors.black),
('BOX', (0, 0), (-1, -1), 0.25, colors.black),
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('SPAN', (0, 0), (0, -1))
]
elements.append(make_table(data, widths=[6 * cm, 16 * cm, 4 * cm], style=ts))
# Consultancies
icons = i.instructorconsultancy_set.all().order_by('-date')
counter = 1
cat_header = Paragraph('Consulting activities during the last five years', styleB)
data = []
for icon in icons:
icon_string = str(counter) + '. <b>' + icon.organization + '</b>. ' + icon.description
data.append([cat_header,
Paragraph(icon_string, styleN),
Paragraph(str(icon.date.year), styleN),
])
cat_header = ''
counter += 1
ts = [ ('INNERGRID', (0, 0), (-1, -1), 0.15, colors.black),
('BOX', (0, 0), (-1, -1), 0.25, colors.black),
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('SPAN', (0, 0), (0, -1))
]
elements.append(make_table(data, widths=[6 * cm, 16 * cm, 4 * cm], style=ts))
# Publications
ipbs = i.instructorpublication_set.all().order_by('-id')
counter = 1
cat_header = Paragraph('Principal publications during the last five years (give in standard bibliogrpahic format)', styleB)
data = []
for ipb in ipbs:
pub_string = str(counter) + '. ' + str(ipb)
data.append([cat_header,
Paragraph(pub_string, styleN),
Paragraph('date', styleN),
])
cat_header = ''
counter += 1
ts = [ ('INNERGRID', (0, 0), (-1, -1), 0.15, colors.black),
('BOX', (0, 0), (-1, -1), 0.25, colors.black),
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('SPAN', (0, 0), (0, -1))
]
elements.append(make_table(data, widths=[6 * cm, 16 * cm, 4 * cm], style=ts))
# Other activities
ioas = i.instructorotheractivity_set.all().order_by('-date')
counter = 1
cat_header = Paragraph('Other scholarly activities during the last five years (grants, sabbaticals, software development, etc.)', styleB)
data = []
for ioa in ioas:
pub_string = str(counter) + '. ' + str(ioa.title) + '. ' + str(ioa.description)
data.append([cat_header,
Paragraph(pub_string, styleN),
Paragraph(str(ioa.date), styleN),
])
cat_header = ''
counter += 1
ts = [ ('INNERGRID', (0, 0), (-1, -1), 0.15, colors.black),
('BOX', (0, 0), (-1, -1), 0.25, colors.black),
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('SPAN', (0, 0), (0, -1))
]
elements.append(make_table(data, widths=[6 * cm, 16 * cm, 4 * cm], style=ts))
# courses during last two years
ics = i.course_set.all().order_by('-year')
data = [[Paragraph('Courses taught during this and last academic year', styleB),
Paragraph('Year', styleB),
Paragraph('Semester', styleB),
Paragraph('Course Code', styleB),
Paragraph('Course Title', styleB),
]]
for ic in ics:
data.append(['',
str(ic.year),
str(ic.semester),
str(ic.course_code),
str(ic.course_name)
])
ts = [ ('INNERGRID', (0, 0), (-1, -1), 0.15, colors.black),
('BOX', (0, 0), (-1, -1), 0.25, colors.black),
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('SPAN', (0, 0), (0, -1))
]
elements.append(make_table(data, widths=[6 * cm, 3 * cm, 3 * cm, 3 * cm, 11 * cm], style=ts))
# Percentage of time given to teaching
data = [[Paragraph('State percentage of your full-time work dedicated to teaching in the computing program under evaluation', styleB)
, str(percent_time_teaching) + '%']]
ts = [ ('INNERGRID', (0, 0), (-1, -1), 0.15, colors.black),
('BOX', (0, 0), (-1, -1), 0.25, colors.black),
('VALIGN', (0, 0), (-1, -1), 'MIDDLE'),
]
elements.append(make_table(data, widths=[6 * cm, 20 * cm], style=ts))
# END OF REPORT. NOW BUILD
doc.build(elements)
# OUTPUT FILE
# doc.save()
pdf = buffer.getvalue()
buffer.close()
response.write(pdf)
return response
| if request.user.is_superuser:
instructor = forms.ModelMultipleChoiceField(queryset=Instructor.objects.all())
else:
instructor = forms.ModelMultipleChoiceField(queryset=Instructor.objects.filter(owner=request.user)) | identifier_body |
report_internal_faculty_cv.py | # required for PDF generation
from __future__ import unicode_literals
from io import BytesIO
from reportlab.lib.pagesizes import letter, A4, cm
from reportlab.platypus import BaseDocTemplate, Frame, Paragraph, LongTable, TableStyle, PageTemplate
from reportlab.platypus.flowables import PageBreak
from reportlab.lib import colors
from reportlab.lib.enums import TA_CENTER
from django.http import HttpResponse
from cscm.views.FooterDocTemplate import FooterDocTemplate
import datetime
from cscm.views.internal_styles import *
from cscm.helpers.loadconfigs import get_config
from cscm.helpers.functions import *
# models
from cscm.models import CourseLogEntry
from cscm.models import Course, Instructor
from csip.models import InstructorProfile
# Forms imports
from django.core.context_processors import csrf
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django import forms
from django.template import RequestContext
from django.contrib.auth.decorators import login_required
import copy
# =============================================================================================
@login_required
def report_internal_faculty_cv(request):
class | (forms.Form):
if request.user.is_superuser:
instructor = forms.ModelMultipleChoiceField(queryset=Instructor.objects.all())
else:
instructor = forms.ModelMultipleChoiceField(queryset=Instructor.objects.filter(owner=request.user))
c = RequestContext(request)
c.update(csrf(request))
# if 'course_name' in request.GET and request.GET['course_name']:
if request.method == 'POST':
# form submitted
form = InternalFacultyCVForm(request.POST)
form.is_valid()
instructor = form.cleaned_data['instructor']
instructor = instructor[0]
inner_response = report_internal_faculty_cv_pdf(request, instructor)
http_response = HttpResponse(inner_response, c)
escaped_name = str(instructor.name).replace(' ', '_')
this_year = datetime.datetime.now().strftime("%Y")
filename = "faculty_cv_" + escaped_name + "-" + this_year + ".pdf"
http_response['Content-Disposition'] = 'attachment;filename="' + filename + '"'
return http_response
else:
# form not yet submitted ... display it
form = InternalFacultyCVForm()
return render_to_response('internal_faculty_cv.html' , {
'form': form
}, c)
# ============= PDF GEN
def report_internal_faculty_cv_pdf(request, instructor):
def make_table(data, widths, style=[]):
table = LongTable(data, colWidths=widths)
table.setStyle(TableStyle(style))
return table
response = HttpResponse(mimetype='application/pdf')
buffer = BytesIO()
org = Internal()
styleN, styleB, styleH, styleSmaller = org.getTextStyles()
styleBC = copy.copy(styleB)
styleBC.alignment = TA_CENTER
width, height = A4
doc = FooterDocTemplate(buffer, pagesize=(height, width))
frame = org.getFrame(doc)
template = PageTemplate(id='test', frames=frame, onPage=org.get_header_footer(doccode="NCEAC.DOC.008", pagesize=(width, height)))
doc.addPageTemplates([template])
# Our main content holder
elements = []
i = instructor
ip = i.instructorprofile
percent_time_teaching = ip.percent_time_teaching
# title page
data = [[Paragraph('Name', styleB), i.name],
[Paragraph('Academic Rank', styleB), ip.designation],
[Paragraph('Administrative Responsibility', styleB), ip.admin_responsibility],
[Paragraph('Date of Original Appointment', styleB), ip.joining_date],
]
ts = [ ('INNERGRID', (0, 0), (-1, -1), 0.15, colors.black),
('BOX', (0, 0), (-1, -1), 0.25, colors.black),
('VALIGN', (0, 0), (-1, -1), 'MIDDLE'),
]
elements.append(make_table(data, widths=[6 * cm, 20 * cm], style=ts))
# elements.append(PageBreak())
# Education
ieds = i.instructoreducation_set.all().order_by('-year')
data = [[Paragraph('Degrees', styleB),
Paragraph('Degree', styleB),
Paragraph('Field', styleB),
Paragraph('Institution', styleB),
Paragraph('Date', styleB),
]]
for ied in ieds:
data.append(['',
Paragraph(ied.degree, styleN),
Paragraph(ied.field, styleN),
Paragraph(ied.university, styleN),
Paragraph(ied.year, styleN),
])
ts = [ ('INNERGRID', (0, 0), (-1, -1), 0.15, colors.black),
('BOX', (0, 0), (-1, -1), 0.25, colors.black),
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('SPAN', (0, 0), (0, -1))
]
elements.append(make_table(data, widths=[6 * cm, 4.5 * cm, 4.5 * cm, 7 * cm, 4 * cm], style=ts))
# events
ievs = i.instructoreventparticpation_set.all().order_by('-start_date')
counter = 1
cat_header = Paragraph('Conferences, workshops, and professional development programs participated during the past five years', styleB)
data = []
for iev in ievs:
iev_string = str(counter) + '. ' + iev.title + '. Role: ' + iev.role + ' (' + str(iev.duration) + ' at ' + str(iev.venue) + ')'
data.append([cat_header,
Paragraph(iev_string, styleN),
Paragraph(str(iev.start_date.year), styleN),
])
cat_header = ''
counter += 1
ts = [ ('INNERGRID', (0, 0), (-1, -1), 0.15, colors.black),
('BOX', (0, 0), (-1, -1), 0.25, colors.black),
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('SPAN', (0, 0), (0, -1))
]
elements.append(make_table(data, widths=[6 * cm, 16 * cm, 4 * cm], style=ts))
# Consultancies
icons = i.instructorconsultancy_set.all().order_by('-date')
counter = 1
cat_header = Paragraph('Consulting activities during the last five years', styleB)
data = []
for icon in icons:
icon_string = str(counter) + '. <b>' + icon.organization + '</b>. ' + icon.description
data.append([cat_header,
Paragraph(icon_string, styleN),
Paragraph(str(icon.date.year), styleN),
])
cat_header = ''
counter += 1
ts = [ ('INNERGRID', (0, 0), (-1, -1), 0.15, colors.black),
('BOX', (0, 0), (-1, -1), 0.25, colors.black),
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('SPAN', (0, 0), (0, -1))
]
elements.append(make_table(data, widths=[6 * cm, 16 * cm, 4 * cm], style=ts))
# Publications
ipbs = i.instructorpublication_set.all().order_by('-id')
counter = 1
cat_header = Paragraph('Principal publications during the last five years (give in standard bibliogrpahic format)', styleB)
data = []
for ipb in ipbs:
pub_string = str(counter) + '. ' + str(ipb)
data.append([cat_header,
Paragraph(pub_string, styleN),
Paragraph('date', styleN),
])
cat_header = ''
counter += 1
ts = [ ('INNERGRID', (0, 0), (-1, -1), 0.15, colors.black),
('BOX', (0, 0), (-1, -1), 0.25, colors.black),
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('SPAN', (0, 0), (0, -1))
]
elements.append(make_table(data, widths=[6 * cm, 16 * cm, 4 * cm], style=ts))
# Other activities
ioas = i.instructorotheractivity_set.all().order_by('-date')
counter = 1
cat_header = Paragraph('Other scholarly activities during the last five years (grants, sabbaticals, software development, etc.)', styleB)
data = []
for ioa in ioas:
pub_string = str(counter) + '. ' + str(ioa.title) + '. ' + str(ioa.description)
data.append([cat_header,
Paragraph(pub_string, styleN),
Paragraph(str(ioa.date), styleN),
])
cat_header = ''
counter += 1
ts = [ ('INNERGRID', (0, 0), (-1, -1), 0.15, colors.black),
('BOX', (0, 0), (-1, -1), 0.25, colors.black),
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('SPAN', (0, 0), (0, -1))
]
elements.append(make_table(data, widths=[6 * cm, 16 * cm, 4 * cm], style=ts))
# courses during last two years
ics = i.course_set.all().order_by('-year')
data = [[Paragraph('Courses taught during this and last academic year', styleB),
Paragraph('Year', styleB),
Paragraph('Semester', styleB),
Paragraph('Course Code', styleB),
Paragraph('Course Title', styleB),
]]
for ic in ics:
data.append(['',
str(ic.year),
str(ic.semester),
str(ic.course_code),
str(ic.course_name)
])
ts = [ ('INNERGRID', (0, 0), (-1, -1), 0.15, colors.black),
('BOX', (0, 0), (-1, -1), 0.25, colors.black),
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('SPAN', (0, 0), (0, -1))
]
elements.append(make_table(data, widths=[6 * cm, 3 * cm, 3 * cm, 3 * cm, 11 * cm], style=ts))
# Percentage of time given to teaching
data = [[Paragraph('State percentage of your full-time work dedicated to teaching in the computing program under evaluation', styleB)
, str(percent_time_teaching) + '%']]
ts = [ ('INNERGRID', (0, 0), (-1, -1), 0.15, colors.black),
('BOX', (0, 0), (-1, -1), 0.25, colors.black),
('VALIGN', (0, 0), (-1, -1), 'MIDDLE'),
]
elements.append(make_table(data, widths=[6 * cm, 20 * cm], style=ts))
# END OF REPORT. NOW BUILD
doc.build(elements)
# OUTPUT FILE
# doc.save()
pdf = buffer.getvalue()
buffer.close()
response.write(pdf)
return response
| InternalFacultyCVForm | identifier_name |
common_voices_eval.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os
import torchaudio
from copy import deepcopy
import torch
import time
import random
import math
import json
import subprocess
import sys
import progressbar
from pathlib import Path
from torch.utils.data import Dataset, DataLoader
from torch.multiprocessing import Pool
from cpc.criterion.seq_alignment import get_seq_PER
from cpc.criterion.seq_alignment import beam_search
from cpc.feature_loader import loadModel
from cpc.dataset import findAllSeqs, parseSeqLabels, filterSeqs
def load(path_item):
seq_name = path_item.stem
data = torchaudio.load(str(path_item))[0].view(1, -1)
return seq_name, data
class SingleSequenceDataset(Dataset):
def __init__(self,
pathDB,
seqNames,
phoneLabelsDict,
inDim=1,
transpose=True):
"""
Args:
- path (string): path to the training dataset
- sizeWindow (int): size of the sliding window
- seqNames (list): sequences to load
- phoneLabels (dictionnary): if not None, a dictionnary with the
following entries
"step": size of a labelled window
"$SEQ_NAME": list of phonem labels for
the sequence $SEQ_NAME
"""
self.seqNames = deepcopy(seqNames)
self.pathDB = pathDB
self.phoneLabelsDict = deepcopy(phoneLabelsDict)
self.inDim = inDim
self.transpose = transpose
self.loadSeqs()
def loadSeqs(self):
# Labels
self.seqOffset = [0]
self.phoneLabels = []
self.phoneOffsets = [0]
self.data = []
self.maxSize = 0
self.maxSizePhone = 0
# Data
nprocess = min(30, len(self.seqNames))
start_time = time.time()
to_load = [Path(self.pathDB) / x for _, x in self.seqNames]
with Pool(nprocess) as p:
poolData = p.map(load, to_load)
tmpData = []
poolData.sort()
totSize = 0
minSizePhone = float('inf')
for seqName, seq in poolData:
self.phoneLabels += self.phoneLabelsDict[seqName]
self.phoneOffsets.append(len(self.phoneLabels))
self.maxSizePhone = max(self.maxSizePhone, len(
self.phoneLabelsDict[seqName]))
minSizePhone = min(minSizePhone, len(
self.phoneLabelsDict[seqName]))
sizeSeq = seq.size(1)
self.maxSize = max(self.maxSize, sizeSeq)
totSize += sizeSeq
tmpData.append(seq)
self.seqOffset.append(self.seqOffset[-1] + sizeSeq)
del seq
self.data = torch.cat(tmpData, dim=1)
self.phoneLabels = torch.tensor(self.phoneLabels, dtype=torch.long)
print(f'Loaded {len(self.phoneOffsets)} sequences '
f'in {time.time() - start_time:.2f} seconds')
print(f'maxSizeSeq : {self.maxSize}')
print(f'maxSizePhone : {self.maxSizePhone}')
print(f"minSizePhone : {minSizePhone}")
print(f'Total size dataset {totSize / (16000 * 3600)} hours')
def __getitem__(self, idx):
offsetStart = self.seqOffset[idx]
offsetEnd = self.seqOffset[idx+1]
offsetPhoneStart = self.phoneOffsets[idx]
offsetPhoneEnd = self.phoneOffsets[idx + 1]
sizeSeq = int(offsetEnd - offsetStart)
sizePhone = int(offsetPhoneEnd - offsetPhoneStart)
outSeq = torch.zeros((self.inDim, self.maxSize))
outPhone = torch.zeros((self.maxSizePhone))
outSeq[:, :sizeSeq] = self.data[:, offsetStart:offsetEnd]
outPhone[:sizePhone] = self.phoneLabels[offsetPhoneStart:offsetPhoneEnd]
return outSeq, torch.tensor([sizeSeq], dtype=torch.long), outPhone.long(), torch.tensor([sizePhone], dtype=torch.long)
def __len__(self):
return len(self.seqOffset) - 1
class CTCphone_criterion(torch.nn.Module):
def __init__(self, dimEncoder, nPhones, LSTM=False, sizeKernel=8,
seqNorm=False, dropout=False, reduction='sum'):
super(CTCphone_criterion, self).__init__()
self.seqNorm = seqNorm
self.epsilon = 1e-8
self.dropout = torch.nn.Dropout2d(
p=0.5, inplace=False) if dropout else None
self.conv1 = torch.nn.LSTM(dimEncoder, dimEncoder,
num_layers=1, batch_first=True)
self.PhoneCriterionClassifier = torch.nn.Conv1d(
dimEncoder, nPhones + 1, sizeKernel, stride=sizeKernel // 2)
self.lossCriterion = torch.nn.CTCLoss(blank=nPhones,
reduction=reduction,
zero_infinity=True)
self.relu = torch.nn.ReLU()
self.BLANK_LABEL = nPhones
self.useLSTM = LSTM
def getPrediction(self, cFeature, featureSize):
B, S, H = cFeature.size()
if self.seqNorm:
for b in range(B):
size = featureSize[b]
m = cFeature[b, :size].mean(dim=0, keepdim=True)
v = cFeature[b, :size].var(dim=0, keepdim=True)
cFeature[b] = (cFeature[b] - m) / torch.sqrt(v + self.epsilon)
if self.useLSTM:
cFeature = self.conv1(cFeature)[0]
cFeature = cFeature.permute(0, 2, 1)
if self.dropout is not None:
cFeature = self.dropout(cFeature)
cFeature = self.PhoneCriterionClassifier(cFeature)
return cFeature.permute(0, 2, 1)
def forward(self, cFeature, featureSize, label, labelSize):
# cFeature.size() : batchSize x seq Size x hidden size
B, S, H = cFeature.size()
predictions = self.getPrediction(cFeature, featureSize)
featureSize /= 4
predictions = cut_data(predictions, featureSize)
featureSize = torch.clamp(featureSize, max=predictions.size(1))
label = cut_data(label, labelSize)
if labelSize.min() <= 0:
print(label, labelSize)
predictions = torch.nn.functional.log_softmax(predictions, dim=2)
predictions = predictions.permute(1, 0, 2)
loss = self.lossCriterion(predictions, label,
featureSize, labelSize).view(1, -1)
if torch.isinf(loss).sum() > 0 or torch.isnan(loss).sum() > 0:
loss = 0
return loss
class IDModule(torch.nn.Module):
def __init__(self):
super(IDModule, self).__init__()
def forward(self, feature, *args):
B, C, S = feature.size()
return feature.permute(0, 2, 1), None, None
def cut_data(seq, sizeSeq):
maxSeq = sizeSeq.max()
return seq[:, :maxSeq]
def prepare_data(data):
seq, sizeSeq, phone, sizePhone = data
seq = seq.cuda(non_blocking=True)
phone = phone.cuda(non_blocking=True)
sizeSeq = sizeSeq.cuda(non_blocking=True).view(-1)
sizePhone = sizePhone.cuda(non_blocking=True).view(-1)
seq = cut_data(seq.permute(0, 2, 1), sizeSeq).permute(0, 2, 1)
return seq, sizeSeq, phone, sizePhone
def train_step(train_loader,
model,
criterion,
optimizer,
downsampling_factor):
if model.optimize:
model.train()
criterion.train()
avg_loss = 0
nItems = 0
for data in train_loader:
optimizer.zero_grad()
seq, sizeSeq, phone, sizePhone = prepare_data(data)
c_feature, _, _ = model(seq, None)
if not model.optimize:
c_feature = c_feature.detach()
sizeSeq = sizeSeq / downsampling_factor
loss = criterion(c_feature, sizeSeq, phone, sizePhone)
loss.mean().backward()
avg_loss += loss.mean().item()
nItems += 1
optimizer.step()
return avg_loss / nItems
def val_step(val_loader,
model,
criterion,
downsampling_factor):
model.eval()
criterion.eval()
avg_loss = 0
nItems = 0
for data in val_loader:
with torch.no_grad():
seq, sizeSeq, phone, sizePhone = prepare_data(data)
c_feature, _, _ = model(seq, None)
sizeSeq = sizeSeq / downsampling_factor
loss = criterion(c_feature, sizeSeq, phone, sizePhone)
avg_loss += loss.mean().item()
nItems += 1
return avg_loss / nItems
def get_per(data):
pred, size_pred, gt, size_gt, blank_label = data
l_ = min(size_pred // 4, pred.size(0))
p_ = pred[:l_].view(l_, -1).numpy()
gt_seq = gt[:size_gt].view(-1).tolist()
predSeq = beam_search(p_, 20, blank_label)[0][1]
out = get_seq_PER(gt_seq, predSeq)
return out
def perStep(val_loader,
model,
criterion,
downsampling_factor):
model.eval()
criterion.eval()
avgPER = 0
varPER = 0
nItems = 0
print("Starting the PER computation through beam search")
bar = progressbar.ProgressBar(maxval=len(val_loader))
bar.start()
for index, data in enumerate(val_loader):
bar.update(index)
with torch.no_grad():
seq, sizeSeq, phone, sizePhone = prepare_data(data)
c_feature, _, _ = model(seq, None)
sizeSeq = sizeSeq / downsampling_factor
predictions = torch.nn.functional.softmax(
criterion.module.getPrediction(c_feature, sizeSeq), dim=2).cpu()
phone = phone.cpu()
sizeSeq = sizeSeq.cpu()
sizePhone = sizePhone.cpu()
bs = c_feature.size(0)
data_per = [(predictions[b], sizeSeq[b], phone[b], sizePhone[b],
criterion.module.BLANK_LABEL) for b in range(bs)]
with Pool(bs) as p:
poolData = p.map(get_per, data_per)
avgPER += sum([x for x in poolData])
varPER += sum([x*x for x in poolData])
nItems += len(poolData)
bar.finish()
avgPER /= nItems
varPER /= nItems
varPER -= avgPER**2
print(f"Average PER {avgPER}")
print(f"Standard deviation PER {math.sqrt(varPER)}")
def run(train_loader,
val_loader,
model,
criterion,
optimizer,
downsampling_factor,
nEpochs,
pathCheckpoint):
print(f"Starting the training for {nEpochs} epochs")
bestLoss = float('inf')
for epoch in range(nEpochs):
lossTrain = train_step(train_loader, model, criterion,
optimizer, downsampling_factor)
print(f"Epoch {epoch} loss train : {lossTrain}")
lossVal = val_step(val_loader, model, criterion, downsampling_factor)
print(f"Epoch {epoch} loss val : {lossVal}")
if lossVal < bestLoss:
bestLoss = lossVal
state_dict = {'classifier': criterion.state_dict(),
'model': model.state_dict(),
'bestLoss': bestLoss}
torch.save(state_dict, pathCheckpoint)
def get_PER_args(args):
path_args_training = os.path.join(args.output, "args_training.json")
with open(path_args_training, 'rb') as file:
data = json.load(file)
if args.pathDB is None:
args.pathDB = data["pathDB"]
args.file_extension = data["file_extension"]
if args.pathVal is None and args.pathPhone is None:
args.pathPhone = data["pathPhone"]
args.pathVal = data["pathVal"]
args.pathCheckpoint = data["pathCheckpoint"]
args.no_pretraining = data["no_pretraining"]
args.LSTM = data.get("LSTM", False)
args.seqNorm = data.get("seqNorm", False)
args.dropout = data.get("dropout", False)
args.in_dim = data.get("in_dim", 1)
args.loss_reduction = data.get("loss_reduction", "mean")
return args
if __name__ == "__main__":
torch.multiprocessing.set_start_method('spawn')
parser = argparse.ArgumentParser(description='Simple phone recognition pipeline '
'for the common voices datasets')
subparsers = parser.add_subparsers(dest='command')
parser_train = subparsers.add_parser('train')
parser_train.add_argument('pathDB', type=str,
help='Path to the directory containing the '
'audio data / pre-computed features.')
parser_train.add_argument('pathPhone', type=str,
help='Path to the .txt file containing the '
'phone transcription.')
parser_train.add_argument('pathCheckpoint', type=str,
help='Path to the CPC checkpoint to load. '
'Set to ID to work with pre-cimputed features.')
parser_train.add_argument('--freeze', action='store_true',
help="Freeze the CPC features layers")
parser_train.add_argument('--pathTrain', default=None, type=str,
help='Path to the .txt files containing the '
'list of the training sequences.')
parser_train.add_argument('--pathVal', default=None, type=str,
help='Path to the .txt files containing the '
'list of the validation sequences.')
parser_train.add_argument('--file_extension', type=str, default=".mp3",
help='Extension of the files in the '
'dataset')
parser_train.add_argument('--batchSize', type=int, default=8)
parser_train.add_argument('--nEpochs', type=int, default=30)
parser_train.add_argument('--beta1', type=float, default=0.9,
help='Value of beta1 for the Adam optimizer.')
parser_train.add_argument('--beta2', type=float, default=0.999,
help='Value of beta2 for the Adam optimizer.')
parser_train.add_argument('--epsilon', type=float, default=1e-08,
help='Value of epsilon for the Adam optimizer.')
parser_train.add_argument('--lr', type=float, default=2e-04,
help='Learning rate.')
parser_train.add_argument('-o', '--output', type=str, default='out',
help="Output directory")
parser_train.add_argument('--debug', action='store_true',
help='If activated, will only load a few '
'sequences from the dataset.')
parser_train.add_argument('--no_pretraining', action='store_true',
help='Activate use a randmly initialized '
'network')
parser_train.add_argument('--LSTM', action='store_true',
help='Activate to add a LSTM to the phone '
'classifier')
parser_train.add_argument('--seqNorm', action='store_true',
help='Activate if you want to normalize each '
'batch of features through time before the '
'phone classification.')
parser_train.add_argument('--kernelSize', type=int, default=8,
help='Number of features to concatenate before '
'feeding them to the phone classifier.')
parser_train.add_argument('--dropout', action='store_true')
parser_train.add_argument('--in_dim', type=int, default=1,
help='Dimension of the input data: useful when '
'working with pre-computed features or ' |
parser_per = subparsers.add_parser('per')
parser_per.add_argument('output', type=str)
parser_per.add_argument('--batchSize', type=int, default=8)
parser_per.add_argument('--debug', action='store_true',
help='If activated, will only load a few '
'sequences from the dataset.')
parser_per.add_argument('--pathDB',
help="For computing the PER on another dataset",
type=str, default=None)
parser_per.add_argument('--pathVal',
help="For computing the PER on specific sequences",
type=str, default=None)
parser_per.add_argument('--pathPhone',
help="For computing the PER on specific sequences",
default=None, type=str)
parser_per.add_argument('--file_extension', type=str, default=".mp3")
parser_per.add_argument('--name', type=str, default="0")
args = parser.parse_args()
if args.command == 'per':
args = get_PER_args(args)
# Output Directory
if not os.path.isdir(args.output):
os.mkdir(args.output)
name = f"_{args.name}" if args.command == "per" else ""
pathLogs = os.path.join(args.output, f'logs_{args.command}{name}.txt')
tee = subprocess.Popen(["tee", pathLogs], stdin=subprocess.PIPE)
os.dup2(tee.stdin.fileno(), sys.stdout.fileno())
phoneLabels, nPhones = parseSeqLabels(args.pathPhone)
inSeqs, _ = findAllSeqs(args.pathDB,
extension=args.file_extension)
# Datasets
if args.command == 'train' and args.pathTrain is not None:
seqTrain = filterSeqs(args.pathTrain, inSeqs)
else:
seqTrain = inSeqs
if args.pathVal is None and args.command == 'train':
random.shuffle(seqTrain)
sizeTrain = int(0.9 * len(seqTrain))
seqTrain, seqVal = seqTrain[:sizeTrain], seqTrain[sizeTrain:]
elif args.pathVal is not None:
seqVal = filterSeqs(args.pathVal, inSeqs)
else:
raise RuntimeError("No validation dataset found for PER computation")
if args.debug:
seqVal = seqVal[:100]
downsampling_factor = 160
if args.pathCheckpoint == 'ID':
downsampling_factor = 1
feature_maker = IDModule()
hiddenGar = args.in_dim
else:
feature_maker, hiddenGar, _ = loadModel([args.pathCheckpoint],
loadStateDict=not args.no_pretraining)
feature_maker.cuda()
feature_maker = torch.nn.DataParallel(feature_maker)
phone_criterion = CTCphone_criterion(hiddenGar, nPhones, args.LSTM,
seqNorm=args.seqNorm,
dropout=args.dropout,
reduction=args.loss_reduction)
phone_criterion.cuda()
phone_criterion = torch.nn.DataParallel(phone_criterion)
print(f"Loading the validation dataset at {args.pathDB}")
datasetVal = SingleSequenceDataset(args.pathDB, seqVal,
phoneLabels, inDim=args.in_dim)
val_loader = DataLoader(datasetVal, batch_size=args.batchSize,
shuffle=True)
# Checkpoint file where the model should be saved
pathCheckpoint = os.path.join(args.output, 'checkpoint.pt')
if args.command == 'train':
feature_maker.optimize = True
if args.freeze:
feature_maker.eval()
feature_maker.optimize = False
for g in feature_maker.parameters():
g.requires_grad = False
if args.debug:
print("debug")
random.shuffle(seqTrain)
seqTrain = seqTrain[:1000]
seqVal = seqVal[:100]
print(f"Loading the training dataset at {args.pathDB}")
datasetTrain = SingleSequenceDataset(args.pathDB, seqTrain,
phoneLabels, inDim=args.in_dim)
train_loader = DataLoader(datasetTrain, batch_size=args.batchSize,
shuffle=True)
# Optimizer
g_params = list(phone_criterion.parameters())
if not args.freeze:
print("Optimizing model")
g_params += list(feature_maker.parameters())
optimizer = torch.optim.Adam(g_params, lr=args.lr,
betas=(args.beta1, args.beta2),
eps=args.epsilon)
pathArgs = os.path.join(args.output, "args_training.json")
with open(pathArgs, 'w') as file:
json.dump(vars(args), file, indent=2)
run(train_loader, val_loader, feature_maker, phone_criterion,
optimizer, downsampling_factor, args.nEpochs, pathCheckpoint)
else:
print(f"Loading data at {pathCheckpoint}")
state_dict = torch.load(pathCheckpoint,
map_location=lambda storage, loc: storage)
if 'bestLoss' in state_dict:
print(f"Best loss : {state_dict['bestLoss']}")
phone_criterion.load_state_dict(state_dict['classifier'])
feature_maker.load_state_dict(state_dict['model'])
pathArgs = os.path.join(args.output,
f"args_validation_{args.name}.json")
with open(pathArgs, 'w') as file:
json.dump(vars(args), file, indent=2)
perStep(val_loader,
feature_maker,
phone_criterion,
downsampling_factor) | 'stereo audio.')
parser_train.add_argument('--loss_reduction', type=str, default='mean',
choices=['mean', 'sum']) | random_line_split |
common_voices_eval.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os
import torchaudio
from copy import deepcopy
import torch
import time
import random
import math
import json
import subprocess
import sys
import progressbar
from pathlib import Path
from torch.utils.data import Dataset, DataLoader
from torch.multiprocessing import Pool
from cpc.criterion.seq_alignment import get_seq_PER
from cpc.criterion.seq_alignment import beam_search
from cpc.feature_loader import loadModel
from cpc.dataset import findAllSeqs, parseSeqLabels, filterSeqs
def load(path_item):
seq_name = path_item.stem
data = torchaudio.load(str(path_item))[0].view(1, -1)
return seq_name, data
class SingleSequenceDataset(Dataset):
def __init__(self,
pathDB,
seqNames,
phoneLabelsDict,
inDim=1,
transpose=True):
"""
Args:
- path (string): path to the training dataset
- sizeWindow (int): size of the sliding window
- seqNames (list): sequences to load
- phoneLabels (dictionnary): if not None, a dictionnary with the
following entries
"step": size of a labelled window
"$SEQ_NAME": list of phonem labels for
the sequence $SEQ_NAME
"""
self.seqNames = deepcopy(seqNames)
self.pathDB = pathDB
self.phoneLabelsDict = deepcopy(phoneLabelsDict)
self.inDim = inDim
self.transpose = transpose
self.loadSeqs()
def loadSeqs(self):
# Labels
self.seqOffset = [0]
self.phoneLabels = []
self.phoneOffsets = [0]
self.data = []
self.maxSize = 0
self.maxSizePhone = 0
# Data
nprocess = min(30, len(self.seqNames))
start_time = time.time()
to_load = [Path(self.pathDB) / x for _, x in self.seqNames]
with Pool(nprocess) as p:
poolData = p.map(load, to_load)
tmpData = []
poolData.sort()
totSize = 0
minSizePhone = float('inf')
for seqName, seq in poolData:
self.phoneLabels += self.phoneLabelsDict[seqName]
self.phoneOffsets.append(len(self.phoneLabels))
self.maxSizePhone = max(self.maxSizePhone, len(
self.phoneLabelsDict[seqName]))
minSizePhone = min(minSizePhone, len(
self.phoneLabelsDict[seqName]))
sizeSeq = seq.size(1)
self.maxSize = max(self.maxSize, sizeSeq)
totSize += sizeSeq
tmpData.append(seq)
self.seqOffset.append(self.seqOffset[-1] + sizeSeq)
del seq
self.data = torch.cat(tmpData, dim=1)
self.phoneLabels = torch.tensor(self.phoneLabels, dtype=torch.long)
print(f'Loaded {len(self.phoneOffsets)} sequences '
f'in {time.time() - start_time:.2f} seconds')
print(f'maxSizeSeq : {self.maxSize}')
print(f'maxSizePhone : {self.maxSizePhone}')
print(f"minSizePhone : {minSizePhone}")
print(f'Total size dataset {totSize / (16000 * 3600)} hours')
def __getitem__(self, idx):
offsetStart = self.seqOffset[idx]
offsetEnd = self.seqOffset[idx+1]
offsetPhoneStart = self.phoneOffsets[idx]
offsetPhoneEnd = self.phoneOffsets[idx + 1]
sizeSeq = int(offsetEnd - offsetStart)
sizePhone = int(offsetPhoneEnd - offsetPhoneStart)
outSeq = torch.zeros((self.inDim, self.maxSize))
outPhone = torch.zeros((self.maxSizePhone))
outSeq[:, :sizeSeq] = self.data[:, offsetStart:offsetEnd]
outPhone[:sizePhone] = self.phoneLabels[offsetPhoneStart:offsetPhoneEnd]
return outSeq, torch.tensor([sizeSeq], dtype=torch.long), outPhone.long(), torch.tensor([sizePhone], dtype=torch.long)
def __len__(self):
return len(self.seqOffset) - 1
class CTCphone_criterion(torch.nn.Module):
def __init__(self, dimEncoder, nPhones, LSTM=False, sizeKernel=8,
seqNorm=False, dropout=False, reduction='sum'):
super(CTCphone_criterion, self).__init__()
self.seqNorm = seqNorm
self.epsilon = 1e-8
self.dropout = torch.nn.Dropout2d(
p=0.5, inplace=False) if dropout else None
self.conv1 = torch.nn.LSTM(dimEncoder, dimEncoder,
num_layers=1, batch_first=True)
self.PhoneCriterionClassifier = torch.nn.Conv1d(
dimEncoder, nPhones + 1, sizeKernel, stride=sizeKernel // 2)
self.lossCriterion = torch.nn.CTCLoss(blank=nPhones,
reduction=reduction,
zero_infinity=True)
self.relu = torch.nn.ReLU()
self.BLANK_LABEL = nPhones
self.useLSTM = LSTM
def getPrediction(self, cFeature, featureSize):
B, S, H = cFeature.size()
if self.seqNorm:
for b in range(B):
size = featureSize[b]
m = cFeature[b, :size].mean(dim=0, keepdim=True)
v = cFeature[b, :size].var(dim=0, keepdim=True)
cFeature[b] = (cFeature[b] - m) / torch.sqrt(v + self.epsilon)
if self.useLSTM:
cFeature = self.conv1(cFeature)[0]
cFeature = cFeature.permute(0, 2, 1)
if self.dropout is not None:
cFeature = self.dropout(cFeature)
cFeature = self.PhoneCriterionClassifier(cFeature)
return cFeature.permute(0, 2, 1)
def forward(self, cFeature, featureSize, label, labelSize):
# cFeature.size() : batchSize x seq Size x hidden size
B, S, H = cFeature.size()
predictions = self.getPrediction(cFeature, featureSize)
featureSize /= 4
predictions = cut_data(predictions, featureSize)
featureSize = torch.clamp(featureSize, max=predictions.size(1))
label = cut_data(label, labelSize)
if labelSize.min() <= 0:
print(label, labelSize)
predictions = torch.nn.functional.log_softmax(predictions, dim=2)
predictions = predictions.permute(1, 0, 2)
loss = self.lossCriterion(predictions, label,
featureSize, labelSize).view(1, -1)
if torch.isinf(loss).sum() > 0 or torch.isnan(loss).sum() > 0:
loss = 0
return loss
class IDModule(torch.nn.Module):
def __init__(self):
super(IDModule, self).__init__()
def forward(self, feature, *args):
B, C, S = feature.size()
return feature.permute(0, 2, 1), None, None
def cut_data(seq, sizeSeq):
maxSeq = sizeSeq.max()
return seq[:, :maxSeq]
def prepare_data(data):
seq, sizeSeq, phone, sizePhone = data
seq = seq.cuda(non_blocking=True)
phone = phone.cuda(non_blocking=True)
sizeSeq = sizeSeq.cuda(non_blocking=True).view(-1)
sizePhone = sizePhone.cuda(non_blocking=True).view(-1)
seq = cut_data(seq.permute(0, 2, 1), sizeSeq).permute(0, 2, 1)
return seq, sizeSeq, phone, sizePhone
def train_step(train_loader,
model,
criterion,
optimizer,
downsampling_factor):
if model.optimize:
model.train()
criterion.train()
avg_loss = 0
nItems = 0
for data in train_loader:
optimizer.zero_grad()
seq, sizeSeq, phone, sizePhone = prepare_data(data)
c_feature, _, _ = model(seq, None)
if not model.optimize:
c_feature = c_feature.detach()
sizeSeq = sizeSeq / downsampling_factor
loss = criterion(c_feature, sizeSeq, phone, sizePhone)
loss.mean().backward()
avg_loss += loss.mean().item()
nItems += 1
optimizer.step()
return avg_loss / nItems
def val_step(val_loader,
model,
criterion,
downsampling_factor):
model.eval()
criterion.eval()
avg_loss = 0
nItems = 0
for data in val_loader:
with torch.no_grad():
seq, sizeSeq, phone, sizePhone = prepare_data(data)
c_feature, _, _ = model(seq, None)
sizeSeq = sizeSeq / downsampling_factor
loss = criterion(c_feature, sizeSeq, phone, sizePhone)
avg_loss += loss.mean().item()
nItems += 1
return avg_loss / nItems
def get_per(data):
pred, size_pred, gt, size_gt, blank_label = data
l_ = min(size_pred // 4, pred.size(0))
p_ = pred[:l_].view(l_, -1).numpy()
gt_seq = gt[:size_gt].view(-1).tolist()
predSeq = beam_search(p_, 20, blank_label)[0][1]
out = get_seq_PER(gt_seq, predSeq)
return out
def perStep(val_loader,
model,
criterion,
downsampling_factor):
model.eval()
criterion.eval()
avgPER = 0
varPER = 0
nItems = 0
print("Starting the PER computation through beam search")
bar = progressbar.ProgressBar(maxval=len(val_loader))
bar.start()
for index, data in enumerate(val_loader):
bar.update(index)
with torch.no_grad():
seq, sizeSeq, phone, sizePhone = prepare_data(data)
c_feature, _, _ = model(seq, None)
sizeSeq = sizeSeq / downsampling_factor
predictions = torch.nn.functional.softmax(
criterion.module.getPrediction(c_feature, sizeSeq), dim=2).cpu()
phone = phone.cpu()
sizeSeq = sizeSeq.cpu()
sizePhone = sizePhone.cpu()
bs = c_feature.size(0)
data_per = [(predictions[b], sizeSeq[b], phone[b], sizePhone[b],
criterion.module.BLANK_LABEL) for b in range(bs)]
with Pool(bs) as p:
poolData = p.map(get_per, data_per)
avgPER += sum([x for x in poolData])
varPER += sum([x*x for x in poolData])
nItems += len(poolData)
bar.finish()
avgPER /= nItems
varPER /= nItems
varPER -= avgPER**2
print(f"Average PER {avgPER}")
print(f"Standard deviation PER {math.sqrt(varPER)}")
def run(train_loader,
val_loader,
model,
criterion,
optimizer,
downsampling_factor,
nEpochs,
pathCheckpoint):
print(f"Starting the training for {nEpochs} epochs")
bestLoss = float('inf')
for epoch in range(nEpochs):
lossTrain = train_step(train_loader, model, criterion,
optimizer, downsampling_factor)
print(f"Epoch {epoch} loss train : {lossTrain}")
lossVal = val_step(val_loader, model, criterion, downsampling_factor)
print(f"Epoch {epoch} loss val : {lossVal}")
if lossVal < bestLoss:
bestLoss = lossVal
state_dict = {'classifier': criterion.state_dict(),
'model': model.state_dict(),
'bestLoss': bestLoss}
torch.save(state_dict, pathCheckpoint)
def get_PER_args(args):
path_args_training = os.path.join(args.output, "args_training.json")
with open(path_args_training, 'rb') as file:
data = json.load(file)
if args.pathDB is None:
args.pathDB = data["pathDB"]
args.file_extension = data["file_extension"]
if args.pathVal is None and args.pathPhone is None:
|
args.pathCheckpoint = data["pathCheckpoint"]
args.no_pretraining = data["no_pretraining"]
args.LSTM = data.get("LSTM", False)
args.seqNorm = data.get("seqNorm", False)
args.dropout = data.get("dropout", False)
args.in_dim = data.get("in_dim", 1)
args.loss_reduction = data.get("loss_reduction", "mean")
return args
if __name__ == "__main__":
torch.multiprocessing.set_start_method('spawn')
parser = argparse.ArgumentParser(description='Simple phone recognition pipeline '
'for the common voices datasets')
subparsers = parser.add_subparsers(dest='command')
parser_train = subparsers.add_parser('train')
parser_train.add_argument('pathDB', type=str,
help='Path to the directory containing the '
'audio data / pre-computed features.')
parser_train.add_argument('pathPhone', type=str,
help='Path to the .txt file containing the '
'phone transcription.')
parser_train.add_argument('pathCheckpoint', type=str,
help='Path to the CPC checkpoint to load. '
'Set to ID to work with pre-cimputed features.')
parser_train.add_argument('--freeze', action='store_true',
help="Freeze the CPC features layers")
parser_train.add_argument('--pathTrain', default=None, type=str,
help='Path to the .txt files containing the '
'list of the training sequences.')
parser_train.add_argument('--pathVal', default=None, type=str,
help='Path to the .txt files containing the '
'list of the validation sequences.')
parser_train.add_argument('--file_extension', type=str, default=".mp3",
help='Extension of the files in the '
'dataset')
parser_train.add_argument('--batchSize', type=int, default=8)
parser_train.add_argument('--nEpochs', type=int, default=30)
parser_train.add_argument('--beta1', type=float, default=0.9,
help='Value of beta1 for the Adam optimizer.')
parser_train.add_argument('--beta2', type=float, default=0.999,
help='Value of beta2 for the Adam optimizer.')
parser_train.add_argument('--epsilon', type=float, default=1e-08,
help='Value of epsilon for the Adam optimizer.')
parser_train.add_argument('--lr', type=float, default=2e-04,
help='Learning rate.')
parser_train.add_argument('-o', '--output', type=str, default='out',
help="Output directory")
parser_train.add_argument('--debug', action='store_true',
help='If activated, will only load a few '
'sequences from the dataset.')
parser_train.add_argument('--no_pretraining', action='store_true',
help='Activate use a randmly initialized '
'network')
parser_train.add_argument('--LSTM', action='store_true',
help='Activate to add a LSTM to the phone '
'classifier')
parser_train.add_argument('--seqNorm', action='store_true',
help='Activate if you want to normalize each '
'batch of features through time before the '
'phone classification.')
parser_train.add_argument('--kernelSize', type=int, default=8,
help='Number of features to concatenate before '
'feeding them to the phone classifier.')
parser_train.add_argument('--dropout', action='store_true')
parser_train.add_argument('--in_dim', type=int, default=1,
help='Dimension of the input data: useful when '
'working with pre-computed features or '
'stereo audio.')
parser_train.add_argument('--loss_reduction', type=str, default='mean',
choices=['mean', 'sum'])
parser_per = subparsers.add_parser('per')
parser_per.add_argument('output', type=str)
parser_per.add_argument('--batchSize', type=int, default=8)
parser_per.add_argument('--debug', action='store_true',
help='If activated, will only load a few '
'sequences from the dataset.')
parser_per.add_argument('--pathDB',
help="For computing the PER on another dataset",
type=str, default=None)
parser_per.add_argument('--pathVal',
help="For computing the PER on specific sequences",
type=str, default=None)
parser_per.add_argument('--pathPhone',
help="For computing the PER on specific sequences",
default=None, type=str)
parser_per.add_argument('--file_extension', type=str, default=".mp3")
parser_per.add_argument('--name', type=str, default="0")
args = parser.parse_args()
if args.command == 'per':
args = get_PER_args(args)
# Output Directory
if not os.path.isdir(args.output):
os.mkdir(args.output)
name = f"_{args.name}" if args.command == "per" else ""
pathLogs = os.path.join(args.output, f'logs_{args.command}{name}.txt')
tee = subprocess.Popen(["tee", pathLogs], stdin=subprocess.PIPE)
os.dup2(tee.stdin.fileno(), sys.stdout.fileno())
phoneLabels, nPhones = parseSeqLabels(args.pathPhone)
inSeqs, _ = findAllSeqs(args.pathDB,
extension=args.file_extension)
# Datasets
if args.command == 'train' and args.pathTrain is not None:
seqTrain = filterSeqs(args.pathTrain, inSeqs)
else:
seqTrain = inSeqs
if args.pathVal is None and args.command == 'train':
random.shuffle(seqTrain)
sizeTrain = int(0.9 * len(seqTrain))
seqTrain, seqVal = seqTrain[:sizeTrain], seqTrain[sizeTrain:]
elif args.pathVal is not None:
seqVal = filterSeqs(args.pathVal, inSeqs)
else:
raise RuntimeError("No validation dataset found for PER computation")
if args.debug:
seqVal = seqVal[:100]
downsampling_factor = 160
if args.pathCheckpoint == 'ID':
downsampling_factor = 1
feature_maker = IDModule()
hiddenGar = args.in_dim
else:
feature_maker, hiddenGar, _ = loadModel([args.pathCheckpoint],
loadStateDict=not args.no_pretraining)
feature_maker.cuda()
feature_maker = torch.nn.DataParallel(feature_maker)
phone_criterion = CTCphone_criterion(hiddenGar, nPhones, args.LSTM,
seqNorm=args.seqNorm,
dropout=args.dropout,
reduction=args.loss_reduction)
phone_criterion.cuda()
phone_criterion = torch.nn.DataParallel(phone_criterion)
print(f"Loading the validation dataset at {args.pathDB}")
datasetVal = SingleSequenceDataset(args.pathDB, seqVal,
phoneLabels, inDim=args.in_dim)
val_loader = DataLoader(datasetVal, batch_size=args.batchSize,
shuffle=True)
# Checkpoint file where the model should be saved
pathCheckpoint = os.path.join(args.output, 'checkpoint.pt')
if args.command == 'train':
feature_maker.optimize = True
if args.freeze:
feature_maker.eval()
feature_maker.optimize = False
for g in feature_maker.parameters():
g.requires_grad = False
if args.debug:
print("debug")
random.shuffle(seqTrain)
seqTrain = seqTrain[:1000]
seqVal = seqVal[:100]
print(f"Loading the training dataset at {args.pathDB}")
datasetTrain = SingleSequenceDataset(args.pathDB, seqTrain,
phoneLabels, inDim=args.in_dim)
train_loader = DataLoader(datasetTrain, batch_size=args.batchSize,
shuffle=True)
# Optimizer
g_params = list(phone_criterion.parameters())
if not args.freeze:
print("Optimizing model")
g_params += list(feature_maker.parameters())
optimizer = torch.optim.Adam(g_params, lr=args.lr,
betas=(args.beta1, args.beta2),
eps=args.epsilon)
pathArgs = os.path.join(args.output, "args_training.json")
with open(pathArgs, 'w') as file:
json.dump(vars(args), file, indent=2)
run(train_loader, val_loader, feature_maker, phone_criterion,
optimizer, downsampling_factor, args.nEpochs, pathCheckpoint)
else:
print(f"Loading data at {pathCheckpoint}")
state_dict = torch.load(pathCheckpoint,
map_location=lambda storage, loc: storage)
if 'bestLoss' in state_dict:
print(f"Best loss : {state_dict['bestLoss']}")
phone_criterion.load_state_dict(state_dict['classifier'])
feature_maker.load_state_dict(state_dict['model'])
pathArgs = os.path.join(args.output,
f"args_validation_{args.name}.json")
with open(pathArgs, 'w') as file:
json.dump(vars(args), file, indent=2)
perStep(val_loader,
feature_maker,
phone_criterion,
downsampling_factor)
| args.pathPhone = data["pathPhone"]
args.pathVal = data["pathVal"] | conditional_block |
common_voices_eval.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os
import torchaudio
from copy import deepcopy
import torch
import time
import random
import math
import json
import subprocess
import sys
import progressbar
from pathlib import Path
from torch.utils.data import Dataset, DataLoader
from torch.multiprocessing import Pool
from cpc.criterion.seq_alignment import get_seq_PER
from cpc.criterion.seq_alignment import beam_search
from cpc.feature_loader import loadModel
from cpc.dataset import findAllSeqs, parseSeqLabels, filterSeqs
def load(path_item):
seq_name = path_item.stem
data = torchaudio.load(str(path_item))[0].view(1, -1)
return seq_name, data
class SingleSequenceDataset(Dataset):
def __init__(self,
pathDB,
seqNames,
phoneLabelsDict,
inDim=1,
transpose=True):
"""
Args:
- path (string): path to the training dataset
- sizeWindow (int): size of the sliding window
- seqNames (list): sequences to load
- phoneLabels (dictionnary): if not None, a dictionnary with the
following entries
"step": size of a labelled window
"$SEQ_NAME": list of phonem labels for
the sequence $SEQ_NAME
"""
self.seqNames = deepcopy(seqNames)
self.pathDB = pathDB
self.phoneLabelsDict = deepcopy(phoneLabelsDict)
self.inDim = inDim
self.transpose = transpose
self.loadSeqs()
def loadSeqs(self):
# Labels
self.seqOffset = [0]
self.phoneLabels = []
self.phoneOffsets = [0]
self.data = []
self.maxSize = 0
self.maxSizePhone = 0
# Data
nprocess = min(30, len(self.seqNames))
start_time = time.time()
to_load = [Path(self.pathDB) / x for _, x in self.seqNames]
with Pool(nprocess) as p:
poolData = p.map(load, to_load)
tmpData = []
poolData.sort()
totSize = 0
minSizePhone = float('inf')
for seqName, seq in poolData:
self.phoneLabels += self.phoneLabelsDict[seqName]
self.phoneOffsets.append(len(self.phoneLabels))
self.maxSizePhone = max(self.maxSizePhone, len(
self.phoneLabelsDict[seqName]))
minSizePhone = min(minSizePhone, len(
self.phoneLabelsDict[seqName]))
sizeSeq = seq.size(1)
self.maxSize = max(self.maxSize, sizeSeq)
totSize += sizeSeq
tmpData.append(seq)
self.seqOffset.append(self.seqOffset[-1] + sizeSeq)
del seq
self.data = torch.cat(tmpData, dim=1)
self.phoneLabels = torch.tensor(self.phoneLabels, dtype=torch.long)
print(f'Loaded {len(self.phoneOffsets)} sequences '
f'in {time.time() - start_time:.2f} seconds')
print(f'maxSizeSeq : {self.maxSize}')
print(f'maxSizePhone : {self.maxSizePhone}')
print(f"minSizePhone : {minSizePhone}")
print(f'Total size dataset {totSize / (16000 * 3600)} hours')
def __getitem__(self, idx):
offsetStart = self.seqOffset[idx]
offsetEnd = self.seqOffset[idx+1]
offsetPhoneStart = self.phoneOffsets[idx]
offsetPhoneEnd = self.phoneOffsets[idx + 1]
sizeSeq = int(offsetEnd - offsetStart)
sizePhone = int(offsetPhoneEnd - offsetPhoneStart)
outSeq = torch.zeros((self.inDim, self.maxSize))
outPhone = torch.zeros((self.maxSizePhone))
outSeq[:, :sizeSeq] = self.data[:, offsetStart:offsetEnd]
outPhone[:sizePhone] = self.phoneLabels[offsetPhoneStart:offsetPhoneEnd]
return outSeq, torch.tensor([sizeSeq], dtype=torch.long), outPhone.long(), torch.tensor([sizePhone], dtype=torch.long)
def __len__(self):
return len(self.seqOffset) - 1
class CTCphone_criterion(torch.nn.Module):
def __init__(self, dimEncoder, nPhones, LSTM=False, sizeKernel=8,
seqNorm=False, dropout=False, reduction='sum'):
|
def getPrediction(self, cFeature, featureSize):
B, S, H = cFeature.size()
if self.seqNorm:
for b in range(B):
size = featureSize[b]
m = cFeature[b, :size].mean(dim=0, keepdim=True)
v = cFeature[b, :size].var(dim=0, keepdim=True)
cFeature[b] = (cFeature[b] - m) / torch.sqrt(v + self.epsilon)
if self.useLSTM:
cFeature = self.conv1(cFeature)[0]
cFeature = cFeature.permute(0, 2, 1)
if self.dropout is not None:
cFeature = self.dropout(cFeature)
cFeature = self.PhoneCriterionClassifier(cFeature)
return cFeature.permute(0, 2, 1)
def forward(self, cFeature, featureSize, label, labelSize):
# cFeature.size() : batchSize x seq Size x hidden size
B, S, H = cFeature.size()
predictions = self.getPrediction(cFeature, featureSize)
featureSize /= 4
predictions = cut_data(predictions, featureSize)
featureSize = torch.clamp(featureSize, max=predictions.size(1))
label = cut_data(label, labelSize)
if labelSize.min() <= 0:
print(label, labelSize)
predictions = torch.nn.functional.log_softmax(predictions, dim=2)
predictions = predictions.permute(1, 0, 2)
loss = self.lossCriterion(predictions, label,
featureSize, labelSize).view(1, -1)
if torch.isinf(loss).sum() > 0 or torch.isnan(loss).sum() > 0:
loss = 0
return loss
class IDModule(torch.nn.Module):
def __init__(self):
super(IDModule, self).__init__()
def forward(self, feature, *args):
B, C, S = feature.size()
return feature.permute(0, 2, 1), None, None
def cut_data(seq, sizeSeq):
maxSeq = sizeSeq.max()
return seq[:, :maxSeq]
def prepare_data(data):
seq, sizeSeq, phone, sizePhone = data
seq = seq.cuda(non_blocking=True)
phone = phone.cuda(non_blocking=True)
sizeSeq = sizeSeq.cuda(non_blocking=True).view(-1)
sizePhone = sizePhone.cuda(non_blocking=True).view(-1)
seq = cut_data(seq.permute(0, 2, 1), sizeSeq).permute(0, 2, 1)
return seq, sizeSeq, phone, sizePhone
def train_step(train_loader,
model,
criterion,
optimizer,
downsampling_factor):
if model.optimize:
model.train()
criterion.train()
avg_loss = 0
nItems = 0
for data in train_loader:
optimizer.zero_grad()
seq, sizeSeq, phone, sizePhone = prepare_data(data)
c_feature, _, _ = model(seq, None)
if not model.optimize:
c_feature = c_feature.detach()
sizeSeq = sizeSeq / downsampling_factor
loss = criterion(c_feature, sizeSeq, phone, sizePhone)
loss.mean().backward()
avg_loss += loss.mean().item()
nItems += 1
optimizer.step()
return avg_loss / nItems
def val_step(val_loader,
model,
criterion,
downsampling_factor):
model.eval()
criterion.eval()
avg_loss = 0
nItems = 0
for data in val_loader:
with torch.no_grad():
seq, sizeSeq, phone, sizePhone = prepare_data(data)
c_feature, _, _ = model(seq, None)
sizeSeq = sizeSeq / downsampling_factor
loss = criterion(c_feature, sizeSeq, phone, sizePhone)
avg_loss += loss.mean().item()
nItems += 1
return avg_loss / nItems
def get_per(data):
pred, size_pred, gt, size_gt, blank_label = data
l_ = min(size_pred // 4, pred.size(0))
p_ = pred[:l_].view(l_, -1).numpy()
gt_seq = gt[:size_gt].view(-1).tolist()
predSeq = beam_search(p_, 20, blank_label)[0][1]
out = get_seq_PER(gt_seq, predSeq)
return out
def perStep(val_loader,
model,
criterion,
downsampling_factor):
model.eval()
criterion.eval()
avgPER = 0
varPER = 0
nItems = 0
print("Starting the PER computation through beam search")
bar = progressbar.ProgressBar(maxval=len(val_loader))
bar.start()
for index, data in enumerate(val_loader):
bar.update(index)
with torch.no_grad():
seq, sizeSeq, phone, sizePhone = prepare_data(data)
c_feature, _, _ = model(seq, None)
sizeSeq = sizeSeq / downsampling_factor
predictions = torch.nn.functional.softmax(
criterion.module.getPrediction(c_feature, sizeSeq), dim=2).cpu()
phone = phone.cpu()
sizeSeq = sizeSeq.cpu()
sizePhone = sizePhone.cpu()
bs = c_feature.size(0)
data_per = [(predictions[b], sizeSeq[b], phone[b], sizePhone[b],
criterion.module.BLANK_LABEL) for b in range(bs)]
with Pool(bs) as p:
poolData = p.map(get_per, data_per)
avgPER += sum([x for x in poolData])
varPER += sum([x*x for x in poolData])
nItems += len(poolData)
bar.finish()
avgPER /= nItems
varPER /= nItems
varPER -= avgPER**2
print(f"Average PER {avgPER}")
print(f"Standard deviation PER {math.sqrt(varPER)}")
def run(train_loader,
val_loader,
model,
criterion,
optimizer,
downsampling_factor,
nEpochs,
pathCheckpoint):
print(f"Starting the training for {nEpochs} epochs")
bestLoss = float('inf')
for epoch in range(nEpochs):
lossTrain = train_step(train_loader, model, criterion,
optimizer, downsampling_factor)
print(f"Epoch {epoch} loss train : {lossTrain}")
lossVal = val_step(val_loader, model, criterion, downsampling_factor)
print(f"Epoch {epoch} loss val : {lossVal}")
if lossVal < bestLoss:
bestLoss = lossVal
state_dict = {'classifier': criterion.state_dict(),
'model': model.state_dict(),
'bestLoss': bestLoss}
torch.save(state_dict, pathCheckpoint)
def get_PER_args(args):
path_args_training = os.path.join(args.output, "args_training.json")
with open(path_args_training, 'rb') as file:
data = json.load(file)
if args.pathDB is None:
args.pathDB = data["pathDB"]
args.file_extension = data["file_extension"]
if args.pathVal is None and args.pathPhone is None:
args.pathPhone = data["pathPhone"]
args.pathVal = data["pathVal"]
args.pathCheckpoint = data["pathCheckpoint"]
args.no_pretraining = data["no_pretraining"]
args.LSTM = data.get("LSTM", False)
args.seqNorm = data.get("seqNorm", False)
args.dropout = data.get("dropout", False)
args.in_dim = data.get("in_dim", 1)
args.loss_reduction = data.get("loss_reduction", "mean")
return args
if __name__ == "__main__":
torch.multiprocessing.set_start_method('spawn')
parser = argparse.ArgumentParser(description='Simple phone recognition pipeline '
'for the common voices datasets')
subparsers = parser.add_subparsers(dest='command')
parser_train = subparsers.add_parser('train')
parser_train.add_argument('pathDB', type=str,
help='Path to the directory containing the '
'audio data / pre-computed features.')
parser_train.add_argument('pathPhone', type=str,
help='Path to the .txt file containing the '
'phone transcription.')
parser_train.add_argument('pathCheckpoint', type=str,
help='Path to the CPC checkpoint to load. '
'Set to ID to work with pre-cimputed features.')
parser_train.add_argument('--freeze', action='store_true',
help="Freeze the CPC features layers")
parser_train.add_argument('--pathTrain', default=None, type=str,
help='Path to the .txt files containing the '
'list of the training sequences.')
parser_train.add_argument('--pathVal', default=None, type=str,
help='Path to the .txt files containing the '
'list of the validation sequences.')
parser_train.add_argument('--file_extension', type=str, default=".mp3",
help='Extension of the files in the '
'dataset')
parser_train.add_argument('--batchSize', type=int, default=8)
parser_train.add_argument('--nEpochs', type=int, default=30)
parser_train.add_argument('--beta1', type=float, default=0.9,
help='Value of beta1 for the Adam optimizer.')
parser_train.add_argument('--beta2', type=float, default=0.999,
help='Value of beta2 for the Adam optimizer.')
parser_train.add_argument('--epsilon', type=float, default=1e-08,
help='Value of epsilon for the Adam optimizer.')
parser_train.add_argument('--lr', type=float, default=2e-04,
help='Learning rate.')
parser_train.add_argument('-o', '--output', type=str, default='out',
help="Output directory")
parser_train.add_argument('--debug', action='store_true',
help='If activated, will only load a few '
'sequences from the dataset.')
parser_train.add_argument('--no_pretraining', action='store_true',
help='Activate use a randmly initialized '
'network')
parser_train.add_argument('--LSTM', action='store_true',
help='Activate to add a LSTM to the phone '
'classifier')
parser_train.add_argument('--seqNorm', action='store_true',
help='Activate if you want to normalize each '
'batch of features through time before the '
'phone classification.')
parser_train.add_argument('--kernelSize', type=int, default=8,
help='Number of features to concatenate before '
'feeding them to the phone classifier.')
parser_train.add_argument('--dropout', action='store_true')
parser_train.add_argument('--in_dim', type=int, default=1,
help='Dimension of the input data: useful when '
'working with pre-computed features or '
'stereo audio.')
parser_train.add_argument('--loss_reduction', type=str, default='mean',
choices=['mean', 'sum'])
parser_per = subparsers.add_parser('per')
parser_per.add_argument('output', type=str)
parser_per.add_argument('--batchSize', type=int, default=8)
parser_per.add_argument('--debug', action='store_true',
help='If activated, will only load a few '
'sequences from the dataset.')
parser_per.add_argument('--pathDB',
help="For computing the PER on another dataset",
type=str, default=None)
parser_per.add_argument('--pathVal',
help="For computing the PER on specific sequences",
type=str, default=None)
parser_per.add_argument('--pathPhone',
help="For computing the PER on specific sequences",
default=None, type=str)
parser_per.add_argument('--file_extension', type=str, default=".mp3")
parser_per.add_argument('--name', type=str, default="0")
args = parser.parse_args()
if args.command == 'per':
args = get_PER_args(args)
# Output Directory
if not os.path.isdir(args.output):
os.mkdir(args.output)
name = f"_{args.name}" if args.command == "per" else ""
pathLogs = os.path.join(args.output, f'logs_{args.command}{name}.txt')
tee = subprocess.Popen(["tee", pathLogs], stdin=subprocess.PIPE)
os.dup2(tee.stdin.fileno(), sys.stdout.fileno())
phoneLabels, nPhones = parseSeqLabels(args.pathPhone)
inSeqs, _ = findAllSeqs(args.pathDB,
extension=args.file_extension)
# Datasets
if args.command == 'train' and args.pathTrain is not None:
seqTrain = filterSeqs(args.pathTrain, inSeqs)
else:
seqTrain = inSeqs
if args.pathVal is None and args.command == 'train':
random.shuffle(seqTrain)
sizeTrain = int(0.9 * len(seqTrain))
seqTrain, seqVal = seqTrain[:sizeTrain], seqTrain[sizeTrain:]
elif args.pathVal is not None:
seqVal = filterSeqs(args.pathVal, inSeqs)
else:
raise RuntimeError("No validation dataset found for PER computation")
if args.debug:
seqVal = seqVal[:100]
downsampling_factor = 160
if args.pathCheckpoint == 'ID':
downsampling_factor = 1
feature_maker = IDModule()
hiddenGar = args.in_dim
else:
feature_maker, hiddenGar, _ = loadModel([args.pathCheckpoint],
loadStateDict=not args.no_pretraining)
feature_maker.cuda()
feature_maker = torch.nn.DataParallel(feature_maker)
phone_criterion = CTCphone_criterion(hiddenGar, nPhones, args.LSTM,
seqNorm=args.seqNorm,
dropout=args.dropout,
reduction=args.loss_reduction)
phone_criterion.cuda()
phone_criterion = torch.nn.DataParallel(phone_criterion)
print(f"Loading the validation dataset at {args.pathDB}")
datasetVal = SingleSequenceDataset(args.pathDB, seqVal,
phoneLabels, inDim=args.in_dim)
val_loader = DataLoader(datasetVal, batch_size=args.batchSize,
shuffle=True)
# Checkpoint file where the model should be saved
pathCheckpoint = os.path.join(args.output, 'checkpoint.pt')
if args.command == 'train':
feature_maker.optimize = True
if args.freeze:
feature_maker.eval()
feature_maker.optimize = False
for g in feature_maker.parameters():
g.requires_grad = False
if args.debug:
print("debug")
random.shuffle(seqTrain)
seqTrain = seqTrain[:1000]
seqVal = seqVal[:100]
print(f"Loading the training dataset at {args.pathDB}")
datasetTrain = SingleSequenceDataset(args.pathDB, seqTrain,
phoneLabels, inDim=args.in_dim)
train_loader = DataLoader(datasetTrain, batch_size=args.batchSize,
shuffle=True)
# Optimizer
g_params = list(phone_criterion.parameters())
if not args.freeze:
print("Optimizing model")
g_params += list(feature_maker.parameters())
optimizer = torch.optim.Adam(g_params, lr=args.lr,
betas=(args.beta1, args.beta2),
eps=args.epsilon)
pathArgs = os.path.join(args.output, "args_training.json")
with open(pathArgs, 'w') as file:
json.dump(vars(args), file, indent=2)
run(train_loader, val_loader, feature_maker, phone_criterion,
optimizer, downsampling_factor, args.nEpochs, pathCheckpoint)
else:
print(f"Loading data at {pathCheckpoint}")
state_dict = torch.load(pathCheckpoint,
map_location=lambda storage, loc: storage)
if 'bestLoss' in state_dict:
print(f"Best loss : {state_dict['bestLoss']}")
phone_criterion.load_state_dict(state_dict['classifier'])
feature_maker.load_state_dict(state_dict['model'])
pathArgs = os.path.join(args.output,
f"args_validation_{args.name}.json")
with open(pathArgs, 'w') as file:
json.dump(vars(args), file, indent=2)
perStep(val_loader,
feature_maker,
phone_criterion,
downsampling_factor)
| super(CTCphone_criterion, self).__init__()
self.seqNorm = seqNorm
self.epsilon = 1e-8
self.dropout = torch.nn.Dropout2d(
p=0.5, inplace=False) if dropout else None
self.conv1 = torch.nn.LSTM(dimEncoder, dimEncoder,
num_layers=1, batch_first=True)
self.PhoneCriterionClassifier = torch.nn.Conv1d(
dimEncoder, nPhones + 1, sizeKernel, stride=sizeKernel // 2)
self.lossCriterion = torch.nn.CTCLoss(blank=nPhones,
reduction=reduction,
zero_infinity=True)
self.relu = torch.nn.ReLU()
self.BLANK_LABEL = nPhones
self.useLSTM = LSTM | identifier_body |
common_voices_eval.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os
import torchaudio
from copy import deepcopy
import torch
import time
import random
import math
import json
import subprocess
import sys
import progressbar
from pathlib import Path
from torch.utils.data import Dataset, DataLoader
from torch.multiprocessing import Pool
from cpc.criterion.seq_alignment import get_seq_PER
from cpc.criterion.seq_alignment import beam_search
from cpc.feature_loader import loadModel
from cpc.dataset import findAllSeqs, parseSeqLabels, filterSeqs
def load(path_item):
seq_name = path_item.stem
data = torchaudio.load(str(path_item))[0].view(1, -1)
return seq_name, data
class SingleSequenceDataset(Dataset):
def __init__(self,
pathDB,
seqNames,
phoneLabelsDict,
inDim=1,
transpose=True):
"""
Args:
- path (string): path to the training dataset
- sizeWindow (int): size of the sliding window
- seqNames (list): sequences to load
- phoneLabels (dictionnary): if not None, a dictionnary with the
following entries
"step": size of a labelled window
"$SEQ_NAME": list of phonem labels for
the sequence $SEQ_NAME
"""
self.seqNames = deepcopy(seqNames)
self.pathDB = pathDB
self.phoneLabelsDict = deepcopy(phoneLabelsDict)
self.inDim = inDim
self.transpose = transpose
self.loadSeqs()
def loadSeqs(self):
# Labels
self.seqOffset = [0]
self.phoneLabels = []
self.phoneOffsets = [0]
self.data = []
self.maxSize = 0
self.maxSizePhone = 0
# Data
nprocess = min(30, len(self.seqNames))
start_time = time.time()
to_load = [Path(self.pathDB) / x for _, x in self.seqNames]
with Pool(nprocess) as p:
poolData = p.map(load, to_load)
tmpData = []
poolData.sort()
totSize = 0
minSizePhone = float('inf')
for seqName, seq in poolData:
self.phoneLabels += self.phoneLabelsDict[seqName]
self.phoneOffsets.append(len(self.phoneLabels))
self.maxSizePhone = max(self.maxSizePhone, len(
self.phoneLabelsDict[seqName]))
minSizePhone = min(minSizePhone, len(
self.phoneLabelsDict[seqName]))
sizeSeq = seq.size(1)
self.maxSize = max(self.maxSize, sizeSeq)
totSize += sizeSeq
tmpData.append(seq)
self.seqOffset.append(self.seqOffset[-1] + sizeSeq)
del seq
self.data = torch.cat(tmpData, dim=1)
self.phoneLabels = torch.tensor(self.phoneLabels, dtype=torch.long)
print(f'Loaded {len(self.phoneOffsets)} sequences '
f'in {time.time() - start_time:.2f} seconds')
print(f'maxSizeSeq : {self.maxSize}')
print(f'maxSizePhone : {self.maxSizePhone}')
print(f"minSizePhone : {minSizePhone}")
print(f'Total size dataset {totSize / (16000 * 3600)} hours')
def __getitem__(self, idx):
offsetStart = self.seqOffset[idx]
offsetEnd = self.seqOffset[idx+1]
offsetPhoneStart = self.phoneOffsets[idx]
offsetPhoneEnd = self.phoneOffsets[idx + 1]
sizeSeq = int(offsetEnd - offsetStart)
sizePhone = int(offsetPhoneEnd - offsetPhoneStart)
outSeq = torch.zeros((self.inDim, self.maxSize))
outPhone = torch.zeros((self.maxSizePhone))
outSeq[:, :sizeSeq] = self.data[:, offsetStart:offsetEnd]
outPhone[:sizePhone] = self.phoneLabels[offsetPhoneStart:offsetPhoneEnd]
return outSeq, torch.tensor([sizeSeq], dtype=torch.long), outPhone.long(), torch.tensor([sizePhone], dtype=torch.long)
def __len__(self):
return len(self.seqOffset) - 1
class CTCphone_criterion(torch.nn.Module):
def __init__(self, dimEncoder, nPhones, LSTM=False, sizeKernel=8,
seqNorm=False, dropout=False, reduction='sum'):
super(CTCphone_criterion, self).__init__()
self.seqNorm = seqNorm
self.epsilon = 1e-8
self.dropout = torch.nn.Dropout2d(
p=0.5, inplace=False) if dropout else None
self.conv1 = torch.nn.LSTM(dimEncoder, dimEncoder,
num_layers=1, batch_first=True)
self.PhoneCriterionClassifier = torch.nn.Conv1d(
dimEncoder, nPhones + 1, sizeKernel, stride=sizeKernel // 2)
self.lossCriterion = torch.nn.CTCLoss(blank=nPhones,
reduction=reduction,
zero_infinity=True)
self.relu = torch.nn.ReLU()
self.BLANK_LABEL = nPhones
self.useLSTM = LSTM
def getPrediction(self, cFeature, featureSize):
B, S, H = cFeature.size()
if self.seqNorm:
for b in range(B):
size = featureSize[b]
m = cFeature[b, :size].mean(dim=0, keepdim=True)
v = cFeature[b, :size].var(dim=0, keepdim=True)
cFeature[b] = (cFeature[b] - m) / torch.sqrt(v + self.epsilon)
if self.useLSTM:
cFeature = self.conv1(cFeature)[0]
cFeature = cFeature.permute(0, 2, 1)
if self.dropout is not None:
cFeature = self.dropout(cFeature)
cFeature = self.PhoneCriterionClassifier(cFeature)
return cFeature.permute(0, 2, 1)
def forward(self, cFeature, featureSize, label, labelSize):
# cFeature.size() : batchSize x seq Size x hidden size
B, S, H = cFeature.size()
predictions = self.getPrediction(cFeature, featureSize)
featureSize /= 4
predictions = cut_data(predictions, featureSize)
featureSize = torch.clamp(featureSize, max=predictions.size(1))
label = cut_data(label, labelSize)
if labelSize.min() <= 0:
print(label, labelSize)
predictions = torch.nn.functional.log_softmax(predictions, dim=2)
predictions = predictions.permute(1, 0, 2)
loss = self.lossCriterion(predictions, label,
featureSize, labelSize).view(1, -1)
if torch.isinf(loss).sum() > 0 or torch.isnan(loss).sum() > 0:
loss = 0
return loss
class IDModule(torch.nn.Module):
def __init__(self):
super(IDModule, self).__init__()
def | (self, feature, *args):
B, C, S = feature.size()
return feature.permute(0, 2, 1), None, None
def cut_data(seq, sizeSeq):
maxSeq = sizeSeq.max()
return seq[:, :maxSeq]
def prepare_data(data):
seq, sizeSeq, phone, sizePhone = data
seq = seq.cuda(non_blocking=True)
phone = phone.cuda(non_blocking=True)
sizeSeq = sizeSeq.cuda(non_blocking=True).view(-1)
sizePhone = sizePhone.cuda(non_blocking=True).view(-1)
seq = cut_data(seq.permute(0, 2, 1), sizeSeq).permute(0, 2, 1)
return seq, sizeSeq, phone, sizePhone
def train_step(train_loader,
model,
criterion,
optimizer,
downsampling_factor):
if model.optimize:
model.train()
criterion.train()
avg_loss = 0
nItems = 0
for data in train_loader:
optimizer.zero_grad()
seq, sizeSeq, phone, sizePhone = prepare_data(data)
c_feature, _, _ = model(seq, None)
if not model.optimize:
c_feature = c_feature.detach()
sizeSeq = sizeSeq / downsampling_factor
loss = criterion(c_feature, sizeSeq, phone, sizePhone)
loss.mean().backward()
avg_loss += loss.mean().item()
nItems += 1
optimizer.step()
return avg_loss / nItems
def val_step(val_loader,
model,
criterion,
downsampling_factor):
model.eval()
criterion.eval()
avg_loss = 0
nItems = 0
for data in val_loader:
with torch.no_grad():
seq, sizeSeq, phone, sizePhone = prepare_data(data)
c_feature, _, _ = model(seq, None)
sizeSeq = sizeSeq / downsampling_factor
loss = criterion(c_feature, sizeSeq, phone, sizePhone)
avg_loss += loss.mean().item()
nItems += 1
return avg_loss / nItems
def get_per(data):
pred, size_pred, gt, size_gt, blank_label = data
l_ = min(size_pred // 4, pred.size(0))
p_ = pred[:l_].view(l_, -1).numpy()
gt_seq = gt[:size_gt].view(-1).tolist()
predSeq = beam_search(p_, 20, blank_label)[0][1]
out = get_seq_PER(gt_seq, predSeq)
return out
def perStep(val_loader,
model,
criterion,
downsampling_factor):
model.eval()
criterion.eval()
avgPER = 0
varPER = 0
nItems = 0
print("Starting the PER computation through beam search")
bar = progressbar.ProgressBar(maxval=len(val_loader))
bar.start()
for index, data in enumerate(val_loader):
bar.update(index)
with torch.no_grad():
seq, sizeSeq, phone, sizePhone = prepare_data(data)
c_feature, _, _ = model(seq, None)
sizeSeq = sizeSeq / downsampling_factor
predictions = torch.nn.functional.softmax(
criterion.module.getPrediction(c_feature, sizeSeq), dim=2).cpu()
phone = phone.cpu()
sizeSeq = sizeSeq.cpu()
sizePhone = sizePhone.cpu()
bs = c_feature.size(0)
data_per = [(predictions[b], sizeSeq[b], phone[b], sizePhone[b],
criterion.module.BLANK_LABEL) for b in range(bs)]
with Pool(bs) as p:
poolData = p.map(get_per, data_per)
avgPER += sum([x for x in poolData])
varPER += sum([x*x for x in poolData])
nItems += len(poolData)
bar.finish()
avgPER /= nItems
varPER /= nItems
varPER -= avgPER**2
print(f"Average PER {avgPER}")
print(f"Standard deviation PER {math.sqrt(varPER)}")
def run(train_loader,
val_loader,
model,
criterion,
optimizer,
downsampling_factor,
nEpochs,
pathCheckpoint):
print(f"Starting the training for {nEpochs} epochs")
bestLoss = float('inf')
for epoch in range(nEpochs):
lossTrain = train_step(train_loader, model, criterion,
optimizer, downsampling_factor)
print(f"Epoch {epoch} loss train : {lossTrain}")
lossVal = val_step(val_loader, model, criterion, downsampling_factor)
print(f"Epoch {epoch} loss val : {lossVal}")
if lossVal < bestLoss:
bestLoss = lossVal
state_dict = {'classifier': criterion.state_dict(),
'model': model.state_dict(),
'bestLoss': bestLoss}
torch.save(state_dict, pathCheckpoint)
def get_PER_args(args):
path_args_training = os.path.join(args.output, "args_training.json")
with open(path_args_training, 'rb') as file:
data = json.load(file)
if args.pathDB is None:
args.pathDB = data["pathDB"]
args.file_extension = data["file_extension"]
if args.pathVal is None and args.pathPhone is None:
args.pathPhone = data["pathPhone"]
args.pathVal = data["pathVal"]
args.pathCheckpoint = data["pathCheckpoint"]
args.no_pretraining = data["no_pretraining"]
args.LSTM = data.get("LSTM", False)
args.seqNorm = data.get("seqNorm", False)
args.dropout = data.get("dropout", False)
args.in_dim = data.get("in_dim", 1)
args.loss_reduction = data.get("loss_reduction", "mean")
return args
if __name__ == "__main__":
torch.multiprocessing.set_start_method('spawn')
parser = argparse.ArgumentParser(description='Simple phone recognition pipeline '
'for the common voices datasets')
subparsers = parser.add_subparsers(dest='command')
parser_train = subparsers.add_parser('train')
parser_train.add_argument('pathDB', type=str,
help='Path to the directory containing the '
'audio data / pre-computed features.')
parser_train.add_argument('pathPhone', type=str,
help='Path to the .txt file containing the '
'phone transcription.')
parser_train.add_argument('pathCheckpoint', type=str,
help='Path to the CPC checkpoint to load. '
'Set to ID to work with pre-cimputed features.')
parser_train.add_argument('--freeze', action='store_true',
help="Freeze the CPC features layers")
parser_train.add_argument('--pathTrain', default=None, type=str,
help='Path to the .txt files containing the '
'list of the training sequences.')
parser_train.add_argument('--pathVal', default=None, type=str,
help='Path to the .txt files containing the '
'list of the validation sequences.')
parser_train.add_argument('--file_extension', type=str, default=".mp3",
help='Extension of the files in the '
'dataset')
parser_train.add_argument('--batchSize', type=int, default=8)
parser_train.add_argument('--nEpochs', type=int, default=30)
parser_train.add_argument('--beta1', type=float, default=0.9,
help='Value of beta1 for the Adam optimizer.')
parser_train.add_argument('--beta2', type=float, default=0.999,
help='Value of beta2 for the Adam optimizer.')
parser_train.add_argument('--epsilon', type=float, default=1e-08,
help='Value of epsilon for the Adam optimizer.')
parser_train.add_argument('--lr', type=float, default=2e-04,
help='Learning rate.')
parser_train.add_argument('-o', '--output', type=str, default='out',
help="Output directory")
parser_train.add_argument('--debug', action='store_true',
help='If activated, will only load a few '
'sequences from the dataset.')
parser_train.add_argument('--no_pretraining', action='store_true',
help='Activate use a randmly initialized '
'network')
parser_train.add_argument('--LSTM', action='store_true',
help='Activate to add a LSTM to the phone '
'classifier')
parser_train.add_argument('--seqNorm', action='store_true',
help='Activate if you want to normalize each '
'batch of features through time before the '
'phone classification.')
parser_train.add_argument('--kernelSize', type=int, default=8,
help='Number of features to concatenate before '
'feeding them to the phone classifier.')
parser_train.add_argument('--dropout', action='store_true')
parser_train.add_argument('--in_dim', type=int, default=1,
help='Dimension of the input data: useful when '
'working with pre-computed features or '
'stereo audio.')
parser_train.add_argument('--loss_reduction', type=str, default='mean',
choices=['mean', 'sum'])
parser_per = subparsers.add_parser('per')
parser_per.add_argument('output', type=str)
parser_per.add_argument('--batchSize', type=int, default=8)
parser_per.add_argument('--debug', action='store_true',
help='If activated, will only load a few '
'sequences from the dataset.')
parser_per.add_argument('--pathDB',
help="For computing the PER on another dataset",
type=str, default=None)
parser_per.add_argument('--pathVal',
help="For computing the PER on specific sequences",
type=str, default=None)
parser_per.add_argument('--pathPhone',
help="For computing the PER on specific sequences",
default=None, type=str)
parser_per.add_argument('--file_extension', type=str, default=".mp3")
parser_per.add_argument('--name', type=str, default="0")
args = parser.parse_args()
if args.command == 'per':
args = get_PER_args(args)
# Output Directory
if not os.path.isdir(args.output):
os.mkdir(args.output)
name = f"_{args.name}" if args.command == "per" else ""
pathLogs = os.path.join(args.output, f'logs_{args.command}{name}.txt')
tee = subprocess.Popen(["tee", pathLogs], stdin=subprocess.PIPE)
os.dup2(tee.stdin.fileno(), sys.stdout.fileno())
phoneLabels, nPhones = parseSeqLabels(args.pathPhone)
inSeqs, _ = findAllSeqs(args.pathDB,
extension=args.file_extension)
# Datasets
if args.command == 'train' and args.pathTrain is not None:
seqTrain = filterSeqs(args.pathTrain, inSeqs)
else:
seqTrain = inSeqs
if args.pathVal is None and args.command == 'train':
random.shuffle(seqTrain)
sizeTrain = int(0.9 * len(seqTrain))
seqTrain, seqVal = seqTrain[:sizeTrain], seqTrain[sizeTrain:]
elif args.pathVal is not None:
seqVal = filterSeqs(args.pathVal, inSeqs)
else:
raise RuntimeError("No validation dataset found for PER computation")
if args.debug:
seqVal = seqVal[:100]
downsampling_factor = 160
if args.pathCheckpoint == 'ID':
downsampling_factor = 1
feature_maker = IDModule()
hiddenGar = args.in_dim
else:
feature_maker, hiddenGar, _ = loadModel([args.pathCheckpoint],
loadStateDict=not args.no_pretraining)
feature_maker.cuda()
feature_maker = torch.nn.DataParallel(feature_maker)
phone_criterion = CTCphone_criterion(hiddenGar, nPhones, args.LSTM,
seqNorm=args.seqNorm,
dropout=args.dropout,
reduction=args.loss_reduction)
phone_criterion.cuda()
phone_criterion = torch.nn.DataParallel(phone_criterion)
print(f"Loading the validation dataset at {args.pathDB}")
datasetVal = SingleSequenceDataset(args.pathDB, seqVal,
phoneLabels, inDim=args.in_dim)
val_loader = DataLoader(datasetVal, batch_size=args.batchSize,
shuffle=True)
# Checkpoint file where the model should be saved
pathCheckpoint = os.path.join(args.output, 'checkpoint.pt')
if args.command == 'train':
feature_maker.optimize = True
if args.freeze:
feature_maker.eval()
feature_maker.optimize = False
for g in feature_maker.parameters():
g.requires_grad = False
if args.debug:
print("debug")
random.shuffle(seqTrain)
seqTrain = seqTrain[:1000]
seqVal = seqVal[:100]
print(f"Loading the training dataset at {args.pathDB}")
datasetTrain = SingleSequenceDataset(args.pathDB, seqTrain,
phoneLabels, inDim=args.in_dim)
train_loader = DataLoader(datasetTrain, batch_size=args.batchSize,
shuffle=True)
# Optimizer
g_params = list(phone_criterion.parameters())
if not args.freeze:
print("Optimizing model")
g_params += list(feature_maker.parameters())
optimizer = torch.optim.Adam(g_params, lr=args.lr,
betas=(args.beta1, args.beta2),
eps=args.epsilon)
pathArgs = os.path.join(args.output, "args_training.json")
with open(pathArgs, 'w') as file:
json.dump(vars(args), file, indent=2)
run(train_loader, val_loader, feature_maker, phone_criterion,
optimizer, downsampling_factor, args.nEpochs, pathCheckpoint)
else:
print(f"Loading data at {pathCheckpoint}")
state_dict = torch.load(pathCheckpoint,
map_location=lambda storage, loc: storage)
if 'bestLoss' in state_dict:
print(f"Best loss : {state_dict['bestLoss']}")
phone_criterion.load_state_dict(state_dict['classifier'])
feature_maker.load_state_dict(state_dict['model'])
pathArgs = os.path.join(args.output,
f"args_validation_{args.name}.json")
with open(pathArgs, 'w') as file:
json.dump(vars(args), file, indent=2)
perStep(val_loader,
feature_maker,
phone_criterion,
downsampling_factor)
| forward | identifier_name |
operational_test.go | // Copyright (c) 2013-2014 The btcsuite developers
// Copyright (c) 2015 The Decred developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package ldb_test
import (
"bytes"
"compress/bzip2"
"encoding/gob"
"os"
"path/filepath"
"testing"
"github.com/decred/dcrd/chaincfg"
"github.com/decred/dcrd/chaincfg/chainhash"
"github.com/decred/dcrd/database"
"github.com/decred/dcrd/txscript"
"github.com/decred/dcrd/wire"
"github.com/decred/dcrutil"
"github.com/btcsuite/golangcrypto/ripemd160"
// "github.com/davecgh/go-spew/spew"
)
var network = wire.MainNet
// testDb is used to store db related context for a running test.
// the `cleanUpFunc` *must* be called after each test to maintain db
// consistency across tests.
type testDb struct {
db database.Db
blocks []*dcrutil.Block
dbName string
dbNameVer string
cleanUpFunc func()
}
func setUpTestDb(t *testing.T, dbname string) (*testDb, error) |
func TestOperational(t *testing.T) {
testOperationalMode(t)
}
// testAddrIndexOperations ensures that all normal operations concerning
// the optional address index function correctly.
func testAddrIndexOperations(t *testing.T, db database.Db, newestBlock *dcrutil.Block, newestSha *chainhash.Hash, newestBlockIdx int64) {
// Metadata about the current addr index state should be unset.
sha, height, err := db.FetchAddrIndexTip()
if err != database.ErrAddrIndexDoesNotExist {
t.Fatalf("Address index metadata shouldn't be in db, hasn't been built up yet.")
}
var zeroHash chainhash.Hash
if !sha.IsEqual(&zeroHash) {
t.Fatalf("AddrIndexTip wrong hash got: %s, want %s", sha, &zeroHash)
}
if height != -1 {
t.Fatalf("Addrindex not built up, yet a block index tip has been set to: %d.", height)
}
// Test enforcement of constraints for "limit" and "skip"
var fakeAddr dcrutil.Address
_, _, err = db.FetchTxsForAddr(fakeAddr, -1, 0, false)
if err == nil {
t.Fatalf("Negative value for skip passed, should return an error")
}
_, _, err = db.FetchTxsForAddr(fakeAddr, 0, -1, false)
if err == nil {
t.Fatalf("Negative value for limit passed, should return an error")
}
// Simple test to index outputs(s) of the first tx.
testIndex := make(database.BlockAddrIndex, database.AddrIndexKeySize)
testTx, err := newestBlock.Tx(0)
if err != nil {
t.Fatalf("Block has no transactions, unable to test addr "+
"indexing, err %v", err)
}
// Extract the dest addr from the tx.
_, testAddrs, _, err := txscript.ExtractPkScriptAddrs(testTx.MsgTx().TxOut[0].Version, testTx.MsgTx().TxOut[0].PkScript, &chaincfg.MainNetParams)
if err != nil {
t.Fatalf("Unable to decode tx output, err %v", err)
}
// Extract the hash160 from the output script.
var hash160Bytes [ripemd160.Size]byte
testHash160 := testAddrs[0].(*dcrutil.AddressScriptHash).Hash160()
copy(hash160Bytes[:], testHash160[:])
// Create a fake index.
blktxLoc, _, _ := newestBlock.TxLoc()
testIndex = []*database.TxAddrIndex{
&database.TxAddrIndex{
Hash160: hash160Bytes,
Height: uint32(newestBlockIdx),
TxOffset: uint32(blktxLoc[0].TxStart),
TxLen: uint32(blktxLoc[0].TxLen),
},
}
// Insert our test addr index into the DB.
err = db.UpdateAddrIndexForBlock(newestSha, newestBlockIdx, testIndex)
if err != nil {
t.Fatalf("UpdateAddrIndexForBlock: failed to index"+
" addrs for block #%d (%s) "+
"err %v", newestBlockIdx, newestSha, err)
}
// Chain Tip of address should've been updated.
assertAddrIndexTipIsUpdated(db, t, newestSha, newestBlockIdx)
// Check index retrieval.
txReplies, _, err := db.FetchTxsForAddr(testAddrs[0], 0, 1000, false)
if err != nil {
t.Fatalf("FetchTxsForAddr failed to correctly fetch txs for an "+
"address, err %v", err)
}
// Should have one reply.
if len(txReplies) != 1 {
t.Fatalf("Failed to properly index tx by address.")
}
// Our test tx and indexed tx should have the same sha.
indexedTx := txReplies[0]
if !bytes.Equal(indexedTx.Sha.Bytes(), testTx.Sha().Bytes()) {
t.Fatalf("Failed to fetch proper indexed tx. Expected sha %v, "+
"fetched %v", testTx.Sha(), indexedTx.Sha)
}
// Shut down DB.
db.Sync()
db.Close()
// Re-Open, tip still should be updated to current height and sha.
db, err = database.OpenDB("leveldb", "tstdbopmode")
if err != nil {
t.Fatalf("Unable to re-open created db, err %v", err)
}
assertAddrIndexTipIsUpdated(db, t, newestSha, newestBlockIdx)
// Delete the entire index.
err = db.PurgeAddrIndex()
if err != nil {
t.Fatalf("Couldn't delete address index, err %v", err)
}
// Former index should no longer exist.
txReplies, _, err = db.FetchTxsForAddr(testAddrs[0], 0, 1000, false)
if err != nil {
t.Fatalf("Unable to fetch transactions for address: %v", err)
}
if len(txReplies) != 0 {
t.Fatalf("Address index was not successfully deleted. "+
"Should have 0 tx's indexed, %v were returned.",
len(txReplies))
}
// Tip should be blanked out.
if _, _, err := db.FetchAddrIndexTip(); err != database.ErrAddrIndexDoesNotExist {
t.Fatalf("Address index was not fully deleted.")
}
}
func assertAddrIndexTipIsUpdated(db database.Db, t *testing.T, newestSha *chainhash.Hash, newestBlockIdx int64) {
// Safe to ignore error, since height will be < 0 in "error" case.
sha, height, _ := db.FetchAddrIndexTip()
if newestBlockIdx != height {
t.Fatalf("Height of address index tip failed to update, "+
"expected %v, got %v", newestBlockIdx, height)
}
if !bytes.Equal(newestSha.Bytes(), sha.Bytes()) {
t.Fatalf("Sha of address index tip failed to update, "+
"expected %v, got %v", newestSha, sha)
}
}
func testOperationalMode(t *testing.T) {
// simplified basic operation is:
// 1) fetch block from remote server
// 2) look up all txin (except coinbase in db)
// 3) insert block
// 4) exercise the optional addridex
testDb, err := setUpTestDb(t, "tstdbopmode")
if err != nil {
t.Errorf("Failed to open test database %v", err)
return
}
defer testDb.cleanUpFunc()
err = nil
out:
for height := int64(0); height < int64(len(testDb.blocks)); height++ {
block := testDb.blocks[height]
if height != 0 {
// except for NoVerify which does not allow lookups check inputs
mblock := block.MsgBlock()
//t.Errorf("%v", blockchain.DebugBlockString(block))
parentBlock := testDb.blocks[height-1]
mParentBlock := parentBlock.MsgBlock()
var txneededList []*chainhash.Hash
opSpentInBlock := make(map[wire.OutPoint]struct{})
if dcrutil.IsFlagSet16(dcrutil.BlockValid, mParentBlock.Header.VoteBits) {
for _, tx := range mParentBlock.Transactions {
for _, txin := range tx.TxIn {
if txin.PreviousOutPoint.Index == uint32(4294967295) {
continue
}
if existsInOwnBlockRegTree(mParentBlock, txin.PreviousOutPoint.Hash) {
_, used := opSpentInBlock[txin.PreviousOutPoint]
if !used {
// Origin tx is in the block and so hasn't been
// added yet, continue
opSpentInBlock[txin.PreviousOutPoint] = struct{}{}
continue
} else {
t.Errorf("output ref %v attempted double spend of previously spend output", txin.PreviousOutPoint)
}
}
origintxsha := &txin.PreviousOutPoint.Hash
txneededList = append(txneededList, origintxsha)
exists, err := testDb.db.ExistsTxSha(origintxsha)
if err != nil {
t.Errorf("ExistsTxSha: unexpected error %v ", err)
}
if !exists {
t.Errorf("referenced tx not found %v (height %v)", origintxsha, height)
}
_, err = testDb.db.FetchTxBySha(origintxsha)
if err != nil {
t.Errorf("referenced tx not found %v err %v ", origintxsha, err)
}
}
}
}
for _, stx := range mblock.STransactions {
for _, txin := range stx.TxIn {
if txin.PreviousOutPoint.Index == uint32(4294967295) {
continue
}
if existsInOwnBlockRegTree(mParentBlock, txin.PreviousOutPoint.Hash) {
_, used := opSpentInBlock[txin.PreviousOutPoint]
if !used {
// Origin tx is in the block and so hasn't been
// added yet, continue
opSpentInBlock[txin.PreviousOutPoint] = struct{}{}
continue
} else {
t.Errorf("output ref %v attempted double spend of previously spend output", txin.PreviousOutPoint)
}
}
origintxsha := &txin.PreviousOutPoint.Hash
txneededList = append(txneededList, origintxsha)
exists, err := testDb.db.ExistsTxSha(origintxsha)
if err != nil {
t.Errorf("ExistsTxSha: unexpected error %v ", err)
}
if !exists {
t.Errorf("referenced tx not found %v", origintxsha)
}
_, err = testDb.db.FetchTxBySha(origintxsha)
if err != nil {
t.Errorf("referenced tx not found %v err %v ", origintxsha, err)
}
}
}
txlist := testDb.db.FetchUnSpentTxByShaList(txneededList)
for _, txe := range txlist {
if txe.Err != nil {
t.Errorf("tx list fetch failed %v err %v ", txe.Sha, txe.Err)
break out
}
}
}
newheight, err := testDb.db.InsertBlock(block)
if err != nil {
t.Errorf("failed to insert block %v err %v", height, err)
break out
}
if newheight != height {
t.Errorf("height mismatch expect %v returned %v", height, newheight)
break out
}
newSha, blkid, err := testDb.db.NewestSha()
if err != nil {
t.Errorf("failed to obtain latest sha %v %v", height, err)
}
if blkid != height {
t.Errorf("height does not match latest block height %v %v %v", blkid, height, err)
}
blkSha := block.Sha()
if *newSha != *blkSha {
t.Errorf("Newest block sha does not match freshly inserted one %v %v %v ", newSha, blkSha, err)
}
}
// now that the db is populated, do some additional tests
testFetchHeightRange(t, testDb.db, testDb.blocks)
// Ensure all operations dealing with the optional address index behave
// correctly.
newSha, blkid, err := testDb.db.NewestSha()
testAddrIndexOperations(t, testDb.db, testDb.blocks[len(testDb.blocks)-1], newSha, blkid)
}
func TestBackout(t *testing.T) {
testBackout(t)
}
func testBackout(t *testing.T) {
// simplified basic operation is:
// 1) fetch block from remote server
// 2) look up all txin (except coinbase in db)
// 3) insert block
testDb, err := setUpTestDb(t, "tstdbbackout")
if err != nil {
t.Errorf("Failed to open test database %v", err)
return
}
defer testDb.cleanUpFunc()
if len(testDb.blocks) < 120 {
t.Errorf("test data too small")
return
}
err = nil
for height := int64(0); height < int64(len(testDb.blocks)); height++ {
if height == 100 {
testDb.db.Sync()
}
if height == 120 {
// Simulate unexpected application quit
testDb.db.RollbackClose()
break
}
block := testDb.blocks[height]
newheight, err := testDb.db.InsertBlock(block)
if err != nil {
t.Errorf("failed to insert block %v err %v", height, err)
return
}
if newheight != height {
t.Errorf("height mismatch expect %v returned %v", height, newheight)
return
}
}
// db was closed at height 120, so no cleanup is possible.
// reopen db
testDb.db, err = database.OpenDB("leveldb", testDb.dbName)
if err != nil {
t.Errorf("Failed to open test database %v", err)
return
}
defer func() {
if err := testDb.db.Close(); err != nil {
t.Errorf("Close: unexpected error: %v", err)
}
}()
sha := testDb.blocks[99].Sha()
if _, err := testDb.db.ExistsSha(sha); err != nil {
t.Errorf("ExistsSha: unexpected error: %v", err)
}
_, err = testDb.db.FetchBlockBySha(sha)
if err != nil {
t.Errorf("failed to load block 99 from db %v", err)
return
}
sha = testDb.blocks[119].Sha()
if _, err := testDb.db.ExistsSha(sha); err != nil {
t.Errorf("ExistsSha: unexpected error: %v", err)
}
_, err = testDb.db.FetchBlockBySha(sha)
if err != nil {
t.Errorf("loaded block 119 from db")
return
}
// pick block 118 since tx for block 119 wont be inserted until block 120 is seen to be valid
block := testDb.blocks[118]
mblock := block.MsgBlock()
txsha := mblock.Transactions[0].TxSha()
exists, err := testDb.db.ExistsTxSha(&txsha)
if err != nil {
t.Errorf("ExistsTxSha: unexpected error %v ", err)
}
if !exists {
t.Errorf("tx %v not located db\n", txsha)
}
_, err = testDb.db.FetchTxBySha(&txsha)
if err != nil {
t.Errorf("tx %v not located db\n", txsha)
return
}
}
func loadBlocks(t *testing.T, file string) (blocks []*dcrutil.Block, err error) {
fi, err := os.Open(file)
if err != nil {
t.Errorf("failed to open file %v, err %v", file, err)
return nil, err
}
bcStream := bzip2.NewReader(fi)
defer fi.Close()
// Create a buffer of the read file
bcBuf := new(bytes.Buffer)
bcBuf.ReadFrom(bcStream)
// Create decoder from the buffer and a map to store the data
bcDecoder := gob.NewDecoder(bcBuf)
blockchain := make(map[int64][]byte)
// Decode the blockchain into the map
if err := bcDecoder.Decode(&blockchain); err != nil {
t.Errorf("error decoding test blockchain")
}
blocks = make([]*dcrutil.Block, 0, len(blockchain))
for height := int64(1); height < int64(len(blockchain)); height++ {
block, err := dcrutil.NewBlockFromBytes(blockchain[height])
if err != nil {
t.Errorf("failed to parse block %v", height)
return nil, err
}
block.SetHeight(height - 1)
blocks = append(blocks, block)
}
return
}
func testFetchHeightRange(t *testing.T, db database.Db, blocks []*dcrutil.Block) {
var testincrement int64 = 50
var testcnt int64 = 100
shanames := make([]*chainhash.Hash, len(blocks))
nBlocks := int64(len(blocks))
for i := range blocks {
shanames[i] = blocks[i].Sha()
}
for startheight := int64(0); startheight < nBlocks; startheight += testincrement {
endheight := startheight + testcnt
if endheight > nBlocks {
endheight = database.AllShas
}
shalist, err := db.FetchHeightRange(startheight, endheight)
if err != nil {
t.Errorf("FetchHeightRange: unexpected failure looking up shas %v", err)
}
if endheight == database.AllShas {
if int64(len(shalist)) != nBlocks-startheight {
t.Errorf("FetchHeightRange: expected A %v shas, got %v", nBlocks-startheight, len(shalist))
}
} else {
if int64(len(shalist)) != testcnt {
t.Errorf("FetchHeightRange: expected %v shas, got %v", testcnt, len(shalist))
}
}
for i := range shalist {
sha0 := *shanames[int64(i)+startheight]
sha1 := shalist[i]
if sha0 != sha1 {
t.Errorf("FetchHeightRange: mismatch sha at %v requested range %v %v: %v %v ", int64(i)+startheight, startheight, endheight, sha0, sha1)
}
}
}
}
func TestLimitAndSkipFetchTxsForAddr(t *testing.T) {
testDb, err := setUpTestDb(t, "tstdbtxaddr")
if err != nil {
t.Errorf("Failed to open test database %v", err)
return
}
defer testDb.cleanUpFunc()
_, err = testDb.db.InsertBlock(testDb.blocks[0])
if err != nil {
t.Fatalf("failed to insert initial block")
}
// Insert a block with some fake test transactions. The block will have
// 10 copies of a fake transaction involving same address.
addrString := "DsZEAobx6qJ7K2qaHZBA2vBn66Nor8KYAKk"
targetAddr, err := dcrutil.DecodeAddress(addrString, &chaincfg.MainNetParams)
if err != nil {
t.Fatalf("Unable to decode test address: %v", err)
}
outputScript, err := txscript.PayToAddrScript(targetAddr)
if err != nil {
t.Fatalf("Unable make test pkScript %v", err)
}
fakeTxOut := wire.NewTxOut(10, outputScript)
var emptyHash chainhash.Hash
fakeHeader := wire.NewBlockHeader(0, &emptyHash, &emptyHash, &emptyHash, 1, [6]byte{}, 1, 1, 1, 1, 1, 1, 1, 1, 1, [36]byte{})
msgBlock := wire.NewMsgBlock(fakeHeader)
for i := 0; i < 10; i++ {
mtx := wire.NewMsgTx()
mtx.AddTxOut(fakeTxOut)
msgBlock.AddTransaction(mtx)
}
lastBlock := testDb.blocks[0]
msgBlock.Header.PrevBlock = *lastBlock.Sha()
// Insert the test block into the DB.
testBlock := dcrutil.NewBlock(msgBlock)
newheight, err := testDb.db.InsertBlock(testBlock)
if err != nil {
t.Fatalf("Unable to insert block into db: %v", err)
}
// Create and insert an address index for out test addr.
txLoc, _, _ := testBlock.TxLoc()
index := make(database.BlockAddrIndex, len(txLoc))
for i := range testBlock.Transactions() {
var hash160 [ripemd160.Size]byte
scriptAddr := targetAddr.ScriptAddress()
copy(hash160[:], scriptAddr[:])
txAddrIndex := &database.TxAddrIndex{
Hash160: hash160,
Height: uint32(newheight),
TxOffset: uint32(txLoc[i].TxStart),
TxLen: uint32(txLoc[i].TxLen),
}
index[i] = txAddrIndex
}
blkSha := testBlock.Sha()
err = testDb.db.UpdateAddrIndexForBlock(blkSha, newheight, index)
if err != nil {
t.Fatalf("UpdateAddrIndexForBlock: failed to index"+
" addrs for block #%d (%s) "+
"err %v", newheight, blkSha, err)
return
}
// Try skipping the first 4 results, should get 6 in return.
txReply, txSkipped, err := testDb.db.FetchTxsForAddr(targetAddr, 4, 100000, false)
if err != nil {
t.Fatalf("Unable to fetch transactions for address: %v", err)
}
if txSkipped != 4 {
t.Fatalf("Did not correctly return skipped amount"+
" got %v txs, expected %v", txSkipped, 4)
}
if len(txReply) != 6 {
t.Fatalf("Did not correctly skip forward in txs for address reply"+
" got %v txs, expected %v", len(txReply), 6)
}
// Limit the number of results to 3.
txReply, txSkipped, err = testDb.db.FetchTxsForAddr(targetAddr, 0, 3, false)
if err != nil {
t.Fatalf("Unable to fetch transactions for address: %v", err)
}
if txSkipped != 0 {
t.Fatalf("Did not correctly return skipped amount"+
" got %v txs, expected %v", txSkipped, 0)
}
if len(txReply) != 3 {
t.Fatalf("Did not correctly limit in txs for address reply"+
" got %v txs, expected %v", len(txReply), 3)
}
// Skip 1, limit 5.
txReply, txSkipped, err = testDb.db.FetchTxsForAddr(targetAddr, 1, 5, false)
if err != nil {
t.Fatalf("Unable to fetch transactions for address: %v", err)
}
if txSkipped != 1 {
t.Fatalf("Did not correctly return skipped amount"+
" got %v txs, expected %v", txSkipped, 1)
}
if len(txReply) != 5 {
t.Fatalf("Did not correctly limit in txs for address reply"+
" got %v txs, expected %v", len(txReply), 5)
}
}
| {
// Ignore db remove errors since it means we didn't have an old one.
dbnamever := dbname + ".ver"
_ = os.RemoveAll(dbname)
_ = os.RemoveAll(dbnamever)
db, err := database.CreateDB("leveldb", dbname)
if err != nil {
return nil, err
}
testdatafile := filepath.Join("..", "/../blockchain/testdata", "blocks0to168.bz2")
blocks, err := loadBlocks(t, testdatafile)
if err != nil {
return nil, err
}
cleanUp := func() {
db.Close()
os.RemoveAll(dbname)
os.RemoveAll(dbnamever)
}
return &testDb{
db: db,
blocks: blocks,
dbName: dbname,
dbNameVer: dbnamever,
cleanUpFunc: cleanUp,
}, nil
} | identifier_body |
operational_test.go | // Copyright (c) 2013-2014 The btcsuite developers
// Copyright (c) 2015 The Decred developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package ldb_test
import (
"bytes"
"compress/bzip2"
"encoding/gob"
"os"
"path/filepath"
"testing"
"github.com/decred/dcrd/chaincfg"
"github.com/decred/dcrd/chaincfg/chainhash"
"github.com/decred/dcrd/database"
"github.com/decred/dcrd/txscript"
"github.com/decred/dcrd/wire"
"github.com/decred/dcrutil"
"github.com/btcsuite/golangcrypto/ripemd160"
// "github.com/davecgh/go-spew/spew"
)
var network = wire.MainNet
// testDb is used to store db related context for a running test.
// the `cleanUpFunc` *must* be called after each test to maintain db
// consistency across tests.
type testDb struct {
db database.Db
blocks []*dcrutil.Block
dbName string
dbNameVer string
cleanUpFunc func()
}
func setUpTestDb(t *testing.T, dbname string) (*testDb, error) {
// Ignore db remove errors since it means we didn't have an old one.
dbnamever := dbname + ".ver"
_ = os.RemoveAll(dbname)
_ = os.RemoveAll(dbnamever)
db, err := database.CreateDB("leveldb", dbname)
if err != nil {
return nil, err
}
testdatafile := filepath.Join("..", "/../blockchain/testdata", "blocks0to168.bz2")
blocks, err := loadBlocks(t, testdatafile)
if err != nil {
return nil, err
}
cleanUp := func() {
db.Close()
os.RemoveAll(dbname)
os.RemoveAll(dbnamever)
}
return &testDb{
db: db,
blocks: blocks,
dbName: dbname,
dbNameVer: dbnamever,
cleanUpFunc: cleanUp,
}, nil
}
func TestOperational(t *testing.T) {
testOperationalMode(t)
}
// testAddrIndexOperations ensures that all normal operations concerning
// the optional address index function correctly.
func testAddrIndexOperations(t *testing.T, db database.Db, newestBlock *dcrutil.Block, newestSha *chainhash.Hash, newestBlockIdx int64) {
// Metadata about the current addr index state should be unset.
sha, height, err := db.FetchAddrIndexTip()
if err != database.ErrAddrIndexDoesNotExist {
t.Fatalf("Address index metadata shouldn't be in db, hasn't been built up yet.")
}
var zeroHash chainhash.Hash
if !sha.IsEqual(&zeroHash) {
t.Fatalf("AddrIndexTip wrong hash got: %s, want %s", sha, &zeroHash)
}
if height != -1 {
t.Fatalf("Addrindex not built up, yet a block index tip has been set to: %d.", height)
}
// Test enforcement of constraints for "limit" and "skip"
var fakeAddr dcrutil.Address
_, _, err = db.FetchTxsForAddr(fakeAddr, -1, 0, false)
if err == nil {
t.Fatalf("Negative value for skip passed, should return an error")
}
_, _, err = db.FetchTxsForAddr(fakeAddr, 0, -1, false)
if err == nil {
t.Fatalf("Negative value for limit passed, should return an error")
}
// Simple test to index outputs(s) of the first tx.
testIndex := make(database.BlockAddrIndex, database.AddrIndexKeySize)
testTx, err := newestBlock.Tx(0)
if err != nil {
t.Fatalf("Block has no transactions, unable to test addr "+
"indexing, err %v", err)
}
// Extract the dest addr from the tx.
_, testAddrs, _, err := txscript.ExtractPkScriptAddrs(testTx.MsgTx().TxOut[0].Version, testTx.MsgTx().TxOut[0].PkScript, &chaincfg.MainNetParams)
if err != nil {
t.Fatalf("Unable to decode tx output, err %v", err)
}
// Extract the hash160 from the output script.
var hash160Bytes [ripemd160.Size]byte
testHash160 := testAddrs[0].(*dcrutil.AddressScriptHash).Hash160()
copy(hash160Bytes[:], testHash160[:])
// Create a fake index.
blktxLoc, _, _ := newestBlock.TxLoc()
testIndex = []*database.TxAddrIndex{
&database.TxAddrIndex{
Hash160: hash160Bytes,
Height: uint32(newestBlockIdx),
TxOffset: uint32(blktxLoc[0].TxStart),
TxLen: uint32(blktxLoc[0].TxLen),
},
}
// Insert our test addr index into the DB.
err = db.UpdateAddrIndexForBlock(newestSha, newestBlockIdx, testIndex)
if err != nil {
t.Fatalf("UpdateAddrIndexForBlock: failed to index"+
" addrs for block #%d (%s) "+
"err %v", newestBlockIdx, newestSha, err)
}
// Chain Tip of address should've been updated.
assertAddrIndexTipIsUpdated(db, t, newestSha, newestBlockIdx)
// Check index retrieval.
txReplies, _, err := db.FetchTxsForAddr(testAddrs[0], 0, 1000, false)
if err != nil {
t.Fatalf("FetchTxsForAddr failed to correctly fetch txs for an "+
"address, err %v", err)
}
// Should have one reply.
if len(txReplies) != 1 {
t.Fatalf("Failed to properly index tx by address.")
}
// Our test tx and indexed tx should have the same sha.
indexedTx := txReplies[0]
if !bytes.Equal(indexedTx.Sha.Bytes(), testTx.Sha().Bytes()) {
t.Fatalf("Failed to fetch proper indexed tx. Expected sha %v, "+
"fetched %v", testTx.Sha(), indexedTx.Sha)
}
// Shut down DB.
db.Sync()
db.Close()
// Re-Open, tip still should be updated to current height and sha.
db, err = database.OpenDB("leveldb", "tstdbopmode")
if err != nil {
t.Fatalf("Unable to re-open created db, err %v", err)
}
assertAddrIndexTipIsUpdated(db, t, newestSha, newestBlockIdx)
// Delete the entire index.
err = db.PurgeAddrIndex()
if err != nil {
t.Fatalf("Couldn't delete address index, err %v", err)
}
// Former index should no longer exist.
txReplies, _, err = db.FetchTxsForAddr(testAddrs[0], 0, 1000, false)
if err != nil {
t.Fatalf("Unable to fetch transactions for address: %v", err)
}
if len(txReplies) != 0 {
t.Fatalf("Address index was not successfully deleted. "+
"Should have 0 tx's indexed, %v were returned.",
len(txReplies))
}
// Tip should be blanked out.
if _, _, err := db.FetchAddrIndexTip(); err != database.ErrAddrIndexDoesNotExist {
t.Fatalf("Address index was not fully deleted.")
}
}
func assertAddrIndexTipIsUpdated(db database.Db, t *testing.T, newestSha *chainhash.Hash, newestBlockIdx int64) {
// Safe to ignore error, since height will be < 0 in "error" case.
sha, height, _ := db.FetchAddrIndexTip()
if newestBlockIdx != height {
t.Fatalf("Height of address index tip failed to update, "+
"expected %v, got %v", newestBlockIdx, height)
}
if !bytes.Equal(newestSha.Bytes(), sha.Bytes()) {
t.Fatalf("Sha of address index tip failed to update, "+
"expected %v, got %v", newestSha, sha)
}
}
func testOperationalMode(t *testing.T) {
// simplified basic operation is:
// 1) fetch block from remote server
// 2) look up all txin (except coinbase in db)
// 3) insert block
// 4) exercise the optional addridex
testDb, err := setUpTestDb(t, "tstdbopmode")
if err != nil {
t.Errorf("Failed to open test database %v", err)
return
}
defer testDb.cleanUpFunc()
err = nil
out:
for height := int64(0); height < int64(len(testDb.blocks)); height++ {
block := testDb.blocks[height]
if height != 0 {
// except for NoVerify which does not allow lookups check inputs
mblock := block.MsgBlock()
//t.Errorf("%v", blockchain.DebugBlockString(block))
parentBlock := testDb.blocks[height-1]
mParentBlock := parentBlock.MsgBlock()
var txneededList []*chainhash.Hash
opSpentInBlock := make(map[wire.OutPoint]struct{})
if dcrutil.IsFlagSet16(dcrutil.BlockValid, mParentBlock.Header.VoteBits) {
for _, tx := range mParentBlock.Transactions {
for _, txin := range tx.TxIn {
if txin.PreviousOutPoint.Index == uint32(4294967295) {
continue
}
if existsInOwnBlockRegTree(mParentBlock, txin.PreviousOutPoint.Hash) {
_, used := opSpentInBlock[txin.PreviousOutPoint]
if !used {
// Origin tx is in the block and so hasn't been
// added yet, continue
opSpentInBlock[txin.PreviousOutPoint] = struct{}{}
continue
} else {
t.Errorf("output ref %v attempted double spend of previously spend output", txin.PreviousOutPoint)
}
}
origintxsha := &txin.PreviousOutPoint.Hash
txneededList = append(txneededList, origintxsha)
exists, err := testDb.db.ExistsTxSha(origintxsha)
if err != nil {
t.Errorf("ExistsTxSha: unexpected error %v ", err)
}
if !exists {
t.Errorf("referenced tx not found %v (height %v)", origintxsha, height)
}
_, err = testDb.db.FetchTxBySha(origintxsha)
if err != nil {
t.Errorf("referenced tx not found %v err %v ", origintxsha, err)
}
}
}
}
for _, stx := range mblock.STransactions {
for _, txin := range stx.TxIn {
if txin.PreviousOutPoint.Index == uint32(4294967295) {
continue
}
if existsInOwnBlockRegTree(mParentBlock, txin.PreviousOutPoint.Hash) {
_, used := opSpentInBlock[txin.PreviousOutPoint]
if !used {
// Origin tx is in the block and so hasn't been
// added yet, continue
opSpentInBlock[txin.PreviousOutPoint] = struct{}{}
continue
} else {
t.Errorf("output ref %v attempted double spend of previously spend output", txin.PreviousOutPoint)
}
}
origintxsha := &txin.PreviousOutPoint.Hash
txneededList = append(txneededList, origintxsha)
exists, err := testDb.db.ExistsTxSha(origintxsha)
if err != nil {
t.Errorf("ExistsTxSha: unexpected error %v ", err)
}
if !exists {
t.Errorf("referenced tx not found %v", origintxsha)
}
_, err = testDb.db.FetchTxBySha(origintxsha)
if err != nil {
t.Errorf("referenced tx not found %v err %v ", origintxsha, err)
}
}
}
txlist := testDb.db.FetchUnSpentTxByShaList(txneededList)
for _, txe := range txlist {
if txe.Err != nil {
t.Errorf("tx list fetch failed %v err %v ", txe.Sha, txe.Err)
break out
}
}
}
newheight, err := testDb.db.InsertBlock(block)
if err != nil {
t.Errorf("failed to insert block %v err %v", height, err)
break out
}
if newheight != height {
t.Errorf("height mismatch expect %v returned %v", height, newheight)
break out
}
newSha, blkid, err := testDb.db.NewestSha()
if err != nil |
if blkid != height {
t.Errorf("height does not match latest block height %v %v %v", blkid, height, err)
}
blkSha := block.Sha()
if *newSha != *blkSha {
t.Errorf("Newest block sha does not match freshly inserted one %v %v %v ", newSha, blkSha, err)
}
}
// now that the db is populated, do some additional tests
testFetchHeightRange(t, testDb.db, testDb.blocks)
// Ensure all operations dealing with the optional address index behave
// correctly.
newSha, blkid, err := testDb.db.NewestSha()
testAddrIndexOperations(t, testDb.db, testDb.blocks[len(testDb.blocks)-1], newSha, blkid)
}
func TestBackout(t *testing.T) {
testBackout(t)
}
func testBackout(t *testing.T) {
// simplified basic operation is:
// 1) fetch block from remote server
// 2) look up all txin (except coinbase in db)
// 3) insert block
testDb, err := setUpTestDb(t, "tstdbbackout")
if err != nil {
t.Errorf("Failed to open test database %v", err)
return
}
defer testDb.cleanUpFunc()
if len(testDb.blocks) < 120 {
t.Errorf("test data too small")
return
}
err = nil
for height := int64(0); height < int64(len(testDb.blocks)); height++ {
if height == 100 {
testDb.db.Sync()
}
if height == 120 {
// Simulate unexpected application quit
testDb.db.RollbackClose()
break
}
block := testDb.blocks[height]
newheight, err := testDb.db.InsertBlock(block)
if err != nil {
t.Errorf("failed to insert block %v err %v", height, err)
return
}
if newheight != height {
t.Errorf("height mismatch expect %v returned %v", height, newheight)
return
}
}
// db was closed at height 120, so no cleanup is possible.
// reopen db
testDb.db, err = database.OpenDB("leveldb", testDb.dbName)
if err != nil {
t.Errorf("Failed to open test database %v", err)
return
}
defer func() {
if err := testDb.db.Close(); err != nil {
t.Errorf("Close: unexpected error: %v", err)
}
}()
sha := testDb.blocks[99].Sha()
if _, err := testDb.db.ExistsSha(sha); err != nil {
t.Errorf("ExistsSha: unexpected error: %v", err)
}
_, err = testDb.db.FetchBlockBySha(sha)
if err != nil {
t.Errorf("failed to load block 99 from db %v", err)
return
}
sha = testDb.blocks[119].Sha()
if _, err := testDb.db.ExistsSha(sha); err != nil {
t.Errorf("ExistsSha: unexpected error: %v", err)
}
_, err = testDb.db.FetchBlockBySha(sha)
if err != nil {
t.Errorf("loaded block 119 from db")
return
}
// pick block 118 since tx for block 119 wont be inserted until block 120 is seen to be valid
block := testDb.blocks[118]
mblock := block.MsgBlock()
txsha := mblock.Transactions[0].TxSha()
exists, err := testDb.db.ExistsTxSha(&txsha)
if err != nil {
t.Errorf("ExistsTxSha: unexpected error %v ", err)
}
if !exists {
t.Errorf("tx %v not located db\n", txsha)
}
_, err = testDb.db.FetchTxBySha(&txsha)
if err != nil {
t.Errorf("tx %v not located db\n", txsha)
return
}
}
func loadBlocks(t *testing.T, file string) (blocks []*dcrutil.Block, err error) {
fi, err := os.Open(file)
if err != nil {
t.Errorf("failed to open file %v, err %v", file, err)
return nil, err
}
bcStream := bzip2.NewReader(fi)
defer fi.Close()
// Create a buffer of the read file
bcBuf := new(bytes.Buffer)
bcBuf.ReadFrom(bcStream)
// Create decoder from the buffer and a map to store the data
bcDecoder := gob.NewDecoder(bcBuf)
blockchain := make(map[int64][]byte)
// Decode the blockchain into the map
if err := bcDecoder.Decode(&blockchain); err != nil {
t.Errorf("error decoding test blockchain")
}
blocks = make([]*dcrutil.Block, 0, len(blockchain))
for height := int64(1); height < int64(len(blockchain)); height++ {
block, err := dcrutil.NewBlockFromBytes(blockchain[height])
if err != nil {
t.Errorf("failed to parse block %v", height)
return nil, err
}
block.SetHeight(height - 1)
blocks = append(blocks, block)
}
return
}
func testFetchHeightRange(t *testing.T, db database.Db, blocks []*dcrutil.Block) {
var testincrement int64 = 50
var testcnt int64 = 100
shanames := make([]*chainhash.Hash, len(blocks))
nBlocks := int64(len(blocks))
for i := range blocks {
shanames[i] = blocks[i].Sha()
}
for startheight := int64(0); startheight < nBlocks; startheight += testincrement {
endheight := startheight + testcnt
if endheight > nBlocks {
endheight = database.AllShas
}
shalist, err := db.FetchHeightRange(startheight, endheight)
if err != nil {
t.Errorf("FetchHeightRange: unexpected failure looking up shas %v", err)
}
if endheight == database.AllShas {
if int64(len(shalist)) != nBlocks-startheight {
t.Errorf("FetchHeightRange: expected A %v shas, got %v", nBlocks-startheight, len(shalist))
}
} else {
if int64(len(shalist)) != testcnt {
t.Errorf("FetchHeightRange: expected %v shas, got %v", testcnt, len(shalist))
}
}
for i := range shalist {
sha0 := *shanames[int64(i)+startheight]
sha1 := shalist[i]
if sha0 != sha1 {
t.Errorf("FetchHeightRange: mismatch sha at %v requested range %v %v: %v %v ", int64(i)+startheight, startheight, endheight, sha0, sha1)
}
}
}
}
func TestLimitAndSkipFetchTxsForAddr(t *testing.T) {
testDb, err := setUpTestDb(t, "tstdbtxaddr")
if err != nil {
t.Errorf("Failed to open test database %v", err)
return
}
defer testDb.cleanUpFunc()
_, err = testDb.db.InsertBlock(testDb.blocks[0])
if err != nil {
t.Fatalf("failed to insert initial block")
}
// Insert a block with some fake test transactions. The block will have
// 10 copies of a fake transaction involving same address.
addrString := "DsZEAobx6qJ7K2qaHZBA2vBn66Nor8KYAKk"
targetAddr, err := dcrutil.DecodeAddress(addrString, &chaincfg.MainNetParams)
if err != nil {
t.Fatalf("Unable to decode test address: %v", err)
}
outputScript, err := txscript.PayToAddrScript(targetAddr)
if err != nil {
t.Fatalf("Unable make test pkScript %v", err)
}
fakeTxOut := wire.NewTxOut(10, outputScript)
var emptyHash chainhash.Hash
fakeHeader := wire.NewBlockHeader(0, &emptyHash, &emptyHash, &emptyHash, 1, [6]byte{}, 1, 1, 1, 1, 1, 1, 1, 1, 1, [36]byte{})
msgBlock := wire.NewMsgBlock(fakeHeader)
for i := 0; i < 10; i++ {
mtx := wire.NewMsgTx()
mtx.AddTxOut(fakeTxOut)
msgBlock.AddTransaction(mtx)
}
lastBlock := testDb.blocks[0]
msgBlock.Header.PrevBlock = *lastBlock.Sha()
// Insert the test block into the DB.
testBlock := dcrutil.NewBlock(msgBlock)
newheight, err := testDb.db.InsertBlock(testBlock)
if err != nil {
t.Fatalf("Unable to insert block into db: %v", err)
}
// Create and insert an address index for out test addr.
txLoc, _, _ := testBlock.TxLoc()
index := make(database.BlockAddrIndex, len(txLoc))
for i := range testBlock.Transactions() {
var hash160 [ripemd160.Size]byte
scriptAddr := targetAddr.ScriptAddress()
copy(hash160[:], scriptAddr[:])
txAddrIndex := &database.TxAddrIndex{
Hash160: hash160,
Height: uint32(newheight),
TxOffset: uint32(txLoc[i].TxStart),
TxLen: uint32(txLoc[i].TxLen),
}
index[i] = txAddrIndex
}
blkSha := testBlock.Sha()
err = testDb.db.UpdateAddrIndexForBlock(blkSha, newheight, index)
if err != nil {
t.Fatalf("UpdateAddrIndexForBlock: failed to index"+
" addrs for block #%d (%s) "+
"err %v", newheight, blkSha, err)
return
}
// Try skipping the first 4 results, should get 6 in return.
txReply, txSkipped, err := testDb.db.FetchTxsForAddr(targetAddr, 4, 100000, false)
if err != nil {
t.Fatalf("Unable to fetch transactions for address: %v", err)
}
if txSkipped != 4 {
t.Fatalf("Did not correctly return skipped amount"+
" got %v txs, expected %v", txSkipped, 4)
}
if len(txReply) != 6 {
t.Fatalf("Did not correctly skip forward in txs for address reply"+
" got %v txs, expected %v", len(txReply), 6)
}
// Limit the number of results to 3.
txReply, txSkipped, err = testDb.db.FetchTxsForAddr(targetAddr, 0, 3, false)
if err != nil {
t.Fatalf("Unable to fetch transactions for address: %v", err)
}
if txSkipped != 0 {
t.Fatalf("Did not correctly return skipped amount"+
" got %v txs, expected %v", txSkipped, 0)
}
if len(txReply) != 3 {
t.Fatalf("Did not correctly limit in txs for address reply"+
" got %v txs, expected %v", len(txReply), 3)
}
// Skip 1, limit 5.
txReply, txSkipped, err = testDb.db.FetchTxsForAddr(targetAddr, 1, 5, false)
if err != nil {
t.Fatalf("Unable to fetch transactions for address: %v", err)
}
if txSkipped != 1 {
t.Fatalf("Did not correctly return skipped amount"+
" got %v txs, expected %v", txSkipped, 1)
}
if len(txReply) != 5 {
t.Fatalf("Did not correctly limit in txs for address reply"+
" got %v txs, expected %v", len(txReply), 5)
}
}
| {
t.Errorf("failed to obtain latest sha %v %v", height, err)
} | conditional_block |
operational_test.go | // Copyright (c) 2013-2014 The btcsuite developers
// Copyright (c) 2015 The Decred developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package ldb_test
import (
"bytes"
"compress/bzip2"
"encoding/gob"
"os"
"path/filepath"
"testing"
"github.com/decred/dcrd/chaincfg"
"github.com/decred/dcrd/chaincfg/chainhash"
"github.com/decred/dcrd/database"
"github.com/decred/dcrd/txscript"
"github.com/decred/dcrd/wire"
"github.com/decred/dcrutil"
"github.com/btcsuite/golangcrypto/ripemd160"
// "github.com/davecgh/go-spew/spew"
)
var network = wire.MainNet
// testDb is used to store db related context for a running test.
// the `cleanUpFunc` *must* be called after each test to maintain db
// consistency across tests.
type testDb struct {
db database.Db
blocks []*dcrutil.Block
dbName string
dbNameVer string
cleanUpFunc func()
}
func setUpTestDb(t *testing.T, dbname string) (*testDb, error) {
// Ignore db remove errors since it means we didn't have an old one.
dbnamever := dbname + ".ver"
_ = os.RemoveAll(dbname)
_ = os.RemoveAll(dbnamever)
db, err := database.CreateDB("leveldb", dbname)
if err != nil {
return nil, err
}
testdatafile := filepath.Join("..", "/../blockchain/testdata", "blocks0to168.bz2")
blocks, err := loadBlocks(t, testdatafile)
if err != nil {
return nil, err
}
cleanUp := func() {
db.Close()
os.RemoveAll(dbname)
os.RemoveAll(dbnamever)
}
return &testDb{
db: db,
blocks: blocks,
dbName: dbname,
dbNameVer: dbnamever,
cleanUpFunc: cleanUp,
}, nil
}
func TestOperational(t *testing.T) {
testOperationalMode(t)
}
// testAddrIndexOperations ensures that all normal operations concerning
// the optional address index function correctly.
func testAddrIndexOperations(t *testing.T, db database.Db, newestBlock *dcrutil.Block, newestSha *chainhash.Hash, newestBlockIdx int64) {
// Metadata about the current addr index state should be unset.
sha, height, err := db.FetchAddrIndexTip()
if err != database.ErrAddrIndexDoesNotExist {
t.Fatalf("Address index metadata shouldn't be in db, hasn't been built up yet.")
}
var zeroHash chainhash.Hash
if !sha.IsEqual(&zeroHash) {
t.Fatalf("AddrIndexTip wrong hash got: %s, want %s", sha, &zeroHash)
}
if height != -1 {
t.Fatalf("Addrindex not built up, yet a block index tip has been set to: %d.", height)
}
// Test enforcement of constraints for "limit" and "skip"
var fakeAddr dcrutil.Address
_, _, err = db.FetchTxsForAddr(fakeAddr, -1, 0, false)
if err == nil {
t.Fatalf("Negative value for skip passed, should return an error")
}
_, _, err = db.FetchTxsForAddr(fakeAddr, 0, -1, false)
if err == nil {
t.Fatalf("Negative value for limit passed, should return an error")
}
// Simple test to index outputs(s) of the first tx.
testIndex := make(database.BlockAddrIndex, database.AddrIndexKeySize)
testTx, err := newestBlock.Tx(0)
if err != nil {
t.Fatalf("Block has no transactions, unable to test addr "+
"indexing, err %v", err)
}
// Extract the dest addr from the tx.
_, testAddrs, _, err := txscript.ExtractPkScriptAddrs(testTx.MsgTx().TxOut[0].Version, testTx.MsgTx().TxOut[0].PkScript, &chaincfg.MainNetParams)
if err != nil {
t.Fatalf("Unable to decode tx output, err %v", err)
}
// Extract the hash160 from the output script.
var hash160Bytes [ripemd160.Size]byte
testHash160 := testAddrs[0].(*dcrutil.AddressScriptHash).Hash160()
copy(hash160Bytes[:], testHash160[:])
// Create a fake index.
blktxLoc, _, _ := newestBlock.TxLoc()
testIndex = []*database.TxAddrIndex{
&database.TxAddrIndex{
Hash160: hash160Bytes,
Height: uint32(newestBlockIdx),
TxOffset: uint32(blktxLoc[0].TxStart),
TxLen: uint32(blktxLoc[0].TxLen),
},
}
// Insert our test addr index into the DB.
err = db.UpdateAddrIndexForBlock(newestSha, newestBlockIdx, testIndex)
if err != nil {
t.Fatalf("UpdateAddrIndexForBlock: failed to index"+
" addrs for block #%d (%s) "+
"err %v", newestBlockIdx, newestSha, err)
}
// Chain Tip of address should've been updated.
assertAddrIndexTipIsUpdated(db, t, newestSha, newestBlockIdx)
// Check index retrieval.
txReplies, _, err := db.FetchTxsForAddr(testAddrs[0], 0, 1000, false)
if err != nil {
t.Fatalf("FetchTxsForAddr failed to correctly fetch txs for an "+
"address, err %v", err)
}
// Should have one reply.
if len(txReplies) != 1 {
t.Fatalf("Failed to properly index tx by address.")
}
// Our test tx and indexed tx should have the same sha.
indexedTx := txReplies[0]
if !bytes.Equal(indexedTx.Sha.Bytes(), testTx.Sha().Bytes()) {
t.Fatalf("Failed to fetch proper indexed tx. Expected sha %v, "+
"fetched %v", testTx.Sha(), indexedTx.Sha)
}
// Shut down DB.
db.Sync()
db.Close()
// Re-Open, tip still should be updated to current height and sha.
db, err = database.OpenDB("leveldb", "tstdbopmode")
if err != nil {
t.Fatalf("Unable to re-open created db, err %v", err)
}
assertAddrIndexTipIsUpdated(db, t, newestSha, newestBlockIdx)
// Delete the entire index.
err = db.PurgeAddrIndex()
if err != nil {
t.Fatalf("Couldn't delete address index, err %v", err)
}
// Former index should no longer exist.
txReplies, _, err = db.FetchTxsForAddr(testAddrs[0], 0, 1000, false)
if err != nil {
t.Fatalf("Unable to fetch transactions for address: %v", err)
}
if len(txReplies) != 0 {
t.Fatalf("Address index was not successfully deleted. "+
"Should have 0 tx's indexed, %v were returned.",
len(txReplies))
}
// Tip should be blanked out.
if _, _, err := db.FetchAddrIndexTip(); err != database.ErrAddrIndexDoesNotExist {
t.Fatalf("Address index was not fully deleted.")
}
}
func | (db database.Db, t *testing.T, newestSha *chainhash.Hash, newestBlockIdx int64) {
// Safe to ignore error, since height will be < 0 in "error" case.
sha, height, _ := db.FetchAddrIndexTip()
if newestBlockIdx != height {
t.Fatalf("Height of address index tip failed to update, "+
"expected %v, got %v", newestBlockIdx, height)
}
if !bytes.Equal(newestSha.Bytes(), sha.Bytes()) {
t.Fatalf("Sha of address index tip failed to update, "+
"expected %v, got %v", newestSha, sha)
}
}
func testOperationalMode(t *testing.T) {
// simplified basic operation is:
// 1) fetch block from remote server
// 2) look up all txin (except coinbase in db)
// 3) insert block
// 4) exercise the optional addridex
testDb, err := setUpTestDb(t, "tstdbopmode")
if err != nil {
t.Errorf("Failed to open test database %v", err)
return
}
defer testDb.cleanUpFunc()
err = nil
out:
for height := int64(0); height < int64(len(testDb.blocks)); height++ {
block := testDb.blocks[height]
if height != 0 {
// except for NoVerify which does not allow lookups check inputs
mblock := block.MsgBlock()
//t.Errorf("%v", blockchain.DebugBlockString(block))
parentBlock := testDb.blocks[height-1]
mParentBlock := parentBlock.MsgBlock()
var txneededList []*chainhash.Hash
opSpentInBlock := make(map[wire.OutPoint]struct{})
if dcrutil.IsFlagSet16(dcrutil.BlockValid, mParentBlock.Header.VoteBits) {
for _, tx := range mParentBlock.Transactions {
for _, txin := range tx.TxIn {
if txin.PreviousOutPoint.Index == uint32(4294967295) {
continue
}
if existsInOwnBlockRegTree(mParentBlock, txin.PreviousOutPoint.Hash) {
_, used := opSpentInBlock[txin.PreviousOutPoint]
if !used {
// Origin tx is in the block and so hasn't been
// added yet, continue
opSpentInBlock[txin.PreviousOutPoint] = struct{}{}
continue
} else {
t.Errorf("output ref %v attempted double spend of previously spend output", txin.PreviousOutPoint)
}
}
origintxsha := &txin.PreviousOutPoint.Hash
txneededList = append(txneededList, origintxsha)
exists, err := testDb.db.ExistsTxSha(origintxsha)
if err != nil {
t.Errorf("ExistsTxSha: unexpected error %v ", err)
}
if !exists {
t.Errorf("referenced tx not found %v (height %v)", origintxsha, height)
}
_, err = testDb.db.FetchTxBySha(origintxsha)
if err != nil {
t.Errorf("referenced tx not found %v err %v ", origintxsha, err)
}
}
}
}
for _, stx := range mblock.STransactions {
for _, txin := range stx.TxIn {
if txin.PreviousOutPoint.Index == uint32(4294967295) {
continue
}
if existsInOwnBlockRegTree(mParentBlock, txin.PreviousOutPoint.Hash) {
_, used := opSpentInBlock[txin.PreviousOutPoint]
if !used {
// Origin tx is in the block and so hasn't been
// added yet, continue
opSpentInBlock[txin.PreviousOutPoint] = struct{}{}
continue
} else {
t.Errorf("output ref %v attempted double spend of previously spend output", txin.PreviousOutPoint)
}
}
origintxsha := &txin.PreviousOutPoint.Hash
txneededList = append(txneededList, origintxsha)
exists, err := testDb.db.ExistsTxSha(origintxsha)
if err != nil {
t.Errorf("ExistsTxSha: unexpected error %v ", err)
}
if !exists {
t.Errorf("referenced tx not found %v", origintxsha)
}
_, err = testDb.db.FetchTxBySha(origintxsha)
if err != nil {
t.Errorf("referenced tx not found %v err %v ", origintxsha, err)
}
}
}
txlist := testDb.db.FetchUnSpentTxByShaList(txneededList)
for _, txe := range txlist {
if txe.Err != nil {
t.Errorf("tx list fetch failed %v err %v ", txe.Sha, txe.Err)
break out
}
}
}
newheight, err := testDb.db.InsertBlock(block)
if err != nil {
t.Errorf("failed to insert block %v err %v", height, err)
break out
}
if newheight != height {
t.Errorf("height mismatch expect %v returned %v", height, newheight)
break out
}
newSha, blkid, err := testDb.db.NewestSha()
if err != nil {
t.Errorf("failed to obtain latest sha %v %v", height, err)
}
if blkid != height {
t.Errorf("height does not match latest block height %v %v %v", blkid, height, err)
}
blkSha := block.Sha()
if *newSha != *blkSha {
t.Errorf("Newest block sha does not match freshly inserted one %v %v %v ", newSha, blkSha, err)
}
}
// now that the db is populated, do some additional tests
testFetchHeightRange(t, testDb.db, testDb.blocks)
// Ensure all operations dealing with the optional address index behave
// correctly.
newSha, blkid, err := testDb.db.NewestSha()
testAddrIndexOperations(t, testDb.db, testDb.blocks[len(testDb.blocks)-1], newSha, blkid)
}
func TestBackout(t *testing.T) {
testBackout(t)
}
func testBackout(t *testing.T) {
// simplified basic operation is:
// 1) fetch block from remote server
// 2) look up all txin (except coinbase in db)
// 3) insert block
testDb, err := setUpTestDb(t, "tstdbbackout")
if err != nil {
t.Errorf("Failed to open test database %v", err)
return
}
defer testDb.cleanUpFunc()
if len(testDb.blocks) < 120 {
t.Errorf("test data too small")
return
}
err = nil
for height := int64(0); height < int64(len(testDb.blocks)); height++ {
if height == 100 {
testDb.db.Sync()
}
if height == 120 {
// Simulate unexpected application quit
testDb.db.RollbackClose()
break
}
block := testDb.blocks[height]
newheight, err := testDb.db.InsertBlock(block)
if err != nil {
t.Errorf("failed to insert block %v err %v", height, err)
return
}
if newheight != height {
t.Errorf("height mismatch expect %v returned %v", height, newheight)
return
}
}
// db was closed at height 120, so no cleanup is possible.
// reopen db
testDb.db, err = database.OpenDB("leveldb", testDb.dbName)
if err != nil {
t.Errorf("Failed to open test database %v", err)
return
}
defer func() {
if err := testDb.db.Close(); err != nil {
t.Errorf("Close: unexpected error: %v", err)
}
}()
sha := testDb.blocks[99].Sha()
if _, err := testDb.db.ExistsSha(sha); err != nil {
t.Errorf("ExistsSha: unexpected error: %v", err)
}
_, err = testDb.db.FetchBlockBySha(sha)
if err != nil {
t.Errorf("failed to load block 99 from db %v", err)
return
}
sha = testDb.blocks[119].Sha()
if _, err := testDb.db.ExistsSha(sha); err != nil {
t.Errorf("ExistsSha: unexpected error: %v", err)
}
_, err = testDb.db.FetchBlockBySha(sha)
if err != nil {
t.Errorf("loaded block 119 from db")
return
}
// pick block 118 since tx for block 119 wont be inserted until block 120 is seen to be valid
block := testDb.blocks[118]
mblock := block.MsgBlock()
txsha := mblock.Transactions[0].TxSha()
exists, err := testDb.db.ExistsTxSha(&txsha)
if err != nil {
t.Errorf("ExistsTxSha: unexpected error %v ", err)
}
if !exists {
t.Errorf("tx %v not located db\n", txsha)
}
_, err = testDb.db.FetchTxBySha(&txsha)
if err != nil {
t.Errorf("tx %v not located db\n", txsha)
return
}
}
func loadBlocks(t *testing.T, file string) (blocks []*dcrutil.Block, err error) {
fi, err := os.Open(file)
if err != nil {
t.Errorf("failed to open file %v, err %v", file, err)
return nil, err
}
bcStream := bzip2.NewReader(fi)
defer fi.Close()
// Create a buffer of the read file
bcBuf := new(bytes.Buffer)
bcBuf.ReadFrom(bcStream)
// Create decoder from the buffer and a map to store the data
bcDecoder := gob.NewDecoder(bcBuf)
blockchain := make(map[int64][]byte)
// Decode the blockchain into the map
if err := bcDecoder.Decode(&blockchain); err != nil {
t.Errorf("error decoding test blockchain")
}
blocks = make([]*dcrutil.Block, 0, len(blockchain))
for height := int64(1); height < int64(len(blockchain)); height++ {
block, err := dcrutil.NewBlockFromBytes(blockchain[height])
if err != nil {
t.Errorf("failed to parse block %v", height)
return nil, err
}
block.SetHeight(height - 1)
blocks = append(blocks, block)
}
return
}
func testFetchHeightRange(t *testing.T, db database.Db, blocks []*dcrutil.Block) {
var testincrement int64 = 50
var testcnt int64 = 100
shanames := make([]*chainhash.Hash, len(blocks))
nBlocks := int64(len(blocks))
for i := range blocks {
shanames[i] = blocks[i].Sha()
}
for startheight := int64(0); startheight < nBlocks; startheight += testincrement {
endheight := startheight + testcnt
if endheight > nBlocks {
endheight = database.AllShas
}
shalist, err := db.FetchHeightRange(startheight, endheight)
if err != nil {
t.Errorf("FetchHeightRange: unexpected failure looking up shas %v", err)
}
if endheight == database.AllShas {
if int64(len(shalist)) != nBlocks-startheight {
t.Errorf("FetchHeightRange: expected A %v shas, got %v", nBlocks-startheight, len(shalist))
}
} else {
if int64(len(shalist)) != testcnt {
t.Errorf("FetchHeightRange: expected %v shas, got %v", testcnt, len(shalist))
}
}
for i := range shalist {
sha0 := *shanames[int64(i)+startheight]
sha1 := shalist[i]
if sha0 != sha1 {
t.Errorf("FetchHeightRange: mismatch sha at %v requested range %v %v: %v %v ", int64(i)+startheight, startheight, endheight, sha0, sha1)
}
}
}
}
func TestLimitAndSkipFetchTxsForAddr(t *testing.T) {
testDb, err := setUpTestDb(t, "tstdbtxaddr")
if err != nil {
t.Errorf("Failed to open test database %v", err)
return
}
defer testDb.cleanUpFunc()
_, err = testDb.db.InsertBlock(testDb.blocks[0])
if err != nil {
t.Fatalf("failed to insert initial block")
}
// Insert a block with some fake test transactions. The block will have
// 10 copies of a fake transaction involving same address.
addrString := "DsZEAobx6qJ7K2qaHZBA2vBn66Nor8KYAKk"
targetAddr, err := dcrutil.DecodeAddress(addrString, &chaincfg.MainNetParams)
if err != nil {
t.Fatalf("Unable to decode test address: %v", err)
}
outputScript, err := txscript.PayToAddrScript(targetAddr)
if err != nil {
t.Fatalf("Unable make test pkScript %v", err)
}
fakeTxOut := wire.NewTxOut(10, outputScript)
var emptyHash chainhash.Hash
fakeHeader := wire.NewBlockHeader(0, &emptyHash, &emptyHash, &emptyHash, 1, [6]byte{}, 1, 1, 1, 1, 1, 1, 1, 1, 1, [36]byte{})
msgBlock := wire.NewMsgBlock(fakeHeader)
for i := 0; i < 10; i++ {
mtx := wire.NewMsgTx()
mtx.AddTxOut(fakeTxOut)
msgBlock.AddTransaction(mtx)
}
lastBlock := testDb.blocks[0]
msgBlock.Header.PrevBlock = *lastBlock.Sha()
// Insert the test block into the DB.
testBlock := dcrutil.NewBlock(msgBlock)
newheight, err := testDb.db.InsertBlock(testBlock)
if err != nil {
t.Fatalf("Unable to insert block into db: %v", err)
}
// Create and insert an address index for out test addr.
txLoc, _, _ := testBlock.TxLoc()
index := make(database.BlockAddrIndex, len(txLoc))
for i := range testBlock.Transactions() {
var hash160 [ripemd160.Size]byte
scriptAddr := targetAddr.ScriptAddress()
copy(hash160[:], scriptAddr[:])
txAddrIndex := &database.TxAddrIndex{
Hash160: hash160,
Height: uint32(newheight),
TxOffset: uint32(txLoc[i].TxStart),
TxLen: uint32(txLoc[i].TxLen),
}
index[i] = txAddrIndex
}
blkSha := testBlock.Sha()
err = testDb.db.UpdateAddrIndexForBlock(blkSha, newheight, index)
if err != nil {
t.Fatalf("UpdateAddrIndexForBlock: failed to index"+
" addrs for block #%d (%s) "+
"err %v", newheight, blkSha, err)
return
}
// Try skipping the first 4 results, should get 6 in return.
txReply, txSkipped, err := testDb.db.FetchTxsForAddr(targetAddr, 4, 100000, false)
if err != nil {
t.Fatalf("Unable to fetch transactions for address: %v", err)
}
if txSkipped != 4 {
t.Fatalf("Did not correctly return skipped amount"+
" got %v txs, expected %v", txSkipped, 4)
}
if len(txReply) != 6 {
t.Fatalf("Did not correctly skip forward in txs for address reply"+
" got %v txs, expected %v", len(txReply), 6)
}
// Limit the number of results to 3.
txReply, txSkipped, err = testDb.db.FetchTxsForAddr(targetAddr, 0, 3, false)
if err != nil {
t.Fatalf("Unable to fetch transactions for address: %v", err)
}
if txSkipped != 0 {
t.Fatalf("Did not correctly return skipped amount"+
" got %v txs, expected %v", txSkipped, 0)
}
if len(txReply) != 3 {
t.Fatalf("Did not correctly limit in txs for address reply"+
" got %v txs, expected %v", len(txReply), 3)
}
// Skip 1, limit 5.
txReply, txSkipped, err = testDb.db.FetchTxsForAddr(targetAddr, 1, 5, false)
if err != nil {
t.Fatalf("Unable to fetch transactions for address: %v", err)
}
if txSkipped != 1 {
t.Fatalf("Did not correctly return skipped amount"+
" got %v txs, expected %v", txSkipped, 1)
}
if len(txReply) != 5 {
t.Fatalf("Did not correctly limit in txs for address reply"+
" got %v txs, expected %v", len(txReply), 5)
}
}
| assertAddrIndexTipIsUpdated | identifier_name |
operational_test.go | // Copyright (c) 2013-2014 The btcsuite developers
// Copyright (c) 2015 The Decred developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package ldb_test
import (
"bytes"
"compress/bzip2"
"encoding/gob"
"os"
"path/filepath"
"testing"
"github.com/decred/dcrd/chaincfg"
"github.com/decred/dcrd/chaincfg/chainhash"
"github.com/decred/dcrd/database"
"github.com/decred/dcrd/txscript"
"github.com/decred/dcrd/wire"
"github.com/decred/dcrutil"
"github.com/btcsuite/golangcrypto/ripemd160"
// "github.com/davecgh/go-spew/spew"
)
var network = wire.MainNet
// testDb is used to store db related context for a running test.
// the `cleanUpFunc` *must* be called after each test to maintain db
// consistency across tests.
type testDb struct {
db database.Db
blocks []*dcrutil.Block
dbName string
dbNameVer string
cleanUpFunc func()
}
func setUpTestDb(t *testing.T, dbname string) (*testDb, error) {
// Ignore db remove errors since it means we didn't have an old one.
dbnamever := dbname + ".ver"
_ = os.RemoveAll(dbname)
_ = os.RemoveAll(dbnamever)
db, err := database.CreateDB("leveldb", dbname)
if err != nil {
return nil, err
}
testdatafile := filepath.Join("..", "/../blockchain/testdata", "blocks0to168.bz2")
blocks, err := loadBlocks(t, testdatafile)
if err != nil {
return nil, err
}
cleanUp := func() {
db.Close()
os.RemoveAll(dbname)
os.RemoveAll(dbnamever)
}
return &testDb{
db: db,
blocks: blocks,
dbName: dbname,
dbNameVer: dbnamever,
cleanUpFunc: cleanUp,
}, nil
}
func TestOperational(t *testing.T) {
testOperationalMode(t)
}
// testAddrIndexOperations ensures that all normal operations concerning
// the optional address index function correctly.
func testAddrIndexOperations(t *testing.T, db database.Db, newestBlock *dcrutil.Block, newestSha *chainhash.Hash, newestBlockIdx int64) {
// Metadata about the current addr index state should be unset.
sha, height, err := db.FetchAddrIndexTip()
if err != database.ErrAddrIndexDoesNotExist {
t.Fatalf("Address index metadata shouldn't be in db, hasn't been built up yet.")
}
var zeroHash chainhash.Hash
if !sha.IsEqual(&zeroHash) {
t.Fatalf("AddrIndexTip wrong hash got: %s, want %s", sha, &zeroHash)
}
if height != -1 {
t.Fatalf("Addrindex not built up, yet a block index tip has been set to: %d.", height)
}
// Test enforcement of constraints for "limit" and "skip"
var fakeAddr dcrutil.Address
_, _, err = db.FetchTxsForAddr(fakeAddr, -1, 0, false)
if err == nil {
t.Fatalf("Negative value for skip passed, should return an error")
}
_, _, err = db.FetchTxsForAddr(fakeAddr, 0, -1, false)
if err == nil {
t.Fatalf("Negative value for limit passed, should return an error")
}
// Simple test to index outputs(s) of the first tx.
testIndex := make(database.BlockAddrIndex, database.AddrIndexKeySize)
testTx, err := newestBlock.Tx(0)
if err != nil {
t.Fatalf("Block has no transactions, unable to test addr "+
"indexing, err %v", err)
}
// Extract the dest addr from the tx.
_, testAddrs, _, err := txscript.ExtractPkScriptAddrs(testTx.MsgTx().TxOut[0].Version, testTx.MsgTx().TxOut[0].PkScript, &chaincfg.MainNetParams)
if err != nil {
t.Fatalf("Unable to decode tx output, err %v", err)
}
// Extract the hash160 from the output script.
var hash160Bytes [ripemd160.Size]byte
testHash160 := testAddrs[0].(*dcrutil.AddressScriptHash).Hash160()
copy(hash160Bytes[:], testHash160[:])
// Create a fake index.
blktxLoc, _, _ := newestBlock.TxLoc()
testIndex = []*database.TxAddrIndex{
&database.TxAddrIndex{
Hash160: hash160Bytes,
Height: uint32(newestBlockIdx),
TxOffset: uint32(blktxLoc[0].TxStart),
TxLen: uint32(blktxLoc[0].TxLen),
},
}
// Insert our test addr index into the DB.
err = db.UpdateAddrIndexForBlock(newestSha, newestBlockIdx, testIndex)
if err != nil {
t.Fatalf("UpdateAddrIndexForBlock: failed to index"+
" addrs for block #%d (%s) "+
"err %v", newestBlockIdx, newestSha, err)
}
// Chain Tip of address should've been updated.
assertAddrIndexTipIsUpdated(db, t, newestSha, newestBlockIdx)
// Check index retrieval.
txReplies, _, err := db.FetchTxsForAddr(testAddrs[0], 0, 1000, false)
if err != nil {
t.Fatalf("FetchTxsForAddr failed to correctly fetch txs for an "+
"address, err %v", err)
}
// Should have one reply.
if len(txReplies) != 1 {
t.Fatalf("Failed to properly index tx by address.")
}
// Our test tx and indexed tx should have the same sha.
indexedTx := txReplies[0]
if !bytes.Equal(indexedTx.Sha.Bytes(), testTx.Sha().Bytes()) {
t.Fatalf("Failed to fetch proper indexed tx. Expected sha %v, "+
"fetched %v", testTx.Sha(), indexedTx.Sha)
}
// Shut down DB.
db.Sync()
db.Close()
// Re-Open, tip still should be updated to current height and sha.
db, err = database.OpenDB("leveldb", "tstdbopmode")
if err != nil {
t.Fatalf("Unable to re-open created db, err %v", err)
}
assertAddrIndexTipIsUpdated(db, t, newestSha, newestBlockIdx)
// Delete the entire index.
err = db.PurgeAddrIndex()
if err != nil {
t.Fatalf("Couldn't delete address index, err %v", err)
}
// Former index should no longer exist.
txReplies, _, err = db.FetchTxsForAddr(testAddrs[0], 0, 1000, false)
if err != nil {
t.Fatalf("Unable to fetch transactions for address: %v", err)
}
if len(txReplies) != 0 {
t.Fatalf("Address index was not successfully deleted. "+
"Should have 0 tx's indexed, %v were returned.",
len(txReplies))
}
// Tip should be blanked out.
if _, _, err := db.FetchAddrIndexTip(); err != database.ErrAddrIndexDoesNotExist {
t.Fatalf("Address index was not fully deleted.")
}
}
func assertAddrIndexTipIsUpdated(db database.Db, t *testing.T, newestSha *chainhash.Hash, newestBlockIdx int64) {
// Safe to ignore error, since height will be < 0 in "error" case.
sha, height, _ := db.FetchAddrIndexTip()
if newestBlockIdx != height {
t.Fatalf("Height of address index tip failed to update, "+
"expected %v, got %v", newestBlockIdx, height)
}
if !bytes.Equal(newestSha.Bytes(), sha.Bytes()) {
t.Fatalf("Sha of address index tip failed to update, "+
"expected %v, got %v", newestSha, sha)
}
}
func testOperationalMode(t *testing.T) {
// simplified basic operation is:
// 1) fetch block from remote server
// 2) look up all txin (except coinbase in db)
// 3) insert block
// 4) exercise the optional addridex
testDb, err := setUpTestDb(t, "tstdbopmode")
if err != nil {
t.Errorf("Failed to open test database %v", err)
return
}
defer testDb.cleanUpFunc()
err = nil
out:
for height := int64(0); height < int64(len(testDb.blocks)); height++ {
block := testDb.blocks[height]
if height != 0 {
// except for NoVerify which does not allow lookups check inputs
mblock := block.MsgBlock()
//t.Errorf("%v", blockchain.DebugBlockString(block))
parentBlock := testDb.blocks[height-1]
mParentBlock := parentBlock.MsgBlock()
var txneededList []*chainhash.Hash
opSpentInBlock := make(map[wire.OutPoint]struct{})
if dcrutil.IsFlagSet16(dcrutil.BlockValid, mParentBlock.Header.VoteBits) {
for _, tx := range mParentBlock.Transactions {
for _, txin := range tx.TxIn {
if txin.PreviousOutPoint.Index == uint32(4294967295) {
continue
}
if existsInOwnBlockRegTree(mParentBlock, txin.PreviousOutPoint.Hash) {
_, used := opSpentInBlock[txin.PreviousOutPoint]
if !used {
// Origin tx is in the block and so hasn't been
// added yet, continue
opSpentInBlock[txin.PreviousOutPoint] = struct{}{}
continue
} else {
t.Errorf("output ref %v attempted double spend of previously spend output", txin.PreviousOutPoint)
}
}
origintxsha := &txin.PreviousOutPoint.Hash
txneededList = append(txneededList, origintxsha)
exists, err := testDb.db.ExistsTxSha(origintxsha)
if err != nil {
t.Errorf("ExistsTxSha: unexpected error %v ", err)
}
if !exists {
t.Errorf("referenced tx not found %v (height %v)", origintxsha, height)
}
_, err = testDb.db.FetchTxBySha(origintxsha)
if err != nil {
t.Errorf("referenced tx not found %v err %v ", origintxsha, err)
}
}
}
}
for _, stx := range mblock.STransactions {
for _, txin := range stx.TxIn {
if txin.PreviousOutPoint.Index == uint32(4294967295) {
continue
}
if existsInOwnBlockRegTree(mParentBlock, txin.PreviousOutPoint.Hash) {
_, used := opSpentInBlock[txin.PreviousOutPoint]
if !used {
// Origin tx is in the block and so hasn't been
// added yet, continue
opSpentInBlock[txin.PreviousOutPoint] = struct{}{}
continue
} else {
t.Errorf("output ref %v attempted double spend of previously spend output", txin.PreviousOutPoint)
}
}
origintxsha := &txin.PreviousOutPoint.Hash
txneededList = append(txneededList, origintxsha)
exists, err := testDb.db.ExistsTxSha(origintxsha)
if err != nil {
t.Errorf("ExistsTxSha: unexpected error %v ", err)
}
if !exists {
t.Errorf("referenced tx not found %v", origintxsha)
}
_, err = testDb.db.FetchTxBySha(origintxsha)
if err != nil {
t.Errorf("referenced tx not found %v err %v ", origintxsha, err)
}
}
}
txlist := testDb.db.FetchUnSpentTxByShaList(txneededList)
for _, txe := range txlist {
if txe.Err != nil {
t.Errorf("tx list fetch failed %v err %v ", txe.Sha, txe.Err)
break out
}
}
}
newheight, err := testDb.db.InsertBlock(block)
if err != nil {
t.Errorf("failed to insert block %v err %v", height, err)
break out
}
if newheight != height {
t.Errorf("height mismatch expect %v returned %v", height, newheight)
break out
}
newSha, blkid, err := testDb.db.NewestSha()
if err != nil {
t.Errorf("failed to obtain latest sha %v %v", height, err)
}
if blkid != height {
t.Errorf("height does not match latest block height %v %v %v", blkid, height, err)
}
blkSha := block.Sha()
if *newSha != *blkSha {
t.Errorf("Newest block sha does not match freshly inserted one %v %v %v ", newSha, blkSha, err)
}
}
// now that the db is populated, do some additional tests
testFetchHeightRange(t, testDb.db, testDb.blocks)
// Ensure all operations dealing with the optional address index behave
// correctly.
newSha, blkid, err := testDb.db.NewestSha()
testAddrIndexOperations(t, testDb.db, testDb.blocks[len(testDb.blocks)-1], newSha, blkid)
}
func TestBackout(t *testing.T) {
testBackout(t)
}
func testBackout(t *testing.T) {
// simplified basic operation is:
// 1) fetch block from remote server
// 2) look up all txin (except coinbase in db)
// 3) insert block
testDb, err := setUpTestDb(t, "tstdbbackout")
if err != nil {
t.Errorf("Failed to open test database %v", err)
return
}
defer testDb.cleanUpFunc()
if len(testDb.blocks) < 120 {
t.Errorf("test data too small")
return
}
err = nil
for height := int64(0); height < int64(len(testDb.blocks)); height++ {
if height == 100 {
testDb.db.Sync()
}
if height == 120 {
// Simulate unexpected application quit
testDb.db.RollbackClose()
break
}
block := testDb.blocks[height]
newheight, err := testDb.db.InsertBlock(block)
if err != nil {
t.Errorf("failed to insert block %v err %v", height, err)
return
}
if newheight != height {
t.Errorf("height mismatch expect %v returned %v", height, newheight)
return
}
}
// db was closed at height 120, so no cleanup is possible.
// reopen db
testDb.db, err = database.OpenDB("leveldb", testDb.dbName)
if err != nil {
t.Errorf("Failed to open test database %v", err)
return
}
defer func() {
if err := testDb.db.Close(); err != nil {
t.Errorf("Close: unexpected error: %v", err)
}
}()
sha := testDb.blocks[99].Sha()
if _, err := testDb.db.ExistsSha(sha); err != nil {
t.Errorf("ExistsSha: unexpected error: %v", err)
}
_, err = testDb.db.FetchBlockBySha(sha)
if err != nil {
t.Errorf("failed to load block 99 from db %v", err)
return
}
sha = testDb.blocks[119].Sha()
if _, err := testDb.db.ExistsSha(sha); err != nil {
t.Errorf("ExistsSha: unexpected error: %v", err)
}
_, err = testDb.db.FetchBlockBySha(sha)
if err != nil {
t.Errorf("loaded block 119 from db")
return
}
// pick block 118 since tx for block 119 wont be inserted until block 120 is seen to be valid
block := testDb.blocks[118]
mblock := block.MsgBlock()
txsha := mblock.Transactions[0].TxSha()
exists, err := testDb.db.ExistsTxSha(&txsha)
if err != nil {
t.Errorf("ExistsTxSha: unexpected error %v ", err)
}
if !exists {
t.Errorf("tx %v not located db\n", txsha)
}
_, err = testDb.db.FetchTxBySha(&txsha)
if err != nil {
t.Errorf("tx %v not located db\n", txsha)
return
}
}
func loadBlocks(t *testing.T, file string) (blocks []*dcrutil.Block, err error) {
fi, err := os.Open(file)
if err != nil {
t.Errorf("failed to open file %v, err %v", file, err)
return nil, err
}
bcStream := bzip2.NewReader(fi)
defer fi.Close()
// Create a buffer of the read file
bcBuf := new(bytes.Buffer)
bcBuf.ReadFrom(bcStream)
// Create decoder from the buffer and a map to store the data
bcDecoder := gob.NewDecoder(bcBuf)
blockchain := make(map[int64][]byte)
// Decode the blockchain into the map
if err := bcDecoder.Decode(&blockchain); err != nil {
t.Errorf("error decoding test blockchain")
}
blocks = make([]*dcrutil.Block, 0, len(blockchain))
for height := int64(1); height < int64(len(blockchain)); height++ {
block, err := dcrutil.NewBlockFromBytes(blockchain[height])
if err != nil {
t.Errorf("failed to parse block %v", height)
return nil, err
}
block.SetHeight(height - 1)
blocks = append(blocks, block)
}
return
}
func testFetchHeightRange(t *testing.T, db database.Db, blocks []*dcrutil.Block) {
var testincrement int64 = 50
var testcnt int64 = 100
shanames := make([]*chainhash.Hash, len(blocks))
nBlocks := int64(len(blocks))
for i := range blocks {
shanames[i] = blocks[i].Sha()
}
for startheight := int64(0); startheight < nBlocks; startheight += testincrement {
endheight := startheight + testcnt
if endheight > nBlocks {
endheight = database.AllShas
}
shalist, err := db.FetchHeightRange(startheight, endheight)
if err != nil {
t.Errorf("FetchHeightRange: unexpected failure looking up shas %v", err)
}
if endheight == database.AllShas {
if int64(len(shalist)) != nBlocks-startheight {
t.Errorf("FetchHeightRange: expected A %v shas, got %v", nBlocks-startheight, len(shalist))
}
} else {
if int64(len(shalist)) != testcnt {
t.Errorf("FetchHeightRange: expected %v shas, got %v", testcnt, len(shalist))
}
}
for i := range shalist {
sha0 := *shanames[int64(i)+startheight]
sha1 := shalist[i]
if sha0 != sha1 {
t.Errorf("FetchHeightRange: mismatch sha at %v requested range %v %v: %v %v ", int64(i)+startheight, startheight, endheight, sha0, sha1)
}
}
}
}
func TestLimitAndSkipFetchTxsForAddr(t *testing.T) {
testDb, err := setUpTestDb(t, "tstdbtxaddr")
if err != nil {
t.Errorf("Failed to open test database %v", err)
return
}
defer testDb.cleanUpFunc()
_, err = testDb.db.InsertBlock(testDb.blocks[0])
if err != nil {
t.Fatalf("failed to insert initial block")
}
// Insert a block with some fake test transactions. The block will have
// 10 copies of a fake transaction involving same address.
addrString := "DsZEAobx6qJ7K2qaHZBA2vBn66Nor8KYAKk"
targetAddr, err := dcrutil.DecodeAddress(addrString, &chaincfg.MainNetParams)
if err != nil {
t.Fatalf("Unable to decode test address: %v", err)
}
outputScript, err := txscript.PayToAddrScript(targetAddr)
if err != nil {
t.Fatalf("Unable make test pkScript %v", err)
}
fakeTxOut := wire.NewTxOut(10, outputScript)
var emptyHash chainhash.Hash
fakeHeader := wire.NewBlockHeader(0, &emptyHash, &emptyHash, &emptyHash, 1, [6]byte{}, 1, 1, 1, 1, 1, 1, 1, 1, 1, [36]byte{})
msgBlock := wire.NewMsgBlock(fakeHeader)
for i := 0; i < 10; i++ {
mtx := wire.NewMsgTx()
mtx.AddTxOut(fakeTxOut)
msgBlock.AddTransaction(mtx)
}
lastBlock := testDb.blocks[0]
msgBlock.Header.PrevBlock = *lastBlock.Sha()
// Insert the test block into the DB.
testBlock := dcrutil.NewBlock(msgBlock)
newheight, err := testDb.db.InsertBlock(testBlock)
if err != nil {
t.Fatalf("Unable to insert block into db: %v", err)
}
// Create and insert an address index for out test addr.
txLoc, _, _ := testBlock.TxLoc()
index := make(database.BlockAddrIndex, len(txLoc))
for i := range testBlock.Transactions() {
var hash160 [ripemd160.Size]byte
scriptAddr := targetAddr.ScriptAddress()
copy(hash160[:], scriptAddr[:])
txAddrIndex := &database.TxAddrIndex{
Hash160: hash160,
Height: uint32(newheight),
TxOffset: uint32(txLoc[i].TxStart),
TxLen: uint32(txLoc[i].TxLen),
}
index[i] = txAddrIndex
}
blkSha := testBlock.Sha()
err = testDb.db.UpdateAddrIndexForBlock(blkSha, newheight, index)
if err != nil {
t.Fatalf("UpdateAddrIndexForBlock: failed to index"+
" addrs for block #%d (%s) "+
"err %v", newheight, blkSha, err)
return
}
// Try skipping the first 4 results, should get 6 in return.
txReply, txSkipped, err := testDb.db.FetchTxsForAddr(targetAddr, 4, 100000, false)
if err != nil {
t.Fatalf("Unable to fetch transactions for address: %v", err)
}
if txSkipped != 4 {
t.Fatalf("Did not correctly return skipped amount"+
" got %v txs, expected %v", txSkipped, 4)
}
if len(txReply) != 6 {
t.Fatalf("Did not correctly skip forward in txs for address reply"+
" got %v txs, expected %v", len(txReply), 6)
}
// Limit the number of results to 3.
txReply, txSkipped, err = testDb.db.FetchTxsForAddr(targetAddr, 0, 3, false)
if err != nil {
t.Fatalf("Unable to fetch transactions for address: %v", err)
}
if txSkipped != 0 {
t.Fatalf("Did not correctly return skipped amount"+
" got %v txs, expected %v", txSkipped, 0)
}
if len(txReply) != 3 {
t.Fatalf("Did not correctly limit in txs for address reply"+
" got %v txs, expected %v", len(txReply), 3)
}
// Skip 1, limit 5.
txReply, txSkipped, err = testDb.db.FetchTxsForAddr(targetAddr, 1, 5, false)
if err != nil {
t.Fatalf("Unable to fetch transactions for address: %v", err) | t.Fatalf("Did not correctly return skipped amount"+
" got %v txs, expected %v", txSkipped, 1)
}
if len(txReply) != 5 {
t.Fatalf("Did not correctly limit in txs for address reply"+
" got %v txs, expected %v", len(txReply), 5)
}
} | }
if txSkipped != 1 { | random_line_split |
kc_pattern_scanner.py | # -*- coding: utf-8 -*-
"""
Zerodha Kite Connect - candlestick pattern scanner
@author: Mayank Rasu (http://rasuquant.com/wp/)
"""
from kiteconnect import KiteConnect, KiteTicker
import pandas as pd
import datetime as dt
import os
import time
import numpy as np
import sys
cwd = os.chdir("/home/rajkp/code/Projects/Django-Dashboard/boilerplate-code-django-dashboard/app/algos")
#generate trading session
access_token = open("access_token.txt",'r').read()
key_secret = open("api_key.txt",'r').read().split()
kite = KiteConnect(api_key=key_secret[0])
kite.set_access_token(access_token)
#get dump of all NSE instruments
instrument_dump = kite.instruments("NSE")
instrument_df = pd.DataFrame(instrument_dump)
def instrumentLookup(instrument_df,symbol):
"""Looks up instrument token for a given script from instrument dump"""
try:
return instrument_df[instrument_df.tradingsymbol==symbol].instrument_token.values[0]
except:
return -1
def tokenLookup(instrument_df,symbol_list):
"""Looks up instrument token for a given script from instrument dump"""
token_list = []
for symbol in symbol_list:
token_list.append(int(instrument_df[instrument_df.tradingsymbol==symbol].instrument_token.values[0]))
return token_list
def fetchOHLC(ticker,interval,duration):
"""extracts historical data and outputs in the form of dataframe"""
instrument = instrumentLookup(instrument_df,ticker)
data = pd.DataFrame(kite.historical_data(instrument,dt.date.today()-dt.timedelta(duration), dt.date.today(),interval))
data.set_index("date",inplace=True)
return data
def doji(ohlc_df):
"""returns dataframe with doji candle column"""
df = ohlc_df.copy()
avg_candle_size = abs(df["close"] - df["open"]).median()
df["doji"] = abs(df["close"] - df["open"]) <= (0.05 * avg_candle_size)
return df
def maru_bozu(ohlc_df):
"""returns dataframe with maru bozu candle column"""
df = ohlc_df.copy()
avg_candle_size = abs(df["close"] - df["open"]).median()
df["h-c"] = df["high"]-df["close"]
df["l-o"] = df["low"]-df["open"]
df["h-o"] = df["high"]-df["open"]
df["l-c"] = df["low"]-df["close"]
df["maru_bozu"] = np.where((df["close"] - df["open"] > 2*avg_candle_size) & \
(df[["h-c","l-o"]].max(axis=1) < 0.005*avg_candle_size),"maru_bozu_green",
np.where((df["open"] - df["close"] > 2*avg_candle_size) & \
(abs(df[["h-o","l-c"]]).max(axis=1) < 0.005*avg_candle_size),"maru_bozu_red",False))
df.drop(["h-c","l-o","h-o","l-c"],axis=1,inplace=True)
return df
def hammer(ohlc_df):
"""returns dataframe with hammer candle column"""
df = ohlc_df.copy()
df["hammer"] = (((df["high"] - df["low"])>3*(df["open"] - df["close"])) & \
((df["close"] - df["low"])/(.001 + df["high"] - df["low"]) > 0.6) & \
((df["open"] - df["low"])/(.001 + df["high"] - df["low"]) > 0.6)) & \
(abs(df["close"] - df["open"]) > 0.1* (df["high"] - df["low"]))
return df
def shooting_star(ohlc_df):
"""returns dataframe with shooting star candle column"""
df = ohlc_df.copy()
df["sstar"] = (((df["high"] - df["low"])>3*(df["open"] - df["close"])) & \
((df["high"] - df["close"])/(.001 + df["high"] - df["low"]) > 0.6) & \
((df["high"] - df["open"])/(.001 + df["high"] - df["low"]) > 0.6)) & \
(abs(df["close"] - df["open"]) > 0.1* (df["high"] - df["low"]))
return df
def levels(ohlc_day):
"""returns pivot point and support/resistance levels"""
high = round(ohlc_day["high"][-1],2)
low = round(ohlc_day["low"][-1],2)
close = round(ohlc_day["close"][-1],2)
pivot = round((high + low + close)/3,2)
r1 = round((2*pivot - low),2)
r2 = round((pivot + (high - low)),2)
r3 = round((high + 2*(pivot - low)),2)
s1 = round((2*pivot - high),2)
s2 = round((pivot - (high - low)),2)
s3 = round((low - 2*(high - pivot)),2)
return (pivot,r1,r2,r3,s1,s2,s3)
def trend(ohlc_df,n):
"function to assess the trend by analyzing each candle"
df = ohlc_df.copy()
df["up"] = np.where(df["low"]>=df["low"].shift(1),1,0)
df["dn"] = np.where(df["high"]<=df["high"].shift(1),1,0)
if df["close"][-1] > df["open"][-1]:
if df["up"][-1*n:].sum() >= 0.7*n:
return "uptrend"
elif df["open"][-1] > df["close"][-1]:
if df["dn"][-1*n:].sum() >= 0.7*n:
return "downtrend"
else:
return None
def res_sup(ohlc_df,ohlc_day):
"""calculates closest resistance and support levels for a given candle"""
level = ((ohlc_df["close"][-1] + ohlc_df["open"][-1])/2 + (ohlc_df["high"][-1] + ohlc_df["low"][-1])/2)/2
p,r1,r2,r3,s1,s2,s3 = levels(ohlc_day)
l_r1=level-r1
l_r2=level-r2
l_r3=level-r3
l_p=level-p
l_s1=level-s1
l_s2=level-s2
l_s3=level-s3
lev_ser = pd.Series([l_p,l_r1,l_r2,l_r3,l_s1,l_s2,l_s3],index=["p","r1","r2","r3","s1","s2","s3"])
sup = lev_ser[lev_ser>0].idxmin()
res = lev_ser[lev_ser<0].idxmax()
return (eval('{}'.format(res)), eval('{}'.format(sup)))
def candle_type(ohlc_df):
"""returns the candle type of the last candle of an OHLC DF"""
candle = None
if doji(ohlc_df)["doji"][-1] == True:
candle = "doji"
if maru_bozu(ohlc_df)["maru_bozu"][-1] == "maru_bozu_green":
candle = "maru_bozu_green"
if maru_bozu(ohlc_df)["maru_bozu"][-1] == "maru_bozu_red":
candle = "maru_bozu_red"
if shooting_star(ohlc_df)["sstar"][-1] == True:
candle = "shooting_star"
if hammer(ohlc_df)["hammer"][-1] == True:
candle = "hammer"
return candle
def candle_pattern(ohlc_df,ohlc_day):
"""returns the candle pattern identified"""
pattern = None
signi = "low"
avg_candle_size = abs(ohlc_df["close"] - ohlc_df["open"]).median()
sup, res = res_sup(ohlc_df,ohlc_day)
if (sup - 1.5*avg_candle_size) < ohlc_df["close"][-1] < (sup + 1.5*avg_candle_size):
signi = "HIGH"
if (res - 1.5*avg_candle_size) < ohlc_df["close"][-1] < (res + 1.5*avg_candle_size):
signi = "HIGH"
if candle_type(ohlc_df) == 'doji' \
and ohlc_df["close"][-1] > ohlc_df["close"][-2] \
and ohlc_df["close"][-1] > ohlc_df["open"][-1]:
pattern = "doji_bullish"
if candle_type(ohlc_df) == 'doji' \
and ohlc_df["close"][-1] < ohlc_df["close"][-2] \
and ohlc_df["close"][-1] < ohlc_df["open"][-1]:
pattern = "doji_bearish"
if candle_type(ohlc_df) == "maru_bozu_green":
pattern = "maru_bozu_bullish"
if candle_type(ohlc_df) == "maru_bozu_red":
pattern = "maru_bozu_bearish"
if trend(ohlc_df.iloc[:-1,:],7) == "uptrend" and candle_type(ohlc_df) == "hammer":
pattern = "hanging_man_bearish"
if trend(ohlc_df.iloc[:-1,:],7) == "downtrend" and candle_type(ohlc_df) == "hammer":
pattern = "hammer_bullish"
if trend(ohlc_df.iloc[:-1,:],7) == "uptrend" and candle_type(ohlc_df) == "shooting_star":
pattern = "shooting_star_bearish"
if trend(ohlc_df.iloc[:-1,:],7) == "uptrend" \
and candle_type(ohlc_df) == "doji" \
and ohlc_df["high"][-1] < ohlc_df["close"][-2] \
and ohlc_df["low"][-1] > ohlc_df["open"][-2]:
pattern = "harami_cross_bearish"
if trend(ohlc_df.iloc[:-1,:],7) == "downtrend" \
and candle_type(ohlc_df) == "doji" \
and ohlc_df["high"][-1] < ohlc_df["open"][-2] \
and ohlc_df["low"][-1] > ohlc_df["close"][-2]:
pattern = "harami_cross_bullish"
if trend(ohlc_df.iloc[:-1,:],7) == "uptrend" \
and candle_type(ohlc_df) != "doji" \
and ohlc_df["open"][-1] > ohlc_df["high"][-2] \
and ohlc_df["close"][-1] < ohlc_df["low"][-2]:
pattern = "engulfing_bearish"
if trend(ohlc_df.iloc[:-1,:],7) == "downtrend" \
and candle_type(ohlc_df) != "doji" \
and ohlc_df["close"][-1] > ohlc_df["high"][-2] \
and ohlc_df["open"][-1] < ohlc_df["low"][-2]:
|
return "Significance - {}, Pattern - {}".format(signi,pattern)
##############################################################################################
tickers = ["BHEL",
"CONCOR",
"ASTRAL",
"INDHOTEL",
"DALBHARAT",
"COFORGE",
"ITI",
"IPCALAB",
"SUMICHEM",
"DHANI",
"DIXON",
"SUNTV",
"FEDERALBNK",
"OFSS",
"COROMANDEL",
"RECLTD",
"VOLTAS",
"ISEC",
"AUBANK",
"BALKRISIND",
"GSPL",
"HAL",
"POLYCAB",
"TATACHEM",
"SUPREMEIND",
"LTTS",
"BHARATFORG",
"HATSUN",
"TVSMOTOR",
"GMRINFRA",
"TRENT",
"MOTILALOFS",
"L&TFH",
"ATUL",
"AIAENG",
"GLAXO",
"JSWENERGY",
"SKFINDIA",
"IDBI",
"PRESTIGE",
"NHPC",
"ATGL",
"TIINDIA",
"SJVN",
"MINDAIND",
"CANBK",
"VINATIORGA",
"BANKINDIA",
"OIL",
"BBTC",
"PFC",
"GODREJAGRO",
"AAVAS",
"EXIDEIND",
"WHIRLPOOL",
"MAXHEALTH",
"GODREJPROP",
"VBL",
"3MINDIA",
"METROPOLIS",
"ASTRAZEN",
"MGL",
"SRF",
"APOLLOTYRE",
"MFSL",
"BATAINDIA",
"UNIONBANK",
"VGUARD",
"ZYDUSWELL",
"PFIZER",
"BAYERCROP",
"IRCTC",
"CASTROLIND",
"SANOFI",
"ABFRL",
"FORTIS",
"CESC",
"PERSISTENT",
"GODREJIND",
"MPHASIS",
"PHOENIXLTD",
"CHOLAHLDNG",
"DEEPAKNTR",
"HONAUT",
"TATACOMM",
"JMFINANCIL",
"LICHSGFIN",
"CUMMINSIND",
"GICRE",
"THERMAX",
"SOLARINDS",
"SRTRANSFIN",
"LAURUSLABS",
"IDFCFIRSTB",
"CUB",
"NIACL",
"NAVINFLUOR",
"OBEROIRLTY",
"TATAELXSI",
"RELAXO",
"MANAPPURAM",
"CRISIL",
"AMARAJABAT",
"GUJGASLTD",
"BANKBARODA",
"AARTIIND",
"M&MFIN",
"ASHOKLEY",
"PGHL",
"PIIND",
"GILLETTE",
"ABCAPITAL",
"APLLTD",
"CROMPTON",
"NAM-INDIA",
"ABB",
"TTKPRESTIG",
"SUVENPHAR",
"IDEA",
"BEL",
"SCHAEFFLER",
"ZEEL",
"RBLBANK",
"RAMCOCEM",
"GLENMARK",
"RAJESHEXPO",
"SUNDRMFAST",
"EMAMILTD",
"ENDURANCE",
"SYNGENE",
"AKZOINDIA",
"LALPATHLAB",
"HINDZINC",
"TATAPOWER",
"JKCEMENT",
"ESCORTS",
"SUNDARMFIN",
"IIFLWAM",
"IBULHSGFIN",
"CREDITACC",
"KANSAINER",
"MINDTREE",
"PAGEIND",
"CHOLAFIN",
"AJANTPHARM",
"NATCOPHARM",
"JINDALSTEL",
"TORNTPOWER",
"SAIL",
"INDIAMART",
"GAIL",
"HINDPETRO",
"JUBLFOOD",
"ADANITRANS",
"BOSCHLTD",
"IGL",
"SIEMENS",
"PETRONET",
"ICICIPRULI",
"ACC",
"MARICO",
"AMBUJACEM",
"BERGEPAINT",
"PIDILITIND",
"INDUSTOWER",
"ABBOTINDIA",
"BIOCON",
"MCDOWELL-N",
"PGHH",
"DMART",
"MRF",
"DLF",
"GODREJCP",
"COLPAL",
"HDFCAMC",
"YESBANK",
"VEDL",
"BAJAJHLDNG",
"DABUR",
"INDIGO",
"ALKEM",
"CADILAHC",
"MOTHERSUMI",
"HAVELLS",
"ADANIENT",
"UBL",
"SBICARD",
"PEL",
"BANDHANBNK",
"MUTHOOTFIN",
"TORNTPHARM",
"ICICIGI",
"LUPIN",
"LTI",
"APOLLOHOSP",
"ADANIGREEN",
"NAUKRI",
"NMDC",
"PNB",
"AUROPHARMA",
"COALINDIA",
"IOC",
"NTPC",
"ULTRACEMCO",
"BPCL",
"TATASTEEL",
"TATACONSUM",
"SUNPHARMA",
"TATAMOTORS",
"GRASIM",
"SHREECEM",
"SBIN",
"EICHERMOT",
"RELIANCE",
"BAJAJ-AUTO",
"INDUSINDBK",
"BRITANNIA",
"SBILIFE",
"UPL",
"ONGC",
"ADANIPORTS",
"POWERGRID",
"NESTLEIND",
"BHARTIARTL",
"TITAN",
"HEROMOTOCO",
"ASIANPAINT",
"MARUTI",
"ITC",
"ICICIBANK",
"HCLTECH",
"M&M",
"LT",
"INFY",
"BAJAJFINSV",
"DRREDDY",
"HDFCBANK",
"CIPLA",
"HDFCLIFE",
"TCS",
"AXISBANK",
"HINDUNILVR",
"JSWSTEEL",
"TECHM",
"BAJFINANCE",
"WIPRO",
"DIVISLAB",
"KOTAKBANK",
"HINDALCO",
"HDFC"]
#####################################################################################################
def main():
a,b = 0,0
while a < 10:
try:
pos_df = pd.DataFrame(kite.positions()["day"])
break
except:
print("can't extract position data..retrying")
a+=1
while b < 10:
try:
ord_df = pd.DataFrame(kite.orders())
break
except:
print("can't extract order data..retrying")
b+=1
for ticker in tickers:
try:
ohlc = fetchOHLC(ticker, '5minute',5)
ohlc_day = fetchOHLC(ticker, 'day',30)
ohlc_day = ohlc_day.iloc[:-1,:]
cp = candle_pattern(ohlc,ohlc_day)
# print(ticker, ": ",cp)
# if len(pos_df.columns)==0:
# # if macd_xover[ticker] == "bullish" and renko_param[ticker]["brick"] >=2:
# # placeSLOrder(ticker,"buy",quantity,renko_param[ticker]["lower_limit"])
# # if macd_xover[ticker] == "bearish" and renko_param[ticker]["brick"] <=-2:
# # placeSLOrder(ticker,"sell",quantity,renko_param[ticker]["upper_limit"])
# if len(pos_df.columns)!=0 and ticker not in pos_df["tradingsymbol"].tolist():
# # if macd_xover[ticker] == "bullish" and renko_param[ticker]["brick"] >=2:
# # placeSLOrder(ticker,"buy",quantity,renko_param[ticker]["lower_limit"])
# # if macd_xover[ticker] == "bearish" and renko_param[ticker]["brick"] <=-2:
# # placeSLOrder(ticker,"sell",quantity,renko_param[ticker]["upper_limit"])
# if len(pos_df.columns)!=0 and ticker in pos_df["tradingsymbol"].tolist():
# if pos_df[pos_df["tradingsymbol"]==ticker]["quantity"].values[0] == 0:
# if macd_xover[ticker] == "bullish" and renko_param[ticker]["brick"] >=2:
# placeSLOrder(ticker,"buy",quantity,renko_param[ticker]["lower_limit"])
# if macd_xover[ticker] == "bearish" and renko_param[ticker]["brick"] <=-2:
# placeSLOrder(ticker,"sell",quantity,renko_param[ticker]["upper_limit"])
# if pos_df[pos_df["tradingsymbol"]==ticker]["quantity"].values[0] > 0:
# order_id = ord_df.loc[(ord_df['tradingsymbol'] == ticker) & (ord_df['status'].isin(["TRIGGER PENDING","OPEN"]))]["order_id"].values[0]
# ModifyOrder(order_id,renko_param[ticker]["lower_limit"])
# if pos_df[pos_df["tradingsymbol"]==ticker]["quantity"].values[0] < 0:
# order_id = ord_df.loc[(ord_df['tradingsymbol'] == ticker) & (ord_df['status'].isin(["TRIGGER PENDING","OPEN"]))]["order_id"].values[0]
# ModifyOrder(order_id,renko_param[ticker]["upper_limit"])
except:
print("skipping for ",ticker)
# Continuous execution
# starttime=time.time()
# timeout = time.time() + 60*60*1 # 60 seconds times 60 meaning the script will run for 1 hr
# while time.time() <= timeout:
# try:
# print("passthrough at ",time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())))
# main()
# time.sleep(300 - ((time.time() - starttime) % 300.0)) # 300 second interval between each new execution
# except KeyboardInterrupt:
# print('\n\nKeyboard exception received. Exiting.')
# exit()
capital = 3000 #position size
# macd_xover = {}
# renko_param = {}
# for ticker in tickers:
# renko_param[ticker] = {"brick_size":renkoBrickSize(ticker),"upper_limit":None, "lower_limit":None,"brick":0}
# macd_xover[ticker] = None
#create KiteTicker object
kws = KiteTicker(key_secret[0],kite.access_token)
tokens = tokenLookup(instrument_df,tickers)
start_minute = dt.datetime.now().minute
def on_ticks(ws,ticks):
global start_minute
# renkoOperation(ticks)
now_minute = dt.datetime.now().minute
if abs(now_minute - start_minute) >= 5:
start_minute = now_minute
main(capital)
def on_connect(ws,response):
ws.subscribe(tokens)
ws.set_mode(ws.MODE_LTP,tokens)
def pattern_scanner():
while True:
now = dt.datetime.now()
if (now.hour >= 9):
kws.on_ticks=on_ticks
kws.on_connect=on_connect
kws.connect()
if (now.hour >= 14 and now.minute >= 30):
sys.exit() | pattern = "engulfing_bullish" | conditional_block |
kc_pattern_scanner.py | # -*- coding: utf-8 -*-
"""
Zerodha Kite Connect - candlestick pattern scanner
@author: Mayank Rasu (http://rasuquant.com/wp/)
"""
from kiteconnect import KiteConnect, KiteTicker
import pandas as pd
import datetime as dt
import os
import time
import numpy as np
import sys
cwd = os.chdir("/home/rajkp/code/Projects/Django-Dashboard/boilerplate-code-django-dashboard/app/algos")
#generate trading session
access_token = open("access_token.txt",'r').read()
key_secret = open("api_key.txt",'r').read().split()
kite = KiteConnect(api_key=key_secret[0])
kite.set_access_token(access_token)
#get dump of all NSE instruments
instrument_dump = kite.instruments("NSE")
instrument_df = pd.DataFrame(instrument_dump)
def instrumentLookup(instrument_df,symbol):
"""Looks up instrument token for a given script from instrument dump"""
try:
return instrument_df[instrument_df.tradingsymbol==symbol].instrument_token.values[0]
except:
return -1
def tokenLookup(instrument_df,symbol_list):
"""Looks up instrument token for a given script from instrument dump"""
token_list = []
for symbol in symbol_list:
token_list.append(int(instrument_df[instrument_df.tradingsymbol==symbol].instrument_token.values[0]))
return token_list
def fetchOHLC(ticker,interval,duration):
"""extracts historical data and outputs in the form of dataframe"""
instrument = instrumentLookup(instrument_df,ticker)
data = pd.DataFrame(kite.historical_data(instrument,dt.date.today()-dt.timedelta(duration), dt.date.today(),interval))
data.set_index("date",inplace=True)
return data
def doji(ohlc_df):
"""returns dataframe with doji candle column"""
df = ohlc_df.copy()
avg_candle_size = abs(df["close"] - df["open"]).median()
df["doji"] = abs(df["close"] - df["open"]) <= (0.05 * avg_candle_size)
return df
def maru_bozu(ohlc_df):
"""returns dataframe with maru bozu candle column"""
df = ohlc_df.copy()
avg_candle_size = abs(df["close"] - df["open"]).median()
df["h-c"] = df["high"]-df["close"]
df["l-o"] = df["low"]-df["open"]
df["h-o"] = df["high"]-df["open"]
df["l-c"] = df["low"]-df["close"]
df["maru_bozu"] = np.where((df["close"] - df["open"] > 2*avg_candle_size) & \
(df[["h-c","l-o"]].max(axis=1) < 0.005*avg_candle_size),"maru_bozu_green",
np.where((df["open"] - df["close"] > 2*avg_candle_size) & \
(abs(df[["h-o","l-c"]]).max(axis=1) < 0.005*avg_candle_size),"maru_bozu_red",False))
df.drop(["h-c","l-o","h-o","l-c"],axis=1,inplace=True)
return df
def hammer(ohlc_df):
"""returns dataframe with hammer candle column"""
df = ohlc_df.copy()
df["hammer"] = (((df["high"] - df["low"])>3*(df["open"] - df["close"])) & \
((df["close"] - df["low"])/(.001 + df["high"] - df["low"]) > 0.6) & \
((df["open"] - df["low"])/(.001 + df["high"] - df["low"]) > 0.6)) & \
(abs(df["close"] - df["open"]) > 0.1* (df["high"] - df["low"]))
return df
def shooting_star(ohlc_df):
"""returns dataframe with shooting star candle column"""
df = ohlc_df.copy()
df["sstar"] = (((df["high"] - df["low"])>3*(df["open"] - df["close"])) & \
((df["high"] - df["close"])/(.001 + df["high"] - df["low"]) > 0.6) & \
((df["high"] - df["open"])/(.001 + df["high"] - df["low"]) > 0.6)) & \
(abs(df["close"] - df["open"]) > 0.1* (df["high"] - df["low"]))
return df
def levels(ohlc_day):
"""returns pivot point and support/resistance levels"""
high = round(ohlc_day["high"][-1],2)
low = round(ohlc_day["low"][-1],2)
close = round(ohlc_day["close"][-1],2)
pivot = round((high + low + close)/3,2)
r1 = round((2*pivot - low),2)
r2 = round((pivot + (high - low)),2)
r3 = round((high + 2*(pivot - low)),2)
s1 = round((2*pivot - high),2)
s2 = round((pivot - (high - low)),2)
s3 = round((low - 2*(high - pivot)),2)
return (pivot,r1,r2,r3,s1,s2,s3)
def trend(ohlc_df,n):
"function to assess the trend by analyzing each candle"
df = ohlc_df.copy()
df["up"] = np.where(df["low"]>=df["low"].shift(1),1,0)
df["dn"] = np.where(df["high"]<=df["high"].shift(1),1,0)
if df["close"][-1] > df["open"][-1]:
if df["up"][-1*n:].sum() >= 0.7*n:
return "uptrend"
elif df["open"][-1] > df["close"][-1]:
if df["dn"][-1*n:].sum() >= 0.7*n:
return "downtrend"
else:
return None
def res_sup(ohlc_df,ohlc_day):
"""calculates closest resistance and support levels for a given candle"""
level = ((ohlc_df["close"][-1] + ohlc_df["open"][-1])/2 + (ohlc_df["high"][-1] + ohlc_df["low"][-1])/2)/2
p,r1,r2,r3,s1,s2,s3 = levels(ohlc_day)
l_r1=level-r1
l_r2=level-r2
l_r3=level-r3
l_p=level-p
l_s1=level-s1
l_s2=level-s2
l_s3=level-s3
lev_ser = pd.Series([l_p,l_r1,l_r2,l_r3,l_s1,l_s2,l_s3],index=["p","r1","r2","r3","s1","s2","s3"])
sup = lev_ser[lev_ser>0].idxmin()
res = lev_ser[lev_ser<0].idxmax()
return (eval('{}'.format(res)), eval('{}'.format(sup)))
def candle_type(ohlc_df):
"""returns the candle type of the last candle of an OHLC DF"""
candle = None
if doji(ohlc_df)["doji"][-1] == True:
candle = "doji"
if maru_bozu(ohlc_df)["maru_bozu"][-1] == "maru_bozu_green":
candle = "maru_bozu_green"
if maru_bozu(ohlc_df)["maru_bozu"][-1] == "maru_bozu_red":
candle = "maru_bozu_red"
if shooting_star(ohlc_df)["sstar"][-1] == True:
candle = "shooting_star"
if hammer(ohlc_df)["hammer"][-1] == True:
candle = "hammer"
return candle
def candle_pattern(ohlc_df,ohlc_day):
"""returns the candle pattern identified"""
pattern = None
signi = "low"
avg_candle_size = abs(ohlc_df["close"] - ohlc_df["open"]).median()
sup, res = res_sup(ohlc_df,ohlc_day)
if (sup - 1.5*avg_candle_size) < ohlc_df["close"][-1] < (sup + 1.5*avg_candle_size):
signi = "HIGH"
if (res - 1.5*avg_candle_size) < ohlc_df["close"][-1] < (res + 1.5*avg_candle_size):
signi = "HIGH"
if candle_type(ohlc_df) == 'doji' \
and ohlc_df["close"][-1] > ohlc_df["close"][-2] \
and ohlc_df["close"][-1] > ohlc_df["open"][-1]:
pattern = "doji_bullish"
if candle_type(ohlc_df) == 'doji' \
and ohlc_df["close"][-1] < ohlc_df["close"][-2] \
and ohlc_df["close"][-1] < ohlc_df["open"][-1]:
pattern = "doji_bearish"
if candle_type(ohlc_df) == "maru_bozu_green":
pattern = "maru_bozu_bullish"
if candle_type(ohlc_df) == "maru_bozu_red":
pattern = "maru_bozu_bearish"
if trend(ohlc_df.iloc[:-1,:],7) == "uptrend" and candle_type(ohlc_df) == "hammer":
pattern = "hanging_man_bearish"
if trend(ohlc_df.iloc[:-1,:],7) == "downtrend" and candle_type(ohlc_df) == "hammer":
pattern = "hammer_bullish"
if trend(ohlc_df.iloc[:-1,:],7) == "uptrend" and candle_type(ohlc_df) == "shooting_star":
pattern = "shooting_star_bearish"
if trend(ohlc_df.iloc[:-1,:],7) == "uptrend" \
and candle_type(ohlc_df) == "doji" \
and ohlc_df["high"][-1] < ohlc_df["close"][-2] \
and ohlc_df["low"][-1] > ohlc_df["open"][-2]:
pattern = "harami_cross_bearish"
if trend(ohlc_df.iloc[:-1,:],7) == "downtrend" \
and candle_type(ohlc_df) == "doji" \
and ohlc_df["high"][-1] < ohlc_df["open"][-2] \
and ohlc_df["low"][-1] > ohlc_df["close"][-2]:
pattern = "harami_cross_bullish"
if trend(ohlc_df.iloc[:-1,:],7) == "uptrend" \
and candle_type(ohlc_df) != "doji" \
and ohlc_df["open"][-1] > ohlc_df["high"][-2] \
and ohlc_df["close"][-1] < ohlc_df["low"][-2]:
pattern = "engulfing_bearish"
if trend(ohlc_df.iloc[:-1,:],7) == "downtrend" \
and candle_type(ohlc_df) != "doji" \
and ohlc_df["close"][-1] > ohlc_df["high"][-2] \
and ohlc_df["open"][-1] < ohlc_df["low"][-2]:
pattern = "engulfing_bullish"
return "Significance - {}, Pattern - {}".format(signi,pattern)
##############################################################################################
tickers = ["BHEL",
"CONCOR",
"ASTRAL",
"INDHOTEL",
"DALBHARAT",
"COFORGE",
"ITI",
"IPCALAB",
"SUMICHEM",
"DHANI",
"DIXON",
"SUNTV",
"FEDERALBNK",
"OFSS",
"COROMANDEL",
"RECLTD",
"VOLTAS",
"ISEC",
"AUBANK",
"BALKRISIND",
"GSPL",
"HAL",
"POLYCAB",
"TATACHEM",
"SUPREMEIND",
"LTTS",
"BHARATFORG",
"HATSUN",
"TVSMOTOR",
"GMRINFRA",
"TRENT",
"MOTILALOFS",
"L&TFH",
"ATUL",
"AIAENG",
"GLAXO",
"JSWENERGY",
"SKFINDIA",
"IDBI",
"PRESTIGE",
"NHPC",
"ATGL",
"TIINDIA",
"SJVN",
"MINDAIND",
"CANBK",
"VINATIORGA",
"BANKINDIA",
"OIL",
"BBTC",
"PFC",
"GODREJAGRO",
"AAVAS",
"EXIDEIND",
"WHIRLPOOL",
"MAXHEALTH",
"GODREJPROP",
"VBL",
"3MINDIA",
"METROPOLIS",
"ASTRAZEN",
"MGL",
"SRF",
"APOLLOTYRE",
"MFSL",
"BATAINDIA",
"UNIONBANK",
"VGUARD",
"ZYDUSWELL",
"PFIZER",
"BAYERCROP",
"IRCTC",
"CASTROLIND",
"SANOFI",
"ABFRL",
"FORTIS",
"CESC",
"PERSISTENT",
"GODREJIND",
"MPHASIS",
"PHOENIXLTD",
"CHOLAHLDNG",
"DEEPAKNTR",
"HONAUT",
"TATACOMM",
"JMFINANCIL",
"LICHSGFIN",
"CUMMINSIND",
"GICRE",
"THERMAX",
"SOLARINDS",
"SRTRANSFIN",
"LAURUSLABS",
"IDFCFIRSTB",
"CUB",
"NIACL",
"NAVINFLUOR",
"OBEROIRLTY",
"TATAELXSI",
"RELAXO",
"MANAPPURAM",
"CRISIL",
"AMARAJABAT",
"GUJGASLTD",
"BANKBARODA",
"AARTIIND",
"M&MFIN",
"ASHOKLEY",
"PGHL",
"PIIND",
"GILLETTE",
"ABCAPITAL",
"APLLTD",
"CROMPTON",
"NAM-INDIA",
"ABB",
"TTKPRESTIG",
"SUVENPHAR",
"IDEA",
"BEL",
"SCHAEFFLER",
"ZEEL",
"RBLBANK",
"RAMCOCEM",
"GLENMARK",
"RAJESHEXPO",
"SUNDRMFAST",
"EMAMILTD",
"ENDURANCE",
"SYNGENE",
"AKZOINDIA",
"LALPATHLAB",
"HINDZINC",
"TATAPOWER",
"JKCEMENT",
"ESCORTS",
"SUNDARMFIN",
"IIFLWAM",
"IBULHSGFIN",
"CREDITACC",
"KANSAINER",
"MINDTREE",
"PAGEIND",
"CHOLAFIN",
"AJANTPHARM",
"NATCOPHARM",
"JINDALSTEL",
"TORNTPOWER",
"SAIL",
"INDIAMART",
"GAIL",
"HINDPETRO",
"JUBLFOOD",
"ADANITRANS",
"BOSCHLTD",
"IGL",
"SIEMENS",
"PETRONET",
"ICICIPRULI",
"ACC",
"MARICO",
"AMBUJACEM",
"BERGEPAINT",
"PIDILITIND",
"INDUSTOWER",
"ABBOTINDIA",
"BIOCON",
"MCDOWELL-N",
"PGHH",
"DMART",
"MRF",
| "GODREJCP",
"COLPAL",
"HDFCAMC",
"YESBANK",
"VEDL",
"BAJAJHLDNG",
"DABUR",
"INDIGO",
"ALKEM",
"CADILAHC",
"MOTHERSUMI",
"HAVELLS",
"ADANIENT",
"UBL",
"SBICARD",
"PEL",
"BANDHANBNK",
"MUTHOOTFIN",
"TORNTPHARM",
"ICICIGI",
"LUPIN",
"LTI",
"APOLLOHOSP",
"ADANIGREEN",
"NAUKRI",
"NMDC",
"PNB",
"AUROPHARMA",
"COALINDIA",
"IOC",
"NTPC",
"ULTRACEMCO",
"BPCL",
"TATASTEEL",
"TATACONSUM",
"SUNPHARMA",
"TATAMOTORS",
"GRASIM",
"SHREECEM",
"SBIN",
"EICHERMOT",
"RELIANCE",
"BAJAJ-AUTO",
"INDUSINDBK",
"BRITANNIA",
"SBILIFE",
"UPL",
"ONGC",
"ADANIPORTS",
"POWERGRID",
"NESTLEIND",
"BHARTIARTL",
"TITAN",
"HEROMOTOCO",
"ASIANPAINT",
"MARUTI",
"ITC",
"ICICIBANK",
"HCLTECH",
"M&M",
"LT",
"INFY",
"BAJAJFINSV",
"DRREDDY",
"HDFCBANK",
"CIPLA",
"HDFCLIFE",
"TCS",
"AXISBANK",
"HINDUNILVR",
"JSWSTEEL",
"TECHM",
"BAJFINANCE",
"WIPRO",
"DIVISLAB",
"KOTAKBANK",
"HINDALCO",
"HDFC"]
#####################################################################################################
def main():
a,b = 0,0
while a < 10:
try:
pos_df = pd.DataFrame(kite.positions()["day"])
break
except:
print("can't extract position data..retrying")
a+=1
while b < 10:
try:
ord_df = pd.DataFrame(kite.orders())
break
except:
print("can't extract order data..retrying")
b+=1
for ticker in tickers:
try:
ohlc = fetchOHLC(ticker, '5minute',5)
ohlc_day = fetchOHLC(ticker, 'day',30)
ohlc_day = ohlc_day.iloc[:-1,:]
cp = candle_pattern(ohlc,ohlc_day)
# print(ticker, ": ",cp)
# if len(pos_df.columns)==0:
# # if macd_xover[ticker] == "bullish" and renko_param[ticker]["brick"] >=2:
# # placeSLOrder(ticker,"buy",quantity,renko_param[ticker]["lower_limit"])
# # if macd_xover[ticker] == "bearish" and renko_param[ticker]["brick"] <=-2:
# # placeSLOrder(ticker,"sell",quantity,renko_param[ticker]["upper_limit"])
# if len(pos_df.columns)!=0 and ticker not in pos_df["tradingsymbol"].tolist():
# # if macd_xover[ticker] == "bullish" and renko_param[ticker]["brick"] >=2:
# # placeSLOrder(ticker,"buy",quantity,renko_param[ticker]["lower_limit"])
# # if macd_xover[ticker] == "bearish" and renko_param[ticker]["brick"] <=-2:
# # placeSLOrder(ticker,"sell",quantity,renko_param[ticker]["upper_limit"])
# if len(pos_df.columns)!=0 and ticker in pos_df["tradingsymbol"].tolist():
# if pos_df[pos_df["tradingsymbol"]==ticker]["quantity"].values[0] == 0:
# if macd_xover[ticker] == "bullish" and renko_param[ticker]["brick"] >=2:
# placeSLOrder(ticker,"buy",quantity,renko_param[ticker]["lower_limit"])
# if macd_xover[ticker] == "bearish" and renko_param[ticker]["brick"] <=-2:
# placeSLOrder(ticker,"sell",quantity,renko_param[ticker]["upper_limit"])
# if pos_df[pos_df["tradingsymbol"]==ticker]["quantity"].values[0] > 0:
# order_id = ord_df.loc[(ord_df['tradingsymbol'] == ticker) & (ord_df['status'].isin(["TRIGGER PENDING","OPEN"]))]["order_id"].values[0]
# ModifyOrder(order_id,renko_param[ticker]["lower_limit"])
# if pos_df[pos_df["tradingsymbol"]==ticker]["quantity"].values[0] < 0:
# order_id = ord_df.loc[(ord_df['tradingsymbol'] == ticker) & (ord_df['status'].isin(["TRIGGER PENDING","OPEN"]))]["order_id"].values[0]
# ModifyOrder(order_id,renko_param[ticker]["upper_limit"])
except:
print("skipping for ",ticker)
# Continuous execution
# starttime=time.time()
# timeout = time.time() + 60*60*1 # 60 seconds times 60 meaning the script will run for 1 hr
# while time.time() <= timeout:
# try:
# print("passthrough at ",time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())))
# main()
# time.sleep(300 - ((time.time() - starttime) % 300.0)) # 300 second interval between each new execution
# except KeyboardInterrupt:
# print('\n\nKeyboard exception received. Exiting.')
# exit()
capital = 3000 #position size
# macd_xover = {}
# renko_param = {}
# for ticker in tickers:
# renko_param[ticker] = {"brick_size":renkoBrickSize(ticker),"upper_limit":None, "lower_limit":None,"brick":0}
# macd_xover[ticker] = None
#create KiteTicker object
kws = KiteTicker(key_secret[0],kite.access_token)
tokens = tokenLookup(instrument_df,tickers)
start_minute = dt.datetime.now().minute
def on_ticks(ws,ticks):
global start_minute
# renkoOperation(ticks)
now_minute = dt.datetime.now().minute
if abs(now_minute - start_minute) >= 5:
start_minute = now_minute
main(capital)
def on_connect(ws,response):
ws.subscribe(tokens)
ws.set_mode(ws.MODE_LTP,tokens)
def pattern_scanner():
while True:
now = dt.datetime.now()
if (now.hour >= 9):
kws.on_ticks=on_ticks
kws.on_connect=on_connect
kws.connect()
if (now.hour >= 14 and now.minute >= 30):
sys.exit() | "DLF",
| random_line_split |
kc_pattern_scanner.py | # -*- coding: utf-8 -*-
"""
Zerodha Kite Connect - candlestick pattern scanner
@author: Mayank Rasu (http://rasuquant.com/wp/)
"""
from kiteconnect import KiteConnect, KiteTicker
import pandas as pd
import datetime as dt
import os
import time
import numpy as np
import sys
cwd = os.chdir("/home/rajkp/code/Projects/Django-Dashboard/boilerplate-code-django-dashboard/app/algos")
#generate trading session
access_token = open("access_token.txt",'r').read()
key_secret = open("api_key.txt",'r').read().split()
kite = KiteConnect(api_key=key_secret[0])
kite.set_access_token(access_token)
#get dump of all NSE instruments
instrument_dump = kite.instruments("NSE")
instrument_df = pd.DataFrame(instrument_dump)
def instrumentLookup(instrument_df,symbol):
"""Looks up instrument token for a given script from instrument dump"""
try:
return instrument_df[instrument_df.tradingsymbol==symbol].instrument_token.values[0]
except:
return -1
def tokenLookup(instrument_df,symbol_list):
"""Looks up instrument token for a given script from instrument dump"""
token_list = []
for symbol in symbol_list:
token_list.append(int(instrument_df[instrument_df.tradingsymbol==symbol].instrument_token.values[0]))
return token_list
def fetchOHLC(ticker,interval,duration):
"""extracts historical data and outputs in the form of dataframe"""
instrument = instrumentLookup(instrument_df,ticker)
data = pd.DataFrame(kite.historical_data(instrument,dt.date.today()-dt.timedelta(duration), dt.date.today(),interval))
data.set_index("date",inplace=True)
return data
def doji(ohlc_df):
"""returns dataframe with doji candle column"""
df = ohlc_df.copy()
avg_candle_size = abs(df["close"] - df["open"]).median()
df["doji"] = abs(df["close"] - df["open"]) <= (0.05 * avg_candle_size)
return df
def maru_bozu(ohlc_df):
"""returns dataframe with maru bozu candle column"""
df = ohlc_df.copy()
avg_candle_size = abs(df["close"] - df["open"]).median()
df["h-c"] = df["high"]-df["close"]
df["l-o"] = df["low"]-df["open"]
df["h-o"] = df["high"]-df["open"]
df["l-c"] = df["low"]-df["close"]
df["maru_bozu"] = np.where((df["close"] - df["open"] > 2*avg_candle_size) & \
(df[["h-c","l-o"]].max(axis=1) < 0.005*avg_candle_size),"maru_bozu_green",
np.where((df["open"] - df["close"] > 2*avg_candle_size) & \
(abs(df[["h-o","l-c"]]).max(axis=1) < 0.005*avg_candle_size),"maru_bozu_red",False))
df.drop(["h-c","l-o","h-o","l-c"],axis=1,inplace=True)
return df
def hammer(ohlc_df):
"""returns dataframe with hammer candle column"""
df = ohlc_df.copy()
df["hammer"] = (((df["high"] - df["low"])>3*(df["open"] - df["close"])) & \
((df["close"] - df["low"])/(.001 + df["high"] - df["low"]) > 0.6) & \
((df["open"] - df["low"])/(.001 + df["high"] - df["low"]) > 0.6)) & \
(abs(df["close"] - df["open"]) > 0.1* (df["high"] - df["low"]))
return df
def shooting_star(ohlc_df):
"""returns dataframe with shooting star candle column"""
df = ohlc_df.copy()
df["sstar"] = (((df["high"] - df["low"])>3*(df["open"] - df["close"])) & \
((df["high"] - df["close"])/(.001 + df["high"] - df["low"]) > 0.6) & \
((df["high"] - df["open"])/(.001 + df["high"] - df["low"]) > 0.6)) & \
(abs(df["close"] - df["open"]) > 0.1* (df["high"] - df["low"]))
return df
def levels(ohlc_day):
"""returns pivot point and support/resistance levels"""
high = round(ohlc_day["high"][-1],2)
low = round(ohlc_day["low"][-1],2)
close = round(ohlc_day["close"][-1],2)
pivot = round((high + low + close)/3,2)
r1 = round((2*pivot - low),2)
r2 = round((pivot + (high - low)),2)
r3 = round((high + 2*(pivot - low)),2)
s1 = round((2*pivot - high),2)
s2 = round((pivot - (high - low)),2)
s3 = round((low - 2*(high - pivot)),2)
return (pivot,r1,r2,r3,s1,s2,s3)
def trend(ohlc_df,n):
"function to assess the trend by analyzing each candle"
df = ohlc_df.copy()
df["up"] = np.where(df["low"]>=df["low"].shift(1),1,0)
df["dn"] = np.where(df["high"]<=df["high"].shift(1),1,0)
if df["close"][-1] > df["open"][-1]:
if df["up"][-1*n:].sum() >= 0.7*n:
return "uptrend"
elif df["open"][-1] > df["close"][-1]:
if df["dn"][-1*n:].sum() >= 0.7*n:
return "downtrend"
else:
return None
def res_sup(ohlc_df,ohlc_day):
"""calculates closest resistance and support levels for a given candle"""
level = ((ohlc_df["close"][-1] + ohlc_df["open"][-1])/2 + (ohlc_df["high"][-1] + ohlc_df["low"][-1])/2)/2
p,r1,r2,r3,s1,s2,s3 = levels(ohlc_day)
l_r1=level-r1
l_r2=level-r2
l_r3=level-r3
l_p=level-p
l_s1=level-s1
l_s2=level-s2
l_s3=level-s3
lev_ser = pd.Series([l_p,l_r1,l_r2,l_r3,l_s1,l_s2,l_s3],index=["p","r1","r2","r3","s1","s2","s3"])
sup = lev_ser[lev_ser>0].idxmin()
res = lev_ser[lev_ser<0].idxmax()
return (eval('{}'.format(res)), eval('{}'.format(sup)))
def candle_type(ohlc_df):
"""returns the candle type of the last candle of an OHLC DF"""
candle = None
if doji(ohlc_df)["doji"][-1] == True:
candle = "doji"
if maru_bozu(ohlc_df)["maru_bozu"][-1] == "maru_bozu_green":
candle = "maru_bozu_green"
if maru_bozu(ohlc_df)["maru_bozu"][-1] == "maru_bozu_red":
candle = "maru_bozu_red"
if shooting_star(ohlc_df)["sstar"][-1] == True:
candle = "shooting_star"
if hammer(ohlc_df)["hammer"][-1] == True:
candle = "hammer"
return candle
def candle_pattern(ohlc_df,ohlc_day):
"""returns the candle pattern identified"""
pattern = None
signi = "low"
avg_candle_size = abs(ohlc_df["close"] - ohlc_df["open"]).median()
sup, res = res_sup(ohlc_df,ohlc_day)
if (sup - 1.5*avg_candle_size) < ohlc_df["close"][-1] < (sup + 1.5*avg_candle_size):
signi = "HIGH"
if (res - 1.5*avg_candle_size) < ohlc_df["close"][-1] < (res + 1.5*avg_candle_size):
signi = "HIGH"
if candle_type(ohlc_df) == 'doji' \
and ohlc_df["close"][-1] > ohlc_df["close"][-2] \
and ohlc_df["close"][-1] > ohlc_df["open"][-1]:
pattern = "doji_bullish"
if candle_type(ohlc_df) == 'doji' \
and ohlc_df["close"][-1] < ohlc_df["close"][-2] \
and ohlc_df["close"][-1] < ohlc_df["open"][-1]:
pattern = "doji_bearish"
if candle_type(ohlc_df) == "maru_bozu_green":
pattern = "maru_bozu_bullish"
if candle_type(ohlc_df) == "maru_bozu_red":
pattern = "maru_bozu_bearish"
if trend(ohlc_df.iloc[:-1,:],7) == "uptrend" and candle_type(ohlc_df) == "hammer":
pattern = "hanging_man_bearish"
if trend(ohlc_df.iloc[:-1,:],7) == "downtrend" and candle_type(ohlc_df) == "hammer":
pattern = "hammer_bullish"
if trend(ohlc_df.iloc[:-1,:],7) == "uptrend" and candle_type(ohlc_df) == "shooting_star":
pattern = "shooting_star_bearish"
if trend(ohlc_df.iloc[:-1,:],7) == "uptrend" \
and candle_type(ohlc_df) == "doji" \
and ohlc_df["high"][-1] < ohlc_df["close"][-2] \
and ohlc_df["low"][-1] > ohlc_df["open"][-2]:
pattern = "harami_cross_bearish"
if trend(ohlc_df.iloc[:-1,:],7) == "downtrend" \
and candle_type(ohlc_df) == "doji" \
and ohlc_df["high"][-1] < ohlc_df["open"][-2] \
and ohlc_df["low"][-1] > ohlc_df["close"][-2]:
pattern = "harami_cross_bullish"
if trend(ohlc_df.iloc[:-1,:],7) == "uptrend" \
and candle_type(ohlc_df) != "doji" \
and ohlc_df["open"][-1] > ohlc_df["high"][-2] \
and ohlc_df["close"][-1] < ohlc_df["low"][-2]:
pattern = "engulfing_bearish"
if trend(ohlc_df.iloc[:-1,:],7) == "downtrend" \
and candle_type(ohlc_df) != "doji" \
and ohlc_df["close"][-1] > ohlc_df["high"][-2] \
and ohlc_df["open"][-1] < ohlc_df["low"][-2]:
pattern = "engulfing_bullish"
return "Significance - {}, Pattern - {}".format(signi,pattern)
##############################################################################################
tickers = ["BHEL",
"CONCOR",
"ASTRAL",
"INDHOTEL",
"DALBHARAT",
"COFORGE",
"ITI",
"IPCALAB",
"SUMICHEM",
"DHANI",
"DIXON",
"SUNTV",
"FEDERALBNK",
"OFSS",
"COROMANDEL",
"RECLTD",
"VOLTAS",
"ISEC",
"AUBANK",
"BALKRISIND",
"GSPL",
"HAL",
"POLYCAB",
"TATACHEM",
"SUPREMEIND",
"LTTS",
"BHARATFORG",
"HATSUN",
"TVSMOTOR",
"GMRINFRA",
"TRENT",
"MOTILALOFS",
"L&TFH",
"ATUL",
"AIAENG",
"GLAXO",
"JSWENERGY",
"SKFINDIA",
"IDBI",
"PRESTIGE",
"NHPC",
"ATGL",
"TIINDIA",
"SJVN",
"MINDAIND",
"CANBK",
"VINATIORGA",
"BANKINDIA",
"OIL",
"BBTC",
"PFC",
"GODREJAGRO",
"AAVAS",
"EXIDEIND",
"WHIRLPOOL",
"MAXHEALTH",
"GODREJPROP",
"VBL",
"3MINDIA",
"METROPOLIS",
"ASTRAZEN",
"MGL",
"SRF",
"APOLLOTYRE",
"MFSL",
"BATAINDIA",
"UNIONBANK",
"VGUARD",
"ZYDUSWELL",
"PFIZER",
"BAYERCROP",
"IRCTC",
"CASTROLIND",
"SANOFI",
"ABFRL",
"FORTIS",
"CESC",
"PERSISTENT",
"GODREJIND",
"MPHASIS",
"PHOENIXLTD",
"CHOLAHLDNG",
"DEEPAKNTR",
"HONAUT",
"TATACOMM",
"JMFINANCIL",
"LICHSGFIN",
"CUMMINSIND",
"GICRE",
"THERMAX",
"SOLARINDS",
"SRTRANSFIN",
"LAURUSLABS",
"IDFCFIRSTB",
"CUB",
"NIACL",
"NAVINFLUOR",
"OBEROIRLTY",
"TATAELXSI",
"RELAXO",
"MANAPPURAM",
"CRISIL",
"AMARAJABAT",
"GUJGASLTD",
"BANKBARODA",
"AARTIIND",
"M&MFIN",
"ASHOKLEY",
"PGHL",
"PIIND",
"GILLETTE",
"ABCAPITAL",
"APLLTD",
"CROMPTON",
"NAM-INDIA",
"ABB",
"TTKPRESTIG",
"SUVENPHAR",
"IDEA",
"BEL",
"SCHAEFFLER",
"ZEEL",
"RBLBANK",
"RAMCOCEM",
"GLENMARK",
"RAJESHEXPO",
"SUNDRMFAST",
"EMAMILTD",
"ENDURANCE",
"SYNGENE",
"AKZOINDIA",
"LALPATHLAB",
"HINDZINC",
"TATAPOWER",
"JKCEMENT",
"ESCORTS",
"SUNDARMFIN",
"IIFLWAM",
"IBULHSGFIN",
"CREDITACC",
"KANSAINER",
"MINDTREE",
"PAGEIND",
"CHOLAFIN",
"AJANTPHARM",
"NATCOPHARM",
"JINDALSTEL",
"TORNTPOWER",
"SAIL",
"INDIAMART",
"GAIL",
"HINDPETRO",
"JUBLFOOD",
"ADANITRANS",
"BOSCHLTD",
"IGL",
"SIEMENS",
"PETRONET",
"ICICIPRULI",
"ACC",
"MARICO",
"AMBUJACEM",
"BERGEPAINT",
"PIDILITIND",
"INDUSTOWER",
"ABBOTINDIA",
"BIOCON",
"MCDOWELL-N",
"PGHH",
"DMART",
"MRF",
"DLF",
"GODREJCP",
"COLPAL",
"HDFCAMC",
"YESBANK",
"VEDL",
"BAJAJHLDNG",
"DABUR",
"INDIGO",
"ALKEM",
"CADILAHC",
"MOTHERSUMI",
"HAVELLS",
"ADANIENT",
"UBL",
"SBICARD",
"PEL",
"BANDHANBNK",
"MUTHOOTFIN",
"TORNTPHARM",
"ICICIGI",
"LUPIN",
"LTI",
"APOLLOHOSP",
"ADANIGREEN",
"NAUKRI",
"NMDC",
"PNB",
"AUROPHARMA",
"COALINDIA",
"IOC",
"NTPC",
"ULTRACEMCO",
"BPCL",
"TATASTEEL",
"TATACONSUM",
"SUNPHARMA",
"TATAMOTORS",
"GRASIM",
"SHREECEM",
"SBIN",
"EICHERMOT",
"RELIANCE",
"BAJAJ-AUTO",
"INDUSINDBK",
"BRITANNIA",
"SBILIFE",
"UPL",
"ONGC",
"ADANIPORTS",
"POWERGRID",
"NESTLEIND",
"BHARTIARTL",
"TITAN",
"HEROMOTOCO",
"ASIANPAINT",
"MARUTI",
"ITC",
"ICICIBANK",
"HCLTECH",
"M&M",
"LT",
"INFY",
"BAJAJFINSV",
"DRREDDY",
"HDFCBANK",
"CIPLA",
"HDFCLIFE",
"TCS",
"AXISBANK",
"HINDUNILVR",
"JSWSTEEL",
"TECHM",
"BAJFINANCE",
"WIPRO",
"DIVISLAB",
"KOTAKBANK",
"HINDALCO",
"HDFC"]
#####################################################################################################
def | ():
a,b = 0,0
while a < 10:
try:
pos_df = pd.DataFrame(kite.positions()["day"])
break
except:
print("can't extract position data..retrying")
a+=1
while b < 10:
try:
ord_df = pd.DataFrame(kite.orders())
break
except:
print("can't extract order data..retrying")
b+=1
for ticker in tickers:
try:
ohlc = fetchOHLC(ticker, '5minute',5)
ohlc_day = fetchOHLC(ticker, 'day',30)
ohlc_day = ohlc_day.iloc[:-1,:]
cp = candle_pattern(ohlc,ohlc_day)
# print(ticker, ": ",cp)
# if len(pos_df.columns)==0:
# # if macd_xover[ticker] == "bullish" and renko_param[ticker]["brick"] >=2:
# # placeSLOrder(ticker,"buy",quantity,renko_param[ticker]["lower_limit"])
# # if macd_xover[ticker] == "bearish" and renko_param[ticker]["brick"] <=-2:
# # placeSLOrder(ticker,"sell",quantity,renko_param[ticker]["upper_limit"])
# if len(pos_df.columns)!=0 and ticker not in pos_df["tradingsymbol"].tolist():
# # if macd_xover[ticker] == "bullish" and renko_param[ticker]["brick"] >=2:
# # placeSLOrder(ticker,"buy",quantity,renko_param[ticker]["lower_limit"])
# # if macd_xover[ticker] == "bearish" and renko_param[ticker]["brick"] <=-2:
# # placeSLOrder(ticker,"sell",quantity,renko_param[ticker]["upper_limit"])
# if len(pos_df.columns)!=0 and ticker in pos_df["tradingsymbol"].tolist():
# if pos_df[pos_df["tradingsymbol"]==ticker]["quantity"].values[0] == 0:
# if macd_xover[ticker] == "bullish" and renko_param[ticker]["brick"] >=2:
# placeSLOrder(ticker,"buy",quantity,renko_param[ticker]["lower_limit"])
# if macd_xover[ticker] == "bearish" and renko_param[ticker]["brick"] <=-2:
# placeSLOrder(ticker,"sell",quantity,renko_param[ticker]["upper_limit"])
# if pos_df[pos_df["tradingsymbol"]==ticker]["quantity"].values[0] > 0:
# order_id = ord_df.loc[(ord_df['tradingsymbol'] == ticker) & (ord_df['status'].isin(["TRIGGER PENDING","OPEN"]))]["order_id"].values[0]
# ModifyOrder(order_id,renko_param[ticker]["lower_limit"])
# if pos_df[pos_df["tradingsymbol"]==ticker]["quantity"].values[0] < 0:
# order_id = ord_df.loc[(ord_df['tradingsymbol'] == ticker) & (ord_df['status'].isin(["TRIGGER PENDING","OPEN"]))]["order_id"].values[0]
# ModifyOrder(order_id,renko_param[ticker]["upper_limit"])
except:
print("skipping for ",ticker)
# Continuous execution
# starttime=time.time()
# timeout = time.time() + 60*60*1 # 60 seconds times 60 meaning the script will run for 1 hr
# while time.time() <= timeout:
# try:
# print("passthrough at ",time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())))
# main()
# time.sleep(300 - ((time.time() - starttime) % 300.0)) # 300 second interval between each new execution
# except KeyboardInterrupt:
# print('\n\nKeyboard exception received. Exiting.')
# exit()
capital = 3000 #position size
# macd_xover = {}
# renko_param = {}
# for ticker in tickers:
# renko_param[ticker] = {"brick_size":renkoBrickSize(ticker),"upper_limit":None, "lower_limit":None,"brick":0}
# macd_xover[ticker] = None
#create KiteTicker object
kws = KiteTicker(key_secret[0],kite.access_token)
tokens = tokenLookup(instrument_df,tickers)
start_minute = dt.datetime.now().minute
def on_ticks(ws,ticks):
global start_minute
# renkoOperation(ticks)
now_minute = dt.datetime.now().minute
if abs(now_minute - start_minute) >= 5:
start_minute = now_minute
main(capital)
def on_connect(ws,response):
ws.subscribe(tokens)
ws.set_mode(ws.MODE_LTP,tokens)
def pattern_scanner():
while True:
now = dt.datetime.now()
if (now.hour >= 9):
kws.on_ticks=on_ticks
kws.on_connect=on_connect
kws.connect()
if (now.hour >= 14 and now.minute >= 30):
sys.exit() | main | identifier_name |
kc_pattern_scanner.py | # -*- coding: utf-8 -*-
"""
Zerodha Kite Connect - candlestick pattern scanner
@author: Mayank Rasu (http://rasuquant.com/wp/)
"""
from kiteconnect import KiteConnect, KiteTicker
import pandas as pd
import datetime as dt
import os
import time
import numpy as np
import sys
cwd = os.chdir("/home/rajkp/code/Projects/Django-Dashboard/boilerplate-code-django-dashboard/app/algos")
#generate trading session
access_token = open("access_token.txt",'r').read()
key_secret = open("api_key.txt",'r').read().split()
kite = KiteConnect(api_key=key_secret[0])
kite.set_access_token(access_token)
#get dump of all NSE instruments
instrument_dump = kite.instruments("NSE")
instrument_df = pd.DataFrame(instrument_dump)
def instrumentLookup(instrument_df,symbol):
"""Looks up instrument token for a given script from instrument dump"""
try:
return instrument_df[instrument_df.tradingsymbol==symbol].instrument_token.values[0]
except:
return -1
def tokenLookup(instrument_df,symbol_list):
"""Looks up instrument token for a given script from instrument dump"""
token_list = []
for symbol in symbol_list:
token_list.append(int(instrument_df[instrument_df.tradingsymbol==symbol].instrument_token.values[0]))
return token_list
def fetchOHLC(ticker,interval,duration):
"""extracts historical data and outputs in the form of dataframe"""
instrument = instrumentLookup(instrument_df,ticker)
data = pd.DataFrame(kite.historical_data(instrument,dt.date.today()-dt.timedelta(duration), dt.date.today(),interval))
data.set_index("date",inplace=True)
return data
def doji(ohlc_df):
"""returns dataframe with doji candle column"""
df = ohlc_df.copy()
avg_candle_size = abs(df["close"] - df["open"]).median()
df["doji"] = abs(df["close"] - df["open"]) <= (0.05 * avg_candle_size)
return df
def maru_bozu(ohlc_df):
"""returns dataframe with maru bozu candle column"""
df = ohlc_df.copy()
avg_candle_size = abs(df["close"] - df["open"]).median()
df["h-c"] = df["high"]-df["close"]
df["l-o"] = df["low"]-df["open"]
df["h-o"] = df["high"]-df["open"]
df["l-c"] = df["low"]-df["close"]
df["maru_bozu"] = np.where((df["close"] - df["open"] > 2*avg_candle_size) & \
(df[["h-c","l-o"]].max(axis=1) < 0.005*avg_candle_size),"maru_bozu_green",
np.where((df["open"] - df["close"] > 2*avg_candle_size) & \
(abs(df[["h-o","l-c"]]).max(axis=1) < 0.005*avg_candle_size),"maru_bozu_red",False))
df.drop(["h-c","l-o","h-o","l-c"],axis=1,inplace=True)
return df
def hammer(ohlc_df):
"""returns dataframe with hammer candle column"""
df = ohlc_df.copy()
df["hammer"] = (((df["high"] - df["low"])>3*(df["open"] - df["close"])) & \
((df["close"] - df["low"])/(.001 + df["high"] - df["low"]) > 0.6) & \
((df["open"] - df["low"])/(.001 + df["high"] - df["low"]) > 0.6)) & \
(abs(df["close"] - df["open"]) > 0.1* (df["high"] - df["low"]))
return df
def shooting_star(ohlc_df):
"""returns dataframe with shooting star candle column"""
df = ohlc_df.copy()
df["sstar"] = (((df["high"] - df["low"])>3*(df["open"] - df["close"])) & \
((df["high"] - df["close"])/(.001 + df["high"] - df["low"]) > 0.6) & \
((df["high"] - df["open"])/(.001 + df["high"] - df["low"]) > 0.6)) & \
(abs(df["close"] - df["open"]) > 0.1* (df["high"] - df["low"]))
return df
def levels(ohlc_day):
"""returns pivot point and support/resistance levels"""
high = round(ohlc_day["high"][-1],2)
low = round(ohlc_day["low"][-1],2)
close = round(ohlc_day["close"][-1],2)
pivot = round((high + low + close)/3,2)
r1 = round((2*pivot - low),2)
r2 = round((pivot + (high - low)),2)
r3 = round((high + 2*(pivot - low)),2)
s1 = round((2*pivot - high),2)
s2 = round((pivot - (high - low)),2)
s3 = round((low - 2*(high - pivot)),2)
return (pivot,r1,r2,r3,s1,s2,s3)
def trend(ohlc_df,n):
|
def res_sup(ohlc_df,ohlc_day):
"""calculates closest resistance and support levels for a given candle"""
level = ((ohlc_df["close"][-1] + ohlc_df["open"][-1])/2 + (ohlc_df["high"][-1] + ohlc_df["low"][-1])/2)/2
p,r1,r2,r3,s1,s2,s3 = levels(ohlc_day)
l_r1=level-r1
l_r2=level-r2
l_r3=level-r3
l_p=level-p
l_s1=level-s1
l_s2=level-s2
l_s3=level-s3
lev_ser = pd.Series([l_p,l_r1,l_r2,l_r3,l_s1,l_s2,l_s3],index=["p","r1","r2","r3","s1","s2","s3"])
sup = lev_ser[lev_ser>0].idxmin()
res = lev_ser[lev_ser<0].idxmax()
return (eval('{}'.format(res)), eval('{}'.format(sup)))
def candle_type(ohlc_df):
"""returns the candle type of the last candle of an OHLC DF"""
candle = None
if doji(ohlc_df)["doji"][-1] == True:
candle = "doji"
if maru_bozu(ohlc_df)["maru_bozu"][-1] == "maru_bozu_green":
candle = "maru_bozu_green"
if maru_bozu(ohlc_df)["maru_bozu"][-1] == "maru_bozu_red":
candle = "maru_bozu_red"
if shooting_star(ohlc_df)["sstar"][-1] == True:
candle = "shooting_star"
if hammer(ohlc_df)["hammer"][-1] == True:
candle = "hammer"
return candle
def candle_pattern(ohlc_df,ohlc_day):
"""returns the candle pattern identified"""
pattern = None
signi = "low"
avg_candle_size = abs(ohlc_df["close"] - ohlc_df["open"]).median()
sup, res = res_sup(ohlc_df,ohlc_day)
if (sup - 1.5*avg_candle_size) < ohlc_df["close"][-1] < (sup + 1.5*avg_candle_size):
signi = "HIGH"
if (res - 1.5*avg_candle_size) < ohlc_df["close"][-1] < (res + 1.5*avg_candle_size):
signi = "HIGH"
if candle_type(ohlc_df) == 'doji' \
and ohlc_df["close"][-1] > ohlc_df["close"][-2] \
and ohlc_df["close"][-1] > ohlc_df["open"][-1]:
pattern = "doji_bullish"
if candle_type(ohlc_df) == 'doji' \
and ohlc_df["close"][-1] < ohlc_df["close"][-2] \
and ohlc_df["close"][-1] < ohlc_df["open"][-1]:
pattern = "doji_bearish"
if candle_type(ohlc_df) == "maru_bozu_green":
pattern = "maru_bozu_bullish"
if candle_type(ohlc_df) == "maru_bozu_red":
pattern = "maru_bozu_bearish"
if trend(ohlc_df.iloc[:-1,:],7) == "uptrend" and candle_type(ohlc_df) == "hammer":
pattern = "hanging_man_bearish"
if trend(ohlc_df.iloc[:-1,:],7) == "downtrend" and candle_type(ohlc_df) == "hammer":
pattern = "hammer_bullish"
if trend(ohlc_df.iloc[:-1,:],7) == "uptrend" and candle_type(ohlc_df) == "shooting_star":
pattern = "shooting_star_bearish"
if trend(ohlc_df.iloc[:-1,:],7) == "uptrend" \
and candle_type(ohlc_df) == "doji" \
and ohlc_df["high"][-1] < ohlc_df["close"][-2] \
and ohlc_df["low"][-1] > ohlc_df["open"][-2]:
pattern = "harami_cross_bearish"
if trend(ohlc_df.iloc[:-1,:],7) == "downtrend" \
and candle_type(ohlc_df) == "doji" \
and ohlc_df["high"][-1] < ohlc_df["open"][-2] \
and ohlc_df["low"][-1] > ohlc_df["close"][-2]:
pattern = "harami_cross_bullish"
if trend(ohlc_df.iloc[:-1,:],7) == "uptrend" \
and candle_type(ohlc_df) != "doji" \
and ohlc_df["open"][-1] > ohlc_df["high"][-2] \
and ohlc_df["close"][-1] < ohlc_df["low"][-2]:
pattern = "engulfing_bearish"
if trend(ohlc_df.iloc[:-1,:],7) == "downtrend" \
and candle_type(ohlc_df) != "doji" \
and ohlc_df["close"][-1] > ohlc_df["high"][-2] \
and ohlc_df["open"][-1] < ohlc_df["low"][-2]:
pattern = "engulfing_bullish"
return "Significance - {}, Pattern - {}".format(signi,pattern)
##############################################################################################
tickers = ["BHEL",
"CONCOR",
"ASTRAL",
"INDHOTEL",
"DALBHARAT",
"COFORGE",
"ITI",
"IPCALAB",
"SUMICHEM",
"DHANI",
"DIXON",
"SUNTV",
"FEDERALBNK",
"OFSS",
"COROMANDEL",
"RECLTD",
"VOLTAS",
"ISEC",
"AUBANK",
"BALKRISIND",
"GSPL",
"HAL",
"POLYCAB",
"TATACHEM",
"SUPREMEIND",
"LTTS",
"BHARATFORG",
"HATSUN",
"TVSMOTOR",
"GMRINFRA",
"TRENT",
"MOTILALOFS",
"L&TFH",
"ATUL",
"AIAENG",
"GLAXO",
"JSWENERGY",
"SKFINDIA",
"IDBI",
"PRESTIGE",
"NHPC",
"ATGL",
"TIINDIA",
"SJVN",
"MINDAIND",
"CANBK",
"VINATIORGA",
"BANKINDIA",
"OIL",
"BBTC",
"PFC",
"GODREJAGRO",
"AAVAS",
"EXIDEIND",
"WHIRLPOOL",
"MAXHEALTH",
"GODREJPROP",
"VBL",
"3MINDIA",
"METROPOLIS",
"ASTRAZEN",
"MGL",
"SRF",
"APOLLOTYRE",
"MFSL",
"BATAINDIA",
"UNIONBANK",
"VGUARD",
"ZYDUSWELL",
"PFIZER",
"BAYERCROP",
"IRCTC",
"CASTROLIND",
"SANOFI",
"ABFRL",
"FORTIS",
"CESC",
"PERSISTENT",
"GODREJIND",
"MPHASIS",
"PHOENIXLTD",
"CHOLAHLDNG",
"DEEPAKNTR",
"HONAUT",
"TATACOMM",
"JMFINANCIL",
"LICHSGFIN",
"CUMMINSIND",
"GICRE",
"THERMAX",
"SOLARINDS",
"SRTRANSFIN",
"LAURUSLABS",
"IDFCFIRSTB",
"CUB",
"NIACL",
"NAVINFLUOR",
"OBEROIRLTY",
"TATAELXSI",
"RELAXO",
"MANAPPURAM",
"CRISIL",
"AMARAJABAT",
"GUJGASLTD",
"BANKBARODA",
"AARTIIND",
"M&MFIN",
"ASHOKLEY",
"PGHL",
"PIIND",
"GILLETTE",
"ABCAPITAL",
"APLLTD",
"CROMPTON",
"NAM-INDIA",
"ABB",
"TTKPRESTIG",
"SUVENPHAR",
"IDEA",
"BEL",
"SCHAEFFLER",
"ZEEL",
"RBLBANK",
"RAMCOCEM",
"GLENMARK",
"RAJESHEXPO",
"SUNDRMFAST",
"EMAMILTD",
"ENDURANCE",
"SYNGENE",
"AKZOINDIA",
"LALPATHLAB",
"HINDZINC",
"TATAPOWER",
"JKCEMENT",
"ESCORTS",
"SUNDARMFIN",
"IIFLWAM",
"IBULHSGFIN",
"CREDITACC",
"KANSAINER",
"MINDTREE",
"PAGEIND",
"CHOLAFIN",
"AJANTPHARM",
"NATCOPHARM",
"JINDALSTEL",
"TORNTPOWER",
"SAIL",
"INDIAMART",
"GAIL",
"HINDPETRO",
"JUBLFOOD",
"ADANITRANS",
"BOSCHLTD",
"IGL",
"SIEMENS",
"PETRONET",
"ICICIPRULI",
"ACC",
"MARICO",
"AMBUJACEM",
"BERGEPAINT",
"PIDILITIND",
"INDUSTOWER",
"ABBOTINDIA",
"BIOCON",
"MCDOWELL-N",
"PGHH",
"DMART",
"MRF",
"DLF",
"GODREJCP",
"COLPAL",
"HDFCAMC",
"YESBANK",
"VEDL",
"BAJAJHLDNG",
"DABUR",
"INDIGO",
"ALKEM",
"CADILAHC",
"MOTHERSUMI",
"HAVELLS",
"ADANIENT",
"UBL",
"SBICARD",
"PEL",
"BANDHANBNK",
"MUTHOOTFIN",
"TORNTPHARM",
"ICICIGI",
"LUPIN",
"LTI",
"APOLLOHOSP",
"ADANIGREEN",
"NAUKRI",
"NMDC",
"PNB",
"AUROPHARMA",
"COALINDIA",
"IOC",
"NTPC",
"ULTRACEMCO",
"BPCL",
"TATASTEEL",
"TATACONSUM",
"SUNPHARMA",
"TATAMOTORS",
"GRASIM",
"SHREECEM",
"SBIN",
"EICHERMOT",
"RELIANCE",
"BAJAJ-AUTO",
"INDUSINDBK",
"BRITANNIA",
"SBILIFE",
"UPL",
"ONGC",
"ADANIPORTS",
"POWERGRID",
"NESTLEIND",
"BHARTIARTL",
"TITAN",
"HEROMOTOCO",
"ASIANPAINT",
"MARUTI",
"ITC",
"ICICIBANK",
"HCLTECH",
"M&M",
"LT",
"INFY",
"BAJAJFINSV",
"DRREDDY",
"HDFCBANK",
"CIPLA",
"HDFCLIFE",
"TCS",
"AXISBANK",
"HINDUNILVR",
"JSWSTEEL",
"TECHM",
"BAJFINANCE",
"WIPRO",
"DIVISLAB",
"KOTAKBANK",
"HINDALCO",
"HDFC"]
#####################################################################################################
def main():
a,b = 0,0
while a < 10:
try:
pos_df = pd.DataFrame(kite.positions()["day"])
break
except:
print("can't extract position data..retrying")
a+=1
while b < 10:
try:
ord_df = pd.DataFrame(kite.orders())
break
except:
print("can't extract order data..retrying")
b+=1
for ticker in tickers:
try:
ohlc = fetchOHLC(ticker, '5minute',5)
ohlc_day = fetchOHLC(ticker, 'day',30)
ohlc_day = ohlc_day.iloc[:-1,:]
cp = candle_pattern(ohlc,ohlc_day)
# print(ticker, ": ",cp)
# if len(pos_df.columns)==0:
# # if macd_xover[ticker] == "bullish" and renko_param[ticker]["brick"] >=2:
# # placeSLOrder(ticker,"buy",quantity,renko_param[ticker]["lower_limit"])
# # if macd_xover[ticker] == "bearish" and renko_param[ticker]["brick"] <=-2:
# # placeSLOrder(ticker,"sell",quantity,renko_param[ticker]["upper_limit"])
# if len(pos_df.columns)!=0 and ticker not in pos_df["tradingsymbol"].tolist():
# # if macd_xover[ticker] == "bullish" and renko_param[ticker]["brick"] >=2:
# # placeSLOrder(ticker,"buy",quantity,renko_param[ticker]["lower_limit"])
# # if macd_xover[ticker] == "bearish" and renko_param[ticker]["brick"] <=-2:
# # placeSLOrder(ticker,"sell",quantity,renko_param[ticker]["upper_limit"])
# if len(pos_df.columns)!=0 and ticker in pos_df["tradingsymbol"].tolist():
# if pos_df[pos_df["tradingsymbol"]==ticker]["quantity"].values[0] == 0:
# if macd_xover[ticker] == "bullish" and renko_param[ticker]["brick"] >=2:
# placeSLOrder(ticker,"buy",quantity,renko_param[ticker]["lower_limit"])
# if macd_xover[ticker] == "bearish" and renko_param[ticker]["brick"] <=-2:
# placeSLOrder(ticker,"sell",quantity,renko_param[ticker]["upper_limit"])
# if pos_df[pos_df["tradingsymbol"]==ticker]["quantity"].values[0] > 0:
# order_id = ord_df.loc[(ord_df['tradingsymbol'] == ticker) & (ord_df['status'].isin(["TRIGGER PENDING","OPEN"]))]["order_id"].values[0]
# ModifyOrder(order_id,renko_param[ticker]["lower_limit"])
# if pos_df[pos_df["tradingsymbol"]==ticker]["quantity"].values[0] < 0:
# order_id = ord_df.loc[(ord_df['tradingsymbol'] == ticker) & (ord_df['status'].isin(["TRIGGER PENDING","OPEN"]))]["order_id"].values[0]
# ModifyOrder(order_id,renko_param[ticker]["upper_limit"])
except:
print("skipping for ",ticker)
# Continuous execution
# starttime=time.time()
# timeout = time.time() + 60*60*1 # 60 seconds times 60 meaning the script will run for 1 hr
# while time.time() <= timeout:
# try:
# print("passthrough at ",time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())))
# main()
# time.sleep(300 - ((time.time() - starttime) % 300.0)) # 300 second interval between each new execution
# except KeyboardInterrupt:
# print('\n\nKeyboard exception received. Exiting.')
# exit()
capital = 3000 #position size
# macd_xover = {}
# renko_param = {}
# for ticker in tickers:
# renko_param[ticker] = {"brick_size":renkoBrickSize(ticker),"upper_limit":None, "lower_limit":None,"brick":0}
# macd_xover[ticker] = None
#create KiteTicker object
kws = KiteTicker(key_secret[0],kite.access_token)
tokens = tokenLookup(instrument_df,tickers)
start_minute = dt.datetime.now().minute
def on_ticks(ws,ticks):
global start_minute
# renkoOperation(ticks)
now_minute = dt.datetime.now().minute
if abs(now_minute - start_minute) >= 5:
start_minute = now_minute
main(capital)
def on_connect(ws,response):
ws.subscribe(tokens)
ws.set_mode(ws.MODE_LTP,tokens)
def pattern_scanner():
while True:
now = dt.datetime.now()
if (now.hour >= 9):
kws.on_ticks=on_ticks
kws.on_connect=on_connect
kws.connect()
if (now.hour >= 14 and now.minute >= 30):
sys.exit() | "function to assess the trend by analyzing each candle"
df = ohlc_df.copy()
df["up"] = np.where(df["low"]>=df["low"].shift(1),1,0)
df["dn"] = np.where(df["high"]<=df["high"].shift(1),1,0)
if df["close"][-1] > df["open"][-1]:
if df["up"][-1*n:].sum() >= 0.7*n:
return "uptrend"
elif df["open"][-1] > df["close"][-1]:
if df["dn"][-1*n:].sum() >= 0.7*n:
return "downtrend"
else:
return None | identifier_body |
tex_mobject.py | from constants import *
from svg_mobject import SVGMobject
from svg_mobject import VMobjectFromSVGPathstring
from utils.config_ops import digest_config
from utils.strings import split_string_list_to_isolate_substring
from mobject.types.vectorized_mobject import VGroup
from mobject.types.vectorized_mobject import VectorizedPoint
import operator as op
# TODO list
# - Make sure if "color" is passed into TexMobject, it behaves as expected
TEX_MOB_SCALE_FACTOR = 0.05
class TexSymbol(VMobjectFromSVGPathstring):
def pointwise_become_partial(self, mobject, a, b):
# TODO, this assumes a = 0
if b < 0.5:
b = 2 * b
added_width = 1
opacity = 0
else:
added_width = 2 - 2 * b
opacity = 2 * b - 1
b = 1
VMobjectFromSVGPathstring.pointwise_become_partial(
self, mobject, 0, b
)
self.set_stroke(width=added_width + mobject.get_stroke_width())
self.set_fill(opacity=opacity)
class SingleStringTexMobject(SVGMobject):
CONFIG = {
"template_tex_file": TEMPLATE_TEX_FILE,
"stroke_width": 0,
"fill_opacity": 1.0,
"should_center": True,
"height": None,
"organize_left_to_right": False,
"propagate_style_to_family": True,
"alignment": "",
}
def __init__(self, tex_string, **kwargs):
digest_config(self, kwargs)
assert(isinstance(tex_string, str))
self.tex_string = tex_string
file_name = tex_to_svg_file(
self.get_modified_expression(tex_string),
self.template_tex_file
)
SVGMobject.__init__(self, file_name=file_name, **kwargs)
if self.height is None:
self.scale(TEX_MOB_SCALE_FACTOR)
if self.organize_left_to_right:
self.organize_submobjects_left_to_right()
def get_modified_expression(self, tex_string):
result = self.alignment + " " + tex_string
result = result.strip()
result = self.modify_special_strings(result)
return result
def modify_special_strings(self, tex):
tex = self.remove_stray_braces(tex)
if tex in ["\\over", "\\overline"]:
# fraction line needs something to be over
tex += "\\,"
if tex == "\\sqrt":
tex += "{\\quad}"
if tex == "\\substack":
tex = ""
for t1, t2 in ("\\left", "\\right"), ("\\right", "\\left"):
should_replace = reduce(op.and_, [
t1 in tex,
t2 not in tex,
len(tex) > len(t1) and tex[len(t1)] in "()[]<>|.\\"
])
if should_replace:
tex = tex.replace(t1, "\\big")
if tex == "":
tex = "\\quad"
return tex
def remove_stray_braces(self, tex):
"""
Makes TexMobject resiliant to unmatched { at start
"""
num_lefts, num_rights = [
tex.count(char)
for char in "{}"
]
if num_rights > num_lefts:
backwards = tex[::-1].replace("}", "", num_rights - num_lefts)
tex = backwards[::-1]
elif num_lefts > num_rights:
tex = tex.replace("{", "", num_lefts - num_rights)
return tex
def get_tex_string(self):
return self.tex_string
def path_string_to_mobject(self, path_string):
# Overwrite superclass default to use
# specialized path_string mobject
return TexSymbol(path_string)
def organize_submobjects_left_to_right(self):
self.sort_submobjects(lambda p: p[0])
return self
class TexMobject(SingleStringTexMobject):
CONFIG = {
"arg_separator": " ",
"substrings_to_isolate": [],
"tex_to_color_map": {},
}
def __init__(self, *tex_strings, **kwargs):
digest_config(self, kwargs)
tex_strings = self.break_up_tex_strings(tex_strings)
self.tex_strings = tex_strings
SingleStringTexMobject.__init__(
self, self.arg_separator.join(tex_strings), **kwargs
)
self.break_up_by_substrings()
self.set_color_by_tex_to_color_map(self.tex_to_color_map)
if self.organize_left_to_right:
self.organize_submobjects_left_to_right()
def break_up_tex_strings(self, tex_strings):
substrings_to_isolate = op.add(
self.substrings_to_isolate,
self.tex_to_color_map.keys()
)
split_list = split_string_list_to_isolate_substring(
tex_strings, *substrings_to_isolate
)
split_list = map(str.strip, split_list)
split_list = filter(lambda s: s != '', split_list)
return split_list
def break_up_by_substrings(self):
"""
Reorganize existing submojects one layer
deeper based on the structure of tex_strings (as a list
of tex_strings)
"""
new_submobjects = []
curr_index = 0
for tex_string in self.tex_strings:
sub_tex_mob = SingleStringTexMobject(tex_string, **self.CONFIG)
num_submobs = len(sub_tex_mob.submobjects)
new_index = curr_index + num_submobs
if num_submobs == 0:
# For cases like empty tex_strings, we want the corresponing
# part of the whole TexMobject to be a VectorizedPoint
# positioned in the right part of the TexMobject
sub_tex_mob.submobjects = [VectorizedPoint()]
last_submob_index = min(curr_index, len(self.submobjects) - 1)
sub_tex_mob.move_to(self.submobjects[last_submob_index], RIGHT)
else:
sub_tex_mob.submobjects = self.submobjects[curr_index:new_index]
new_submobjects.append(sub_tex_mob)
curr_index = new_index
self.submobjects = new_submobjects
return self
def get_parts_by_tex(self, tex, substring=True, case_sensitive=True):
def test(tex1, tex2):
if not case_sensitive:
tex1 = tex1.lower()
tex2 = tex2.lower()
if substring:
return tex1 in tex2
else:
return tex1 == tex2
return VGroup(*filter(
lambda m: test(tex, m.get_tex_string()),
self.submobjects
))
def get_part_by_tex(self, tex, **kwargs):
all_parts = self.get_parts_by_tex(tex, **kwargs)
return all_parts[0] if all_parts else None
def set_color_by_tex(self, tex, color, **kwargs):
parts_to_color = self.get_parts_by_tex(tex, **kwargs)
for part in parts_to_color:
part.set_color(color)
return self
def set_color_by_tex_to_color_map(self, texs_to_color_map, **kwargs):
for texs, color in texs_to_color_map.items():
try:
# If the given key behaves like tex_strings
texs + ''
self.set_color_by_tex(texs, color, **kwargs)
except TypeError:
# If the given key is a tuple
for tex in texs:
self.set_color_by_tex(tex, color, **kwargs)
return self
def index_of_part(self, part):
split_self = self.split()
if part not in split_self:
raise Exception("Trying to get index of part not in TexMobject")
return split_self.index(part)
def index_of_part_by_tex(self, tex, **kwargs):
part = self.get_part_by_tex(tex, **kwargs)
return self.index_of_part(part)
def split(self):
# Many old scenes assume that when you pass in a single string
# to TexMobject, it indexes across the characters.
if len(self.submobjects) == 1:
return self.submobjects[0].split()
else:
return super(TexMobject, self).split()
class TextMobject(TexMobject):
CONFIG = {
"template_tex_file": TEMPLATE_TEXT_FILE,
"alignment": "\\centering",
}
class BulletedList(TextMobject):
CONFIG = {
"buff": MED_LARGE_BUFF,
"dot_scale_factor": 2,
# Have to include because of handle_multiple_args implementation
"template_tex_file": TEMPLATE_TEXT_FILE,
"alignment": "",
}
def __init__(self, *items, **kwargs):
line_separated_items = [s + "\\\\" for s in items]
TextMobject.__init__(self, *line_separated_items, **kwargs)
for part in self:
dot = TexMobject("\\cdot").scale(self.dot_scale_factor)
dot.next_to(part[0], LEFT, SMALL_BUFF)
part.add_to_back(dot)
self.arrange_submobjects(
DOWN,
aligned_edge=LEFT,
buff=self.buff
)
def fade_all_but(self, index_or_string, opacity=0.5):
arg = index_or_string
if isinstance(arg, str):
part = self.get_part_by_tex(arg)
elif isinstance(arg, int):
part = self.submobjects[arg]
else:
raise Exception("Expected int or string, got {0}".format(arg))
for other_part in self.submobjects:
if other_part is part:
other_part.set_fill(opacity=1)
else:
other_part.set_fill(opacity=opacity)
class TexMobjectFromPresetString(TexMobject):
def __init__(self, **kwargs):
digest_config(self, kwargs)
TexMobject.__init__(self, self.tex, **kwargs)
self.set_color(self.color)
##########
def tex_hash(expression, template_tex_file):
return str(hash(expression + template_tex_file))
def | (expression, template_tex_file):
image_dir = os.path.join(
TEX_IMAGE_DIR,
tex_hash(expression, template_tex_file)
)
if os.path.exists(image_dir):
return get_sorted_image_list(image_dir)
tex_file = generate_tex_file(expression, template_tex_file)
dvi_file = tex_to_dvi(tex_file)
return dvi_to_svg(dvi_file)
def generate_tex_file(expression, template_tex_file):
result = os.path.join(
TEX_DIR,
tex_hash(expression, template_tex_file)
) + ".tex"
if not os.path.exists(result):
print("Writing \"%s\" to %s" % (
"".join(expression), result
))
with open(template_tex_file, "r") as infile:
body = infile.read()
body = body.replace(TEX_TEXT_TO_REPLACE, expression)
with open(result, "w") as outfile:
outfile.write(body)
return result
def get_null():
if os.name == "nt":
return "NUL"
return "/dev/null"
def tex_to_dvi(tex_file):
result = tex_file.replace(".tex", ".dvi")
if not os.path.exists(result):
commands = [
"latex",
"-interaction=batchmode",
"-halt-on-error",
"-output-directory=" + TEX_DIR,
tex_file,
">",
get_null()
]
exit_code = os.system(" ".join(commands))
if exit_code != 0:
latex_output = ''
log_file = tex_file.replace(".tex", ".log")
if os.path.exists(log_file):
with open(log_file, 'r') as f:
latex_output = f.read()
raise Exception(
"Latex error converting to dvi. "
"See log output above or the log file: %s" % log_file)
return result
def dvi_to_svg(dvi_file, regen_if_exists=False):
"""
Converts a dvi, which potentially has multiple slides, into a
directory full of enumerated pngs corresponding with these slides.
Returns a list of PIL Image objects for these images sorted as they
where in the dvi
"""
result = dvi_file.replace(".dvi", ".svg")
if not os.path.exists(result):
commands = [
"dvisvgm",
dvi_file,
"-n",
"-v",
"0",
"-o",
result,
">",
get_null()
]
os.system(" ".join(commands))
return result
| tex_to_svg_file | identifier_name |
tex_mobject.py | from constants import *
from svg_mobject import SVGMobject
from svg_mobject import VMobjectFromSVGPathstring
from utils.config_ops import digest_config
from utils.strings import split_string_list_to_isolate_substring
from mobject.types.vectorized_mobject import VGroup
from mobject.types.vectorized_mobject import VectorizedPoint
import operator as op
# TODO list
# - Make sure if "color" is passed into TexMobject, it behaves as expected
TEX_MOB_SCALE_FACTOR = 0.05
class TexSymbol(VMobjectFromSVGPathstring):
def pointwise_become_partial(self, mobject, a, b):
# TODO, this assumes a = 0
if b < 0.5:
b = 2 * b
added_width = 1
opacity = 0
else:
added_width = 2 - 2 * b
opacity = 2 * b - 1
b = 1
VMobjectFromSVGPathstring.pointwise_become_partial(
self, mobject, 0, b
)
self.set_stroke(width=added_width + mobject.get_stroke_width())
self.set_fill(opacity=opacity)
class SingleStringTexMobject(SVGMobject):
CONFIG = {
"template_tex_file": TEMPLATE_TEX_FILE,
"stroke_width": 0,
"fill_opacity": 1.0,
"should_center": True,
"height": None,
"organize_left_to_right": False,
"propagate_style_to_family": True,
"alignment": "",
}
def __init__(self, tex_string, **kwargs):
digest_config(self, kwargs)
assert(isinstance(tex_string, str))
self.tex_string = tex_string
file_name = tex_to_svg_file(
self.get_modified_expression(tex_string),
self.template_tex_file
)
SVGMobject.__init__(self, file_name=file_name, **kwargs)
if self.height is None:
self.scale(TEX_MOB_SCALE_FACTOR)
if self.organize_left_to_right:
self.organize_submobjects_left_to_right()
def get_modified_expression(self, tex_string):
result = self.alignment + " " + tex_string
result = result.strip()
result = self.modify_special_strings(result)
return result
def modify_special_strings(self, tex):
tex = self.remove_stray_braces(tex)
if tex in ["\\over", "\\overline"]:
# fraction line needs something to be over
tex += "\\,"
if tex == "\\sqrt":
tex += "{\\quad}"
if tex == "\\substack":
|
for t1, t2 in ("\\left", "\\right"), ("\\right", "\\left"):
should_replace = reduce(op.and_, [
t1 in tex,
t2 not in tex,
len(tex) > len(t1) and tex[len(t1)] in "()[]<>|.\\"
])
if should_replace:
tex = tex.replace(t1, "\\big")
if tex == "":
tex = "\\quad"
return tex
def remove_stray_braces(self, tex):
"""
Makes TexMobject resiliant to unmatched { at start
"""
num_lefts, num_rights = [
tex.count(char)
for char in "{}"
]
if num_rights > num_lefts:
backwards = tex[::-1].replace("}", "", num_rights - num_lefts)
tex = backwards[::-1]
elif num_lefts > num_rights:
tex = tex.replace("{", "", num_lefts - num_rights)
return tex
def get_tex_string(self):
return self.tex_string
def path_string_to_mobject(self, path_string):
# Overwrite superclass default to use
# specialized path_string mobject
return TexSymbol(path_string)
def organize_submobjects_left_to_right(self):
self.sort_submobjects(lambda p: p[0])
return self
class TexMobject(SingleStringTexMobject):
CONFIG = {
"arg_separator": " ",
"substrings_to_isolate": [],
"tex_to_color_map": {},
}
def __init__(self, *tex_strings, **kwargs):
digest_config(self, kwargs)
tex_strings = self.break_up_tex_strings(tex_strings)
self.tex_strings = tex_strings
SingleStringTexMobject.__init__(
self, self.arg_separator.join(tex_strings), **kwargs
)
self.break_up_by_substrings()
self.set_color_by_tex_to_color_map(self.tex_to_color_map)
if self.organize_left_to_right:
self.organize_submobjects_left_to_right()
def break_up_tex_strings(self, tex_strings):
substrings_to_isolate = op.add(
self.substrings_to_isolate,
self.tex_to_color_map.keys()
)
split_list = split_string_list_to_isolate_substring(
tex_strings, *substrings_to_isolate
)
split_list = map(str.strip, split_list)
split_list = filter(lambda s: s != '', split_list)
return split_list
def break_up_by_substrings(self):
"""
Reorganize existing submojects one layer
deeper based on the structure of tex_strings (as a list
of tex_strings)
"""
new_submobjects = []
curr_index = 0
for tex_string in self.tex_strings:
sub_tex_mob = SingleStringTexMobject(tex_string, **self.CONFIG)
num_submobs = len(sub_tex_mob.submobjects)
new_index = curr_index + num_submobs
if num_submobs == 0:
# For cases like empty tex_strings, we want the corresponing
# part of the whole TexMobject to be a VectorizedPoint
# positioned in the right part of the TexMobject
sub_tex_mob.submobjects = [VectorizedPoint()]
last_submob_index = min(curr_index, len(self.submobjects) - 1)
sub_tex_mob.move_to(self.submobjects[last_submob_index], RIGHT)
else:
sub_tex_mob.submobjects = self.submobjects[curr_index:new_index]
new_submobjects.append(sub_tex_mob)
curr_index = new_index
self.submobjects = new_submobjects
return self
def get_parts_by_tex(self, tex, substring=True, case_sensitive=True):
def test(tex1, tex2):
if not case_sensitive:
tex1 = tex1.lower()
tex2 = tex2.lower()
if substring:
return tex1 in tex2
else:
return tex1 == tex2
return VGroup(*filter(
lambda m: test(tex, m.get_tex_string()),
self.submobjects
))
def get_part_by_tex(self, tex, **kwargs):
all_parts = self.get_parts_by_tex(tex, **kwargs)
return all_parts[0] if all_parts else None
def set_color_by_tex(self, tex, color, **kwargs):
parts_to_color = self.get_parts_by_tex(tex, **kwargs)
for part in parts_to_color:
part.set_color(color)
return self
def set_color_by_tex_to_color_map(self, texs_to_color_map, **kwargs):
for texs, color in texs_to_color_map.items():
try:
# If the given key behaves like tex_strings
texs + ''
self.set_color_by_tex(texs, color, **kwargs)
except TypeError:
# If the given key is a tuple
for tex in texs:
self.set_color_by_tex(tex, color, **kwargs)
return self
def index_of_part(self, part):
split_self = self.split()
if part not in split_self:
raise Exception("Trying to get index of part not in TexMobject")
return split_self.index(part)
def index_of_part_by_tex(self, tex, **kwargs):
part = self.get_part_by_tex(tex, **kwargs)
return self.index_of_part(part)
def split(self):
# Many old scenes assume that when you pass in a single string
# to TexMobject, it indexes across the characters.
if len(self.submobjects) == 1:
return self.submobjects[0].split()
else:
return super(TexMobject, self).split()
class TextMobject(TexMobject):
CONFIG = {
"template_tex_file": TEMPLATE_TEXT_FILE,
"alignment": "\\centering",
}
class BulletedList(TextMobject):
CONFIG = {
"buff": MED_LARGE_BUFF,
"dot_scale_factor": 2,
# Have to include because of handle_multiple_args implementation
"template_tex_file": TEMPLATE_TEXT_FILE,
"alignment": "",
}
def __init__(self, *items, **kwargs):
line_separated_items = [s + "\\\\" for s in items]
TextMobject.__init__(self, *line_separated_items, **kwargs)
for part in self:
dot = TexMobject("\\cdot").scale(self.dot_scale_factor)
dot.next_to(part[0], LEFT, SMALL_BUFF)
part.add_to_back(dot)
self.arrange_submobjects(
DOWN,
aligned_edge=LEFT,
buff=self.buff
)
def fade_all_but(self, index_or_string, opacity=0.5):
arg = index_or_string
if isinstance(arg, str):
part = self.get_part_by_tex(arg)
elif isinstance(arg, int):
part = self.submobjects[arg]
else:
raise Exception("Expected int or string, got {0}".format(arg))
for other_part in self.submobjects:
if other_part is part:
other_part.set_fill(opacity=1)
else:
other_part.set_fill(opacity=opacity)
class TexMobjectFromPresetString(TexMobject):
def __init__(self, **kwargs):
digest_config(self, kwargs)
TexMobject.__init__(self, self.tex, **kwargs)
self.set_color(self.color)
##########
def tex_hash(expression, template_tex_file):
return str(hash(expression + template_tex_file))
def tex_to_svg_file(expression, template_tex_file):
image_dir = os.path.join(
TEX_IMAGE_DIR,
tex_hash(expression, template_tex_file)
)
if os.path.exists(image_dir):
return get_sorted_image_list(image_dir)
tex_file = generate_tex_file(expression, template_tex_file)
dvi_file = tex_to_dvi(tex_file)
return dvi_to_svg(dvi_file)
def generate_tex_file(expression, template_tex_file):
result = os.path.join(
TEX_DIR,
tex_hash(expression, template_tex_file)
) + ".tex"
if not os.path.exists(result):
print("Writing \"%s\" to %s" % (
"".join(expression), result
))
with open(template_tex_file, "r") as infile:
body = infile.read()
body = body.replace(TEX_TEXT_TO_REPLACE, expression)
with open(result, "w") as outfile:
outfile.write(body)
return result
def get_null():
if os.name == "nt":
return "NUL"
return "/dev/null"
def tex_to_dvi(tex_file):
result = tex_file.replace(".tex", ".dvi")
if not os.path.exists(result):
commands = [
"latex",
"-interaction=batchmode",
"-halt-on-error",
"-output-directory=" + TEX_DIR,
tex_file,
">",
get_null()
]
exit_code = os.system(" ".join(commands))
if exit_code != 0:
latex_output = ''
log_file = tex_file.replace(".tex", ".log")
if os.path.exists(log_file):
with open(log_file, 'r') as f:
latex_output = f.read()
raise Exception(
"Latex error converting to dvi. "
"See log output above or the log file: %s" % log_file)
return result
def dvi_to_svg(dvi_file, regen_if_exists=False):
"""
Converts a dvi, which potentially has multiple slides, into a
directory full of enumerated pngs corresponding with these slides.
Returns a list of PIL Image objects for these images sorted as they
where in the dvi
"""
result = dvi_file.replace(".dvi", ".svg")
if not os.path.exists(result):
commands = [
"dvisvgm",
dvi_file,
"-n",
"-v",
"0",
"-o",
result,
">",
get_null()
]
os.system(" ".join(commands))
return result
| tex = "" | conditional_block |
tex_mobject.py | from constants import *
from svg_mobject import SVGMobject
from svg_mobject import VMobjectFromSVGPathstring
from utils.config_ops import digest_config
from utils.strings import split_string_list_to_isolate_substring
from mobject.types.vectorized_mobject import VGroup
from mobject.types.vectorized_mobject import VectorizedPoint
import operator as op
# TODO list
# - Make sure if "color" is passed into TexMobject, it behaves as expected
TEX_MOB_SCALE_FACTOR = 0.05
class TexSymbol(VMobjectFromSVGPathstring):
def pointwise_become_partial(self, mobject, a, b):
# TODO, this assumes a = 0
if b < 0.5:
b = 2 * b
added_width = 1
opacity = 0
else:
added_width = 2 - 2 * b
opacity = 2 * b - 1
b = 1
VMobjectFromSVGPathstring.pointwise_become_partial(
self, mobject, 0, b
)
self.set_stroke(width=added_width + mobject.get_stroke_width())
self.set_fill(opacity=opacity)
class SingleStringTexMobject(SVGMobject):
CONFIG = {
"template_tex_file": TEMPLATE_TEX_FILE,
"stroke_width": 0,
"fill_opacity": 1.0,
"should_center": True,
"height": None,
"organize_left_to_right": False,
"propagate_style_to_family": True,
"alignment": "",
}
def __init__(self, tex_string, **kwargs):
digest_config(self, kwargs)
assert(isinstance(tex_string, str))
self.tex_string = tex_string
file_name = tex_to_svg_file(
self.get_modified_expression(tex_string),
self.template_tex_file
)
SVGMobject.__init__(self, file_name=file_name, **kwargs)
if self.height is None:
self.scale(TEX_MOB_SCALE_FACTOR)
if self.organize_left_to_right:
self.organize_submobjects_left_to_right()
def get_modified_expression(self, tex_string):
result = self.alignment + " " + tex_string
result = result.strip()
result = self.modify_special_strings(result)
return result
def modify_special_strings(self, tex):
tex = self.remove_stray_braces(tex)
if tex in ["\\over", "\\overline"]:
# fraction line needs something to be over
tex += "\\,"
if tex == "\\sqrt":
tex += "{\\quad}"
if tex == "\\substack":
tex = ""
for t1, t2 in ("\\left", "\\right"), ("\\right", "\\left"):
should_replace = reduce(op.and_, [
t1 in tex,
t2 not in tex,
len(tex) > len(t1) and tex[len(t1)] in "()[]<>|.\\"
])
if should_replace:
tex = tex.replace(t1, "\\big")
if tex == "":
tex = "\\quad"
return tex
def remove_stray_braces(self, tex):
"""
Makes TexMobject resiliant to unmatched { at start
"""
num_lefts, num_rights = [
tex.count(char)
for char in "{}"
]
if num_rights > num_lefts:
backwards = tex[::-1].replace("}", "", num_rights - num_lefts)
tex = backwards[::-1]
elif num_lefts > num_rights:
tex = tex.replace("{", "", num_lefts - num_rights)
return tex
def get_tex_string(self):
return self.tex_string
def path_string_to_mobject(self, path_string):
# Overwrite superclass default to use
# specialized path_string mobject
return TexSymbol(path_string)
def organize_submobjects_left_to_right(self):
self.sort_submobjects(lambda p: p[0])
return self
class TexMobject(SingleStringTexMobject):
CONFIG = {
"arg_separator": " ",
"substrings_to_isolate": [],
"tex_to_color_map": {},
}
def __init__(self, *tex_strings, **kwargs):
digest_config(self, kwargs)
tex_strings = self.break_up_tex_strings(tex_strings)
self.tex_strings = tex_strings
SingleStringTexMobject.__init__(
self, self.arg_separator.join(tex_strings), **kwargs
)
self.break_up_by_substrings()
self.set_color_by_tex_to_color_map(self.tex_to_color_map)
if self.organize_left_to_right:
self.organize_submobjects_left_to_right()
def break_up_tex_strings(self, tex_strings):
substrings_to_isolate = op.add(
self.substrings_to_isolate,
self.tex_to_color_map.keys()
)
split_list = split_string_list_to_isolate_substring(
tex_strings, *substrings_to_isolate
)
split_list = map(str.strip, split_list)
split_list = filter(lambda s: s != '', split_list)
return split_list
def break_up_by_substrings(self):
"""
Reorganize existing submojects one layer
deeper based on the structure of tex_strings (as a list | """
new_submobjects = []
curr_index = 0
for tex_string in self.tex_strings:
sub_tex_mob = SingleStringTexMobject(tex_string, **self.CONFIG)
num_submobs = len(sub_tex_mob.submobjects)
new_index = curr_index + num_submobs
if num_submobs == 0:
# For cases like empty tex_strings, we want the corresponing
# part of the whole TexMobject to be a VectorizedPoint
# positioned in the right part of the TexMobject
sub_tex_mob.submobjects = [VectorizedPoint()]
last_submob_index = min(curr_index, len(self.submobjects) - 1)
sub_tex_mob.move_to(self.submobjects[last_submob_index], RIGHT)
else:
sub_tex_mob.submobjects = self.submobjects[curr_index:new_index]
new_submobjects.append(sub_tex_mob)
curr_index = new_index
self.submobjects = new_submobjects
return self
def get_parts_by_tex(self, tex, substring=True, case_sensitive=True):
def test(tex1, tex2):
if not case_sensitive:
tex1 = tex1.lower()
tex2 = tex2.lower()
if substring:
return tex1 in tex2
else:
return tex1 == tex2
return VGroup(*filter(
lambda m: test(tex, m.get_tex_string()),
self.submobjects
))
def get_part_by_tex(self, tex, **kwargs):
all_parts = self.get_parts_by_tex(tex, **kwargs)
return all_parts[0] if all_parts else None
def set_color_by_tex(self, tex, color, **kwargs):
parts_to_color = self.get_parts_by_tex(tex, **kwargs)
for part in parts_to_color:
part.set_color(color)
return self
def set_color_by_tex_to_color_map(self, texs_to_color_map, **kwargs):
for texs, color in texs_to_color_map.items():
try:
# If the given key behaves like tex_strings
texs + ''
self.set_color_by_tex(texs, color, **kwargs)
except TypeError:
# If the given key is a tuple
for tex in texs:
self.set_color_by_tex(tex, color, **kwargs)
return self
def index_of_part(self, part):
split_self = self.split()
if part not in split_self:
raise Exception("Trying to get index of part not in TexMobject")
return split_self.index(part)
def index_of_part_by_tex(self, tex, **kwargs):
part = self.get_part_by_tex(tex, **kwargs)
return self.index_of_part(part)
def split(self):
# Many old scenes assume that when you pass in a single string
# to TexMobject, it indexes across the characters.
if len(self.submobjects) == 1:
return self.submobjects[0].split()
else:
return super(TexMobject, self).split()
class TextMobject(TexMobject):
CONFIG = {
"template_tex_file": TEMPLATE_TEXT_FILE,
"alignment": "\\centering",
}
class BulletedList(TextMobject):
CONFIG = {
"buff": MED_LARGE_BUFF,
"dot_scale_factor": 2,
# Have to include because of handle_multiple_args implementation
"template_tex_file": TEMPLATE_TEXT_FILE,
"alignment": "",
}
def __init__(self, *items, **kwargs):
line_separated_items = [s + "\\\\" for s in items]
TextMobject.__init__(self, *line_separated_items, **kwargs)
for part in self:
dot = TexMobject("\\cdot").scale(self.dot_scale_factor)
dot.next_to(part[0], LEFT, SMALL_BUFF)
part.add_to_back(dot)
self.arrange_submobjects(
DOWN,
aligned_edge=LEFT,
buff=self.buff
)
def fade_all_but(self, index_or_string, opacity=0.5):
arg = index_or_string
if isinstance(arg, str):
part = self.get_part_by_tex(arg)
elif isinstance(arg, int):
part = self.submobjects[arg]
else:
raise Exception("Expected int or string, got {0}".format(arg))
for other_part in self.submobjects:
if other_part is part:
other_part.set_fill(opacity=1)
else:
other_part.set_fill(opacity=opacity)
class TexMobjectFromPresetString(TexMobject):
def __init__(self, **kwargs):
digest_config(self, kwargs)
TexMobject.__init__(self, self.tex, **kwargs)
self.set_color(self.color)
##########
def tex_hash(expression, template_tex_file):
return str(hash(expression + template_tex_file))
def tex_to_svg_file(expression, template_tex_file):
image_dir = os.path.join(
TEX_IMAGE_DIR,
tex_hash(expression, template_tex_file)
)
if os.path.exists(image_dir):
return get_sorted_image_list(image_dir)
tex_file = generate_tex_file(expression, template_tex_file)
dvi_file = tex_to_dvi(tex_file)
return dvi_to_svg(dvi_file)
def generate_tex_file(expression, template_tex_file):
result = os.path.join(
TEX_DIR,
tex_hash(expression, template_tex_file)
) + ".tex"
if not os.path.exists(result):
print("Writing \"%s\" to %s" % (
"".join(expression), result
))
with open(template_tex_file, "r") as infile:
body = infile.read()
body = body.replace(TEX_TEXT_TO_REPLACE, expression)
with open(result, "w") as outfile:
outfile.write(body)
return result
def get_null():
if os.name == "nt":
return "NUL"
return "/dev/null"
def tex_to_dvi(tex_file):
result = tex_file.replace(".tex", ".dvi")
if not os.path.exists(result):
commands = [
"latex",
"-interaction=batchmode",
"-halt-on-error",
"-output-directory=" + TEX_DIR,
tex_file,
">",
get_null()
]
exit_code = os.system(" ".join(commands))
if exit_code != 0:
latex_output = ''
log_file = tex_file.replace(".tex", ".log")
if os.path.exists(log_file):
with open(log_file, 'r') as f:
latex_output = f.read()
raise Exception(
"Latex error converting to dvi. "
"See log output above or the log file: %s" % log_file)
return result
def dvi_to_svg(dvi_file, regen_if_exists=False):
"""
Converts a dvi, which potentially has multiple slides, into a
directory full of enumerated pngs corresponding with these slides.
Returns a list of PIL Image objects for these images sorted as they
where in the dvi
"""
result = dvi_file.replace(".dvi", ".svg")
if not os.path.exists(result):
commands = [
"dvisvgm",
dvi_file,
"-n",
"-v",
"0",
"-o",
result,
">",
get_null()
]
os.system(" ".join(commands))
return result | of tex_strings) | random_line_split |
tex_mobject.py | from constants import *
from svg_mobject import SVGMobject
from svg_mobject import VMobjectFromSVGPathstring
from utils.config_ops import digest_config
from utils.strings import split_string_list_to_isolate_substring
from mobject.types.vectorized_mobject import VGroup
from mobject.types.vectorized_mobject import VectorizedPoint
import operator as op
# TODO list
# - Make sure if "color" is passed into TexMobject, it behaves as expected
TEX_MOB_SCALE_FACTOR = 0.05
class TexSymbol(VMobjectFromSVGPathstring):
def pointwise_become_partial(self, mobject, a, b):
# TODO, this assumes a = 0
if b < 0.5:
b = 2 * b
added_width = 1
opacity = 0
else:
added_width = 2 - 2 * b
opacity = 2 * b - 1
b = 1
VMobjectFromSVGPathstring.pointwise_become_partial(
self, mobject, 0, b
)
self.set_stroke(width=added_width + mobject.get_stroke_width())
self.set_fill(opacity=opacity)
class SingleStringTexMobject(SVGMobject):
CONFIG = {
"template_tex_file": TEMPLATE_TEX_FILE,
"stroke_width": 0,
"fill_opacity": 1.0,
"should_center": True,
"height": None,
"organize_left_to_right": False,
"propagate_style_to_family": True,
"alignment": "",
}
def __init__(self, tex_string, **kwargs):
digest_config(self, kwargs)
assert(isinstance(tex_string, str))
self.tex_string = tex_string
file_name = tex_to_svg_file(
self.get_modified_expression(tex_string),
self.template_tex_file
)
SVGMobject.__init__(self, file_name=file_name, **kwargs)
if self.height is None:
self.scale(TEX_MOB_SCALE_FACTOR)
if self.organize_left_to_right:
self.organize_submobjects_left_to_right()
def get_modified_expression(self, tex_string):
result = self.alignment + " " + tex_string
result = result.strip()
result = self.modify_special_strings(result)
return result
def modify_special_strings(self, tex):
tex = self.remove_stray_braces(tex)
if tex in ["\\over", "\\overline"]:
# fraction line needs something to be over
tex += "\\,"
if tex == "\\sqrt":
tex += "{\\quad}"
if tex == "\\substack":
tex = ""
for t1, t2 in ("\\left", "\\right"), ("\\right", "\\left"):
should_replace = reduce(op.and_, [
t1 in tex,
t2 not in tex,
len(tex) > len(t1) and tex[len(t1)] in "()[]<>|.\\"
])
if should_replace:
tex = tex.replace(t1, "\\big")
if tex == "":
tex = "\\quad"
return tex
def remove_stray_braces(self, tex):
"""
Makes TexMobject resiliant to unmatched { at start
"""
num_lefts, num_rights = [
tex.count(char)
for char in "{}"
]
if num_rights > num_lefts:
backwards = tex[::-1].replace("}", "", num_rights - num_lefts)
tex = backwards[::-1]
elif num_lefts > num_rights:
tex = tex.replace("{", "", num_lefts - num_rights)
return tex
def get_tex_string(self):
return self.tex_string
def path_string_to_mobject(self, path_string):
# Overwrite superclass default to use
# specialized path_string mobject
return TexSymbol(path_string)
def organize_submobjects_left_to_right(self):
self.sort_submobjects(lambda p: p[0])
return self
class TexMobject(SingleStringTexMobject):
CONFIG = {
"arg_separator": " ",
"substrings_to_isolate": [],
"tex_to_color_map": {},
}
def __init__(self, *tex_strings, **kwargs):
digest_config(self, kwargs)
tex_strings = self.break_up_tex_strings(tex_strings)
self.tex_strings = tex_strings
SingleStringTexMobject.__init__(
self, self.arg_separator.join(tex_strings), **kwargs
)
self.break_up_by_substrings()
self.set_color_by_tex_to_color_map(self.tex_to_color_map)
if self.organize_left_to_right:
self.organize_submobjects_left_to_right()
def break_up_tex_strings(self, tex_strings):
substrings_to_isolate = op.add(
self.substrings_to_isolate,
self.tex_to_color_map.keys()
)
split_list = split_string_list_to_isolate_substring(
tex_strings, *substrings_to_isolate
)
split_list = map(str.strip, split_list)
split_list = filter(lambda s: s != '', split_list)
return split_list
def break_up_by_substrings(self):
"""
Reorganize existing submojects one layer
deeper based on the structure of tex_strings (as a list
of tex_strings)
"""
new_submobjects = []
curr_index = 0
for tex_string in self.tex_strings:
sub_tex_mob = SingleStringTexMobject(tex_string, **self.CONFIG)
num_submobs = len(sub_tex_mob.submobjects)
new_index = curr_index + num_submobs
if num_submobs == 0:
# For cases like empty tex_strings, we want the corresponing
# part of the whole TexMobject to be a VectorizedPoint
# positioned in the right part of the TexMobject
sub_tex_mob.submobjects = [VectorizedPoint()]
last_submob_index = min(curr_index, len(self.submobjects) - 1)
sub_tex_mob.move_to(self.submobjects[last_submob_index], RIGHT)
else:
sub_tex_mob.submobjects = self.submobjects[curr_index:new_index]
new_submobjects.append(sub_tex_mob)
curr_index = new_index
self.submobjects = new_submobjects
return self
def get_parts_by_tex(self, tex, substring=True, case_sensitive=True):
def test(tex1, tex2):
if not case_sensitive:
tex1 = tex1.lower()
tex2 = tex2.lower()
if substring:
return tex1 in tex2
else:
return tex1 == tex2
return VGroup(*filter(
lambda m: test(tex, m.get_tex_string()),
self.submobjects
))
def get_part_by_tex(self, tex, **kwargs):
all_parts = self.get_parts_by_tex(tex, **kwargs)
return all_parts[0] if all_parts else None
def set_color_by_tex(self, tex, color, **kwargs):
parts_to_color = self.get_parts_by_tex(tex, **kwargs)
for part in parts_to_color:
part.set_color(color)
return self
def set_color_by_tex_to_color_map(self, texs_to_color_map, **kwargs):
|
def index_of_part(self, part):
split_self = self.split()
if part not in split_self:
raise Exception("Trying to get index of part not in TexMobject")
return split_self.index(part)
def index_of_part_by_tex(self, tex, **kwargs):
part = self.get_part_by_tex(tex, **kwargs)
return self.index_of_part(part)
def split(self):
# Many old scenes assume that when you pass in a single string
# to TexMobject, it indexes across the characters.
if len(self.submobjects) == 1:
return self.submobjects[0].split()
else:
return super(TexMobject, self).split()
class TextMobject(TexMobject):
CONFIG = {
"template_tex_file": TEMPLATE_TEXT_FILE,
"alignment": "\\centering",
}
class BulletedList(TextMobject):
CONFIG = {
"buff": MED_LARGE_BUFF,
"dot_scale_factor": 2,
# Have to include because of handle_multiple_args implementation
"template_tex_file": TEMPLATE_TEXT_FILE,
"alignment": "",
}
def __init__(self, *items, **kwargs):
line_separated_items = [s + "\\\\" for s in items]
TextMobject.__init__(self, *line_separated_items, **kwargs)
for part in self:
dot = TexMobject("\\cdot").scale(self.dot_scale_factor)
dot.next_to(part[0], LEFT, SMALL_BUFF)
part.add_to_back(dot)
self.arrange_submobjects(
DOWN,
aligned_edge=LEFT,
buff=self.buff
)
def fade_all_but(self, index_or_string, opacity=0.5):
arg = index_or_string
if isinstance(arg, str):
part = self.get_part_by_tex(arg)
elif isinstance(arg, int):
part = self.submobjects[arg]
else:
raise Exception("Expected int or string, got {0}".format(arg))
for other_part in self.submobjects:
if other_part is part:
other_part.set_fill(opacity=1)
else:
other_part.set_fill(opacity=opacity)
class TexMobjectFromPresetString(TexMobject):
def __init__(self, **kwargs):
digest_config(self, kwargs)
TexMobject.__init__(self, self.tex, **kwargs)
self.set_color(self.color)
##########
def tex_hash(expression, template_tex_file):
return str(hash(expression + template_tex_file))
def tex_to_svg_file(expression, template_tex_file):
image_dir = os.path.join(
TEX_IMAGE_DIR,
tex_hash(expression, template_tex_file)
)
if os.path.exists(image_dir):
return get_sorted_image_list(image_dir)
tex_file = generate_tex_file(expression, template_tex_file)
dvi_file = tex_to_dvi(tex_file)
return dvi_to_svg(dvi_file)
def generate_tex_file(expression, template_tex_file):
result = os.path.join(
TEX_DIR,
tex_hash(expression, template_tex_file)
) + ".tex"
if not os.path.exists(result):
print("Writing \"%s\" to %s" % (
"".join(expression), result
))
with open(template_tex_file, "r") as infile:
body = infile.read()
body = body.replace(TEX_TEXT_TO_REPLACE, expression)
with open(result, "w") as outfile:
outfile.write(body)
return result
def get_null():
if os.name == "nt":
return "NUL"
return "/dev/null"
def tex_to_dvi(tex_file):
result = tex_file.replace(".tex", ".dvi")
if not os.path.exists(result):
commands = [
"latex",
"-interaction=batchmode",
"-halt-on-error",
"-output-directory=" + TEX_DIR,
tex_file,
">",
get_null()
]
exit_code = os.system(" ".join(commands))
if exit_code != 0:
latex_output = ''
log_file = tex_file.replace(".tex", ".log")
if os.path.exists(log_file):
with open(log_file, 'r') as f:
latex_output = f.read()
raise Exception(
"Latex error converting to dvi. "
"See log output above or the log file: %s" % log_file)
return result
def dvi_to_svg(dvi_file, regen_if_exists=False):
"""
Converts a dvi, which potentially has multiple slides, into a
directory full of enumerated pngs corresponding with these slides.
Returns a list of PIL Image objects for these images sorted as they
where in the dvi
"""
result = dvi_file.replace(".dvi", ".svg")
if not os.path.exists(result):
commands = [
"dvisvgm",
dvi_file,
"-n",
"-v",
"0",
"-o",
result,
">",
get_null()
]
os.system(" ".join(commands))
return result
| for texs, color in texs_to_color_map.items():
try:
# If the given key behaves like tex_strings
texs + ''
self.set_color_by_tex(texs, color, **kwargs)
except TypeError:
# If the given key is a tuple
for tex in texs:
self.set_color_by_tex(tex, color, **kwargs)
return self | identifier_body |
index.js | /*
Created by wujianxiong on 10/12/2020
*/
import React from 'react';
import { connect } from 'dva';
import { VtxGrid, VtxDatagrid, VtxExport } from 'vtx-ui';
const { VtxExport2 } = VtxExport;
import { Select, Input, Button, Icon, Tabs, message, Modal } from 'antd';
const TabPane = Tabs.TabPane;
import ViewItem from '../../components/onlineOperation/View';
import EditItem from '../../components/onlineOperation/View';
import NewItem from '../../components/onlineOperation/View';
import { handleColumns } from '../../utils/tools';
import styles from './index.less';
import { update } from 'lodash';
const Option = Select.Option;
function OnlineOperation({ dispatch, onlineOperation, accessControlM }) {
// onlineOperationM.js传递过来的参数
const { searchParams, dataSource, currentPage, pageSize, loading, total,
viewItem, editItem, newItem, selectedRowKeys } = onlineOperation;
// 获取新增删除等权限
let buttonLimit = {}
if(accessControlM['waterFactory'.toLowerCase()]) {
buttonLimit = accessControlM['waterFactory'.toLowerCase()]
}
// 更新参数
const updateState = (obj) => {
dispatch({ type: 'onlineOperation/updateState', payload: {...obj} })
}
// mock columns
let columns = []
let overViewcolumns = [
{
dataIndex: "a",
key: "a",
title: "区域"
},
{
dataIndex: "b",
key: "b",
title: "事业部"
},
{
dataIndex: "c",
key: "c",
title: "水厂"
},
{
dataIndex: "d",
key: "d",
title: "运维模式"
}
]
// mock columns
let recordColumns = [
{
dataIndex: "a",
key: "a",
title: "日期"
},
{
dataIndex: "b",
key: "b",
title: "指标(为可选项)"
},
{
dataIndex: "c",
key: "c",
title: "起始时间"
},
{
dataIndex: "d",
key: "d",
title: "结束时间"
},
{
dataIndex: "e",
key: "e",
title: "情况说明"
},
{
dataIndex: "f",
key: "f",
title: "标液浓度(mg/L)"
},
{
dataIndex: "g",
key: "g",
title: "设备结果(mg/L)"
},
{
dataIndex: "h",
key: "h",
title: "人工监测结果(mg/L)"
},
{
dataIndex: "i",
key: "i",
title: "备注"
},
]
const optCol = [['操作', 'action', {
renderButtons: ()=>{
let btns = [];
if(buttonLimit['VIEW']) {
// 查看
btns.push({ name: <Icon type='view' title='查看'/>, onClick(rowData) {
updateState({
viewItem: {
...rowData
}
})
updateViewWindow()
} })
// 编辑
btns.push({ name: <Icon type='file-edit' title='编辑'/>, onClick(rowData) {
updateState({
editItem: {
...rowData
}
})
updateEditWindow();
}})
// 删除
btns.push({ name: <Icon type='delete' title='删除'/>, onClick(rowData) {
dispatch({
type: 'onlineOperation/deleteItems',
payload: ({
ids: [rowData.id],
onSuccess: (ids) => {
let page = currentPage !=1 && ids.length === (total - (currentPage -1) * pageSize) ? currentPage -1 : currentPage;
dispatch({ type: 'onlineOperation/getList', payload: { selectedRowKeys: [], currentPage: page } });
message.success('删除成功!')
},
onError: (msg) => { message.error(msg) }
})
})
}})
}
return btns
}, width: '150px'
}]]
columns = searchParams.dataFillType === 'produce' ? overViewcolumns : recordColumns
columns = columns.concat(handleColumns(optCol))
// 表格数据项配置
let vtxDatagridProps = {
columns,
dataSource,
indexColumn: true,
startIndex: (currentPage-1) * pageSize + 1,
autoFit: true,
loading,
pagination: {
showSizeChanger: true,
pageSizeOptions: ['10', '20', '30', '40', '50'],
showQuickJumper: true,
current: currentPage,
total,
pageSize,
showTotal: total => `合计 ${total} 条`
},
onChange(pagination, filters, sorter) {
dispatch({ type: 'onlineOperation', payload: {
currentPage: pagination.current,
pageSize: pagination.pageSize
}})
}
}
// 选择表格行
vtxDatagridProps = _.assign(vtxDatagridProps, {
rowSelection: {
type: 'checkbox',
selectedRowKeys,
onChange(selectedRowKeys, selectedRows) {
updateState({ selectedRowKeys })
}
}
})
// 获取表格数据
const getList = () => {
dispatch({ type: 'onlineOperation/updateQueryParams' });
dispatch({ type: 'onlineOperation/getList' });
}
// 查询
const vtxGridParams = {
nameProps: {
value: searchParams.name,
placeholder: '请输入名称',
allowClear: true,
style: {
width: '100%'
},
onChange(value) {
updateState({
searchParams: {
name: value
}
})
},
onPressEnter() {
getList()
},
},
query() {
getList()
},
clear() {
dispatch({ type: 'onlineOperation/initQueryParams'});
dispatch({ type: 'onlineOperation/getList' });
}
}
// mock下拉数据
const waterFactoryList = [ {
| d9a9e3750a34e91",
"value" : "测试水厂0001",
"name" : "测试水厂0001",
"parentId" : "e6a34203250b471c969fba90fccc6c87"
}, {
"id" : "c3e5d28d0da94357ad3625af1e58f342",
"value" : "水厂测试门户1",
"name" : "水厂测试门户1",
"parentId" : "e6a34203250b471c969fba90fccc6c87"
}, {
"id" : "757bc4e466b745b1adbfc39b887d4038",
"value" : "水厂5",
"name" : "水厂5",
"parentId" : "c95fe0e685a54030970e90e17864ace1"
}]
// 模态框名称
const modalTitle = searchParams.dataFillType==='produce' ? '在线概况' : '在线异常运维记录'
// 查看窗口
const updateViewWindow = (status = true) => {
updateState({
viewItem: {
visible: status
}
})
}
const viewItemProps = {
modalProps: {
title: `${modalTitle} > 查看`,
visible: viewItem.visible,
onCancel: () => updateViewWindow(false),
width: 1000
},
contentProps: {
...viewItem,
btnType: 'view'
}
}
// 编辑窗口
const updateEditWindow = (status = true) => {
updateState({
editItem: {
visible: status
}
})
}
const editItemProps = {
modalProps: {
title: `${modalTitle} > 编辑`,
visible: editItem.visible,
onCancel: () => updateEditWindow(false),
width: 1000
},
contentProps: {
...editItem,
btnType: 'edit'
}
}
// 新增窗口
const updateNewWindow = (status = true) => {
updateState({
newItem: {
visible: status
}
})
if(!status) {
dispatch({ type: 'onlineOperation/initNewItem' })
}
}
const newItemProps = {
modalProps: {
title: `${modalTitle} > 新增`,
visible: newItem.visible,
onCancel: () => updateNewWindow(false),
width: 1000
},
contentProps: {
...newItem,
btnType: 'add'
}
}
// 表格外功能-删除
const deleteItems = () => {
Modal.confirm({
content: `确定删除选中的${selectedRowKeys.length}条数据吗?`,
okText: '确定',
cancelText: '取消',
onOk() {
dispatch({
type: 'onlineOperation/deleteItems',
payload: {
ids: selectedRowKeys,
onSuccess: (ids) => {
let page = currentPage != 1 && ids.length === (total - (currentPage - 1) * pageSize) ? currentPage - 1 : currentPage;
dispatch({
type: 'onlineOperation/getList',
payload: {
selectedRowKeys: [],
currentPage: page
}
})
message.success('删除成功');
},
onError: (msg) => { message.error(msg); }
}
})
}
})
}
// 表格外功能-导出
let downloadURL = searchParams.dataFillType==='produce'?'/cloud/gzzhsw/api/cp/basic/pipelineNetPerformance/exportDataExcel':'/cloud/gzzhsw/api/cp/basic/sewageFactory/exportDataExcel'
const exportProps = {
downloadURL,
getExportParams(exportType) {
const param = {
tenantId: VtxUtil.getUrlParam('tenantId'),
};
switch (exportType) {
case 'rows':
if (selectedRowKeys.length === 0) {
message.info('需要选择一项进行导出');
return;
}
param.isAll = false;
param.ids = selectedRowKeys.join();
break;
case 'page':
if (dataSource.length === 0) {
message.info('当前页没有数据');
return;
}
const ids = dataSource.map((item, index) => {
return item.id;
});
param.isAll = false;
param.ids = ids.join();
break;
case 'all':
if (total === 0) {
message.info('暂无数据可进行导出');
return;
}
param.isAll = true;
}
return param
}
}
return (
<div className={styles.normal}>
{/* 条件查询 */}
<VtxGrid
titles={['水厂名称', '运维模式']}
gridweight={[1,1]}
confirm={vtxGridParams.query}
clear={vtxGridParams.clear}
>
<Select {...vtxGridParams.nameProps}>
{waterFactoryList.map(item=>{
return (
<Option key={item.id}>{item.name}</Option>
)
})}
</Select>
<Select {...vtxGridParams.nameProps}>
{waterFactoryList.map(item=>{
return (
<Option key={item.id}>{item.name}</Option>
)
})}
</Select>
</VtxGrid>
{/* tabs */}
<div className={styles.normal_body}>
<div className={styles.tabContainer}>
<Tabs activeKey={searchParams.dataFillType} onChange={(key)=>{
updateState({ searchParams: {dataFillType: key}})
getList()}
}>
<TabPane tab='在线概况' key='produce'></TabPane>
<TabPane tab='在线异常运维记录' key='assay'></TabPane>
</Tabs>
</div>
{/* 功能按钮 */}
<div className={styles.buttonContainer}>
{buttonLimit['ADD']&&<Button icon="file-add" onClick={() => updateNewWindow()}>新增</Button>}
{buttonLimit['DELETE']&&<Button icon="delete" onClick={deleteItems}>删除</Button>}
{buttonLimit['DELETE'] &&<VtxExport2 {...exportProps}><Button icon="export">导出</Button></VtxExport2>}
</div>
{/* 表格 */}
<div className={styles.tableContainer}>
<VtxDatagrid {...vtxDatagridProps}></VtxDatagrid>
</div>
</div>
{/* 操作 */}
{/* 查看 */}
{viewItem.visible && <ViewItem {...viewItemProps}/>}
{/* 编辑 */}
{editItem.visible && <EditItem {...editItemProps}/>}
{/* 新增 */}
{newItem.visible && <NewItem {...newItemProps}/>}
</div>
)
}
export default connect( ({onlineOperation, accessControlM}) => ({onlineOperation, accessControlM}) )(OnlineOperation); | "id" : "08ffc34f7b7444249 | identifier_body |
index.js | /*
Created by wujianxiong on 10/12/2020
*/
import React from 'react';
import { connect } from 'dva';
import { VtxGrid, VtxDatagrid, VtxExport } from 'vtx-ui';
const { VtxExport2 } = VtxExport;
import { Select, Input, Button, Icon, Tabs, message, Modal } from 'antd';
const TabPane = Tabs.TabPane;
import ViewItem from '../../components/onlineOperation/View';
import EditItem from '../../components/onlineOperation/View';
import NewItem from '../../components/onlineOperation/View';
import { handleColumns } from '../../utils/tools';
import styles from './index.less';
import { update } from 'lodash';
const Option = Select.Option;
function OnlineOperation({ dispatch, onlineOperation, accessControlM }) {
// onlineOperationM.js传递过来的参数
const { searchParams, dataSource, currentPage, pageSize, loading, total,
viewItem, editItem, newItem, selectedRowKeys } = onlineOperation;
// 获取新增删除等权限
let buttonLimit = {}
if(accessControlM['waterFactory'.toLowerCase()]) {
buttonLimit = accessControlM['waterFactory'.toLowerCase()]
}
// 更新参数
const updateState = (obj) => {
dispatch({ type: 'onlineOperation/updateState', payload: {...obj} })
}
// mock columns
let columns = []
let overViewcolumns = [
{
dataIndex: "a",
key: "a",
title: "区域"
},
{
dataIndex: "b",
key: "b",
title: "事业部"
},
{
dataIndex: "c",
key: "c",
title: "水厂"
},
{
dataIndex: "d",
key: "d",
title: "运维模式"
}
]
// mock columns
let recordColumns = [
{
dataIndex: "a",
key: "a",
title: "日期"
},
{
dataIndex: "b",
key: "b",
title: "指标(为可选项)"
},
{
dataIndex: "c",
key: "c",
title: "起始时间"
},
{
dataIndex: "d",
key: "d",
title: "结束时间"
},
{
dataIndex: "e",
key: "e",
title: "情况说明"
},
{
dataIndex: "f",
key: "f",
title: "标液浓度(mg/L)"
},
{
dataIndex: "g",
key: "g",
title: "设备结果(mg/L)"
},
{
dataIndex: "h",
key: "h",
title: "人工监测结果(mg/L)"
},
{
dataIndex: "i",
key: "i",
title: "备注"
},
]
const optCol = [['操作', 'action', {
renderButtons: ()=>{
let btns = [];
if(buttonLimit['VIEW']) {
// 查看
btns.push({ name: <Icon type='view' title='查看'/>, onClick(rowData) {
updateState({
viewItem: {
...rowData
}
})
updateViewWindow()
} })
// 编辑
btns.push({ name: <Icon type='file-edit' title='编辑'/>, onClick(rowData) {
updateState({
editItem: {
...rowData
}
})
updateEditWindow();
}})
// 删除
btns.push({ name: <Icon type='delete' title='删除'/>, onClick(rowData) {
dispatch({
type: 'onlineOperation/deleteItems',
payload: ({
ids: [rowData.id],
onSuccess: (ids) => {
let page = currentPage !=1 && ids.length === (total - (currentPage -1) * pageSize) ? currentPage -1 : currentPage;
dispatch({ type: 'onlineOperation/getList', payload: { selectedRowKeys: [], currentPage: page } });
message.success('删除成功!')
},
onError: (msg) => { message.error(msg) }
})
})
}})
}
return btns
}, width: '150px'
}]]
columns = searchParams.dataFillType === 'produce' ? overViewcolumns : recordColumns
columns = columns.concat(handleColumns(optCol))
// 表格数据项配置
let vtxDatagridProps = {
columns,
dataSource,
indexColumn: true,
startIndex: (currentPage-1) * pageSize + 1,
autoFit: true,
loading,
pagination: {
showSizeChanger: true,
pageSizeOptions: ['10', '20', '30', '40', '50'],
showQuickJumper: true,
current: currentPage,
total,
pageSize,
showTotal: total => `合计 ${total} 条`
},
onChange(pagination, filters, sorter) {
dispatch({ type: 'onlineOperation', payload: {
currentPage: pagination.current,
pageSize: pagination.pageSize
}})
}
}
// 选择表格行
vtxDatagridProps = _.assign(vtxDatagridProps, {
rowSelection: {
type: 'checkbox',
selectedRowKeys,
onChange(selectedRowKeys, selectedRows) {
updateState({ selectedRowKeys })
}
}
})
// 获取表格数据
const getList = () => {
dispatch({ type: 'onlineOperation/updateQueryParams' });
dispatch({ type: 'onlineOperation/getList' });
}
// 查询
const vtxGridParams = {
nameProps: {
value: searchParams.name,
placeholder: '请输入名称',
allowClear: true,
style: {
width: '100%'
},
onChange(value) {
updateState({
searchParams: {
name: value
}
})
},
onPressEnter() {
getList()
},
},
query() {
getList()
},
clear() {
dispatch({ type: 'onlineOperation/initQueryParams'});
dispatch({ type: 'onlineOperation/getList' });
}
}
// mock下拉数据
const waterFactoryList = [ {
"id" : "08ffc34f7b7444249d9a9e3750a34e91",
"value" : "测试水厂0001",
"name" : "测试水厂0001",
"parentId" : "e6a34203250b471c969fba90fccc6c87"
}, {
"id" : "c3e5d28d0da94357ad3625af1e58f342",
"value" : "水厂测试门户1",
"name" : "水厂测试门户1",
"parentId" : "e6a34203250b471c969fba90fccc6c87"
}, {
"id" : "757bc4e466b745b1adbfc39b887d4038",
"value" : "水厂5",
"name" : "水厂5",
"parentId" : "c95fe0e685a54030970e90e17864ace1"
}]
// 模态框名称
const modalTitle = searchParams.dataFillType==='produce' ? '在线概况' : '在线异常运维记录'
// 查看窗口
const updateViewWindow = (status = true) => {
updateState({
viewItem: {
visible: status
}
})
}
const viewItemProps = {
modalProps: {
title: `${modalTitle} > 查看`,
visible: viewItem.visible,
onCancel: () => updateViewWindow(false),
width: 1000
},
contentProps: {
...viewItem,
btnType: 'view'
}
}
// 编辑窗口
const updateEditWindow = (status = true) => {
updateState({
editItem: {
visible: status
}
})
}
const editItemProps = {
modalProps: {
title: `${modalTitle} > 编辑`,
visible: editItem.visible,
onCancel: () => updateEditWindow(false),
width: 1000
},
contentProps: {
...editItem,
btnType: 'edit'
}
}
// 新增窗口
const updateNewWindow = (status = true) => {
updateState({
newItem: {
visible: status
}
})
if(!status) {
dispatch({ type: 'onlineOperation/initNewItem' })
}
}
const newItemProps = {
modalProps: {
title: `${modalTitle} > 新增`,
visible: newItem.visible,
onCancel: () => updateNewWindow(false),
width: 1000
},
contentProps: {
...newItem,
btnType: 'add'
}
}
// 表格外功能-删除
const deleteItems = () => {
Modal.confirm({
content: `确定删除选中的${selectedRowKeys.length}条数据吗?`,
okText: '确定',
cancelText: '取消',
onOk() {
dispatch({
type: 'onlineOperation/deleteItems',
payload: {
ids: selectedRowKeys,
onSuccess: (ids) => {
let page = currentPage != 1 && ids.length === (total - (currentPage - 1) * pageSize) ? currentPage - 1 : currentPage;
dispatch({
type: 'onlineOperation/getList',
payload: {
selectedRowKeys: [],
currentPage: page
}
})
message.success('删除成功');
},
onError: (msg) => { message.error(msg); }
}
})
}
})
}
// 表格外功能-导出
let downloadURL = searchParams.dataFillType==='produce'?'/cloud/gzzhsw/api/cp/basic/pipelineNetPerformance/exportDataExcel':'/cloud/gzzhsw/api/cp/basic/sewageFactory/exportDataExcel'
const exportProps = {
downloadURL,
getExportParams(exportType) {
const param = {
tenantId: VtxUtil.getUrlParam('tenantId'),
};
switch (exportType) {
case 'rows':
if (selectedRowKeys.length === 0) {
message.info('需要选择一项进行导出');
return;
}
param.isAll = false;
param.ids = selectedRowKeys.join();
break;
case 'page':
if (dataSource.length === 0) {
message.info('当前页没有数据');
return;
}
const ids = dataSource.map((item, index) => {
return item.id;
});
param.isAll = false;
param.ids = ids.join();
break;
case 'all':
if (total === 0) {
message.info('暂无数据可进 | return param
}
}
return (
<div className={styles.normal}>
{/* 条件查询 */}
<VtxGrid
titles={['水厂名称', '运维模式']}
gridweight={[1,1]}
confirm={vtxGridParams.query}
clear={vtxGridParams.clear}
>
<Select {...vtxGridParams.nameProps}>
{waterFactoryList.map(item=>{
return (
<Option key={item.id}>{item.name}</Option>
)
})}
</Select>
<Select {...vtxGridParams.nameProps}>
{waterFactoryList.map(item=>{
return (
<Option key={item.id}>{item.name}</Option>
)
})}
</Select>
</VtxGrid>
{/* tabs */}
<div className={styles.normal_body}>
<div className={styles.tabContainer}>
<Tabs activeKey={searchParams.dataFillType} onChange={(key)=>{
updateState({ searchParams: {dataFillType: key}})
getList()}
}>
<TabPane tab='在线概况' key='produce'></TabPane>
<TabPane tab='在线异常运维记录' key='assay'></TabPane>
</Tabs>
</div>
{/* 功能按钮 */}
<div className={styles.buttonContainer}>
{buttonLimit['ADD']&&<Button icon="file-add" onClick={() => updateNewWindow()}>新增</Button>}
{buttonLimit['DELETE']&&<Button icon="delete" onClick={deleteItems}>删除</Button>}
{buttonLimit['DELETE'] &&<VtxExport2 {...exportProps}><Button icon="export">导出</Button></VtxExport2>}
</div>
{/* 表格 */}
<div className={styles.tableContainer}>
<VtxDatagrid {...vtxDatagridProps}></VtxDatagrid>
</div>
</div>
{/* 操作 */}
{/* 查看 */}
{viewItem.visible && <ViewItem {...viewItemProps}/>}
{/* 编辑 */}
{editItem.visible && <EditItem {...editItemProps}/>}
{/* 新增 */}
{newItem.visible && <NewItem {...newItemProps}/>}
</div>
)
}
export default connect( ({onlineOperation, accessControlM}) => ({onlineOperation, accessControlM}) )(OnlineOperation); | 行导出');
return;
}
param.isAll = true;
}
| conditional_block |
index.js | /*
Created by wujianxiong on 10/12/2020
*/
import React from 'react';
import { connect } from 'dva';
import { VtxGrid, VtxDatagrid, VtxExport } from 'vtx-ui';
const { VtxExport2 } = VtxExport;
import { Select, Input, Button, Icon, Tabs, message, Modal } from 'antd';
const TabPane = Tabs.TabPane;
import ViewItem from '../../components/onlineOperation/View';
import EditItem from '../../components/onlineOperation/View';
import NewItem from '../../components/onlineOperation/View';
import { handleColumns } from '../../utils/tools';
import styles from './index.less';
import { update } from 'lodash';
const Option = Select.Option;
function OnlineOperation({ dispatch, onlineOperation, accessControlM }) {
// onlineOperationM.js传递过来的参数
const { searchParams, dataSource, currentPage, pageSize, loading, total,
viewItem, editItem, newItem, selectedRowKeys } = onlineOperation;
// 获取新增删除等权限
let buttonLimit = {}
if(accessControlM['waterFactory'.toLowerCase()]) {
buttonLimit = accessControlM['waterFactory'.toLowerCase()]
}
// 更新参数
const updateState = (obj) => {
dispatch({ type: 'onlineOperation/updateState', payload: {...obj} })
}
// mock columns
let columns = []
let overViewcolumns = [
{
dataIndex: "a",
key: "a",
title: "区域"
},
{
dataIndex: "b",
key: "b",
title: "事业部"
},
{
dataIndex: "c",
key: "c",
title: "水厂"
},
{
dataIndex: "d",
key: "d",
title: "运维模式"
}
]
// mock columns
let recordColumns = [
{
dataIndex: "a",
key: "a",
title: "日期"
},
{
dataIndex: "b",
key: "b",
title: "指标(为可选项)"
},
{
dataIndex: "c",
key: "c",
title: "起始时间"
},
{
dataIndex: "d",
key: "d",
title: "结束时间"
},
{
dataIndex: "e",
key: "e",
title: "情况说明"
},
{
dataIndex: "f",
key: "f",
title: "标液浓度(mg/L)"
},
{
dataIndex: "g",
key: "g",
title: "设备结果(mg/L)"
},
{
dataIndex: "h",
key: "h",
title: "人工监测结果(mg/L)"
},
{
dataIndex: "i",
key: "i",
title: "备注"
},
]
const optCol = [['操作', 'action', {
renderButtons: ()=>{
let btns = [];
if(buttonLimit['VIEW']) {
// 查看
btns.push({ name: <Icon type='view' title='查看'/>, onClick(rowData) {
updateState({
viewItem: {
...rowData
}
})
updateViewWindow()
} })
// 编辑
btns.push({ name: <Icon type='file-edit' title='编辑'/>, onClick(rowData) {
updateState({
editItem: {
...rowData
}
})
updateEditWindow();
}})
// 删除
btns.push({ name: <Icon type='delete' title='删除'/>, onClick(rowData) {
dispatch({
type: 'onlineOperation/deleteItems',
payload: ({
ids | ata.id],
onSuccess: (ids) => {
let page = currentPage !=1 && ids.length === (total - (currentPage -1) * pageSize) ? currentPage -1 : currentPage;
dispatch({ type: 'onlineOperation/getList', payload: { selectedRowKeys: [], currentPage: page } });
message.success('删除成功!')
},
onError: (msg) => { message.error(msg) }
})
})
}})
}
return btns
}, width: '150px'
}]]
columns = searchParams.dataFillType === 'produce' ? overViewcolumns : recordColumns
columns = columns.concat(handleColumns(optCol))
// 表格数据项配置
let vtxDatagridProps = {
columns,
dataSource,
indexColumn: true,
startIndex: (currentPage-1) * pageSize + 1,
autoFit: true,
loading,
pagination: {
showSizeChanger: true,
pageSizeOptions: ['10', '20', '30', '40', '50'],
showQuickJumper: true,
current: currentPage,
total,
pageSize,
showTotal: total => `合计 ${total} 条`
},
onChange(pagination, filters, sorter) {
dispatch({ type: 'onlineOperation', payload: {
currentPage: pagination.current,
pageSize: pagination.pageSize
}})
}
}
// 选择表格行
vtxDatagridProps = _.assign(vtxDatagridProps, {
rowSelection: {
type: 'checkbox',
selectedRowKeys,
onChange(selectedRowKeys, selectedRows) {
updateState({ selectedRowKeys })
}
}
})
// 获取表格数据
const getList = () => {
dispatch({ type: 'onlineOperation/updateQueryParams' });
dispatch({ type: 'onlineOperation/getList' });
}
// 查询
const vtxGridParams = {
nameProps: {
value: searchParams.name,
placeholder: '请输入名称',
allowClear: true,
style: {
width: '100%'
},
onChange(value) {
updateState({
searchParams: {
name: value
}
})
},
onPressEnter() {
getList()
},
},
query() {
getList()
},
clear() {
dispatch({ type: 'onlineOperation/initQueryParams'});
dispatch({ type: 'onlineOperation/getList' });
}
}
// mock下拉数据
const waterFactoryList = [ {
"id" : "08ffc34f7b7444249d9a9e3750a34e91",
"value" : "测试水厂0001",
"name" : "测试水厂0001",
"parentId" : "e6a34203250b471c969fba90fccc6c87"
}, {
"id" : "c3e5d28d0da94357ad3625af1e58f342",
"value" : "水厂测试门户1",
"name" : "水厂测试门户1",
"parentId" : "e6a34203250b471c969fba90fccc6c87"
}, {
"id" : "757bc4e466b745b1adbfc39b887d4038",
"value" : "水厂5",
"name" : "水厂5",
"parentId" : "c95fe0e685a54030970e90e17864ace1"
}]
// 模态框名称
const modalTitle = searchParams.dataFillType==='produce' ? '在线概况' : '在线异常运维记录'
// 查看窗口
const updateViewWindow = (status = true) => {
updateState({
viewItem: {
visible: status
}
})
}
const viewItemProps = {
modalProps: {
title: `${modalTitle} > 查看`,
visible: viewItem.visible,
onCancel: () => updateViewWindow(false),
width: 1000
},
contentProps: {
...viewItem,
btnType: 'view'
}
}
// 编辑窗口
const updateEditWindow = (status = true) => {
updateState({
editItem: {
visible: status
}
})
}
const editItemProps = {
modalProps: {
title: `${modalTitle} > 编辑`,
visible: editItem.visible,
onCancel: () => updateEditWindow(false),
width: 1000
},
contentProps: {
...editItem,
btnType: 'edit'
}
}
// 新增窗口
const updateNewWindow = (status = true) => {
updateState({
newItem: {
visible: status
}
})
if(!status) {
dispatch({ type: 'onlineOperation/initNewItem' })
}
}
const newItemProps = {
modalProps: {
title: `${modalTitle} > 新增`,
visible: newItem.visible,
onCancel: () => updateNewWindow(false),
width: 1000
},
contentProps: {
...newItem,
btnType: 'add'
}
}
// 表格外功能-删除
const deleteItems = () => {
Modal.confirm({
content: `确定删除选中的${selectedRowKeys.length}条数据吗?`,
okText: '确定',
cancelText: '取消',
onOk() {
dispatch({
type: 'onlineOperation/deleteItems',
payload: {
ids: selectedRowKeys,
onSuccess: (ids) => {
let page = currentPage != 1 && ids.length === (total - (currentPage - 1) * pageSize) ? currentPage - 1 : currentPage;
dispatch({
type: 'onlineOperation/getList',
payload: {
selectedRowKeys: [],
currentPage: page
}
})
message.success('删除成功');
},
onError: (msg) => { message.error(msg); }
}
})
}
})
}
// 表格外功能-导出
let downloadURL = searchParams.dataFillType==='produce'?'/cloud/gzzhsw/api/cp/basic/pipelineNetPerformance/exportDataExcel':'/cloud/gzzhsw/api/cp/basic/sewageFactory/exportDataExcel'
const exportProps = {
downloadURL,
getExportParams(exportType) {
const param = {
tenantId: VtxUtil.getUrlParam('tenantId'),
};
switch (exportType) {
case 'rows':
if (selectedRowKeys.length === 0) {
message.info('需要选择一项进行导出');
return;
}
param.isAll = false;
param.ids = selectedRowKeys.join();
break;
case 'page':
if (dataSource.length === 0) {
message.info('当前页没有数据');
return;
}
const ids = dataSource.map((item, index) => {
return item.id;
});
param.isAll = false;
param.ids = ids.join();
break;
case 'all':
if (total === 0) {
message.info('暂无数据可进行导出');
return;
}
param.isAll = true;
}
return param
}
}
return (
<div className={styles.normal}>
{/* 条件查询 */}
<VtxGrid
titles={['水厂名称', '运维模式']}
gridweight={[1,1]}
confirm={vtxGridParams.query}
clear={vtxGridParams.clear}
>
<Select {...vtxGridParams.nameProps}>
{waterFactoryList.map(item=>{
return (
<Option key={item.id}>{item.name}</Option>
)
})}
</Select>
<Select {...vtxGridParams.nameProps}>
{waterFactoryList.map(item=>{
return (
<Option key={item.id}>{item.name}</Option>
)
})}
</Select>
</VtxGrid>
{/* tabs */}
<div className={styles.normal_body}>
<div className={styles.tabContainer}>
<Tabs activeKey={searchParams.dataFillType} onChange={(key)=>{
updateState({ searchParams: {dataFillType: key}})
getList()}
}>
<TabPane tab='在线概况' key='produce'></TabPane>
<TabPane tab='在线异常运维记录' key='assay'></TabPane>
</Tabs>
</div>
{/* 功能按钮 */}
<div className={styles.buttonContainer}>
{buttonLimit['ADD']&&<Button icon="file-add" onClick={() => updateNewWindow()}>新增</Button>}
{buttonLimit['DELETE']&&<Button icon="delete" onClick={deleteItems}>删除</Button>}
{buttonLimit['DELETE'] &&<VtxExport2 {...exportProps}><Button icon="export">导出</Button></VtxExport2>}
</div>
{/* 表格 */}
<div className={styles.tableContainer}>
<VtxDatagrid {...vtxDatagridProps}></VtxDatagrid>
</div>
</div>
{/* 操作 */}
{/* 查看 */}
{viewItem.visible && <ViewItem {...viewItemProps}/>}
{/* 编辑 */}
{editItem.visible && <EditItem {...editItemProps}/>}
{/* 新增 */}
{newItem.visible && <NewItem {...newItemProps}/>}
</div>
)
}
export default connect( ({onlineOperation, accessControlM}) => ({onlineOperation, accessControlM}) )(OnlineOperation); | : [rowD | identifier_name |
index.js | /*
Created by wujianxiong on 10/12/2020
*/
import React from 'react';
import { connect } from 'dva';
import { VtxGrid, VtxDatagrid, VtxExport } from 'vtx-ui';
const { VtxExport2 } = VtxExport;
import { Select, Input, Button, Icon, Tabs, message, Modal } from 'antd';
const TabPane = Tabs.TabPane;
import ViewItem from '../../components/onlineOperation/View';
import EditItem from '../../components/onlineOperation/View';
import NewItem from '../../components/onlineOperation/View';
import { handleColumns } from '../../utils/tools';
import styles from './index.less';
import { update } from 'lodash';
const Option = Select.Option;
function OnlineOperation({ dispatch, onlineOperation, accessControlM }) {
// onlineOperationM.js传递过来的参数
const { searchParams, dataSource, currentPage, pageSize, loading, total,
viewItem, editItem, newItem, selectedRowKeys } = onlineOperation;
// 获取新增删除等权限
let buttonLimit = {}
if(accessControlM['waterFactory'.toLowerCase()]) {
buttonLimit = accessControlM['waterFactory'.toLowerCase()]
}
// 更新参数
const updateState = (obj) => {
dispatch({ type: 'onlineOperation/updateState', payload: {...obj} })
}
// mock columns
let columns = []
let overViewcolumns = [
{
dataIndex: "a",
key: "a",
title: "区域"
},
{
dataIndex: "b",
key: "b",
title: "事业部"
},
{
dataIndex: "c",
key: "c",
title: "水厂"
},
{
dataIndex: "d",
key: "d",
title: "运维模式"
}
]
// mock columns
let recordColumns = [
{
dataIndex: "a",
key: "a",
title: "日期"
},
{
dataIndex: "b",
key: "b",
title: "指标(为可选项)"
},
{
dataIndex: "c",
key: "c",
title: "起始时间"
},
{
dataIndex: "d",
key: "d",
title: "结束时间"
},
{
dataIndex: "e",
key: "e",
title: "情况说明"
},
{
dataIndex: "f",
key: "f",
title: "标液浓度(mg/L)"
},
{
dataIndex: "g",
key: "g",
title: "设备结果(mg/L)"
},
{
dataIndex: "h",
key: "h",
title: "人工监测结果(mg/L)"
},
{
dataIndex: "i",
key: "i",
title: "备注"
},
]
const optCol = [['操作', 'action', {
renderButtons: ()=>{
let btns = [];
if(buttonLimit['VIEW']) {
// 查看
btns.push({ name: <Icon type='view' title='查看'/>, onClick(rowData) {
updateState({
viewItem: {
...rowData
}
})
updateViewWindow()
} })
// 编辑
btns.push({ name: <Icon type='file-edit' title='编辑'/>, onClick(rowData) {
updateState({
editItem: {
...rowData
}
})
updateEditWindow();
}})
// 删除
btns.push({ name: <Icon type='delete' title='删除'/>, onClick(rowData) {
dispatch({
type: 'onlineOperation/deleteItems',
payload: ({
ids: [rowData.id],
onSuccess: (ids) => {
let page = currentPage !=1 && ids.length === (total - (currentPage -1) * pageSize) ? currentPage -1 : currentPage;
dispatch({ type: 'onlineOperation/getList', payload: { selectedRowKeys: [], currentPage: page } });
message.success('删除成功!')
},
onError: (msg) => { message.error(msg) }
})
})
}})
}
return btns
}, width: '150px'
}]]
columns = searchParams.dataFillType === 'produce' ? overViewcolumns : recordColumns
columns = columns.concat(handleColumns(optCol))
// 表格数据项配置
let vtxDatagridProps = {
columns,
dataSource,
indexColumn: true,
startIndex: (currentPage-1) * pageSize + 1,
autoFit: true,
loading,
pagination: {
showSizeChanger: true,
pageSizeOptions: ['10', '20', '30', '40', '50'],
showQuickJumper: true,
current: currentPage,
total,
pageSize,
showTotal: total => `合计 ${total} 条`
},
onChange(pagination, filters, sorter) {
dispatch({ type: 'onlineOperation', payload: {
currentPage: pagination.current,
pageSize: pagination.pageSize
}})
}
}
// 选择表格行
vtxDatagridProps = _.assign(vtxDatagridProps, {
rowSelection: {
type: 'checkbox',
selectedRowKeys,
onChange(selectedRowKeys, selectedRows) {
updateState({ selectedRowKeys })
}
}
})
// 获取表格数据
const getList = () => {
dispatch({ type: 'onlineOperation/updateQueryParams' });
dispatch({ type: 'onlineOperation/getList' });
}
// 查询
const vtxGridParams = {
nameProps: {
value: searchParams.name,
placeholder: '请输入名称',
allowClear: true,
style: {
width: '100%'
},
onChange(value) {
updateState({
searchParams: {
name: value
}
})
},
onPressEnter() {
getList()
},
},
query() {
getList()
},
clear() {
dispatch({ type: 'onlineOperation/initQueryParams'});
dispatch({ type: 'onlineOperation/getList' });
}
}
// mock下拉数据
const waterFactoryList = [ {
"id" : "08ffc34f7b7444249d9a9e3750a34e91",
"value" : "测试水厂0001",
"name" : "测试水厂0001",
"parentId" : "e6a34203250b471c969fba90fccc6c87"
}, {
"id" : "c3e5d28d0da94357ad3625af1e58f342",
"value" : "水厂测试门户1",
"name" : "水厂测试门户1",
"parentId" : "e6a34203250b471c969fba90fccc6c87"
}, {
"id" : "757bc4e466b745b1adbfc39b887d4038",
"value" : "水厂5",
"name" : "水厂5",
"parentId" : "c95fe0e685a54030970e90e17864ace1"
}]
// 模态框名称
const modalTitle = searchParams.dataFillType==='produce' ? '在线概况' : '在线异常运维记录'
// 查看窗口
const updateViewWindow = (status = true) => {
updateState({
viewItem: {
visible: status
}
})
}
const viewItemProps = {
modalProps: {
title: `${modalTitle} > 查看`,
visible: viewItem.visible,
onCancel: () => updateViewWindow(false),
width: 1000
},
contentProps: {
...viewItem,
btnType: 'view'
}
}
// 编辑窗口
const updateEditWindow = (status = true) => {
updateState({
editItem: {
visible: status
}
})
}
const editItemProps = {
modalProps: {
title: `${modalTitle} > 编辑`,
visible: editItem.visible,
onCancel: () => updateEditWindow(false),
width: 1000
},
contentProps: {
...editItem,
btnType: 'edit'
}
}
// 新增窗口
const updateNewWindow = (status = true) => {
updateState({
newItem: {
visible: status
}
})
if(!status) {
dispatch({ type: 'onlineOperation/initNewItem' })
}
}
const newItemProps = {
modalProps: {
title: `${modalTitle} > 新增`,
visible: newItem.visible,
onCancel: () => updateNewWindow(false),
width: 1000
},
contentProps: {
...newItem,
btnType: 'add'
}
}
// 表格外功能-删除
const deleteItems = () => {
Modal.confirm({
content: `确定删除选中的${selectedRowKeys.length}条数据吗?`,
okText: '确定',
cancelText: '取消',
onOk() {
dispatch({
type: 'onlineOperation/deleteItems',
payload: {
ids: selectedRowKeys,
onSuccess: (ids) => {
let page = currentPage != 1 && ids.length === (total - (currentPage - 1) * pageSize) ? currentPage - 1 : currentPage;
dispatch({
type: 'onlineOperation/getList',
payload: {
selectedRowKeys: [],
currentPage: page
}
})
message.success('删除成功');
},
onError: (msg) => { message.error(msg); }
}
})
}
})
}
// 表格外功能-导出
let downloadURL = searchParams.dataFillType==='produce'?'/cloud/gzzhsw/api/cp/basic/pipelineNetPerformance/exportDataExcel':'/cloud/gzzhsw/api/cp/basic/sewageFactory/exportDataExcel'
const exportProps = {
downloadURL,
getExportParams(exportType) {
const param = {
tenantId: VtxUtil.getUrlParam('tenantId'),
};
switch (exportType) {
case 'rows':
if (selectedRowKeys.length === 0) {
message.info('需要选择一项进行导出');
return;
}
param.isAll = false;
param.ids = selectedRowKeys.join();
break;
case 'page':
if (dataSource.length === 0) {
message.info('当前页没有数据');
return;
}
const ids = dataSource.map((item, index) => {
return item.id;
});
param.isAll = false;
param.ids = ids.join();
break;
case 'all':
if (total === 0) {
message.info('暂无数据可进行导出');
return;
}
param.isAll = true;
}
return param
}
}
return (
<div className={styles.normal}>
{/* 条件查询 */}
<VtxGrid
titles={['水厂名称', '运维模式']}
gridweight={[1,1]}
confirm={vtxGridParams.query}
clear={vtxGridParams.clear}
>
<Select {...vtxGridParams.nameProps}>
{waterFactoryList.map(item=>{
return (
<Option key={item.id}>{item.name}</Option>
)
})} | {waterFactoryList.map(item=>{
return (
<Option key={item.id}>{item.name}</Option>
)
})}
</Select>
</VtxGrid>
{/* tabs */}
<div className={styles.normal_body}>
<div className={styles.tabContainer}>
<Tabs activeKey={searchParams.dataFillType} onChange={(key)=>{
updateState({ searchParams: {dataFillType: key}})
getList()}
}>
<TabPane tab='在线概况' key='produce'></TabPane>
<TabPane tab='在线异常运维记录' key='assay'></TabPane>
</Tabs>
</div>
{/* 功能按钮 */}
<div className={styles.buttonContainer}>
{buttonLimit['ADD']&&<Button icon="file-add" onClick={() => updateNewWindow()}>新增</Button>}
{buttonLimit['DELETE']&&<Button icon="delete" onClick={deleteItems}>删除</Button>}
{buttonLimit['DELETE'] &&<VtxExport2 {...exportProps}><Button icon="export">导出</Button></VtxExport2>}
</div>
{/* 表格 */}
<div className={styles.tableContainer}>
<VtxDatagrid {...vtxDatagridProps}></VtxDatagrid>
</div>
</div>
{/* 操作 */}
{/* 查看 */}
{viewItem.visible && <ViewItem {...viewItemProps}/>}
{/* 编辑 */}
{editItem.visible && <EditItem {...editItemProps}/>}
{/* 新增 */}
{newItem.visible && <NewItem {...newItemProps}/>}
</div>
)
}
export default connect( ({onlineOperation, accessControlM}) => ({onlineOperation, accessControlM}) )(OnlineOperation); | </Select>
<Select {...vtxGridParams.nameProps}> | random_line_split |
data_farmer.rs | use lazy_static::lazy_static;
/// In charge of cleaning, processing, and managing data. I couldn't think of
/// a better name for the file. Since I called data collection "harvesting",
/// then this is the farmer I guess.
///
/// Essentially the main goal is to shift the initial calculation and distribution
/// of joiner points and data to one central location that will only do it
/// *once* upon receiving the data --- as opposed to doing it on canvas draw,
/// which will be a costly process.
///
/// This will also handle the *cleaning* of stale data. That should be done
/// in some manner (timer on another thread, some loop) that will occasionally
/// call the purging function. Failure to do so *will* result in a growing
/// memory usage and higher CPU usage - you will be trying to process more and
/// more points as this is used!
use std::{time::Instant, vec::Vec};
use crate::{
data_harvester::{battery_harvester, cpu, disks, mem, network, processes, temperature, Data},
utils::gen_util::get_simple_byte_values,
};
use regex::Regex;
pub type TimeOffset = f64;
pub type Value = f64;
#[derive(Debug, Default)]
pub struct TimedData {
pub rx_data: Value,
pub tx_data: Value,
pub cpu_data: Vec<Value>,
pub mem_data: Value,
pub swap_data: Value,
}
/// AppCollection represents the pooled data stored within the main app
/// thread. Basically stores a (occasionally cleaned) record of the data
/// collected, and what is needed to convert into a displayable form.
///
/// If the app is *frozen* - that is, we do not want to *display* any changing
/// data, keep updating this, don't convert to canvas displayable data!
///
/// Note that with this method, the *app* thread is responsible for cleaning -
/// not the data collector.
#[derive(Debug)]
pub struct DataCollection {
pub current_instant: Instant,
pub frozen_instant: Option<Instant>,
pub timed_data_vec: Vec<(Instant, TimedData)>,
pub network_harvest: network::NetworkHarvest,
pub memory_harvest: mem::MemHarvest,
pub swap_harvest: mem::MemHarvest,
pub cpu_harvest: cpu::CpuHarvest,
pub process_harvest: Vec<processes::ProcessHarvest>,
pub disk_harvest: Vec<disks::DiskHarvest>,
pub io_harvest: disks::IOHarvest,
pub io_labels_and_prev: Vec<((u64, u64), (u64, u64))>,
pub io_labels: Vec<(String, String)>,
pub temp_harvest: Vec<temperature::TempHarvest>,
pub battery_harvest: Vec<battery_harvester::BatteryHarvest>,
}
impl Default for DataCollection {
fn default() -> Self {
DataCollection {
current_instant: Instant::now(),
frozen_instant: None,
timed_data_vec: Vec::default(),
network_harvest: network::NetworkHarvest::default(),
memory_harvest: mem::MemHarvest::default(),
swap_harvest: mem::MemHarvest::default(),
cpu_harvest: cpu::CpuHarvest::default(),
process_harvest: Vec::default(),
disk_harvest: Vec::default(),
io_harvest: disks::IOHarvest::default(),
io_labels_and_prev: Vec::default(),
io_labels: Vec::default(),
temp_harvest: Vec::default(),
battery_harvest: Vec::default(),
}
}
}
impl DataCollection {
pub fn reset(&mut self) {
self.timed_data_vec = Vec::default();
self.network_harvest = network::NetworkHarvest::default();
self.memory_harvest = mem::MemHarvest::default();
self.swap_harvest = mem::MemHarvest::default();
self.cpu_harvest = cpu::CpuHarvest::default();
self.process_harvest = Vec::default();
self.disk_harvest = Vec::default();
self.io_harvest = disks::IOHarvest::default();
self.io_labels_and_prev = Vec::default();
self.temp_harvest = Vec::default();
self.battery_harvest = Vec::default();
}
pub fn set_frozen_time(&mut self) {
self.frozen_instant = Some(self.current_instant);
}
pub fn clean_data(&mut self, max_time_millis: u64) {
trace!("Cleaning data.");
let current_time = Instant::now();
let mut remove_index = 0;
for entry in &self.timed_data_vec {
if current_time.duration_since(entry.0).as_millis() >= max_time_millis as u128 {
remove_index += 1;
} else {
break;
}
}
self.timed_data_vec.drain(0..remove_index);
}
pub fn eat_data(&mut self, harvested_data: &Data) {
trace!("Eating data now...");
let harvested_time = harvested_data.last_collection_time;
trace!("Harvested time: {:?}", harvested_time);
trace!("New current instant: {:?}", self.current_instant);
let mut new_entry = TimedData::default();
// Network
if let Some(network) = &harvested_data.network {
self.eat_network(network, &mut new_entry);
}
// Memory and Swap
if let Some(memory) = &harvested_data.memory {
if let Some(swap) = &harvested_data.swap {
self.eat_memory_and_swap(memory, swap, &mut new_entry);
}
}
// CPU
if let Some(cpu) = &harvested_data.cpu {
self.eat_cpu(cpu, &mut new_entry);
}
// Temp
if let Some(temperature_sensors) = &harvested_data.temperature_sensors {
self.eat_temp(temperature_sensors);
}
// Disks
if let Some(disks) = &harvested_data.disks {
if let Some(io) = &harvested_data.io {
self.eat_disks(disks, io, harvested_time);
}
}
// Processes
if let Some(list_of_processes) = &harvested_data.list_of_processes {
self.eat_proc(list_of_processes);
}
// Battery
if let Some(list_of_batteries) = &harvested_data.list_of_batteries {
self.eat_battery(list_of_batteries);
}
// And we're done eating. Update time and push the new entry!
self.current_instant = harvested_time;
self.timed_data_vec.push((harvested_time, new_entry));
}
fn eat_memory_and_swap(
&mut self, memory: &mem::MemHarvest, swap: &mem::MemHarvest, new_entry: &mut TimedData,
) {
trace!("Eating mem and swap.");
// Memory
let mem_percent = match memory.mem_total_in_mb {
0 => 0f64,
total => (memory.mem_used_in_mb as f64) / (total as f64) * 100.0,
};
new_entry.mem_data = mem_percent;
// Swap
if swap.mem_total_in_mb > 0 {
let swap_percent = match swap.mem_total_in_mb {
0 => 0f64,
total => (swap.mem_used_in_mb as f64) / (total as f64) * 100.0,
};
new_entry.swap_data = swap_percent;
}
// In addition copy over latest data for easy reference
self.memory_harvest = memory.clone();
self.swap_harvest = swap.clone();
}
fn eat_network(&mut self, network: &network::NetworkHarvest, new_entry: &mut TimedData) {
trace!("Eating network.");
// FIXME [NETWORKING; CONFIG]: The ability to config this?
// FIXME [NETWORKING]: Support bits, support switching between decimal and binary units (move the log part to conversion and switch on the fly)
// RX
new_entry.rx_data = if network.rx > 0 {
(network.rx as f64).log2()
} else {
0.0
};
// TX
new_entry.tx_data = if network.tx > 0 | else {
0.0
};
// In addition copy over latest data for easy reference
self.network_harvest = network.clone();
}
fn eat_cpu(&mut self, cpu: &[cpu::CpuData], new_entry: &mut TimedData) {
trace!("Eating CPU.");
// Note this only pre-calculates the data points - the names will be
// within the local copy of cpu_harvest. Since it's all sequential
// it probably doesn't matter anyways.
cpu.iter()
.for_each(|cpu| new_entry.cpu_data.push(cpu.cpu_usage));
self.cpu_harvest = cpu.to_vec();
}
fn eat_temp(&mut self, temperature_sensors: &[temperature::TempHarvest]) {
trace!("Eating temps.");
// TODO: [PO] To implement
self.temp_harvest = temperature_sensors.to_vec();
}
fn eat_disks(
&mut self, disks: &[disks::DiskHarvest], io: &disks::IOHarvest, harvested_time: Instant,
) {
trace!("Eating disks.");
// TODO: [PO] To implement
let time_since_last_harvest = harvested_time
.duration_since(self.current_instant)
.as_secs_f64();
for (itx, device) in disks.iter().enumerate() {
if let Some(trim) = device.name.split('/').last() {
let io_device = if cfg!(target_os = "macos") {
// Must trim one level further!
lazy_static! {
static ref DISK_REGEX: Regex = Regex::new(r"disk\d+").unwrap();
}
if let Some(disk_trim) = DISK_REGEX.find(trim) {
io.get(disk_trim.as_str())
} else {
None
}
} else {
io.get(trim)
};
if let Some(io_device) = io_device {
let (io_r_pt, io_w_pt) = if let Some(io) = io_device {
(io.read_bytes, io.write_bytes)
} else {
(0, 0)
};
if self.io_labels.len() <= itx {
self.io_labels.push((String::default(), String::default()));
}
if self.io_labels_and_prev.len() <= itx {
self.io_labels_and_prev.push(((0, 0), (io_r_pt, io_w_pt)));
}
if let Some((io_curr, io_prev)) = self.io_labels_and_prev.get_mut(itx) {
let r_rate = ((io_r_pt.saturating_sub(io_prev.0)) as f64
/ time_since_last_harvest)
.round() as u64;
let w_rate = ((io_w_pt.saturating_sub(io_prev.1)) as f64
/ time_since_last_harvest)
.round() as u64;
*io_curr = (r_rate, w_rate);
*io_prev = (io_r_pt, io_w_pt);
if let Some(io_labels) = self.io_labels.get_mut(itx) {
let converted_read = get_simple_byte_values(r_rate, false);
let converted_write = get_simple_byte_values(w_rate, false);
*io_labels = (
format!("{:.*}{}/s", 0, converted_read.0, converted_read.1),
format!("{:.*}{}/s", 0, converted_write.0, converted_write.1),
);
}
}
} else {
if self.io_labels.len() <= itx {
self.io_labels.push((String::default(), String::default()));
}
if let Some(io_labels) = self.io_labels.get_mut(itx) {
*io_labels = ("N/A".to_string(), "N/A".to_string());
}
}
}
}
self.disk_harvest = disks.to_vec();
self.io_harvest = io.clone();
}
fn eat_proc(&mut self, list_of_processes: &[processes::ProcessHarvest]) {
trace!("Eating proc.");
self.process_harvest = list_of_processes.to_vec();
}
fn eat_battery(&mut self, list_of_batteries: &[battery_harvester::BatteryHarvest]) {
trace!("Eating batteries.");
self.battery_harvest = list_of_batteries.to_vec();
}
}
| {
(network.tx as f64).log2()
} | conditional_block |
data_farmer.rs | use lazy_static::lazy_static;
/// In charge of cleaning, processing, and managing data. I couldn't think of
/// a better name for the file. Since I called data collection "harvesting",
/// then this is the farmer I guess.
///
/// Essentially the main goal is to shift the initial calculation and distribution
/// of joiner points and data to one central location that will only do it
/// *once* upon receiving the data --- as opposed to doing it on canvas draw,
/// which will be a costly process.
///
/// This will also handle the *cleaning* of stale data. That should be done
/// in some manner (timer on another thread, some loop) that will occasionally
/// call the purging function. Failure to do so *will* result in a growing
/// memory usage and higher CPU usage - you will be trying to process more and
/// more points as this is used!
use std::{time::Instant, vec::Vec};
use crate::{
data_harvester::{battery_harvester, cpu, disks, mem, network, processes, temperature, Data},
utils::gen_util::get_simple_byte_values,
};
use regex::Regex;
pub type TimeOffset = f64;
pub type Value = f64;
#[derive(Debug, Default)]
pub struct TimedData {
pub rx_data: Value,
pub tx_data: Value,
pub cpu_data: Vec<Value>,
pub mem_data: Value,
pub swap_data: Value,
}
/// AppCollection represents the pooled data stored within the main app
/// thread. Basically stores a (occasionally cleaned) record of the data
/// collected, and what is needed to convert into a displayable form.
///
/// If the app is *frozen* - that is, we do not want to *display* any changing
/// data, keep updating this, don't convert to canvas displayable data!
///
/// Note that with this method, the *app* thread is responsible for cleaning -
/// not the data collector.
#[derive(Debug)]
pub struct DataCollection {
pub current_instant: Instant,
pub frozen_instant: Option<Instant>,
pub timed_data_vec: Vec<(Instant, TimedData)>,
pub network_harvest: network::NetworkHarvest,
pub memory_harvest: mem::MemHarvest,
pub swap_harvest: mem::MemHarvest,
pub cpu_harvest: cpu::CpuHarvest,
pub process_harvest: Vec<processes::ProcessHarvest>,
pub disk_harvest: Vec<disks::DiskHarvest>,
pub io_harvest: disks::IOHarvest,
pub io_labels_and_prev: Vec<((u64, u64), (u64, u64))>,
pub io_labels: Vec<(String, String)>,
pub temp_harvest: Vec<temperature::TempHarvest>,
pub battery_harvest: Vec<battery_harvester::BatteryHarvest>, | fn default() -> Self {
DataCollection {
current_instant: Instant::now(),
frozen_instant: None,
timed_data_vec: Vec::default(),
network_harvest: network::NetworkHarvest::default(),
memory_harvest: mem::MemHarvest::default(),
swap_harvest: mem::MemHarvest::default(),
cpu_harvest: cpu::CpuHarvest::default(),
process_harvest: Vec::default(),
disk_harvest: Vec::default(),
io_harvest: disks::IOHarvest::default(),
io_labels_and_prev: Vec::default(),
io_labels: Vec::default(),
temp_harvest: Vec::default(),
battery_harvest: Vec::default(),
}
}
}
impl DataCollection {
pub fn reset(&mut self) {
self.timed_data_vec = Vec::default();
self.network_harvest = network::NetworkHarvest::default();
self.memory_harvest = mem::MemHarvest::default();
self.swap_harvest = mem::MemHarvest::default();
self.cpu_harvest = cpu::CpuHarvest::default();
self.process_harvest = Vec::default();
self.disk_harvest = Vec::default();
self.io_harvest = disks::IOHarvest::default();
self.io_labels_and_prev = Vec::default();
self.temp_harvest = Vec::default();
self.battery_harvest = Vec::default();
}
pub fn set_frozen_time(&mut self) {
self.frozen_instant = Some(self.current_instant);
}
pub fn clean_data(&mut self, max_time_millis: u64) {
trace!("Cleaning data.");
let current_time = Instant::now();
let mut remove_index = 0;
for entry in &self.timed_data_vec {
if current_time.duration_since(entry.0).as_millis() >= max_time_millis as u128 {
remove_index += 1;
} else {
break;
}
}
self.timed_data_vec.drain(0..remove_index);
}
pub fn eat_data(&mut self, harvested_data: &Data) {
trace!("Eating data now...");
let harvested_time = harvested_data.last_collection_time;
trace!("Harvested time: {:?}", harvested_time);
trace!("New current instant: {:?}", self.current_instant);
let mut new_entry = TimedData::default();
// Network
if let Some(network) = &harvested_data.network {
self.eat_network(network, &mut new_entry);
}
// Memory and Swap
if let Some(memory) = &harvested_data.memory {
if let Some(swap) = &harvested_data.swap {
self.eat_memory_and_swap(memory, swap, &mut new_entry);
}
}
// CPU
if let Some(cpu) = &harvested_data.cpu {
self.eat_cpu(cpu, &mut new_entry);
}
// Temp
if let Some(temperature_sensors) = &harvested_data.temperature_sensors {
self.eat_temp(temperature_sensors);
}
// Disks
if let Some(disks) = &harvested_data.disks {
if let Some(io) = &harvested_data.io {
self.eat_disks(disks, io, harvested_time);
}
}
// Processes
if let Some(list_of_processes) = &harvested_data.list_of_processes {
self.eat_proc(list_of_processes);
}
// Battery
if let Some(list_of_batteries) = &harvested_data.list_of_batteries {
self.eat_battery(list_of_batteries);
}
// And we're done eating. Update time and push the new entry!
self.current_instant = harvested_time;
self.timed_data_vec.push((harvested_time, new_entry));
}
fn eat_memory_and_swap(
&mut self, memory: &mem::MemHarvest, swap: &mem::MemHarvest, new_entry: &mut TimedData,
) {
trace!("Eating mem and swap.");
// Memory
let mem_percent = match memory.mem_total_in_mb {
0 => 0f64,
total => (memory.mem_used_in_mb as f64) / (total as f64) * 100.0,
};
new_entry.mem_data = mem_percent;
// Swap
if swap.mem_total_in_mb > 0 {
let swap_percent = match swap.mem_total_in_mb {
0 => 0f64,
total => (swap.mem_used_in_mb as f64) / (total as f64) * 100.0,
};
new_entry.swap_data = swap_percent;
}
// In addition copy over latest data for easy reference
self.memory_harvest = memory.clone();
self.swap_harvest = swap.clone();
}
fn eat_network(&mut self, network: &network::NetworkHarvest, new_entry: &mut TimedData) {
trace!("Eating network.");
// FIXME [NETWORKING; CONFIG]: The ability to config this?
// FIXME [NETWORKING]: Support bits, support switching between decimal and binary units (move the log part to conversion and switch on the fly)
// RX
new_entry.rx_data = if network.rx > 0 {
(network.rx as f64).log2()
} else {
0.0
};
// TX
new_entry.tx_data = if network.tx > 0 {
(network.tx as f64).log2()
} else {
0.0
};
// In addition copy over latest data for easy reference
self.network_harvest = network.clone();
}
fn eat_cpu(&mut self, cpu: &[cpu::CpuData], new_entry: &mut TimedData) {
trace!("Eating CPU.");
// Note this only pre-calculates the data points - the names will be
// within the local copy of cpu_harvest. Since it's all sequential
// it probably doesn't matter anyways.
cpu.iter()
.for_each(|cpu| new_entry.cpu_data.push(cpu.cpu_usage));
self.cpu_harvest = cpu.to_vec();
}
fn eat_temp(&mut self, temperature_sensors: &[temperature::TempHarvest]) {
trace!("Eating temps.");
// TODO: [PO] To implement
self.temp_harvest = temperature_sensors.to_vec();
}
fn eat_disks(
&mut self, disks: &[disks::DiskHarvest], io: &disks::IOHarvest, harvested_time: Instant,
) {
trace!("Eating disks.");
// TODO: [PO] To implement
let time_since_last_harvest = harvested_time
.duration_since(self.current_instant)
.as_secs_f64();
for (itx, device) in disks.iter().enumerate() {
if let Some(trim) = device.name.split('/').last() {
let io_device = if cfg!(target_os = "macos") {
// Must trim one level further!
lazy_static! {
static ref DISK_REGEX: Regex = Regex::new(r"disk\d+").unwrap();
}
if let Some(disk_trim) = DISK_REGEX.find(trim) {
io.get(disk_trim.as_str())
} else {
None
}
} else {
io.get(trim)
};
if let Some(io_device) = io_device {
let (io_r_pt, io_w_pt) = if let Some(io) = io_device {
(io.read_bytes, io.write_bytes)
} else {
(0, 0)
};
if self.io_labels.len() <= itx {
self.io_labels.push((String::default(), String::default()));
}
if self.io_labels_and_prev.len() <= itx {
self.io_labels_and_prev.push(((0, 0), (io_r_pt, io_w_pt)));
}
if let Some((io_curr, io_prev)) = self.io_labels_and_prev.get_mut(itx) {
let r_rate = ((io_r_pt.saturating_sub(io_prev.0)) as f64
/ time_since_last_harvest)
.round() as u64;
let w_rate = ((io_w_pt.saturating_sub(io_prev.1)) as f64
/ time_since_last_harvest)
.round() as u64;
*io_curr = (r_rate, w_rate);
*io_prev = (io_r_pt, io_w_pt);
if let Some(io_labels) = self.io_labels.get_mut(itx) {
let converted_read = get_simple_byte_values(r_rate, false);
let converted_write = get_simple_byte_values(w_rate, false);
*io_labels = (
format!("{:.*}{}/s", 0, converted_read.0, converted_read.1),
format!("{:.*}{}/s", 0, converted_write.0, converted_write.1),
);
}
}
} else {
if self.io_labels.len() <= itx {
self.io_labels.push((String::default(), String::default()));
}
if let Some(io_labels) = self.io_labels.get_mut(itx) {
*io_labels = ("N/A".to_string(), "N/A".to_string());
}
}
}
}
self.disk_harvest = disks.to_vec();
self.io_harvest = io.clone();
}
fn eat_proc(&mut self, list_of_processes: &[processes::ProcessHarvest]) {
trace!("Eating proc.");
self.process_harvest = list_of_processes.to_vec();
}
fn eat_battery(&mut self, list_of_batteries: &[battery_harvester::BatteryHarvest]) {
trace!("Eating batteries.");
self.battery_harvest = list_of_batteries.to_vec();
}
} | }
impl Default for DataCollection { | random_line_split |
data_farmer.rs | use lazy_static::lazy_static;
/// In charge of cleaning, processing, and managing data. I couldn't think of
/// a better name for the file. Since I called data collection "harvesting",
/// then this is the farmer I guess.
///
/// Essentially the main goal is to shift the initial calculation and distribution
/// of joiner points and data to one central location that will only do it
/// *once* upon receiving the data --- as opposed to doing it on canvas draw,
/// which will be a costly process.
///
/// This will also handle the *cleaning* of stale data. That should be done
/// in some manner (timer on another thread, some loop) that will occasionally
/// call the purging function. Failure to do so *will* result in a growing
/// memory usage and higher CPU usage - you will be trying to process more and
/// more points as this is used!
use std::{time::Instant, vec::Vec};
use crate::{
data_harvester::{battery_harvester, cpu, disks, mem, network, processes, temperature, Data},
utils::gen_util::get_simple_byte_values,
};
use regex::Regex;
pub type TimeOffset = f64;
pub type Value = f64;
#[derive(Debug, Default)]
pub struct | {
pub rx_data: Value,
pub tx_data: Value,
pub cpu_data: Vec<Value>,
pub mem_data: Value,
pub swap_data: Value,
}
/// AppCollection represents the pooled data stored within the main app
/// thread. Basically stores a (occasionally cleaned) record of the data
/// collected, and what is needed to convert into a displayable form.
///
/// If the app is *frozen* - that is, we do not want to *display* any changing
/// data, keep updating this, don't convert to canvas displayable data!
///
/// Note that with this method, the *app* thread is responsible for cleaning -
/// not the data collector.
#[derive(Debug)]
pub struct DataCollection {
pub current_instant: Instant,
pub frozen_instant: Option<Instant>,
pub timed_data_vec: Vec<(Instant, TimedData)>,
pub network_harvest: network::NetworkHarvest,
pub memory_harvest: mem::MemHarvest,
pub swap_harvest: mem::MemHarvest,
pub cpu_harvest: cpu::CpuHarvest,
pub process_harvest: Vec<processes::ProcessHarvest>,
pub disk_harvest: Vec<disks::DiskHarvest>,
pub io_harvest: disks::IOHarvest,
pub io_labels_and_prev: Vec<((u64, u64), (u64, u64))>,
pub io_labels: Vec<(String, String)>,
pub temp_harvest: Vec<temperature::TempHarvest>,
pub battery_harvest: Vec<battery_harvester::BatteryHarvest>,
}
impl Default for DataCollection {
fn default() -> Self {
DataCollection {
current_instant: Instant::now(),
frozen_instant: None,
timed_data_vec: Vec::default(),
network_harvest: network::NetworkHarvest::default(),
memory_harvest: mem::MemHarvest::default(),
swap_harvest: mem::MemHarvest::default(),
cpu_harvest: cpu::CpuHarvest::default(),
process_harvest: Vec::default(),
disk_harvest: Vec::default(),
io_harvest: disks::IOHarvest::default(),
io_labels_and_prev: Vec::default(),
io_labels: Vec::default(),
temp_harvest: Vec::default(),
battery_harvest: Vec::default(),
}
}
}
impl DataCollection {
pub fn reset(&mut self) {
self.timed_data_vec = Vec::default();
self.network_harvest = network::NetworkHarvest::default();
self.memory_harvest = mem::MemHarvest::default();
self.swap_harvest = mem::MemHarvest::default();
self.cpu_harvest = cpu::CpuHarvest::default();
self.process_harvest = Vec::default();
self.disk_harvest = Vec::default();
self.io_harvest = disks::IOHarvest::default();
self.io_labels_and_prev = Vec::default();
self.temp_harvest = Vec::default();
self.battery_harvest = Vec::default();
}
pub fn set_frozen_time(&mut self) {
self.frozen_instant = Some(self.current_instant);
}
pub fn clean_data(&mut self, max_time_millis: u64) {
trace!("Cleaning data.");
let current_time = Instant::now();
let mut remove_index = 0;
for entry in &self.timed_data_vec {
if current_time.duration_since(entry.0).as_millis() >= max_time_millis as u128 {
remove_index += 1;
} else {
break;
}
}
self.timed_data_vec.drain(0..remove_index);
}
pub fn eat_data(&mut self, harvested_data: &Data) {
trace!("Eating data now...");
let harvested_time = harvested_data.last_collection_time;
trace!("Harvested time: {:?}", harvested_time);
trace!("New current instant: {:?}", self.current_instant);
let mut new_entry = TimedData::default();
// Network
if let Some(network) = &harvested_data.network {
self.eat_network(network, &mut new_entry);
}
// Memory and Swap
if let Some(memory) = &harvested_data.memory {
if let Some(swap) = &harvested_data.swap {
self.eat_memory_and_swap(memory, swap, &mut new_entry);
}
}
// CPU
if let Some(cpu) = &harvested_data.cpu {
self.eat_cpu(cpu, &mut new_entry);
}
// Temp
if let Some(temperature_sensors) = &harvested_data.temperature_sensors {
self.eat_temp(temperature_sensors);
}
// Disks
if let Some(disks) = &harvested_data.disks {
if let Some(io) = &harvested_data.io {
self.eat_disks(disks, io, harvested_time);
}
}
// Processes
if let Some(list_of_processes) = &harvested_data.list_of_processes {
self.eat_proc(list_of_processes);
}
// Battery
if let Some(list_of_batteries) = &harvested_data.list_of_batteries {
self.eat_battery(list_of_batteries);
}
// And we're done eating. Update time and push the new entry!
self.current_instant = harvested_time;
self.timed_data_vec.push((harvested_time, new_entry));
}
fn eat_memory_and_swap(
&mut self, memory: &mem::MemHarvest, swap: &mem::MemHarvest, new_entry: &mut TimedData,
) {
trace!("Eating mem and swap.");
// Memory
let mem_percent = match memory.mem_total_in_mb {
0 => 0f64,
total => (memory.mem_used_in_mb as f64) / (total as f64) * 100.0,
};
new_entry.mem_data = mem_percent;
// Swap
if swap.mem_total_in_mb > 0 {
let swap_percent = match swap.mem_total_in_mb {
0 => 0f64,
total => (swap.mem_used_in_mb as f64) / (total as f64) * 100.0,
};
new_entry.swap_data = swap_percent;
}
// In addition copy over latest data for easy reference
self.memory_harvest = memory.clone();
self.swap_harvest = swap.clone();
}
fn eat_network(&mut self, network: &network::NetworkHarvest, new_entry: &mut TimedData) {
trace!("Eating network.");
// FIXME [NETWORKING; CONFIG]: The ability to config this?
// FIXME [NETWORKING]: Support bits, support switching between decimal and binary units (move the log part to conversion and switch on the fly)
// RX
new_entry.rx_data = if network.rx > 0 {
(network.rx as f64).log2()
} else {
0.0
};
// TX
new_entry.tx_data = if network.tx > 0 {
(network.tx as f64).log2()
} else {
0.0
};
// In addition copy over latest data for easy reference
self.network_harvest = network.clone();
}
fn eat_cpu(&mut self, cpu: &[cpu::CpuData], new_entry: &mut TimedData) {
trace!("Eating CPU.");
// Note this only pre-calculates the data points - the names will be
// within the local copy of cpu_harvest. Since it's all sequential
// it probably doesn't matter anyways.
cpu.iter()
.for_each(|cpu| new_entry.cpu_data.push(cpu.cpu_usage));
self.cpu_harvest = cpu.to_vec();
}
fn eat_temp(&mut self, temperature_sensors: &[temperature::TempHarvest]) {
trace!("Eating temps.");
// TODO: [PO] To implement
self.temp_harvest = temperature_sensors.to_vec();
}
fn eat_disks(
&mut self, disks: &[disks::DiskHarvest], io: &disks::IOHarvest, harvested_time: Instant,
) {
trace!("Eating disks.");
// TODO: [PO] To implement
let time_since_last_harvest = harvested_time
.duration_since(self.current_instant)
.as_secs_f64();
for (itx, device) in disks.iter().enumerate() {
if let Some(trim) = device.name.split('/').last() {
let io_device = if cfg!(target_os = "macos") {
// Must trim one level further!
lazy_static! {
static ref DISK_REGEX: Regex = Regex::new(r"disk\d+").unwrap();
}
if let Some(disk_trim) = DISK_REGEX.find(trim) {
io.get(disk_trim.as_str())
} else {
None
}
} else {
io.get(trim)
};
if let Some(io_device) = io_device {
let (io_r_pt, io_w_pt) = if let Some(io) = io_device {
(io.read_bytes, io.write_bytes)
} else {
(0, 0)
};
if self.io_labels.len() <= itx {
self.io_labels.push((String::default(), String::default()));
}
if self.io_labels_and_prev.len() <= itx {
self.io_labels_and_prev.push(((0, 0), (io_r_pt, io_w_pt)));
}
if let Some((io_curr, io_prev)) = self.io_labels_and_prev.get_mut(itx) {
let r_rate = ((io_r_pt.saturating_sub(io_prev.0)) as f64
/ time_since_last_harvest)
.round() as u64;
let w_rate = ((io_w_pt.saturating_sub(io_prev.1)) as f64
/ time_since_last_harvest)
.round() as u64;
*io_curr = (r_rate, w_rate);
*io_prev = (io_r_pt, io_w_pt);
if let Some(io_labels) = self.io_labels.get_mut(itx) {
let converted_read = get_simple_byte_values(r_rate, false);
let converted_write = get_simple_byte_values(w_rate, false);
*io_labels = (
format!("{:.*}{}/s", 0, converted_read.0, converted_read.1),
format!("{:.*}{}/s", 0, converted_write.0, converted_write.1),
);
}
}
} else {
if self.io_labels.len() <= itx {
self.io_labels.push((String::default(), String::default()));
}
if let Some(io_labels) = self.io_labels.get_mut(itx) {
*io_labels = ("N/A".to_string(), "N/A".to_string());
}
}
}
}
self.disk_harvest = disks.to_vec();
self.io_harvest = io.clone();
}
fn eat_proc(&mut self, list_of_processes: &[processes::ProcessHarvest]) {
trace!("Eating proc.");
self.process_harvest = list_of_processes.to_vec();
}
fn eat_battery(&mut self, list_of_batteries: &[battery_harvester::BatteryHarvest]) {
trace!("Eating batteries.");
self.battery_harvest = list_of_batteries.to_vec();
}
}
| TimedData | identifier_name |
data_farmer.rs | use lazy_static::lazy_static;
/// In charge of cleaning, processing, and managing data. I couldn't think of
/// a better name for the file. Since I called data collection "harvesting",
/// then this is the farmer I guess.
///
/// Essentially the main goal is to shift the initial calculation and distribution
/// of joiner points and data to one central location that will only do it
/// *once* upon receiving the data --- as opposed to doing it on canvas draw,
/// which will be a costly process.
///
/// This will also handle the *cleaning* of stale data. That should be done
/// in some manner (timer on another thread, some loop) that will occasionally
/// call the purging function. Failure to do so *will* result in a growing
/// memory usage and higher CPU usage - you will be trying to process more and
/// more points as this is used!
use std::{time::Instant, vec::Vec};
use crate::{
data_harvester::{battery_harvester, cpu, disks, mem, network, processes, temperature, Data},
utils::gen_util::get_simple_byte_values,
};
use regex::Regex;
pub type TimeOffset = f64;
pub type Value = f64;
#[derive(Debug, Default)]
pub struct TimedData {
pub rx_data: Value,
pub tx_data: Value,
pub cpu_data: Vec<Value>,
pub mem_data: Value,
pub swap_data: Value,
}
/// AppCollection represents the pooled data stored within the main app
/// thread. Basically stores a (occasionally cleaned) record of the data
/// collected, and what is needed to convert into a displayable form.
///
/// If the app is *frozen* - that is, we do not want to *display* any changing
/// data, keep updating this, don't convert to canvas displayable data!
///
/// Note that with this method, the *app* thread is responsible for cleaning -
/// not the data collector.
#[derive(Debug)]
pub struct DataCollection {
pub current_instant: Instant,
pub frozen_instant: Option<Instant>,
pub timed_data_vec: Vec<(Instant, TimedData)>,
pub network_harvest: network::NetworkHarvest,
pub memory_harvest: mem::MemHarvest,
pub swap_harvest: mem::MemHarvest,
pub cpu_harvest: cpu::CpuHarvest,
pub process_harvest: Vec<processes::ProcessHarvest>,
pub disk_harvest: Vec<disks::DiskHarvest>,
pub io_harvest: disks::IOHarvest,
pub io_labels_and_prev: Vec<((u64, u64), (u64, u64))>,
pub io_labels: Vec<(String, String)>,
pub temp_harvest: Vec<temperature::TempHarvest>,
pub battery_harvest: Vec<battery_harvester::BatteryHarvest>,
}
impl Default for DataCollection {
fn default() -> Self {
DataCollection {
current_instant: Instant::now(),
frozen_instant: None,
timed_data_vec: Vec::default(),
network_harvest: network::NetworkHarvest::default(),
memory_harvest: mem::MemHarvest::default(),
swap_harvest: mem::MemHarvest::default(),
cpu_harvest: cpu::CpuHarvest::default(),
process_harvest: Vec::default(),
disk_harvest: Vec::default(),
io_harvest: disks::IOHarvest::default(),
io_labels_and_prev: Vec::default(),
io_labels: Vec::default(),
temp_harvest: Vec::default(),
battery_harvest: Vec::default(),
}
}
}
impl DataCollection {
pub fn reset(&mut self) |
pub fn set_frozen_time(&mut self) {
self.frozen_instant = Some(self.current_instant);
}
pub fn clean_data(&mut self, max_time_millis: u64) {
trace!("Cleaning data.");
let current_time = Instant::now();
let mut remove_index = 0;
for entry in &self.timed_data_vec {
if current_time.duration_since(entry.0).as_millis() >= max_time_millis as u128 {
remove_index += 1;
} else {
break;
}
}
self.timed_data_vec.drain(0..remove_index);
}
pub fn eat_data(&mut self, harvested_data: &Data) {
trace!("Eating data now...");
let harvested_time = harvested_data.last_collection_time;
trace!("Harvested time: {:?}", harvested_time);
trace!("New current instant: {:?}", self.current_instant);
let mut new_entry = TimedData::default();
// Network
if let Some(network) = &harvested_data.network {
self.eat_network(network, &mut new_entry);
}
// Memory and Swap
if let Some(memory) = &harvested_data.memory {
if let Some(swap) = &harvested_data.swap {
self.eat_memory_and_swap(memory, swap, &mut new_entry);
}
}
// CPU
if let Some(cpu) = &harvested_data.cpu {
self.eat_cpu(cpu, &mut new_entry);
}
// Temp
if let Some(temperature_sensors) = &harvested_data.temperature_sensors {
self.eat_temp(temperature_sensors);
}
// Disks
if let Some(disks) = &harvested_data.disks {
if let Some(io) = &harvested_data.io {
self.eat_disks(disks, io, harvested_time);
}
}
// Processes
if let Some(list_of_processes) = &harvested_data.list_of_processes {
self.eat_proc(list_of_processes);
}
// Battery
if let Some(list_of_batteries) = &harvested_data.list_of_batteries {
self.eat_battery(list_of_batteries);
}
// And we're done eating. Update time and push the new entry!
self.current_instant = harvested_time;
self.timed_data_vec.push((harvested_time, new_entry));
}
fn eat_memory_and_swap(
&mut self, memory: &mem::MemHarvest, swap: &mem::MemHarvest, new_entry: &mut TimedData,
) {
trace!("Eating mem and swap.");
// Memory
let mem_percent = match memory.mem_total_in_mb {
0 => 0f64,
total => (memory.mem_used_in_mb as f64) / (total as f64) * 100.0,
};
new_entry.mem_data = mem_percent;
// Swap
if swap.mem_total_in_mb > 0 {
let swap_percent = match swap.mem_total_in_mb {
0 => 0f64,
total => (swap.mem_used_in_mb as f64) / (total as f64) * 100.0,
};
new_entry.swap_data = swap_percent;
}
// In addition copy over latest data for easy reference
self.memory_harvest = memory.clone();
self.swap_harvest = swap.clone();
}
fn eat_network(&mut self, network: &network::NetworkHarvest, new_entry: &mut TimedData) {
trace!("Eating network.");
// FIXME [NETWORKING; CONFIG]: The ability to config this?
// FIXME [NETWORKING]: Support bits, support switching between decimal and binary units (move the log part to conversion and switch on the fly)
// RX
new_entry.rx_data = if network.rx > 0 {
(network.rx as f64).log2()
} else {
0.0
};
// TX
new_entry.tx_data = if network.tx > 0 {
(network.tx as f64).log2()
} else {
0.0
};
// In addition copy over latest data for easy reference
self.network_harvest = network.clone();
}
fn eat_cpu(&mut self, cpu: &[cpu::CpuData], new_entry: &mut TimedData) {
trace!("Eating CPU.");
// Note this only pre-calculates the data points - the names will be
// within the local copy of cpu_harvest. Since it's all sequential
// it probably doesn't matter anyways.
cpu.iter()
.for_each(|cpu| new_entry.cpu_data.push(cpu.cpu_usage));
self.cpu_harvest = cpu.to_vec();
}
fn eat_temp(&mut self, temperature_sensors: &[temperature::TempHarvest]) {
trace!("Eating temps.");
// TODO: [PO] To implement
self.temp_harvest = temperature_sensors.to_vec();
}
fn eat_disks(
&mut self, disks: &[disks::DiskHarvest], io: &disks::IOHarvest, harvested_time: Instant,
) {
trace!("Eating disks.");
// TODO: [PO] To implement
let time_since_last_harvest = harvested_time
.duration_since(self.current_instant)
.as_secs_f64();
for (itx, device) in disks.iter().enumerate() {
if let Some(trim) = device.name.split('/').last() {
let io_device = if cfg!(target_os = "macos") {
// Must trim one level further!
lazy_static! {
static ref DISK_REGEX: Regex = Regex::new(r"disk\d+").unwrap();
}
if let Some(disk_trim) = DISK_REGEX.find(trim) {
io.get(disk_trim.as_str())
} else {
None
}
} else {
io.get(trim)
};
if let Some(io_device) = io_device {
let (io_r_pt, io_w_pt) = if let Some(io) = io_device {
(io.read_bytes, io.write_bytes)
} else {
(0, 0)
};
if self.io_labels.len() <= itx {
self.io_labels.push((String::default(), String::default()));
}
if self.io_labels_and_prev.len() <= itx {
self.io_labels_and_prev.push(((0, 0), (io_r_pt, io_w_pt)));
}
if let Some((io_curr, io_prev)) = self.io_labels_and_prev.get_mut(itx) {
let r_rate = ((io_r_pt.saturating_sub(io_prev.0)) as f64
/ time_since_last_harvest)
.round() as u64;
let w_rate = ((io_w_pt.saturating_sub(io_prev.1)) as f64
/ time_since_last_harvest)
.round() as u64;
*io_curr = (r_rate, w_rate);
*io_prev = (io_r_pt, io_w_pt);
if let Some(io_labels) = self.io_labels.get_mut(itx) {
let converted_read = get_simple_byte_values(r_rate, false);
let converted_write = get_simple_byte_values(w_rate, false);
*io_labels = (
format!("{:.*}{}/s", 0, converted_read.0, converted_read.1),
format!("{:.*}{}/s", 0, converted_write.0, converted_write.1),
);
}
}
} else {
if self.io_labels.len() <= itx {
self.io_labels.push((String::default(), String::default()));
}
if let Some(io_labels) = self.io_labels.get_mut(itx) {
*io_labels = ("N/A".to_string(), "N/A".to_string());
}
}
}
}
self.disk_harvest = disks.to_vec();
self.io_harvest = io.clone();
}
fn eat_proc(&mut self, list_of_processes: &[processes::ProcessHarvest]) {
trace!("Eating proc.");
self.process_harvest = list_of_processes.to_vec();
}
fn eat_battery(&mut self, list_of_batteries: &[battery_harvester::BatteryHarvest]) {
trace!("Eating batteries.");
self.battery_harvest = list_of_batteries.to_vec();
}
}
| {
self.timed_data_vec = Vec::default();
self.network_harvest = network::NetworkHarvest::default();
self.memory_harvest = mem::MemHarvest::default();
self.swap_harvest = mem::MemHarvest::default();
self.cpu_harvest = cpu::CpuHarvest::default();
self.process_harvest = Vec::default();
self.disk_harvest = Vec::default();
self.io_harvest = disks::IOHarvest::default();
self.io_labels_and_prev = Vec::default();
self.temp_harvest = Vec::default();
self.battery_harvest = Vec::default();
} | identifier_body |
lib.rs | #![allow(non_snake_case)]
#![cfg_attr(test, feature(test))]
extern crate num_integer;
extern crate num_traits;
use num_integer::Integer;
use num_traits::{PrimInt,Signed,One,NumCast};
use ::std::collections::HashMap;
use ::std::hash::Hash;
#[cfg(test)] extern crate test;
#[cfg(test)] extern crate rand;
#[derive(Copy,Clone,Debug,Eq,PartialEq)]
pub struct GcdData<X> {
// greatest common divisor
pub gcd: X,
// least common multiple
pub lcm: X,
// bezout coefficients
pub coeffs: (X, X),
// quotients of the inputs by the GCD
pub quotients: (X, X),
}
// NOTE:
// The Signed bound is unavoidable for Extended GCD because the Bezout
// coefficients can be negative. This is unfortunate for plain old gcd(),
// which technically shouldn't require the Signed bound.
// Since the bezout coefficients have no impact on each other or on the gcd,
// a sufficiently smart compiler can rip their computations out entirely.
// And as luck would have it, rustc is sufficiently smart!
#[inline(always)]
fn extended_gcd__inline<X>(a: X, b: X) -> GcdData<X> where
X: PrimInt + Integer + Signed,
{
let (a_sign, a) = (a.signum(), a.abs());
let (b_sign, b) = (b.signum(), b.abs());
// Silly trick because rust doesn't have true multiple assignment:
// Store the two targets in one variable! Order is (old, current).
let mut s = (X::one(), X::zero()); // a coefficient
let mut t = (X::zero(), X::one()); // b coefficient
let mut r = (a, b); // gcd
while r.1 != X::zero() {
let (div, rem) = (r.0/r.1, r.0%r.1);
r = (r.1, rem);
s = (s.1, s.0 - div * s.1);
t = (t.1, t.0 - div * t.1);
}
let quots = (a_sign * t.1.abs(), b_sign * s.1.abs());
GcdData {
gcd: r.0,
// FIXME think more about sign of LCM
// (current implementation preserves the property a*b == gcd*lcm
// which is nice, but I don't know if it is The Right Thing)
lcm: r.0*quots.0*quots.1,
coeffs: (a_sign*s.0, b_sign*t.0),
quotients: quots,
}
}
/// Compute a greatest common divisor with other miscellany.
pub fn extended_gcd<X>(a: X, b: X) -> GcdData<X> where
X: PrimInt + Integer + Signed,
{
extended_gcd__inline(a, b)
}
/// Compute a greatest common divisor.
pub fn | <X>(a: X, b: X) -> X where
X: PrimInt + Integer + Signed,
{
let GcdData { gcd, .. } = extended_gcd__inline(a, b);
gcd
}
/// Compute a least common multiple.
pub fn lcm<X>(a: X, b: X) -> X where
X: PrimInt + Integer + Signed,
{
let GcdData { lcm, .. } = extended_gcd__inline(a, b);
lcm
}
/// Compute a modular multiplicative inverse, if it exists.
///
/// This implementation uses the extended Gcd algorithm,
pub fn inverse_mod<X>(a: X, m: X) -> Option<X> where
X: PrimInt + Integer + Signed,
{
let GcdData { gcd: g, coeffs: (inv, _), .. } = extended_gcd__inline(a, m);
if g == X::one() { Some(inv.mod_floor(&m)) } else { None }
}
/// Merge many equations of the form `x = ai (mod ni)` into one.
///
/// The moduli don't need to be coprime;
/// ``None`` is returned if the equations are inconsistent.
///
/// `chinese_remainder(vec![])` is defined to be `Some((0,1))`.
pub fn chinese_remainder<X,I>(congruences: I) -> Option<(X,X)> where
X: PrimInt + Integer + Signed,
I: IntoIterator<Item=(X,X)>,
{
// something something "monadic" something "fold"
congruences.into_iter().fold(Some((X::zero(),X::one())),
|opt, new_pair| opt.and_then(|acc_pair|
chinese_remainder2(acc_pair, new_pair)
)
)
}
/// Merge two equations of the form ``x = ai (mod ni)`` into one.
///
/// The moduli don't need to be coprime;
/// `None` is returned if the equations are inconsistent.
///
/// Panics if a modulus is negative or zero.
pub fn chinese_remainder2<X>((a1,n1):(X,X), (a2,n2):(X,X)) -> Option<(X,X)> where
X: PrimInt + Integer + Signed,
{
// I'm too lazy right now to consider whether there is a
// reasonable behavior for negative moduli
assert!(n1.is_positive());
assert!(n2.is_positive());
let GcdData {
gcd: g,
lcm: n3,
coeffs: (c1,c2),
..
} = extended_gcd__inline(n1, n2);
let (a1div, a1rem) = a1.div_rem(&g);
let (a2div, a2rem) = a2.div_rem(&g);
if a1rem != a2rem { None }
else {
let a3 = (a2div*c1*n1 + a1div*c2*n2 + a1rem).mod_floor(&n3);
Some((a3, n3))
}
}
// used for conversions of literals (which will clearly never fail)
fn lit<X>(x: i64) -> X where X: PrimInt { X::from(x).unwrap() }
// slightly more verbose for use outside mathematical expressions
fn convert<X>(x: i64) -> X where X: PrimInt { X::from(x).unwrap() }
/// An argument to tame function count explosion for functions
/// which can optionally deal with permutation symmetry.
#[derive(Copy,Clone,Hash,PartialEq,Eq,Debug)]
pub enum OrderType {
/// Order matters; consider all distinct permutations.
Ordered,
/// Order does not matter; only consider distinct combinations.
Unordered,
}
/// Used as a half-baked alternative to writing a generic interface
/// over RangeTo and RangeToInclusive
#[derive(Copy,Clone,Hash,PartialEq,Eq,Debug)]
pub enum UpperBound<X> { Upto(X), Under(X), }
impl<X> UpperBound<X> where X: Integer + One,
{
// FIXME: I think it is hard to read code that uses this.
/// a generic form of (min..x).next_back() or (min...x).next_back()
fn inclusive_limit_from(self, min: X) -> Option<X> {
match self {
Under(upper) => if min >= upper { None } else { Some(upper - One::one()) },
Upto(max) => if min > max { None } else { Some(max) },
}
}
}
#[test]
fn test_inclusive_limit_from() {
assert_eq!(Upto(4).inclusive_limit_from(3), Some(4));
assert_eq!(Upto(3).inclusive_limit_from(3), Some(3));
assert_eq!(Upto(2).inclusive_limit_from(3), None);
assert_eq!(Upto(1).inclusive_limit_from(3), None);
assert_eq!(Under(5).inclusive_limit_from(3), Some(4));
assert_eq!(Under(4).inclusive_limit_from(3), Some(3));
assert_eq!(Under(3).inclusive_limit_from(3), None);
assert_eq!(Under(2).inclusive_limit_from(3), None);
// no underflow please kthx
assert_eq!(Under(0).inclusive_limit_from(0), None);
}
use UpperBound::*;
use OrderType::*;
// NOTE: Further possible generalizations:
// * Take a Range instead of UpperBound so that zero can optionally be included
// (however it would also require solving how to produce correct results
// for all other lower bounds)
// * count_coprime_tuplets(max, n)
/// Counts coprime pairs of integers `>= 1`
///
/// # Notes
///
/// `(0,1)` and `(1,0)` are not counted.
/// `(1,1)` (the only symmetric pair) is counted once.
///
/// # Reference
///
/// http://laurentmazare.github.io/2014/09/14/counting-coprime-pairs/
pub fn count_coprime_pairs<X>(bound: UpperBound<X>, order_type: OrderType) -> X where
X: PrimInt + Integer + Hash,
X: ::std::fmt::Debug,
{
let max = {
if let Some(max) = bound.inclusive_limit_from(lit(1)) { max }
else { return lit(0); } // catches Under(0), Upto(0), Under(1)
};
let ordered = count_ordered_coprime_pairs(max);
match order_type {
Ordered => ordered,
// Every combination was counted twice except for (1,1);
// Add 1 so that they are ALL double counted, then halve it.
// (this also fortituously produces 0 for max == 0)
Unordered => (ordered + lit(1)) / lit(2),
}
}
fn count_ordered_coprime_pairs<X>(max: X) -> X where
X: PrimInt + Integer + Hash,
X: ::std::fmt::Debug,
{
// Function can be described with this recursion relation:
//
// c(n) = n**2 - sum_{k=2}^n c(n // k)
//
// where '//' is floored division.
//
// Many values of k share the same value of (n // k),
// thereby permitting a coarse-graining approach.
// unique values of m (=floor(n/k)) for small k
let fine_deps = |n|
(2..).map(convert::<X>)
.map(move |k| n/k).take_while(move |&m| m*m > n);
// values of m (=floor(n/k)) shared by many large k.
let coarse_deps = |n|
(1..).map(convert::<X>)
.take_while(move |&m| m*m <= n)
// don't produce m == 1 for n == 1
.skip_while(move |_| n == lit(1));
let coarse_multiplicity = |n,m| n/m - n/(m + lit(1));
// Get all values that need to be computed at some point.
//
// Interestingly, these are just 'max' and its direct dependencies
// (which are of the form 'max // k'). The reason the subproblems
// never introduce new dependencies is because integer division
// apparently satisfies the following property for x non-negative
// and a,b positive:
//
// (x // a) // b == (x // b) // a == x // (a*b)
//
// (NOTE: euclidean division wins *yet again*; it is the only sign convention
// under which this also works for negative 'x', 'a', and 'b'!)
let order = {
let mut vec = vec![max];
vec.extend(fine_deps(max));
vec.extend(coarse_deps(max));
vec.sort();
vec
};
let mut memo = HashMap::new();
let compute = |n, memo: &HashMap<X,X>| {
let acc = n*n;
let acc = coarse_deps(n)
.map(|m| memo[&m.into()] * coarse_multiplicity(n,m))
.fold(acc, |a,b| a-b);
let acc = fine_deps(n)
.map(|m| memo[&m.into()])
.fold(acc, |a,b| a-b);
acc
};
for x in order {
let value = compute(x, &memo);
memo.insert(x, value);
}
memo[&max]
}
#[cfg(test)]
mod tests {
use super::*;
use super::OrderType::*;
use super::UpperBound::*;
use ::num_integer::Integer;
use ::num_traits::{PrimInt,Signed};
use test;
use rand::{Rng};
#[test]
fn test_gcd() {
// swap left/right
// (using a pair that takes several iterations)
assert_eq!(gcd(234,123), 3);
assert_eq!(gcd(123,234), 3);
// negatives
assert_eq!(gcd(-15,20), 5);
assert_eq!(gcd(15,-20), 5);
assert_eq!(gcd(-15,-20), 5);
// zeroes
assert_eq!(gcd(0,17), 17);
assert_eq!(gcd(17,0), 17);
assert_eq!(gcd(0,0), 0);
}
#[test]
fn test_chinese_remainder() {
// test both interfaces
let eq1 = (2328,16256);
let eq2 = (410,5418);
let soln = (28450328, 44037504);
assert_eq!(chinese_remainder2(eq1,eq2), Some(soln));
assert_eq!(chinese_remainder(vec![eq1,eq2]), Some(soln));
// (0,1) serves as an "identity"
assert_eq!(chinese_remainder(vec![]), Some((0,1)));
assert_eq!(chinese_remainder(vec![(0,1)]), Some((0,1)));
assert_eq!(chinese_remainder(vec![(0,1),(13,36)]), Some((13,36)));
assert_eq!(chinese_remainder(vec![(13,36),(0,1)]), Some((13,36)));
// single equation
assert_eq!(chinese_remainder(vec![eq1]), Some(eq1));
// inconsistent equations
assert_eq!(chinese_remainder2((10,7),(4,14)), None);
assert_eq!(chinese_remainder(vec![(10,7),(4,14)]), None);
// FIXME: test more than 2 equations
// FIXME: do we specify behavior for when the input a_i are
// not already reduced modulo n_i?
}
#[test]
fn test_inverse_mod() {
let solns15 = vec![
None, Some(1), Some(8), None, Some(4),
None, None, Some(13), Some(2), None,
None, Some(11), None, Some(7), Some(14),
];
for x in -15..30 {
assert_eq!(inverse_mod(x,15), solns15[x.mod_floor(&15) as usize]);
}
}
#[test]
fn test_count_coprime_pairs() {
fn check<X>(bound: UpperBound<X>, expect_o: X, expect_u: X)
where X: ::std::fmt::Debug + PrimInt + ::std::hash::Hash + Integer {
let actual_o = count_coprime_pairs(bound, Ordered);
let actual_u = count_coprime_pairs(bound, Unordered);
assert!(actual_o == expect_o,
"g({:?}, Ordered) == {:?}, != {:?}", bound, actual_o, expect_o);
assert!(actual_u == expect_u,
"g({:?}, Unordered) == {:?}, != {:?}", bound, actual_u, expect_u);
};
// special-ish cases
check(Under(0u32), 0, 0); // unsigned to check for underflow
check(Under(0i32), 0, 0); // signed to check for poor usage of checked_sub
check(Upto(0u32), 0, 0);
check(Upto(0i32), 0, 0);
check(Upto(1u32), 1, 1);
check(Upto(1i32), 1, 1);
// a nontrivial coprime pair (2,3)
check(Upto(3u32), 7, 4);
// a nontrivial non-coprime pair (2,4)
check(Upto(4u32), 11, 6);
// problem size large enough to test both fine-graining and coarse-graining
check(Upto(100u32), 6087, 3044);
// a biggun
assert_eq!(count_coprime_pairs(Upto(10_000_000i64), Ordered), 60792712854483i64);
// try a variety of bounds in an attempt to get memo[&x] to panic
// on a missed dependency
let mut rng = ::rand::thread_rng();
test::black_box(
(0..100).map(|_| rng.gen_range(100, 100_000i64))
.map(|x| count_coprime_pairs(Upto(x), Ordered))
.sum::<i64>()
);
}
// Gold standard for binary comparison.
#[inline(never)]
fn gcd__reference<X>(a: X, b: X) -> X where
X: PrimInt + Integer + Signed,
{
let mut a = a.abs();
let mut b = b.abs();
while b != X::zero() {
let tmp = b;
b = a % b;
a = tmp;
}
a
}
// Impressively, rustc grinds this down to a *byte-perfect match*
// against gcd__reference.
#[inline(never)]
fn gcd__optimized<X>(a: X, b: X) -> X where
X: PrimInt + Integer + Signed,
{
gcd(a, b)
}
// force the two inline(never) functions above to be compiled
#[test]
fn dummy__compile_testfuncs() {
assert_eq!(gcd__reference(15,20), 5);
assert_eq!(gcd__optimized(20,15), 5);
// Interestingly, the compiled inline(never) functions will
// recieve optimizations based on their inputs.
// Without these following invocations, rustc will compile
// faster versions that only support positive arguments.
assert_eq!(gcd__reference(-15,-20), 5);
assert_eq!(gcd__optimized(-20,-15), 5);
}
}
| gcd | identifier_name |
lib.rs | #![allow(non_snake_case)]
#![cfg_attr(test, feature(test))]
extern crate num_integer;
extern crate num_traits;
use num_integer::Integer;
use num_traits::{PrimInt,Signed,One,NumCast};
use ::std::collections::HashMap;
use ::std::hash::Hash;
#[cfg(test)] extern crate test;
#[cfg(test)] extern crate rand;
#[derive(Copy,Clone,Debug,Eq,PartialEq)]
pub struct GcdData<X> {
    /// Greatest common divisor (computed on absolute values, so non-negative).
    pub gcd: X,
    /// Least common multiple; its sign preserves `a*b == gcd*lcm`
    /// (see the FIXME in `extended_gcd__inline`).
    pub lcm: X,
    /// Bezout coefficients `(s, t)` intended to satisfy `s*a + t*b == gcd`.
    pub coeffs: (X, X),
    /// Quotients of the inputs by the GCD, i.e. `(a/gcd, b/gcd)`.
    pub quotients: (X, X),
}
// NOTE:
// The Signed bound is unavoidable for Extended GCD because the Bezout
// coefficients can be negative. This is unfortunate for plain old gcd(),
// which technically shouldn't require the Signed bound.
// Since the bezout coefficients have no impact on each other or on the gcd,
// a sufficiently smart compiler can rip their computations out entirely.
// And as luck would have it, rustc is sufficiently smart!
/// Extended Euclidean algorithm, run on `|a|` and `|b|` with the input
/// signs reapplied at the end.
///
/// Marked `inline(always)` so callers that use only one field (e.g. `gcd`)
/// let the optimizer strip the unused computations (see NOTE above).
#[inline(always)]
fn extended_gcd__inline<X>(a: X, b: X) -> GcdData<X> where
    X: PrimInt + Integer + Signed,
{
    // Strip the signs now; remember them so they can be restored below.
    let (a_sign, a) = (a.signum(), a.abs());
    let (b_sign, b) = (b.signum(), b.abs());
    // Silly trick because rust doesn't have true multiple assignment:
    // Store the two targets in one variable! Order is (old, current).
    let mut s = (X::one(), X::zero()); // a coefficient
    let mut t = (X::zero(), X::one()); // b coefficient
    let mut r = (a, b); // gcd
    while r.1 != X::zero() {
        let (div, rem) = (r.0/r.1, r.0%r.1);
        r = (r.1, rem);
        s = (s.1, s.0 - div * s.1);
        t = (t.1, t.0 - div * t.1);
    }
    // On termination the trailing coefficients satisfy |t.1| == a/gcd and
    // |s.1| == b/gcd (standard property of the extended Euclid iteration);
    // reapply the original input signs.
    let quots = (a_sign * t.1.abs(), b_sign * s.1.abs());
    GcdData {
        gcd: r.0,
        // FIXME think more about sign of LCM
        // (current implementation preserves the property a*b == gcd*lcm
        // which is nice, but I don't know if it is The Right Thing)
        lcm: r.0*quots.0*quots.1,
        coeffs: (a_sign*s.0, b_sign*t.0),
        quotients: quots,
    }
}
/// Compute a greatest common divisor with other miscellany.
///
/// Returns the gcd together with the lcm, Bezout coefficients, and the
/// quotients of the inputs by the gcd; see [`GcdData`] for the details.
pub fn extended_gcd<X>(a: X, b: X) -> GcdData<X> where
    X: PrimInt + Integer + Signed,
{
    extended_gcd__inline(a, b)
}
/// Compute a greatest common divisor.
///
/// The result is non-negative; `gcd(0, 0)` is `0`.
pub fn gcd<X>(a: X, b: X) -> X where
    X: PrimInt + Integer + Signed,
{
    // Only the gcd field is needed; the optimizer discards the rest.
    extended_gcd__inline(a, b).gcd
}
/// Compute a least common multiple.
///
/// The sign convention preserves `a*b == gcd*lcm`.
pub fn lcm<X>(a: X, b: X) -> X where
    X: PrimInt + Integer + Signed,
{
    // Only the lcm field is needed; the optimizer discards the rest.
    extended_gcd__inline(a, b).lcm
}
/// Compute a modular multiplicative inverse, if it exists.
///
/// Returns `None` when `a` and `m` are not coprime (no inverse exists).
/// This implementation uses the extended GCD algorithm.
pub fn inverse_mod<X>(a: X, m: X) -> Option<X> where
    X: PrimInt + Integer + Signed,
{
    let data = extended_gcd__inline(a, m);
    // An inverse exists iff gcd(a, m) == 1.
    if data.gcd != X::one() {
        return None;
    }
    // The Bezout coefficient of `a` is the inverse; normalize into 0..m.
    Some(data.coeffs.0.mod_floor(&m))
}
/// Merge many equations of the form `x = ai (mod ni)` into one.
///
/// The moduli don't need to be coprime;
/// ``None`` is returned if the equations are inconsistent.
///
/// `chinese_remainder(vec![])` is defined to be `Some((0,1))`.
pub fn chinese_remainder<X,I>(congruences: I) -> Option<(X,X)> where
X: PrimInt + Integer + Signed,
I: IntoIterator<Item=(X,X)>,
{
// something something "monadic" something "fold"
congruences.into_iter().fold(Some((X::zero(),X::one())),
|opt, new_pair| opt.and_then(|acc_pair|
chinese_remainder2(acc_pair, new_pair)
)
)
}
/// Merge two equations of the form ``x = ai (mod ni)`` into one.
///
/// The moduli don't need to be coprime;
/// `None` is returned if the equations are inconsistent.
///
/// Panics if a modulus is negative or zero.
pub fn chinese_remainder2<X>((a1,n1):(X,X), (a2,n2):(X,X)) -> Option<(X,X)> where
    X: PrimInt + Integer + Signed,
{
    // I'm too lazy right now to consider whether there is a
    // reasonable behavior for negative moduli
    assert!(n1.is_positive());
    assert!(n2.is_positive());
    let data = extended_gcd__inline(n1, n2);
    let g = data.gcd;
    let n3 = data.lcm;              // combined modulus
    let (c1, c2) = data.coeffs;     // Bezout coefficients of (n1, n2)
    let (a1div, a1rem) = a1.div_rem(&g);
    let (a2div, a2rem) = a2.div_rem(&g);
    // The equations are consistent iff a1 == a2 modulo gcd(n1, n2).
    if a1rem != a2rem {
        return None;
    }
    let a3 = (a2div*c1*n1 + a1div*c2*n2 + a1rem).mod_floor(&n3);
    Some((a3, n3))
}
/// Convert an `i64` literal to `X`; used for conversions of literals
/// (which will clearly never fail for the small constants involved).
fn lit<X: PrimInt>(x: i64) -> X { X::from(x).unwrap() }
/// Identical to `lit`; the longer name reads better outside
/// mathematical expressions.
fn convert<X: PrimInt>(x: i64) -> X { X::from(x).unwrap() }
/// An argument to tame function count explosion for functions
/// which can optionally deal with permutation symmetry.
///
/// E.g. when counting pairs, `Ordered` counts `(2,3)` and `(3,2)`
/// separately, while `Unordered` counts them once.
#[derive(Copy,Clone,Hash,PartialEq,Eq,Debug)]
pub enum OrderType {
    /// Order matters; consider all distinct permutations.
    Ordered,
    /// Order does not matter; only consider distinct combinations.
    Unordered,
}
/// Used as a half-baked alternative to writing a generic interface
/// over RangeTo and RangeToInclusive
#[derive(Copy,Clone,Hash,PartialEq,Eq,Debug)]
pub enum UpperBound<X> {
    /// Inclusive upper bound (`..=x` semantics).
    Upto(X),
    /// Exclusive upper bound (`..x` semantics).
    Under(X),
}
impl<X> UpperBound<X> where X: Integer + One,
{
// FIXME: I think it is hard to read code that uses this.
/// a generic form of (min..x).next_back() or (min...x).next_back()
fn inclusive_limit_from(self, min: X) -> Option<X> {
match self {
Under(upper) => if min >= upper | else { Some(upper - One::one()) },
Upto(max) => if min > max { None } else { Some(max) },
}
}
}
#[test]
fn test_inclusive_limit_from() {
    // Upto(x) is inclusive: admits x itself whenever x >= min.
    assert_eq!(Upto(4).inclusive_limit_from(3), Some(4));
    assert_eq!(Upto(3).inclusive_limit_from(3), Some(3));
    assert_eq!(Upto(2).inclusive_limit_from(3), None);
    assert_eq!(Upto(1).inclusive_limit_from(3), None);
    // Under(x) is exclusive: the largest admitted value is x - 1.
    assert_eq!(Under(5).inclusive_limit_from(3), Some(4));
    assert_eq!(Under(4).inclusive_limit_from(3), Some(3));
    assert_eq!(Under(3).inclusive_limit_from(3), None);
    assert_eq!(Under(2).inclusive_limit_from(3), None);
    // no underflow please kthx
    assert_eq!(Under(0).inclusive_limit_from(0), None);
}
use UpperBound::*;
use OrderType::*;
// NOTE: Further possible generalizations:
// * Take a Range instead of UpperBound so that zero can optionally be included
// (however it would also require solving how to produce correct results
// for all other lower bounds)
// * count_coprime_tuplets(max, n)
/// Counts coprime pairs of integers `>= 1`
///
/// # Notes
///
/// `(0,1)` and `(1,0)` are not counted.
/// `(1,1)` (the only symmetric pair) is counted once.
///
/// # Reference
///
/// http://laurentmazare.github.io/2014/09/14/counting-coprime-pairs/
pub fn count_coprime_pairs<X>(bound: UpperBound<X>, order_type: OrderType) -> X where
    X: PrimInt + Integer + Hash,
    X: ::std::fmt::Debug,
{
    // Resolve the bound to an inclusive maximum. An empty range
    // (Under(0), Upto(0), Under(1)) means there is nothing to count.
    let max = match bound.inclusive_limit_from(lit(1)) {
        Some(max) => max,
        None => return lit(0),
    };
    let ordered = count_ordered_coprime_pairs(max);
    match order_type {
        Ordered => ordered,
        // Every combination was counted twice except for (1,1);
        // add 1 so they are ALL double counted, then halve.
        Unordered => (ordered + lit(1)) / lit(2),
    }
}
/// Counts ordered coprime pairs `(a, b)` with `1 <= a, b <= max`,
/// via a memoized recursion over the distinct values of `max // k`.
fn count_ordered_coprime_pairs<X>(max: X) -> X where
    X: PrimInt + Integer + Hash,
    X: ::std::fmt::Debug,
{
    // Function can be described with this recursion relation:
    //
    //     c(n) = n**2 - sum_{k=2}^n c(n // k)
    //
    // where '//' is floored division.
    //
    // Many values of k share the same value of (n // k),
    // thereby permitting a coarse-graining approach.

    // unique values of m (=floor(n/k)) for small k (below ~sqrt(n))
    let fine_deps = |n|
        (2..).map(convert::<X>)
             .map(move |k| n/k).take_while(move |&m| m*m > n);
    // values of m (=floor(n/k)) shared by many large k.
    let coarse_deps = |n|
        (1..).map(convert::<X>)
             .take_while(move |&m| m*m <= n)
             // don't produce m == 1 for n == 1
             .skip_while(move |_| n == lit(1));
    // how many values of k share the same coarse value m (=floor(n/k))
    let coarse_multiplicity = |n,m| n/m - n/(m + lit(1));
    // Get all values that need to be computed at some point.
    //
    // Interestingly, these are just 'max' and its direct dependencies
    // (which are of the form 'max // k'). The reason the subproblems
    // never introduce new dependencies is because integer division
    // apparently satisfies the following property for x non-negative
    // and a,b positive:
    //
    //     (x // a) // b == (x // b) // a == x // (a*b)
    //
    // (NOTE: euclidean division wins *yet again*; it is the only sign
    // convention under which this also works for negative 'x', 'a', 'b'!)
    let order = {
        let mut vec = vec![max];
        vec.extend(fine_deps(max));
        vec.extend(coarse_deps(max));
        // ascending order: every dependency of n is < n, so each value is
        // computed only after its dependencies are in the memo table
        vec.sort();
        vec
    };
    let mut memo = HashMap::new();
    // One step of the recursion above, reading subproblem answers
    // from `memo`; panics (missing key) if a dependency was missed.
    let compute = |n, memo: &HashMap<X,X>| {
        let acc = n*n;
        let acc = coarse_deps(n)
            .map(|m| memo[&m.into()] * coarse_multiplicity(n,m))
            .fold(acc, |a,b| a-b);
        let acc = fine_deps(n)
            .map(|m| memo[&m.into()])
            .fold(acc, |a,b| a-b);
        acc
    };
    for x in order {
        let value = compute(x, &memo);
        memo.insert(x, value);
    }
    memo[&max]
}
#[cfg(test)]
mod tests {
    //! Unit tests; also contains two `#[inline(never)]` copies of gcd
    //! used to compare the generic implementation's codegen against a
    //! hand-written reference loop.
    use super::*;
    use super::OrderType::*;
    use super::UpperBound::*;
    use ::num_integer::Integer;
    use ::num_traits::{PrimInt,Signed};
    use test;
    use rand::{Rng};
    #[test]
    fn test_gcd() {
        // swap left/right
        // (using a pair that takes several iterations)
        assert_eq!(gcd(234,123), 3);
        assert_eq!(gcd(123,234), 3);
        // negatives
        assert_eq!(gcd(-15,20), 5);
        assert_eq!(gcd(15,-20), 5);
        assert_eq!(gcd(-15,-20), 5);
        // zeroes
        assert_eq!(gcd(0,17), 17);
        assert_eq!(gcd(17,0), 17);
        assert_eq!(gcd(0,0), 0);
    }
    #[test]
    fn test_chinese_remainder() {
        // test both interfaces
        let eq1 = (2328,16256);
        let eq2 = (410,5418);
        let soln = (28450328, 44037504);
        assert_eq!(chinese_remainder2(eq1,eq2), Some(soln));
        assert_eq!(chinese_remainder(vec![eq1,eq2]), Some(soln));
        // (0,1) serves as an "identity"
        assert_eq!(chinese_remainder(vec![]), Some((0,1)));
        assert_eq!(chinese_remainder(vec![(0,1)]), Some((0,1)));
        assert_eq!(chinese_remainder(vec![(0,1),(13,36)]), Some((13,36)));
        assert_eq!(chinese_remainder(vec![(13,36),(0,1)]), Some((13,36)));
        // single equation
        assert_eq!(chinese_remainder(vec![eq1]), Some(eq1));
        // inconsistent equations
        assert_eq!(chinese_remainder2((10,7),(4,14)), None);
        assert_eq!(chinese_remainder(vec![(10,7),(4,14)]), None);
        // FIXME: test more than 2 equations
        // FIXME: do we specify behavior for when the input a_i are
        //        not already reduced modulo n_i?
    }
    #[test]
    fn test_inverse_mod() {
        // inverses modulo 15 for residues 0..15; None where gcd(x,15) != 1
        let solns15 = vec![
            None, Some(1), Some(8), None, Some(4),
            None, None, Some(13), Some(2), None,
            None, Some(11), None, Some(7), Some(14),
        ];
        // range deliberately covers negative inputs and inputs >= 15
        for x in -15..30 {
            assert_eq!(inverse_mod(x,15), solns15[x.mod_floor(&15) as usize]);
        }
    }
    #[test]
    fn test_count_coprime_pairs() {
        // check both order types against expected values for one bound
        fn check<X>(bound: UpperBound<X>, expect_o: X, expect_u: X)
        where X: ::std::fmt::Debug + PrimInt + ::std::hash::Hash + Integer {
            let actual_o = count_coprime_pairs(bound, Ordered);
            let actual_u = count_coprime_pairs(bound, Unordered);
            assert!(actual_o == expect_o,
                "g({:?}, Ordered) == {:?}, != {:?}", bound, actual_o, expect_o);
            assert!(actual_u == expect_u,
                "g({:?}, Unordered) == {:?}, != {:?}", bound, actual_u, expect_u);
        };
        // special-ish cases
        check(Under(0u32), 0, 0); // unsigned to check for underflow
        check(Under(0i32), 0, 0); // signed to check for poor usage of checked_sub
        check(Upto(0u32), 0, 0);
        check(Upto(0i32), 0, 0);
        check(Upto(1u32), 1, 1);
        check(Upto(1i32), 1, 1);
        // a nontrivial coprime pair (2,3)
        check(Upto(3u32), 7, 4);
        // a nontrivial non-coprime pair (2,4)
        check(Upto(4u32), 11, 6);
        // problem size large enough to test both fine-graining and coarse-graining
        check(Upto(100u32), 6087, 3044);
        // a biggun
        assert_eq!(count_coprime_pairs(Upto(10_000_000i64), Ordered), 60792712854483i64);
        // try a variety of bounds in an attempt to get memo[&x] to panic
        // on a missed dependency
        let mut rng = ::rand::thread_rng();
        test::black_box(
            (0..100).map(|_| rng.gen_range(100, 100_000i64))
                    .map(|x| count_coprime_pairs(Upto(x), Ordered))
                    .sum::<i64>()
        );
    }
    // Gold standard for binary comparison.
    #[inline(never)]
    fn gcd__reference<X>(a: X, b: X) -> X where
        X: PrimInt + Integer + Signed,
    {
        let mut a = a.abs();
        let mut b = b.abs();
        while b != X::zero() {
            let tmp = b;
            b = a % b;
            a = tmp;
        }
        a
    }
    // Impressively, rustc grinds this down to a *byte-perfect match*
    // against gcd__reference.
    #[inline(never)]
    fn gcd__optimized<X>(a: X, b: X) -> X where
        X: PrimInt + Integer + Signed,
    {
        gcd(a, b)
    }
    // force the two inline(never) functions above to be compiled
    #[test]
    fn dummy__compile_testfuncs() {
        assert_eq!(gcd__reference(15,20), 5);
        assert_eq!(gcd__optimized(20,15), 5);
        // Interestingly, the compiled inline(never) functions will
        // receive optimizations based on their inputs.
        // Without these following invocations, rustc will compile
        // faster versions that only support positive arguments.
        assert_eq!(gcd__reference(-15,-20), 5);
        assert_eq!(gcd__optimized(-20,-15), 5);
    }
}
| { None } | conditional_block |
lib.rs | #![allow(non_snake_case)]
#![cfg_attr(test, feature(test))]
extern crate num_integer;
extern crate num_traits;
use num_integer::Integer;
use num_traits::{PrimInt,Signed,One,NumCast};
use ::std::collections::HashMap;
use ::std::hash::Hash;
#[cfg(test)] extern crate test;
#[cfg(test)] extern crate rand;
#[derive(Copy,Clone,Debug,Eq,PartialEq)]
pub struct GcdData<X> {
// greatest common divisor
pub gcd: X,
// least common multiple
pub lcm: X,
// bezout coefficients
pub coeffs: (X, X),
// quotients of the inputs by the GCD
pub quotients: (X, X),
}
// NOTE:
// The Signed bound is unavoidable for Extended GCD because the Bezout
// coefficients can be negative. This is unfortunate for plain old gcd(),
// which technically shouldn't require the Signed bound.
// Since the bezout coefficients have no impact on each other or on the gcd,
// a sufficiently smart compiler can rip their computations out entirely.
// And as luck would have it, rustc is sufficiently smart!
#[inline(always)]
fn extended_gcd__inline<X>(a: X, b: X) -> GcdData<X> where
X: PrimInt + Integer + Signed,
{
let (a_sign, a) = (a.signum(), a.abs());
let (b_sign, b) = (b.signum(), b.abs());
// Silly trick because rust doesn't have true multiple assignment:
// Store the two targets in one variable! Order is (old, current).
let mut s = (X::one(), X::zero()); // a coefficient
let mut t = (X::zero(), X::one()); // b coefficient
let mut r = (a, b); // gcd
while r.1 != X::zero() {
let (div, rem) = (r.0/r.1, r.0%r.1);
r = (r.1, rem);
s = (s.1, s.0 - div * s.1);
t = (t.1, t.0 - div * t.1);
}
let quots = (a_sign * t.1.abs(), b_sign * s.1.abs());
GcdData {
gcd: r.0,
// FIXME think more about sign of LCM
// (current implementation preserves the property a*b == gcd*lcm
// which is nice, but I don't know if it is The Right Thing)
lcm: r.0*quots.0*quots.1,
coeffs: (a_sign*s.0, b_sign*t.0),
quotients: quots,
}
}
/// Compute a greatest common divisor with other miscellany.
pub fn extended_gcd<X>(a: X, b: X) -> GcdData<X> where
X: PrimInt + Integer + Signed,
{
extended_gcd__inline(a, b)
}
/// Compute a greatest common divisor.
pub fn gcd<X>(a: X, b: X) -> X where
X: PrimInt + Integer + Signed,
|
/// Compute a least common multiple.
pub fn lcm<X>(a: X, b: X) -> X where
X: PrimInt + Integer + Signed,
{
let GcdData { lcm, .. } = extended_gcd__inline(a, b);
lcm
}
/// Compute a modular multiplicative inverse, if it exists.
///
/// This implementation uses the extended Gcd algorithm,
pub fn inverse_mod<X>(a: X, m: X) -> Option<X> where
X: PrimInt + Integer + Signed,
{
let GcdData { gcd: g, coeffs: (inv, _), .. } = extended_gcd__inline(a, m);
if g == X::one() { Some(inv.mod_floor(&m)) } else { None }
}
/// Merge many equations of the form `x = ai (mod ni)` into one.
///
/// The moduli don't need to be coprime;
/// ``None`` is returned if the equations are inconsistent.
///
/// `chinese_remainder(vec![])` is defined to be `Some((0,1))`.
pub fn chinese_remainder<X,I>(congruences: I) -> Option<(X,X)> where
X: PrimInt + Integer + Signed,
I: IntoIterator<Item=(X,X)>,
{
// something something "monadic" something "fold"
congruences.into_iter().fold(Some((X::zero(),X::one())),
|opt, new_pair| opt.and_then(|acc_pair|
chinese_remainder2(acc_pair, new_pair)
)
)
}
/// Merge two equations of the form ``x = ai (mod ni)`` into one.
///
/// The moduli don't need to be coprime;
/// `None` is returned if the equations are inconsistent.
///
/// Panics if a modulus is negative or zero.
pub fn chinese_remainder2<X>((a1,n1):(X,X), (a2,n2):(X,X)) -> Option<(X,X)> where
X: PrimInt + Integer + Signed,
{
// I'm too lazy right now to consider whether there is a
// reasonable behavior for negative moduli
assert!(n1.is_positive());
assert!(n2.is_positive());
let GcdData {
gcd: g,
lcm: n3,
coeffs: (c1,c2),
..
} = extended_gcd__inline(n1, n2);
let (a1div, a1rem) = a1.div_rem(&g);
let (a2div, a2rem) = a2.div_rem(&g);
if a1rem != a2rem { None }
else {
let a3 = (a2div*c1*n1 + a1div*c2*n2 + a1rem).mod_floor(&n3);
Some((a3, n3))
}
}
// used for conversions of literals (which will clearly never fail)
fn lit<X>(x: i64) -> X where X: PrimInt { X::from(x).unwrap() }
// slightly more verbose for use outside mathematical expressions
fn convert<X>(x: i64) -> X where X: PrimInt { X::from(x).unwrap() }
/// An argument to tame function count explosion for functions
/// which can optionally deal with permutation symmetry.
#[derive(Copy,Clone,Hash,PartialEq,Eq,Debug)]
pub enum OrderType {
/// Order matters; consider all distinct permutations.
Ordered,
/// Order does not matter; only consider distinct combinations.
Unordered,
}
/// Used as a half-baked alternative to writing a generic interface
/// over RangeTo and RangeToInclusive
#[derive(Copy,Clone,Hash,PartialEq,Eq,Debug)]
pub enum UpperBound<X> { Upto(X), Under(X), }
impl<X> UpperBound<X> where X: Integer + One,
{
// FIXME: I think it is hard to read code that uses this.
/// a generic form of (min..x).next_back() or (min...x).next_back()
fn inclusive_limit_from(self, min: X) -> Option<X> {
match self {
Under(upper) => if min >= upper { None } else { Some(upper - One::one()) },
Upto(max) => if min > max { None } else { Some(max) },
}
}
}
#[test]
fn test_inclusive_limit_from() {
assert_eq!(Upto(4).inclusive_limit_from(3), Some(4));
assert_eq!(Upto(3).inclusive_limit_from(3), Some(3));
assert_eq!(Upto(2).inclusive_limit_from(3), None);
assert_eq!(Upto(1).inclusive_limit_from(3), None);
assert_eq!(Under(5).inclusive_limit_from(3), Some(4));
assert_eq!(Under(4).inclusive_limit_from(3), Some(3));
assert_eq!(Under(3).inclusive_limit_from(3), None);
assert_eq!(Under(2).inclusive_limit_from(3), None);
// no underflow please kthx
assert_eq!(Under(0).inclusive_limit_from(0), None);
}
use UpperBound::*;
use OrderType::*;
// NOTE: Further possible generalizations:
// * Take a Range instead of UpperBound so that zero can optionally be included
// (however it would also require solving how to produce correct results
// for all other lower bounds)
// * count_coprime_tuplets(max, n)
/// Counts coprime pairs of integers `>= 1`
///
/// # Notes
///
/// `(0,1)` and `(1,0)` are not counted.
/// `(1,1)` (the only symmetric pair) is counted once.
///
/// # Reference
///
/// http://laurentmazare.github.io/2014/09/14/counting-coprime-pairs/
pub fn count_coprime_pairs<X>(bound: UpperBound<X>, order_type: OrderType) -> X where
X: PrimInt + Integer + Hash,
X: ::std::fmt::Debug,
{
let max = {
if let Some(max) = bound.inclusive_limit_from(lit(1)) { max }
else { return lit(0); } // catches Under(0), Upto(0), Under(1)
};
let ordered = count_ordered_coprime_pairs(max);
match order_type {
Ordered => ordered,
// Every combination was counted twice except for (1,1);
// Add 1 so that they are ALL double counted, then halve it.
// (this also fortituously produces 0 for max == 0)
Unordered => (ordered + lit(1)) / lit(2),
}
}
fn count_ordered_coprime_pairs<X>(max: X) -> X where
X: PrimInt + Integer + Hash,
X: ::std::fmt::Debug,
{
// Function can be described with this recursion relation:
//
// c(n) = n**2 - sum_{k=2}^n c(n // k)
//
// where '//' is floored division.
//
// Many values of k share the same value of (n // k),
// thereby permitting a coarse-graining approach.
// unique values of m (=floor(n/k)) for small k
let fine_deps = |n|
(2..).map(convert::<X>)
.map(move |k| n/k).take_while(move |&m| m*m > n);
// values of m (=floor(n/k)) shared by many large k.
let coarse_deps = |n|
(1..).map(convert::<X>)
.take_while(move |&m| m*m <= n)
// don't produce m == 1 for n == 1
.skip_while(move |_| n == lit(1));
let coarse_multiplicity = |n,m| n/m - n/(m + lit(1));
// Get all values that need to be computed at some point.
//
// Interestingly, these are just 'max' and its direct dependencies
// (which are of the form 'max // k'). The reason the subproblems
// never introduce new dependencies is because integer division
// apparently satisfies the following property for x non-negative
// and a,b positive:
//
// (x // a) // b == (x // b) // a == x // (a*b)
//
// (NOTE: euclidean division wins *yet again*; it is the only sign convention
// under which this also works for negative 'x', 'a', and 'b'!)
let order = {
let mut vec = vec![max];
vec.extend(fine_deps(max));
vec.extend(coarse_deps(max));
vec.sort();
vec
};
let mut memo = HashMap::new();
let compute = |n, memo: &HashMap<X,X>| {
let acc = n*n;
let acc = coarse_deps(n)
.map(|m| memo[&m.into()] * coarse_multiplicity(n,m))
.fold(acc, |a,b| a-b);
let acc = fine_deps(n)
.map(|m| memo[&m.into()])
.fold(acc, |a,b| a-b);
acc
};
for x in order {
let value = compute(x, &memo);
memo.insert(x, value);
}
memo[&max]
}
#[cfg(test)]
mod tests {
use super::*;
use super::OrderType::*;
use super::UpperBound::*;
use ::num_integer::Integer;
use ::num_traits::{PrimInt,Signed};
use test;
use rand::{Rng};
#[test]
fn test_gcd() {
// swap left/right
// (using a pair that takes several iterations)
assert_eq!(gcd(234,123), 3);
assert_eq!(gcd(123,234), 3);
// negatives
assert_eq!(gcd(-15,20), 5);
assert_eq!(gcd(15,-20), 5);
assert_eq!(gcd(-15,-20), 5);
// zeroes
assert_eq!(gcd(0,17), 17);
assert_eq!(gcd(17,0), 17);
assert_eq!(gcd(0,0), 0);
}
#[test]
fn test_chinese_remainder() {
// test both interfaces
let eq1 = (2328,16256);
let eq2 = (410,5418);
let soln = (28450328, 44037504);
assert_eq!(chinese_remainder2(eq1,eq2), Some(soln));
assert_eq!(chinese_remainder(vec![eq1,eq2]), Some(soln));
// (0,1) serves as an "identity"
assert_eq!(chinese_remainder(vec![]), Some((0,1)));
assert_eq!(chinese_remainder(vec![(0,1)]), Some((0,1)));
assert_eq!(chinese_remainder(vec![(0,1),(13,36)]), Some((13,36)));
assert_eq!(chinese_remainder(vec![(13,36),(0,1)]), Some((13,36)));
// single equation
assert_eq!(chinese_remainder(vec![eq1]), Some(eq1));
// inconsistent equations
assert_eq!(chinese_remainder2((10,7),(4,14)), None);
assert_eq!(chinese_remainder(vec![(10,7),(4,14)]), None);
// FIXME: test more than 2 equations
// FIXME: do we specify behavior for when the input a_i are
// not already reduced modulo n_i?
}
#[test]
fn test_inverse_mod() {
let solns15 = vec![
None, Some(1), Some(8), None, Some(4),
None, None, Some(13), Some(2), None,
None, Some(11), None, Some(7), Some(14),
];
for x in -15..30 {
assert_eq!(inverse_mod(x,15), solns15[x.mod_floor(&15) as usize]);
}
}
#[test]
fn test_count_coprime_pairs() {
fn check<X>(bound: UpperBound<X>, expect_o: X, expect_u: X)
where X: ::std::fmt::Debug + PrimInt + ::std::hash::Hash + Integer {
let actual_o = count_coprime_pairs(bound, Ordered);
let actual_u = count_coprime_pairs(bound, Unordered);
assert!(actual_o == expect_o,
"g({:?}, Ordered) == {:?}, != {:?}", bound, actual_o, expect_o);
assert!(actual_u == expect_u,
"g({:?}, Unordered) == {:?}, != {:?}", bound, actual_u, expect_u);
};
// special-ish cases
check(Under(0u32), 0, 0); // unsigned to check for underflow
check(Under(0i32), 0, 0); // signed to check for poor usage of checked_sub
check(Upto(0u32), 0, 0);
check(Upto(0i32), 0, 0);
check(Upto(1u32), 1, 1);
check(Upto(1i32), 1, 1);
// a nontrivial coprime pair (2,3)
check(Upto(3u32), 7, 4);
// a nontrivial non-coprime pair (2,4)
check(Upto(4u32), 11, 6);
// problem size large enough to test both fine-graining and coarse-graining
check(Upto(100u32), 6087, 3044);
// a biggun
assert_eq!(count_coprime_pairs(Upto(10_000_000i64), Ordered), 60792712854483i64);
// try a variety of bounds in an attempt to get memo[&x] to panic
// on a missed dependency
let mut rng = ::rand::thread_rng();
test::black_box(
(0..100).map(|_| rng.gen_range(100, 100_000i64))
.map(|x| count_coprime_pairs(Upto(x), Ordered))
.sum::<i64>()
);
}
// Gold standard for binary comparison.
#[inline(never)]
fn gcd__reference<X>(a: X, b: X) -> X where
X: PrimInt + Integer + Signed,
{
let mut a = a.abs();
let mut b = b.abs();
while b != X::zero() {
let tmp = b;
b = a % b;
a = tmp;
}
a
}
// Impressively, rustc grinds this down to a *byte-perfect match*
// against gcd__reference.
#[inline(never)]
fn gcd__optimized<X>(a: X, b: X) -> X where
X: PrimInt + Integer + Signed,
{
gcd(a, b)
}
// force the two inline(never) functions above to be compiled
#[test]
fn dummy__compile_testfuncs() {
assert_eq!(gcd__reference(15,20), 5);
assert_eq!(gcd__optimized(20,15), 5);
// Interestingly, the compiled inline(never) functions will
// recieve optimizations based on their inputs.
// Without these following invocations, rustc will compile
// faster versions that only support positive arguments.
assert_eq!(gcd__reference(-15,-20), 5);
assert_eq!(gcd__optimized(-20,-15), 5);
}
}
| {
let GcdData { gcd, .. } = extended_gcd__inline(a, b);
gcd
} | identifier_body |
lib.rs | #![allow(non_snake_case)]
#![cfg_attr(test, feature(test))]
extern crate num_integer;
extern crate num_traits;
use num_integer::Integer;
use num_traits::{PrimInt,Signed,One,NumCast};
use ::std::collections::HashMap;
use ::std::hash::Hash;
#[cfg(test)] extern crate test;
#[cfg(test)] extern crate rand;
#[derive(Copy,Clone,Debug,Eq,PartialEq)]
pub struct GcdData<X> {
// greatest common divisor
pub gcd: X,
// least common multiple
pub lcm: X,
// bezout coefficients
pub coeffs: (X, X),
// quotients of the inputs by the GCD
pub quotients: (X, X),
}
// NOTE:
// The Signed bound is unavoidable for Extended GCD because the Bezout
// coefficients can be negative. This is unfortunate for plain old gcd(),
// which technically shouldn't require the Signed bound.
// Since the bezout coefficients have no impact on each other or on the gcd,
// a sufficiently smart compiler can rip their computations out entirely.
// And as luck would have it, rustc is sufficiently smart!
#[inline(always)]
fn extended_gcd__inline<X>(a: X, b: X) -> GcdData<X> where
X: PrimInt + Integer + Signed,
{
let (a_sign, a) = (a.signum(), a.abs());
let (b_sign, b) = (b.signum(), b.abs());
// Silly trick because rust doesn't have true multiple assignment:
// Store the two targets in one variable! Order is (old, current).
let mut s = (X::one(), X::zero()); // a coefficient
let mut t = (X::zero(), X::one()); // b coefficient
let mut r = (a, b); // gcd
while r.1 != X::zero() {
let (div, rem) = (r.0/r.1, r.0%r.1);
r = (r.1, rem);
s = (s.1, s.0 - div * s.1);
t = (t.1, t.0 - div * t.1);
}
let quots = (a_sign * t.1.abs(), b_sign * s.1.abs());
GcdData {
gcd: r.0,
// FIXME think more about sign of LCM
// (current implementation preserves the property a*b == gcd*lcm
// which is nice, but I don't know if it is The Right Thing)
lcm: r.0*quots.0*quots.1,
coeffs: (a_sign*s.0, b_sign*t.0),
quotients: quots,
}
}
/// Compute a greatest common divisor with other miscellany.
pub fn extended_gcd<X>(a: X, b: X) -> GcdData<X> where
X: PrimInt + Integer + Signed,
{
extended_gcd__inline(a, b)
}
/// Compute a greatest common divisor.
pub fn gcd<X>(a: X, b: X) -> X where
X: PrimInt + Integer + Signed,
{
let GcdData { gcd, .. } = extended_gcd__inline(a, b);
gcd
}
/// Compute a least common multiple.
pub fn lcm<X>(a: X, b: X) -> X where
X: PrimInt + Integer + Signed,
{
let GcdData { lcm, .. } = extended_gcd__inline(a, b);
lcm
}
/// Compute a modular multiplicative inverse, if it exists.
///
/// This implementation uses the extended Gcd algorithm,
pub fn inverse_mod<X>(a: X, m: X) -> Option<X> where
X: PrimInt + Integer + Signed,
{
let GcdData { gcd: g, coeffs: (inv, _), .. } = extended_gcd__inline(a, m);
if g == X::one() { Some(inv.mod_floor(&m)) } else { None }
}
/// Merge many equations of the form `x = ai (mod ni)` into one.
///
/// The moduli don't need to be coprime;
/// ``None`` is returned if the equations are inconsistent.
///
/// `chinese_remainder(vec![])` is defined to be `Some((0,1))`.
pub fn chinese_remainder<X,I>(congruences: I) -> Option<(X,X)> where
X: PrimInt + Integer + Signed,
I: IntoIterator<Item=(X,X)>,
{
// something something "monadic" something "fold"
congruences.into_iter().fold(Some((X::zero(),X::one())),
|opt, new_pair| opt.and_then(|acc_pair|
chinese_remainder2(acc_pair, new_pair)
)
)
}
/// Merge two equations of the form ``x = ai (mod ni)`` into one.
///
/// The moduli don't need to be coprime;
/// `None` is returned if the equations are inconsistent.
///
/// Panics if a modulus is negative or zero.
pub fn chinese_remainder2<X>((a1,n1):(X,X), (a2,n2):(X,X)) -> Option<(X,X)> where
X: PrimInt + Integer + Signed,
{
// I'm too lazy right now to consider whether there is a
// reasonable behavior for negative moduli
assert!(n1.is_positive());
assert!(n2.is_positive());
let GcdData {
gcd: g,
lcm: n3,
coeffs: (c1,c2),
..
} = extended_gcd__inline(n1, n2);
let (a1div, a1rem) = a1.div_rem(&g);
let (a2div, a2rem) = a2.div_rem(&g);
if a1rem != a2rem { None }
else {
let a3 = (a2div*c1*n1 + a1div*c2*n2 + a1rem).mod_floor(&n3);
Some((a3, n3))
}
}
// used for conversions of literals (which will clearly never fail)
fn lit<X>(x: i64) -> X where X: PrimInt { X::from(x).unwrap() }
// slightly more verbose for use outside mathematical expressions
fn convert<X>(x: i64) -> X where X: PrimInt { X::from(x).unwrap() }
/// An argument to tame function count explosion for functions
/// which can optionally deal with permutation symmetry.
#[derive(Copy,Clone,Hash,PartialEq,Eq,Debug)]
pub enum OrderType {
/// Order matters; consider all distinct permutations.
Ordered,
/// Order does not matter; only consider distinct combinations.
Unordered,
}
/// Used as a half-baked alternative to writing a generic interface
/// over RangeTo and RangeToInclusive
#[derive(Copy,Clone,Hash,PartialEq,Eq,Debug)]
pub enum UpperBound<X> { Upto(X), Under(X), }
impl<X> UpperBound<X> where X: Integer + One,
{
// FIXME: I think it is hard to read code that uses this.
/// a generic form of (min..x).next_back() or (min...x).next_back()
fn inclusive_limit_from(self, min: X) -> Option<X> {
match self {
Under(upper) => if min >= upper { None } else { Some(upper - One::one()) },
Upto(max) => if min > max { None } else { Some(max) },
}
}
}
#[test]
fn test_inclusive_limit_from() {
assert_eq!(Upto(4).inclusive_limit_from(3), Some(4));
assert_eq!(Upto(3).inclusive_limit_from(3), Some(3));
assert_eq!(Upto(2).inclusive_limit_from(3), None);
assert_eq!(Upto(1).inclusive_limit_from(3), None);
assert_eq!(Under(5).inclusive_limit_from(3), Some(4));
assert_eq!(Under(4).inclusive_limit_from(3), Some(3));
assert_eq!(Under(3).inclusive_limit_from(3), None);
assert_eq!(Under(2).inclusive_limit_from(3), None);
// no underflow please kthx
assert_eq!(Under(0).inclusive_limit_from(0), None);
}
use UpperBound::*;
use OrderType::*;
// NOTE: Further possible generalizations:
// * Take a Range instead of UpperBound so that zero can optionally be included
// (however it would also require solving how to produce correct results
// for all other lower bounds)
// * count_coprime_tuplets(max, n)
/// Counts coprime pairs of integers `>= 1`
///
/// # Notes
///
/// `(0,1)` and `(1,0)` are not counted.
/// `(1,1)` (the only symmetric pair) is counted once.
///
/// # Reference
///
/// http://laurentmazare.github.io/2014/09/14/counting-coprime-pairs/
pub fn count_coprime_pairs<X>(bound: UpperBound<X>, order_type: OrderType) -> X where
X: PrimInt + Integer + Hash,
X: ::std::fmt::Debug,
{
let max = {
if let Some(max) = bound.inclusive_limit_from(lit(1)) { max }
else { return lit(0); } // catches Under(0), Upto(0), Under(1)
};
let ordered = count_ordered_coprime_pairs(max);
match order_type {
Ordered => ordered,
// Every combination was counted twice except for (1,1);
// Add 1 so that they are ALL double counted, then halve it.
// (this also fortituously produces 0 for max == 0)
Unordered => (ordered + lit(1)) / lit(2),
}
}
fn count_ordered_coprime_pairs<X>(max: X) -> X where
X: PrimInt + Integer + Hash,
X: ::std::fmt::Debug,
{
// Function can be described with this recursion relation:
//
// c(n) = n**2 - sum_{k=2}^n c(n // k)
//
// where '//' is floored division.
//
// Many values of k share the same value of (n // k),
// thereby permitting a coarse-graining approach.
// unique values of m (=floor(n/k)) for small k
let fine_deps = |n|
(2..).map(convert::<X>)
.map(move |k| n/k).take_while(move |&m| m*m > n);
// values of m (=floor(n/k)) shared by many large k.
let coarse_deps = |n|
(1..).map(convert::<X>)
.take_while(move |&m| m*m <= n)
// don't produce m == 1 for n == 1
.skip_while(move |_| n == lit(1));
let coarse_multiplicity = |n,m| n/m - n/(m + lit(1));
// Get all values that need to be computed at some point.
//
// Interestingly, these are just 'max' and its direct dependencies
// (which are of the form 'max // k'). The reason the subproblems
// never introduce new dependencies is because integer division
// apparently satisfies the following property for x non-negative
// and a,b positive:
//
// (x // a) // b == (x // b) // a == x // (a*b)
//
// (NOTE: euclidean division wins *yet again*; it is the only sign convention
// under which this also works for negative 'x', 'a', and 'b'!)
let order = {
let mut vec = vec![max];
vec.extend(fine_deps(max));
vec.extend(coarse_deps(max));
vec.sort();
vec
};
let mut memo = HashMap::new();
let compute = |n, memo: &HashMap<X,X>| {
let acc = n*n;
let acc = coarse_deps(n)
.map(|m| memo[&m.into()] * coarse_multiplicity(n,m))
.fold(acc, |a,b| a-b);
let acc = fine_deps(n)
.map(|m| memo[&m.into()])
.fold(acc, |a,b| a-b);
acc
};
for x in order {
let value = compute(x, &memo);
memo.insert(x, value);
}
memo[&max]
}
#[cfg(test)]
mod tests {
use super::*;
use super::OrderType::*;
use super::UpperBound::*;
use ::num_integer::Integer;
use ::num_traits::{PrimInt,Signed};
use test;
use rand::{Rng};
#[test]
fn test_gcd() {
// swap left/right
// (using a pair that takes several iterations)
assert_eq!(gcd(234,123), 3);
assert_eq!(gcd(123,234), 3);
// negatives
assert_eq!(gcd(-15,20), 5);
assert_eq!(gcd(15,-20), 5);
assert_eq!(gcd(-15,-20), 5);
// zeroes
assert_eq!(gcd(0,17), 17);
assert_eq!(gcd(17,0), 17);
assert_eq!(gcd(0,0), 0);
}
#[test]
fn test_chinese_remainder() {
// test both interfaces
let eq1 = (2328,16256);
let eq2 = (410,5418);
let soln = (28450328, 44037504);
assert_eq!(chinese_remainder2(eq1,eq2), Some(soln));
assert_eq!(chinese_remainder(vec![eq1,eq2]), Some(soln));
// (0,1) serves as an "identity"
assert_eq!(chinese_remainder(vec![]), Some((0,1)));
assert_eq!(chinese_remainder(vec![(0,1)]), Some((0,1)));
assert_eq!(chinese_remainder(vec![(0,1),(13,36)]), Some((13,36)));
assert_eq!(chinese_remainder(vec![(13,36),(0,1)]), Some((13,36)));
// single equation
assert_eq!(chinese_remainder(vec![eq1]), Some(eq1));
// inconsistent equations
assert_eq!(chinese_remainder2((10,7),(4,14)), None);
assert_eq!(chinese_remainder(vec![(10,7),(4,14)]), None);
// FIXME: test more than 2 equations
// FIXME: do we specify behavior for when the input a_i are | // not already reduced modulo n_i?
}
#[test]
fn test_inverse_mod() {
let solns15 = vec![
None, Some(1), Some(8), None, Some(4),
None, None, Some(13), Some(2), None,
None, Some(11), None, Some(7), Some(14),
];
for x in -15..30 {
assert_eq!(inverse_mod(x,15), solns15[x.mod_floor(&15) as usize]);
}
}
#[test]
fn test_count_coprime_pairs() {
fn check<X>(bound: UpperBound<X>, expect_o: X, expect_u: X)
where X: ::std::fmt::Debug + PrimInt + ::std::hash::Hash + Integer {
let actual_o = count_coprime_pairs(bound, Ordered);
let actual_u = count_coprime_pairs(bound, Unordered);
assert!(actual_o == expect_o,
"g({:?}, Ordered) == {:?}, != {:?}", bound, actual_o, expect_o);
assert!(actual_u == expect_u,
"g({:?}, Unordered) == {:?}, != {:?}", bound, actual_u, expect_u);
};
// special-ish cases
check(Under(0u32), 0, 0); // unsigned to check for underflow
check(Under(0i32), 0, 0); // signed to check for poor usage of checked_sub
check(Upto(0u32), 0, 0);
check(Upto(0i32), 0, 0);
check(Upto(1u32), 1, 1);
check(Upto(1i32), 1, 1);
// a nontrivial coprime pair (2,3)
check(Upto(3u32), 7, 4);
// a nontrivial non-coprime pair (2,4)
check(Upto(4u32), 11, 6);
// problem size large enough to test both fine-graining and coarse-graining
check(Upto(100u32), 6087, 3044);
// a biggun
assert_eq!(count_coprime_pairs(Upto(10_000_000i64), Ordered), 60792712854483i64);
// try a variety of bounds in an attempt to get memo[&x] to panic
// on a missed dependency
let mut rng = ::rand::thread_rng();
test::black_box(
(0..100).map(|_| rng.gen_range(100, 100_000i64))
.map(|x| count_coprime_pairs(Upto(x), Ordered))
.sum::<i64>()
);
}
// Gold standard for binary comparison.
#[inline(never)]
fn gcd__reference<X>(a: X, b: X) -> X where
X: PrimInt + Integer + Signed,
{
let mut a = a.abs();
let mut b = b.abs();
while b != X::zero() {
let tmp = b;
b = a % b;
a = tmp;
}
a
}
// Impressively, rustc grinds this down to a *byte-perfect match*
// against gcd__reference.
#[inline(never)]
fn gcd__optimized<X>(a: X, b: X) -> X where
X: PrimInt + Integer + Signed,
{
gcd(a, b)
}
// force the two inline(never) functions above to be compiled
#[test]
fn dummy__compile_testfuncs() {
assert_eq!(gcd__reference(15,20), 5);
assert_eq!(gcd__optimized(20,15), 5);
// Interestingly, the compiled inline(never) functions will
// recieve optimizations based on their inputs.
// Without these following invocations, rustc will compile
// faster versions that only support positive arguments.
assert_eq!(gcd__reference(-15,-20), 5);
assert_eq!(gcd__optimized(-20,-15), 5);
}
} | random_line_split | |
main.rs | //! YM player
use std::io::{stdout, Write};
use core::ops::AddAssign;
use core::fmt;
use spectrusty_core::{audio::*, chip::nanos_from_frame_tc_cpu_hz};
use spectrusty_audio::{
synth::ext::*,
host::cpal::{AudioHandle, AudioHandleAnyFormat}
};
use spectrusty_peripherals::ay::{audio::*, AyRegister, AyRegChange};
use ym_file_parser::YmSong;
use clap::Parser;
use cpal::traits::*;
/* built-in song */
static BUZZ_YM: &[u8] = include_bytes!("../BUZZ.YM");
const NORMAL_AMPLITUDE: u8 = 100;
/* calculate amplitude level */
fn amplitude_level<T: Copy + FromSample<f32>>(level: u8) -> T {
const A: f32 = 3.1623e-3;
const B: f32 = 5.757;
let y: f32 = match level {
0 => 0.0,
NORMAL_AMPLITUDE => 1.0,
v => {
let x = v as f32 / NORMAL_AMPLITUDE as f32;
A * (B * x).exp()
}
};
T::from_sample(y)
}
/* AY/YM channels mapped as follows: [A, B, C], where N -> 0: left, 1: right, 2: center */
#[derive(Debug, Clone, Copy)]
struct ChannelMap([usize; 3]);
impl fmt::Display for ChannelMap {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// [A, B, C], where N -> 0: left, 1: right, 2: center
let [a, b, c] = self.0;
if a == b && b == c {
write!(f, "mono")
}
else {
let mut res = ['?'; 3];
res[a] = 'A';
res[b] = 'B';
res[c] = 'C';
let [l, r, c] = res;
write!(f, "{l}{c}{r}")
}
}
}
impl Default for ChannelMap {
fn default() -> Self {
ChannelMap([0, 1, 2]) // ACB
}
}
const MONO_CHANNEL_MAP: ChannelMap = ChannelMap([0, 0, 0]);
/* How to mix YM audio channels */
#[derive(Debug, Clone, Copy)]
enum ChannelMode {
/// Center channel is mixed-in with stereo channels.
MixedStereo(f32),
/// All channels are mixed-in together into a single audio channel.
Mono,
/// Left and right channel are played in stereo, redirect a center channel into a specific audio channel.
Channel(u32)
}
impl Default for ChannelMode {
fn default() -> Self {
ChannelMode::MixedStereo(0.8)
}
}
impl fmt::Display for ChannelMode {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
ChannelMode::MixedStereo(ampl) => write!(f, "{ampl}"),
ChannelMode::Mono => write!(f, "m"),
ChannelMode::Channel(n) => write!(f, "{n}"),
}
}
}
fn print_time(secs: u32) |
fn print_current(last_secs: &mut u32, cur_secs: f32, total_secs: f32) {
let secs = cur_secs.trunc() as u32;
if *last_secs == secs {
return;
}
*last_secs = secs;
print!("\r");
print_time(secs);
print!(" -> ");
print_time((total_secs - cur_secs).trunc() as u32);
stdout().flush().unwrap();
}
/****************************************************************************/
/* PLAYER */
/****************************************************************************/
struct PlayEnv {
ym_file: YmSong,
ampl_level: f32,
repeat: u32,
channel_map: ChannelMap,
track: bool,
}
fn play_with_blep<A, B, SD, S>(
PlayEnv { mut ym_file, ampl_level, repeat, channel_map, track }: PlayEnv,
mut audio: AudioHandle<S>,
bandlim: &mut B,
render_audio: &dyn Fn(&mut BlepAmpFilter<&mut B>, &mut Vec<S>)
)
where A: AmpLevels<SD>,
B: BandLimitedExt<SD, S> + ?Sized,
SD: SampleDelta + FromSample<f32> + MulNorm,
S: AudioSample + cpal::SizedSample
{
log::debug!("Channels: {channel_map} {:?}", channel_map.0);
/* Spectrusty's emulated AY is clocked at a half frequency of a host CPU clock,
we need to adjust cycles counter */
let host_frame_cycles = (ym_file.frame_cycles() * HOST_CLOCK_RATIO as f32) as i32;
let host_frequency = ym_file.chipset_frequency as f64 * HOST_CLOCK_RATIO as f64;
log::trace!("AY host frequency: {} Hz, frame: {} cycles", host_frequency, host_frame_cycles);
/* create a BLEP amplitude filter wrapper */
let mut bandlim = BlepAmpFilter::new(SD::from_sample(ampl_level), bandlim);
/* ensure BLEP has enough space to fit a single audio frame
(there is no margin - our frames will have constant size). */
bandlim.ensure_frame_time(audio.sample_rate, host_frequency, host_frame_cycles, 0);
/* number of audio output channels */
let channels = audio.channels as usize;
/* create an emulator instance */
let mut ay = Ay3_891xAudio::default();
/* buffered frame changes to AY-3-891x registers */
let mut changes = Vec::new();
/* play counter */
let mut counter = repeat;
/* total seconds */
let total_secs = ym_file.frames.len() as f32 / ym_file.frame_frequency as f32;
let mut last_secs: u32 = u32::MAX;
loop {
if track {
let cur_secs = ym_file.cursor() as f32 / ym_file.frame_frequency as f32;
print_current(&mut last_secs, cur_secs, total_secs);
}
/* produce YM chipset changes */
let finished = ym_file.produce_next_ay_frame(|ts, reg, val| {
changes.push(
AyRegChange::new(
(ts * HOST_CLOCK_RATIO as f32).trunc() as i32,
AyRegister::from(reg),
val))
});
/* render audio into BLEP */
ay.render_audio::<A,_,_>(changes.drain(..),
&mut bandlim,
host_frame_cycles,
host_frame_cycles,
channel_map.0);
/* close frame */
let frame_sample_count = bandlim.end_frame(host_frame_cycles);
/* render BLEP frame into the sample buffer */
audio.producer.render_frame(|ref mut buf| {
/* ensure the BLEP frame fits into the sample buffer */
buf.resize(frame_sample_count * channels, S::silence());
render_audio(&mut bandlim, buf);
});
/* send a rendered sample buffer to the consumer */
audio.producer.send_frame().unwrap();
if finished {
log::info!("Finished.");
if repeat != 0 {
counter -= 1;
if counter == 0 {
break;
}
}
}
}
/* let the audio thread finish playing */
for _ in 0..50 {
audio.producer.render_frame(|ref mut buf| {
buf.fill(S::silence());
});
audio.producer.send_frame().unwrap();
}
audio.close();
}
fn play_with_amps<A, SD, S>(
audio: AudioHandle<S>,
ym_file: YmSong,
args: Args
)
where A: AmpLevels<SD>,
SD: SampleDelta + FromSample<f32> + AddAssign + MulNorm + 'static + std::fmt::Debug,
S: FromSample<SD> + AudioSample + cpal::SizedSample
{
let Args { volume, repeat, channels: channel_map, mode, track, hpass, lpass, .. } = args;
log::debug!("Repeat: {repeat}, volume: {volume}%");
let ampl_level = amplitude_level(args.volume);
log::trace!("Amplitude filter: {ampl_level}");
let mut env = PlayEnv { ym_file, ampl_level, repeat, channel_map, track };
let channels = audio.channels as usize;
match mode {
ChannelMode::MixedStereo(mono_filter) if channels >= 2 => {
/* a multi-channel to stereo mixer */
let mut blep = BlepStereo::new(mono_filter.into_sample(),
/* a stereo band-limited pulse buffer */
BandLimitedAny::new(2, lpass, hpass));
log::debug!("Band limited: {blep:?}");
let blep: &mut dyn BandLimitedExt<_, _> = &mut blep;
play_with_blep::<A, _, _, _>(env, audio, blep,
&|blep, buf| {
blep.render_audio_map_interleaved(buf, channels, &[0, 1]);
/* prepare BLEP for the next frame */
blep.next_frame_ext();
}
);
}
ChannelMode::Channel(channel) if channels >= channel as usize => {
/* a multi-channel band-limited pulse buffer */
let third_chan = (channel - 1) as usize;
let mut blep = BandLimitedAny::new(3, lpass, hpass);
log::debug!("Band limited: {blep:?}");
let blep: &mut dyn BandLimitedExt<_, _> = &mut blep;
play_with_blep::<A, _, _, _>(env, audio, blep,
&|blep, buf| {
blep.render_audio_map_interleaved(buf, channels, &[0, 1, third_chan]);
/* prepare BLEP for the next frame */
blep.next_frame_ext();
}
);
}
_ => {
/* a monophonic band-limited pulse buffer */
let mut blep = BandLimitedAny::new(1, lpass, hpass);
log::debug!("Band limited: {blep:?}");
let blep: &mut dyn BandLimitedExt<_, _> = &mut blep;
env.channel_map = MONO_CHANNEL_MAP;
play_with_blep::<A, _, _, _>(env, audio, blep,
&|blep, buf| {
blep.render_audio_fill_interleaved(buf, channels, 0);
/* prepare BLEP for the next frame */
blep.next_frame_ext();
}
);
}
}
}
fn play<SD, S>(
audio: AudioHandle<S>,
ym_file: YmSong,
args: Args
)
where SD: SampleDelta + FromSample<f32> + AddAssign + MulNorm + 'static + std::fmt::Debug,
S: FromSample<SD> + AudioSample + cpal::SizedSample,
AyFuseAmps<SD>: AmpLevels<SD>,
AyAmps<SD>: AmpLevels<SD>
{
if args.fuse {
log::debug!("YM amplitide levels: fuse (measured)");
play_with_amps::<AyFuseAmps<_>, _, _>(audio, ym_file, args)
}
else {
log::debug!("YM amplitide levels: default (specs)");
play_with_amps::<AyAmps<_>, _, _>(audio, ym_file, args)
}
}
/****************************************************************************/
/* MAIN */
/****************************************************************************/
#[derive(Default, Debug, Clone, Copy, PartialEq)]
struct StreamConfigHint {
channels: Option<cpal::ChannelCount>,
sample_rate: Option<cpal::SampleRate>,
sample_format: Option<cpal::SampleFormat>
}
impl fmt::Display for StreamConfigHint {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
if self == &StreamConfigHint::default() {
return f.write_str("*");
}
if let Some(format) = self.sample_format {
write!(f, "{:?}", format)?;
}
if self.channels.is_some() && self.sample_rate.is_some() {
f.write_str(",")?;
}
if let Some(channels) = self.channels {
write!(f, "{}", channels)?;
}
if let Some(rate) = self.sample_rate {
write!(f, "@{}", rate.0)?;
}
Ok(())
}
}
#[derive(Parser, Debug)]
#[command(author, version, about, long_about = None)]
struct Args {
/// A file path to an YM song.
ym_file: Option<String>,
/// Audio mixer volume: 0 - 100.
#[arg(short, long, default_value_t = 50, value_parser = volume_in_range)]
volume: u8,
/// Play counter, 0 to play forever.
#[arg(short, long, default_value_t = 0)]
repeat: u32,
/// YM channels map: Left Center Right.
#[arg(short, long, default_value_t = ChannelMap::default(), value_parser = parse_channels)]
channels: ChannelMap,
/// Channel mode: s|m|0.s|N.
///
/// "s" - stereo mode with a center channel mixed with an amplitude of 0.8
///
/// "m" - monophonic mode, played on all audio channels
///
/// "0.s" - stereo mode, center channel amplitude: 0.s
///
/// "N" - multi-channel mode, redirect center channel to Nth (3+) audio channel
#[arg(short, long, default_value_t = ChannelMode::default(), value_parser = parse_channel_mode)]
mode: ChannelMode,
/// Switch to alternative YM amplitude levels (measured vs specs).
#[arg(short, long, default_value_t = false)]
fuse: bool,
/// Enable low-pass audio band filter.
#[arg(long, default_value_t = false)]
lpass: bool,
/// Enable high-pass audio band filter.
#[arg(long, default_value_t = false)]
hpass: bool,
/// Desired audio output parameters: ST,CHANS@RATE.
///
/// ST is a sample type, e.g.: U8, I16, U32, F32.
///
/// CHANS is the number of channels and RATE is the sample rate.
#[arg(short, long, default_value_t = StreamConfigHint::default(), value_parser = parse_stream_config)]
audio: StreamConfigHint,
/// Track the current song time.
#[arg(short, long, default_value_t = false)]
track: bool,
/// Log verbosity level.
///
/// -d for INFO, -dd for DEBUG, -ddd for TRACE
#[arg(short, long, action = clap::ArgAction::Count)]
debug: u8
}
fn volume_in_range(s: &str) -> Result<u8, String> {
let volume: usize = s
.parse()
.map_err(|_| format!("`{s}` isn't a volume"))?;
if (0..=NORMAL_AMPLITUDE as usize).contains(&volume) {
Ok(volume as u8)
} else {
Err(format!("volume not in range 0 - {NORMAL_AMPLITUDE}"))
}
}
fn parse_channel_mode(s: &str) -> Result<ChannelMode, String> {
Ok(match s {
"s"|"S" => ChannelMode::MixedStereo(0.8),
"m"|"M" => ChannelMode::Mono,
s if s.starts_with("0.") => {
let amp: f32 = s.parse().map_err(|_| format!("`{s}` isn't a stereo mixer amplitude"))?;
ChannelMode::MixedStereo(amp)
}
s => {
let channel: u32 = s.parse().map_err(|_| format!("`{s}` isn't a mixer mode channel"))?;
if channel < 3 {
return Err("mixer mode channel must be >= 3".into());
}
ChannelMode::Channel(channel)
}
})
}
fn parse_channels(s: &str) -> Result<ChannelMap, String> {
const ERROR_MSG: &str = "channel mapping should be a permutation of ABC characters";
if s.len() != 3 {
return Err(ERROR_MSG.into());
}
let mut channels = [usize::MAX; 3];
// [A, B, C], where N -> 0: left, 1: right, 2: center
for (ch, chan) in s.chars().zip([0, 2, 1].into_iter()) {
let pos = match ch.to_ascii_uppercase() {
'A' => 0,
'B' => 1,
'C' => 2,
_ => return Err(ERROR_MSG.into())
};
if channels[pos] != usize::MAX {
return Err(ERROR_MSG.into());
}
channels[pos] = chan;
}
Ok(ChannelMap(channels))
}
fn parse_stream_config(mut s: &str) -> Result<StreamConfigHint, String> {
let mut config = StreamConfigHint::default();
if s == "*" {
return Ok(config);
}
const FORMATS: &[([&str;2], cpal::SampleFormat)] = &[
(["i8", "I8"], cpal::SampleFormat::I8),
(["u8", "U8"], cpal::SampleFormat::U8),
(["i16", "I16"], cpal::SampleFormat::I16),
(["u16", "U16"], cpal::SampleFormat::U16),
(["i32", "I32"], cpal::SampleFormat::I32),
(["u32", "U32"], cpal::SampleFormat::U32),
(["f32", "F32"], cpal::SampleFormat::F32),
(["i64", "I64"], cpal::SampleFormat::I64),
(["u64", "U64"], cpal::SampleFormat::U64),
(["f64", "F64"], cpal::SampleFormat::F64)];
for ([lc, uc], format) in FORMATS.into_iter() {
if s.starts_with(lc) || s.starts_with(uc) {
config.sample_format = Some(*format);
(_, s) = s.split_at(lc.len());
break;
}
}
if s.starts_with(",") {
(_, s) = s.split_at(1);
}
let chan = match s.split_once("@") {
Some((chan, rate)) => {
if !rate.is_empty() {
config.sample_rate = Some(cpal::SampleRate(u32::from_str_radix(rate, 10)
.map_err(|_| "expected sample rate")?));
}
chan
},
None => s
};
if !chan.is_empty() {
config.channels = Some(u16::from_str_radix(chan, 10)
.map_err(|_| "expected number of channels")?);
}
Ok(config)
}
fn find_best_audio_config(device: &cpal::Device, request: StreamConfigHint) -> Result<cpal::SupportedStreamConfig, Box<dyn std::error::Error>>
{
log::trace!("Audio device: {}", device.name().unwrap_or_else(|e| e.to_string()));
let default_config = device.default_output_config()?;
if request == StreamConfigHint::default() {
return Ok(default_config);
}
let channels = request.channels.unwrap_or(default_config.channels());
for config in device.supported_output_configs()? {
if config.channels() != channels {
continue;
}
if let Some(sample_format) = request.sample_format {
if config.sample_format() != sample_format {
continue;
}
}
else if config.sample_format() != default_config.sample_format() {
continue;
}
let sample_rate = match request.sample_rate {
Some(sample_rate) => if !(config.min_sample_rate()..=config.max_sample_rate()).contains(&sample_rate) {
continue;
}
else {
sample_rate
}
None => default_config.sample_rate()
};
return Ok(config.with_sample_rate(sample_rate));
}
Err("Could not find the audio configuration matching given parameters")?
}
fn main() -> Result<(), Box<dyn std::error::Error>> {
let args = Args::parse();
simple_logger::init_with_level(match args.debug {
0 => log::Level::Warn,
1 => log::Level::Info,
2 => log::Level::Debug,
_ => log::Level::Trace
})?;
let ym_file = match args.ym_file {
Some(ref ym_path) => {
log::info!("Loading YM file: {}", ym_path);
ym_file_parser::parse_file(ym_path)?
}
None => YmSong::parse(BUZZ_YM)?
};
log::info!(r#"{} "{}" by {}"#,
ym_file.version,
ym_file.title.trim(),
ym_file.author.trim());
log::info!(r#"Duration: {:?} {}"#,
ym_file.song_duration(),
ym_file.comments.trim());
log::debug!("Chip: {} Hz, frame: {} Hz, {} cycles each",
ym_file.clock_frequency(),
ym_file.frame_frequency,
ym_file.frame_cycles());
log::debug!("Frames total: {}, loop to: {}, {:?}",
ym_file.frames.len(),
ym_file.loop_frame,
ym_file.song_attrs);
if log::log_enabled!(log::Level::Debug) && !ym_file.dd_samples.is_empty() {
let mut sample_lens = Vec::with_capacity(ym_file.dd_samples_ends.len());
ym_file.dd_samples_ends.iter().try_fold(0,
|prev, &off| {
(off != 0).then(|| {
sample_lens.push(off - prev);
off
})
});
log::debug!("Drums: {}, sample lengths: {sample_lens:?}, total: {}",
sample_lens.len(), ym_file.dd_samples.len());
}
/* calculate a duration of a single frame */
let frame_duration_nanos = nanos_from_frame_tc_cpu_hz(
ym_file.frame_cycles().round() as u32,
ym_file.chipset_frequency) as u32;
log::trace!("Frame duration: {} ns", frame_duration_nanos);
let device = cpal::default_host().default_output_device().ok_or("no default audio device!")?;
log::debug!("Audio request: {}", args.audio);
let supported_config = find_best_audio_config(&device, args.audio)?;
log::trace!("Audio config supported: {supported_config:?}");
let config = supported_config.config();
// if let &cpal::SupportedBufferSize::Range { min, max } = supported_config.buffer_size() {
// let frame_duration_secs = core::time::Duration::from_nanos(frame_duration_nanos.into()).as_secs_f64();
// let audio_frame_samples = (config.sample_rate.0 as f64 * frame_duration_secs).ceil() as cpal::FrameCount;
// if (min..=max).contains(&audio_frame_samples) {
// config.buffer_size = cpal::BufferSize::Fixed(audio_frame_samples);
// }
// }
/* create an audio backend */
log::trace!("Audio config selected: {config:?}");
let latency = 20000000 / frame_duration_nanos as usize + 5;
let audio = AudioHandleAnyFormat::create_with_device_config_and_sample_format(
&device, &config, supported_config.sample_format(), frame_duration_nanos, latency)?;
log::trace!("Audio format: {:?}", audio.sample_format());
/* start audio thread */
audio.play()?;
match audio {
AudioHandleAnyFormat::I8(audio) => play::<i16, _>(audio, ym_file, args),
AudioHandleAnyFormat::U8(audio) => play::<i16, _>(audio, ym_file, args),
AudioHandleAnyFormat::I16(audio) => play::<i16, _>(audio, ym_file, args),
AudioHandleAnyFormat::U16(audio) => play::<i16, _>(audio, ym_file, args),
AudioHandleAnyFormat::I32(audio) => play::<i32, _>(audio, ym_file, args),
AudioHandleAnyFormat::U32(audio) => play::<i32, _>(audio, ym_file, args),
AudioHandleAnyFormat::I64(audio) => play::<f64, _>(audio, ym_file, args),
AudioHandleAnyFormat::U64(audio) => play::<f64, _>(audio, ym_file, args),
AudioHandleAnyFormat::F32(audio) => play::<f32, _>(audio, ym_file, args),
AudioHandleAnyFormat::F64(audio) => play::<f64, _>(audio, ym_file, args),
_ => Err("Unsupported audio sample format!")?
}
Ok(())
}
| {
let hours = secs / 3600;
let minutes = (secs % 3600) / 60;
let secs = secs % 60;
if hours != 0 {
print!("{hours}:{minutes:02}:{secs:02}");
}
else {
print!("{minutes:02}:{secs:02}");
}
} | identifier_body |
main.rs | //! YM player
use std::io::{stdout, Write};
use core::ops::AddAssign;
use core::fmt;
use spectrusty_core::{audio::*, chip::nanos_from_frame_tc_cpu_hz};
use spectrusty_audio::{
synth::ext::*,
host::cpal::{AudioHandle, AudioHandleAnyFormat}
};
use spectrusty_peripherals::ay::{audio::*, AyRegister, AyRegChange};
use ym_file_parser::YmSong;
use clap::Parser;
use cpal::traits::*;
/* built-in song */
static BUZZ_YM: &[u8] = include_bytes!("../BUZZ.YM");
const NORMAL_AMPLITUDE: u8 = 100;
/* calculate amplitude level */
fn amplitude_level<T: Copy + FromSample<f32>>(level: u8) -> T {
const A: f32 = 3.1623e-3;
const B: f32 = 5.757;
let y: f32 = match level {
0 => 0.0,
NORMAL_AMPLITUDE => 1.0,
v => {
let x = v as f32 / NORMAL_AMPLITUDE as f32;
A * (B * x).exp()
}
};
T::from_sample(y)
}
/* AY/YM channels mapped as follows: [A, B, C], where N -> 0: left, 1: right, 2: center */
#[derive(Debug, Clone, Copy)]
struct ChannelMap([usize; 3]);
impl fmt::Display for ChannelMap {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// [A, B, C], where N -> 0: left, 1: right, 2: center
let [a, b, c] = self.0;
if a == b && b == c {
write!(f, "mono")
}
else {
let mut res = ['?'; 3];
res[a] = 'A';
res[b] = 'B';
res[c] = 'C';
let [l, r, c] = res;
write!(f, "{l}{c}{r}")
}
}
}
impl Default for ChannelMap {
fn default() -> Self {
ChannelMap([0, 1, 2]) // ACB
}
}
const MONO_CHANNEL_MAP: ChannelMap = ChannelMap([0, 0, 0]);
/* How to mix YM audio channels */
#[derive(Debug, Clone, Copy)]
enum ChannelMode {
/// Center channel is mixed-in with stereo channels.
MixedStereo(f32),
/// All channels are mixed-in together into a single audio channel.
Mono,
/// Left and right channel are played in stereo, redirect a center channel into a specific audio channel.
Channel(u32)
}
impl Default for ChannelMode {
fn default() -> Self {
ChannelMode::MixedStereo(0.8)
}
}
impl fmt::Display for ChannelMode {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
ChannelMode::MixedStereo(ampl) => write!(f, "{ampl}"),
ChannelMode::Mono => write!(f, "m"),
ChannelMode::Channel(n) => write!(f, "{n}"),
}
}
}
fn print_time(secs: u32) {
let hours = secs / 3600;
let minutes = (secs % 3600) / 60;
let secs = secs % 60;
if hours != 0 {
print!("{hours}:{minutes:02}:{secs:02}");
}
else {
print!("{minutes:02}:{secs:02}");
}
}
fn print_current(last_secs: &mut u32, cur_secs: f32, total_secs: f32) {
let secs = cur_secs.trunc() as u32;
if *last_secs == secs {
return;
}
*last_secs = secs;
print!("\r");
print_time(secs);
print!(" -> ");
print_time((total_secs - cur_secs).trunc() as u32);
stdout().flush().unwrap();
}
/****************************************************************************/
/* PLAYER */
/****************************************************************************/
struct PlayEnv {
ym_file: YmSong,
ampl_level: f32,
repeat: u32,
channel_map: ChannelMap,
track: bool,
}
fn play_with_blep<A, B, SD, S>(
PlayEnv { mut ym_file, ampl_level, repeat, channel_map, track }: PlayEnv,
mut audio: AudioHandle<S>,
bandlim: &mut B,
render_audio: &dyn Fn(&mut BlepAmpFilter<&mut B>, &mut Vec<S>)
)
where A: AmpLevels<SD>,
B: BandLimitedExt<SD, S> + ?Sized,
SD: SampleDelta + FromSample<f32> + MulNorm,
S: AudioSample + cpal::SizedSample
{
log::debug!("Channels: {channel_map} {:?}", channel_map.0);
/* Spectrusty's emulated AY is clocked at a half frequency of a host CPU clock,
we need to adjust cycles counter */
let host_frame_cycles = (ym_file.frame_cycles() * HOST_CLOCK_RATIO as f32) as i32;
let host_frequency = ym_file.chipset_frequency as f64 * HOST_CLOCK_RATIO as f64;
log::trace!("AY host frequency: {} Hz, frame: {} cycles", host_frequency, host_frame_cycles);
/* create a BLEP amplitude filter wrapper */
let mut bandlim = BlepAmpFilter::new(SD::from_sample(ampl_level), bandlim);
/* ensure BLEP has enough space to fit a single audio frame
(there is no margin - our frames will have constant size). */
bandlim.ensure_frame_time(audio.sample_rate, host_frequency, host_frame_cycles, 0);
/* number of audio output channels */
let channels = audio.channels as usize;
/* create an emulator instance */
let mut ay = Ay3_891xAudio::default();
/* buffered frame changes to AY-3-891x registers */
let mut changes = Vec::new();
/* play counter */
let mut counter = repeat;
/* total seconds */
let total_secs = ym_file.frames.len() as f32 / ym_file.frame_frequency as f32;
let mut last_secs: u32 = u32::MAX;
loop {
if track {
let cur_secs = ym_file.cursor() as f32 / ym_file.frame_frequency as f32;
print_current(&mut last_secs, cur_secs, total_secs);
}
/* produce YM chipset changes */
let finished = ym_file.produce_next_ay_frame(|ts, reg, val| {
changes.push(
AyRegChange::new(
(ts * HOST_CLOCK_RATIO as f32).trunc() as i32,
AyRegister::from(reg),
val))
});
/* render audio into BLEP */
ay.render_audio::<A,_,_>(changes.drain(..),
&mut bandlim,
host_frame_cycles,
host_frame_cycles,
channel_map.0);
/* close frame */
let frame_sample_count = bandlim.end_frame(host_frame_cycles);
/* render BLEP frame into the sample buffer */
audio.producer.render_frame(|ref mut buf| {
/* ensure the BLEP frame fits into the sample buffer */
buf.resize(frame_sample_count * channels, S::silence());
render_audio(&mut bandlim, buf);
});
/* send a rendered sample buffer to the consumer */
audio.producer.send_frame().unwrap();
if finished {
log::info!("Finished.");
if repeat != 0 {
counter -= 1;
if counter == 0 {
break;
}
}
}
}
/* let the audio thread finish playing */
for _ in 0..50 {
audio.producer.render_frame(|ref mut buf| {
buf.fill(S::silence());
});
audio.producer.send_frame().unwrap();
}
audio.close();
}
fn play_with_amps<A, SD, S>(
audio: AudioHandle<S>,
ym_file: YmSong,
args: Args
)
where A: AmpLevels<SD>,
SD: SampleDelta + FromSample<f32> + AddAssign + MulNorm + 'static + std::fmt::Debug,
S: FromSample<SD> + AudioSample + cpal::SizedSample
{
let Args { volume, repeat, channels: channel_map, mode, track, hpass, lpass, .. } = args;
log::debug!("Repeat: {repeat}, volume: {volume}%");
let ampl_level = amplitude_level(args.volume);
log::trace!("Amplitude filter: {ampl_level}");
let mut env = PlayEnv { ym_file, ampl_level, repeat, channel_map, track };
let channels = audio.channels as usize;
match mode {
ChannelMode::MixedStereo(mono_filter) if channels >= 2 => {
/* a multi-channel to stereo mixer */
let mut blep = BlepStereo::new(mono_filter.into_sample(),
/* a stereo band-limited pulse buffer */
BandLimitedAny::new(2, lpass, hpass));
log::debug!("Band limited: {blep:?}");
let blep: &mut dyn BandLimitedExt<_, _> = &mut blep;
play_with_blep::<A, _, _, _>(env, audio, blep,
&|blep, buf| {
blep.render_audio_map_interleaved(buf, channels, &[0, 1]);
/* prepare BLEP for the next frame */
blep.next_frame_ext();
}
);
}
ChannelMode::Channel(channel) if channels >= channel as usize => {
/* a multi-channel band-limited pulse buffer */
let third_chan = (channel - 1) as usize;
let mut blep = BandLimitedAny::new(3, lpass, hpass);
log::debug!("Band limited: {blep:?}");
let blep: &mut dyn BandLimitedExt<_, _> = &mut blep;
play_with_blep::<A, _, _, _>(env, audio, blep,
&|blep, buf| {
blep.render_audio_map_interleaved(buf, channels, &[0, 1, third_chan]);
/* prepare BLEP for the next frame */
blep.next_frame_ext();
}
);
}
_ => {
/* a monophonic band-limited pulse buffer */
let mut blep = BandLimitedAny::new(1, lpass, hpass);
log::debug!("Band limited: {blep:?}");
let blep: &mut dyn BandLimitedExt<_, _> = &mut blep;
env.channel_map = MONO_CHANNEL_MAP;
play_with_blep::<A, _, _, _>(env, audio, blep,
&|blep, buf| {
blep.render_audio_fill_interleaved(buf, channels, 0);
/* prepare BLEP for the next frame */
blep.next_frame_ext();
}
);
}
}
}
fn play<SD, S>(
audio: AudioHandle<S>,
ym_file: YmSong,
args: Args
)
where SD: SampleDelta + FromSample<f32> + AddAssign + MulNorm + 'static + std::fmt::Debug,
S: FromSample<SD> + AudioSample + cpal::SizedSample,
AyFuseAmps<SD>: AmpLevels<SD>,
AyAmps<SD>: AmpLevels<SD>
{
if args.fuse {
log::debug!("YM amplitide levels: fuse (measured)");
play_with_amps::<AyFuseAmps<_>, _, _>(audio, ym_file, args)
}
else {
log::debug!("YM amplitide levels: default (specs)");
play_with_amps::<AyAmps<_>, _, _>(audio, ym_file, args)
}
}
/****************************************************************************/
/* MAIN */
/****************************************************************************/
#[derive(Default, Debug, Clone, Copy, PartialEq)]
struct StreamConfigHint {
channels: Option<cpal::ChannelCount>,
sample_rate: Option<cpal::SampleRate>,
sample_format: Option<cpal::SampleFormat>
}
impl fmt::Display for StreamConfigHint {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
if self == &StreamConfigHint::default() {
return f.write_str("*");
}
if let Some(format) = self.sample_format {
write!(f, "{:?}", format)?;
}
if self.channels.is_some() && self.sample_rate.is_some() {
f.write_str(",")?;
}
if let Some(channels) = self.channels {
write!(f, "{}", channels)?;
}
if let Some(rate) = self.sample_rate {
write!(f, "@{}", rate.0)?;
}
Ok(())
}
}
#[derive(Parser, Debug)]
#[command(author, version, about, long_about = None)]
struct Args {
/// A file path to an YM song.
ym_file: Option<String>,
/// Audio mixer volume: 0 - 100.
#[arg(short, long, default_value_t = 50, value_parser = volume_in_range)]
volume: u8,
/// Play counter, 0 to play forever.
#[arg(short, long, default_value_t = 0)]
repeat: u32,
/// YM channels map: Left Center Right.
#[arg(short, long, default_value_t = ChannelMap::default(), value_parser = parse_channels)]
channels: ChannelMap,
/// Channel mode: s|m|0.s|N.
///
/// "s" - stereo mode with a center channel mixed with an amplitude of 0.8
///
/// "m" - monophonic mode, played on all audio channels
///
/// "0.s" - stereo mode, center channel amplitude: 0.s
///
/// "N" - multi-channel mode, redirect center channel to Nth (3+) audio channel
#[arg(short, long, default_value_t = ChannelMode::default(), value_parser = parse_channel_mode)]
mode: ChannelMode,
/// Switch to alternative YM amplitude levels (measured vs specs).
#[arg(short, long, default_value_t = false)]
fuse: bool,
/// Enable low-pass audio band filter.
#[arg(long, default_value_t = false)]
lpass: bool,
/// Enable high-pass audio band filter.
#[arg(long, default_value_t = false)]
hpass: bool,
/// Desired audio output parameters: ST,CHANS@RATE.
///
/// ST is a sample type, e.g.: U8, I16, U32, F32.
///
/// CHANS is the number of channels and RATE is the sample rate.
#[arg(short, long, default_value_t = StreamConfigHint::default(), value_parser = parse_stream_config)]
audio: StreamConfigHint,
/// Track the current song time.
#[arg(short, long, default_value_t = false)]
track: bool,
/// Log verbosity level.
///
/// -d for INFO, -dd for DEBUG, -ddd for TRACE
#[arg(short, long, action = clap::ArgAction::Count)]
debug: u8
}
fn volume_in_range(s: &str) -> Result<u8, String> {
let volume: usize = s
.parse()
.map_err(|_| format!("`{s}` isn't a volume"))?;
if (0..=NORMAL_AMPLITUDE as usize).contains(&volume) {
Ok(volume as u8)
} else {
Err(format!("volume not in range 0 - {NORMAL_AMPLITUDE}"))
}
}
fn parse_channel_mode(s: &str) -> Result<ChannelMode, String> {
Ok(match s {
"s"|"S" => ChannelMode::MixedStereo(0.8),
"m"|"M" => ChannelMode::Mono,
s if s.starts_with("0.") => {
let amp: f32 = s.parse().map_err(|_| format!("`{s}` isn't a stereo mixer amplitude"))?;
ChannelMode::MixedStereo(amp)
}
s => {
let channel: u32 = s.parse().map_err(|_| format!("`{s}` isn't a mixer mode channel"))?;
if channel < 3 {
return Err("mixer mode channel must be >= 3".into());
}
ChannelMode::Channel(channel)
}
})
}
fn parse_channels(s: &str) -> Result<ChannelMap, String> {
const ERROR_MSG: &str = "channel mapping should be a permutation of ABC characters";
if s.len() != 3 {
return Err(ERROR_MSG.into());
}
let mut channels = [usize::MAX; 3];
// [A, B, C], where N -> 0: left, 1: right, 2: center
for (ch, chan) in s.chars().zip([0, 2, 1].into_iter()) {
let pos = match ch.to_ascii_uppercase() {
'A' => 0,
'B' => 1,
'C' => 2,
_ => return Err(ERROR_MSG.into())
};
if channels[pos] != usize::MAX {
return Err(ERROR_MSG.into());
}
channels[pos] = chan;
}
Ok(ChannelMap(channels))
}
fn | (mut s: &str) -> Result<StreamConfigHint, String> {
let mut config = StreamConfigHint::default();
if s == "*" {
return Ok(config);
}
const FORMATS: &[([&str;2], cpal::SampleFormat)] = &[
(["i8", "I8"], cpal::SampleFormat::I8),
(["u8", "U8"], cpal::SampleFormat::U8),
(["i16", "I16"], cpal::SampleFormat::I16),
(["u16", "U16"], cpal::SampleFormat::U16),
(["i32", "I32"], cpal::SampleFormat::I32),
(["u32", "U32"], cpal::SampleFormat::U32),
(["f32", "F32"], cpal::SampleFormat::F32),
(["i64", "I64"], cpal::SampleFormat::I64),
(["u64", "U64"], cpal::SampleFormat::U64),
(["f64", "F64"], cpal::SampleFormat::F64)];
for ([lc, uc], format) in FORMATS.into_iter() {
if s.starts_with(lc) || s.starts_with(uc) {
config.sample_format = Some(*format);
(_, s) = s.split_at(lc.len());
break;
}
}
if s.starts_with(",") {
(_, s) = s.split_at(1);
}
let chan = match s.split_once("@") {
Some((chan, rate)) => {
if !rate.is_empty() {
config.sample_rate = Some(cpal::SampleRate(u32::from_str_radix(rate, 10)
.map_err(|_| "expected sample rate")?));
}
chan
},
None => s
};
if !chan.is_empty() {
config.channels = Some(u16::from_str_radix(chan, 10)
.map_err(|_| "expected number of channels")?);
}
Ok(config)
}
fn find_best_audio_config(device: &cpal::Device, request: StreamConfigHint) -> Result<cpal::SupportedStreamConfig, Box<dyn std::error::Error>>
{
log::trace!("Audio device: {}", device.name().unwrap_or_else(|e| e.to_string()));
let default_config = device.default_output_config()?;
if request == StreamConfigHint::default() {
return Ok(default_config);
}
let channels = request.channels.unwrap_or(default_config.channels());
for config in device.supported_output_configs()? {
if config.channels() != channels {
continue;
}
if let Some(sample_format) = request.sample_format {
if config.sample_format() != sample_format {
continue;
}
}
else if config.sample_format() != default_config.sample_format() {
continue;
}
let sample_rate = match request.sample_rate {
Some(sample_rate) => if !(config.min_sample_rate()..=config.max_sample_rate()).contains(&sample_rate) {
continue;
}
else {
sample_rate
}
None => default_config.sample_rate()
};
return Ok(config.with_sample_rate(sample_rate));
}
Err("Could not find the audio configuration matching given parameters")?
}
fn main() -> Result<(), Box<dyn std::error::Error>> {
let args = Args::parse();
simple_logger::init_with_level(match args.debug {
0 => log::Level::Warn,
1 => log::Level::Info,
2 => log::Level::Debug,
_ => log::Level::Trace
})?;
let ym_file = match args.ym_file {
Some(ref ym_path) => {
log::info!("Loading YM file: {}", ym_path);
ym_file_parser::parse_file(ym_path)?
}
None => YmSong::parse(BUZZ_YM)?
};
log::info!(r#"{} "{}" by {}"#,
ym_file.version,
ym_file.title.trim(),
ym_file.author.trim());
log::info!(r#"Duration: {:?} {}"#,
ym_file.song_duration(),
ym_file.comments.trim());
log::debug!("Chip: {} Hz, frame: {} Hz, {} cycles each",
ym_file.clock_frequency(),
ym_file.frame_frequency,
ym_file.frame_cycles());
log::debug!("Frames total: {}, loop to: {}, {:?}",
ym_file.frames.len(),
ym_file.loop_frame,
ym_file.song_attrs);
if log::log_enabled!(log::Level::Debug) && !ym_file.dd_samples.is_empty() {
let mut sample_lens = Vec::with_capacity(ym_file.dd_samples_ends.len());
ym_file.dd_samples_ends.iter().try_fold(0,
|prev, &off| {
(off != 0).then(|| {
sample_lens.push(off - prev);
off
})
});
log::debug!("Drums: {}, sample lengths: {sample_lens:?}, total: {}",
sample_lens.len(), ym_file.dd_samples.len());
}
/* calculate a duration of a single frame */
let frame_duration_nanos = nanos_from_frame_tc_cpu_hz(
ym_file.frame_cycles().round() as u32,
ym_file.chipset_frequency) as u32;
log::trace!("Frame duration: {} ns", frame_duration_nanos);
let device = cpal::default_host().default_output_device().ok_or("no default audio device!")?;
log::debug!("Audio request: {}", args.audio);
let supported_config = find_best_audio_config(&device, args.audio)?;
log::trace!("Audio config supported: {supported_config:?}");
let config = supported_config.config();
// if let &cpal::SupportedBufferSize::Range { min, max } = supported_config.buffer_size() {
// let frame_duration_secs = core::time::Duration::from_nanos(frame_duration_nanos.into()).as_secs_f64();
// let audio_frame_samples = (config.sample_rate.0 as f64 * frame_duration_secs).ceil() as cpal::FrameCount;
// if (min..=max).contains(&audio_frame_samples) {
// config.buffer_size = cpal::BufferSize::Fixed(audio_frame_samples);
// }
// }
/* create an audio backend */
log::trace!("Audio config selected: {config:?}");
let latency = 20000000 / frame_duration_nanos as usize + 5;
let audio = AudioHandleAnyFormat::create_with_device_config_and_sample_format(
&device, &config, supported_config.sample_format(), frame_duration_nanos, latency)?;
log::trace!("Audio format: {:?}", audio.sample_format());
/* start audio thread */
audio.play()?;
match audio {
AudioHandleAnyFormat::I8(audio) => play::<i16, _>(audio, ym_file, args),
AudioHandleAnyFormat::U8(audio) => play::<i16, _>(audio, ym_file, args),
AudioHandleAnyFormat::I16(audio) => play::<i16, _>(audio, ym_file, args),
AudioHandleAnyFormat::U16(audio) => play::<i16, _>(audio, ym_file, args),
AudioHandleAnyFormat::I32(audio) => play::<i32, _>(audio, ym_file, args),
AudioHandleAnyFormat::U32(audio) => play::<i32, _>(audio, ym_file, args),
AudioHandleAnyFormat::I64(audio) => play::<f64, _>(audio, ym_file, args),
AudioHandleAnyFormat::U64(audio) => play::<f64, _>(audio, ym_file, args),
AudioHandleAnyFormat::F32(audio) => play::<f32, _>(audio, ym_file, args),
AudioHandleAnyFormat::F64(audio) => play::<f64, _>(audio, ym_file, args),
_ => Err("Unsupported audio sample format!")?
}
Ok(())
}
| parse_stream_config | identifier_name |
main.rs | //! YM player
use std::io::{stdout, Write};
use core::ops::AddAssign;
use core::fmt;
use spectrusty_core::{audio::*, chip::nanos_from_frame_tc_cpu_hz};
use spectrusty_audio::{
synth::ext::*,
host::cpal::{AudioHandle, AudioHandleAnyFormat}
};
use spectrusty_peripherals::ay::{audio::*, AyRegister, AyRegChange};
use ym_file_parser::YmSong;
use clap::Parser;
use cpal::traits::*;
/* built-in song */
static BUZZ_YM: &[u8] = include_bytes!("../BUZZ.YM");
const NORMAL_AMPLITUDE: u8 = 100;
/* calculate amplitude level */
fn amplitude_level<T: Copy + FromSample<f32>>(level: u8) -> T {
const A: f32 = 3.1623e-3;
const B: f32 = 5.757;
let y: f32 = match level {
0 => 0.0,
NORMAL_AMPLITUDE => 1.0,
v => {
let x = v as f32 / NORMAL_AMPLITUDE as f32;
A * (B * x).exp()
}
};
T::from_sample(y)
}
/* AY/YM channels mapped as follows: [A, B, C], where N -> 0: left, 1: right, 2: center */
#[derive(Debug, Clone, Copy)]
struct ChannelMap([usize; 3]);
impl fmt::Display for ChannelMap {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// [A, B, C], where N -> 0: left, 1: right, 2: center
let [a, b, c] = self.0;
if a == b && b == c {
write!(f, "mono")
}
else {
let mut res = ['?'; 3];
res[a] = 'A';
res[b] = 'B';
res[c] = 'C';
let [l, r, c] = res;
write!(f, "{l}{c}{r}")
}
}
}
impl Default for ChannelMap {
fn default() -> Self {
ChannelMap([0, 1, 2]) // ACB
}
}
const MONO_CHANNEL_MAP: ChannelMap = ChannelMap([0, 0, 0]);
/* How to mix YM audio channels */
#[derive(Debug, Clone, Copy)]
enum ChannelMode {
/// Center channel is mixed-in with stereo channels.
MixedStereo(f32),
/// All channels are mixed-in together into a single audio channel.
Mono,
/// Left and right channel are played in stereo, redirect a center channel into a specific audio channel.
Channel(u32)
}
impl Default for ChannelMode {
fn default() -> Self {
ChannelMode::MixedStereo(0.8)
}
}
impl fmt::Display for ChannelMode {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
ChannelMode::MixedStereo(ampl) => write!(f, "{ampl}"),
ChannelMode::Mono => write!(f, "m"),
ChannelMode::Channel(n) => write!(f, "{n}"),
}
}
}
fn print_time(secs: u32) {
let hours = secs / 3600;
let minutes = (secs % 3600) / 60;
let secs = secs % 60;
if hours != 0 {
print!("{hours}:{minutes:02}:{secs:02}");
}
else {
print!("{minutes:02}:{secs:02}");
}
}
fn print_current(last_secs: &mut u32, cur_secs: f32, total_secs: f32) {
let secs = cur_secs.trunc() as u32;
if *last_secs == secs {
return;
}
*last_secs = secs;
print!("\r");
print_time(secs);
print!(" -> ");
print_time((total_secs - cur_secs).trunc() as u32);
stdout().flush().unwrap();
}
/****************************************************************************/
/* PLAYER */
/****************************************************************************/
struct PlayEnv {
ym_file: YmSong,
ampl_level: f32,
repeat: u32,
channel_map: ChannelMap,
track: bool,
}
fn play_with_blep<A, B, SD, S>(
PlayEnv { mut ym_file, ampl_level, repeat, channel_map, track }: PlayEnv,
mut audio: AudioHandle<S>,
bandlim: &mut B,
render_audio: &dyn Fn(&mut BlepAmpFilter<&mut B>, &mut Vec<S>)
)
where A: AmpLevels<SD>,
B: BandLimitedExt<SD, S> + ?Sized,
SD: SampleDelta + FromSample<f32> + MulNorm,
S: AudioSample + cpal::SizedSample
{
log::debug!("Channels: {channel_map} {:?}", channel_map.0);
/* Spectrusty's emulated AY is clocked at a half frequency of a host CPU clock,
we need to adjust cycles counter */
let host_frame_cycles = (ym_file.frame_cycles() * HOST_CLOCK_RATIO as f32) as i32;
let host_frequency = ym_file.chipset_frequency as f64 * HOST_CLOCK_RATIO as f64;
log::trace!("AY host frequency: {} Hz, frame: {} cycles", host_frequency, host_frame_cycles);
/* create a BLEP amplitude filter wrapper */
let mut bandlim = BlepAmpFilter::new(SD::from_sample(ampl_level), bandlim);
/* ensure BLEP has enough space to fit a single audio frame
(there is no margin - our frames will have constant size). */
bandlim.ensure_frame_time(audio.sample_rate, host_frequency, host_frame_cycles, 0);
/* number of audio output channels */
let channels = audio.channels as usize;
/* create an emulator instance */
let mut ay = Ay3_891xAudio::default();
/* buffered frame changes to AY-3-891x registers */
let mut changes = Vec::new();
/* play counter */
let mut counter = repeat;
/* total seconds */
let total_secs = ym_file.frames.len() as f32 / ym_file.frame_frequency as f32;
let mut last_secs: u32 = u32::MAX;
loop {
if track {
let cur_secs = ym_file.cursor() as f32 / ym_file.frame_frequency as f32;
print_current(&mut last_secs, cur_secs, total_secs);
}
/* produce YM chipset changes */
let finished = ym_file.produce_next_ay_frame(|ts, reg, val| {
changes.push(
AyRegChange::new(
(ts * HOST_CLOCK_RATIO as f32).trunc() as i32,
AyRegister::from(reg),
val))
});
/* render audio into BLEP */
ay.render_audio::<A,_,_>(changes.drain(..),
&mut bandlim,
host_frame_cycles,
host_frame_cycles,
channel_map.0);
/* close frame */
let frame_sample_count = bandlim.end_frame(host_frame_cycles);
/* render BLEP frame into the sample buffer */
audio.producer.render_frame(|ref mut buf| {
/* ensure the BLEP frame fits into the sample buffer */
buf.resize(frame_sample_count * channels, S::silence());
render_audio(&mut bandlim, buf);
});
/* send a rendered sample buffer to the consumer */
audio.producer.send_frame().unwrap();
if finished {
log::info!("Finished.");
if repeat != 0 {
counter -= 1;
if counter == 0 {
break;
}
}
}
}
/* let the audio thread finish playing */
for _ in 0..50 {
audio.producer.render_frame(|ref mut buf| {
buf.fill(S::silence());
});
audio.producer.send_frame().unwrap();
}
audio.close();
}
fn play_with_amps<A, SD, S>(
audio: AudioHandle<S>,
ym_file: YmSong,
args: Args
)
where A: AmpLevels<SD>,
SD: SampleDelta + FromSample<f32> + AddAssign + MulNorm + 'static + std::fmt::Debug,
S: FromSample<SD> + AudioSample + cpal::SizedSample
{
let Args { volume, repeat, channels: channel_map, mode, track, hpass, lpass, .. } = args;
log::debug!("Repeat: {repeat}, volume: {volume}%");
let ampl_level = amplitude_level(args.volume);
log::trace!("Amplitude filter: {ampl_level}");
let mut env = PlayEnv { ym_file, ampl_level, repeat, channel_map, track };
let channels = audio.channels as usize;
match mode {
ChannelMode::MixedStereo(mono_filter) if channels >= 2 => {
/* a multi-channel to stereo mixer */
let mut blep = BlepStereo::new(mono_filter.into_sample(),
/* a stereo band-limited pulse buffer */
BandLimitedAny::new(2, lpass, hpass));
log::debug!("Band limited: {blep:?}");
let blep: &mut dyn BandLimitedExt<_, _> = &mut blep;
play_with_blep::<A, _, _, _>(env, audio, blep,
&|blep, buf| {
blep.render_audio_map_interleaved(buf, channels, &[0, 1]);
/* prepare BLEP for the next frame */
blep.next_frame_ext();
}
);
}
ChannelMode::Channel(channel) if channels >= channel as usize => {
/* a multi-channel band-limited pulse buffer */
let third_chan = (channel - 1) as usize;
let mut blep = BandLimitedAny::new(3, lpass, hpass);
log::debug!("Band limited: {blep:?}");
let blep: &mut dyn BandLimitedExt<_, _> = &mut blep;
play_with_blep::<A, _, _, _>(env, audio, blep,
&|blep, buf| {
blep.render_audio_map_interleaved(buf, channels, &[0, 1, third_chan]);
/* prepare BLEP for the next frame */
blep.next_frame_ext();
}
);
}
_ => {
/* a monophonic band-limited pulse buffer */
let mut blep = BandLimitedAny::new(1, lpass, hpass);
log::debug!("Band limited: {blep:?}");
let blep: &mut dyn BandLimitedExt<_, _> = &mut blep;
env.channel_map = MONO_CHANNEL_MAP;
play_with_blep::<A, _, _, _>(env, audio, blep,
&|blep, buf| {
blep.render_audio_fill_interleaved(buf, channels, 0);
/* prepare BLEP for the next frame */
blep.next_frame_ext();
}
);
}
}
}
fn play<SD, S>(
audio: AudioHandle<S>,
ym_file: YmSong,
args: Args
)
where SD: SampleDelta + FromSample<f32> + AddAssign + MulNorm + 'static + std::fmt::Debug,
S: FromSample<SD> + AudioSample + cpal::SizedSample,
AyFuseAmps<SD>: AmpLevels<SD>,
AyAmps<SD>: AmpLevels<SD>
{
if args.fuse {
log::debug!("YM amplitide levels: fuse (measured)");
play_with_amps::<AyFuseAmps<_>, _, _>(audio, ym_file, args)
}
else {
log::debug!("YM amplitide levels: default (specs)");
play_with_amps::<AyAmps<_>, _, _>(audio, ym_file, args)
}
}
/****************************************************************************/
/* MAIN */
/****************************************************************************/
#[derive(Default, Debug, Clone, Copy, PartialEq)]
struct StreamConfigHint {
channels: Option<cpal::ChannelCount>,
sample_rate: Option<cpal::SampleRate>,
sample_format: Option<cpal::SampleFormat>
}
impl fmt::Display for StreamConfigHint {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
if self == &StreamConfigHint::default() {
return f.write_str("*");
}
if let Some(format) = self.sample_format {
write!(f, "{:?}", format)?;
}
if self.channels.is_some() && self.sample_rate.is_some() {
f.write_str(",")?;
}
if let Some(channels) = self.channels {
write!(f, "{}", channels)?;
}
if let Some(rate) = self.sample_rate {
write!(f, "@{}", rate.0)?;
}
Ok(())
}
}
#[derive(Parser, Debug)]
#[command(author, version, about, long_about = None)]
struct Args {
/// A file path to an YM song.
ym_file: Option<String>,
/// Audio mixer volume: 0 - 100.
#[arg(short, long, default_value_t = 50, value_parser = volume_in_range)]
volume: u8,
/// Play counter, 0 to play forever.
#[arg(short, long, default_value_t = 0)]
repeat: u32,
/// YM channels map: Left Center Right.
#[arg(short, long, default_value_t = ChannelMap::default(), value_parser = parse_channels)]
channels: ChannelMap,
/// Channel mode: s|m|0.s|N.
///
/// "s" - stereo mode with a center channel mixed with an amplitude of 0.8
///
/// "m" - monophonic mode, played on all audio channels
///
/// "0.s" - stereo mode, center channel amplitude: 0.s
///
/// "N" - multi-channel mode, redirect center channel to Nth (3+) audio channel
#[arg(short, long, default_value_t = ChannelMode::default(), value_parser = parse_channel_mode)]
mode: ChannelMode,
/// Switch to alternative YM amplitude levels (measured vs specs).
#[arg(short, long, default_value_t = false)]
fuse: bool,
/// Enable low-pass audio band filter.
#[arg(long, default_value_t = false)]
lpass: bool,
/// Enable high-pass audio band filter.
#[arg(long, default_value_t = false)]
hpass: bool,
/// Desired audio output parameters: ST,CHANS@RATE.
///
/// ST is a sample type, e.g.: U8, I16, U32, F32.
///
/// CHANS is the number of channels and RATE is the sample rate.
#[arg(short, long, default_value_t = StreamConfigHint::default(), value_parser = parse_stream_config)]
audio: StreamConfigHint,
/// Track the current song time.
#[arg(short, long, default_value_t = false)]
track: bool,
/// Log verbosity level.
///
/// -d for INFO, -dd for DEBUG, -ddd for TRACE
#[arg(short, long, action = clap::ArgAction::Count)]
debug: u8
}
fn volume_in_range(s: &str) -> Result<u8, String> {
let volume: usize = s
.parse()
.map_err(|_| format!("`{s}` isn't a volume"))?;
if (0..=NORMAL_AMPLITUDE as usize).contains(&volume) {
Ok(volume as u8)
} else {
Err(format!("volume not in range 0 - {NORMAL_AMPLITUDE}"))
}
}
fn parse_channel_mode(s: &str) -> Result<ChannelMode, String> {
Ok(match s {
"s"|"S" => ChannelMode::MixedStereo(0.8),
"m"|"M" => ChannelMode::Mono,
s if s.starts_with("0.") => {
let amp: f32 = s.parse().map_err(|_| format!("`{s}` isn't a stereo mixer amplitude"))?;
ChannelMode::MixedStereo(amp)
}
s => {
let channel: u32 = s.parse().map_err(|_| format!("`{s}` isn't a mixer mode channel"))?;
if channel < 3 {
return Err("mixer mode channel must be >= 3".into());
} | })
}
fn parse_channels(s: &str) -> Result<ChannelMap, String> {
const ERROR_MSG: &str = "channel mapping should be a permutation of ABC characters";
if s.len() != 3 {
return Err(ERROR_MSG.into());
}
let mut channels = [usize::MAX; 3];
// [A, B, C], where N -> 0: left, 1: right, 2: center
for (ch, chan) in s.chars().zip([0, 2, 1].into_iter()) {
let pos = match ch.to_ascii_uppercase() {
'A' => 0,
'B' => 1,
'C' => 2,
_ => return Err(ERROR_MSG.into())
};
if channels[pos] != usize::MAX {
return Err(ERROR_MSG.into());
}
channels[pos] = chan;
}
Ok(ChannelMap(channels))
}
fn parse_stream_config(mut s: &str) -> Result<StreamConfigHint, String> {
let mut config = StreamConfigHint::default();
if s == "*" {
return Ok(config);
}
const FORMATS: &[([&str;2], cpal::SampleFormat)] = &[
(["i8", "I8"], cpal::SampleFormat::I8),
(["u8", "U8"], cpal::SampleFormat::U8),
(["i16", "I16"], cpal::SampleFormat::I16),
(["u16", "U16"], cpal::SampleFormat::U16),
(["i32", "I32"], cpal::SampleFormat::I32),
(["u32", "U32"], cpal::SampleFormat::U32),
(["f32", "F32"], cpal::SampleFormat::F32),
(["i64", "I64"], cpal::SampleFormat::I64),
(["u64", "U64"], cpal::SampleFormat::U64),
(["f64", "F64"], cpal::SampleFormat::F64)];
for ([lc, uc], format) in FORMATS.into_iter() {
if s.starts_with(lc) || s.starts_with(uc) {
config.sample_format = Some(*format);
(_, s) = s.split_at(lc.len());
break;
}
}
if s.starts_with(",") {
(_, s) = s.split_at(1);
}
let chan = match s.split_once("@") {
Some((chan, rate)) => {
if !rate.is_empty() {
config.sample_rate = Some(cpal::SampleRate(u32::from_str_radix(rate, 10)
.map_err(|_| "expected sample rate")?));
}
chan
},
None => s
};
if !chan.is_empty() {
config.channels = Some(u16::from_str_radix(chan, 10)
.map_err(|_| "expected number of channels")?);
}
Ok(config)
}
fn find_best_audio_config(device: &cpal::Device, request: StreamConfigHint) -> Result<cpal::SupportedStreamConfig, Box<dyn std::error::Error>>
{
log::trace!("Audio device: {}", device.name().unwrap_or_else(|e| e.to_string()));
let default_config = device.default_output_config()?;
if request == StreamConfigHint::default() {
return Ok(default_config);
}
let channels = request.channels.unwrap_or(default_config.channels());
for config in device.supported_output_configs()? {
if config.channels() != channels {
continue;
}
if let Some(sample_format) = request.sample_format {
if config.sample_format() != sample_format {
continue;
}
}
else if config.sample_format() != default_config.sample_format() {
continue;
}
let sample_rate = match request.sample_rate {
Some(sample_rate) => if !(config.min_sample_rate()..=config.max_sample_rate()).contains(&sample_rate) {
continue;
}
else {
sample_rate
}
None => default_config.sample_rate()
};
return Ok(config.with_sample_rate(sample_rate));
}
Err("Could not find the audio configuration matching given parameters")?
}
fn main() -> Result<(), Box<dyn std::error::Error>> {
let args = Args::parse();
simple_logger::init_with_level(match args.debug {
0 => log::Level::Warn,
1 => log::Level::Info,
2 => log::Level::Debug,
_ => log::Level::Trace
})?;
let ym_file = match args.ym_file {
Some(ref ym_path) => {
log::info!("Loading YM file: {}", ym_path);
ym_file_parser::parse_file(ym_path)?
}
None => YmSong::parse(BUZZ_YM)?
};
log::info!(r#"{} "{}" by {}"#,
ym_file.version,
ym_file.title.trim(),
ym_file.author.trim());
log::info!(r#"Duration: {:?} {}"#,
ym_file.song_duration(),
ym_file.comments.trim());
log::debug!("Chip: {} Hz, frame: {} Hz, {} cycles each",
ym_file.clock_frequency(),
ym_file.frame_frequency,
ym_file.frame_cycles());
log::debug!("Frames total: {}, loop to: {}, {:?}",
ym_file.frames.len(),
ym_file.loop_frame,
ym_file.song_attrs);
if log::log_enabled!(log::Level::Debug) && !ym_file.dd_samples.is_empty() {
let mut sample_lens = Vec::with_capacity(ym_file.dd_samples_ends.len());
ym_file.dd_samples_ends.iter().try_fold(0,
|prev, &off| {
(off != 0).then(|| {
sample_lens.push(off - prev);
off
})
});
log::debug!("Drums: {}, sample lengths: {sample_lens:?}, total: {}",
sample_lens.len(), ym_file.dd_samples.len());
}
/* calculate a duration of a single frame */
let frame_duration_nanos = nanos_from_frame_tc_cpu_hz(
ym_file.frame_cycles().round() as u32,
ym_file.chipset_frequency) as u32;
log::trace!("Frame duration: {} ns", frame_duration_nanos);
let device = cpal::default_host().default_output_device().ok_or("no default audio device!")?;
log::debug!("Audio request: {}", args.audio);
let supported_config = find_best_audio_config(&device, args.audio)?;
log::trace!("Audio config supported: {supported_config:?}");
let config = supported_config.config();
// if let &cpal::SupportedBufferSize::Range { min, max } = supported_config.buffer_size() {
// let frame_duration_secs = core::time::Duration::from_nanos(frame_duration_nanos.into()).as_secs_f64();
// let audio_frame_samples = (config.sample_rate.0 as f64 * frame_duration_secs).ceil() as cpal::FrameCount;
// if (min..=max).contains(&audio_frame_samples) {
// config.buffer_size = cpal::BufferSize::Fixed(audio_frame_samples);
// }
// }
/* create an audio backend */
log::trace!("Audio config selected: {config:?}");
let latency = 20000000 / frame_duration_nanos as usize + 5;
let audio = AudioHandleAnyFormat::create_with_device_config_and_sample_format(
&device, &config, supported_config.sample_format(), frame_duration_nanos, latency)?;
log::trace!("Audio format: {:?}", audio.sample_format());
/* start audio thread */
audio.play()?;
match audio {
AudioHandleAnyFormat::I8(audio) => play::<i16, _>(audio, ym_file, args),
AudioHandleAnyFormat::U8(audio) => play::<i16, _>(audio, ym_file, args),
AudioHandleAnyFormat::I16(audio) => play::<i16, _>(audio, ym_file, args),
AudioHandleAnyFormat::U16(audio) => play::<i16, _>(audio, ym_file, args),
AudioHandleAnyFormat::I32(audio) => play::<i32, _>(audio, ym_file, args),
AudioHandleAnyFormat::U32(audio) => play::<i32, _>(audio, ym_file, args),
AudioHandleAnyFormat::I64(audio) => play::<f64, _>(audio, ym_file, args),
AudioHandleAnyFormat::U64(audio) => play::<f64, _>(audio, ym_file, args),
AudioHandleAnyFormat::F32(audio) => play::<f32, _>(audio, ym_file, args),
AudioHandleAnyFormat::F64(audio) => play::<f64, _>(audio, ym_file, args),
_ => Err("Unsupported audio sample format!")?
}
Ok(())
} | ChannelMode::Channel(channel)
} | random_line_split |
component.py | #
# -------------------------------------------------------------------------
# Copyright (c) 2015-2017 AT&T Intellectual Property
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -------------------------------------------------------------------------
#
from conductor.common.music.messaging import message
from conductor.common.music.model import base
from conductor.i18n import _LE # pylint: disable=W0212
from conductor.i18n import _LI # pylint: disable=W0212
import cotyledon
import futurist
import inspect
from oslo_config import cfg
from oslo_log import log
from oslo_messaging._drivers import common as rpc_common
import socket
import sys
import time
LOG = log.getLogger(__name__)
CONF = cfg.CONF
MESSAGING_SERVER_OPTS = [
cfg.StrOpt('keyspace',
default='conductor_rpc',
help='Music keyspace for messages'),
cfg.IntOpt('check_interval',
default=1,
min=1,
help='Wait interval while checking for a message response. '
'Default value is 1 second.'),
cfg.IntOpt('response_timeout',
default=120,
min=1,
help='Overall message response timeout. '
'Default value is 120 seconds.'),
cfg.IntOpt('timeout',
default=300,
min=1,
help='Timeout for detecting a VM is down, and other VMs can pick the plan up. '
'Default value is 5 minutes. (integer value)'),
cfg.IntOpt('workers',
default=1,
min=1,
help='Number of workers for messaging service. '
'Default value is 1.'),
cfg.IntOpt('polling_interval',
default=1,
min=1,
help='Time between checking for new messages. '
'Default value is 1.'),
cfg.BoolOpt('debug',
default=False,
help='Log debug messages. '
'Default value is False.'),
]
CONF.register_opts(MESSAGING_SERVER_OPTS, group='messaging_server')
# Some class/method descriptions taken from this Oslo Messaging
# RPC API Tutorial/Demo: https://www.youtube.com/watch?v=Bf4gkeoBzvA
RPCSVRNAME = "Music-RPC Server"
class Target(object):
"""Returns a messaging target.
A target encapsulates all the information to identify where a message
should be sent or what messages a server is listening for.
"""
_topic = None
_topic_class = None
def __init__(self, topic):
"""Set the topic and topic class"""
self._topic = topic
# Because this is Music-specific, the server is
# built-in to the API class, stored as the transport.
# Thus, unlike oslo.messaging, there is no server
# specified for a target. There also isn't an
# exchange, namespace, or version at the moment.
# Dynamically create a message class for this topic.
self._topic_class = base.create_dynamic_model(
keyspace=CONF.messaging_server.keyspace,
baseclass=message.Message, classname=self.topic)
if not self._topic_class:
raise RuntimeError("Error setting the topic class for the messaging layer.")
@property
def topic(self):
"""Topic Property"""
return self._topic
@property
def topic_class(self):
"""Topic Class Property"""
return self._topic_class
class RPCClient(object):
"""Returns an RPC client using Music as a transport.
The RPC client is responsible for sending method invocations
to remote servers via a messaging transport.
A method invocation consists of a request context dictionary
a method name, and a dictionary of arguments. A cast() invocation
just sends the request and returns immediately. A call() invocation
waits for the server to send a return value.
"""
def __init__(self, conf, transport, target):
"""Set the transport and target"""
self.conf = conf
self.transport = transport
self.target = target
self.RPC = self.target.topic_class
# introduced as a quick means to cache messages
# with the aim of preventing unnecessary communication
# across conductor components.
# self.message_cache = dict()
def __check_rpc_status(self, rpc_id, rpc_method):
"""Check status for a given message id"""
# Wait check_interval seconds before proceeding
check_interval = self.conf.messaging_server.check_interval
time.sleep(check_interval)
if self.conf.messaging_server.debug:
LOG.debug("Checking status for message {} method {} on "
"topic {}".format(rpc_id, rpc_method, self.target.topic))
rpc = self.RPC.query.one(rpc_id)
return rpc
def cast(self, ctxt, method, args):
"""Asynchronous Call"""
rpc = self.RPC(action=self.RPC.CAST,
ctxt=ctxt, method=method, args=args)
assert (rpc.enqueued)
rpc_id = rpc.id
topic = self.target.topic
LOG.info(
_LI("Message {} on topic {} enqueued").format(rpc_id, topic))
if self.conf.messaging_server.debug:
LOG.debug("Casting method {} with args {}".format(method, args))
return rpc_id
def call(self, ctxt, method, args):
"""Synchronous Call"""
# # check if the call has a message saved in cache
# # key: string concatenation of ctxt + method + args
# # value: rpc response object
# key = ""
# for k, v in ctxt.items():
# key += str(k)
# key += '#' + str(v) + '#'
# key += '|' + str(method) + '|'
# for k, v in args.items():
# key += str(k)
# key += '#' + str(v) + '#'
#
# # check if the method has been called before
# # and cached
# if key in self.message_cache:
# LOG.debug("Retrieved method {} with args "
# "{} from cache".format(method, args))
# return self.message_cache[key]
rpc_start_time = time.time()
rpc = self.RPC(action=self.RPC.CALL,
ctxt=ctxt, method=method, args=args)
# TODO(jdandrea): Do something if the assert fails.
assert (rpc.enqueued)
rpc_id = rpc.id
topic = self.target.topic
LOG.info(
_LI("Message {} on topic {} enqueued.").format(rpc_id, topic))
if self.conf.messaging_server.debug:
LOG.debug("Calling method {} with args {}".format(method, args))
# Check message status within a thread
executor = futurist.ThreadPoolExecutor()
started_at = time.time()
while (time.time() - started_at) <= self.conf.messaging_server.response_timeout:
fut = executor.submit(self.__check_rpc_status, rpc_id, method)
rpc = fut.result()
if rpc and rpc.finished:
if self.conf.messaging_server.debug:
LOG.debug("Message {} method {} response received".
format(rpc_id, method))
break
executor.shutdown()
# Get response, delete message, and return response
if not rpc or not rpc.finished:
LOG.error(_LE("Message {} on topic {} timed out at {} seconds").
format(rpc_id, topic,
self.conf.messaging_server.response_timeout))
elif not rpc.ok:
LOG.error(_LE("Message {} on topic {} returned an error").
format(rpc_id, topic))
response = rpc.response
failure = rpc.failure
rpc.delete() # TODO(jdandrea): Put a TTL on the msg instead?
# self.message_cache[key] = response
LOG.debug("Elapsed time: {0:.3f} sec".format(
time.time() - rpc_start_time)
)
# If there's a failure, raise it as an exception
allowed = []
if failure is not None and failure != '':
# TODO(jdandrea): Do we need to populate allowed(_remote_exmods)?
raise rpc_common.deserialize_remote_exception(failure, allowed)
return response
class RPCService(cotyledon.Service):
"""Listener for the RPC service.
An RPC Service exposes a number of endpoints, each of which contain
a set of methods which may be invoked remotely by clients over a
given transport. To create an RPC server, you supply a transport,
target, and a list of endpoints.
Start the server with server.run()
"""
# This will appear in 'ps xaf'
name = RPCSVRNAME
def __init__(self, worker_id, conf, **kwargs):
"""Initializer"""
super(RPCService, self).__init__(worker_id)
if conf.messaging_server.debug:
LOG.debug("%s" % self.__class__.__name__)
self._init(conf, **kwargs)
self.running = True
def _init(self, conf, **kwargs):
"""Prepare to process requests"""
self.conf = conf
self.rpc_listener = None
self.transport = kwargs.pop('transport')
self.target = kwargs.pop('target')
self.endpoints = kwargs.pop('endpoints')
self.flush = kwargs.pop('flush')
self.kwargs = kwargs
self.RPC = self.target.topic_class
self.name = "{}, topic({})".format(RPCSVRNAME, self.target.topic)
self.messaging_owner_condition = {
"owner": socket.gethostname()
}
self.enqueued_status_condition = {
"status": message.Message.ENQUEUED
}
self.working_status_condition = {
"status": message.Message.WORKING
}
if self.flush:
self._flush_enqueued()
def _flush_enqueued(self):
"""Flush all messages with an enqueued status.
Use this only when the parent service is not running concurrently.
"""
msgs = self.RPC.query.all()
for msg in msgs:
if msg.enqueued:
if 'plan_name' in list(msg.ctxt.keys()): # Python 3 Conversion -- dict object to list object
LOG.info('Plan name: {}'.format(msg.ctxt['plan_name']))
elif 'plan_name' in list(msg.args.keys()): # Python 3 Conversion -- dict object to list object
LOG.info('Plan name: {}'.format(msg.args['plan_name']))
msg.delete()
def _log_error_and_update_msg(self, msg, error_msg):
LOG.error(error_msg)
msg.response = {
'error': {
'message': error_msg
}
}
msg.status = message.Message.ERROR
msg.update(condition=self.messaging_owner_condition)
def current_time_seconds(self):
"""Current time in milliseconds."""
return int(round(time.time()))
def millisec_to_sec(self, millisec):
"""Convert milliseconds to seconds"""
return millisec / 1000
def __check_for_messages(self):
"""Wait for the polling interval, then do the real message check."""
# Wait for at least poll_interval sec
polling_interval = self.conf.messaging_server.polling_interval
time.sleep(polling_interval)
if self.conf.messaging_server.debug:
LOG.debug("Topic {}: Checking for new messages".format(
self.target.topic))
self._do()
return True
# FIXME(jdandrea): Better name for this, please, kthx.
def _do(self):
"""Look for a new RPC call and serve it"""
# Get all the messages in queue
msgs = self.RPC.query.all()
for msg in msgs:
# Find the first msg marked as enqueued.
if msg.working and \
(self.current_time_seconds() - self.millisec_to_sec(msg.updated)) \
> self.conf.messaging_server.response_timeout:
msg.status = message.Message.ENQUEUED
msg.update(condition=self.working_status_condition)
if not msg.enqueued:
continue
if 'plan_name' in list(msg.ctxt.keys()): # Python 3 Conversion -- dict object to list object
LOG.info('Plan name: {}'.format(msg.ctxt['plan_name']))
elif 'plan_name' in list(msg.args.keys()): # Python 3 Conversion -- dict object to list object
LOG.info('Plan name: {}'.format(msg.args['plan_name']))
# Change the status to WORKING (operation with a lock)
msg.status = message.Message.WORKING
msg.owner = socket.gethostname()
# All update should have a condition (status == enqueued)
_is_updated = msg.update(condition=self.enqueued_status_condition)
if not _is_updated or 'FAILURE' in _is_updated:
continue
# RPC methods must not start/end with an underscore.
if msg.method.startswith('_') or msg.method.endswith('_'):
error_msg = _LE("Method {} must not start or end"
"with underscores").format(msg.method)
self._log_error_and_update_msg(msg, error_msg)
return
# The first endpoint that supports the method wins.
method = None
for endpoint in self.endpoints:
if msg.method not in dir(endpoint):
continue
endpoint_method = getattr(endpoint, msg.method)
if callable(endpoint_method):
method = endpoint_method
if self.conf.messaging_server.debug:
LOG.debug("Message {} method {} is "
"handled by endpoint {}".
format(msg.id, msg.method,
method.__str__.__name__))
break
if not method:
error_msg = _LE("Message {} method {} unsupported "
"in endpoints.").format(msg.id, msg.method)
self._log_error_and_update_msg(msg, error_msg)
return
# All methods must take a ctxt and args param.
if inspect.getfullargspec(method).args != ['self', 'ctx', 'arg']:
error_msg = _LE("Method {} must take three args: "
"self, ctx, arg").format(msg.method)
self._log_error_and_update_msg(msg, error_msg)
return
| LOG.info(_LI("Message {} method {} received").format(
msg.id, msg.method))
if self.conf.messaging_server.debug:
LOG.debug(
_LI("Message {} method {} context: {}, args: {}").format(
msg.id, msg.method, msg.ctxt, msg.args))
failure = None
try:
# Add the template to conductor.plan table
# Methods return an opaque dictionary
result = method(msg.ctxt, msg.args)
# FIXME(jdandrea): Remove response/error and make it opaque.
# That means this would just be assigned result outright.
msg.response = result.get('response', result)
except Exception:
# Current sys.exc_info() content can be overridden
# by another exception raised by a log handler during
# LOG.exception(). So keep a copy and delete it later.
failure = sys.exc_info()
# Do not log details about the failure here. It will
# be returned later upstream.
LOG.exception(_LE('Exception during message handling'))
try:
if failure is None:
msg.status = message.Message.COMPLETED
else:
msg.failure = \
rpc_common.serialize_remote_exception(failure)
msg.status = message.Message.ERROR
LOG.info(_LI("Message {} method {}, status: {}").format(
msg.id, msg.method, msg.status))
if self.conf.messaging_server.debug:
LOG.debug("Message {} method {}, response: {}".format(
msg.id, msg.method, msg.response))
_is_success = 'FAILURE'
while 'FAILURE' in _is_success and (self.current_time_seconds() - self.millisec_to_sec(msg.updated)) \
<= self.conf.messaging_server.response_timeout:
_is_success = msg.update()
LOG.info(_LI("updating the message status from working to {}, "
"atomic update response from MUSIC {}").format(msg.status, _is_success))
except Exception:
LOG.exception(_LE("Can not send reply for message {} "
"method {}").
format(msg.id, msg.method))
finally:
# Remove circular object reference between the current
# stack frame and the traceback in exc_info.
del failure
def _gracefully_stop(self):
"""Gracefully stop working on things"""
pass
def _restart(self):
"""Prepare to restart the RPC Server"""
pass
def run(self):
"""Run"""
# The server listens for messages and calls the
# appropriate methods. It also deletes messages once
# processed.
if self.conf.messaging_server.debug:
LOG.debug("%s" % self.__class__.__name__)
# Listen for messages within a thread
executor = futurist.ThreadPoolExecutor()
while self.running:
fut = executor.submit(self.__check_for_messages)
fut.result()
executor.shutdown()
def terminate(self):
"""Terminate"""
if self.conf.messaging_server.debug:
LOG.debug("%s" % self.__class__.__name__)
self.running = False
self._gracefully_stop()
super(RPCService, self).terminate()
def reload(self):
"""Reload"""
if self.conf.messaging_server.debug:
LOG.debug("%s" % self.__class__.__name__)
self._restart() | random_line_split | |
component.py | #
# -------------------------------------------------------------------------
# Copyright (c) 2015-2017 AT&T Intellectual Property
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -------------------------------------------------------------------------
#
from conductor.common.music.messaging import message
from conductor.common.music.model import base
from conductor.i18n import _LE # pylint: disable=W0212
from conductor.i18n import _LI # pylint: disable=W0212
import cotyledon
import futurist
import inspect
from oslo_config import cfg
from oslo_log import log
from oslo_messaging._drivers import common as rpc_common
import socket
import sys
import time
LOG = log.getLogger(__name__)
CONF = cfg.CONF
MESSAGING_SERVER_OPTS = [
cfg.StrOpt('keyspace',
default='conductor_rpc',
help='Music keyspace for messages'),
cfg.IntOpt('check_interval',
default=1,
min=1,
help='Wait interval while checking for a message response. '
'Default value is 1 second.'),
cfg.IntOpt('response_timeout',
default=120,
min=1,
help='Overall message response timeout. '
'Default value is 120 seconds.'),
cfg.IntOpt('timeout',
default=300,
min=1,
help='Timeout for detecting a VM is down, and other VMs can pick the plan up. '
'Default value is 5 minutes. (integer value)'),
cfg.IntOpt('workers',
default=1,
min=1,
help='Number of workers for messaging service. '
'Default value is 1.'),
cfg.IntOpt('polling_interval',
default=1,
min=1,
help='Time between checking for new messages. '
'Default value is 1.'),
cfg.BoolOpt('debug',
default=False,
help='Log debug messages. '
'Default value is False.'),
]
CONF.register_opts(MESSAGING_SERVER_OPTS, group='messaging_server')
# Some class/method descriptions taken from this Oslo Messaging
# RPC API Tutorial/Demo: https://www.youtube.com/watch?v=Bf4gkeoBzvA
RPCSVRNAME = "Music-RPC Server"
class Target(object):
"""Returns a messaging target.
A target encapsulates all the information to identify where a message
should be sent or what messages a server is listening for.
"""
_topic = None
_topic_class = None
def __init__(self, topic):
"""Set the topic and topic class"""
self._topic = topic
# Because this is Music-specific, the server is
# built-in to the API class, stored as the transport.
# Thus, unlike oslo.messaging, there is no server
# specified for a target. There also isn't an
# exchange, namespace, or version at the moment.
# Dynamically create a message class for this topic.
self._topic_class = base.create_dynamic_model(
keyspace=CONF.messaging_server.keyspace,
baseclass=message.Message, classname=self.topic)
if not self._topic_class:
raise RuntimeError("Error setting the topic class for the messaging layer.")
@property
def topic(self):
"""Topic Property"""
return self._topic
@property
def topic_class(self):
"""Topic Class Property"""
return self._topic_class
class RPCClient(object):
"""Returns an RPC client using Music as a transport.
The RPC client is responsible for sending method invocations
to remote servers via a messaging transport.
A method invocation consists of a request context dictionary
a method name, and a dictionary of arguments. A cast() invocation
just sends the request and returns immediately. A call() invocation
waits for the server to send a return value.
"""
def __init__(self, conf, transport, target):
"""Set the transport and target"""
self.conf = conf
self.transport = transport
self.target = target
self.RPC = self.target.topic_class
# introduced as a quick means to cache messages
# with the aim of preventing unnecessary communication
# across conductor components.
# self.message_cache = dict()
def __check_rpc_status(self, rpc_id, rpc_method):
"""Check status for a given message id"""
# Wait check_interval seconds before proceeding
check_interval = self.conf.messaging_server.check_interval
time.sleep(check_interval)
if self.conf.messaging_server.debug:
LOG.debug("Checking status for message {} method {} on "
"topic {}".format(rpc_id, rpc_method, self.target.topic))
rpc = self.RPC.query.one(rpc_id)
return rpc
def cast(self, ctxt, method, args):
"""Asynchronous Call"""
rpc = self.RPC(action=self.RPC.CAST,
ctxt=ctxt, method=method, args=args)
assert (rpc.enqueued)
rpc_id = rpc.id
topic = self.target.topic
LOG.info(
_LI("Message {} on topic {} enqueued").format(rpc_id, topic))
if self.conf.messaging_server.debug:
LOG.debug("Casting method {} with args {}".format(method, args))
return rpc_id
def call(self, ctxt, method, args):
"""Synchronous Call"""
# # check if the call has a message saved in cache
# # key: string concatenation of ctxt + method + args
# # value: rpc response object
# key = ""
# for k, v in ctxt.items():
# key += str(k)
# key += '#' + str(v) + '#'
# key += '|' + str(method) + '|'
# for k, v in args.items():
# key += str(k)
# key += '#' + str(v) + '#'
#
# # check if the method has been called before
# # and cached
# if key in self.message_cache:
# LOG.debug("Retrieved method {} with args "
# "{} from cache".format(method, args))
# return self.message_cache[key]
rpc_start_time = time.time()
rpc = self.RPC(action=self.RPC.CALL,
ctxt=ctxt, method=method, args=args)
# TODO(jdandrea): Do something if the assert fails.
assert (rpc.enqueued)
rpc_id = rpc.id
topic = self.target.topic
LOG.info(
_LI("Message {} on topic {} enqueued.").format(rpc_id, topic))
if self.conf.messaging_server.debug:
LOG.debug("Calling method {} with args {}".format(method, args))
# Check message status within a thread
executor = futurist.ThreadPoolExecutor()
started_at = time.time()
while (time.time() - started_at) <= self.conf.messaging_server.response_timeout:
fut = executor.submit(self.__check_rpc_status, rpc_id, method)
rpc = fut.result()
if rpc and rpc.finished:
if self.conf.messaging_server.debug:
LOG.debug("Message {} method {} response received".
format(rpc_id, method))
break
executor.shutdown()
# Get response, delete message, and return response
if not rpc or not rpc.finished:
LOG.error(_LE("Message {} on topic {} timed out at {} seconds").
format(rpc_id, topic,
self.conf.messaging_server.response_timeout))
elif not rpc.ok:
LOG.error(_LE("Message {} on topic {} returned an error").
format(rpc_id, topic))
response = rpc.response
failure = rpc.failure
rpc.delete() # TODO(jdandrea): Put a TTL on the msg instead?
# self.message_cache[key] = response
LOG.debug("Elapsed time: {0:.3f} sec".format(
time.time() - rpc_start_time)
)
# If there's a failure, raise it as an exception
allowed = []
if failure is not None and failure != '':
# TODO(jdandrea): Do we need to populate allowed(_remote_exmods)?
raise rpc_common.deserialize_remote_exception(failure, allowed)
return response
class RPCService(cotyledon.Service):
"""Listener for the RPC service.
An RPC Service exposes a number of endpoints, each of which contain
a set of methods which may be invoked remotely by clients over a
given transport. To create an RPC server, you supply a transport,
target, and a list of endpoints.
Start the server with server.run()
"""
# This will appear in 'ps xaf'
name = RPCSVRNAME
def __init__(self, worker_id, conf, **kwargs):
"""Initializer"""
super(RPCService, self).__init__(worker_id)
if conf.messaging_server.debug:
LOG.debug("%s" % self.__class__.__name__)
self._init(conf, **kwargs)
self.running = True
def _init(self, conf, **kwargs):
"""Prepare to process requests"""
self.conf = conf
self.rpc_listener = None
self.transport = kwargs.pop('transport')
self.target = kwargs.pop('target')
self.endpoints = kwargs.pop('endpoints')
self.flush = kwargs.pop('flush')
self.kwargs = kwargs
self.RPC = self.target.topic_class
self.name = "{}, topic({})".format(RPCSVRNAME, self.target.topic)
self.messaging_owner_condition = {
"owner": socket.gethostname()
}
self.enqueued_status_condition = {
"status": message.Message.ENQUEUED
}
self.working_status_condition = {
"status": message.Message.WORKING
}
if self.flush:
|
def _flush_enqueued(self):
"""Flush all messages with an enqueued status.
Use this only when the parent service is not running concurrently.
"""
msgs = self.RPC.query.all()
for msg in msgs:
if msg.enqueued:
if 'plan_name' in list(msg.ctxt.keys()): # Python 3 Conversion -- dict object to list object
LOG.info('Plan name: {}'.format(msg.ctxt['plan_name']))
elif 'plan_name' in list(msg.args.keys()): # Python 3 Conversion -- dict object to list object
LOG.info('Plan name: {}'.format(msg.args['plan_name']))
msg.delete()
def _log_error_and_update_msg(self, msg, error_msg):
LOG.error(error_msg)
msg.response = {
'error': {
'message': error_msg
}
}
msg.status = message.Message.ERROR
msg.update(condition=self.messaging_owner_condition)
def current_time_seconds(self):
"""Current time in milliseconds."""
return int(round(time.time()))
def millisec_to_sec(self, millisec):
"""Convert milliseconds to seconds"""
return millisec / 1000
def __check_for_messages(self):
"""Wait for the polling interval, then do the real message check."""
# Wait for at least poll_interval sec
polling_interval = self.conf.messaging_server.polling_interval
time.sleep(polling_interval)
if self.conf.messaging_server.debug:
LOG.debug("Topic {}: Checking for new messages".format(
self.target.topic))
self._do()
return True
# FIXME(jdandrea): Better name for this, please, kthx.
def _do(self):
"""Look for a new RPC call and serve it"""
# Get all the messages in queue
msgs = self.RPC.query.all()
for msg in msgs:
# Find the first msg marked as enqueued.
if msg.working and \
(self.current_time_seconds() - self.millisec_to_sec(msg.updated)) \
> self.conf.messaging_server.response_timeout:
msg.status = message.Message.ENQUEUED
msg.update(condition=self.working_status_condition)
if not msg.enqueued:
continue
if 'plan_name' in list(msg.ctxt.keys()): # Python 3 Conversion -- dict object to list object
LOG.info('Plan name: {}'.format(msg.ctxt['plan_name']))
elif 'plan_name' in list(msg.args.keys()): # Python 3 Conversion -- dict object to list object
LOG.info('Plan name: {}'.format(msg.args['plan_name']))
# Change the status to WORKING (operation with a lock)
msg.status = message.Message.WORKING
msg.owner = socket.gethostname()
# All update should have a condition (status == enqueued)
_is_updated = msg.update(condition=self.enqueued_status_condition)
if not _is_updated or 'FAILURE' in _is_updated:
continue
# RPC methods must not start/end with an underscore.
if msg.method.startswith('_') or msg.method.endswith('_'):
error_msg = _LE("Method {} must not start or end"
"with underscores").format(msg.method)
self._log_error_and_update_msg(msg, error_msg)
return
# The first endpoint that supports the method wins.
method = None
for endpoint in self.endpoints:
if msg.method not in dir(endpoint):
continue
endpoint_method = getattr(endpoint, msg.method)
if callable(endpoint_method):
method = endpoint_method
if self.conf.messaging_server.debug:
LOG.debug("Message {} method {} is "
"handled by endpoint {}".
format(msg.id, msg.method,
method.__str__.__name__))
break
if not method:
error_msg = _LE("Message {} method {} unsupported "
"in endpoints.").format(msg.id, msg.method)
self._log_error_and_update_msg(msg, error_msg)
return
# All methods must take a ctxt and args param.
if inspect.getfullargspec(method).args != ['self', 'ctx', 'arg']:
error_msg = _LE("Method {} must take three args: "
"self, ctx, arg").format(msg.method)
self._log_error_and_update_msg(msg, error_msg)
return
LOG.info(_LI("Message {} method {} received").format(
msg.id, msg.method))
if self.conf.messaging_server.debug:
LOG.debug(
_LI("Message {} method {} context: {}, args: {}").format(
msg.id, msg.method, msg.ctxt, msg.args))
failure = None
try:
# Add the template to conductor.plan table
# Methods return an opaque dictionary
result = method(msg.ctxt, msg.args)
# FIXME(jdandrea): Remove response/error and make it opaque.
# That means this would just be assigned result outright.
msg.response = result.get('response', result)
except Exception:
# Current sys.exc_info() content can be overridden
# by another exception raised by a log handler during
# LOG.exception(). So keep a copy and delete it later.
failure = sys.exc_info()
# Do not log details about the failure here. It will
# be returned later upstream.
LOG.exception(_LE('Exception during message handling'))
try:
if failure is None:
msg.status = message.Message.COMPLETED
else:
msg.failure = \
rpc_common.serialize_remote_exception(failure)
msg.status = message.Message.ERROR
LOG.info(_LI("Message {} method {}, status: {}").format(
msg.id, msg.method, msg.status))
if self.conf.messaging_server.debug:
LOG.debug("Message {} method {}, response: {}".format(
msg.id, msg.method, msg.response))
_is_success = 'FAILURE'
while 'FAILURE' in _is_success and (self.current_time_seconds() - self.millisec_to_sec(msg.updated)) \
<= self.conf.messaging_server.response_timeout:
_is_success = msg.update()
LOG.info(_LI("updating the message status from working to {}, "
"atomic update response from MUSIC {}").format(msg.status, _is_success))
except Exception:
LOG.exception(_LE("Can not send reply for message {} "
"method {}").
format(msg.id, msg.method))
finally:
# Remove circular object reference between the current
# stack frame and the traceback in exc_info.
del failure
def _gracefully_stop(self):
"""Gracefully stop working on things"""
pass
def _restart(self):
"""Prepare to restart the RPC Server"""
pass
def run(self):
"""Run"""
# The server listens for messages and calls the
# appropriate methods. It also deletes messages once
# processed.
if self.conf.messaging_server.debug:
LOG.debug("%s" % self.__class__.__name__)
# Listen for messages within a thread
executor = futurist.ThreadPoolExecutor()
while self.running:
fut = executor.submit(self.__check_for_messages)
fut.result()
executor.shutdown()
def terminate(self):
"""Terminate"""
if self.conf.messaging_server.debug:
LOG.debug("%s" % self.__class__.__name__)
self.running = False
self._gracefully_stop()
super(RPCService, self).terminate()
def reload(self):
"""Reload"""
if self.conf.messaging_server.debug:
LOG.debug("%s" % self.__class__.__name__)
self._restart()
| self._flush_enqueued() | conditional_block |
component.py | #
# -------------------------------------------------------------------------
# Copyright (c) 2015-2017 AT&T Intellectual Property
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -------------------------------------------------------------------------
#
from conductor.common.music.messaging import message
from conductor.common.music.model import base
from conductor.i18n import _LE # pylint: disable=W0212
from conductor.i18n import _LI # pylint: disable=W0212
import cotyledon
import futurist
import inspect
from oslo_config import cfg
from oslo_log import log
from oslo_messaging._drivers import common as rpc_common
import socket
import sys
import time
LOG = log.getLogger(__name__)
CONF = cfg.CONF
MESSAGING_SERVER_OPTS = [
cfg.StrOpt('keyspace',
default='conductor_rpc',
help='Music keyspace for messages'),
cfg.IntOpt('check_interval',
default=1,
min=1,
help='Wait interval while checking for a message response. '
'Default value is 1 second.'),
cfg.IntOpt('response_timeout',
default=120,
min=1,
help='Overall message response timeout. '
'Default value is 120 seconds.'),
cfg.IntOpt('timeout',
default=300,
min=1,
help='Timeout for detecting a VM is down, and other VMs can pick the plan up. '
'Default value is 5 minutes. (integer value)'),
cfg.IntOpt('workers',
default=1,
min=1,
help='Number of workers for messaging service. '
'Default value is 1.'),
cfg.IntOpt('polling_interval',
default=1,
min=1,
help='Time between checking for new messages. '
'Default value is 1.'),
cfg.BoolOpt('debug',
default=False,
help='Log debug messages. '
'Default value is False.'),
]
CONF.register_opts(MESSAGING_SERVER_OPTS, group='messaging_server')
# Some class/method descriptions taken from this Oslo Messaging
# RPC API Tutorial/Demo: https://www.youtube.com/watch?v=Bf4gkeoBzvA
RPCSVRNAME = "Music-RPC Server"
class Target(object):
"""Returns a messaging target.
A target encapsulates all the information to identify where a message
should be sent or what messages a server is listening for.
"""
_topic = None
_topic_class = None
def | (self, topic):
"""Set the topic and topic class"""
self._topic = topic
# Because this is Music-specific, the server is
# built-in to the API class, stored as the transport.
# Thus, unlike oslo.messaging, there is no server
# specified for a target. There also isn't an
# exchange, namespace, or version at the moment.
# Dynamically create a message class for this topic.
self._topic_class = base.create_dynamic_model(
keyspace=CONF.messaging_server.keyspace,
baseclass=message.Message, classname=self.topic)
if not self._topic_class:
raise RuntimeError("Error setting the topic class for the messaging layer.")
@property
def topic(self):
"""Topic Property"""
return self._topic
@property
def topic_class(self):
"""Topic Class Property"""
return self._topic_class
class RPCClient(object):
"""Returns an RPC client using Music as a transport.
The RPC client is responsible for sending method invocations
to remote servers via a messaging transport.
A method invocation consists of a request context dictionary
a method name, and a dictionary of arguments. A cast() invocation
just sends the request and returns immediately. A call() invocation
waits for the server to send a return value.
"""
def __init__(self, conf, transport, target):
"""Set the transport and target"""
self.conf = conf
self.transport = transport
self.target = target
self.RPC = self.target.topic_class
# introduced as a quick means to cache messages
# with the aim of preventing unnecessary communication
# across conductor components.
# self.message_cache = dict()
def __check_rpc_status(self, rpc_id, rpc_method):
"""Check status for a given message id"""
# Wait check_interval seconds before proceeding
check_interval = self.conf.messaging_server.check_interval
time.sleep(check_interval)
if self.conf.messaging_server.debug:
LOG.debug("Checking status for message {} method {} on "
"topic {}".format(rpc_id, rpc_method, self.target.topic))
rpc = self.RPC.query.one(rpc_id)
return rpc
def cast(self, ctxt, method, args):
"""Asynchronous Call"""
rpc = self.RPC(action=self.RPC.CAST,
ctxt=ctxt, method=method, args=args)
assert (rpc.enqueued)
rpc_id = rpc.id
topic = self.target.topic
LOG.info(
_LI("Message {} on topic {} enqueued").format(rpc_id, topic))
if self.conf.messaging_server.debug:
LOG.debug("Casting method {} with args {}".format(method, args))
return rpc_id
def call(self, ctxt, method, args):
"""Synchronous Call"""
# # check if the call has a message saved in cache
# # key: string concatenation of ctxt + method + args
# # value: rpc response object
# key = ""
# for k, v in ctxt.items():
# key += str(k)
# key += '#' + str(v) + '#'
# key += '|' + str(method) + '|'
# for k, v in args.items():
# key += str(k)
# key += '#' + str(v) + '#'
#
# # check if the method has been called before
# # and cached
# if key in self.message_cache:
# LOG.debug("Retrieved method {} with args "
# "{} from cache".format(method, args))
# return self.message_cache[key]
rpc_start_time = time.time()
rpc = self.RPC(action=self.RPC.CALL,
ctxt=ctxt, method=method, args=args)
# TODO(jdandrea): Do something if the assert fails.
assert (rpc.enqueued)
rpc_id = rpc.id
topic = self.target.topic
LOG.info(
_LI("Message {} on topic {} enqueued.").format(rpc_id, topic))
if self.conf.messaging_server.debug:
LOG.debug("Calling method {} with args {}".format(method, args))
# Check message status within a thread
executor = futurist.ThreadPoolExecutor()
started_at = time.time()
while (time.time() - started_at) <= self.conf.messaging_server.response_timeout:
fut = executor.submit(self.__check_rpc_status, rpc_id, method)
rpc = fut.result()
if rpc and rpc.finished:
if self.conf.messaging_server.debug:
LOG.debug("Message {} method {} response received".
format(rpc_id, method))
break
executor.shutdown()
# Get response, delete message, and return response
if not rpc or not rpc.finished:
LOG.error(_LE("Message {} on topic {} timed out at {} seconds").
format(rpc_id, topic,
self.conf.messaging_server.response_timeout))
elif not rpc.ok:
LOG.error(_LE("Message {} on topic {} returned an error").
format(rpc_id, topic))
response = rpc.response
failure = rpc.failure
rpc.delete() # TODO(jdandrea): Put a TTL on the msg instead?
# self.message_cache[key] = response
LOG.debug("Elapsed time: {0:.3f} sec".format(
time.time() - rpc_start_time)
)
# If there's a failure, raise it as an exception
allowed = []
if failure is not None and failure != '':
# TODO(jdandrea): Do we need to populate allowed(_remote_exmods)?
raise rpc_common.deserialize_remote_exception(failure, allowed)
return response
class RPCService(cotyledon.Service):
"""Listener for the RPC service.
An RPC Service exposes a number of endpoints, each of which contain
a set of methods which may be invoked remotely by clients over a
given transport. To create an RPC server, you supply a transport,
target, and a list of endpoints.
Start the server with server.run()
"""
# This will appear in 'ps xaf'
name = RPCSVRNAME
def __init__(self, worker_id, conf, **kwargs):
"""Initializer"""
super(RPCService, self).__init__(worker_id)
if conf.messaging_server.debug:
LOG.debug("%s" % self.__class__.__name__)
self._init(conf, **kwargs)
self.running = True
def _init(self, conf, **kwargs):
"""Prepare to process requests"""
self.conf = conf
self.rpc_listener = None
self.transport = kwargs.pop('transport')
self.target = kwargs.pop('target')
self.endpoints = kwargs.pop('endpoints')
self.flush = kwargs.pop('flush')
self.kwargs = kwargs
self.RPC = self.target.topic_class
self.name = "{}, topic({})".format(RPCSVRNAME, self.target.topic)
self.messaging_owner_condition = {
"owner": socket.gethostname()
}
self.enqueued_status_condition = {
"status": message.Message.ENQUEUED
}
self.working_status_condition = {
"status": message.Message.WORKING
}
if self.flush:
self._flush_enqueued()
def _flush_enqueued(self):
"""Flush all messages with an enqueued status.
Use this only when the parent service is not running concurrently.
"""
msgs = self.RPC.query.all()
for msg in msgs:
if msg.enqueued:
if 'plan_name' in list(msg.ctxt.keys()): # Python 3 Conversion -- dict object to list object
LOG.info('Plan name: {}'.format(msg.ctxt['plan_name']))
elif 'plan_name' in list(msg.args.keys()): # Python 3 Conversion -- dict object to list object
LOG.info('Plan name: {}'.format(msg.args['plan_name']))
msg.delete()
def _log_error_and_update_msg(self, msg, error_msg):
LOG.error(error_msg)
msg.response = {
'error': {
'message': error_msg
}
}
msg.status = message.Message.ERROR
msg.update(condition=self.messaging_owner_condition)
def current_time_seconds(self):
"""Current time in milliseconds."""
return int(round(time.time()))
def millisec_to_sec(self, millisec):
"""Convert milliseconds to seconds"""
return millisec / 1000
def __check_for_messages(self):
"""Wait for the polling interval, then do the real message check."""
# Wait for at least poll_interval sec
polling_interval = self.conf.messaging_server.polling_interval
time.sleep(polling_interval)
if self.conf.messaging_server.debug:
LOG.debug("Topic {}: Checking for new messages".format(
self.target.topic))
self._do()
return True
# FIXME(jdandrea): Better name for this, please, kthx.
def _do(self):
"""Look for a new RPC call and serve it"""
# Get all the messages in queue
msgs = self.RPC.query.all()
for msg in msgs:
# Find the first msg marked as enqueued.
if msg.working and \
(self.current_time_seconds() - self.millisec_to_sec(msg.updated)) \
> self.conf.messaging_server.response_timeout:
msg.status = message.Message.ENQUEUED
msg.update(condition=self.working_status_condition)
if not msg.enqueued:
continue
if 'plan_name' in list(msg.ctxt.keys()): # Python 3 Conversion -- dict object to list object
LOG.info('Plan name: {}'.format(msg.ctxt['plan_name']))
elif 'plan_name' in list(msg.args.keys()): # Python 3 Conversion -- dict object to list object
LOG.info('Plan name: {}'.format(msg.args['plan_name']))
# Change the status to WORKING (operation with a lock)
msg.status = message.Message.WORKING
msg.owner = socket.gethostname()
# All update should have a condition (status == enqueued)
_is_updated = msg.update(condition=self.enqueued_status_condition)
if not _is_updated or 'FAILURE' in _is_updated:
continue
# RPC methods must not start/end with an underscore.
if msg.method.startswith('_') or msg.method.endswith('_'):
error_msg = _LE("Method {} must not start or end"
"with underscores").format(msg.method)
self._log_error_and_update_msg(msg, error_msg)
return
# The first endpoint that supports the method wins.
method = None
for endpoint in self.endpoints:
if msg.method not in dir(endpoint):
continue
endpoint_method = getattr(endpoint, msg.method)
if callable(endpoint_method):
method = endpoint_method
if self.conf.messaging_server.debug:
LOG.debug("Message {} method {} is "
"handled by endpoint {}".
format(msg.id, msg.method,
method.__str__.__name__))
break
if not method:
error_msg = _LE("Message {} method {} unsupported "
"in endpoints.").format(msg.id, msg.method)
self._log_error_and_update_msg(msg, error_msg)
return
# All methods must take a ctxt and args param.
if inspect.getfullargspec(method).args != ['self', 'ctx', 'arg']:
error_msg = _LE("Method {} must take three args: "
"self, ctx, arg").format(msg.method)
self._log_error_and_update_msg(msg, error_msg)
return
LOG.info(_LI("Message {} method {} received").format(
msg.id, msg.method))
if self.conf.messaging_server.debug:
LOG.debug(
_LI("Message {} method {} context: {}, args: {}").format(
msg.id, msg.method, msg.ctxt, msg.args))
failure = None
try:
# Add the template to conductor.plan table
# Methods return an opaque dictionary
result = method(msg.ctxt, msg.args)
# FIXME(jdandrea): Remove response/error and make it opaque.
# That means this would just be assigned result outright.
msg.response = result.get('response', result)
except Exception:
# Current sys.exc_info() content can be overridden
# by another exception raised by a log handler during
# LOG.exception(). So keep a copy and delete it later.
failure = sys.exc_info()
# Do not log details about the failure here. It will
# be returned later upstream.
LOG.exception(_LE('Exception during message handling'))
try:
if failure is None:
msg.status = message.Message.COMPLETED
else:
msg.failure = \
rpc_common.serialize_remote_exception(failure)
msg.status = message.Message.ERROR
LOG.info(_LI("Message {} method {}, status: {}").format(
msg.id, msg.method, msg.status))
if self.conf.messaging_server.debug:
LOG.debug("Message {} method {}, response: {}".format(
msg.id, msg.method, msg.response))
_is_success = 'FAILURE'
while 'FAILURE' in _is_success and (self.current_time_seconds() - self.millisec_to_sec(msg.updated)) \
<= self.conf.messaging_server.response_timeout:
_is_success = msg.update()
LOG.info(_LI("updating the message status from working to {}, "
"atomic update response from MUSIC {}").format(msg.status, _is_success))
except Exception:
LOG.exception(_LE("Can not send reply for message {} "
"method {}").
format(msg.id, msg.method))
finally:
# Remove circular object reference between the current
# stack frame and the traceback in exc_info.
del failure
def _gracefully_stop(self):
"""Gracefully stop working on things"""
pass
def _restart(self):
"""Prepare to restart the RPC Server"""
pass
def run(self):
"""Run"""
# The server listens for messages and calls the
# appropriate methods. It also deletes messages once
# processed.
if self.conf.messaging_server.debug:
LOG.debug("%s" % self.__class__.__name__)
# Listen for messages within a thread
executor = futurist.ThreadPoolExecutor()
while self.running:
fut = executor.submit(self.__check_for_messages)
fut.result()
executor.shutdown()
def terminate(self):
"""Terminate"""
if self.conf.messaging_server.debug:
LOG.debug("%s" % self.__class__.__name__)
self.running = False
self._gracefully_stop()
super(RPCService, self).terminate()
def reload(self):
"""Reload"""
if self.conf.messaging_server.debug:
LOG.debug("%s" % self.__class__.__name__)
self._restart()
| __init__ | identifier_name |
component.py | #
# -------------------------------------------------------------------------
# Copyright (c) 2015-2017 AT&T Intellectual Property
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -------------------------------------------------------------------------
#
from conductor.common.music.messaging import message
from conductor.common.music.model import base
from conductor.i18n import _LE # pylint: disable=W0212
from conductor.i18n import _LI # pylint: disable=W0212
import cotyledon
import futurist
import inspect
from oslo_config import cfg
from oslo_log import log
from oslo_messaging._drivers import common as rpc_common
import socket
import sys
import time
LOG = log.getLogger(__name__)
CONF = cfg.CONF
MESSAGING_SERVER_OPTS = [
cfg.StrOpt('keyspace',
default='conductor_rpc',
help='Music keyspace for messages'),
cfg.IntOpt('check_interval',
default=1,
min=1,
help='Wait interval while checking for a message response. '
'Default value is 1 second.'),
cfg.IntOpt('response_timeout',
default=120,
min=1,
help='Overall message response timeout. '
'Default value is 120 seconds.'),
cfg.IntOpt('timeout',
default=300,
min=1,
help='Timeout for detecting a VM is down, and other VMs can pick the plan up. '
'Default value is 5 minutes. (integer value)'),
cfg.IntOpt('workers',
default=1,
min=1,
help='Number of workers for messaging service. '
'Default value is 1.'),
cfg.IntOpt('polling_interval',
default=1,
min=1,
help='Time between checking for new messages. '
'Default value is 1.'),
cfg.BoolOpt('debug',
default=False,
help='Log debug messages. '
'Default value is False.'),
]
CONF.register_opts(MESSAGING_SERVER_OPTS, group='messaging_server')
# Some class/method descriptions taken from this Oslo Messaging
# RPC API Tutorial/Demo: https://www.youtube.com/watch?v=Bf4gkeoBzvA
RPCSVRNAME = "Music-RPC Server"
class Target(object):
"""Returns a messaging target.
A target encapsulates all the information to identify where a message
should be sent or what messages a server is listening for.
"""
_topic = None
_topic_class = None
def __init__(self, topic):
"""Set the topic and topic class"""
self._topic = topic
# Because this is Music-specific, the server is
# built-in to the API class, stored as the transport.
# Thus, unlike oslo.messaging, there is no server
# specified for a target. There also isn't an
# exchange, namespace, or version at the moment.
# Dynamically create a message class for this topic.
self._topic_class = base.create_dynamic_model(
keyspace=CONF.messaging_server.keyspace,
baseclass=message.Message, classname=self.topic)
if not self._topic_class:
raise RuntimeError("Error setting the topic class for the messaging layer.")
@property
def topic(self):
"""Topic Property"""
return self._topic
@property
def topic_class(self):
"""Topic Class Property"""
return self._topic_class
class RPCClient(object):
"""Returns an RPC client using Music as a transport.
The RPC client is responsible for sending method invocations
to remote servers via a messaging transport.
A method invocation consists of a request context dictionary
a method name, and a dictionary of arguments. A cast() invocation
just sends the request and returns immediately. A call() invocation
waits for the server to send a return value.
"""
def __init__(self, conf, transport, target):
"""Set the transport and target"""
self.conf = conf
self.transport = transport
self.target = target
self.RPC = self.target.topic_class
# introduced as a quick means to cache messages
# with the aim of preventing unnecessary communication
# across conductor components.
# self.message_cache = dict()
def __check_rpc_status(self, rpc_id, rpc_method):
"""Check status for a given message id"""
# Wait check_interval seconds before proceeding
check_interval = self.conf.messaging_server.check_interval
time.sleep(check_interval)
if self.conf.messaging_server.debug:
LOG.debug("Checking status for message {} method {} on "
"topic {}".format(rpc_id, rpc_method, self.target.topic))
rpc = self.RPC.query.one(rpc_id)
return rpc
def cast(self, ctxt, method, args):
"""Asynchronous Call"""
rpc = self.RPC(action=self.RPC.CAST,
ctxt=ctxt, method=method, args=args)
assert (rpc.enqueued)
rpc_id = rpc.id
topic = self.target.topic
LOG.info(
_LI("Message {} on topic {} enqueued").format(rpc_id, topic))
if self.conf.messaging_server.debug:
LOG.debug("Casting method {} with args {}".format(method, args))
return rpc_id
def call(self, ctxt, method, args):
"""Synchronous Call"""
# # check if the call has a message saved in cache
# # key: string concatenation of ctxt + method + args
# # value: rpc response object
# key = ""
# for k, v in ctxt.items():
# key += str(k)
# key += '#' + str(v) + '#'
# key += '|' + str(method) + '|'
# for k, v in args.items():
# key += str(k)
# key += '#' + str(v) + '#'
#
# # check if the method has been called before
# # and cached
# if key in self.message_cache:
# LOG.debug("Retrieved method {} with args "
# "{} from cache".format(method, args))
# return self.message_cache[key]
rpc_start_time = time.time()
rpc = self.RPC(action=self.RPC.CALL,
ctxt=ctxt, method=method, args=args)
# TODO(jdandrea): Do something if the assert fails.
assert (rpc.enqueued)
rpc_id = rpc.id
topic = self.target.topic
LOG.info(
_LI("Message {} on topic {} enqueued.").format(rpc_id, topic))
if self.conf.messaging_server.debug:
LOG.debug("Calling method {} with args {}".format(method, args))
# Check message status within a thread
executor = futurist.ThreadPoolExecutor()
started_at = time.time()
while (time.time() - started_at) <= self.conf.messaging_server.response_timeout:
fut = executor.submit(self.__check_rpc_status, rpc_id, method)
rpc = fut.result()
if rpc and rpc.finished:
if self.conf.messaging_server.debug:
LOG.debug("Message {} method {} response received".
format(rpc_id, method))
break
executor.shutdown()
# Get response, delete message, and return response
if not rpc or not rpc.finished:
LOG.error(_LE("Message {} on topic {} timed out at {} seconds").
format(rpc_id, topic,
self.conf.messaging_server.response_timeout))
elif not rpc.ok:
LOG.error(_LE("Message {} on topic {} returned an error").
format(rpc_id, topic))
response = rpc.response
failure = rpc.failure
rpc.delete() # TODO(jdandrea): Put a TTL on the msg instead?
# self.message_cache[key] = response
LOG.debug("Elapsed time: {0:.3f} sec".format(
time.time() - rpc_start_time)
)
# If there's a failure, raise it as an exception
allowed = []
if failure is not None and failure != '':
# TODO(jdandrea): Do we need to populate allowed(_remote_exmods)?
raise rpc_common.deserialize_remote_exception(failure, allowed)
return response
class RPCService(cotyledon.Service):
"""Listener for the RPC service.
An RPC Service exposes a number of endpoints, each of which contain
a set of methods which may be invoked remotely by clients over a
given transport. To create an RPC server, you supply a transport,
target, and a list of endpoints.
Start the server with server.run()
"""
# This will appear in 'ps xaf'
name = RPCSVRNAME
def __init__(self, worker_id, conf, **kwargs):
"""Initializer"""
super(RPCService, self).__init__(worker_id)
if conf.messaging_server.debug:
LOG.debug("%s" % self.__class__.__name__)
self._init(conf, **kwargs)
self.running = True
def _init(self, conf, **kwargs):
"""Prepare to process requests"""
self.conf = conf
self.rpc_listener = None
self.transport = kwargs.pop('transport')
self.target = kwargs.pop('target')
self.endpoints = kwargs.pop('endpoints')
self.flush = kwargs.pop('flush')
self.kwargs = kwargs
self.RPC = self.target.topic_class
self.name = "{}, topic({})".format(RPCSVRNAME, self.target.topic)
self.messaging_owner_condition = {
"owner": socket.gethostname()
}
self.enqueued_status_condition = {
"status": message.Message.ENQUEUED
}
self.working_status_condition = {
"status": message.Message.WORKING
}
if self.flush:
self._flush_enqueued()
def _flush_enqueued(self):
"""Flush all messages with an enqueued status.
Use this only when the parent service is not running concurrently.
"""
msgs = self.RPC.query.all()
for msg in msgs:
if msg.enqueued:
if 'plan_name' in list(msg.ctxt.keys()): # Python 3 Conversion -- dict object to list object
LOG.info('Plan name: {}'.format(msg.ctxt['plan_name']))
elif 'plan_name' in list(msg.args.keys()): # Python 3 Conversion -- dict object to list object
LOG.info('Plan name: {}'.format(msg.args['plan_name']))
msg.delete()
def _log_error_and_update_msg(self, msg, error_msg):
LOG.error(error_msg)
msg.response = {
'error': {
'message': error_msg
}
}
msg.status = message.Message.ERROR
msg.update(condition=self.messaging_owner_condition)
def current_time_seconds(self):
"""Current time in milliseconds."""
return int(round(time.time()))
def millisec_to_sec(self, millisec):
"""Convert milliseconds to seconds"""
return millisec / 1000
def __check_for_messages(self):
"""Wait for the polling interval, then do the real message check."""
# Wait for at least poll_interval sec
polling_interval = self.conf.messaging_server.polling_interval
time.sleep(polling_interval)
if self.conf.messaging_server.debug:
LOG.debug("Topic {}: Checking for new messages".format(
self.target.topic))
self._do()
return True
# FIXME(jdandrea): Better name for this, please, kthx.
def _do(self):
"""Look for a new RPC call and serve it"""
# Get all the messages in queue
msgs = self.RPC.query.all()
for msg in msgs:
# Find the first msg marked as enqueued.
if msg.working and \
(self.current_time_seconds() - self.millisec_to_sec(msg.updated)) \
> self.conf.messaging_server.response_timeout:
msg.status = message.Message.ENQUEUED
msg.update(condition=self.working_status_condition)
if not msg.enqueued:
continue
if 'plan_name' in list(msg.ctxt.keys()): # Python 3 Conversion -- dict object to list object
LOG.info('Plan name: {}'.format(msg.ctxt['plan_name']))
elif 'plan_name' in list(msg.args.keys()): # Python 3 Conversion -- dict object to list object
LOG.info('Plan name: {}'.format(msg.args['plan_name']))
# Change the status to WORKING (operation with a lock)
msg.status = message.Message.WORKING
msg.owner = socket.gethostname()
# All update should have a condition (status == enqueued)
_is_updated = msg.update(condition=self.enqueued_status_condition)
if not _is_updated or 'FAILURE' in _is_updated:
continue
# RPC methods must not start/end with an underscore.
if msg.method.startswith('_') or msg.method.endswith('_'):
error_msg = _LE("Method {} must not start or end"
"with underscores").format(msg.method)
self._log_error_and_update_msg(msg, error_msg)
return
# The first endpoint that supports the method wins.
method = None
for endpoint in self.endpoints:
if msg.method not in dir(endpoint):
continue
endpoint_method = getattr(endpoint, msg.method)
if callable(endpoint_method):
method = endpoint_method
if self.conf.messaging_server.debug:
LOG.debug("Message {} method {} is "
"handled by endpoint {}".
format(msg.id, msg.method,
method.__str__.__name__))
break
if not method:
error_msg = _LE("Message {} method {} unsupported "
"in endpoints.").format(msg.id, msg.method)
self._log_error_and_update_msg(msg, error_msg)
return
# All methods must take a ctxt and args param.
if inspect.getfullargspec(method).args != ['self', 'ctx', 'arg']:
error_msg = _LE("Method {} must take three args: "
"self, ctx, arg").format(msg.method)
self._log_error_and_update_msg(msg, error_msg)
return
LOG.info(_LI("Message {} method {} received").format(
msg.id, msg.method))
if self.conf.messaging_server.debug:
LOG.debug(
_LI("Message {} method {} context: {}, args: {}").format(
msg.id, msg.method, msg.ctxt, msg.args))
failure = None
try:
# Add the template to conductor.plan table
# Methods return an opaque dictionary
result = method(msg.ctxt, msg.args)
# FIXME(jdandrea): Remove response/error and make it opaque.
# That means this would just be assigned result outright.
msg.response = result.get('response', result)
except Exception:
# Current sys.exc_info() content can be overridden
# by another exception raised by a log handler during
# LOG.exception(). So keep a copy and delete it later.
failure = sys.exc_info()
# Do not log details about the failure here. It will
# be returned later upstream.
LOG.exception(_LE('Exception during message handling'))
try:
if failure is None:
msg.status = message.Message.COMPLETED
else:
msg.failure = \
rpc_common.serialize_remote_exception(failure)
msg.status = message.Message.ERROR
LOG.info(_LI("Message {} method {}, status: {}").format(
msg.id, msg.method, msg.status))
if self.conf.messaging_server.debug:
LOG.debug("Message {} method {}, response: {}".format(
msg.id, msg.method, msg.response))
_is_success = 'FAILURE'
while 'FAILURE' in _is_success and (self.current_time_seconds() - self.millisec_to_sec(msg.updated)) \
<= self.conf.messaging_server.response_timeout:
_is_success = msg.update()
LOG.info(_LI("updating the message status from working to {}, "
"atomic update response from MUSIC {}").format(msg.status, _is_success))
except Exception:
LOG.exception(_LE("Can not send reply for message {} "
"method {}").
format(msg.id, msg.method))
finally:
# Remove circular object reference between the current
# stack frame and the traceback in exc_info.
del failure
def _gracefully_stop(self):
"""Gracefully stop working on things"""
pass
def _restart(self):
"""Prepare to restart the RPC Server"""
pass
def run(self):
|
def terminate(self):
"""Terminate"""
if self.conf.messaging_server.debug:
LOG.debug("%s" % self.__class__.__name__)
self.running = False
self._gracefully_stop()
super(RPCService, self).terminate()
def reload(self):
"""Reload"""
if self.conf.messaging_server.debug:
LOG.debug("%s" % self.__class__.__name__)
self._restart()
| """Run"""
# The server listens for messages and calls the
# appropriate methods. It also deletes messages once
# processed.
if self.conf.messaging_server.debug:
LOG.debug("%s" % self.__class__.__name__)
# Listen for messages within a thread
executor = futurist.ThreadPoolExecutor()
while self.running:
fut = executor.submit(self.__check_for_messages)
fut.result()
executor.shutdown() | identifier_body |
mod.rs | mod light;
pub use light::*;
use crate::StandardMaterial;
use bevy_asset::{Assets, Handle};
use bevy_ecs::{prelude::*, system::SystemState};
use bevy_math::Mat4;
use bevy_render2::{
core_pipeline::Transparent3dPhase,
mesh::Mesh,
pipeline::*,
render_graph::{Node, NodeRunError, RenderGraphContext},
render_phase::{Draw, DrawFunctions, Drawable, RenderPhase, TrackedRenderPass},
render_resource::{BindGroupBuilder, BindGroupId, BufferId, DynamicUniformVec},
renderer::{RenderContext, RenderResources},
shader::{Shader, ShaderStage, ShaderStages},
texture::{TextureFormat, TextureSampleType},
view::{ViewMeta, ViewUniform},
};
use bevy_transform::components::GlobalTransform;
pub struct PbrShaders {
pipeline: PipelineId,
pipeline_descriptor: RenderPipelineDescriptor,
}
// TODO: this pattern for initializing the shaders / pipeline isn't ideal. this should be handled by the asset system
impl FromWorld for PbrShaders {
fn from_world(world: &mut World) -> Self {
let render_resources = world.get_resource::<RenderResources>().unwrap();
let vertex_shader = Shader::from_glsl(ShaderStage::Vertex, include_str!("pbr.vert"))
.get_spirv_shader(None)
.unwrap();
let fragment_shader = Shader::from_glsl(ShaderStage::Fragment, include_str!("pbr.frag"))
.get_spirv_shader(None)
.unwrap();
let vertex_layout = vertex_shader.reflect_layout(&Default::default()).unwrap();
let fragment_layout = fragment_shader.reflect_layout(&Default::default()).unwrap();
let mut pipeline_layout =
PipelineLayout::from_shader_layouts(&mut [vertex_layout, fragment_layout]);
let vertex = render_resources.create_shader_module(&vertex_shader);
let fragment = render_resources.create_shader_module(&fragment_shader);
pipeline_layout.vertex_buffer_descriptors = vec![VertexBufferLayout {
stride: 32,
name: "Vertex".into(),
step_mode: InputStepMode::Vertex,
attributes: vec![
// GOTCHA! Vertex_Position isn't first in the buffer due to how Mesh sorts attributes (alphabetically)
VertexAttribute {
name: "Vertex_Position".into(),
format: VertexFormat::Float32x3,
offset: 12,
shader_location: 0,
},
VertexAttribute {
name: "Vertex_Normals".into(),
format: VertexFormat::Float32x3,
offset: 0,
shader_location: 1,
},
VertexAttribute {
name: "Vertex_Uv".into(),
format: VertexFormat::Float32x2,
offset: 24,
shader_location: 2,
},
],
}];
pipeline_layout.bind_group_mut(0).bindings[0].set_dynamic(true);
pipeline_layout.bind_group_mut(0).bindings[1].set_dynamic(true);
if let BindType::Texture { sample_type, .. } =
&mut pipeline_layout.bind_group_mut(0).bindings[2].bind_type
{
*sample_type = TextureSampleType::Depth;
}
if let BindType::Sampler { comparison, .. } =
&mut pipeline_layout.bind_group_mut(0).bindings[3].bind_type
{
*comparison = true;
}
pipeline_layout.bind_group_mut(1).bindings[0].set_dynamic(true);
pipeline_layout.update_bind_group_ids();
let pipeline_descriptor = RenderPipelineDescriptor {
depth_stencil: Some(DepthStencilState {
format: TextureFormat::Depth32Float,
depth_write_enabled: true,
depth_compare: CompareFunction::Less,
stencil: StencilState {
front: StencilFaceState::IGNORE,
back: StencilFaceState::IGNORE,
read_mask: 0,
write_mask: 0,
},
bias: DepthBiasState {
constant: 0,
slope_scale: 0.0,
clamp: 0.0,
},
}),
color_target_states: vec![ColorTargetState {
format: TextureFormat::default(),
blend: Some(BlendState {
color: BlendComponent {
src_factor: BlendFactor::SrcAlpha,
dst_factor: BlendFactor::OneMinusSrcAlpha,
operation: BlendOperation::Add,
},
alpha: BlendComponent {
src_factor: BlendFactor::One,
dst_factor: BlendFactor::One,
operation: BlendOperation::Add,
},
}),
write_mask: ColorWrite::ALL,
}],
..RenderPipelineDescriptor::new(
ShaderStages {
vertex,
fragment: Some(fragment),
},
pipeline_layout,
)
};
let pipeline = render_resources.create_render_pipeline(&pipeline_descriptor);
PbrShaders {
pipeline,
pipeline_descriptor,
}
}
}
struct ExtractedMesh {
transform: Mat4,
vertex_buffer: BufferId,
index_info: Option<IndexInfo>,
transform_binding_offset: u32,
}
struct IndexInfo {
buffer: BufferId,
count: u32,
}
pub struct ExtractedMeshes {
meshes: Vec<ExtractedMesh>,
}
pub fn extract_meshes(
mut commands: Commands,
meshes: Res<Assets<Mesh>>,
_materials: Res<Assets<StandardMaterial>>,
query: Query<(&GlobalTransform, &Handle<Mesh>, &Handle<StandardMaterial>)>,
) {
let mut extracted_meshes = Vec::new();
for (transform, mesh_handle, _material_handle) in query.iter() {
if let Some(mesh) = meshes.get(mesh_handle) |
}
commands.insert_resource(ExtractedMeshes {
meshes: extracted_meshes,
});
}
#[derive(Default)]
pub struct MeshMeta {
transform_uniforms: DynamicUniformVec<Mat4>,
}
pub fn prepare_meshes(
render_resources: Res<RenderResources>,
mut mesh_meta: ResMut<MeshMeta>,
mut extracted_meshes: ResMut<ExtractedMeshes>,
) {
mesh_meta
.transform_uniforms
.reserve_and_clear(extracted_meshes.meshes.len(), &render_resources);
for extracted_mesh in extracted_meshes.meshes.iter_mut() {
extracted_mesh.transform_binding_offset =
mesh_meta.transform_uniforms.push(extracted_mesh.transform);
}
mesh_meta
.transform_uniforms
.write_to_staging_buffer(&render_resources);
}
// TODO: This is temporary. Once we expose BindGroupLayouts directly, we can create view bind groups without specific shader context
struct MeshViewBindGroups {
view_bind_group: BindGroupId,
mesh_transform_bind_group: BindGroupId,
}
pub fn queue_meshes(
mut commands: Commands,
draw_functions: Res<DrawFunctions>,
render_resources: Res<RenderResources>,
pbr_shaders: Res<PbrShaders>,
shadow_shaders: Res<ShadowShaders>,
mesh_meta: Res<MeshMeta>,
light_meta: Res<LightMeta>,
view_meta: Res<ViewMeta>,
extracted_meshes: Res<ExtractedMeshes>,
mut views: Query<(Entity, &ViewLights, &mut RenderPhase<Transparent3dPhase>)>,
mut view_light_shadow_phases: Query<&mut RenderPhase<ShadowPhase>>,
) {
if extracted_meshes.meshes.is_empty() {
return;
}
for (entity, view_lights, mut transparent_phase) in views.iter_mut() {
let layout = &pbr_shaders.pipeline_descriptor.layout;
let view_bind_group = BindGroupBuilder::default()
.add_binding(0, view_meta.uniforms.binding())
.add_binding(1, light_meta.view_gpu_lights.binding())
.add_binding(2, view_lights.light_depth_texture_view)
.add_binding(3, shadow_shaders.light_sampler)
.finish();
// TODO: this will only create the bind group if it isn't already created. this is a bit nasty
render_resources.create_bind_group(layout.bind_group(0).id, &view_bind_group);
let mesh_transform_bind_group = BindGroupBuilder::default()
.add_binding(0, mesh_meta.transform_uniforms.binding())
.finish();
render_resources.create_bind_group(layout.bind_group(1).id, &mesh_transform_bind_group);
commands.entity(entity).insert(MeshViewBindGroups {
view_bind_group: view_bind_group.id,
mesh_transform_bind_group: mesh_transform_bind_group.id,
});
let draw_pbr = draw_functions.read().get_id::<DrawPbr>().unwrap();
for i in 0..extracted_meshes.meshes.len() {
// TODO: currently there is only "transparent phase". this should pick transparent vs opaque according to the mesh material
transparent_phase.add(Drawable {
draw_function: draw_pbr,
draw_key: i,
sort_key: 0, // TODO: sort back-to-front
});
}
// ultimately lights should check meshes for relevancy (ex: light views can "see" different meshes than the main view can)
let draw_shadow_mesh = draw_functions.read().get_id::<DrawShadowMesh>().unwrap();
for view_light_entity in view_lights.lights.iter().copied() {
let mut shadow_phase = view_light_shadow_phases.get_mut(view_light_entity).unwrap();
let layout = &shadow_shaders.pipeline_descriptor.layout;
let shadow_view_bind_group = BindGroupBuilder::default()
.add_binding(0, view_meta.uniforms.binding())
.finish();
render_resources.create_bind_group(layout.bind_group(0).id, &shadow_view_bind_group);
// TODO: this should only queue up meshes that are actually visible by each "light view"
for i in 0..extracted_meshes.meshes.len() {
shadow_phase.add(Drawable {
draw_function: draw_shadow_mesh,
draw_key: i,
sort_key: 0, // TODO: sort back-to-front
})
}
commands
.entity(view_light_entity)
.insert(MeshViewBindGroups {
view_bind_group: shadow_view_bind_group.id,
mesh_transform_bind_group: mesh_transform_bind_group.id,
});
}
}
}
// TODO: this logic can be moved to prepare_meshes once wgpu::Queue is exposed directly
pub struct PbrNode;
impl Node for PbrNode {
fn run(
&self,
_graph: &mut RenderGraphContext,
render_context: &mut dyn RenderContext,
world: &World,
) -> Result<(), NodeRunError> {
let mesh_meta = world.get_resource::<MeshMeta>().unwrap();
let light_meta = world.get_resource::<LightMeta>().unwrap();
mesh_meta
.transform_uniforms
.write_to_uniform_buffer(render_context);
light_meta
.view_gpu_lights
.write_to_uniform_buffer(render_context);
Ok(())
}
}
type DrawPbrParams<'a> = (
Res<'a, PbrShaders>,
Res<'a, ExtractedMeshes>,
Query<'a, (&'a ViewUniform, &'a MeshViewBindGroups, &'a ViewLights)>,
);
pub struct DrawPbr {
params: SystemState<DrawPbrParams<'static>>,
}
impl DrawPbr {
pub fn new(world: &mut World) -> Self {
Self {
params: SystemState::new(world),
}
}
}
impl Draw for DrawPbr {
fn draw(
&mut self,
world: &World,
pass: &mut TrackedRenderPass,
view: Entity,
draw_key: usize,
_sort_key: usize,
) {
let (pbr_shaders, extracted_meshes, views) = self.params.get(world);
let (view_uniforms, mesh_view_bind_groups, view_lights) = views.get(view).unwrap();
let layout = &pbr_shaders.pipeline_descriptor.layout;
let extracted_mesh = &extracted_meshes.meshes[draw_key];
pass.set_pipeline(pbr_shaders.pipeline);
pass.set_bind_group(
0,
layout.bind_group(0).id,
mesh_view_bind_groups.view_bind_group,
Some(&[
view_uniforms.view_uniform_offset,
view_lights.gpu_light_binding_index,
]),
);
pass.set_bind_group(
1,
layout.bind_group(1).id,
mesh_view_bind_groups.mesh_transform_bind_group,
Some(&[extracted_mesh.transform_binding_offset]),
);
pass.set_vertex_buffer(0, extracted_mesh.vertex_buffer, 0);
if let Some(index_info) = &extracted_mesh.index_info {
pass.set_index_buffer(index_info.buffer, 0, IndexFormat::Uint32);
pass.draw_indexed(0..index_info.count, 0, 0..1);
} else {
panic!("non-indexed drawing not supported yet")
}
}
}
| {
if let Some(gpu_data) = &mesh.gpu_data() {
extracted_meshes.push(ExtractedMesh {
transform: transform.compute_matrix(),
vertex_buffer: gpu_data.vertex_buffer,
index_info: gpu_data.index_buffer.map(|i| IndexInfo {
buffer: i,
count: mesh.indices().unwrap().len() as u32,
}),
transform_binding_offset: 0,
})
}
} | conditional_block |
mod.rs | mod light;
pub use light::*;
use crate::StandardMaterial;
use bevy_asset::{Assets, Handle};
use bevy_ecs::{prelude::*, system::SystemState};
use bevy_math::Mat4;
use bevy_render2::{
core_pipeline::Transparent3dPhase,
mesh::Mesh,
pipeline::*,
render_graph::{Node, NodeRunError, RenderGraphContext},
render_phase::{Draw, DrawFunctions, Drawable, RenderPhase, TrackedRenderPass},
render_resource::{BindGroupBuilder, BindGroupId, BufferId, DynamicUniformVec},
renderer::{RenderContext, RenderResources},
shader::{Shader, ShaderStage, ShaderStages},
texture::{TextureFormat, TextureSampleType},
view::{ViewMeta, ViewUniform},
};
use bevy_transform::components::GlobalTransform;
pub struct PbrShaders {
pipeline: PipelineId,
pipeline_descriptor: RenderPipelineDescriptor,
}
// TODO: this pattern for initializing the shaders / pipeline isn't ideal. this should be handled by the asset system
impl FromWorld for PbrShaders {
fn from_world(world: &mut World) -> Self {
let render_resources = world.get_resource::<RenderResources>().unwrap();
let vertex_shader = Shader::from_glsl(ShaderStage::Vertex, include_str!("pbr.vert"))
.get_spirv_shader(None)
.unwrap();
let fragment_shader = Shader::from_glsl(ShaderStage::Fragment, include_str!("pbr.frag"))
.get_spirv_shader(None)
.unwrap();
let vertex_layout = vertex_shader.reflect_layout(&Default::default()).unwrap();
let fragment_layout = fragment_shader.reflect_layout(&Default::default()).unwrap();
let mut pipeline_layout =
PipelineLayout::from_shader_layouts(&mut [vertex_layout, fragment_layout]);
let vertex = render_resources.create_shader_module(&vertex_shader);
let fragment = render_resources.create_shader_module(&fragment_shader);
pipeline_layout.vertex_buffer_descriptors = vec![VertexBufferLayout {
stride: 32,
name: "Vertex".into(),
step_mode: InputStepMode::Vertex,
attributes: vec![
// GOTCHA! Vertex_Position isn't first in the buffer due to how Mesh sorts attributes (alphabetically)
VertexAttribute {
name: "Vertex_Position".into(),
format: VertexFormat::Float32x3,
offset: 12,
shader_location: 0,
},
VertexAttribute {
name: "Vertex_Normals".into(),
format: VertexFormat::Float32x3,
offset: 0,
shader_location: 1,
},
VertexAttribute {
name: "Vertex_Uv".into(),
format: VertexFormat::Float32x2,
offset: 24,
shader_location: 2,
},
],
}];
pipeline_layout.bind_group_mut(0).bindings[0].set_dynamic(true);
pipeline_layout.bind_group_mut(0).bindings[1].set_dynamic(true);
if let BindType::Texture { sample_type, .. } =
&mut pipeline_layout.bind_group_mut(0).bindings[2].bind_type
{
*sample_type = TextureSampleType::Depth;
}
if let BindType::Sampler { comparison, .. } =
&mut pipeline_layout.bind_group_mut(0).bindings[3].bind_type
{
*comparison = true;
}
pipeline_layout.bind_group_mut(1).bindings[0].set_dynamic(true);
pipeline_layout.update_bind_group_ids();
let pipeline_descriptor = RenderPipelineDescriptor {
depth_stencil: Some(DepthStencilState {
format: TextureFormat::Depth32Float,
depth_write_enabled: true,
depth_compare: CompareFunction::Less,
stencil: StencilState {
front: StencilFaceState::IGNORE,
back: StencilFaceState::IGNORE,
read_mask: 0,
write_mask: 0,
},
bias: DepthBiasState {
constant: 0,
slope_scale: 0.0,
clamp: 0.0,
},
}),
color_target_states: vec![ColorTargetState {
format: TextureFormat::default(),
blend: Some(BlendState {
color: BlendComponent {
src_factor: BlendFactor::SrcAlpha,
dst_factor: BlendFactor::OneMinusSrcAlpha,
operation: BlendOperation::Add,
},
alpha: BlendComponent {
src_factor: BlendFactor::One,
dst_factor: BlendFactor::One,
operation: BlendOperation::Add,
},
}),
write_mask: ColorWrite::ALL,
}],
..RenderPipelineDescriptor::new(
ShaderStages {
vertex,
fragment: Some(fragment),
},
pipeline_layout,
)
};
let pipeline = render_resources.create_render_pipeline(&pipeline_descriptor);
PbrShaders {
pipeline,
pipeline_descriptor,
}
}
}
struct ExtractedMesh {
transform: Mat4,
vertex_buffer: BufferId,
index_info: Option<IndexInfo>,
transform_binding_offset: u32,
}
struct IndexInfo {
buffer: BufferId,
count: u32,
}
pub struct ExtractedMeshes {
meshes: Vec<ExtractedMesh>,
}
pub fn extract_meshes(
mut commands: Commands,
meshes: Res<Assets<Mesh>>,
_materials: Res<Assets<StandardMaterial>>,
query: Query<(&GlobalTransform, &Handle<Mesh>, &Handle<StandardMaterial>)>,
) {
let mut extracted_meshes = Vec::new();
for (transform, mesh_handle, _material_handle) in query.iter() {
if let Some(mesh) = meshes.get(mesh_handle) {
if let Some(gpu_data) = &mesh.gpu_data() {
extracted_meshes.push(ExtractedMesh {
transform: transform.compute_matrix(),
vertex_buffer: gpu_data.vertex_buffer,
index_info: gpu_data.index_buffer.map(|i| IndexInfo {
buffer: i,
count: mesh.indices().unwrap().len() as u32,
}),
transform_binding_offset: 0,
})
}
}
}
commands.insert_resource(ExtractedMeshes {
meshes: extracted_meshes,
});
}
#[derive(Default)]
pub struct MeshMeta {
transform_uniforms: DynamicUniformVec<Mat4>,
}
pub fn prepare_meshes(
render_resources: Res<RenderResources>,
mut mesh_meta: ResMut<MeshMeta>,
mut extracted_meshes: ResMut<ExtractedMeshes>,
) {
mesh_meta
.transform_uniforms
.reserve_and_clear(extracted_meshes.meshes.len(), &render_resources);
for extracted_mesh in extracted_meshes.meshes.iter_mut() {
extracted_mesh.transform_binding_offset =
mesh_meta.transform_uniforms.push(extracted_mesh.transform);
}
mesh_meta
.transform_uniforms
.write_to_staging_buffer(&render_resources);
}
// TODO: This is temporary. Once we expose BindGroupLayouts directly, we can create view bind groups without specific shader context
struct MeshViewBindGroups {
view_bind_group: BindGroupId,
mesh_transform_bind_group: BindGroupId,
}
pub fn queue_meshes(
mut commands: Commands,
draw_functions: Res<DrawFunctions>,
render_resources: Res<RenderResources>,
pbr_shaders: Res<PbrShaders>,
shadow_shaders: Res<ShadowShaders>,
mesh_meta: Res<MeshMeta>,
light_meta: Res<LightMeta>,
view_meta: Res<ViewMeta>,
extracted_meshes: Res<ExtractedMeshes>,
mut views: Query<(Entity, &ViewLights, &mut RenderPhase<Transparent3dPhase>)>,
mut view_light_shadow_phases: Query<&mut RenderPhase<ShadowPhase>>,
) {
if extracted_meshes.meshes.is_empty() {
return;
}
for (entity, view_lights, mut transparent_phase) in views.iter_mut() {
let layout = &pbr_shaders.pipeline_descriptor.layout;
let view_bind_group = BindGroupBuilder::default()
.add_binding(0, view_meta.uniforms.binding())
.add_binding(1, light_meta.view_gpu_lights.binding())
.add_binding(2, view_lights.light_depth_texture_view)
.add_binding(3, shadow_shaders.light_sampler)
.finish();
// TODO: this will only create the bind group if it isn't already created. this is a bit nasty
render_resources.create_bind_group(layout.bind_group(0).id, &view_bind_group);
let mesh_transform_bind_group = BindGroupBuilder::default()
.add_binding(0, mesh_meta.transform_uniforms.binding())
.finish();
render_resources.create_bind_group(layout.bind_group(1).id, &mesh_transform_bind_group);
commands.entity(entity).insert(MeshViewBindGroups {
view_bind_group: view_bind_group.id,
mesh_transform_bind_group: mesh_transform_bind_group.id,
});
let draw_pbr = draw_functions.read().get_id::<DrawPbr>().unwrap();
for i in 0..extracted_meshes.meshes.len() {
// TODO: currently there is only "transparent phase". this should pick transparent vs opaque according to the mesh material
transparent_phase.add(Drawable {
draw_function: draw_pbr,
draw_key: i,
sort_key: 0, // TODO: sort back-to-front
});
}
// ultimately lights should check meshes for relevancy (ex: light views can "see" different meshes than the main view can)
let draw_shadow_mesh = draw_functions.read().get_id::<DrawShadowMesh>().unwrap();
for view_light_entity in view_lights.lights.iter().copied() {
let mut shadow_phase = view_light_shadow_phases.get_mut(view_light_entity).unwrap();
let layout = &shadow_shaders.pipeline_descriptor.layout;
let shadow_view_bind_group = BindGroupBuilder::default()
.add_binding(0, view_meta.uniforms.binding())
.finish();
render_resources.create_bind_group(layout.bind_group(0).id, &shadow_view_bind_group);
// TODO: this should only queue up meshes that are actually visible by each "light view"
for i in 0..extracted_meshes.meshes.len() {
shadow_phase.add(Drawable {
draw_function: draw_shadow_mesh,
draw_key: i,
sort_key: 0, // TODO: sort back-to-front
})
}
commands
.entity(view_light_entity)
.insert(MeshViewBindGroups {
view_bind_group: shadow_view_bind_group.id,
mesh_transform_bind_group: mesh_transform_bind_group.id,
});
}
}
}
// TODO: this logic can be moved to prepare_meshes once wgpu::Queue is exposed directly
pub struct PbrNode;
impl Node for PbrNode {
fn run(
&self,
_graph: &mut RenderGraphContext,
render_context: &mut dyn RenderContext,
world: &World,
) -> Result<(), NodeRunError> {
let mesh_meta = world.get_resource::<MeshMeta>().unwrap();
let light_meta = world.get_resource::<LightMeta>().unwrap();
mesh_meta
.transform_uniforms
.write_to_uniform_buffer(render_context);
light_meta
.view_gpu_lights
.write_to_uniform_buffer(render_context);
Ok(())
}
}
type DrawPbrParams<'a> = (
Res<'a, PbrShaders>,
Res<'a, ExtractedMeshes>,
Query<'a, (&'a ViewUniform, &'a MeshViewBindGroups, &'a ViewLights)>,
);
pub struct DrawPbr {
params: SystemState<DrawPbrParams<'static>>,
}
impl DrawPbr {
pub fn new(world: &mut World) -> Self |
}
impl Draw for DrawPbr {
fn draw(
&mut self,
world: &World,
pass: &mut TrackedRenderPass,
view: Entity,
draw_key: usize,
_sort_key: usize,
) {
let (pbr_shaders, extracted_meshes, views) = self.params.get(world);
let (view_uniforms, mesh_view_bind_groups, view_lights) = views.get(view).unwrap();
let layout = &pbr_shaders.pipeline_descriptor.layout;
let extracted_mesh = &extracted_meshes.meshes[draw_key];
pass.set_pipeline(pbr_shaders.pipeline);
pass.set_bind_group(
0,
layout.bind_group(0).id,
mesh_view_bind_groups.view_bind_group,
Some(&[
view_uniforms.view_uniform_offset,
view_lights.gpu_light_binding_index,
]),
);
pass.set_bind_group(
1,
layout.bind_group(1).id,
mesh_view_bind_groups.mesh_transform_bind_group,
Some(&[extracted_mesh.transform_binding_offset]),
);
pass.set_vertex_buffer(0, extracted_mesh.vertex_buffer, 0);
if let Some(index_info) = &extracted_mesh.index_info {
pass.set_index_buffer(index_info.buffer, 0, IndexFormat::Uint32);
pass.draw_indexed(0..index_info.count, 0, 0..1);
} else {
panic!("non-indexed drawing not supported yet")
}
}
}
| {
Self {
params: SystemState::new(world),
}
} | identifier_body |
mod.rs | mod light;
pub use light::*;
use crate::StandardMaterial;
use bevy_asset::{Assets, Handle};
use bevy_ecs::{prelude::*, system::SystemState};
use bevy_math::Mat4;
use bevy_render2::{
core_pipeline::Transparent3dPhase,
mesh::Mesh,
pipeline::*,
render_graph::{Node, NodeRunError, RenderGraphContext},
render_phase::{Draw, DrawFunctions, Drawable, RenderPhase, TrackedRenderPass},
render_resource::{BindGroupBuilder, BindGroupId, BufferId, DynamicUniformVec},
renderer::{RenderContext, RenderResources},
shader::{Shader, ShaderStage, ShaderStages},
texture::{TextureFormat, TextureSampleType},
view::{ViewMeta, ViewUniform},
};
use bevy_transform::components::GlobalTransform;
pub struct PbrShaders {
pipeline: PipelineId,
pipeline_descriptor: RenderPipelineDescriptor,
}
// TODO: this pattern for initializing the shaders / pipeline isn't ideal. this should be handled by the asset system
impl FromWorld for PbrShaders {
fn from_world(world: &mut World) -> Self {
let render_resources = world.get_resource::<RenderResources>().unwrap();
let vertex_shader = Shader::from_glsl(ShaderStage::Vertex, include_str!("pbr.vert"))
.get_spirv_shader(None)
.unwrap();
let fragment_shader = Shader::from_glsl(ShaderStage::Fragment, include_str!("pbr.frag"))
.get_spirv_shader(None)
.unwrap();
let vertex_layout = vertex_shader.reflect_layout(&Default::default()).unwrap();
let fragment_layout = fragment_shader.reflect_layout(&Default::default()).unwrap();
let mut pipeline_layout =
PipelineLayout::from_shader_layouts(&mut [vertex_layout, fragment_layout]);
let vertex = render_resources.create_shader_module(&vertex_shader);
let fragment = render_resources.create_shader_module(&fragment_shader);
pipeline_layout.vertex_buffer_descriptors = vec![VertexBufferLayout {
stride: 32,
name: "Vertex".into(),
step_mode: InputStepMode::Vertex,
attributes: vec![
// GOTCHA! Vertex_Position isn't first in the buffer due to how Mesh sorts attributes (alphabetically)
VertexAttribute {
name: "Vertex_Position".into(),
format: VertexFormat::Float32x3,
offset: 12,
shader_location: 0,
},
VertexAttribute {
name: "Vertex_Normals".into(),
format: VertexFormat::Float32x3,
offset: 0,
shader_location: 1,
},
VertexAttribute {
name: "Vertex_Uv".into(),
format: VertexFormat::Float32x2,
offset: 24,
shader_location: 2,
},
],
}];
pipeline_layout.bind_group_mut(0).bindings[0].set_dynamic(true);
pipeline_layout.bind_group_mut(0).bindings[1].set_dynamic(true);
if let BindType::Texture { sample_type, .. } =
&mut pipeline_layout.bind_group_mut(0).bindings[2].bind_type
{
*sample_type = TextureSampleType::Depth;
}
if let BindType::Sampler { comparison, .. } =
&mut pipeline_layout.bind_group_mut(0).bindings[3].bind_type
{
*comparison = true;
}
pipeline_layout.bind_group_mut(1).bindings[0].set_dynamic(true);
pipeline_layout.update_bind_group_ids();
let pipeline_descriptor = RenderPipelineDescriptor {
depth_stencil: Some(DepthStencilState {
format: TextureFormat::Depth32Float,
depth_write_enabled: true,
depth_compare: CompareFunction::Less,
stencil: StencilState {
front: StencilFaceState::IGNORE,
back: StencilFaceState::IGNORE,
read_mask: 0,
write_mask: 0,
},
bias: DepthBiasState {
constant: 0,
slope_scale: 0.0,
clamp: 0.0,
},
}),
color_target_states: vec![ColorTargetState {
format: TextureFormat::default(),
blend: Some(BlendState {
color: BlendComponent {
src_factor: BlendFactor::SrcAlpha,
dst_factor: BlendFactor::OneMinusSrcAlpha,
operation: BlendOperation::Add,
},
alpha: BlendComponent {
src_factor: BlendFactor::One,
dst_factor: BlendFactor::One,
operation: BlendOperation::Add,
},
}),
write_mask: ColorWrite::ALL,
}],
..RenderPipelineDescriptor::new(
ShaderStages {
vertex,
fragment: Some(fragment),
},
pipeline_layout,
)
};
let pipeline = render_resources.create_render_pipeline(&pipeline_descriptor);
PbrShaders {
pipeline,
pipeline_descriptor,
}
}
}
struct ExtractedMesh {
transform: Mat4,
vertex_buffer: BufferId,
index_info: Option<IndexInfo>,
transform_binding_offset: u32,
}
struct IndexInfo {
buffer: BufferId,
count: u32,
}
pub struct ExtractedMeshes {
meshes: Vec<ExtractedMesh>,
}
pub fn extract_meshes(
mut commands: Commands,
meshes: Res<Assets<Mesh>>,
_materials: Res<Assets<StandardMaterial>>,
query: Query<(&GlobalTransform, &Handle<Mesh>, &Handle<StandardMaterial>)>,
) {
let mut extracted_meshes = Vec::new();
for (transform, mesh_handle, _material_handle) in query.iter() {
if let Some(mesh) = meshes.get(mesh_handle) {
if let Some(gpu_data) = &mesh.gpu_data() {
extracted_meshes.push(ExtractedMesh {
transform: transform.compute_matrix(),
vertex_buffer: gpu_data.vertex_buffer,
index_info: gpu_data.index_buffer.map(|i| IndexInfo {
buffer: i,
count: mesh.indices().unwrap().len() as u32,
}),
transform_binding_offset: 0,
})
}
}
}
commands.insert_resource(ExtractedMeshes {
meshes: extracted_meshes,
});
}
#[derive(Default)]
pub struct MeshMeta {
transform_uniforms: DynamicUniformVec<Mat4>,
}
pub fn prepare_meshes(
render_resources: Res<RenderResources>,
mut mesh_meta: ResMut<MeshMeta>,
mut extracted_meshes: ResMut<ExtractedMeshes>,
) {
mesh_meta
.transform_uniforms
.reserve_and_clear(extracted_meshes.meshes.len(), &render_resources);
for extracted_mesh in extracted_meshes.meshes.iter_mut() {
extracted_mesh.transform_binding_offset =
mesh_meta.transform_uniforms.push(extracted_mesh.transform);
}
mesh_meta
.transform_uniforms
.write_to_staging_buffer(&render_resources);
}
// TODO: This is temporary. Once we expose BindGroupLayouts directly, we can create view bind groups without specific shader context
struct MeshViewBindGroups {
view_bind_group: BindGroupId,
mesh_transform_bind_group: BindGroupId,
}
pub fn queue_meshes(
mut commands: Commands,
draw_functions: Res<DrawFunctions>,
render_resources: Res<RenderResources>,
pbr_shaders: Res<PbrShaders>,
shadow_shaders: Res<ShadowShaders>,
mesh_meta: Res<MeshMeta>,
light_meta: Res<LightMeta>,
view_meta: Res<ViewMeta>,
extracted_meshes: Res<ExtractedMeshes>,
mut views: Query<(Entity, &ViewLights, &mut RenderPhase<Transparent3dPhase>)>,
mut view_light_shadow_phases: Query<&mut RenderPhase<ShadowPhase>>,
) {
if extracted_meshes.meshes.is_empty() {
return;
}
for (entity, view_lights, mut transparent_phase) in views.iter_mut() {
let layout = &pbr_shaders.pipeline_descriptor.layout;
let view_bind_group = BindGroupBuilder::default()
.add_binding(0, view_meta.uniforms.binding())
.add_binding(1, light_meta.view_gpu_lights.binding())
.add_binding(2, view_lights.light_depth_texture_view)
.add_binding(3, shadow_shaders.light_sampler)
.finish();
// TODO: this will only create the bind group if it isn't already created. this is a bit nasty
render_resources.create_bind_group(layout.bind_group(0).id, &view_bind_group);
let mesh_transform_bind_group = BindGroupBuilder::default()
.add_binding(0, mesh_meta.transform_uniforms.binding())
.finish();
render_resources.create_bind_group(layout.bind_group(1).id, &mesh_transform_bind_group);
commands.entity(entity).insert(MeshViewBindGroups {
view_bind_group: view_bind_group.id,
mesh_transform_bind_group: mesh_transform_bind_group.id,
});
let draw_pbr = draw_functions.read().get_id::<DrawPbr>().unwrap();
for i in 0..extracted_meshes.meshes.len() {
// TODO: currently there is only "transparent phase". this should pick transparent vs opaque according to the mesh material
transparent_phase.add(Drawable {
draw_function: draw_pbr,
draw_key: i,
sort_key: 0, // TODO: sort back-to-front
});
}
// ultimately lights should check meshes for relevancy (ex: light views can "see" different meshes than the main view can)
let draw_shadow_mesh = draw_functions.read().get_id::<DrawShadowMesh>().unwrap();
for view_light_entity in view_lights.lights.iter().copied() {
let mut shadow_phase = view_light_shadow_phases.get_mut(view_light_entity).unwrap();
let layout = &shadow_shaders.pipeline_descriptor.layout;
let shadow_view_bind_group = BindGroupBuilder::default()
.add_binding(0, view_meta.uniforms.binding())
.finish();
render_resources.create_bind_group(layout.bind_group(0).id, &shadow_view_bind_group);
// TODO: this should only queue up meshes that are actually visible by each "light view"
for i in 0..extracted_meshes.meshes.len() {
shadow_phase.add(Drawable {
draw_function: draw_shadow_mesh,
draw_key: i,
sort_key: 0, // TODO: sort back-to-front
})
}
commands
.entity(view_light_entity)
.insert(MeshViewBindGroups {
view_bind_group: shadow_view_bind_group.id,
mesh_transform_bind_group: mesh_transform_bind_group.id,
});
}
}
}
// TODO: this logic can be moved to prepare_meshes once wgpu::Queue is exposed directly
pub struct PbrNode;
impl Node for PbrNode { | _graph: &mut RenderGraphContext,
render_context: &mut dyn RenderContext,
world: &World,
) -> Result<(), NodeRunError> {
let mesh_meta = world.get_resource::<MeshMeta>().unwrap();
let light_meta = world.get_resource::<LightMeta>().unwrap();
mesh_meta
.transform_uniforms
.write_to_uniform_buffer(render_context);
light_meta
.view_gpu_lights
.write_to_uniform_buffer(render_context);
Ok(())
}
}
type DrawPbrParams<'a> = (
Res<'a, PbrShaders>,
Res<'a, ExtractedMeshes>,
Query<'a, (&'a ViewUniform, &'a MeshViewBindGroups, &'a ViewLights)>,
);
pub struct DrawPbr {
params: SystemState<DrawPbrParams<'static>>,
}
impl DrawPbr {
pub fn new(world: &mut World) -> Self {
Self {
params: SystemState::new(world),
}
}
}
impl Draw for DrawPbr {
fn draw(
&mut self,
world: &World,
pass: &mut TrackedRenderPass,
view: Entity,
draw_key: usize,
_sort_key: usize,
) {
let (pbr_shaders, extracted_meshes, views) = self.params.get(world);
let (view_uniforms, mesh_view_bind_groups, view_lights) = views.get(view).unwrap();
let layout = &pbr_shaders.pipeline_descriptor.layout;
let extracted_mesh = &extracted_meshes.meshes[draw_key];
pass.set_pipeline(pbr_shaders.pipeline);
pass.set_bind_group(
0,
layout.bind_group(0).id,
mesh_view_bind_groups.view_bind_group,
Some(&[
view_uniforms.view_uniform_offset,
view_lights.gpu_light_binding_index,
]),
);
pass.set_bind_group(
1,
layout.bind_group(1).id,
mesh_view_bind_groups.mesh_transform_bind_group,
Some(&[extracted_mesh.transform_binding_offset]),
);
pass.set_vertex_buffer(0, extracted_mesh.vertex_buffer, 0);
if let Some(index_info) = &extracted_mesh.index_info {
pass.set_index_buffer(index_info.buffer, 0, IndexFormat::Uint32);
pass.draw_indexed(0..index_info.count, 0, 0..1);
} else {
panic!("non-indexed drawing not supported yet")
}
}
} | fn run(
&self, | random_line_split |
mod.rs | mod light;
pub use light::*;
use crate::StandardMaterial;
use bevy_asset::{Assets, Handle};
use bevy_ecs::{prelude::*, system::SystemState};
use bevy_math::Mat4;
use bevy_render2::{
core_pipeline::Transparent3dPhase,
mesh::Mesh,
pipeline::*,
render_graph::{Node, NodeRunError, RenderGraphContext},
render_phase::{Draw, DrawFunctions, Drawable, RenderPhase, TrackedRenderPass},
render_resource::{BindGroupBuilder, BindGroupId, BufferId, DynamicUniformVec},
renderer::{RenderContext, RenderResources},
shader::{Shader, ShaderStage, ShaderStages},
texture::{TextureFormat, TextureSampleType},
view::{ViewMeta, ViewUniform},
};
use bevy_transform::components::GlobalTransform;
pub struct PbrShaders {
pipeline: PipelineId,
pipeline_descriptor: RenderPipelineDescriptor,
}
// TODO: this pattern for initializing the shaders / pipeline isn't ideal. this should be handled by the asset system
impl FromWorld for PbrShaders {
fn from_world(world: &mut World) -> Self {
let render_resources = world.get_resource::<RenderResources>().unwrap();
let vertex_shader = Shader::from_glsl(ShaderStage::Vertex, include_str!("pbr.vert"))
.get_spirv_shader(None)
.unwrap();
let fragment_shader = Shader::from_glsl(ShaderStage::Fragment, include_str!("pbr.frag"))
.get_spirv_shader(None)
.unwrap();
let vertex_layout = vertex_shader.reflect_layout(&Default::default()).unwrap();
let fragment_layout = fragment_shader.reflect_layout(&Default::default()).unwrap();
let mut pipeline_layout =
PipelineLayout::from_shader_layouts(&mut [vertex_layout, fragment_layout]);
let vertex = render_resources.create_shader_module(&vertex_shader);
let fragment = render_resources.create_shader_module(&fragment_shader);
pipeline_layout.vertex_buffer_descriptors = vec![VertexBufferLayout {
stride: 32,
name: "Vertex".into(),
step_mode: InputStepMode::Vertex,
attributes: vec![
// GOTCHA! Vertex_Position isn't first in the buffer due to how Mesh sorts attributes (alphabetically)
VertexAttribute {
name: "Vertex_Position".into(),
format: VertexFormat::Float32x3,
offset: 12,
shader_location: 0,
},
VertexAttribute {
name: "Vertex_Normals".into(),
format: VertexFormat::Float32x3,
offset: 0,
shader_location: 1,
},
VertexAttribute {
name: "Vertex_Uv".into(),
format: VertexFormat::Float32x2,
offset: 24,
shader_location: 2,
},
],
}];
pipeline_layout.bind_group_mut(0).bindings[0].set_dynamic(true);
pipeline_layout.bind_group_mut(0).bindings[1].set_dynamic(true);
if let BindType::Texture { sample_type, .. } =
&mut pipeline_layout.bind_group_mut(0).bindings[2].bind_type
{
*sample_type = TextureSampleType::Depth;
}
if let BindType::Sampler { comparison, .. } =
&mut pipeline_layout.bind_group_mut(0).bindings[3].bind_type
{
*comparison = true;
}
pipeline_layout.bind_group_mut(1).bindings[0].set_dynamic(true);
pipeline_layout.update_bind_group_ids();
let pipeline_descriptor = RenderPipelineDescriptor {
depth_stencil: Some(DepthStencilState {
format: TextureFormat::Depth32Float,
depth_write_enabled: true,
depth_compare: CompareFunction::Less,
stencil: StencilState {
front: StencilFaceState::IGNORE,
back: StencilFaceState::IGNORE,
read_mask: 0,
write_mask: 0,
},
bias: DepthBiasState {
constant: 0,
slope_scale: 0.0,
clamp: 0.0,
},
}),
color_target_states: vec![ColorTargetState {
format: TextureFormat::default(),
blend: Some(BlendState {
color: BlendComponent {
src_factor: BlendFactor::SrcAlpha,
dst_factor: BlendFactor::OneMinusSrcAlpha,
operation: BlendOperation::Add,
},
alpha: BlendComponent {
src_factor: BlendFactor::One,
dst_factor: BlendFactor::One,
operation: BlendOperation::Add,
},
}),
write_mask: ColorWrite::ALL,
}],
..RenderPipelineDescriptor::new(
ShaderStages {
vertex,
fragment: Some(fragment),
},
pipeline_layout,
)
};
let pipeline = render_resources.create_render_pipeline(&pipeline_descriptor);
PbrShaders {
pipeline,
pipeline_descriptor,
}
}
}
struct ExtractedMesh {
transform: Mat4,
vertex_buffer: BufferId,
index_info: Option<IndexInfo>,
transform_binding_offset: u32,
}
struct IndexInfo {
buffer: BufferId,
count: u32,
}
pub struct ExtractedMeshes {
meshes: Vec<ExtractedMesh>,
}
pub fn extract_meshes(
mut commands: Commands,
meshes: Res<Assets<Mesh>>,
_materials: Res<Assets<StandardMaterial>>,
query: Query<(&GlobalTransform, &Handle<Mesh>, &Handle<StandardMaterial>)>,
) {
let mut extracted_meshes = Vec::new();
for (transform, mesh_handle, _material_handle) in query.iter() {
if let Some(mesh) = meshes.get(mesh_handle) {
if let Some(gpu_data) = &mesh.gpu_data() {
extracted_meshes.push(ExtractedMesh {
transform: transform.compute_matrix(),
vertex_buffer: gpu_data.vertex_buffer,
index_info: gpu_data.index_buffer.map(|i| IndexInfo {
buffer: i,
count: mesh.indices().unwrap().len() as u32,
}),
transform_binding_offset: 0,
})
}
}
}
commands.insert_resource(ExtractedMeshes {
meshes: extracted_meshes,
});
}
#[derive(Default)]
pub struct MeshMeta {
transform_uniforms: DynamicUniformVec<Mat4>,
}
pub fn prepare_meshes(
render_resources: Res<RenderResources>,
mut mesh_meta: ResMut<MeshMeta>,
mut extracted_meshes: ResMut<ExtractedMeshes>,
) {
mesh_meta
.transform_uniforms
.reserve_and_clear(extracted_meshes.meshes.len(), &render_resources);
for extracted_mesh in extracted_meshes.meshes.iter_mut() {
extracted_mesh.transform_binding_offset =
mesh_meta.transform_uniforms.push(extracted_mesh.transform);
}
mesh_meta
.transform_uniforms
.write_to_staging_buffer(&render_resources);
}
// TODO: This is temporary. Once we expose BindGroupLayouts directly, we can create view bind groups without specific shader context
struct MeshViewBindGroups {
view_bind_group: BindGroupId,
mesh_transform_bind_group: BindGroupId,
}
pub fn queue_meshes(
mut commands: Commands,
draw_functions: Res<DrawFunctions>,
render_resources: Res<RenderResources>,
pbr_shaders: Res<PbrShaders>,
shadow_shaders: Res<ShadowShaders>,
mesh_meta: Res<MeshMeta>,
light_meta: Res<LightMeta>,
view_meta: Res<ViewMeta>,
extracted_meshes: Res<ExtractedMeshes>,
mut views: Query<(Entity, &ViewLights, &mut RenderPhase<Transparent3dPhase>)>,
mut view_light_shadow_phases: Query<&mut RenderPhase<ShadowPhase>>,
) {
if extracted_meshes.meshes.is_empty() {
return;
}
for (entity, view_lights, mut transparent_phase) in views.iter_mut() {
let layout = &pbr_shaders.pipeline_descriptor.layout;
let view_bind_group = BindGroupBuilder::default()
.add_binding(0, view_meta.uniforms.binding())
.add_binding(1, light_meta.view_gpu_lights.binding())
.add_binding(2, view_lights.light_depth_texture_view)
.add_binding(3, shadow_shaders.light_sampler)
.finish();
// TODO: this will only create the bind group if it isn't already created. this is a bit nasty
render_resources.create_bind_group(layout.bind_group(0).id, &view_bind_group);
let mesh_transform_bind_group = BindGroupBuilder::default()
.add_binding(0, mesh_meta.transform_uniforms.binding())
.finish();
render_resources.create_bind_group(layout.bind_group(1).id, &mesh_transform_bind_group);
commands.entity(entity).insert(MeshViewBindGroups {
view_bind_group: view_bind_group.id,
mesh_transform_bind_group: mesh_transform_bind_group.id,
});
let draw_pbr = draw_functions.read().get_id::<DrawPbr>().unwrap();
for i in 0..extracted_meshes.meshes.len() {
// TODO: currently there is only "transparent phase". this should pick transparent vs opaque according to the mesh material
transparent_phase.add(Drawable {
draw_function: draw_pbr,
draw_key: i,
sort_key: 0, // TODO: sort back-to-front
});
}
// ultimately lights should check meshes for relevancy (ex: light views can "see" different meshes than the main view can)
let draw_shadow_mesh = draw_functions.read().get_id::<DrawShadowMesh>().unwrap();
for view_light_entity in view_lights.lights.iter().copied() {
let mut shadow_phase = view_light_shadow_phases.get_mut(view_light_entity).unwrap();
let layout = &shadow_shaders.pipeline_descriptor.layout;
let shadow_view_bind_group = BindGroupBuilder::default()
.add_binding(0, view_meta.uniforms.binding())
.finish();
render_resources.create_bind_group(layout.bind_group(0).id, &shadow_view_bind_group);
// TODO: this should only queue up meshes that are actually visible by each "light view"
for i in 0..extracted_meshes.meshes.len() {
shadow_phase.add(Drawable {
draw_function: draw_shadow_mesh,
draw_key: i,
sort_key: 0, // TODO: sort back-to-front
})
}
commands
.entity(view_light_entity)
.insert(MeshViewBindGroups {
view_bind_group: shadow_view_bind_group.id,
mesh_transform_bind_group: mesh_transform_bind_group.id,
});
}
}
}
// TODO: this logic can be moved to prepare_meshes once wgpu::Queue is exposed directly
pub struct PbrNode;
impl Node for PbrNode {
fn | (
&self,
_graph: &mut RenderGraphContext,
render_context: &mut dyn RenderContext,
world: &World,
) -> Result<(), NodeRunError> {
let mesh_meta = world.get_resource::<MeshMeta>().unwrap();
let light_meta = world.get_resource::<LightMeta>().unwrap();
mesh_meta
.transform_uniforms
.write_to_uniform_buffer(render_context);
light_meta
.view_gpu_lights
.write_to_uniform_buffer(render_context);
Ok(())
}
}
type DrawPbrParams<'a> = (
Res<'a, PbrShaders>,
Res<'a, ExtractedMeshes>,
Query<'a, (&'a ViewUniform, &'a MeshViewBindGroups, &'a ViewLights)>,
);
pub struct DrawPbr {
params: SystemState<DrawPbrParams<'static>>,
}
impl DrawPbr {
pub fn new(world: &mut World) -> Self {
Self {
params: SystemState::new(world),
}
}
}
impl Draw for DrawPbr {
fn draw(
&mut self,
world: &World,
pass: &mut TrackedRenderPass,
view: Entity,
draw_key: usize,
_sort_key: usize,
) {
let (pbr_shaders, extracted_meshes, views) = self.params.get(world);
let (view_uniforms, mesh_view_bind_groups, view_lights) = views.get(view).unwrap();
let layout = &pbr_shaders.pipeline_descriptor.layout;
let extracted_mesh = &extracted_meshes.meshes[draw_key];
pass.set_pipeline(pbr_shaders.pipeline);
pass.set_bind_group(
0,
layout.bind_group(0).id,
mesh_view_bind_groups.view_bind_group,
Some(&[
view_uniforms.view_uniform_offset,
view_lights.gpu_light_binding_index,
]),
);
pass.set_bind_group(
1,
layout.bind_group(1).id,
mesh_view_bind_groups.mesh_transform_bind_group,
Some(&[extracted_mesh.transform_binding_offset]),
);
pass.set_vertex_buffer(0, extracted_mesh.vertex_buffer, 0);
if let Some(index_info) = &extracted_mesh.index_info {
pass.set_index_buffer(index_info.buffer, 0, IndexFormat::Uint32);
pass.draw_indexed(0..index_info.count, 0, 0..1);
} else {
panic!("non-indexed drawing not supported yet")
}
}
}
| run | identifier_name |
kafka_scaler.go | package scalers
import (
"context"
"errors"
"fmt"
"strconv"
"strings"
"sync"
"github.com/Shopify/sarama"
"github.com/go-logr/logr"
v2 "k8s.io/api/autoscaling/v2"
"k8s.io/metrics/pkg/apis/external_metrics"
kedautil "github.com/kedacore/keda/v2/pkg/util"
)
type kafkaScaler struct {
metricType v2.MetricTargetType
metadata kafkaMetadata
client sarama.Client
admin sarama.ClusterAdmin
logger logr.Logger
previousOffsets map[string]map[int32]int64
}
const (
stringEnable = "enable"
stringDisable = "disable"
)
type kafkaMetadata struct {
bootstrapServers []string
group string
topic string
partitionLimitation []int32
lagThreshold int64
activationLagThreshold int64
offsetResetPolicy offsetResetPolicy
allowIdleConsumers bool
excludePersistentLag bool
version sarama.KafkaVersion
// If an invalid offset is found, whether to scale to 1 (false - the default) so consumption can
// occur or scale to 0 (true). See discussion in https://github.com/kedacore/keda/issues/2612
scaleToZeroOnInvalidOffset bool
// SASL
saslType kafkaSaslType
username string
password string
// OAUTHBEARER
scopes []string
oauthTokenEndpointURI string
oauthExtensions map[string]string
// TLS
enableTLS bool
cert string
key string
keyPassword string
ca string
scalerIndex int
}
type offsetResetPolicy string
const (
latest offsetResetPolicy = "latest"
earliest offsetResetPolicy = "earliest"
)
type kafkaSaslType string
// supported SASL types
const (
KafkaSASLTypeNone kafkaSaslType = "none"
KafkaSASLTypePlaintext kafkaSaslType = "plaintext"
KafkaSASLTypeSCRAMSHA256 kafkaSaslType = "scram_sha256"
KafkaSASLTypeSCRAMSHA512 kafkaSaslType = "scram_sha512"
KafkaSASLTypeOAuthbearer kafkaSaslType = "oauthbearer"
)
const (
lagThresholdMetricName = "lagThreshold"
activationLagThresholdMetricName = "activationLagThreshold"
kafkaMetricType = "External"
defaultKafkaLagThreshold = 10
defaultKafkaActivationLagThreshold = 0
defaultOffsetResetPolicy = latest
invalidOffset = -1
)
// NewKafkaScaler creates a new kafkaScaler
func | (config *ScalerConfig) (Scaler, error) {
metricType, err := GetMetricTargetType(config)
if err != nil {
return nil, fmt.Errorf("error getting scaler metric type: %w", err)
}
logger := InitializeLogger(config, "kafka_scaler")
kafkaMetadata, err := parseKafkaMetadata(config, logger)
if err != nil {
return nil, fmt.Errorf("error parsing kafka metadata: %w", err)
}
client, admin, err := getKafkaClients(kafkaMetadata)
if err != nil {
return nil, err
}
previousOffsets := make(map[string]map[int32]int64)
return &kafkaScaler{
client: client,
admin: admin,
metricType: metricType,
metadata: kafkaMetadata,
logger: logger,
previousOffsets: previousOffsets,
}, nil
}
func parseKafkaAuthParams(config *ScalerConfig, meta *kafkaMetadata) error {
meta.saslType = KafkaSASLTypeNone
var saslAuthType string
switch {
case config.TriggerMetadata["sasl"] != "":
saslAuthType = config.TriggerMetadata["sasl"]
default:
saslAuthType = ""
}
if val, ok := config.AuthParams["sasl"]; ok {
if saslAuthType != "" {
return errors.New("unable to set `sasl` in both ScaledObject and TriggerAuthentication together")
}
saslAuthType = val
}
if saslAuthType != "" {
saslAuthType = strings.TrimSpace(saslAuthType)
mode := kafkaSaslType(saslAuthType)
if mode == KafkaSASLTypePlaintext || mode == KafkaSASLTypeSCRAMSHA256 || mode == KafkaSASLTypeSCRAMSHA512 || mode == KafkaSASLTypeOAuthbearer {
if config.AuthParams["username"] == "" {
return errors.New("no username given")
}
meta.username = strings.TrimSpace(config.AuthParams["username"])
if config.AuthParams["password"] == "" {
return errors.New("no password given")
}
meta.password = strings.TrimSpace(config.AuthParams["password"])
meta.saslType = mode
if mode == KafkaSASLTypeOAuthbearer {
meta.scopes = strings.Split(config.AuthParams["scopes"], ",")
if config.AuthParams["oauthTokenEndpointUri"] == "" {
return errors.New("no oauth token endpoint uri given")
}
meta.oauthTokenEndpointURI = strings.TrimSpace(config.AuthParams["oauthTokenEndpointUri"])
meta.oauthExtensions = make(map[string]string)
oauthExtensionsRaw := config.AuthParams["oauthExtensions"]
if oauthExtensionsRaw != "" {
for _, extension := range strings.Split(oauthExtensionsRaw, ",") {
splittedExtension := strings.Split(extension, "=")
if len(splittedExtension) != 2 {
return errors.New("invalid OAuthBearer extension, must be of format key=value")
}
meta.oauthExtensions[splittedExtension[0]] = splittedExtension[1]
}
}
}
} else {
return fmt.Errorf("err SASL mode %s given", mode)
}
}
meta.enableTLS = false
enableTLS := false
if val, ok := config.TriggerMetadata["tls"]; ok {
switch val {
case stringEnable:
enableTLS = true
case stringDisable:
enableTLS = false
default:
return fmt.Errorf("error incorrect TLS value given, got %s", val)
}
}
if val, ok := config.AuthParams["tls"]; ok {
val = strings.TrimSpace(val)
if enableTLS {
return errors.New("unable to set `tls` in both ScaledObject and TriggerAuthentication together")
}
switch val {
case stringEnable:
enableTLS = true
case stringDisable:
enableTLS = false
default:
return fmt.Errorf("error incorrect TLS value given, got %s", val)
}
}
if enableTLS {
certGiven := config.AuthParams["cert"] != ""
keyGiven := config.AuthParams["key"] != ""
if certGiven && !keyGiven {
return errors.New("key must be provided with cert")
}
if keyGiven && !certGiven {
return errors.New("cert must be provided with key")
}
meta.ca = config.AuthParams["ca"]
meta.cert = config.AuthParams["cert"]
meta.key = config.AuthParams["key"]
if value, found := config.AuthParams["keyPassword"]; found {
meta.keyPassword = value
} else {
meta.keyPassword = ""
}
meta.enableTLS = true
}
return nil
}
func parseKafkaMetadata(config *ScalerConfig, logger logr.Logger) (kafkaMetadata, error) {
meta := kafkaMetadata{}
switch {
case config.TriggerMetadata["bootstrapServersFromEnv"] != "":
meta.bootstrapServers = strings.Split(config.ResolvedEnv[config.TriggerMetadata["bootstrapServersFromEnv"]], ",")
case config.TriggerMetadata["bootstrapServers"] != "":
meta.bootstrapServers = strings.Split(config.TriggerMetadata["bootstrapServers"], ",")
default:
return meta, errors.New("no bootstrapServers given")
}
switch {
case config.TriggerMetadata["consumerGroupFromEnv"] != "":
meta.group = config.ResolvedEnv[config.TriggerMetadata["consumerGroupFromEnv"]]
case config.TriggerMetadata["consumerGroup"] != "":
meta.group = config.TriggerMetadata["consumerGroup"]
default:
return meta, errors.New("no consumer group given")
}
switch {
case config.TriggerMetadata["topicFromEnv"] != "":
meta.topic = config.ResolvedEnv[config.TriggerMetadata["topicFromEnv"]]
case config.TriggerMetadata["topic"] != "":
meta.topic = config.TriggerMetadata["topic"]
default:
meta.topic = ""
logger.V(1).Info(fmt.Sprintf("consumer group %q has no topic specified, "+
"will use all topics subscribed by the consumer group for scaling", meta.group))
}
meta.partitionLimitation = nil
partitionLimitationMetadata := strings.TrimSpace(config.TriggerMetadata["partitionLimitation"])
if partitionLimitationMetadata != "" {
if meta.topic == "" {
logger.V(1).Info("no specific topic set, ignoring partitionLimitation setting")
} else {
pattern := config.TriggerMetadata["partitionLimitation"]
parsed, err := kedautil.ParseInt32List(pattern)
if err != nil {
return meta, fmt.Errorf("error parsing in partitionLimitation '%s': %w", pattern, err)
}
meta.partitionLimitation = parsed
logger.V(0).Info(fmt.Sprintf("partition limit active '%s'", pattern))
}
}
meta.offsetResetPolicy = defaultOffsetResetPolicy
if config.TriggerMetadata["offsetResetPolicy"] != "" {
policy := offsetResetPolicy(config.TriggerMetadata["offsetResetPolicy"])
if policy != earliest && policy != latest {
return meta, fmt.Errorf("err offsetResetPolicy policy %q given", policy)
}
meta.offsetResetPolicy = policy
}
meta.lagThreshold = defaultKafkaLagThreshold
if val, ok := config.TriggerMetadata[lagThresholdMetricName]; ok {
t, err := strconv.ParseInt(val, 10, 64)
if err != nil {
return meta, fmt.Errorf("error parsing %q: %w", lagThresholdMetricName, err)
}
if t <= 0 {
return meta, fmt.Errorf("%q must be positive number", lagThresholdMetricName)
}
meta.lagThreshold = t
}
meta.activationLagThreshold = defaultKafkaActivationLagThreshold
if val, ok := config.TriggerMetadata[activationLagThresholdMetricName]; ok {
t, err := strconv.ParseInt(val, 10, 64)
if err != nil {
return meta, fmt.Errorf("error parsing %q: %w", activationLagThresholdMetricName, err)
}
if t < 0 {
return meta, fmt.Errorf("%q must be positive number", activationLagThresholdMetricName)
}
meta.activationLagThreshold = t
}
if err := parseKafkaAuthParams(config, &meta); err != nil {
return meta, err
}
meta.allowIdleConsumers = false
if val, ok := config.TriggerMetadata["allowIdleConsumers"]; ok {
t, err := strconv.ParseBool(val)
if err != nil {
return meta, fmt.Errorf("error parsing allowIdleConsumers: %w", err)
}
meta.allowIdleConsumers = t
}
meta.excludePersistentLag = false
if val, ok := config.TriggerMetadata["excludePersistentLag"]; ok {
t, err := strconv.ParseBool(val)
if err != nil {
return meta, fmt.Errorf("error parsing excludePersistentLag: %w", err)
}
meta.excludePersistentLag = t
}
meta.scaleToZeroOnInvalidOffset = false
if val, ok := config.TriggerMetadata["scaleToZeroOnInvalidOffset"]; ok {
t, err := strconv.ParseBool(val)
if err != nil {
return meta, fmt.Errorf("error parsing scaleToZeroOnInvalidOffset: %w", err)
}
meta.scaleToZeroOnInvalidOffset = t
}
meta.version = sarama.V1_0_0_0
if val, ok := config.TriggerMetadata["version"]; ok {
val = strings.TrimSpace(val)
version, err := sarama.ParseKafkaVersion(val)
if err != nil {
return meta, fmt.Errorf("error parsing kafka version: %w", err)
}
meta.version = version
}
meta.scalerIndex = config.ScalerIndex
return meta, nil
}
func getKafkaClients(metadata kafkaMetadata) (sarama.Client, sarama.ClusterAdmin, error) {
config := sarama.NewConfig()
config.Version = metadata.version
if metadata.saslType != KafkaSASLTypeNone {
config.Net.SASL.Enable = true
config.Net.SASL.User = metadata.username
config.Net.SASL.Password = metadata.password
}
if metadata.enableTLS {
config.Net.TLS.Enable = true
tlsConfig, err := kedautil.NewTLSConfigWithPassword(metadata.cert, metadata.key, metadata.keyPassword, metadata.ca, false)
if err != nil {
return nil, nil, err
}
config.Net.TLS.Config = tlsConfig
}
if metadata.saslType == KafkaSASLTypePlaintext {
config.Net.SASL.Mechanism = sarama.SASLTypePlaintext
}
if metadata.saslType == KafkaSASLTypeSCRAMSHA256 {
config.Net.SASL.SCRAMClientGeneratorFunc = func() sarama.SCRAMClient { return &XDGSCRAMClient{HashGeneratorFcn: SHA256} }
config.Net.SASL.Mechanism = sarama.SASLTypeSCRAMSHA256
}
if metadata.saslType == KafkaSASLTypeSCRAMSHA512 {
config.Net.SASL.SCRAMClientGeneratorFunc = func() sarama.SCRAMClient { return &XDGSCRAMClient{HashGeneratorFcn: SHA512} }
config.Net.SASL.Mechanism = sarama.SASLTypeSCRAMSHA512
}
if metadata.saslType == KafkaSASLTypeOAuthbearer {
config.Net.SASL.Mechanism = sarama.SASLTypeOAuth
config.Net.SASL.TokenProvider = OAuthBearerTokenProvider(metadata.username, metadata.password, metadata.oauthTokenEndpointURI, metadata.scopes, metadata.oauthExtensions)
}
client, err := sarama.NewClient(metadata.bootstrapServers, config)
if err != nil {
return nil, nil, fmt.Errorf("error creating kafka client: %w", err)
}
admin, err := sarama.NewClusterAdminFromClient(client)
if err != nil {
if !client.Closed() {
client.Close()
}
return nil, nil, fmt.Errorf("error creating kafka admin: %w", err)
}
return client, admin, nil
}
func (s *kafkaScaler) getTopicPartitions() (map[string][]int32, error) {
var topicsToDescribe = make([]string, 0)
// when no topic is specified, query to cg group to fetch all subscribed topics
if s.metadata.topic == "" {
listCGOffsetResponse, err := s.admin.ListConsumerGroupOffsets(s.metadata.group, nil)
if err != nil {
return nil, fmt.Errorf("error listing cg offset: %w", err)
}
if listCGOffsetResponse.Err > 0 {
errMsg := fmt.Errorf("error listing cg offset: %w", listCGOffsetResponse.Err)
s.logger.Error(errMsg, "")
}
for topicName := range listCGOffsetResponse.Blocks {
topicsToDescribe = append(topicsToDescribe, topicName)
}
} else {
topicsToDescribe = []string{s.metadata.topic}
}
topicsMetadata, err := s.admin.DescribeTopics(topicsToDescribe)
if err != nil {
return nil, fmt.Errorf("error describing topics: %w", err)
}
if s.metadata.topic != "" && len(topicsMetadata) != 1 {
return nil, fmt.Errorf("expected only 1 topic metadata, got %d", len(topicsMetadata))
}
topicPartitions := make(map[string][]int32, len(topicsMetadata))
for _, topicMetadata := range topicsMetadata {
if topicMetadata.Err > 0 {
errMsg := fmt.Errorf("error describing topics: %w", topicMetadata.Err)
s.logger.Error(errMsg, "")
}
partitionMetadata := topicMetadata.Partitions
var partitions []int32
for _, p := range partitionMetadata {
if s.isActivePartition(p.ID) {
partitions = append(partitions, p.ID)
}
}
if len(partitions) == 0 {
return nil, fmt.Errorf("expected at least one active partition within the topic '%s'", topicMetadata.Name)
}
topicPartitions[topicMetadata.Name] = partitions
}
return topicPartitions, nil
}
func (s *kafkaScaler) isActivePartition(pID int32) bool {
if s.metadata.partitionLimitation == nil {
return true
}
for _, _pID := range s.metadata.partitionLimitation {
if pID == _pID {
return true
}
}
return false
}
func (s *kafkaScaler) getConsumerOffsets(topicPartitions map[string][]int32) (*sarama.OffsetFetchResponse, error) {
offsets, err := s.admin.ListConsumerGroupOffsets(s.metadata.group, topicPartitions)
if err != nil {
return nil, fmt.Errorf("error listing consumer group offsets: %w", err)
}
if offsets.Err > 0 {
errMsg := fmt.Errorf("error listing consumer group offsets: %w", offsets.Err)
s.logger.Error(errMsg, "")
}
return offsets, nil
}
// getLagForPartition returns (lag, lagWithPersistent, error)
// When excludePersistentLag is set to `false` (default), lag will always be equal to lagWithPersistent
// When excludePersistentLag is set to `true`, if partition is deemed to have persistent lag, lag will be set to 0 and lagWithPersistent will be latestOffset - consumerOffset
// These return values will allow proper scaling from 0 -> 1 replicas by the IsActive func.
func (s *kafkaScaler) getLagForPartition(topic string, partitionID int32, offsets *sarama.OffsetFetchResponse, topicPartitionOffsets map[string]map[int32]int64) (int64, int64, error) {
block := offsets.GetBlock(topic, partitionID)
if block == nil {
errMsg := fmt.Errorf("error finding offset block for topic %s and partition %d from offset block: %v", topic, partitionID, offsets.Blocks)
s.logger.Error(errMsg, "")
return 0, 0, errMsg
}
if block.Err > 0 {
errMsg := fmt.Errorf("error finding offset block for topic %s and partition %d: %w", topic, partitionID, offsets.Err)
s.logger.Error(errMsg, "")
}
consumerOffset := block.Offset
if consumerOffset == invalidOffset && s.metadata.offsetResetPolicy == latest {
retVal := int64(1)
if s.metadata.scaleToZeroOnInvalidOffset {
retVal = 0
}
msg := fmt.Sprintf(
"invalid offset found for topic %s in group %s and partition %d, probably no offset is committed yet. Returning with lag of %d",
topic, s.metadata.group, partitionID, retVal)
s.logger.V(1).Info(msg)
return retVal, retVal, nil
}
if _, found := topicPartitionOffsets[topic]; !found {
return 0, 0, fmt.Errorf("error finding partition offset for topic %s", topic)
}
latestOffset := topicPartitionOffsets[topic][partitionID]
if consumerOffset == invalidOffset && s.metadata.offsetResetPolicy == earliest {
return latestOffset, latestOffset, nil
}
// This code block tries to prevent KEDA Kafka trigger from scaling the scale target based on erroneous events
if s.metadata.excludePersistentLag {
switch previousOffset, found := s.previousOffsets[topic][partitionID]; {
case !found:
// No record of previous offset, so store current consumer offset
// Allow this consumer lag to be considered in scaling
if _, topicFound := s.previousOffsets[topic]; !topicFound {
s.previousOffsets[topic] = map[int32]int64{partitionID: consumerOffset}
} else {
s.previousOffsets[topic][partitionID] = consumerOffset
}
case previousOffset == consumerOffset:
// Indicates consumer is still on the same offset as the previous polling cycle, there may be some issue with consuming this offset.
// return 0, so this consumer lag is not considered for scaling
return 0, latestOffset - consumerOffset, nil
default:
// Successfully Consumed some messages, proceed to change the previous offset
s.previousOffsets[topic][partitionID] = consumerOffset
}
}
return latestOffset - consumerOffset, latestOffset - consumerOffset, nil
}
// Close closes the kafka admin and client
func (s *kafkaScaler) Close(context.Context) error {
// underlying client will also be closed on admin's Close() call
if s.admin == nil {
return nil
}
return s.admin.Close()
}
func (s *kafkaScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec {
var metricName string
if s.metadata.topic != "" {
metricName = fmt.Sprintf("kafka-%s", s.metadata.topic)
} else {
metricName = fmt.Sprintf("kafka-%s-topics", s.metadata.group)
}
externalMetric := &v2.ExternalMetricSource{
Metric: v2.MetricIdentifier{
Name: GenerateMetricNameWithIndex(s.metadata.scalerIndex, kedautil.NormalizeString(metricName)),
},
Target: GetMetricTarget(s.metricType, s.metadata.lagThreshold),
}
metricSpec := v2.MetricSpec{External: externalMetric, Type: kafkaMetricType}
return []v2.MetricSpec{metricSpec}
}
type consumerOffsetResult struct {
consumerOffsets *sarama.OffsetFetchResponse
err error
}
type producerOffsetResult struct {
producerOffsets map[string]map[int32]int64
err error
}
func (s *kafkaScaler) getConsumerAndProducerOffsets(topicPartitions map[string][]int32) (*sarama.OffsetFetchResponse, map[string]map[int32]int64, error) {
consumerChan := make(chan consumerOffsetResult, 1)
go func() {
consumerOffsets, err := s.getConsumerOffsets(topicPartitions)
consumerChan <- consumerOffsetResult{consumerOffsets, err}
}()
producerChan := make(chan producerOffsetResult, 1)
go func() {
producerOffsets, err := s.getProducerOffsets(topicPartitions)
producerChan <- producerOffsetResult{producerOffsets, err}
}()
consumerRes := <-consumerChan
if consumerRes.err != nil {
return nil, nil, consumerRes.err
}
producerRes := <-producerChan
if producerRes.err != nil {
return nil, nil, producerRes.err
}
return consumerRes.consumerOffsets, producerRes.producerOffsets, nil
}
// GetMetricsAndActivity returns value for a supported metric and an error if there is a problem getting the metric
func (s *kafkaScaler) GetMetricsAndActivity(_ context.Context, metricName string) ([]external_metrics.ExternalMetricValue, bool, error) {
totalLag, totalLagWithPersistent, err := s.getTotalLag()
if err != nil {
return []external_metrics.ExternalMetricValue{}, false, err
}
metric := GenerateMetricInMili(metricName, float64(totalLag))
return []external_metrics.ExternalMetricValue{metric}, totalLagWithPersistent > s.metadata.activationLagThreshold, nil
}
// getTotalLag returns totalLag, totalLagWithPersistent, error
// totalLag and totalLagWithPersistent are the summations of lag and lagWithPersistent returned by getLagForPartition function respectively.
// totalLag maybe less than totalLagWithPersistent when excludePersistentLag is set to `true` due to some partitions deemed as having persistent lag
func (s *kafkaScaler) getTotalLag() (int64, int64, error) {
topicPartitions, err := s.getTopicPartitions()
if err != nil {
return 0, 0, err
}
consumerOffsets, producerOffsets, err := s.getConsumerAndProducerOffsets(topicPartitions)
if err != nil {
return 0, 0, err
}
totalLag := int64(0)
totalLagWithPersistent := int64(0)
totalTopicPartitions := int64(0)
for topic, partitionsOffsets := range producerOffsets {
for partition := range partitionsOffsets {
lag, lagWithPersistent, err := s.getLagForPartition(topic, partition, consumerOffsets, producerOffsets)
if err != nil {
return 0, 0, err
}
totalLag += lag
totalLagWithPersistent += lagWithPersistent
}
totalTopicPartitions += (int64)(len(partitionsOffsets))
}
s.logger.V(1).Info(fmt.Sprintf("Kafka scaler: Providing metrics based on totalLag %v, topicPartitions %v, threshold %v", totalLag, len(topicPartitions), s.metadata.lagThreshold))
if !s.metadata.allowIdleConsumers {
// don't scale out beyond the number of topicPartitions
if (totalLag / s.metadata.lagThreshold) > totalTopicPartitions {
totalLag = totalTopicPartitions * s.metadata.lagThreshold
}
}
return totalLag, totalLagWithPersistent, nil
}
type brokerOffsetResult struct {
offsetResp *sarama.OffsetResponse
err error
}
func (s *kafkaScaler) getProducerOffsets(topicPartitions map[string][]int32) (map[string]map[int32]int64, error) {
version := int16(0)
if s.client.Config().Version.IsAtLeast(sarama.V0_10_1_0) {
version = 1
}
// Step 1: build one OffsetRequest instance per broker.
requests := make(map[*sarama.Broker]*sarama.OffsetRequest)
for topic, partitions := range topicPartitions {
for _, partitionID := range partitions {
broker, err := s.client.Leader(topic, partitionID)
if err != nil {
return nil, err
}
request, ok := requests[broker]
if !ok {
request = &sarama.OffsetRequest{Version: version}
requests[broker] = request
}
request.AddBlock(topic, partitionID, sarama.OffsetNewest, 1)
}
}
// Step 2: send requests, one per broker, and collect topicPartitionsOffsets
resultCh := make(chan brokerOffsetResult, len(requests))
var wg sync.WaitGroup
wg.Add(len(requests))
for broker, request := range requests {
go func(brCopy *sarama.Broker, reqCopy *sarama.OffsetRequest) {
defer wg.Done()
response, err := brCopy.GetAvailableOffsets(reqCopy)
resultCh <- brokerOffsetResult{response, err}
}(broker, request)
}
wg.Wait()
close(resultCh)
topicPartitionsOffsets := make(map[string]map[int32]int64)
for brokerOffsetRes := range resultCh {
if brokerOffsetRes.err != nil {
return nil, brokerOffsetRes.err
}
for topic, blocks := range brokerOffsetRes.offsetResp.Blocks {
if _, found := topicPartitionsOffsets[topic]; !found {
topicPartitionsOffsets[topic] = make(map[int32]int64)
}
for partitionID, block := range blocks {
if block.Err != sarama.ErrNoError {
return nil, block.Err
}
topicPartitionsOffsets[topic][partitionID] = block.Offset
}
}
}
return topicPartitionsOffsets, nil
}
| NewKafkaScaler | identifier_name |
kafka_scaler.go | package scalers
import (
"context"
"errors"
"fmt"
"strconv"
"strings"
"sync"
"github.com/Shopify/sarama"
"github.com/go-logr/logr"
v2 "k8s.io/api/autoscaling/v2"
"k8s.io/metrics/pkg/apis/external_metrics"
kedautil "github.com/kedacore/keda/v2/pkg/util"
)
type kafkaScaler struct {
metricType v2.MetricTargetType
metadata kafkaMetadata
client sarama.Client
admin sarama.ClusterAdmin
logger logr.Logger
previousOffsets map[string]map[int32]int64
}
const (
stringEnable = "enable"
stringDisable = "disable"
)
type kafkaMetadata struct {
bootstrapServers []string
group string
topic string
partitionLimitation []int32
lagThreshold int64
activationLagThreshold int64
offsetResetPolicy offsetResetPolicy
allowIdleConsumers bool
excludePersistentLag bool
version sarama.KafkaVersion
// If an invalid offset is found, whether to scale to 1 (false - the default) so consumption can
// occur or scale to 0 (true). See discussion in https://github.com/kedacore/keda/issues/2612
scaleToZeroOnInvalidOffset bool
// SASL
saslType kafkaSaslType
username string
password string
// OAUTHBEARER
scopes []string
oauthTokenEndpointURI string
oauthExtensions map[string]string
// TLS
enableTLS bool
cert string
key string
keyPassword string
ca string
scalerIndex int
}
type offsetResetPolicy string
const (
latest offsetResetPolicy = "latest"
earliest offsetResetPolicy = "earliest"
)
type kafkaSaslType string
// supported SASL types
const (
KafkaSASLTypeNone kafkaSaslType = "none"
KafkaSASLTypePlaintext kafkaSaslType = "plaintext"
KafkaSASLTypeSCRAMSHA256 kafkaSaslType = "scram_sha256"
KafkaSASLTypeSCRAMSHA512 kafkaSaslType = "scram_sha512"
KafkaSASLTypeOAuthbearer kafkaSaslType = "oauthbearer"
)
const (
lagThresholdMetricName = "lagThreshold"
activationLagThresholdMetricName = "activationLagThreshold"
kafkaMetricType = "External"
defaultKafkaLagThreshold = 10
defaultKafkaActivationLagThreshold = 0
defaultOffsetResetPolicy = latest
invalidOffset = -1
)
// NewKafkaScaler creates a new kafkaScaler
func NewKafkaScaler(config *ScalerConfig) (Scaler, error) {
metricType, err := GetMetricTargetType(config)
if err != nil {
return nil, fmt.Errorf("error getting scaler metric type: %w", err)
}
logger := InitializeLogger(config, "kafka_scaler")
kafkaMetadata, err := parseKafkaMetadata(config, logger)
if err != nil {
return nil, fmt.Errorf("error parsing kafka metadata: %w", err)
}
client, admin, err := getKafkaClients(kafkaMetadata)
if err != nil {
return nil, err
}
previousOffsets := make(map[string]map[int32]int64)
return &kafkaScaler{
client: client,
admin: admin,
metricType: metricType,
metadata: kafkaMetadata,
logger: logger,
previousOffsets: previousOffsets,
}, nil
}
func parseKafkaAuthParams(config *ScalerConfig, meta *kafkaMetadata) error {
meta.saslType = KafkaSASLTypeNone
var saslAuthType string
switch {
case config.TriggerMetadata["sasl"] != "":
saslAuthType = config.TriggerMetadata["sasl"]
default:
saslAuthType = ""
}
if val, ok := config.AuthParams["sasl"]; ok {
if saslAuthType != "" {
return errors.New("unable to set `sasl` in both ScaledObject and TriggerAuthentication together")
}
saslAuthType = val
}
if saslAuthType != "" {
saslAuthType = strings.TrimSpace(saslAuthType)
mode := kafkaSaslType(saslAuthType)
if mode == KafkaSASLTypePlaintext || mode == KafkaSASLTypeSCRAMSHA256 || mode == KafkaSASLTypeSCRAMSHA512 || mode == KafkaSASLTypeOAuthbearer {
if config.AuthParams["username"] == "" {
return errors.New("no username given")
}
meta.username = strings.TrimSpace(config.AuthParams["username"])
if config.AuthParams["password"] == "" {
return errors.New("no password given")
}
meta.password = strings.TrimSpace(config.AuthParams["password"])
meta.saslType = mode
if mode == KafkaSASLTypeOAuthbearer {
meta.scopes = strings.Split(config.AuthParams["scopes"], ",")
if config.AuthParams["oauthTokenEndpointUri"] == "" {
return errors.New("no oauth token endpoint uri given")
}
meta.oauthTokenEndpointURI = strings.TrimSpace(config.AuthParams["oauthTokenEndpointUri"])
meta.oauthExtensions = make(map[string]string)
oauthExtensionsRaw := config.AuthParams["oauthExtensions"]
if oauthExtensionsRaw != "" {
for _, extension := range strings.Split(oauthExtensionsRaw, ",") {
splittedExtension := strings.Split(extension, "=")
if len(splittedExtension) != 2 {
return errors.New("invalid OAuthBearer extension, must be of format key=value")
}
meta.oauthExtensions[splittedExtension[0]] = splittedExtension[1]
}
}
}
} else {
return fmt.Errorf("err SASL mode %s given", mode)
}
}
meta.enableTLS = false
enableTLS := false
if val, ok := config.TriggerMetadata["tls"]; ok {
switch val {
case stringEnable:
enableTLS = true
case stringDisable:
enableTLS = false
default:
return fmt.Errorf("error incorrect TLS value given, got %s", val)
}
}
if val, ok := config.AuthParams["tls"]; ok {
val = strings.TrimSpace(val)
if enableTLS {
return errors.New("unable to set `tls` in both ScaledObject and TriggerAuthentication together")
}
switch val {
case stringEnable:
enableTLS = true
case stringDisable:
enableTLS = false
default:
return fmt.Errorf("error incorrect TLS value given, got %s", val)
}
}
if enableTLS {
certGiven := config.AuthParams["cert"] != ""
keyGiven := config.AuthParams["key"] != ""
if certGiven && !keyGiven {
return errors.New("key must be provided with cert")
}
if keyGiven && !certGiven {
return errors.New("cert must be provided with key")
}
meta.ca = config.AuthParams["ca"]
meta.cert = config.AuthParams["cert"]
meta.key = config.AuthParams["key"]
if value, found := config.AuthParams["keyPassword"]; found {
meta.keyPassword = value
} else {
meta.keyPassword = ""
}
meta.enableTLS = true
}
return nil
}
// parseKafkaMetadata builds the scaler configuration from trigger metadata,
// resolved environment variables and TriggerAuthentication params.
// Mandatory fields: bootstrap servers and consumer group; everything else
// falls back to a documented default. Returns an error on the first
// missing or invalid field.
func parseKafkaMetadata(config *ScalerConfig, logger logr.Logger) (kafkaMetadata, error) {
	meta := kafkaMetadata{}
	// Bootstrap servers: the *FromEnv indirection wins over the literal value.
	switch {
	case config.TriggerMetadata["bootstrapServersFromEnv"] != "":
		meta.bootstrapServers = strings.Split(config.ResolvedEnv[config.TriggerMetadata["bootstrapServersFromEnv"]], ",")
	case config.TriggerMetadata["bootstrapServers"] != "":
		meta.bootstrapServers = strings.Split(config.TriggerMetadata["bootstrapServers"], ",")
	default:
		return meta, errors.New("no bootstrapServers given")
	}
	switch {
	case config.TriggerMetadata["consumerGroupFromEnv"] != "":
		meta.group = config.ResolvedEnv[config.TriggerMetadata["consumerGroupFromEnv"]]
	case config.TriggerMetadata["consumerGroup"] != "":
		meta.group = config.TriggerMetadata["consumerGroup"]
	default:
		return meta, errors.New("no consumer group given")
	}
	// Topic is optional: with no topic set the scaler watches every topic
	// the consumer group is subscribed to.
	switch {
	case config.TriggerMetadata["topicFromEnv"] != "":
		meta.topic = config.ResolvedEnv[config.TriggerMetadata["topicFromEnv"]]
	case config.TriggerMetadata["topic"] != "":
		meta.topic = config.TriggerMetadata["topic"]
	default:
		meta.topic = ""
		logger.V(1).Info(fmt.Sprintf("consumer group %q has no topic specified, "+
			"will use all topics subscribed by the consumer group for scaling", meta.group))
	}
	// partitionLimitation only makes sense together with an explicit topic.
	meta.partitionLimitation = nil
	partitionLimitationMetadata := strings.TrimSpace(config.TriggerMetadata["partitionLimitation"])
	if partitionLimitationMetadata != "" {
		if meta.topic == "" {
			logger.V(1).Info("no specific topic set, ignoring partitionLimitation setting")
		} else {
			pattern := config.TriggerMetadata["partitionLimitation"]
			parsed, err := kedautil.ParseInt32List(pattern)
			if err != nil {
				return meta, fmt.Errorf("error parsing in partitionLimitation '%s': %w", pattern, err)
			}
			meta.partitionLimitation = parsed
			logger.V(0).Info(fmt.Sprintf("partition limit active '%s'", pattern))
		}
	}
	meta.offsetResetPolicy = defaultOffsetResetPolicy
	if config.TriggerMetadata["offsetResetPolicy"] != "" {
		policy := offsetResetPolicy(config.TriggerMetadata["offsetResetPolicy"])
		if policy != earliest && policy != latest {
			return meta, fmt.Errorf("err offsetResetPolicy policy %q given", policy)
		}
		meta.offsetResetPolicy = policy
	}
	// lagThreshold must be strictly positive: it becomes the HPA target value.
	meta.lagThreshold = defaultKafkaLagThreshold
	if val, ok := config.TriggerMetadata[lagThresholdMetricName]; ok {
		t, err := strconv.ParseInt(val, 10, 64)
		if err != nil {
			return meta, fmt.Errorf("error parsing %q: %w", lagThresholdMetricName, err)
		}
		if t <= 0 {
			return meta, fmt.Errorf("%q must be positive number", lagThresholdMetricName)
		}
		meta.lagThreshold = t
	}
	// activationLagThreshold may be zero (the default); only negative values
	// are rejected. FIX: the message previously claimed "positive" although
	// zero is accepted — it now states the real constraint.
	meta.activationLagThreshold = defaultKafkaActivationLagThreshold
	if val, ok := config.TriggerMetadata[activationLagThresholdMetricName]; ok {
		t, err := strconv.ParseInt(val, 10, 64)
		if err != nil {
			return meta, fmt.Errorf("error parsing %q: %w", activationLagThresholdMetricName, err)
		}
		if t < 0 {
			return meta, fmt.Errorf("%q must be a non-negative number", activationLagThresholdMetricName)
		}
		meta.activationLagThreshold = t
	}
	if err := parseKafkaAuthParams(config, &meta); err != nil {
		return meta, err
	}
	meta.allowIdleConsumers = false
	if val, ok := config.TriggerMetadata["allowIdleConsumers"]; ok {
		t, err := strconv.ParseBool(val)
		if err != nil {
			return meta, fmt.Errorf("error parsing allowIdleConsumers: %w", err)
		}
		meta.allowIdleConsumers = t
	}
	meta.excludePersistentLag = false
	if val, ok := config.TriggerMetadata["excludePersistentLag"]; ok {
		t, err := strconv.ParseBool(val)
		if err != nil {
			return meta, fmt.Errorf("error parsing excludePersistentLag: %w", err)
		}
		meta.excludePersistentLag = t
	}
	meta.scaleToZeroOnInvalidOffset = false
	if val, ok := config.TriggerMetadata["scaleToZeroOnInvalidOffset"]; ok {
		t, err := strconv.ParseBool(val)
		if err != nil {
			return meta, fmt.Errorf("error parsing scaleToZeroOnInvalidOffset: %w", err)
		}
		meta.scaleToZeroOnInvalidOffset = t
	}
	// Broker protocol version defaults to 1.0.0 unless overridden.
	meta.version = sarama.V1_0_0_0
	if val, ok := config.TriggerMetadata["version"]; ok {
		val = strings.TrimSpace(val)
		version, err := sarama.ParseKafkaVersion(val)
		if err != nil {
			return meta, fmt.Errorf("error parsing kafka version: %w", err)
		}
		meta.version = version
	}
	meta.scalerIndex = config.ScalerIndex
	return meta, nil
}
// getKafkaClients creates a sarama client and a cluster admin that shares
// the client's connections, applying the protocol version, SASL and TLS
// settings captured in metadata. Callers own both and must close them.
func getKafkaClients(metadata kafkaMetadata) (sarama.Client, sarama.ClusterAdmin, error) {
	config := sarama.NewConfig()
	config.Version = metadata.version
	// Every SASL mode uses the shared username/password pair; the concrete
	// mechanism is selected further down.
	if metadata.saslType != KafkaSASLTypeNone {
		config.Net.SASL.Enable = true
		config.Net.SASL.User = metadata.username
		config.Net.SASL.Password = metadata.password
	}
	if metadata.enableTLS {
		config.Net.TLS.Enable = true
		tlsConfig, err := kedautil.NewTLSConfigWithPassword(metadata.cert, metadata.key, metadata.keyPassword, metadata.ca, false)
		if err != nil {
			return nil, nil, err
		}
		config.Net.TLS.Config = tlsConfig
	}
	if metadata.saslType == KafkaSASLTypePlaintext {
		config.Net.SASL.Mechanism = sarama.SASLTypePlaintext
	}
	// SCRAM modes additionally need a client generator for the chosen hash.
	if metadata.saslType == KafkaSASLTypeSCRAMSHA256 {
		config.Net.SASL.SCRAMClientGeneratorFunc = func() sarama.SCRAMClient { return &XDGSCRAMClient{HashGeneratorFcn: SHA256} }
		config.Net.SASL.Mechanism = sarama.SASLTypeSCRAMSHA256
	}
	if metadata.saslType == KafkaSASLTypeSCRAMSHA512 {
		config.Net.SASL.SCRAMClientGeneratorFunc = func() sarama.SCRAMClient { return &XDGSCRAMClient{HashGeneratorFcn: SHA512} }
		config.Net.SASL.Mechanism = sarama.SASLTypeSCRAMSHA512
	}
	// OAUTHBEARER fetches tokens from the configured endpoint at connect time.
	if metadata.saslType == KafkaSASLTypeOAuthbearer {
		config.Net.SASL.Mechanism = sarama.SASLTypeOAuth
		config.Net.SASL.TokenProvider = OAuthBearerTokenProvider(metadata.username, metadata.password, metadata.oauthTokenEndpointURI, metadata.scopes, metadata.oauthExtensions)
	}
	client, err := sarama.NewClient(metadata.bootstrapServers, config)
	if err != nil {
		return nil, nil, fmt.Errorf("error creating kafka client: %w", err)
	}
	admin, err := sarama.NewClusterAdminFromClient(client)
	if err != nil {
		// Admin creation failed: release the already-connected client so it
		// does not leak.
		if !client.Closed() {
			client.Close()
		}
		return nil, nil, fmt.Errorf("error creating kafka admin: %w", err)
	}
	return client, admin, nil
}
func (s *kafkaScaler) getTopicPartitions() (map[string][]int32, error) |
// isActivePartition reports whether partition pID participates in scaling.
// A nil partitionLimitation list means all partitions are in scope;
// otherwise the partition must appear in the list.
func (s *kafkaScaler) isActivePartition(pID int32) bool {
	limitation := s.metadata.partitionLimitation
	if limitation == nil {
		return true
	}
	found := false
	for i := 0; i < len(limitation) && !found; i++ {
		found = limitation[i] == pID
	}
	return found
}
// getConsumerOffsets fetches the committed offsets of the configured
// consumer group for the given topic/partition map.
// A non-zero response-level Err is logged but not returned, so callers
// still receive whatever per-partition data came back (best-effort).
func (s *kafkaScaler) getConsumerOffsets(topicPartitions map[string][]int32) (*sarama.OffsetFetchResponse, error) {
	offsets, err := s.admin.ListConsumerGroupOffsets(s.metadata.group, topicPartitions)
	if err != nil {
		return nil, fmt.Errorf("error listing consumer group offsets: %w", err)
	}
	if offsets.Err > 0 {
		errMsg := fmt.Errorf("error listing consumer group offsets: %w", offsets.Err)
		s.logger.Error(errMsg, "")
	}
	return offsets, nil
}
// getLagForPartition returns (lag, lagWithPersistent, error)
// When excludePersistentLag is set to `false` (default), lag will always be equal to lagWithPersistent
// When excludePersistentLag is set to `true`, if partition is deemed to have persistent lag, lag will be set to 0 and lagWithPersistent will be latestOffset - consumerOffset
// These return values will allow proper scaling from 0 -> 1 replicas by the IsActive func.
func (s *kafkaScaler) getLagForPartition(topic string, partitionID int32, offsets *sarama.OffsetFetchResponse, topicPartitionOffsets map[string]map[int32]int64) (int64, int64, error) {
	block := offsets.GetBlock(topic, partitionID)
	if block == nil {
		errMsg := fmt.Errorf("error finding offset block for topic %s and partition %d from offset block: %v", topic, partitionID, offsets.Blocks)
		s.logger.Error(errMsg, "")
		return 0, 0, errMsg
	}
	// FIX: wrap the partition-level block.Err here; the previous code wrapped
	// the response-level offsets.Err, which can be ErrNoError even when this
	// particular block failed, producing a misleading log message.
	if block.Err > 0 {
		errMsg := fmt.Errorf("error finding offset block for topic %s and partition %d: %w", topic, partitionID, block.Err)
		s.logger.Error(errMsg, "")
	}
	consumerOffset := block.Offset
	// No committed offset yet with policy "latest": report a token lag of 1
	// (or 0 when scaleToZeroOnInvalidOffset) so the group can scale 0 -> 1.
	if consumerOffset == invalidOffset && s.metadata.offsetResetPolicy == latest {
		retVal := int64(1)
		if s.metadata.scaleToZeroOnInvalidOffset {
			retVal = 0
		}
		msg := fmt.Sprintf(
			"invalid offset found for topic %s in group %s and partition %d, probably no offset is committed yet. Returning with lag of %d",
			topic, s.metadata.group, partitionID, retVal)
		s.logger.V(1).Info(msg)
		return retVal, retVal, nil
	}
	if _, found := topicPartitionOffsets[topic]; !found {
		return 0, 0, fmt.Errorf("error finding partition offset for topic %s", topic)
	}
	latestOffset := topicPartitionOffsets[topic][partitionID]
	// No committed offset with policy "earliest": the whole partition is lag.
	if consumerOffset == invalidOffset && s.metadata.offsetResetPolicy == earliest {
		return latestOffset, latestOffset, nil
	}
	// This code block tries to prevent KEDA Kafka trigger from scaling the scale target based on erroneous events
	if s.metadata.excludePersistentLag {
		switch previousOffset, found := s.previousOffsets[topic][partitionID]; {
		case !found:
			// No record of previous offset, so store current consumer offset
			// Allow this consumer lag to be considered in scaling
			if _, topicFound := s.previousOffsets[topic]; !topicFound {
				s.previousOffsets[topic] = map[int32]int64{partitionID: consumerOffset}
			} else {
				s.previousOffsets[topic][partitionID] = consumerOffset
			}
		case previousOffset == consumerOffset:
			// Indicates consumer is still on the same offset as the previous polling cycle, there may be some issue with consuming this offset.
			// return 0, so this consumer lag is not considered for scaling
			return 0, latestOffset - consumerOffset, nil
		default:
			// Successfully Consumed some messages, proceed to change the previous offset
			s.previousOffsets[topic][partitionID] = consumerOffset
		}
	}
	return latestOffset - consumerOffset, latestOffset - consumerOffset, nil
}
// Close closes the kafka admin and client
func (s *kafkaScaler) Close(context.Context) error {
	// underlying client will also be closed on admin's Close() call
	if s.admin == nil {
		// FIX: if an admin was never created but a bare client exists
		// (partially-constructed scaler), close the client directly so its
		// broker connections do not leak.
		if s.client != nil && !s.client.Closed() {
			return s.client.Close()
		}
		return nil
	}
	return s.admin.Close()
}
// GetMetricSpecForScaling returns the external metric spec the HPA should
// track: metric name is derived from the topic (or the group when no topic
// is set), and the target value is the configured lag threshold.
func (s *kafkaScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec {
	var metricName string
	if s.metadata.topic != "" {
		metricName = fmt.Sprintf("kafka-%s", s.metadata.topic)
	} else {
		// No explicit topic: name the metric after the consumer group.
		metricName = fmt.Sprintf("kafka-%s-topics", s.metadata.group)
	}
	externalMetric := &v2.ExternalMetricSource{
		Metric: v2.MetricIdentifier{
			Name: GenerateMetricNameWithIndex(s.metadata.scalerIndex, kedautil.NormalizeString(metricName)),
		},
		Target: GetMetricTarget(s.metricType, s.metadata.lagThreshold),
	}
	metricSpec := v2.MetricSpec{External: externalMetric, Type: kafkaMetricType}
	return []v2.MetricSpec{metricSpec}
}
// consumerOffsetResult carries the outcome of the async consumer-offset
// fetch performed in getConsumerAndProducerOffsets.
type consumerOffsetResult struct {
	consumerOffsets *sarama.OffsetFetchResponse
	err             error
}

// producerOffsetResult carries the outcome of the async producer-offset
// (latest offset per partition) fetch.
type producerOffsetResult struct {
	producerOffsets map[string]map[int32]int64
	err             error
}
// getConsumerAndProducerOffsets fetches committed consumer offsets and
// latest producer offsets concurrently and returns both, failing if either
// fetch failed.
func (s *kafkaScaler) getConsumerAndProducerOffsets(topicPartitions map[string][]int32) (*sarama.OffsetFetchResponse, map[string]map[int32]int64, error) {
	// Channels are buffered (size 1) so neither goroutine blocks on send
	// even if this function returns early on the other fetch's error.
	consumerChan := make(chan consumerOffsetResult, 1)
	go func() {
		consumerOffsets, err := s.getConsumerOffsets(topicPartitions)
		consumerChan <- consumerOffsetResult{consumerOffsets, err}
	}()
	producerChan := make(chan producerOffsetResult, 1)
	go func() {
		producerOffsets, err := s.getProducerOffsets(topicPartitions)
		producerChan <- producerOffsetResult{producerOffsets, err}
	}()
	consumerRes := <-consumerChan
	if consumerRes.err != nil {
		return nil, nil, consumerRes.err
	}
	producerRes := <-producerChan
	if producerRes.err != nil {
		return nil, nil, producerRes.err
	}
	return consumerRes.consumerOffsets, producerRes.producerOffsets, nil
}
// GetMetricsAndActivity returns value for a supported metric and an error if there is a problem getting the metric
// The reported metric value is totalLag (persistent lag excluded when so
// configured); activity is judged on totalLagWithPersistent so a stuck
// consumer still counts as "active".
func (s *kafkaScaler) GetMetricsAndActivity(_ context.Context, metricName string) ([]external_metrics.ExternalMetricValue, bool, error) {
	totalLag, totalLagWithPersistent, err := s.getTotalLag()
	if err != nil {
		return []external_metrics.ExternalMetricValue{}, false, err
	}
	metric := GenerateMetricInMili(metricName, float64(totalLag))
	return []external_metrics.ExternalMetricValue{metric}, totalLagWithPersistent > s.metadata.activationLagThreshold, nil
}
// getTotalLag returns totalLag, totalLagWithPersistent, error
// totalLag and totalLagWithPersistent are the summations of lag and lagWithPersistent returned by getLagForPartition function respectively.
// totalLag maybe less than totalLagWithPersistent when excludePersistentLag is set to `true` due to some partitions deemed as having persistent lag
func (s *kafkaScaler) getTotalLag() (int64, int64, error) {
	topicPartitions, err := s.getTopicPartitions()
	if err != nil {
		return 0, 0, err
	}
	consumerOffsets, producerOffsets, err := s.getConsumerAndProducerOffsets(topicPartitions)
	if err != nil {
		return 0, 0, err
	}
	totalLag := int64(0)
	totalLagWithPersistent := int64(0)
	totalTopicPartitions := int64(0)
	// Sum lag over every partition the producers report offsets for.
	for topic, partitionsOffsets := range producerOffsets {
		for partition := range partitionsOffsets {
			lag, lagWithPersistent, err := s.getLagForPartition(topic, partition, consumerOffsets, producerOffsets)
			if err != nil {
				return 0, 0, err
			}
			totalLag += lag
			totalLagWithPersistent += lagWithPersistent
		}
		totalTopicPartitions += (int64)(len(partitionsOffsets))
	}
	// NOTE(review): this logs len(topicPartitions), i.e. the number of
	// topics, not partitions — confirm whether totalTopicPartitions was meant.
	s.logger.V(1).Info(fmt.Sprintf("Kafka scaler: Providing metrics based on totalLag %v, topicPartitions %v, threshold %v", totalLag, len(topicPartitions), s.metadata.lagThreshold))
	if !s.metadata.allowIdleConsumers {
		// don't scale out beyond the number of topicPartitions
		if (totalLag / s.metadata.lagThreshold) > totalTopicPartitions {
			totalLag = totalTopicPartitions * s.metadata.lagThreshold
		}
	}
	return totalLag, totalLagWithPersistent, nil
}
// brokerOffsetResult carries one broker's response (or error) from the
// fan-out offset requests issued in getProducerOffsets.
type brokerOffsetResult struct {
	offsetResp *sarama.OffsetResponse
	err        error
}
// getProducerOffsets returns the latest (newest) offset for every requested
// topic/partition, batching one OffsetRequest per partition leader and
// querying all brokers concurrently.
func (s *kafkaScaler) getProducerOffsets(topicPartitions map[string][]int32) (map[string]map[int32]int64, error) {
	// OffsetRequest v1 is used for brokers >= 0.10.1.0, v0 otherwise.
	version := int16(0)
	if s.client.Config().Version.IsAtLeast(sarama.V0_10_1_0) {
		version = 1
	}
	// Step 1: build one OffsetRequest instance per broker.
	requests := make(map[*sarama.Broker]*sarama.OffsetRequest)
	for topic, partitions := range topicPartitions {
		for _, partitionID := range partitions {
			// Each partition's request must go to its current leader.
			broker, err := s.client.Leader(topic, partitionID)
			if err != nil {
				return nil, err
			}
			request, ok := requests[broker]
			if !ok {
				request = &sarama.OffsetRequest{Version: version}
				requests[broker] = request
			}
			request.AddBlock(topic, partitionID, sarama.OffsetNewest, 1)
		}
	}
	// Step 2: send requests, one per broker, and collect topicPartitionsOffsets
	// Buffered to len(requests) so every goroutine can send without blocking.
	resultCh := make(chan brokerOffsetResult, len(requests))
	var wg sync.WaitGroup
	wg.Add(len(requests))
	for broker, request := range requests {
		// Loop variables are passed as arguments so each goroutine captures
		// its own broker/request pair.
		go func(brCopy *sarama.Broker, reqCopy *sarama.OffsetRequest) {
			defer wg.Done()
			response, err := brCopy.GetAvailableOffsets(reqCopy)
			resultCh <- brokerOffsetResult{response, err}
		}(broker, request)
	}
	wg.Wait()
	close(resultCh)
	topicPartitionsOffsets := make(map[string]map[int32]int64)
	for brokerOffsetRes := range resultCh {
		if brokerOffsetRes.err != nil {
			return nil, brokerOffsetRes.err
		}
		for topic, blocks := range brokerOffsetRes.offsetResp.Blocks {
			if _, found := topicPartitionsOffsets[topic]; !found {
				topicPartitionsOffsets[topic] = make(map[int32]int64)
			}
			for partitionID, block := range blocks {
				if block.Err != sarama.ErrNoError {
					return nil, block.Err
				}
				topicPartitionsOffsets[topic][partitionID] = block.Offset
			}
		}
	}
	return topicPartitionsOffsets, nil
}
| {
var topicsToDescribe = make([]string, 0)
// when no topic is specified, query to cg group to fetch all subscribed topics
if s.metadata.topic == "" {
listCGOffsetResponse, err := s.admin.ListConsumerGroupOffsets(s.metadata.group, nil)
if err != nil {
return nil, fmt.Errorf("error listing cg offset: %w", err)
}
if listCGOffsetResponse.Err > 0 {
errMsg := fmt.Errorf("error listing cg offset: %w", listCGOffsetResponse.Err)
s.logger.Error(errMsg, "")
}
for topicName := range listCGOffsetResponse.Blocks {
topicsToDescribe = append(topicsToDescribe, topicName)
}
} else {
topicsToDescribe = []string{s.metadata.topic}
}
topicsMetadata, err := s.admin.DescribeTopics(topicsToDescribe)
if err != nil {
return nil, fmt.Errorf("error describing topics: %w", err)
}
if s.metadata.topic != "" && len(topicsMetadata) != 1 {
return nil, fmt.Errorf("expected only 1 topic metadata, got %d", len(topicsMetadata))
}
topicPartitions := make(map[string][]int32, len(topicsMetadata))
for _, topicMetadata := range topicsMetadata {
if topicMetadata.Err > 0 {
errMsg := fmt.Errorf("error describing topics: %w", topicMetadata.Err)
s.logger.Error(errMsg, "")
}
partitionMetadata := topicMetadata.Partitions
var partitions []int32
for _, p := range partitionMetadata {
if s.isActivePartition(p.ID) {
partitions = append(partitions, p.ID)
}
}
if len(partitions) == 0 {
return nil, fmt.Errorf("expected at least one active partition within the topic '%s'", topicMetadata.Name)
}
topicPartitions[topicMetadata.Name] = partitions
}
return topicPartitions, nil
} | identifier_body |
kafka_scaler.go | package scalers
import (
"context"
"errors"
"fmt"
"strconv"
"strings"
"sync"
"github.com/Shopify/sarama"
"github.com/go-logr/logr"
v2 "k8s.io/api/autoscaling/v2"
"k8s.io/metrics/pkg/apis/external_metrics"
kedautil "github.com/kedacore/keda/v2/pkg/util"
)
// kafkaScaler scales a workload based on Kafka consumer-group lag.
type kafkaScaler struct {
	metricType v2.MetricTargetType
	metadata   kafkaMetadata
	client     sarama.Client
	admin      sarama.ClusterAdmin
	logger     logr.Logger
	// previousOffsets remembers the last committed offset per topic/partition,
	// used by the excludePersistentLag feature to detect stuck consumers.
	previousOffsets map[string]map[int32]int64
}
// Accepted values for the string-typed "tls"/"sasl" style toggles in
// trigger metadata and auth params.
const (
	stringEnable  = "enable"
	stringDisable = "disable"
)
// kafkaMetadata holds the fully parsed and validated trigger configuration
// for the Kafka scaler (see parseKafkaMetadata / parseKafkaAuthParams).
type kafkaMetadata struct {
	bootstrapServers       []string
	group                  string
	topic                  string
	partitionLimitation    []int32
	lagThreshold           int64
	activationLagThreshold int64
	offsetResetPolicy      offsetResetPolicy
	allowIdleConsumers     bool
	excludePersistentLag   bool
	version                sarama.KafkaVersion
	// If an invalid offset is found, whether to scale to 1 (false - the default) so consumption can
	// occur or scale to 0 (true). See discussion in https://github.com/kedacore/keda/issues/2612
	scaleToZeroOnInvalidOffset bool
	// SASL
	saslType kafkaSaslType
	username string
	password string
	// OAUTHBEARER
	scopes                []string
	oauthTokenEndpointURI string
	oauthExtensions       map[string]string
	// TLS
	enableTLS   bool
	cert        string
	key         string
	keyPassword string
	ca          string
	scalerIndex int
}
// offsetResetPolicy mirrors the consumer's offset-reset behavior and decides
// how lag is computed for partitions with no committed offset.
type offsetResetPolicy string

const (
	latest   offsetResetPolicy = "latest"
	earliest offsetResetPolicy = "earliest"
)

// kafkaSaslType names a supported SASL authentication mechanism.
type kafkaSaslType string

// supported SASL types
const (
	KafkaSASLTypeNone        kafkaSaslType = "none"
	KafkaSASLTypePlaintext   kafkaSaslType = "plaintext"
	KafkaSASLTypeSCRAMSHA256 kafkaSaslType = "scram_sha256"
	KafkaSASLTypeSCRAMSHA512 kafkaSaslType = "scram_sha512"
	KafkaSASLTypeOAuthbearer kafkaSaslType = "oauthbearer"
)

// Metadata keys, defaults, and the sentinel value sarama uses for a
// partition with no committed offset.
const (
	lagThresholdMetricName             = "lagThreshold"
	activationLagThresholdMetricName   = "activationLagThreshold"
	kafkaMetricType                    = "External"
	defaultKafkaLagThreshold           = 10
	defaultKafkaActivationLagThreshold = 0
	defaultOffsetResetPolicy           = latest
	invalidOffset                      = -1
)
// NewKafkaScaler creates a new kafkaScaler
// It parses and validates the trigger configuration, then eagerly connects
// a sarama client and cluster admin; any failure aborts scaler creation.
func NewKafkaScaler(config *ScalerConfig) (Scaler, error) {
	metricType, err := GetMetricTargetType(config)
	if err != nil {
		return nil, fmt.Errorf("error getting scaler metric type: %w", err)
	}
	logger := InitializeLogger(config, "kafka_scaler")
	kafkaMetadata, err := parseKafkaMetadata(config, logger)
	if err != nil {
		return nil, fmt.Errorf("error parsing kafka metadata: %w", err)
	}
	client, admin, err := getKafkaClients(kafkaMetadata)
	if err != nil {
		return nil, err
	}
	// Seed the persistent-lag tracking map (see excludePersistentLag).
	previousOffsets := make(map[string]map[int32]int64)
	return &kafkaScaler{
		client:          client,
		admin:           admin,
		metricType:      metricType,
		metadata:        kafkaMetadata,
		logger:          logger,
		previousOffsets: previousOffsets,
	}, nil
}
// parseKafkaAuthParams fills the SASL and TLS fields of meta from trigger
// metadata and TriggerAuthentication params. Setting the same toggle
// ("sasl"/"tls") in both places is rejected to avoid ambiguity.
func parseKafkaAuthParams(config *ScalerConfig, meta *kafkaMetadata) error {
	meta.saslType = KafkaSASLTypeNone
	var saslAuthType string
	switch {
	case config.TriggerMetadata["sasl"] != "":
		saslAuthType = config.TriggerMetadata["sasl"]
	default:
		saslAuthType = ""
	}
	// TriggerAuthentication may supply "sasl", but not in addition to the
	// ScaledObject metadata.
	if val, ok := config.AuthParams["sasl"]; ok {
		if saslAuthType != "" {
			return errors.New("unable to set `sasl` in both ScaledObject and TriggerAuthentication together")
		}
		saslAuthType = val
	}
	if saslAuthType != "" {
		saslAuthType = strings.TrimSpace(saslAuthType)
		mode := kafkaSaslType(saslAuthType)
		if mode == KafkaSASLTypePlaintext || mode == KafkaSASLTypeSCRAMSHA256 || mode == KafkaSASLTypeSCRAMSHA512 || mode == KafkaSASLTypeOAuthbearer {
			// All supported modes require username and password.
			if config.AuthParams["username"] == "" {
				return errors.New("no username given")
			}
			meta.username = strings.TrimSpace(config.AuthParams["username"])
			if config.AuthParams["password"] == "" {
				return errors.New("no password given")
			}
			meta.password = strings.TrimSpace(config.AuthParams["password"])
			meta.saslType = mode
			// OAUTHBEARER additionally needs a token endpoint, and optionally
			// scopes plus key=value extensions.
			if mode == KafkaSASLTypeOAuthbearer {
				meta.scopes = strings.Split(config.AuthParams["scopes"], ",")
				if config.AuthParams["oauthTokenEndpointUri"] == "" {
					return errors.New("no oauth token endpoint uri given")
				}
				meta.oauthTokenEndpointURI = strings.TrimSpace(config.AuthParams["oauthTokenEndpointUri"])
				meta.oauthExtensions = make(map[string]string)
				oauthExtensionsRaw := config.AuthParams["oauthExtensions"]
				if oauthExtensionsRaw != "" {
					for _, extension := range strings.Split(oauthExtensionsRaw, ",") {
						splittedExtension := strings.Split(extension, "=")
						if len(splittedExtension) != 2 {
							return errors.New("invalid OAuthBearer extension, must be of format key=value")
						}
						meta.oauthExtensions[splittedExtension[0]] = splittedExtension[1]
					}
				}
			}
		} else {
			return fmt.Errorf("err SASL mode %s given", mode)
		}
	}
	// TLS: same either-or rule as "sasl" — metadata and auth params are
	// mutually exclusive sources.
	meta.enableTLS = false
	enableTLS := false
	if val, ok := config.TriggerMetadata["tls"]; ok {
		switch val {
		case stringEnable:
			enableTLS = true
		case stringDisable:
			enableTLS = false
		default:
			return fmt.Errorf("error incorrect TLS value given, got %s", val)
		}
	}
	if val, ok := config.AuthParams["tls"]; ok {
		val = strings.TrimSpace(val)
		if enableTLS {
			return errors.New("unable to set `tls` in both ScaledObject and TriggerAuthentication together")
		}
		switch val {
		case stringEnable:
			enableTLS = true
		case stringDisable:
			enableTLS = false
		default:
			return fmt.Errorf("error incorrect TLS value given, got %s", val)
		}
	}
	if enableTLS {
		// cert and key must be provided together (or both omitted for
		// server-auth-only TLS).
		certGiven := config.AuthParams["cert"] != ""
		keyGiven := config.AuthParams["key"] != ""
		if certGiven && !keyGiven {
			return errors.New("key must be provided with cert")
		}
		if keyGiven && !certGiven {
			return errors.New("cert must be provided with key")
		}
		meta.ca = config.AuthParams["ca"]
		meta.cert = config.AuthParams["cert"]
		meta.key = config.AuthParams["key"]
		if value, found := config.AuthParams["keyPassword"]; found {
			meta.keyPassword = value
		} else {
			meta.keyPassword = ""
		}
		meta.enableTLS = true
	}
	return nil
}
func parseKafkaMetadata(config *ScalerConfig, logger logr.Logger) (kafkaMetadata, error) {
meta := kafkaMetadata{}
switch {
case config.TriggerMetadata["bootstrapServersFromEnv"] != "":
meta.bootstrapServers = strings.Split(config.ResolvedEnv[config.TriggerMetadata["bootstrapServersFromEnv"]], ",")
case config.TriggerMetadata["bootstrapServers"] != "":
meta.bootstrapServers = strings.Split(config.TriggerMetadata["bootstrapServers"], ",")
default:
return meta, errors.New("no bootstrapServers given")
}
switch {
case config.TriggerMetadata["consumerGroupFromEnv"] != "":
meta.group = config.ResolvedEnv[config.TriggerMetadata["consumerGroupFromEnv"]]
case config.TriggerMetadata["consumerGroup"] != "":
meta.group = config.TriggerMetadata["consumerGroup"]
default:
return meta, errors.New("no consumer group given")
}
switch {
case config.TriggerMetadata["topicFromEnv"] != "":
meta.topic = config.ResolvedEnv[config.TriggerMetadata["topicFromEnv"]]
case config.TriggerMetadata["topic"] != "":
meta.topic = config.TriggerMetadata["topic"]
default:
meta.topic = ""
logger.V(1).Info(fmt.Sprintf("consumer group %q has no topic specified, "+
"will use all topics subscribed by the consumer group for scaling", meta.group))
}
meta.partitionLimitation = nil
partitionLimitationMetadata := strings.TrimSpace(config.TriggerMetadata["partitionLimitation"])
if partitionLimitationMetadata != "" {
if meta.topic == "" {
logger.V(1).Info("no specific topic set, ignoring partitionLimitation setting")
} else {
pattern := config.TriggerMetadata["partitionLimitation"]
parsed, err := kedautil.ParseInt32List(pattern)
if err != nil {
return meta, fmt.Errorf("error parsing in partitionLimitation '%s': %w", pattern, err)
}
meta.partitionLimitation = parsed
logger.V(0).Info(fmt.Sprintf("partition limit active '%s'", pattern))
}
}
meta.offsetResetPolicy = defaultOffsetResetPolicy
if config.TriggerMetadata["offsetResetPolicy"] != "" {
policy := offsetResetPolicy(config.TriggerMetadata["offsetResetPolicy"])
if policy != earliest && policy != latest {
return meta, fmt.Errorf("err offsetResetPolicy policy %q given", policy)
}
meta.offsetResetPolicy = policy
}
meta.lagThreshold = defaultKafkaLagThreshold
if val, ok := config.TriggerMetadata[lagThresholdMetricName]; ok {
t, err := strconv.ParseInt(val, 10, 64)
if err != nil {
return meta, fmt.Errorf("error parsing %q: %w", lagThresholdMetricName, err)
}
if t <= 0 {
return meta, fmt.Errorf("%q must be positive number", lagThresholdMetricName)
}
meta.lagThreshold = t
}
meta.activationLagThreshold = defaultKafkaActivationLagThreshold
if val, ok := config.TriggerMetadata[activationLagThresholdMetricName]; ok {
t, err := strconv.ParseInt(val, 10, 64)
if err != nil {
return meta, fmt.Errorf("error parsing %q: %w", activationLagThresholdMetricName, err)
}
if t < 0 {
return meta, fmt.Errorf("%q must be positive number", activationLagThresholdMetricName)
}
meta.activationLagThreshold = t
}
if err := parseKafkaAuthParams(config, &meta); err != nil {
return meta, err
}
meta.allowIdleConsumers = false
if val, ok := config.TriggerMetadata["allowIdleConsumers"]; ok {
t, err := strconv.ParseBool(val)
if err != nil {
return meta, fmt.Errorf("error parsing allowIdleConsumers: %w", err)
}
meta.allowIdleConsumers = t
}
meta.excludePersistentLag = false
if val, ok := config.TriggerMetadata["excludePersistentLag"]; ok {
t, err := strconv.ParseBool(val)
if err != nil {
return meta, fmt.Errorf("error parsing excludePersistentLag: %w", err)
}
meta.excludePersistentLag = t
}
meta.scaleToZeroOnInvalidOffset = false
if val, ok := config.TriggerMetadata["scaleToZeroOnInvalidOffset"]; ok {
t, err := strconv.ParseBool(val)
if err != nil {
return meta, fmt.Errorf("error parsing scaleToZeroOnInvalidOffset: %w", err)
}
meta.scaleToZeroOnInvalidOffset = t
}
meta.version = sarama.V1_0_0_0
if val, ok := config.TriggerMetadata["version"]; ok |
meta.scalerIndex = config.ScalerIndex
return meta, nil
}
// getKafkaClients creates a sarama client and a cluster admin that shares
// the client's connections, configured from the parsed metadata (protocol
// version, SASL mechanism, TLS). Callers own both and must close them.
func getKafkaClients(metadata kafkaMetadata) (sarama.Client, sarama.ClusterAdmin, error) {
	config := sarama.NewConfig()
	config.Version = metadata.version
	// Every SASL mode uses the shared username/password pair.
	if metadata.saslType != KafkaSASLTypeNone {
		config.Net.SASL.Enable = true
		config.Net.SASL.User = metadata.username
		config.Net.SASL.Password = metadata.password
	}
	if metadata.enableTLS {
		config.Net.TLS.Enable = true
		tlsConfig, err := kedautil.NewTLSConfigWithPassword(metadata.cert, metadata.key, metadata.keyPassword, metadata.ca, false)
		if err != nil {
			return nil, nil, err
		}
		config.Net.TLS.Config = tlsConfig
	}
	if metadata.saslType == KafkaSASLTypePlaintext {
		config.Net.SASL.Mechanism = sarama.SASLTypePlaintext
	}
	// SCRAM modes additionally need a client generator for the chosen hash.
	if metadata.saslType == KafkaSASLTypeSCRAMSHA256 {
		config.Net.SASL.SCRAMClientGeneratorFunc = func() sarama.SCRAMClient { return &XDGSCRAMClient{HashGeneratorFcn: SHA256} }
		config.Net.SASL.Mechanism = sarama.SASLTypeSCRAMSHA256
	}
	if metadata.saslType == KafkaSASLTypeSCRAMSHA512 {
		config.Net.SASL.SCRAMClientGeneratorFunc = func() sarama.SCRAMClient { return &XDGSCRAMClient{HashGeneratorFcn: SHA512} }
		config.Net.SASL.Mechanism = sarama.SASLTypeSCRAMSHA512
	}
	// OAUTHBEARER fetches tokens from the configured endpoint at connect time.
	if metadata.saslType == KafkaSASLTypeOAuthbearer {
		config.Net.SASL.Mechanism = sarama.SASLTypeOAuth
		config.Net.SASL.TokenProvider = OAuthBearerTokenProvider(metadata.username, metadata.password, metadata.oauthTokenEndpointURI, metadata.scopes, metadata.oauthExtensions)
	}
	client, err := sarama.NewClient(metadata.bootstrapServers, config)
	if err != nil {
		return nil, nil, fmt.Errorf("error creating kafka client: %w", err)
	}
	admin, err := sarama.NewClusterAdminFromClient(client)
	if err != nil {
		// Admin creation failed: release the connected client so it does
		// not leak.
		if !client.Closed() {
			client.Close()
		}
		return nil, nil, fmt.Errorf("error creating kafka admin: %w", err)
	}
	return client, admin, nil
}
// getTopicPartitions resolves which topic/partition pairs lag is computed
// over: either the single configured topic, or (when no topic is set) every
// topic the consumer group has committed offsets for. Partitions are
// filtered by partitionLimitation via isActivePartition.
func (s *kafkaScaler) getTopicPartitions() (map[string][]int32, error) {
	var topicsToDescribe = make([]string, 0)
	// when no topic is specified, query to cg group to fetch all subscribed topics
	if s.metadata.topic == "" {
		listCGOffsetResponse, err := s.admin.ListConsumerGroupOffsets(s.metadata.group, nil)
		if err != nil {
			return nil, fmt.Errorf("error listing cg offset: %w", err)
		}
		// Response-level errors are logged but not fatal (best-effort).
		if listCGOffsetResponse.Err > 0 {
			errMsg := fmt.Errorf("error listing cg offset: %w", listCGOffsetResponse.Err)
			s.logger.Error(errMsg, "")
		}
		for topicName := range listCGOffsetResponse.Blocks {
			topicsToDescribe = append(topicsToDescribe, topicName)
		}
	} else {
		topicsToDescribe = []string{s.metadata.topic}
	}
	topicsMetadata, err := s.admin.DescribeTopics(topicsToDescribe)
	if err != nil {
		return nil, fmt.Errorf("error describing topics: %w", err)
	}
	// With an explicit topic, exactly one metadata entry is expected back.
	if s.metadata.topic != "" && len(topicsMetadata) != 1 {
		return nil, fmt.Errorf("expected only 1 topic metadata, got %d", len(topicsMetadata))
	}
	topicPartitions := make(map[string][]int32, len(topicsMetadata))
	for _, topicMetadata := range topicsMetadata {
		if topicMetadata.Err > 0 {
			errMsg := fmt.Errorf("error describing topics: %w", topicMetadata.Err)
			s.logger.Error(errMsg, "")
		}
		partitionMetadata := topicMetadata.Partitions
		var partitions []int32
		for _, p := range partitionMetadata {
			if s.isActivePartition(p.ID) {
				partitions = append(partitions, p.ID)
			}
		}
		// partitionLimitation filtering everything out is a config error.
		if len(partitions) == 0 {
			return nil, fmt.Errorf("expected at least one active partition within the topic '%s'", topicMetadata.Name)
		}
		topicPartitions[topicMetadata.Name] = partitions
	}
	return topicPartitions, nil
}
// isActivePartition reports whether partition pID participates in scaling.
// A nil partitionLimitation list means all partitions are in scope;
// otherwise the partition must appear in the list.
func (s *kafkaScaler) isActivePartition(pID int32) bool {
	limitation := s.metadata.partitionLimitation
	if limitation == nil {
		return true
	}
	found := false
	for i := 0; i < len(limitation) && !found; i++ {
		found = limitation[i] == pID
	}
	return found
}
// getConsumerOffsets fetches the committed offsets of the configured
// consumer group for the given topic/partition map. A non-zero
// response-level Err is logged but not returned (best-effort).
func (s *kafkaScaler) getConsumerOffsets(topicPartitions map[string][]int32) (*sarama.OffsetFetchResponse, error) {
	offsets, err := s.admin.ListConsumerGroupOffsets(s.metadata.group, topicPartitions)
	if err != nil {
		return nil, fmt.Errorf("error listing consumer group offsets: %w", err)
	}
	if offsets.Err > 0 {
		errMsg := fmt.Errorf("error listing consumer group offsets: %w", offsets.Err)
		s.logger.Error(errMsg, "")
	}
	return offsets, nil
}
// getLagForPartition returns (lag, lagWithPersistent, error)
// When excludePersistentLag is set to `false` (default), lag will always be equal to lagWithPersistent
// When excludePersistentLag is set to `true`, if partition is deemed to have persistent lag, lag will be set to 0 and lagWithPersistent will be latestOffset - consumerOffset
// These return values will allow proper scaling from 0 -> 1 replicas by the IsActive func.
func (s *kafkaScaler) getLagForPartition(topic string, partitionID int32, offsets *sarama.OffsetFetchResponse, topicPartitionOffsets map[string]map[int32]int64) (int64, int64, error) {
	block := offsets.GetBlock(topic, partitionID)
	if block == nil {
		errMsg := fmt.Errorf("error finding offset block for topic %s and partition %d from offset block: %v", topic, partitionID, offsets.Blocks)
		s.logger.Error(errMsg, "")
		return 0, 0, errMsg
	}
	// FIX: wrap the partition-level block.Err here; the previous code wrapped
	// the response-level offsets.Err, which can be ErrNoError even when this
	// particular block failed, producing a misleading log message.
	if block.Err > 0 {
		errMsg := fmt.Errorf("error finding offset block for topic %s and partition %d: %w", topic, partitionID, block.Err)
		s.logger.Error(errMsg, "")
	}
	consumerOffset := block.Offset
	// No committed offset yet with policy "latest": report a token lag of 1
	// (or 0 when scaleToZeroOnInvalidOffset) so the group can scale 0 -> 1.
	if consumerOffset == invalidOffset && s.metadata.offsetResetPolicy == latest {
		retVal := int64(1)
		if s.metadata.scaleToZeroOnInvalidOffset {
			retVal = 0
		}
		msg := fmt.Sprintf(
			"invalid offset found for topic %s in group %s and partition %d, probably no offset is committed yet. Returning with lag of %d",
			topic, s.metadata.group, partitionID, retVal)
		s.logger.V(1).Info(msg)
		return retVal, retVal, nil
	}
	if _, found := topicPartitionOffsets[topic]; !found {
		return 0, 0, fmt.Errorf("error finding partition offset for topic %s", topic)
	}
	latestOffset := topicPartitionOffsets[topic][partitionID]
	// No committed offset with policy "earliest": the whole partition is lag.
	if consumerOffset == invalidOffset && s.metadata.offsetResetPolicy == earliest {
		return latestOffset, latestOffset, nil
	}
	// This code block tries to prevent KEDA Kafka trigger from scaling the scale target based on erroneous events
	if s.metadata.excludePersistentLag {
		switch previousOffset, found := s.previousOffsets[topic][partitionID]; {
		case !found:
			// No record of previous offset, so store current consumer offset
			// Allow this consumer lag to be considered in scaling
			if _, topicFound := s.previousOffsets[topic]; !topicFound {
				s.previousOffsets[topic] = map[int32]int64{partitionID: consumerOffset}
			} else {
				s.previousOffsets[topic][partitionID] = consumerOffset
			}
		case previousOffset == consumerOffset:
			// Indicates consumer is still on the same offset as the previous polling cycle, there may be some issue with consuming this offset.
			// return 0, so this consumer lag is not considered for scaling
			return 0, latestOffset - consumerOffset, nil
		default:
			// Successfully Consumed some messages, proceed to change the previous offset
			s.previousOffsets[topic][partitionID] = consumerOffset
		}
	}
	return latestOffset - consumerOffset, latestOffset - consumerOffset, nil
}
// Close closes the kafka admin and client
func (s *kafkaScaler) Close(context.Context) error {
    // underlying client will also be closed on admin's Close() call
    if s.admin != nil {
        return s.admin.Close()
    }
    return nil
}
// GetMetricSpecForScaling builds the external metric spec handed to the HPA.
func (s *kafkaScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec {
    var metricName string
    // The metric name is topic-scoped when a topic is configured and
    // group-scoped ("-topics" suffix) otherwise.
    if s.metadata.topic != "" {
        metricName = fmt.Sprintf("kafka-%s", s.metadata.topic)
    } else {
        metricName = fmt.Sprintf("kafka-%s-topics", s.metadata.group)
    }
    externalMetric := &v2.ExternalMetricSource{
        Metric: v2.MetricIdentifier{
            Name: GenerateMetricNameWithIndex(s.metadata.scalerIndex, kedautil.NormalizeString(metricName)),
        },
        Target: GetMetricTarget(s.metricType, s.metadata.lagThreshold),
    }
    metricSpec := v2.MetricSpec{External: externalMetric, Type: kafkaMetricType}
    return []v2.MetricSpec{metricSpec}
}
// consumerOffsetResult carries the committed consumer-group offsets (or the
// fetch error) across the goroutine boundary in getConsumerAndProducerOffsets.
type consumerOffsetResult struct {
    consumerOffsets *sarama.OffsetFetchResponse
    err             error
}

// producerOffsetResult carries the latest partition offsets keyed by topic and
// partition (or the fetch error) across the same goroutine boundary.
type producerOffsetResult struct {
    producerOffsets map[string]map[int32]int64
    err             error
}
// getConsumerAndProducerOffsets fetches committed consumer offsets and newest
// producer offsets concurrently and returns both, failing on the first error.
func (s *kafkaScaler) getConsumerAndProducerOffsets(topicPartitions map[string][]int32) (*sarama.OffsetFetchResponse, map[string]map[int32]int64, error) {
    // Buffered channels (size 1) let each goroutine exit even if the caller
    // returns early on the other goroutine's error.
    consumerChan := make(chan consumerOffsetResult, 1)
    go func() {
        consumerOffsets, err := s.getConsumerOffsets(topicPartitions)
        consumerChan <- consumerOffsetResult{consumerOffsets, err}
    }()
    producerChan := make(chan producerOffsetResult, 1)
    go func() {
        producerOffsets, err := s.getProducerOffsets(topicPartitions)
        producerChan <- producerOffsetResult{producerOffsets, err}
    }()
    consumerRes := <-consumerChan
    if consumerRes.err != nil {
        return nil, nil, consumerRes.err
    }
    producerRes := <-producerChan
    if producerRes.err != nil {
        return nil, nil, producerRes.err
    }
    return consumerRes.consumerOffsets, producerRes.producerOffsets, nil
}
// GetMetricsAndActivity returns value for a supported metric and an error if there is a problem getting the metric
func (s *kafkaScaler) GetMetricsAndActivity(_ context.Context, metricName string) ([]external_metrics.ExternalMetricValue, bool, error) {
    totalLag, totalLagWithPersistent, err := s.getTotalLag()
    if err != nil {
        return []external_metrics.ExternalMetricValue{}, false, err
    }
    metric := GenerateMetricInMili(metricName, float64(totalLag))
    // Activity is judged on lag *including* persistent lag, so a stuck consumer
    // still keeps the scaler active (0 -> 1) even when totalLag is capped to 0.
    return []external_metrics.ExternalMetricValue{metric}, totalLagWithPersistent > s.metadata.activationLagThreshold, nil
}
// getTotalLag returns totalLag, totalLagWithPersistent, error
// totalLag and totalLagWithPersistent are the summations of lag and lagWithPersistent returned by getLagForPartition function respectively.
// totalLag maybe less than totalLagWithPersistent when excludePersistentLag is set to `true` due to some partitions deemed as having persistent lag
func (s *kafkaScaler) getTotalLag() (int64, int64, error) {
    topicPartitions, err := s.getTopicPartitions()
    if err != nil {
        return 0, 0, err
    }
    consumerOffsets, producerOffsets, err := s.getConsumerAndProducerOffsets(topicPartitions)
    if err != nil {
        return 0, 0, err
    }
    totalLag := int64(0)
    totalLagWithPersistent := int64(0)
    totalTopicPartitions := int64(0)
    // Sum per-partition lag across every topic/partition we track.
    for topic, partitionsOffsets := range producerOffsets {
        for partition := range partitionsOffsets {
            lag, lagWithPersistent, err := s.getLagForPartition(topic, partition, consumerOffsets, producerOffsets)
            if err != nil {
                return 0, 0, err
            }
            totalLag += lag
            totalLagWithPersistent += lagWithPersistent
        }
        totalTopicPartitions += (int64)(len(partitionsOffsets))
    }
    s.logger.V(1).Info(fmt.Sprintf("Kafka scaler: Providing metrics based on totalLag %v, topicPartitions %v, threshold %v", totalLag, len(topicPartitions), s.metadata.lagThreshold))
    if !s.metadata.allowIdleConsumers {
        // don't scale out beyond the number of topicPartitions
        if (totalLag / s.metadata.lagThreshold) > totalTopicPartitions {
            totalLag = totalTopicPartitions * s.metadata.lagThreshold
        }
    }
    return totalLag, totalLagWithPersistent, nil
}
// brokerOffsetResult is one broker's reply (or error) to an OffsetRequest,
// funneled through a channel by getProducerOffsets.
type brokerOffsetResult struct {
    offsetResp *sarama.OffsetResponse
    err        error
}
// getProducerOffsets returns the newest (log-end) offset for every requested
// topic/partition, querying each partition's leader broker in parallel.
func (s *kafkaScaler) getProducerOffsets(topicPartitions map[string][]int32) (map[string]map[int32]int64, error) {
    // OffsetRequest v1 requires broker >= 0.10.1.0; fall back to v0 otherwise.
    version := int16(0)
    if s.client.Config().Version.IsAtLeast(sarama.V0_10_1_0) {
        version = 1
    }
    // Step 1: build one OffsetRequest instance per broker.
    requests := make(map[*sarama.Broker]*sarama.OffsetRequest)
    for topic, partitions := range topicPartitions {
        for _, partitionID := range partitions {
            broker, err := s.client.Leader(topic, partitionID)
            if err != nil {
                return nil, err
            }
            request, ok := requests[broker]
            if !ok {
                request = &sarama.OffsetRequest{Version: version}
                requests[broker] = request
            }
            request.AddBlock(topic, partitionID, sarama.OffsetNewest, 1)
        }
    }
    // Step 2: send requests, one per broker, and collect topicPartitionsOffsets
    resultCh := make(chan brokerOffsetResult, len(requests))
    var wg sync.WaitGroup
    wg.Add(len(requests))
    for broker, request := range requests {
        go func(brCopy *sarama.Broker, reqCopy *sarama.OffsetRequest) {
            defer wg.Done()
            response, err := brCopy.GetAvailableOffsets(reqCopy)
            resultCh <- brokerOffsetResult{response, err}
        }(broker, request)
    }
    // The channel is buffered to len(requests), so all sends complete and the
    // channel can be drained after close.
    wg.Wait()
    close(resultCh)
    topicPartitionsOffsets := make(map[string]map[int32]int64)
    for brokerOffsetRes := range resultCh {
        if brokerOffsetRes.err != nil {
            return nil, brokerOffsetRes.err
        }
        for topic, blocks := range brokerOffsetRes.offsetResp.Blocks {
            if _, found := topicPartitionsOffsets[topic]; !found {
                topicPartitionsOffsets[topic] = make(map[int32]int64)
            }
            for partitionID, block := range blocks {
                if block.Err != sarama.ErrNoError {
                    return nil, block.Err
                }
                topicPartitionsOffsets[topic][partitionID] = block.Offset
            }
        }
    }
    return topicPartitionsOffsets, nil
}
| {
val = strings.TrimSpace(val)
version, err := sarama.ParseKafkaVersion(val)
if err != nil {
return meta, fmt.Errorf("error parsing kafka version: %w", err)
}
meta.version = version
} | conditional_block |
kafka_scaler.go | package scalers
import (
"context"
"errors"
"fmt"
"strconv"
"strings"
"sync"
"github.com/Shopify/sarama"
"github.com/go-logr/logr"
v2 "k8s.io/api/autoscaling/v2"
"k8s.io/metrics/pkg/apis/external_metrics"
kedautil "github.com/kedacore/keda/v2/pkg/util"
)
// kafkaScaler scales a target based on Kafka consumer-group lag.
type kafkaScaler struct {
    metricType v2.MetricTargetType
    metadata   kafkaMetadata
    client     sarama.Client
    admin      sarama.ClusterAdmin
    logger     logr.Logger
    // previousOffsets remembers the last seen consumer offset per
    // topic/partition; used by the excludePersistentLag feature.
    previousOffsets map[string]map[int32]int64
}

// Accepted values for the "tls"/"sasl"-style enable flags in trigger metadata.
const (
    stringEnable  = "enable"
    stringDisable = "disable"
)
// kafkaMetadata is the parsed trigger configuration for the Kafka scaler.
type kafkaMetadata struct {
    bootstrapServers []string
    group            string
    // topic may be empty, in which case all topics subscribed by the consumer
    // group are considered.
    topic string
    // partitionLimitation restricts scaling to these partition IDs (nil = all).
    partitionLimitation    []int32
    lagThreshold           int64
    activationLagThreshold int64
    offsetResetPolicy      offsetResetPolicy
    allowIdleConsumers     bool
    excludePersistentLag   bool
    version                sarama.KafkaVersion
    // If an invalid offset is found, whether to scale to 1 (false - the default) so consumption can
    // occur or scale to 0 (true). See discussion in https://github.com/kedacore/keda/issues/2612
    scaleToZeroOnInvalidOffset bool
    // SASL
    saslType kafkaSaslType
    username string
    password string
    // OAUTHBEARER
    scopes                []string
    oauthTokenEndpointURI string
    oauthExtensions       map[string]string
    // TLS
    enableTLS   bool
    cert        string
    key         string
    keyPassword string
    ca          string
    scalerIndex int
}
// offsetResetPolicy mirrors Kafka's auto.offset.reset semantics for consumers
// without a committed offset.
type offsetResetPolicy string

const (
    latest   offsetResetPolicy = "latest"
    earliest offsetResetPolicy = "earliest"
)

// kafkaSaslType names the SASL mechanism used for broker authentication.
type kafkaSaslType string

// supported SASL types
const (
    KafkaSASLTypeNone        kafkaSaslType = "none"
    KafkaSASLTypePlaintext   kafkaSaslType = "plaintext"
    KafkaSASLTypeSCRAMSHA256 kafkaSaslType = "scram_sha256"
    KafkaSASLTypeSCRAMSHA512 kafkaSaslType = "scram_sha512"
    KafkaSASLTypeOAuthbearer kafkaSaslType = "oauthbearer"
)

// Trigger-metadata keys and scaler defaults.
const (
    lagThresholdMetricName             = "lagThreshold"
    activationLagThresholdMetricName   = "activationLagThreshold"
    kafkaMetricType                    = "External"
    defaultKafkaLagThreshold           = 10
    defaultKafkaActivationLagThreshold = 0
    defaultOffsetResetPolicy           = latest
    // invalidOffset is sarama's sentinel for "no committed offset".
    invalidOffset = -1
)
// NewKafkaScaler creates a new kafkaScaler
// It parses the trigger config, builds the sarama client/admin pair, and
// returns a ready-to-use Scaler.
func NewKafkaScaler(config *ScalerConfig) (Scaler, error) {
    metricType, err := GetMetricTargetType(config)
    if err != nil {
        return nil, fmt.Errorf("error getting scaler metric type: %w", err)
    }
    logger := InitializeLogger(config, "kafka_scaler")
    kafkaMetadata, err := parseKafkaMetadata(config, logger)
    if err != nil {
        return nil, fmt.Errorf("error parsing kafka metadata: %w", err)
    }
    client, admin, err := getKafkaClients(kafkaMetadata)
    if err != nil {
        return nil, err
    }
    // Start with an empty offset history; it is filled lazily per partition.
    previousOffsets := make(map[string]map[int32]int64)
    return &kafkaScaler{
        client:          client,
        admin:           admin,
        metricType:      metricType,
        metadata:        kafkaMetadata,
        logger:          logger,
        previousOffsets: previousOffsets,
    }, nil
}
// parseKafkaAuthParams fills the SASL and TLS fields of meta from the trigger
// metadata and TriggerAuthentication params. Setting the same knob in both
// places is rejected as ambiguous.
func parseKafkaAuthParams(config *ScalerConfig, meta *kafkaMetadata) error {
    meta.saslType = KafkaSASLTypeNone
    var saslAuthType string
    switch {
    case config.TriggerMetadata["sasl"] != "":
        saslAuthType = config.TriggerMetadata["sasl"]
    default:
        saslAuthType = ""
    }
    if val, ok := config.AuthParams["sasl"]; ok {
        if saslAuthType != "" {
            return errors.New("unable to set `sasl` in both ScaledObject and TriggerAuthentication together")
        }
        saslAuthType = val
    }
    if saslAuthType != "" {
        saslAuthType = strings.TrimSpace(saslAuthType)
        mode := kafkaSaslType(saslAuthType)
        if mode == KafkaSASLTypePlaintext || mode == KafkaSASLTypeSCRAMSHA256 || mode == KafkaSASLTypeSCRAMSHA512 || mode == KafkaSASLTypeOAuthbearer {
            // All supported mechanisms require username/password credentials.
            if config.AuthParams["username"] == "" {
                return errors.New("no username given")
            }
            meta.username = strings.TrimSpace(config.AuthParams["username"])
            if config.AuthParams["password"] == "" {
                return errors.New("no password given")
            }
            meta.password = strings.TrimSpace(config.AuthParams["password"])
            meta.saslType = mode
            // OAUTHBEARER additionally needs a token endpoint; scopes and
            // extensions are optional comma-separated lists.
            if mode == KafkaSASLTypeOAuthbearer {
                meta.scopes = strings.Split(config.AuthParams["scopes"], ",")
                if config.AuthParams["oauthTokenEndpointUri"] == "" {
                    return errors.New("no oauth token endpoint uri given")
                }
                meta.oauthTokenEndpointURI = strings.TrimSpace(config.AuthParams["oauthTokenEndpointUri"])
                meta.oauthExtensions = make(map[string]string)
                oauthExtensionsRaw := config.AuthParams["oauthExtensions"]
                if oauthExtensionsRaw != "" {
                    for _, extension := range strings.Split(oauthExtensionsRaw, ",") {
                        splittedExtension := strings.Split(extension, "=")
                        if len(splittedExtension) != 2 {
                            return errors.New("invalid OAuthBearer extension, must be of format key=value")
                        }
                        meta.oauthExtensions[splittedExtension[0]] = splittedExtension[1]
                    }
                }
            }
        } else {
            return fmt.Errorf("err SASL mode %s given", mode)
        }
    }
    meta.enableTLS = false
    enableTLS := false
    // "tls" may come from trigger metadata...
    if val, ok := config.TriggerMetadata["tls"]; ok {
        switch val {
        case stringEnable:
            enableTLS = true
        case stringDisable:
            enableTLS = false
        default:
            return fmt.Errorf("error incorrect TLS value given, got %s", val)
        }
    }
    // ...or from TriggerAuthentication, but not both.
    if val, ok := config.AuthParams["tls"]; ok {
        val = strings.TrimSpace(val)
        if enableTLS {
            return errors.New("unable to set `tls` in both ScaledObject and TriggerAuthentication together")
        }
        switch val {
        case stringEnable:
            enableTLS = true
        case stringDisable:
            enableTLS = false
        default:
            return fmt.Errorf("error incorrect TLS value given, got %s", val)
        }
    }
    if enableTLS {
        // Client cert and key must be provided together (or both omitted,
        // for server-auth-only TLS).
        certGiven := config.AuthParams["cert"] != ""
        keyGiven := config.AuthParams["key"] != ""
        if certGiven && !keyGiven {
            return errors.New("key must be provided with cert")
        }
        if keyGiven && !certGiven {
            return errors.New("cert must be provided with key")
        }
        meta.ca = config.AuthParams["ca"]
        meta.cert = config.AuthParams["cert"]
        meta.key = config.AuthParams["key"]
        if value, found := config.AuthParams["keyPassword"]; found {
            meta.keyPassword = value
        } else {
            meta.keyPassword = ""
        }
        meta.enableTLS = true
    }
    return nil
}
// parseKafkaMetadata validates and converts the raw trigger metadata into a
// kafkaMetadata value, applying documented defaults for optional settings.
func parseKafkaMetadata(config *ScalerConfig, logger logr.Logger) (kafkaMetadata, error) {
    meta := kafkaMetadata{}
    // bootstrapServers: env-var indirection takes precedence over the literal.
    switch {
    case config.TriggerMetadata["bootstrapServersFromEnv"] != "":
        meta.bootstrapServers = strings.Split(config.ResolvedEnv[config.TriggerMetadata["bootstrapServersFromEnv"]], ",")
    case config.TriggerMetadata["bootstrapServers"] != "":
        meta.bootstrapServers = strings.Split(config.TriggerMetadata["bootstrapServers"], ",")
    default:
        return meta, errors.New("no bootstrapServers given")
    }
    switch {
    case config.TriggerMetadata["consumerGroupFromEnv"] != "":
        meta.group = config.ResolvedEnv[config.TriggerMetadata["consumerGroupFromEnv"]]
    case config.TriggerMetadata["consumerGroup"] != "":
        meta.group = config.TriggerMetadata["consumerGroup"]
    default:
        return meta, errors.New("no consumer group given")
    }
    // topic is optional: empty means "all topics the group subscribes to".
    switch {
    case config.TriggerMetadata["topicFromEnv"] != "":
        meta.topic = config.ResolvedEnv[config.TriggerMetadata["topicFromEnv"]]
    case config.TriggerMetadata["topic"] != "":
        meta.topic = config.TriggerMetadata["topic"]
    default:
        meta.topic = ""
        logger.V(1).Info(fmt.Sprintf("consumer group %q has no topic specified, "+
            "will use all topics subscribed by the consumer group for scaling", meta.group))
    }
    // partitionLimitation only makes sense with an explicit topic.
    meta.partitionLimitation = nil
    partitionLimitationMetadata := strings.TrimSpace(config.TriggerMetadata["partitionLimitation"])
    if partitionLimitationMetadata != "" {
        if meta.topic == "" {
            logger.V(1).Info("no specific topic set, ignoring partitionLimitation setting")
        } else {
            pattern := config.TriggerMetadata["partitionLimitation"]
            parsed, err := kedautil.ParseInt32List(pattern)
            if err != nil {
                return meta, fmt.Errorf("error parsing in partitionLimitation '%s': %w", pattern, err)
            }
            meta.partitionLimitation = parsed
            logger.V(0).Info(fmt.Sprintf("partition limit active '%s'", pattern))
        }
    }
    meta.offsetResetPolicy = defaultOffsetResetPolicy
    if config.TriggerMetadata["offsetResetPolicy"] != "" {
        policy := offsetResetPolicy(config.TriggerMetadata["offsetResetPolicy"])
        if policy != earliest && policy != latest {
            return meta, fmt.Errorf("err offsetResetPolicy policy %q given", policy)
        }
        meta.offsetResetPolicy = policy
    }
    // lagThreshold must be strictly positive (it is used as a divisor).
    meta.lagThreshold = defaultKafkaLagThreshold
    if val, ok := config.TriggerMetadata[lagThresholdMetricName]; ok {
        t, err := strconv.ParseInt(val, 10, 64)
        if err != nil {
            return meta, fmt.Errorf("error parsing %q: %w", lagThresholdMetricName, err)
        }
        if t <= 0 {
            return meta, fmt.Errorf("%q must be positive number", lagThresholdMetricName)
        }
        meta.lagThreshold = t
    }
    // activationLagThreshold may be zero (the default), but not negative.
    meta.activationLagThreshold = defaultKafkaActivationLagThreshold
    if val, ok := config.TriggerMetadata[activationLagThresholdMetricName]; ok {
        t, err := strconv.ParseInt(val, 10, 64)
        if err != nil {
            return meta, fmt.Errorf("error parsing %q: %w", activationLagThresholdMetricName, err)
        }
        if t < 0 {
            return meta, fmt.Errorf("%q must be positive number", activationLagThresholdMetricName)
        }
        meta.activationLagThreshold = t
    }
    if err := parseKafkaAuthParams(config, &meta); err != nil {
        return meta, err
    }
    meta.allowIdleConsumers = false
    if val, ok := config.TriggerMetadata["allowIdleConsumers"]; ok {
        t, err := strconv.ParseBool(val)
        if err != nil {
            return meta, fmt.Errorf("error parsing allowIdleConsumers: %w", err)
        }
        meta.allowIdleConsumers = t
    }
    meta.excludePersistentLag = false
    if val, ok := config.TriggerMetadata["excludePersistentLag"]; ok {
        t, err := strconv.ParseBool(val)
        if err != nil {
            return meta, fmt.Errorf("error parsing excludePersistentLag: %w", err)
        }
        meta.excludePersistentLag = t
    }
    meta.scaleToZeroOnInvalidOffset = false
    if val, ok := config.TriggerMetadata["scaleToZeroOnInvalidOffset"]; ok {
        t, err := strconv.ParseBool(val)
        if err != nil {
            return meta, fmt.Errorf("error parsing scaleToZeroOnInvalidOffset: %w", err)
        }
        meta.scaleToZeroOnInvalidOffset = t
    }
    // Default broker protocol version; overridable via "version".
    meta.version = sarama.V1_0_0_0
    if val, ok := config.TriggerMetadata["version"]; ok {
        val = strings.TrimSpace(val)
        version, err := sarama.ParseKafkaVersion(val)
        if err != nil {
            return meta, fmt.Errorf("error parsing kafka version: %w", err)
        }
        meta.version = version
    }
    meta.scalerIndex = config.ScalerIndex
    return meta, nil
}
// getKafkaClients builds a sarama client and a cluster admin sharing that
// client, configured with the parsed SASL/TLS settings.
func getKafkaClients(metadata kafkaMetadata) (sarama.Client, sarama.ClusterAdmin, error) {
    config := sarama.NewConfig()
    config.Version = metadata.version
    if metadata.saslType != KafkaSASLTypeNone {
        config.Net.SASL.Enable = true
        config.Net.SASL.User = metadata.username
        config.Net.SASL.Password = metadata.password
    }
    if metadata.enableTLS {
        config.Net.TLS.Enable = true
        tlsConfig, err := kedautil.NewTLSConfigWithPassword(metadata.cert, metadata.key, metadata.keyPassword, metadata.ca, false)
        if err != nil {
            return nil, nil, err
        }
        config.Net.TLS.Config = tlsConfig
    }
    // Select the SASL mechanism matching the configured type.
    if metadata.saslType == KafkaSASLTypePlaintext {
        config.Net.SASL.Mechanism = sarama.SASLTypePlaintext
    }
    if metadata.saslType == KafkaSASLTypeSCRAMSHA256 {
        config.Net.SASL.SCRAMClientGeneratorFunc = func() sarama.SCRAMClient { return &XDGSCRAMClient{HashGeneratorFcn: SHA256} }
        config.Net.SASL.Mechanism = sarama.SASLTypeSCRAMSHA256
    }
    if metadata.saslType == KafkaSASLTypeSCRAMSHA512 {
        config.Net.SASL.SCRAMClientGeneratorFunc = func() sarama.SCRAMClient { return &XDGSCRAMClient{HashGeneratorFcn: SHA512} }
        config.Net.SASL.Mechanism = sarama.SASLTypeSCRAMSHA512
    }
    if metadata.saslType == KafkaSASLTypeOAuthbearer {
        config.Net.SASL.Mechanism = sarama.SASLTypeOAuth
        config.Net.SASL.TokenProvider = OAuthBearerTokenProvider(metadata.username, metadata.password, metadata.oauthTokenEndpointURI, metadata.scopes, metadata.oauthExtensions)
    }
    client, err := sarama.NewClient(metadata.bootstrapServers, config)
    if err != nil {
        return nil, nil, fmt.Errorf("error creating kafka client: %w", err)
    }
    admin, err := sarama.NewClusterAdminFromClient(client)
    if err != nil {
        // Don't leak the client if admin construction fails.
        if !client.Closed() {
            client.Close()
        }
        return nil, nil, fmt.Errorf("error creating kafka admin: %w", err)
    }
    return client, admin, nil
}
// getTopicPartitions resolves the set of topic -> active partition IDs to
// monitor, honoring the optional partitionLimitation filter.
func (s *kafkaScaler) getTopicPartitions() (map[string][]int32, error) {
    var topicsToDescribe = make([]string, 0)
    // when no topic is specified, query to cg group to fetch all subscribed topics
    if s.metadata.topic == "" {
        listCGOffsetResponse, err := s.admin.ListConsumerGroupOffsets(s.metadata.group, nil)
        if err != nil {
            return nil, fmt.Errorf("error listing cg offset: %w", err)
        }
        // Non-fatal: a response-level error is logged but the blocks may still
        // name the subscribed topics.
        if listCGOffsetResponse.Err > 0 {
            errMsg := fmt.Errorf("error listing cg offset: %w", listCGOffsetResponse.Err)
            s.logger.Error(errMsg, "")
        }
        for topicName := range listCGOffsetResponse.Blocks {
            topicsToDescribe = append(topicsToDescribe, topicName)
        }
    } else {
        topicsToDescribe = []string{s.metadata.topic}
    }
    topicsMetadata, err := s.admin.DescribeTopics(topicsToDescribe)
    if err != nil {
        return nil, fmt.Errorf("error describing topics: %w", err)
    }
    if s.metadata.topic != "" && len(topicsMetadata) != 1 {
        return nil, fmt.Errorf("expected only 1 topic metadata, got %d", len(topicsMetadata))
    }
    topicPartitions := make(map[string][]int32, len(topicsMetadata))
    for _, topicMetadata := range topicsMetadata {
        if topicMetadata.Err > 0 {
            errMsg := fmt.Errorf("error describing topics: %w", topicMetadata.Err)
            s.logger.Error(errMsg, "")
        }
        partitionMetadata := topicMetadata.Partitions
        var partitions []int32
        for _, p := range partitionMetadata {
            if s.isActivePartition(p.ID) {
                partitions = append(partitions, p.ID)
            }
        }
        // A topic with every partition filtered out is a configuration error.
        if len(partitions) == 0 {
            return nil, fmt.Errorf("expected at least one active partition within the topic '%s'", topicMetadata.Name)
        }
        topicPartitions[topicMetadata.Name] = partitions
    }
    return topicPartitions, nil
}
// isActivePartition reports whether partition pID takes part in scaling.
// When no partitionLimitation is configured, every partition is active.
func (s *kafkaScaler) isActivePartition(pID int32) bool {
    limitation := s.metadata.partitionLimitation
    if limitation == nil {
        return true
    }
    for i := range limitation {
        if limitation[i] == pID {
            return true
        }
    }
    return false
}
// getConsumerOffsets fetches the committed offsets of the configured consumer
// group for the given topic/partition set. A response-level error is logged
// but not fatal — per-partition blocks may still be valid.
func (s *kafkaScaler) getConsumerOffsets(topicPartitions map[string][]int32) (*sarama.OffsetFetchResponse, error) {
    offsets, err := s.admin.ListConsumerGroupOffsets(s.metadata.group, topicPartitions)
    if err != nil {
        return nil, fmt.Errorf("error listing consumer group offsets: %w", err)
    }
    if offsets.Err > 0 {
        errMsg := fmt.Errorf("error listing consumer group offsets: %w", offsets.Err)
        s.logger.Error(errMsg, "")
    }
    return offsets, nil
}
}
// getLagForPartition returns (lag, lagWithPersistent, error)
// When excludePersistentLag is set to `false` (default), lag will always be equal to lagWithPersistent
// When excludePersistentLag is set to `true`, if partition is deemed to have persistent lag, lag will be set to 0 and lagWithPersistent will be latestOffset - consumerOffset
// These return values will allow proper scaling from 0 -> 1 replicas by the IsActive func.
func (s *kafkaScaler) getLagForPartition(topic string, partitionID int32, offsets *sarama.OffsetFetchResponse, topicPartitionOffsets map[string]map[int32]int64) (int64, int64, error) {
block := offsets.GetBlock(topic, partitionID)
if block == nil {
errMsg := fmt.Errorf("error finding offset block for topic %s and partition %d from offset block: %v", topic, partitionID, offsets.Blocks)
s.logger.Error(errMsg, "")
return 0, 0, errMsg
}
if block.Err > 0 {
errMsg := fmt.Errorf("error finding offset block for topic %s and partition %d: %w", topic, partitionID, offsets.Err)
s.logger.Error(errMsg, "")
}
consumerOffset := block.Offset
if consumerOffset == invalidOffset && s.metadata.offsetResetPolicy == latest {
retVal := int64(1)
if s.metadata.scaleToZeroOnInvalidOffset {
retVal = 0
}
msg := fmt.Sprintf(
"invalid offset found for topic %s in group %s and partition %d, probably no offset is committed yet. Returning with lag of %d",
topic, s.metadata.group, partitionID, retVal)
s.logger.V(1).Info(msg)
return retVal, retVal, nil
}
if _, found := topicPartitionOffsets[topic]; !found {
return 0, 0, fmt.Errorf("error finding partition offset for topic %s", topic)
}
latestOffset := topicPartitionOffsets[topic][partitionID]
if consumerOffset == invalidOffset && s.metadata.offsetResetPolicy == earliest {
return latestOffset, latestOffset, nil
}
// This code block tries to prevent KEDA Kafka trigger from scaling the scale target based on erroneous events
if s.metadata.excludePersistentLag {
switch previousOffset, found := s.previousOffsets[topic][partitionID]; {
case !found:
// No record of previous offset, so store current consumer offset
// Allow this consumer lag to be considered in scaling
if _, topicFound := s.previousOffsets[topic]; !topicFound {
s.previousOffsets[topic] = map[int32]int64{partitionID: consumerOffset}
} else {
s.previousOffsets[topic][partitionID] = consumerOffset
}
case previousOffset == consumerOffset:
// Indicates consumer is still on the same offset as the previous polling cycle, there may be some issue with consuming this offset.
// return 0, so this consumer lag is not considered for scaling
return 0, latestOffset - consumerOffset, nil
default:
// Successfully Consumed some messages, proceed to change the previous offset
s.previousOffsets[topic][partitionID] = consumerOffset
}
}
return latestOffset - consumerOffset, latestOffset - consumerOffset, nil
}
// Close closes the kafka admin and client
func (s *kafkaScaler) Close(context.Context) error {
    // underlying client will also be closed on admin's Close() call
    // A nil admin means construction failed or Close was already called.
    if s.admin == nil {
        return nil
    }
    return s.admin.Close()
}
// GetMetricSpecForScaling builds the external metric spec handed to the HPA;
// the metric name is topic-scoped when a topic is set, group-scoped otherwise.
func (s *kafkaScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec {
    var metricName string
    if s.metadata.topic != "" {
        metricName = fmt.Sprintf("kafka-%s", s.metadata.topic)
    } else {
        metricName = fmt.Sprintf("kafka-%s-topics", s.metadata.group)
    }
    externalMetric := &v2.ExternalMetricSource{
        Metric: v2.MetricIdentifier{
            Name: GenerateMetricNameWithIndex(s.metadata.scalerIndex, kedautil.NormalizeString(metricName)),
        },
        Target: GetMetricTarget(s.metricType, s.metadata.lagThreshold),
    }
    metricSpec := v2.MetricSpec{External: externalMetric, Type: kafkaMetricType}
    return []v2.MetricSpec{metricSpec}
}
// consumerOffsetResult carries committed consumer-group offsets (or the fetch
// error) across the goroutine boundary in getConsumerAndProducerOffsets.
type consumerOffsetResult struct {
    consumerOffsets *sarama.OffsetFetchResponse
    err             error
}

// producerOffsetResult carries latest partition offsets keyed by topic and
// partition (or the fetch error).
type producerOffsetResult struct {
    producerOffsets map[string]map[int32]int64
    err             error
}
// getConsumerAndProducerOffsets fetches committed consumer offsets and newest
// producer offsets concurrently; buffered channels let either goroutine exit
// even if the caller bails out on the other's error.
func (s *kafkaScaler) getConsumerAndProducerOffsets(topicPartitions map[string][]int32) (*sarama.OffsetFetchResponse, map[string]map[int32]int64, error) {
    consumerChan := make(chan consumerOffsetResult, 1)
    go func() {
        consumerOffsets, err := s.getConsumerOffsets(topicPartitions)
        consumerChan <- consumerOffsetResult{consumerOffsets, err}
    }()
    producerChan := make(chan producerOffsetResult, 1)
    go func() {
        producerOffsets, err := s.getProducerOffsets(topicPartitions)
        producerChan <- producerOffsetResult{producerOffsets, err}
    }()
    consumerRes := <-consumerChan
    if consumerRes.err != nil {
        return nil, nil, consumerRes.err
    }
    producerRes := <-producerChan
    if producerRes.err != nil {
        return nil, nil, producerRes.err
    }
    return consumerRes.consumerOffsets, producerRes.producerOffsets, nil
}
// GetMetricsAndActivity returns value for a supported metric and an error if there is a problem getting the metric
func (s *kafkaScaler) GetMetricsAndActivity(_ context.Context, metricName string) ([]external_metrics.ExternalMetricValue, bool, error) {
    totalLag, totalLagWithPersistent, err := s.getTotalLag()
    if err != nil {
        return []external_metrics.ExternalMetricValue{}, false, err
    }
    metric := GenerateMetricInMili(metricName, float64(totalLag))
    // Activity uses lag *including* persistent lag so a stuck consumer still
    // keeps the scaler active for 0 -> 1 scaling.
    return []external_metrics.ExternalMetricValue{metric}, totalLagWithPersistent > s.metadata.activationLagThreshold, nil
}
// getTotalLag returns totalLag, totalLagWithPersistent, error
// totalLag and totalLagWithPersistent are the summations of lag and lagWithPersistent returned by getLagForPartition function respectively.
// totalLag maybe less than totalLagWithPersistent when excludePersistentLag is set to `true` due to some partitions deemed as having persistent lag
func (s *kafkaScaler) getTotalLag() (int64, int64, error) {
topicPartitions, err := s.getTopicPartitions()
if err != nil {
return 0, 0, err
}
consumerOffsets, producerOffsets, err := s.getConsumerAndProducerOffsets(topicPartitions)
if err != nil {
return 0, 0, err |
totalLag := int64(0)
totalLagWithPersistent := int64(0)
totalTopicPartitions := int64(0)
for topic, partitionsOffsets := range producerOffsets {
for partition := range partitionsOffsets {
lag, lagWithPersistent, err := s.getLagForPartition(topic, partition, consumerOffsets, producerOffsets)
if err != nil {
return 0, 0, err
}
totalLag += lag
totalLagWithPersistent += lagWithPersistent
}
totalTopicPartitions += (int64)(len(partitionsOffsets))
}
s.logger.V(1).Info(fmt.Sprintf("Kafka scaler: Providing metrics based on totalLag %v, topicPartitions %v, threshold %v", totalLag, len(topicPartitions), s.metadata.lagThreshold))
if !s.metadata.allowIdleConsumers {
// don't scale out beyond the number of topicPartitions
if (totalLag / s.metadata.lagThreshold) > totalTopicPartitions {
totalLag = totalTopicPartitions * s.metadata.lagThreshold
}
}
return totalLag, totalLagWithPersistent, nil
}
// brokerOffsetResult is one broker's reply (or error) to an OffsetRequest,
// funneled through a channel by getProducerOffsets.
type brokerOffsetResult struct {
    offsetResp *sarama.OffsetResponse
    err        error
}
// getProducerOffsets returns the newest (log-end) offset for every requested
// topic/partition, querying each partition's leader broker in parallel.
func (s *kafkaScaler) getProducerOffsets(topicPartitions map[string][]int32) (map[string]map[int32]int64, error) {
    // OffsetRequest v1 requires broker >= 0.10.1.0; fall back to v0 otherwise.
    version := int16(0)
    if s.client.Config().Version.IsAtLeast(sarama.V0_10_1_0) {
        version = 1
    }
    // Step 1: build one OffsetRequest instance per broker.
    requests := make(map[*sarama.Broker]*sarama.OffsetRequest)
    for topic, partitions := range topicPartitions {
        for _, partitionID := range partitions {
            broker, err := s.client.Leader(topic, partitionID)
            if err != nil {
                return nil, err
            }
            request, ok := requests[broker]
            if !ok {
                request = &sarama.OffsetRequest{Version: version}
                requests[broker] = request
            }
            request.AddBlock(topic, partitionID, sarama.OffsetNewest, 1)
        }
    }
    // Step 2: send requests, one per broker, and collect topicPartitionsOffsets
    resultCh := make(chan brokerOffsetResult, len(requests))
    var wg sync.WaitGroup
    wg.Add(len(requests))
    for broker, request := range requests {
        go func(brCopy *sarama.Broker, reqCopy *sarama.OffsetRequest) {
            defer wg.Done()
            response, err := brCopy.GetAvailableOffsets(reqCopy)
            resultCh <- brokerOffsetResult{response, err}
        }(broker, request)
    }
    // Channel capacity == len(requests), so all sends complete before drain.
    wg.Wait()
    close(resultCh)
    topicPartitionsOffsets := make(map[string]map[int32]int64)
    for brokerOffsetRes := range resultCh {
        if brokerOffsetRes.err != nil {
            return nil, brokerOffsetRes.err
        }
        for topic, blocks := range brokerOffsetRes.offsetResp.Blocks {
            if _, found := topicPartitionsOffsets[topic]; !found {
                topicPartitionsOffsets[topic] = make(map[int32]int64)
            }
            for partitionID, block := range blocks {
                if block.Err != sarama.ErrNoError {
                    return nil, block.Err
                }
                topicPartitionsOffsets[topic][partitionID] = block.Offset
            }
        }
    }
    return topicPartitionsOffsets, nil
} | } | random_line_split |
auto.go | package crawl
import (
"bytes"
"encoding/json"
"errors"
"strconv"
"sync"
"sync/atomic"
"time"
. "./base"
"./robot"
"./store"
"github.com/golang/glog"
)
const (
    // tickPeriod is the realtime polling interval during trading hours.
    tickPeriod = 5 * time.Second
    // minPlay is the minimum play speed; a play value > minPlay switches the
    // Stocks runner into tick-replay mode.
    minPlay = 1
)

// market_begin_day marks the earliest supported market date (1990-12-19),
// initialized once at package load.
var market_begin_day time.Time

func init() {
    market_begin_day, _ = time.Parse("2006-01-02", "1990-12-19")
}
// Stock aggregates one instrument's candle series at every supported period
// plus its raw tick stream. rw guards all mutable state.
type Stock struct {
    Id     string `json:"id"`
    M1s    Tdatas `json:"m1s"`
    M5s    Tdatas `json:"m5s"`
    M30s   Tdatas `json:"m30s"`
    Days   Tdatas `json:"days"`
    Weeks  Tdatas `json:"weeks"`
    Months Tdatas `json:"months"`
    Ticks  Ticks  `json:"-"`
    // last_tick caches the most recent realtime tick.
    last_tick RealtimeTick
    hash      int
    // count is a watcher refcount, manipulated atomically.
    count int32
    // loaded tracks load progress; values >= 2 appear to mean "history fully
    // loaded" — TODO confirm against the loader.
    loaded int32
    // broadcast records whether a full snapshot was already sent (see MarshalTail).
    broadcast bool
    lst_trade time.Time
    rw        sync.RWMutex
    Name      string
}
// MarshalTail serializes the stock to JSON under a read lock. On the first
// call (or when tail is false) the full candle history is emitted and the
// broadcast flag is set; subsequent tail calls emit only recent candles.
// NOTE(review): broadcast is written while holding only the read lock — racy
// if MarshalTail runs concurrently; confirm callers serialize access.
func (p *Stock) MarshalTail(tail bool) ([]byte, error) {
    p.rw.RLock()
    defer p.rw.RUnlock()
    s := Stock{
        Id:   p.Id,
        Name: p.Name,
    }
    if !tail || !p.broadcast {
        p.broadcast = true
        // full
        p.M1s.tail(&s.M1s, 0)
        p.M5s.tail(&s.M5s, 0)
        p.M30s.tail(&s.M30s, 0)
        p.Days.tail(&s.Days, 0)
        p.Weeks.tail(&s.Weeks, 0)
        p.Months.tail(&s.Months, 0)
    } else {
        // tail
        p.M1s.tail(&s.M1s, 240)
        p.M5s.tail(&s.M5s, 60)
        p.M30s.tail(&s.M30s, 8)
        p.Days.tail(&s.Days, 8)
        p.Weeks.tail(&s.Weeks, 8)
        p.Months.tail(&s.Months, 8)
    }
    return json.Marshal(s)
}
// NewStock builds a Stock with refcount 1 and chains its period series so each
// level feeds the next (m1 -> m5 -> m30 -> day -> week -> month).
func NewStock(id string, hub_height int) *Stock {
    p := &Stock{
        Id:    id,
        hash:  StockHash(id),
        count: 1,
    }
    p.M1s.Init(hub_height, id+" f1", nil, &p.M5s)
    p.M5s.Init(hub_height, id+" f5", &p.M1s, &p.M30s)
    p.M30s.Init(hub_height, id+" f30", &p.M5s, &p.Days)
    p.Days.Init(hub_height, id+" day", &p.M30s, &p.Weeks)
    p.Weeks.Init(hub_height, id+" week", &p.Days, &p.Months)
    p.Months.Init(hub_height, id+" month", &p.Weeks, nil)
    return p
}
// Stocks is the set of watched stocks, kept sorted by id; rwmutex guards the
// slice, per-stock state is guarded by each Stock's own lock/atomics.
type Stocks struct {
    stocks  PStockSlice
    rwmutex sync.RWMutex
    store   store.Store
    // play > minPlay selects replay mode (see Run); it is also the replay
    // delay in milliseconds.
    play int
    ch   chan *Stock
    min_hub_height int
}

// NewStocks builds a Stocks backed by the named store; negative
// min_hub_height values are clamped to 0.
func NewStocks(storestr string, play, min_hub_height int) *Stocks {
    store := store.Get(storestr)
    if min_hub_height < 0 {
        min_hub_height = 0
    }
    return &Stocks{
        min_hub_height: min_hub_height,
        store:          store,
        play:           play,
    }
}
// Store exposes the backing store.
func (p *Stocks) Store() store.Store { return p.store }

// Run is the main loop: in replay mode (play > minPlay) it steps recorded
// ticks at the configured millisecond rate and never returns to live mode;
// otherwise it polls realtime ticks every tickPeriod during trade hours.
func (p *Stocks) Run() {
    if p.play > minPlay {
        for {
            p.play_next_tick()
            time.Sleep(time.Duration(p.play) * time.Millisecond)
        }
    }
    robot.Work()
    for {
        if IsTradeTime(time.Now()) {
            p.Ticks_update_real()
        }
        time.Sleep(tickPeriod)
    }
}
// Chan registers the channel that receives stocks whose data changed.
func (p *Stocks) Chan(ch chan *Stock) {
    p.ch = ch
}

// res publishes a changed stock to the registered channel, if any.
// NOTE(review): the send blocks when the channel is full/unread — confirm the
// consumer always drains it.
func (p *Stocks) res(stock *Stock) {
    if p.ch != nil {
        p.ch <- stock
    }
}

// update refreshes one stock from the store and publishes it on change.
func (p *Stocks) update(s *Stock) {
    if s.Update(p.store, p.play > minPlay) {
        p.res(s)
    }
}
// Insert returns (index, stock, isNew). An existing stock only gets its
// watcher refcount bumped; otherwise a new Stock is inserted keeping the
// slice sorted by id.
func (p *Stocks) Insert(id string) (int, *Stock, bool) {
    p.rwmutex.RLock()
    i, ok := p.stocks.Search(id)
    if ok {
        s := p.stocks[i]
        p.rwmutex.RUnlock()
        // Revive a stock whose refcount dropped below 1 while unwatched.
        if atomic.AddInt32(&s.count, 1) < 1 {
            atomic.StoreInt32(&s.count, 1)
        }
        return i, s, false
    }
    s := NewStock(id, p.min_hub_height)
    p.rwmutex.RUnlock()
    p.rwmutex.Lock()
    defer p.rwmutex.Unlock()
    // Fix: re-search under the write lock. Between RUnlock and Lock another
    // goroutine may have inserted id (or shifted indexes), so the index from
    // the read-locked search could be stale, producing duplicates or a
    // misplaced entry that breaks the sorted order.
    if j, found := p.stocks.Search(id); found {
        existing := p.stocks[j]
        if atomic.AddInt32(&existing.count, 1) < 1 {
            atomic.StoreInt32(&existing.count, 1)
        }
        return j, existing, false
    } else {
        i = j
    }
    if i < 1 {
        p.stocks = append(PStockSlice{s}, p.stocks...)
        return 0, s, true
    } else if i >= p.stocks.Len() {
        p.stocks = append(p.stocks, s)
        return p.stocks.Len() - 1, s, true
    }
    p.stocks = append(p.stocks, s)
    copy(p.stocks[i+1:], p.stocks[i:])
    p.stocks[i] = s
    return i, s, true
}
func (p *Stocks) Remove(id string) {
p.rwmutex.RLock()
defer p.rwmutex.RUnlock()
if i, ok := p.stocks.Search(id); ok {
atomic.AddInt32(&p.stocks[i].count, -1)
}
}
func (p *Stocks) Watch(id string) (*Stock, bool) {
i, s, isnew := p.Insert(id)
if isnew {
go p.update(s)
glog.V(LogV).Infof("watch new stock id=%s index=%d", id, i)
} else {
glog.V(LogV).Infof("watch stock id=%s index=%d count=%d", id, i, s.count)
}
return s, isnew
}
func (p *Stocks) UnWatch(id string) {
p.Remove(id)
}
func (p *Stocks) Find_need_update_tick_ids() (pstocks PStockSlice) {
p.rwmutex.RLock()
defer p.rwmutex.RUnlock()
for i, l := 0, len(p.stocks); i < l; i++ {
if atomic.LoadInt32(&p.stocks[i].loaded) < 2 {
continue
}
pstocks = append(pstocks, p.stocks[i])
}
return
}
func (p *Stocks) play_next_tick() {
p.rwmutex.RLock()
defer p.rwmutex.RUnlock()
for i, l := 0, len(p.stocks); i < l; i++ {
if atomic.LoadInt32(&p.stocks[i].loaded) < 2 {
continue
}
if atomic.LoadInt32(&p.stocks[i].count) < 1 {
continue
}
p.stocks[i].rw.Lock()
if p.stocks[i].Ticks.play == nil || len(p.stocks[i].Ticks.play) < 1 {
p.stocks[i].Ticks.play = p.stocks[i].Ticks.Data
p.stocks[i].Ticks.Data = []Tick{}
if len(p.stocks[i].Ticks.play) > 240 {
p.stocks[i].Ticks.Data = p.stocks[i].Ticks.play[:240]
}
}
lplay := len(p.stocks[i].Ticks.play)
ldata := len(p.stocks[i].Ticks.Data)
if ldata < lplay {
p.stocks[i].Ticks.Data = p.stocks[i].Ticks.play[:ldata+1]
p.stocks[i].Merge(false, p.store)
p.res(p.stocks[i])
}
p.stocks[i].rw.Unlock()
}
}
func (p *Stocks) Ticks_update_real() {
var wg sync.WaitGroup
stocks := p.Find_need_update_tick_ids()
l := len(stocks)
if l < 1 {
return
}
for i := 0; i < l; {
var b bytes.Buffer
var pstocks PStockSlice
step := 50
if i+step < l {
pstocks = stocks[i : i+step]
} else {
pstocks = stocks[i:l]
}
for j := 0; j < step && i < l; i, j = i+1, j+1 {
if b.Len() > 0 {
b.WriteString(",")
}
b.WriteString(stocks[i].Id)
}
if b.Len() < 1 {
continue
}
wg.Add(1)
go func(ids string, pstocks PStockSlice) {
defer wg.Done()
body := robot.Tick_download_real_from_sina(ids)
if body == nil {
return
}
for _, line := range bytes.Split(body, []byte("\";")) {
line = bytes.TrimSpace(line)
info := bytes.Split(line, []byte("=\""))
if len(info) != 2 {
continue
}
prefix := "var hq_str_"
if !bytes.HasPrefix(info[0], []byte(prefix)) {
continue
}
id := info[0][len(prefix):]
if idx, ok := pstocks.Search(string(id)); ok {
if pstocks[idx].tick_get_real(info[1]) {
pstocks[idx].Merge(false, p.store)
p.res(pstocks[idx])
}
}
}
}(b.String(), pstocks)
}
wg.Wait()
}
func StockHash(id string) int {
for i, c := range []byte(id) {
if c >= '0' && c <= '9' {
i, _ = strconv.Atoi(id[i:])
return i
}
}
return 0
}
func (p *Stock) Merge(day bool, store store.Store) {
m1_fresh_index := p.Ticks2M1s()
m5_fresh_index := p.M5s.MergeFrom(&p.M1s, false, Minute5end)
m30_fresh_index := p.M30s.MergeFrom(&p.M1s, false, Minute30end)
if day {
p.Ticks.clean()
td, _ := store.LoadMacd(p.Id, L1, p.M1s.start)
p.M1s.Macd(m1_fresh_index, td)
store.SaveMacds(p.Id, L1, p.M1s.Data)
td, _ = store.LoadMacd(p.Id, L5, p.M1s.start)
p.M5s.Macd(m5_fresh_index, td)
store.SaveMacds(p.Id, L5, p.M5s.Data)
td, _ = store.LoadMacd(p.Id, L30, p.M1s.start)
p.M30s.Macd(m30_fresh_index, td)
store.SaveMacds(p.Id, L30, p.M30s.Data)
} else {
p.M1s.Macd(m1_fresh_index, nil)
p.M5s.Macd(m5_fresh_index, nil)
p.M30s.Macd(m30_fresh_index, nil)
}
p.M1s.ParseChan()
p.M5s.ParseChan()
p.M30s.ParseChan()
if day {
p.Weeks.MergeFrom(&p.Days, true, Weekend)
p.Months.MergeFrom(&p.Days, true, Monthend)
td, _ := store.LoadMacd(p.Id, LDay, p.Days.start)
p.Days.Macd(0, td)
store.SaveMacds(p.Id, LDay, p.Days.Data)
td, _ = store.LoadMacd(p.Id, LWeek, p.Days.start)
p.Weeks.Macd(0, td)
store.SaveMacds(p.Id, LWeek, p.Weeks.Data)
td, _ = store.LoadMacd(p.Id, LMonth, p.Days.start)
p.Months.Macd(0, td)
store.SaveMacds(p.Id, LMonth, p.Months.Data)
p.Days.ParseChan()
p.Weeks.ParseChan()
p.Months.ParseChan()
}
}
func (p *Tdatas) ParseChan() {
if p.base == nil {
p.ParseTyping()
p.Typing.LinkTyping()
p.ParseSegment()
p.Segment.LinkTyping()
}
p.ParseHub()
p.LinkHub()
}
func (p *Stock) Update(store store.Store, play bool) bool {
if !atomic.CompareAndSwapInt32(&p.loaded, 0, 1) {
return false
}
p.Days_update(store)
p.Ticks_update(store)
p.Ticks_today_update()
if play {
glog.Warningln("WITH PLAY MODE")
} else {
p.Merge(true, store)
}
atomic.StoreInt32(&p.loaded, 2)
return true
}
func (p *Stock) days_download(t time.Time) ([]int, error) {
inds := []int{}
tds, err := robot.Days_download(p.Id, t)
if err != nil {
return inds, err
}
for i, count := 0, len(tds); i < count; i++ {
ind, isnew := p.Days.Add(tds[i])
if isnew || ind > 0 {
inds = append(inds, ind)
}
}
return inds, nil
}
func (p *Stock) Days_update(store store.Store) int {
c := Day_collection_name(p.Id)
p.Days.start = store.GetStartTime(p.Id, LDay)
p.Days.Data, _ = store.LoadTDatas(c, p.Days.start)
t := p.Days.latest_time()
now := time.Now().AddDate(0, 0, -1).UTC().Truncate(time.Hour * 24)
for !IsTradeDay(now) {
now = now.AddDate(0, 0, -1)
}
if t.Equal(now) || t.After(now) {
return 0
}
inds, _ := p.days_download(t)
if len(inds) > 0 {
store.SaveTDatas(c, p.Days.Data, inds)
factor := p.Days.Factor()
store.UpdateFactor(p.Id, factor)
}
return len(inds)
}
func (p *Stock) Ticks_update(store store.Store) int {
c := Tick_collection_name(p.Id)
p.M1s.start = store.GetStartTime(p.Id, L1)
p.Ticks.Data, _ = store.LoadTicks(c, p.M1s.start)
begin_time := p.M1s.start
l := len(p.Ticks.Data)
if l > 0 {
begin_time = p.Ticks.Data[0].Time
}
now := time.Now().UTC()
end_time := now.Truncate(time.Hour * 24)
if now.Hour() > 10 {
end_time = end_time.AddDate(0, 0, 1)
}
if begin_time.Equal(market_begin_day) {
begin_time = end_time.AddDate(0, -2, -1)
}
begin_time = begin_time.AddDate(0, 0, 1).Truncate(time.Hour * 24)
daylen := len(p.Days.Data)
if daylen < 1 {
return 0
}
i, _ := ((TdataSlice)(p.Days.Data)).Search(begin_time)
glog.V(LogV).Infof("from %d/%d %s begin_time=%s end_time=%s", i, daylen, p.M1s.start, begin_time, end_time)
var t time.Time
for ; i <= daylen; i++ {
if i < daylen {
t = p.Days.Data[i].Time
} else if i == daylen {
t = p.Days.Data[i-1].Time.AddDate(0, 0, 1)
}
if !end_time.After(t) {
glog.V(LogV).Infoln(t, "reach end_time", end_time)
break
}
if p.Ticks.hasTimeData(t) {
continue
}
glog.V(LogV).Infoln("prepare download ticks", t)
if ticks, err := p.ticks_download(t); ticks != nil {
for j, _ := range ticks {
p.Ticks.Add(ticks[j])
}
store.SaveTicks(c, ticks)
glog.V(LogV).Infoln("download ticks succ", t)
} else if err != nil {
glog.V(LogD).Infoln("download ticks err", err)
}
}
count := len(p.Ticks.Data)
glog.V(LogV).Infof("download ticks %d/%d", count-l, count)
return count - l
}
/*
func (p *Tdata) parse_mins_from_sina(line []byte) error {
items := [6]string{"day:", "open:", "high:", "close:", "low:", "volume:"}
v := [6]string{}
line = bytes.TrimSpace(line)
line = bytes.Trim(line, "[{}]")
infos := bytes.Split(line, []byte(","))
if len(infos) != 6 {
return errors.New("could not parse line " + string(line))
}
for i, item := range items {
v[i] = ""
for _, info := range infos {
if bytes.HasPrefix(info, []byte(item)) {
info = bytes.TrimPrefix(info, []byte(item))
info = bytes.Trim(info, "\"")
v[i] = string(info)
}
}
}
p.FromString(v[0], v[1], v[2], v[3], v[4], v[5])
return nil
}
*/
var UnknowSinaRes error = errors.New("could not find '成交时间' in head line")
func (p *Stock) ticks_download(t time.Time) ([]Tick, error) {
body := robot.Tick_download_from_sina(p.Id, t)
if body == nil {
return nil, UnknowSinaRes
}
body = bytes.TrimSpace(body)
lines := bytes.Split(body, []byte("\n"))
count := len(lines) - 1
if count < 1 {
return nil, UnknowSinaRes
}
if bytes.Contains(lines[0], []byte("script")) {
return nil, UnknowSinaRes
}
if !bytes.Contains(lines[0], []byte("成交时间")) {
return nil, UnknowSinaRes
}
ticks := make([]Tick, count)
for i := count; i > 0; i-- {
line := bytes.TrimSpace(lines[i])
infos := bytes.Split(line, []byte("\t"))
if len(infos) != 6 {
err := errors.New("could not parse line " + string(line))
return nil, err
}
ticks[count-i].FromString(t, infos[0], infos[1], infos[2],
infos[3], infos[4], infos[5])
}
FixTickTime(ticks)
return ticks, nil
}
func (p *Stock) Ticks_today_update() int {
l := len(p.Ticks.Data)
now := time.Now().UTC()
if !IsTradeDay(now) {
return 0
}
nhour := now.Hour()
if nhour < 1 || nhour > 10 {
return 0
}
p.ticks_get_today()
count := len(p.Ticks.Data)
return count - l
}
func (p *Stock) ticks_get_today() bool {
last_t, name, err := Tick_get_today_date(p.Id)
if err != nil {
glog.Warningln("get today date fail", err)
return false
}
p.Name = name
t := time.Now().UTC().Truncate(time.Hour * 24)
if t.After(last_t) {
return false
}
body := robot.Tick_download_today_from_sina(p.Id)
if body == nil {
return false
}
body = bytes.TrimSpace(body)
lines := bytes.Split(body, []byte("\n"))
ticks := []Tick{}
tick := Tick{}
nul := []byte("")
for i := len(lines) - 1; i > 0; i-- {
line := bytes.TrimSpace(lines[i])
line = bytes.Trim(line, ");")
infos := bytes.Split(line, []byte("] = new Array("))
if len(infos) != 2 {
continue
}
line = bytes.Replace(infos[1], []byte(" "), nul, -1)
line = bytes.Replace(line, []byte("'"), nul, -1)
infos = bytes.Split(line, []byte(","))
if len(infos) != 4 {
continue
}
tick.FromString(t, infos[0], infos[2], nul, infos[1], nul, infos[3])
if tick.Volume == 0 && tick.Price == 0 {
continue
}
ticks = append(ticks, tick)
}
FixTickTime(ticks)
FixTickData(ticks)
for _, tick := range ticks {
p.Ticks.Add(tick)
}
return true
}
func (p *Stock) tick_get_real(line []byte) bool {
infos := byte | s.Split(line, []byte(","))
if len(infos) < 33 {
glog.Warningln("sina hq api, res format changed")
return false
}
p.Name = string(infos[0])
nul := []byte("")
tick := RealtimeTick{}
t, _ := time.Parse("2006-01-02", string(infos[30]))
tick.FromString(t, infos[31], infos[3], nul, infos[8], infos[9], nul)
tick.Buyone = ParseCent(string(infos[11]))
tick.Sellone = ParseCent(string(infos[21]))
tick.SetStatus(infos[32])
if p.last_tick.Volume == 0 {
p.last_tick = tick
if tick.Time.Before(p.lst_trade) {
p.last_tick.Volume = 0
}
return false
}
if tick.Volume != p.last_tick.Volume {
if tick.Price >= p.last_tick.Sellone {
tick.Type = Buy_tick
} else if tick.Price <= p.last_tick.Buyone {
tick.Type = Sell_tick
} else {
tick.Type = Eq_tick
}
tick.Change = tick.Price - p.last_tick.Price
volume := (tick.Volume - p.last_tick.Volume) / 100
p.last_tick = tick
tick.Volume = volume
p.Ticks.Add(tick.Tick)
p.lst_trade = tick.Time
return true
}
return false
}
| identifier_body | |
auto.go | package crawl
import (
"bytes"
"encoding/json"
"errors"
"strconv"
"sync"
"sync/atomic"
"time"
. "./base"
"./robot"
"./store"
"github.com/golang/glog"
)
const (
tickPeriod = 5 * time.Second
minPlay = 1
)
var market_begin_day time.Time
func init() {
market_begin_day, _ = time.Parse("2006-01-02", "1990-12-19")
}
type Stock struct {
Id string `json:"id"`
M1s Tdatas `json:"m1s"`
M5s Tdatas `json:"m5s"`
M30s Tdatas `json:"m30s"`
Days Tdatas `json:"days"`
Weeks Tdatas `json:"weeks"`
Months Tdatas `json:"months"`
Ticks Ticks `json:"-"`
last_tick RealtimeTick
hash int
count int32
loaded int32
broadcast bool
lst_trade time.Time
rw sync.RWMutex
Name string
}
func (p *Stock) MarshalTail(tail bool) ([]byte, error) {
p.rw.RLock()
defer p.rw.RUnlock()
s := Stock{
Id: p.Id,
Name: p.Name,
}
if !tail || !p.broadcast {
p.broadcast = true
// full
p.M1s.tail(&s.M1s, 0)
p.M5s.tail(&s.M5s, 0)
p.M30s.tail(&s.M30s, 0)
p.Days.tail(&s.Days, 0)
p.Weeks.tail(&s.Weeks, 0)
p.Months.tail(&s.Months, 0)
} else {
// tail
p.M1s.tail(&s.M1s, 240)
p.M5s.tail(&s.M5s, 60)
p.M30s.tail(&s.M30s, 8)
p.Days.tail(&s.Days, 8)
p.Weeks.tail(&s.Weeks, 8)
p.Months.tail(&s.Months, 8)
}
return json.Marshal(s)
}
func NewStock(id string, hub_height int) *Stock {
p := &Stock{
Id: id,
hash: StockHash(id),
count: 1,
}
p.M1s.Init(hub_height, id+" f1", nil, &p.M5s)
p.M5s.Init(hub_height, id+" f5", &p.M1s, &p.M30s)
p.M30s.Init(hub_height, id+" f30", &p.M5s, &p.Days)
p.Days.Init(hub_height, id+" day", &p.M30s, &p.Weeks)
p.Weeks.Init(hub_height, id+" week", &p.Days, &p.Months)
p.Months.Init(hub_height, id+" month", &p.Weeks, nil)
return p
}
type Stocks struct {
stocks PStockSlice
rwmutex sync.RWMutex
store store.Store
play int
ch chan *Stock
min_hub_height int
}
func NewStocks(storestr string, play, min_hub_height int) *Stocks {
store := store.Get(storestr)
if min_hub_height < 0 {
min_hub_height = 0
}
return &Stocks{
min_hub_height: min_hub_height,
store: store,
play: play,
}
}
func (p *Stocks) Store() store.Store { return p.store }
func (p *Stocks) Run() {
if p.play > minPlay {
for {
p.play_next_tick()
time.Sleep(time.Duration(p.play) * time.Millisecond)
}
}
robot.Work()
for {
if IsTradeTime(time.Now()) {
p.Ticks_update_real()
}
time.Sleep(tickPeriod)
}
}
func (p *Stocks) Chan(ch chan *Stock) {
p.ch = ch
}
func (p *Stocks) res(stock *Stock) {
if p.ch != nil {
p.ch <- stock
}
}
func (p *Stocks) update(s *Stock) {
if s.Update(p.store, p.play > minPlay) {
p.res(s)
}
}
func (p *Stocks) Insert(id string) (int, *Stock, bool) {
p.rwmutex.RLock()
i, ok := p.stocks.Search(id)
if ok {
s := p.stocks[i]
p.rwmutex.RUnlock()
if atomic.AddInt32(&s.count, 1) < 1 {
atomic.StoreInt32(&s.count, 1)
}
return i, s, false
}
s := NewStock(id, p.min_hub_height)
p.rwmutex.RUnlock()
p.rwmutex.Lock()
defer p.rwmutex.Unlock()
if i < 1 {
p.stocks = append(PStockSlice{s}, p.stocks...)
return 0, s, true
} else if i >= p.stocks.Len() {
p.stocks = append(p.stocks, s)
return p.stocks.Len() - 1, s, true
}
p.stocks = append(p.stocks, s)
copy(p.stocks[i+1:], p.stocks[i:])
p.stocks[i] = s
return i, s, true
}
func (p *Stocks) Remove(id string) {
p.rwmutex.RLock()
defer p.rwmutex.RUnlock()
if i, ok := p.stocks.Search(id); ok {
atomic.AddInt32(&p.stocks[i].count, -1)
}
}
func (p *Stocks) Watch(id string) (*Stock, bool) {
i, s, isnew := p.Insert(id)
if isnew {
go p.update(s)
glog.V(LogV).Infof("watch new stock id=%s index=%d", id, i)
} else {
glog.V(LogV).Infof("watch stock id=%s index=%d count=%d", id, i, s.count)
}
return s, isnew
}
func (p *Stocks) UnWatch(id string) {
p.Remove(id)
}
func (p *Stocks) Find_need_update_tick_ids() (pstocks PStockSlice) {
p.rwmutex.RLock()
defer p.rwmutex.RUnlock()
for i, l := 0, len(p.stocks); i < l; i++ {
if atomic.LoadInt32(&p.stocks[i].loaded) < 2 {
continue
}
pstocks = append(pstocks, p.stocks[i])
}
return
}
func (p *Stocks) play_next_tick() {
p.rwmutex.RLock()
defer p.rwmutex.RUnlock()
for i, l := 0, len(p.stocks); i < l; i++ {
if atomic.LoadInt32(&p.stocks[i].loaded) < 2 {
continue
}
if atomic.LoadInt32(&p.stocks[i].count) < 1 {
continue
}
p.stocks[i].rw.Lock()
if p.stocks[i].Ticks.play == nil || len(p.stocks[i].Ticks.play) < 1 {
p.stocks[i].Ticks.play = p.stocks[i].Ticks.Data
p.stocks[i].Ticks.Data = []Tick{}
if len(p.stocks[i].Ticks.play) > 240 {
p.stocks[i].Ticks.Data = p.stocks[i].Ticks.play[:240]
}
}
lplay := len(p.stocks[i].Ticks.play)
ldata := len(p.stocks[i].Ticks.Data)
if ldata < lplay {
p.stocks[i].Ticks.Data = p.stocks[i].Ticks.play[:ldata+1]
p.stocks[i].Merge(false, p.store)
p.res(p.stocks[i])
}
p.stocks[i].rw.Unlock()
}
}
func (p *Stocks) Ticks_update_real() {
var wg sync.WaitGroup
stocks := p.Find_need_update_tick_ids()
l := len(stocks)
if l < 1 {
return
}
for i := 0; i < l; {
var b bytes.Buffer
var pstocks PStockSlice
step := 50
if i+step < l {
pstocks = stocks[i : i+step]
} else {
pstocks = stocks[i:l]
}
for j := 0; j < step && i < l; i, j = i+1, j+1 {
if b.Len() > 0 {
b.WriteString(",")
}
b.WriteString(stocks[i].Id)
}
if b.Len() < 1 {
continue
}
wg.Add(1)
go func(ids string, pstocks PStockSlice) {
defer wg.Done()
body := robot.Tick_download_real_from_sina(ids)
if body == nil {
return
}
for _, line := range bytes.Split(body, []byte("\";")) {
line = bytes.TrimSpace(line)
info := bytes.Split(line, []byte("=\""))
if len(info) != 2 {
continue
}
prefix := "var hq_str_"
if !bytes.HasPrefix(info[0], []byte(prefix)) {
continue
}
id := info[0][len(prefix):]
if idx, ok := pstocks.Search(string(id)); ok {
if pstocks[idx].tick_get_real(info[1]) {
pstocks[idx].Merge(false, p.store)
p.res(pstocks[idx])
}
}
}
}(b.String(), pstocks)
}
wg.Wait()
}
func StockHash(id string) int {
for i, c := range []byte(id) {
if c >= '0' && c <= '9' {
i, _ = strconv.Atoi(id[i:])
return i
}
}
return 0
}
func (p *Stock) Merge(day bool, store store.Store) {
m1_fresh_index := p.Ticks2M1s()
m5_fresh_index := p.M5s.MergeFrom(&p.M1s, false, Minute5end)
m30_fresh_index := p.M30s.MergeFrom(&p.M1s, false, Minute30end)
if day {
p.Ticks.clean()
td, _ := store.LoadMacd(p.Id, L1, p.M1s.start)
p.M1s.Macd(m1_fresh_index, td)
store.SaveMacds(p.Id, L1, p.M1s.Data)
td, _ = store.LoadMacd(p.Id, L5, p.M1s.start)
p.M5s.Macd(m5_fresh_index, td)
store.SaveMacds(p.Id, L5, p.M5s.Data)
td, _ = store.LoadMacd(p.Id, L30, p.M1s.start)
p.M30s.Macd(m30_fresh_index, td)
store.SaveMacds(p.Id, L30, p.M30s.Data)
} else {
p.M1s.Macd(m1_fresh_index, nil)
p.M5s.Macd(m5_fresh_index, nil)
p.M30s.Macd(m30_fresh_index, nil)
}
p.M1s.ParseChan()
p.M5s.ParseChan()
p.M30s.ParseChan()
if day {
p.Weeks.MergeFrom(&p.Days, true, Weekend)
p.Months.MergeFrom(&p.Days, true, Monthend)
td, _ := store.LoadMacd(p.Id, LDay, p.Days.start)
p.Days.Macd(0, td)
store.SaveMacds(p.Id, LDay, p.Days.Data)
td, _ = store.LoadMacd(p.Id, LWeek, p.Days.start)
p.Weeks.Macd(0, td)
store.SaveMacds(p.Id, LWeek, p.Weeks.Data)
td, _ = store.LoadMacd(p.Id, LMonth, p.Days.start)
p.Months.Macd(0, td)
store.SaveMacds(p.Id, LMonth, p.Months.Data)
p.Days.ParseChan()
p.Weeks.ParseChan()
p.Months.ParseChan()
}
}
func (p *Tdatas) ParseChan() {
if p.base == nil {
p.ParseTyping()
p.Typing.LinkTyping()
p.ParseSegment()
p.Segment.LinkTyping()
}
p.ParseHub()
p.LinkHub()
}
func (p *Stock) Update(store store.Store, play bool) bool {
if !atomic.CompareAndSwapInt32(&p.loaded, 0, 1) {
return false
}
p.Days_update(store)
p.Ticks_update(store)
p.Ticks_today_update()
if play {
glog.Warningln("WITH PLAY MODE")
} else {
p.Merge(true, store)
}
atomic.StoreInt32(&p.loaded, 2)
return true
}
func (p *Stock) | (t time.Time) ([]int, error) {
inds := []int{}
tds, err := robot.Days_download(p.Id, t)
if err != nil {
return inds, err
}
for i, count := 0, len(tds); i < count; i++ {
ind, isnew := p.Days.Add(tds[i])
if isnew || ind > 0 {
inds = append(inds, ind)
}
}
return inds, nil
}
func (p *Stock) Days_update(store store.Store) int {
c := Day_collection_name(p.Id)
p.Days.start = store.GetStartTime(p.Id, LDay)
p.Days.Data, _ = store.LoadTDatas(c, p.Days.start)
t := p.Days.latest_time()
now := time.Now().AddDate(0, 0, -1).UTC().Truncate(time.Hour * 24)
for !IsTradeDay(now) {
now = now.AddDate(0, 0, -1)
}
if t.Equal(now) || t.After(now) {
return 0
}
inds, _ := p.days_download(t)
if len(inds) > 0 {
store.SaveTDatas(c, p.Days.Data, inds)
factor := p.Days.Factor()
store.UpdateFactor(p.Id, factor)
}
return len(inds)
}
func (p *Stock) Ticks_update(store store.Store) int {
c := Tick_collection_name(p.Id)
p.M1s.start = store.GetStartTime(p.Id, L1)
p.Ticks.Data, _ = store.LoadTicks(c, p.M1s.start)
begin_time := p.M1s.start
l := len(p.Ticks.Data)
if l > 0 {
begin_time = p.Ticks.Data[0].Time
}
now := time.Now().UTC()
end_time := now.Truncate(time.Hour * 24)
if now.Hour() > 10 {
end_time = end_time.AddDate(0, 0, 1)
}
if begin_time.Equal(market_begin_day) {
begin_time = end_time.AddDate(0, -2, -1)
}
begin_time = begin_time.AddDate(0, 0, 1).Truncate(time.Hour * 24)
daylen := len(p.Days.Data)
if daylen < 1 {
return 0
}
i, _ := ((TdataSlice)(p.Days.Data)).Search(begin_time)
glog.V(LogV).Infof("from %d/%d %s begin_time=%s end_time=%s", i, daylen, p.M1s.start, begin_time, end_time)
var t time.Time
for ; i <= daylen; i++ {
if i < daylen {
t = p.Days.Data[i].Time
} else if i == daylen {
t = p.Days.Data[i-1].Time.AddDate(0, 0, 1)
}
if !end_time.After(t) {
glog.V(LogV).Infoln(t, "reach end_time", end_time)
break
}
if p.Ticks.hasTimeData(t) {
continue
}
glog.V(LogV).Infoln("prepare download ticks", t)
if ticks, err := p.ticks_download(t); ticks != nil {
for j, _ := range ticks {
p.Ticks.Add(ticks[j])
}
store.SaveTicks(c, ticks)
glog.V(LogV).Infoln("download ticks succ", t)
} else if err != nil {
glog.V(LogD).Infoln("download ticks err", err)
}
}
count := len(p.Ticks.Data)
glog.V(LogV).Infof("download ticks %d/%d", count-l, count)
return count - l
}
/*
func (p *Tdata) parse_mins_from_sina(line []byte) error {
items := [6]string{"day:", "open:", "high:", "close:", "low:", "volume:"}
v := [6]string{}
line = bytes.TrimSpace(line)
line = bytes.Trim(line, "[{}]")
infos := bytes.Split(line, []byte(","))
if len(infos) != 6 {
return errors.New("could not parse line " + string(line))
}
for i, item := range items {
v[i] = ""
for _, info := range infos {
if bytes.HasPrefix(info, []byte(item)) {
info = bytes.TrimPrefix(info, []byte(item))
info = bytes.Trim(info, "\"")
v[i] = string(info)
}
}
}
p.FromString(v[0], v[1], v[2], v[3], v[4], v[5])
return nil
}
*/
var UnknowSinaRes error = errors.New("could not find '成交时间' in head line")
func (p *Stock) ticks_download(t time.Time) ([]Tick, error) {
body := robot.Tick_download_from_sina(p.Id, t)
if body == nil {
return nil, UnknowSinaRes
}
body = bytes.TrimSpace(body)
lines := bytes.Split(body, []byte("\n"))
count := len(lines) - 1
if count < 1 {
return nil, UnknowSinaRes
}
if bytes.Contains(lines[0], []byte("script")) {
return nil, UnknowSinaRes
}
if !bytes.Contains(lines[0], []byte("成交时间")) {
return nil, UnknowSinaRes
}
ticks := make([]Tick, count)
for i := count; i > 0; i-- {
line := bytes.TrimSpace(lines[i])
infos := bytes.Split(line, []byte("\t"))
if len(infos) != 6 {
err := errors.New("could not parse line " + string(line))
return nil, err
}
ticks[count-i].FromString(t, infos[0], infos[1], infos[2],
infos[3], infos[4], infos[5])
}
FixTickTime(ticks)
return ticks, nil
}
func (p *Stock) Ticks_today_update() int {
l := len(p.Ticks.Data)
now := time.Now().UTC()
if !IsTradeDay(now) {
return 0
}
nhour := now.Hour()
if nhour < 1 || nhour > 10 {
return 0
}
p.ticks_get_today()
count := len(p.Ticks.Data)
return count - l
}
func (p *Stock) ticks_get_today() bool {
last_t, name, err := Tick_get_today_date(p.Id)
if err != nil {
glog.Warningln("get today date fail", err)
return false
}
p.Name = name
t := time.Now().UTC().Truncate(time.Hour * 24)
if t.After(last_t) {
return false
}
body := robot.Tick_download_today_from_sina(p.Id)
if body == nil {
return false
}
body = bytes.TrimSpace(body)
lines := bytes.Split(body, []byte("\n"))
ticks := []Tick{}
tick := Tick{}
nul := []byte("")
for i := len(lines) - 1; i > 0; i-- {
line := bytes.TrimSpace(lines[i])
line = bytes.Trim(line, ");")
infos := bytes.Split(line, []byte("] = new Array("))
if len(infos) != 2 {
continue
}
line = bytes.Replace(infos[1], []byte(" "), nul, -1)
line = bytes.Replace(line, []byte("'"), nul, -1)
infos = bytes.Split(line, []byte(","))
if len(infos) != 4 {
continue
}
tick.FromString(t, infos[0], infos[2], nul, infos[1], nul, infos[3])
if tick.Volume == 0 && tick.Price == 0 {
continue
}
ticks = append(ticks, tick)
}
FixTickTime(ticks)
FixTickData(ticks)
for _, tick := range ticks {
p.Ticks.Add(tick)
}
return true
}
func (p *Stock) tick_get_real(line []byte) bool {
infos := bytes.Split(line, []byte(","))
if len(infos) < 33 {
glog.Warningln("sina hq api, res format changed")
return false
}
p.Name = string(infos[0])
nul := []byte("")
tick := RealtimeTick{}
t, _ := time.Parse("2006-01-02", string(infos[30]))
tick.FromString(t, infos[31], infos[3], nul, infos[8], infos[9], nul)
tick.Buyone = ParseCent(string(infos[11]))
tick.Sellone = ParseCent(string(infos[21]))
tick.SetStatus(infos[32])
if p.last_tick.Volume == 0 {
p.last_tick = tick
if tick.Time.Before(p.lst_trade) {
p.last_tick.Volume = 0
}
return false
}
if tick.Volume != p.last_tick.Volume {
if tick.Price >= p.last_tick.Sellone {
tick.Type = Buy_tick
} else if tick.Price <= p.last_tick.Buyone {
tick.Type = Sell_tick
} else {
tick.Type = Eq_tick
}
tick.Change = tick.Price - p.last_tick.Price
volume := (tick.Volume - p.last_tick.Volume) / 100
p.last_tick = tick
tick.Volume = volume
p.Ticks.Add(tick.Tick)
p.lst_trade = tick.Time
return true
}
return false
}
| days_download | identifier_name |
auto.go | package crawl
import (
"bytes"
"encoding/json"
"errors"
"strconv"
"sync"
"sync/atomic"
"time"
. "./base"
"./robot"
"./store"
"github.com/golang/glog"
)
const (
tickPeriod = 5 * time.Second
minPlay = 1
)
var market_begin_day time.Time
func init() {
market_begin_day, _ = time.Parse("2006-01-02", "1990-12-19")
}
type Stock struct {
Id string `json:"id"`
M1s Tdatas `json:"m1s"`
M5s Tdatas `json:"m5s"`
M30s Tdatas `json:"m30s"`
Days Tdatas `json:"days"`
Weeks Tdatas `json:"weeks"`
Months Tdatas `json:"months"`
Ticks Ticks `json:"-"`
last_tick RealtimeTick
hash int
count int32
loaded int32
broadcast bool
lst_trade time.Time
rw sync.RWMutex
Name string
}
func (p *Stock) MarshalTail(tail bool) ([]byte, error) {
p.rw.RLock()
defer p.rw.RUnlock()
s := Stock{
Id: p.Id,
Name: p.Name,
}
if !tail || !p.broadcast {
p.broadcast = true
// full
p.M1s.tail(&s.M1s, 0)
p.M5s.tail(&s.M5s, 0)
p.M30s.tail(&s.M30s, 0)
p.Days.tail(&s.Days, 0)
p.Weeks.tail(&s.Weeks, 0)
p.Months.tail(&s.Months, 0)
} else {
// tail
p.M1s.tail(&s.M1s, 240)
p.M5s.tail(&s.M5s, 60)
p.M30s.tail(&s.M30s, 8)
p.Days.tail(&s.Days, 8)
p.Weeks.tail(&s.Weeks, 8)
p.Months.tail(&s.Months, 8)
}
return json.Marshal(s)
}
func NewStock(id string, hub_height int) *Stock {
p := &Stock{
Id: id,
hash: StockHash(id),
count: 1,
}
p.M1s.Init(hub_height, id+" f1", nil, &p.M5s)
p.M5s.Init(hub_height, id+" f5", &p.M1s, &p.M30s)
p.M30s.Init(hub_height, id+" f30", &p.M5s, &p.Days)
p.Days.Init(hub_height, id+" day", &p.M30s, &p.Weeks)
p.Weeks.Init(hub_height, id+" week", &p.Days, &p.Months)
p.Months.Init(hub_height, id+" month", &p.Weeks, nil)
return p
}
type Stocks struct {
stocks PStockSlice
rwmutex sync.RWMutex
store store.Store
play int
ch chan *Stock
min_hub_height int
}
func NewStocks(storestr string, play, min_hub_height int) *Stocks {
store := store.Get(storestr)
if min_hub_height < 0 {
min_hub_height = 0
}
return &Stocks{
min_hub_height: min_hub_height,
store: store,
play: play,
}
}
func (p *Stocks) Store() store.Store { return p.store }
func (p *Stocks) Run() {
if p.play > minPlay {
for {
p.play_next_tick()
time.Sleep(time.Duration(p.play) * time.Millisecond)
}
}
robot.Work()
for {
if IsTradeTime(time.Now()) {
p.Ticks_update_real()
}
time.Sleep(tickPeriod)
}
}
func (p *Stocks) Chan(ch chan *Stock) {
p.ch = ch
}
func (p *Stocks) res(stock *Stock) {
if p.ch != nil {
p.ch <- stock
}
}
func (p *Stocks) update(s *Stock) {
if s.Update(p.store, p.play > minPlay) {
p.res(s)
}
}
func (p *Stocks) Insert(id string) (int, *Stock, bool) {
p.rwmutex.RLock()
i, ok := p.stocks.Search(id)
if ok {
s := p.stocks[i]
p.rwmutex.RUnlock()
if atomic.AddInt32(&s.count, 1) < 1 {
atomic.StoreInt32(&s.count, 1)
}
return i, s, false
}
s := NewStock(id, p.min_hub_height)
p.rwmutex.RUnlock()
p.rwmutex.Lock()
defer p.rwmutex.Unlock()
if i < 1 {
p.stocks = append(PStockSlice{s}, p.stocks...)
return 0, s, true
} else if i >= p.stocks.Len() {
p.stocks = append(p.stocks, s)
return p.stocks.Len() - 1, s, true
}
p.stocks = append(p.stocks, s)
copy(p.stocks[i+1:], p.stocks[i:])
p.stocks[i] = s
return i, s, true
}
func (p *Stocks) Remove(id string) {
p.rwmutex.RLock()
defer p.rwmutex.RUnlock()
if i, ok := p.stocks.Search(id); ok {
atomic.AddInt32(&p.stocks[i].count, -1)
}
}
func (p *Stocks) Watch(id string) (*Stock, bool) {
i, s, isnew := p.Insert(id)
if isnew {
go p.update(s)
glog.V(LogV).Infof("watch new stock id=%s index=%d", id, i)
} else {
glog.V(LogV).Infof("watch stock id=%s index=%d count=%d", id, i, s.count)
}
return s, isnew
}
func (p *Stocks) UnWatch(id string) {
p.Remove(id)
}
func (p *Stocks) Find_need_update_tick_ids() (pstocks PStockSlice) {
p.rwmutex.RLock()
defer p.rwmutex.RUnlock()
for i, l := 0, len(p.stocks); i < l; i++ {
if atomic.LoadInt32(&p.stocks[i].loaded) < 2 {
continue
}
pstocks = append(pstocks, p.stocks[i])
}
return
}
func (p *Stocks) play_next_tick() {
p.rwmutex.RLock()
defer p.rwmutex.RUnlock()
for i, l := 0, len(p.stocks); i < l; i++ {
if atomic.LoadInt32(&p.stocks[i].loaded) < 2 {
continue
}
if atomic.LoadInt32(&p.stocks[i].count) < 1 {
continue
}
p.stocks[i].rw.Lock()
if p.stocks[i].Ticks.play == nil || len(p.stocks[i].Ticks.play) < 1 {
p.stocks[i].Ticks.play = p.stocks[i].Ticks.Data
p.stocks[i].Ticks.Data = []Tick{}
if len(p.stocks[i].Ticks.play) > 240 {
p.stocks[i].Ticks.Data = p.stocks[i].Ticks.play[:240]
}
}
lplay := len(p.stocks[i].Ticks.play)
ldata := len(p.stocks[i].Ticks.Data)
if ldata < lplay {
p.stocks[i].Ticks.Data = p.stocks[i].Ticks.play[:ldata+1]
p.stocks[i].Merge(false, p.store)
p.res(p.stocks[i])
}
p.stocks[i].rw.Unlock()
}
}
func (p *Stocks) Ticks_update_real() {
var wg sync.WaitGroup
stocks := p.Find_need_update_tick_ids()
l := len(stocks)
if l < 1 {
return
}
for i := 0; i < l; {
var b bytes.Buffer
var pstocks PStockSlice
step := 50
if i+step < l {
pstocks = stocks[i : i+step]
} else {
pstocks = stocks[i:l]
}
for j := 0; j < step && i < l; i, j = i+1, j+1 {
if b.Len() > 0 {
b.WriteString(",")
}
b.WriteString(stocks[i].Id)
}
if b.Len() < 1 {
continue
}
wg.Add(1)
go func(ids string, pstocks PStockSlice) {
defer wg.Done()
body := robot.Tick_download_real_from_sina(ids)
if body == nil { | for _, line := range bytes.Split(body, []byte("\";")) {
line = bytes.TrimSpace(line)
info := bytes.Split(line, []byte("=\""))
if len(info) != 2 {
continue
}
prefix := "var hq_str_"
if !bytes.HasPrefix(info[0], []byte(prefix)) {
continue
}
id := info[0][len(prefix):]
if idx, ok := pstocks.Search(string(id)); ok {
if pstocks[idx].tick_get_real(info[1]) {
pstocks[idx].Merge(false, p.store)
p.res(pstocks[idx])
}
}
}
}(b.String(), pstocks)
}
wg.Wait()
}
func StockHash(id string) int {
for i, c := range []byte(id) {
if c >= '0' && c <= '9' {
i, _ = strconv.Atoi(id[i:])
return i
}
}
return 0
}
func (p *Stock) Merge(day bool, store store.Store) {
m1_fresh_index := p.Ticks2M1s()
m5_fresh_index := p.M5s.MergeFrom(&p.M1s, false, Minute5end)
m30_fresh_index := p.M30s.MergeFrom(&p.M1s, false, Minute30end)
if day {
p.Ticks.clean()
td, _ := store.LoadMacd(p.Id, L1, p.M1s.start)
p.M1s.Macd(m1_fresh_index, td)
store.SaveMacds(p.Id, L1, p.M1s.Data)
td, _ = store.LoadMacd(p.Id, L5, p.M1s.start)
p.M5s.Macd(m5_fresh_index, td)
store.SaveMacds(p.Id, L5, p.M5s.Data)
td, _ = store.LoadMacd(p.Id, L30, p.M1s.start)
p.M30s.Macd(m30_fresh_index, td)
store.SaveMacds(p.Id, L30, p.M30s.Data)
} else {
p.M1s.Macd(m1_fresh_index, nil)
p.M5s.Macd(m5_fresh_index, nil)
p.M30s.Macd(m30_fresh_index, nil)
}
p.M1s.ParseChan()
p.M5s.ParseChan()
p.M30s.ParseChan()
if day {
p.Weeks.MergeFrom(&p.Days, true, Weekend)
p.Months.MergeFrom(&p.Days, true, Monthend)
td, _ := store.LoadMacd(p.Id, LDay, p.Days.start)
p.Days.Macd(0, td)
store.SaveMacds(p.Id, LDay, p.Days.Data)
td, _ = store.LoadMacd(p.Id, LWeek, p.Days.start)
p.Weeks.Macd(0, td)
store.SaveMacds(p.Id, LWeek, p.Weeks.Data)
td, _ = store.LoadMacd(p.Id, LMonth, p.Days.start)
p.Months.Macd(0, td)
store.SaveMacds(p.Id, LMonth, p.Months.Data)
p.Days.ParseChan()
p.Weeks.ParseChan()
p.Months.ParseChan()
}
}
func (p *Tdatas) ParseChan() {
if p.base == nil {
p.ParseTyping()
p.Typing.LinkTyping()
p.ParseSegment()
p.Segment.LinkTyping()
}
p.ParseHub()
p.LinkHub()
}
func (p *Stock) Update(store store.Store, play bool) bool {
if !atomic.CompareAndSwapInt32(&p.loaded, 0, 1) {
return false
}
p.Days_update(store)
p.Ticks_update(store)
p.Ticks_today_update()
if play {
glog.Warningln("WITH PLAY MODE")
} else {
p.Merge(true, store)
}
atomic.StoreInt32(&p.loaded, 2)
return true
}
func (p *Stock) days_download(t time.Time) ([]int, error) {
inds := []int{}
tds, err := robot.Days_download(p.Id, t)
if err != nil {
return inds, err
}
for i, count := 0, len(tds); i < count; i++ {
ind, isnew := p.Days.Add(tds[i])
if isnew || ind > 0 {
inds = append(inds, ind)
}
}
return inds, nil
}
func (p *Stock) Days_update(store store.Store) int {
c := Day_collection_name(p.Id)
p.Days.start = store.GetStartTime(p.Id, LDay)
p.Days.Data, _ = store.LoadTDatas(c, p.Days.start)
t := p.Days.latest_time()
now := time.Now().AddDate(0, 0, -1).UTC().Truncate(time.Hour * 24)
for !IsTradeDay(now) {
now = now.AddDate(0, 0, -1)
}
if t.Equal(now) || t.After(now) {
return 0
}
inds, _ := p.days_download(t)
if len(inds) > 0 {
store.SaveTDatas(c, p.Days.Data, inds)
factor := p.Days.Factor()
store.UpdateFactor(p.Id, factor)
}
return len(inds)
}
func (p *Stock) Ticks_update(store store.Store) int {
c := Tick_collection_name(p.Id)
p.M1s.start = store.GetStartTime(p.Id, L1)
p.Ticks.Data, _ = store.LoadTicks(c, p.M1s.start)
begin_time := p.M1s.start
l := len(p.Ticks.Data)
if l > 0 {
begin_time = p.Ticks.Data[0].Time
}
now := time.Now().UTC()
end_time := now.Truncate(time.Hour * 24)
if now.Hour() > 10 {
end_time = end_time.AddDate(0, 0, 1)
}
if begin_time.Equal(market_begin_day) {
begin_time = end_time.AddDate(0, -2, -1)
}
begin_time = begin_time.AddDate(0, 0, 1).Truncate(time.Hour * 24)
daylen := len(p.Days.Data)
if daylen < 1 {
return 0
}
i, _ := ((TdataSlice)(p.Days.Data)).Search(begin_time)
glog.V(LogV).Infof("from %d/%d %s begin_time=%s end_time=%s", i, daylen, p.M1s.start, begin_time, end_time)
var t time.Time
for ; i <= daylen; i++ {
if i < daylen {
t = p.Days.Data[i].Time
} else if i == daylen {
t = p.Days.Data[i-1].Time.AddDate(0, 0, 1)
}
if !end_time.After(t) {
glog.V(LogV).Infoln(t, "reach end_time", end_time)
break
}
if p.Ticks.hasTimeData(t) {
continue
}
glog.V(LogV).Infoln("prepare download ticks", t)
if ticks, err := p.ticks_download(t); ticks != nil {
for j, _ := range ticks {
p.Ticks.Add(ticks[j])
}
store.SaveTicks(c, ticks)
glog.V(LogV).Infoln("download ticks succ", t)
} else if err != nil {
glog.V(LogD).Infoln("download ticks err", err)
}
}
count := len(p.Ticks.Data)
glog.V(LogV).Infof("download ticks %d/%d", count-l, count)
return count - l
}
/*
func (p *Tdata) parse_mins_from_sina(line []byte) error {
items := [6]string{"day:", "open:", "high:", "close:", "low:", "volume:"}
v := [6]string{}
line = bytes.TrimSpace(line)
line = bytes.Trim(line, "[{}]")
infos := bytes.Split(line, []byte(","))
if len(infos) != 6 {
return errors.New("could not parse line " + string(line))
}
for i, item := range items {
v[i] = ""
for _, info := range infos {
if bytes.HasPrefix(info, []byte(item)) {
info = bytes.TrimPrefix(info, []byte(item))
info = bytes.Trim(info, "\"")
v[i] = string(info)
}
}
}
p.FromString(v[0], v[1], v[2], v[3], v[4], v[5])
return nil
}
*/
var UnknowSinaRes error = errors.New("could not find '成交时间' in head line")
func (p *Stock) ticks_download(t time.Time) ([]Tick, error) {
body := robot.Tick_download_from_sina(p.Id, t)
if body == nil {
return nil, UnknowSinaRes
}
body = bytes.TrimSpace(body)
lines := bytes.Split(body, []byte("\n"))
count := len(lines) - 1
if count < 1 {
return nil, UnknowSinaRes
}
if bytes.Contains(lines[0], []byte("script")) {
return nil, UnknowSinaRes
}
if !bytes.Contains(lines[0], []byte("成交时间")) {
return nil, UnknowSinaRes
}
ticks := make([]Tick, count)
for i := count; i > 0; i-- {
line := bytes.TrimSpace(lines[i])
infos := bytes.Split(line, []byte("\t"))
if len(infos) != 6 {
err := errors.New("could not parse line " + string(line))
return nil, err
}
ticks[count-i].FromString(t, infos[0], infos[1], infos[2],
infos[3], infos[4], infos[5])
}
FixTickTime(ticks)
return ticks, nil
}
func (p *Stock) Ticks_today_update() int {
l := len(p.Ticks.Data)
now := time.Now().UTC()
if !IsTradeDay(now) {
return 0
}
nhour := now.Hour()
if nhour < 1 || nhour > 10 {
return 0
}
p.ticks_get_today()
count := len(p.Ticks.Data)
return count - l
}
func (p *Stock) ticks_get_today() bool {
last_t, name, err := Tick_get_today_date(p.Id)
if err != nil {
glog.Warningln("get today date fail", err)
return false
}
p.Name = name
t := time.Now().UTC().Truncate(time.Hour * 24)
if t.After(last_t) {
return false
}
body := robot.Tick_download_today_from_sina(p.Id)
if body == nil {
return false
}
body = bytes.TrimSpace(body)
lines := bytes.Split(body, []byte("\n"))
ticks := []Tick{}
tick := Tick{}
nul := []byte("")
for i := len(lines) - 1; i > 0; i-- {
line := bytes.TrimSpace(lines[i])
line = bytes.Trim(line, ");")
infos := bytes.Split(line, []byte("] = new Array("))
if len(infos) != 2 {
continue
}
line = bytes.Replace(infos[1], []byte(" "), nul, -1)
line = bytes.Replace(line, []byte("'"), nul, -1)
infos = bytes.Split(line, []byte(","))
if len(infos) != 4 {
continue
}
tick.FromString(t, infos[0], infos[2], nul, infos[1], nul, infos[3])
if tick.Volume == 0 && tick.Price == 0 {
continue
}
ticks = append(ticks, tick)
}
FixTickTime(ticks)
FixTickData(ticks)
for _, tick := range ticks {
p.Ticks.Add(tick)
}
return true
}
func (p *Stock) tick_get_real(line []byte) bool {
infos := bytes.Split(line, []byte(","))
if len(infos) < 33 {
glog.Warningln("sina hq api, res format changed")
return false
}
p.Name = string(infos[0])
nul := []byte("")
tick := RealtimeTick{}
t, _ := time.Parse("2006-01-02", string(infos[30]))
tick.FromString(t, infos[31], infos[3], nul, infos[8], infos[9], nul)
tick.Buyone = ParseCent(string(infos[11]))
tick.Sellone = ParseCent(string(infos[21]))
tick.SetStatus(infos[32])
if p.last_tick.Volume == 0 {
p.last_tick = tick
if tick.Time.Before(p.lst_trade) {
p.last_tick.Volume = 0
}
return false
}
if tick.Volume != p.last_tick.Volume {
if tick.Price >= p.last_tick.Sellone {
tick.Type = Buy_tick
} else if tick.Price <= p.last_tick.Buyone {
tick.Type = Sell_tick
} else {
tick.Type = Eq_tick
}
tick.Change = tick.Price - p.last_tick.Price
volume := (tick.Volume - p.last_tick.Volume) / 100
p.last_tick = tick
tick.Volume = volume
p.Ticks.Add(tick.Tick)
p.lst_trade = tick.Time
return true
}
return false
} | return
} | random_line_split |
auto.go | package crawl
import (
"bytes"
"encoding/json"
"errors"
"strconv"
"sync"
"sync/atomic"
"time"
. "./base"
"./robot"
"./store"
"github.com/golang/glog"
)
const (
tickPeriod = 5 * time.Second
minPlay = 1
)
var market_begin_day time.Time
func init() {
market_begin_day, _ = time.Parse("2006-01-02", "1990-12-19")
}
type Stock struct {
Id string `json:"id"`
M1s Tdatas `json:"m1s"`
M5s Tdatas `json:"m5s"`
M30s Tdatas `json:"m30s"`
Days Tdatas `json:"days"`
Weeks Tdatas `json:"weeks"`
Months Tdatas `json:"months"`
Ticks Ticks `json:"-"`
last_tick RealtimeTick
hash int
count int32
loaded int32
broadcast bool
lst_trade time.Time
rw sync.RWMutex
Name string
}
func (p *Stock) MarshalTail(tail bool) ([]byte, error) {
p.rw.RLock()
defer p.rw.RUnlock()
s := Stock{
Id: p.Id,
Name: p.Name,
}
if !tail || !p.broadcast {
p.broadcast = true
// full
p.M1s.tail(&s.M1s, 0)
p.M5s.tail(&s.M5s, 0)
p.M30s.tail(&s.M30s, 0)
p.Days.tail(&s.Days, 0)
p.Weeks.tail(&s.Weeks, 0)
p.Months.tail(&s.Months, 0)
} else {
// tail
p.M1s.tail(&s.M1s, 240)
p.M5s.tail(&s.M5s, 60)
p.M30s.tail(&s.M30s, 8)
p.Days.tail(&s.Days, 8)
p.Weeks.tail(&s.Weeks, 8)
p.Months.tail(&s.Months, 8)
}
return json.Marshal(s)
}
func NewStock(id string, hub_height int) *Stock {
p := &Stock{
Id: id,
hash: StockHash(id),
count: 1,
}
p.M1s.Init(hub_height, id+" f1", nil, &p.M5s)
p.M5s.Init(hub_height, id+" f5", &p.M1s, &p.M30s)
p.M30s.Init(hub_height, id+" f30", &p.M5s, &p.Days)
p.Days.Init(hub_height, id+" day", &p.M30s, &p.Weeks)
p.Weeks.Init(hub_height, id+" week", &p.Days, &p.Months)
p.Months.Init(hub_height, id+" month", &p.Weeks, nil)
return p
}
type Stocks struct {
stocks PStockSlice
rwmutex sync.RWMutex
store store.Store
play int
ch chan *Stock
min_hub_height int
}
func NewStocks(storestr string, play, min_hub_height int) *Stocks {
store := store.Get(storestr)
if min_hub_height < 0 {
min_hub_height = 0
}
return &Stocks{
min_hub_height: min_hub_height,
store: store,
play: play,
}
}
func (p *Stocks) Store() store.Store { return p.store }
func (p *Stocks) Run() {
if p.play > minPlay {
for {
p.play_next_tick()
time.Sleep(time.Duration(p.play) * time.Millisecond)
}
}
robot.Work()
for {
if IsTradeTime(time.Now()) {
p.Ticks_update_real()
}
time.Sleep(tickPeriod)
}
}
func (p *Stocks) Chan(ch chan *Stock) {
p.ch = ch
}
func (p *Stocks) res(stock *Stock) {
if p.ch != nil {
p.ch <- stock
}
}
func (p *Stocks) update(s *Stock) {
if s.Update(p.store, p.play > minPlay) {
p.res(s)
}
}
func (p *Stocks) Insert(id string) (int, *Stock, bool) {
p.rwmutex.RLock()
i, ok := p.stocks.Search(id)
if ok {
s := p.stocks[i]
p.rwmutex.RUnlock()
if atomic.AddInt32(&s.count, 1) < 1 {
atomic.StoreInt32(&s.count, 1)
}
return i, s, false
}
s := NewStock(id, p.min_hub_height)
p.rwmutex.RUnlock()
p.rwmutex.Lock()
defer p.rwmutex.Unlock()
if i < 1 {
p.stocks = append(PStockSlice{s}, p.stocks...)
return 0, s, true
} else if i >= p.stocks.Len() {
p.stocks = append(p.stocks, s)
return p.stocks.Len() - 1, s, true
}
p.stocks = append(p.stocks, s)
copy(p.stocks[i+1:], p.stocks[i:])
p.stocks[i] = s
return i, s, true
}
func (p *Stocks) Remove(id string) {
p.rwmutex.RLock()
defer p.rwmutex.RUnlock()
if i, ok := p.stocks.Search(id); ok {
atomic.AddInt32(&p.stocks[i].count, -1)
}
}
func (p *Stocks) Watch(id string) (*Stock, bool) {
i, s, isnew := p.Insert(id)
if isnew {
go p.update(s)
glog.V(LogV).Infof("watch new stock id=%s index=%d", id, i)
} else {
glog.V(LogV).Infof("watch stock id=%s index=%d count=%d", id, i, s.count)
}
return s, isnew
}
func (p *Stocks) UnWatch(id string) {
p.Remove(id)
}
func (p *Stocks) Find_need_update_tick_ids() (pstocks PStockSlice) {
p.rwmutex.RLock()
defer p.rwmutex.RUnlock()
for i, l := 0, len(p.stocks); i < l; i++ {
if atomic.LoadInt32(&p.stocks[i].loaded) < 2 {
continue
}
pstocks = append(pstocks, p.stocks[i])
}
return
}
func (p *Stocks) play_next_tick() {
p.rwmutex.RLock()
defer p.rwmutex.RUnlock()
for i, l := 0, len(p.stocks); i < l; i++ {
if atomic.LoadInt32(&p.stocks[i].loaded) < 2 {
continue
}
if atomic.LoadInt32(&p.stocks[i].count) < 1 {
continue
}
p.stocks[i].rw.Lock()
if p.stocks[i].Ticks.play == nil || len(p.stocks[i].Ticks.play) < 1 {
p.stocks[i].Ticks.play = p.stocks[i].Ticks.Data
p.stocks[i].Ticks.Data = []Tick{}
if len(p.stocks[i].Ticks.play) > 240 {
p.stocks[i].Ticks.Data = p.stocks[i].Ticks.play[:240]
}
}
lplay := len(p.stocks[i].Ticks.play)
ldata := len(p.stocks[i].Ticks.Data)
if ldata < lplay {
p.stocks[i].Ticks.Data = p.stocks[i].Ticks.play[:ldata+1]
p.stocks[i].Merge(false, p.store)
p.res(p.stocks[i])
}
p.stocks[i].rw.Unlock()
}
}
func (p *Stocks) Ticks_update_real() {
var wg sync.WaitGroup
stocks := p.Find_need_update_tick_ids()
l := len(stocks)
if l < 1 {
return
}
for i := 0; i < l; {
var b bytes.Buffer
var pstocks PStockSlice
step := 50
if i+step < l {
pstocks = stocks[i : i+step]
} else {
pstocks = stocks[i:l]
}
for j := 0; j < step && i < l; i, j = i+1, j+1 {
if b.Len() > 0 {
b.WriteString(",")
}
b.WriteString(stocks[i].Id)
}
if b.Len() < 1 {
continue
}
wg.Add(1)
go func(ids string, pstocks PStockSlice) {
defer wg.Done()
body := robot.Tick_download_real_from_sina(ids)
if body == nil {
return
}
for _, line := range bytes.Split(body, []byte("\";")) {
line = bytes.TrimSpace(line)
info := bytes.Split(line, []byte("=\""))
if len(info) != 2 {
continue
}
prefix := "var hq_str_"
if !bytes.HasPrefix(info[0], []byte(prefix)) {
continue
}
id := info[0][len(prefix):]
if idx, ok := pstocks.Search(string(id)); ok {
if pstocks[idx].tick_get_real(info[1]) {
pstocks[idx].Merge(false, p.store)
p.res(pstocks[idx])
}
}
}
}(b.String(), pstocks)
}
wg.Wait()
}
func StockHash(id string) int {
for i, c := range []byte(id) {
if c >= '0' && c <= '9' {
i, _ = strconv.Atoi(id[i:])
return i
}
}
return 0
}
func (p *Stock) Merge(day bool, store store.Store) {
m1_fresh_index := p.Ticks2M1s()
m5_fresh_index := p.M5s.MergeFrom(&p.M1s, false, Minute5end)
m30_fresh_index := p.M30s.MergeFrom(&p.M1s, false, Minute30end)
if day {
p.Ticks.clean()
td, _ := store.LoadMacd(p.Id, L1, p.M1s.start)
p.M1s.Macd(m1_fresh_index, td)
store.SaveMacds(p.Id, L1, p.M1s.Data)
td, _ = store.LoadMacd(p.Id, L5, p.M1s.start)
p.M5s.Macd(m5_fresh_index, td)
store.SaveMacds(p.Id, L5, p.M5s.Data)
td, _ = store.LoadMacd(p.Id, L30, p.M1s.start)
p.M30s.Macd(m30_fresh_index, td)
store.SaveMacds(p.Id, L30, p.M30s.Data)
} else {
p.M1s.Macd(m1_fresh_index, nil)
p.M5s.Macd(m5_fresh_index, nil)
p.M30s.Macd(m30_fresh_index, nil)
}
p.M1s.ParseChan()
p.M5s.ParseChan()
p.M30s.ParseChan()
if day {
p.Weeks.MergeFrom(&p.Days, true, Weekend)
p.Months.MergeFrom(&p.Days, true, Monthend)
td, _ := store.LoadMacd(p.Id, LDay, p.Days.start)
p.Days.Macd(0, td)
store.SaveMacds(p.Id, LDay, p.Days.Data)
td, _ = store.LoadMacd(p.Id, LWeek, p.Days.start)
p.Weeks.Macd(0, td)
store.SaveMacds(p.Id, LWeek, p.Weeks.Data)
td, _ = store.LoadMacd(p.Id, LMonth, p.Days.start)
p.Months.Macd(0, td)
store.SaveMacds(p.Id, LMonth, p.Months.Data)
p.Days.ParseChan()
p.Weeks.ParseChan()
p.Months.ParseChan()
}
}
func (p *Tdatas) ParseChan() {
if p.base == nil {
p.ParseTyping()
p.Typing.LinkTyping()
p.ParseSegment()
p.Segment.LinkTyping()
}
p.ParseHub()
p.LinkHub()
}
func (p *Stock) Update(store store.Store, play bool) bool {
if !atomic.CompareAndSwapInt32(&p.loaded, 0, 1) {
return false
}
p.Days_update(store)
p.Ticks_update(store)
p.Ticks_today_update()
if play {
glog.Warningln("WITH PLAY MODE")
} else {
p.Merge(true, store)
}
atomic.StoreInt32(&p.loaded, 2)
return true
}
func (p *Stock) days_download(t time.Time) ([]int, error) {
inds := []int{}
tds, err := robot.Days_download(p.Id, t)
if err != nil {
return inds, err
}
for i, count := 0, len(tds); i < count; i++ {
ind, isnew := p.Days.Add(tds[i])
if isnew || ind > 0 |
}
return inds, nil
}
func (p *Stock) Days_update(store store.Store) int {
c := Day_collection_name(p.Id)
p.Days.start = store.GetStartTime(p.Id, LDay)
p.Days.Data, _ = store.LoadTDatas(c, p.Days.start)
t := p.Days.latest_time()
now := time.Now().AddDate(0, 0, -1).UTC().Truncate(time.Hour * 24)
for !IsTradeDay(now) {
now = now.AddDate(0, 0, -1)
}
if t.Equal(now) || t.After(now) {
return 0
}
inds, _ := p.days_download(t)
if len(inds) > 0 {
store.SaveTDatas(c, p.Days.Data, inds)
factor := p.Days.Factor()
store.UpdateFactor(p.Id, factor)
}
return len(inds)
}
func (p *Stock) Ticks_update(store store.Store) int {
c := Tick_collection_name(p.Id)
p.M1s.start = store.GetStartTime(p.Id, L1)
p.Ticks.Data, _ = store.LoadTicks(c, p.M1s.start)
begin_time := p.M1s.start
l := len(p.Ticks.Data)
if l > 0 {
begin_time = p.Ticks.Data[0].Time
}
now := time.Now().UTC()
end_time := now.Truncate(time.Hour * 24)
if now.Hour() > 10 {
end_time = end_time.AddDate(0, 0, 1)
}
if begin_time.Equal(market_begin_day) {
begin_time = end_time.AddDate(0, -2, -1)
}
begin_time = begin_time.AddDate(0, 0, 1).Truncate(time.Hour * 24)
daylen := len(p.Days.Data)
if daylen < 1 {
return 0
}
i, _ := ((TdataSlice)(p.Days.Data)).Search(begin_time)
glog.V(LogV).Infof("from %d/%d %s begin_time=%s end_time=%s", i, daylen, p.M1s.start, begin_time, end_time)
var t time.Time
for ; i <= daylen; i++ {
if i < daylen {
t = p.Days.Data[i].Time
} else if i == daylen {
t = p.Days.Data[i-1].Time.AddDate(0, 0, 1)
}
if !end_time.After(t) {
glog.V(LogV).Infoln(t, "reach end_time", end_time)
break
}
if p.Ticks.hasTimeData(t) {
continue
}
glog.V(LogV).Infoln("prepare download ticks", t)
if ticks, err := p.ticks_download(t); ticks != nil {
for j, _ := range ticks {
p.Ticks.Add(ticks[j])
}
store.SaveTicks(c, ticks)
glog.V(LogV).Infoln("download ticks succ", t)
} else if err != nil {
glog.V(LogD).Infoln("download ticks err", err)
}
}
count := len(p.Ticks.Data)
glog.V(LogV).Infof("download ticks %d/%d", count-l, count)
return count - l
}
/*
func (p *Tdata) parse_mins_from_sina(line []byte) error {
items := [6]string{"day:", "open:", "high:", "close:", "low:", "volume:"}
v := [6]string{}
line = bytes.TrimSpace(line)
line = bytes.Trim(line, "[{}]")
infos := bytes.Split(line, []byte(","))
if len(infos) != 6 {
return errors.New("could not parse line " + string(line))
}
for i, item := range items {
v[i] = ""
for _, info := range infos {
if bytes.HasPrefix(info, []byte(item)) {
info = bytes.TrimPrefix(info, []byte(item))
info = bytes.Trim(info, "\"")
v[i] = string(info)
}
}
}
p.FromString(v[0], v[1], v[2], v[3], v[4], v[5])
return nil
}
*/
var UnknowSinaRes error = errors.New("could not find '成交时间' in head line")
func (p *Stock) ticks_download(t time.Time) ([]Tick, error) {
body := robot.Tick_download_from_sina(p.Id, t)
if body == nil {
return nil, UnknowSinaRes
}
body = bytes.TrimSpace(body)
lines := bytes.Split(body, []byte("\n"))
count := len(lines) - 1
if count < 1 {
return nil, UnknowSinaRes
}
if bytes.Contains(lines[0], []byte("script")) {
return nil, UnknowSinaRes
}
if !bytes.Contains(lines[0], []byte("成交时间")) {
return nil, UnknowSinaRes
}
ticks := make([]Tick, count)
for i := count; i > 0; i-- {
line := bytes.TrimSpace(lines[i])
infos := bytes.Split(line, []byte("\t"))
if len(infos) != 6 {
err := errors.New("could not parse line " + string(line))
return nil, err
}
ticks[count-i].FromString(t, infos[0], infos[1], infos[2],
infos[3], infos[4], infos[5])
}
FixTickTime(ticks)
return ticks, nil
}
func (p *Stock) Ticks_today_update() int {
l := len(p.Ticks.Data)
now := time.Now().UTC()
if !IsTradeDay(now) {
return 0
}
nhour := now.Hour()
if nhour < 1 || nhour > 10 {
return 0
}
p.ticks_get_today()
count := len(p.Ticks.Data)
return count - l
}
func (p *Stock) ticks_get_today() bool {
last_t, name, err := Tick_get_today_date(p.Id)
if err != nil {
glog.Warningln("get today date fail", err)
return false
}
p.Name = name
t := time.Now().UTC().Truncate(time.Hour * 24)
if t.After(last_t) {
return false
}
body := robot.Tick_download_today_from_sina(p.Id)
if body == nil {
return false
}
body = bytes.TrimSpace(body)
lines := bytes.Split(body, []byte("\n"))
ticks := []Tick{}
tick := Tick{}
nul := []byte("")
for i := len(lines) - 1; i > 0; i-- {
line := bytes.TrimSpace(lines[i])
line = bytes.Trim(line, ");")
infos := bytes.Split(line, []byte("] = new Array("))
if len(infos) != 2 {
continue
}
line = bytes.Replace(infos[1], []byte(" "), nul, -1)
line = bytes.Replace(line, []byte("'"), nul, -1)
infos = bytes.Split(line, []byte(","))
if len(infos) != 4 {
continue
}
tick.FromString(t, infos[0], infos[2], nul, infos[1], nul, infos[3])
if tick.Volume == 0 && tick.Price == 0 {
continue
}
ticks = append(ticks, tick)
}
FixTickTime(ticks)
FixTickData(ticks)
for _, tick := range ticks {
p.Ticks.Add(tick)
}
return true
}
func (p *Stock) tick_get_real(line []byte) bool {
infos := bytes.Split(line, []byte(","))
if len(infos) < 33 {
glog.Warningln("sina hq api, res format changed")
return false
}
p.Name = string(infos[0])
nul := []byte("")
tick := RealtimeTick{}
t, _ := time.Parse("2006-01-02", string(infos[30]))
tick.FromString(t, infos[31], infos[3], nul, infos[8], infos[9], nul)
tick.Buyone = ParseCent(string(infos[11]))
tick.Sellone = ParseCent(string(infos[21]))
tick.SetStatus(infos[32])
if p.last_tick.Volume == 0 {
p.last_tick = tick
if tick.Time.Before(p.lst_trade) {
p.last_tick.Volume = 0
}
return false
}
if tick.Volume != p.last_tick.Volume {
if tick.Price >= p.last_tick.Sellone {
tick.Type = Buy_tick
} else if tick.Price <= p.last_tick.Buyone {
tick.Type = Sell_tick
} else {
tick.Type = Eq_tick
}
tick.Change = tick.Price - p.last_tick.Price
volume := (tick.Volume - p.last_tick.Volume) / 100
p.last_tick = tick
tick.Volume = volume
p.Ticks.Add(tick.Tick)
p.lst_trade = tick.Time
return true
}
return false
}
| {
inds = append(inds, ind)
} | conditional_block |
mod.rs | //! Operating System backed readiness event queue.
//!
//! [`OsQueue`] provides an abstraction over platform specific Operating System
//! backed readiness event queues, such as kqueue or epoll.
//!
//! [`OsQueue`]: crate::os::OsQueue
//!
//! # Portability
//!
//! Using [`OsQueue`] provides a portable interface across supported platforms
//! as long as the caller takes the following into consideration:
//!
//! ### Draining readiness
//!
//! When using [edge-triggered] mode, once a readiness event is received, the
//! corresponding operation must be performed repeatedly until it returns
//! [`WouldBlock`]. Unless this is done, there is no guarantee that another
//! readiness event will be delivered, even if further data is received for the
//! [`Evented`] handle. See [`RegisterOption`] for more.
//!
//! [`WouldBlock`]: std::io::ErrorKind::WouldBlock
//! [edge-triggered]: crate::os::RegisterOption::EDGE
//! [`Evented`]: crate::os::Evented
//! [`RegisterOption`]: crate::os::RegisterOption
//!
//! ### Spurious events
//!
//! The [`Source::poll`] implementation may return readiness events even if the
//! associated [`Evented`] handle is not actually ready. Given the same code,
//! this may happen more on some platforms than others. It is important to never
//! assume that, just because a readiness notification was received, that the
//! associated operation will as well.
//!
//! If operation fails with a [`WouldBlock`] error, then the caller should not
//! treat this as an error and wait until another readiness event is received.
//!
//! Furthermore a single call to poll may result in multiple readiness events
//! being returned for a single `Evented` handle. For example, if a TCP socket
//! becomes both readable and writable, it may be possible for a single
//! readiness event to be returned with both [readable] and [writable] readiness
//! **OR** two separate events may be returned, one with readable set and one
//! with writable set.
//!
//! [`Source::poll`]: crate::event::Source::poll
//! [readable]: crate::os::Interests::READABLE
//! [writable]: crate::os::Interests::WRITABLE
//!
//! ### Registering handles
//!
//! Unless otherwise noted, it should be assumed that types implementing
//! [`Evented`] will never become ready unless they are registered with
//! `OsQueue`.
//!
//! For example:
//!
//! ```
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! use std::thread;
//! use std::time::Duration;
//!
//! use gaea::event;
//! use gaea::net::TcpStream;
//! use gaea::os::{OsQueue, RegisterOption};
//!
//! let address = "216.58.193.100:80".parse()?;
//! let mut stream = TcpStream::connect(address)?;
//!
//! // This actually does nothing towards connecting the TCP stream.
//! thread::sleep(Duration::from_secs(1));
//!
//! let mut os_queue = OsQueue::new()?;
//!
//! // The connect is not guaranteed to have started until it is registered at
//! // this point.
//! os_queue.register(&mut stream, event::Id(0), TcpStream::INTERESTS, RegisterOption::EDGE)?;
//! # Ok(())
//! # }
//! ```
//!
//! ### Timeout granularity
//!
//! The timeout provided to [`event::Source::blocking_poll`] will be rounded
//! up to the system clock granularity (usually 1ms), and kernel scheduling
//! delays mean that the blocking interval may be overrun by a small amount.
//!
//! ### Interrupts while polling
//!
//! Interrupts (`EINTR` in C and `io::ErrorKind::Interrupted` in Rust) are
//! **not** handled, they are returned as errors. In most cases however these
//! can simply be ignored, but it's up to the user how to deal with the "error".
//!
//! # Implementation notes
//!
//! `OsQueue` is backed by a readiness event queue provided by the operating
//! system. On all platforms a call to [`Source::poll`] is mostly just a direct
//! system call. The following system implementations back `OsQueue`:
//!
//! | OS | Selector |
//! |---------|----------|
//! | FreeBSD | [kqueue](https://www.freebsd.org/cgi/man.cgi?query=kqueue) |
//! | Linux | [epoll](http://man7.org/linux/man-pages/man7/epoll.7.html) |
//! | macOS | [kqueue](https://developer.apple.com/legacy/library/documentation/Darwin/Reference/ManPages/man2/kqueue.2.html) |
//! | NetBSD | [kqueue](http://netbsd.gw.com/cgi-bin/man-cgi?kqueue) |
//! | OpenBSD | [kqueue](https://man.openbsd.org/kqueue) |
//!
//! On all supported platforms socket operations are handled by using the system
//! queue. Platform specific extensions (e.g. [`EventedFd`]) allow accessing
//! other features provided by individual system selectors.
//!
//! [`Eventedfd`]: crate::sys::unix::EventedFd
//! [`signalfd`]: http://man7.org/linux/man-pages/man2/signalfd.2.html
use std::io;
use std::time::Duration;
use log::trace;
use crate::{event, sys};
mod awakener;
mod evented;
mod interests;
mod option;
pub mod signals;
pub use self::awakener::Awakener;
pub use self::evented::Evented;
pub use self::interests::Interests;
pub use self::option::RegisterOption;
pub use self::signals::{Signal, SignalSet, Signals};
/// Readiness event queue backed by the OS. | /// e.g. read or write.
///
/// To use this queue an [`Evented`] handle must first be registered using the
/// [`register`] method, supplying an associated id, readiness interests and
/// polling option. The [associated id] is used to associate a readiness event
/// with an `Evented` handle. The readiness [interests] defines which specific
/// operations on the handle to monitor for readiness. And the final argument,
/// [`RegisterOption`], defines how to deliver the readiness events, see
/// [`RegisterOption`] for more information.
///
/// See to [module documentation] for information.
///
/// [reading]: crate::event::Ready::READABLE
/// [writing]: crate::event::Ready::WRITABLE
/// [`register`]: OsQueue::register
/// [associated id]: event::Id
/// [interests]: Interests
/// [module documentation]: crate::os
#[derive(Debug)]
pub struct OsQueue {
selector: sys::Selector,
}
impl OsQueue {
/// Create a new OS backed readiness event queue.
///
/// This function will make a syscall to the operating system to create the
/// system selector. If this syscall fails it will return the error.
///
/// # Examples
///
/// ```
/// # fn main() -> Result<(), Box<dyn std::error::Error>> {
/// use std::io;
/// use std::time::Duration;
///
/// use gaea::os::OsQueue;
/// use gaea::poll;
///
/// // Create a new OS backed readiness event queue.
/// let mut os_queue = OsQueue::new()?;
///
/// // Create an event sink.
/// let mut events = Vec::new();
///
/// // Poll the queue for new readiness events.
/// // But since no `Evented` handles have been registered we'll receive no
/// // events.
/// poll::<_, io::Error>(&mut [&mut os_queue], &mut events, Some(Duration::from_millis(500)))?;
/// # Ok(())
/// # }
/// ```
pub fn new() -> io::Result<OsQueue> {
sys::Selector::new().map(|selector| OsQueue { selector })
}
/// Register an [`Evented`] handle with the `OsQueue`.
///
/// Once registered, the [`Evented`] handle will be monitored for readiness
/// state changes. When it notices a state change, it will return a
/// readiness event for the handle the next time the queue is [`polled`].
///
/// [`polled`]: crate::poll
///
/// # Arguments
///
/// `handle`: This is the handle that the `OsQueue` should monitor for
/// readiness state changes.
///
/// `id`: The caller picks a id to associate with the handle. When [`poll`]
/// returns an [event] for the handle, this id is [included]. This allows
/// the caller to map the event to its handle. The id associated with the
/// `Evented` handle can be changed at any time by calling [`reregister`].
///
/// `interests`: Specifies which operations `OsQueue` should monitor for
/// readiness. `OsQueue` will only return readiness events for operations
/// specified by this argument. If a socket is registered with [readable]
/// interests and the socket becomes writable, no event will be returned
/// from [`poll`]. The readiness interests for an `Evented` handle can be
/// changed at any time by calling [`reregister`]. Most types that
/// implemented [`Evented`] have a associated constant named `INTERESTS`
/// which provide a sane interest for that type, e.g. [`TcpStream`
/// interests] are readable and writable.
///
/// `opt`: Specifies the registration option. Just like the interests and
/// id, the option can be changed for an `Evented` handle at any time by
/// calling [`reregister`].
///
/// [`poll`]: crate::poll
/// [event]: crate::event::Event
/// [included]: crate::event::Event::id
/// [`reregister`]: OsQueue::reregister
/// [readable]: Interests::READABLE
/// [`TcpStream` interests]: crate::net::TcpStream::INTERESTS
///
/// # Notes
///
/// Unless otherwise specified, the caller should assume that once an
/// `Evented` handle is registered with a `OsQueue` instance, it is bound to
/// that `OsQueue` for the lifetime of the `Evented` handle. This remains
/// true even if the `Evented` handle is [deregistered].
///
/// [deregistered]: OsQueue::deregister
///
/// # Examples
///
/// ```
/// # fn main() -> Result<(), Box<dyn std::error::Error>> {
/// use std::io;
///
/// use gaea::net::TcpStream;
/// use gaea::os::{OsQueue, RegisterOption};
/// use gaea::{event, poll};
///
/// // Create a new `OsQueue` as well a containers for the events.
/// let mut os_queue = OsQueue::new()?;
/// let mut events = Vec::new();
///
/// // Create a TCP connection. `TcpStream` implements the `Evented` trait.
/// let address = "216.58.193.100:80".parse()?;
/// let mut stream = TcpStream::connect(address)?;
///
/// // Register the connection with queue.
/// os_queue.register(&mut stream, event::Id(0), TcpStream::INTERESTS, RegisterOption::EDGE)?;
///
/// // Run the event loop.
/// loop {
/// poll::<_, io::Error>(&mut [&mut os_queue], &mut events, None)?;
///
/// for event in events.drain(..) {
/// if event.id() == event::Id(0) {
/// // The TCP connection is (likely) ready for use.
/// # return Ok(());
/// }
/// }
/// }
/// # }
/// ```
pub fn register<E>(&mut self, handle: &mut E, id: event::Id, interests: Interests, opt: RegisterOption) -> io::Result<()>
where E: Evented + ?Sized,
{
trace!("registering handle: id={}, interests={:?}, opt={:?}", id, interests, opt);
handle.register(self, id, interests, opt)
}
/// Re-register an `Evented` handle with `OsQueue`.
///
/// Re-registering an `Evented` handle allows changing the details of the
/// registration. Specifically, it allows updating the associated `id`,
/// `interests`, and `opt` specified in previous `register` and `reregister`
/// calls.
///
/// The `reregister` arguments **fully override** the previous values. In
/// other words, if a socket is registered with [readable] interest and the
/// call to `reregister` specifies only [writable], then read interest is no
/// longer monitored for the handle.
///
/// The `Evented` handle must have previously been registered with this
/// `OsQueue` otherwise the call to `reregister` may return an error.
///
/// See the [`register`] documentation for details about the function
/// arguments.
///
/// [readable]: Interests::READABLE
/// [writable]: Interests::WRITABLE
/// [`register`]: OsQueue::register
///
/// # Examples
///
/// ```
/// # fn main() -> Result<(), Box<dyn std::error::Error>> {
/// use std::io;
///
/// use gaea::{event, poll};
/// use gaea::net::TcpStream;
/// use gaea::os::{Interests, RegisterOption, OsQueue};
///
/// let mut os_queue = OsQueue::new()?;
/// let mut events = Vec::new();
///
/// // Create a TCP connection. `TcpStream` implements the `Evented` trait.
/// let address = "216.58.193.100:80".parse()?;
/// let mut stream = TcpStream::connect(address)?;
///
/// // Register the connection with `OsQueue`, only with readable interest.
/// os_queue.register(&mut stream, event::Id(0), Interests::READABLE, RegisterOption::EDGE)?;
///
/// // Reregister the connection specifying a different id and write interest
/// // instead. `RegisterOption::EDGE` must be specified even though that value
/// // is not being changed.
/// os_queue.reregister(&mut stream, event::Id(2), Interests::WRITABLE, RegisterOption::EDGE)?;
///
/// // Run the event loop.
/// loop {
/// poll::<_, io::Error>(&mut [&mut os_queue], &mut events, None)?;
///
/// for event in events.drain(..) {
/// if event.id() == event::Id(2) {
/// // The TCP connection is (likely) ready for use.
/// # return Ok(());
/// } else if event.id() == event::Id(0) {
/// // We won't receive events with the old id anymore.
/// unreachable!();
/// }
/// }
/// }
/// # }
/// ```
pub fn reregister<E>(&mut self, handle: &mut E, id: event::Id, interests: Interests, opt: RegisterOption) -> io::Result<()>
where E: Evented + ?Sized,
{
trace!("reregistering handle: id={}, interests={:?}, opt={:?}", id, interests, opt);
handle.reregister(self, id, interests, opt)
}
/// Deregister an `Evented` handle from `OsQueue`.
///
/// When an `Evented` handle is deregistered, the handle will no longer be
/// monitored for readiness state changes. Unlike disabling handles with
/// [`oneshot`], deregistering clears up any internal resources needed to
/// track the handle.
///
/// A handle can be registered again using [`register`] after it has been
/// deregistered; however, it must be passed back to the **same** `OsQueue`.
///
/// # Notes
///
/// Calling [`reregister`] after `deregister` may be work on some platforms
/// but not all. To properly re-register a handle after deregistering use
/// `register`, this works on all platforms.
///
/// [`oneshot`]: RegisterOption::ONESHOT
/// [`register`]: OsQueue::register
/// [`reregister`]: OsQueue::reregister
///
/// # Examples
///
/// ```
/// # fn main() -> Result<(), Box<dyn std::error::Error>> {
/// use std::io;
/// use std::time::Duration;
///
/// use gaea::{event, poll};
/// use gaea::net::TcpStream;
/// use gaea::os::{OsQueue, RegisterOption};
///
/// let mut os_queue = OsQueue::new()?;
/// let mut events = Vec::new();
///
/// // Create a TCP connection. `TcpStream` implements the `Evented` trait.
/// let address = "216.58.193.100:80".parse()?;
/// let mut stream = TcpStream::connect(address)?;
///
/// // Register the connection with `OsQueue`.
/// os_queue.register(&mut stream, event::Id(0), TcpStream::INTERESTS, RegisterOption::EDGE)?;
///
/// // Do stuff with the connection etc.
///
/// // Deregister it so the resources can be cleaned up.
/// os_queue.deregister(&mut stream)?;
///
/// // Set a timeout because we shouldn't receive any events anymore.
/// poll::<_, io::Error>(&mut [&mut os_queue], &mut events, Some(Duration::from_millis(100)))?;
/// assert!(events.is_empty());
/// # Ok(())
/// # }
/// ```
pub fn deregister<E>(&mut self, handle: &mut E) -> io::Result<()>
where E: Evented + ?Sized,
{
trace!("deregistering handle");
handle.deregister(self)
}
/// Get access to the system selector. Used by platform specific code, e.g.
/// `EventedFd`.
pub(crate) fn selector(&self) -> &sys::Selector {
&self.selector
}
}
impl<ES, E> event::Source<ES, E> for OsQueue
where ES: event::Sink,
E: From<io::Error>,
{
fn max_timeout(&self) -> Option<Duration> {
// Can't tell if an event is available.
None
}
fn poll(&mut self, event_sink: &mut ES) -> Result<(), E> {
self.blocking_poll(event_sink, Some(Duration::from_millis(0)))
}
fn blocking_poll(&mut self, event_sink: &mut ES, timeout: Option<Duration>) -> Result<(), E> {
trace!("polling OS queue: timeout={:?}", timeout);
self.selector.select(event_sink, timeout)
.map_err(Into::into)
}
} | ///
/// This queue allows a program to monitor a large number of [`Evented`]
/// handles, waiting until one or more become "ready" for some class of
/// operations; e.g. [reading] or [writing]. An [`Evented`] type is considered
/// ready if it is possible to immediately perform a corresponding operation; | random_line_split |
mod.rs | //! Operating System backed readiness event queue.
//!
//! [`OsQueue`] provides an abstraction over platform specific Operating System
//! backed readiness event queues, such as kqueue or epoll.
//!
//! [`OsQueue`]: crate::os::OsQueue
//!
//! # Portability
//!
//! Using [`OsQueue`] provides a portable interface across supported platforms
//! as long as the caller takes the following into consideration:
//!
//! ### Draining readiness
//!
//! When using [edge-triggered] mode, once a readiness event is received, the
//! corresponding operation must be performed repeatedly until it returns
//! [`WouldBlock`]. Unless this is done, there is no guarantee that another
//! readiness event will be delivered, even if further data is received for the
//! [`Evented`] handle. See [`RegisterOption`] for more.
//!
//! [`WouldBlock`]: std::io::ErrorKind::WouldBlock
//! [edge-triggered]: crate::os::RegisterOption::EDGE
//! [`Evented`]: crate::os::Evented
//! [`RegisterOption`]: crate::os::RegisterOption
//!
//! ### Spurious events
//!
//! The [`Source::poll`] implementation may return readiness events even if the
//! associated [`Evented`] handle is not actually ready. Given the same code,
//! this may happen more on some platforms than others. It is important to never
//! assume that, just because a readiness notification was received, that the
//! associated operation will as well.
//!
//! If operation fails with a [`WouldBlock`] error, then the caller should not
//! treat this as an error and wait until another readiness event is received.
//!
//! Furthermore a single call to poll may result in multiple readiness events
//! being returned for a single `Evented` handle. For example, if a TCP socket
//! becomes both readable and writable, it may be possible for a single
//! readiness event to be returned with both [readable] and [writable] readiness
//! **OR** two separate events may be returned, one with readable set and one
//! with writable set.
//!
//! [`Source::poll`]: crate::event::Source::poll
//! [readable]: crate::os::Interests::READABLE
//! [writable]: crate::os::Interests::WRITABLE
//!
//! ### Registering handles
//!
//! Unless otherwise noted, it should be assumed that types implementing
//! [`Evented`] will never become ready unless they are registered with
//! `OsQueue`.
//!
//! For example:
//!
//! ```
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! use std::thread;
//! use std::time::Duration;
//!
//! use gaea::event;
//! use gaea::net::TcpStream;
//! use gaea::os::{OsQueue, RegisterOption};
//!
//! let address = "216.58.193.100:80".parse()?;
//! let mut stream = TcpStream::connect(address)?;
//!
//! // This actually does nothing towards connecting the TCP stream.
//! thread::sleep(Duration::from_secs(1));
//!
//! let mut os_queue = OsQueue::new()?;
//!
//! // The connect is not guaranteed to have started until it is registered at
//! // this point.
//! os_queue.register(&mut stream, event::Id(0), TcpStream::INTERESTS, RegisterOption::EDGE)?;
//! # Ok(())
//! # }
//! ```
//!
//! ### Timeout granularity
//!
//! The timeout provided to [`event::Source::blocking_poll`] will be rounded
//! up to the system clock granularity (usually 1ms), and kernel scheduling
//! delays mean that the blocking interval may be overrun by a small amount.
//!
//! ### Interrupts while polling
//!
//! Interrupts (`EINTR` in C and `io::ErrorKind::Interrupted` in Rust) are
//! **not** handled, they are returned as errors. In most cases however these
//! can simply be ignored, but it's up to the user how to deal with the "error".
//!
//! # Implementation notes
//!
//! `OsQueue` is backed by a readiness event queue provided by the operating
//! system. On all platforms a call to [`Source::poll`] is mostly just a direct
//! system call. The following system implementations back `OsQueue`:
//!
//! | OS | Selector |
//! |---------|----------|
//! | FreeBSD | [kqueue](https://www.freebsd.org/cgi/man.cgi?query=kqueue) |
//! | Linux | [epoll](http://man7.org/linux/man-pages/man7/epoll.7.html) |
//! | macOS | [kqueue](https://developer.apple.com/legacy/library/documentation/Darwin/Reference/ManPages/man2/kqueue.2.html) |
//! | NetBSD | [kqueue](http://netbsd.gw.com/cgi-bin/man-cgi?kqueue) |
//! | OpenBSD | [kqueue](https://man.openbsd.org/kqueue) |
//!
//! On all supported platforms socket operations are handled by using the system
//! queue. Platform specific extensions (e.g. [`EventedFd`]) allow accessing
//! other features provided by individual system selectors.
//!
//! [`Eventedfd`]: crate::sys::unix::EventedFd
//! [`signalfd`]: http://man7.org/linux/man-pages/man2/signalfd.2.html
use std::io;
use std::time::Duration;
use log::trace;
use crate::{event, sys};
mod awakener;
mod evented;
mod interests;
mod option;
pub mod signals;
pub use self::awakener::Awakener;
pub use self::evented::Evented;
pub use self::interests::Interests;
pub use self::option::RegisterOption;
pub use self::signals::{Signal, SignalSet, Signals};
/// Readiness event queue backed by the OS.
///
/// This queue allows a program to monitor a large number of [`Evented`]
/// handles, waiting until one or more become "ready" for some class of
/// operations; e.g. [reading] or [writing]. An [`Evented`] type is considered
/// ready if it is possible to immediately perform a corresponding operation;
/// e.g. read or write.
///
/// To use this queue an [`Evented`] handle must first be registered using the
/// [`register`] method, supplying an associated id, readiness interests and
/// polling option. The [associated id] is used to associate a readiness event
/// with an `Evented` handle. The readiness [interests] defines which specific
/// operations on the handle to monitor for readiness. And the final argument,
/// [`RegisterOption`], defines how to deliver the readiness events, see
/// [`RegisterOption`] for more information.
///
/// See to [module documentation] for information.
///
/// [reading]: crate::event::Ready::READABLE
/// [writing]: crate::event::Ready::WRITABLE
/// [`register`]: OsQueue::register
/// [associated id]: event::Id
/// [interests]: Interests
/// [module documentation]: crate::os
#[derive(Debug)]
pub struct OsQueue {
selector: sys::Selector,
}
impl OsQueue {
/// Create a new OS backed readiness event queue.
///
/// This function will make a syscall to the operating system to create the
/// system selector. If this syscall fails it will return the error.
///
/// # Examples
///
/// ```
/// # fn main() -> Result<(), Box<dyn std::error::Error>> {
/// use std::io;
/// use std::time::Duration;
///
/// use gaea::os::OsQueue;
/// use gaea::poll;
///
/// // Create a new OS backed readiness event queue.
/// let mut os_queue = OsQueue::new()?;
///
/// // Create an event sink.
/// let mut events = Vec::new();
///
/// // Poll the queue for new readiness events.
/// // But since no `Evented` handles have been registered we'll receive no
/// // events.
/// poll::<_, io::Error>(&mut [&mut os_queue], &mut events, Some(Duration::from_millis(500)))?;
/// # Ok(())
/// # }
/// ```
pub fn new() -> io::Result<OsQueue> {
sys::Selector::new().map(|selector| OsQueue { selector })
}
/// Register an [`Evented`] handle with the `OsQueue`.
///
/// Once registered, the [`Evented`] handle will be monitored for readiness
/// state changes. When it notices a state change, it will return a
/// readiness event for the handle the next time the queue is [`polled`].
///
/// [`polled`]: crate::poll
///
/// # Arguments
///
/// `handle`: This is the handle that the `OsQueue` should monitor for
/// readiness state changes.
///
/// `id`: The caller picks a id to associate with the handle. When [`poll`]
/// returns an [event] for the handle, this id is [included]. This allows
/// the caller to map the event to its handle. The id associated with the
/// `Evented` handle can be changed at any time by calling [`reregister`].
///
/// `interests`: Specifies which operations `OsQueue` should monitor for
/// readiness. `OsQueue` will only return readiness events for operations
/// specified by this argument. If a socket is registered with [readable]
/// interests and the socket becomes writable, no event will be returned
/// from [`poll`]. The readiness interests for an `Evented` handle can be
/// changed at any time by calling [`reregister`]. Most types that
/// implemented [`Evented`] have a associated constant named `INTERESTS`
/// which provide a sane interest for that type, e.g. [`TcpStream`
/// interests] are readable and writable.
///
/// `opt`: Specifies the registration option. Just like the interests and
/// id, the option can be changed for an `Evented` handle at any time by
/// calling [`reregister`].
///
/// [`poll`]: crate::poll
/// [event]: crate::event::Event
/// [included]: crate::event::Event::id
/// [`reregister`]: OsQueue::reregister
/// [readable]: Interests::READABLE
/// [`TcpStream` interests]: crate::net::TcpStream::INTERESTS
///
/// # Notes
///
/// Unless otherwise specified, the caller should assume that once an
/// `Evented` handle is registered with a `OsQueue` instance, it is bound to
/// that `OsQueue` for the lifetime of the `Evented` handle. This remains
/// true even if the `Evented` handle is [deregistered].
///
/// [deregistered]: OsQueue::deregister
///
/// # Examples
///
/// ```
/// # fn main() -> Result<(), Box<dyn std::error::Error>> {
/// use std::io;
///
/// use gaea::net::TcpStream;
/// use gaea::os::{OsQueue, RegisterOption};
/// use gaea::{event, poll};
///
/// // Create a new `OsQueue` as well a containers for the events.
/// let mut os_queue = OsQueue::new()?;
/// let mut events = Vec::new();
///
/// // Create a TCP connection. `TcpStream` implements the `Evented` trait.
/// let address = "216.58.193.100:80".parse()?;
/// let mut stream = TcpStream::connect(address)?;
///
/// // Register the connection with queue.
/// os_queue.register(&mut stream, event::Id(0), TcpStream::INTERESTS, RegisterOption::EDGE)?;
///
/// // Run the event loop.
/// loop {
/// poll::<_, io::Error>(&mut [&mut os_queue], &mut events, None)?;
///
/// for event in events.drain(..) {
/// if event.id() == event::Id(0) {
/// // The TCP connection is (likely) ready for use.
/// # return Ok(());
/// }
/// }
/// }
/// # }
/// ```
pub fn register<E>(&mut self, handle: &mut E, id: event::Id, interests: Interests, opt: RegisterOption) -> io::Result<()>
where E: Evented + ?Sized,
{
trace!("registering handle: id={}, interests={:?}, opt={:?}", id, interests, opt);
handle.register(self, id, interests, opt)
}
/// Re-register an `Evented` handle with `OsQueue`.
///
/// Re-registering an `Evented` handle allows changing the details of the
/// registration. Specifically, it allows updating the associated `id`,
/// `interests`, and `opt` specified in previous `register` and `reregister`
/// calls.
///
/// The `reregister` arguments **fully override** the previous values. In
/// other words, if a socket is registered with [readable] interest and the
/// call to `reregister` specifies only [writable], then read interest is no
/// longer monitored for the handle.
///
/// The `Evented` handle must have previously been registered with this
/// `OsQueue` otherwise the call to `reregister` may return an error.
///
/// See the [`register`] documentation for details about the function
/// arguments.
///
/// [readable]: Interests::READABLE
/// [writable]: Interests::WRITABLE
/// [`register`]: OsQueue::register
///
/// # Examples
///
/// ```
/// # fn main() -> Result<(), Box<dyn std::error::Error>> {
/// use std::io;
///
/// use gaea::{event, poll};
/// use gaea::net::TcpStream;
/// use gaea::os::{Interests, RegisterOption, OsQueue};
///
/// let mut os_queue = OsQueue::new()?;
/// let mut events = Vec::new();
///
/// // Create a TCP connection. `TcpStream` implements the `Evented` trait.
/// let address = "216.58.193.100:80".parse()?;
/// let mut stream = TcpStream::connect(address)?;
///
/// // Register the connection with `OsQueue`, only with readable interest.
/// os_queue.register(&mut stream, event::Id(0), Interests::READABLE, RegisterOption::EDGE)?;
///
/// // Reregister the connection specifying a different id and write interest
/// // instead. `RegisterOption::EDGE` must be specified even though that value
/// // is not being changed.
/// os_queue.reregister(&mut stream, event::Id(2), Interests::WRITABLE, RegisterOption::EDGE)?;
///
/// // Run the event loop.
/// loop {
/// poll::<_, io::Error>(&mut [&mut os_queue], &mut events, None)?;
///
/// for event in events.drain(..) {
/// if event.id() == event::Id(2) {
/// // The TCP connection is (likely) ready for use.
/// # return Ok(());
/// } else if event.id() == event::Id(0) {
/// // We won't receive events with the old id anymore.
/// unreachable!();
/// }
/// }
/// }
/// # }
/// ```
pub fn reregister<E>(&mut self, handle: &mut E, id: event::Id, interests: Interests, opt: RegisterOption) -> io::Result<()>
where E: Evented + ?Sized,
{
trace!("reregistering handle: id={}, interests={:?}, opt={:?}", id, interests, opt);
handle.reregister(self, id, interests, opt)
}
/// Deregister an `Evented` handle from `OsQueue`.
///
/// When an `Evented` handle is deregistered, the handle will no longer be
/// monitored for readiness state changes. Unlike disabling handles with
/// [`oneshot`], deregistering clears up any internal resources needed to
/// track the handle.
///
/// A handle can be registered again using [`register`] after it has been
/// deregistered; however, it must be passed back to the **same** `OsQueue`.
///
/// # Notes
///
/// Calling [`reregister`] after `deregister` may be work on some platforms
/// but not all. To properly re-register a handle after deregistering use
/// `register`, this works on all platforms.
///
/// [`oneshot`]: RegisterOption::ONESHOT
/// [`register`]: OsQueue::register
/// [`reregister`]: OsQueue::reregister
///
/// # Examples
///
/// ```
/// # fn main() -> Result<(), Box<dyn std::error::Error>> {
/// use std::io;
/// use std::time::Duration;
///
/// use gaea::{event, poll};
/// use gaea::net::TcpStream;
/// use gaea::os::{OsQueue, RegisterOption};
///
/// let mut os_queue = OsQueue::new()?;
/// let mut events = Vec::new();
///
/// // Create a TCP connection. `TcpStream` implements the `Evented` trait.
/// let address = "216.58.193.100:80".parse()?;
/// let mut stream = TcpStream::connect(address)?;
///
/// // Register the connection with `OsQueue`.
/// os_queue.register(&mut stream, event::Id(0), TcpStream::INTERESTS, RegisterOption::EDGE)?;
///
/// // Do stuff with the connection etc.
///
/// // Deregister it so the resources can be cleaned up.
/// os_queue.deregister(&mut stream)?;
///
/// // Set a timeout because we shouldn't receive any events anymore.
/// poll::<_, io::Error>(&mut [&mut os_queue], &mut events, Some(Duration::from_millis(100)))?;
/// assert!(events.is_empty());
/// # Ok(())
/// # }
/// ```
pub fn deregister<E>(&mut self, handle: &mut E) -> io::Result<()>
where E: Evented + ?Sized,
{
trace!("deregistering handle");
handle.deregister(self)
}
/// Get access to the system selector. Used by platform specific code, e.g.
/// `EventedFd`.
pub(crate) fn selector(&self) -> &sys::Selector |
}
impl<ES, E> event::Source<ES, E> for OsQueue
where ES: event::Sink,
E: From<io::Error>,
{
fn max_timeout(&self) -> Option<Duration> {
// Can't tell if an event is available.
None
}
fn poll(&mut self, event_sink: &mut ES) -> Result<(), E> {
self.blocking_poll(event_sink, Some(Duration::from_millis(0)))
}
fn blocking_poll(&mut self, event_sink: &mut ES, timeout: Option<Duration>) -> Result<(), E> {
trace!("polling OS queue: timeout={:?}", timeout);
self.selector.select(event_sink, timeout)
.map_err(Into::into)
}
}
| {
&self.selector
} | identifier_body |
mod.rs | //! Operating System backed readiness event queue.
//!
//! [`OsQueue`] provides an abstraction over platform specific Operating System
//! backed readiness event queues, such as kqueue or epoll.
//!
//! [`OsQueue`]: crate::os::OsQueue
//!
//! # Portability
//!
//! Using [`OsQueue`] provides a portable interface across supported platforms
//! as long as the caller takes the following into consideration:
//!
//! ### Draining readiness
//!
//! When using [edge-triggered] mode, once a readiness event is received, the
//! corresponding operation must be performed repeatedly until it returns
//! [`WouldBlock`]. Unless this is done, there is no guarantee that another
//! readiness event will be delivered, even if further data is received for the
//! [`Evented`] handle. See [`RegisterOption`] for more.
//!
//! [`WouldBlock`]: std::io::ErrorKind::WouldBlock
//! [edge-triggered]: crate::os::RegisterOption::EDGE
//! [`Evented`]: crate::os::Evented
//! [`RegisterOption`]: crate::os::RegisterOption
//!
//! ### Spurious events
//!
//! The [`Source::poll`] implementation may return readiness events even if the
//! associated [`Evented`] handle is not actually ready. Given the same code,
//! this may happen more on some platforms than others. It is important to never
//! assume that, just because a readiness notification was received, that the
//! associated operation will as well.
//!
//! If operation fails with a [`WouldBlock`] error, then the caller should not
//! treat this as an error and wait until another readiness event is received.
//!
//! Furthermore a single call to poll may result in multiple readiness events
//! being returned for a single `Evented` handle. For example, if a TCP socket
//! becomes both readable and writable, it may be possible for a single
//! readiness event to be returned with both [readable] and [writable] readiness
//! **OR** two separate events may be returned, one with readable set and one
//! with writable set.
//!
//! [`Source::poll`]: crate::event::Source::poll
//! [readable]: crate::os::Interests::READABLE
//! [writable]: crate::os::Interests::WRITABLE
//!
//! ### Registering handles
//!
//! Unless otherwise noted, it should be assumed that types implementing
//! [`Evented`] will never become ready unless they are registered with
//! `OsQueue`.
//!
//! For example:
//!
//! ```
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! use std::thread;
//! use std::time::Duration;
//!
//! use gaea::event;
//! use gaea::net::TcpStream;
//! use gaea::os::{OsQueue, RegisterOption};
//!
//! let address = "216.58.193.100:80".parse()?;
//! let mut stream = TcpStream::connect(address)?;
//!
//! // This actually does nothing towards connecting the TCP stream.
//! thread::sleep(Duration::from_secs(1));
//!
//! let mut os_queue = OsQueue::new()?;
//!
//! // The connect is not guaranteed to have started until it is registered at
//! // this point.
//! os_queue.register(&mut stream, event::Id(0), TcpStream::INTERESTS, RegisterOption::EDGE)?;
//! # Ok(())
//! # }
//! ```
//!
//! ### Timeout granularity
//!
//! The timeout provided to [`event::Source::blocking_poll`] will be rounded
//! up to the system clock granularity (usually 1ms), and kernel scheduling
//! delays mean that the blocking interval may be overrun by a small amount.
//!
//! ### Interrupts while polling
//!
//! Interrupts (`EINTR` in C and `io::ErrorKind::Interrupted` in Rust) are
//! **not** handled, they are returned as errors. In most cases however these
//! can simply be ignored, but it's up to the user how to deal with the "error".
//!
//! # Implementation notes
//!
//! `OsQueue` is backed by a readiness event queue provided by the operating
//! system. On all platforms a call to [`Source::poll`] is mostly just a direct
//! system call. The following system implementations back `OsQueue`:
//!
//! | OS | Selector |
//! |---------|----------|
//! | FreeBSD | [kqueue](https://www.freebsd.org/cgi/man.cgi?query=kqueue) |
//! | Linux | [epoll](http://man7.org/linux/man-pages/man7/epoll.7.html) |
//! | macOS | [kqueue](https://developer.apple.com/legacy/library/documentation/Darwin/Reference/ManPages/man2/kqueue.2.html) |
//! | NetBSD | [kqueue](http://netbsd.gw.com/cgi-bin/man-cgi?kqueue) |
//! | OpenBSD | [kqueue](https://man.openbsd.org/kqueue) |
//!
//! On all supported platforms socket operations are handled by using the system
//! queue. Platform specific extensions (e.g. [`EventedFd`]) allow accessing
//! other features provided by individual system selectors.
//!
//! [`Eventedfd`]: crate::sys::unix::EventedFd
//! [`signalfd`]: http://man7.org/linux/man-pages/man2/signalfd.2.html
use std::io;
use std::time::Duration;
use log::trace;
use crate::{event, sys};
mod awakener;
mod evented;
mod interests;
mod option;
pub mod signals;
pub use self::awakener::Awakener;
pub use self::evented::Evented;
pub use self::interests::Interests;
pub use self::option::RegisterOption;
pub use self::signals::{Signal, SignalSet, Signals};
/// Readiness event queue backed by the OS.
///
/// This queue allows a program to monitor a large number of [`Evented`]
/// handles, waiting until one or more become "ready" for some class of
/// operations; e.g. [reading] or [writing]. An [`Evented`] type is considered
/// ready if it is possible to immediately perform a corresponding operation;
/// e.g. read or write.
///
/// To use this queue an [`Evented`] handle must first be registered using the
/// [`register`] method, supplying an associated id, readiness interests and
/// polling option. The [associated id] is used to associate a readiness event
/// with an `Evented` handle. The readiness [interests] defines which specific
/// operations on the handle to monitor for readiness. And the final argument,
/// [`RegisterOption`], defines how to deliver the readiness events, see
/// [`RegisterOption`] for more information.
///
/// See to [module documentation] for information.
///
/// [reading]: crate::event::Ready::READABLE
/// [writing]: crate::event::Ready::WRITABLE
/// [`register`]: OsQueue::register
/// [associated id]: event::Id
/// [interests]: Interests
/// [module documentation]: crate::os
#[derive(Debug)]
pub struct | {
selector: sys::Selector,
}
impl OsQueue {
/// Create a new OS backed readiness event queue.
///
/// This function will make a syscall to the operating system to create the
/// system selector. If this syscall fails it will return the error.
///
/// # Examples
///
/// ```
/// # fn main() -> Result<(), Box<dyn std::error::Error>> {
/// use std::io;
/// use std::time::Duration;
///
/// use gaea::os::OsQueue;
/// use gaea::poll;
///
/// // Create a new OS backed readiness event queue.
/// let mut os_queue = OsQueue::new()?;
///
/// // Create an event sink.
/// let mut events = Vec::new();
///
/// // Poll the queue for new readiness events.
/// // But since no `Evented` handles have been registered we'll receive no
/// // events.
/// poll::<_, io::Error>(&mut [&mut os_queue], &mut events, Some(Duration::from_millis(500)))?;
/// # Ok(())
/// # }
/// ```
pub fn new() -> io::Result<OsQueue> {
sys::Selector::new().map(|selector| OsQueue { selector })
}
/// Register an [`Evented`] handle with the `OsQueue`.
///
/// Once registered, the [`Evented`] handle will be monitored for readiness
/// state changes. When it notices a state change, it will return a
/// readiness event for the handle the next time the queue is [`polled`].
///
/// [`polled`]: crate::poll
///
/// # Arguments
///
/// `handle`: This is the handle that the `OsQueue` should monitor for
/// readiness state changes.
///
/// `id`: The caller picks a id to associate with the handle. When [`poll`]
/// returns an [event] for the handle, this id is [included]. This allows
/// the caller to map the event to its handle. The id associated with the
/// `Evented` handle can be changed at any time by calling [`reregister`].
///
/// `interests`: Specifies which operations `OsQueue` should monitor for
/// readiness. `OsQueue` will only return readiness events for operations
/// specified by this argument. If a socket is registered with [readable]
/// interests and the socket becomes writable, no event will be returned
/// from [`poll`]. The readiness interests for an `Evented` handle can be
/// changed at any time by calling [`reregister`]. Most types that
/// implemented [`Evented`] have a associated constant named `INTERESTS`
/// which provide a sane interest for that type, e.g. [`TcpStream`
/// interests] are readable and writable.
///
/// `opt`: Specifies the registration option. Just like the interests and
/// id, the option can be changed for an `Evented` handle at any time by
/// calling [`reregister`].
///
/// [`poll`]: crate::poll
/// [event]: crate::event::Event
/// [included]: crate::event::Event::id
/// [`reregister`]: OsQueue::reregister
/// [readable]: Interests::READABLE
/// [`TcpStream` interests]: crate::net::TcpStream::INTERESTS
///
/// # Notes
///
/// Unless otherwise specified, the caller should assume that once an
/// `Evented` handle is registered with a `OsQueue` instance, it is bound to
/// that `OsQueue` for the lifetime of the `Evented` handle. This remains
/// true even if the `Evented` handle is [deregistered].
///
/// [deregistered]: OsQueue::deregister
///
/// # Examples
///
/// ```
/// # fn main() -> Result<(), Box<dyn std::error::Error>> {
/// use std::io;
///
/// use gaea::net::TcpStream;
/// use gaea::os::{OsQueue, RegisterOption};
/// use gaea::{event, poll};
///
/// // Create a new `OsQueue` as well a containers for the events.
/// let mut os_queue = OsQueue::new()?;
/// let mut events = Vec::new();
///
/// // Create a TCP connection. `TcpStream` implements the `Evented` trait.
/// let address = "216.58.193.100:80".parse()?;
/// let mut stream = TcpStream::connect(address)?;
///
/// // Register the connection with queue.
/// os_queue.register(&mut stream, event::Id(0), TcpStream::INTERESTS, RegisterOption::EDGE)?;
///
/// // Run the event loop.
/// loop {
/// poll::<_, io::Error>(&mut [&mut os_queue], &mut events, None)?;
///
/// for event in events.drain(..) {
/// if event.id() == event::Id(0) {
/// // The TCP connection is (likely) ready for use.
/// # return Ok(());
/// }
/// }
/// }
/// # }
/// ```
pub fn register<E>(&mut self, handle: &mut E, id: event::Id, interests: Interests, opt: RegisterOption) -> io::Result<()>
where E: Evented + ?Sized,
{
trace!("registering handle: id={}, interests={:?}, opt={:?}", id, interests, opt);
handle.register(self, id, interests, opt)
}
/// Re-register an `Evented` handle with `OsQueue`.
///
/// Re-registering an `Evented` handle allows changing the details of the
/// registration. Specifically, it allows updating the associated `id`,
/// `interests`, and `opt` specified in previous `register` and `reregister`
/// calls.
///
/// The `reregister` arguments **fully override** the previous values. In
/// other words, if a socket is registered with [readable] interest and the
/// call to `reregister` specifies only [writable], then read interest is no
/// longer monitored for the handle.
///
/// The `Evented` handle must have previously been registered with this
/// `OsQueue` otherwise the call to `reregister` may return an error.
///
/// See the [`register`] documentation for details about the function
/// arguments.
///
/// [readable]: Interests::READABLE
/// [writable]: Interests::WRITABLE
/// [`register`]: OsQueue::register
///
/// # Examples
///
/// ```
/// # fn main() -> Result<(), Box<dyn std::error::Error>> {
/// use std::io;
///
/// use gaea::{event, poll};
/// use gaea::net::TcpStream;
/// use gaea::os::{Interests, RegisterOption, OsQueue};
///
/// let mut os_queue = OsQueue::new()?;
/// let mut events = Vec::new();
///
/// // Create a TCP connection. `TcpStream` implements the `Evented` trait.
/// let address = "216.58.193.100:80".parse()?;
/// let mut stream = TcpStream::connect(address)?;
///
/// // Register the connection with `OsQueue`, only with readable interest.
/// os_queue.register(&mut stream, event::Id(0), Interests::READABLE, RegisterOption::EDGE)?;
///
/// // Reregister the connection specifying a different id and write interest
/// // instead. `RegisterOption::EDGE` must be specified even though that value
/// // is not being changed.
/// os_queue.reregister(&mut stream, event::Id(2), Interests::WRITABLE, RegisterOption::EDGE)?;
///
/// // Run the event loop.
/// loop {
/// poll::<_, io::Error>(&mut [&mut os_queue], &mut events, None)?;
///
/// for event in events.drain(..) {
/// if event.id() == event::Id(2) {
/// // The TCP connection is (likely) ready for use.
/// # return Ok(());
/// } else if event.id() == event::Id(0) {
/// // We won't receive events with the old id anymore.
/// unreachable!();
/// }
/// }
/// }
/// # }
/// ```
pub fn reregister<E>(&mut self, handle: &mut E, id: event::Id, interests: Interests, opt: RegisterOption) -> io::Result<()>
where E: Evented + ?Sized,
{
trace!("reregistering handle: id={}, interests={:?}, opt={:?}", id, interests, opt);
handle.reregister(self, id, interests, opt)
}
/// Deregister an `Evented` handle from `OsQueue`.
///
/// When an `Evented` handle is deregistered, the handle will no longer be
/// monitored for readiness state changes. Unlike disabling handles with
/// [`oneshot`], deregistering clears up any internal resources needed to
/// track the handle.
///
/// A handle can be registered again using [`register`] after it has been
/// deregistered; however, it must be passed back to the **same** `OsQueue`.
///
/// # Notes
///
/// Calling [`reregister`] after `deregister` may be work on some platforms
/// but not all. To properly re-register a handle after deregistering use
/// `register`, this works on all platforms.
///
/// [`oneshot`]: RegisterOption::ONESHOT
/// [`register`]: OsQueue::register
/// [`reregister`]: OsQueue::reregister
///
/// # Examples
///
/// ```
/// # fn main() -> Result<(), Box<dyn std::error::Error>> {
/// use std::io;
/// use std::time::Duration;
///
/// use gaea::{event, poll};
/// use gaea::net::TcpStream;
/// use gaea::os::{OsQueue, RegisterOption};
///
/// let mut os_queue = OsQueue::new()?;
/// let mut events = Vec::new();
///
/// // Create a TCP connection. `TcpStream` implements the `Evented` trait.
/// let address = "216.58.193.100:80".parse()?;
/// let mut stream = TcpStream::connect(address)?;
///
/// // Register the connection with `OsQueue`.
/// os_queue.register(&mut stream, event::Id(0), TcpStream::INTERESTS, RegisterOption::EDGE)?;
///
/// // Do stuff with the connection etc.
///
/// // Deregister it so the resources can be cleaned up.
/// os_queue.deregister(&mut stream)?;
///
/// // Set a timeout because we shouldn't receive any events anymore.
/// poll::<_, io::Error>(&mut [&mut os_queue], &mut events, Some(Duration::from_millis(100)))?;
/// assert!(events.is_empty());
/// # Ok(())
/// # }
/// ```
pub fn deregister<E>(&mut self, handle: &mut E) -> io::Result<()>
where E: Evented + ?Sized,
{
trace!("deregistering handle");
handle.deregister(self)
}
/// Get access to the system selector. Used by platform specific code, e.g.
/// `EventedFd`.
pub(crate) fn selector(&self) -> &sys::Selector {
&self.selector
}
}
impl<ES, E> event::Source<ES, E> for OsQueue
where ES: event::Sink,
E: From<io::Error>,
{
fn max_timeout(&self) -> Option<Duration> {
// Can't tell if an event is available.
None
}
fn poll(&mut self, event_sink: &mut ES) -> Result<(), E> {
self.blocking_poll(event_sink, Some(Duration::from_millis(0)))
}
fn blocking_poll(&mut self, event_sink: &mut ES, timeout: Option<Duration>) -> Result<(), E> {
trace!("polling OS queue: timeout={:?}", timeout);
self.selector.select(event_sink, timeout)
.map_err(Into::into)
}
}
| OsQueue | identifier_name |
data-enums.ts | import type { LangEntry } from './foundry/localization';
const valuedEnums = new WeakMap();
export const enumValues = <T extends Record<string, LangEntry>>(
o: T,
): ReadonlyArray<T[keyof T]> => {
let existing = valuedEnums.get(o);
if (!existing) {
existing = Object.freeze(Object.values(o));
valuedEnums.set(o, existing);
}
return existing;
};
export enum Fork {
Alpha = 'alpha',
Beta = 'beta',
Gamma = 'gamma',
}
export enum ThreatInfo {
Classification = 'classification',
Niche = 'niche',
Numbers = 'numbers',
StressValue = 'stressValue',
MinStressValue = 'minStressValue',
ThreatLevel = 'threatLevel',
}
export enum ThreatLevel {
Yellow = 'yellow',
Orange = 'orange',
Red = 'red',
Ultraviolet = 'ultraviolet',
}
export enum MinStressOption {
None = 'none',
Half = 'half',
Value = 'value',
}
export enum EgoType {
AGI = 'agi',
ALI = 'ali',
ASI = 'asi',
Alien = 'alien',
Exhuman = 'exhuman',
Exsurgent = 'exsurgent',
Neogenetic = 'neogenetic',
Titan = 'titan',
Transhuman = 'transhuman',
Uplift = 'uplift',
Xenofauna = 'xenofauna',
}
export enum CharacterDetail {
Age = 'age',
Aliases = 'aliases',
Background = 'background',
Career = 'career',
Faction = 'faction',
Gender = 'gender',
Interest = 'interest',
Languages = 'languages',
}
export enum AptitudeType {
Cognition = 'cog',
Intuition = 'int',
Reflexes = 'ref',
Savvy = 'sav',
Somatics = 'som',
Willpower = 'wil',
}
export enum PoolType {
Insight = 'insight',
Moxie = 'moxie',
Vigor = 'vigor',
Flex = 'flex',
Threat = 'threat',
}
export enum MorphType {
Biomorph = 'biomorph',
Synthmorph = 'synthmorph',
Pod = 'pod',
// Vehicle = "vehicle",
// Bot = "bot",
// Swarm = "swarm",
// Infomorph = "infomorph"
}
export enum BiologicalType {
Biomorph = 'biomorph',
Pod = 'pod',
Create = 'creature',
}
export enum MorphCost {
GearPoints = 'gearPoints',
MorphPoints = 'morphPoints',
}
export enum Frame {
Biological = 'biological',
Robotic = 'robotic',
}
export enum Brain {
Organic = 'organic',
Synthetic = 'synthetic',
}
export enum Complexity {
Minor = 'minor',
Moderate = 'moderate',
Major = 'major',
Rare = 'rare',
}
export enum RechargeType {
Short = 'shortRecharge',
Long = 'longRecharge',
}
export enum TraitSource {
Ego = 'ego',
Morph = 'morph',
}
export enum TraitType {
Negative = 'negative',
Positive = 'positive',
}
export enum Refresh {
Daily = 'daily',
Weekly = 'weekly',
Arc = 'arc',
}
export enum PsiPush {
ExtraTarget = 'extraTarget',
IncreasedDuration = 'increasedDuration',
IncreasedEffect = 'increasedEffect',
IncreasedPenetration = 'increasedPenetration',
IncreasedPower = 'increasedPower',
IncreasedRange = 'increasedRange',
}
export enum SleightDuration {
ActionTurns = 'actionTurns',
Constant = 'constant',
Hours = 'hours',
Instant = 'instant',
Minutes = 'minutes',
Sustained = 'sustained',
}
export enum SleightType {
Chi = 'chi',
Gamma = 'gamma',
Epsilon = 'epsilon',
}
export enum SleightTarget {
Self = 'self',
BiologicalLife = 'biologicalLife',
PsiUser = 'psiUser',
}
export enum SleightSpecial {
Attack = 'attack',
Heal = 'heal',
MentalArmor = 'mentalArmor',
}
export enum SurpriseState {
None = 'none',
Alerted = 'alerted',
Surprised = 'surprised',
}
export enum PhysicalWare {
Bio = 'bioware',
Cyber = 'cyberware',
Hard = 'hardware',
Nano = 'nanoware',
}
export enum GearQuality {
TopOfTheLine = 'topOfTheLine',
StateOfTheArt = 'stateOfTheArt',
WellCrafted = 'wellCrafted',
Average = 'average',
Outdated = 'outdated',
Shoddy = 'shoddy',
InDisrepair = 'inDisrepair',
}
export enum BlueprintType {
SingleUse = 'singleUse',
LimitedUse = 'limitedUse',
MultiUse = 'multiUse',
OpenSource = 'openSource',
}
export enum DeviceType {
Mote = 'mote',
Host = 'host',
Server = 'server',
}
export enum SoftwareType {
App = 'app',
AppAsService = 'appAsService',
AppAsWare = 'appAsWare',
MeshService = 'meshService',
Meshware = 'meshware',
}
export enum Activation {
None = 'none',
Toggle = 'toggle',
Use = 'use',
}
export enum GearTrait {
Concealable = 'concealable',
Fragile = 'fragile',
SingleUse = 'singleUse',
TwoHanded = 'twoHanded',
}
export enum RangedWeaponTrait {
Fixed = 'fixed',
Long = 'long',
NoPointBlank = 'noPointBlank',
NoClose = 'noClose',
}
export enum RangeRating {
PointBlank = 'pointBlank',
Close = 'close',
Range = 'withinRange',
BeyondRange = 'beyondRange',
}
export enum RangedWeaponAccessory {
ArmSlide = 'armSlide',
ExtendedMagazine = 'extendedMagazine',
Gyromount = 'gyromount',
ImagingScope = 'imagingScope',
FlashSuppressor = 'flashSuppressor',
LaserSight = 'laserSight',
SafetySystem = 'safetySystem',
ShockSafety = 'shockSafety',
Silencer = 'silencer',
Smartlink = 'smartlink',
SmartMagazine = 'smartMagazine',
}
export enum AttackTrait {
Blinding = 'blinding',
Entangling = 'entangling',
Knockdown = 'knockdown',
Pain = 'pain',
Shock = 'shock',
Stun = 'stun',
}
export enum CharacterPoint {
Rez = 'rez',
Customization = 'customization',
Morph = 'morph',
Gear = 'gear',
Credits = 'credits',
}
export enum EgoSetting {
CanDefault = 'canDefault',
TrackMentalHealth = 'trackMentalHealth',
TrackPoints = 'trackPoints',
TrackReputations = 'trackReputations',
CharacterDetails = 'characterDetails',
ThreatDetails = 'threatDetails',
UseThreat = 'useThreat',
IgnoreOverburdened = 'ignoreOverburdened',
}
export enum ShellType {
SynthMorph = 'synthmorph',
Vehicle = 'vehicle',
Bot = 'bot',
}
export enum VehicleType {
Aircraft = 'aircraft',
Exoskeleton = 'exoskeleton',
GroundCraft = 'groundcraft',
Hardsuit = 'hardsuit',
Hybrid = 'hybrid',
NauticalCraft = 'nauticalCraft',
PersonalTransportDevice = 'personalTransportDevice',
Spacecraft = 'spacecraft',
}
export enum BotType {
Combat = 'combat',
Exploration = 'exploration',
Medical = 'medical',
Personal = 'personal',
Recon = 'recon',
Utility = 'utility',
}
export enum ShellHostType {
ALI = 'aliOnly',
Cyberbrain = 'cyberbrain',
}
export enum SubstanceType {
Chemical = 'chemical',
Drug = 'drug',
Toxin = 'toxin',
}
export enum SubstanceClassification {
Biochem = 'biochem',
Nano = 'nano',
Electronic = 'electronic',
}
export enum SubstanceApplicationMethod {
Dermal = 'dermal',
Inhalation = 'inhalation',
Injected = 'injection',
Oral = 'oral',
}
export enum DrugAddiction {
Mental = 'mental',
Physical = 'physical',
Both = 'mental/physical',
}
export enum DrugCategory {
Cognitive = 'cognitive',
Combat = 'combat',
Health = 'health',
Nano = 'nanodrug',
Narco = 'narcoalgorithm',
Petal = 'petal',
Psi = 'psi',
Recreational = 'recreational',
Social = 'social',
}
export enum ExplosiveSize {
Micro = 'micro',
Mini = 'mini', |
export enum ExplosiveType {
Grenade = 'grenade',
Missile = 'missile',
Generic = 'generic',
}
export enum AreaEffectType {
Uniform = 'uniform',
Centered = 'centered',
Cone = 'cone',
}
export enum CalledShot {
BypassArmor = 'bypassArmor',
Disarm = 'disarm',
Knockdown = 'knockdown',
Redirect = 'redirect',
SpecificTarget = 'specificTarget',
}
export enum WeaponAttackType {
Primary = 'primaryAttack',
Secondary = 'secondaryAttack',
}
export enum PoolEffectUsability {
UsableTwice = 'usableTwice',
Disable = 'disable',
}
export enum PhysicalServiceType {
Generic = 'generic',
FakeId = 'fakeEgoId',
}
export enum KineticWeaponClass {
HoldoutPistol = 'holdoutPistol',
MediumPistol = 'mediumPistol',
HeavyPistol = 'heavyPistol',
MachinePistol = 'machinePistol',
SubmachineGun = 'submachineGun',
AssaultRifle = 'assaultRifle',
BattleRifle = 'battleRifle',
MachineGun = 'machineGun',
SniperRifle = 'sniperRifle',
}
export enum FirearmAmmoModifierType {
Formula = 'formula',
Halve = 'halve',
NoDamage = 'noDamage',
}
export enum SprayPayload {
CoatAmmunition = 'coatAmmunition',
FirePayload = 'firePayload',
}
export enum ExplosiveTrigger {
Airburst = 'airburst',
Impact = 'impact',
Proximity = 'proximity',
Signal = 'signal',
Timer = 'timer',
}
export enum Demolition {
DamageAgainsStructures = 'damageAgainstStructures',
ShapeCentered = 'shape',
StructuralWeakpoint = 'structuralWeakpoint',
DisarmDifficulty = 'disarmDifficulty',
}
export enum FabType {
Specialized = 'specialized',
Gland = 'gland',
General = 'general',
}
export enum EgoBackground {
Colonist = 'colonist',
Enclaver = 'enclaver',
Freelancer = 'freelancer',
Hyperelite = 'hyperelite',
Indenture = 'indenture',
Infolife = 'infolife',
Isolate = 'isolate',
Lost = 'lost',
Underclass = 'underclass',
Uplift = 'uplift',
}
export enum EgoCareer {
Academic = 'academic',
CovertOperative = 'covertOperative',
Enforcer = 'enforcer',
Explorer = 'explorer',
Face = 'face',
Generhacker = 'genehacker',
Hacker = 'hacker',
Investigator = 'investigator',
Medic = 'medic',
Mindhacker = 'mindhacker',
Scavenger = 'scavenger',
Scientist = 'scientist',
Soldier = 'soldier',
Techie = 'techie',
}
export enum EgoInterest {
AnimalHandler = 'animalHandler',
ArtistOrIcon = 'artist/icon',
Async = 'async',
Commander = 'commander',
Fighter = 'fighter',
ForensicSpecialists = 'forensicsSpecialist',
JackOfAllTrades = 'jack-of-all-trades',
Jammer = 'jammer',
Networker = 'networker',
Paramedic = 'paramedic',
Pilot = 'pilot',
Rogue = 'rogue',
Slacker = 'slacker',
Spacer = 'spacer',
Student = 'student',
Survivalist = 'survivalist',
}
export enum EgoFaction {
Anarchist = 'anarchist',
Argonaut = 'argonaut',
Barsoomian = 'barsoomian',
Brinker = 'brinker',
Criminal = 'criminal',
Extropian = 'extropian',
Hypercorp = 'hypercorp',
Jovian = 'jovian',
LunarOrOrbital = 'lunar/orbital',
Mercurial = 'mercurial',
Reclaimer = 'reclaimer',
Scum = 'scum',
Socialite = 'socialite',
Titanian = 'titanian',
Venusian = 'venusian',
Regional = 'regional',
}
export enum WeaponSkillOption {
None = 'none',
Exotic = 'exotic',
}
export enum ExsurgentStrain {
Alter = 'alter',
HauntingVirus = 'hauntingVirus',
Mindstealer = 'mindstealer',
Skrik = 'skrik',
WattsMacleod = 'Watts-MacLeod',
Whisper = 'whisper',
Xenomorph = 'xenomorph',
}
export enum FullDefenseType {
Physical = 'physical',
Mental = 'mental',
}
export enum SuperiorResultEffect {
Quality = 'quality',
Quantity = 'quantity',
Details = 'detail',
Time = 'time',
Covertness = 'covertness',
Damage = 'damage',
} | Standard = 'standard',
} | random_line_split |
data-enums.ts | import type { LangEntry } from './foundry/localization';
const valuedEnums = new WeakMap();
export const enumValues = <T extends Record<string, LangEntry>>(
o: T,
): ReadonlyArray<T[keyof T]> => {
let existing = valuedEnums.get(o);
if (!existing) |
return existing;
};
export enum Fork {
Alpha = 'alpha',
Beta = 'beta',
Gamma = 'gamma',
}
export enum ThreatInfo {
Classification = 'classification',
Niche = 'niche',
Numbers = 'numbers',
StressValue = 'stressValue',
MinStressValue = 'minStressValue',
ThreatLevel = 'threatLevel',
}
export enum ThreatLevel {
Yellow = 'yellow',
Orange = 'orange',
Red = 'red',
Ultraviolet = 'ultraviolet',
}
export enum MinStressOption {
None = 'none',
Half = 'half',
Value = 'value',
}
export enum EgoType {
AGI = 'agi',
ALI = 'ali',
ASI = 'asi',
Alien = 'alien',
Exhuman = 'exhuman',
Exsurgent = 'exsurgent',
Neogenetic = 'neogenetic',
Titan = 'titan',
Transhuman = 'transhuman',
Uplift = 'uplift',
Xenofauna = 'xenofauna',
}
export enum CharacterDetail {
Age = 'age',
Aliases = 'aliases',
Background = 'background',
Career = 'career',
Faction = 'faction',
Gender = 'gender',
Interest = 'interest',
Languages = 'languages',
}
export enum AptitudeType {
Cognition = 'cog',
Intuition = 'int',
Reflexes = 'ref',
Savvy = 'sav',
Somatics = 'som',
Willpower = 'wil',
}
export enum PoolType {
Insight = 'insight',
Moxie = 'moxie',
Vigor = 'vigor',
Flex = 'flex',
Threat = 'threat',
}
export enum MorphType {
Biomorph = 'biomorph',
Synthmorph = 'synthmorph',
Pod = 'pod',
// Vehicle = "vehicle",
// Bot = "bot",
// Swarm = "swarm",
// Infomorph = "infomorph"
}
export enum BiologicalType {
Biomorph = 'biomorph',
Pod = 'pod',
Create = 'creature',
}
export enum MorphCost {
GearPoints = 'gearPoints',
MorphPoints = 'morphPoints',
}
export enum Frame {
Biological = 'biological',
Robotic = 'robotic',
}
export enum Brain {
Organic = 'organic',
Synthetic = 'synthetic',
}
export enum Complexity {
Minor = 'minor',
Moderate = 'moderate',
Major = 'major',
Rare = 'rare',
}
export enum RechargeType {
Short = 'shortRecharge',
Long = 'longRecharge',
}
export enum TraitSource {
Ego = 'ego',
Morph = 'morph',
}
export enum TraitType {
Negative = 'negative',
Positive = 'positive',
}
export enum Refresh {
Daily = 'daily',
Weekly = 'weekly',
Arc = 'arc',
}
export enum PsiPush {
ExtraTarget = 'extraTarget',
IncreasedDuration = 'increasedDuration',
IncreasedEffect = 'increasedEffect',
IncreasedPenetration = 'increasedPenetration',
IncreasedPower = 'increasedPower',
IncreasedRange = 'increasedRange',
}
export enum SleightDuration {
ActionTurns = 'actionTurns',
Constant = 'constant',
Hours = 'hours',
Instant = 'instant',
Minutes = 'minutes',
Sustained = 'sustained',
}
export enum SleightType {
Chi = 'chi',
Gamma = 'gamma',
Epsilon = 'epsilon',
}
export enum SleightTarget {
Self = 'self',
BiologicalLife = 'biologicalLife',
PsiUser = 'psiUser',
}
export enum SleightSpecial {
Attack = 'attack',
Heal = 'heal',
MentalArmor = 'mentalArmor',
}
export enum SurpriseState {
None = 'none',
Alerted = 'alerted',
Surprised = 'surprised',
}
export enum PhysicalWare {
Bio = 'bioware',
Cyber = 'cyberware',
Hard = 'hardware',
Nano = 'nanoware',
}
export enum GearQuality {
TopOfTheLine = 'topOfTheLine',
StateOfTheArt = 'stateOfTheArt',
WellCrafted = 'wellCrafted',
Average = 'average',
Outdated = 'outdated',
Shoddy = 'shoddy',
InDisrepair = 'inDisrepair',
}
export enum BlueprintType {
SingleUse = 'singleUse',
LimitedUse = 'limitedUse',
MultiUse = 'multiUse',
OpenSource = 'openSource',
}
export enum DeviceType {
Mote = 'mote',
Host = 'host',
Server = 'server',
}
export enum SoftwareType {
App = 'app',
AppAsService = 'appAsService',
AppAsWare = 'appAsWare',
MeshService = 'meshService',
Meshware = 'meshware',
}
export enum Activation {
None = 'none',
Toggle = 'toggle',
Use = 'use',
}
export enum GearTrait {
Concealable = 'concealable',
Fragile = 'fragile',
SingleUse = 'singleUse',
TwoHanded = 'twoHanded',
}
export enum RangedWeaponTrait {
Fixed = 'fixed',
Long = 'long',
NoPointBlank = 'noPointBlank',
NoClose = 'noClose',
}
export enum RangeRating {
PointBlank = 'pointBlank',
Close = 'close',
Range = 'withinRange',
BeyondRange = 'beyondRange',
}
export enum RangedWeaponAccessory {
ArmSlide = 'armSlide',
ExtendedMagazine = 'extendedMagazine',
Gyromount = 'gyromount',
ImagingScope = 'imagingScope',
FlashSuppressor = 'flashSuppressor',
LaserSight = 'laserSight',
SafetySystem = 'safetySystem',
ShockSafety = 'shockSafety',
Silencer = 'silencer',
Smartlink = 'smartlink',
SmartMagazine = 'smartMagazine',
}
export enum AttackTrait {
Blinding = 'blinding',
Entangling = 'entangling',
Knockdown = 'knockdown',
Pain = 'pain',
Shock = 'shock',
Stun = 'stun',
}
export enum CharacterPoint {
Rez = 'rez',
Customization = 'customization',
Morph = 'morph',
Gear = 'gear',
Credits = 'credits',
}
export enum EgoSetting {
CanDefault = 'canDefault',
TrackMentalHealth = 'trackMentalHealth',
TrackPoints = 'trackPoints',
TrackReputations = 'trackReputations',
CharacterDetails = 'characterDetails',
ThreatDetails = 'threatDetails',
UseThreat = 'useThreat',
IgnoreOverburdened = 'ignoreOverburdened',
}
export enum ShellType {
SynthMorph = 'synthmorph',
Vehicle = 'vehicle',
Bot = 'bot',
}
export enum VehicleType {
Aircraft = 'aircraft',
Exoskeleton = 'exoskeleton',
GroundCraft = 'groundcraft',
Hardsuit = 'hardsuit',
Hybrid = 'hybrid',
NauticalCraft = 'nauticalCraft',
PersonalTransportDevice = 'personalTransportDevice',
Spacecraft = 'spacecraft',
}
export enum BotType {
Combat = 'combat',
Exploration = 'exploration',
Medical = 'medical',
Personal = 'personal',
Recon = 'recon',
Utility = 'utility',
}
export enum ShellHostType {
ALI = 'aliOnly',
Cyberbrain = 'cyberbrain',
}
export enum SubstanceType {
Chemical = 'chemical',
Drug = 'drug',
Toxin = 'toxin',
}
export enum SubstanceClassification {
Biochem = 'biochem',
Nano = 'nano',
Electronic = 'electronic',
}
export enum SubstanceApplicationMethod {
Dermal = 'dermal',
Inhalation = 'inhalation',
Injected = 'injection',
Oral = 'oral',
}
export enum DrugAddiction {
Mental = 'mental',
Physical = 'physical',
Both = 'mental/physical',
}
export enum DrugCategory {
Cognitive = 'cognitive',
Combat = 'combat',
Health = 'health',
Nano = 'nanodrug',
Narco = 'narcoalgorithm',
Petal = 'petal',
Psi = 'psi',
Recreational = 'recreational',
Social = 'social',
}
export enum ExplosiveSize {
Micro = 'micro',
Mini = 'mini',
Standard = 'standard',
}
export enum ExplosiveType {
Grenade = 'grenade',
Missile = 'missile',
Generic = 'generic',
}
export enum AreaEffectType {
Uniform = 'uniform',
Centered = 'centered',
Cone = 'cone',
}
export enum CalledShot {
BypassArmor = 'bypassArmor',
Disarm = 'disarm',
Knockdown = 'knockdown',
Redirect = 'redirect',
SpecificTarget = 'specificTarget',
}
export enum WeaponAttackType {
Primary = 'primaryAttack',
Secondary = 'secondaryAttack',
}
export enum PoolEffectUsability {
UsableTwice = 'usableTwice',
Disable = 'disable',
}
export enum PhysicalServiceType {
Generic = 'generic',
FakeId = 'fakeEgoId',
}
export enum KineticWeaponClass {
HoldoutPistol = 'holdoutPistol',
MediumPistol = 'mediumPistol',
HeavyPistol = 'heavyPistol',
MachinePistol = 'machinePistol',
SubmachineGun = 'submachineGun',
AssaultRifle = 'assaultRifle',
BattleRifle = 'battleRifle',
MachineGun = 'machineGun',
SniperRifle = 'sniperRifle',
}
export enum FirearmAmmoModifierType {
Formula = 'formula',
Halve = 'halve',
NoDamage = 'noDamage',
}
export enum SprayPayload {
CoatAmmunition = 'coatAmmunition',
FirePayload = 'firePayload',
}
export enum ExplosiveTrigger {
Airburst = 'airburst',
Impact = 'impact',
Proximity = 'proximity',
Signal = 'signal',
Timer = 'timer',
}
export enum Demolition {
DamageAgainsStructures = 'damageAgainstStructures',
ShapeCentered = 'shape',
StructuralWeakpoint = 'structuralWeakpoint',
DisarmDifficulty = 'disarmDifficulty',
}
export enum FabType {
Specialized = 'specialized',
Gland = 'gland',
General = 'general',
}
export enum EgoBackground {
Colonist = 'colonist',
Enclaver = 'enclaver',
Freelancer = 'freelancer',
Hyperelite = 'hyperelite',
Indenture = 'indenture',
Infolife = 'infolife',
Isolate = 'isolate',
Lost = 'lost',
Underclass = 'underclass',
Uplift = 'uplift',
}
export enum EgoCareer {
Academic = 'academic',
CovertOperative = 'covertOperative',
Enforcer = 'enforcer',
Explorer = 'explorer',
Face = 'face',
Generhacker = 'genehacker',
Hacker = 'hacker',
Investigator = 'investigator',
Medic = 'medic',
Mindhacker = 'mindhacker',
Scavenger = 'scavenger',
Scientist = 'scientist',
Soldier = 'soldier',
Techie = 'techie',
}
export enum EgoInterest {
AnimalHandler = 'animalHandler',
ArtistOrIcon = 'artist/icon',
Async = 'async',
Commander = 'commander',
Fighter = 'fighter',
ForensicSpecialists = 'forensicsSpecialist',
JackOfAllTrades = 'jack-of-all-trades',
Jammer = 'jammer',
Networker = 'networker',
Paramedic = 'paramedic',
Pilot = 'pilot',
Rogue = 'rogue',
Slacker = 'slacker',
Spacer = 'spacer',
Student = 'student',
Survivalist = 'survivalist',
}
export enum EgoFaction {
Anarchist = 'anarchist',
Argonaut = 'argonaut',
Barsoomian = 'barsoomian',
Brinker = 'brinker',
Criminal = 'criminal',
Extropian = 'extropian',
Hypercorp = 'hypercorp',
Jovian = 'jovian',
LunarOrOrbital = 'lunar/orbital',
Mercurial = 'mercurial',
Reclaimer = 'reclaimer',
Scum = 'scum',
Socialite = 'socialite',
Titanian = 'titanian',
Venusian = 'venusian',
Regional = 'regional',
}
export enum WeaponSkillOption {
None = 'none',
Exotic = 'exotic',
}
export enum ExsurgentStrain {
Alter = 'alter',
HauntingVirus = 'hauntingVirus',
Mindstealer = 'mindstealer',
Skrik = 'skrik',
WattsMacleod = 'Watts-MacLeod',
Whisper = 'whisper',
Xenomorph = 'xenomorph',
}
export enum FullDefenseType {
Physical = 'physical',
Mental = 'mental',
}
export enum SuperiorResultEffect {
Quality = 'quality',
Quantity = 'quantity',
Details = 'detail',
Time = 'time',
Covertness = 'covertness',
Damage = 'damage',
}
| {
existing = Object.freeze(Object.values(o));
valuedEnums.set(o, existing);
} | conditional_block |
config.go | package pkg
import (
"context"
"cto-github.cisco.com/NFV-BU/go-msx/cli"
"cto-github.cisco.com/NFV-BU/go-msx/config"
"cto-github.cisco.com/NFV-BU/go-msx/config/pflagprovider"
"cto-github.cisco.com/NFV-BU/go-msx/fs"
"cto-github.cisco.com/NFV-BU/go-msx/log"
"cto-github.cisco.com/NFV-BU/go-msx/resource"
"encoding/base64"
"fmt"
"path"
"path/filepath"
"runtime"
"strconv"
"time"
)
var logger = log.NewLogger("build")
const (
// build.yml
configRootMsx = "msx"
configRootLibrary = "library"
configRootExecutable = "executable"
configRootBuild = "build"
configRootDocker = "docker"
configRootKubernetes = "kubernetes"
configRootManifest = "manifest"
configRootGo = "go"
configRootGenerate = "generate"
configRootResources = "resources"
configRootAssemblies = "assemblies"
configRootBinaries = "artifactory"
// bootstrap.yml
configRootAppInfo = "info.app"
configRootServer = "server"
// Output directories
configOutputRootPath = "dist/root"
configAssemblyPath = "dist/assembly"
configTestPath = "test"
)
var (
defaultConfigs = map[string]string{
"spring.application.name": "build",
"msx.platform.includegroups": "com.cisco.**",
"msx.platform.swaggerartifact": "com.cisco.nfv:nfv-swagger",
"msx.platform.swaggerwebjar": "org.webjars:swagger-ui:3.23.11",
"msx.deploymentGroup": "${spring.application.name}",
"build.number": "SNAPSHOT",
"build.group": "com.cisco.msx",
"manifest.folder": "Build-Stable",
"kubernetes.group": "platformms",
"docker.dockerfile": "docker/Dockerfile", // TODO: v1.0.0: switch to default 'build/package/Dockerfile'
"docker.baseimage": "msx-base-buster:3.9.0-70",
"docker.repository": "dockerhub.cisco.com/vms-platform-dev-docker",
"docker.username": "",
"docker.password": "",
"go.env.all.GOPRIVATE": "cto-github.cisco.com/NFV-BU",
"go.env.all.GOPROXY": "https://engci-maven.cisco.com/artifactory/go/,https://proxy.golang.org,direct",
"go.env.linux.GOFLAGS": `-buildmode=pie -i -ldflags="-extldflags=-Wl,-z,now,-z,relro" -ldflags=-s -ldflags=-w`,
"go.env.darwin.GOFLAGS": `-i`,
"library.name": "",
"assemblies.root": "platform-common",
"artifactory.assemblies": "true",
"artifactory.repository": "https://engci-maven-master.cisco.com/artifactory/symphony-group/vms-3.0-binaries",
"artifactory.installer": "deployments/kubernetes",
"artifactory.username": "",
"artifactory.password": "",
}
)
type AppInfo struct {
Name string
Attributes struct {
DisplayName string
}
}
type Server struct {
Port int
ContextPath string
StaticPath string
}
func (p Server) PortString() string {
return strconv.Itoa(p.Port)
}
type Executable struct {
Cmd string // refers to `cmd/<name>/main.go`
ConfigFiles []string
}
type Library struct {
Name string
}
type Go struct {
Env struct {
All map[string]string
Linux map[string]string
Darwin map[string]string
}
}
func (g Go) Environment() map[string]string {
result := make(map[string]string)
copyMap := func(source map[string]string) {
for k, v := range source {
result[k] = v
}
}
copyMap(g.Env.All)
switch runtime.GOOS {
case "linux":
copyMap(g.Env.Linux)
case "darwin":
copyMap(g.Env.Darwin)
}
return result
}
type MsxParams struct {
Release string
Platform struct {
ParentArtifacts []string
SwaggerArtifact string
SwaggerWebJar string
Version string
IncludeGroups string
}
DeploymentGroup string
}
type Build struct {
Number string
Group string
}
type Manifest struct {
Folder string
}
type Docker struct {
Dockerfile string
BaseImage string
Repository string
Username string
Password string
}
type Kubernetes struct {
Group string
}
type Generate struct {
Path string
Command string `config:"default="`
VfsGen *GenerateVfs
}
type GenerateVfs struct {
Root string `config:"default="`
Filename string `config:"default=assets.go"`
VariableName string `config:"default=assets"`
Includes []string
Excludes []string `config:"default="`
}
// TODO: 1.0 : Move to format similar to Generate
type Resources struct {
Includes []string
Excludes []string
Mappings []PathMapping
}
type PathMapping struct {
From string
To string
}
type Assemblies struct {
Root string
Custom []Assembly
}
type Assembly struct {
Path string // Source path of files
PathPrefix string // Add leading path in archive
ManifestPrefix string // Leading name of archive
ManifestKey string // Output path in json manifest
Includes []string `config:"default=/**/*"`
Excludes []string
}
func (a Assembly) filename() string {
return fmt.Sprintf("%s-%s.tar", a.ManifestPrefix, BuildConfig.FullBuildNumber())
}
func (a Assembly) OutputFile() string {
return filepath.Join(BuildConfig.AssemblyPath(), a.filename())
}
func (a Assembly) PublishUrl() string {
return path.Join(BuildConfig.BinariesUrl(), a.filename())
}
type Binaries struct {
Assemblies bool // Include all assemblies in binaries publishing
Installer string // Folder with more installer binaries
Repository string // Root URL of artifactory binaries repository
Username string // Injected from Jenkins credentials store via ARTIFACTORY_USERNAME
Password string // Injected from Jenkins credentials store via ARTIFACTORY_PASSWORD
}
func (b Binaries) Authorization() string {
return "Basic " + base64.StdEncoding.EncodeToString([]byte(b.Username+":"+b.Password))
}
type Config struct {
Timestamp time.Time
Library Library
Msx MsxParams
Go Go
Executable Executable
Build Build
App AppInfo
Server Server
Docker Docker
Kubernetes Kubernetes
Manifest Manifest
Generate []Generate
Resources Resources
Assemblies Assemblies
Binaries Binaries
Fs *fs.FileSystemConfig
Cfg *config.Config
}
func (p Config) FullBuildNumber() string {
return fmt.Sprintf("%s-%s", p.Msx.Release, p.Build.Number)
}
func (p Config) OutputRoot() string {
return configOutputRootPath
}
func (p Config) TestPath() string {
return configTestPath
}
func (p Config) AssemblyPath() string {
return configAssemblyPath
}
func (p Config) InputCommandRoot() string {
return path.Join("cmd", p.Executable.Cmd)
}
func (p Config) | () string {
return strconv.Itoa(p.Server.Port)
}
func (p Config) OutputConfigPath() string {
return path.Join(configOutputRootPath, p.Fs.Root, p.Fs.Configs)
}
func (p Config) OutputResourcesPath() string {
return path.Join(configOutputRootPath, p.Fs.Root, p.Fs.Resources)
}
func (p Config) OutputBinaryPath() string {
return path.Join(configOutputRootPath, p.Fs.Root, p.Fs.Binaries)
}
func (p Config) OutputStaticPath() string {
return path.Join(p.OutputResourcesPath(), "www")
}
func (p Config) BinariesUrl() string {
return path.Join(
BuildConfig.Binaries.Repository,
BuildConfig.Msx.DeploymentGroup,
BuildConfig.FullBuildNumber())
}
var BuildConfig = new(Config)
func LoadAppBuildConfig(ctx context.Context, cfg *config.Config, providers []config.Provider) (finalConfig *config.Config, err error) {
if err = cfg.Populate(&BuildConfig.Msx, configRootMsx); err != nil {
return
}
if err = cfg.Populate(&BuildConfig.Executable, configRootExecutable); err != nil {
return
}
if err = cfg.Populate(&BuildConfig.Build, configRootBuild); err != nil {
return
}
for _, v := range BuildConfig.Executable.ConfigFiles {
filePath := path.Join(BuildConfig.InputCommandRoot(), v)
fileProvider := config.NewFileProvider(v, filePath)
providers = append(providers, fileProvider)
}
cfg = config.NewConfig(providers...)
if err = cfg.Load(ctx); err != nil {
return
}
if err = cfg.Populate(&BuildConfig.App, configRootAppInfo); err != nil {
return
}
// Set the spring app name if it is not set
springAppName, _ := cfg.StringOr("spring.application.name", "build")
if springAppName == "build" {
defaultConfigs["spring.application.name"] = BuildConfig.App.Name
_ = cfg.Load(ctx)
}
if err = cfg.Populate(&BuildConfig.Server, configRootServer); err != nil {
return
}
if err = cfg.Populate(&BuildConfig.Docker, configRootDocker); err != nil {
return
}
if err = cfg.Populate(&BuildConfig.Kubernetes, configRootKubernetes); err != nil {
return
}
if err = cfg.Populate(&BuildConfig.Manifest, configRootManifest); err != nil {
return
}
if err = cfg.Populate(&BuildConfig.Resources, configRootResources); err != nil {
return
}
return cfg, nil
}
func LoadBuildConfig(ctx context.Context, configFiles []string) (err error) {
var providers = []config.Provider{
config.NewStatic("defaults", defaultConfigs),
}
defaultFilesProviders := config.NewHttpFileProvidersFromGlob("Defaults", resource.Defaults, "**/defaults-*")
providers = append(providers, defaultFilesProviders...)
for _, configFile := range configFiles {
fileProvider := config.NewFileProvider("Build", configFile)
providers = append(providers, fileProvider)
}
envProvider := config.NewEnvironment("Environment")
providers = append(providers, envProvider)
cliProvider := pflagprovider.NewPflagSource("CommandLine", cli.RootCmd().Flags(), "cli.flag.")
providers = append(providers, cliProvider)
cfg := config.NewConfig(providers...)
if err = cfg.Load(ctx); err != nil {
return
}
BuildConfig.Timestamp = time.Now().UTC()
if err = cfg.Populate(&BuildConfig.Library, configRootLibrary); err != nil {
return
}
if err = cfg.Populate(&BuildConfig.Go, configRootGo); err != nil {
return
}
if err = cfg.Populate(&BuildConfig.Generate, configRootGenerate); err != nil {
return
}
if BuildConfig.Library.Name == "" {
if newCfg, err := LoadAppBuildConfig(ctx, cfg, providers); err != nil {
return err
} else {
cfg = newCfg
}
}
if BuildConfig.Fs, err = fs.NewFileSystemConfig(cfg); err != nil {
return err
}
if BuildConfig.Msx.DeploymentGroup != "" {
if err = cfg.Populate(&BuildConfig.Assemblies, configRootAssemblies); err != nil {
return err
}
if err = cfg.Populate(&BuildConfig.Binaries, configRootBinaries); err != nil {
return err
}
}
BuildConfig.Cfg = cfg
return nil
}
| Port | identifier_name |
config.go | package pkg
import (
"context"
"cto-github.cisco.com/NFV-BU/go-msx/cli"
"cto-github.cisco.com/NFV-BU/go-msx/config"
"cto-github.cisco.com/NFV-BU/go-msx/config/pflagprovider"
"cto-github.cisco.com/NFV-BU/go-msx/fs"
"cto-github.cisco.com/NFV-BU/go-msx/log"
"cto-github.cisco.com/NFV-BU/go-msx/resource"
"encoding/base64"
"fmt"
"path"
"path/filepath"
"runtime"
"strconv"
"time"
)
var logger = log.NewLogger("build")
const (
// build.yml
configRootMsx = "msx"
configRootLibrary = "library"
configRootExecutable = "executable"
configRootBuild = "build"
configRootDocker = "docker"
configRootKubernetes = "kubernetes"
configRootManifest = "manifest"
configRootGo = "go"
configRootGenerate = "generate"
configRootResources = "resources"
configRootAssemblies = "assemblies"
configRootBinaries = "artifactory"
// bootstrap.yml
configRootAppInfo = "info.app"
configRootServer = "server"
// Output directories
configOutputRootPath = "dist/root"
configAssemblyPath = "dist/assembly"
configTestPath = "test"
)
var (
defaultConfigs = map[string]string{
"spring.application.name": "build",
"msx.platform.includegroups": "com.cisco.**",
"msx.platform.swaggerartifact": "com.cisco.nfv:nfv-swagger",
"msx.platform.swaggerwebjar": "org.webjars:swagger-ui:3.23.11",
"msx.deploymentGroup": "${spring.application.name}",
"build.number": "SNAPSHOT",
"build.group": "com.cisco.msx",
"manifest.folder": "Build-Stable",
"kubernetes.group": "platformms",
"docker.dockerfile": "docker/Dockerfile", // TODO: v1.0.0: switch to default 'build/package/Dockerfile'
"docker.baseimage": "msx-base-buster:3.9.0-70",
"docker.repository": "dockerhub.cisco.com/vms-platform-dev-docker",
"docker.username": "",
"docker.password": "",
"go.env.all.GOPRIVATE": "cto-github.cisco.com/NFV-BU",
"go.env.all.GOPROXY": "https://engci-maven.cisco.com/artifactory/go/,https://proxy.golang.org,direct",
"go.env.linux.GOFLAGS": `-buildmode=pie -i -ldflags="-extldflags=-Wl,-z,now,-z,relro" -ldflags=-s -ldflags=-w`,
"go.env.darwin.GOFLAGS": `-i`,
"library.name": "",
"assemblies.root": "platform-common",
"artifactory.assemblies": "true",
"artifactory.repository": "https://engci-maven-master.cisco.com/artifactory/symphony-group/vms-3.0-binaries",
"artifactory.installer": "deployments/kubernetes",
"artifactory.username": "",
"artifactory.password": "",
}
)
type AppInfo struct {
Name string
Attributes struct {
DisplayName string
}
}
type Server struct {
Port int
ContextPath string
StaticPath string
}
func (p Server) PortString() string {
return strconv.Itoa(p.Port)
}
type Executable struct {
Cmd string // refers to `cmd/<name>/main.go`
ConfigFiles []string
}
type Library struct {
Name string
}
type Go struct {
Env struct {
All map[string]string
Linux map[string]string
Darwin map[string]string
}
}
func (g Go) Environment() map[string]string {
result := make(map[string]string)
copyMap := func(source map[string]string) {
for k, v := range source {
result[k] = v
}
}
copyMap(g.Env.All)
switch runtime.GOOS {
case "linux":
copyMap(g.Env.Linux)
case "darwin":
copyMap(g.Env.Darwin)
}
return result
}
type MsxParams struct {
Release string
Platform struct {
ParentArtifacts []string
SwaggerArtifact string
SwaggerWebJar string
Version string
IncludeGroups string
}
DeploymentGroup string
}
type Build struct {
Number string
Group string
}
type Manifest struct {
Folder string
}
type Docker struct {
Dockerfile string
BaseImage string
Repository string
Username string
Password string
}
type Kubernetes struct {
Group string
}
type Generate struct {
Path string
Command string `config:"default="`
VfsGen *GenerateVfs
}
type GenerateVfs struct {
Root string `config:"default="`
Filename string `config:"default=assets.go"`
VariableName string `config:"default=assets"`
Includes []string
Excludes []string `config:"default="`
}
// TODO: 1.0 : Move to format similar to Generate
type Resources struct {
Includes []string
Excludes []string
Mappings []PathMapping
}
type PathMapping struct {
From string
To string
}
type Assemblies struct {
Root string
Custom []Assembly
}
type Assembly struct {
Path string // Source path of files
PathPrefix string // Add leading path in archive
ManifestPrefix string // Leading name of archive
ManifestKey string // Output path in json manifest
Includes []string `config:"default=/**/*"`
Excludes []string
}
func (a Assembly) filename() string {
return fmt.Sprintf("%s-%s.tar", a.ManifestPrefix, BuildConfig.FullBuildNumber())
}
func (a Assembly) OutputFile() string {
return filepath.Join(BuildConfig.AssemblyPath(), a.filename())
}
func (a Assembly) PublishUrl() string {
return path.Join(BuildConfig.BinariesUrl(), a.filename())
}
type Binaries struct {
Assemblies bool // Include all assemblies in binaries publishing
Installer string // Folder with more installer binaries
Repository string // Root URL of artifactory binaries repository
Username string // Injected from Jenkins credentials store via ARTIFACTORY_USERNAME
Password string // Injected from Jenkins credentials store via ARTIFACTORY_PASSWORD
}
func (b Binaries) Authorization() string {
return "Basic " + base64.StdEncoding.EncodeToString([]byte(b.Username+":"+b.Password))
}
type Config struct {
Timestamp time.Time
Library Library
Msx MsxParams
Go Go
Executable Executable
Build Build
App AppInfo
Server Server
Docker Docker
Kubernetes Kubernetes
Manifest Manifest
Generate []Generate
Resources Resources
Assemblies Assemblies
Binaries Binaries
Fs *fs.FileSystemConfig
Cfg *config.Config
}
func (p Config) FullBuildNumber() string |
func (p Config) OutputRoot() string {
return configOutputRootPath
}
func (p Config) TestPath() string {
return configTestPath
}
func (p Config) AssemblyPath() string {
return configAssemblyPath
}
func (p Config) InputCommandRoot() string {
return path.Join("cmd", p.Executable.Cmd)
}
func (p Config) Port() string {
return strconv.Itoa(p.Server.Port)
}
func (p Config) OutputConfigPath() string {
return path.Join(configOutputRootPath, p.Fs.Root, p.Fs.Configs)
}
func (p Config) OutputResourcesPath() string {
return path.Join(configOutputRootPath, p.Fs.Root, p.Fs.Resources)
}
func (p Config) OutputBinaryPath() string {
return path.Join(configOutputRootPath, p.Fs.Root, p.Fs.Binaries)
}
func (p Config) OutputStaticPath() string {
return path.Join(p.OutputResourcesPath(), "www")
}
func (p Config) BinariesUrl() string {
return path.Join(
BuildConfig.Binaries.Repository,
BuildConfig.Msx.DeploymentGroup,
BuildConfig.FullBuildNumber())
}
var BuildConfig = new(Config)
func LoadAppBuildConfig(ctx context.Context, cfg *config.Config, providers []config.Provider) (finalConfig *config.Config, err error) {
if err = cfg.Populate(&BuildConfig.Msx, configRootMsx); err != nil {
return
}
if err = cfg.Populate(&BuildConfig.Executable, configRootExecutable); err != nil {
return
}
if err = cfg.Populate(&BuildConfig.Build, configRootBuild); err != nil {
return
}
for _, v := range BuildConfig.Executable.ConfigFiles {
filePath := path.Join(BuildConfig.InputCommandRoot(), v)
fileProvider := config.NewFileProvider(v, filePath)
providers = append(providers, fileProvider)
}
cfg = config.NewConfig(providers...)
if err = cfg.Load(ctx); err != nil {
return
}
if err = cfg.Populate(&BuildConfig.App, configRootAppInfo); err != nil {
return
}
// Set the spring app name if it is not set
springAppName, _ := cfg.StringOr("spring.application.name", "build")
if springAppName == "build" {
defaultConfigs["spring.application.name"] = BuildConfig.App.Name
_ = cfg.Load(ctx)
}
if err = cfg.Populate(&BuildConfig.Server, configRootServer); err != nil {
return
}
if err = cfg.Populate(&BuildConfig.Docker, configRootDocker); err != nil {
return
}
if err = cfg.Populate(&BuildConfig.Kubernetes, configRootKubernetes); err != nil {
return
}
if err = cfg.Populate(&BuildConfig.Manifest, configRootManifest); err != nil {
return
}
if err = cfg.Populate(&BuildConfig.Resources, configRootResources); err != nil {
return
}
return cfg, nil
}
func LoadBuildConfig(ctx context.Context, configFiles []string) (err error) {
var providers = []config.Provider{
config.NewStatic("defaults", defaultConfigs),
}
defaultFilesProviders := config.NewHttpFileProvidersFromGlob("Defaults", resource.Defaults, "**/defaults-*")
providers = append(providers, defaultFilesProviders...)
for _, configFile := range configFiles {
fileProvider := config.NewFileProvider("Build", configFile)
providers = append(providers, fileProvider)
}
envProvider := config.NewEnvironment("Environment")
providers = append(providers, envProvider)
cliProvider := pflagprovider.NewPflagSource("CommandLine", cli.RootCmd().Flags(), "cli.flag.")
providers = append(providers, cliProvider)
cfg := config.NewConfig(providers...)
if err = cfg.Load(ctx); err != nil {
return
}
BuildConfig.Timestamp = time.Now().UTC()
if err = cfg.Populate(&BuildConfig.Library, configRootLibrary); err != nil {
return
}
if err = cfg.Populate(&BuildConfig.Go, configRootGo); err != nil {
return
}
if err = cfg.Populate(&BuildConfig.Generate, configRootGenerate); err != nil {
return
}
if BuildConfig.Library.Name == "" {
if newCfg, err := LoadAppBuildConfig(ctx, cfg, providers); err != nil {
return err
} else {
cfg = newCfg
}
}
if BuildConfig.Fs, err = fs.NewFileSystemConfig(cfg); err != nil {
return err
}
if BuildConfig.Msx.DeploymentGroup != "" {
if err = cfg.Populate(&BuildConfig.Assemblies, configRootAssemblies); err != nil {
return err
}
if err = cfg.Populate(&BuildConfig.Binaries, configRootBinaries); err != nil {
return err
}
}
BuildConfig.Cfg = cfg
return nil
}
| {
return fmt.Sprintf("%s-%s", p.Msx.Release, p.Build.Number)
} | identifier_body |
config.go | package pkg
import (
"context"
"cto-github.cisco.com/NFV-BU/go-msx/cli"
"cto-github.cisco.com/NFV-BU/go-msx/config"
"cto-github.cisco.com/NFV-BU/go-msx/config/pflagprovider"
"cto-github.cisco.com/NFV-BU/go-msx/fs"
"cto-github.cisco.com/NFV-BU/go-msx/log"
"cto-github.cisco.com/NFV-BU/go-msx/resource"
"encoding/base64"
"fmt"
"path"
"path/filepath"
"runtime"
"strconv"
"time"
)
var logger = log.NewLogger("build")
const (
// build.yml
configRootMsx = "msx"
configRootLibrary = "library"
configRootExecutable = "executable"
configRootBuild = "build"
configRootDocker = "docker"
configRootKubernetes = "kubernetes"
configRootManifest = "manifest"
configRootGo = "go"
configRootGenerate = "generate"
configRootResources = "resources"
configRootAssemblies = "assemblies"
configRootBinaries = "artifactory"
// bootstrap.yml
configRootAppInfo = "info.app"
configRootServer = "server"
// Output directories
configOutputRootPath = "dist/root"
configAssemblyPath = "dist/assembly"
configTestPath = "test"
)
var (
defaultConfigs = map[string]string{
"spring.application.name": "build",
"msx.platform.includegroups": "com.cisco.**",
"msx.platform.swaggerartifact": "com.cisco.nfv:nfv-swagger",
"msx.platform.swaggerwebjar": "org.webjars:swagger-ui:3.23.11",
"msx.deploymentGroup": "${spring.application.name}",
"build.number": "SNAPSHOT",
"build.group": "com.cisco.msx",
"manifest.folder": "Build-Stable",
"kubernetes.group": "platformms",
"docker.dockerfile": "docker/Dockerfile", // TODO: v1.0.0: switch to default 'build/package/Dockerfile'
"docker.baseimage": "msx-base-buster:3.9.0-70",
"docker.repository": "dockerhub.cisco.com/vms-platform-dev-docker",
"docker.username": "",
"docker.password": "",
"go.env.all.GOPRIVATE": "cto-github.cisco.com/NFV-BU",
"go.env.all.GOPROXY": "https://engci-maven.cisco.com/artifactory/go/,https://proxy.golang.org,direct",
"go.env.linux.GOFLAGS": `-buildmode=pie -i -ldflags="-extldflags=-Wl,-z,now,-z,relro" -ldflags=-s -ldflags=-w`,
"go.env.darwin.GOFLAGS": `-i`,
"library.name": "",
"assemblies.root": "platform-common",
"artifactory.assemblies": "true",
"artifactory.repository": "https://engci-maven-master.cisco.com/artifactory/symphony-group/vms-3.0-binaries",
"artifactory.installer": "deployments/kubernetes",
"artifactory.username": "",
"artifactory.password": "",
}
)
type AppInfo struct {
Name string
Attributes struct {
DisplayName string
}
}
type Server struct {
Port int
ContextPath string
StaticPath string
}
func (p Server) PortString() string {
return strconv.Itoa(p.Port)
}
type Executable struct {
Cmd string // refers to `cmd/<name>/main.go`
ConfigFiles []string
}
type Library struct {
Name string
}
type Go struct {
Env struct {
All map[string]string
Linux map[string]string
Darwin map[string]string
}
}
func (g Go) Environment() map[string]string {
result := make(map[string]string)
copyMap := func(source map[string]string) {
for k, v := range source {
result[k] = v
}
}
copyMap(g.Env.All)
switch runtime.GOOS {
case "linux":
copyMap(g.Env.Linux)
case "darwin":
copyMap(g.Env.Darwin)
}
return result
}
type MsxParams struct {
Release string
Platform struct {
ParentArtifacts []string
SwaggerArtifact string
SwaggerWebJar string
Version string
IncludeGroups string
}
DeploymentGroup string
}
type Build struct {
Number string
Group string
}
type Manifest struct {
Folder string
}
type Docker struct {
Dockerfile string
BaseImage string
Repository string
Username string
Password string
}
type Kubernetes struct {
Group string
}
type Generate struct {
Path string
Command string `config:"default="`
VfsGen *GenerateVfs
}
type GenerateVfs struct {
Root string `config:"default="`
Filename string `config:"default=assets.go"`
VariableName string `config:"default=assets"`
Includes []string
Excludes []string `config:"default="`
}
// TODO: 1.0 : Move to format similar to Generate
type Resources struct {
Includes []string
Excludes []string
Mappings []PathMapping
}
type PathMapping struct {
From string
To string
}
type Assemblies struct {
Root string
Custom []Assembly
}
type Assembly struct {
Path string // Source path of files
PathPrefix string // Add leading path in archive
ManifestPrefix string // Leading name of archive
ManifestKey string // Output path in json manifest
Includes []string `config:"default=/**/*"`
Excludes []string
}
func (a Assembly) filename() string {
return fmt.Sprintf("%s-%s.tar", a.ManifestPrefix, BuildConfig.FullBuildNumber())
}
func (a Assembly) OutputFile() string {
return filepath.Join(BuildConfig.AssemblyPath(), a.filename())
}
func (a Assembly) PublishUrl() string {
return path.Join(BuildConfig.BinariesUrl(), a.filename())
}
type Binaries struct {
Assemblies bool // Include all assemblies in binaries publishing
Installer string // Folder with more installer binaries
Repository string // Root URL of artifactory binaries repository
Username string // Injected from Jenkins credentials store via ARTIFACTORY_USERNAME
Password string // Injected from Jenkins credentials store via ARTIFACTORY_PASSWORD
}
func (b Binaries) Authorization() string {
return "Basic " + base64.StdEncoding.EncodeToString([]byte(b.Username+":"+b.Password))
}
type Config struct {
Timestamp time.Time
Library Library
Msx MsxParams
Go Go
Executable Executable
Build Build
App AppInfo
Server Server
Docker Docker
Kubernetes Kubernetes
Manifest Manifest
Generate []Generate
Resources Resources
Assemblies Assemblies
Binaries Binaries
Fs *fs.FileSystemConfig
Cfg *config.Config
}
func (p Config) FullBuildNumber() string {
return fmt.Sprintf("%s-%s", p.Msx.Release, p.Build.Number)
}
func (p Config) OutputRoot() string {
return configOutputRootPath
}
func (p Config) TestPath() string {
return configTestPath
}
func (p Config) AssemblyPath() string {
return configAssemblyPath
}
func (p Config) InputCommandRoot() string {
return path.Join("cmd", p.Executable.Cmd)
}
func (p Config) Port() string {
return strconv.Itoa(p.Server.Port)
}
func (p Config) OutputConfigPath() string {
return path.Join(configOutputRootPath, p.Fs.Root, p.Fs.Configs)
}
func (p Config) OutputResourcesPath() string {
return path.Join(configOutputRootPath, p.Fs.Root, p.Fs.Resources)
}
func (p Config) OutputBinaryPath() string {
return path.Join(configOutputRootPath, p.Fs.Root, p.Fs.Binaries)
}
func (p Config) OutputStaticPath() string {
return path.Join(p.OutputResourcesPath(), "www")
}
func (p Config) BinariesUrl() string {
return path.Join(
BuildConfig.Binaries.Repository,
BuildConfig.Msx.DeploymentGroup,
BuildConfig.FullBuildNumber())
}
var BuildConfig = new(Config)
func LoadAppBuildConfig(ctx context.Context, cfg *config.Config, providers []config.Provider) (finalConfig *config.Config, err error) {
if err = cfg.Populate(&BuildConfig.Msx, configRootMsx); err != nil {
return
}
if err = cfg.Populate(&BuildConfig.Executable, configRootExecutable); err != nil {
return
}
if err = cfg.Populate(&BuildConfig.Build, configRootBuild); err != nil {
return
}
for _, v := range BuildConfig.Executable.ConfigFiles {
filePath := path.Join(BuildConfig.InputCommandRoot(), v)
fileProvider := config.NewFileProvider(v, filePath)
providers = append(providers, fileProvider)
}
cfg = config.NewConfig(providers...)
if err = cfg.Load(ctx); err != nil {
return
}
if err = cfg.Populate(&BuildConfig.App, configRootAppInfo); err != nil {
return
}
// Set the spring app name if it is not set
springAppName, _ := cfg.StringOr("spring.application.name", "build") | _ = cfg.Load(ctx)
}
if err = cfg.Populate(&BuildConfig.Server, configRootServer); err != nil {
return
}
if err = cfg.Populate(&BuildConfig.Docker, configRootDocker); err != nil {
return
}
if err = cfg.Populate(&BuildConfig.Kubernetes, configRootKubernetes); err != nil {
return
}
if err = cfg.Populate(&BuildConfig.Manifest, configRootManifest); err != nil {
return
}
if err = cfg.Populate(&BuildConfig.Resources, configRootResources); err != nil {
return
}
return cfg, nil
}
func LoadBuildConfig(ctx context.Context, configFiles []string) (err error) {
var providers = []config.Provider{
config.NewStatic("defaults", defaultConfigs),
}
defaultFilesProviders := config.NewHttpFileProvidersFromGlob("Defaults", resource.Defaults, "**/defaults-*")
providers = append(providers, defaultFilesProviders...)
for _, configFile := range configFiles {
fileProvider := config.NewFileProvider("Build", configFile)
providers = append(providers, fileProvider)
}
envProvider := config.NewEnvironment("Environment")
providers = append(providers, envProvider)
cliProvider := pflagprovider.NewPflagSource("CommandLine", cli.RootCmd().Flags(), "cli.flag.")
providers = append(providers, cliProvider)
cfg := config.NewConfig(providers...)
if err = cfg.Load(ctx); err != nil {
return
}
BuildConfig.Timestamp = time.Now().UTC()
if err = cfg.Populate(&BuildConfig.Library, configRootLibrary); err != nil {
return
}
if err = cfg.Populate(&BuildConfig.Go, configRootGo); err != nil {
return
}
if err = cfg.Populate(&BuildConfig.Generate, configRootGenerate); err != nil {
return
}
if BuildConfig.Library.Name == "" {
if newCfg, err := LoadAppBuildConfig(ctx, cfg, providers); err != nil {
return err
} else {
cfg = newCfg
}
}
if BuildConfig.Fs, err = fs.NewFileSystemConfig(cfg); err != nil {
return err
}
if BuildConfig.Msx.DeploymentGroup != "" {
if err = cfg.Populate(&BuildConfig.Assemblies, configRootAssemblies); err != nil {
return err
}
if err = cfg.Populate(&BuildConfig.Binaries, configRootBinaries); err != nil {
return err
}
}
BuildConfig.Cfg = cfg
return nil
} | if springAppName == "build" {
defaultConfigs["spring.application.name"] = BuildConfig.App.Name | random_line_split |
config.go | package pkg
import (
"context"
"cto-github.cisco.com/NFV-BU/go-msx/cli"
"cto-github.cisco.com/NFV-BU/go-msx/config"
"cto-github.cisco.com/NFV-BU/go-msx/config/pflagprovider"
"cto-github.cisco.com/NFV-BU/go-msx/fs"
"cto-github.cisco.com/NFV-BU/go-msx/log"
"cto-github.cisco.com/NFV-BU/go-msx/resource"
"encoding/base64"
"fmt"
"path"
"path/filepath"
"runtime"
"strconv"
"time"
)
var logger = log.NewLogger("build")
const (
// build.yml
configRootMsx = "msx"
configRootLibrary = "library"
configRootExecutable = "executable"
configRootBuild = "build"
configRootDocker = "docker"
configRootKubernetes = "kubernetes"
configRootManifest = "manifest"
configRootGo = "go"
configRootGenerate = "generate"
configRootResources = "resources"
configRootAssemblies = "assemblies"
configRootBinaries = "artifactory"
// bootstrap.yml
configRootAppInfo = "info.app"
configRootServer = "server"
// Output directories
configOutputRootPath = "dist/root"
configAssemblyPath = "dist/assembly"
configTestPath = "test"
)
var (
defaultConfigs = map[string]string{
"spring.application.name": "build",
"msx.platform.includegroups": "com.cisco.**",
"msx.platform.swaggerartifact": "com.cisco.nfv:nfv-swagger",
"msx.platform.swaggerwebjar": "org.webjars:swagger-ui:3.23.11",
"msx.deploymentGroup": "${spring.application.name}",
"build.number": "SNAPSHOT",
"build.group": "com.cisco.msx",
"manifest.folder": "Build-Stable",
"kubernetes.group": "platformms",
"docker.dockerfile": "docker/Dockerfile", // TODO: v1.0.0: switch to default 'build/package/Dockerfile'
"docker.baseimage": "msx-base-buster:3.9.0-70",
"docker.repository": "dockerhub.cisco.com/vms-platform-dev-docker",
"docker.username": "",
"docker.password": "",
"go.env.all.GOPRIVATE": "cto-github.cisco.com/NFV-BU",
"go.env.all.GOPROXY": "https://engci-maven.cisco.com/artifactory/go/,https://proxy.golang.org,direct",
"go.env.linux.GOFLAGS": `-buildmode=pie -i -ldflags="-extldflags=-Wl,-z,now,-z,relro" -ldflags=-s -ldflags=-w`,
"go.env.darwin.GOFLAGS": `-i`,
"library.name": "",
"assemblies.root": "platform-common",
"artifactory.assemblies": "true",
"artifactory.repository": "https://engci-maven-master.cisco.com/artifactory/symphony-group/vms-3.0-binaries",
"artifactory.installer": "deployments/kubernetes",
"artifactory.username": "",
"artifactory.password": "",
}
)
type AppInfo struct {
Name string
Attributes struct {
DisplayName string
}
}
type Server struct {
Port int
ContextPath string
StaticPath string
}
func (p Server) PortString() string {
return strconv.Itoa(p.Port)
}
type Executable struct {
Cmd string // refers to `cmd/<name>/main.go`
ConfigFiles []string
}
type Library struct {
Name string
}
type Go struct {
Env struct {
All map[string]string
Linux map[string]string
Darwin map[string]string
}
}
func (g Go) Environment() map[string]string {
result := make(map[string]string)
copyMap := func(source map[string]string) {
for k, v := range source {
result[k] = v
}
}
copyMap(g.Env.All)
switch runtime.GOOS {
case "linux":
copyMap(g.Env.Linux)
case "darwin":
copyMap(g.Env.Darwin)
}
return result
}
type MsxParams struct {
Release string
Platform struct {
ParentArtifacts []string
SwaggerArtifact string
SwaggerWebJar string
Version string
IncludeGroups string
}
DeploymentGroup string
}
type Build struct {
Number string
Group string
}
type Manifest struct {
Folder string
}
type Docker struct {
Dockerfile string
BaseImage string
Repository string
Username string
Password string
}
type Kubernetes struct {
Group string
}
type Generate struct {
Path string
Command string `config:"default="`
VfsGen *GenerateVfs
}
type GenerateVfs struct {
Root string `config:"default="`
Filename string `config:"default=assets.go"`
VariableName string `config:"default=assets"`
Includes []string
Excludes []string `config:"default="`
}
// TODO: 1.0 : Move to format similar to Generate
type Resources struct {
Includes []string
Excludes []string
Mappings []PathMapping
}
type PathMapping struct {
From string
To string
}
type Assemblies struct {
Root string
Custom []Assembly
}
type Assembly struct {
Path string // Source path of files
PathPrefix string // Add leading path in archive
ManifestPrefix string // Leading name of archive
ManifestKey string // Output path in json manifest
Includes []string `config:"default=/**/*"`
Excludes []string
}
func (a Assembly) filename() string {
return fmt.Sprintf("%s-%s.tar", a.ManifestPrefix, BuildConfig.FullBuildNumber())
}
func (a Assembly) OutputFile() string {
return filepath.Join(BuildConfig.AssemblyPath(), a.filename())
}
func (a Assembly) PublishUrl() string {
return path.Join(BuildConfig.BinariesUrl(), a.filename())
}
type Binaries struct {
Assemblies bool // Include all assemblies in binaries publishing
Installer string // Folder with more installer binaries
Repository string // Root URL of artifactory binaries repository
Username string // Injected from Jenkins credentials store via ARTIFACTORY_USERNAME
Password string // Injected from Jenkins credentials store via ARTIFACTORY_PASSWORD
}
func (b Binaries) Authorization() string {
return "Basic " + base64.StdEncoding.EncodeToString([]byte(b.Username+":"+b.Password))
}
type Config struct {
Timestamp time.Time
Library Library
Msx MsxParams
Go Go
Executable Executable
Build Build
App AppInfo
Server Server
Docker Docker
Kubernetes Kubernetes
Manifest Manifest
Generate []Generate
Resources Resources
Assemblies Assemblies
Binaries Binaries
Fs *fs.FileSystemConfig
Cfg *config.Config
}
func (p Config) FullBuildNumber() string {
return fmt.Sprintf("%s-%s", p.Msx.Release, p.Build.Number)
}
func (p Config) OutputRoot() string {
return configOutputRootPath
}
func (p Config) TestPath() string {
return configTestPath
}
func (p Config) AssemblyPath() string {
return configAssemblyPath
}
func (p Config) InputCommandRoot() string {
return path.Join("cmd", p.Executable.Cmd)
}
func (p Config) Port() string {
return strconv.Itoa(p.Server.Port)
}
func (p Config) OutputConfigPath() string {
return path.Join(configOutputRootPath, p.Fs.Root, p.Fs.Configs)
}
func (p Config) OutputResourcesPath() string {
return path.Join(configOutputRootPath, p.Fs.Root, p.Fs.Resources)
}
func (p Config) OutputBinaryPath() string {
return path.Join(configOutputRootPath, p.Fs.Root, p.Fs.Binaries)
}
func (p Config) OutputStaticPath() string {
return path.Join(p.OutputResourcesPath(), "www")
}
func (p Config) BinariesUrl() string {
return path.Join(
BuildConfig.Binaries.Repository,
BuildConfig.Msx.DeploymentGroup,
BuildConfig.FullBuildNumber())
}
var BuildConfig = new(Config)
func LoadAppBuildConfig(ctx context.Context, cfg *config.Config, providers []config.Provider) (finalConfig *config.Config, err error) {
if err = cfg.Populate(&BuildConfig.Msx, configRootMsx); err != nil {
return
}
if err = cfg.Populate(&BuildConfig.Executable, configRootExecutable); err != nil {
return
}
if err = cfg.Populate(&BuildConfig.Build, configRootBuild); err != nil {
return
}
for _, v := range BuildConfig.Executable.ConfigFiles {
filePath := path.Join(BuildConfig.InputCommandRoot(), v)
fileProvider := config.NewFileProvider(v, filePath)
providers = append(providers, fileProvider)
}
cfg = config.NewConfig(providers...)
if err = cfg.Load(ctx); err != nil {
return
}
if err = cfg.Populate(&BuildConfig.App, configRootAppInfo); err != nil |
// Set the spring app name if it is not set
springAppName, _ := cfg.StringOr("spring.application.name", "build")
if springAppName == "build" {
defaultConfigs["spring.application.name"] = BuildConfig.App.Name
_ = cfg.Load(ctx)
}
if err = cfg.Populate(&BuildConfig.Server, configRootServer); err != nil {
return
}
if err = cfg.Populate(&BuildConfig.Docker, configRootDocker); err != nil {
return
}
if err = cfg.Populate(&BuildConfig.Kubernetes, configRootKubernetes); err != nil {
return
}
if err = cfg.Populate(&BuildConfig.Manifest, configRootManifest); err != nil {
return
}
if err = cfg.Populate(&BuildConfig.Resources, configRootResources); err != nil {
return
}
return cfg, nil
}
func LoadBuildConfig(ctx context.Context, configFiles []string) (err error) {
var providers = []config.Provider{
config.NewStatic("defaults", defaultConfigs),
}
defaultFilesProviders := config.NewHttpFileProvidersFromGlob("Defaults", resource.Defaults, "**/defaults-*")
providers = append(providers, defaultFilesProviders...)
for _, configFile := range configFiles {
fileProvider := config.NewFileProvider("Build", configFile)
providers = append(providers, fileProvider)
}
envProvider := config.NewEnvironment("Environment")
providers = append(providers, envProvider)
cliProvider := pflagprovider.NewPflagSource("CommandLine", cli.RootCmd().Flags(), "cli.flag.")
providers = append(providers, cliProvider)
cfg := config.NewConfig(providers...)
if err = cfg.Load(ctx); err != nil {
return
}
BuildConfig.Timestamp = time.Now().UTC()
if err = cfg.Populate(&BuildConfig.Library, configRootLibrary); err != nil {
return
}
if err = cfg.Populate(&BuildConfig.Go, configRootGo); err != nil {
return
}
if err = cfg.Populate(&BuildConfig.Generate, configRootGenerate); err != nil {
return
}
if BuildConfig.Library.Name == "" {
if newCfg, err := LoadAppBuildConfig(ctx, cfg, providers); err != nil {
return err
} else {
cfg = newCfg
}
}
if BuildConfig.Fs, err = fs.NewFileSystemConfig(cfg); err != nil {
return err
}
if BuildConfig.Msx.DeploymentGroup != "" {
if err = cfg.Populate(&BuildConfig.Assemblies, configRootAssemblies); err != nil {
return err
}
if err = cfg.Populate(&BuildConfig.Binaries, configRootBinaries); err != nil {
return err
}
}
BuildConfig.Cfg = cfg
return nil
}
| {
return
} | conditional_block |
scan.go | package scan
import (
"bufio"
"bytes"
"fmt"
"io"
"os"
"strings"
"sync"
"time"
"github.com/zricethezav/gitleaks/v6/manager"
"github.com/go-git/go-git/v5"
"github.com/go-git/go-git/v5/plumbing"
fdiff "github.com/go-git/go-git/v5/plumbing/format/diff"
"github.com/go-git/go-git/v5/plumbing/object"
"github.com/go-git/go-git/v5/plumbing/storer"
"github.com/sergi/go-diff/diffmatchpatch"
log "github.com/sirupsen/logrus"
)
// Bundle contains various git information for scans.
type Bundle struct {
Commit *object.Commit
Patch string
Content string
FilePath string
Operation fdiff.Operation
reader io.Reader
lineLookup map[string]bool
scanType int
}
// commitScanner is a function signature for scanning commits. There is some
// redundant work needed by scanning all files at a commit (--files-at-commit=) and scanning
// the patches generated by a commit (--commit=). The function scanCommit wraps that redundant work
// and accepts a commitScanner for the different logic needed between the two cases described above.
type commitScanner func(c *object.Commit, repo *Repo) error
const (
// We need to differentiate between scans as the logic for line searching is different between
// scanning patches, commits, and uncommitted files.
patchScan int = iota + 1
uncommittedScan
commitScan
)
// Scan is responsible for scanning the entire history (default behavior) of a
// git repo. Options that can change the behavior of this function include: --Commit, --depth, --branch.
// See options/options.go for an explanation on these options.
func (repo *Repo) Scan() error {
if err := repo.setupTimeout(); err != nil {
return err
}
if repo.cancel != nil {
defer repo.cancel()
}
if repo.Repository == nil {
return fmt.Errorf("%s repo is empty", repo.Name)
}
// load up alternative config if possible, if not use manager's config
if repo.Manager.Opts.RepoConfig |
scanTimeStart := time.Now()
// See https://github.com/zricethezav/gitleaks/issues/326
// Scan commit patches, all files at a commit, or a range of commits
if repo.Manager.Opts.Commit != "" {
return scanCommit(repo.Manager.Opts.Commit, repo, scanCommitPatches)
} else if repo.Manager.Opts.FilesAtCommit != "" {
return scanCommit(repo.Manager.Opts.FilesAtCommit, repo, scanFilesAtCommit)
} else if repo.Manager.Opts.Commits != "" {
commits := strings.Split(repo.Manager.Opts.Commits, ",")
for _, c := range commits {
err := scanCommit(c, repo, scanCommitPatches)
if err != nil {
return err
}
}
return nil
} else if repo.Manager.Opts.CommitsFile != "" {
file, err := os.Open(repo.Manager.Opts.CommitsFile)
if err != nil {
return err
}
defer file.Close()
scanner := bufio.NewScanner(file)
for scanner.Scan() {
err := scanCommit(scanner.Text(), repo, scanCommitPatches)
if err != nil {
return err
}
}
return nil
}
logOpts, err := getLogOptions(repo)
if err != nil {
return err
}
cIter, err := repo.Log(logOpts)
if err != nil {
return err
}
cc := 0
semaphore := make(chan bool, howManyThreads(repo.Manager.Opts.Threads))
wg := sync.WaitGroup{}
err = cIter.ForEach(func(c *object.Commit) error {
if c == nil || repo.timeoutReached() || repo.depthReached(cc) {
return storer.ErrStop
}
// Check if Commit is allowlisted
if isCommitAllowListed(c.Hash.String(), repo.config.Allowlist.Commits) {
return nil
}
// Check if at root
if len(c.ParentHashes) == 0 {
cc++
err = scanFilesAtCommit(c, repo)
if err != nil {
return err
}
return nil
}
// increase Commit counter
cc++
// inspect first parent only as all other parents will be eventually reached
// (they exist as the tip of other branches, etc)
// See https://github.com/zricethezav/gitleaks/issues/413 for details
parent, err := c.Parent(0)
if err != nil {
return err
}
defer func() {
if err := recover(); err != nil {
// sometimes the Patch generation will fail due to a known bug in
// sergi's go-diff: https://github.com/sergi/go-diff/issues/89.
// Once a fix has been merged I will remove this recover.
return
}
}()
if repo.timeoutReached() {
return nil
}
if parent == nil {
// shouldn't reach this point but just in case
return nil
}
start := time.Now()
patch, err := parent.Patch(c)
if err != nil {
log.Errorf("could not generate Patch")
}
repo.Manager.RecordTime(manager.PatchTime(howLong(start)))
wg.Add(1)
semaphore <- true
go func(c *object.Commit, patch *object.Patch) {
defer func() {
<-semaphore
wg.Done()
}()
scanPatch(patch, c, repo)
}(c, patch)
if c.Hash.String() == repo.Manager.Opts.CommitTo {
return storer.ErrStop
}
return nil
})
wg.Wait()
repo.Manager.RecordTime(manager.ScanTime(howLong(scanTimeStart)))
repo.Manager.IncrementCommits(cc)
return nil
}
// scanEmpty scans an empty repo without any commits. See https://github.com/zricethezav/gitleaks/issues/352
func (repo *Repo) scanEmpty() error {
scanTimeStart := time.Now()
wt, err := repo.Worktree()
if err != nil {
return err
}
status, err := wt.Status()
if err != nil {
return err
}
for fn := range status {
workTreeBuf := bytes.NewBuffer(nil)
workTreeFile, err := wt.Filesystem.Open(fn)
if err != nil {
continue
}
if _, err := io.Copy(workTreeBuf, workTreeFile); err != nil {
return err
}
repo.CheckRules(&Bundle{
Content: workTreeBuf.String(),
FilePath: workTreeFile.Name(),
Commit: emptyCommit(),
scanType: uncommittedScan,
})
}
repo.Manager.RecordTime(manager.ScanTime(howLong(scanTimeStart)))
return nil
}
// scanUncommitted will do a `git diff` and scan changed files that are being tracked. This is useful functionality
// for a pre-Commit hook so you can make sure your code does not have any leaks before committing.
func (repo *Repo) scanUncommitted() error {
// load up alternative config if possible, if not use manager's config
if repo.Manager.Opts.RepoConfig {
cfg, err := repo.loadRepoConfig()
if err != nil {
return err
}
repo.config = cfg
}
if err := repo.setupTimeout(); err != nil {
return err
}
r, err := repo.Head()
if err == plumbing.ErrReferenceNotFound {
// possibly an empty repo, or maybe its not, either way lets scan all the files in the directory
return repo.scanEmpty()
} else if err != nil {
return err
}
scanTimeStart := time.Now()
c, err := repo.CommitObject(r.Hash())
if err != nil {
return err
}
// Staged change so the Commit details do not yet exist. Insert empty defaults.
c.Hash = plumbing.Hash{}
c.Message = "***STAGED CHANGES***"
c.Author.Name = ""
c.Author.Email = ""
c.Author.When = time.Unix(0, 0).UTC()
prevTree, err := c.Tree()
if err != nil {
return err
}
wt, err := repo.Worktree()
if err != nil {
return err
}
status, err := wt.Status()
for fn, state := range status {
var (
prevFileContents string
currFileContents string
filename string
)
if state.Staging != git.Untracked {
if state.Staging == git.Deleted {
// file in staging has been deleted, aka it is not on the filesystem
// so the contents of the file are ""
currFileContents = ""
} else {
workTreeBuf := bytes.NewBuffer(nil)
workTreeFile, err := wt.Filesystem.Open(fn)
if err != nil {
continue
}
if _, err := io.Copy(workTreeBuf, workTreeFile); err != nil {
return err
}
currFileContents = workTreeBuf.String()
filename = workTreeFile.Name()
}
// get files at HEAD state
prevFile, err := prevTree.File(fn)
if err != nil {
prevFileContents = ""
} else {
prevFileContents, err = prevFile.Contents()
if err != nil {
return err
}
if filename == "" {
filename = prevFile.Name
}
}
dmp := diffmatchpatch.New()
diffs := dmp.DiffCleanupSemantic(dmp.DiffMain(prevFileContents, currFileContents, false))
var diffContents string
for _, d := range diffs {
if d.Type == diffmatchpatch.DiffInsert {
diffContents += fmt.Sprintf("%s\n", d.Text)
}
}
repo.CheckRules(&Bundle{
Content: diffContents,
FilePath: filename,
Commit: c,
scanType: uncommittedScan,
})
}
}
if err != nil {
return err
}
repo.Manager.RecordTime(manager.ScanTime(howLong(scanTimeStart)))
return nil
}
// scan accepts a Patch, Commit, and repo. If the patches contains files that are
// binary, then gitleaks will skip scanning that file OR if a file is matched on
// allowlisted files set in the configuration. If a global rule for files is defined and a filename
// matches said global rule, then a leak is sent to the manager.
// After that, file chunks are created which are then inspected by InspectString()
func scanPatch(patch *object.Patch, c *object.Commit, repo *Repo) {
bundle := Bundle{
Commit: c,
Patch: patch.String(),
scanType: patchScan,
}
for _, f := range patch.FilePatches() {
if repo.timeoutReached() {
return
}
if f.IsBinary() {
continue
}
for _, chunk := range f.Chunks() {
if chunk.Type() == fdiff.Add || (repo.Manager.Opts.Deletion && chunk.Type() == fdiff.Delete) {
bundle.Content = chunk.Content()
bundle.Operation = chunk.Type()
// get filepath
from, to := f.Files()
if from != nil {
bundle.FilePath = from.Path()
} else if to != nil {
bundle.FilePath = to.Path()
} else {
bundle.FilePath = "???"
}
repo.CheckRules(&bundle)
}
}
}
}
// scanCommit accepts a Commit hash, repo, and commit scanning function. A new Commit
// object will be created from the hash which will be passed into either scanCommitPatches
// or scanFilesAtCommit depending on the options set.
func scanCommit(commit string, repo *Repo, f commitScanner) error {
if commit == "latest" {
ref, err := repo.Repository.Head()
if err != nil {
return err
}
commit = ref.Hash().String()
}
repo.Manager.IncrementCommits(1)
h := plumbing.NewHash(commit)
c, err := repo.CommitObject(h)
if err != nil {
return err
}
return f(c, repo)
}
// scanCommitPatches accepts a Commit object and a repo. This function is only called when the --Commit=
// option has been set. That option tells gitleaks to look only at a single Commit and check the contents
// of said Commit. Similar to scan(), if the files contained in the Commit are a binaries or if they are
// allowlisted then those files will be skipped.
func scanCommitPatches(c *object.Commit, repo *Repo) error {
if len(c.ParentHashes) == 0 {
err := scanFilesAtCommit(c, repo)
if err != nil {
return err
}
}
return c.Parents().ForEach(func(parent *object.Commit) error {
defer func() {
if err := recover(); err != nil {
// sometimes the Patch generation will fail due to a known bug in
// sergi's go-diff: https://github.com/sergi/go-diff/issues/89.
// Once a fix has been merged I will remove this recover.
return
}
}()
if repo.timeoutReached() {
return nil
}
if parent == nil {
return nil
}
start := time.Now()
patch, err := parent.Patch(c)
if err != nil {
return fmt.Errorf("could not generate Patch")
}
repo.Manager.RecordTime(manager.PatchTime(howLong(start)))
scanPatch(patch, c, repo)
return nil
})
}
// scanFilesAtCommit accepts a Commit object and a repo. This function is only called when the --files-at-Commit=
// option has been set. That option tells gitleaks to look only at ALL the files at a Commit and check the contents
// of said Commit. Similar to scan(), if the files contained in the Commit are a binaries or if they are
// allowlisted then those files will be skipped.
func scanFilesAtCommit(c *object.Commit, repo *Repo) error {
fIter, err := c.Files()
if err != nil {
return err
}
err = fIter.ForEach(func(f *object.File) error {
bin, err := f.IsBinary()
if bin || repo.timeoutReached() {
return nil
} else if err != nil {
return err
}
content, err := f.Contents()
if err != nil {
return err
}
repo.CheckRules(&Bundle{
Content: content,
FilePath: f.Name,
Commit: c,
scanType: commitScan,
Operation: fdiff.Add,
})
return nil
})
return err
}
// depthReached checks if i meets the depth (--depth=) if set
func (repo *Repo) depthReached(i int) bool {
if repo.Manager.Opts.Depth != 0 && repo.Manager.Opts.Depth == i {
log.Warnf("Exceeded depth limit (%d)", i)
return true
}
return false
}
// emptyCommit generates an empty commit used for scanning uncommitted changes
func emptyCommit() *object.Commit {
return &object.Commit{
Hash: plumbing.Hash{},
Message: "***STAGED CHANGES***",
Author: object.Signature{
Name: "",
Email: "",
When: time.Unix(0, 0).UTC(),
},
}
}
| {
cfg, err := repo.loadRepoConfig()
if err != nil {
return err
}
repo.config = cfg
} | conditional_block |
scan.go | package scan
import (
"bufio"
"bytes"
"fmt"
"io"
"os"
"strings"
"sync"
"time"
"github.com/zricethezav/gitleaks/v6/manager"
"github.com/go-git/go-git/v5"
"github.com/go-git/go-git/v5/plumbing"
fdiff "github.com/go-git/go-git/v5/plumbing/format/diff"
"github.com/go-git/go-git/v5/plumbing/object"
"github.com/go-git/go-git/v5/plumbing/storer"
"github.com/sergi/go-diff/diffmatchpatch"
log "github.com/sirupsen/logrus"
)
// Bundle contains various git information for scans.
type Bundle struct {
Commit *object.Commit
Patch string
Content string
FilePath string
Operation fdiff.Operation
reader io.Reader
lineLookup map[string]bool
scanType int
}
// commitScanner is a function signature for scanning commits. There is some
// redundant work needed by scanning all files at a commit (--files-at-commit=) and scanning
// the patches generated by a commit (--commit=). The function scanCommit wraps that redundant work
// and accepts a commitScanner for the different logic needed between the two cases described above.
type commitScanner func(c *object.Commit, repo *Repo) error
const (
// We need to differentiate between scans as the logic for line searching is different between
// scanning patches, commits, and uncommitted files.
patchScan int = iota + 1
uncommittedScan
commitScan
)
// Scan is responsible for scanning the entire history (default behavior) of a
// git repo. Options that can change the behavior of this function include: --Commit, --depth, --branch.
// See options/options.go for an explanation on these options.
func (repo *Repo) Scan() error {
if err := repo.setupTimeout(); err != nil {
return err
}
if repo.cancel != nil {
defer repo.cancel()
}
if repo.Repository == nil {
return fmt.Errorf("%s repo is empty", repo.Name)
}
// load up alternative config if possible, if not use manager's config
if repo.Manager.Opts.RepoConfig {
cfg, err := repo.loadRepoConfig()
if err != nil {
return err
}
repo.config = cfg
}
scanTimeStart := time.Now()
// See https://github.com/zricethezav/gitleaks/issues/326
// Scan commit patches, all files at a commit, or a range of commits
if repo.Manager.Opts.Commit != "" {
return scanCommit(repo.Manager.Opts.Commit, repo, scanCommitPatches)
} else if repo.Manager.Opts.FilesAtCommit != "" {
return scanCommit(repo.Manager.Opts.FilesAtCommit, repo, scanFilesAtCommit)
} else if repo.Manager.Opts.Commits != "" {
commits := strings.Split(repo.Manager.Opts.Commits, ",")
for _, c := range commits {
err := scanCommit(c, repo, scanCommitPatches)
if err != nil {
return err
}
}
return nil
} else if repo.Manager.Opts.CommitsFile != "" {
file, err := os.Open(repo.Manager.Opts.CommitsFile)
if err != nil {
return err
}
defer file.Close()
scanner := bufio.NewScanner(file)
for scanner.Scan() {
err := scanCommit(scanner.Text(), repo, scanCommitPatches)
if err != nil {
return err
}
}
return nil
}
logOpts, err := getLogOptions(repo)
if err != nil {
return err
}
cIter, err := repo.Log(logOpts)
if err != nil {
return err
}
cc := 0
semaphore := make(chan bool, howManyThreads(repo.Manager.Opts.Threads))
wg := sync.WaitGroup{}
err = cIter.ForEach(func(c *object.Commit) error {
if c == nil || repo.timeoutReached() || repo.depthReached(cc) {
return storer.ErrStop
}
// Check if Commit is allowlisted
if isCommitAllowListed(c.Hash.String(), repo.config.Allowlist.Commits) {
return nil
}
// Check if at root
if len(c.ParentHashes) == 0 {
cc++
err = scanFilesAtCommit(c, repo)
if err != nil {
return err
}
return nil
}
// increase Commit counter
cc++
// inspect first parent only as all other parents will be eventually reached
// (they exist as the tip of other branches, etc)
// See https://github.com/zricethezav/gitleaks/issues/413 for details
parent, err := c.Parent(0)
if err != nil {
return err
}
defer func() {
if err := recover(); err != nil {
// sometimes the Patch generation will fail due to a known bug in
// sergi's go-diff: https://github.com/sergi/go-diff/issues/89.
// Once a fix has been merged I will remove this recover.
return
}
}()
if repo.timeoutReached() {
return nil
}
if parent == nil {
// shouldn't reach this point but just in case
return nil
}
start := time.Now()
patch, err := parent.Patch(c)
if err != nil {
log.Errorf("could not generate Patch")
}
repo.Manager.RecordTime(manager.PatchTime(howLong(start)))
wg.Add(1)
semaphore <- true
go func(c *object.Commit, patch *object.Patch) {
defer func() {
<-semaphore
wg.Done()
}()
scanPatch(patch, c, repo)
}(c, patch)
if c.Hash.String() == repo.Manager.Opts.CommitTo {
return storer.ErrStop
}
return nil
})
wg.Wait()
repo.Manager.RecordTime(manager.ScanTime(howLong(scanTimeStart)))
repo.Manager.IncrementCommits(cc)
return nil
}
// scanEmpty scans an empty repo without any commits. See https://github.com/zricethezav/gitleaks/issues/352
func (repo *Repo) scanEmpty() error {
scanTimeStart := time.Now()
wt, err := repo.Worktree()
if err != nil {
return err
}
status, err := wt.Status()
if err != nil {
return err
}
for fn := range status {
workTreeBuf := bytes.NewBuffer(nil)
workTreeFile, err := wt.Filesystem.Open(fn)
if err != nil {
continue
}
if _, err := io.Copy(workTreeBuf, workTreeFile); err != nil {
return err
}
repo.CheckRules(&Bundle{
Content: workTreeBuf.String(),
FilePath: workTreeFile.Name(),
Commit: emptyCommit(),
scanType: uncommittedScan,
})
}
repo.Manager.RecordTime(manager.ScanTime(howLong(scanTimeStart)))
return nil
}
// scanUncommitted will do a `git diff` and scan changed files that are being tracked. This is useful functionality
// for a pre-Commit hook so you can make sure your code does not have any leaks before committing.
func (repo *Repo) scanUncommitted() error {
// load up alternative config if possible, if not use manager's config
if repo.Manager.Opts.RepoConfig {
cfg, err := repo.loadRepoConfig()
if err != nil {
return err
}
repo.config = cfg
}
if err := repo.setupTimeout(); err != nil {
return err
}
r, err := repo.Head()
if err == plumbing.ErrReferenceNotFound {
// possibly an empty repo, or maybe its not, either way lets scan all the files in the directory
return repo.scanEmpty()
} else if err != nil {
return err
}
scanTimeStart := time.Now()
c, err := repo.CommitObject(r.Hash())
if err != nil {
return err
}
// Staged change so the Commit details do not yet exist. Insert empty defaults.
c.Hash = plumbing.Hash{}
c.Message = "***STAGED CHANGES***"
c.Author.Name = ""
c.Author.Email = ""
c.Author.When = time.Unix(0, 0).UTC()
prevTree, err := c.Tree()
if err != nil {
return err
}
wt, err := repo.Worktree()
if err != nil {
return err
}
status, err := wt.Status()
for fn, state := range status {
var (
prevFileContents string
currFileContents string
filename string
)
if state.Staging != git.Untracked {
if state.Staging == git.Deleted {
// file in staging has been deleted, aka it is not on the filesystem
// so the contents of the file are ""
currFileContents = ""
} else {
workTreeBuf := bytes.NewBuffer(nil)
workTreeFile, err := wt.Filesystem.Open(fn)
if err != nil {
continue
}
if _, err := io.Copy(workTreeBuf, workTreeFile); err != nil {
return err
}
currFileContents = workTreeBuf.String()
filename = workTreeFile.Name()
}
// get files at HEAD state
prevFile, err := prevTree.File(fn)
if err != nil {
prevFileContents = ""
} else {
prevFileContents, err = prevFile.Contents()
if err != nil {
return err
}
if filename == "" {
filename = prevFile.Name
}
}
dmp := diffmatchpatch.New()
diffs := dmp.DiffCleanupSemantic(dmp.DiffMain(prevFileContents, currFileContents, false))
var diffContents string
for _, d := range diffs {
if d.Type == diffmatchpatch.DiffInsert {
diffContents += fmt.Sprintf("%s\n", d.Text)
}
}
repo.CheckRules(&Bundle{
Content: diffContents,
FilePath: filename,
Commit: c,
scanType: uncommittedScan,
})
}
}
if err != nil {
return err
}
repo.Manager.RecordTime(manager.ScanTime(howLong(scanTimeStart)))
return nil
}
// scan accepts a Patch, Commit, and repo. If the patches contains files that are
// binary, then gitleaks will skip scanning that file OR if a file is matched on
// allowlisted files set in the configuration. If a global rule for files is defined and a filename
// matches said global rule, then a leak is sent to the manager.
// After that, file chunks are created which are then inspected by InspectString()
func scanPatch(patch *object.Patch, c *object.Commit, repo *Repo) {
bundle := Bundle{
Commit: c,
Patch: patch.String(),
scanType: patchScan,
}
for _, f := range patch.FilePatches() {
if repo.timeoutReached() {
return
}
if f.IsBinary() {
continue
}
for _, chunk := range f.Chunks() {
if chunk.Type() == fdiff.Add || (repo.Manager.Opts.Deletion && chunk.Type() == fdiff.Delete) {
bundle.Content = chunk.Content()
bundle.Operation = chunk.Type()
// get filepath
from, to := f.Files()
if from != nil {
bundle.FilePath = from.Path()
} else if to != nil {
bundle.FilePath = to.Path()
} else {
bundle.FilePath = "???"
}
repo.CheckRules(&bundle)
}
}
}
}
// scanCommit accepts a Commit hash, repo, and commit scanning function. A new Commit
// object will be created from the hash which will be passed into either scanCommitPatches
// or scanFilesAtCommit depending on the options set.
func scanCommit(commit string, repo *Repo, f commitScanner) error {
if commit == "latest" {
ref, err := repo.Repository.Head()
if err != nil {
return err
}
commit = ref.Hash().String()
}
repo.Manager.IncrementCommits(1)
h := plumbing.NewHash(commit)
c, err := repo.CommitObject(h)
if err != nil {
return err
}
return f(c, repo)
}
// scanCommitPatches accepts a Commit object and a repo. This function is only called when the --Commit=
// option has been set. That option tells gitleaks to look only at a single Commit and check the contents
// of said Commit. Similar to scan(), if the files contained in the Commit are a binaries or if they are
// allowlisted then those files will be skipped.
func scanCommitPatches(c *object.Commit, repo *Repo) error {
if len(c.ParentHashes) == 0 {
err := scanFilesAtCommit(c, repo)
if err != nil {
return err
}
}
return c.Parents().ForEach(func(parent *object.Commit) error {
defer func() {
if err := recover(); err != nil {
// sometimes the Patch generation will fail due to a known bug in
// sergi's go-diff: https://github.com/sergi/go-diff/issues/89.
// Once a fix has been merged I will remove this recover.
return
}
}()
if repo.timeoutReached() {
return nil
}
if parent == nil {
return nil
}
start := time.Now()
patch, err := parent.Patch(c)
if err != nil {
return fmt.Errorf("could not generate Patch")
}
repo.Manager.RecordTime(manager.PatchTime(howLong(start)))
scanPatch(patch, c, repo)
return nil
})
}
// scanFilesAtCommit accepts a Commit object and a repo. This function is only called when the --files-at-Commit=
// option has been set. That option tells gitleaks to look only at ALL the files at a Commit and check the contents
// of said Commit. Similar to scan(), if the files contained in the Commit are a binaries or if they are
// allowlisted then those files will be skipped.
func | (c *object.Commit, repo *Repo) error {
fIter, err := c.Files()
if err != nil {
return err
}
err = fIter.ForEach(func(f *object.File) error {
bin, err := f.IsBinary()
if bin || repo.timeoutReached() {
return nil
} else if err != nil {
return err
}
content, err := f.Contents()
if err != nil {
return err
}
repo.CheckRules(&Bundle{
Content: content,
FilePath: f.Name,
Commit: c,
scanType: commitScan,
Operation: fdiff.Add,
})
return nil
})
return err
}
// depthReached checks if i meets the depth (--depth=) if set
func (repo *Repo) depthReached(i int) bool {
if repo.Manager.Opts.Depth != 0 && repo.Manager.Opts.Depth == i {
log.Warnf("Exceeded depth limit (%d)", i)
return true
}
return false
}
// emptyCommit generates an empty commit used for scanning uncommitted changes
func emptyCommit() *object.Commit {
return &object.Commit{
Hash: plumbing.Hash{},
Message: "***STAGED CHANGES***",
Author: object.Signature{
Name: "",
Email: "",
When: time.Unix(0, 0).UTC(),
},
}
}
| scanFilesAtCommit | identifier_name |
scan.go | package scan
import (
"bufio"
"bytes"
"fmt"
"io"
"os"
"strings"
"sync"
"time"
"github.com/zricethezav/gitleaks/v6/manager"
"github.com/go-git/go-git/v5"
"github.com/go-git/go-git/v5/plumbing"
fdiff "github.com/go-git/go-git/v5/plumbing/format/diff"
"github.com/go-git/go-git/v5/plumbing/object"
"github.com/go-git/go-git/v5/plumbing/storer"
"github.com/sergi/go-diff/diffmatchpatch"
log "github.com/sirupsen/logrus"
)
// Bundle contains various git information for scans.
type Bundle struct {
Commit *object.Commit
Patch string
Content string
FilePath string
Operation fdiff.Operation
reader io.Reader
lineLookup map[string]bool
scanType int
}
// commitScanner is a function signature for scanning commits. There is some
// redundant work needed by scanning all files at a commit (--files-at-commit=) and scanning
// the patches generated by a commit (--commit=). The function scanCommit wraps that redundant work
// and accepts a commitScanner for the different logic needed between the two cases described above.
type commitScanner func(c *object.Commit, repo *Repo) error
const (
// We need to differentiate between scans as the logic for line searching is different between
// scanning patches, commits, and uncommitted files.
patchScan int = iota + 1
uncommittedScan
commitScan
)
// Scan is responsible for scanning the entire history (default behavior) of a
// git repo. Options that can change the behavior of this function include: --Commit, --depth, --branch.
// See options/options.go for an explanation on these options.
func (repo *Repo) Scan() error |
// scanEmpty scans an empty repo without any commits. See https://github.com/zricethezav/gitleaks/issues/352
func (repo *Repo) scanEmpty() error {
scanTimeStart := time.Now()
wt, err := repo.Worktree()
if err != nil {
return err
}
status, err := wt.Status()
if err != nil {
return err
}
for fn := range status {
workTreeBuf := bytes.NewBuffer(nil)
workTreeFile, err := wt.Filesystem.Open(fn)
if err != nil {
continue
}
if _, err := io.Copy(workTreeBuf, workTreeFile); err != nil {
return err
}
repo.CheckRules(&Bundle{
Content: workTreeBuf.String(),
FilePath: workTreeFile.Name(),
Commit: emptyCommit(),
scanType: uncommittedScan,
})
}
repo.Manager.RecordTime(manager.ScanTime(howLong(scanTimeStart)))
return nil
}
// scanUncommitted will do a `git diff` and scan changed files that are being tracked. This is useful functionality
// for a pre-Commit hook so you can make sure your code does not have any leaks before committing.
func (repo *Repo) scanUncommitted() error {
// load up alternative config if possible, if not use manager's config
if repo.Manager.Opts.RepoConfig {
cfg, err := repo.loadRepoConfig()
if err != nil {
return err
}
repo.config = cfg
}
if err := repo.setupTimeout(); err != nil {
return err
}
r, err := repo.Head()
if err == plumbing.ErrReferenceNotFound {
// possibly an empty repo, or maybe its not, either way lets scan all the files in the directory
return repo.scanEmpty()
} else if err != nil {
return err
}
scanTimeStart := time.Now()
c, err := repo.CommitObject(r.Hash())
if err != nil {
return err
}
// Staged change so the Commit details do not yet exist. Insert empty defaults.
c.Hash = plumbing.Hash{}
c.Message = "***STAGED CHANGES***"
c.Author.Name = ""
c.Author.Email = ""
c.Author.When = time.Unix(0, 0).UTC()
prevTree, err := c.Tree()
if err != nil {
return err
}
wt, err := repo.Worktree()
if err != nil {
return err
}
status, err := wt.Status()
for fn, state := range status {
var (
prevFileContents string
currFileContents string
filename string
)
if state.Staging != git.Untracked {
if state.Staging == git.Deleted {
// file in staging has been deleted, aka it is not on the filesystem
// so the contents of the file are ""
currFileContents = ""
} else {
workTreeBuf := bytes.NewBuffer(nil)
workTreeFile, err := wt.Filesystem.Open(fn)
if err != nil {
continue
}
if _, err := io.Copy(workTreeBuf, workTreeFile); err != nil {
return err
}
currFileContents = workTreeBuf.String()
filename = workTreeFile.Name()
}
// get files at HEAD state
prevFile, err := prevTree.File(fn)
if err != nil {
prevFileContents = ""
} else {
prevFileContents, err = prevFile.Contents()
if err != nil {
return err
}
if filename == "" {
filename = prevFile.Name
}
}
dmp := diffmatchpatch.New()
diffs := dmp.DiffCleanupSemantic(dmp.DiffMain(prevFileContents, currFileContents, false))
var diffContents string
for _, d := range diffs {
if d.Type == diffmatchpatch.DiffInsert {
diffContents += fmt.Sprintf("%s\n", d.Text)
}
}
repo.CheckRules(&Bundle{
Content: diffContents,
FilePath: filename,
Commit: c,
scanType: uncommittedScan,
})
}
}
if err != nil {
return err
}
repo.Manager.RecordTime(manager.ScanTime(howLong(scanTimeStart)))
return nil
}
// scan accepts a Patch, Commit, and repo. If the patches contains files that are
// binary, then gitleaks will skip scanning that file OR if a file is matched on
// allowlisted files set in the configuration. If a global rule for files is defined and a filename
// matches said global rule, then a leak is sent to the manager.
// After that, file chunks are created which are then inspected by InspectString()
func scanPatch(patch *object.Patch, c *object.Commit, repo *Repo) {
bundle := Bundle{
Commit: c,
Patch: patch.String(),
scanType: patchScan,
}
for _, f := range patch.FilePatches() {
if repo.timeoutReached() {
return
}
if f.IsBinary() {
continue
}
for _, chunk := range f.Chunks() {
if chunk.Type() == fdiff.Add || (repo.Manager.Opts.Deletion && chunk.Type() == fdiff.Delete) {
bundle.Content = chunk.Content()
bundle.Operation = chunk.Type()
// get filepath
from, to := f.Files()
if from != nil {
bundle.FilePath = from.Path()
} else if to != nil {
bundle.FilePath = to.Path()
} else {
bundle.FilePath = "???"
}
repo.CheckRules(&bundle)
}
}
}
}
// scanCommit accepts a Commit hash, repo, and commit scanning function. A new Commit
// object will be created from the hash which will be passed into either scanCommitPatches
// or scanFilesAtCommit depending on the options set.
func scanCommit(commit string, repo *Repo, f commitScanner) error {
if commit == "latest" {
ref, err := repo.Repository.Head()
if err != nil {
return err
}
commit = ref.Hash().String()
}
repo.Manager.IncrementCommits(1)
h := plumbing.NewHash(commit)
c, err := repo.CommitObject(h)
if err != nil {
return err
}
return f(c, repo)
}
// scanCommitPatches accepts a Commit object and a repo. This function is only called when the --Commit=
// option has been set. That option tells gitleaks to look only at a single Commit and check the contents
// of said Commit. Similar to scan(), if the files contained in the Commit are a binaries or if they are
// allowlisted then those files will be skipped.
func scanCommitPatches(c *object.Commit, repo *Repo) error {
if len(c.ParentHashes) == 0 {
err := scanFilesAtCommit(c, repo)
if err != nil {
return err
}
}
return c.Parents().ForEach(func(parent *object.Commit) error {
defer func() {
if err := recover(); err != nil {
// sometimes the Patch generation will fail due to a known bug in
// sergi's go-diff: https://github.com/sergi/go-diff/issues/89.
// Once a fix has been merged I will remove this recover.
return
}
}()
if repo.timeoutReached() {
return nil
}
if parent == nil {
return nil
}
start := time.Now()
patch, err := parent.Patch(c)
if err != nil {
return fmt.Errorf("could not generate Patch")
}
repo.Manager.RecordTime(manager.PatchTime(howLong(start)))
scanPatch(patch, c, repo)
return nil
})
}
// scanFilesAtCommit accepts a Commit object and a repo. This function is only called when the --files-at-Commit=
// option has been set. That option tells gitleaks to look only at ALL the files at a Commit and check the contents
// of said Commit. Similar to scan(), if the files contained in the Commit are a binaries or if they are
// allowlisted then those files will be skipped.
func scanFilesAtCommit(c *object.Commit, repo *Repo) error {
fIter, err := c.Files()
if err != nil {
return err
}
err = fIter.ForEach(func(f *object.File) error {
bin, err := f.IsBinary()
if bin || repo.timeoutReached() {
return nil
} else if err != nil {
return err
}
content, err := f.Contents()
if err != nil {
return err
}
repo.CheckRules(&Bundle{
Content: content,
FilePath: f.Name,
Commit: c,
scanType: commitScan,
Operation: fdiff.Add,
})
return nil
})
return err
}
// depthReached checks if i meets the depth (--depth=) if set
func (repo *Repo) depthReached(i int) bool {
if repo.Manager.Opts.Depth != 0 && repo.Manager.Opts.Depth == i {
log.Warnf("Exceeded depth limit (%d)", i)
return true
}
return false
}
// emptyCommit generates an empty commit used for scanning uncommitted changes
func emptyCommit() *object.Commit {
return &object.Commit{
Hash: plumbing.Hash{},
Message: "***STAGED CHANGES***",
Author: object.Signature{
Name: "",
Email: "",
When: time.Unix(0, 0).UTC(),
},
}
}
| {
if err := repo.setupTimeout(); err != nil {
return err
}
if repo.cancel != nil {
defer repo.cancel()
}
if repo.Repository == nil {
return fmt.Errorf("%s repo is empty", repo.Name)
}
// load up alternative config if possible, if not use manager's config
if repo.Manager.Opts.RepoConfig {
cfg, err := repo.loadRepoConfig()
if err != nil {
return err
}
repo.config = cfg
}
scanTimeStart := time.Now()
// See https://github.com/zricethezav/gitleaks/issues/326
// Scan commit patches, all files at a commit, or a range of commits
if repo.Manager.Opts.Commit != "" {
return scanCommit(repo.Manager.Opts.Commit, repo, scanCommitPatches)
} else if repo.Manager.Opts.FilesAtCommit != "" {
return scanCommit(repo.Manager.Opts.FilesAtCommit, repo, scanFilesAtCommit)
} else if repo.Manager.Opts.Commits != "" {
commits := strings.Split(repo.Manager.Opts.Commits, ",")
for _, c := range commits {
err := scanCommit(c, repo, scanCommitPatches)
if err != nil {
return err
}
}
return nil
} else if repo.Manager.Opts.CommitsFile != "" {
file, err := os.Open(repo.Manager.Opts.CommitsFile)
if err != nil {
return err
}
defer file.Close()
scanner := bufio.NewScanner(file)
for scanner.Scan() {
err := scanCommit(scanner.Text(), repo, scanCommitPatches)
if err != nil {
return err
}
}
return nil
}
logOpts, err := getLogOptions(repo)
if err != nil {
return err
}
cIter, err := repo.Log(logOpts)
if err != nil {
return err
}
cc := 0
semaphore := make(chan bool, howManyThreads(repo.Manager.Opts.Threads))
wg := sync.WaitGroup{}
err = cIter.ForEach(func(c *object.Commit) error {
if c == nil || repo.timeoutReached() || repo.depthReached(cc) {
return storer.ErrStop
}
// Check if Commit is allowlisted
if isCommitAllowListed(c.Hash.String(), repo.config.Allowlist.Commits) {
return nil
}
// Check if at root
if len(c.ParentHashes) == 0 {
cc++
err = scanFilesAtCommit(c, repo)
if err != nil {
return err
}
return nil
}
// increase Commit counter
cc++
// inspect first parent only as all other parents will be eventually reached
// (they exist as the tip of other branches, etc)
// See https://github.com/zricethezav/gitleaks/issues/413 for details
parent, err := c.Parent(0)
if err != nil {
return err
}
defer func() {
if err := recover(); err != nil {
// sometimes the Patch generation will fail due to a known bug in
// sergi's go-diff: https://github.com/sergi/go-diff/issues/89.
// Once a fix has been merged I will remove this recover.
return
}
}()
if repo.timeoutReached() {
return nil
}
if parent == nil {
// shouldn't reach this point but just in case
return nil
}
start := time.Now()
patch, err := parent.Patch(c)
if err != nil {
log.Errorf("could not generate Patch")
}
repo.Manager.RecordTime(manager.PatchTime(howLong(start)))
wg.Add(1)
semaphore <- true
go func(c *object.Commit, patch *object.Patch) {
defer func() {
<-semaphore
wg.Done()
}()
scanPatch(patch, c, repo)
}(c, patch)
if c.Hash.String() == repo.Manager.Opts.CommitTo {
return storer.ErrStop
}
return nil
})
wg.Wait()
repo.Manager.RecordTime(manager.ScanTime(howLong(scanTimeStart)))
repo.Manager.IncrementCommits(cc)
return nil
} | identifier_body |
scan.go | package scan
import (
"bufio"
"bytes"
"fmt"
"io"
"os"
"strings"
"sync"
"time"
"github.com/zricethezav/gitleaks/v6/manager"
"github.com/go-git/go-git/v5"
"github.com/go-git/go-git/v5/plumbing"
fdiff "github.com/go-git/go-git/v5/plumbing/format/diff"
"github.com/go-git/go-git/v5/plumbing/object"
"github.com/go-git/go-git/v5/plumbing/storer"
"github.com/sergi/go-diff/diffmatchpatch"
log "github.com/sirupsen/logrus"
)
// Bundle contains various git information for scans.
type Bundle struct {
Commit *object.Commit
Patch string
Content string
FilePath string
Operation fdiff.Operation
reader io.Reader
lineLookup map[string]bool
scanType int
}
// commitScanner is a function signature for scanning commits. There is some
// redundant work needed by scanning all files at a commit (--files-at-commit=) and scanning
// the patches generated by a commit (--commit=). The function scanCommit wraps that redundant work
// and accepts a commitScanner for the different logic needed between the two cases described above.
type commitScanner func(c *object.Commit, repo *Repo) error
const (
// We need to differentiate between scans as the logic for line searching is different between
// scanning patches, commits, and uncommitted files.
patchScan int = iota + 1
uncommittedScan
commitScan
)
// Scan is responsible for scanning the entire history (default behavior) of a
// git repo. Options that can change the behavior of this function include: --Commit, --depth, --branch.
// See options/options.go for an explanation on these options.
func (repo *Repo) Scan() error {
if err := repo.setupTimeout(); err != nil {
return err
}
if repo.cancel != nil {
defer repo.cancel()
}
if repo.Repository == nil {
return fmt.Errorf("%s repo is empty", repo.Name)
}
// load up alternative config if possible, if not use manager's config
if repo.Manager.Opts.RepoConfig {
cfg, err := repo.loadRepoConfig()
if err != nil {
return err
}
repo.config = cfg
}
scanTimeStart := time.Now()
// See https://github.com/zricethezav/gitleaks/issues/326
// Scan commit patches, all files at a commit, or a range of commits
if repo.Manager.Opts.Commit != "" {
return scanCommit(repo.Manager.Opts.Commit, repo, scanCommitPatches)
} else if repo.Manager.Opts.FilesAtCommit != "" {
return scanCommit(repo.Manager.Opts.FilesAtCommit, repo, scanFilesAtCommit)
} else if repo.Manager.Opts.Commits != "" {
commits := strings.Split(repo.Manager.Opts.Commits, ",")
for _, c := range commits {
err := scanCommit(c, repo, scanCommitPatches)
if err != nil {
return err
}
}
return nil
} else if repo.Manager.Opts.CommitsFile != "" {
file, err := os.Open(repo.Manager.Opts.CommitsFile)
if err != nil {
return err
}
defer file.Close()
scanner := bufio.NewScanner(file)
for scanner.Scan() {
err := scanCommit(scanner.Text(), repo, scanCommitPatches)
if err != nil {
return err
}
}
return nil
}
logOpts, err := getLogOptions(repo)
if err != nil {
return err
}
cIter, err := repo.Log(logOpts)
if err != nil {
return err
}
cc := 0
semaphore := make(chan bool, howManyThreads(repo.Manager.Opts.Threads))
wg := sync.WaitGroup{}
err = cIter.ForEach(func(c *object.Commit) error {
if c == nil || repo.timeoutReached() || repo.depthReached(cc) {
return storer.ErrStop
}
// Check if Commit is allowlisted
if isCommitAllowListed(c.Hash.String(), repo.config.Allowlist.Commits) {
return nil
}
// Check if at root
if len(c.ParentHashes) == 0 {
cc++
err = scanFilesAtCommit(c, repo)
if err != nil {
return err
}
return nil
}
// increase Commit counter
cc++
// inspect first parent only as all other parents will be eventually reached
// (they exist as the tip of other branches, etc)
// See https://github.com/zricethezav/gitleaks/issues/413 for details
parent, err := c.Parent(0)
if err != nil {
return err
}
defer func() {
if err := recover(); err != nil {
// sometimes the Patch generation will fail due to a known bug in
// sergi's go-diff: https://github.com/sergi/go-diff/issues/89.
// Once a fix has been merged I will remove this recover.
return
}
}()
if repo.timeoutReached() {
return nil
}
if parent == nil {
// shouldn't reach this point but just in case
return nil
}
start := time.Now()
patch, err := parent.Patch(c)
if err != nil {
log.Errorf("could not generate Patch")
}
repo.Manager.RecordTime(manager.PatchTime(howLong(start)))
wg.Add(1)
semaphore <- true | wg.Done()
}()
scanPatch(patch, c, repo)
}(c, patch)
if c.Hash.String() == repo.Manager.Opts.CommitTo {
return storer.ErrStop
}
return nil
})
wg.Wait()
repo.Manager.RecordTime(manager.ScanTime(howLong(scanTimeStart)))
repo.Manager.IncrementCommits(cc)
return nil
}
// scanEmpty scans an empty repo without any commits. See https://github.com/zricethezav/gitleaks/issues/352
func (repo *Repo) scanEmpty() error {
scanTimeStart := time.Now()
wt, err := repo.Worktree()
if err != nil {
return err
}
status, err := wt.Status()
if err != nil {
return err
}
for fn := range status {
workTreeBuf := bytes.NewBuffer(nil)
workTreeFile, err := wt.Filesystem.Open(fn)
if err != nil {
continue
}
if _, err := io.Copy(workTreeBuf, workTreeFile); err != nil {
return err
}
repo.CheckRules(&Bundle{
Content: workTreeBuf.String(),
FilePath: workTreeFile.Name(),
Commit: emptyCommit(),
scanType: uncommittedScan,
})
}
repo.Manager.RecordTime(manager.ScanTime(howLong(scanTimeStart)))
return nil
}
// scanUncommitted will do a `git diff` and scan changed files that are being tracked. This is useful functionality
// for a pre-Commit hook so you can make sure your code does not have any leaks before committing.
func (repo *Repo) scanUncommitted() error {
// load up alternative config if possible, if not use manager's config
if repo.Manager.Opts.RepoConfig {
cfg, err := repo.loadRepoConfig()
if err != nil {
return err
}
repo.config = cfg
}
if err := repo.setupTimeout(); err != nil {
return err
}
r, err := repo.Head()
if err == plumbing.ErrReferenceNotFound {
// possibly an empty repo, or maybe its not, either way lets scan all the files in the directory
return repo.scanEmpty()
} else if err != nil {
return err
}
scanTimeStart := time.Now()
c, err := repo.CommitObject(r.Hash())
if err != nil {
return err
}
// Staged change so the Commit details do not yet exist. Insert empty defaults.
c.Hash = plumbing.Hash{}
c.Message = "***STAGED CHANGES***"
c.Author.Name = ""
c.Author.Email = ""
c.Author.When = time.Unix(0, 0).UTC()
prevTree, err := c.Tree()
if err != nil {
return err
}
wt, err := repo.Worktree()
if err != nil {
return err
}
status, err := wt.Status()
for fn, state := range status {
var (
prevFileContents string
currFileContents string
filename string
)
if state.Staging != git.Untracked {
if state.Staging == git.Deleted {
// file in staging has been deleted, aka it is not on the filesystem
// so the contents of the file are ""
currFileContents = ""
} else {
workTreeBuf := bytes.NewBuffer(nil)
workTreeFile, err := wt.Filesystem.Open(fn)
if err != nil {
continue
}
if _, err := io.Copy(workTreeBuf, workTreeFile); err != nil {
return err
}
currFileContents = workTreeBuf.String()
filename = workTreeFile.Name()
}
// get files at HEAD state
prevFile, err := prevTree.File(fn)
if err != nil {
prevFileContents = ""
} else {
prevFileContents, err = prevFile.Contents()
if err != nil {
return err
}
if filename == "" {
filename = prevFile.Name
}
}
dmp := diffmatchpatch.New()
diffs := dmp.DiffCleanupSemantic(dmp.DiffMain(prevFileContents, currFileContents, false))
var diffContents string
for _, d := range diffs {
if d.Type == diffmatchpatch.DiffInsert {
diffContents += fmt.Sprintf("%s\n", d.Text)
}
}
repo.CheckRules(&Bundle{
Content: diffContents,
FilePath: filename,
Commit: c,
scanType: uncommittedScan,
})
}
}
if err != nil {
return err
}
repo.Manager.RecordTime(manager.ScanTime(howLong(scanTimeStart)))
return nil
}
// scan accepts a Patch, Commit, and repo. If the patches contains files that are
// binary, then gitleaks will skip scanning that file OR if a file is matched on
// allowlisted files set in the configuration. If a global rule for files is defined and a filename
// matches said global rule, then a leak is sent to the manager.
// After that, file chunks are created which are then inspected by InspectString()
func scanPatch(patch *object.Patch, c *object.Commit, repo *Repo) {
bundle := Bundle{
Commit: c,
Patch: patch.String(),
scanType: patchScan,
}
for _, f := range patch.FilePatches() {
if repo.timeoutReached() {
return
}
if f.IsBinary() {
continue
}
for _, chunk := range f.Chunks() {
if chunk.Type() == fdiff.Add || (repo.Manager.Opts.Deletion && chunk.Type() == fdiff.Delete) {
bundle.Content = chunk.Content()
bundle.Operation = chunk.Type()
// get filepath
from, to := f.Files()
if from != nil {
bundle.FilePath = from.Path()
} else if to != nil {
bundle.FilePath = to.Path()
} else {
bundle.FilePath = "???"
}
repo.CheckRules(&bundle)
}
}
}
}
// scanCommit accepts a Commit hash, repo, and commit scanning function. A new Commit
// object will be created from the hash which will be passed into either scanCommitPatches
// or scanFilesAtCommit depending on the options set.
func scanCommit(commit string, repo *Repo, f commitScanner) error {
if commit == "latest" {
ref, err := repo.Repository.Head()
if err != nil {
return err
}
commit = ref.Hash().String()
}
repo.Manager.IncrementCommits(1)
h := plumbing.NewHash(commit)
c, err := repo.CommitObject(h)
if err != nil {
return err
}
return f(c, repo)
}
// scanCommitPatches accepts a Commit object and a repo. This function is only called when the --Commit=
// option has been set. That option tells gitleaks to look only at a single Commit and check the contents
// of said Commit. Similar to scan(), if the files contained in the Commit are a binaries or if they are
// allowlisted then those files will be skipped.
func scanCommitPatches(c *object.Commit, repo *Repo) error {
if len(c.ParentHashes) == 0 {
err := scanFilesAtCommit(c, repo)
if err != nil {
return err
}
}
return c.Parents().ForEach(func(parent *object.Commit) error {
defer func() {
if err := recover(); err != nil {
// sometimes the Patch generation will fail due to a known bug in
// sergi's go-diff: https://github.com/sergi/go-diff/issues/89.
// Once a fix has been merged I will remove this recover.
return
}
}()
if repo.timeoutReached() {
return nil
}
if parent == nil {
return nil
}
start := time.Now()
patch, err := parent.Patch(c)
if err != nil {
return fmt.Errorf("could not generate Patch")
}
repo.Manager.RecordTime(manager.PatchTime(howLong(start)))
scanPatch(patch, c, repo)
return nil
})
}
// scanFilesAtCommit accepts a Commit object and a repo. This function is only called when the --files-at-Commit=
// option has been set. That option tells gitleaks to look only at ALL the files at a Commit and check the contents
// of said Commit. Similar to scan(), if the files contained in the Commit are a binaries or if they are
// allowlisted then those files will be skipped.
func scanFilesAtCommit(c *object.Commit, repo *Repo) error {
fIter, err := c.Files()
if err != nil {
return err
}
err = fIter.ForEach(func(f *object.File) error {
bin, err := f.IsBinary()
if bin || repo.timeoutReached() {
return nil
} else if err != nil {
return err
}
content, err := f.Contents()
if err != nil {
return err
}
repo.CheckRules(&Bundle{
Content: content,
FilePath: f.Name,
Commit: c,
scanType: commitScan,
Operation: fdiff.Add,
})
return nil
})
return err
}
// depthReached checks if i meets the depth (--depth=) if set
func (repo *Repo) depthReached(i int) bool {
if repo.Manager.Opts.Depth != 0 && repo.Manager.Opts.Depth == i {
log.Warnf("Exceeded depth limit (%d)", i)
return true
}
return false
}
// emptyCommit generates an empty commit used for scanning uncommitted changes
func emptyCommit() *object.Commit {
return &object.Commit{
Hash: plumbing.Hash{},
Message: "***STAGED CHANGES***",
Author: object.Signature{
Name: "",
Email: "",
When: time.Unix(0, 0).UTC(),
},
}
} | go func(c *object.Commit, patch *object.Patch) {
defer func() {
<-semaphore | random_line_split |
main.go | package main
import (
"errors"
"flag"
"os"
"os/signal"
"path/filepath"
"runtime/pprof"
"strings"
"syscall"
"time"
"github.com/bwmarrin/discordgo"
"github.com/go-redis/redis/v8"
"github.com/sarulabs/di/v2"
"github.com/sirupsen/logrus"
"github.com/zekroTJA/shinpuru/internal/config"
"github.com/zekroTJA/shinpuru/internal/inits"
"github.com/zekroTJA/shinpuru/internal/listeners"
"github.com/zekroTJA/shinpuru/internal/middleware"
"github.com/zekroTJA/shinpuru/internal/services/backup"
"github.com/zekroTJA/shinpuru/internal/services/database"
"github.com/zekroTJA/shinpuru/internal/services/guildlog"
"github.com/zekroTJA/shinpuru/internal/services/karma"
"github.com/zekroTJA/shinpuru/internal/services/kvcache"
"github.com/zekroTJA/shinpuru/internal/services/report"
"github.com/zekroTJA/shinpuru/internal/services/webserver/auth"
"github.com/zekroTJA/shinpuru/internal/util"
"github.com/zekroTJA/shinpuru/internal/util/embedded"
"github.com/zekroTJA/shinpuru/internal/util/startupmsg"
"github.com/zekroTJA/shinpuru/internal/util/static"
"github.com/zekroTJA/shinpuru/pkg/onetimeauth/v2"
"github.com/zekroTJA/shinpuru/pkg/startuptime"
"github.com/zekroTJA/shireikan"
"github.com/zekroTJA/shinpuru/pkg/angularservice"
)
var (
flagConfig = flag.String("c", "config.yml", "The location of the main config file")
flagDocker = flag.Bool("docker", false, "wether shinpuru is running in a docker container or not")
flagDevMode = flag.Bool("devmode", false, "start in development mode")
flagForceColor = flag.Bool("forcecolor", false, "force log color")
flagProfile = flag.String("cpuprofile", "", "Records a CPU profile to the desired location")
flagQuiet = flag.Bool("quiet", false, "Dont print startup message")
)
const (
envKeyProfile = "CPUPROFILE"
)
//////////////////////////////////////////////////////////////////////
//
// SHINPURU
// --------
// This is the main initialization for shinpuru which initializes
// all instances like the database middleware, the twitch notify
// listener service, life cycle timer, storage middleware,
// permission middleware, command handler and - finally -
// initializes the discord session event loop.
// shinpuru is configured via a configuration file which location
// can be passed via the '-c' parameter.
// When shinpuru is run in a Docker container, the '-docker' flag
// should be passed to fix configuration values like the location
// of the sqlite3 database (when the sqlite3 driver is used) or
// the web server exposure port.
//
//////////////////////////////////////////////////////////////////////
func main() {
// Parse command line flags
flag.Parse()
if !*flagQuiet {
startupmsg.Output(os.Stdout)
}
// Initialize dependency injection builder
diBuilder, _ := di.NewBuilder()
// Setup config parser
diBuilder.Add(di.Def{
Name: static.DiConfigParser,
Build: func(ctn di.Container) (p interface{}, err error) {
ext := strings.ToLower(filepath.Ext(*flagConfig))
switch ext {
case ".yml", ".yaml":
p = new(config.YAMLConfigParser)
case ".json":
p = new(config.JSONConfigParser)
default:
err = errors.New("unsupported configuration file")
}
return
},
})
// Initialize config
diBuilder.Add(di.Def{
Name: static.DiConfig,
Build: func(ctn di.Container) (interface{}, error) {
return inits.InitConfig(*flagConfig, ctn), nil
},
})
// Initialize metrics server
diBuilder.Add(di.Def{
Name: static.DiMetrics,
Build: func(ctn di.Container) (interface{}, error) {
return inits.InitMetrics(ctn), nil
},
})
// Initialize redis client
diBuilder.Add(di.Def{
Name: static.DiRedis,
Build: func(ctn di.Container) (interface{}, error) {
config := ctn.Get(static.DiConfig).(*config.Config)
return redis.NewClient(&redis.Options{
Addr: config.Database.Redis.Addr,
Password: config.Database.Redis.Password,
DB: config.Database.Redis.Type,
}), nil
},
})
// Initialize database middleware and shutdown routine
diBuilder.Add(di.Def{
Name: static.DiDatabase,
Build: func(ctn di.Container) (interface{}, error) {
return inits.InitDatabase(ctn), nil
},
Close: func(obj interface{}) error {
database := obj.(database.Database)
logrus.Info("Shutting down database connection...")
database.Close()
return nil
},
})
// Initialize twitch notification listener
diBuilder.Add(di.Def{
Name: static.DiTwitchNotifyListener,
Build: func(ctn di.Container) (interface{}, error) {
return listeners.NewListenerTwitchNotify(ctn), nil
},
Close: func(obj interface{}) error {
listener := obj.(*listeners.ListenerTwitchNotify)
logrus.Info("Shutting down twitch notify listener...")
listener.TearDown()
return nil
},
})
// Initialize twitch notification worker
diBuilder.Add(di.Def{
Name: static.DiTwitchNotifyWorker,
Build: func(ctn di.Container) (interface{}, error) {
return inits.InitTwitchNotifyWorker(ctn), nil
},
})
// Initialize life cycle timer
diBuilder.Add(di.Def{
Name: static.DiLifecycleTimer,
Build: func(ctn di.Container) (interface{}, error) {
return inits.InitLTCTimer(ctn), nil
},
})
// Initialize storage middleware
diBuilder.Add(di.Def{
Name: static.DiObjectStorage,
Build: func(ctn di.Container) (interface{}, error) {
return inits.InitStorage(ctn), nil
},
})
// Initialize permissions command handler middleware
diBuilder.Add(di.Def{
Name: static.DiPermissionMiddleware,
Build: func(ctn di.Container) (interface{}, error) {
return middleware.NewPermissionMiddleware(ctn), nil
},
})
// Initialize ghost ping ignore command handler middleware
diBuilder.Add(di.Def{
Name: static.DiGhostpingIgnoreMiddleware,
Build: func(ctn di.Container) (interface{}, error) {
return middleware.NewGhostPingIgnoreMiddleware(), nil
},
})
// Initialize discord bot session and shutdown routine
diBuilder.Add(di.Def{
Name: static.DiDiscordSession,
Build: func(ctn di.Container) (interface{}, error) {
return discordgo.New()
},
Close: func(obj interface{}) error {
session := obj.(*discordgo.Session)
logrus.Info("Shutting down bot session...")
session.Close()
return nil
},
})
// Initialize Discord OAuth Module
diBuilder.Add(di.Def{
Name: static.DiDiscordOAuthModule,
Build: func(ctn di.Container) (interface{}, error) {
return inits.InitDiscordOAuth(ctn), nil
},
})
// Initialize auth refresh token handler
diBuilder.Add(di.Def{
Name: static.DiAuthRefreshTokenHandler,
Build: func(ctn di.Container) (interface{}, error) {
return auth.NewDatabaseRefreshTokenHandler(ctn), nil
},
})
// Initialize auth access token handler
diBuilder.Add(di.Def{
Name: static.DiAuthAccessTokenHandler,
Build: func(ctn di.Container) (interface{}, error) {
return auth.NewJWTAccessTokenHandler(ctn)
},
})
// Initialize auth API token handler
diBuilder.Add(di.Def{
Name: static.DiAuthAPITokenHandler,
Build: func(ctn di.Container) (interface{}, error) {
return auth.NewDatabaseAPITokenHandler(ctn)
},
})
// Initialize OAuth API handler implementation
diBuilder.Add(di.Def{
Name: static.DiOAuthHandler,
Build: func(ctn di.Container) (interface{}, error) {
return auth.NewRefreshTokenRequestHandler(ctn), nil
},
})
// Initialize access token authorization middleware
diBuilder.Add(di.Def{
Name: static.DiAuthMiddleware,
Build: func(ctn di.Container) (interface{}, error) {
return auth.NewAccessTokenMiddleware(ctn), nil
},
})
// Initialize OTA generator
diBuilder.Add(di.Def{
Name: static.DiOneTimeAuth,
Build: func(ctn di.Container) (interface{}, error) {
return onetimeauth.NewJwt(&onetimeauth.JwtOptions{
Issuer: "shinpuru v." + embedded.AppVersion,
})
},
})
// Initialize backup handler
diBuilder.Add(di.Def{
Name: static.DiBackupHandler,
Build: func(ctn di.Container) (interface{}, error) {
return backup.New(ctn), nil
},
})
// Initialize command handler
diBuilder.Add(di.Def{
Name: static.DiCommandHandler,
Build: func(ctn di.Container) (interface{}, error) {
return inits.InitCommandHandler(ctn), nil
},
})
// Initialize web server
diBuilder.Add(di.Def{
Name: static.DiWebserver,
Build: func(ctn di.Container) (interface{}, error) {
return inits.InitWebServer(ctn), nil
},
})
// Initialize code execution factroy
diBuilder.Add(di.Def{
Name: static.DiCodeExecFactory,
Build: func(ctn di.Container) (interface{}, error) {
return inits.InitCodeExec(ctn), nil
},
})
// Initialize karma service
diBuilder.Add(di.Def{
Name: static.DiKarma,
Build: func(ctn di.Container) (interface{}, error) {
return karma.NewKarmaService(ctn), nil
},
})
// Initialize report service
diBuilder.Add(di.Def{
Name: static.DiReport,
Build: func(ctn di.Container) (interface{}, error) {
return report.New(ctn), nil
},
})
// Initialize guild logger
diBuilder.Add(di.Def{
Name: static.DiGuildLog,
Build: func(ctn di.Container) (interface{}, error) {
return guildlog.New(ctn), nil
},
})
// Initialize KV cache
diBuilder.Add(di.Def{
Name: static.DiKVCache,
Build: func(ctn di.Container) (interface{}, error) {
return kvcache.NewTimedmapCache(10 * time.Minute), nil
},
})
diBuilder.Add(di.Def{
Name: static.DiState,
Build: func(ctn di.Container) (interface{}, error) {
return inits.InitState(ctn)
},
})
// Build dependency injection container
ctn := diBuilder.Build()
// Setting log level from config
cfg := ctn.Get(static.DiConfig).(*config.Config)
logrus.SetLevel(logrus.Level(cfg.Logging.LogLevel))
logrus.SetFormatter(&logrus.TextFormatter{
ForceColors: true,
FullTimestamp: true,
TimestampFormat: "2006/01/02 15:04:05 MST",
})
// Initial log output
logrus.Info("Starting up...")
if profLoc := util.GetEnv(envKeyProfile, *flagProfile); profLoc != "" {
setupProfiler(profLoc)
}
if *flagDevMode {
setupDevMode()
}
// Initialize discord session and event
// handlers
inits.InitDiscordBotSession(ctn)
// This is currently the really hacky workaround
// to bypass the di.Container when trying to get
// the Command handler instance inside a command
// context, because the handler can not resolve
// itself on build, so it is bypassed here using
// shireikans object map. Maybe I find a better
// solution for that at some time.
handler := ctn.Get(static.DiCommandHandler).(shireikan.Handler)
handler.SetObject(static.DiCommandHandler, handler)
// Get Web WebServer instance to start web
// server listener
ctn.Get(static.DiWebserver)
// Get Backup Handler to ensure backup
// timer is running.
ctn.Get(static.DiBackupHandler)
// Get Metrics Server to start metrics
// endpoint.
ctn.Get(static.DiMetrics)
// Block main go routine until one of the following
// specified exit syscalls occure.
logrus.Info("Started event loop. Stop with CTRL-C...")
logrus.WithField("took", startuptime.Took().String()).Info("Initialization finished")
sc := make(chan os.Signal, 1)
signal.Notify(sc, syscall.SIGINT, syscall.SIGTERM, os.Interrupt, os.Kill)
<-sc
// Tear down dependency instances
ctn.DeleteWithSubContainers()
}
func setupDevMode() {
if embedded.IsRelease() {
logrus.Fatal("development mode is not available in production builds")
}
util.DevModeEnabled = true
// Angular dev server
angServ := angularservice.New(angularservice.Options{
Stdout: os.Stdout,
Stderr: os.Stderr,
Cd: "web",
Port: 8081,
})
logrus.Info("Starting Angular dev server...")
if err := angServ.Start(); err != nil {
logrus.WithError(err).Fatal("Failed starting Angular dev server")
}
defer func() {
logrus.Info("Shutting down Angular dev server...")
angServ.Stop()
}()
}
func setupProfiler(profLoc string) | {
f, err := os.Create(profLoc)
if err != nil {
logrus.WithError(err).Fatal("failed starting profiler")
}
pprof.StartCPUProfile(f)
logrus.WithField("location", profLoc).Warning("CPU profiling is active")
defer pprof.StopCPUProfile()
} | identifier_body | |
main.go | package main
import (
"errors"
"flag"
"os"
"os/signal"
"path/filepath"
"runtime/pprof"
"strings"
"syscall"
"time"
"github.com/bwmarrin/discordgo"
"github.com/go-redis/redis/v8"
"github.com/sarulabs/di/v2"
"github.com/sirupsen/logrus"
"github.com/zekroTJA/shinpuru/internal/config"
"github.com/zekroTJA/shinpuru/internal/inits"
"github.com/zekroTJA/shinpuru/internal/listeners"
"github.com/zekroTJA/shinpuru/internal/middleware"
"github.com/zekroTJA/shinpuru/internal/services/backup"
"github.com/zekroTJA/shinpuru/internal/services/database"
"github.com/zekroTJA/shinpuru/internal/services/guildlog"
"github.com/zekroTJA/shinpuru/internal/services/karma"
"github.com/zekroTJA/shinpuru/internal/services/kvcache"
"github.com/zekroTJA/shinpuru/internal/services/report"
"github.com/zekroTJA/shinpuru/internal/services/webserver/auth"
"github.com/zekroTJA/shinpuru/internal/util"
"github.com/zekroTJA/shinpuru/internal/util/embedded"
"github.com/zekroTJA/shinpuru/internal/util/startupmsg"
"github.com/zekroTJA/shinpuru/internal/util/static"
"github.com/zekroTJA/shinpuru/pkg/onetimeauth/v2"
"github.com/zekroTJA/shinpuru/pkg/startuptime"
"github.com/zekroTJA/shireikan"
"github.com/zekroTJA/shinpuru/pkg/angularservice"
)
var (
flagConfig = flag.String("c", "config.yml", "The location of the main config file")
flagDocker = flag.Bool("docker", false, "wether shinpuru is running in a docker container or not")
flagDevMode = flag.Bool("devmode", false, "start in development mode")
flagForceColor = flag.Bool("forcecolor", false, "force log color")
flagProfile = flag.String("cpuprofile", "", "Records a CPU profile to the desired location")
flagQuiet = flag.Bool("quiet", false, "Dont print startup message")
)
const (
envKeyProfile = "CPUPROFILE"
)
//////////////////////////////////////////////////////////////////////
//
// SHINPURU
// --------
// This is the main initialization for shinpuru which initializes
// all instances like the database middleware, the twitch notify
// listener service, life cycle timer, storage middleware,
// permission middleware, command handler and - finally -
// initializes the discord session event loop.
// shinpuru is configured via a configuration file which location
// can be passed via the '-c' parameter.
// When shinpuru is run in a Docker container, the '-docker' flag
// should be passed to fix configuration values like the location
// of the sqlite3 database (when the sqlite3 driver is used) or
// the web server exposure port.
//
//////////////////////////////////////////////////////////////////////
func main() {
// Parse command line flags
flag.Parse()
if !*flagQuiet {
startupmsg.Output(os.Stdout)
}
// Initialize dependency injection builder
diBuilder, _ := di.NewBuilder()
// Setup config parser
diBuilder.Add(di.Def{
Name: static.DiConfigParser,
Build: func(ctn di.Container) (p interface{}, err error) {
ext := strings.ToLower(filepath.Ext(*flagConfig))
switch ext {
case ".yml", ".yaml":
p = new(config.YAMLConfigParser)
case ".json":
p = new(config.JSONConfigParser)
default:
err = errors.New("unsupported configuration file")
}
return
},
})
// Initialize config
diBuilder.Add(di.Def{
Name: static.DiConfig,
Build: func(ctn di.Container) (interface{}, error) {
return inits.InitConfig(*flagConfig, ctn), nil
},
})
// Initialize metrics server
diBuilder.Add(di.Def{
Name: static.DiMetrics,
Build: func(ctn di.Container) (interface{}, error) {
return inits.InitMetrics(ctn), nil
},
})
// Initialize redis client
diBuilder.Add(di.Def{
Name: static.DiRedis,
Build: func(ctn di.Container) (interface{}, error) {
config := ctn.Get(static.DiConfig).(*config.Config)
return redis.NewClient(&redis.Options{
Addr: config.Database.Redis.Addr,
Password: config.Database.Redis.Password,
DB: config.Database.Redis.Type,
}), nil
},
})
// Initialize database middleware and shutdown routine
diBuilder.Add(di.Def{
Name: static.DiDatabase,
Build: func(ctn di.Container) (interface{}, error) {
return inits.InitDatabase(ctn), nil
},
Close: func(obj interface{}) error {
database := obj.(database.Database)
logrus.Info("Shutting down database connection...")
database.Close()
return nil
},
})
// Initialize twitch notification listener
diBuilder.Add(di.Def{
Name: static.DiTwitchNotifyListener,
Build: func(ctn di.Container) (interface{}, error) {
return listeners.NewListenerTwitchNotify(ctn), nil
},
Close: func(obj interface{}) error {
listener := obj.(*listeners.ListenerTwitchNotify)
logrus.Info("Shutting down twitch notify listener...")
listener.TearDown()
return nil
},
})
// Initialize twitch notification worker
diBuilder.Add(di.Def{
Name: static.DiTwitchNotifyWorker,
Build: func(ctn di.Container) (interface{}, error) {
return inits.InitTwitchNotifyWorker(ctn), nil
},
})
// Initialize life cycle timer
diBuilder.Add(di.Def{
Name: static.DiLifecycleTimer,
Build: func(ctn di.Container) (interface{}, error) {
return inits.InitLTCTimer(ctn), nil
},
})
// Initialize storage middleware
diBuilder.Add(di.Def{
Name: static.DiObjectStorage,
Build: func(ctn di.Container) (interface{}, error) {
return inits.InitStorage(ctn), nil
},
})
// Initialize permissions command handler middleware
diBuilder.Add(di.Def{
Name: static.DiPermissionMiddleware,
Build: func(ctn di.Container) (interface{}, error) {
return middleware.NewPermissionMiddleware(ctn), nil
},
})
// Initialize ghost ping ignore command handler middleware
diBuilder.Add(di.Def{
Name: static.DiGhostpingIgnoreMiddleware,
Build: func(ctn di.Container) (interface{}, error) {
return middleware.NewGhostPingIgnoreMiddleware(), nil
},
})
// Initialize discord bot session and shutdown routine
diBuilder.Add(di.Def{
Name: static.DiDiscordSession,
Build: func(ctn di.Container) (interface{}, error) {
return discordgo.New()
},
Close: func(obj interface{}) error {
session := obj.(*discordgo.Session)
logrus.Info("Shutting down bot session...")
session.Close()
return nil
},
})
// Initialize Discord OAuth Module
diBuilder.Add(di.Def{
Name: static.DiDiscordOAuthModule,
Build: func(ctn di.Container) (interface{}, error) {
return inits.InitDiscordOAuth(ctn), nil
}, | })
// Initialize auth refresh token handler
diBuilder.Add(di.Def{
Name: static.DiAuthRefreshTokenHandler,
Build: func(ctn di.Container) (interface{}, error) {
return auth.NewDatabaseRefreshTokenHandler(ctn), nil
},
})
// Initialize auth access token handler
diBuilder.Add(di.Def{
Name: static.DiAuthAccessTokenHandler,
Build: func(ctn di.Container) (interface{}, error) {
return auth.NewJWTAccessTokenHandler(ctn)
},
})
// Initialize auth API token handler
diBuilder.Add(di.Def{
Name: static.DiAuthAPITokenHandler,
Build: func(ctn di.Container) (interface{}, error) {
return auth.NewDatabaseAPITokenHandler(ctn)
},
})
// Initialize OAuth API handler implementation
diBuilder.Add(di.Def{
Name: static.DiOAuthHandler,
Build: func(ctn di.Container) (interface{}, error) {
return auth.NewRefreshTokenRequestHandler(ctn), nil
},
})
// Initialize access token authorization middleware
diBuilder.Add(di.Def{
Name: static.DiAuthMiddleware,
Build: func(ctn di.Container) (interface{}, error) {
return auth.NewAccessTokenMiddleware(ctn), nil
},
})
// Initialize OTA generator
diBuilder.Add(di.Def{
Name: static.DiOneTimeAuth,
Build: func(ctn di.Container) (interface{}, error) {
return onetimeauth.NewJwt(&onetimeauth.JwtOptions{
Issuer: "shinpuru v." + embedded.AppVersion,
})
},
})
// Initialize backup handler
diBuilder.Add(di.Def{
Name: static.DiBackupHandler,
Build: func(ctn di.Container) (interface{}, error) {
return backup.New(ctn), nil
},
})
// Initialize command handler
diBuilder.Add(di.Def{
Name: static.DiCommandHandler,
Build: func(ctn di.Container) (interface{}, error) {
return inits.InitCommandHandler(ctn), nil
},
})
// Initialize web server
diBuilder.Add(di.Def{
Name: static.DiWebserver,
Build: func(ctn di.Container) (interface{}, error) {
return inits.InitWebServer(ctn), nil
},
})
// Initialize code execution factroy
diBuilder.Add(di.Def{
Name: static.DiCodeExecFactory,
Build: func(ctn di.Container) (interface{}, error) {
return inits.InitCodeExec(ctn), nil
},
})
// Initialize karma service
diBuilder.Add(di.Def{
Name: static.DiKarma,
Build: func(ctn di.Container) (interface{}, error) {
return karma.NewKarmaService(ctn), nil
},
})
// Initialize report service
diBuilder.Add(di.Def{
Name: static.DiReport,
Build: func(ctn di.Container) (interface{}, error) {
return report.New(ctn), nil
},
})
// Initialize guild logger
diBuilder.Add(di.Def{
Name: static.DiGuildLog,
Build: func(ctn di.Container) (interface{}, error) {
return guildlog.New(ctn), nil
},
})
// Initialize KV cache
diBuilder.Add(di.Def{
Name: static.DiKVCache,
Build: func(ctn di.Container) (interface{}, error) {
return kvcache.NewTimedmapCache(10 * time.Minute), nil
},
})
diBuilder.Add(di.Def{
Name: static.DiState,
Build: func(ctn di.Container) (interface{}, error) {
return inits.InitState(ctn)
},
})
// Build dependency injection container
ctn := diBuilder.Build()
// Setting log level from config
cfg := ctn.Get(static.DiConfig).(*config.Config)
logrus.SetLevel(logrus.Level(cfg.Logging.LogLevel))
logrus.SetFormatter(&logrus.TextFormatter{
ForceColors: true,
FullTimestamp: true,
TimestampFormat: "2006/01/02 15:04:05 MST",
})
// Initial log output
logrus.Info("Starting up...")
if profLoc := util.GetEnv(envKeyProfile, *flagProfile); profLoc != "" {
setupProfiler(profLoc)
}
if *flagDevMode {
setupDevMode()
}
// Initialize discord session and event
// handlers
inits.InitDiscordBotSession(ctn)
// This is currently the really hacky workaround
// to bypass the di.Container when trying to get
// the Command handler instance inside a command
// context, because the handler can not resolve
// itself on build, so it is bypassed here using
// shireikans object map. Maybe I find a better
// solution for that at some time.
handler := ctn.Get(static.DiCommandHandler).(shireikan.Handler)
handler.SetObject(static.DiCommandHandler, handler)
// Get Web WebServer instance to start web
// server listener
ctn.Get(static.DiWebserver)
// Get Backup Handler to ensure backup
// timer is running.
ctn.Get(static.DiBackupHandler)
// Get Metrics Server to start metrics
// endpoint.
ctn.Get(static.DiMetrics)
// Block main go routine until one of the following
// specified exit syscalls occure.
logrus.Info("Started event loop. Stop with CTRL-C...")
logrus.WithField("took", startuptime.Took().String()).Info("Initialization finished")
sc := make(chan os.Signal, 1)
signal.Notify(sc, syscall.SIGINT, syscall.SIGTERM, os.Interrupt, os.Kill)
<-sc
// Tear down dependency instances
ctn.DeleteWithSubContainers()
}
func setupDevMode() {
if embedded.IsRelease() {
logrus.Fatal("development mode is not available in production builds")
}
util.DevModeEnabled = true
// Angular dev server
angServ := angularservice.New(angularservice.Options{
Stdout: os.Stdout,
Stderr: os.Stderr,
Cd: "web",
Port: 8081,
})
logrus.Info("Starting Angular dev server...")
if err := angServ.Start(); err != nil {
logrus.WithError(err).Fatal("Failed starting Angular dev server")
}
defer func() {
logrus.Info("Shutting down Angular dev server...")
angServ.Stop()
}()
}
func setupProfiler(profLoc string) {
f, err := os.Create(profLoc)
if err != nil {
logrus.WithError(err).Fatal("failed starting profiler")
}
pprof.StartCPUProfile(f)
logrus.WithField("location", profLoc).Warning("CPU profiling is active")
defer pprof.StopCPUProfile()
} | random_line_split | |
main.go | package main
import (
"errors"
"flag"
"os"
"os/signal"
"path/filepath"
"runtime/pprof"
"strings"
"syscall"
"time"
"github.com/bwmarrin/discordgo"
"github.com/go-redis/redis/v8"
"github.com/sarulabs/di/v2"
"github.com/sirupsen/logrus"
"github.com/zekroTJA/shinpuru/internal/config"
"github.com/zekroTJA/shinpuru/internal/inits"
"github.com/zekroTJA/shinpuru/internal/listeners"
"github.com/zekroTJA/shinpuru/internal/middleware"
"github.com/zekroTJA/shinpuru/internal/services/backup"
"github.com/zekroTJA/shinpuru/internal/services/database"
"github.com/zekroTJA/shinpuru/internal/services/guildlog"
"github.com/zekroTJA/shinpuru/internal/services/karma"
"github.com/zekroTJA/shinpuru/internal/services/kvcache"
"github.com/zekroTJA/shinpuru/internal/services/report"
"github.com/zekroTJA/shinpuru/internal/services/webserver/auth"
"github.com/zekroTJA/shinpuru/internal/util"
"github.com/zekroTJA/shinpuru/internal/util/embedded"
"github.com/zekroTJA/shinpuru/internal/util/startupmsg"
"github.com/zekroTJA/shinpuru/internal/util/static"
"github.com/zekroTJA/shinpuru/pkg/onetimeauth/v2"
"github.com/zekroTJA/shinpuru/pkg/startuptime"
"github.com/zekroTJA/shireikan"
"github.com/zekroTJA/shinpuru/pkg/angularservice"
)
var (
flagConfig = flag.String("c", "config.yml", "The location of the main config file")
flagDocker = flag.Bool("docker", false, "wether shinpuru is running in a docker container or not")
flagDevMode = flag.Bool("devmode", false, "start in development mode")
flagForceColor = flag.Bool("forcecolor", false, "force log color")
flagProfile = flag.String("cpuprofile", "", "Records a CPU profile to the desired location")
flagQuiet = flag.Bool("quiet", false, "Dont print startup message")
)
const (
envKeyProfile = "CPUPROFILE"
)
//////////////////////////////////////////////////////////////////////
//
// SHINPURU
// --------
// This is the main initialization for shinpuru which initializes
// all instances like the database middleware, the twitch notify
// listener service, life cycle timer, storage middleware,
// permission middleware, command handler and - finally -
// initializes the discord session event loop.
// shinpuru is configured via a configuration file which location
// can be passed via the '-c' parameter.
// When shinpuru is run in a Docker container, the '-docker' flag
// should be passed to fix configuration values like the location
// of the sqlite3 database (when the sqlite3 driver is used) or
// the web server exposure port.
//
//////////////////////////////////////////////////////////////////////
func | () {
// Parse command line flags
flag.Parse()
if !*flagQuiet {
startupmsg.Output(os.Stdout)
}
// Initialize dependency injection builder
diBuilder, _ := di.NewBuilder()
// Setup config parser
diBuilder.Add(di.Def{
Name: static.DiConfigParser,
Build: func(ctn di.Container) (p interface{}, err error) {
ext := strings.ToLower(filepath.Ext(*flagConfig))
switch ext {
case ".yml", ".yaml":
p = new(config.YAMLConfigParser)
case ".json":
p = new(config.JSONConfigParser)
default:
err = errors.New("unsupported configuration file")
}
return
},
})
// Initialize config
diBuilder.Add(di.Def{
Name: static.DiConfig,
Build: func(ctn di.Container) (interface{}, error) {
return inits.InitConfig(*flagConfig, ctn), nil
},
})
// Initialize metrics server
diBuilder.Add(di.Def{
Name: static.DiMetrics,
Build: func(ctn di.Container) (interface{}, error) {
return inits.InitMetrics(ctn), nil
},
})
// Initialize redis client
diBuilder.Add(di.Def{
Name: static.DiRedis,
Build: func(ctn di.Container) (interface{}, error) {
config := ctn.Get(static.DiConfig).(*config.Config)
return redis.NewClient(&redis.Options{
Addr: config.Database.Redis.Addr,
Password: config.Database.Redis.Password,
DB: config.Database.Redis.Type,
}), nil
},
})
// Initialize database middleware and shutdown routine
diBuilder.Add(di.Def{
Name: static.DiDatabase,
Build: func(ctn di.Container) (interface{}, error) {
return inits.InitDatabase(ctn), nil
},
Close: func(obj interface{}) error {
database := obj.(database.Database)
logrus.Info("Shutting down database connection...")
database.Close()
return nil
},
})
// Initialize twitch notification listener
diBuilder.Add(di.Def{
Name: static.DiTwitchNotifyListener,
Build: func(ctn di.Container) (interface{}, error) {
return listeners.NewListenerTwitchNotify(ctn), nil
},
Close: func(obj interface{}) error {
listener := obj.(*listeners.ListenerTwitchNotify)
logrus.Info("Shutting down twitch notify listener...")
listener.TearDown()
return nil
},
})
// Initialize twitch notification worker
diBuilder.Add(di.Def{
Name: static.DiTwitchNotifyWorker,
Build: func(ctn di.Container) (interface{}, error) {
return inits.InitTwitchNotifyWorker(ctn), nil
},
})
// Initialize life cycle timer
diBuilder.Add(di.Def{
Name: static.DiLifecycleTimer,
Build: func(ctn di.Container) (interface{}, error) {
return inits.InitLTCTimer(ctn), nil
},
})
// Initialize storage middleware
diBuilder.Add(di.Def{
Name: static.DiObjectStorage,
Build: func(ctn di.Container) (interface{}, error) {
return inits.InitStorage(ctn), nil
},
})
// Initialize permissions command handler middleware
diBuilder.Add(di.Def{
Name: static.DiPermissionMiddleware,
Build: func(ctn di.Container) (interface{}, error) {
return middleware.NewPermissionMiddleware(ctn), nil
},
})
// Initialize ghost ping ignore command handler middleware
diBuilder.Add(di.Def{
Name: static.DiGhostpingIgnoreMiddleware,
Build: func(ctn di.Container) (interface{}, error) {
return middleware.NewGhostPingIgnoreMiddleware(), nil
},
})
// Initialize discord bot session and shutdown routine
diBuilder.Add(di.Def{
Name: static.DiDiscordSession,
Build: func(ctn di.Container) (interface{}, error) {
return discordgo.New()
},
Close: func(obj interface{}) error {
session := obj.(*discordgo.Session)
logrus.Info("Shutting down bot session...")
session.Close()
return nil
},
})
// Initialize Discord OAuth Module
diBuilder.Add(di.Def{
Name: static.DiDiscordOAuthModule,
Build: func(ctn di.Container) (interface{}, error) {
return inits.InitDiscordOAuth(ctn), nil
},
})
// Initialize auth refresh token handler
diBuilder.Add(di.Def{
Name: static.DiAuthRefreshTokenHandler,
Build: func(ctn di.Container) (interface{}, error) {
return auth.NewDatabaseRefreshTokenHandler(ctn), nil
},
})
// Initialize auth access token handler
diBuilder.Add(di.Def{
Name: static.DiAuthAccessTokenHandler,
Build: func(ctn di.Container) (interface{}, error) {
return auth.NewJWTAccessTokenHandler(ctn)
},
})
// Initialize auth API token handler
diBuilder.Add(di.Def{
Name: static.DiAuthAPITokenHandler,
Build: func(ctn di.Container) (interface{}, error) {
return auth.NewDatabaseAPITokenHandler(ctn)
},
})
// Initialize OAuth API handler implementation
diBuilder.Add(di.Def{
Name: static.DiOAuthHandler,
Build: func(ctn di.Container) (interface{}, error) {
return auth.NewRefreshTokenRequestHandler(ctn), nil
},
})
// Initialize access token authorization middleware
diBuilder.Add(di.Def{
Name: static.DiAuthMiddleware,
Build: func(ctn di.Container) (interface{}, error) {
return auth.NewAccessTokenMiddleware(ctn), nil
},
})
// Initialize OTA generator
diBuilder.Add(di.Def{
Name: static.DiOneTimeAuth,
Build: func(ctn di.Container) (interface{}, error) {
return onetimeauth.NewJwt(&onetimeauth.JwtOptions{
Issuer: "shinpuru v." + embedded.AppVersion,
})
},
})
// Initialize backup handler
diBuilder.Add(di.Def{
Name: static.DiBackupHandler,
Build: func(ctn di.Container) (interface{}, error) {
return backup.New(ctn), nil
},
})
// Initialize command handler
diBuilder.Add(di.Def{
Name: static.DiCommandHandler,
Build: func(ctn di.Container) (interface{}, error) {
return inits.InitCommandHandler(ctn), nil
},
})
// Initialize web server
diBuilder.Add(di.Def{
Name: static.DiWebserver,
Build: func(ctn di.Container) (interface{}, error) {
return inits.InitWebServer(ctn), nil
},
})
// Initialize code execution factroy
diBuilder.Add(di.Def{
Name: static.DiCodeExecFactory,
Build: func(ctn di.Container) (interface{}, error) {
return inits.InitCodeExec(ctn), nil
},
})
// Initialize karma service
diBuilder.Add(di.Def{
Name: static.DiKarma,
Build: func(ctn di.Container) (interface{}, error) {
return karma.NewKarmaService(ctn), nil
},
})
// Initialize report service
diBuilder.Add(di.Def{
Name: static.DiReport,
Build: func(ctn di.Container) (interface{}, error) {
return report.New(ctn), nil
},
})
// Initialize guild logger
diBuilder.Add(di.Def{
Name: static.DiGuildLog,
Build: func(ctn di.Container) (interface{}, error) {
return guildlog.New(ctn), nil
},
})
// Initialize KV cache
diBuilder.Add(di.Def{
Name: static.DiKVCache,
Build: func(ctn di.Container) (interface{}, error) {
return kvcache.NewTimedmapCache(10 * time.Minute), nil
},
})
diBuilder.Add(di.Def{
Name: static.DiState,
Build: func(ctn di.Container) (interface{}, error) {
return inits.InitState(ctn)
},
})
// Build dependency injection container
ctn := diBuilder.Build()
// Setting log level from config
cfg := ctn.Get(static.DiConfig).(*config.Config)
logrus.SetLevel(logrus.Level(cfg.Logging.LogLevel))
logrus.SetFormatter(&logrus.TextFormatter{
ForceColors: true,
FullTimestamp: true,
TimestampFormat: "2006/01/02 15:04:05 MST",
})
// Initial log output
logrus.Info("Starting up...")
if profLoc := util.GetEnv(envKeyProfile, *flagProfile); profLoc != "" {
setupProfiler(profLoc)
}
if *flagDevMode {
setupDevMode()
}
// Initialize discord session and event
// handlers
inits.InitDiscordBotSession(ctn)
// This is currently the really hacky workaround
// to bypass the di.Container when trying to get
// the Command handler instance inside a command
// context, because the handler can not resolve
// itself on build, so it is bypassed here using
// shireikans object map. Maybe I find a better
// solution for that at some time.
handler := ctn.Get(static.DiCommandHandler).(shireikan.Handler)
handler.SetObject(static.DiCommandHandler, handler)
// Get Web WebServer instance to start web
// server listener
ctn.Get(static.DiWebserver)
// Get Backup Handler to ensure backup
// timer is running.
ctn.Get(static.DiBackupHandler)
// Get Metrics Server to start metrics
// endpoint.
ctn.Get(static.DiMetrics)
// Block main go routine until one of the following
// specified exit syscalls occure.
logrus.Info("Started event loop. Stop with CTRL-C...")
logrus.WithField("took", startuptime.Took().String()).Info("Initialization finished")
sc := make(chan os.Signal, 1)
signal.Notify(sc, syscall.SIGINT, syscall.SIGTERM, os.Interrupt, os.Kill)
<-sc
// Tear down dependency instances
ctn.DeleteWithSubContainers()
}
func setupDevMode() {
if embedded.IsRelease() {
logrus.Fatal("development mode is not available in production builds")
}
util.DevModeEnabled = true
// Angular dev server
angServ := angularservice.New(angularservice.Options{
Stdout: os.Stdout,
Stderr: os.Stderr,
Cd: "web",
Port: 8081,
})
logrus.Info("Starting Angular dev server...")
if err := angServ.Start(); err != nil {
logrus.WithError(err).Fatal("Failed starting Angular dev server")
}
defer func() {
logrus.Info("Shutting down Angular dev server...")
angServ.Stop()
}()
}
func setupProfiler(profLoc string) {
f, err := os.Create(profLoc)
if err != nil {
logrus.WithError(err).Fatal("failed starting profiler")
}
pprof.StartCPUProfile(f)
logrus.WithField("location", profLoc).Warning("CPU profiling is active")
defer pprof.StopCPUProfile()
}
| main | identifier_name |
main.go | package main
import (
"errors"
"flag"
"os"
"os/signal"
"path/filepath"
"runtime/pprof"
"strings"
"syscall"
"time"
"github.com/bwmarrin/discordgo"
"github.com/go-redis/redis/v8"
"github.com/sarulabs/di/v2"
"github.com/sirupsen/logrus"
"github.com/zekroTJA/shinpuru/internal/config"
"github.com/zekroTJA/shinpuru/internal/inits"
"github.com/zekroTJA/shinpuru/internal/listeners"
"github.com/zekroTJA/shinpuru/internal/middleware"
"github.com/zekroTJA/shinpuru/internal/services/backup"
"github.com/zekroTJA/shinpuru/internal/services/database"
"github.com/zekroTJA/shinpuru/internal/services/guildlog"
"github.com/zekroTJA/shinpuru/internal/services/karma"
"github.com/zekroTJA/shinpuru/internal/services/kvcache"
"github.com/zekroTJA/shinpuru/internal/services/report"
"github.com/zekroTJA/shinpuru/internal/services/webserver/auth"
"github.com/zekroTJA/shinpuru/internal/util"
"github.com/zekroTJA/shinpuru/internal/util/embedded"
"github.com/zekroTJA/shinpuru/internal/util/startupmsg"
"github.com/zekroTJA/shinpuru/internal/util/static"
"github.com/zekroTJA/shinpuru/pkg/onetimeauth/v2"
"github.com/zekroTJA/shinpuru/pkg/startuptime"
"github.com/zekroTJA/shireikan"
"github.com/zekroTJA/shinpuru/pkg/angularservice"
)
var (
flagConfig = flag.String("c", "config.yml", "The location of the main config file")
flagDocker = flag.Bool("docker", false, "wether shinpuru is running in a docker container or not")
flagDevMode = flag.Bool("devmode", false, "start in development mode")
flagForceColor = flag.Bool("forcecolor", false, "force log color")
flagProfile = flag.String("cpuprofile", "", "Records a CPU profile to the desired location")
flagQuiet = flag.Bool("quiet", false, "Dont print startup message")
)
const (
envKeyProfile = "CPUPROFILE"
)
//////////////////////////////////////////////////////////////////////
//
// SHINPURU
// --------
// This is the main initialization for shinpuru which initializes
// all instances like the database middleware, the twitch notify
// listener service, life cycle timer, storage middleware,
// permission middleware, command handler and - finally -
// initializes the discord session event loop.
// shinpuru is configured via a configuration file which location
// can be passed via the '-c' parameter.
// When shinpuru is run in a Docker container, the '-docker' flag
// should be passed to fix configuration values like the location
// of the sqlite3 database (when the sqlite3 driver is used) or
// the web server exposure port.
//
//////////////////////////////////////////////////////////////////////
func main() {
// Parse command line flags
flag.Parse()
if !*flagQuiet |
// Initialize dependency injection builder
diBuilder, _ := di.NewBuilder()
// Setup config parser
diBuilder.Add(di.Def{
Name: static.DiConfigParser,
Build: func(ctn di.Container) (p interface{}, err error) {
ext := strings.ToLower(filepath.Ext(*flagConfig))
switch ext {
case ".yml", ".yaml":
p = new(config.YAMLConfigParser)
case ".json":
p = new(config.JSONConfigParser)
default:
err = errors.New("unsupported configuration file")
}
return
},
})
// Initialize config
diBuilder.Add(di.Def{
Name: static.DiConfig,
Build: func(ctn di.Container) (interface{}, error) {
return inits.InitConfig(*flagConfig, ctn), nil
},
})
// Initialize metrics server
diBuilder.Add(di.Def{
Name: static.DiMetrics,
Build: func(ctn di.Container) (interface{}, error) {
return inits.InitMetrics(ctn), nil
},
})
// Initialize redis client
diBuilder.Add(di.Def{
Name: static.DiRedis,
Build: func(ctn di.Container) (interface{}, error) {
config := ctn.Get(static.DiConfig).(*config.Config)
return redis.NewClient(&redis.Options{
Addr: config.Database.Redis.Addr,
Password: config.Database.Redis.Password,
DB: config.Database.Redis.Type,
}), nil
},
})
// Initialize database middleware and shutdown routine
diBuilder.Add(di.Def{
Name: static.DiDatabase,
Build: func(ctn di.Container) (interface{}, error) {
return inits.InitDatabase(ctn), nil
},
Close: func(obj interface{}) error {
database := obj.(database.Database)
logrus.Info("Shutting down database connection...")
database.Close()
return nil
},
})
// Initialize twitch notification listener
diBuilder.Add(di.Def{
Name: static.DiTwitchNotifyListener,
Build: func(ctn di.Container) (interface{}, error) {
return listeners.NewListenerTwitchNotify(ctn), nil
},
Close: func(obj interface{}) error {
listener := obj.(*listeners.ListenerTwitchNotify)
logrus.Info("Shutting down twitch notify listener...")
listener.TearDown()
return nil
},
})
// Initialize twitch notification worker
diBuilder.Add(di.Def{
Name: static.DiTwitchNotifyWorker,
Build: func(ctn di.Container) (interface{}, error) {
return inits.InitTwitchNotifyWorker(ctn), nil
},
})
// Initialize life cycle timer
diBuilder.Add(di.Def{
Name: static.DiLifecycleTimer,
Build: func(ctn di.Container) (interface{}, error) {
return inits.InitLTCTimer(ctn), nil
},
})
// Initialize storage middleware
diBuilder.Add(di.Def{
Name: static.DiObjectStorage,
Build: func(ctn di.Container) (interface{}, error) {
return inits.InitStorage(ctn), nil
},
})
// Initialize permissions command handler middleware
diBuilder.Add(di.Def{
Name: static.DiPermissionMiddleware,
Build: func(ctn di.Container) (interface{}, error) {
return middleware.NewPermissionMiddleware(ctn), nil
},
})
// Initialize ghost ping ignore command handler middleware
diBuilder.Add(di.Def{
Name: static.DiGhostpingIgnoreMiddleware,
Build: func(ctn di.Container) (interface{}, error) {
return middleware.NewGhostPingIgnoreMiddleware(), nil
},
})
// Initialize discord bot session and shutdown routine
diBuilder.Add(di.Def{
Name: static.DiDiscordSession,
Build: func(ctn di.Container) (interface{}, error) {
return discordgo.New()
},
Close: func(obj interface{}) error {
session := obj.(*discordgo.Session)
logrus.Info("Shutting down bot session...")
session.Close()
return nil
},
})
// Initialize Discord OAuth Module
diBuilder.Add(di.Def{
Name: static.DiDiscordOAuthModule,
Build: func(ctn di.Container) (interface{}, error) {
return inits.InitDiscordOAuth(ctn), nil
},
})
// Initialize auth refresh token handler
diBuilder.Add(di.Def{
Name: static.DiAuthRefreshTokenHandler,
Build: func(ctn di.Container) (interface{}, error) {
return auth.NewDatabaseRefreshTokenHandler(ctn), nil
},
})
// Initialize auth access token handler
diBuilder.Add(di.Def{
Name: static.DiAuthAccessTokenHandler,
Build: func(ctn di.Container) (interface{}, error) {
return auth.NewJWTAccessTokenHandler(ctn)
},
})
// Initialize auth API token handler
diBuilder.Add(di.Def{
Name: static.DiAuthAPITokenHandler,
Build: func(ctn di.Container) (interface{}, error) {
return auth.NewDatabaseAPITokenHandler(ctn)
},
})
// Initialize OAuth API handler implementation
diBuilder.Add(di.Def{
Name: static.DiOAuthHandler,
Build: func(ctn di.Container) (interface{}, error) {
return auth.NewRefreshTokenRequestHandler(ctn), nil
},
})
// Initialize access token authorization middleware
diBuilder.Add(di.Def{
Name: static.DiAuthMiddleware,
Build: func(ctn di.Container) (interface{}, error) {
return auth.NewAccessTokenMiddleware(ctn), nil
},
})
// Initialize OTA generator
diBuilder.Add(di.Def{
Name: static.DiOneTimeAuth,
Build: func(ctn di.Container) (interface{}, error) {
return onetimeauth.NewJwt(&onetimeauth.JwtOptions{
Issuer: "shinpuru v." + embedded.AppVersion,
})
},
})
// Initialize backup handler
diBuilder.Add(di.Def{
Name: static.DiBackupHandler,
Build: func(ctn di.Container) (interface{}, error) {
return backup.New(ctn), nil
},
})
// Initialize command handler
diBuilder.Add(di.Def{
Name: static.DiCommandHandler,
Build: func(ctn di.Container) (interface{}, error) {
return inits.InitCommandHandler(ctn), nil
},
})
// Initialize web server
diBuilder.Add(di.Def{
Name: static.DiWebserver,
Build: func(ctn di.Container) (interface{}, error) {
return inits.InitWebServer(ctn), nil
},
})
// Initialize code execution factroy
diBuilder.Add(di.Def{
Name: static.DiCodeExecFactory,
Build: func(ctn di.Container) (interface{}, error) {
return inits.InitCodeExec(ctn), nil
},
})
// Initialize karma service
diBuilder.Add(di.Def{
Name: static.DiKarma,
Build: func(ctn di.Container) (interface{}, error) {
return karma.NewKarmaService(ctn), nil
},
})
// Initialize report service
diBuilder.Add(di.Def{
Name: static.DiReport,
Build: func(ctn di.Container) (interface{}, error) {
return report.New(ctn), nil
},
})
// Initialize guild logger
diBuilder.Add(di.Def{
Name: static.DiGuildLog,
Build: func(ctn di.Container) (interface{}, error) {
return guildlog.New(ctn), nil
},
})
// Initialize KV cache
diBuilder.Add(di.Def{
Name: static.DiKVCache,
Build: func(ctn di.Container) (interface{}, error) {
return kvcache.NewTimedmapCache(10 * time.Minute), nil
},
})
diBuilder.Add(di.Def{
Name: static.DiState,
Build: func(ctn di.Container) (interface{}, error) {
return inits.InitState(ctn)
},
})
// Build dependency injection container
ctn := diBuilder.Build()
// Setting log level from config
cfg := ctn.Get(static.DiConfig).(*config.Config)
logrus.SetLevel(logrus.Level(cfg.Logging.LogLevel))
logrus.SetFormatter(&logrus.TextFormatter{
ForceColors: true,
FullTimestamp: true,
TimestampFormat: "2006/01/02 15:04:05 MST",
})
// Initial log output
logrus.Info("Starting up...")
if profLoc := util.GetEnv(envKeyProfile, *flagProfile); profLoc != "" {
setupProfiler(profLoc)
}
if *flagDevMode {
setupDevMode()
}
// Initialize discord session and event
// handlers
inits.InitDiscordBotSession(ctn)
// This is currently the really hacky workaround
// to bypass the di.Container when trying to get
// the Command handler instance inside a command
// context, because the handler can not resolve
// itself on build, so it is bypassed here using
// shireikans object map. Maybe I find a better
// solution for that at some time.
handler := ctn.Get(static.DiCommandHandler).(shireikan.Handler)
handler.SetObject(static.DiCommandHandler, handler)
// Get Web WebServer instance to start web
// server listener
ctn.Get(static.DiWebserver)
// Get Backup Handler to ensure backup
// timer is running.
ctn.Get(static.DiBackupHandler)
// Get Metrics Server to start metrics
// endpoint.
ctn.Get(static.DiMetrics)
// Block main go routine until one of the following
// specified exit syscalls occure.
logrus.Info("Started event loop. Stop with CTRL-C...")
logrus.WithField("took", startuptime.Took().String()).Info("Initialization finished")
sc := make(chan os.Signal, 1)
signal.Notify(sc, syscall.SIGINT, syscall.SIGTERM, os.Interrupt, os.Kill)
<-sc
// Tear down dependency instances
ctn.DeleteWithSubContainers()
}
func setupDevMode() {
if embedded.IsRelease() {
logrus.Fatal("development mode is not available in production builds")
}
util.DevModeEnabled = true
// Angular dev server
angServ := angularservice.New(angularservice.Options{
Stdout: os.Stdout,
Stderr: os.Stderr,
Cd: "web",
Port: 8081,
})
logrus.Info("Starting Angular dev server...")
if err := angServ.Start(); err != nil {
logrus.WithError(err).Fatal("Failed starting Angular dev server")
}
defer func() {
logrus.Info("Shutting down Angular dev server...")
angServ.Stop()
}()
}
func setupProfiler(profLoc string) {
f, err := os.Create(profLoc)
if err != nil {
logrus.WithError(err).Fatal("failed starting profiler")
}
pprof.StartCPUProfile(f)
logrus.WithField("location", profLoc).Warning("CPU profiling is active")
defer pprof.StopCPUProfile()
}
| {
startupmsg.Output(os.Stdout)
} | conditional_block |
server_http_response.go | // Copyright (c) 2023 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package zanzibar
import (
"context"
"fmt"
"net/http"
"strconv"
"strings"
"time"
"github.com/buger/jsonparser"
"github.com/pkg/errors"
"github.com/uber-go/tally"
"github.com/uber/zanzibar/runtime/jsonwrapper"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
)
// ServerHTTPResponse struct manages server http response
type ServerHTTPResponse struct {
Request *ServerHTTPRequest
StatusCode int
responseWriter http.ResponseWriter
flushed bool
finished bool
finishTime time.Time
DownstreamFinishTime time.Duration
ClientType string
pendingBodyBytes []byte
pendingBodyObj interface{}
pendingStatusCode int
contextLogger ContextLogger
scope tally.Scope
jsonWrapper jsonwrapper.JSONWrapper
Err error
}
// NewServerHTTPResponse is helper function to alloc ServerHTTPResponse
func NewServerHTTPResponse(
w http.ResponseWriter,
req *ServerHTTPRequest,
) *ServerHTTPResponse {
return &ServerHTTPResponse{
Request: req,
StatusCode: 200,
responseWriter: w,
contextLogger: req.contextLogger,
scope: req.scope,
jsonWrapper: req.jsonWrapper,
}
}
// finish will handle final logic, like metrics
func (res *ServerHTTPResponse) finish(ctx context.Context) {
logFields := GetLogFieldsFromCtx(ctx)
if !res.Request.started {
/* coverage ignore next line */
res.contextLogger.Error(ctx,
"Forgot to start server response",
append(logFields, zap.String("path", res.Request.URL.Path))...,
)
/* coverage ignore next line */
return
}
if res.finished {
/* coverage ignore next line */
res.contextLogger.Error(ctx,
"Finished a server response multiple times",
append(logFields, zap.String("path", res.Request.URL.Path))...,
)
/* coverage ignore next line */
return
}
res.finished = true
res.finishTime = time.Now()
_, known := knownStatusCodes[res.StatusCode]
tagged := res.scope.Tagged(map[string]string{
scopeTagStatus: fmt.Sprintf("%d", res.StatusCode), // no need to put this tag on the context because this is the end of response life cycle
scopeTagClientType: res.ClientType,
})
delta := res.finishTime.Sub(res.Request.startTime)
tagged.Timer(endpointLatency).Record(delta)
tagged.Histogram(endpointLatencyHist, tally.DefaultBuckets).RecordDuration(delta)
if res.DownstreamFinishTime != 0 {
overhead := delta - res.DownstreamFinishTime
overheadRatio := overhead.Seconds() / delta.Seconds()
tagged.Timer(endpointOverheadLatency).Record(overhead)
tagged.Histogram(endpointOverheadLatencyHist, tally.DefaultBuckets).RecordDuration(overhead)
tagged.Gauge(endpointOverheadRatio).Update(overheadRatio)
}
if !known {
res.contextLogger.Error(ctx,
"Unknown status code",
append(logFields, zap.Int("UnknownStatusCode", res.StatusCode))...,
)
} else {
tagged.Counter(endpointStatus).Inc(1)
}
logFn := res.contextLogger.Debug
if !known || res.StatusCode >= 400 && res.StatusCode < 600 {
tagged.Counter(endpointAppErrors).Inc(1)
logFn = res.contextLogger.WarnZ
}
span := res.Request.GetSpan()
if span != nil {
span.Finish()
}
logFn(ctx,
fmt.Sprintf("Finished an incoming server HTTP request with %d status code", res.StatusCode),
append(logFields, serverHTTPLogFields(res.Request, res)...)...,
)
}
func serverHTTPLogFields(req *ServerHTTPRequest, res *ServerHTTPResponse) []zapcore.Field {
fields := []zapcore.Field{
zap.Int(logFieldResponseStatusCode, res.StatusCode),
}
for k, v := range res.Headers() {
if len(v) > 0 {
fields = append(fields, zap.String(
fmt.Sprintf("%s-%s", logFieldEndpointResponseHeaderPrefix, k),
strings.Join(v, ", "),
))
}
}
if res.Err != nil {
fields = append(fields, zap.Error(res.Err))
cause := errors.Cause(res.Err)
if cause != nil && cause != res.Err {
fields = append(fields, zap.NamedError("errorCause", cause))
}
}
return fields
}
// SendErrorString helper to send an error string
func (res *ServerHTTPResponse) SendErrorString(
statusCode int, errMsg string,
) {
res.WriteJSONBytes(statusCode, nil,
[]byte(`{"error":"`+errMsg+`"}`),
)
}
// SendError helper to send an server error message, propagates underlying cause to logs etc.
func (res *ServerHTTPResponse) SendError(
statusCode int, errMsg string, errCause error,
) {
res.Err = errCause
res.WriteJSONBytes(statusCode, nil,
[]byte(`{"error":"`+errMsg+`"}`),
)
}
// WriteBytes writes a byte[] slice that is valid Response
func (res *ServerHTTPResponse) WriteBytes(
statusCode int, headers Header, bytes []byte,
) {
if headers != nil {
for _, k := range headers.Keys() {
v, ok := headers.Get(k)
if ok {
res.responseWriter.Header().Set(k, v)
}
}
}
res.pendingStatusCode = statusCode
res.pendingBodyBytes = bytes
}
// WriteJSONBytes writes a byte[] slice that is valid json to Response
func (res *ServerHTTPResponse) WriteJSONBytes(
statusCode int, headers Header, bytes []byte,
) {
if headers == nil {
headers = ServerHTTPHeader{}
}
headers.Add("content-type", "application/json")
res.WriteBytes(statusCode, headers, bytes)
}
// MarshalResponseJSON serializes a json serializable into bytes
func (res *ServerHTTPResponse) MarshalResponseJSON(body interface{}) []byte {
ctx := res.Request.Context()
if body == nil {
res.SendError(500, "Could not serialize json response", errors.New("No Body JSON"))
res.contextLogger.Error(ctx, "Could not serialize nil pointer body")
return nil
}
bytes, err := res.jsonWrapper.Marshal(body)
if err != nil {
res.SendError(500, "Could not serialize json response", err)
res.contextLogger.Error(ctx, "Could not serialize json response", zap.Error(err))
return nil
}
return bytes
}
// SendResponse sets content-type if not present and fills Response
func (res *ServerHTTPResponse) | (statusCode int, headers Header, body interface{}, bytes []byte) {
contentTypePresent := false
if headers != nil {
for _, k := range headers.Keys() {
v, ok := headers.Get(k)
if ok {
if k == "Content-Type" {
contentTypePresent = true
}
res.responseWriter.Header().Set(k, v)
}
}
}
// Set the content-type to application/json if not already available
if !contentTypePresent {
res.responseWriter.Header().
Set("content-type", "application/json")
}
res.pendingStatusCode = statusCode
res.pendingBodyBytes = bytes
res.pendingBodyObj = body
}
// WriteJSON writes a json serializable struct to Response
func (res *ServerHTTPResponse) WriteJSON(
statusCode int, headers Header, body interface{},
) {
bytes := res.MarshalResponseJSON(body)
if bytes == nil {
return
}
res.SendResponse(statusCode, headers, body, bytes)
}
// PeekBody allows for inspecting a key path inside the body
// that is not flushed yet. This is useful for response middlewares
// that want to inspect the response body.
func (res *ServerHTTPResponse) PeekBody(
keys ...string,
) ([]byte, jsonparser.ValueType, error) {
value, valueType, _, err := jsonparser.Get(
res.pendingBodyBytes, keys...,
)
if err != nil {
return nil, -1, err
}
return value, valueType, nil
}
// Flush will write the body to the response. Before flush is called
// the body is pending. A pending body allows a response middleware to
// write a different body.
func (res *ServerHTTPResponse) flush(ctx context.Context) {
if res.flushed {
/* coverage ignore next line */
res.contextLogger.Error(ctx,
"Flushed a server response multiple times",
zap.String("path", res.Request.URL.Path),
)
/* coverage ignore next line */
return
}
res.flushed = true
res.writeHeader(res.pendingStatusCode)
if _, noContent := noContentStatusCodes[res.pendingStatusCode]; !noContent {
res.writeBytes(res.pendingBodyBytes)
}
res.finish(ctx)
}
func (res *ServerHTTPResponse) writeHeader(statusCode int) {
res.StatusCode = statusCode
res.responseWriter.WriteHeader(statusCode)
}
// WriteBytes writes raw bytes to output
func (res *ServerHTTPResponse) writeBytes(bytes []byte) {
_, err := res.responseWriter.Write(bytes)
if err != nil {
/* coverage ignore next line */
res.contextLogger.Error(res.Request.Context(),
"Could not write string to resp body",
zap.Error(err),
zap.String("bytesLength", strconv.Itoa(len(bytes))),
)
}
}
// GetPendingResponse lets you read the pending body bytes, obj and status code
// which isn't sent back yet.
func (res *ServerHTTPResponse) GetPendingResponse() ([]byte, int) {
return res.pendingBodyBytes, res.pendingStatusCode
}
// GetPendingResponseObject lets you read the pending body object
// which isn't sent back yet.
func (res *ServerHTTPResponse) GetPendingResponseObject() interface{} {
return res.pendingBodyObj
}
// Headers returns the underlying http response's headers
func (res *ServerHTTPResponse) Headers() http.Header {
return res.responseWriter.Header()
}
| SendResponse | identifier_name |
server_http_response.go | // Copyright (c) 2023 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package zanzibar
import (
"context"
"fmt"
"net/http"
"strconv"
"strings"
"time"
"github.com/buger/jsonparser"
"github.com/pkg/errors"
"github.com/uber-go/tally"
"github.com/uber/zanzibar/runtime/jsonwrapper"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
)
// ServerHTTPResponse struct manages server http response
type ServerHTTPResponse struct {
Request *ServerHTTPRequest
StatusCode int
responseWriter http.ResponseWriter
flushed bool
finished bool
finishTime time.Time
DownstreamFinishTime time.Duration
ClientType string
pendingBodyBytes []byte
pendingBodyObj interface{}
pendingStatusCode int
contextLogger ContextLogger
scope tally.Scope
jsonWrapper jsonwrapper.JSONWrapper
Err error
}
// NewServerHTTPResponse is helper function to alloc ServerHTTPResponse
func NewServerHTTPResponse(
w http.ResponseWriter,
req *ServerHTTPRequest,
) *ServerHTTPResponse {
return &ServerHTTPResponse{
Request: req,
StatusCode: 200,
responseWriter: w,
contextLogger: req.contextLogger,
scope: req.scope,
jsonWrapper: req.jsonWrapper,
}
}
// finish will handle final logic, like metrics
func (res *ServerHTTPResponse) finish(ctx context.Context) {
logFields := GetLogFieldsFromCtx(ctx)
if !res.Request.started {
/* coverage ignore next line */
res.contextLogger.Error(ctx,
"Forgot to start server response",
append(logFields, zap.String("path", res.Request.URL.Path))...,
)
/* coverage ignore next line */
return
}
if res.finished {
/* coverage ignore next line */
res.contextLogger.Error(ctx,
"Finished a server response multiple times",
append(logFields, zap.String("path", res.Request.URL.Path))...,
)
/* coverage ignore next line */
return
}
res.finished = true
res.finishTime = time.Now()
_, known := knownStatusCodes[res.StatusCode]
tagged := res.scope.Tagged(map[string]string{
scopeTagStatus: fmt.Sprintf("%d", res.StatusCode), // no need to put this tag on the context because this is the end of response life cycle
scopeTagClientType: res.ClientType,
})
delta := res.finishTime.Sub(res.Request.startTime)
tagged.Timer(endpointLatency).Record(delta)
tagged.Histogram(endpointLatencyHist, tally.DefaultBuckets).RecordDuration(delta)
if res.DownstreamFinishTime != 0 {
overhead := delta - res.DownstreamFinishTime
overheadRatio := overhead.Seconds() / delta.Seconds()
tagged.Timer(endpointOverheadLatency).Record(overhead)
tagged.Histogram(endpointOverheadLatencyHist, tally.DefaultBuckets).RecordDuration(overhead)
tagged.Gauge(endpointOverheadRatio).Update(overheadRatio)
}
if !known {
res.contextLogger.Error(ctx,
"Unknown status code",
append(logFields, zap.Int("UnknownStatusCode", res.StatusCode))...,
)
} else {
tagged.Counter(endpointStatus).Inc(1)
}
logFn := res.contextLogger.Debug
if !known || res.StatusCode >= 400 && res.StatusCode < 600 {
tagged.Counter(endpointAppErrors).Inc(1)
logFn = res.contextLogger.WarnZ
}
span := res.Request.GetSpan()
if span != nil {
span.Finish()
}
logFn(ctx,
fmt.Sprintf("Finished an incoming server HTTP request with %d status code", res.StatusCode),
append(logFields, serverHTTPLogFields(res.Request, res)...)...,
)
}
func serverHTTPLogFields(req *ServerHTTPRequest, res *ServerHTTPResponse) []zapcore.Field {
fields := []zapcore.Field{
zap.Int(logFieldResponseStatusCode, res.StatusCode),
}
for k, v := range res.Headers() {
if len(v) > 0 {
fields = append(fields, zap.String(
fmt.Sprintf("%s-%s", logFieldEndpointResponseHeaderPrefix, k),
strings.Join(v, ", "),
))
}
}
if res.Err != nil {
fields = append(fields, zap.Error(res.Err))
cause := errors.Cause(res.Err)
if cause != nil && cause != res.Err {
fields = append(fields, zap.NamedError("errorCause", cause))
}
}
return fields
}
// SendErrorString helper to send an error string
func (res *ServerHTTPResponse) SendErrorString(
statusCode int, errMsg string,
) {
res.WriteJSONBytes(statusCode, nil,
[]byte(`{"error":"`+errMsg+`"}`),
)
}
// SendError helper to send an server error message, propagates underlying cause to logs etc.
func (res *ServerHTTPResponse) SendError(
statusCode int, errMsg string, errCause error,
) {
res.Err = errCause
res.WriteJSONBytes(statusCode, nil,
[]byte(`{"error":"`+errMsg+`"}`),
)
}
// WriteBytes writes a byte[] slice that is valid Response
func (res *ServerHTTPResponse) WriteBytes(
statusCode int, headers Header, bytes []byte,
) {
if headers != nil {
for _, k := range headers.Keys() {
v, ok := headers.Get(k)
if ok {
res.responseWriter.Header().Set(k, v)
}
}
}
res.pendingStatusCode = statusCode
res.pendingBodyBytes = bytes
}
// WriteJSONBytes writes a byte[] slice that is valid json to Response
func (res *ServerHTTPResponse) WriteJSONBytes(
statusCode int, headers Header, bytes []byte,
) {
if headers == nil {
headers = ServerHTTPHeader{}
}
headers.Add("content-type", "application/json")
res.WriteBytes(statusCode, headers, bytes)
}
// MarshalResponseJSON serializes a json serializable into bytes
func (res *ServerHTTPResponse) MarshalResponseJSON(body interface{}) []byte {
ctx := res.Request.Context()
if body == nil {
res.SendError(500, "Could not serialize json response", errors.New("No Body JSON"))
res.contextLogger.Error(ctx, "Could not serialize nil pointer body")
return nil
}
bytes, err := res.jsonWrapper.Marshal(body)
if err != nil {
res.SendError(500, "Could not serialize json response", err)
res.contextLogger.Error(ctx, "Could not serialize json response", zap.Error(err))
return nil
}
return bytes
}
// SendResponse sets content-type if not present and fills Response
func (res *ServerHTTPResponse) SendResponse(statusCode int, headers Header, body interface{}, bytes []byte) {
contentTypePresent := false
if headers != nil {
for _, k := range headers.Keys() {
v, ok := headers.Get(k)
if ok {
if k == "Content-Type" {
contentTypePresent = true
}
res.responseWriter.Header().Set(k, v)
}
}
}
// Set the content-type to application/json if not already available
if !contentTypePresent {
res.responseWriter.Header().
Set("content-type", "application/json")
}
res.pendingStatusCode = statusCode
res.pendingBodyBytes = bytes
res.pendingBodyObj = body
}
// WriteJSON writes a json serializable struct to Response
func (res *ServerHTTPResponse) WriteJSON(
statusCode int, headers Header, body interface{},
) {
bytes := res.MarshalResponseJSON(body)
if bytes == nil {
return
}
res.SendResponse(statusCode, headers, body, bytes)
}
// PeekBody allows for inspecting a key path inside the body
// that is not flushed yet. This is useful for response middlewares
// that want to inspect the response body.
func (res *ServerHTTPResponse) PeekBody(
keys ...string,
) ([]byte, jsonparser.ValueType, error) {
value, valueType, _, err := jsonparser.Get(
res.pendingBodyBytes, keys...,
)
if err != nil |
return value, valueType, nil
}
// Flush will write the body to the response. Before flush is called
// the body is pending. A pending body allows a response middleware to
// write a different body.
func (res *ServerHTTPResponse) flush(ctx context.Context) {
if res.flushed {
/* coverage ignore next line */
res.contextLogger.Error(ctx,
"Flushed a server response multiple times",
zap.String("path", res.Request.URL.Path),
)
/* coverage ignore next line */
return
}
res.flushed = true
res.writeHeader(res.pendingStatusCode)
if _, noContent := noContentStatusCodes[res.pendingStatusCode]; !noContent {
res.writeBytes(res.pendingBodyBytes)
}
res.finish(ctx)
}
func (res *ServerHTTPResponse) writeHeader(statusCode int) {
res.StatusCode = statusCode
res.responseWriter.WriteHeader(statusCode)
}
// WriteBytes writes raw bytes to output
func (res *ServerHTTPResponse) writeBytes(bytes []byte) {
_, err := res.responseWriter.Write(bytes)
if err != nil {
/* coverage ignore next line */
res.contextLogger.Error(res.Request.Context(),
"Could not write string to resp body",
zap.Error(err),
zap.String("bytesLength", strconv.Itoa(len(bytes))),
)
}
}
// GetPendingResponse lets you read the pending body bytes, obj and status code
// which isn't sent back yet.
func (res *ServerHTTPResponse) GetPendingResponse() ([]byte, int) {
return res.pendingBodyBytes, res.pendingStatusCode
}
// GetPendingResponseObject lets you read the pending body object
// which isn't sent back yet.
func (res *ServerHTTPResponse) GetPendingResponseObject() interface{} {
return res.pendingBodyObj
}
// Headers returns the underlying http response's headers
func (res *ServerHTTPResponse) Headers() http.Header {
return res.responseWriter.Header()
}
| {
return nil, -1, err
} | conditional_block |
server_http_response.go | // Copyright (c) 2023 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package zanzibar
import (
"context"
"fmt"
"net/http"
"strconv"
"strings"
"time"
"github.com/buger/jsonparser"
"github.com/pkg/errors"
"github.com/uber-go/tally"
"github.com/uber/zanzibar/runtime/jsonwrapper"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
)
// ServerHTTPResponse struct manages server http response
type ServerHTTPResponse struct {
Request *ServerHTTPRequest
StatusCode int
responseWriter http.ResponseWriter
flushed bool
finished bool
finishTime time.Time
DownstreamFinishTime time.Duration
ClientType string
pendingBodyBytes []byte
pendingBodyObj interface{}
pendingStatusCode int
contextLogger ContextLogger
scope tally.Scope
jsonWrapper jsonwrapper.JSONWrapper
Err error
}
// NewServerHTTPResponse is helper function to alloc ServerHTTPResponse
func NewServerHTTPResponse(
w http.ResponseWriter,
req *ServerHTTPRequest,
) *ServerHTTPResponse {
return &ServerHTTPResponse{
Request: req,
StatusCode: 200,
responseWriter: w,
contextLogger: req.contextLogger,
scope: req.scope,
jsonWrapper: req.jsonWrapper,
}
}
// finish will handle final logic, like metrics
func (res *ServerHTTPResponse) finish(ctx context.Context) {
logFields := GetLogFieldsFromCtx(ctx)
if !res.Request.started {
/* coverage ignore next line */
res.contextLogger.Error(ctx,
"Forgot to start server response",
append(logFields, zap.String("path", res.Request.URL.Path))...,
)
/* coverage ignore next line */
return
}
if res.finished {
/* coverage ignore next line */
res.contextLogger.Error(ctx,
"Finished a server response multiple times",
append(logFields, zap.String("path", res.Request.URL.Path))...,
)
/* coverage ignore next line */
return
}
res.finished = true
res.finishTime = time.Now()
_, known := knownStatusCodes[res.StatusCode]
tagged := res.scope.Tagged(map[string]string{
scopeTagStatus: fmt.Sprintf("%d", res.StatusCode), // no need to put this tag on the context because this is the end of response life cycle
scopeTagClientType: res.ClientType,
})
delta := res.finishTime.Sub(res.Request.startTime)
tagged.Timer(endpointLatency).Record(delta)
tagged.Histogram(endpointLatencyHist, tally.DefaultBuckets).RecordDuration(delta)
if res.DownstreamFinishTime != 0 {
overhead := delta - res.DownstreamFinishTime
overheadRatio := overhead.Seconds() / delta.Seconds()
tagged.Timer(endpointOverheadLatency).Record(overhead)
tagged.Histogram(endpointOverheadLatencyHist, tally.DefaultBuckets).RecordDuration(overhead)
tagged.Gauge(endpointOverheadRatio).Update(overheadRatio)
}
if !known {
res.contextLogger.Error(ctx,
"Unknown status code",
append(logFields, zap.Int("UnknownStatusCode", res.StatusCode))...,
)
} else {
tagged.Counter(endpointStatus).Inc(1)
}
logFn := res.contextLogger.Debug
if !known || res.StatusCode >= 400 && res.StatusCode < 600 {
tagged.Counter(endpointAppErrors).Inc(1)
logFn = res.contextLogger.WarnZ
}
span := res.Request.GetSpan()
if span != nil {
span.Finish()
}
logFn(ctx,
fmt.Sprintf("Finished an incoming server HTTP request with %d status code", res.StatusCode),
append(logFields, serverHTTPLogFields(res.Request, res)...)...,
)
}
func serverHTTPLogFields(req *ServerHTTPRequest, res *ServerHTTPResponse) []zapcore.Field {
fields := []zapcore.Field{
zap.Int(logFieldResponseStatusCode, res.StatusCode),
}
for k, v := range res.Headers() {
if len(v) > 0 {
fields = append(fields, zap.String(
fmt.Sprintf("%s-%s", logFieldEndpointResponseHeaderPrefix, k),
strings.Join(v, ", "),
))
}
}
if res.Err != nil {
fields = append(fields, zap.Error(res.Err))
cause := errors.Cause(res.Err)
if cause != nil && cause != res.Err {
fields = append(fields, zap.NamedError("errorCause", cause))
}
}
return fields
}
// SendErrorString helper to send an error string
func (res *ServerHTTPResponse) SendErrorString(
statusCode int, errMsg string,
) {
res.WriteJSONBytes(statusCode, nil,
[]byte(`{"error":"`+errMsg+`"}`),
)
}
// SendError helper to send an server error message, propagates underlying cause to logs etc.
func (res *ServerHTTPResponse) SendError(
statusCode int, errMsg string, errCause error,
) {
res.Err = errCause
res.WriteJSONBytes(statusCode, nil,
[]byte(`{"error":"`+errMsg+`"}`),
)
}
// WriteBytes writes a byte[] slice that is valid Response
func (res *ServerHTTPResponse) WriteBytes(
statusCode int, headers Header, bytes []byte,
) {
if headers != nil {
for _, k := range headers.Keys() {
v, ok := headers.Get(k)
if ok {
res.responseWriter.Header().Set(k, v)
}
}
}
res.pendingStatusCode = statusCode
res.pendingBodyBytes = bytes
}
// WriteJSONBytes writes a byte[] slice that is valid json to Response
func (res *ServerHTTPResponse) WriteJSONBytes(
statusCode int, headers Header, bytes []byte,
) {
if headers == nil {
headers = ServerHTTPHeader{}
}
headers.Add("content-type", "application/json")
res.WriteBytes(statusCode, headers, bytes)
}
// MarshalResponseJSON serializes a json serializable into bytes
func (res *ServerHTTPResponse) MarshalResponseJSON(body interface{}) []byte {
ctx := res.Request.Context()
if body == nil {
res.SendError(500, "Could not serialize json response", errors.New("No Body JSON"))
res.contextLogger.Error(ctx, "Could not serialize nil pointer body")
return nil
}
bytes, err := res.jsonWrapper.Marshal(body)
if err != nil {
res.SendError(500, "Could not serialize json response", err)
res.contextLogger.Error(ctx, "Could not serialize json response", zap.Error(err))
return nil
}
return bytes
}
// SendResponse sets content-type if not present and fills Response
func (res *ServerHTTPResponse) SendResponse(statusCode int, headers Header, body interface{}, bytes []byte) {
contentTypePresent := false
if headers != nil {
for _, k := range headers.Keys() {
v, ok := headers.Get(k)
if ok {
if k == "Content-Type" {
contentTypePresent = true
}
res.responseWriter.Header().Set(k, v)
}
}
}
// Set the content-type to application/json if not already available
if !contentTypePresent {
res.responseWriter.Header().
Set("content-type", "application/json")
}
res.pendingStatusCode = statusCode
res.pendingBodyBytes = bytes
res.pendingBodyObj = body
}
// WriteJSON writes a json serializable struct to Response
func (res *ServerHTTPResponse) WriteJSON(
statusCode int, headers Header, body interface{},
) {
bytes := res.MarshalResponseJSON(body)
if bytes == nil {
return
}
res.SendResponse(statusCode, headers, body, bytes)
}
// PeekBody allows for inspecting a key path inside the body
// that is not flushed yet. This is useful for response middlewares
// that want to inspect the response body.
func (res *ServerHTTPResponse) PeekBody(
keys ...string,
) ([]byte, jsonparser.ValueType, error) {
value, valueType, _, err := jsonparser.Get(
res.pendingBodyBytes, keys...,
)
if err != nil {
return nil, -1, err
}
return value, valueType, nil
}
// Flush will write the body to the response. Before flush is called
// the body is pending. A pending body allows a response middleware to
// write a different body.
func (res *ServerHTTPResponse) flush(ctx context.Context) {
if res.flushed {
/* coverage ignore next line */
res.contextLogger.Error(ctx,
"Flushed a server response multiple times",
zap.String("path", res.Request.URL.Path),
)
/* coverage ignore next line */
return
}
res.flushed = true
res.writeHeader(res.pendingStatusCode)
if _, noContent := noContentStatusCodes[res.pendingStatusCode]; !noContent {
res.writeBytes(res.pendingBodyBytes)
}
res.finish(ctx)
}
func (res *ServerHTTPResponse) writeHeader(statusCode int) {
res.StatusCode = statusCode
res.responseWriter.WriteHeader(statusCode)
}
// WriteBytes writes raw bytes to output
func (res *ServerHTTPResponse) writeBytes(bytes []byte) {
_, err := res.responseWriter.Write(bytes)
if err != nil {
/* coverage ignore next line */
res.contextLogger.Error(res.Request.Context(),
"Could not write string to resp body",
zap.Error(err),
zap.String("bytesLength", strconv.Itoa(len(bytes))),
)
}
}
// GetPendingResponse lets you read the pending body bytes, obj and status code
// which isn't sent back yet.
func (res *ServerHTTPResponse) GetPendingResponse() ([]byte, int) {
return res.pendingBodyBytes, res.pendingStatusCode
}
// GetPendingResponseObject lets you read the pending body object
// which isn't sent back yet.
func (res *ServerHTTPResponse) GetPendingResponseObject() interface{} |
// Headers returns the underlying http response's headers
func (res *ServerHTTPResponse) Headers() http.Header {
return res.responseWriter.Header()
}
| {
return res.pendingBodyObj
} | identifier_body |
server_http_response.go | // Copyright (c) 2023 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package zanzibar
import (
"context"
"fmt"
"net/http"
"strconv"
"strings"
"time"
"github.com/buger/jsonparser"
"github.com/pkg/errors"
"github.com/uber-go/tally"
"github.com/uber/zanzibar/runtime/jsonwrapper"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
)
// ServerHTTPResponse struct manages server http response
type ServerHTTPResponse struct {
Request *ServerHTTPRequest
StatusCode int
responseWriter http.ResponseWriter
flushed bool
finished bool
finishTime time.Time
DownstreamFinishTime time.Duration
ClientType string
pendingBodyBytes []byte
pendingBodyObj interface{}
pendingStatusCode int
contextLogger ContextLogger
scope tally.Scope
jsonWrapper jsonwrapper.JSONWrapper
Err error
}
// NewServerHTTPResponse is helper function to alloc ServerHTTPResponse
func NewServerHTTPResponse(
w http.ResponseWriter,
req *ServerHTTPRequest,
) *ServerHTTPResponse {
return &ServerHTTPResponse{
Request: req,
StatusCode: 200,
responseWriter: w,
contextLogger: req.contextLogger,
scope: req.scope,
jsonWrapper: req.jsonWrapper,
}
}
// finish will handle final logic, like metrics
func (res *ServerHTTPResponse) finish(ctx context.Context) {
logFields := GetLogFieldsFromCtx(ctx)
if !res.Request.started {
/* coverage ignore next line */
res.contextLogger.Error(ctx,
"Forgot to start server response",
append(logFields, zap.String("path", res.Request.URL.Path))...,
)
/* coverage ignore next line */
return
}
if res.finished {
/* coverage ignore next line */
res.contextLogger.Error(ctx,
"Finished a server response multiple times",
append(logFields, zap.String("path", res.Request.URL.Path))...,
)
/* coverage ignore next line */
return
}
res.finished = true
res.finishTime = time.Now()
_, known := knownStatusCodes[res.StatusCode]
tagged := res.scope.Tagged(map[string]string{
scopeTagStatus: fmt.Sprintf("%d", res.StatusCode), // no need to put this tag on the context because this is the end of response life cycle
scopeTagClientType: res.ClientType,
})
delta := res.finishTime.Sub(res.Request.startTime)
tagged.Timer(endpointLatency).Record(delta)
tagged.Histogram(endpointLatencyHist, tally.DefaultBuckets).RecordDuration(delta)
if res.DownstreamFinishTime != 0 {
overhead := delta - res.DownstreamFinishTime
overheadRatio := overhead.Seconds() / delta.Seconds()
tagged.Timer(endpointOverheadLatency).Record(overhead)
tagged.Histogram(endpointOverheadLatencyHist, tally.DefaultBuckets).RecordDuration(overhead)
tagged.Gauge(endpointOverheadRatio).Update(overheadRatio)
}
if !known {
res.contextLogger.Error(ctx,
"Unknown status code",
append(logFields, zap.Int("UnknownStatusCode", res.StatusCode))...,
)
} else {
tagged.Counter(endpointStatus).Inc(1)
}
logFn := res.contextLogger.Debug
if !known || res.StatusCode >= 400 && res.StatusCode < 600 {
tagged.Counter(endpointAppErrors).Inc(1)
logFn = res.contextLogger.WarnZ
}
span := res.Request.GetSpan()
if span != nil {
span.Finish()
}
logFn(ctx,
fmt.Sprintf("Finished an incoming server HTTP request with %d status code", res.StatusCode),
append(logFields, serverHTTPLogFields(res.Request, res)...)...,
)
}
func serverHTTPLogFields(req *ServerHTTPRequest, res *ServerHTTPResponse) []zapcore.Field {
fields := []zapcore.Field{
zap.Int(logFieldResponseStatusCode, res.StatusCode),
}
for k, v := range res.Headers() {
if len(v) > 0 {
fields = append(fields, zap.String(
fmt.Sprintf("%s-%s", logFieldEndpointResponseHeaderPrefix, k),
strings.Join(v, ", "),
))
}
}
if res.Err != nil {
fields = append(fields, zap.Error(res.Err))
cause := errors.Cause(res.Err)
if cause != nil && cause != res.Err {
fields = append(fields, zap.NamedError("errorCause", cause))
}
}
return fields
}
// SendErrorString helper to send an error string
func (res *ServerHTTPResponse) SendErrorString(
statusCode int, errMsg string,
) {
res.WriteJSONBytes(statusCode, nil,
[]byte(`{"error":"`+errMsg+`"}`),
)
}
// SendError helper to send an server error message, propagates underlying cause to logs etc.
func (res *ServerHTTPResponse) SendError(
statusCode int, errMsg string, errCause error,
) {
res.Err = errCause
res.WriteJSONBytes(statusCode, nil,
[]byte(`{"error":"`+errMsg+`"}`),
)
}
// WriteBytes writes a byte[] slice that is valid Response
func (res *ServerHTTPResponse) WriteBytes(
statusCode int, headers Header, bytes []byte,
) {
if headers != nil {
for _, k := range headers.Keys() {
v, ok := headers.Get(k)
if ok {
res.responseWriter.Header().Set(k, v)
}
}
}
res.pendingStatusCode = statusCode
res.pendingBodyBytes = bytes
}
// WriteJSONBytes writes a byte[] slice that is valid json to Response
func (res *ServerHTTPResponse) WriteJSONBytes(
statusCode int, headers Header, bytes []byte,
) {
if headers == nil {
headers = ServerHTTPHeader{}
}
headers.Add("content-type", "application/json")
res.WriteBytes(statusCode, headers, bytes)
}
// MarshalResponseJSON serializes a json serializable into bytes
func (res *ServerHTTPResponse) MarshalResponseJSON(body interface{}) []byte {
ctx := res.Request.Context()
if body == nil {
res.SendError(500, "Could not serialize json response", errors.New("No Body JSON"))
res.contextLogger.Error(ctx, "Could not serialize nil pointer body")
return nil
}
bytes, err := res.jsonWrapper.Marshal(body)
if err != nil {
res.SendError(500, "Could not serialize json response", err)
res.contextLogger.Error(ctx, "Could not serialize json response", zap.Error(err))
return nil
}
return bytes
}
// SendResponse sets content-type if not present and fills Response
func (res *ServerHTTPResponse) SendResponse(statusCode int, headers Header, body interface{}, bytes []byte) {
contentTypePresent := false
if headers != nil {
for _, k := range headers.Keys() {
v, ok := headers.Get(k)
if ok {
if k == "Content-Type" {
contentTypePresent = true
}
res.responseWriter.Header().Set(k, v)
}
}
}
// Set the content-type to application/json if not already available
if !contentTypePresent {
res.responseWriter.Header().
Set("content-type", "application/json")
}
res.pendingStatusCode = statusCode
res.pendingBodyBytes = bytes
res.pendingBodyObj = body
}
// WriteJSON writes a json serializable struct to Response
func (res *ServerHTTPResponse) WriteJSON(
statusCode int, headers Header, body interface{},
) {
bytes := res.MarshalResponseJSON(body)
if bytes == nil {
return
}
res.SendResponse(statusCode, headers, body, bytes)
}
// PeekBody allows for inspecting a key path inside the body
// that is not flushed yet. This is useful for response middlewares
// that want to inspect the response body.
func (res *ServerHTTPResponse) PeekBody(
keys ...string,
) ([]byte, jsonparser.ValueType, error) {
value, valueType, _, err := jsonparser.Get(
res.pendingBodyBytes, keys...,
)
if err != nil {
return nil, -1, err
}
return value, valueType, nil
}
// Flush will write the body to the response. Before flush is called
// the body is pending. A pending body allows a response middleware to
// write a different body.
func (res *ServerHTTPResponse) flush(ctx context.Context) {
if res.flushed {
/* coverage ignore next line */
res.contextLogger.Error(ctx,
"Flushed a server response multiple times",
zap.String("path", res.Request.URL.Path),
)
/* coverage ignore next line */
return
}
res.flushed = true
res.writeHeader(res.pendingStatusCode)
if _, noContent := noContentStatusCodes[res.pendingStatusCode]; !noContent {
res.writeBytes(res.pendingBodyBytes)
}
res.finish(ctx)
}
func (res *ServerHTTPResponse) writeHeader(statusCode int) {
res.StatusCode = statusCode
res.responseWriter.WriteHeader(statusCode)
}
// WriteBytes writes raw bytes to output
func (res *ServerHTTPResponse) writeBytes(bytes []byte) {
_, err := res.responseWriter.Write(bytes)
if err != nil {
/* coverage ignore next line */
res.contextLogger.Error(res.Request.Context(),
"Could not write string to resp body",
zap.Error(err),
zap.String("bytesLength", strconv.Itoa(len(bytes))),
)
}
}
// GetPendingResponse lets you read the pending body bytes, obj and status code | // GetPendingResponseObject lets you read the pending body object
// which isn't sent back yet.
func (res *ServerHTTPResponse) GetPendingResponseObject() interface{} {
return res.pendingBodyObj
}
// Headers returns the underlying http response's headers
func (res *ServerHTTPResponse) Headers() http.Header {
return res.responseWriter.Header()
} | // which isn't sent back yet.
func (res *ServerHTTPResponse) GetPendingResponse() ([]byte, int) {
return res.pendingBodyBytes, res.pendingStatusCode
}
| random_line_split |
push_service.go | package apns
import (
"container/list"
"crypto/tls"
"encoding/binary"
"encoding/hex"
"fmt"
"strings"
"sync"
"time"
)
func newPushService() (*pushService, error) {
ps := &pushService{}
if err := ps.createConnection(); err != nil {
return nil, err
}
ps.waitClose = &sync.WaitGroup{}
ps.waitClose.Add(3)
ps.sentQueue = make(chan *ApnsMessage, 1000)
ps.confirmedQueue = make(chan *apnsReceiveMessage, 1000)
ps.doSenderClose = make(chan bool, 2)
go ps.monitorReply()
go ps.monitorMessages()
go ps.messageSender()
return ps, nil
}
type pushService struct {
conn *tls.Conn
lastError error
sentQueue chan *ApnsMessage
confirmedQueue chan *apnsReceiveMessage
doSenderClose chan bool
waitClose *sync.WaitGroup
}
func (ps *pushService) createConnection() error {
conf, err := getTlsConfig()
if err != nil {
return err
}
conn, err := tls.Dial("tcp", defaultConfig.gateway, conf)
if err != nil {
return err
}
ps.conn = conn
return nil
}
func (ps *pushService) destroy() {
logdebug("Destroy push service %p, waiting to finish", ps)
ps.doSenderClose <- true //this will close the sender
ps.conn.Close() //this will close the socket and force the read to exit
//(will remove the last message if not confirmed ??? it's ok ???)
//when sender/reader are closed the sending monitor will close
ps.waitClose.Wait()
logdebug("Destroy push service %p finish", ps)
}
func (ps *pushService) messageSender() {
defer func() {
ps.waitClose.Done()
logdebug("Message sender stopped %p", ps)
}()
logdebug("Message sender started %p", ps)
for {
select {
case <-ps.doSenderClose:
close(ps.sentQueue)
return
case messages := <-mainSendingMessage:
logdebug("Sending new message: %d to %s", messages.id, messages.DeviceToken)
data, err := createBinaryNotification(messages, messages.id, messages.DeviceToken)
if err != nil {
messages.SetStatus(err, true)
break
}
_, err = ps.conn.Write(data)
if err != nil {
ps.lastError = nil
pushPool.releasedServices <- ps
close(ps.sentQueue)
return
}
logdebug("Message id: %d sent", messages.id)
ps.sentQueue <- messages
pushPool.reportQueue <- ¬ifReport{sent: 1}
}
}
}
func (ps *pushService) monitorReply() {
defer func() {
ps.waitClose.Done()
logdebug("Message reply stopped %p", ps)
}()
logdebug("Message reply started %p", ps)
readb := [6]byte{}
for {
n, err := ps.conn.Read(readb[:])
if err != nil {
logerr("APNS read channel is closed %s", err)
//when the read socket channel is closed the last payload i think isn't sent ok
//i don't know the last message id so we consider id 0 as last message and don't use this socket
ps.lastError = err
ps.confirmedQueue <- &apnsReceiveMessage{lastMessage: true}
ps.doSenderClose <- true
close(ps.confirmedQueue)
break
}
if n == 6 {
// if status 0 (this i think isn't going to happen as apple says)
// if status 2 to 8 it's device token or payload error, don't try to resend
// if status 1 it's a proccessing error, we should retry sending the payload
// if status 10 the payload was sent but we must not send anything else on this socket (is shutdown)
// if status 255 it's unknown error (let's retry to send the payload) - close socket
// if status number unknown ???? (ok, let's retry to send the payload) - close socket
status := readb[1]
id := binary.BigEndian.Uint32(readb[2:])
ps.confirmedQueue <- &apnsReceiveMessage{status: status, id: id}
logdebug("Received confirmation for id: %d with status: %d", id, status)
if status != 0 {
ps.lastError = fmt.Errorf("APNs server reply with status %d (%s), closing", status, statusToError(status))
ps.doSenderClose <- true
close(ps.confirmedQueue)
break
}
} else {
//unknow data sent from apple
// let's close the socket and mark all the messagess with error
logerr("Unknow apple message (%s) socket will be closed", hex.EncodeToString(readb[:n]))
//we consinder that max uint32 means to delete all the messages from the queue
ps.lastError = fmt.Errorf("Unknow apple message (%s) socket will be closed", hex.EncodeToString(readb[:n]))
ps.confirmedQueue <- &apnsReceiveMessage{unknownMessage: true}
ps.doSenderClose <- true
close(ps.confirmedQueue)
break
}
}
}
func (ps *pushService) monitorMessages() {
defer func() {
ps.waitClose.Done()
logdebug("Message monitor stopped %p", ps)
}()
logdebug("Message monitor started %p", ps)
check := time.Tick(time.Millisecond * 50)
messageList := newMessageQueue()
readerStopped := false
writerStopped := false
//make sure that we don't have messages in the list
//what we have on exit we mark as error
defer func() {
messageList.SetAllFailed(fmt.Errorf("Message in the remained queue on close"))
logdebug("Push service is stopping, all the messages from the queue will be reposted")
if ps.lastError != nil { //anounnce the pool master that we had an error
pushPool.releasedServices <- ps
}
}()
for {
select {
case <-check: // check if we have messages older then SuccessTimeout so we can mark them
// logdebug("Check to mark message too old as success: %p", ps)
messageList.SetSuccessOlder(defaultConfig.SuccessTimeout)
case message, is_open := <-ps.sentQueue:
if !is_open {
//writer has exited we return only if the reader is stopped too
//we are going to clean the message list on exit
logdebug("Sent queue channel closed for %p", ps)
writerStopped = true
if readerStopped {
logdebug("Confirm channel closed too for: %p", ps)
return
}
ps.sentQueue = nil
break
}
logdebug("New message %d in the confirmation queue %p", message.id, ps)
messageList.PushBack(message)
case finishMessage, is_open := <-ps.confirmedQueue:
if !is_open {
//reader has exited we return only if the writer is stopped too
//we are going to clean the message list on exit
logdebug("Confirm queue channel closed for %p", ps)
readerStopped = true
if writerStopped {
logdebug("Sent channel closed too for: %p", ps)
return
}
ps.confirmedQueue = nil
break
}
//mark all excluding last one as success, we are going to close
if finishMessage.lastMessage |
//mark all as failed
if finishMessage.unknownMessage {
messageList.SetAllFailed(fmt.Errorf("Unknown error response from apple, all failed"))
logdebug("All messages are marked as error :%p", ps)
break
}
//mark all from front to the id as success
if finishMessage.status == 0 {
messageList.SetSuccess(finishMessage.id)
logdebug("Mark message %d as success: %p", finishMessage.id, ps)
break
}
//mark all until this one as success and this one mark it as temporar error
if finishMessage.status == 1 || finishMessage.status == 255 {
messageList.SetFailed(finishMessage.id, statusToError(finishMessage.status), false)
logdebug("Mark message %d with temporar error on status: %d %p", finishMessage.id, finishMessage.status, ps)
break
} //success until excluding id, id failed
//mark all until this one as success and this one mark as permanent error
if finishMessage.status >= 2 && finishMessage.status <= 8 {
messageList.SetFailed(finishMessage.id, statusToError(finishMessage.status), true)
logdebug("Mark message %d with permanend error on status: %d %p", finishMessage.id, finishMessage.status, ps)
break
}
//mark all including this one as success
if finishMessage.status == 10 {
messageList.SetSuccess(finishMessage.id)
messageList.SetAllFailed(fmt.Errorf("Apple it's in mantainance mode, socket shutdown"))
logdebug("Mark message %d with success and the rest with temp error %p", finishMessage.id, ps)
break
} //success until including id
//the rest of status codes we don't know, we consider that it's another problem
//from apple so mark the message as temporar error and the front as success
messageList.SetAllFailed(statusToError(finishMessage.status))
logdebug("Mark all messages with error: %s - %p", statusToError(finishMessage.status), ps)
}
}
}
type apnsReceiveMessage struct {
unknownMessage bool
lastMessage bool
status uint8
id uint32
}
type ApnsMessage struct {
PayloadInterface
DeviceToken string
Error error
id uint32
lastSentTime time.Time
receivedTime time.Time
sentTimes uint
}
func (a *ApnsMessage) SetStatus(err error, permanentError bool) {
a.Error = err
shouldConfirm := false
if err == nil || permanentError {
shouldConfirm = true
}
a.sentTimes++
if a.sentTimes >= defaultConfig.MaxMessageRetry {
shouldConfirm = true
}
if shouldConfirm {
nr := ¬ifReport{confirmTime: time.Since(a.lastSentTime), sendingTime: time.Since(a.receivedTime)}
if a.Error != nil {
nr.failed = 1
} else {
nr.success = 1
}
pushPool.reportQueue <- nr
confirmMessage(a)
} else {
pushPool.reportQueue <- ¬ifReport{failed: 1, resent: 1, confirmTime: time.Since(a.lastSentTime)}
resendMessage(a)
}
}
func newMessageQueue() *apnsMessageQueue {
return &apnsMessageQueue{list.New()}
}
type apnsMessageQueue struct {
mList *list.List
}
func (amq *apnsMessageQueue) PushBack(m *ApnsMessage) {
m.lastSentTime = time.Now()
amq.mList.PushBack(m)
}
func (amq *apnsMessageQueue) SetSuccessOlder(older time.Duration) {
var next *list.Element
for e := amq.mList.Front(); e != nil; e = next {
next = e.Next()
message := e.Value.(*ApnsMessage)
if time.Since(message.lastSentTime) > older {
logdebug("Mark message %d as success after timeout", message.id)
message.SetStatus(nil, false)
amq.mList.Remove(e)
}
}
}
func (amq *apnsMessageQueue) SetSuccess(id uint32) {
var next *list.Element
for e := amq.mList.Front(); e != nil; e = next {
next = e.Next()
message := e.Value.(*ApnsMessage)
message.SetStatus(nil, false)
amq.mList.Remove(e)
if message.id == id {
return
}
}
logwarn("I didn't found message %d to set it as success", id)
}
func (amq *apnsMessageQueue) SetFailed(id uint32, err error, permanentError bool) {
var next *list.Element
logdebug("Set message %d to status failed permanent: %t (%s)", id, permanentError, err)
for e := amq.mList.Front(); e != nil; e = next {
next = e.Next()
message := e.Value.(*ApnsMessage)
amq.mList.Remove(e)
if message.id == id {
message.SetStatus(err, permanentError)
return
} else {
message.SetStatus(nil, false)
}
}
logwarn("I didn't found message %d to set it as failed, permanent: %t (%s)", id, permanentError, err)
}
func (amq *apnsMessageQueue) SetLastFailed(err error) {
last := amq.mList.Back()
if last == nil {
logdebug("No message found in the queue when i wanted to delete the last one")
return
}
message := last.Value.(*ApnsMessage)
logdebug("Set LAST message %d to status failed", message.id)
amq.SetFailed(message.id, err, true)
}
func (amq *apnsMessageQueue) SetAllFailed(err error) {
logdebug("Set all messages to failed: %s", err)
var next *list.Element
for e := amq.mList.Front(); e != nil; e = next {
next = e.Next()
message := e.Value.(*ApnsMessage)
message.SetStatus(err, false)
amq.mList.Remove(e)
}
}
type PayloadInterface interface {
ToJson() ([]byte, error)
Config() (expiration uint32, priority uint8)
}
func statusToError(status uint8) error {
e, ok := appleErrorMap[status]
if ok {
return fmt.Errorf(e)
}
return fmt.Errorf("Unknown apple status error, check doc, maybe it's new: %d", status)
}
var appleErrorMap = map[uint8]string{
0: "No errors encountered",
1: "Processing Errors",
2: "Missing device token",
3: "Missing topic",
4: "Missing payload",
5: "Invalid token size",
6: "Invalid topic size",
7: "Invalid payload size",
8: "Invalid token",
10: "Shutdown",
255: "None (unknown)",
}
var cachedTlsConfig *tls.Config
func getTlsConfig() (cfg *tls.Config, err error) {
if cachedTlsConfig != nil {
return cachedTlsConfig, nil
}
var cert tls.Certificate
if defaultConfig.isCertificateFile {
cert, err = tls.LoadX509KeyPair(defaultConfig.Certificate, defaultConfig.CertificateKey)
} else {
cert, err = tls.X509KeyPair([]byte(defaultConfig.Certificate), []byte(defaultConfig.CertificateKey))
}
if err != nil {
return nil, err
}
gatewayParts := strings.Split(defaultConfig.gateway, ":")
cachedTlsConfig = &tls.Config{
Certificates: []tls.Certificate{cert},
ServerName: gatewayParts[0],
}
return cachedTlsConfig, nil
}
| {
messageList.SetLastFailed(fmt.Errorf("Unknow error after last message sent"))
logdebug("Going to remove last message from queue with error: %p", ps)
break
} | conditional_block |
push_service.go | package apns
import (
"container/list"
"crypto/tls"
"encoding/binary"
"encoding/hex"
"fmt"
"strings"
"sync"
"time"
)
func newPushService() (*pushService, error) {
ps := &pushService{}
if err := ps.createConnection(); err != nil {
return nil, err
}
ps.waitClose = &sync.WaitGroup{}
ps.waitClose.Add(3)
ps.sentQueue = make(chan *ApnsMessage, 1000)
ps.confirmedQueue = make(chan *apnsReceiveMessage, 1000)
ps.doSenderClose = make(chan bool, 2)
go ps.monitorReply()
go ps.monitorMessages()
go ps.messageSender()
return ps, nil
}
type pushService struct {
conn *tls.Conn
lastError error
sentQueue chan *ApnsMessage
confirmedQueue chan *apnsReceiveMessage
doSenderClose chan bool
waitClose *sync.WaitGroup
}
func (ps *pushService) createConnection() error {
conf, err := getTlsConfig()
if err != nil {
return err
}
conn, err := tls.Dial("tcp", defaultConfig.gateway, conf)
if err != nil {
return err
}
ps.conn = conn
return nil
}
func (ps *pushService) destroy() {
logdebug("Destroy push service %p, waiting to finish", ps)
ps.doSenderClose <- true //this will close the sender
ps.conn.Close() //this will close the socket and force the read to exit
//(will remove the last message if not confirmed ??? it's ok ???)
//when sender/reader are closed the sending monitor will close
ps.waitClose.Wait()
logdebug("Destroy push service %p finish", ps)
}
func (ps *pushService) messageSender() {
defer func() {
ps.waitClose.Done()
logdebug("Message sender stopped %p", ps)
}()
logdebug("Message sender started %p", ps)
for {
select {
case <-ps.doSenderClose:
close(ps.sentQueue)
return
case messages := <-mainSendingMessage:
logdebug("Sending new message: %d to %s", messages.id, messages.DeviceToken)
data, err := createBinaryNotification(messages, messages.id, messages.DeviceToken)
if err != nil {
messages.SetStatus(err, true)
break
}
_, err = ps.conn.Write(data)
if err != nil {
ps.lastError = nil
pushPool.releasedServices <- ps
close(ps.sentQueue)
return
}
logdebug("Message id: %d sent", messages.id)
ps.sentQueue <- messages
pushPool.reportQueue <- ¬ifReport{sent: 1}
}
}
}
func (ps *pushService) monitorReply() {
defer func() {
ps.waitClose.Done()
logdebug("Message reply stopped %p", ps)
}()
logdebug("Message reply started %p", ps)
readb := [6]byte{}
for {
n, err := ps.conn.Read(readb[:])
if err != nil {
logerr("APNS read channel is closed %s", err)
//when the read socket channel is closed the last payload i think isn't sent ok
//i don't know the last message id so we consider id 0 as last message and don't use this socket
ps.lastError = err
ps.confirmedQueue <- &apnsReceiveMessage{lastMessage: true}
ps.doSenderClose <- true
close(ps.confirmedQueue)
break
}
if n == 6 {
// if status 0 (this i think isn't going to happen as apple says)
// if status 2 to 8 it's device token or payload error, don't try to resend
// if status 1 it's a proccessing error, we should retry sending the payload
// if status 10 the payload was sent but we must not send anything else on this socket (is shutdown)
// if status 255 it's unknown error (let's retry to send the payload) - close socket
// if status number unknown ???? (ok, let's retry to send the payload) - close socket
status := readb[1]
id := binary.BigEndian.Uint32(readb[2:])
ps.confirmedQueue <- &apnsReceiveMessage{status: status, id: id}
logdebug("Received confirmation for id: %d with status: %d", id, status)
if status != 0 {
ps.lastError = fmt.Errorf("APNs server reply with status %d (%s), closing", status, statusToError(status))
ps.doSenderClose <- true
close(ps.confirmedQueue)
break
}
} else {
//unknow data sent from apple
// let's close the socket and mark all the messagess with error
logerr("Unknow apple message (%s) socket will be closed", hex.EncodeToString(readb[:n]))
//we consinder that max uint32 means to delete all the messages from the queue
ps.lastError = fmt.Errorf("Unknow apple message (%s) socket will be closed", hex.EncodeToString(readb[:n]))
ps.confirmedQueue <- &apnsReceiveMessage{unknownMessage: true}
ps.doSenderClose <- true
close(ps.confirmedQueue)
break
}
}
}
func (ps *pushService) monitorMessages() {
defer func() {
ps.waitClose.Done()
logdebug("Message monitor stopped %p", ps)
}()
logdebug("Message monitor started %p", ps)
check := time.Tick(time.Millisecond * 50)
messageList := newMessageQueue()
readerStopped := false
writerStopped := false
//make sure that we don't have messages in the list
//what we have on exit we mark as error
defer func() {
messageList.SetAllFailed(fmt.Errorf("Message in the remained queue on close"))
logdebug("Push service is stopping, all the messages from the queue will be reposted")
if ps.lastError != nil { //anounnce the pool master that we had an error
pushPool.releasedServices <- ps
}
}()
for {
select {
case <-check: // check if we have messages older then SuccessTimeout so we can mark them
// logdebug("Check to mark message too old as success: %p", ps)
messageList.SetSuccessOlder(defaultConfig.SuccessTimeout)
case message, is_open := <-ps.sentQueue:
if !is_open {
//writer has exited we return only if the reader is stopped too
//we are going to clean the message list on exit
logdebug("Sent queue channel closed for %p", ps)
writerStopped = true
if readerStopped {
logdebug("Confirm channel closed too for: %p", ps)
return
}
ps.sentQueue = nil
break
}
logdebug("New message %d in the confirmation queue %p", message.id, ps)
messageList.PushBack(message)
case finishMessage, is_open := <-ps.confirmedQueue:
if !is_open {
//reader has exited we return only if the writer is stopped too
//we are going to clean the message list on exit
logdebug("Confirm queue channel closed for %p", ps)
readerStopped = true
if writerStopped {
logdebug("Sent channel closed too for: %p", ps)
return
}
ps.confirmedQueue = nil
break
}
//mark all excluding last one as success, we are going to close
if finishMessage.lastMessage {
messageList.SetLastFailed(fmt.Errorf("Unknow error after last message sent"))
logdebug("Going to remove last message from queue with error: %p", ps)
break
}
//mark all as failed
if finishMessage.unknownMessage {
messageList.SetAllFailed(fmt.Errorf("Unknown error response from apple, all failed"))
logdebug("All messages are marked as error :%p", ps)
break
}
//mark all from front to the id as success
if finishMessage.status == 0 {
messageList.SetSuccess(finishMessage.id)
logdebug("Mark message %d as success: %p", finishMessage.id, ps)
break
}
//mark all until this one as success and this one mark it as temporar error
if finishMessage.status == 1 || finishMessage.status == 255 {
messageList.SetFailed(finishMessage.id, statusToError(finishMessage.status), false)
logdebug("Mark message %d with temporar error on status: %d %p", finishMessage.id, finishMessage.status, ps)
break
} //success until excluding id, id failed
//mark all until this one as success and this one mark as permanent error
if finishMessage.status >= 2 && finishMessage.status <= 8 {
messageList.SetFailed(finishMessage.id, statusToError(finishMessage.status), true)
logdebug("Mark message %d with permanend error on status: %d %p", finishMessage.id, finishMessage.status, ps)
break
}
//mark all including this one as success
if finishMessage.status == 10 {
messageList.SetSuccess(finishMessage.id)
messageList.SetAllFailed(fmt.Errorf("Apple it's in mantainance mode, socket shutdown"))
logdebug("Mark message %d with success and the rest with temp error %p", finishMessage.id, ps)
break
} //success until including id
//the rest of status codes we don't know, we consider that it's another problem
//from apple so mark the message as temporar error and the front as success
messageList.SetAllFailed(statusToError(finishMessage.status))
logdebug("Mark all messages with error: %s - %p", statusToError(finishMessage.status), ps)
}
}
}
type apnsReceiveMessage struct {
unknownMessage bool
lastMessage bool
status uint8
id uint32
}
type ApnsMessage struct {
PayloadInterface
DeviceToken string
Error error
id uint32
lastSentTime time.Time
receivedTime time.Time
sentTimes uint
}
func (a *ApnsMessage) SetStatus(err error, permanentError bool) {
a.Error = err
shouldConfirm := false
if err == nil || permanentError {
shouldConfirm = true
}
a.sentTimes++
if a.sentTimes >= defaultConfig.MaxMessageRetry {
shouldConfirm = true
}
if shouldConfirm {
nr := ¬ifReport{confirmTime: time.Since(a.lastSentTime), sendingTime: time.Since(a.receivedTime)}
if a.Error != nil {
nr.failed = 1
} else {
nr.success = 1
}
pushPool.reportQueue <- nr
confirmMessage(a)
} else {
pushPool.reportQueue <- ¬ifReport{failed: 1, resent: 1, confirmTime: time.Since(a.lastSentTime)}
resendMessage(a)
}
}
func newMessageQueue() *apnsMessageQueue {
return &apnsMessageQueue{list.New()}
}
type apnsMessageQueue struct {
mList *list.List
}
func (amq *apnsMessageQueue) PushBack(m *ApnsMessage) {
m.lastSentTime = time.Now()
amq.mList.PushBack(m)
}
func (amq *apnsMessageQueue) SetSuccessOlder(older time.Duration) {
var next *list.Element
for e := amq.mList.Front(); e != nil; e = next {
next = e.Next()
message := e.Value.(*ApnsMessage)
if time.Since(message.lastSentTime) > older {
logdebug("Mark message %d as success after timeout", message.id)
message.SetStatus(nil, false)
amq.mList.Remove(e)
}
}
}
func (amq *apnsMessageQueue) SetSuccess(id uint32) {
var next *list.Element
for e := amq.mList.Front(); e != nil; e = next {
next = e.Next()
message := e.Value.(*ApnsMessage)
message.SetStatus(nil, false)
amq.mList.Remove(e)
if message.id == id {
return
}
}
logwarn("I didn't found message %d to set it as success", id)
}
func (amq *apnsMessageQueue) SetFailed(id uint32, err error, permanentError bool) {
var next *list.Element
logdebug("Set message %d to status failed permanent: %t (%s)", id, permanentError, err)
for e := amq.mList.Front(); e != nil; e = next {
next = e.Next()
message := e.Value.(*ApnsMessage)
amq.mList.Remove(e)
if message.id == id {
message.SetStatus(err, permanentError)
return
} else {
message.SetStatus(nil, false)
}
}
logwarn("I didn't found message %d to set it as failed, permanent: %t (%s)", id, permanentError, err)
}
func (amq *apnsMessageQueue) SetLastFailed(err error) {
last := amq.mList.Back()
if last == nil {
logdebug("No message found in the queue when i wanted to delete the last one")
return
}
message := last.Value.(*ApnsMessage)
logdebug("Set LAST message %d to status failed", message.id)
amq.SetFailed(message.id, err, true)
}
func (amq *apnsMessageQueue) SetAllFailed(err error) {
logdebug("Set all messages to failed: %s", err)
var next *list.Element
for e := amq.mList.Front(); e != nil; e = next {
next = e.Next()
message := e.Value.(*ApnsMessage)
message.SetStatus(err, false)
amq.mList.Remove(e)
}
}
type PayloadInterface interface {
ToJson() ([]byte, error) | if ok {
return fmt.Errorf(e)
}
return fmt.Errorf("Unknown apple status error, check doc, maybe it's new: %d", status)
}
var appleErrorMap = map[uint8]string{
0: "No errors encountered",
1: "Processing Errors",
2: "Missing device token",
3: "Missing topic",
4: "Missing payload",
5: "Invalid token size",
6: "Invalid topic size",
7: "Invalid payload size",
8: "Invalid token",
10: "Shutdown",
255: "None (unknown)",
}
var cachedTlsConfig *tls.Config
func getTlsConfig() (cfg *tls.Config, err error) {
if cachedTlsConfig != nil {
return cachedTlsConfig, nil
}
var cert tls.Certificate
if defaultConfig.isCertificateFile {
cert, err = tls.LoadX509KeyPair(defaultConfig.Certificate, defaultConfig.CertificateKey)
} else {
cert, err = tls.X509KeyPair([]byte(defaultConfig.Certificate), []byte(defaultConfig.CertificateKey))
}
if err != nil {
return nil, err
}
gatewayParts := strings.Split(defaultConfig.gateway, ":")
cachedTlsConfig = &tls.Config{
Certificates: []tls.Certificate{cert},
ServerName: gatewayParts[0],
}
return cachedTlsConfig, nil
} | Config() (expiration uint32, priority uint8)
}
func statusToError(status uint8) error {
e, ok := appleErrorMap[status] | random_line_split |
push_service.go | package apns
import (
"container/list"
"crypto/tls"
"encoding/binary"
"encoding/hex"
"fmt"
"strings"
"sync"
"time"
)
func newPushService() (*pushService, error) {
ps := &pushService{}
if err := ps.createConnection(); err != nil {
return nil, err
}
ps.waitClose = &sync.WaitGroup{}
ps.waitClose.Add(3)
ps.sentQueue = make(chan *ApnsMessage, 1000)
ps.confirmedQueue = make(chan *apnsReceiveMessage, 1000)
ps.doSenderClose = make(chan bool, 2)
go ps.monitorReply()
go ps.monitorMessages()
go ps.messageSender()
return ps, nil
}
type pushService struct {
conn *tls.Conn
lastError error
sentQueue chan *ApnsMessage
confirmedQueue chan *apnsReceiveMessage
doSenderClose chan bool
waitClose *sync.WaitGroup
}
func (ps *pushService) createConnection() error {
conf, err := getTlsConfig()
if err != nil {
return err
}
conn, err := tls.Dial("tcp", defaultConfig.gateway, conf)
if err != nil {
return err
}
ps.conn = conn
return nil
}
func (ps *pushService) destroy() {
logdebug("Destroy push service %p, waiting to finish", ps)
ps.doSenderClose <- true //this will close the sender
ps.conn.Close() //this will close the socket and force the read to exit
//(will remove the last message if not confirmed ??? it's ok ???)
//when sender/reader are closed the sending monitor will close
ps.waitClose.Wait()
logdebug("Destroy push service %p finish", ps)
}
func (ps *pushService) messageSender() {
defer func() {
ps.waitClose.Done()
logdebug("Message sender stopped %p", ps)
}()
logdebug("Message sender started %p", ps)
for {
select {
case <-ps.doSenderClose:
close(ps.sentQueue)
return
case messages := <-mainSendingMessage:
logdebug("Sending new message: %d to %s", messages.id, messages.DeviceToken)
data, err := createBinaryNotification(messages, messages.id, messages.DeviceToken)
if err != nil {
messages.SetStatus(err, true)
break
}
_, err = ps.conn.Write(data)
if err != nil {
ps.lastError = nil
pushPool.releasedServices <- ps
close(ps.sentQueue)
return
}
logdebug("Message id: %d sent", messages.id)
ps.sentQueue <- messages
pushPool.reportQueue <- ¬ifReport{sent: 1}
}
}
}
func (ps *pushService) monitorReply() {
defer func() {
ps.waitClose.Done()
logdebug("Message reply stopped %p", ps)
}()
logdebug("Message reply started %p", ps)
readb := [6]byte{}
for {
n, err := ps.conn.Read(readb[:])
if err != nil {
logerr("APNS read channel is closed %s", err)
//when the read socket channel is closed the last payload i think isn't sent ok
//i don't know the last message id so we consider id 0 as last message and don't use this socket
ps.lastError = err
ps.confirmedQueue <- &apnsReceiveMessage{lastMessage: true}
ps.doSenderClose <- true
close(ps.confirmedQueue)
break
}
if n == 6 {
// if status 0 (this i think isn't going to happen as apple says)
// if status 2 to 8 it's device token or payload error, don't try to resend
// if status 1 it's a proccessing error, we should retry sending the payload
// if status 10 the payload was sent but we must not send anything else on this socket (is shutdown)
// if status 255 it's unknown error (let's retry to send the payload) - close socket
// if status number unknown ???? (ok, let's retry to send the payload) - close socket
status := readb[1]
id := binary.BigEndian.Uint32(readb[2:])
ps.confirmedQueue <- &apnsReceiveMessage{status: status, id: id}
logdebug("Received confirmation for id: %d with status: %d", id, status)
if status != 0 {
ps.lastError = fmt.Errorf("APNs server reply with status %d (%s), closing", status, statusToError(status))
ps.doSenderClose <- true
close(ps.confirmedQueue)
break
}
} else {
//unknow data sent from apple
// let's close the socket and mark all the messagess with error
logerr("Unknow apple message (%s) socket will be closed", hex.EncodeToString(readb[:n]))
//we consinder that max uint32 means to delete all the messages from the queue
ps.lastError = fmt.Errorf("Unknow apple message (%s) socket will be closed", hex.EncodeToString(readb[:n]))
ps.confirmedQueue <- &apnsReceiveMessage{unknownMessage: true}
ps.doSenderClose <- true
close(ps.confirmedQueue)
break
}
}
}
func (ps *pushService) monitorMessages() {
defer func() {
ps.waitClose.Done()
logdebug("Message monitor stopped %p", ps)
}()
logdebug("Message monitor started %p", ps)
check := time.Tick(time.Millisecond * 50)
messageList := newMessageQueue()
readerStopped := false
writerStopped := false
//make sure that we don't have messages in the list
//what we have on exit we mark as error
defer func() {
messageList.SetAllFailed(fmt.Errorf("Message in the remained queue on close"))
logdebug("Push service is stopping, all the messages from the queue will be reposted")
if ps.lastError != nil { //anounnce the pool master that we had an error
pushPool.releasedServices <- ps
}
}()
for {
select {
case <-check: // check if we have messages older then SuccessTimeout so we can mark them
// logdebug("Check to mark message too old as success: %p", ps)
messageList.SetSuccessOlder(defaultConfig.SuccessTimeout)
case message, is_open := <-ps.sentQueue:
if !is_open {
//writer has exited we return only if the reader is stopped too
//we are going to clean the message list on exit
logdebug("Sent queue channel closed for %p", ps)
writerStopped = true
if readerStopped {
logdebug("Confirm channel closed too for: %p", ps)
return
}
ps.sentQueue = nil
break
}
logdebug("New message %d in the confirmation queue %p", message.id, ps)
messageList.PushBack(message)
case finishMessage, is_open := <-ps.confirmedQueue:
if !is_open {
//reader has exited we return only if the writer is stopped too
//we are going to clean the message list on exit
logdebug("Confirm queue channel closed for %p", ps)
readerStopped = true
if writerStopped {
logdebug("Sent channel closed too for: %p", ps)
return
}
ps.confirmedQueue = nil
break
}
//mark all excluding last one as success, we are going to close
if finishMessage.lastMessage {
messageList.SetLastFailed(fmt.Errorf("Unknow error after last message sent"))
logdebug("Going to remove last message from queue with error: %p", ps)
break
}
//mark all as failed
if finishMessage.unknownMessage {
messageList.SetAllFailed(fmt.Errorf("Unknown error response from apple, all failed"))
logdebug("All messages are marked as error :%p", ps)
break
}
//mark all from front to the id as success
if finishMessage.status == 0 {
messageList.SetSuccess(finishMessage.id)
logdebug("Mark message %d as success: %p", finishMessage.id, ps)
break
}
//mark all until this one as success and this one mark it as temporar error
if finishMessage.status == 1 || finishMessage.status == 255 {
messageList.SetFailed(finishMessage.id, statusToError(finishMessage.status), false)
logdebug("Mark message %d with temporar error on status: %d %p", finishMessage.id, finishMessage.status, ps)
break
} //success until excluding id, id failed
//mark all until this one as success and this one mark as permanent error
if finishMessage.status >= 2 && finishMessage.status <= 8 {
messageList.SetFailed(finishMessage.id, statusToError(finishMessage.status), true)
logdebug("Mark message %d with permanend error on status: %d %p", finishMessage.id, finishMessage.status, ps)
break
}
//mark all including this one as success
if finishMessage.status == 10 {
messageList.SetSuccess(finishMessage.id)
messageList.SetAllFailed(fmt.Errorf("Apple it's in mantainance mode, socket shutdown"))
logdebug("Mark message %d with success and the rest with temp error %p", finishMessage.id, ps)
break
} //success until including id
//the rest of status codes we don't know, we consider that it's another problem
//from apple so mark the message as temporar error and the front as success
messageList.SetAllFailed(statusToError(finishMessage.status))
logdebug("Mark all messages with error: %s - %p", statusToError(finishMessage.status), ps)
}
}
}
type apnsReceiveMessage struct {
unknownMessage bool
lastMessage bool
status uint8
id uint32
}
type ApnsMessage struct {
PayloadInterface
DeviceToken string
Error error
id uint32
lastSentTime time.Time
receivedTime time.Time
sentTimes uint
}
func (a *ApnsMessage) SetStatus(err error, permanentError bool) {
a.Error = err
shouldConfirm := false
if err == nil || permanentError {
shouldConfirm = true
}
a.sentTimes++
if a.sentTimes >= defaultConfig.MaxMessageRetry {
shouldConfirm = true
}
if shouldConfirm {
nr := ¬ifReport{confirmTime: time.Since(a.lastSentTime), sendingTime: time.Since(a.receivedTime)}
if a.Error != nil {
nr.failed = 1
} else {
nr.success = 1
}
pushPool.reportQueue <- nr
confirmMessage(a)
} else {
pushPool.reportQueue <- ¬ifReport{failed: 1, resent: 1, confirmTime: time.Since(a.lastSentTime)}
resendMessage(a)
}
}
func newMessageQueue() *apnsMessageQueue {
return &apnsMessageQueue{list.New()}
}
type apnsMessageQueue struct {
mList *list.List
}
func (amq *apnsMessageQueue) PushBack(m *ApnsMessage) {
m.lastSentTime = time.Now()
amq.mList.PushBack(m)
}
func (amq *apnsMessageQueue) | (older time.Duration) {
var next *list.Element
for e := amq.mList.Front(); e != nil; e = next {
next = e.Next()
message := e.Value.(*ApnsMessage)
if time.Since(message.lastSentTime) > older {
logdebug("Mark message %d as success after timeout", message.id)
message.SetStatus(nil, false)
amq.mList.Remove(e)
}
}
}
func (amq *apnsMessageQueue) SetSuccess(id uint32) {
var next *list.Element
for e := amq.mList.Front(); e != nil; e = next {
next = e.Next()
message := e.Value.(*ApnsMessage)
message.SetStatus(nil, false)
amq.mList.Remove(e)
if message.id == id {
return
}
}
logwarn("I didn't found message %d to set it as success", id)
}
func (amq *apnsMessageQueue) SetFailed(id uint32, err error, permanentError bool) {
var next *list.Element
logdebug("Set message %d to status failed permanent: %t (%s)", id, permanentError, err)
for e := amq.mList.Front(); e != nil; e = next {
next = e.Next()
message := e.Value.(*ApnsMessage)
amq.mList.Remove(e)
if message.id == id {
message.SetStatus(err, permanentError)
return
} else {
message.SetStatus(nil, false)
}
}
logwarn("I didn't found message %d to set it as failed, permanent: %t (%s)", id, permanentError, err)
}
func (amq *apnsMessageQueue) SetLastFailed(err error) {
last := amq.mList.Back()
if last == nil {
logdebug("No message found in the queue when i wanted to delete the last one")
return
}
message := last.Value.(*ApnsMessage)
logdebug("Set LAST message %d to status failed", message.id)
amq.SetFailed(message.id, err, true)
}
func (amq *apnsMessageQueue) SetAllFailed(err error) {
logdebug("Set all messages to failed: %s", err)
var next *list.Element
for e := amq.mList.Front(); e != nil; e = next {
next = e.Next()
message := e.Value.(*ApnsMessage)
message.SetStatus(err, false)
amq.mList.Remove(e)
}
}
type PayloadInterface interface {
ToJson() ([]byte, error)
Config() (expiration uint32, priority uint8)
}
func statusToError(status uint8) error {
e, ok := appleErrorMap[status]
if ok {
return fmt.Errorf(e)
}
return fmt.Errorf("Unknown apple status error, check doc, maybe it's new: %d", status)
}
var appleErrorMap = map[uint8]string{
0: "No errors encountered",
1: "Processing Errors",
2: "Missing device token",
3: "Missing topic",
4: "Missing payload",
5: "Invalid token size",
6: "Invalid topic size",
7: "Invalid payload size",
8: "Invalid token",
10: "Shutdown",
255: "None (unknown)",
}
var cachedTlsConfig *tls.Config
func getTlsConfig() (cfg *tls.Config, err error) {
if cachedTlsConfig != nil {
return cachedTlsConfig, nil
}
var cert tls.Certificate
if defaultConfig.isCertificateFile {
cert, err = tls.LoadX509KeyPair(defaultConfig.Certificate, defaultConfig.CertificateKey)
} else {
cert, err = tls.X509KeyPair([]byte(defaultConfig.Certificate), []byte(defaultConfig.CertificateKey))
}
if err != nil {
return nil, err
}
gatewayParts := strings.Split(defaultConfig.gateway, ":")
cachedTlsConfig = &tls.Config{
Certificates: []tls.Certificate{cert},
ServerName: gatewayParts[0],
}
return cachedTlsConfig, nil
}
| SetSuccessOlder | identifier_name |
push_service.go | package apns
import (
"container/list"
"crypto/tls"
"encoding/binary"
"encoding/hex"
"fmt"
"strings"
"sync"
"time"
)
func newPushService() (*pushService, error) {
ps := &pushService{}
if err := ps.createConnection(); err != nil {
return nil, err
}
ps.waitClose = &sync.WaitGroup{}
ps.waitClose.Add(3)
ps.sentQueue = make(chan *ApnsMessage, 1000)
ps.confirmedQueue = make(chan *apnsReceiveMessage, 1000)
ps.doSenderClose = make(chan bool, 2)
go ps.monitorReply()
go ps.monitorMessages()
go ps.messageSender()
return ps, nil
}
type pushService struct {
conn *tls.Conn
lastError error
sentQueue chan *ApnsMessage
confirmedQueue chan *apnsReceiveMessage
doSenderClose chan bool
waitClose *sync.WaitGroup
}
func (ps *pushService) createConnection() error {
conf, err := getTlsConfig()
if err != nil {
return err
}
conn, err := tls.Dial("tcp", defaultConfig.gateway, conf)
if err != nil {
return err
}
ps.conn = conn
return nil
}
func (ps *pushService) destroy() {
logdebug("Destroy push service %p, waiting to finish", ps)
ps.doSenderClose <- true //this will close the sender
ps.conn.Close() //this will close the socket and force the read to exit
//(will remove the last message if not confirmed ??? it's ok ???)
//when sender/reader are closed the sending monitor will close
ps.waitClose.Wait()
logdebug("Destroy push service %p finish", ps)
}
func (ps *pushService) messageSender() {
defer func() {
ps.waitClose.Done()
logdebug("Message sender stopped %p", ps)
}()
logdebug("Message sender started %p", ps)
for {
select {
case <-ps.doSenderClose:
close(ps.sentQueue)
return
case messages := <-mainSendingMessage:
logdebug("Sending new message: %d to %s", messages.id, messages.DeviceToken)
data, err := createBinaryNotification(messages, messages.id, messages.DeviceToken)
if err != nil {
messages.SetStatus(err, true)
break
}
_, err = ps.conn.Write(data)
if err != nil {
ps.lastError = nil
pushPool.releasedServices <- ps
close(ps.sentQueue)
return
}
logdebug("Message id: %d sent", messages.id)
ps.sentQueue <- messages
pushPool.reportQueue <- ¬ifReport{sent: 1}
}
}
}
func (ps *pushService) monitorReply() {
defer func() {
ps.waitClose.Done()
logdebug("Message reply stopped %p", ps)
}()
logdebug("Message reply started %p", ps)
readb := [6]byte{}
for {
n, err := ps.conn.Read(readb[:])
if err != nil {
logerr("APNS read channel is closed %s", err)
//when the read socket channel is closed the last payload i think isn't sent ok
//i don't know the last message id so we consider id 0 as last message and don't use this socket
ps.lastError = err
ps.confirmedQueue <- &apnsReceiveMessage{lastMessage: true}
ps.doSenderClose <- true
close(ps.confirmedQueue)
break
}
if n == 6 {
// if status 0 (this i think isn't going to happen as apple says)
// if status 2 to 8 it's device token or payload error, don't try to resend
// if status 1 it's a proccessing error, we should retry sending the payload
// if status 10 the payload was sent but we must not send anything else on this socket (is shutdown)
// if status 255 it's unknown error (let's retry to send the payload) - close socket
// if status number unknown ???? (ok, let's retry to send the payload) - close socket
status := readb[1]
id := binary.BigEndian.Uint32(readb[2:])
ps.confirmedQueue <- &apnsReceiveMessage{status: status, id: id}
logdebug("Received confirmation for id: %d with status: %d", id, status)
if status != 0 {
ps.lastError = fmt.Errorf("APNs server reply with status %d (%s), closing", status, statusToError(status))
ps.doSenderClose <- true
close(ps.confirmedQueue)
break
}
} else {
//unknow data sent from apple
// let's close the socket and mark all the messagess with error
logerr("Unknow apple message (%s) socket will be closed", hex.EncodeToString(readb[:n]))
//we consinder that max uint32 means to delete all the messages from the queue
ps.lastError = fmt.Errorf("Unknow apple message (%s) socket will be closed", hex.EncodeToString(readb[:n]))
ps.confirmedQueue <- &apnsReceiveMessage{unknownMessage: true}
ps.doSenderClose <- true
close(ps.confirmedQueue)
break
}
}
}
func (ps *pushService) monitorMessages() {
defer func() {
ps.waitClose.Done()
logdebug("Message monitor stopped %p", ps)
}()
logdebug("Message monitor started %p", ps)
check := time.Tick(time.Millisecond * 50)
messageList := newMessageQueue()
readerStopped := false
writerStopped := false
//make sure that we don't have messages in the list
//what we have on exit we mark as error
defer func() {
messageList.SetAllFailed(fmt.Errorf("Message in the remained queue on close"))
logdebug("Push service is stopping, all the messages from the queue will be reposted")
if ps.lastError != nil { //anounnce the pool master that we had an error
pushPool.releasedServices <- ps
}
}()
for {
select {
case <-check: // check if we have messages older then SuccessTimeout so we can mark them
// logdebug("Check to mark message too old as success: %p", ps)
messageList.SetSuccessOlder(defaultConfig.SuccessTimeout)
case message, is_open := <-ps.sentQueue:
if !is_open {
//writer has exited we return only if the reader is stopped too
//we are going to clean the message list on exit
logdebug("Sent queue channel closed for %p", ps)
writerStopped = true
if readerStopped {
logdebug("Confirm channel closed too for: %p", ps)
return
}
ps.sentQueue = nil
break
}
logdebug("New message %d in the confirmation queue %p", message.id, ps)
messageList.PushBack(message)
case finishMessage, is_open := <-ps.confirmedQueue:
if !is_open {
//reader has exited we return only if the writer is stopped too
//we are going to clean the message list on exit
logdebug("Confirm queue channel closed for %p", ps)
readerStopped = true
if writerStopped {
logdebug("Sent channel closed too for: %p", ps)
return
}
ps.confirmedQueue = nil
break
}
//mark all excluding last one as success, we are going to close
if finishMessage.lastMessage {
messageList.SetLastFailed(fmt.Errorf("Unknow error after last message sent"))
logdebug("Going to remove last message from queue with error: %p", ps)
break
}
//mark all as failed
if finishMessage.unknownMessage {
messageList.SetAllFailed(fmt.Errorf("Unknown error response from apple, all failed"))
logdebug("All messages are marked as error :%p", ps)
break
}
//mark all from front to the id as success
if finishMessage.status == 0 {
messageList.SetSuccess(finishMessage.id)
logdebug("Mark message %d as success: %p", finishMessage.id, ps)
break
}
//mark all until this one as success and this one mark it as temporar error
if finishMessage.status == 1 || finishMessage.status == 255 {
messageList.SetFailed(finishMessage.id, statusToError(finishMessage.status), false)
logdebug("Mark message %d with temporar error on status: %d %p", finishMessage.id, finishMessage.status, ps)
break
} //success until excluding id, id failed
//mark all until this one as success and this one mark as permanent error
if finishMessage.status >= 2 && finishMessage.status <= 8 {
messageList.SetFailed(finishMessage.id, statusToError(finishMessage.status), true)
logdebug("Mark message %d with permanend error on status: %d %p", finishMessage.id, finishMessage.status, ps)
break
}
//mark all including this one as success
if finishMessage.status == 10 {
messageList.SetSuccess(finishMessage.id)
messageList.SetAllFailed(fmt.Errorf("Apple it's in mantainance mode, socket shutdown"))
logdebug("Mark message %d with success and the rest with temp error %p", finishMessage.id, ps)
break
} //success until including id
//the rest of status codes we don't know, we consider that it's another problem
//from apple so mark the message as temporar error and the front as success
messageList.SetAllFailed(statusToError(finishMessage.status))
logdebug("Mark all messages with error: %s - %p", statusToError(finishMessage.status), ps)
}
}
}
type apnsReceiveMessage struct {
unknownMessage bool
lastMessage bool
status uint8
id uint32
}
type ApnsMessage struct {
PayloadInterface
DeviceToken string
Error error
id uint32
lastSentTime time.Time
receivedTime time.Time
sentTimes uint
}
func (a *ApnsMessage) SetStatus(err error, permanentError bool) {
a.Error = err
shouldConfirm := false
if err == nil || permanentError {
shouldConfirm = true
}
a.sentTimes++
if a.sentTimes >= defaultConfig.MaxMessageRetry {
shouldConfirm = true
}
if shouldConfirm {
nr := ¬ifReport{confirmTime: time.Since(a.lastSentTime), sendingTime: time.Since(a.receivedTime)}
if a.Error != nil {
nr.failed = 1
} else {
nr.success = 1
}
pushPool.reportQueue <- nr
confirmMessage(a)
} else {
pushPool.reportQueue <- ¬ifReport{failed: 1, resent: 1, confirmTime: time.Since(a.lastSentTime)}
resendMessage(a)
}
}
func newMessageQueue() *apnsMessageQueue {
return &apnsMessageQueue{list.New()}
}
type apnsMessageQueue struct {
mList *list.List
}
func (amq *apnsMessageQueue) PushBack(m *ApnsMessage) {
m.lastSentTime = time.Now()
amq.mList.PushBack(m)
}
func (amq *apnsMessageQueue) SetSuccessOlder(older time.Duration) {
var next *list.Element
for e := amq.mList.Front(); e != nil; e = next {
next = e.Next()
message := e.Value.(*ApnsMessage)
if time.Since(message.lastSentTime) > older {
logdebug("Mark message %d as success after timeout", message.id)
message.SetStatus(nil, false)
amq.mList.Remove(e)
}
}
}
func (amq *apnsMessageQueue) SetSuccess(id uint32) {
var next *list.Element
for e := amq.mList.Front(); e != nil; e = next {
next = e.Next()
message := e.Value.(*ApnsMessage)
message.SetStatus(nil, false)
amq.mList.Remove(e)
if message.id == id {
return
}
}
logwarn("I didn't found message %d to set it as success", id)
}
func (amq *apnsMessageQueue) SetFailed(id uint32, err error, permanentError bool) |
func (amq *apnsMessageQueue) SetLastFailed(err error) {
last := amq.mList.Back()
if last == nil {
logdebug("No message found in the queue when i wanted to delete the last one")
return
}
message := last.Value.(*ApnsMessage)
logdebug("Set LAST message %d to status failed", message.id)
amq.SetFailed(message.id, err, true)
}
func (amq *apnsMessageQueue) SetAllFailed(err error) {
logdebug("Set all messages to failed: %s", err)
var next *list.Element
for e := amq.mList.Front(); e != nil; e = next {
next = e.Next()
message := e.Value.(*ApnsMessage)
message.SetStatus(err, false)
amq.mList.Remove(e)
}
}
type PayloadInterface interface {
ToJson() ([]byte, error)
Config() (expiration uint32, priority uint8)
}
func statusToError(status uint8) error {
e, ok := appleErrorMap[status]
if ok {
return fmt.Errorf(e)
}
return fmt.Errorf("Unknown apple status error, check doc, maybe it's new: %d", status)
}
var appleErrorMap = map[uint8]string{
0: "No errors encountered",
1: "Processing Errors",
2: "Missing device token",
3: "Missing topic",
4: "Missing payload",
5: "Invalid token size",
6: "Invalid topic size",
7: "Invalid payload size",
8: "Invalid token",
10: "Shutdown",
255: "None (unknown)",
}
var cachedTlsConfig *tls.Config
func getTlsConfig() (cfg *tls.Config, err error) {
if cachedTlsConfig != nil {
return cachedTlsConfig, nil
}
var cert tls.Certificate
if defaultConfig.isCertificateFile {
cert, err = tls.LoadX509KeyPair(defaultConfig.Certificate, defaultConfig.CertificateKey)
} else {
cert, err = tls.X509KeyPair([]byte(defaultConfig.Certificate), []byte(defaultConfig.CertificateKey))
}
if err != nil {
return nil, err
}
gatewayParts := strings.Split(defaultConfig.gateway, ":")
cachedTlsConfig = &tls.Config{
Certificates: []tls.Certificate{cert},
ServerName: gatewayParts[0],
}
return cachedTlsConfig, nil
}
| {
var next *list.Element
logdebug("Set message %d to status failed permanent: %t (%s)", id, permanentError, err)
for e := amq.mList.Front(); e != nil; e = next {
next = e.Next()
message := e.Value.(*ApnsMessage)
amq.mList.Remove(e)
if message.id == id {
message.SetStatus(err, permanentError)
return
} else {
message.SetStatus(nil, false)
}
}
logwarn("I didn't found message %d to set it as failed, permanent: %t (%s)", id, permanentError, err)
} | identifier_body |
view_test.go | package api
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"math/rand"
"net/http"
"net/http/httptest"
"net/url"
"os"
"path/filepath"
"sync"
"testing"
"time"
"github.com/labstack/echo/v4"
"github.com/labstack/gommon/log"
"github.com/nsf/jsondiff"
"github.com/udovin/solve/config"
"github.com/udovin/solve/core"
"github.com/udovin/solve/db"
"github.com/udovin/solve/invoker"
"github.com/udovin/solve/migrations"
"github.com/udovin/solve/models"
)
type TestEnv struct {
tb testing.TB
checks *testCheckState
Core *core.Core
Server *httptest.Server
Client *testClient
Socket *testClient
Now time.Time
Rand *rand.Rand
}
func (e *TestEnv) SyncStores() {
ctx := context.Background()
if err := e.Core.Accounts.Sync(ctx); err != nil {
e.tb.Fatal("Error:", err)
}
if err := e.Core.Users.Sync(ctx); err != nil {
e.tb.Fatal("Error:", err)
}
if err := e.Core.Sessions.Sync(ctx); err != nil {
e.tb.Fatal("Error:", err)
}
if err := e.Core.Roles.Sync(ctx); err != nil {
e.tb.Fatal("Error:", err)
}
if err := e.Core.RoleEdges.Sync(ctx); err != nil {
e.tb.Fatal("Error:", err)
}
if err := e.Core.AccountRoles.Sync(ctx); err != nil {
e.tb.Fatal("Error:", err)
}
if err := e.Core.Contests.Sync(ctx); err != nil {
e.tb.Fatal("Error:", err)
}
if err := e.Core.Problems.Sync(ctx); err != nil {
e.tb.Fatal("Error:", err)
}
if err := e.Core.Compilers.Sync(ctx); err != nil {
e.tb.Fatal("Error:", err)
}
if err := e.Core.Settings.Sync(ctx); err != nil {
e.tb.Fatal("Error:", err)
}
}
func (e *TestEnv) CreateUserRoles(login string, roles ...string) error {
for _, role := range roles {
if _, err := e.Socket.CreateUserRole(context.Background(), login, role); err != nil {
return err
}
}
return nil
}
func (e *TestEnv) Check(data any) {
e.checks.Check(data)
}
func (e *TestEnv) Close() {
e.Server.Close()
e.Core.Stop()
_ = db.ApplyMigrations(context.Background(), e.Core.DB, "solve", migrations.Schema, db.WithZeroMigration)
_ = db.ApplyMigrations(context.Background(), e.Core.DB, "solve_data", migrations.Data, db.WithZeroMigration)
e.checks.Close()
}
func (e *TestEnv) WaitProblemUpdated(id int64) {
for {
if err := e.Core.Tasks.Sync(context.Background()); err != nil {
e.tb.Fatal("Error:", err)
}
tasks, err := e.Core.Tasks.FindByProblem(id)
if err != nil {
e.tb.Fatal("Error:", err)
}
if len(tasks) == 0 {
e.tb.Fatal("Empty problem tasks")
}
if tasks[0].Status == models.SucceededTask {
return
}
if tasks[0].Status == models.FailedTask {
e.tb.Fatalf("Task failed: %q", string(tasks[0].State))
}
time.Sleep(time.Second)
}
}
type TestEnvOption interface {
UpdateConfig(*config.Config)
Setup(*TestEnv) error
}
type WithInvoker struct{}
func (o WithInvoker) UpdateConfig(cfg *config.Config) {
cfg.Invoker = &config.Invoker{
Workers: 1,
Safeexec: config.Safeexec{
Path: "../safeexec/safeexec",
},
}
}
func (o WithInvoker) Setup(env *TestEnv) error {
return invoker.New(env.Core).Start()
}
func NewTestEnv(tb testing.TB, options ...TestEnvOption) *TestEnv {
env := TestEnv{
tb: tb,
checks: newTestCheckState(tb),
Now: time.Date(2020, 1, 1, 10, 0, 0, 0, time.UTC),
Rand: rand.New(rand.NewSource(42)),
}
cfg := config.Config{
DB: config.DB{
Options: config.SQLiteOptions{Path: ":memory:"},
},
Security: &config.Security{
PasswordSalt: "qwerty123",
},
Storage: &config.Storage{
Options: config.LocalStorageOptions{
FilesDir: tb.TempDir(),
},
},
}
if _, ok := tb.(*testing.B); ok || os.Getenv("TEST_ENABLE_LOGS") != "1" {
log.SetLevel(log.OFF)
cfg.LogLevel = config.LogLevel(log.OFF)
}
for _, option := range options {
option.UpdateConfig(&cfg)
}
if c, err := core.NewCore(cfg); err != nil {
tb.Fatal("Error:", err)
} else {
env.Core = c
}
env.Core.SetupAllStores()
ctx := context.Background()
_ = db.ApplyMigrations(ctx, env.Core.DB, "solve", migrations.Schema, db.WithZeroMigration)
_ = db.ApplyMigrations(ctx, env.Core.DB, "solve_data", migrations.Data, db.WithZeroMigration)
if err := db.ApplyMigrations(ctx, env.Core.DB, "solve", migrations.Schema); err != nil {
tb.Fatal("Error:", err)
}
if err := db.ApplyMigrations(ctx, env.Core.DB, "solve_data", migrations.Data); err != nil {
tb.Fatal("Error:", err)
}
if err := env.Core.Start(); err != nil {
tb.Fatal("Error:", err)
}
e := echo.New()
e.Logger = env.Core.Logger()
view := NewView(env.Core)
nowFn := func(next echo.HandlerFunc) echo.HandlerFunc {
return func(c echo.Context) error {
c.Set(nowKey, env.Now)
return next(c)
}
}
e.Use(nowFn)
view.Register(e.Group("/api"))
view.RegisterSocket(e.Group("/socket"))
view.StartDaemons()
env.Server = httptest.NewServer(e)
env.Client = newTestClient(env.Server.URL + "/api")
env.Socket = newTestClient(env.Server.URL + "/socket")
for _, option := range options {
option.Setup(&env)
}
return &env
}
type TestUser struct {
User
Password string
env *TestEnv
}
func (u *TestUser) LoginClient() {
_, err := u.env.Client.Login(context.Background(), u.User.Login, u.Password)
if err != nil {
u.env.tb.Fatal("Error:", err)
}
}
func (u *TestUser) LogoutClient() {
if err := u.env.Client.Logout(context.Background()); err != nil {
u.env.tb.Fatal("Error:", err)
}
}
func (u *TestUser) AddRoles(names ...string) |
func NewTestUser(e *TestEnv) *TestUser {
login := fmt.Sprintf("login-%d", e.Rand.Int31())
password := fmt.Sprintf("password-%d", e.Rand.Int63())
user, err := e.Client.Register(context.Background(), RegisterUserForm{
Login: login,
Email: login + "@example.com",
Password: password,
FirstName: "First",
LastName: "Last",
MiddleName: "Middle",
})
if err != nil {
e.tb.Fatal("Error:", err)
}
return &TestUser{
User: user,
Password: password,
env: e,
}
}
type testCheckState struct {
tb testing.TB
checks []json.RawMessage
pos int
reset bool
path string
}
func (s *testCheckState) Check(data any) {
raw, err := json.MarshalIndent(data, "", " ")
if err != nil {
s.tb.Fatal("Unable to marshal data:", data)
}
if s.pos > len(s.checks) {
s.tb.Fatalf("Invalid check position: %d", s.pos)
}
if s.pos == len(s.checks) {
if s.reset {
s.checks = append(s.checks, raw)
s.pos++
return
}
s.tb.Errorf("Unexpected check with data: %s", raw)
s.tb.Fatalf("Maybe you should use: TEST_RESET_DATA=1")
}
options := jsondiff.DefaultConsoleOptions()
diff, report := jsondiff.Compare(s.checks[s.pos], raw, &options)
if diff != jsondiff.FullMatch {
if s.reset {
s.checks[s.pos] = raw
s.pos++
return
}
s.tb.Errorf("Unexpected result difference: %s", report)
s.tb.Fatalf("Maybe you should use: TEST_RESET_DATA=1")
}
s.pos++
}
func (s *testCheckState) Close() {
if s.reset {
if s.pos == 0 {
_ = os.Remove(s.path)
return
}
raw, err := json.MarshalIndent(s.checks, "", " ")
if err != nil {
s.tb.Fatal("Unable to marshal test data:", err)
}
if err := os.WriteFile(
s.path, raw, os.ModePerm,
); err != nil {
s.tb.Fatal("Error:", err)
}
}
}
func newTestCheckState(tb testing.TB) *testCheckState {
state := testCheckState{
tb: tb,
reset: os.Getenv("TEST_RESET_DATA") == "1",
path: filepath.Join("testdata", tb.Name()+".json"),
}
if !state.reset {
file, err := os.Open(state.path)
if err != nil {
if !errors.Is(err, os.ErrNotExist) {
tb.Fatal("Error:", err)
}
} else {
defer file.Close()
if err := json.NewDecoder(file).Decode(&state.checks); err != nil {
tb.Fatal("Error:", err)
}
}
}
return &state
}
type testClient struct {
*Client
}
type testJar struct {
mutex sync.Mutex
cookies map[string]*http.Cookie
}
func (j *testJar) Cookies(*url.URL) []*http.Cookie {
j.mutex.Lock()
defer j.mutex.Unlock()
var cookies []*http.Cookie
for _, cookie := range j.cookies {
cookies = append(cookies, cookie)
}
return cookies
}
func (j *testJar) SetCookies(u *url.URL, cookies []*http.Cookie) {
j.mutex.Lock()
defer j.mutex.Unlock()
if j.cookies == nil {
j.cookies = map[string]*http.Cookie{}
}
for _, cookie := range cookies {
j.cookies[cookie.Name] = cookie
}
}
func newTestClient(endpoint string) *testClient {
client := NewClient(endpoint)
client.client.Jar = &testJar{}
client.Headers = map[string]string{"X-Solve-Sync": "1"}
return &testClient{client}
}
func (c *testClient) Status() (Status, error) {
req, err := http.NewRequest(http.MethodGet, c.getURL("/v0/status"), nil)
if err != nil {
return Status{}, err
}
var respData Status
_, err = c.doRequest(req, http.StatusOK, &respData)
return respData, err
}
func (c *testClient) ObserveUser(login string) (User, error) {
req, err := http.NewRequest(
http.MethodGet, c.getURL("/v0/users/%s", login), nil,
)
if err != nil {
return User{}, err
}
var respData User
_, err = c.doRequest(req, http.StatusOK, &respData)
return respData, err
}
func (c *testClient) ObserveContests() (Contests, error) {
req, err := http.NewRequest(
http.MethodGet, c.getURL("/v0/contests"), nil,
)
if err != nil {
return Contests{}, err
}
var respData Contests
_, err = c.doRequest(req, http.StatusOK, &respData)
return respData, err
}
func (c *testClient) CreateContest(form createContestForm) (Contest, error) {
data, err := json.Marshal(form)
if err != nil {
return Contest{}, err
}
req, err := http.NewRequest(
http.MethodPost, c.getURL("/v0/contests"),
bytes.NewReader(data),
)
if err != nil {
return Contest{}, err
}
var respData Contest
_, err = c.doRequest(req, http.StatusCreated, &respData)
return respData, err
}
func (c *testClient) CreateContestProblem(
contestID int64,
form createContestProblemForm,
) (ContestProblem, error) {
data, err := json.Marshal(form)
if err != nil {
return ContestProblem{}, err
}
req, err := http.NewRequest(
http.MethodPost,
c.getURL("/v0/contests/%d/problems", contestID),
bytes.NewReader(data),
)
if err != nil {
return ContestProblem{}, err
}
var respData ContestProblem
_, err = c.doRequest(req, http.StatusCreated, &respData)
return respData, err
}
func (c *testClient) CreateRoleRole(role string, child string) (Role, error) {
req, err := http.NewRequest(
http.MethodPost, c.getURL("/v0/roles/%s/roles/%s", role, child),
nil,
)
if err != nil {
return Role{}, err
}
var respData Role
_, err = c.doRequest(req, http.StatusCreated, &respData)
return respData, err
}
func (c *testClient) DeleteRoleRole(role string, child string) (Role, error) {
req, err := http.NewRequest(
http.MethodDelete, c.getURL("/v0/roles/%s/roles/%s", role, child),
nil,
)
if err != nil {
return Role{}, err
}
var respData Role
_, err = c.doRequest(req, http.StatusOK, &respData)
return respData, err
}
func TestPing(t *testing.T) {
e := NewTestEnv(t)
defer e.Close()
if err := e.Client.Ping(context.Background()); err != nil {
t.Fatal("Error:", err)
}
}
func TestHealth(t *testing.T) {
e := NewTestEnv(t)
defer e.Close()
if err := e.Client.Health(context.Background()); err != nil {
t.Fatal("Error:", err)
}
}
func TestHealthUnhealthy(t *testing.T) {
e := NewTestEnv(t)
defer e.Close()
if err := e.Core.DB.Close(); err != nil {
t.Fatal("Error:", err)
}
err := e.Client.Health(context.Background())
resp, ok := err.(statusCodeResponse)
if !ok {
t.Fatal("Invalid error:", err)
}
expectStatus(t, http.StatusInternalServerError, resp.StatusCode())
}
func expectStatus(tb testing.TB, expected, got int) {
if got != expected {
tb.Fatalf("Expected %v, got %v", expected, got)
}
}
| {
if err := u.env.CreateUserRoles(u.User.Login, names...); err != nil {
u.env.tb.Fatal("Error:", err)
}
u.env.SyncStores()
} | identifier_body |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.