file_name
large_stringlengths
4
140
prefix
large_stringlengths
0
39k
suffix
large_stringlengths
0
36.1k
middle
large_stringlengths
0
29.4k
fim_type
large_stringclasses
4 values
main.rs
use crossbeam_queue::ArrayQueue; use crossbeam_utils::thread; use pico_args::Arguments; use std::arch::x86_64::*; use std::{ cell::Cell, f32::consts::PI, ops::Neg, ops::{Add, AddAssign, BitAnd, BitOr, Div, Mul, MulAssign, Sub}, time::Instant, }; const TOLERANCE: f32 = 0.0001; const SIMD_WIDTH: usize = 8; #[derive(Debug, Copy, Clone)] struct WideI32(__m256i); impl WideI32 { fn new(e7: i32, e6: i32, e5: i32, e4: i32, e3: i32, e2: i32, e1: i32, e0: i32) -> Self { Self(unsafe { _mm256_set_epi32(e7, e6, e5, e4, e3, e2, e1, e0) }) } fn splat(x: i32) -> Self { Self(unsafe { _mm256_set1_epi32(x) }) } fn select(x: WideI32, y: WideI32, mask: WideF32) -> Self { Self(unsafe { _mm256_castps_si256(_mm256_blendv_ps( _mm256_castsi256_ps(x.0), _mm256_castsi256_ps(y.0), mask.0, )) }) } } impl Add for WideI32 { type Output = Self; fn add(self, other: Self) -> Self { Self(unsafe { _mm256_add_epi32(self.0, other.0) }) } } impl AddAssign for WideI32 { fn add_assign(&mut self, other: Self) { self.0 = unsafe { _mm256_add_epi32(self.0, other.0) } } } #[derive(Debug, Copy, Clone)] struct WideF32(__m256); impl WideF32 { fn load(x: &[f32]) -> Self { assert!(x.len() >= SIMD_WIDTH); // aligning this would require some hoops on vec alloc // https://stackoverflow.com/questions/60180121/how-do-i-allocate-a-vecu8-that-is-aligned-to-the-size-of-the-cache-line Self(unsafe { _mm256_loadu_ps(x.as_ptr()) }) } fn any(&self) -> bool { self.mask() != 0 } fn mask(&self) -> i32 { unsafe { _mm256_movemask_ps(self.0) } } fn hmin(&self) -> f32 { unsafe { /* This can be done entirely in avx with permute2f128, but that is allegedly very slow on AMD prior to Zen2 (and is anecdotally slower on my Intels as well) initial m256 1 2 3 4 5 6 7 8 extract half, cast the other half down to m128, min 1 2 3 4 5 6 7 8 = 1 2 3 4 permute backwards, min 1 2 3 4 4 3 2 1 = 1 2 2 1 unpack hi, min 1 2 2 1 1 1 2 2 = 1 1 2 1 */ let x = self.0; let y = _mm256_extractf128_ps(x, 1); let m1 = _mm_min_ps(_mm256_castps256_ps128(x), y); let 
m2 = _mm_permute_ps(m1, 27); let m2 = _mm_min_ps(m1, m2); let m3 = _mm_unpackhi_ps(m2, m2); let m = _mm_min_ps(m2, m3); _mm_cvtss_f32(m) } } fn splat(x: f32) -> Self { Self(unsafe { _mm256_set1_ps(x) }) } fn select(x: WideF32, y: WideF32, mask: WideF32) -> Self { Self(unsafe { _mm256_blendv_ps(x.0, y.0, mask.0) }) } fn sqrt(&self) -> Self { Self(unsafe { _mm256_sqrt_ps(self.0) }) } #[allow(dead_code)] fn rsqrt(&self) -> Self { Self(unsafe { _mm256_rsqrt_ps(self.0) }) } // approximate a sqrt using an inverse sqrt and one iteration of Newton-Raphson // https://en.wikipedia.org/wiki/Methods_of_computing_square_roots#Iterative_methods_for_reciprocal_square_roots // Note: on many architectures this is significantly faster than the sqrt intrinsic. But this is not so on Skylake // for this program: approx_sqrt crowds the ports with additional mul/subs so is net slower #[allow(dead_code)] fn approx_sqrt(self) -> Self { let half = WideF32::splat(0.5); let three = WideF32::splat(3.0); let rsqrt = self.rsqrt(); let x = three - rsqrt * rsqrt * self; rsqrt * half * x * self } fn gt(&self, other: Self) -> Self { Self(unsafe { _mm256_cmp_ps(self.0, other.0, _CMP_GT_OQ) }) } fn lt(&self, other: Self) -> Self { Self(unsafe { _mm256_cmp_ps(self.0, other.0, _CMP_LT_OQ) }) } fn eq(&self, other: Self) -> Self { Self(unsafe { _mm256_cmp_ps(self.0, other.0, _CMP_EQ_OQ) }) } fn mul_add(x: Self, y: Self, z: Self) -> Self { Self(unsafe { _mm256_fmadd_ps(x.0, y.0, z.0) }) } fn mul_sub(x: Self, y: Self, z: Self) -> Self { Self(unsafe { _mm256_fmsub_ps(x.0, y.0, z.0) }) } } impl Add for WideF32 { type Output = Self; fn add(self, other: Self) -> Self { Self(unsafe { _mm256_add_ps(self.0, other.0) }) } } impl AddAssign for WideF32 { fn add_assign(&mut self, other: Self) { self.0 = unsafe { _mm256_add_ps(self.0, other.0) } } } impl BitAnd for WideF32 { type Output = Self; fn bitand(self, other: Self) -> Self { Self(unsafe { _mm256_and_ps(self.0, other.0) }) } } impl BitOr for WideF32 { type 
Output = Self; fn bitor(self, other: Self) -> Self { Self(unsafe { _mm256_or_ps(self.0, other.0) }) } } impl Div for WideF32 { type Output = Self; fn div(self, other: Self) -> Self { Self(unsafe { _mm256_div_ps(self.0, other.0) }) } } impl Sub for WideF32 { type Output = Self; fn sub(self, other: Self) -> Self { Self(unsafe { _mm256_sub_ps(self.0, other.0) }) } } impl Mul for WideF32 { type Output = Self; fn mul(self, other: Self) -> Self { Self(unsafe { _mm256_mul_ps(self.0, other.0) }) } } impl MulAssign for WideF32 { fn mul_assign(&mut self, other: Self) { self.0 = unsafe { _mm256_mul_ps(self.0, other.0) } } } impl Neg for WideF32 { type Output = Self; fn neg(self) -> Self { Self(unsafe { _mm256_xor_ps(self.0, _mm256_set1_ps(-0.0)) }) } } #[derive(Debug, Copy, Clone, PartialEq)] struct V3(f32, f32, f32); impl V3 { fn dot(self, other: V3) -> f32 { self.0 * other.0 + self.1 * other.1 + self.2 * other.2 } fn cross(self, other: V3) -> V3 { V3( self.1 * other.2 - self.2 * other.1, self.2 * other.0 - self.0 * other.2, self.0 * other.1 - self.1 * other.0, ) } fn normalize(self) -> V3 { self * (1.0 / self.len()) } fn reflect(self, normal: V3) -> V3 { self - normal * self.dot(normal) * 2.0 } fn len(self) -> f32 { self.dot(self).sqrt() } fn is_unit_vector(self) -> bool { (self.dot(self) - 1.0).abs() < TOLERANCE } } impl Add for V3 { type Output = Self; fn add(self, other: Self) -> Self { Self(self.0 + other.0, self.1 + other.1, self.2 + other.2) } } impl Add<f32> for V3 { type Output = Self; fn add(self, rhs: f32) -> Self { Self(self.0 + rhs, self.1 + rhs, self.2 + rhs) } } impl AddAssign for V3 { fn add_assign(&mut self, other: Self) { *self = Self(self.0 + other.0, self.1 + other.1, self.2 + other.2) } } impl Div<f32> for V3 { type Output = Self; fn div(self, rhs: f32) -> Self { Self(self.0 / rhs, self.1 / rhs, self.2 / rhs) } } impl Sub for V3 { type Output = Self; fn sub(self, other: Self) -> Self { Self(self.0 - other.0, self.1 - other.1, self.2 - other.2) } } impl 
Sub<f32> for V3 { type Output = Self; fn sub(self, rhs: f32) -> Self { Self(self.0 - rhs, self.1 - rhs, self.2 - rhs) } } impl Mul for V3 { type Output = Self; fn mul(self, other: Self) -> Self { Self(self.0 * other.0, self.1 * other.1, self.2 * other.2) } } impl Mul<f32> for V3 { type Output = Self; fn mul(self, rhs: f32) -> Self { Self(self.0 * rhs, self.1 * rhs, self.2 * rhs) } } impl MulAssign<f32> for V3 { fn mul_assign(&mut self, rhs: f32) { *self = Self(self.0 * rhs, self.1 * rhs, self.2 * rhs) } } impl MulAssign for V3 { fn mul_assign(&mut self, other: Self) { *self = Self(self.0 * other.0, self.1 * other.1, self.2 * other.2) } } #[derive(Debug)] struct Camera { origin: V3, x: V3, y: V3, z: V3, film_lower_left: V3, film_width: f32, film_height: f32, } impl Camera { fn new(look_from: V3, look_at: V3, aspect_ratio: f32) -> Camera { assert!(aspect_ratio > 1.0, "width must be greater than height"); let origin = look_from - look_at; let z = origin.normalize(); let x = V3(0.0, 0.0, 1.0).cross(z).normalize(); let y = z.cross(x).normalize(); let film_height = 1.0; let film_width = film_height * aspect_ratio; let film_lower_left = origin - z - y * 0.5 * film_height - x * 0.5 * film_width; Camera { origin, x, y, z, film_lower_left, film_width, film_height, } } } #[derive(Debug, Clone, PartialEq)] enum MaterialType { Diffuse, Specular, } #[derive(Debug, Clone, PartialEq)] struct Material { emit_color: V3, reflect_color: V3, t: MaterialType, } struct Sphere { p: V3, rsqrd: f32, m: Material, } impl Sphere { fn new(p: V3, r: f32, m: Material) -> Sphere { Sphere { p, rsqrd: r * r, m } } } struct Spheres { xs: Vec<f32>, ys: Vec<f32>, zs: Vec<f32>, rsqrds: Vec<f32>, mats: Vec<Material>, } impl Spheres { fn new(spheres: Vec<Sphere>) -> Self { let len = (spheres.len() + SIMD_WIDTH - 1) / SIMD_WIDTH * SIMD_WIDTH; let mut me = Self { xs: Vec::with_capacity(len), ys: Vec::with_capacity(len), zs: Vec::with_capacity(len), rsqrds: Vec::with_capacity(len), mats: 
Vec::with_capacity(len), }; for s in spheres { me.xs.push(s.p.0); me.ys.push(s.p.1); me.zs.push(s.p.2); me.rsqrds.push(s.rsqrd); me.mats.push(s.m); } // pad everything out to the simd width me.xs.resize(len, 0.0); me.ys.resize(len, 0.0); me.zs.resize(len, 0.0); me.rsqrds.resize(len, 0.0); let default_mat = Material { emit_color: V3(0.0, 0.0, 0.0), reflect_color: V3(0.0, 0.0, 0.0), t: MaterialType::Specular, }; me.mats.resize(len, default_mat); me } fn len(&self) -> usize { self.xs.len() } } // https://entropymine.com/imageworsener/srgbformula/ fn linear_to_srgb(x: f32) -> f32 { if x < 0.0 { 0.0 } else if x > 1.0 { 1.0 } else if x > 0.0031308 { 1.055 * x.powf(1.0 / 2.4) - 0.055 } else { x * 12.92 } } thread_local! { static THREAD_RNG: Cell<u64> = { let mut buf = [0u8; 8]; getrandom::getrandom(&mut buf).unwrap(); Cell::new(u64::from_le_bytes(buf)) }; } fn rand_seed() -> u32 { let mut buf = [0u8; 4]; getrandom::getrandom(&mut buf).unwrap(); u32::from_le_bytes(buf) } #[allow(dead_code)] fn thread_rand() -> u32 { // TODO(eli): thread local perf is terrible; causes function call and branching THREAD_RNG.with(|rng_cell| { let mut state = rng_cell.get(); let randu = pcg(&mut state); rng_cell.set(state); randu }) } // Algorithm "xor" from p. 4 of Marsaglia, "Xorshift RNGs" // xorshift isn't great, but is good enough for our purposes and has two // nice properties: // 1. it only needs a u32 of state to generate a u32 // 2. 
it's easy to SIMD fn xorshift(state: &mut u32) -> u32 { debug_assert!(*state != 0, "xorshift cannot be seeded with 0"); let mut x = *state; x ^= x << 13; x ^= x >> 17; x ^= x << 5; *state = x; x } // pcg xsh rs 64/32 (mcg) #[allow(dead_code)] fn pcg(state: &mut u64) -> u32 { let s = *state; *state = s.wrapping_mul(6364136223846793005); (((s >> 22) ^ s) >> ((s >> 61) + 22)) as u32 } fn randf(state: &mut u32) -> f32 { let randu = (xorshift(state) >> 9) | 0x3f800000; let randf = f32::from_bits(randu) - 1.0; randf } fn randf_range(state: &mut u32, min: f32, max: f32) -> f32 { min + (max - min) * randf(state) } #[inline(always)] fn cast( rng_state: &mut u32, bg: &Material, spheres: &Spheres, mut origin: V3, mut dir: V3, mut bounces: u32, ) -> (V3, u32) { let mut color = V3(0.0, 0.0, 0.0); let mut reflectance = V3(1.0, 1.0, 1.0); let orig_bounces = bounces; loop { debug_assert!(dir.is_unit_vector()); let origin_xs = WideF32::splat(origin.0); let origin_ys = WideF32::splat(origin.1); let origin_zs = WideF32::splat(origin.2); let dir_x = WideF32::splat(dir.0); let dir_y = WideF32::splat(dir.1); let dir_z = WideF32::splat(dir.2); let mut hit_ids = WideI32::splat(-1); let mut hit_dists = WideF32::splat(f32::MAX); let mut iteration_ids = WideI32::new(7, 6, 5, 4, 3, 2, 1, 0); // TODO(eli): egregious bounds checking here for i in (0..spheres.len()).step_by(SIMD_WIDTH) { let sphere_xs = WideF32::load(&spheres.xs[i..i + SIMD_WIDTH]); let sphere_ys = WideF32::load(&spheres.ys[i..i + SIMD_WIDTH]); let sphere_zs = WideF32::load(&spheres.zs[i..i + SIMD_WIDTH]); let sphere_rsqrds = WideF32::load(&spheres.rsqrds[i..i + SIMD_WIDTH]); // this is sphere_relative_origin = origin - sphere_origin // but the math is flipped backwards because it saves us having to negate the b term let relative_xs = sphere_xs - origin_xs; let relative_ys = sphere_ys - origin_ys; let relative_zs = sphere_zs - origin_zs; let neg_b = dir_x * relative_xs; let neg_b = WideF32::mul_add(dir_y, relative_ys, neg_b); 
let neg_b = WideF32::mul_add(dir_z, relative_zs, neg_b); let c = WideF32::mul_sub(relative_xs, relative_xs, sphere_rsqrds); let c = WideF32::mul_add(relative_ys, relative_ys, c); let c = WideF32::mul_add(relative_zs, relative_zs, c); let discr = WideF32::mul_sub(neg_b, neg_b, c); let discrmask = discr.gt(WideF32::splat(0.0)); if discrmask.any() { let root_term = discr.sqrt(); let t0 = neg_b - root_term; let t1 = neg_b + root_term; // t0 if hit, else t1 let t = WideF32::select(t1, t0, t0.gt(WideF32::splat(TOLERANCE))); let mask = discrmask & t.gt(WideF32::splat(TOLERANCE)) & t.lt(hit_dists); hit_ids = WideI32::select(hit_ids, iteration_ids, mask); hit_dists = WideF32::select(hit_dists, t, mask); } iteration_ids += WideI32::splat(SIMD_WIDTH as i32); } let hmin = hit_dists.hmin(); if hmin < f32::MAX { let minmask = hit_dists.eq(WideF32::splat(hmin)).mask(); let min_idx = minmask.trailing_zeros() as usize; let hit_ids_arr: [i32; SIMD_WIDTH] = unsafe { std::mem::transmute(hit_ids.0) }; let hit_dists_arr: [f32; SIMD_WIDTH] = unsafe { std::mem::transmute(hit_dists.0) }; let id = hit_ids_arr[min_idx] as usize; let hit_dist = hit_dists_arr[min_idx]; let mat = &spheres.mats[id]; if bounces == 0 { color += reflectance * mat.emit_color; break; } else { bounces -= 1; color += reflectance * mat.emit_color; reflectance *= mat.reflect_color; let hit_point = origin + dir * hit_dist; origin = hit_point; dir = match mat.t { MaterialType::Specular => { let sp = V3(spheres.xs[id], spheres.ys[id], spheres.zs[id]); let hit_normal = (hit_point - sp).normalize(); dir.reflect(hit_normal) } MaterialType::Diffuse =>
} } } else { color += reflectance * bg.emit_color; break; } } (color, orig_bounces - bounces) } fn main() -> Result<(), Box<dyn std::error::Error>> { // flush denormals to zero unsafe { _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON) }; let mut args = Arguments::from_env(); let rays_per_pixel = args.opt_value_from_str(["-r", "--rays"])?.unwrap_or(100); let bounces = args.opt_value_from_str("--bounces")?.unwrap_or(8); let filename = args .opt_value_from_str("-o")? .unwrap_or("out.png".to_string()); args.finish()?; // Materials let bg = Material { emit_color: V3(0.3, 0.4, 0.8), reflect_color: V3(0.0, 0.0, 0.0), t: MaterialType::Specular, }; let ground = Material { emit_color: V3(0.0, 0.0, 0.0), reflect_color: V3(0.5, 0.5, 0.5), t: MaterialType::Diffuse, }; let left = Material { emit_color: V3(0.0, 0.0, 0.0), reflect_color: V3(1.0, 0.0, 0.0), t: MaterialType::Specular, }; let center = Material { emit_color: V3(0.4, 0.8, 0.9), reflect_color: V3(0.8, 0.8, 0.8), t: MaterialType::Specular, }; let right = Material { emit_color: V3(0.0, 0.0, 0.0), reflect_color: V3(0.95, 0.95, 0.95), t: MaterialType::Specular, }; let spheres = Spheres::new(vec![ Sphere::new(V3(0.0, 0.0, -100.0), 100.0, ground), Sphere::new(V3(0.0, 0.0, 1.0), 1.0, center), Sphere::new(V3(-2.0, -3.0, 1.5), 0.3, right.clone()), Sphere::new(V3(-3.0, -6.0, 0.0), 0.3, right.clone()), Sphere::new(V3(-3.0, -5.0, 2.0), 0.5, left.clone()), Sphere::new(V3(3.0, -3.0, 0.8), 1.0, right.clone()), Sphere::new(V3(-3.0, -3.0, 2.0), 0.5, left.clone()), Sphere::new(V3(5.0, -3.0, 0.8), 1.0, right), Sphere::new(V3(3.0, -3.0, 2.0), 0.5, left), ]); let width = 1920; let height = 1080; let inv_width = 1.0 / (width as f32 - 1.0); let inv_height = 1.0 / (height as f32 - 1.0); let mut pixels = vec![V3(0.0, 0.0, 0.0); width * height]; let cam = Camera::new( V3(0.0, -10.0, 1.0), V3(0.0, 0.0, 0.0), width as f32 / height as f32, ); let threads = num_cpus::get(); let chunk_size = 1024; assert!(width * height % chunk_size == 0); let start = 
Instant::now(); { let jobs = ArrayQueue::new(width * height / chunk_size); pixels .chunks_mut(chunk_size) .enumerate() .for_each(|(i, chunk)| { jobs.push((i, chunk)).unwrap(); }); thread::scope(|s| { let handles: Vec<_> = (0..threads) .map(|_| { s.spawn(|_| { let mut rng_state = rand_seed(); let mut ray_count: u64 = 0; while let Some((i, chunk)) = jobs.pop() { let mut i = i * chunk.len(); for color in chunk { let image_x = (i % width) as f32; let image_y = (height - (i / width) - 1) as f32; // flip image right-side-up i += 1; for _ in 0..rays_per_pixel { // calculate ratio we've moved along the image (y/height), step proportionally within the film let rand_x = randf(&mut rng_state); let rand_y = randf(&mut rng_state); let film_x = cam.x * cam.film_width * (image_x + rand_x) * inv_width; let film_y = cam.y * cam.film_height * (image_y + rand_y) * inv_height; let film_p = cam.film_lower_left + film_x + film_y; // remember that a pixel in float-space is a _range_. We want to send multiple rays within that range // to do this we take the start of that range (what we calculated as the image projecting onto our film), // then add a random [0,1) float let ray_dir = (film_p - cam.origin).normalize(); let (c, r) = cast( &mut rng_state, &bg, &spheres, cam.origin, ray_dir, bounces, ); *color += c; ray_count += 1 + r as u64; } } } ray_count }) }) .collect(); let total_rays_shot: u64 = handles.into_iter().map(|h| h.join().unwrap()).sum(); println!( "{:.3} Mray/s", total_rays_shot as f64 / 1_000_000.0 / start.elapsed().as_secs_f64() ); }) .unwrap(); } let mut buf: Vec<u8> = Vec::with_capacity(width * height * 3); for p in &pixels { let p = *p / rays_per_pixel as f32; buf.push((255.0 * linear_to_srgb(p.0)) as u8); buf.push((255.0 * linear_to_srgb(p.1)) as u8); buf.push((255.0 * linear_to_srgb(p.2)) as u8); } image::save_buffer( filename, &buf, width as u32, height as u32, image::ColorType::Rgb8, )?; println!("Rendering took {:.3}s", start.elapsed().as_secs_f32()); Ok(()) }
{ let a = randf_range(rng_state, 0.0, 2.0 * PI); let z = randf_range(rng_state, -1.0, 1.0); let r = (1.0 - z * z).sqrt(); V3(r * a.cos(), r * a.sin(), z) }
conditional_block
main.rs
use crossbeam_queue::ArrayQueue; use crossbeam_utils::thread; use pico_args::Arguments; use std::arch::x86_64::*; use std::{ cell::Cell, f32::consts::PI, ops::Neg, ops::{Add, AddAssign, BitAnd, BitOr, Div, Mul, MulAssign, Sub}, time::Instant, }; const TOLERANCE: f32 = 0.0001; const SIMD_WIDTH: usize = 8; #[derive(Debug, Copy, Clone)] struct WideI32(__m256i); impl WideI32 { fn new(e7: i32, e6: i32, e5: i32, e4: i32, e3: i32, e2: i32, e1: i32, e0: i32) -> Self { Self(unsafe { _mm256_set_epi32(e7, e6, e5, e4, e3, e2, e1, e0) }) } fn splat(x: i32) -> Self { Self(unsafe { _mm256_set1_epi32(x) }) } fn select(x: WideI32, y: WideI32, mask: WideF32) -> Self { Self(unsafe { _mm256_castps_si256(_mm256_blendv_ps( _mm256_castsi256_ps(x.0), _mm256_castsi256_ps(y.0), mask.0, )) }) } } impl Add for WideI32 { type Output = Self; fn add(self, other: Self) -> Self { Self(unsafe { _mm256_add_epi32(self.0, other.0) }) } } impl AddAssign for WideI32 { fn add_assign(&mut self, other: Self) { self.0 = unsafe { _mm256_add_epi32(self.0, other.0) } } } #[derive(Debug, Copy, Clone)] struct WideF32(__m256); impl WideF32 { fn load(x: &[f32]) -> Self { assert!(x.len() >= SIMD_WIDTH); // aligning this would require some hoops on vec alloc // https://stackoverflow.com/questions/60180121/how-do-i-allocate-a-vecu8-that-is-aligned-to-the-size-of-the-cache-line Self(unsafe { _mm256_loadu_ps(x.as_ptr()) }) } fn any(&self) -> bool { self.mask() != 0 } fn mask(&self) -> i32 { unsafe { _mm256_movemask_ps(self.0) } } fn hmin(&self) -> f32 { unsafe { /* This can be done entirely in avx with permute2f128, but that is allegedly very slow on AMD prior to Zen2 (and is anecdotally slower on my Intels as well) initial m256 1 2 3 4 5 6 7 8 extract half, cast the other half down to m128, min 1 2 3 4 5 6 7 8 = 1 2 3 4 permute backwards, min 1 2 3 4 4 3 2 1 = 1 2 2 1 unpack hi, min 1 2 2 1 1 1 2 2 = 1 1 2 1 */ let x = self.0; let y = _mm256_extractf128_ps(x, 1); let m1 = _mm_min_ps(_mm256_castps256_ps128(x), y); let 
m2 = _mm_permute_ps(m1, 27); let m2 = _mm_min_ps(m1, m2); let m3 = _mm_unpackhi_ps(m2, m2); let m = _mm_min_ps(m2, m3); _mm_cvtss_f32(m) } } fn splat(x: f32) -> Self { Self(unsafe { _mm256_set1_ps(x) }) } fn select(x: WideF32, y: WideF32, mask: WideF32) -> Self { Self(unsafe { _mm256_blendv_ps(x.0, y.0, mask.0) }) } fn sqrt(&self) -> Self { Self(unsafe { _mm256_sqrt_ps(self.0) }) } #[allow(dead_code)] fn rsqrt(&self) -> Self { Self(unsafe { _mm256_rsqrt_ps(self.0) }) } // approximate a sqrt using an inverse sqrt and one iteration of Newton-Raphson // https://en.wikipedia.org/wiki/Methods_of_computing_square_roots#Iterative_methods_for_reciprocal_square_roots // Note: on many architectures this is significantly faster than the sqrt intrinsic. But this is not so on Skylake // for this program: approx_sqrt crowds the ports with additional mul/subs so is net slower #[allow(dead_code)] fn approx_sqrt(self) -> Self { let half = WideF32::splat(0.5); let three = WideF32::splat(3.0); let rsqrt = self.rsqrt(); let x = three - rsqrt * rsqrt * self; rsqrt * half * x * self } fn gt(&self, other: Self) -> Self { Self(unsafe { _mm256_cmp_ps(self.0, other.0, _CMP_GT_OQ) }) } fn lt(&self, other: Self) -> Self { Self(unsafe { _mm256_cmp_ps(self.0, other.0, _CMP_LT_OQ) }) } fn eq(&self, other: Self) -> Self { Self(unsafe { _mm256_cmp_ps(self.0, other.0, _CMP_EQ_OQ) }) } fn mul_add(x: Self, y: Self, z: Self) -> Self { Self(unsafe { _mm256_fmadd_ps(x.0, y.0, z.0) }) } fn mul_sub(x: Self, y: Self, z: Self) -> Self { Self(unsafe { _mm256_fmsub_ps(x.0, y.0, z.0) }) } } impl Add for WideF32 { type Output = Self; fn add(self, other: Self) -> Self { Self(unsafe { _mm256_add_ps(self.0, other.0) }) } } impl AddAssign for WideF32 { fn add_assign(&mut self, other: Self) { self.0 = unsafe { _mm256_add_ps(self.0, other.0) } } } impl BitAnd for WideF32 { type Output = Self; fn bitand(self, other: Self) -> Self { Self(unsafe { _mm256_and_ps(self.0, other.0) }) } } impl BitOr for WideF32 { type 
Output = Self; fn bitor(self, other: Self) -> Self
} impl Div for WideF32 { type Output = Self; fn div(self, other: Self) -> Self { Self(unsafe { _mm256_div_ps(self.0, other.0) }) } } impl Sub for WideF32 { type Output = Self; fn sub(self, other: Self) -> Self { Self(unsafe { _mm256_sub_ps(self.0, other.0) }) } } impl Mul for WideF32 { type Output = Self; fn mul(self, other: Self) -> Self { Self(unsafe { _mm256_mul_ps(self.0, other.0) }) } } impl MulAssign for WideF32 { fn mul_assign(&mut self, other: Self) { self.0 = unsafe { _mm256_mul_ps(self.0, other.0) } } } impl Neg for WideF32 { type Output = Self; fn neg(self) -> Self { Self(unsafe { _mm256_xor_ps(self.0, _mm256_set1_ps(-0.0)) }) } } #[derive(Debug, Copy, Clone, PartialEq)] struct V3(f32, f32, f32); impl V3 { fn dot(self, other: V3) -> f32 { self.0 * other.0 + self.1 * other.1 + self.2 * other.2 } fn cross(self, other: V3) -> V3 { V3( self.1 * other.2 - self.2 * other.1, self.2 * other.0 - self.0 * other.2, self.0 * other.1 - self.1 * other.0, ) } fn normalize(self) -> V3 { self * (1.0 / self.len()) } fn reflect(self, normal: V3) -> V3 { self - normal * self.dot(normal) * 2.0 } fn len(self) -> f32 { self.dot(self).sqrt() } fn is_unit_vector(self) -> bool { (self.dot(self) - 1.0).abs() < TOLERANCE } } impl Add for V3 { type Output = Self; fn add(self, other: Self) -> Self { Self(self.0 + other.0, self.1 + other.1, self.2 + other.2) } } impl Add<f32> for V3 { type Output = Self; fn add(self, rhs: f32) -> Self { Self(self.0 + rhs, self.1 + rhs, self.2 + rhs) } } impl AddAssign for V3 { fn add_assign(&mut self, other: Self) { *self = Self(self.0 + other.0, self.1 + other.1, self.2 + other.2) } } impl Div<f32> for V3 { type Output = Self; fn div(self, rhs: f32) -> Self { Self(self.0 / rhs, self.1 / rhs, self.2 / rhs) } } impl Sub for V3 { type Output = Self; fn sub(self, other: Self) -> Self { Self(self.0 - other.0, self.1 - other.1, self.2 - other.2) } } impl Sub<f32> for V3 { type Output = Self; fn sub(self, rhs: f32) -> Self { Self(self.0 - rhs, self.1 - rhs, 
self.2 - rhs) } } impl Mul for V3 { type Output = Self; fn mul(self, other: Self) -> Self { Self(self.0 * other.0, self.1 * other.1, self.2 * other.2) } } impl Mul<f32> for V3 { type Output = Self; fn mul(self, rhs: f32) -> Self { Self(self.0 * rhs, self.1 * rhs, self.2 * rhs) } } impl MulAssign<f32> for V3 { fn mul_assign(&mut self, rhs: f32) { *self = Self(self.0 * rhs, self.1 * rhs, self.2 * rhs) } } impl MulAssign for V3 { fn mul_assign(&mut self, other: Self) { *self = Self(self.0 * other.0, self.1 * other.1, self.2 * other.2) } } #[derive(Debug)] struct Camera { origin: V3, x: V3, y: V3, z: V3, film_lower_left: V3, film_width: f32, film_height: f32, } impl Camera { fn new(look_from: V3, look_at: V3, aspect_ratio: f32) -> Camera { assert!(aspect_ratio > 1.0, "width must be greater than height"); let origin = look_from - look_at; let z = origin.normalize(); let x = V3(0.0, 0.0, 1.0).cross(z).normalize(); let y = z.cross(x).normalize(); let film_height = 1.0; let film_width = film_height * aspect_ratio; let film_lower_left = origin - z - y * 0.5 * film_height - x * 0.5 * film_width; Camera { origin, x, y, z, film_lower_left, film_width, film_height, } } } #[derive(Debug, Clone, PartialEq)] enum MaterialType { Diffuse, Specular, } #[derive(Debug, Clone, PartialEq)] struct Material { emit_color: V3, reflect_color: V3, t: MaterialType, } struct Sphere { p: V3, rsqrd: f32, m: Material, } impl Sphere { fn new(p: V3, r: f32, m: Material) -> Sphere { Sphere { p, rsqrd: r * r, m } } } struct Spheres { xs: Vec<f32>, ys: Vec<f32>, zs: Vec<f32>, rsqrds: Vec<f32>, mats: Vec<Material>, } impl Spheres { fn new(spheres: Vec<Sphere>) -> Self { let len = (spheres.len() + SIMD_WIDTH - 1) / SIMD_WIDTH * SIMD_WIDTH; let mut me = Self { xs: Vec::with_capacity(len), ys: Vec::with_capacity(len), zs: Vec::with_capacity(len), rsqrds: Vec::with_capacity(len), mats: Vec::with_capacity(len), }; for s in spheres { me.xs.push(s.p.0); me.ys.push(s.p.1); me.zs.push(s.p.2); 
me.rsqrds.push(s.rsqrd); me.mats.push(s.m); } // pad everything out to the simd width me.xs.resize(len, 0.0); me.ys.resize(len, 0.0); me.zs.resize(len, 0.0); me.rsqrds.resize(len, 0.0); let default_mat = Material { emit_color: V3(0.0, 0.0, 0.0), reflect_color: V3(0.0, 0.0, 0.0), t: MaterialType::Specular, }; me.mats.resize(len, default_mat); me } fn len(&self) -> usize { self.xs.len() } } // https://entropymine.com/imageworsener/srgbformula/ fn linear_to_srgb(x: f32) -> f32 { if x < 0.0 { 0.0 } else if x > 1.0 { 1.0 } else if x > 0.0031308 { 1.055 * x.powf(1.0 / 2.4) - 0.055 } else { x * 12.92 } } thread_local! { static THREAD_RNG: Cell<u64> = { let mut buf = [0u8; 8]; getrandom::getrandom(&mut buf).unwrap(); Cell::new(u64::from_le_bytes(buf)) }; } fn rand_seed() -> u32 { let mut buf = [0u8; 4]; getrandom::getrandom(&mut buf).unwrap(); u32::from_le_bytes(buf) } #[allow(dead_code)] fn thread_rand() -> u32 { // TODO(eli): thread local perf is terrible; causes function call and branching THREAD_RNG.with(|rng_cell| { let mut state = rng_cell.get(); let randu = pcg(&mut state); rng_cell.set(state); randu }) } // Algorithm "xor" from p. 4 of Marsaglia, "Xorshift RNGs" // xorshift isn't great, but is good enough for our purposes and has two // nice properties: // 1. it only needs a u32 of state to generate a u32 // 2. 
it's easy to SIMD fn xorshift(state: &mut u32) -> u32 { debug_assert!(*state != 0, "xorshift cannot be seeded with 0"); let mut x = *state; x ^= x << 13; x ^= x >> 17; x ^= x << 5; *state = x; x } // pcg xsh rs 64/32 (mcg) #[allow(dead_code)] fn pcg(state: &mut u64) -> u32 { let s = *state; *state = s.wrapping_mul(6364136223846793005); (((s >> 22) ^ s) >> ((s >> 61) + 22)) as u32 } fn randf(state: &mut u32) -> f32 { let randu = (xorshift(state) >> 9) | 0x3f800000; let randf = f32::from_bits(randu) - 1.0; randf } fn randf_range(state: &mut u32, min: f32, max: f32) -> f32 { min + (max - min) * randf(state) } #[inline(always)] fn cast( rng_state: &mut u32, bg: &Material, spheres: &Spheres, mut origin: V3, mut dir: V3, mut bounces: u32, ) -> (V3, u32) { let mut color = V3(0.0, 0.0, 0.0); let mut reflectance = V3(1.0, 1.0, 1.0); let orig_bounces = bounces; loop { debug_assert!(dir.is_unit_vector()); let origin_xs = WideF32::splat(origin.0); let origin_ys = WideF32::splat(origin.1); let origin_zs = WideF32::splat(origin.2); let dir_x = WideF32::splat(dir.0); let dir_y = WideF32::splat(dir.1); let dir_z = WideF32::splat(dir.2); let mut hit_ids = WideI32::splat(-1); let mut hit_dists = WideF32::splat(f32::MAX); let mut iteration_ids = WideI32::new(7, 6, 5, 4, 3, 2, 1, 0); // TODO(eli): egregious bounds checking here for i in (0..spheres.len()).step_by(SIMD_WIDTH) { let sphere_xs = WideF32::load(&spheres.xs[i..i + SIMD_WIDTH]); let sphere_ys = WideF32::load(&spheres.ys[i..i + SIMD_WIDTH]); let sphere_zs = WideF32::load(&spheres.zs[i..i + SIMD_WIDTH]); let sphere_rsqrds = WideF32::load(&spheres.rsqrds[i..i + SIMD_WIDTH]); // this is sphere_relative_origin = origin - sphere_origin // but the math is flipped backwards because it saves us having to negate the b term let relative_xs = sphere_xs - origin_xs; let relative_ys = sphere_ys - origin_ys; let relative_zs = sphere_zs - origin_zs; let neg_b = dir_x * relative_xs; let neg_b = WideF32::mul_add(dir_y, relative_ys, neg_b); 
let neg_b = WideF32::mul_add(dir_z, relative_zs, neg_b); let c = WideF32::mul_sub(relative_xs, relative_xs, sphere_rsqrds); let c = WideF32::mul_add(relative_ys, relative_ys, c); let c = WideF32::mul_add(relative_zs, relative_zs, c); let discr = WideF32::mul_sub(neg_b, neg_b, c); let discrmask = discr.gt(WideF32::splat(0.0)); if discrmask.any() { let root_term = discr.sqrt(); let t0 = neg_b - root_term; let t1 = neg_b + root_term; // t0 if hit, else t1 let t = WideF32::select(t1, t0, t0.gt(WideF32::splat(TOLERANCE))); let mask = discrmask & t.gt(WideF32::splat(TOLERANCE)) & t.lt(hit_dists); hit_ids = WideI32::select(hit_ids, iteration_ids, mask); hit_dists = WideF32::select(hit_dists, t, mask); } iteration_ids += WideI32::splat(SIMD_WIDTH as i32); } let hmin = hit_dists.hmin(); if hmin < f32::MAX { let minmask = hit_dists.eq(WideF32::splat(hmin)).mask(); let min_idx = minmask.trailing_zeros() as usize; let hit_ids_arr: [i32; SIMD_WIDTH] = unsafe { std::mem::transmute(hit_ids.0) }; let hit_dists_arr: [f32; SIMD_WIDTH] = unsafe { std::mem::transmute(hit_dists.0) }; let id = hit_ids_arr[min_idx] as usize; let hit_dist = hit_dists_arr[min_idx]; let mat = &spheres.mats[id]; if bounces == 0 { color += reflectance * mat.emit_color; break; } else { bounces -= 1; color += reflectance * mat.emit_color; reflectance *= mat.reflect_color; let hit_point = origin + dir * hit_dist; origin = hit_point; dir = match mat.t { MaterialType::Specular => { let sp = V3(spheres.xs[id], spheres.ys[id], spheres.zs[id]); let hit_normal = (hit_point - sp).normalize(); dir.reflect(hit_normal) } MaterialType::Diffuse => { let a = randf_range(rng_state, 0.0, 2.0 * PI); let z = randf_range(rng_state, -1.0, 1.0); let r = (1.0 - z * z).sqrt(); V3(r * a.cos(), r * a.sin(), z) } } } } else { color += reflectance * bg.emit_color; break; } } (color, orig_bounces - bounces) } fn main() -> Result<(), Box<dyn std::error::Error>> { // flush denormals to zero unsafe { 
_MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON) }; let mut args = Arguments::from_env(); let rays_per_pixel = args.opt_value_from_str(["-r", "--rays"])?.unwrap_or(100); let bounces = args.opt_value_from_str("--bounces")?.unwrap_or(8); let filename = args .opt_value_from_str("-o")? .unwrap_or("out.png".to_string()); args.finish()?; // Materials let bg = Material { emit_color: V3(0.3, 0.4, 0.8), reflect_color: V3(0.0, 0.0, 0.0), t: MaterialType::Specular, }; let ground = Material { emit_color: V3(0.0, 0.0, 0.0), reflect_color: V3(0.5, 0.5, 0.5), t: MaterialType::Diffuse, }; let left = Material { emit_color: V3(0.0, 0.0, 0.0), reflect_color: V3(1.0, 0.0, 0.0), t: MaterialType::Specular, }; let center = Material { emit_color: V3(0.4, 0.8, 0.9), reflect_color: V3(0.8, 0.8, 0.8), t: MaterialType::Specular, }; let right = Material { emit_color: V3(0.0, 0.0, 0.0), reflect_color: V3(0.95, 0.95, 0.95), t: MaterialType::Specular, }; let spheres = Spheres::new(vec![ Sphere::new(V3(0.0, 0.0, -100.0), 100.0, ground), Sphere::new(V3(0.0, 0.0, 1.0), 1.0, center), Sphere::new(V3(-2.0, -3.0, 1.5), 0.3, right.clone()), Sphere::new(V3(-3.0, -6.0, 0.0), 0.3, right.clone()), Sphere::new(V3(-3.0, -5.0, 2.0), 0.5, left.clone()), Sphere::new(V3(3.0, -3.0, 0.8), 1.0, right.clone()), Sphere::new(V3(-3.0, -3.0, 2.0), 0.5, left.clone()), Sphere::new(V3(5.0, -3.0, 0.8), 1.0, right), Sphere::new(V3(3.0, -3.0, 2.0), 0.5, left), ]); let width = 1920; let height = 1080; let inv_width = 1.0 / (width as f32 - 1.0); let inv_height = 1.0 / (height as f32 - 1.0); let mut pixels = vec![V3(0.0, 0.0, 0.0); width * height]; let cam = Camera::new( V3(0.0, -10.0, 1.0), V3(0.0, 0.0, 0.0), width as f32 / height as f32, ); let threads = num_cpus::get(); let chunk_size = 1024; assert!(width * height % chunk_size == 0); let start = Instant::now(); { let jobs = ArrayQueue::new(width * height / chunk_size); pixels .chunks_mut(chunk_size) .enumerate() .for_each(|(i, chunk)| { jobs.push((i, chunk)).unwrap(); }); 
thread::scope(|s| { let handles: Vec<_> = (0..threads) .map(|_| { s.spawn(|_| { let mut rng_state = rand_seed(); let mut ray_count: u64 = 0; while let Some((i, chunk)) = jobs.pop() { let mut i = i * chunk.len(); for color in chunk { let image_x = (i % width) as f32; let image_y = (height - (i / width) - 1) as f32; // flip image right-side-up i += 1; for _ in 0..rays_per_pixel { // calculate ratio we've moved along the image (y/height), step proportionally within the film let rand_x = randf(&mut rng_state); let rand_y = randf(&mut rng_state); let film_x = cam.x * cam.film_width * (image_x + rand_x) * inv_width; let film_y = cam.y * cam.film_height * (image_y + rand_y) * inv_height; let film_p = cam.film_lower_left + film_x + film_y; // remember that a pixel in float-space is a _range_. We want to send multiple rays within that range // to do this we take the start of that range (what we calculated as the image projecting onto our film), // then add a random [0,1) float let ray_dir = (film_p - cam.origin).normalize(); let (c, r) = cast( &mut rng_state, &bg, &spheres, cam.origin, ray_dir, bounces, ); *color += c; ray_count += 1 + r as u64; } } } ray_count }) }) .collect(); let total_rays_shot: u64 = handles.into_iter().map(|h| h.join().unwrap()).sum(); println!( "{:.3} Mray/s", total_rays_shot as f64 / 1_000_000.0 / start.elapsed().as_secs_f64() ); }) .unwrap(); } let mut buf: Vec<u8> = Vec::with_capacity(width * height * 3); for p in &pixels { let p = *p / rays_per_pixel as f32; buf.push((255.0 * linear_to_srgb(p.0)) as u8); buf.push((255.0 * linear_to_srgb(p.1)) as u8); buf.push((255.0 * linear_to_srgb(p.2)) as u8); } image::save_buffer( filename, &buf, width as u32, height as u32, image::ColorType::Rgb8, )?; println!("Rendering took {:.3}s", start.elapsed().as_secs_f32()); Ok(()) }
{ Self(unsafe { _mm256_or_ps(self.0, other.0) }) }
identifier_body
main.rs
use crossbeam_queue::ArrayQueue; use crossbeam_utils::thread; use pico_args::Arguments; use std::arch::x86_64::*; use std::{ cell::Cell, f32::consts::PI, ops::Neg, ops::{Add, AddAssign, BitAnd, BitOr, Div, Mul, MulAssign, Sub}, time::Instant, }; const TOLERANCE: f32 = 0.0001; const SIMD_WIDTH: usize = 8; #[derive(Debug, Copy, Clone)] struct WideI32(__m256i); impl WideI32 { fn new(e7: i32, e6: i32, e5: i32, e4: i32, e3: i32, e2: i32, e1: i32, e0: i32) -> Self { Self(unsafe { _mm256_set_epi32(e7, e6, e5, e4, e3, e2, e1, e0) }) } fn splat(x: i32) -> Self { Self(unsafe { _mm256_set1_epi32(x) }) } fn select(x: WideI32, y: WideI32, mask: WideF32) -> Self { Self(unsafe { _mm256_castps_si256(_mm256_blendv_ps( _mm256_castsi256_ps(x.0), _mm256_castsi256_ps(y.0), mask.0, )) }) } } impl Add for WideI32 { type Output = Self; fn add(self, other: Self) -> Self { Self(unsafe { _mm256_add_epi32(self.0, other.0) }) } } impl AddAssign for WideI32 { fn add_assign(&mut self, other: Self) { self.0 = unsafe { _mm256_add_epi32(self.0, other.0) } } } #[derive(Debug, Copy, Clone)] struct WideF32(__m256); impl WideF32 { fn load(x: &[f32]) -> Self { assert!(x.len() >= SIMD_WIDTH); // aligning this would require some hoops on vec alloc // https://stackoverflow.com/questions/60180121/how-do-i-allocate-a-vecu8-that-is-aligned-to-the-size-of-the-cache-line Self(unsafe { _mm256_loadu_ps(x.as_ptr()) }) } fn any(&self) -> bool { self.mask() != 0 } fn mask(&self) -> i32 { unsafe { _mm256_movemask_ps(self.0) } } fn hmin(&self) -> f32 { unsafe { /* This can be done entirely in avx with permute2f128, but that is allegedly very slow on AMD prior to Zen2 (and is anecdotally slower on my Intels as well) initial m256 1 2 3 4 5 6 7 8 extract half, cast the other half down to m128, min 1 2 3 4 5 6 7 8 = 1 2 3 4 permute backwards, min 1 2 3 4 4 3 2 1 = 1 2 2 1 unpack hi, min 1 2 2 1 1 1 2 2 = 1 1 2 1 */ let x = self.0; let y = _mm256_extractf128_ps(x, 1); let m1 = _mm_min_ps(_mm256_castps256_ps128(x), y); let 
m2 = _mm_permute_ps(m1, 27); let m2 = _mm_min_ps(m1, m2); let m3 = _mm_unpackhi_ps(m2, m2); let m = _mm_min_ps(m2, m3); _mm_cvtss_f32(m) } } fn splat(x: f32) -> Self { Self(unsafe { _mm256_set1_ps(x) }) } fn select(x: WideF32, y: WideF32, mask: WideF32) -> Self { Self(unsafe { _mm256_blendv_ps(x.0, y.0, mask.0) }) } fn sqrt(&self) -> Self { Self(unsafe { _mm256_sqrt_ps(self.0) }) } #[allow(dead_code)] fn rsqrt(&self) -> Self { Self(unsafe { _mm256_rsqrt_ps(self.0) }) } // approximate a sqrt using an inverse sqrt and one iteration of Newton-Raphson // https://en.wikipedia.org/wiki/Methods_of_computing_square_roots#Iterative_methods_for_reciprocal_square_roots // Note: on many architectures this is significantly faster than the sqrt intrinsic. But this is not so on Skylake // for this program: approx_sqrt crowds the ports with additional mul/subs so is net slower #[allow(dead_code)] fn approx_sqrt(self) -> Self { let half = WideF32::splat(0.5); let three = WideF32::splat(3.0); let rsqrt = self.rsqrt(); let x = three - rsqrt * rsqrt * self; rsqrt * half * x * self } fn gt(&self, other: Self) -> Self { Self(unsafe { _mm256_cmp_ps(self.0, other.0, _CMP_GT_OQ) }) } fn lt(&self, other: Self) -> Self { Self(unsafe { _mm256_cmp_ps(self.0, other.0, _CMP_LT_OQ) }) } fn eq(&self, other: Self) -> Self { Self(unsafe { _mm256_cmp_ps(self.0, other.0, _CMP_EQ_OQ) }) } fn mul_add(x: Self, y: Self, z: Self) -> Self { Self(unsafe { _mm256_fmadd_ps(x.0, y.0, z.0) }) } fn mul_sub(x: Self, y: Self, z: Self) -> Self { Self(unsafe { _mm256_fmsub_ps(x.0, y.0, z.0) }) } } impl Add for WideF32 { type Output = Self; fn add(self, other: Self) -> Self { Self(unsafe { _mm256_add_ps(self.0, other.0) }) } } impl AddAssign for WideF32 { fn add_assign(&mut self, other: Self) { self.0 = unsafe { _mm256_add_ps(self.0, other.0) } } } impl BitAnd for WideF32 { type Output = Self; fn bitand(self, other: Self) -> Self { Self(unsafe { _mm256_and_ps(self.0, other.0) }) } } impl BitOr for WideF32 { type 
Output = Self; fn bitor(self, other: Self) -> Self { Self(unsafe { _mm256_or_ps(self.0, other.0) }) } } impl Div for WideF32 { type Output = Self; fn div(self, other: Self) -> Self { Self(unsafe { _mm256_div_ps(self.0, other.0) }) } } impl Sub for WideF32 { type Output = Self; fn sub(self, other: Self) -> Self { Self(unsafe { _mm256_sub_ps(self.0, other.0) }) } } impl Mul for WideF32 { type Output = Self; fn mul(self, other: Self) -> Self { Self(unsafe { _mm256_mul_ps(self.0, other.0) }) } } impl MulAssign for WideF32 { fn mul_assign(&mut self, other: Self) { self.0 = unsafe { _mm256_mul_ps(self.0, other.0) } } } impl Neg for WideF32 { type Output = Self; fn neg(self) -> Self { Self(unsafe { _mm256_xor_ps(self.0, _mm256_set1_ps(-0.0)) }) } } #[derive(Debug, Copy, Clone, PartialEq)] struct V3(f32, f32, f32); impl V3 { fn dot(self, other: V3) -> f32 { self.0 * other.0 + self.1 * other.1 + self.2 * other.2 } fn cross(self, other: V3) -> V3 { V3( self.1 * other.2 - self.2 * other.1, self.2 * other.0 - self.0 * other.2, self.0 * other.1 - self.1 * other.0, ) } fn normalize(self) -> V3 { self * (1.0 / self.len()) } fn reflect(self, normal: V3) -> V3 { self - normal * self.dot(normal) * 2.0 } fn len(self) -> f32 { self.dot(self).sqrt() } fn is_unit_vector(self) -> bool { (self.dot(self) - 1.0).abs() < TOLERANCE } } impl Add for V3 { type Output = Self; fn add(self, other: Self) -> Self { Self(self.0 + other.0, self.1 + other.1, self.2 + other.2) } } impl Add<f32> for V3 { type Output = Self; fn add(self, rhs: f32) -> Self { Self(self.0 + rhs, self.1 + rhs, self.2 + rhs) } } impl AddAssign for V3 { fn add_assign(&mut self, other: Self) { *self = Self(self.0 + other.0, self.1 + other.1, self.2 + other.2) } } impl Div<f32> for V3 { type Output = Self; fn div(self, rhs: f32) -> Self { Self(self.0 / rhs, self.1 / rhs, self.2 / rhs) } } impl Sub for V3 { type Output = Self; fn sub(self, other: Self) -> Self { Self(self.0 - other.0, self.1 - other.1, self.2 - other.2) } } impl 
Sub<f32> for V3 { type Output = Self; fn sub(self, rhs: f32) -> Self { Self(self.0 - rhs, self.1 - rhs, self.2 - rhs) } } impl Mul for V3 { type Output = Self; fn mul(self, other: Self) -> Self { Self(self.0 * other.0, self.1 * other.1, self.2 * other.2) } } impl Mul<f32> for V3 { type Output = Self; fn mul(self, rhs: f32) -> Self { Self(self.0 * rhs, self.1 * rhs, self.2 * rhs) } } impl MulAssign<f32> for V3 { fn mul_assign(&mut self, rhs: f32) { *self = Self(self.0 * rhs, self.1 * rhs, self.2 * rhs) } } impl MulAssign for V3 { fn mul_assign(&mut self, other: Self) { *self = Self(self.0 * other.0, self.1 * other.1, self.2 * other.2) } } #[derive(Debug)] struct Camera { origin: V3, x: V3, y: V3, z: V3, film_lower_left: V3, film_width: f32, film_height: f32, } impl Camera { fn new(look_from: V3, look_at: V3, aspect_ratio: f32) -> Camera { assert!(aspect_ratio > 1.0, "width must be greater than height"); let origin = look_from - look_at; let z = origin.normalize(); let x = V3(0.0, 0.0, 1.0).cross(z).normalize(); let y = z.cross(x).normalize(); let film_height = 1.0; let film_width = film_height * aspect_ratio; let film_lower_left = origin - z - y * 0.5 * film_height - x * 0.5 * film_width; Camera { origin, x, y, z, film_lower_left, film_width, film_height, } } } #[derive(Debug, Clone, PartialEq)] enum MaterialType { Diffuse, Specular, } #[derive(Debug, Clone, PartialEq)] struct Material { emit_color: V3, reflect_color: V3, t: MaterialType, } struct Sphere { p: V3, rsqrd: f32, m: Material, } impl Sphere { fn new(p: V3, r: f32, m: Material) -> Sphere { Sphere { p, rsqrd: r * r, m } } } struct
{ xs: Vec<f32>, ys: Vec<f32>, zs: Vec<f32>, rsqrds: Vec<f32>, mats: Vec<Material>, } impl Spheres { fn new(spheres: Vec<Sphere>) -> Self { let len = (spheres.len() + SIMD_WIDTH - 1) / SIMD_WIDTH * SIMD_WIDTH; let mut me = Self { xs: Vec::with_capacity(len), ys: Vec::with_capacity(len), zs: Vec::with_capacity(len), rsqrds: Vec::with_capacity(len), mats: Vec::with_capacity(len), }; for s in spheres { me.xs.push(s.p.0); me.ys.push(s.p.1); me.zs.push(s.p.2); me.rsqrds.push(s.rsqrd); me.mats.push(s.m); } // pad everything out to the simd width me.xs.resize(len, 0.0); me.ys.resize(len, 0.0); me.zs.resize(len, 0.0); me.rsqrds.resize(len, 0.0); let default_mat = Material { emit_color: V3(0.0, 0.0, 0.0), reflect_color: V3(0.0, 0.0, 0.0), t: MaterialType::Specular, }; me.mats.resize(len, default_mat); me } fn len(&self) -> usize { self.xs.len() } } // https://entropymine.com/imageworsener/srgbformula/ fn linear_to_srgb(x: f32) -> f32 { if x < 0.0 { 0.0 } else if x > 1.0 { 1.0 } else if x > 0.0031308 { 1.055 * x.powf(1.0 / 2.4) - 0.055 } else { x * 12.92 } } thread_local! { static THREAD_RNG: Cell<u64> = { let mut buf = [0u8; 8]; getrandom::getrandom(&mut buf).unwrap(); Cell::new(u64::from_le_bytes(buf)) }; } fn rand_seed() -> u32 { let mut buf = [0u8; 4]; getrandom::getrandom(&mut buf).unwrap(); u32::from_le_bytes(buf) } #[allow(dead_code)] fn thread_rand() -> u32 { // TODO(eli): thread local perf is terrible; causes function call and branching THREAD_RNG.with(|rng_cell| { let mut state = rng_cell.get(); let randu = pcg(&mut state); rng_cell.set(state); randu }) } // Algorithm "xor" from p. 4 of Marsaglia, "Xorshift RNGs" // xorshift isn't great, but is good enough for our purposes and has two // nice properties: // 1. it only needs a u32 of state to generate a u32 // 2. 
it's easy to SIMD fn xorshift(state: &mut u32) -> u32 { debug_assert!(*state != 0, "xorshift cannot be seeded with 0"); let mut x = *state; x ^= x << 13; x ^= x >> 17; x ^= x << 5; *state = x; x } // pcg xsh rs 64/32 (mcg) #[allow(dead_code)] fn pcg(state: &mut u64) -> u32 { let s = *state; *state = s.wrapping_mul(6364136223846793005); (((s >> 22) ^ s) >> ((s >> 61) + 22)) as u32 } fn randf(state: &mut u32) -> f32 { let randu = (xorshift(state) >> 9) | 0x3f800000; let randf = f32::from_bits(randu) - 1.0; randf } fn randf_range(state: &mut u32, min: f32, max: f32) -> f32 { min + (max - min) * randf(state) } #[inline(always)] fn cast( rng_state: &mut u32, bg: &Material, spheres: &Spheres, mut origin: V3, mut dir: V3, mut bounces: u32, ) -> (V3, u32) { let mut color = V3(0.0, 0.0, 0.0); let mut reflectance = V3(1.0, 1.0, 1.0); let orig_bounces = bounces; loop { debug_assert!(dir.is_unit_vector()); let origin_xs = WideF32::splat(origin.0); let origin_ys = WideF32::splat(origin.1); let origin_zs = WideF32::splat(origin.2); let dir_x = WideF32::splat(dir.0); let dir_y = WideF32::splat(dir.1); let dir_z = WideF32::splat(dir.2); let mut hit_ids = WideI32::splat(-1); let mut hit_dists = WideF32::splat(f32::MAX); let mut iteration_ids = WideI32::new(7, 6, 5, 4, 3, 2, 1, 0); // TODO(eli): egregious bounds checking here for i in (0..spheres.len()).step_by(SIMD_WIDTH) { let sphere_xs = WideF32::load(&spheres.xs[i..i + SIMD_WIDTH]); let sphere_ys = WideF32::load(&spheres.ys[i..i + SIMD_WIDTH]); let sphere_zs = WideF32::load(&spheres.zs[i..i + SIMD_WIDTH]); let sphere_rsqrds = WideF32::load(&spheres.rsqrds[i..i + SIMD_WIDTH]); // this is sphere_relative_origin = origin - sphere_origin // but the math is flipped backwards because it saves us having to negate the b term let relative_xs = sphere_xs - origin_xs; let relative_ys = sphere_ys - origin_ys; let relative_zs = sphere_zs - origin_zs; let neg_b = dir_x * relative_xs; let neg_b = WideF32::mul_add(dir_y, relative_ys, neg_b); 
let neg_b = WideF32::mul_add(dir_z, relative_zs, neg_b); let c = WideF32::mul_sub(relative_xs, relative_xs, sphere_rsqrds); let c = WideF32::mul_add(relative_ys, relative_ys, c); let c = WideF32::mul_add(relative_zs, relative_zs, c); let discr = WideF32::mul_sub(neg_b, neg_b, c); let discrmask = discr.gt(WideF32::splat(0.0)); if discrmask.any() { let root_term = discr.sqrt(); let t0 = neg_b - root_term; let t1 = neg_b + root_term; // t0 if hit, else t1 let t = WideF32::select(t1, t0, t0.gt(WideF32::splat(TOLERANCE))); let mask = discrmask & t.gt(WideF32::splat(TOLERANCE)) & t.lt(hit_dists); hit_ids = WideI32::select(hit_ids, iteration_ids, mask); hit_dists = WideF32::select(hit_dists, t, mask); } iteration_ids += WideI32::splat(SIMD_WIDTH as i32); } let hmin = hit_dists.hmin(); if hmin < f32::MAX { let minmask = hit_dists.eq(WideF32::splat(hmin)).mask(); let min_idx = minmask.trailing_zeros() as usize; let hit_ids_arr: [i32; SIMD_WIDTH] = unsafe { std::mem::transmute(hit_ids.0) }; let hit_dists_arr: [f32; SIMD_WIDTH] = unsafe { std::mem::transmute(hit_dists.0) }; let id = hit_ids_arr[min_idx] as usize; let hit_dist = hit_dists_arr[min_idx]; let mat = &spheres.mats[id]; if bounces == 0 { color += reflectance * mat.emit_color; break; } else { bounces -= 1; color += reflectance * mat.emit_color; reflectance *= mat.reflect_color; let hit_point = origin + dir * hit_dist; origin = hit_point; dir = match mat.t { MaterialType::Specular => { let sp = V3(spheres.xs[id], spheres.ys[id], spheres.zs[id]); let hit_normal = (hit_point - sp).normalize(); dir.reflect(hit_normal) } MaterialType::Diffuse => { let a = randf_range(rng_state, 0.0, 2.0 * PI); let z = randf_range(rng_state, -1.0, 1.0); let r = (1.0 - z * z).sqrt(); V3(r * a.cos(), r * a.sin(), z) } } } } else { color += reflectance * bg.emit_color; break; } } (color, orig_bounces - bounces) } fn main() -> Result<(), Box<dyn std::error::Error>> { // flush denormals to zero unsafe { 
_MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON) }; let mut args = Arguments::from_env(); let rays_per_pixel = args.opt_value_from_str(["-r", "--rays"])?.unwrap_or(100); let bounces = args.opt_value_from_str("--bounces")?.unwrap_or(8); let filename = args .opt_value_from_str("-o")? .unwrap_or("out.png".to_string()); args.finish()?; // Materials let bg = Material { emit_color: V3(0.3, 0.4, 0.8), reflect_color: V3(0.0, 0.0, 0.0), t: MaterialType::Specular, }; let ground = Material { emit_color: V3(0.0, 0.0, 0.0), reflect_color: V3(0.5, 0.5, 0.5), t: MaterialType::Diffuse, }; let left = Material { emit_color: V3(0.0, 0.0, 0.0), reflect_color: V3(1.0, 0.0, 0.0), t: MaterialType::Specular, }; let center = Material { emit_color: V3(0.4, 0.8, 0.9), reflect_color: V3(0.8, 0.8, 0.8), t: MaterialType::Specular, }; let right = Material { emit_color: V3(0.0, 0.0, 0.0), reflect_color: V3(0.95, 0.95, 0.95), t: MaterialType::Specular, }; let spheres = Spheres::new(vec![ Sphere::new(V3(0.0, 0.0, -100.0), 100.0, ground), Sphere::new(V3(0.0, 0.0, 1.0), 1.0, center), Sphere::new(V3(-2.0, -3.0, 1.5), 0.3, right.clone()), Sphere::new(V3(-3.0, -6.0, 0.0), 0.3, right.clone()), Sphere::new(V3(-3.0, -5.0, 2.0), 0.5, left.clone()), Sphere::new(V3(3.0, -3.0, 0.8), 1.0, right.clone()), Sphere::new(V3(-3.0, -3.0, 2.0), 0.5, left.clone()), Sphere::new(V3(5.0, -3.0, 0.8), 1.0, right), Sphere::new(V3(3.0, -3.0, 2.0), 0.5, left), ]); let width = 1920; let height = 1080; let inv_width = 1.0 / (width as f32 - 1.0); let inv_height = 1.0 / (height as f32 - 1.0); let mut pixels = vec![V3(0.0, 0.0, 0.0); width * height]; let cam = Camera::new( V3(0.0, -10.0, 1.0), V3(0.0, 0.0, 0.0), width as f32 / height as f32, ); let threads = num_cpus::get(); let chunk_size = 1024; assert!(width * height % chunk_size == 0); let start = Instant::now(); { let jobs = ArrayQueue::new(width * height / chunk_size); pixels .chunks_mut(chunk_size) .enumerate() .for_each(|(i, chunk)| { jobs.push((i, chunk)).unwrap(); }); 
thread::scope(|s| { let handles: Vec<_> = (0..threads) .map(|_| { s.spawn(|_| { let mut rng_state = rand_seed(); let mut ray_count: u64 = 0; while let Some((i, chunk)) = jobs.pop() { let mut i = i * chunk.len(); for color in chunk { let image_x = (i % width) as f32; let image_y = (height - (i / width) - 1) as f32; // flip image right-side-up i += 1; for _ in 0..rays_per_pixel { // calculate ratio we've moved along the image (y/height), step proportionally within the film let rand_x = randf(&mut rng_state); let rand_y = randf(&mut rng_state); let film_x = cam.x * cam.film_width * (image_x + rand_x) * inv_width; let film_y = cam.y * cam.film_height * (image_y + rand_y) * inv_height; let film_p = cam.film_lower_left + film_x + film_y; // remember that a pixel in float-space is a _range_. We want to send multiple rays within that range // to do this we take the start of that range (what we calculated as the image projecting onto our film), // then add a random [0,1) float let ray_dir = (film_p - cam.origin).normalize(); let (c, r) = cast( &mut rng_state, &bg, &spheres, cam.origin, ray_dir, bounces, ); *color += c; ray_count += 1 + r as u64; } } } ray_count }) }) .collect(); let total_rays_shot: u64 = handles.into_iter().map(|h| h.join().unwrap()).sum(); println!( "{:.3} Mray/s", total_rays_shot as f64 / 1_000_000.0 / start.elapsed().as_secs_f64() ); }) .unwrap(); } let mut buf: Vec<u8> = Vec::with_capacity(width * height * 3); for p in &pixels { let p = *p / rays_per_pixel as f32; buf.push((255.0 * linear_to_srgb(p.0)) as u8); buf.push((255.0 * linear_to_srgb(p.1)) as u8); buf.push((255.0 * linear_to_srgb(p.2)) as u8); } image::save_buffer( filename, &buf, width as u32, height as u32, image::ColorType::Rgb8, )?; println!("Rendering took {:.3}s", start.elapsed().as_secs_f32()); Ok(()) }
Spheres
identifier_name
main.rs
use crossbeam_queue::ArrayQueue; use crossbeam_utils::thread; use pico_args::Arguments; use std::arch::x86_64::*; use std::{ cell::Cell, f32::consts::PI, ops::Neg, ops::{Add, AddAssign, BitAnd, BitOr, Div, Mul, MulAssign, Sub}, time::Instant, }; const TOLERANCE: f32 = 0.0001; const SIMD_WIDTH: usize = 8; #[derive(Debug, Copy, Clone)] struct WideI32(__m256i); impl WideI32 { fn new(e7: i32, e6: i32, e5: i32, e4: i32, e3: i32, e2: i32, e1: i32, e0: i32) -> Self { Self(unsafe { _mm256_set_epi32(e7, e6, e5, e4, e3, e2, e1, e0) }) } fn splat(x: i32) -> Self { Self(unsafe { _mm256_set1_epi32(x) }) } fn select(x: WideI32, y: WideI32, mask: WideF32) -> Self { Self(unsafe { _mm256_castps_si256(_mm256_blendv_ps( _mm256_castsi256_ps(x.0), _mm256_castsi256_ps(y.0), mask.0, )) }) } } impl Add for WideI32 { type Output = Self; fn add(self, other: Self) -> Self { Self(unsafe { _mm256_add_epi32(self.0, other.0) }) } } impl AddAssign for WideI32 { fn add_assign(&mut self, other: Self) { self.0 = unsafe { _mm256_add_epi32(self.0, other.0) } } } #[derive(Debug, Copy, Clone)] struct WideF32(__m256); impl WideF32 { fn load(x: &[f32]) -> Self { assert!(x.len() >= SIMD_WIDTH); // aligning this would require some hoops on vec alloc // https://stackoverflow.com/questions/60180121/how-do-i-allocate-a-vecu8-that-is-aligned-to-the-size-of-the-cache-line Self(unsafe { _mm256_loadu_ps(x.as_ptr()) }) } fn any(&self) -> bool {
unsafe { _mm256_movemask_ps(self.0) } } fn hmin(&self) -> f32 { unsafe { /* This can be done entirely in avx with permute2f128, but that is allegedly very slow on AMD prior to Zen2 (and is anecdotally slower on my Intels as well) initial m256 1 2 3 4 5 6 7 8 extract half, cast the other half down to m128, min 1 2 3 4 5 6 7 8 = 1 2 3 4 permute backwards, min 1 2 3 4 4 3 2 1 = 1 2 2 1 unpack hi, min 1 2 2 1 1 1 2 2 = 1 1 2 1 */ let x = self.0; let y = _mm256_extractf128_ps(x, 1); let m1 = _mm_min_ps(_mm256_castps256_ps128(x), y); let m2 = _mm_permute_ps(m1, 27); let m2 = _mm_min_ps(m1, m2); let m3 = _mm_unpackhi_ps(m2, m2); let m = _mm_min_ps(m2, m3); _mm_cvtss_f32(m) } } fn splat(x: f32) -> Self { Self(unsafe { _mm256_set1_ps(x) }) } fn select(x: WideF32, y: WideF32, mask: WideF32) -> Self { Self(unsafe { _mm256_blendv_ps(x.0, y.0, mask.0) }) } fn sqrt(&self) -> Self { Self(unsafe { _mm256_sqrt_ps(self.0) }) } #[allow(dead_code)] fn rsqrt(&self) -> Self { Self(unsafe { _mm256_rsqrt_ps(self.0) }) } // approximate a sqrt using an inverse sqrt and one iteration of Newton-Raphson // https://en.wikipedia.org/wiki/Methods_of_computing_square_roots#Iterative_methods_for_reciprocal_square_roots // Note: on many architectures this is significantly faster than the sqrt intrinsic. 
But this is not so on Skylake // for this program: approx_sqrt crowds the ports with additional mul/subs so is net slower #[allow(dead_code)] fn approx_sqrt(self) -> Self { let half = WideF32::splat(0.5); let three = WideF32::splat(3.0); let rsqrt = self.rsqrt(); let x = three - rsqrt * rsqrt * self; rsqrt * half * x * self } fn gt(&self, other: Self) -> Self { Self(unsafe { _mm256_cmp_ps(self.0, other.0, _CMP_GT_OQ) }) } fn lt(&self, other: Self) -> Self { Self(unsafe { _mm256_cmp_ps(self.0, other.0, _CMP_LT_OQ) }) } fn eq(&self, other: Self) -> Self { Self(unsafe { _mm256_cmp_ps(self.0, other.0, _CMP_EQ_OQ) }) } fn mul_add(x: Self, y: Self, z: Self) -> Self { Self(unsafe { _mm256_fmadd_ps(x.0, y.0, z.0) }) } fn mul_sub(x: Self, y: Self, z: Self) -> Self { Self(unsafe { _mm256_fmsub_ps(x.0, y.0, z.0) }) } } impl Add for WideF32 { type Output = Self; fn add(self, other: Self) -> Self { Self(unsafe { _mm256_add_ps(self.0, other.0) }) } } impl AddAssign for WideF32 { fn add_assign(&mut self, other: Self) { self.0 = unsafe { _mm256_add_ps(self.0, other.0) } } } impl BitAnd for WideF32 { type Output = Self; fn bitand(self, other: Self) -> Self { Self(unsafe { _mm256_and_ps(self.0, other.0) }) } } impl BitOr for WideF32 { type Output = Self; fn bitor(self, other: Self) -> Self { Self(unsafe { _mm256_or_ps(self.0, other.0) }) } } impl Div for WideF32 { type Output = Self; fn div(self, other: Self) -> Self { Self(unsafe { _mm256_div_ps(self.0, other.0) }) } } impl Sub for WideF32 { type Output = Self; fn sub(self, other: Self) -> Self { Self(unsafe { _mm256_sub_ps(self.0, other.0) }) } } impl Mul for WideF32 { type Output = Self; fn mul(self, other: Self) -> Self { Self(unsafe { _mm256_mul_ps(self.0, other.0) }) } } impl MulAssign for WideF32 { fn mul_assign(&mut self, other: Self) { self.0 = unsafe { _mm256_mul_ps(self.0, other.0) } } } impl Neg for WideF32 { type Output = Self; fn neg(self) -> Self { Self(unsafe { _mm256_xor_ps(self.0, _mm256_set1_ps(-0.0)) }) } } 
#[derive(Debug, Copy, Clone, PartialEq)] struct V3(f32, f32, f32); impl V3 { fn dot(self, other: V3) -> f32 { self.0 * other.0 + self.1 * other.1 + self.2 * other.2 } fn cross(self, other: V3) -> V3 { V3( self.1 * other.2 - self.2 * other.1, self.2 * other.0 - self.0 * other.2, self.0 * other.1 - self.1 * other.0, ) } fn normalize(self) -> V3 { self * (1.0 / self.len()) } fn reflect(self, normal: V3) -> V3 { self - normal * self.dot(normal) * 2.0 } fn len(self) -> f32 { self.dot(self).sqrt() } fn is_unit_vector(self) -> bool { (self.dot(self) - 1.0).abs() < TOLERANCE } } impl Add for V3 { type Output = Self; fn add(self, other: Self) -> Self { Self(self.0 + other.0, self.1 + other.1, self.2 + other.2) } } impl Add<f32> for V3 { type Output = Self; fn add(self, rhs: f32) -> Self { Self(self.0 + rhs, self.1 + rhs, self.2 + rhs) } } impl AddAssign for V3 { fn add_assign(&mut self, other: Self) { *self = Self(self.0 + other.0, self.1 + other.1, self.2 + other.2) } } impl Div<f32> for V3 { type Output = Self; fn div(self, rhs: f32) -> Self { Self(self.0 / rhs, self.1 / rhs, self.2 / rhs) } } impl Sub for V3 { type Output = Self; fn sub(self, other: Self) -> Self { Self(self.0 - other.0, self.1 - other.1, self.2 - other.2) } } impl Sub<f32> for V3 { type Output = Self; fn sub(self, rhs: f32) -> Self { Self(self.0 - rhs, self.1 - rhs, self.2 - rhs) } } impl Mul for V3 { type Output = Self; fn mul(self, other: Self) -> Self { Self(self.0 * other.0, self.1 * other.1, self.2 * other.2) } } impl Mul<f32> for V3 { type Output = Self; fn mul(self, rhs: f32) -> Self { Self(self.0 * rhs, self.1 * rhs, self.2 * rhs) } } impl MulAssign<f32> for V3 { fn mul_assign(&mut self, rhs: f32) { *self = Self(self.0 * rhs, self.1 * rhs, self.2 * rhs) } } impl MulAssign for V3 { fn mul_assign(&mut self, other: Self) { *self = Self(self.0 * other.0, self.1 * other.1, self.2 * other.2) } } #[derive(Debug)] struct Camera { origin: V3, x: V3, y: V3, z: V3, film_lower_left: V3, film_width: f32, 
film_height: f32, } impl Camera { fn new(look_from: V3, look_at: V3, aspect_ratio: f32) -> Camera { assert!(aspect_ratio > 1.0, "width must be greater than height"); let origin = look_from - look_at; let z = origin.normalize(); let x = V3(0.0, 0.0, 1.0).cross(z).normalize(); let y = z.cross(x).normalize(); let film_height = 1.0; let film_width = film_height * aspect_ratio; let film_lower_left = origin - z - y * 0.5 * film_height - x * 0.5 * film_width; Camera { origin, x, y, z, film_lower_left, film_width, film_height, } } } #[derive(Debug, Clone, PartialEq)] enum MaterialType { Diffuse, Specular, } #[derive(Debug, Clone, PartialEq)] struct Material { emit_color: V3, reflect_color: V3, t: MaterialType, } struct Sphere { p: V3, rsqrd: f32, m: Material, } impl Sphere { fn new(p: V3, r: f32, m: Material) -> Sphere { Sphere { p, rsqrd: r * r, m } } } struct Spheres { xs: Vec<f32>, ys: Vec<f32>, zs: Vec<f32>, rsqrds: Vec<f32>, mats: Vec<Material>, } impl Spheres { fn new(spheres: Vec<Sphere>) -> Self { let len = (spheres.len() + SIMD_WIDTH - 1) / SIMD_WIDTH * SIMD_WIDTH; let mut me = Self { xs: Vec::with_capacity(len), ys: Vec::with_capacity(len), zs: Vec::with_capacity(len), rsqrds: Vec::with_capacity(len), mats: Vec::with_capacity(len), }; for s in spheres { me.xs.push(s.p.0); me.ys.push(s.p.1); me.zs.push(s.p.2); me.rsqrds.push(s.rsqrd); me.mats.push(s.m); } // pad everything out to the simd width me.xs.resize(len, 0.0); me.ys.resize(len, 0.0); me.zs.resize(len, 0.0); me.rsqrds.resize(len, 0.0); let default_mat = Material { emit_color: V3(0.0, 0.0, 0.0), reflect_color: V3(0.0, 0.0, 0.0), t: MaterialType::Specular, }; me.mats.resize(len, default_mat); me } fn len(&self) -> usize { self.xs.len() } } // https://entropymine.com/imageworsener/srgbformula/ fn linear_to_srgb(x: f32) -> f32 { if x < 0.0 { 0.0 } else if x > 1.0 { 1.0 } else if x > 0.0031308 { 1.055 * x.powf(1.0 / 2.4) - 0.055 } else { x * 12.92 } } thread_local! 
{ static THREAD_RNG: Cell<u64> = { let mut buf = [0u8; 8]; getrandom::getrandom(&mut buf).unwrap(); Cell::new(u64::from_le_bytes(buf)) }; } fn rand_seed() -> u32 { let mut buf = [0u8; 4]; getrandom::getrandom(&mut buf).unwrap(); u32::from_le_bytes(buf) } #[allow(dead_code)] fn thread_rand() -> u32 { // TODO(eli): thread local perf is terrible; causes function call and branching THREAD_RNG.with(|rng_cell| { let mut state = rng_cell.get(); let randu = pcg(&mut state); rng_cell.set(state); randu }) } // Algorithm "xor" from p. 4 of Marsaglia, "Xorshift RNGs" // xorshift isn't great, but is good enough for our purposes and has two // nice properties: // 1. it only needs a u32 of state to generate a u32 // 2. it's easy to SIMD fn xorshift(state: &mut u32) -> u32 { debug_assert!(*state != 0, "xorshift cannot be seeded with 0"); let mut x = *state; x ^= x << 13; x ^= x >> 17; x ^= x << 5; *state = x; x } // pcg xsh rs 64/32 (mcg) #[allow(dead_code)] fn pcg(state: &mut u64) -> u32 { let s = *state; *state = s.wrapping_mul(6364136223846793005); (((s >> 22) ^ s) >> ((s >> 61) + 22)) as u32 } fn randf(state: &mut u32) -> f32 { let randu = (xorshift(state) >> 9) | 0x3f800000; let randf = f32::from_bits(randu) - 1.0; randf } fn randf_range(state: &mut u32, min: f32, max: f32) -> f32 { min + (max - min) * randf(state) } #[inline(always)] fn cast( rng_state: &mut u32, bg: &Material, spheres: &Spheres, mut origin: V3, mut dir: V3, mut bounces: u32, ) -> (V3, u32) { let mut color = V3(0.0, 0.0, 0.0); let mut reflectance = V3(1.0, 1.0, 1.0); let orig_bounces = bounces; loop { debug_assert!(dir.is_unit_vector()); let origin_xs = WideF32::splat(origin.0); let origin_ys = WideF32::splat(origin.1); let origin_zs = WideF32::splat(origin.2); let dir_x = WideF32::splat(dir.0); let dir_y = WideF32::splat(dir.1); let dir_z = WideF32::splat(dir.2); let mut hit_ids = WideI32::splat(-1); let mut hit_dists = WideF32::splat(f32::MAX); let mut iteration_ids = WideI32::new(7, 6, 5, 4, 3, 2, 1, 0); 
// TODO(eli): egregious bounds checking here for i in (0..spheres.len()).step_by(SIMD_WIDTH) { let sphere_xs = WideF32::load(&spheres.xs[i..i + SIMD_WIDTH]); let sphere_ys = WideF32::load(&spheres.ys[i..i + SIMD_WIDTH]); let sphere_zs = WideF32::load(&spheres.zs[i..i + SIMD_WIDTH]); let sphere_rsqrds = WideF32::load(&spheres.rsqrds[i..i + SIMD_WIDTH]); // this is sphere_relative_origin = origin - sphere_origin // but the math is flipped backwards because it saves us having to negate the b term let relative_xs = sphere_xs - origin_xs; let relative_ys = sphere_ys - origin_ys; let relative_zs = sphere_zs - origin_zs; let neg_b = dir_x * relative_xs; let neg_b = WideF32::mul_add(dir_y, relative_ys, neg_b); let neg_b = WideF32::mul_add(dir_z, relative_zs, neg_b); let c = WideF32::mul_sub(relative_xs, relative_xs, sphere_rsqrds); let c = WideF32::mul_add(relative_ys, relative_ys, c); let c = WideF32::mul_add(relative_zs, relative_zs, c); let discr = WideF32::mul_sub(neg_b, neg_b, c); let discrmask = discr.gt(WideF32::splat(0.0)); if discrmask.any() { let root_term = discr.sqrt(); let t0 = neg_b - root_term; let t1 = neg_b + root_term; // t0 if hit, else t1 let t = WideF32::select(t1, t0, t0.gt(WideF32::splat(TOLERANCE))); let mask = discrmask & t.gt(WideF32::splat(TOLERANCE)) & t.lt(hit_dists); hit_ids = WideI32::select(hit_ids, iteration_ids, mask); hit_dists = WideF32::select(hit_dists, t, mask); } iteration_ids += WideI32::splat(SIMD_WIDTH as i32); } let hmin = hit_dists.hmin(); if hmin < f32::MAX { let minmask = hit_dists.eq(WideF32::splat(hmin)).mask(); let min_idx = minmask.trailing_zeros() as usize; let hit_ids_arr: [i32; SIMD_WIDTH] = unsafe { std::mem::transmute(hit_ids.0) }; let hit_dists_arr: [f32; SIMD_WIDTH] = unsafe { std::mem::transmute(hit_dists.0) }; let id = hit_ids_arr[min_idx] as usize; let hit_dist = hit_dists_arr[min_idx]; let mat = &spheres.mats[id]; if bounces == 0 { color += reflectance * mat.emit_color; break; } else { bounces -= 1; color += 
reflectance * mat.emit_color; reflectance *= mat.reflect_color; let hit_point = origin + dir * hit_dist; origin = hit_point; dir = match mat.t { MaterialType::Specular => { let sp = V3(spheres.xs[id], spheres.ys[id], spheres.zs[id]); let hit_normal = (hit_point - sp).normalize(); dir.reflect(hit_normal) } MaterialType::Diffuse => { let a = randf_range(rng_state, 0.0, 2.0 * PI); let z = randf_range(rng_state, -1.0, 1.0); let r = (1.0 - z * z).sqrt(); V3(r * a.cos(), r * a.sin(), z) } } } } else { color += reflectance * bg.emit_color; break; } } (color, orig_bounces - bounces) } fn main() -> Result<(), Box<dyn std::error::Error>> { // flush denormals to zero unsafe { _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON) }; let mut args = Arguments::from_env(); let rays_per_pixel = args.opt_value_from_str(["-r", "--rays"])?.unwrap_or(100); let bounces = args.opt_value_from_str("--bounces")?.unwrap_or(8); let filename = args .opt_value_from_str("-o")? .unwrap_or("out.png".to_string()); args.finish()?; // Materials let bg = Material { emit_color: V3(0.3, 0.4, 0.8), reflect_color: V3(0.0, 0.0, 0.0), t: MaterialType::Specular, }; let ground = Material { emit_color: V3(0.0, 0.0, 0.0), reflect_color: V3(0.5, 0.5, 0.5), t: MaterialType::Diffuse, }; let left = Material { emit_color: V3(0.0, 0.0, 0.0), reflect_color: V3(1.0, 0.0, 0.0), t: MaterialType::Specular, }; let center = Material { emit_color: V3(0.4, 0.8, 0.9), reflect_color: V3(0.8, 0.8, 0.8), t: MaterialType::Specular, }; let right = Material { emit_color: V3(0.0, 0.0, 0.0), reflect_color: V3(0.95, 0.95, 0.95), t: MaterialType::Specular, }; let spheres = Spheres::new(vec![ Sphere::new(V3(0.0, 0.0, -100.0), 100.0, ground), Sphere::new(V3(0.0, 0.0, 1.0), 1.0, center), Sphere::new(V3(-2.0, -3.0, 1.5), 0.3, right.clone()), Sphere::new(V3(-3.0, -6.0, 0.0), 0.3, right.clone()), Sphere::new(V3(-3.0, -5.0, 2.0), 0.5, left.clone()), Sphere::new(V3(3.0, -3.0, 0.8), 1.0, right.clone()), Sphere::new(V3(-3.0, -3.0, 2.0), 0.5, left.clone()), 
Sphere::new(V3(5.0, -3.0, 0.8), 1.0, right), Sphere::new(V3(3.0, -3.0, 2.0), 0.5, left), ]); let width = 1920; let height = 1080; let inv_width = 1.0 / (width as f32 - 1.0); let inv_height = 1.0 / (height as f32 - 1.0); let mut pixels = vec![V3(0.0, 0.0, 0.0); width * height]; let cam = Camera::new( V3(0.0, -10.0, 1.0), V3(0.0, 0.0, 0.0), width as f32 / height as f32, ); let threads = num_cpus::get(); let chunk_size = 1024; assert!(width * height % chunk_size == 0); let start = Instant::now(); { let jobs = ArrayQueue::new(width * height / chunk_size); pixels .chunks_mut(chunk_size) .enumerate() .for_each(|(i, chunk)| { jobs.push((i, chunk)).unwrap(); }); thread::scope(|s| { let handles: Vec<_> = (0..threads) .map(|_| { s.spawn(|_| { let mut rng_state = rand_seed(); let mut ray_count: u64 = 0; while let Some((i, chunk)) = jobs.pop() { let mut i = i * chunk.len(); for color in chunk { let image_x = (i % width) as f32; let image_y = (height - (i / width) - 1) as f32; // flip image right-side-up i += 1; for _ in 0..rays_per_pixel { // calculate ratio we've moved along the image (y/height), step proportionally within the film let rand_x = randf(&mut rng_state); let rand_y = randf(&mut rng_state); let film_x = cam.x * cam.film_width * (image_x + rand_x) * inv_width; let film_y = cam.y * cam.film_height * (image_y + rand_y) * inv_height; let film_p = cam.film_lower_left + film_x + film_y; // remember that a pixel in float-space is a _range_. 
We want to send multiple rays within that range // to do this we take the start of that range (what we calculated as the image projecting onto our film), // then add a random [0,1) float let ray_dir = (film_p - cam.origin).normalize(); let (c, r) = cast( &mut rng_state, &bg, &spheres, cam.origin, ray_dir, bounces, ); *color += c; ray_count += 1 + r as u64; } } } ray_count }) }) .collect(); let total_rays_shot: u64 = handles.into_iter().map(|h| h.join().unwrap()).sum(); println!( "{:.3} Mray/s", total_rays_shot as f64 / 1_000_000.0 / start.elapsed().as_secs_f64() ); }) .unwrap(); } let mut buf: Vec<u8> = Vec::with_capacity(width * height * 3); for p in &pixels { let p = *p / rays_per_pixel as f32; buf.push((255.0 * linear_to_srgb(p.0)) as u8); buf.push((255.0 * linear_to_srgb(p.1)) as u8); buf.push((255.0 * linear_to_srgb(p.2)) as u8); } image::save_buffer( filename, &buf, width as u32, height as u32, image::ColorType::Rgb8, )?; println!("Rendering took {:.3}s", start.elapsed().as_secs_f32()); Ok(()) }
self.mask() != 0 } fn mask(&self) -> i32 {
random_line_split
CLCDcurve.py
from Cit_par import mass,rho0,Tempgrad,R,g,Temp0,S,c,A from math import pi import numpy as np import matplotlib.pyplot as plt import pandas as pd from ReadMeas import * from ClCdRef import passmass, Vequi, totalthrustele, totalthrustelestand, totalthrustele_mat, totalthrustele_matstand, CLalpha, b,e,CD0, AOAlist, CLlist,CL2 ##READ DATA AND CREATE ARRAY## time = np.array(pd.read_csv('flight_data/time.csv', delimiter=',', header=None)) time = np.array([time[i][0] for i in range(len(time))]) AOA1 = np.array(pd.read_csv('flight_data/AOA.csv', delimiter=' ', header=None)) TAS = np.array(pd.read_csv('flight_data/TAS.csv', delimiter=' ', header=None)) #TAS in knots TAS2 = TAS * 0.51444444444444 TAT = np.array(pd.read_csv('flight_data/TAT.csv', delimiter=' ', header=None)) Mach = np.array(pd.read_csv('flight_data/Mach.csv', delimiter=' ', header=None)) de = np.array(pd.read_csv('flight_data/delta_e.csv', delimiter=' ', header=None)) xcg = np.array(pd.read_csv('x_cg.csv', delimiter=' ', header=None)) shiftxcg = np.array(pd.read_csv('cg_shift.csv', delimiter=' ', header=None)) alt = np.array(pd.read_csv('flight_data/bcAlt.csv', delimiter=' ', header = None)) alt2 = alt * 0.3048 #ft to meters FUl = np.array(pd.read_csv('flight_data/FUl.csv', delimiter=' ', header = None)) FUr = np.array(pd.read_csv('flight_data/FUr.csv', delimiter=' ', header = None)) FUtot = (FUl + FUr) * 0.453592 #lbs to kg Fele = np.array(pd.read_csv('flight_data/Fele.csv', delimiter=' ', header = None)) SAT1 = np.array(pd.read_csv('flight_data/SAT.csv', delimiter=' ', header = None)) SAT = SAT1 + 273.15 ##Calculate CL and CLalpha## time_CL = time[16710:23911] #to check AOA_CL = np.array(AOA1[16710:23911]) VTAS_CL = np.array(TAS2[16710:23911]) h_CL = np.array(alt2[16710:23911]) FU_CL = np.array(FUtot[16710:23911]) Mach_CL = np.array(Mach[16710:23911]) rho1_CL = rho0 * pow((1 + (Tempgrad*h_CL)/Temp0),(-g/(R*Tempgrad) - 1)) masstot = mass + passmass + fuelblock Weight_CL = [(masstot - FU_CL[i])*g for i in 
range(len(FU_CL))] CLgraph_mat = Weight_CL/(0.5 * VTAS_CL**2 * rho1_CL * S) print(min(Mach_CL), max(Mach_CL)) #find linear relation for CL measurements clalpha_mat,ma_mat = np.polyfit(AOA_CL[:,0],CLgraph_mat[:,0],1) CLline_CL = clalpha_mat*AOA_CL[:,0] + ma_mat print('Cl_alpha =', clalpha_mat, clalpha_mat*(180/pi)) ##Calculate CD## CDgraph_mat = CD0 + (CLgraph_mat) ** 2 / (pi * A * e) #From Numerical Model# AOAstat = np.array(AOAlist) linecl_stat = CLalpha*AOAstat + b
#Plots CL and CD## # plt.grid() # plt.scatter(AOA_CL,CLgraph_mat,marker= '.', label='Measure point') # # plt.plot(AOAstat,linecl_stat, label='Stationary Flight Measurements') # plt.plot(AOA_CL[:,0],CLline_CL,c='darkorange', label= 'Least Squares of Flightdata') # plt.ylabel('Lift Coefficient [-]') # plt.xlabel('Angle of Attack [deg]') # plt.legend() # # plt.savefig('CLalphacompare.jpg') # plt.show() # plt.grid() plt.scatter(CDgraph_mat,CLgraph_mat, marker='.', label='Measure Point Flightdata') # plt.plot(CDstat,CLlist,c='orange', label='Stationary Flight Measurements') plt.ylabel('Lift Coefficient [-]') plt.xlabel('Drag Coefficient [-]') plt.legend() plt.savefig('CLCDcompare.jpg') plt.show() ##------------Reynolds Number Range-----------## # b = 1.458*10**(-6) #kg/msK^1/2 # St = 110.4 #K # Tst = np.array(SAT[16710:23911]) # mu = (b * Tst ** (3/2))/(Tst + St) # Reyn = np.array([(rho1_CL[i] * VTAS_CL[i] * c/mu[i]) for i in range(len(mu))]) # print('Reynoldsnumber Range =', max(Reyn), min(Reyn)) ##_______________________________________Stationary Flight Data_________________________________________## ##------------Calculate Cmdelta and Cmalpha using Post Flight Data-------------------------## dde1 = [i.de for i in CGshift] dde = (dde1[1] - dde1[0])*(pi/180) dxcg = shiftxcg[1]-shiftxcg[0] hp = CGshift[1].height Vias = CGshift[1].IAS Tm = float(CGshift[1].TAT) + 273.15 VTAS = Vequi(hp,Vias,Tm)[0] rhoTAS = Vequi(hp,Vias,Tm)[1] Fused = CGshift[1].Fused Weight = (mass + passmass + fuelblock - Fused)*g CN = Weight/(0.5*rhoTAS*(VTAS**2)*S) print('CN =', CN) Cmdelta = -(1/dde) * CN * dxcg/c print('Cmdelta =', Cmdelta) ##--------------Elevator Trim Curve Ve-----------------## height = np.array([i.height for i in EleTrimCurve]) V_ias = np.array([i.IAS for i in EleTrimCurve]) Temp = np.array([(i.TAT + 273.15) for i in EleTrimCurve]) # Vtasele = Vequi(height,V_ias,Temp)[0] # rhoele = Vequi(height,V_ias,Temp)[1] V_e = Vequi(height,V_ias,Temp)[2] Fusedele = np.array([i.Fused for i 
in EleTrimCurve]) mtot_el = mass + passmass + fuelblock - Fusedele Wele = mtot_el * g Ws = 60500 #N Ve_e = V_e * np.sqrt(Ws/Wele) ##-----------Elevator Trim Curve Ele defl eq-----------## Cmtc = -.0064 #reader appendix eledefl = np.array([i.de for i in EleTrimCurve]) aoa = np.array([i.AoA for i in EleTrimCurve]) d_eng = 0.686 #m Tc = totalthrustele/(0.5*rho0*Ve_e**2*S) print(Tc) Tcs = totalthrustelestand/(0.5*rho0*Ve_e**2*d_eng**2) print(Tcs) Cmdelta_veri = -0.41532 deleq = eledefl - (1/Cmdelta *Cmtc * (Tcs - Tc)) ##-------Plotting AoA against Ele delfection and determine Cmalpha------## deda, q = np.polyfit(aoa,deleq,1) line = deda*aoa+q print('deda =', deda) plt.grid() plt.scatter(aoa,deleq, label='Measure Point') plt.plot(aoa,line, c='orange', label='Least Squares') plt.ylim(1.2,-0.5) plt.ylabel('Reduced Elevator Deflection [deg]') plt.xlabel('Angle of Attack [deg]') plt.legend() plt.savefig('DedAOA_verification.jpg') plt.show() Cmalpha = -deda * Cmdelta print('Cmalpha =', Cmalpha) #-------------------Plotting Ele defl against Ve----## Ve_e_dde1 = np.column_stack([Ve_e,deleq]) Ve_e_dde = Ve_e_dde1[Ve_e_dde1[:,0].argsort()] d, f, j = np.polyfit(Ve_e_dde[:,0],Ve_e_dde[:,1],2) line_eleV = d*Ve_e_dde[:,0]**2 + f*Ve_e_dde[:,0] + j # plt.grid() # plt.scatter(Ve_e_dde[:,0],Ve_e_dde[:,1], label='Measure Point') # plt.plot(Ve_e_dde[:,0],d*Ve_e_dde[:,0]**2 + f*Ve_e_dde[:,0] + j, c='orange', label='Least Squares') # plt.ylim(1.2,-0.4) # plt.ylabel('Reduced Elevator Deflection [deg]') # plt.xlabel('Reduced Equivalent Airspeed [m/s]') # plt.legend() # plt.savefig('DedV.jpg') # plt.show() ##------------Reduced Elevator control Curve----------## Femea = np.array([i.Fe for i in EleTrimCurve]) Fe = Femea * (Ws/Wele) Ve_e_Fe1 = np.column_stack([Ve_e,Fe]) Ve_e_Fe = Ve_e_Fe1[Ve_e_Fe1[:,0].argsort()] d, f, j = np.polyfit(Ve_e_Fe[:,0],Ve_e_Fe[:,1],2) line_feele = d*Ve_e_Fe[:,0]**2 + f*Ve_e_Fe[:,0] + j # plt.grid() # plt.scatter(Ve_e_Fe[:,0],Ve_e_Fe[:,1], label='Measure Point') # 
plt.plot(Ve_e_Fe[:,0],d*Ve_e_Fe[:,0]**2 + f*Ve_e_Fe[:,0] + j, c='orange', label='Least Squares') # plt.ylim(70,-40) # plt.ylabel('Reduced Force on Elevator Control Wheel [N]') # plt.xlabel('Reduced Equivalent Speed [m/s]') # plt.legend() # plt.savefig('FeV.jpg') # plt.show() ##_______________________________________Flight test DATA_______________________________________## ##---------------------Cmdelta determination of matlab data-----------------------------------## time_cg = time[33510:35911] xcg_cg = np.array(xcg[33510:35911]) dxcg_cg1 = np.array([xcg_cg[i] - xcg_cg[i-1] for i in range(1,len(xcg_cg))]) dxcg_cg = min(dxcg_cg1) de_cg = np.array(de[33510:35911]) dde_cg = (de_cg[2000] - de_cg[399]) * pi/180 #determined by exact time of interval stationary data FUtot_cg = FUtot[33510:35911] index = np.where(dxcg_cg1 == np.amin(dxcg_cg1)) W_cg = (masstot - FUtot_cg[2000])*g h_cg = alt2[35512] rho_cg = rho0 * pow((1 + (Tempgrad*h_cg)/Temp0),(-g/(R*Tempgrad) - 1)) Vtas_cg = TAS2[35512] CN_cg = W_cg/(0.5*rho_cg*Vtas_cg**2*S) Cmdelta_mat = -(1/dde_cg) * CN_cg * (dxcg_cg/c) print('Cmdelta matlab =', Cmdelta_mat) ##-------------------------------Elevator Trim Curve Of Matlab Data------------------## time_ele = time[29910:33511] AOA_ele = np.array(AOA1[29910:33511]) de_ele = np.array(de[29910:33511]) Vtas_ele = np.array(TAS2[29910:33511]) h_ele = np.array(alt2[29910:33511]) rho_ele = rho0 * pow((1 + (Tempgrad*h_ele)/Temp0),(-g/(R*Tempgrad) - 1)) Ve_ele = Vtas_ele * np.sqrt(rho_ele/rho0) FUtot_ele = np.array(FUtot[29910:33511]) W_ele = np.array([(masstot-FUtot_ele[i])*g for i in range(len(FUtot_ele))]) Ve_graph = Ve_ele * np.sqrt(Ws/W_ele) # print(len(Ve_graph)) Tc = totalthrustele_mat/(0.5*rho0*Ve_graph**2*S) Tcs = totalthrustele_matstand/(0.5*rho0*Ve_graph**2*d_eng**2) de_elemat = de_ele - (1/Cmdelta_mat * Cmtc) * (Tcs - Tc) # print(len(de_elemat)) f , k, v = np.polyfit(Ve_graph[:,0],de_elemat[:,0],2) # plt.grid() # plt.scatter(Ve_graph[:,0],de_elemat[:,0], marker='.', 
label='Measure Point') # plt.plot(Ve_graph[:,0], f*Ve_graph[:,0]**2 + k *Ve_graph[:,0] + v, c='orange', label='Least Squares') # # plt.plot(Ve_graph[:,0], f*Ve_graph[:,0]**2 + k *Ve_graph[:,0] + v, c='orange', label='Least Squares of Flightdata') # # plt.plot(Ve_e_dde[:,0],line_eleV, label='Stationary Flight Measurements') # plt.ylim(1.25,-0.7) # plt.ylabel('Reduced Elevator Deflection [deg]') # plt.xlabel('Reduced Equivalent Airspeed [m/s]') # plt.legend() # plt.savefig('DedV_mat.jpg') # plt.show() deda_mat, b_mat = np.polyfit(AOA_ele[:,0], de_elemat[:,0],1) # plt.grid() # # plt.scatter(AOA_ele[:,0],de_elemat[:,0],marker='.', label='Measure Point') # plt.plot(aoa,line, label='Stationary Flight Measurements') # plt.plot(AOA_ele[:,0], deda_mat*AOA_ele[:,0] + b_mat, c='orange', label='Least Squares of Flightdata') # # plt.plot(AOA_ele[:,0], deda_mat*AOA_ele[:,0] + b_mat, c='orange', label='Least Squares') # plt.ylim(1.25,-0.7) # plt.ylabel('Reduced Elevator Deflection [deg]') # plt.xlabel('Angle of Attack [deg]') # plt.legend() # plt.savefig('DedAOA_compare.jpg') # plt.show() Cmalpha_mat = -deda_mat * Cmdelta_mat print('Cmalpha matlab =', Cmalpha_mat) Femea_mat = np.array(Fele[29910:33511]) Fele_mat = Femea_mat * Ws/W_ele w , s, v = np.polyfit(Ve_graph[:,0],Fele_mat[:,0],2) # plt.grid() # plt.scatter(Ve_graph[:,0],Fele_mat[:,0], marker='.', label='Measure Point') # plt.plot(Ve_graph[:,0],w*Ve_graph[:,0]**2 + s * Ve_graph[:,0] + v, c='orange', label='Least Squares') # # plt.plot(Ve_graph[:,0],w*Ve_graph[:,0]**2 + s * Ve_graph[:,0] + v, c='orange', label='Least Squares of Flightdata') # # plt.plot(Ve_e_Fe[:,0],line_feele, label='Stationary Flight Measurements') # plt.ylim(70,-50) # plt.ylabel('Reduced Force on Elevator Control Wheel [N]') # plt.xlabel('Reduced Equivalent Airspeed [m/s]') # plt.legend() # plt.savefig('FeV_mat.jpg') # plt.show() ####-------------------------Old versions----------------------------------##### # AT = 
np.column_stack([AOA1,TAS2,de,xcg,alt2,TAT,FUtot]) # cut_off = 70 # AT_trimmed = AT[AT[:,1] > cut_off] # print(AT_trimmed.shape) ##Calculate CL and CLalpha## # AOA = AT_trimmed[:,0] # V = AT_trimmed[:,1] # h = AT_trimmed[:,4] # FU = AT_trimmed[:,6] # rho1 = rho0 * pow((1 + (Tempgrad*h)/Temp0),(-g/(R*Tempgrad) - 1)) # masstot = mass + passmass + fuelblock # Weight = [(masstot - FU[i])*g for i in range(len(FU))] # CLgraph = Weight/(0.5 * V**2 * rho1 * S) # t, ma = np.polyfit(AOA,CLgraph,1) # CLline = t*AOA + ma # print('Cl_alpha =', t, t*(180/pi)) # ##Calculate CD## # CDgraph = CD0 + (CLline) ** 2 / (pi * A * e) #Plots CL and CD## # plt.grid() # scatter = plt.scatter(AOA,CLgraph,marker= '.', label='Measure point') # line = plt.plot(AOA,CLline,c='darkorange', label= 'Least squares') # plt.ylabel('Lift Coefficient [-]') # plt.xlabel('Angle of Attack [deg]') # plt.legend() # plt.show() # # plt.grid() # plt.scatter(CDgraph,CLline) # plt.ylabel('Lift Coefficient [-]') # plt.xlabel('Drag Coefficient [-]') # plt.show() ##Calculate Reynolds Range with Sutherland Equation## # b = 1.458*10**(-6) #kg/msK^1/2 # St = 110.4 #K # T = AT_trimmed[:,5] + 273.15 # mu = (b * T ** (3/2))/(T + St) # Reyn = np.array([(rho1[i] * V[i] * c/mu[i]) for i in range(len(mu))]) # print('Reynoldsnumber Range =', max(Reyn), min(Reyn))
CDstat = CD0 + linecl_stat/(pi * A * e)
random_line_split
mod.rs
/* * Copyright 2018 The Starlark in Rust Authors. * Copyright (c) Facebook, Inc. and its affiliates. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ //! Defines a runtime Starlark value ([`Value`]) and traits for defining custom values ([`StarlarkValue`]). //! //! This module contains code for working with Starlark values: //! //! * Most code dealing with Starlark will use [`Value`], as it represents the fundamental values used in //! Starlark. When frozen, they become [`FrozenValue`]. //! * Values are garbage-collected, so a given [`Value`] lives on a [`Heap`]. //! * Rust values (e.g. [`String`], [`Vec`]) can be added to the [`Heap`] with [`AllocValue`], //! and deconstructed from a [`Value`] with //! * To define your own Rust data type that can live in a [`Value`] it must implement the [`StarlarkValue`] //! trait. //! * All the nested modules represent the built-in Starlark values. These are all defined using [`StarlarkValue`], //! so may serve as interesting inspiration for writing your own values, in addition to occuring in Starlark programs. 
pub use crate::values::{error::*, iter::*, layout::*, owned::*, traits::*, types::*, unpack::*}; use crate::{ collections::{Hashed, SmallHashResult}, eval::Evaluator, values::{function::FUNCTION_TYPE, types::function::FunctionInvoker}, }; pub use gazebo::{any::AnyLifetime, cell::ARef, prelude::*}; use indexmap::Equivalent; use std::{ cell::RefMut, cmp::Ordering, fmt, fmt::{Debug, Display}, }; #[macro_use] mod comparison; // Submodules mod error; pub(crate) mod fast_string; mod index; mod interpolation; mod iter; mod layout; mod owned; mod stack_guard; mod traits; mod types; mod typing; mod unpack; impl Display for Value<'_> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{}", self.to_str()) } } impl Display for FrozenValue { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{}", Value::new_frozen(*self).to_str()) } } fn debug_value(typ: &str, v: Value, f: &mut fmt::Formatter) -> fmt::Result { f.debug_tuple(typ).field(v.get_aref().as_debug()).finish() } impl Debug for Value<'_> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { debug_value("Value", *self, f) } } impl Debug for FrozenValue { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { debug_value("FrozenValue", Value::new_frozen(*self), f) } } impl<'v> PartialEq for Value<'v> { fn eq(&self, other: &Value<'v>) -> bool { self.equals(*other).ok() == Some(true) } } impl PartialEq for FrozenValue { fn eq(&self, other: &FrozenValue) -> bool { let v: Value = Value::new_frozen(*self); let other: Value = Value::new_frozen(*other); v.equals(other).ok() == Some(true) } } impl Eq for Value<'_> {} impl Eq for FrozenValue {} impl Equivalent<FrozenValue> for Value<'_> { fn equivalent(&self, key: &FrozenValue) -> bool { key.equals(*self).unwrap() } } impl Equivalent<Value<'_>> for FrozenValue { fn equivalent(&self, key: &Value) -> bool { self.equals(*key).unwrap() } } /// Trait for things that can be allocated on a [`Heap`] producing a [`Value`]. 
pub trait AllocValue<'v> { fn alloc_value(self, heap: &'v Heap) -> Value<'v>; } impl<'v> AllocValue<'v> for Value<'v> { fn alloc_value(self, _heap: &'v Heap) -> Value<'v> { self } } /// Trait for things that can be allocated on a [`FrozenHeap`] producing a [`FrozenValue`]. pub trait AllocFrozenValue { fn alloc_frozen_value(self, heap: &FrozenHeap) -> FrozenValue; } impl FrozenHeap { /// Allocate a new value on a [`FrozenHeap`]. pub fn alloc<T: AllocFrozenValue>(&self, val: T) -> FrozenValue { val.alloc_frozen_value(self) } } impl Heap { /// Allocate a new value on a [`Heap`]. pub fn alloc<'v, T: AllocValue<'v>>(&'v self, x: T) -> Value<'v> { x.alloc_value(self) } } /// Abstract over [`Value`] and [`FrozenValue`]. /// /// The methods on this trait are those required to implement containers, /// allowing implementations of [`ComplexValue`] to be agnostic of their contained type. /// For details about each function, see the documentation for [`Value`], /// which provides the same functions (and more). pub trait ValueLike<'v>: Eq + Copy + Debug { /// Produce a [`Value`] regardless of the type you are starting with. 
fn to_value(self) -> Value<'v>; fn get_aref(self) -> ARef<'v, dyn StarlarkValue<'v>>; fn new_invoker(self, eval: &mut Evaluator<'v, '_>) -> anyhow::Result<FunctionInvoker<'v>> { self.to_value().new_invoker(eval) } fn get_hash(self) -> anyhow::Result<u64> { self.get_aref().get_hash() } fn get_hashed(self) -> anyhow::Result<Hashed<Self>> { Ok(Hashed::new_unchecked( SmallHashResult::new_unchecked(self.get_hash()?), self, )) } fn collect_repr(self, collector: &mut String) { self.get_aref().collect_repr(collector); } fn to_json(self) -> anyhow::Result<String> { self.get_aref().to_json() } fn equals(self, other: Value<'v>) -> anyhow::Result<bool> { if self.to_value().ptr_eq(other) { Ok(true) } else { let _guard = stack_guard::stack_guard()?; self.get_aref().equals(other) } } fn compare(self, other: Value<'v>) -> anyhow::Result<Ordering> { let _guard = stack_guard::stack_guard()?; self.get_aref().compare(other) } fn downcast_ref<T: AnyLifetime<'v>>(self) -> Option<ARef<'v, T>> { let any = ARef::map(self.get_aref(), |e| e.as_dyn_any()); if any.is::<T>() { Some(ARef::map(any, |any| any.downcast_ref::<T>().unwrap())) } else { None } } } impl<'v, V: ValueLike<'v>> Hashed<V> { pub(crate) fn to_hashed_value(&self) -> Hashed<Value<'v>> { // Safe because we know frozen values have the same hash as non-frozen ones Hashed::new_unchecked(self.hash(), self.key().to_value()) } } impl<'v> Hashed<Value<'v>> { fn freeze(&self, freezer: &Freezer) -> Hashed<FrozenValue> { // Safe because we know frozen values have the same hash as non-frozen ones let key = self.key().freeze(freezer); // But it's an easy mistake to make, so actually check it in debug debug_assert_eq!(Some(self.hash()), key.get_hashed().ok().map(|x| x.hash())); Hashed::new_unchecked(self.hash(), key) } } impl<'v> ValueLike<'v> for Value<'v> { fn get_aref(self) -> ARef<'v, dyn StarlarkValue<'v>> { Value::get_aref(self) } fn to_value(self) -> Value<'v> { self } } impl<'v> ValueLike<'v> for FrozenValue { fn get_aref(self) -> 
ARef<'v, dyn StarlarkValue<'v>> { ARef::new_ptr(self.get_ref()) } fn to_value(self) -> Value<'v> { Value::new_frozen(self) } } impl FrozenValue { /// Convert a [`FrozenValue`] back to a [`Value`]. pub fn to_value<'v>(self) -> Value<'v> { Value::new_frozen(self) } } /// How an attribute (e.g. `x.f`) should behave. #[derive(Clone, Copy, Dupe, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)] pub enum AttrType { /// The attribute is a field, a direct value with no special behaviour. Field, /// The attribute is a method, which should be called passing the `x` value /// as its first argument. It will either be a function (which is transformed /// into a [`WrappedMethod`](crate::values::function::WrappedMethod)) or a /// [`NativeAttribute`](crate::values::function::NativeAttribute) /// (which is evaluated immediately). Method, } impl<'v> Value<'v> { /// Add two [`Value`]s together. Will first try using [`radd`](StarlarkValue::radd), /// before falling back to [`add`](StarlarkValue::add). pub fn add(self, other: Value<'v>, heap: &'v Heap) -> anyhow::Result<Value<'v>> { let me = self.to_value(); if let Some(v) = other.get_aref().radd(me, heap) { v } else { self.get_aref().add(other, heap) } } /// Convert a value to a [`FrozenValue`] using a supplied [`Freezer`]. pub fn freeze(self, freezer: &Freezer) -> FrozenValue { freezer.freeze(self) } /// Implement the `str()` function - converts a string value to itself, /// otherwise uses `repr()`. pub fn to_str(self) -> String { match self.unpack_str() { None => self.to_repr(), Some(s) => s.to_owned(), } } /// Implement the `repr()` function. pub fn to_repr(self) -> String { let mut s = String::new(); self.collect_repr(&mut s); s } /// Forwards to [`ComplexValue::set_attr`]. pub fn set_attr( self, attribute: &str, alloc_value: Value<'v>, heap: &'v Heap, ) -> anyhow::Result<()> { self.get_ref_mut(heap)?.set_attr(attribute, alloc_value) } /// Forwards to [`ComplexValue::set_at`]. 
pub fn set_at( self, index: Value<'v>, alloc_value: Value<'v>, heap: &'v Heap, ) -> anyhow::Result<()> { self.get_ref_mut(heap)?.set_at(index, alloc_value) } /// Return the contents of an iterable collection, as an owned vector. pub fn iterate_collect(self, heap: &'v Heap) -> anyhow::Result<Vec<Value<'v>>> { // You might reasonably think this is mostly called on lists (I think it is), // and thus that a fast-path here would speed things up. But in my experiments // it's completely irrelevant (you pay a bit for the check, you save a bit on each step). Ok(self.iterate(heap)?.iter().collect()) } /// Produce an iterable from a value. pub fn iterate(self, heap: &'v Heap) -> anyhow::Result<RefIterable<'v>> { let me: ARef<'v, dyn StarlarkValue> = self.get_aref(); me.iterate()?; Ok(RefIterable::new( heap, ARef::map(me, |e| e.iterate().unwrap()), )) } /// Get the [`Hashed`] version of this [`Value`]. pub fn get_hashed(self) -> anyhow::Result<Hashed<Self>> { ValueLike::get_hashed(self) } /// Get a reference to underlying data or [`None`] /// if contained object has different type than requested. /// /// This function panics if the [`Value`] is borrowed mutably. /// /// In many cases you may wish to call [`FromValue`] instead, as that can /// get a non-frozen value from an underlying frozen value. pub fn downcast_ref<T: AnyLifetime<'v>>(self) -> Option<ARef<'v, T>> { ValueLike::downcast_ref(self) } /// Are two values equal. If the values are of different types it will /// return [`false`]. It will only error if there is excessive recursion. pub fn equals(self, other: Value<'v>) -> anyhow::Result<bool> { ValueLike::equals(self, other) } /// How are two values comparable. For values of different types will return [`Err`]. pub fn compare(self, other: Value<'v>) -> anyhow::Result<Ordering> { ValueLike::compare(self, other) } /// Get a mutable reference to underlying data or [`None`] /// if contained object has different type than requested. 
/// /// This function returns an [`Err`] if the [`Value`] is already borrowed, is frozen, /// or frozen for iteration. /// /// While this reference is active, any [`get_aref`](Value::get_aref) or similar on the value will /// _cause a panic_. Therefore, it's super important not to call any Starlark operations, /// even as simple as equality, while holding the [`RefMut`]. pub fn downcast_mut<T: AnyLifetime<'v>>( self, heap: &'v Heap, ) -> anyhow::Result<Option<RefMut<'_, T>>> { let vref = self.get_ref_mut(heap)?; let any: RefMut<'_, dyn AnyLifetime<'v>> = RefMut::map(vref, |v| v.as_dyn_any_mut()); Ok(if any.is::<T>() { Some(RefMut::map(any, |any| any.downcast_mut::<T>().unwrap())) } else { None }) } /// Describe the value, in order to get its metadata in a way that could be used /// to generate prototypes, help information or whatever other descriptive text /// is required. /// Plan is to make this return a data type at some point in the future, possibly /// move on to `StarlarkValue` and include data from members. pub fn describe(self, name: &str) -> String { if self.get_type() == FUNCTION_TYPE { format!("def {}: pass", self.to_repr().replace(" = ...", " = None")) } else { format!("# {} = {}", name, self.to_repr()) } } /// Call `export_as` on the underlying value, but only if the type is mutable. /// Otherwise, does nothing. pub fn export_as(self, name: &str, heap: &'v Heap) { if let Some(mut mv) = self.get_ref_mut_already() { mv.export_as(heap, name) } } /// Return the attribute with the given name. Returns a pair of a boolean and the value. /// /// The type is [`AttrType::Method`] if the attribute was defined via [`StarlarkValue::get_methods`] /// and should be used as a signal that if the attribute is subsequently called, /// e.g. `object.attribute(argument)` then the `object` should be passed as the first /// argument to the function, e.g. `object.attribute(object, argument)`. 
pub fn get_attr( self, attribute: &str, heap: &'v Heap, ) -> anyhow::Result<(AttrType, Value<'v>)> { let aref = self.get_aref(); if let Some(methods) = aref.get_methods() { if let Some(v) = methods.get(attribute) { return Ok((AttrType::Method, v)); } } aref.get_attr(attribute, heap).map(|v| (AttrType::Field, v)) } /// Query whether an attribute exists on a type. Should be equivalent to whether /// [`get_attr`](Value::get_attr) succeeds, but potentially more efficient. pub fn has_attr(self, attribute: &str) -> bool { let aref = self.get_aref(); if let Some(methods) = aref.get_methods() { if methods.get(attribute).is_some() { return true; } } aref.has_attr(attribute) } /// Get a list of all the attributes this function supports, used to implement the /// `dir()` function. pub fn dir_attr(self) -> Vec<String> { let aref = self.get_aref(); let mut result = if let Some(methods) = aref.get_methods() { let mut res = methods.names(); res.extend(aref.dir_attr()); res } else { aref.dir_attr() }; result.sort(); result } } /// Methods that just forward to the underlying [`StarlarkValue`]. 
impl<'v> Value<'v> { pub fn get_type(self) -> &'static str { self.get_aref().get_type() } pub fn to_bool(self) -> bool { // Fast path for the common case if let Some(x) = self.unpack_bool() { x } else { self.get_aref().to_bool() } } pub fn to_int(self) -> anyhow::Result<i32> { // Fast path for the common case if let Some(x) = self.unpack_int() { Ok(x) } else { self.get_aref().to_int() } } pub fn at(self, index: Value<'v>, heap: &'v Heap) -> anyhow::Result<Value<'v>> { self.get_aref().at(index, heap) } pub fn slice( self, start: Option<Value<'v>>, stop: Option<Value<'v>>, stride: Option<Value<'v>>, heap: &'v Heap, ) -> anyhow::Result<Value<'v>> { self.get_aref().slice(start, stop, stride, heap) } pub fn length(self) -> anyhow::Result<i32> { self.get_aref().length() } pub fn is_in(self, other: Value<'v>) -> anyhow::Result<bool> { self.get_aref().is_in(other) } pub fn plus(self, heap: &'v Heap) -> anyhow::Result<Value<'v>> { self.get_aref().plus(heap) } pub fn minus(self, heap: &'v Heap) -> anyhow::Result<Value<'v>> { self.get_aref().minus(heap) } pub fn sub(self, other: Value<'v>, heap: &'v Heap) -> anyhow::Result<Value<'v>> { self.get_aref().sub(other, heap) } pub fn mul(self, other: Value<'v>, heap: &'v Heap) -> anyhow::Result<Value<'v>> { self.get_aref().mul(other, heap) } pub fn percent(self, other: Value<'v>, heap: &'v Heap) -> anyhow::Result<Value<'v>> { self.get_aref().percent(other, heap) } pub fn floor_div(self, other: Value<'v>, heap: &'v Heap) -> anyhow::Result<Value<'v>> { self.get_aref().floor_div(other, heap) } pub fn bit_and(self, other: Value<'v>) -> anyhow::Result<Value<'v>> { self.get_aref().bit_and(other) } pub fn bit_or(self, other: Value<'v>) -> anyhow::Result<Value<'v>> { self.get_aref().bit_or(other) } pub fn bit_xor(self, other: Value<'v>) -> anyhow::Result<Value<'v>> { self.get_aref().bit_xor(other) } pub fn left_shift(self, other: Value<'v>) -> anyhow::Result<Value<'v>> { self.get_aref().left_shift(other) } pub fn right_shift(self, other: 
Value<'v>) -> anyhow::Result<Value<'v>> { self.get_aref().right_shift(other) } pub fn new_invoker(self, eval: &mut Evaluator<'v, '_>) -> anyhow::Result<FunctionInvoker<'v>> { self.get_aref().new_invoker(self, eval) } pub fn get_type_value(self) -> &'static ConstFrozenValue
}
{ self.get_aref().get_type_value() }
identifier_body
mod.rs
/* * Copyright 2018 The Starlark in Rust Authors. * Copyright (c) Facebook, Inc. and its affiliates. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ //! Defines a runtime Starlark value ([`Value`]) and traits for defining custom values ([`StarlarkValue`]). //! //! This module contains code for working with Starlark values: //! //! * Most code dealing with Starlark will use [`Value`], as it represents the fundamental values used in //! Starlark. When frozen, they become [`FrozenValue`]. //! * Values are garbage-collected, so a given [`Value`] lives on a [`Heap`]. //! * Rust values (e.g. [`String`], [`Vec`]) can be added to the [`Heap`] with [`AllocValue`], //! and deconstructed from a [`Value`] with //! * To define your own Rust data type that can live in a [`Value`] it must implement the [`StarlarkValue`] //! trait. //! * All the nested modules represent the built-in Starlark values. These are all defined using [`StarlarkValue`], //! so may serve as interesting inspiration for writing your own values, in addition to occuring in Starlark programs. 
pub use crate::values::{error::*, iter::*, layout::*, owned::*, traits::*, types::*, unpack::*}; use crate::{ collections::{Hashed, SmallHashResult}, eval::Evaluator, values::{function::FUNCTION_TYPE, types::function::FunctionInvoker}, }; pub use gazebo::{any::AnyLifetime, cell::ARef, prelude::*}; use indexmap::Equivalent; use std::{ cell::RefMut, cmp::Ordering, fmt, fmt::{Debug, Display}, }; #[macro_use] mod comparison; // Submodules mod error; pub(crate) mod fast_string; mod index; mod interpolation; mod iter; mod layout; mod owned; mod stack_guard; mod traits; mod types; mod typing; mod unpack; impl Display for Value<'_> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{}", self.to_str()) } } impl Display for FrozenValue { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{}", Value::new_frozen(*self).to_str()) } } fn debug_value(typ: &str, v: Value, f: &mut fmt::Formatter) -> fmt::Result { f.debug_tuple(typ).field(v.get_aref().as_debug()).finish() } impl Debug for Value<'_> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { debug_value("Value", *self, f) } } impl Debug for FrozenValue { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { debug_value("FrozenValue", Value::new_frozen(*self), f) } } impl<'v> PartialEq for Value<'v> { fn eq(&self, other: &Value<'v>) -> bool { self.equals(*other).ok() == Some(true) } } impl PartialEq for FrozenValue { fn eq(&self, other: &FrozenValue) -> bool { let v: Value = Value::new_frozen(*self); let other: Value = Value::new_frozen(*other); v.equals(other).ok() == Some(true) } } impl Eq for Value<'_> {} impl Eq for FrozenValue {} impl Equivalent<FrozenValue> for Value<'_> { fn equivalent(&self, key: &FrozenValue) -> bool { key.equals(*self).unwrap() } } impl Equivalent<Value<'_>> for FrozenValue { fn equivalent(&self, key: &Value) -> bool { self.equals(*key).unwrap() } } /// Trait for things that can be allocated on a [`Heap`] producing a [`Value`]. 
pub trait AllocValue<'v> { fn alloc_value(self, heap: &'v Heap) -> Value<'v>; } impl<'v> AllocValue<'v> for Value<'v> { fn alloc_value(self, _heap: &'v Heap) -> Value<'v> { self } } /// Trait for things that can be allocated on a [`FrozenHeap`] producing a [`FrozenValue`]. pub trait AllocFrozenValue { fn alloc_frozen_value(self, heap: &FrozenHeap) -> FrozenValue; } impl FrozenHeap { /// Allocate a new value on a [`FrozenHeap`]. pub fn alloc<T: AllocFrozenValue>(&self, val: T) -> FrozenValue { val.alloc_frozen_value(self) } } impl Heap { /// Allocate a new value on a [`Heap`]. pub fn alloc<'v, T: AllocValue<'v>>(&'v self, x: T) -> Value<'v> { x.alloc_value(self) } } /// Abstract over [`Value`] and [`FrozenValue`]. /// /// The methods on this trait are those required to implement containers, /// allowing implementations of [`ComplexValue`] to be agnostic of their contained type. /// For details about each function, see the documentation for [`Value`], /// which provides the same functions (and more). pub trait ValueLike<'v>: Eq + Copy + Debug { /// Produce a [`Value`] regardless of the type you are starting with. 
fn to_value(self) -> Value<'v>; fn get_aref(self) -> ARef<'v, dyn StarlarkValue<'v>>; fn new_invoker(self, eval: &mut Evaluator<'v, '_>) -> anyhow::Result<FunctionInvoker<'v>> { self.to_value().new_invoker(eval) } fn get_hash(self) -> anyhow::Result<u64> { self.get_aref().get_hash() } fn get_hashed(self) -> anyhow::Result<Hashed<Self>> { Ok(Hashed::new_unchecked( SmallHashResult::new_unchecked(self.get_hash()?), self, )) } fn collect_repr(self, collector: &mut String) { self.get_aref().collect_repr(collector); } fn to_json(self) -> anyhow::Result<String> { self.get_aref().to_json() } fn equals(self, other: Value<'v>) -> anyhow::Result<bool> { if self.to_value().ptr_eq(other) { Ok(true) } else { let _guard = stack_guard::stack_guard()?; self.get_aref().equals(other) } } fn compare(self, other: Value<'v>) -> anyhow::Result<Ordering> { let _guard = stack_guard::stack_guard()?; self.get_aref().compare(other) } fn downcast_ref<T: AnyLifetime<'v>>(self) -> Option<ARef<'v, T>> { let any = ARef::map(self.get_aref(), |e| e.as_dyn_any()); if any.is::<T>() { Some(ARef::map(any, |any| any.downcast_ref::<T>().unwrap())) } else { None } } } impl<'v, V: ValueLike<'v>> Hashed<V> { pub(crate) fn to_hashed_value(&self) -> Hashed<Value<'v>> { // Safe because we know frozen values have the same hash as non-frozen ones Hashed::new_unchecked(self.hash(), self.key().to_value()) } } impl<'v> Hashed<Value<'v>> { fn freeze(&self, freezer: &Freezer) -> Hashed<FrozenValue> { // Safe because we know frozen values have the same hash as non-frozen ones let key = self.key().freeze(freezer); // But it's an easy mistake to make, so actually check it in debug debug_assert_eq!(Some(self.hash()), key.get_hashed().ok().map(|x| x.hash())); Hashed::new_unchecked(self.hash(), key) } } impl<'v> ValueLike<'v> for Value<'v> { fn get_aref(self) -> ARef<'v, dyn StarlarkValue<'v>> { Value::get_aref(self) } fn to_value(self) -> Value<'v> { self } } impl<'v> ValueLike<'v> for FrozenValue { fn get_aref(self) -> 
ARef<'v, dyn StarlarkValue<'v>> { ARef::new_ptr(self.get_ref()) } fn to_value(self) -> Value<'v> { Value::new_frozen(self) } } impl FrozenValue { /// Convert a [`FrozenValue`] back to a [`Value`]. pub fn to_value<'v>(self) -> Value<'v> { Value::new_frozen(self) } } /// How an attribute (e.g. `x.f`) should behave. #[derive(Clone, Copy, Dupe, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)] pub enum AttrType { /// The attribute is a field, a direct value with no special behaviour. Field, /// The attribute is a method, which should be called passing the `x` value /// as its first argument. It will either be a function (which is transformed /// into a [`WrappedMethod`](crate::values::function::WrappedMethod)) or a /// [`NativeAttribute`](crate::values::function::NativeAttribute) /// (which is evaluated immediately). Method, } impl<'v> Value<'v> { /// Add two [`Value`]s together. Will first try using [`radd`](StarlarkValue::radd), /// before falling back to [`add`](StarlarkValue::add). pub fn add(self, other: Value<'v>, heap: &'v Heap) -> anyhow::Result<Value<'v>> { let me = self.to_value(); if let Some(v) = other.get_aref().radd(me, heap) { v } else { self.get_aref().add(other, heap) } } /// Convert a value to a [`FrozenValue`] using a supplied [`Freezer`]. pub fn freeze(self, freezer: &Freezer) -> FrozenValue { freezer.freeze(self) } /// Implement the `str()` function - converts a string value to itself, /// otherwise uses `repr()`. pub fn to_str(self) -> String { match self.unpack_str() { None => self.to_repr(), Some(s) => s.to_owned(), } } /// Implement the `repr()` function. pub fn to_repr(self) -> String { let mut s = String::new(); self.collect_repr(&mut s); s } /// Forwards to [`ComplexValue::set_attr`]. pub fn set_attr( self, attribute: &str, alloc_value: Value<'v>, heap: &'v Heap, ) -> anyhow::Result<()> { self.get_ref_mut(heap)?.set_attr(attribute, alloc_value) } /// Forwards to [`ComplexValue::set_at`]. 
pub fn set_at( self, index: Value<'v>, alloc_value: Value<'v>, heap: &'v Heap, ) -> anyhow::Result<()> { self.get_ref_mut(heap)?.set_at(index, alloc_value) } /// Return the contents of an iterable collection, as an owned vector. pub fn iterate_collect(self, heap: &'v Heap) -> anyhow::Result<Vec<Value<'v>>> { // You might reasonably think this is mostly called on lists (I think it is), // and thus that a fast-path here would speed things up. But in my experiments // it's completely irrelevant (you pay a bit for the check, you save a bit on each step). Ok(self.iterate(heap)?.iter().collect()) } /// Produce an iterable from a value. pub fn iterate(self, heap: &'v Heap) -> anyhow::Result<RefIterable<'v>> { let me: ARef<'v, dyn StarlarkValue> = self.get_aref(); me.iterate()?; Ok(RefIterable::new( heap, ARef::map(me, |e| e.iterate().unwrap()), )) } /// Get the [`Hashed`] version of this [`Value`]. pub fn get_hashed(self) -> anyhow::Result<Hashed<Self>> { ValueLike::get_hashed(self) } /// Get a reference to underlying data or [`None`] /// if contained object has different type than requested. /// /// This function panics if the [`Value`] is borrowed mutably. /// /// In many cases you may wish to call [`FromValue`] instead, as that can /// get a non-frozen value from an underlying frozen value. pub fn downcast_ref<T: AnyLifetime<'v>>(self) -> Option<ARef<'v, T>> { ValueLike::downcast_ref(self) } /// Are two values equal. If the values are of different types it will /// return [`false`]. It will only error if there is excessive recursion. pub fn equals(self, other: Value<'v>) -> anyhow::Result<bool> { ValueLike::equals(self, other) } /// How are two values comparable. For values of different types will return [`Err`]. pub fn compare(self, other: Value<'v>) -> anyhow::Result<Ordering> { ValueLike::compare(self, other) } /// Get a mutable reference to underlying data or [`None`] /// if contained object has different type than requested. 
/// /// This function returns an [`Err`] if the [`Value`] is already borrowed, is frozen, /// or frozen for iteration. /// /// While this reference is active, any [`get_aref`](Value::get_aref) or similar on the value will /// _cause a panic_. Therefore, it's super important not to call any Starlark operations, /// even as simple as equality, while holding the [`RefMut`]. pub fn downcast_mut<T: AnyLifetime<'v>>( self, heap: &'v Heap, ) -> anyhow::Result<Option<RefMut<'_, T>>> { let vref = self.get_ref_mut(heap)?; let any: RefMut<'_, dyn AnyLifetime<'v>> = RefMut::map(vref, |v| v.as_dyn_any_mut()); Ok(if any.is::<T>() { Some(RefMut::map(any, |any| any.downcast_mut::<T>().unwrap())) } else { None }) } /// Describe the value, in order to get its metadata in a way that could be used /// to generate prototypes, help information or whatever other descriptive text /// is required. /// Plan is to make this return a data type at some point in the future, possibly /// move on to `StarlarkValue` and include data from members. pub fn describe(self, name: &str) -> String { if self.get_type() == FUNCTION_TYPE { format!("def {}: pass", self.to_repr().replace(" = ...", " = None")) } else { format!("# {} = {}", name, self.to_repr()) } } /// Call `export_as` on the underlying value, but only if the type is mutable. /// Otherwise, does nothing. pub fn
(self, name: &str, heap: &'v Heap) { if let Some(mut mv) = self.get_ref_mut_already() { mv.export_as(heap, name) } } /// Return the attribute with the given name. Returns a pair of a boolean and the value. /// /// The type is [`AttrType::Method`] if the attribute was defined via [`StarlarkValue::get_methods`] /// and should be used as a signal that if the attribute is subsequently called, /// e.g. `object.attribute(argument)` then the `object` should be passed as the first /// argument to the function, e.g. `object.attribute(object, argument)`. pub fn get_attr( self, attribute: &str, heap: &'v Heap, ) -> anyhow::Result<(AttrType, Value<'v>)> { let aref = self.get_aref(); if let Some(methods) = aref.get_methods() { if let Some(v) = methods.get(attribute) { return Ok((AttrType::Method, v)); } } aref.get_attr(attribute, heap).map(|v| (AttrType::Field, v)) } /// Query whether an attribute exists on a type. Should be equivalent to whether /// [`get_attr`](Value::get_attr) succeeds, but potentially more efficient. pub fn has_attr(self, attribute: &str) -> bool { let aref = self.get_aref(); if let Some(methods) = aref.get_methods() { if methods.get(attribute).is_some() { return true; } } aref.has_attr(attribute) } /// Get a list of all the attributes this function supports, used to implement the /// `dir()` function. pub fn dir_attr(self) -> Vec<String> { let aref = self.get_aref(); let mut result = if let Some(methods) = aref.get_methods() { let mut res = methods.names(); res.extend(aref.dir_attr()); res } else { aref.dir_attr() }; result.sort(); result } } /// Methods that just forward to the underlying [`StarlarkValue`]. 
impl<'v> Value<'v> { pub fn get_type(self) -> &'static str { self.get_aref().get_type() } pub fn to_bool(self) -> bool { // Fast path for the common case if let Some(x) = self.unpack_bool() { x } else { self.get_aref().to_bool() } } pub fn to_int(self) -> anyhow::Result<i32> { // Fast path for the common case if let Some(x) = self.unpack_int() { Ok(x) } else { self.get_aref().to_int() } } pub fn at(self, index: Value<'v>, heap: &'v Heap) -> anyhow::Result<Value<'v>> { self.get_aref().at(index, heap) } pub fn slice( self, start: Option<Value<'v>>, stop: Option<Value<'v>>, stride: Option<Value<'v>>, heap: &'v Heap, ) -> anyhow::Result<Value<'v>> { self.get_aref().slice(start, stop, stride, heap) } pub fn length(self) -> anyhow::Result<i32> { self.get_aref().length() } pub fn is_in(self, other: Value<'v>) -> anyhow::Result<bool> { self.get_aref().is_in(other) } pub fn plus(self, heap: &'v Heap) -> anyhow::Result<Value<'v>> { self.get_aref().plus(heap) } pub fn minus(self, heap: &'v Heap) -> anyhow::Result<Value<'v>> { self.get_aref().minus(heap) } pub fn sub(self, other: Value<'v>, heap: &'v Heap) -> anyhow::Result<Value<'v>> { self.get_aref().sub(other, heap) } pub fn mul(self, other: Value<'v>, heap: &'v Heap) -> anyhow::Result<Value<'v>> { self.get_aref().mul(other, heap) } pub fn percent(self, other: Value<'v>, heap: &'v Heap) -> anyhow::Result<Value<'v>> { self.get_aref().percent(other, heap) } pub fn floor_div(self, other: Value<'v>, heap: &'v Heap) -> anyhow::Result<Value<'v>> { self.get_aref().floor_div(other, heap) } pub fn bit_and(self, other: Value<'v>) -> anyhow::Result<Value<'v>> { self.get_aref().bit_and(other) } pub fn bit_or(self, other: Value<'v>) -> anyhow::Result<Value<'v>> { self.get_aref().bit_or(other) } pub fn bit_xor(self, other: Value<'v>) -> anyhow::Result<Value<'v>> { self.get_aref().bit_xor(other) } pub fn left_shift(self, other: Value<'v>) -> anyhow::Result<Value<'v>> { self.get_aref().left_shift(other) } pub fn right_shift(self, other: 
Value<'v>) -> anyhow::Result<Value<'v>> { self.get_aref().right_shift(other) } pub fn new_invoker(self, eval: &mut Evaluator<'v, '_>) -> anyhow::Result<FunctionInvoker<'v>> { self.get_aref().new_invoker(self, eval) } pub fn get_type_value(self) -> &'static ConstFrozenValue { self.get_aref().get_type_value() } }
export_as
identifier_name
mod.rs
/* * Copyright 2018 The Starlark in Rust Authors. * Copyright (c) Facebook, Inc. and its affiliates. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ //! Defines a runtime Starlark value ([`Value`]) and traits for defining custom values ([`StarlarkValue`]). //! //! This module contains code for working with Starlark values: //! //! * Most code dealing with Starlark will use [`Value`], as it represents the fundamental values used in //! Starlark. When frozen, they become [`FrozenValue`]. //! * Values are garbage-collected, so a given [`Value`] lives on a [`Heap`]. //! * Rust values (e.g. [`String`], [`Vec`]) can be added to the [`Heap`] with [`AllocValue`], //! and deconstructed from a [`Value`] with //! * To define your own Rust data type that can live in a [`Value`] it must implement the [`StarlarkValue`] //! trait. //! * All the nested modules represent the built-in Starlark values. These are all defined using [`StarlarkValue`], //! so may serve as interesting inspiration for writing your own values, in addition to occuring in Starlark programs. 
pub use crate::values::{error::*, iter::*, layout::*, owned::*, traits::*, types::*, unpack::*}; use crate::{ collections::{Hashed, SmallHashResult}, eval::Evaluator, values::{function::FUNCTION_TYPE, types::function::FunctionInvoker}, }; pub use gazebo::{any::AnyLifetime, cell::ARef, prelude::*}; use indexmap::Equivalent; use std::{ cell::RefMut, cmp::Ordering, fmt, fmt::{Debug, Display}, }; #[macro_use] mod comparison; // Submodules mod error; pub(crate) mod fast_string; mod index; mod interpolation; mod iter; mod layout; mod owned; mod stack_guard; mod traits; mod types; mod typing; mod unpack; impl Display for Value<'_> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{}", self.to_str()) } } impl Display for FrozenValue { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{}", Value::new_frozen(*self).to_str()) } } fn debug_value(typ: &str, v: Value, f: &mut fmt::Formatter) -> fmt::Result { f.debug_tuple(typ).field(v.get_aref().as_debug()).finish() } impl Debug for Value<'_> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { debug_value("Value", *self, f) } } impl Debug for FrozenValue { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { debug_value("FrozenValue", Value::new_frozen(*self), f) } } impl<'v> PartialEq for Value<'v> { fn eq(&self, other: &Value<'v>) -> bool { self.equals(*other).ok() == Some(true) } } impl PartialEq for FrozenValue { fn eq(&self, other: &FrozenValue) -> bool { let v: Value = Value::new_frozen(*self); let other: Value = Value::new_frozen(*other); v.equals(other).ok() == Some(true) } } impl Eq for Value<'_> {} impl Eq for FrozenValue {} impl Equivalent<FrozenValue> for Value<'_> { fn equivalent(&self, key: &FrozenValue) -> bool { key.equals(*self).unwrap() } } impl Equivalent<Value<'_>> for FrozenValue { fn equivalent(&self, key: &Value) -> bool { self.equals(*key).unwrap() } } /// Trait for things that can be allocated on a [`Heap`] producing a [`Value`]. 
pub trait AllocValue<'v> { fn alloc_value(self, heap: &'v Heap) -> Value<'v>; } impl<'v> AllocValue<'v> for Value<'v> { fn alloc_value(self, _heap: &'v Heap) -> Value<'v> { self } } /// Trait for things that can be allocated on a [`FrozenHeap`] producing a [`FrozenValue`]. pub trait AllocFrozenValue { fn alloc_frozen_value(self, heap: &FrozenHeap) -> FrozenValue; } impl FrozenHeap { /// Allocate a new value on a [`FrozenHeap`]. pub fn alloc<T: AllocFrozenValue>(&self, val: T) -> FrozenValue { val.alloc_frozen_value(self) } } impl Heap { /// Allocate a new value on a [`Heap`]. pub fn alloc<'v, T: AllocValue<'v>>(&'v self, x: T) -> Value<'v> { x.alloc_value(self) } } /// Abstract over [`Value`] and [`FrozenValue`]. /// /// The methods on this trait are those required to implement containers, /// allowing implementations of [`ComplexValue`] to be agnostic of their contained type. /// For details about each function, see the documentation for [`Value`], /// which provides the same functions (and more). pub trait ValueLike<'v>: Eq + Copy + Debug { /// Produce a [`Value`] regardless of the type you are starting with. fn to_value(self) -> Value<'v>; fn get_aref(self) -> ARef<'v, dyn StarlarkValue<'v>>; fn new_invoker(self, eval: &mut Evaluator<'v, '_>) -> anyhow::Result<FunctionInvoker<'v>> { self.to_value().new_invoker(eval) } fn get_hash(self) -> anyhow::Result<u64> { self.get_aref().get_hash() } fn get_hashed(self) -> anyhow::Result<Hashed<Self>> { Ok(Hashed::new_unchecked( SmallHashResult::new_unchecked(self.get_hash()?), self, )) } fn collect_repr(self, collector: &mut String) { self.get_aref().collect_repr(collector); } fn to_json(self) -> anyhow::Result<String> { self.get_aref().to_json() } fn equals(self, other: Value<'v>) -> anyhow::Result<bool> { if self.to_value().ptr_eq(other) { Ok(true) } else
} fn compare(self, other: Value<'v>) -> anyhow::Result<Ordering> { let _guard = stack_guard::stack_guard()?; self.get_aref().compare(other) } fn downcast_ref<T: AnyLifetime<'v>>(self) -> Option<ARef<'v, T>> { let any = ARef::map(self.get_aref(), |e| e.as_dyn_any()); if any.is::<T>() { Some(ARef::map(any, |any| any.downcast_ref::<T>().unwrap())) } else { None } } } impl<'v, V: ValueLike<'v>> Hashed<V> { pub(crate) fn to_hashed_value(&self) -> Hashed<Value<'v>> { // Safe because we know frozen values have the same hash as non-frozen ones Hashed::new_unchecked(self.hash(), self.key().to_value()) } } impl<'v> Hashed<Value<'v>> { fn freeze(&self, freezer: &Freezer) -> Hashed<FrozenValue> { // Safe because we know frozen values have the same hash as non-frozen ones let key = self.key().freeze(freezer); // But it's an easy mistake to make, so actually check it in debug debug_assert_eq!(Some(self.hash()), key.get_hashed().ok().map(|x| x.hash())); Hashed::new_unchecked(self.hash(), key) } } impl<'v> ValueLike<'v> for Value<'v> { fn get_aref(self) -> ARef<'v, dyn StarlarkValue<'v>> { Value::get_aref(self) } fn to_value(self) -> Value<'v> { self } } impl<'v> ValueLike<'v> for FrozenValue { fn get_aref(self) -> ARef<'v, dyn StarlarkValue<'v>> { ARef::new_ptr(self.get_ref()) } fn to_value(self) -> Value<'v> { Value::new_frozen(self) } } impl FrozenValue { /// Convert a [`FrozenValue`] back to a [`Value`]. pub fn to_value<'v>(self) -> Value<'v> { Value::new_frozen(self) } } /// How an attribute (e.g. `x.f`) should behave. #[derive(Clone, Copy, Dupe, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)] pub enum AttrType { /// The attribute is a field, a direct value with no special behaviour. Field, /// The attribute is a method, which should be called passing the `x` value /// as its first argument. 
It will either be a function (which is transformed /// into a [`WrappedMethod`](crate::values::function::WrappedMethod)) or a /// [`NativeAttribute`](crate::values::function::NativeAttribute) /// (which is evaluated immediately). Method, } impl<'v> Value<'v> { /// Add two [`Value`]s together. Will first try using [`radd`](StarlarkValue::radd), /// before falling back to [`add`](StarlarkValue::add). pub fn add(self, other: Value<'v>, heap: &'v Heap) -> anyhow::Result<Value<'v>> { let me = self.to_value(); if let Some(v) = other.get_aref().radd(me, heap) { v } else { self.get_aref().add(other, heap) } } /// Convert a value to a [`FrozenValue`] using a supplied [`Freezer`]. pub fn freeze(self, freezer: &Freezer) -> FrozenValue { freezer.freeze(self) } /// Implement the `str()` function - converts a string value to itself, /// otherwise uses `repr()`. pub fn to_str(self) -> String { match self.unpack_str() { None => self.to_repr(), Some(s) => s.to_owned(), } } /// Implement the `repr()` function. pub fn to_repr(self) -> String { let mut s = String::new(); self.collect_repr(&mut s); s } /// Forwards to [`ComplexValue::set_attr`]. pub fn set_attr( self, attribute: &str, alloc_value: Value<'v>, heap: &'v Heap, ) -> anyhow::Result<()> { self.get_ref_mut(heap)?.set_attr(attribute, alloc_value) } /// Forwards to [`ComplexValue::set_at`]. pub fn set_at( self, index: Value<'v>, alloc_value: Value<'v>, heap: &'v Heap, ) -> anyhow::Result<()> { self.get_ref_mut(heap)?.set_at(index, alloc_value) } /// Return the contents of an iterable collection, as an owned vector. pub fn iterate_collect(self, heap: &'v Heap) -> anyhow::Result<Vec<Value<'v>>> { // You might reasonably think this is mostly called on lists (I think it is), // and thus that a fast-path here would speed things up. But in my experiments // it's completely irrelevant (you pay a bit for the check, you save a bit on each step). Ok(self.iterate(heap)?.iter().collect()) } /// Produce an iterable from a value. 
pub fn iterate(self, heap: &'v Heap) -> anyhow::Result<RefIterable<'v>> { let me: ARef<'v, dyn StarlarkValue> = self.get_aref(); me.iterate()?; Ok(RefIterable::new( heap, ARef::map(me, |e| e.iterate().unwrap()), )) } /// Get the [`Hashed`] version of this [`Value`]. pub fn get_hashed(self) -> anyhow::Result<Hashed<Self>> { ValueLike::get_hashed(self) } /// Get a reference to underlying data or [`None`] /// if contained object has different type than requested. /// /// This function panics if the [`Value`] is borrowed mutably. /// /// In many cases you may wish to call [`FromValue`] instead, as that can /// get a non-frozen value from an underlying frozen value. pub fn downcast_ref<T: AnyLifetime<'v>>(self) -> Option<ARef<'v, T>> { ValueLike::downcast_ref(self) } /// Are two values equal. If the values are of different types it will /// return [`false`]. It will only error if there is excessive recursion. pub fn equals(self, other: Value<'v>) -> anyhow::Result<bool> { ValueLike::equals(self, other) } /// How are two values comparable. For values of different types will return [`Err`]. pub fn compare(self, other: Value<'v>) -> anyhow::Result<Ordering> { ValueLike::compare(self, other) } /// Get a mutable reference to underlying data or [`None`] /// if contained object has different type than requested. /// /// This function returns an [`Err`] if the [`Value`] is already borrowed, is frozen, /// or frozen for iteration. /// /// While this reference is active, any [`get_aref`](Value::get_aref) or similar on the value will /// _cause a panic_. Therefore, it's super important not to call any Starlark operations, /// even as simple as equality, while holding the [`RefMut`]. 
pub fn downcast_mut<T: AnyLifetime<'v>>( self, heap: &'v Heap, ) -> anyhow::Result<Option<RefMut<'_, T>>> { let vref = self.get_ref_mut(heap)?; let any: RefMut<'_, dyn AnyLifetime<'v>> = RefMut::map(vref, |v| v.as_dyn_any_mut()); Ok(if any.is::<T>() { Some(RefMut::map(any, |any| any.downcast_mut::<T>().unwrap())) } else { None }) } /// Describe the value, in order to get its metadata in a way that could be used /// to generate prototypes, help information or whatever other descriptive text /// is required. /// Plan is to make this return a data type at some point in the future, possibly /// move on to `StarlarkValue` and include data from members. pub fn describe(self, name: &str) -> String { if self.get_type() == FUNCTION_TYPE { format!("def {}: pass", self.to_repr().replace(" = ...", " = None")) } else { format!("# {} = {}", name, self.to_repr()) } } /// Call `export_as` on the underlying value, but only if the type is mutable. /// Otherwise, does nothing. pub fn export_as(self, name: &str, heap: &'v Heap) { if let Some(mut mv) = self.get_ref_mut_already() { mv.export_as(heap, name) } } /// Return the attribute with the given name. Returns a pair of a boolean and the value. /// /// The type is [`AttrType::Method`] if the attribute was defined via [`StarlarkValue::get_methods`] /// and should be used as a signal that if the attribute is subsequently called, /// e.g. `object.attribute(argument)` then the `object` should be passed as the first /// argument to the function, e.g. `object.attribute(object, argument)`. pub fn get_attr( self, attribute: &str, heap: &'v Heap, ) -> anyhow::Result<(AttrType, Value<'v>)> { let aref = self.get_aref(); if let Some(methods) = aref.get_methods() { if let Some(v) = methods.get(attribute) { return Ok((AttrType::Method, v)); } } aref.get_attr(attribute, heap).map(|v| (AttrType::Field, v)) } /// Query whether an attribute exists on a type. 
Should be equivalent to whether /// [`get_attr`](Value::get_attr) succeeds, but potentially more efficient. pub fn has_attr(self, attribute: &str) -> bool { let aref = self.get_aref(); if let Some(methods) = aref.get_methods() { if methods.get(attribute).is_some() { return true; } } aref.has_attr(attribute) } /// Get a list of all the attributes this function supports, used to implement the /// `dir()` function. pub fn dir_attr(self) -> Vec<String> { let aref = self.get_aref(); let mut result = if let Some(methods) = aref.get_methods() { let mut res = methods.names(); res.extend(aref.dir_attr()); res } else { aref.dir_attr() }; result.sort(); result } } /// Methods that just forward to the underlying [`StarlarkValue`]. impl<'v> Value<'v> { pub fn get_type(self) -> &'static str { self.get_aref().get_type() } pub fn to_bool(self) -> bool { // Fast path for the common case if let Some(x) = self.unpack_bool() { x } else { self.get_aref().to_bool() } } pub fn to_int(self) -> anyhow::Result<i32> { // Fast path for the common case if let Some(x) = self.unpack_int() { Ok(x) } else { self.get_aref().to_int() } } pub fn at(self, index: Value<'v>, heap: &'v Heap) -> anyhow::Result<Value<'v>> { self.get_aref().at(index, heap) } pub fn slice( self, start: Option<Value<'v>>, stop: Option<Value<'v>>, stride: Option<Value<'v>>, heap: &'v Heap, ) -> anyhow::Result<Value<'v>> { self.get_aref().slice(start, stop, stride, heap) } pub fn length(self) -> anyhow::Result<i32> { self.get_aref().length() } pub fn is_in(self, other: Value<'v>) -> anyhow::Result<bool> { self.get_aref().is_in(other) } pub fn plus(self, heap: &'v Heap) -> anyhow::Result<Value<'v>> { self.get_aref().plus(heap) } pub fn minus(self, heap: &'v Heap) -> anyhow::Result<Value<'v>> { self.get_aref().minus(heap) } pub fn sub(self, other: Value<'v>, heap: &'v Heap) -> anyhow::Result<Value<'v>> { self.get_aref().sub(other, heap) } pub fn mul(self, other: Value<'v>, heap: &'v Heap) -> anyhow::Result<Value<'v>> { 
self.get_aref().mul(other, heap) } pub fn percent(self, other: Value<'v>, heap: &'v Heap) -> anyhow::Result<Value<'v>> { self.get_aref().percent(other, heap) } pub fn floor_div(self, other: Value<'v>, heap: &'v Heap) -> anyhow::Result<Value<'v>> { self.get_aref().floor_div(other, heap) } pub fn bit_and(self, other: Value<'v>) -> anyhow::Result<Value<'v>> { self.get_aref().bit_and(other) } pub fn bit_or(self, other: Value<'v>) -> anyhow::Result<Value<'v>> { self.get_aref().bit_or(other) } pub fn bit_xor(self, other: Value<'v>) -> anyhow::Result<Value<'v>> { self.get_aref().bit_xor(other) } pub fn left_shift(self, other: Value<'v>) -> anyhow::Result<Value<'v>> { self.get_aref().left_shift(other) } pub fn right_shift(self, other: Value<'v>) -> anyhow::Result<Value<'v>> { self.get_aref().right_shift(other) } pub fn new_invoker(self, eval: &mut Evaluator<'v, '_>) -> anyhow::Result<FunctionInvoker<'v>> { self.get_aref().new_invoker(self, eval) } pub fn get_type_value(self) -> &'static ConstFrozenValue { self.get_aref().get_type_value() } }
{ let _guard = stack_guard::stack_guard()?; self.get_aref().equals(other) }
conditional_block
mod.rs
/* * Copyright 2018 The Starlark in Rust Authors. * Copyright (c) Facebook, Inc. and its affiliates. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ //! Defines a runtime Starlark value ([`Value`]) and traits for defining custom values ([`StarlarkValue`]). //! //! This module contains code for working with Starlark values: //! //! * Most code dealing with Starlark will use [`Value`], as it represents the fundamental values used in //! Starlark. When frozen, they become [`FrozenValue`]. //! * Values are garbage-collected, so a given [`Value`] lives on a [`Heap`]. //! * Rust values (e.g. [`String`], [`Vec`]) can be added to the [`Heap`] with [`AllocValue`], //! and deconstructed from a [`Value`] with //! * To define your own Rust data type that can live in a [`Value`] it must implement the [`StarlarkValue`] //! trait. //! * All the nested modules represent the built-in Starlark values. These are all defined using [`StarlarkValue`], //! so may serve as interesting inspiration for writing your own values, in addition to occuring in Starlark programs. 
pub use crate::values::{error::*, iter::*, layout::*, owned::*, traits::*, types::*, unpack::*}; use crate::{ collections::{Hashed, SmallHashResult}, eval::Evaluator, values::{function::FUNCTION_TYPE, types::function::FunctionInvoker}, }; pub use gazebo::{any::AnyLifetime, cell::ARef, prelude::*}; use indexmap::Equivalent; use std::{ cell::RefMut, cmp::Ordering, fmt, fmt::{Debug, Display}, }; #[macro_use] mod comparison; // Submodules mod error; pub(crate) mod fast_string; mod index; mod interpolation; mod iter; mod layout; mod owned; mod stack_guard; mod traits; mod types; mod typing; mod unpack; impl Display for Value<'_> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{}", self.to_str()) } } impl Display for FrozenValue { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{}", Value::new_frozen(*self).to_str()) } } fn debug_value(typ: &str, v: Value, f: &mut fmt::Formatter) -> fmt::Result { f.debug_tuple(typ).field(v.get_aref().as_debug()).finish() } impl Debug for Value<'_> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
} } impl Debug for FrozenValue { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { debug_value("FrozenValue", Value::new_frozen(*self), f) } } impl<'v> PartialEq for Value<'v> { fn eq(&self, other: &Value<'v>) -> bool { self.equals(*other).ok() == Some(true) } } impl PartialEq for FrozenValue { fn eq(&self, other: &FrozenValue) -> bool { let v: Value = Value::new_frozen(*self); let other: Value = Value::new_frozen(*other); v.equals(other).ok() == Some(true) } } impl Eq for Value<'_> {} impl Eq for FrozenValue {} impl Equivalent<FrozenValue> for Value<'_> { fn equivalent(&self, key: &FrozenValue) -> bool { key.equals(*self).unwrap() } } impl Equivalent<Value<'_>> for FrozenValue { fn equivalent(&self, key: &Value) -> bool { self.equals(*key).unwrap() } } /// Trait for things that can be allocated on a [`Heap`] producing a [`Value`]. pub trait AllocValue<'v> { fn alloc_value(self, heap: &'v Heap) -> Value<'v>; } impl<'v> AllocValue<'v> for Value<'v> { fn alloc_value(self, _heap: &'v Heap) -> Value<'v> { self } } /// Trait for things that can be allocated on a [`FrozenHeap`] producing a [`FrozenValue`]. pub trait AllocFrozenValue { fn alloc_frozen_value(self, heap: &FrozenHeap) -> FrozenValue; } impl FrozenHeap { /// Allocate a new value on a [`FrozenHeap`]. pub fn alloc<T: AllocFrozenValue>(&self, val: T) -> FrozenValue { val.alloc_frozen_value(self) } } impl Heap { /// Allocate a new value on a [`Heap`]. pub fn alloc<'v, T: AllocValue<'v>>(&'v self, x: T) -> Value<'v> { x.alloc_value(self) } } /// Abstract over [`Value`] and [`FrozenValue`]. /// /// The methods on this trait are those required to implement containers, /// allowing implementations of [`ComplexValue`] to be agnostic of their contained type. /// For details about each function, see the documentation for [`Value`], /// which provides the same functions (and more). pub trait ValueLike<'v>: Eq + Copy + Debug { /// Produce a [`Value`] regardless of the type you are starting with. 
fn to_value(self) -> Value<'v>; fn get_aref(self) -> ARef<'v, dyn StarlarkValue<'v>>; fn new_invoker(self, eval: &mut Evaluator<'v, '_>) -> anyhow::Result<FunctionInvoker<'v>> { self.to_value().new_invoker(eval) } fn get_hash(self) -> anyhow::Result<u64> { self.get_aref().get_hash() } fn get_hashed(self) -> anyhow::Result<Hashed<Self>> { Ok(Hashed::new_unchecked( SmallHashResult::new_unchecked(self.get_hash()?), self, )) } fn collect_repr(self, collector: &mut String) { self.get_aref().collect_repr(collector); } fn to_json(self) -> anyhow::Result<String> { self.get_aref().to_json() } fn equals(self, other: Value<'v>) -> anyhow::Result<bool> { if self.to_value().ptr_eq(other) { Ok(true) } else { let _guard = stack_guard::stack_guard()?; self.get_aref().equals(other) } } fn compare(self, other: Value<'v>) -> anyhow::Result<Ordering> { let _guard = stack_guard::stack_guard()?; self.get_aref().compare(other) } fn downcast_ref<T: AnyLifetime<'v>>(self) -> Option<ARef<'v, T>> { let any = ARef::map(self.get_aref(), |e| e.as_dyn_any()); if any.is::<T>() { Some(ARef::map(any, |any| any.downcast_ref::<T>().unwrap())) } else { None } } } impl<'v, V: ValueLike<'v>> Hashed<V> { pub(crate) fn to_hashed_value(&self) -> Hashed<Value<'v>> { // Safe because we know frozen values have the same hash as non-frozen ones Hashed::new_unchecked(self.hash(), self.key().to_value()) } } impl<'v> Hashed<Value<'v>> { fn freeze(&self, freezer: &Freezer) -> Hashed<FrozenValue> { // Safe because we know frozen values have the same hash as non-frozen ones let key = self.key().freeze(freezer); // But it's an easy mistake to make, so actually check it in debug debug_assert_eq!(Some(self.hash()), key.get_hashed().ok().map(|x| x.hash())); Hashed::new_unchecked(self.hash(), key) } } impl<'v> ValueLike<'v> for Value<'v> { fn get_aref(self) -> ARef<'v, dyn StarlarkValue<'v>> { Value::get_aref(self) } fn to_value(self) -> Value<'v> { self } } impl<'v> ValueLike<'v> for FrozenValue { fn get_aref(self) -> 
ARef<'v, dyn StarlarkValue<'v>> { ARef::new_ptr(self.get_ref()) } fn to_value(self) -> Value<'v> { Value::new_frozen(self) } } impl FrozenValue { /// Convert a [`FrozenValue`] back to a [`Value`]. pub fn to_value<'v>(self) -> Value<'v> { Value::new_frozen(self) } } /// How an attribute (e.g. `x.f`) should behave. #[derive(Clone, Copy, Dupe, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)] pub enum AttrType { /// The attribute is a field, a direct value with no special behaviour. Field, /// The attribute is a method, which should be called passing the `x` value /// as its first argument. It will either be a function (which is transformed /// into a [`WrappedMethod`](crate::values::function::WrappedMethod)) or a /// [`NativeAttribute`](crate::values::function::NativeAttribute) /// (which is evaluated immediately). Method, } impl<'v> Value<'v> { /// Add two [`Value`]s together. Will first try using [`radd`](StarlarkValue::radd), /// before falling back to [`add`](StarlarkValue::add). pub fn add(self, other: Value<'v>, heap: &'v Heap) -> anyhow::Result<Value<'v>> { let me = self.to_value(); if let Some(v) = other.get_aref().radd(me, heap) { v } else { self.get_aref().add(other, heap) } } /// Convert a value to a [`FrozenValue`] using a supplied [`Freezer`]. pub fn freeze(self, freezer: &Freezer) -> FrozenValue { freezer.freeze(self) } /// Implement the `str()` function - converts a string value to itself, /// otherwise uses `repr()`. pub fn to_str(self) -> String { match self.unpack_str() { None => self.to_repr(), Some(s) => s.to_owned(), } } /// Implement the `repr()` function. pub fn to_repr(self) -> String { let mut s = String::new(); self.collect_repr(&mut s); s } /// Forwards to [`ComplexValue::set_attr`]. pub fn set_attr( self, attribute: &str, alloc_value: Value<'v>, heap: &'v Heap, ) -> anyhow::Result<()> { self.get_ref_mut(heap)?.set_attr(attribute, alloc_value) } /// Forwards to [`ComplexValue::set_at`]. 
pub fn set_at( self, index: Value<'v>, alloc_value: Value<'v>, heap: &'v Heap, ) -> anyhow::Result<()> { self.get_ref_mut(heap)?.set_at(index, alloc_value) } /// Return the contents of an iterable collection, as an owned vector. pub fn iterate_collect(self, heap: &'v Heap) -> anyhow::Result<Vec<Value<'v>>> { // You might reasonably think this is mostly called on lists (I think it is), // and thus that a fast-path here would speed things up. But in my experiments // it's completely irrelevant (you pay a bit for the check, you save a bit on each step). Ok(self.iterate(heap)?.iter().collect()) } /// Produce an iterable from a value. pub fn iterate(self, heap: &'v Heap) -> anyhow::Result<RefIterable<'v>> { let me: ARef<'v, dyn StarlarkValue> = self.get_aref(); me.iterate()?; Ok(RefIterable::new( heap, ARef::map(me, |e| e.iterate().unwrap()), )) } /// Get the [`Hashed`] version of this [`Value`]. pub fn get_hashed(self) -> anyhow::Result<Hashed<Self>> { ValueLike::get_hashed(self) } /// Get a reference to underlying data or [`None`] /// if contained object has different type than requested. /// /// This function panics if the [`Value`] is borrowed mutably. /// /// In many cases you may wish to call [`FromValue`] instead, as that can /// get a non-frozen value from an underlying frozen value. pub fn downcast_ref<T: AnyLifetime<'v>>(self) -> Option<ARef<'v, T>> { ValueLike::downcast_ref(self) } /// Are two values equal. If the values are of different types it will /// return [`false`]. It will only error if there is excessive recursion. pub fn equals(self, other: Value<'v>) -> anyhow::Result<bool> { ValueLike::equals(self, other) } /// How are two values comparable. For values of different types will return [`Err`]. pub fn compare(self, other: Value<'v>) -> anyhow::Result<Ordering> { ValueLike::compare(self, other) } /// Get a mutable reference to underlying data or [`None`] /// if contained object has different type than requested. 
/// /// This function returns an [`Err`] if the [`Value`] is already borrowed, is frozen, /// or frozen for iteration. /// /// While this reference is active, any [`get_aref`](Value::get_aref) or similar on the value will /// _cause a panic_. Therefore, it's super important not to call any Starlark operations, /// even as simple as equality, while holding the [`RefMut`]. pub fn downcast_mut<T: AnyLifetime<'v>>( self, heap: &'v Heap, ) -> anyhow::Result<Option<RefMut<'_, T>>> { let vref = self.get_ref_mut(heap)?; let any: RefMut<'_, dyn AnyLifetime<'v>> = RefMut::map(vref, |v| v.as_dyn_any_mut()); Ok(if any.is::<T>() { Some(RefMut::map(any, |any| any.downcast_mut::<T>().unwrap())) } else { None }) } /// Describe the value, in order to get its metadata in a way that could be used /// to generate prototypes, help information or whatever other descriptive text /// is required. /// Plan is to make this return a data type at some point in the future, possibly /// move on to `StarlarkValue` and include data from members. pub fn describe(self, name: &str) -> String { if self.get_type() == FUNCTION_TYPE { format!("def {}: pass", self.to_repr().replace(" = ...", " = None")) } else { format!("# {} = {}", name, self.to_repr()) } } /// Call `export_as` on the underlying value, but only if the type is mutable. /// Otherwise, does nothing. pub fn export_as(self, name: &str, heap: &'v Heap) { if let Some(mut mv) = self.get_ref_mut_already() { mv.export_as(heap, name) } } /// Return the attribute with the given name. Returns a pair of a boolean and the value. /// /// The type is [`AttrType::Method`] if the attribute was defined via [`StarlarkValue::get_methods`] /// and should be used as a signal that if the attribute is subsequently called, /// e.g. `object.attribute(argument)` then the `object` should be passed as the first /// argument to the function, e.g. `object.attribute(object, argument)`. 
pub fn get_attr( self, attribute: &str, heap: &'v Heap, ) -> anyhow::Result<(AttrType, Value<'v>)> { let aref = self.get_aref(); if let Some(methods) = aref.get_methods() { if let Some(v) = methods.get(attribute) { return Ok((AttrType::Method, v)); } } aref.get_attr(attribute, heap).map(|v| (AttrType::Field, v)) } /// Query whether an attribute exists on a type. Should be equivalent to whether /// [`get_attr`](Value::get_attr) succeeds, but potentially more efficient. pub fn has_attr(self, attribute: &str) -> bool { let aref = self.get_aref(); if let Some(methods) = aref.get_methods() { if methods.get(attribute).is_some() { return true; } } aref.has_attr(attribute) } /// Get a list of all the attributes this function supports, used to implement the /// `dir()` function. pub fn dir_attr(self) -> Vec<String> { let aref = self.get_aref(); let mut result = if let Some(methods) = aref.get_methods() { let mut res = methods.names(); res.extend(aref.dir_attr()); res } else { aref.dir_attr() }; result.sort(); result } } /// Methods that just forward to the underlying [`StarlarkValue`]. 
impl<'v> Value<'v> { pub fn get_type(self) -> &'static str { self.get_aref().get_type() } pub fn to_bool(self) -> bool { // Fast path for the common case if let Some(x) = self.unpack_bool() { x } else { self.get_aref().to_bool() } } pub fn to_int(self) -> anyhow::Result<i32> { // Fast path for the common case if let Some(x) = self.unpack_int() { Ok(x) } else { self.get_aref().to_int() } } pub fn at(self, index: Value<'v>, heap: &'v Heap) -> anyhow::Result<Value<'v>> { self.get_aref().at(index, heap) } pub fn slice( self, start: Option<Value<'v>>, stop: Option<Value<'v>>, stride: Option<Value<'v>>, heap: &'v Heap, ) -> anyhow::Result<Value<'v>> { self.get_aref().slice(start, stop, stride, heap) } pub fn length(self) -> anyhow::Result<i32> { self.get_aref().length() } pub fn is_in(self, other: Value<'v>) -> anyhow::Result<bool> { self.get_aref().is_in(other) } pub fn plus(self, heap: &'v Heap) -> anyhow::Result<Value<'v>> { self.get_aref().plus(heap) } pub fn minus(self, heap: &'v Heap) -> anyhow::Result<Value<'v>> { self.get_aref().minus(heap) } pub fn sub(self, other: Value<'v>, heap: &'v Heap) -> anyhow::Result<Value<'v>> { self.get_aref().sub(other, heap) } pub fn mul(self, other: Value<'v>, heap: &'v Heap) -> anyhow::Result<Value<'v>> { self.get_aref().mul(other, heap) } pub fn percent(self, other: Value<'v>, heap: &'v Heap) -> anyhow::Result<Value<'v>> { self.get_aref().percent(other, heap) } pub fn floor_div(self, other: Value<'v>, heap: &'v Heap) -> anyhow::Result<Value<'v>> { self.get_aref().floor_div(other, heap) } pub fn bit_and(self, other: Value<'v>) -> anyhow::Result<Value<'v>> { self.get_aref().bit_and(other) } pub fn bit_or(self, other: Value<'v>) -> anyhow::Result<Value<'v>> { self.get_aref().bit_or(other) } pub fn bit_xor(self, other: Value<'v>) -> anyhow::Result<Value<'v>> { self.get_aref().bit_xor(other) } pub fn left_shift(self, other: Value<'v>) -> anyhow::Result<Value<'v>> { self.get_aref().left_shift(other) } pub fn right_shift(self, other: 
Value<'v>) -> anyhow::Result<Value<'v>> { self.get_aref().right_shift(other) } pub fn new_invoker(self, eval: &mut Evaluator<'v, '_>) -> anyhow::Result<FunctionInvoker<'v>> { self.get_aref().new_invoker(self, eval) } pub fn get_type_value(self) -> &'static ConstFrozenValue { self.get_aref().get_type_value() } }
debug_value("Value", *self, f)
random_line_split
encode.go
package bnet import ( "bytes" "encoding/binary" "reflect" "runtime" "sort" "sync" "sync/atomic" ) // Marshal returns the BNET encoding of v. func Marshal(v interface{}) ([]byte, error) { e := &encodeState{} err := e.marshal(v) if err != nil { return nil, err } return e.Bytes(), nil } // Marshaler is the interface implemented by types that can marshal // themselves into valid BNET. type Marshaler interface { MarshalBNet() ([]byte, error) } // An UnsupportedTypeError occurs when attempting to marshal an // unsupported type. type UnsupportedTypeError struct { Type reflect.Type } func (e *UnsupportedTypeError) Error() string { return "bnet: unsupported type: " + e.Type.String() } type encodeState struct { buf bytes.Buffer tagCache map[string]string } func (e *encodeState) Bytes() []byte { return e.buf.Bytes() } func (e *encodeState) marshal(v interface{}) (err error) { defer func() { if r := recover(); r != nil { if _, ok := r.(runtime.Error); ok { panic(r) } if s, ok := r.(string); ok { panic(s) } err = r.(error) } }() e.reflectValue(reflect.ValueOf(v)) return nil } func (e *encodeState) error(err error) { panic(err) } func isEmptyValue(v reflect.Value) bool { switch v.Kind() { case reflect.Array, reflect.Slice: return v.Len() == 0 case reflect.Interface, reflect.Ptr: return v.IsNil() } return false } func (e *encodeState) reflectValue(v reflect.Value) { valueEncoder(v)(e, v) } type encoderFunc func(e *encodeState, v reflect.Value) var encoderCache sync.Map func valueEncoder(v reflect.Value) encoderFunc { if !v.IsValid() { return invalidValueEncoder } return typeEncoder(v.Type()) } func typeEncoder(t reflect.Type) encoderFunc { if fi, ok := encoderCache.Load(t); ok { return fi.(encoderFunc) } var ( wg sync.WaitGroup f encoderFunc ) wg.Add(1) fi, loaded := encoderCache.LoadOrStore(t, encoderFunc(func(e *encodeState, v reflect.Value) { wg.Wait() f(e, v) })) if loaded { return fi.(encoderFunc) } f = newTypeEncoder(t, true) wg.Done() encoderCache.Store(t, f) return f } 
var ( // marshalerType = reflect.TypeOf(new(Marshaler)).Elem() // binaryMarshalerType = reflect.TypeOf(new(encoding.BinaryMarshaler)).Elem() ) func newTypeEncoder(t reflect.Type, allowAddr bool) encoderFunc { switch t.Kind() { case reflect.Bool: return boolEncoder case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: return uintEncoder case reflect.Interface: return interfaceEncoder case reflect.String: return stringEncoder case reflect.Struct: return newStructEncoder(t) case reflect.Slice: return newSliceEncoder(t) case reflect.Array: return newArrayEncoder(t) case reflect.Ptr: return newPtrEncoder(t) default: return unsupportedTypeEncoder } } func invalidValueEncoder(e *encodeState, v reflect.Value) { e.error(&InvalidValueError{}) } func boolEncoder(e *encodeState, v reflect.Value) { if size, ok := e.tagCache["size"]; ok { // @todo TagDefinitionRequiredError switch size { case "uint8": if v.Bool() { uintEncoder(e, reflect.ValueOf(uint8(0x01))) return } uintEncoder(e, reflect.ValueOf(uint8(0x00))) case "uint32": if v.Bool() { uintEncoder(e, reflect.ValueOf(uint32(0x01))) return } uintEncoder(e, reflect.ValueOf(uint32(0x00))) default: e.error(&InvalidTagValueError{Expected: "uint8 or uint32", Value: size}) } } else { e.error(&TagDefinitionRequiredError{Tag: "size"}) } } func uintEncoder(e *encodeState, v reflect.Value) { _, bigendian := e.tagCache["bigendian"] switch v.Kind() { case reflect.Uint8: e.buf.WriteByte(uint8(v.Uint())) case reflect.Uint16: b := []byte{0x00, 0x00} if bigendian { binary.BigEndian.PutUint16(b, uint16(v.Uint())) } else { binary.LittleEndian.PutUint16(b, uint16(v.Uint())) } e.buf.Write(b) case reflect.Uint32: b := []byte{0x00, 0x00, 0x00, 0x00} if bigendian { binary.BigEndian.PutUint32(b, uint32(v.Uint())) } else { binary.LittleEndian.PutUint32(b, uint32(v.Uint())) } e.buf.Write(b) case reflect.Uint64: b := []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} if bigendian { binary.BigEndian.PutUint64(b, v.Uint()) } else { 
binary.LittleEndian.PutUint64(b, v.Uint()) } e.buf.Write(b) } } func stringEncoder(e *encodeState, v reflect.Value) { e.buf.WriteString(v.String()) e.buf.WriteByte(0x00) } func interfaceEncoder(e *encodeState, v reflect.Value) { if v.IsNil() { return } e.reflectValue(v.Elem()) } func unsupportedTypeEncoder(e *encodeState, v reflect.Value) { e.error(&UnsupportedTypeError{v.Type()}) } type structEncoder struct { fields []field fieldEncs []encoderFunc } func (se *structEncoder) encode(e *encodeState, v reflect.Value) { for i, f := range se.fields { fv, err := fieldByIndex(v, f.index) if err != nil { e.error(err) } if !fv.IsValid() || isEmptyValue(*fv) { // @todo InvalidValue e.error(&InvalidValueError{}) } e.tagCache = se.fields[i].tags se.fieldEncs[i](e, *fv) e.tagCache = nil } } func newStructEncoder(t reflect.Type) encoderFunc { fields := cachedTypeFields(t) se := &structEncoder{ fields: fields, fieldEncs: make([]encoderFunc, len(fields)), } for i, f := range fields { se.fieldEncs[i] = typeEncoder(typeByIndex(t, f.index)) } return se.encode } func encodeStringSlice(e *encodeState, v reflect.Value) { n := v.Len() for i := 0; i < n; i++ { stringEncoder(e, v.Index(i)) } e.buf.WriteByte(0x00) } type sliceEncoder struct { arrayEnc encoderFunc } func (se *sliceEncoder) encode(e *encodeState, v reflect.Value) { if v.IsNil() { return } se.arrayEnc(e, v) } func newSliceEncoder(t reflect.Type) encoderFunc { if t.Elem().Kind() == reflect.String { return encodeStringSlice } enc := &sliceEncoder{newArrayEncoder(t)} return enc.encode } type arrayEncoder struct { elemEnc encoderFunc } func (ae *arrayEncoder) encode(e *encodeState, v reflect.Value) { n := v.Len() for i := 0; i < n; i++ { ae.elemEnc(e, v.Index(i)) } } func newArrayEncoder(t reflect.Type) encoderFunc { enc := &arrayEncoder{typeEncoder(t.Elem())} return enc.encode } func newPtrEncoder(t reflect.Type) encoderFunc { enc := &ptrEncoder{typeEncoder(t.Elem())} return enc.encode } type ptrEncoder struct { ptrEnc 
encoderFunc } func (pe *ptrEncoder) encode(e *encodeState, v reflect.Value) { if v.IsNil() { return } pe.ptrEnc(e, v.Elem()) } func fieldByIndex(v reflect.Value, index []int) (*reflect.Value, error) { for _, i := range index { if v.Kind() == reflect.Ptr { if v.IsNil() { return nil, &NilPointerError{} } v = v.Elem() } v = v.Field(i) } return &v, nil } func typeByIndex(t reflect.Type, index []int) reflect.Type { for _, i := range index { if t.Kind() == reflect.Ptr { t = t.Elem() } t = t.Field(i).Type } return t } type field struct { name string tags map[string]string index []int typ reflect.Type } // byIndex sorts field by index sequence. type byIndex []field func (x byIndex) Len() int { return len(x) } func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] } func (x byIndex) Less(i, j int) bool { for k, xik := range x[i].index { if k >= len(x[j].index) { return false } if xik != x[j].index[k] { return xik < x[j].index[k] } } return len(x[i].index) < len(x[j].index) } func dominantField(fields []field) (field, bool) { // The fields are sorted in increasing index-length order. The winner // must therefore be one with the shortest index length. Drop all // longer entries, which is easy: just truncate the slice. length := len(fields[0].index) tagged := -1 // Index of first tagged field. for i, f := range fields { if len(f.index) > length { fields = fields[:i] break } } if tagged >= 0 { return fields[tagged], true } // All remaining fields have the same length. If there's more than one, // we have a conflict (two fields named "X" at the same level) and we // return no field. if len(fields) > 1 { return field{}, false } return fields[0], true } var fieldCache struct { value atomic.Value mu sync.Mutex } func typeFields(t reflect.Type) []field { // Anonymous fields to explore at the current level and the next. current := []field{} next := []field{{typ: t}} // Count of queued names for current level and the next. 
var count map[reflect.Type]int nextCount := map[reflect.Type]int{} // Types already visited at an earlier level. visited := map[reflect.Type]bool{} // Fields found. var fields []field
for len(next) > 0 { current, next = next, current[:0] count, nextCount = nextCount, map[reflect.Type]int{} for _, f := range current { if visited[f.typ] { continue } visited[f.typ] = true // Scan f.typ for fields to include. for i := 0; i < f.typ.NumField(); i++ { sf := f.typ.Field(i) isUnexported := sf.PkgPath != "" if sf.Anonymous { t := sf.Type if t.Kind() == reflect.Ptr { t = t.Elem() } if isUnexported && t.Kind() != reflect.Struct { // Ignore embedded fields of unexported non-struct types. continue } // Do not ignore embedded fields of unexported struct types // since they may have exported fields. } else if isUnexported { // Ignore unexported non-embedded fields. continue } index := make([]int, len(f.index)+1) copy(index, f.index) index[len(f.index)] = i ft := sf.Type if ft.Name() == "" && ft.Kind() == reflect.Ptr { // Follow pointer. ft = ft.Elem() } if !sf.Anonymous || ft.Kind() != reflect.Struct { fields = append(fields, field{ name: sf.Name, tags: parseTag(sf.Tag.Get("bnet")), index: index, typ: ft, }) if count[f.typ] > 1 { fields = append(fields, fields[len(fields)-1]) } continue } // Record new anonymous struct to explore in next round. nextCount[ft]++ if nextCount[ft] == 1 { next = append(next, field{name: ft.Name(), index: index, typ: ft}) } } } } sort.Slice(fields, func(i, j int) bool { x := fields // sort field by name, breaking ties with depth, then // breaking ties with "name came from json tag", then // breaking ties with index sequence. if x[i].name != x[j].name { return x[i].name < x[j].name } if len(x[i].index) != len(x[j].index) { return len(x[i].index) < len(x[j].index) } return byIndex(x).Less(i, j) }) // Delete all fields that are hidden by the Go rules for embedded fields, // except that fields with JSON tags are promoted. // The fields are sorted in primary order of name, secondary order // of field index length. Loop over names; for each name, delete // hidden fields by choosing the one dominant field that survives. 
out := fields[:0] for advance, i := 0, 0; i < len(fields); i += advance { // One iteration per name. // Find the sequence of fields with the name of this first field. fi := fields[i] name := fi.name for advance = 1; i+advance < len(fields); advance++ { fj := fields[i+advance] if fj.name != name { break } } if advance == 1 { // Only one field with this name out = append(out, fi) continue } dominant, ok := dominantField(fields[i : i+advance]) if ok { out = append(out, dominant) } } fields = out sort.Sort(byIndex(fields)) return fields } func cachedTypeFields(t reflect.Type) []field { m, _ := fieldCache.value.Load().(map[reflect.Type][]field) f := m[t] if f != nil { return f } f = typeFields(t) if f == nil { f = []field{} } fieldCache.mu.Lock() m, _ = fieldCache.value.Load().(map[reflect.Type][]field) newM := make(map[reflect.Type][]field, len(m)+1) for k, v := range m { newM[k] = v } newM[t] = f fieldCache.value.Store(newM) fieldCache.mu.Unlock() return f }
random_line_split
encode.go
package bnet import ( "bytes" "encoding/binary" "reflect" "runtime" "sort" "sync" "sync/atomic" ) // Marshal returns the BNET encoding of v. func Marshal(v interface{}) ([]byte, error) { e := &encodeState{} err := e.marshal(v) if err != nil { return nil, err } return e.Bytes(), nil } // Marshaler is the interface implemented by types that can marshal // themselves into valid BNET. type Marshaler interface { MarshalBNet() ([]byte, error) } // An UnsupportedTypeError occurs when attempting to marshal an // unsupported type. type UnsupportedTypeError struct { Type reflect.Type } func (e *UnsupportedTypeError) Error() string { return "bnet: unsupported type: " + e.Type.String() } type encodeState struct { buf bytes.Buffer tagCache map[string]string } func (e *encodeState) Bytes() []byte { return e.buf.Bytes() } func (e *encodeState) marshal(v interface{}) (err error) { defer func() { if r := recover(); r != nil { if _, ok := r.(runtime.Error); ok { panic(r) } if s, ok := r.(string); ok { panic(s) } err = r.(error) } }() e.reflectValue(reflect.ValueOf(v)) return nil } func (e *encodeState) error(err error) { panic(err) } func isEmptyValue(v reflect.Value) bool { switch v.Kind() { case reflect.Array, reflect.Slice: return v.Len() == 0 case reflect.Interface, reflect.Ptr: return v.IsNil() } return false } func (e *encodeState) reflectValue(v reflect.Value) { valueEncoder(v)(e, v) } type encoderFunc func(e *encodeState, v reflect.Value) var encoderCache sync.Map func valueEncoder(v reflect.Value) encoderFunc { if !v.IsValid() { return invalidValueEncoder } return typeEncoder(v.Type()) } func typeEncoder(t reflect.Type) encoderFunc { if fi, ok := encoderCache.Load(t); ok { return fi.(encoderFunc) } var ( wg sync.WaitGroup f encoderFunc ) wg.Add(1) fi, loaded := encoderCache.LoadOrStore(t, encoderFunc(func(e *encodeState, v reflect.Value) { wg.Wait() f(e, v) })) if loaded { return fi.(encoderFunc) } f = newTypeEncoder(t, true) wg.Done() encoderCache.Store(t, f) return f } 
var ( // marshalerType = reflect.TypeOf(new(Marshaler)).Elem() // binaryMarshalerType = reflect.TypeOf(new(encoding.BinaryMarshaler)).Elem() ) func newTypeEncoder(t reflect.Type, allowAddr bool) encoderFunc { switch t.Kind() { case reflect.Bool: return boolEncoder case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: return uintEncoder case reflect.Interface: return interfaceEncoder case reflect.String: return stringEncoder case reflect.Struct: return newStructEncoder(t) case reflect.Slice: return newSliceEncoder(t) case reflect.Array: return newArrayEncoder(t) case reflect.Ptr: return newPtrEncoder(t) default: return unsupportedTypeEncoder } } func invalidValueEncoder(e *encodeState, v reflect.Value) { e.error(&InvalidValueError{}) } func boolEncoder(e *encodeState, v reflect.Value) { if size, ok := e.tagCache["size"]; ok { // @todo TagDefinitionRequiredError switch size { case "uint8": if v.Bool() { uintEncoder(e, reflect.ValueOf(uint8(0x01))) return } uintEncoder(e, reflect.ValueOf(uint8(0x00))) case "uint32": if v.Bool() { uintEncoder(e, reflect.ValueOf(uint32(0x01))) return } uintEncoder(e, reflect.ValueOf(uint32(0x00))) default: e.error(&InvalidTagValueError{Expected: "uint8 or uint32", Value: size}) } } else { e.error(&TagDefinitionRequiredError{Tag: "size"}) } } func uintEncoder(e *encodeState, v reflect.Value) { _, bigendian := e.tagCache["bigendian"] switch v.Kind() { case reflect.Uint8: e.buf.WriteByte(uint8(v.Uint())) case reflect.Uint16: b := []byte{0x00, 0x00} if bigendian { binary.BigEndian.PutUint16(b, uint16(v.Uint())) } else { binary.LittleEndian.PutUint16(b, uint16(v.Uint())) } e.buf.Write(b) case reflect.Uint32: b := []byte{0x00, 0x00, 0x00, 0x00} if bigendian { binary.BigEndian.PutUint32(b, uint32(v.Uint())) } else { binary.LittleEndian.PutUint32(b, uint32(v.Uint())) } e.buf.Write(b) case reflect.Uint64: b := []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} if bigendian { binary.BigEndian.PutUint64(b, v.Uint()) } else { 
binary.LittleEndian.PutUint64(b, v.Uint()) } e.buf.Write(b) } } func stringEncoder(e *encodeState, v reflect.Value) { e.buf.WriteString(v.String()) e.buf.WriteByte(0x00) } func interfaceEncoder(e *encodeState, v reflect.Value) { if v.IsNil() { return } e.reflectValue(v.Elem()) } func unsupportedTypeEncoder(e *encodeState, v reflect.Value) { e.error(&UnsupportedTypeError{v.Type()}) } type structEncoder struct { fields []field fieldEncs []encoderFunc } func (se *structEncoder) encode(e *encodeState, v reflect.Value) { for i, f := range se.fields { fv, err := fieldByIndex(v, f.index) if err != nil { e.error(err) } if !fv.IsValid() || isEmptyValue(*fv) { // @todo InvalidValue e.error(&InvalidValueError{}) } e.tagCache = se.fields[i].tags se.fieldEncs[i](e, *fv) e.tagCache = nil } } func newStructEncoder(t reflect.Type) encoderFunc { fields := cachedTypeFields(t) se := &structEncoder{ fields: fields, fieldEncs: make([]encoderFunc, len(fields)), } for i, f := range fields { se.fieldEncs[i] = typeEncoder(typeByIndex(t, f.index)) } return se.encode } func encodeStringSlice(e *encodeState, v reflect.Value) { n := v.Len() for i := 0; i < n; i++ { stringEncoder(e, v.Index(i)) } e.buf.WriteByte(0x00) } type sliceEncoder struct { arrayEnc encoderFunc } func (se *sliceEncoder) encode(e *encodeState, v reflect.Value) { if v.IsNil() { return } se.arrayEnc(e, v) } func newSliceEncoder(t reflect.Type) encoderFunc { if t.Elem().Kind() == reflect.String { return encodeStringSlice } enc := &sliceEncoder{newArrayEncoder(t)} return enc.encode } type arrayEncoder struct { elemEnc encoderFunc } func (ae *arrayEncoder) encode(e *encodeState, v reflect.Value) { n := v.Len() for i := 0; i < n; i++ { ae.elemEnc(e, v.Index(i)) } } func newArrayEncoder(t reflect.Type) encoderFunc { enc := &arrayEncoder{typeEncoder(t.Elem())} return enc.encode } func newPtrEncoder(t reflect.Type) encoderFunc { enc := &ptrEncoder{typeEncoder(t.Elem())} return enc.encode } type ptrEncoder struct { ptrEnc 
encoderFunc } func (pe *ptrEncoder) encode(e *encodeState, v reflect.Value) { if v.IsNil() { return } pe.ptrEnc(e, v.Elem()) } func fieldByIndex(v reflect.Value, index []int) (*reflect.Value, error) { for _, i := range index { if v.Kind() == reflect.Ptr { if v.IsNil() { return nil, &NilPointerError{} } v = v.Elem() } v = v.Field(i) } return &v, nil } func
(t reflect.Type, index []int) reflect.Type { for _, i := range index { if t.Kind() == reflect.Ptr { t = t.Elem() } t = t.Field(i).Type } return t } type field struct { name string tags map[string]string index []int typ reflect.Type } // byIndex sorts field by index sequence. type byIndex []field func (x byIndex) Len() int { return len(x) } func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] } func (x byIndex) Less(i, j int) bool { for k, xik := range x[i].index { if k >= len(x[j].index) { return false } if xik != x[j].index[k] { return xik < x[j].index[k] } } return len(x[i].index) < len(x[j].index) } func dominantField(fields []field) (field, bool) { // The fields are sorted in increasing index-length order. The winner // must therefore be one with the shortest index length. Drop all // longer entries, which is easy: just truncate the slice. length := len(fields[0].index) tagged := -1 // Index of first tagged field. for i, f := range fields { if len(f.index) > length { fields = fields[:i] break } } if tagged >= 0 { return fields[tagged], true } // All remaining fields have the same length. If there's more than one, // we have a conflict (two fields named "X" at the same level) and we // return no field. if len(fields) > 1 { return field{}, false } return fields[0], true } var fieldCache struct { value atomic.Value mu sync.Mutex } func typeFields(t reflect.Type) []field { // Anonymous fields to explore at the current level and the next. current := []field{} next := []field{{typ: t}} // Count of queued names for current level and the next. var count map[reflect.Type]int nextCount := map[reflect.Type]int{} // Types already visited at an earlier level. visited := map[reflect.Type]bool{} // Fields found. var fields []field for len(next) > 0 { current, next = next, current[:0] count, nextCount = nextCount, map[reflect.Type]int{} for _, f := range current { if visited[f.typ] { continue } visited[f.typ] = true // Scan f.typ for fields to include. 
for i := 0; i < f.typ.NumField(); i++ { sf := f.typ.Field(i) isUnexported := sf.PkgPath != "" if sf.Anonymous { t := sf.Type if t.Kind() == reflect.Ptr { t = t.Elem() } if isUnexported && t.Kind() != reflect.Struct { // Ignore embedded fields of unexported non-struct types. continue } // Do not ignore embedded fields of unexported struct types // since they may have exported fields. } else if isUnexported { // Ignore unexported non-embedded fields. continue } index := make([]int, len(f.index)+1) copy(index, f.index) index[len(f.index)] = i ft := sf.Type if ft.Name() == "" && ft.Kind() == reflect.Ptr { // Follow pointer. ft = ft.Elem() } if !sf.Anonymous || ft.Kind() != reflect.Struct { fields = append(fields, field{ name: sf.Name, tags: parseTag(sf.Tag.Get("bnet")), index: index, typ: ft, }) if count[f.typ] > 1 { fields = append(fields, fields[len(fields)-1]) } continue } // Record new anonymous struct to explore in next round. nextCount[ft]++ if nextCount[ft] == 1 { next = append(next, field{name: ft.Name(), index: index, typ: ft}) } } } } sort.Slice(fields, func(i, j int) bool { x := fields // sort field by name, breaking ties with depth, then // breaking ties with "name came from json tag", then // breaking ties with index sequence. if x[i].name != x[j].name { return x[i].name < x[j].name } if len(x[i].index) != len(x[j].index) { return len(x[i].index) < len(x[j].index) } return byIndex(x).Less(i, j) }) // Delete all fields that are hidden by the Go rules for embedded fields, // except that fields with JSON tags are promoted. // The fields are sorted in primary order of name, secondary order // of field index length. Loop over names; for each name, delete // hidden fields by choosing the one dominant field that survives. out := fields[:0] for advance, i := 0, 0; i < len(fields); i += advance { // One iteration per name. // Find the sequence of fields with the name of this first field. 
fi := fields[i] name := fi.name for advance = 1; i+advance < len(fields); advance++ { fj := fields[i+advance] if fj.name != name { break } } if advance == 1 { // Only one field with this name out = append(out, fi) continue } dominant, ok := dominantField(fields[i : i+advance]) if ok { out = append(out, dominant) } } fields = out sort.Sort(byIndex(fields)) return fields } func cachedTypeFields(t reflect.Type) []field { m, _ := fieldCache.value.Load().(map[reflect.Type][]field) f := m[t] if f != nil { return f } f = typeFields(t) if f == nil { f = []field{} } fieldCache.mu.Lock() m, _ = fieldCache.value.Load().(map[reflect.Type][]field) newM := make(map[reflect.Type][]field, len(m)+1) for k, v := range m { newM[k] = v } newM[t] = f fieldCache.value.Store(newM) fieldCache.mu.Unlock() return f }
typeByIndex
identifier_name
encode.go
package bnet import ( "bytes" "encoding/binary" "reflect" "runtime" "sort" "sync" "sync/atomic" ) // Marshal returns the BNET encoding of v. func Marshal(v interface{}) ([]byte, error) { e := &encodeState{} err := e.marshal(v) if err != nil
return e.Bytes(), nil } // Marshaler is the interface implemented by types that can marshal // themselves into valid BNET. type Marshaler interface { MarshalBNet() ([]byte, error) } // An UnsupportedTypeError occurs when attempting to marshal an // unsupported type. type UnsupportedTypeError struct { Type reflect.Type } func (e *UnsupportedTypeError) Error() string { return "bnet: unsupported type: " + e.Type.String() } type encodeState struct { buf bytes.Buffer tagCache map[string]string } func (e *encodeState) Bytes() []byte { return e.buf.Bytes() } func (e *encodeState) marshal(v interface{}) (err error) { defer func() { if r := recover(); r != nil { if _, ok := r.(runtime.Error); ok { panic(r) } if s, ok := r.(string); ok { panic(s) } err = r.(error) } }() e.reflectValue(reflect.ValueOf(v)) return nil } func (e *encodeState) error(err error) { panic(err) } func isEmptyValue(v reflect.Value) bool { switch v.Kind() { case reflect.Array, reflect.Slice: return v.Len() == 0 case reflect.Interface, reflect.Ptr: return v.IsNil() } return false } func (e *encodeState) reflectValue(v reflect.Value) { valueEncoder(v)(e, v) } type encoderFunc func(e *encodeState, v reflect.Value) var encoderCache sync.Map func valueEncoder(v reflect.Value) encoderFunc { if !v.IsValid() { return invalidValueEncoder } return typeEncoder(v.Type()) } func typeEncoder(t reflect.Type) encoderFunc { if fi, ok := encoderCache.Load(t); ok { return fi.(encoderFunc) } var ( wg sync.WaitGroup f encoderFunc ) wg.Add(1) fi, loaded := encoderCache.LoadOrStore(t, encoderFunc(func(e *encodeState, v reflect.Value) { wg.Wait() f(e, v) })) if loaded { return fi.(encoderFunc) } f = newTypeEncoder(t, true) wg.Done() encoderCache.Store(t, f) return f } var ( // marshalerType = reflect.TypeOf(new(Marshaler)).Elem() // binaryMarshalerType = reflect.TypeOf(new(encoding.BinaryMarshaler)).Elem() ) func newTypeEncoder(t reflect.Type, allowAddr bool) encoderFunc { switch t.Kind() { case reflect.Bool: return 
boolEncoder case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: return uintEncoder case reflect.Interface: return interfaceEncoder case reflect.String: return stringEncoder case reflect.Struct: return newStructEncoder(t) case reflect.Slice: return newSliceEncoder(t) case reflect.Array: return newArrayEncoder(t) case reflect.Ptr: return newPtrEncoder(t) default: return unsupportedTypeEncoder } } func invalidValueEncoder(e *encodeState, v reflect.Value) { e.error(&InvalidValueError{}) } func boolEncoder(e *encodeState, v reflect.Value) { if size, ok := e.tagCache["size"]; ok { // @todo TagDefinitionRequiredError switch size { case "uint8": if v.Bool() { uintEncoder(e, reflect.ValueOf(uint8(0x01))) return } uintEncoder(e, reflect.ValueOf(uint8(0x00))) case "uint32": if v.Bool() { uintEncoder(e, reflect.ValueOf(uint32(0x01))) return } uintEncoder(e, reflect.ValueOf(uint32(0x00))) default: e.error(&InvalidTagValueError{Expected: "uint8 or uint32", Value: size}) } } else { e.error(&TagDefinitionRequiredError{Tag: "size"}) } } func uintEncoder(e *encodeState, v reflect.Value) { _, bigendian := e.tagCache["bigendian"] switch v.Kind() { case reflect.Uint8: e.buf.WriteByte(uint8(v.Uint())) case reflect.Uint16: b := []byte{0x00, 0x00} if bigendian { binary.BigEndian.PutUint16(b, uint16(v.Uint())) } else { binary.LittleEndian.PutUint16(b, uint16(v.Uint())) } e.buf.Write(b) case reflect.Uint32: b := []byte{0x00, 0x00, 0x00, 0x00} if bigendian { binary.BigEndian.PutUint32(b, uint32(v.Uint())) } else { binary.LittleEndian.PutUint32(b, uint32(v.Uint())) } e.buf.Write(b) case reflect.Uint64: b := []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} if bigendian { binary.BigEndian.PutUint64(b, v.Uint()) } else { binary.LittleEndian.PutUint64(b, v.Uint()) } e.buf.Write(b) } } func stringEncoder(e *encodeState, v reflect.Value) { e.buf.WriteString(v.String()) e.buf.WriteByte(0x00) } func interfaceEncoder(e *encodeState, v reflect.Value) { if v.IsNil() { return } 
e.reflectValue(v.Elem()) } func unsupportedTypeEncoder(e *encodeState, v reflect.Value) { e.error(&UnsupportedTypeError{v.Type()}) } type structEncoder struct { fields []field fieldEncs []encoderFunc } func (se *structEncoder) encode(e *encodeState, v reflect.Value) { for i, f := range se.fields { fv, err := fieldByIndex(v, f.index) if err != nil { e.error(err) } if !fv.IsValid() || isEmptyValue(*fv) { // @todo InvalidValue e.error(&InvalidValueError{}) } e.tagCache = se.fields[i].tags se.fieldEncs[i](e, *fv) e.tagCache = nil } } func newStructEncoder(t reflect.Type) encoderFunc { fields := cachedTypeFields(t) se := &structEncoder{ fields: fields, fieldEncs: make([]encoderFunc, len(fields)), } for i, f := range fields { se.fieldEncs[i] = typeEncoder(typeByIndex(t, f.index)) } return se.encode } func encodeStringSlice(e *encodeState, v reflect.Value) { n := v.Len() for i := 0; i < n; i++ { stringEncoder(e, v.Index(i)) } e.buf.WriteByte(0x00) } type sliceEncoder struct { arrayEnc encoderFunc } func (se *sliceEncoder) encode(e *encodeState, v reflect.Value) { if v.IsNil() { return } se.arrayEnc(e, v) } func newSliceEncoder(t reflect.Type) encoderFunc { if t.Elem().Kind() == reflect.String { return encodeStringSlice } enc := &sliceEncoder{newArrayEncoder(t)} return enc.encode } type arrayEncoder struct { elemEnc encoderFunc } func (ae *arrayEncoder) encode(e *encodeState, v reflect.Value) { n := v.Len() for i := 0; i < n; i++ { ae.elemEnc(e, v.Index(i)) } } func newArrayEncoder(t reflect.Type) encoderFunc { enc := &arrayEncoder{typeEncoder(t.Elem())} return enc.encode } func newPtrEncoder(t reflect.Type) encoderFunc { enc := &ptrEncoder{typeEncoder(t.Elem())} return enc.encode } type ptrEncoder struct { ptrEnc encoderFunc } func (pe *ptrEncoder) encode(e *encodeState, v reflect.Value) { if v.IsNil() { return } pe.ptrEnc(e, v.Elem()) } func fieldByIndex(v reflect.Value, index []int) (*reflect.Value, error) { for _, i := range index { if v.Kind() == reflect.Ptr { if 
v.IsNil() { return nil, &NilPointerError{} } v = v.Elem() } v = v.Field(i) } return &v, nil } func typeByIndex(t reflect.Type, index []int) reflect.Type { for _, i := range index { if t.Kind() == reflect.Ptr { t = t.Elem() } t = t.Field(i).Type } return t } type field struct { name string tags map[string]string index []int typ reflect.Type } // byIndex sorts field by index sequence. type byIndex []field func (x byIndex) Len() int { return len(x) } func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] } func (x byIndex) Less(i, j int) bool { for k, xik := range x[i].index { if k >= len(x[j].index) { return false } if xik != x[j].index[k] { return xik < x[j].index[k] } } return len(x[i].index) < len(x[j].index) } func dominantField(fields []field) (field, bool) { // The fields are sorted in increasing index-length order. The winner // must therefore be one with the shortest index length. Drop all // longer entries, which is easy: just truncate the slice. length := len(fields[0].index) tagged := -1 // Index of first tagged field. for i, f := range fields { if len(f.index) > length { fields = fields[:i] break } } if tagged >= 0 { return fields[tagged], true } // All remaining fields have the same length. If there's more than one, // we have a conflict (two fields named "X" at the same level) and we // return no field. if len(fields) > 1 { return field{}, false } return fields[0], true } var fieldCache struct { value atomic.Value mu sync.Mutex } func typeFields(t reflect.Type) []field { // Anonymous fields to explore at the current level and the next. current := []field{} next := []field{{typ: t}} // Count of queued names for current level and the next. var count map[reflect.Type]int nextCount := map[reflect.Type]int{} // Types already visited at an earlier level. visited := map[reflect.Type]bool{} // Fields found. 
var fields []field for len(next) > 0 { current, next = next, current[:0] count, nextCount = nextCount, map[reflect.Type]int{} for _, f := range current { if visited[f.typ] { continue } visited[f.typ] = true // Scan f.typ for fields to include. for i := 0; i < f.typ.NumField(); i++ { sf := f.typ.Field(i) isUnexported := sf.PkgPath != "" if sf.Anonymous { t := sf.Type if t.Kind() == reflect.Ptr { t = t.Elem() } if isUnexported && t.Kind() != reflect.Struct { // Ignore embedded fields of unexported non-struct types. continue } // Do not ignore embedded fields of unexported struct types // since they may have exported fields. } else if isUnexported { // Ignore unexported non-embedded fields. continue } index := make([]int, len(f.index)+1) copy(index, f.index) index[len(f.index)] = i ft := sf.Type if ft.Name() == "" && ft.Kind() == reflect.Ptr { // Follow pointer. ft = ft.Elem() } if !sf.Anonymous || ft.Kind() != reflect.Struct { fields = append(fields, field{ name: sf.Name, tags: parseTag(sf.Tag.Get("bnet")), index: index, typ: ft, }) if count[f.typ] > 1 { fields = append(fields, fields[len(fields)-1]) } continue } // Record new anonymous struct to explore in next round. nextCount[ft]++ if nextCount[ft] == 1 { next = append(next, field{name: ft.Name(), index: index, typ: ft}) } } } } sort.Slice(fields, func(i, j int) bool { x := fields // sort field by name, breaking ties with depth, then // breaking ties with "name came from json tag", then // breaking ties with index sequence. if x[i].name != x[j].name { return x[i].name < x[j].name } if len(x[i].index) != len(x[j].index) { return len(x[i].index) < len(x[j].index) } return byIndex(x).Less(i, j) }) // Delete all fields that are hidden by the Go rules for embedded fields, // except that fields with JSON tags are promoted. // The fields are sorted in primary order of name, secondary order // of field index length. Loop over names; for each name, delete // hidden fields by choosing the one dominant field that survives. 
out := fields[:0] for advance, i := 0, 0; i < len(fields); i += advance { // One iteration per name. // Find the sequence of fields with the name of this first field. fi := fields[i] name := fi.name for advance = 1; i+advance < len(fields); advance++ { fj := fields[i+advance] if fj.name != name { break } } if advance == 1 { // Only one field with this name out = append(out, fi) continue } dominant, ok := dominantField(fields[i : i+advance]) if ok { out = append(out, dominant) } } fields = out sort.Sort(byIndex(fields)) return fields } func cachedTypeFields(t reflect.Type) []field { m, _ := fieldCache.value.Load().(map[reflect.Type][]field) f := m[t] if f != nil { return f } f = typeFields(t) if f == nil { f = []field{} } fieldCache.mu.Lock() m, _ = fieldCache.value.Load().(map[reflect.Type][]field) newM := make(map[reflect.Type][]field, len(m)+1) for k, v := range m { newM[k] = v } newM[t] = f fieldCache.value.Store(newM) fieldCache.mu.Unlock() return f }
{ return nil, err }
conditional_block
encode.go
package bnet import ( "bytes" "encoding/binary" "reflect" "runtime" "sort" "sync" "sync/atomic" ) // Marshal returns the BNET encoding of v. func Marshal(v interface{}) ([]byte, error) { e := &encodeState{} err := e.marshal(v) if err != nil { return nil, err } return e.Bytes(), nil } // Marshaler is the interface implemented by types that can marshal // themselves into valid BNET. type Marshaler interface { MarshalBNet() ([]byte, error) } // An UnsupportedTypeError occurs when attempting to marshal an // unsupported type. type UnsupportedTypeError struct { Type reflect.Type } func (e *UnsupportedTypeError) Error() string { return "bnet: unsupported type: " + e.Type.String() } type encodeState struct { buf bytes.Buffer tagCache map[string]string } func (e *encodeState) Bytes() []byte { return e.buf.Bytes() } func (e *encodeState) marshal(v interface{}) (err error) { defer func() { if r := recover(); r != nil { if _, ok := r.(runtime.Error); ok { panic(r) } if s, ok := r.(string); ok { panic(s) } err = r.(error) } }() e.reflectValue(reflect.ValueOf(v)) return nil } func (e *encodeState) error(err error) { panic(err) } func isEmptyValue(v reflect.Value) bool { switch v.Kind() { case reflect.Array, reflect.Slice: return v.Len() == 0 case reflect.Interface, reflect.Ptr: return v.IsNil() } return false } func (e *encodeState) reflectValue(v reflect.Value) { valueEncoder(v)(e, v) } type encoderFunc func(e *encodeState, v reflect.Value) var encoderCache sync.Map func valueEncoder(v reflect.Value) encoderFunc { if !v.IsValid() { return invalidValueEncoder } return typeEncoder(v.Type()) } func typeEncoder(t reflect.Type) encoderFunc { if fi, ok := encoderCache.Load(t); ok { return fi.(encoderFunc) } var ( wg sync.WaitGroup f encoderFunc ) wg.Add(1) fi, loaded := encoderCache.LoadOrStore(t, encoderFunc(func(e *encodeState, v reflect.Value) { wg.Wait() f(e, v) })) if loaded { return fi.(encoderFunc) } f = newTypeEncoder(t, true) wg.Done() encoderCache.Store(t, f) return f } 
var ( // marshalerType = reflect.TypeOf(new(Marshaler)).Elem() // binaryMarshalerType = reflect.TypeOf(new(encoding.BinaryMarshaler)).Elem() ) func newTypeEncoder(t reflect.Type, allowAddr bool) encoderFunc { switch t.Kind() { case reflect.Bool: return boolEncoder case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: return uintEncoder case reflect.Interface: return interfaceEncoder case reflect.String: return stringEncoder case reflect.Struct: return newStructEncoder(t) case reflect.Slice: return newSliceEncoder(t) case reflect.Array: return newArrayEncoder(t) case reflect.Ptr: return newPtrEncoder(t) default: return unsupportedTypeEncoder } } func invalidValueEncoder(e *encodeState, v reflect.Value) { e.error(&InvalidValueError{}) } func boolEncoder(e *encodeState, v reflect.Value) { if size, ok := e.tagCache["size"]; ok { // @todo TagDefinitionRequiredError switch size { case "uint8": if v.Bool() { uintEncoder(e, reflect.ValueOf(uint8(0x01))) return } uintEncoder(e, reflect.ValueOf(uint8(0x00))) case "uint32": if v.Bool() { uintEncoder(e, reflect.ValueOf(uint32(0x01))) return } uintEncoder(e, reflect.ValueOf(uint32(0x00))) default: e.error(&InvalidTagValueError{Expected: "uint8 or uint32", Value: size}) } } else { e.error(&TagDefinitionRequiredError{Tag: "size"}) } } func uintEncoder(e *encodeState, v reflect.Value) { _, bigendian := e.tagCache["bigendian"] switch v.Kind() { case reflect.Uint8: e.buf.WriteByte(uint8(v.Uint())) case reflect.Uint16: b := []byte{0x00, 0x00} if bigendian { binary.BigEndian.PutUint16(b, uint16(v.Uint())) } else { binary.LittleEndian.PutUint16(b, uint16(v.Uint())) } e.buf.Write(b) case reflect.Uint32: b := []byte{0x00, 0x00, 0x00, 0x00} if bigendian { binary.BigEndian.PutUint32(b, uint32(v.Uint())) } else { binary.LittleEndian.PutUint32(b, uint32(v.Uint())) } e.buf.Write(b) case reflect.Uint64: b := []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} if bigendian { binary.BigEndian.PutUint64(b, v.Uint()) } else { 
binary.LittleEndian.PutUint64(b, v.Uint()) } e.buf.Write(b) } } func stringEncoder(e *encodeState, v reflect.Value) { e.buf.WriteString(v.String()) e.buf.WriteByte(0x00) } func interfaceEncoder(e *encodeState, v reflect.Value) { if v.IsNil() { return } e.reflectValue(v.Elem()) } func unsupportedTypeEncoder(e *encodeState, v reflect.Value) { e.error(&UnsupportedTypeError{v.Type()}) } type structEncoder struct { fields []field fieldEncs []encoderFunc } func (se *structEncoder) encode(e *encodeState, v reflect.Value) { for i, f := range se.fields { fv, err := fieldByIndex(v, f.index) if err != nil { e.error(err) } if !fv.IsValid() || isEmptyValue(*fv) { // @todo InvalidValue e.error(&InvalidValueError{}) } e.tagCache = se.fields[i].tags se.fieldEncs[i](e, *fv) e.tagCache = nil } } func newStructEncoder(t reflect.Type) encoderFunc
func encodeStringSlice(e *encodeState, v reflect.Value) { n := v.Len() for i := 0; i < n; i++ { stringEncoder(e, v.Index(i)) } e.buf.WriteByte(0x00) } type sliceEncoder struct { arrayEnc encoderFunc } func (se *sliceEncoder) encode(e *encodeState, v reflect.Value) { if v.IsNil() { return } se.arrayEnc(e, v) } func newSliceEncoder(t reflect.Type) encoderFunc { if t.Elem().Kind() == reflect.String { return encodeStringSlice } enc := &sliceEncoder{newArrayEncoder(t)} return enc.encode } type arrayEncoder struct { elemEnc encoderFunc } func (ae *arrayEncoder) encode(e *encodeState, v reflect.Value) { n := v.Len() for i := 0; i < n; i++ { ae.elemEnc(e, v.Index(i)) } } func newArrayEncoder(t reflect.Type) encoderFunc { enc := &arrayEncoder{typeEncoder(t.Elem())} return enc.encode } func newPtrEncoder(t reflect.Type) encoderFunc { enc := &ptrEncoder{typeEncoder(t.Elem())} return enc.encode } type ptrEncoder struct { ptrEnc encoderFunc } func (pe *ptrEncoder) encode(e *encodeState, v reflect.Value) { if v.IsNil() { return } pe.ptrEnc(e, v.Elem()) } func fieldByIndex(v reflect.Value, index []int) (*reflect.Value, error) { for _, i := range index { if v.Kind() == reflect.Ptr { if v.IsNil() { return nil, &NilPointerError{} } v = v.Elem() } v = v.Field(i) } return &v, nil } func typeByIndex(t reflect.Type, index []int) reflect.Type { for _, i := range index { if t.Kind() == reflect.Ptr { t = t.Elem() } t = t.Field(i).Type } return t } type field struct { name string tags map[string]string index []int typ reflect.Type } // byIndex sorts field by index sequence. 
type byIndex []field func (x byIndex) Len() int { return len(x) } func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] } func (x byIndex) Less(i, j int) bool { for k, xik := range x[i].index { if k >= len(x[j].index) { return false } if xik != x[j].index[k] { return xik < x[j].index[k] } } return len(x[i].index) < len(x[j].index) } func dominantField(fields []field) (field, bool) { // The fields are sorted in increasing index-length order. The winner // must therefore be one with the shortest index length. Drop all // longer entries, which is easy: just truncate the slice. length := len(fields[0].index) tagged := -1 // Index of first tagged field. for i, f := range fields { if len(f.index) > length { fields = fields[:i] break } } if tagged >= 0 { return fields[tagged], true } // All remaining fields have the same length. If there's more than one, // we have a conflict (two fields named "X" at the same level) and we // return no field. if len(fields) > 1 { return field{}, false } return fields[0], true } var fieldCache struct { value atomic.Value mu sync.Mutex } func typeFields(t reflect.Type) []field { // Anonymous fields to explore at the current level and the next. current := []field{} next := []field{{typ: t}} // Count of queued names for current level and the next. var count map[reflect.Type]int nextCount := map[reflect.Type]int{} // Types already visited at an earlier level. visited := map[reflect.Type]bool{} // Fields found. var fields []field for len(next) > 0 { current, next = next, current[:0] count, nextCount = nextCount, map[reflect.Type]int{} for _, f := range current { if visited[f.typ] { continue } visited[f.typ] = true // Scan f.typ for fields to include. for i := 0; i < f.typ.NumField(); i++ { sf := f.typ.Field(i) isUnexported := sf.PkgPath != "" if sf.Anonymous { t := sf.Type if t.Kind() == reflect.Ptr { t = t.Elem() } if isUnexported && t.Kind() != reflect.Struct { // Ignore embedded fields of unexported non-struct types. 
continue } // Do not ignore embedded fields of unexported struct types // since they may have exported fields. } else if isUnexported { // Ignore unexported non-embedded fields. continue } index := make([]int, len(f.index)+1) copy(index, f.index) index[len(f.index)] = i ft := sf.Type if ft.Name() == "" && ft.Kind() == reflect.Ptr { // Follow pointer. ft = ft.Elem() } if !sf.Anonymous || ft.Kind() != reflect.Struct { fields = append(fields, field{ name: sf.Name, tags: parseTag(sf.Tag.Get("bnet")), index: index, typ: ft, }) if count[f.typ] > 1 { fields = append(fields, fields[len(fields)-1]) } continue } // Record new anonymous struct to explore in next round. nextCount[ft]++ if nextCount[ft] == 1 { next = append(next, field{name: ft.Name(), index: index, typ: ft}) } } } } sort.Slice(fields, func(i, j int) bool { x := fields // sort field by name, breaking ties with depth, then // breaking ties with "name came from json tag", then // breaking ties with index sequence. if x[i].name != x[j].name { return x[i].name < x[j].name } if len(x[i].index) != len(x[j].index) { return len(x[i].index) < len(x[j].index) } return byIndex(x).Less(i, j) }) // Delete all fields that are hidden by the Go rules for embedded fields, // except that fields with JSON tags are promoted. // The fields are sorted in primary order of name, secondary order // of field index length. Loop over names; for each name, delete // hidden fields by choosing the one dominant field that survives. out := fields[:0] for advance, i := 0, 0; i < len(fields); i += advance { // One iteration per name. // Find the sequence of fields with the name of this first field. 
fi := fields[i] name := fi.name for advance = 1; i+advance < len(fields); advance++ { fj := fields[i+advance] if fj.name != name { break } } if advance == 1 { // Only one field with this name out = append(out, fi) continue } dominant, ok := dominantField(fields[i : i+advance]) if ok { out = append(out, dominant) } } fields = out sort.Sort(byIndex(fields)) return fields } func cachedTypeFields(t reflect.Type) []field { m, _ := fieldCache.value.Load().(map[reflect.Type][]field) f := m[t] if f != nil { return f } f = typeFields(t) if f == nil { f = []field{} } fieldCache.mu.Lock() m, _ = fieldCache.value.Load().(map[reflect.Type][]field) newM := make(map[reflect.Type][]field, len(m)+1) for k, v := range m { newM[k] = v } newM[t] = f fieldCache.value.Store(newM) fieldCache.mu.Unlock() return f }
{ fields := cachedTypeFields(t) se := &structEncoder{ fields: fields, fieldEncs: make([]encoderFunc, len(fields)), } for i, f := range fields { se.fieldEncs[i] = typeEncoder(typeByIndex(t, f.index)) } return se.encode }
identifier_body
backend.rs
//! Defines `SimpleJITBackend`. use crate::memory::Memory; use cranelift_codegen::binemit::{Addend, CodeOffset, NullTrapSink, Reloc, RelocSink}; use cranelift_codegen::isa::TargetIsa; use cranelift_codegen::{self, ir, settings}; use cranelift_module::{ Backend, DataContext, DataDescription, Init, Linkage, ModuleNamespace, ModuleResult, }; use cranelift_native; use libc; use std::collections::HashMap; use std::ffi::CString; use std::io::Write; use std::ptr; use target_lexicon::PointerWidth; #[cfg(windows)] use winapi; const EXECUTABLE_DATA_ALIGNMENT: u8 = 0x10; const WRITABLE_DATA_ALIGNMENT: u8 = 0x8; const READONLY_DATA_ALIGNMENT: u8 = 0x1; /// A builder for `SimpleJITBackend`. pub struct SimpleJITBuilder { isa: Box<dyn TargetIsa>, symbols: HashMap<String, *const u8>, libcall_names: Box<dyn Fn(ir::LibCall) -> String>, } impl SimpleJITBuilder { /// Create a new `SimpleJITBuilder`. /// /// The `libcall_names` function provides a way to translate `cranelift_codegen`'s `ir::LibCall` /// enum to symbols. LibCalls are inserted in the IR as part of the legalization for certain /// floating point instructions, and for stack probes. If you don't know what to use for this /// argument, use `cranelift_module::default_libcall_names()`. pub fn new(libcall_names: Box<dyn Fn(ir::LibCall) -> String>) -> Self { let flag_builder = settings::builder(); let isa_builder = cranelift_native::builder().unwrap_or_else(|msg| { panic!("host machine is not supported: {}", msg); }); let isa = isa_builder.finish(settings::Flags::new(flag_builder)); Self::with_isa(isa, libcall_names) } /// Create a new `SimpleJITBuilder` with an arbitrary target. This is mainly /// useful for testing. /// /// SimpleJIT requires a `TargetIsa` configured for non-PIC. /// /// To create a `SimpleJITBuilder` for native use, use the `new` constructor /// instead. /// /// The `libcall_names` function provides a way to translate `cranelift_codegen`'s `ir::LibCall` /// enum to symbols. 
LibCalls are inserted in the IR as part of the legalization for certain /// floating point instructions, and for stack probes. If you don't know what to use for this /// argument, use `cranelift_module::default_libcall_names()`. pub fn with_isa( isa: Box<dyn TargetIsa>, libcall_names: Box<dyn Fn(ir::LibCall) -> String>, ) -> Self { debug_assert!(!isa.flags().is_pic(), "SimpleJIT requires non-PIC code"); let symbols = HashMap::new(); Self { isa, symbols, libcall_names, } } /// Define a symbol in the internal symbol table. /// /// The JIT will use the symbol table to resolve names that are declared, /// but not defined, in the module being compiled. A common example is /// external functions. With this method, functions and data can be exposed /// to the code being compiled which are defined by the host. /// /// If a symbol is defined more than once, the most recent definition will /// be retained. /// /// If the JIT fails to find a symbol in its internal table, it will fall /// back to a platform-specific search (this typically involves searching /// the current process for public symbols, followed by searching the /// platform's C runtime). pub fn symbol<K>(&mut self, name: K, ptr: *const u8) -> &Self where K: Into<String>, { self.symbols.insert(name.into(), ptr); self } /// Define multiple symbols in the internal symbol table. /// /// Using this is equivalent to calling `symbol` on each element. pub fn symbols<It, K>(&mut self, symbols: It) -> &Self where It: IntoIterator<Item = (K, *const u8)>, K: Into<String>, { for (name, ptr) in symbols { self.symbols.insert(name.into(), ptr); } self } } /// A `SimpleJITBackend` implements `Backend` and emits code and data into memory where it can be /// directly called and accessed. /// /// See the `SimpleJITBuilder` for a convenient way to construct `SimpleJITBackend` instances. 
pub struct SimpleJITBackend { isa: Box<dyn TargetIsa>, symbols: HashMap<String, *const u8>, libcall_names: Box<dyn Fn(ir::LibCall) -> String>, code_memory: Memory, readonly_memory: Memory, writable_memory: Memory, } /// A record of a relocation to perform. struct RelocRecord { offset: CodeOffset, reloc: Reloc, name: ir::ExternalName, addend: Addend, } pub struct SimpleJITCompiledFunction { code: *mut u8, size: usize, relocs: Vec<RelocRecord>, } pub struct SimpleJITCompiledData { storage: *mut u8, size: usize, relocs: Vec<RelocRecord>, } impl SimpleJITBackend { fn lookup_symbol(&self, name: &str) -> *const u8 { match self.symbols.get(name) { Some(&ptr) => ptr, None => lookup_with_dlsym(name), } } fn get_definition( &self, namespace: &ModuleNamespace<Self>, name: &ir::ExternalName, ) -> *const u8 { match *name { ir::ExternalName::User { .. } => { if namespace.is_function(name) { let (def, name_str, _signature) = namespace.get_function_definition(&name); match def { Some(compiled) => compiled.code, None => self.lookup_symbol(name_str), } } else { let (def, name_str, _writable) = namespace.get_data_definition(&name); match def { Some(compiled) => compiled.storage, None => self.lookup_symbol(name_str), } } } ir::ExternalName::LibCall(ref libcall) => { let sym = (self.libcall_names)(*libcall); self.lookup_symbol(&sym) } _ => panic!("invalid ExternalName {}", name), } } } impl<'simple_jit_backend> Backend for SimpleJITBackend { type Builder = SimpleJITBuilder; /// SimpleJIT compiled function and data objects may have outstanding /// relocations that need to be performed before the memory can be used. /// These relocations are performed within `finalize_function` and /// `finalize_data`. type CompiledFunction = SimpleJITCompiledFunction; type CompiledData = SimpleJITCompiledData; /// SimpleJIT emits code and data into memory, and provides raw pointers /// to them. 
type FinalizedFunction = *const u8; type FinalizedData = (*mut u8, usize); /// SimpleJIT emits code and data into memory as it processes them, so it /// doesn't need to provide anything after the `Module` is complete. type Product = (); /// Create a new `SimpleJITBackend`. fn new(builder: SimpleJITBuilder) -> Self { Self { isa: builder.isa, symbols: builder.symbols, libcall_names: builder.libcall_names, code_memory: Memory::new(), readonly_memory: Memory::new(), writable_memory: Memory::new(), } } fn isa(&self) -> &dyn TargetIsa { &*self.isa } fn declare_function(&mut self, _name: &str, _linkage: Linkage) { // Nothing to do. } fn declare_data( &mut self, _name: &str, _linkage: Linkage, _writable: bool, _align: Option<u8>, ) { // Nothing to do. } fn define_function( &mut self, name: &str, ctx: &cranelift_codegen::Context, _namespace: &ModuleNamespace<Self>, code_size: u32, ) -> ModuleResult<Self::CompiledFunction> { let size = code_size as usize; let ptr = self .code_memory .allocate(size, EXECUTABLE_DATA_ALIGNMENT) .expect("TODO: handle OOM etc."); if cfg!(target_os = "linux") && ::std::env::var_os("PERF_BUILDID_DIR").is_some() { let mut map_file = ::std::fs::OpenOptions::new() .create(true) .append(true) .open(format!("/tmp/perf-{}.map", ::std::process::id())) .unwrap(); let _ = writeln!(map_file, "{:x} {:x} {}", ptr as usize, code_size, name); } let mut reloc_sink = SimpleJITRelocSink::new(); // Ignore traps for now. For now, frontends should just avoid generating code // that traps. 
let mut trap_sink = NullTrapSink {}; unsafe { ctx.emit_to_memory(&*self.isa, ptr, &mut reloc_sink, &mut trap_sink) }; Ok(Self::CompiledFunction { code: ptr, size, relocs: reloc_sink.relocs, }) } fn define_data( &mut self, _name: &str, writable: bool, align: Option<u8>, data: &DataContext, _namespace: &ModuleNamespace<Self>, ) -> ModuleResult<Self::CompiledData> { let &DataDescription { ref init, ref function_decls, ref data_decls, ref function_relocs, ref data_relocs, } = data.description(); let size = init.size(); let storage = if writable { self.writable_memory .allocate(size, align.unwrap_or(WRITABLE_DATA_ALIGNMENT)) .expect("TODO: handle OOM etc.") } else { self.readonly_memory .allocate(size, align.unwrap_or(READONLY_DATA_ALIGNMENT)) .expect("TODO: handle OOM etc.") }; match *init { Init::Uninitialized => { panic!("data is not initialized yet"); } Init::Zeros { .. } => { unsafe { ptr::write_bytes(storage, 0, size) }; } Init::Bytes { ref contents } => { let src = contents.as_ptr(); unsafe { ptr::copy_nonoverlapping(src, storage, size) }; } } let reloc = match self.isa.triple().pointer_width().unwrap() { PointerWidth::U16 => panic!(), PointerWidth::U32 => Reloc::Abs4, PointerWidth::U64 => Reloc::Abs8, }; let mut relocs = Vec::new(); for &(offset, id) in function_relocs { relocs.push(RelocRecord { reloc, offset, name: function_decls[id].clone(), addend: 0, }); } for &(offset, id, addend) in data_relocs { relocs.push(RelocRecord { reloc, offset, name: data_decls[id].clone(), addend, }); } Ok(Self::CompiledData { storage, size, relocs, }) } fn write_data_funcaddr( &mut self, _data: &mut Self::CompiledData, _offset: usize, _what: ir::FuncRef, ) { unimplemented!(); } fn write_data_dataaddr( &mut self, _data: &mut Self::CompiledData, _offset: usize, _what: ir::GlobalValue, _usize: Addend, ) { unimplemented!(); } fn finalize_function( &mut self, func: &Self::CompiledFunction, namespace: &ModuleNamespace<Self>, ) -> Self::FinalizedFunction { use 
std::ptr::write_unaligned; for &RelocRecord { reloc, offset, ref name, addend, } in &func.relocs { let ptr = func.code; debug_assert!((offset as usize) < func.size); let at = unsafe { ptr.offset(offset as isize) }; let base = self.get_definition(namespace, name); // TODO: Handle overflow. let what = unsafe { base.offset(addend as isize) }; match reloc { Reloc::Abs4 => { // TODO: Handle overflow. #[cfg_attr(feature = "cargo-clippy", allow(clippy::cast_ptr_alignment))] unsafe { write_unaligned(at as *mut u32, what as u32) }; } Reloc::Abs8 => { #[cfg_attr(feature = "cargo-clippy", allow(clippy::cast_ptr_alignment))] unsafe { write_unaligned(at as *mut u64, what as u64) }; } Reloc::X86PCRel4 | Reloc::X86CallPCRel4 => { // TODO: Handle overflow. let pcrel = ((what as isize) - (at as isize)) as i32; #[cfg_attr(feature = "cargo-clippy", allow(clippy::cast_ptr_alignment))] unsafe { write_unaligned(at as *mut i32, pcrel) }; } Reloc::X86GOTPCRel4 | Reloc::X86CallPLTRel4 => panic!("unexpected PIC relocation"), _ => unimplemented!(), } } func.code } fn
(&self, func: &Self::CompiledFunction) -> Self::FinalizedFunction { func.code } fn finalize_data( &mut self, data: &Self::CompiledData, namespace: &ModuleNamespace<Self>, ) -> Self::FinalizedData { use std::ptr::write_unaligned; for &RelocRecord { reloc, offset, ref name, addend, } in &data.relocs { let ptr = data.storage; debug_assert!((offset as usize) < data.size); let at = unsafe { ptr.offset(offset as isize) }; let base = self.get_definition(namespace, name); // TODO: Handle overflow. let what = unsafe { base.offset(addend as isize) }; match reloc { Reloc::Abs4 => { // TODO: Handle overflow. #[cfg_attr(feature = "cargo-clippy", allow(clippy::cast_ptr_alignment))] unsafe { write_unaligned(at as *mut u32, what as u32) }; } Reloc::Abs8 => { #[cfg_attr(feature = "cargo-clippy", allow(clippy::cast_ptr_alignment))] unsafe { write_unaligned(at as *mut u64, what as u64) }; } Reloc::X86PCRel4 | Reloc::X86CallPCRel4 | Reloc::X86GOTPCRel4 | Reloc::X86CallPLTRel4 => panic!("unexpected text relocation in data"), _ => unimplemented!(), } } (data.storage, data.size) } fn get_finalized_data(&self, data: &Self::CompiledData) -> Self::FinalizedData { (data.storage, data.size) } fn publish(&mut self) { // Now that we're done patching, prepare the memory for execution! self.readonly_memory.set_readonly(); self.code_memory.set_readable_and_executable(); } /// SimpleJIT emits code and data into memory as it processes them, so it /// doesn't need to provide anything after the `Module` is complete. 
fn finish(self) {} } #[cfg(not(windows))] fn lookup_with_dlsym(name: &str) -> *const u8 { let c_str = CString::new(name).unwrap(); let c_str_ptr = c_str.as_ptr(); let sym = unsafe { libc::dlsym(libc::RTLD_DEFAULT, c_str_ptr) }; if sym.is_null() { panic!("can't resolve symbol {}", name); } sym as *const u8 } #[cfg(windows)] fn lookup_with_dlsym(name: &str) -> *const u8 { const MSVCRT_DLL: &[u8] = b"msvcrt.dll\0"; let c_str = CString::new(name).unwrap(); let c_str_ptr = c_str.as_ptr(); unsafe { let handles = [ // try to find the searched symbol in the currently running executable ptr::null_mut(), // try to find the searched symbol in local c runtime winapi::um::libloaderapi::GetModuleHandleA(MSVCRT_DLL.as_ptr() as *const i8), ]; for handle in &handles { let addr = winapi::um::libloaderapi::GetProcAddress(*handle, c_str_ptr); if addr.is_null() { continue; } return addr as *const u8; } let msg = if handles[1].is_null() { "(msvcrt not loaded)" } else { "" }; panic!("cannot resolve address of symbol {} {}", name, msg); } } struct SimpleJITRelocSink { pub relocs: Vec<RelocRecord>, } impl SimpleJITRelocSink { pub fn new() -> Self { Self { relocs: Vec::new() } } } impl RelocSink for SimpleJITRelocSink { fn reloc_ebb(&mut self, _offset: CodeOffset, _reloc: Reloc, _ebb_offset: CodeOffset) { unimplemented!(); } fn reloc_external( &mut self, offset: CodeOffset, reloc: Reloc, name: &ir::ExternalName, addend: Addend, ) { self.relocs.push(RelocRecord { offset, reloc, name: name.clone(), addend, }); } fn reloc_jt(&mut self, _offset: CodeOffset, reloc: Reloc, _jt: ir::JumpTable) { match reloc { Reloc::X86PCRelRodata4 => { // Not necessary to record this unless we are going to split apart code and its // jumptbl/rodata. } _ => { panic!("Unhandled reloc"); } } } }
get_finalized_function
identifier_name
backend.rs
//! Defines `SimpleJITBackend`. use crate::memory::Memory; use cranelift_codegen::binemit::{Addend, CodeOffset, NullTrapSink, Reloc, RelocSink}; use cranelift_codegen::isa::TargetIsa; use cranelift_codegen::{self, ir, settings}; use cranelift_module::{ Backend, DataContext, DataDescription, Init, Linkage, ModuleNamespace, ModuleResult, }; use cranelift_native; use libc; use std::collections::HashMap; use std::ffi::CString; use std::io::Write; use std::ptr; use target_lexicon::PointerWidth; #[cfg(windows)] use winapi; const EXECUTABLE_DATA_ALIGNMENT: u8 = 0x10; const WRITABLE_DATA_ALIGNMENT: u8 = 0x8; const READONLY_DATA_ALIGNMENT: u8 = 0x1; /// A builder for `SimpleJITBackend`. pub struct SimpleJITBuilder { isa: Box<dyn TargetIsa>, symbols: HashMap<String, *const u8>, libcall_names: Box<dyn Fn(ir::LibCall) -> String>, } impl SimpleJITBuilder { /// Create a new `SimpleJITBuilder`. /// /// The `libcall_names` function provides a way to translate `cranelift_codegen`'s `ir::LibCall` /// enum to symbols. LibCalls are inserted in the IR as part of the legalization for certain /// floating point instructions, and for stack probes. If you don't know what to use for this /// argument, use `cranelift_module::default_libcall_names()`. pub fn new(libcall_names: Box<dyn Fn(ir::LibCall) -> String>) -> Self { let flag_builder = settings::builder(); let isa_builder = cranelift_native::builder().unwrap_or_else(|msg| { panic!("host machine is not supported: {}", msg); }); let isa = isa_builder.finish(settings::Flags::new(flag_builder)); Self::with_isa(isa, libcall_names) } /// Create a new `SimpleJITBuilder` with an arbitrary target. This is mainly /// useful for testing. /// /// SimpleJIT requires a `TargetIsa` configured for non-PIC. /// /// To create a `SimpleJITBuilder` for native use, use the `new` constructor /// instead. /// /// The `libcall_names` function provides a way to translate `cranelift_codegen`'s `ir::LibCall` /// enum to symbols. 
LibCalls are inserted in the IR as part of the legalization for certain /// floating point instructions, and for stack probes. If you don't know what to use for this /// argument, use `cranelift_module::default_libcall_names()`. pub fn with_isa( isa: Box<dyn TargetIsa>, libcall_names: Box<dyn Fn(ir::LibCall) -> String>, ) -> Self { debug_assert!(!isa.flags().is_pic(), "SimpleJIT requires non-PIC code"); let symbols = HashMap::new(); Self { isa, symbols, libcall_names, } } /// Define a symbol in the internal symbol table. /// /// The JIT will use the symbol table to resolve names that are declared, /// but not defined, in the module being compiled. A common example is /// external functions. With this method, functions and data can be exposed /// to the code being compiled which are defined by the host. /// /// If a symbol is defined more than once, the most recent definition will /// be retained. /// /// If the JIT fails to find a symbol in its internal table, it will fall /// back to a platform-specific search (this typically involves searching /// the current process for public symbols, followed by searching the /// platform's C runtime). pub fn symbol<K>(&mut self, name: K, ptr: *const u8) -> &Self where K: Into<String>, { self.symbols.insert(name.into(), ptr); self } /// Define multiple symbols in the internal symbol table. /// /// Using this is equivalent to calling `symbol` on each element. pub fn symbols<It, K>(&mut self, symbols: It) -> &Self where It: IntoIterator<Item = (K, *const u8)>, K: Into<String>, { for (name, ptr) in symbols { self.symbols.insert(name.into(), ptr); } self } } /// A `SimpleJITBackend` implements `Backend` and emits code and data into memory where it can be /// directly called and accessed. /// /// See the `SimpleJITBuilder` for a convenient way to construct `SimpleJITBackend` instances. 
pub struct SimpleJITBackend { isa: Box<dyn TargetIsa>, symbols: HashMap<String, *const u8>, libcall_names: Box<dyn Fn(ir::LibCall) -> String>, code_memory: Memory, readonly_memory: Memory, writable_memory: Memory, } /// A record of a relocation to perform. struct RelocRecord { offset: CodeOffset, reloc: Reloc, name: ir::ExternalName, addend: Addend, } pub struct SimpleJITCompiledFunction { code: *mut u8, size: usize, relocs: Vec<RelocRecord>, } pub struct SimpleJITCompiledData { storage: *mut u8, size: usize, relocs: Vec<RelocRecord>, } impl SimpleJITBackend { fn lookup_symbol(&self, name: &str) -> *const u8 { match self.symbols.get(name) { Some(&ptr) => ptr, None => lookup_with_dlsym(name), } } fn get_definition( &self, namespace: &ModuleNamespace<Self>, name: &ir::ExternalName, ) -> *const u8 { match *name { ir::ExternalName::User { .. } => { if namespace.is_function(name) { let (def, name_str, _signature) = namespace.get_function_definition(&name); match def { Some(compiled) => compiled.code, None => self.lookup_symbol(name_str), } } else { let (def, name_str, _writable) = namespace.get_data_definition(&name); match def { Some(compiled) => compiled.storage, None => self.lookup_symbol(name_str), } } } ir::ExternalName::LibCall(ref libcall) => { let sym = (self.libcall_names)(*libcall); self.lookup_symbol(&sym) } _ => panic!("invalid ExternalName {}", name), } } } impl<'simple_jit_backend> Backend for SimpleJITBackend { type Builder = SimpleJITBuilder; /// SimpleJIT compiled function and data objects may have outstanding /// relocations that need to be performed before the memory can be used. /// These relocations are performed within `finalize_function` and /// `finalize_data`. type CompiledFunction = SimpleJITCompiledFunction; type CompiledData = SimpleJITCompiledData; /// SimpleJIT emits code and data into memory, and provides raw pointers /// to them. 
type FinalizedFunction = *const u8; type FinalizedData = (*mut u8, usize); /// SimpleJIT emits code and data into memory as it processes them, so it /// doesn't need to provide anything after the `Module` is complete. type Product = (); /// Create a new `SimpleJITBackend`. fn new(builder: SimpleJITBuilder) -> Self { Self { isa: builder.isa, symbols: builder.symbols, libcall_names: builder.libcall_names, code_memory: Memory::new(), readonly_memory: Memory::new(), writable_memory: Memory::new(), } } fn isa(&self) -> &dyn TargetIsa { &*self.isa } fn declare_function(&mut self, _name: &str, _linkage: Linkage) { // Nothing to do. } fn declare_data( &mut self, _name: &str, _linkage: Linkage, _writable: bool, _align: Option<u8>, ) { // Nothing to do. } fn define_function( &mut self, name: &str, ctx: &cranelift_codegen::Context, _namespace: &ModuleNamespace<Self>, code_size: u32, ) -> ModuleResult<Self::CompiledFunction> { let size = code_size as usize; let ptr = self .code_memory .allocate(size, EXECUTABLE_DATA_ALIGNMENT) .expect("TODO: handle OOM etc."); if cfg!(target_os = "linux") && ::std::env::var_os("PERF_BUILDID_DIR").is_some() { let mut map_file = ::std::fs::OpenOptions::new() .create(true) .append(true) .open(format!("/tmp/perf-{}.map", ::std::process::id())) .unwrap(); let _ = writeln!(map_file, "{:x} {:x} {}", ptr as usize, code_size, name); } let mut reloc_sink = SimpleJITRelocSink::new(); // Ignore traps for now. For now, frontends should just avoid generating code // that traps. 
let mut trap_sink = NullTrapSink {}; unsafe { ctx.emit_to_memory(&*self.isa, ptr, &mut reloc_sink, &mut trap_sink) }; Ok(Self::CompiledFunction { code: ptr, size, relocs: reloc_sink.relocs, }) } fn define_data( &mut self, _name: &str, writable: bool, align: Option<u8>, data: &DataContext, _namespace: &ModuleNamespace<Self>, ) -> ModuleResult<Self::CompiledData> { let &DataDescription { ref init, ref function_decls, ref data_decls, ref function_relocs, ref data_relocs, } = data.description(); let size = init.size(); let storage = if writable { self.writable_memory .allocate(size, align.unwrap_or(WRITABLE_DATA_ALIGNMENT)) .expect("TODO: handle OOM etc.") } else { self.readonly_memory .allocate(size, align.unwrap_or(READONLY_DATA_ALIGNMENT)) .expect("TODO: handle OOM etc.") }; match *init { Init::Uninitialized => { panic!("data is not initialized yet"); } Init::Zeros { .. } => { unsafe { ptr::write_bytes(storage, 0, size) }; } Init::Bytes { ref contents } => { let src = contents.as_ptr(); unsafe { ptr::copy_nonoverlapping(src, storage, size) }; } } let reloc = match self.isa.triple().pointer_width().unwrap() { PointerWidth::U16 => panic!(), PointerWidth::U32 => Reloc::Abs4, PointerWidth::U64 => Reloc::Abs8, }; let mut relocs = Vec::new(); for &(offset, id) in function_relocs { relocs.push(RelocRecord { reloc, offset, name: function_decls[id].clone(), addend: 0, }); } for &(offset, id, addend) in data_relocs { relocs.push(RelocRecord { reloc, offset, name: data_decls[id].clone(), addend, }); } Ok(Self::CompiledData { storage, size, relocs, }) } fn write_data_funcaddr( &mut self, _data: &mut Self::CompiledData, _offset: usize, _what: ir::FuncRef, ) { unimplemented!(); } fn write_data_dataaddr( &mut self, _data: &mut Self::CompiledData, _offset: usize, _what: ir::GlobalValue, _usize: Addend, ) { unimplemented!(); } fn finalize_function( &mut self, func: &Self::CompiledFunction, namespace: &ModuleNamespace<Self>, ) -> Self::FinalizedFunction { use 
std::ptr::write_unaligned; for &RelocRecord { reloc, offset, ref name, addend, } in &func.relocs { let ptr = func.code; debug_assert!((offset as usize) < func.size); let at = unsafe { ptr.offset(offset as isize) }; let base = self.get_definition(namespace, name); // TODO: Handle overflow. let what = unsafe { base.offset(addend as isize) }; match reloc { Reloc::Abs4 => { // TODO: Handle overflow. #[cfg_attr(feature = "cargo-clippy", allow(clippy::cast_ptr_alignment))] unsafe { write_unaligned(at as *mut u32, what as u32) }; } Reloc::Abs8 => { #[cfg_attr(feature = "cargo-clippy", allow(clippy::cast_ptr_alignment))] unsafe { write_unaligned(at as *mut u64, what as u64) }; } Reloc::X86PCRel4 | Reloc::X86CallPCRel4 => { // TODO: Handle overflow. let pcrel = ((what as isize) - (at as isize)) as i32; #[cfg_attr(feature = "cargo-clippy", allow(clippy::cast_ptr_alignment))] unsafe { write_unaligned(at as *mut i32, pcrel) }; } Reloc::X86GOTPCRel4 | Reloc::X86CallPLTRel4 => panic!("unexpected PIC relocation"), _ => unimplemented!(), } } func.code } fn get_finalized_function(&self, func: &Self::CompiledFunction) -> Self::FinalizedFunction { func.code } fn finalize_data( &mut self, data: &Self::CompiledData, namespace: &ModuleNamespace<Self>, ) -> Self::FinalizedData { use std::ptr::write_unaligned; for &RelocRecord { reloc, offset, ref name, addend, } in &data.relocs { let ptr = data.storage; debug_assert!((offset as usize) < data.size); let at = unsafe { ptr.offset(offset as isize) }; let base = self.get_definition(namespace, name); // TODO: Handle overflow. let what = unsafe { base.offset(addend as isize) }; match reloc { Reloc::Abs4 => { // TODO: Handle overflow. #[cfg_attr(feature = "cargo-clippy", allow(clippy::cast_ptr_alignment))] unsafe { write_unaligned(at as *mut u32, what as u32) }; } Reloc::Abs8 => { #[cfg_attr(feature = "cargo-clippy", allow(clippy::cast_ptr_alignment))] unsafe { write_unaligned(at as *mut u64, what as u64) }; } Reloc::X86PCRel4
| Reloc::X86GOTPCRel4 | Reloc::X86CallPLTRel4 => panic!("unexpected text relocation in data"), _ => unimplemented!(), } } (data.storage, data.size) } fn get_finalized_data(&self, data: &Self::CompiledData) -> Self::FinalizedData { (data.storage, data.size) } fn publish(&mut self) { // Now that we're done patching, prepare the memory for execution! self.readonly_memory.set_readonly(); self.code_memory.set_readable_and_executable(); } /// SimpleJIT emits code and data into memory as it processes them, so it /// doesn't need to provide anything after the `Module` is complete. fn finish(self) {} } #[cfg(not(windows))] fn lookup_with_dlsym(name: &str) -> *const u8 { let c_str = CString::new(name).unwrap(); let c_str_ptr = c_str.as_ptr(); let sym = unsafe { libc::dlsym(libc::RTLD_DEFAULT, c_str_ptr) }; if sym.is_null() { panic!("can't resolve symbol {}", name); } sym as *const u8 } #[cfg(windows)] fn lookup_with_dlsym(name: &str) -> *const u8 { const MSVCRT_DLL: &[u8] = b"msvcrt.dll\0"; let c_str = CString::new(name).unwrap(); let c_str_ptr = c_str.as_ptr(); unsafe { let handles = [ // try to find the searched symbol in the currently running executable ptr::null_mut(), // try to find the searched symbol in local c runtime winapi::um::libloaderapi::GetModuleHandleA(MSVCRT_DLL.as_ptr() as *const i8), ]; for handle in &handles { let addr = winapi::um::libloaderapi::GetProcAddress(*handle, c_str_ptr); if addr.is_null() { continue; } return addr as *const u8; } let msg = if handles[1].is_null() { "(msvcrt not loaded)" } else { "" }; panic!("cannot resolve address of symbol {} {}", name, msg); } } struct SimpleJITRelocSink { pub relocs: Vec<RelocRecord>, } impl SimpleJITRelocSink { pub fn new() -> Self { Self { relocs: Vec::new() } } } impl RelocSink for SimpleJITRelocSink { fn reloc_ebb(&mut self, _offset: CodeOffset, _reloc: Reloc, _ebb_offset: CodeOffset) { unimplemented!(); } fn reloc_external( &mut self, offset: CodeOffset, reloc: Reloc, name: &ir::ExternalName, addend: 
Addend, ) { self.relocs.push(RelocRecord { offset, reloc, name: name.clone(), addend, }); } fn reloc_jt(&mut self, _offset: CodeOffset, reloc: Reloc, _jt: ir::JumpTable) { match reloc { Reloc::X86PCRelRodata4 => { // Not necessary to record this unless we are going to split apart code and its // jumptbl/rodata. } _ => { panic!("Unhandled reloc"); } } } }
| Reloc::X86CallPCRel4
random_line_split
backend.rs
//! Defines `SimpleJITBackend`. use crate::memory::Memory; use cranelift_codegen::binemit::{Addend, CodeOffset, NullTrapSink, Reloc, RelocSink}; use cranelift_codegen::isa::TargetIsa; use cranelift_codegen::{self, ir, settings}; use cranelift_module::{ Backend, DataContext, DataDescription, Init, Linkage, ModuleNamespace, ModuleResult, }; use cranelift_native; use libc; use std::collections::HashMap; use std::ffi::CString; use std::io::Write; use std::ptr; use target_lexicon::PointerWidth; #[cfg(windows)] use winapi; const EXECUTABLE_DATA_ALIGNMENT: u8 = 0x10; const WRITABLE_DATA_ALIGNMENT: u8 = 0x8; const READONLY_DATA_ALIGNMENT: u8 = 0x1; /// A builder for `SimpleJITBackend`. pub struct SimpleJITBuilder { isa: Box<dyn TargetIsa>, symbols: HashMap<String, *const u8>, libcall_names: Box<dyn Fn(ir::LibCall) -> String>, } impl SimpleJITBuilder { /// Create a new `SimpleJITBuilder`. /// /// The `libcall_names` function provides a way to translate `cranelift_codegen`'s `ir::LibCall` /// enum to symbols. LibCalls are inserted in the IR as part of the legalization for certain /// floating point instructions, and for stack probes. If you don't know what to use for this /// argument, use `cranelift_module::default_libcall_names()`. pub fn new(libcall_names: Box<dyn Fn(ir::LibCall) -> String>) -> Self { let flag_builder = settings::builder(); let isa_builder = cranelift_native::builder().unwrap_or_else(|msg| { panic!("host machine is not supported: {}", msg); }); let isa = isa_builder.finish(settings::Flags::new(flag_builder)); Self::with_isa(isa, libcall_names) } /// Create a new `SimpleJITBuilder` with an arbitrary target. This is mainly /// useful for testing. /// /// SimpleJIT requires a `TargetIsa` configured for non-PIC. /// /// To create a `SimpleJITBuilder` for native use, use the `new` constructor /// instead. /// /// The `libcall_names` function provides a way to translate `cranelift_codegen`'s `ir::LibCall` /// enum to symbols. 
LibCalls are inserted in the IR as part of the legalization for certain /// floating point instructions, and for stack probes. If you don't know what to use for this /// argument, use `cranelift_module::default_libcall_names()`. pub fn with_isa( isa: Box<dyn TargetIsa>, libcall_names: Box<dyn Fn(ir::LibCall) -> String>, ) -> Self { debug_assert!(!isa.flags().is_pic(), "SimpleJIT requires non-PIC code"); let symbols = HashMap::new(); Self { isa, symbols, libcall_names, } } /// Define a symbol in the internal symbol table. /// /// The JIT will use the symbol table to resolve names that are declared, /// but not defined, in the module being compiled. A common example is /// external functions. With this method, functions and data can be exposed /// to the code being compiled which are defined by the host. /// /// If a symbol is defined more than once, the most recent definition will /// be retained. /// /// If the JIT fails to find a symbol in its internal table, it will fall /// back to a platform-specific search (this typically involves searching /// the current process for public symbols, followed by searching the /// platform's C runtime). pub fn symbol<K>(&mut self, name: K, ptr: *const u8) -> &Self where K: Into<String>, { self.symbols.insert(name.into(), ptr); self } /// Define multiple symbols in the internal symbol table. /// /// Using this is equivalent to calling `symbol` on each element. pub fn symbols<It, K>(&mut self, symbols: It) -> &Self where It: IntoIterator<Item = (K, *const u8)>, K: Into<String>, { for (name, ptr) in symbols { self.symbols.insert(name.into(), ptr); } self } } /// A `SimpleJITBackend` implements `Backend` and emits code and data into memory where it can be /// directly called and accessed. /// /// See the `SimpleJITBuilder` for a convenient way to construct `SimpleJITBackend` instances. 
pub struct SimpleJITBackend { isa: Box<dyn TargetIsa>, symbols: HashMap<String, *const u8>, libcall_names: Box<dyn Fn(ir::LibCall) -> String>, code_memory: Memory, readonly_memory: Memory, writable_memory: Memory, } /// A record of a relocation to perform. struct RelocRecord { offset: CodeOffset, reloc: Reloc, name: ir::ExternalName, addend: Addend, } pub struct SimpleJITCompiledFunction { code: *mut u8, size: usize, relocs: Vec<RelocRecord>, } pub struct SimpleJITCompiledData { storage: *mut u8, size: usize, relocs: Vec<RelocRecord>, } impl SimpleJITBackend { fn lookup_symbol(&self, name: &str) -> *const u8 { match self.symbols.get(name) { Some(&ptr) => ptr, None => lookup_with_dlsym(name), } } fn get_definition( &self, namespace: &ModuleNamespace<Self>, name: &ir::ExternalName, ) -> *const u8 { match *name { ir::ExternalName::User { .. } => { if namespace.is_function(name) { let (def, name_str, _signature) = namespace.get_function_definition(&name); match def { Some(compiled) => compiled.code, None => self.lookup_symbol(name_str), } } else { let (def, name_str, _writable) = namespace.get_data_definition(&name); match def { Some(compiled) => compiled.storage, None => self.lookup_symbol(name_str), } } } ir::ExternalName::LibCall(ref libcall) => { let sym = (self.libcall_names)(*libcall); self.lookup_symbol(&sym) } _ => panic!("invalid ExternalName {}", name), } } } impl<'simple_jit_backend> Backend for SimpleJITBackend { type Builder = SimpleJITBuilder; /// SimpleJIT compiled function and data objects may have outstanding /// relocations that need to be performed before the memory can be used. /// These relocations are performed within `finalize_function` and /// `finalize_data`. type CompiledFunction = SimpleJITCompiledFunction; type CompiledData = SimpleJITCompiledData; /// SimpleJIT emits code and data into memory, and provides raw pointers /// to them. 
type FinalizedFunction = *const u8; type FinalizedData = (*mut u8, usize); /// SimpleJIT emits code and data into memory as it processes them, so it /// doesn't need to provide anything after the `Module` is complete. type Product = (); /// Create a new `SimpleJITBackend`. fn new(builder: SimpleJITBuilder) -> Self { Self { isa: builder.isa, symbols: builder.symbols, libcall_names: builder.libcall_names, code_memory: Memory::new(), readonly_memory: Memory::new(), writable_memory: Memory::new(), } } fn isa(&self) -> &dyn TargetIsa
fn declare_function(&mut self, _name: &str, _linkage: Linkage) { // Nothing to do. } fn declare_data( &mut self, _name: &str, _linkage: Linkage, _writable: bool, _align: Option<u8>, ) { // Nothing to do. } fn define_function( &mut self, name: &str, ctx: &cranelift_codegen::Context, _namespace: &ModuleNamespace<Self>, code_size: u32, ) -> ModuleResult<Self::CompiledFunction> { let size = code_size as usize; let ptr = self .code_memory .allocate(size, EXECUTABLE_DATA_ALIGNMENT) .expect("TODO: handle OOM etc."); if cfg!(target_os = "linux") && ::std::env::var_os("PERF_BUILDID_DIR").is_some() { let mut map_file = ::std::fs::OpenOptions::new() .create(true) .append(true) .open(format!("/tmp/perf-{}.map", ::std::process::id())) .unwrap(); let _ = writeln!(map_file, "{:x} {:x} {}", ptr as usize, code_size, name); } let mut reloc_sink = SimpleJITRelocSink::new(); // Ignore traps for now. For now, frontends should just avoid generating code // that traps. let mut trap_sink = NullTrapSink {}; unsafe { ctx.emit_to_memory(&*self.isa, ptr, &mut reloc_sink, &mut trap_sink) }; Ok(Self::CompiledFunction { code: ptr, size, relocs: reloc_sink.relocs, }) } fn define_data( &mut self, _name: &str, writable: bool, align: Option<u8>, data: &DataContext, _namespace: &ModuleNamespace<Self>, ) -> ModuleResult<Self::CompiledData> { let &DataDescription { ref init, ref function_decls, ref data_decls, ref function_relocs, ref data_relocs, } = data.description(); let size = init.size(); let storage = if writable { self.writable_memory .allocate(size, align.unwrap_or(WRITABLE_DATA_ALIGNMENT)) .expect("TODO: handle OOM etc.") } else { self.readonly_memory .allocate(size, align.unwrap_or(READONLY_DATA_ALIGNMENT)) .expect("TODO: handle OOM etc.") }; match *init { Init::Uninitialized => { panic!("data is not initialized yet"); } Init::Zeros { .. 
} => { unsafe { ptr::write_bytes(storage, 0, size) }; } Init::Bytes { ref contents } => { let src = contents.as_ptr(); unsafe { ptr::copy_nonoverlapping(src, storage, size) }; } } let reloc = match self.isa.triple().pointer_width().unwrap() { PointerWidth::U16 => panic!(), PointerWidth::U32 => Reloc::Abs4, PointerWidth::U64 => Reloc::Abs8, }; let mut relocs = Vec::new(); for &(offset, id) in function_relocs { relocs.push(RelocRecord { reloc, offset, name: function_decls[id].clone(), addend: 0, }); } for &(offset, id, addend) in data_relocs { relocs.push(RelocRecord { reloc, offset, name: data_decls[id].clone(), addend, }); } Ok(Self::CompiledData { storage, size, relocs, }) } fn write_data_funcaddr( &mut self, _data: &mut Self::CompiledData, _offset: usize, _what: ir::FuncRef, ) { unimplemented!(); } fn write_data_dataaddr( &mut self, _data: &mut Self::CompiledData, _offset: usize, _what: ir::GlobalValue, _usize: Addend, ) { unimplemented!(); } fn finalize_function( &mut self, func: &Self::CompiledFunction, namespace: &ModuleNamespace<Self>, ) -> Self::FinalizedFunction { use std::ptr::write_unaligned; for &RelocRecord { reloc, offset, ref name, addend, } in &func.relocs { let ptr = func.code; debug_assert!((offset as usize) < func.size); let at = unsafe { ptr.offset(offset as isize) }; let base = self.get_definition(namespace, name); // TODO: Handle overflow. let what = unsafe { base.offset(addend as isize) }; match reloc { Reloc::Abs4 => { // TODO: Handle overflow. #[cfg_attr(feature = "cargo-clippy", allow(clippy::cast_ptr_alignment))] unsafe { write_unaligned(at as *mut u32, what as u32) }; } Reloc::Abs8 => { #[cfg_attr(feature = "cargo-clippy", allow(clippy::cast_ptr_alignment))] unsafe { write_unaligned(at as *mut u64, what as u64) }; } Reloc::X86PCRel4 | Reloc::X86CallPCRel4 => { // TODO: Handle overflow. 
let pcrel = ((what as isize) - (at as isize)) as i32; #[cfg_attr(feature = "cargo-clippy", allow(clippy::cast_ptr_alignment))] unsafe { write_unaligned(at as *mut i32, pcrel) }; } Reloc::X86GOTPCRel4 | Reloc::X86CallPLTRel4 => panic!("unexpected PIC relocation"), _ => unimplemented!(), } } func.code } fn get_finalized_function(&self, func: &Self::CompiledFunction) -> Self::FinalizedFunction { func.code } fn finalize_data( &mut self, data: &Self::CompiledData, namespace: &ModuleNamespace<Self>, ) -> Self::FinalizedData { use std::ptr::write_unaligned; for &RelocRecord { reloc, offset, ref name, addend, } in &data.relocs { let ptr = data.storage; debug_assert!((offset as usize) < data.size); let at = unsafe { ptr.offset(offset as isize) }; let base = self.get_definition(namespace, name); // TODO: Handle overflow. let what = unsafe { base.offset(addend as isize) }; match reloc { Reloc::Abs4 => { // TODO: Handle overflow. #[cfg_attr(feature = "cargo-clippy", allow(clippy::cast_ptr_alignment))] unsafe { write_unaligned(at as *mut u32, what as u32) }; } Reloc::Abs8 => { #[cfg_attr(feature = "cargo-clippy", allow(clippy::cast_ptr_alignment))] unsafe { write_unaligned(at as *mut u64, what as u64) }; } Reloc::X86PCRel4 | Reloc::X86CallPCRel4 | Reloc::X86GOTPCRel4 | Reloc::X86CallPLTRel4 => panic!("unexpected text relocation in data"), _ => unimplemented!(), } } (data.storage, data.size) } fn get_finalized_data(&self, data: &Self::CompiledData) -> Self::FinalizedData { (data.storage, data.size) } fn publish(&mut self) { // Now that we're done patching, prepare the memory for execution! self.readonly_memory.set_readonly(); self.code_memory.set_readable_and_executable(); } /// SimpleJIT emits code and data into memory as it processes them, so it /// doesn't need to provide anything after the `Module` is complete. 
fn finish(self) {} } #[cfg(not(windows))] fn lookup_with_dlsym(name: &str) -> *const u8 { let c_str = CString::new(name).unwrap(); let c_str_ptr = c_str.as_ptr(); let sym = unsafe { libc::dlsym(libc::RTLD_DEFAULT, c_str_ptr) }; if sym.is_null() { panic!("can't resolve symbol {}", name); } sym as *const u8 } #[cfg(windows)] fn lookup_with_dlsym(name: &str) -> *const u8 { const MSVCRT_DLL: &[u8] = b"msvcrt.dll\0"; let c_str = CString::new(name).unwrap(); let c_str_ptr = c_str.as_ptr(); unsafe { let handles = [ // try to find the searched symbol in the currently running executable ptr::null_mut(), // try to find the searched symbol in local c runtime winapi::um::libloaderapi::GetModuleHandleA(MSVCRT_DLL.as_ptr() as *const i8), ]; for handle in &handles { let addr = winapi::um::libloaderapi::GetProcAddress(*handle, c_str_ptr); if addr.is_null() { continue; } return addr as *const u8; } let msg = if handles[1].is_null() { "(msvcrt not loaded)" } else { "" }; panic!("cannot resolve address of symbol {} {}", name, msg); } } struct SimpleJITRelocSink { pub relocs: Vec<RelocRecord>, } impl SimpleJITRelocSink { pub fn new() -> Self { Self { relocs: Vec::new() } } } impl RelocSink for SimpleJITRelocSink { fn reloc_ebb(&mut self, _offset: CodeOffset, _reloc: Reloc, _ebb_offset: CodeOffset) { unimplemented!(); } fn reloc_external( &mut self, offset: CodeOffset, reloc: Reloc, name: &ir::ExternalName, addend: Addend, ) { self.relocs.push(RelocRecord { offset, reloc, name: name.clone(), addend, }); } fn reloc_jt(&mut self, _offset: CodeOffset, reloc: Reloc, _jt: ir::JumpTable) { match reloc { Reloc::X86PCRelRodata4 => { // Not necessary to record this unless we are going to split apart code and its // jumptbl/rodata. } _ => { panic!("Unhandled reloc"); } } } }
{ &*self.isa }
identifier_body
composition_patches.go
/* Copyright 2020 The Crossplane Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package v1 import ( "fmt" "reflect" "github.com/pkg/errors" "k8s.io/apimachinery/pkg/runtime" xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" "github.com/crossplane/crossplane-runtime/pkg/fieldpath" ) const ( errPatchSetType = "a patch in a PatchSet cannot be of type PatchSet" errCombineRequiresVariables = "combine patch types require at least one variable" errFmtRequiredField = "%s is required by type %s" errFmtUndefinedPatchSet = "cannot find PatchSet by name %s" errFmtInvalidPatchType = "patch type %s is unsupported" errFmtCombineStrategyNotSupported = "combine strategy %s is not supported" errFmtCombineConfigMissing = "given combine strategy %s requires configuration" errFmtCombineStrategyFailed = "%s strategy could not combine" ) // A PatchType is a type of patch. type PatchType string // Patch types. const ( PatchTypeFromCompositeFieldPath PatchType = "FromCompositeFieldPath" // Default PatchTypePatchSet PatchType = "PatchSet" PatchTypeToCompositeFieldPath PatchType = "ToCompositeFieldPath" PatchTypeCombineFromComposite PatchType = "CombineFromComposite" PatchTypeCombineToComposite PatchType = "CombineToComposite" ) // A FromFieldPathPolicy determines how to patch from a field path. type FromFieldPathPolicy string // FromFieldPath patch policies. 
const ( FromFieldPathPolicyOptional FromFieldPathPolicy = "Optional" FromFieldPathPolicyRequired FromFieldPathPolicy = "Required" ) // A PatchPolicy configures the specifics of patching behaviour. type PatchPolicy struct { // FromFieldPath specifies how to patch from a field path. The default is // 'Optional', which means the patch will be a no-op if the specified // fromFieldPath does not exist. Use 'Required' if the patch should fail if // the specified path does not exist. // +kubebuilder:validation:Enum=Optional;Required // +optional FromFieldPath *FromFieldPathPolicy `json:"fromFieldPath,omitempty"` MergeOptions *xpv1.MergeOptions `json:"mergeOptions,omitempty"` } // Patch objects are applied between composite and composed resources. Their // behaviour depends on the Type selected. The default Type, // FromCompositeFieldPath, copies a value from the composite resource to // the composed resource, applying any defined transformers. type Patch struct { // Type sets the patching behaviour to be used. Each patch type may require // its' own fields to be set on the Patch object. // +optional // +kubebuilder:validation:Enum=FromCompositeFieldPath;PatchSet;ToCompositeFieldPath;CombineFromComposite;CombineToComposite // +kubebuilder:default=FromCompositeFieldPath Type PatchType `json:"type,omitempty"` // FromFieldPath is the path of the field on the resource whose value is // to be used as input. Required when type is FromCompositeFieldPath or // ToCompositeFieldPath. // +optional FromFieldPath *string `json:"fromFieldPath,omitempty"` // Combine is the patch configuration for a CombineFromComposite or // CombineToComposite patch. // +optional Combine *Combine `json:"combine,omitempty"` // ToFieldPath is the path of the field on the resource whose value will // be changed with the result of transforms. Leave empty if you'd like to // propagate to the same path as fromFieldPath. 
// +optional ToFieldPath *string `json:"toFieldPath,omitempty"` // PatchSetName to include patches from. Required when type is PatchSet. // +optional PatchSetName *string `json:"patchSetName,omitempty"` // Transforms are the list of functions that are used as a FIFO pipe for the // input to be transformed. // +optional Transforms []Transform `json:"transforms,omitempty"` // Policy configures the specifics of patching behaviour. // +optional Policy *PatchPolicy `json:"policy,omitempty"` } // Apply executes a patching operation between the from and to resources. // Applies all patch types unless an 'only' filter is supplied. func (c *Patch) Apply(cp, cd runtime.Object, only ...PatchType) error { if c.filterPatch(only...) { return nil } switch c.Type { case PatchTypeFromCompositeFieldPath: return c.applyFromFieldPathPatch(cp, cd) case PatchTypeToCompositeFieldPath: return c.applyFromFieldPathPatch(cd, cp) case PatchTypeCombineFromComposite: return c.applyCombineFromVariablesPatch(cp, cd) case PatchTypeCombineToComposite: return c.applyCombineFromVariablesPatch(cd, cp) case PatchTypePatchSet: // Already resolved - nothing to do. } return errors.Errorf(errFmtInvalidPatchType, c.Type) } // filterPatch returns true if patch should be filtered (not applied) func (c *Patch) filterPatch(only ...PatchType) bool { // filter does not apply if not set if len(only) == 0 { return false } for _, patchType := range only { if patchType == c.Type { return false } } return true } // applyTransforms applies a list of transforms to a patch value. func (c *Patch) applyTransforms(input interface{}) (interface{}, error)
// patchFieldValueToObject, given a path, value and "to" object, will // apply the value to the "to" object at the given path, returning // any errors as they occur. func patchFieldValueToObject(fieldPath string, value interface{}, to runtime.Object, mo *xpv1.MergeOptions) error { paved, err := fieldpath.PaveObject(to) if err != nil { return err } if err := paved.MergeValue(fieldPath, value, mo); err != nil { return err } return runtime.DefaultUnstructuredConverter.FromUnstructured(paved.UnstructuredContent(), to) } // applyFromFieldPathPatch patches the "to" resource, using a source field // on the "from" resource. Values may be transformed if any are defined on // the patch. func (c *Patch) applyFromFieldPathPatch(from, to runtime.Object) error { if c.FromFieldPath == nil { return errors.Errorf(errFmtRequiredField, "FromFieldPath", c.Type) } // Default to patching the same field on the composed resource. if c.ToFieldPath == nil { c.ToFieldPath = c.FromFieldPath } fromMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(from) if err != nil { return err } in, err := fieldpath.Pave(fromMap).GetValue(*c.FromFieldPath) if IsOptionalFieldPathNotFound(err, c.Policy) { return nil } if err != nil { return err } var mo *xpv1.MergeOptions if c.Policy != nil { mo = c.Policy.MergeOptions } // Apply transform pipeline out, err := c.applyTransforms(in) if err != nil { return err } return patchFieldValueToObject(*c.ToFieldPath, out, to, mo) } // applyCombineFromVariablesPatch patches the "to" resource, taking a list of // input variables and combining them into a single output value. // The single output value may then be further transformed if they are defined // on the patch. func (c *Patch) applyCombineFromVariablesPatch(from, to runtime.Object) error { // Combine patch requires configuration if c.Combine == nil { return errors.Errorf(errFmtRequiredField, "Combine", c.Type) } // Destination field path is required since we can't default to multiple // fields. 
if c.ToFieldPath == nil { return errors.Errorf(errFmtRequiredField, "ToFieldPath", c.Type) } vl := len(c.Combine.Variables) if vl < 1 { return errors.New(errCombineRequiresVariables) } fromMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(from) if err != nil { return err } in := make([]interface{}, vl) // Get value of each variable // NOTE: This currently assumes all variables define a 'fromFieldPath' // value. If we add new variable types, this may not be the case and // this code may be better served split out into a dedicated function. for i, sp := range c.Combine.Variables { iv, err := fieldpath.Pave(fromMap).GetValue(sp.FromFieldPath) // If any source field is not found, we will not // apply the patch. This is to avoid situations // where a combine patch is expecting a fixed // number of inputs (e.g. a string format // expecting 3 fields '%s-%s-%s' but only // receiving 2 values). if IsOptionalFieldPathNotFound(err, c.Policy) { return nil } if err != nil { return err } in[i] = iv } // Combine input values cb, err := c.Combine.Combine(in) if err != nil { return err } // Apply transform pipeline out, err := c.applyTransforms(cb) if err != nil { return err } return patchFieldValueToObject(*c.ToFieldPath, out, to, nil) } // IsOptionalFieldPathNotFound returns true if the supplied error indicates a // field path was not found, and the supplied policy indicates a patch from that // field path was optional. func IsOptionalFieldPathNotFound(err error, s *PatchPolicy) bool { switch { case s == nil: fallthrough case s.FromFieldPath == nil: fallthrough case *s.FromFieldPath == FromFieldPathPolicyOptional: return fieldpath.IsNotFound(err) default: return false } } // A CombineVariable defines the source of a value that is combined with // others to form and patch an output value. Currently, this only supports // retrieving values from a field path. 
type CombineVariable struct { // FromFieldPath is the path of the field on the source whose value is // to be used as input. FromFieldPath string `json:"fromFieldPath"` } // A CombineStrategy determines what strategy will be applied to combine // variables. type CombineStrategy string // CombineStrategy strategy definitions. const ( CombineStrategyString CombineStrategy = "string" ) // A Combine configures a patch that combines more than // one input field into a single output field. type Combine struct { // Variables are the list of variables whose values will be retrieved and // combined. // +kubebuilder:validation:MinItems=1 Variables []CombineVariable `json:"variables"` // Strategy defines the strategy to use to combine the input variable values. // Currently only string is supported. // +kubebuilder:validation:Enum=string Strategy CombineStrategy `json:"strategy"` // String declares that input variables should be combined into a single // string, using the relevant settings for formatting purposes. // +optional String *StringCombine `json:"string,omitempty"` } // A StringCombine combines multiple input values into a single string. type StringCombine struct { // Format the input using a Go format string. See // https://golang.org/pkg/fmt/ for details. Format string `json:"fmt"` } // Combine returns a single output by running a string format // with all of its' input variables. func (s *StringCombine) Combine(vars []interface{}) (interface{}, error) { return fmt.Sprintf(s.Format, vars...), nil } // Combine calls the appropriate combiner. func (c *Combine) Combine(vars []interface{}) (interface{}, error) { var combiner interface { Combine(vars []interface{}) (interface{}, error) } switch c.Strategy { case CombineStrategyString: combiner = c.String default: return nil, errors.Errorf(errFmtCombineStrategyNotSupported, string(c.Strategy)) } // Check for nil interface requires reflection. 
if reflect.ValueOf(combiner).IsNil() { return nil, errors.Errorf(errFmtCombineConfigMissing, string(c.Strategy)) } out, err := combiner.Combine(vars) // Note: There are currently no tests or triggers to exercise this error as // our only strategy ("String") uses fmt.Sprintf, which cannot return an error. return out, errors.Wrapf(err, errFmtCombineStrategyFailed, string(c.Strategy)) } // ComposedTemplates returns a revision's composed resource templates with any // patchsets dereferenced. func (rs *CompositionSpec) ComposedTemplates() ([]ComposedTemplate, error) { pn := make(map[string][]Patch) for _, s := range rs.PatchSets { for _, p := range s.Patches { if p.Type == PatchTypePatchSet { return nil, errors.New(errPatchSetType) } } pn[s.Name] = s.Patches } ct := make([]ComposedTemplate, len(rs.Resources)) for i, r := range rs.Resources { po := []Patch{} for _, p := range r.Patches { if p.Type != PatchTypePatchSet { po = append(po, p) continue } if p.PatchSetName == nil { return nil, errors.Errorf(errFmtRequiredField, "PatchSetName", p.Type) } ps, ok := pn[*p.PatchSetName] if !ok { return nil, errors.Errorf(errFmtUndefinedPatchSet, *p.PatchSetName) } po = append(po, ps...) } ct[i] = r ct[i].Patches = po } return ct, nil }
{ var err error for i, t := range c.Transforms { if input, err = t.Transform(input); err != nil { return nil, errors.Wrapf(err, errFmtTransformAtIndex, i) } } return input, nil }
identifier_body
composition_patches.go
/* Copyright 2020 The Crossplane Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package v1 import ( "fmt" "reflect" "github.com/pkg/errors" "k8s.io/apimachinery/pkg/runtime" xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" "github.com/crossplane/crossplane-runtime/pkg/fieldpath" ) const ( errPatchSetType = "a patch in a PatchSet cannot be of type PatchSet" errCombineRequiresVariables = "combine patch types require at least one variable" errFmtRequiredField = "%s is required by type %s" errFmtUndefinedPatchSet = "cannot find PatchSet by name %s" errFmtInvalidPatchType = "patch type %s is unsupported" errFmtCombineStrategyNotSupported = "combine strategy %s is not supported" errFmtCombineConfigMissing = "given combine strategy %s requires configuration" errFmtCombineStrategyFailed = "%s strategy could not combine" ) // A PatchType is a type of patch. type PatchType string // Patch types. const ( PatchTypeFromCompositeFieldPath PatchType = "FromCompositeFieldPath" // Default PatchTypePatchSet PatchType = "PatchSet" PatchTypeToCompositeFieldPath PatchType = "ToCompositeFieldPath" PatchTypeCombineFromComposite PatchType = "CombineFromComposite" PatchTypeCombineToComposite PatchType = "CombineToComposite" ) // A FromFieldPathPolicy determines how to patch from a field path. type FromFieldPathPolicy string // FromFieldPath patch policies. 
const ( FromFieldPathPolicyOptional FromFieldPathPolicy = "Optional" FromFieldPathPolicyRequired FromFieldPathPolicy = "Required" ) // A PatchPolicy configures the specifics of patching behaviour. type PatchPolicy struct { // FromFieldPath specifies how to patch from a field path. The default is // 'Optional', which means the patch will be a no-op if the specified // fromFieldPath does not exist. Use 'Required' if the patch should fail if // the specified path does not exist. // +kubebuilder:validation:Enum=Optional;Required // +optional FromFieldPath *FromFieldPathPolicy `json:"fromFieldPath,omitempty"` MergeOptions *xpv1.MergeOptions `json:"mergeOptions,omitempty"` } // Patch objects are applied between composite and composed resources. Their // behaviour depends on the Type selected. The default Type, // FromCompositeFieldPath, copies a value from the composite resource to // the composed resource, applying any defined transformers. type Patch struct { // Type sets the patching behaviour to be used. Each patch type may require // its' own fields to be set on the Patch object. // +optional // +kubebuilder:validation:Enum=FromCompositeFieldPath;PatchSet;ToCompositeFieldPath;CombineFromComposite;CombineToComposite // +kubebuilder:default=FromCompositeFieldPath Type PatchType `json:"type,omitempty"` // FromFieldPath is the path of the field on the resource whose value is // to be used as input. Required when type is FromCompositeFieldPath or // ToCompositeFieldPath. // +optional FromFieldPath *string `json:"fromFieldPath,omitempty"` // Combine is the patch configuration for a CombineFromComposite or // CombineToComposite patch. // +optional Combine *Combine `json:"combine,omitempty"` // ToFieldPath is the path of the field on the resource whose value will // be changed with the result of transforms. Leave empty if you'd like to // propagate to the same path as fromFieldPath. 
// +optional ToFieldPath *string `json:"toFieldPath,omitempty"` // PatchSetName to include patches from. Required when type is PatchSet. // +optional PatchSetName *string `json:"patchSetName,omitempty"` // Transforms are the list of functions that are used as a FIFO pipe for the // input to be transformed. // +optional Transforms []Transform `json:"transforms,omitempty"` // Policy configures the specifics of patching behaviour. // +optional Policy *PatchPolicy `json:"policy,omitempty"` } // Apply executes a patching operation between the from and to resources. // Applies all patch types unless an 'only' filter is supplied. func (c *Patch) Apply(cp, cd runtime.Object, only ...PatchType) error { if c.filterPatch(only...) { return nil } switch c.Type { case PatchTypeFromCompositeFieldPath: return c.applyFromFieldPathPatch(cp, cd) case PatchTypeToCompositeFieldPath: return c.applyFromFieldPathPatch(cd, cp) case PatchTypeCombineFromComposite: return c.applyCombineFromVariablesPatch(cp, cd) case PatchTypeCombineToComposite: return c.applyCombineFromVariablesPatch(cd, cp) case PatchTypePatchSet: // Already resolved - nothing to do. } return errors.Errorf(errFmtInvalidPatchType, c.Type) } // filterPatch returns true if patch should be filtered (not applied) func (c *Patch) filterPatch(only ...PatchType) bool { // filter does not apply if not set if len(only) == 0 { return false } for _, patchType := range only { if patchType == c.Type { return false } } return true } // applyTransforms applies a list of transforms to a patch value. func (c *Patch) applyTransforms(input interface{}) (interface{}, error) { var err error for i, t := range c.Transforms { if input, err = t.Transform(input); err != nil { return nil, errors.Wrapf(err, errFmtTransformAtIndex, i) } } return input, nil } // patchFieldValueToObject, given a path, value and "to" object, will // apply the value to the "to" object at the given path, returning // any errors as they occur. 
func patchFieldValueToObject(fieldPath string, value interface{}, to runtime.Object, mo *xpv1.MergeOptions) error { paved, err := fieldpath.PaveObject(to) if err != nil { return err } if err := paved.MergeValue(fieldPath, value, mo); err != nil { return err } return runtime.DefaultUnstructuredConverter.FromUnstructured(paved.UnstructuredContent(), to) } // applyFromFieldPathPatch patches the "to" resource, using a source field // on the "from" resource. Values may be transformed if any are defined on // the patch. func (c *Patch) applyFromFieldPathPatch(from, to runtime.Object) error { if c.FromFieldPath == nil { return errors.Errorf(errFmtRequiredField, "FromFieldPath", c.Type) } // Default to patching the same field on the composed resource. if c.ToFieldPath == nil { c.ToFieldPath = c.FromFieldPath } fromMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(from) if err != nil { return err } in, err := fieldpath.Pave(fromMap).GetValue(*c.FromFieldPath) if IsOptionalFieldPathNotFound(err, c.Policy) { return nil } if err != nil { return err } var mo *xpv1.MergeOptions if c.Policy != nil { mo = c.Policy.MergeOptions } // Apply transform pipeline out, err := c.applyTransforms(in) if err != nil { return err } return patchFieldValueToObject(*c.ToFieldPath, out, to, mo) } // applyCombineFromVariablesPatch patches the "to" resource, taking a list of // input variables and combining them into a single output value. // The single output value may then be further transformed if they are defined // on the patch. func (c *Patch) applyCombineFromVariablesPatch(from, to runtime.Object) error { // Combine patch requires configuration if c.Combine == nil { return errors.Errorf(errFmtRequiredField, "Combine", c.Type) } // Destination field path is required since we can't default to multiple // fields. 
if c.ToFieldPath == nil { return errors.Errorf(errFmtRequiredField, "ToFieldPath", c.Type) } vl := len(c.Combine.Variables) if vl < 1 { return errors.New(errCombineRequiresVariables) } fromMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(from) if err != nil { return err } in := make([]interface{}, vl) // Get value of each variable // NOTE: This currently assumes all variables define a 'fromFieldPath' // value. If we add new variable types, this may not be the case and // this code may be better served split out into a dedicated function. for i, sp := range c.Combine.Variables { iv, err := fieldpath.Pave(fromMap).GetValue(sp.FromFieldPath) // If any source field is not found, we will not // apply the patch. This is to avoid situations // where a combine patch is expecting a fixed // number of inputs (e.g. a string format // expecting 3 fields '%s-%s-%s' but only // receiving 2 values). if IsOptionalFieldPathNotFound(err, c.Policy) { return nil } if err != nil { return err } in[i] = iv } // Combine input values cb, err := c.Combine.Combine(in) if err != nil { return err } // Apply transform pipeline out, err := c.applyTransforms(cb) if err != nil { return err } return patchFieldValueToObject(*c.ToFieldPath, out, to, nil) } // IsOptionalFieldPathNotFound returns true if the supplied error indicates a // field path was not found, and the supplied policy indicates a patch from that // field path was optional. func IsOptionalFieldPathNotFound(err error, s *PatchPolicy) bool { switch { case s == nil: fallthrough case s.FromFieldPath == nil: fallthrough case *s.FromFieldPath == FromFieldPathPolicyOptional: return fieldpath.IsNotFound(err) default: return false } } // A CombineVariable defines the source of a value that is combined with // others to form and patch an output value. Currently, this only supports // retrieving values from a field path. 
type CombineVariable struct { // FromFieldPath is the path of the field on the source whose value is // to be used as input. FromFieldPath string `json:"fromFieldPath"` } // A CombineStrategy determines what strategy will be applied to combine // variables. type CombineStrategy string // CombineStrategy strategy definitions. const ( CombineStrategyString CombineStrategy = "string" ) // A Combine configures a patch that combines more than // one input field into a single output field. type Combine struct { // Variables are the list of variables whose values will be retrieved and // combined. // +kubebuilder:validation:MinItems=1 Variables []CombineVariable `json:"variables"` // Strategy defines the strategy to use to combine the input variable values. // Currently only string is supported. // +kubebuilder:validation:Enum=string Strategy CombineStrategy `json:"strategy"` // String declares that input variables should be combined into a single // string, using the relevant settings for formatting purposes. // +optional String *StringCombine `json:"string,omitempty"` } // A StringCombine combines multiple input values into a single string. type StringCombine struct { // Format the input using a Go format string. See // https://golang.org/pkg/fmt/ for details. Format string `json:"fmt"` } // Combine returns a single output by running a string format // with all of its' input variables. func (s *StringCombine) Combine(vars []interface{}) (interface{}, error) { return fmt.Sprintf(s.Format, vars...), nil } // Combine calls the appropriate combiner. func (c *Combine) Combine(vars []interface{}) (interface{}, error) { var combiner interface { Combine(vars []interface{}) (interface{}, error) } switch c.Strategy { case CombineStrategyString: combiner = c.String default: return nil, errors.Errorf(errFmtCombineStrategyNotSupported, string(c.Strategy)) } // Check for nil interface requires reflection. 
if reflect.ValueOf(combiner).IsNil() { return nil, errors.Errorf(errFmtCombineConfigMissing, string(c.Strategy)) } out, err := combiner.Combine(vars) // Note: There are currently no tests or triggers to exercise this error as // our only strategy ("String") uses fmt.Sprintf, which cannot return an error. return out, errors.Wrapf(err, errFmtCombineStrategyFailed, string(c.Strategy)) } // ComposedTemplates returns a revision's composed resource templates with any // patchsets dereferenced. func (rs *CompositionSpec) ComposedTemplates() ([]ComposedTemplate, error) { pn := make(map[string][]Patch) for _, s := range rs.PatchSets { for _, p := range s.Patches { if p.Type == PatchTypePatchSet { return nil, errors.New(errPatchSetType) } } pn[s.Name] = s.Patches } ct := make([]ComposedTemplate, len(rs.Resources)) for i, r := range rs.Resources { po := []Patch{} for _, p := range r.Patches { if p.Type != PatchTypePatchSet { po = append(po, p) continue
return nil, errors.Errorf(errFmtRequiredField, "PatchSetName", p.Type) } ps, ok := pn[*p.PatchSetName] if !ok { return nil, errors.Errorf(errFmtUndefinedPatchSet, *p.PatchSetName) } po = append(po, ps...) } ct[i] = r ct[i].Patches = po } return ct, nil }
} if p.PatchSetName == nil {
random_line_split
composition_patches.go
/* Copyright 2020 The Crossplane Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package v1 import ( "fmt" "reflect" "github.com/pkg/errors" "k8s.io/apimachinery/pkg/runtime" xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" "github.com/crossplane/crossplane-runtime/pkg/fieldpath" ) const ( errPatchSetType = "a patch in a PatchSet cannot be of type PatchSet" errCombineRequiresVariables = "combine patch types require at least one variable" errFmtRequiredField = "%s is required by type %s" errFmtUndefinedPatchSet = "cannot find PatchSet by name %s" errFmtInvalidPatchType = "patch type %s is unsupported" errFmtCombineStrategyNotSupported = "combine strategy %s is not supported" errFmtCombineConfigMissing = "given combine strategy %s requires configuration" errFmtCombineStrategyFailed = "%s strategy could not combine" ) // A PatchType is a type of patch. type PatchType string // Patch types. const ( PatchTypeFromCompositeFieldPath PatchType = "FromCompositeFieldPath" // Default PatchTypePatchSet PatchType = "PatchSet" PatchTypeToCompositeFieldPath PatchType = "ToCompositeFieldPath" PatchTypeCombineFromComposite PatchType = "CombineFromComposite" PatchTypeCombineToComposite PatchType = "CombineToComposite" ) // A FromFieldPathPolicy determines how to patch from a field path. type FromFieldPathPolicy string // FromFieldPath patch policies. 
const ( FromFieldPathPolicyOptional FromFieldPathPolicy = "Optional" FromFieldPathPolicyRequired FromFieldPathPolicy = "Required" ) // A PatchPolicy configures the specifics of patching behaviour. type PatchPolicy struct { // FromFieldPath specifies how to patch from a field path. The default is // 'Optional', which means the patch will be a no-op if the specified // fromFieldPath does not exist. Use 'Required' if the patch should fail if // the specified path does not exist. // +kubebuilder:validation:Enum=Optional;Required // +optional FromFieldPath *FromFieldPathPolicy `json:"fromFieldPath,omitempty"` MergeOptions *xpv1.MergeOptions `json:"mergeOptions,omitempty"` } // Patch objects are applied between composite and composed resources. Their // behaviour depends on the Type selected. The default Type, // FromCompositeFieldPath, copies a value from the composite resource to // the composed resource, applying any defined transformers. type Patch struct { // Type sets the patching behaviour to be used. Each patch type may require // its' own fields to be set on the Patch object. // +optional // +kubebuilder:validation:Enum=FromCompositeFieldPath;PatchSet;ToCompositeFieldPath;CombineFromComposite;CombineToComposite // +kubebuilder:default=FromCompositeFieldPath Type PatchType `json:"type,omitempty"` // FromFieldPath is the path of the field on the resource whose value is // to be used as input. Required when type is FromCompositeFieldPath or // ToCompositeFieldPath. // +optional FromFieldPath *string `json:"fromFieldPath,omitempty"` // Combine is the patch configuration for a CombineFromComposite or // CombineToComposite patch. // +optional Combine *Combine `json:"combine,omitempty"` // ToFieldPath is the path of the field on the resource whose value will // be changed with the result of transforms. Leave empty if you'd like to // propagate to the same path as fromFieldPath. 
// +optional ToFieldPath *string `json:"toFieldPath,omitempty"` // PatchSetName to include patches from. Required when type is PatchSet. // +optional PatchSetName *string `json:"patchSetName,omitempty"` // Transforms are the list of functions that are used as a FIFO pipe for the // input to be transformed. // +optional Transforms []Transform `json:"transforms,omitempty"` // Policy configures the specifics of patching behaviour. // +optional Policy *PatchPolicy `json:"policy,omitempty"` } // Apply executes a patching operation between the from and to resources. // Applies all patch types unless an 'only' filter is supplied. func (c *Patch) Apply(cp, cd runtime.Object, only ...PatchType) error { if c.filterPatch(only...)
switch c.Type { case PatchTypeFromCompositeFieldPath: return c.applyFromFieldPathPatch(cp, cd) case PatchTypeToCompositeFieldPath: return c.applyFromFieldPathPatch(cd, cp) case PatchTypeCombineFromComposite: return c.applyCombineFromVariablesPatch(cp, cd) case PatchTypeCombineToComposite: return c.applyCombineFromVariablesPatch(cd, cp) case PatchTypePatchSet: // Already resolved - nothing to do. } return errors.Errorf(errFmtInvalidPatchType, c.Type) } // filterPatch returns true if patch should be filtered (not applied) func (c *Patch) filterPatch(only ...PatchType) bool { // filter does not apply if not set if len(only) == 0 { return false } for _, patchType := range only { if patchType == c.Type { return false } } return true } // applyTransforms applies a list of transforms to a patch value. func (c *Patch) applyTransforms(input interface{}) (interface{}, error) { var err error for i, t := range c.Transforms { if input, err = t.Transform(input); err != nil { return nil, errors.Wrapf(err, errFmtTransformAtIndex, i) } } return input, nil } // patchFieldValueToObject, given a path, value and "to" object, will // apply the value to the "to" object at the given path, returning // any errors as they occur. func patchFieldValueToObject(fieldPath string, value interface{}, to runtime.Object, mo *xpv1.MergeOptions) error { paved, err := fieldpath.PaveObject(to) if err != nil { return err } if err := paved.MergeValue(fieldPath, value, mo); err != nil { return err } return runtime.DefaultUnstructuredConverter.FromUnstructured(paved.UnstructuredContent(), to) } // applyFromFieldPathPatch patches the "to" resource, using a source field // on the "from" resource. Values may be transformed if any are defined on // the patch. func (c *Patch) applyFromFieldPathPatch(from, to runtime.Object) error { if c.FromFieldPath == nil { return errors.Errorf(errFmtRequiredField, "FromFieldPath", c.Type) } // Default to patching the same field on the composed resource. 
if c.ToFieldPath == nil { c.ToFieldPath = c.FromFieldPath } fromMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(from) if err != nil { return err } in, err := fieldpath.Pave(fromMap).GetValue(*c.FromFieldPath) if IsOptionalFieldPathNotFound(err, c.Policy) { return nil } if err != nil { return err } var mo *xpv1.MergeOptions if c.Policy != nil { mo = c.Policy.MergeOptions } // Apply transform pipeline out, err := c.applyTransforms(in) if err != nil { return err } return patchFieldValueToObject(*c.ToFieldPath, out, to, mo) } // applyCombineFromVariablesPatch patches the "to" resource, taking a list of // input variables and combining them into a single output value. // The single output value may then be further transformed if they are defined // on the patch. func (c *Patch) applyCombineFromVariablesPatch(from, to runtime.Object) error { // Combine patch requires configuration if c.Combine == nil { return errors.Errorf(errFmtRequiredField, "Combine", c.Type) } // Destination field path is required since we can't default to multiple // fields. if c.ToFieldPath == nil { return errors.Errorf(errFmtRequiredField, "ToFieldPath", c.Type) } vl := len(c.Combine.Variables) if vl < 1 { return errors.New(errCombineRequiresVariables) } fromMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(from) if err != nil { return err } in := make([]interface{}, vl) // Get value of each variable // NOTE: This currently assumes all variables define a 'fromFieldPath' // value. If we add new variable types, this may not be the case and // this code may be better served split out into a dedicated function. for i, sp := range c.Combine.Variables { iv, err := fieldpath.Pave(fromMap).GetValue(sp.FromFieldPath) // If any source field is not found, we will not // apply the patch. This is to avoid situations // where a combine patch is expecting a fixed // number of inputs (e.g. a string format // expecting 3 fields '%s-%s-%s' but only // receiving 2 values). 
if IsOptionalFieldPathNotFound(err, c.Policy) { return nil } if err != nil { return err } in[i] = iv } // Combine input values cb, err := c.Combine.Combine(in) if err != nil { return err } // Apply transform pipeline out, err := c.applyTransforms(cb) if err != nil { return err } return patchFieldValueToObject(*c.ToFieldPath, out, to, nil) } // IsOptionalFieldPathNotFound returns true if the supplied error indicates a // field path was not found, and the supplied policy indicates a patch from that // field path was optional. func IsOptionalFieldPathNotFound(err error, s *PatchPolicy) bool { switch { case s == nil: fallthrough case s.FromFieldPath == nil: fallthrough case *s.FromFieldPath == FromFieldPathPolicyOptional: return fieldpath.IsNotFound(err) default: return false } } // A CombineVariable defines the source of a value that is combined with // others to form and patch an output value. Currently, this only supports // retrieving values from a field path. type CombineVariable struct { // FromFieldPath is the path of the field on the source whose value is // to be used as input. FromFieldPath string `json:"fromFieldPath"` } // A CombineStrategy determines what strategy will be applied to combine // variables. type CombineStrategy string // CombineStrategy strategy definitions. const ( CombineStrategyString CombineStrategy = "string" ) // A Combine configures a patch that combines more than // one input field into a single output field. type Combine struct { // Variables are the list of variables whose values will be retrieved and // combined. // +kubebuilder:validation:MinItems=1 Variables []CombineVariable `json:"variables"` // Strategy defines the strategy to use to combine the input variable values. // Currently only string is supported. // +kubebuilder:validation:Enum=string Strategy CombineStrategy `json:"strategy"` // String declares that input variables should be combined into a single // string, using the relevant settings for formatting purposes. 
// +optional String *StringCombine `json:"string,omitempty"` } // A StringCombine combines multiple input values into a single string. type StringCombine struct { // Format the input using a Go format string. See // https://golang.org/pkg/fmt/ for details. Format string `json:"fmt"` } // Combine returns a single output by running a string format // with all of its' input variables. func (s *StringCombine) Combine(vars []interface{}) (interface{}, error) { return fmt.Sprintf(s.Format, vars...), nil } // Combine calls the appropriate combiner. func (c *Combine) Combine(vars []interface{}) (interface{}, error) { var combiner interface { Combine(vars []interface{}) (interface{}, error) } switch c.Strategy { case CombineStrategyString: combiner = c.String default: return nil, errors.Errorf(errFmtCombineStrategyNotSupported, string(c.Strategy)) } // Check for nil interface requires reflection. if reflect.ValueOf(combiner).IsNil() { return nil, errors.Errorf(errFmtCombineConfigMissing, string(c.Strategy)) } out, err := combiner.Combine(vars) // Note: There are currently no tests or triggers to exercise this error as // our only strategy ("String") uses fmt.Sprintf, which cannot return an error. return out, errors.Wrapf(err, errFmtCombineStrategyFailed, string(c.Strategy)) } // ComposedTemplates returns a revision's composed resource templates with any // patchsets dereferenced. 
func (rs *CompositionSpec) ComposedTemplates() ([]ComposedTemplate, error) { pn := make(map[string][]Patch) for _, s := range rs.PatchSets { for _, p := range s.Patches { if p.Type == PatchTypePatchSet { return nil, errors.New(errPatchSetType) } } pn[s.Name] = s.Patches } ct := make([]ComposedTemplate, len(rs.Resources)) for i, r := range rs.Resources { po := []Patch{} for _, p := range r.Patches { if p.Type != PatchTypePatchSet { po = append(po, p) continue } if p.PatchSetName == nil { return nil, errors.Errorf(errFmtRequiredField, "PatchSetName", p.Type) } ps, ok := pn[*p.PatchSetName] if !ok { return nil, errors.Errorf(errFmtUndefinedPatchSet, *p.PatchSetName) } po = append(po, ps...) } ct[i] = r ct[i].Patches = po } return ct, nil }
{ return nil }
conditional_block
composition_patches.go
/* Copyright 2020 The Crossplane Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package v1 import ( "fmt" "reflect" "github.com/pkg/errors" "k8s.io/apimachinery/pkg/runtime" xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" "github.com/crossplane/crossplane-runtime/pkg/fieldpath" ) const ( errPatchSetType = "a patch in a PatchSet cannot be of type PatchSet" errCombineRequiresVariables = "combine patch types require at least one variable" errFmtRequiredField = "%s is required by type %s" errFmtUndefinedPatchSet = "cannot find PatchSet by name %s" errFmtInvalidPatchType = "patch type %s is unsupported" errFmtCombineStrategyNotSupported = "combine strategy %s is not supported" errFmtCombineConfigMissing = "given combine strategy %s requires configuration" errFmtCombineStrategyFailed = "%s strategy could not combine" ) // A PatchType is a type of patch. type PatchType string // Patch types. const ( PatchTypeFromCompositeFieldPath PatchType = "FromCompositeFieldPath" // Default PatchTypePatchSet PatchType = "PatchSet" PatchTypeToCompositeFieldPath PatchType = "ToCompositeFieldPath" PatchTypeCombineFromComposite PatchType = "CombineFromComposite" PatchTypeCombineToComposite PatchType = "CombineToComposite" ) // A FromFieldPathPolicy determines how to patch from a field path. type FromFieldPathPolicy string // FromFieldPath patch policies. 
const ( FromFieldPathPolicyOptional FromFieldPathPolicy = "Optional" FromFieldPathPolicyRequired FromFieldPathPolicy = "Required" ) // A PatchPolicy configures the specifics of patching behaviour. type PatchPolicy struct { // FromFieldPath specifies how to patch from a field path. The default is // 'Optional', which means the patch will be a no-op if the specified // fromFieldPath does not exist. Use 'Required' if the patch should fail if // the specified path does not exist. // +kubebuilder:validation:Enum=Optional;Required // +optional FromFieldPath *FromFieldPathPolicy `json:"fromFieldPath,omitempty"` MergeOptions *xpv1.MergeOptions `json:"mergeOptions,omitempty"` } // Patch objects are applied between composite and composed resources. Their // behaviour depends on the Type selected. The default Type, // FromCompositeFieldPath, copies a value from the composite resource to // the composed resource, applying any defined transformers. type Patch struct { // Type sets the patching behaviour to be used. Each patch type may require // its' own fields to be set on the Patch object. // +optional // +kubebuilder:validation:Enum=FromCompositeFieldPath;PatchSet;ToCompositeFieldPath;CombineFromComposite;CombineToComposite // +kubebuilder:default=FromCompositeFieldPath Type PatchType `json:"type,omitempty"` // FromFieldPath is the path of the field on the resource whose value is // to be used as input. Required when type is FromCompositeFieldPath or // ToCompositeFieldPath. // +optional FromFieldPath *string `json:"fromFieldPath,omitempty"` // Combine is the patch configuration for a CombineFromComposite or // CombineToComposite patch. // +optional Combine *Combine `json:"combine,omitempty"` // ToFieldPath is the path of the field on the resource whose value will // be changed with the result of transforms. Leave empty if you'd like to // propagate to the same path as fromFieldPath. 
// +optional ToFieldPath *string `json:"toFieldPath,omitempty"` // PatchSetName to include patches from. Required when type is PatchSet. // +optional PatchSetName *string `json:"patchSetName,omitempty"` // Transforms are the list of functions that are used as a FIFO pipe for the // input to be transformed. // +optional Transforms []Transform `json:"transforms,omitempty"` // Policy configures the specifics of patching behaviour. // +optional Policy *PatchPolicy `json:"policy,omitempty"` } // Apply executes a patching operation between the from and to resources. // Applies all patch types unless an 'only' filter is supplied. func (c *Patch) Apply(cp, cd runtime.Object, only ...PatchType) error { if c.filterPatch(only...) { return nil } switch c.Type { case PatchTypeFromCompositeFieldPath: return c.applyFromFieldPathPatch(cp, cd) case PatchTypeToCompositeFieldPath: return c.applyFromFieldPathPatch(cd, cp) case PatchTypeCombineFromComposite: return c.applyCombineFromVariablesPatch(cp, cd) case PatchTypeCombineToComposite: return c.applyCombineFromVariablesPatch(cd, cp) case PatchTypePatchSet: // Already resolved - nothing to do. } return errors.Errorf(errFmtInvalidPatchType, c.Type) } // filterPatch returns true if patch should be filtered (not applied) func (c *Patch) filterPatch(only ...PatchType) bool { // filter does not apply if not set if len(only) == 0 { return false } for _, patchType := range only { if patchType == c.Type { return false } } return true } // applyTransforms applies a list of transforms to a patch value. func (c *Patch) applyTransforms(input interface{}) (interface{}, error) { var err error for i, t := range c.Transforms { if input, err = t.Transform(input); err != nil { return nil, errors.Wrapf(err, errFmtTransformAtIndex, i) } } return input, nil } // patchFieldValueToObject, given a path, value and "to" object, will // apply the value to the "to" object at the given path, returning // any errors as they occur. 
func patchFieldValueToObject(fieldPath string, value interface{}, to runtime.Object, mo *xpv1.MergeOptions) error { paved, err := fieldpath.PaveObject(to) if err != nil { return err } if err := paved.MergeValue(fieldPath, value, mo); err != nil { return err } return runtime.DefaultUnstructuredConverter.FromUnstructured(paved.UnstructuredContent(), to) } // applyFromFieldPathPatch patches the "to" resource, using a source field // on the "from" resource. Values may be transformed if any are defined on // the patch. func (c *Patch) applyFromFieldPathPatch(from, to runtime.Object) error { if c.FromFieldPath == nil { return errors.Errorf(errFmtRequiredField, "FromFieldPath", c.Type) } // Default to patching the same field on the composed resource. if c.ToFieldPath == nil { c.ToFieldPath = c.FromFieldPath } fromMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(from) if err != nil { return err } in, err := fieldpath.Pave(fromMap).GetValue(*c.FromFieldPath) if IsOptionalFieldPathNotFound(err, c.Policy) { return nil } if err != nil { return err } var mo *xpv1.MergeOptions if c.Policy != nil { mo = c.Policy.MergeOptions } // Apply transform pipeline out, err := c.applyTransforms(in) if err != nil { return err } return patchFieldValueToObject(*c.ToFieldPath, out, to, mo) } // applyCombineFromVariablesPatch patches the "to" resource, taking a list of // input variables and combining them into a single output value. // The single output value may then be further transformed if they are defined // on the patch. func (c *Patch) applyCombineFromVariablesPatch(from, to runtime.Object) error { // Combine patch requires configuration if c.Combine == nil { return errors.Errorf(errFmtRequiredField, "Combine", c.Type) } // Destination field path is required since we can't default to multiple // fields. 
if c.ToFieldPath == nil { return errors.Errorf(errFmtRequiredField, "ToFieldPath", c.Type) } vl := len(c.Combine.Variables) if vl < 1 { return errors.New(errCombineRequiresVariables) } fromMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(from) if err != nil { return err } in := make([]interface{}, vl) // Get value of each variable // NOTE: This currently assumes all variables define a 'fromFieldPath' // value. If we add new variable types, this may not be the case and // this code may be better served split out into a dedicated function. for i, sp := range c.Combine.Variables { iv, err := fieldpath.Pave(fromMap).GetValue(sp.FromFieldPath) // If any source field is not found, we will not // apply the patch. This is to avoid situations // where a combine patch is expecting a fixed // number of inputs (e.g. a string format // expecting 3 fields '%s-%s-%s' but only // receiving 2 values). if IsOptionalFieldPathNotFound(err, c.Policy) { return nil } if err != nil { return err } in[i] = iv } // Combine input values cb, err := c.Combine.Combine(in) if err != nil { return err } // Apply transform pipeline out, err := c.applyTransforms(cb) if err != nil { return err } return patchFieldValueToObject(*c.ToFieldPath, out, to, nil) } // IsOptionalFieldPathNotFound returns true if the supplied error indicates a // field path was not found, and the supplied policy indicates a patch from that // field path was optional. func IsOptionalFieldPathNotFound(err error, s *PatchPolicy) bool { switch { case s == nil: fallthrough case s.FromFieldPath == nil: fallthrough case *s.FromFieldPath == FromFieldPathPolicyOptional: return fieldpath.IsNotFound(err) default: return false } } // A CombineVariable defines the source of a value that is combined with // others to form and patch an output value. Currently, this only supports // retrieving values from a field path. 
type CombineVariable struct { // FromFieldPath is the path of the field on the source whose value is // to be used as input. FromFieldPath string `json:"fromFieldPath"` } // A CombineStrategy determines what strategy will be applied to combine // variables. type CombineStrategy string // CombineStrategy strategy definitions. const ( CombineStrategyString CombineStrategy = "string" ) // A Combine configures a patch that combines more than // one input field into a single output field. type Combine struct { // Variables are the list of variables whose values will be retrieved and // combined. // +kubebuilder:validation:MinItems=1 Variables []CombineVariable `json:"variables"` // Strategy defines the strategy to use to combine the input variable values. // Currently only string is supported. // +kubebuilder:validation:Enum=string Strategy CombineStrategy `json:"strategy"` // String declares that input variables should be combined into a single // string, using the relevant settings for formatting purposes. // +optional String *StringCombine `json:"string,omitempty"` } // A StringCombine combines multiple input values into a single string. type StringCombine struct { // Format the input using a Go format string. See // https://golang.org/pkg/fmt/ for details. Format string `json:"fmt"` } // Combine returns a single output by running a string format // with all of its' input variables. func (s *StringCombine) Combine(vars []interface{}) (interface{}, error) { return fmt.Sprintf(s.Format, vars...), nil } // Combine calls the appropriate combiner. func (c *Combine)
(vars []interface{}) (interface{}, error) { var combiner interface { Combine(vars []interface{}) (interface{}, error) } switch c.Strategy { case CombineStrategyString: combiner = c.String default: return nil, errors.Errorf(errFmtCombineStrategyNotSupported, string(c.Strategy)) } // Check for nil interface requires reflection. if reflect.ValueOf(combiner).IsNil() { return nil, errors.Errorf(errFmtCombineConfigMissing, string(c.Strategy)) } out, err := combiner.Combine(vars) // Note: There are currently no tests or triggers to exercise this error as // our only strategy ("String") uses fmt.Sprintf, which cannot return an error. return out, errors.Wrapf(err, errFmtCombineStrategyFailed, string(c.Strategy)) } // ComposedTemplates returns a revision's composed resource templates with any // patchsets dereferenced. func (rs *CompositionSpec) ComposedTemplates() ([]ComposedTemplate, error) { pn := make(map[string][]Patch) for _, s := range rs.PatchSets { for _, p := range s.Patches { if p.Type == PatchTypePatchSet { return nil, errors.New(errPatchSetType) } } pn[s.Name] = s.Patches } ct := make([]ComposedTemplate, len(rs.Resources)) for i, r := range rs.Resources { po := []Patch{} for _, p := range r.Patches { if p.Type != PatchTypePatchSet { po = append(po, p) continue } if p.PatchSetName == nil { return nil, errors.Errorf(errFmtRequiredField, "PatchSetName", p.Type) } ps, ok := pn[*p.PatchSetName] if !ok { return nil, errors.Errorf(errFmtUndefinedPatchSet, *p.PatchSetName) } po = append(po, ps...) } ct[i] = r ct[i].Patches = po } return ct, nil }
Combine
identifier_name
Training.py
import csv import math import matplotlib.pyplot as plt from PIL import Image import numpy as np from tensorflow.keras import Model from tensorflow.keras.applications.mobilenet_v2 import MobileNetV2, preprocess_input from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau, Callback, CSVLogger from tensorflow.keras.layers import Conv2D, Reshape, Activation, BatchNormalization from tensorflow.keras.utils import Sequence from tensorflow.keras.backend import epsilon from keras.regularizers import l2 import tensorflow as tf tf.compat.v1.disable_eager_execution() # 0.35, 0.5, 0.75, 1.0 ALPHA = 0.35 # 96, 128, 160, 192, 224 IMAGE_SIZE = 224 EPOCHS = 200 BATCH_SIZE = 32 PATIENCE = 10 MULTI_PROCESSING = True THREADS = 4 # train 과 validation 경로 TRAIN_CSV = "train_shuffle.csv" # train data .csv VALIDATION_CSV = "validation_shuffle.csv" # validation data .csv TEST_CSV = "test_uniform.csv" # test data .csv MODEL_SAVE_FOLDER_PATH = './model/' # 저장될 모델 디렉토리 설정 class DataGenerator(Sequence): def __init__(self, csv_file): self.paths = [] with open(csv_file, "r") as file: self.coords = np.zeros((sum(1 for line in file), 4)) file.seek(0) reader = csv.reader(file, delimiter=",") for index, row in enumerate(reader): for i, r in enumerate(row[1:7]): # row[i+1] = int(r) row[i+1] = float(r) path, image_height, image_width, x0, y0, width, height, _, _ = row self.coords[index, 0] = float((x0 * IMAGE_SIZE / image_width) / IMAGE_SIZE) # xmin self.coords[index, 1] = float((y0 * IMAGE_SIZE / image_height) / IMAGE_SIZE) # ymin self.coords[index, 2] = float((width * IMAGE_SIZE / image_width) / IMAGE_SIZE) # width self.coords[index, 3] = float((height * IMAGE_SIZE / image_height) / IMAGE_SIZE) # height # int형 # self.coords[index, 0] = x0 * IMAGE_SIZE / image_width / image_width # xmin # self.coords[index, 1] = y0 * IMAGE_SIZE / image_height # ymin # self.coords[index, 2] = (x1 - x0) * IMAGE_SIZE / image_width #width # self.coords[index, 3] = (y1 - y0) * IMAGE_SIZE / 
image_height #height # self.coords[index, 2] = width * IMAGE_SIZE / image_width # width # self.coords[index, 3] = height * IMAGE_SIZE / image_height # height self.paths.append(path) def __len__(self): return math.ceil(len(self.coords) / BATCH_SIZE) def __getitem__(self, idx): batch_paths = self.paths[idx * BATCH_SIZE:(idx + 1) * BATCH_SIZE] batch_coords = self.coords[idx * BATCH_SIZE:(idx + 1) * BATCH_SIZE] batch_images = np.zeros((len(batch_paths), IMAGE_SIZE, IMAGE_SIZE, 3), dtype=np.float32) for i, f in enumerate(batch_paths): img = Image.open(f) img = img.resize((IMAGE_SIZE, IMAGE_SIZE)) img = img.convert('RGB') batch_images[i] = preprocess_input(np.array(img, dtype=np.float32)) img.close() return batch_images, batch_coords class Training(Callback): def __init__(self, generator): self.generator = generator
poch, logs): train_pos_count = 0 train_neg_count = 0 for i in range(len(self.generator)): batch_images, gt = self.generator[i] pred = self.model.predict_on_batch(batch_images) train_gt_len = len(gt) pred = np.maximum(pred, 0) ######################################################################################################## # iou 계산 diff_width = np.minimum(gt[:, 0] + gt[:, 2], pred[:, 0] + pred[:, 2]) - np.maximum(gt[:, 0], pred[:, 0]) diff_height = np.minimum(gt[:, 1] + gt[:, 3], pred[:, 1] + pred[:, 3]) - np.maximum(gt[:, 1], pred[:, 1]) intersection = np.maximum(diff_width, 0) * np.maximum(diff_height, 0) area_gt = gt[:, 2] * gt[:, 3] area_pred = pred[:, 2] * pred[:, 3] union = np.maximum(area_gt + area_pred - intersection, 0) train_iou = np.round(intersection / (union + epsilon()), 4) ######################################################################################################## for q in range(train_gt_len): if train_iou[q] >= 0.5: # iou threshold 0.5 지정 train_pos_count += 1 else: train_neg_count += 1 train_data_count = 59521 # train_총 갯수 train_acc = np.round(train_pos_count / train_data_count, 4) logs["train_acc"] = train_acc print(" - train_acc: {}".format(train_acc)) class Validation(Callback): def __init__(self, generator): self.generator = generator def on_epoch_end(self, epoch, logs): mse = 0 intersections = 0 unions = 0 val_pos_count = 0 val_neg_count = 0 for i in range(len(self.generator)): batch_images, gt = self.generator[i] pred = self.model.predict_on_batch(batch_images) mse += np.linalg.norm(gt - pred, ord='fro') / pred.shape[0] val_gt_len = len(gt) pred = np.maximum(pred, 0) ######################################################################################################## # iou 계산 diff_width = np.minimum(gt[:, 0] + gt[:, 2], pred[:, 0] + pred[:, 2]) - np.maximum(gt[:, 0], pred[:, 0]) diff_height = np.minimum(gt[:, 1] + gt[:, 3], pred[:, 1] + pred[:, 3]) - np.maximum(gt[:, 1], pred[:, 1]) intersection = np.maximum(diff_width, 0) 
* np.maximum(diff_height, 0) area_gt = gt[:, 2] * gt[:, 3] area_pred = pred[:, 2] * pred[:, 3] union = np.maximum(area_gt + area_pred - intersection, 0) val_iou = np.round(intersection / (union + epsilon()), 4) ######################################################################################################## for q in range(val_gt_len): if val_iou[q] >= 0.5: # iou threshold 0.5 지정 val_pos_count += 1 else: val_neg_count += 1 intersections += np.sum(intersection * (union > 0)) unions += np.sum(union) val_data_count = 14879 # validation 총 갯수 val_acc = np.round(val_pos_count / val_data_count, 4) logs["val_acc"] = val_acc iou = np.round(intersections / (unions + epsilon()), 4) logs["val_iou"] = iou mse = np.round(mse, 4) logs["val_mse"] = mse print(" - val_iou: {} - val_mse: {} - val_acc: {}".format(iou, mse, val_acc)) class Test_set(Callback): def __init__(self, generator): self.generator = generator def on_epoch_end(self, epoch, logs): test_pos_count = 0 test_neg_count = 0 for i in range(len(self.generator)): batch_images, gt = self.generator[i] pred = self.model.predict_on_batch(batch_images) test_gt_len = len(gt) pred = np.maximum(pred, 0) ######################################################################################################## # iou 계산 diff_width = np.minimum(gt[:, 0] + gt[:, 2], pred[:, 0] + pred[:, 2]) - np.maximum(gt[:, 0], pred[:, 0]) diff_height = np.minimum(gt[:, 1] + gt[:, 3], pred[:, 1] + pred[:, 3]) - np.maximum(gt[:, 1], pred[:, 1]) intersection = np.maximum(diff_width, 0) * np.maximum(diff_height, 0) area_gt = gt[:, 2] * gt[:, 3] area_pred = pred[:, 2] * pred[:, 3] union = np.maximum(area_gt + area_pred - intersection, 0) test_iou = np.round(intersection / (union + epsilon()), 4) ######################################################################################################## for q in range(test_gt_len): if test_iou[q] >= 0.5: # iou threshold 0.5 지정 test_pos_count += 1 else: test_neg_count += 1 test_data_count = 1245 # test 총 
갯수 test_acc = np.round(test_pos_count / test_data_count, 4) logs["test_acc"] = test_acc print(" - test_acc: {}".format(test_acc)) def create_model(trainable=False): # pre-trained 된 moblienetv2 아키텍처 이용 model = MobileNetV2(input_shape=(IMAGE_SIZE, IMAGE_SIZE, 3), include_top=False, alpha=ALPHA, weights='imagenet', classes=4) model.summary() # to freeze layers 레이어 동결(가중치 그대로 사용) for layer in model.layers: layer.trainable = trainable # 입력 Task에 맞는 딥러닝 모델 변경 가능 block = model.get_layer("block_16_project_BN").output x = Conv2D(112, padding="same", kernel_size=3, strides=1, activation="relu")(block) x = Conv2D(112, padding="same", kernel_size=3, strides=1, use_bias=False)(x) x = BatchNormalization()(x) x = Activation("relu")(x) x = Conv2D(4, kernel_size=7, name="coords")(x) # 사이즈 224 경우 # x = Conv2D(4, kernel_size=3, name="coords")(x) # 사이즈 96 경우 x = Reshape((4,))(x) return Model(inputs=model.input, outputs=x) def main(): model = create_model() model.summary() train_datagen = DataGenerator(TRAIN_CSV) validation_datagen = Validation(generator=DataGenerator(VALIDATION_CSV)) test_datagen = Test_set(generator=DataGenerator(TEST_CSV)) train_acc = Training(generator=DataGenerator(TRAIN_CSV)) model.compile(loss="mean_squared_error", optimizer="adam", metrics=[]) # val_iou 값을 모니터링 하면서 최적 weight일때 저장 checkpoint = ModelCheckpoint("model-{epoch:02d}-{val_iou:.3f}.hdf5", monitor="val_iou", verbose=1, save_best_only=True, mode="max", period=1) # val_iou 값을 모니터링 하면서 stop = EarlyStopping(monitor="val_iou", patience=PATIENCE, mode="max") # val_iou 값을 모니터링 하면서 learning_rate 조절 reduce_lr = ReduceLROnPlateau(monitor="val_iou", factor=0.2, patience=10, min_lr=1e-7, verbose=1, mode="max") # 학습시 loss값 또는 callback 함수에 사용되는 train_acc, val_acc, test_acc 로그기록 남김 cv_csv_logger = CSVLogger('parrot_localization_mobilenetv2.csv') # 모델 학습 model.fit_generator(generator=train_datagen, epochs=EPOCHS, callbacks=[validation_datagen, test_datagen, train_acc, checkpoint, reduce_lr, stop, cv_csv_logger], 
workers=THREADS, use_multiprocessing=MULTI_PROCESSING, shuffle=True, verbose=1) if __name__ == "__main__": main()
def on_epoch_end(self, e
identifier_body
Training.py
import csv import math import matplotlib.pyplot as plt from PIL import Image import numpy as np from tensorflow.keras import Model from tensorflow.keras.applications.mobilenet_v2 import MobileNetV2, preprocess_input from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau, Callback, CSVLogger from tensorflow.keras.layers import Conv2D, Reshape, Activation, BatchNormalization from tensorflow.keras.utils import Sequence from tensorflow.keras.backend import epsilon from keras.regularizers import l2 import tensorflow as tf tf.compat.v1.disable_eager_execution() # 0.35, 0.5, 0.75, 1.0 ALPHA = 0.35 # 96, 128, 160, 192, 224 IMAGE_SIZE = 224 EPOCHS = 200 BATCH_SIZE = 32 PATIENCE = 10 MULTI_PROCESSING = True THREADS = 4 # train 과 validation 경로 TRAIN_CSV = "train_shuffle.csv" # train data .csv VALIDATION_CSV = "validation_shuffle.csv" # validation data .csv TEST_CSV = "test_uniform.csv" # test data .csv MODEL_SAVE_FOLDER_PATH = './model/' # 저장될 모델 디렉토리 설정 class DataGenerator(Sequence): def __init__(self, csv_file): self.paths = [] with open(csv_file, "r") as file: self.coords = np.zeros((sum(1 for line in file), 4)) file.seek(0) reader = csv.reader(file, delimiter=",") for index, row in enumerate(reader): for i, r in enumerate(row[1:7]): # row[i+1] = int(r) row[i+1] = float(r) path, image_height, image_width, x0, y0, width, height, _, _ = row self.coords[index, 0] = float((x0 * IMAGE_SIZE / image_width) / IMAGE_SIZE) # xmin self.coords[index, 1] = float((y0 * IMAGE_SIZE / image_height) / IMAGE_SIZE) # ymin self.coords[index, 2] = float((width * IMAGE_SIZE / image_width) / IMAGE_SIZE) # width self.coords[index, 3] = float((height * IMAGE_SIZE / image_height) / IMAGE_SIZE) # height # int형 # self.coords[index, 0] = x0 * IMAGE_SIZE / image_width / image_width # xmin # self.coords[index, 1] = y0 * IMAGE_SIZE / image_height # ymin # self.coords[index, 2] = (x1 - x0) * IMAGE_SIZE / image_width #width # self.coords[index, 3] = (y1 - y0) * IMAGE_SIZE / 
image_height #height # self.coords[index, 2] = width * IMAGE_SIZE / image_width # width # self.coords[index, 3] = height * IMAGE_SIZE / image_height # height self.paths.append(path) def __len__(self): return math.ceil(len(self.coords) / BATCH_SIZE) def __getitem__(self, idx): batch_paths = self.paths[idx * BATCH_SIZE:(idx + 1) * BATCH_SIZE] batch_coords = self.coords[idx * BATCH_SIZE:(idx + 1) * BATCH_SIZE] batch_images = np.zeros((len(batch_paths), IMAGE_SIZE, IMAGE_SIZE, 3), dtype=np.float32) for i, f in enumerate(batch_paths): img = Image.open(f) img = img.resize((IMAGE_SIZE, IMAGE_SIZE)) img = img.convert('RGB') batch_images[i] = preprocess_input(np.array(img, dtype=np.float32)) img.close() return batch_images, batch_coords class Training(Callback): def __init__(self, generator): self.generator = generator def on_epoch_end(self, epoch, logs): train_pos_count = 0 train_neg_count = 0 for i in range(len(self.generator)): batch_images, gt = self.generator[i] pred = self.model.predict_on_batch(batch_images) train_gt_len = len(gt) pred = np.maximum(pred, 0) ######################################################################################################## # iou 계산 diff_width = np.minimum(gt[:, 0] + gt[:, 2], pred[:, 0] + pred[:, 2]) - np.maximum(gt[:, 0], pred[:, 0]) diff_height = np.minimum(gt[:, 1] + gt[:, 3], pred[:, 1] + pred[:, 3]) - np.maximum(gt[:, 1], pred[:, 1]) intersection = np.maximum(diff_width, 0) * np.maximum(diff_height, 0) area_gt = gt[:, 2] * gt[:, 3] area_pred = pred[:, 2] * pred[:, 3] union = np.maximum(area_gt + area_pred - intersection, 0) train_iou = np.round(intersection / (union + epsilon()), 4) ######################################################################################################## for q in range(train_gt_len): if train_iou[q] >= 0.5: # iou threshold 0.5 지정 train_pos_count += 1 else: train_neg_count += 1 train_data_count = 59521 # train_총 갯수 train_acc = np.round(train_pos_count / train_data_count, 4) logs["train_acc"] = 
train_acc print(" - train_acc: {}".format(train_acc)) class Validation(Callback): def __init__(self, generator): self.generator = generator def on_epoch_end(self, epoch, logs): mse = 0 intersections = 0 unions = 0 val_pos_count = 0 val_neg_count = 0 for i in range(len(self.generator)): batch_images, gt = self.generator[i] pred = self.model.predict_on_batch(batch_images) mse += np.linalg.norm(gt - pred, ord='fro') / pred.shape[0] val_gt_len = len(gt) pred = np.maximum(pred, 0) ######################################################################################################## # iou 계산 diff_width = np.minimum(gt[:, 0] + gt[:, 2], pred[:, 0] + pred[:, 2]) - np.maximum(gt[:, 0], pred[:, 0]) diff_height = np.minimum(gt[:, 1] + gt[:, 3], pred[:, 1] + pred[:, 3]) - np.maximum(gt[:, 1], pred[:, 1]) intersection = np.maximum(diff_width, 0) * np.maximum(diff_height, 0) area_gt = gt[:, 2] * gt[:, 3] area_pred = pred[:, 2] * pred[:, 3] union = np.maximum(area_gt + area_pred - intersection, 0) val_iou = np.round(intersection / (union + epsilon()), 4) ######################################################################################################## for q in range(val_gt_len): if val_iou[q] >= 0.5: # iou threshold 0.5 지정 val_pos_count += 1 else: val_neg_count += 1 intersections += np.sum(intersection * (union > 0)) unions += np.sum(union) val_data_count = 14879 # validation 총 갯수 val_acc = np.round(val_pos_count / val_data_count, 4) logs["val_acc"] = val_acc iou = np.round(intersections / (unions + epsilon()), 4) logs["val_iou"] = iou
class Test_set(Callback): def __init__(self, generator): self.generator = generator def on_epoch_end(self, epoch, logs): test_pos_count = 0 test_neg_count = 0 for i in range(len(self.generator)): batch_images, gt = self.generator[i] pred = self.model.predict_on_batch(batch_images) test_gt_len = len(gt) pred = np.maximum(pred, 0) ######################################################################################################## # iou 계산 diff_width = np.minimum(gt[:, 0] + gt[:, 2], pred[:, 0] + pred[:, 2]) - np.maximum(gt[:, 0], pred[:, 0]) diff_height = np.minimum(gt[:, 1] + gt[:, 3], pred[:, 1] + pred[:, 3]) - np.maximum(gt[:, 1], pred[:, 1]) intersection = np.maximum(diff_width, 0) * np.maximum(diff_height, 0) area_gt = gt[:, 2] * gt[:, 3] area_pred = pred[:, 2] * pred[:, 3] union = np.maximum(area_gt + area_pred - intersection, 0) test_iou = np.round(intersection / (union + epsilon()), 4) ######################################################################################################## for q in range(test_gt_len): if test_iou[q] >= 0.5: # iou threshold 0.5 지정 test_pos_count += 1 else: test_neg_count += 1 test_data_count = 1245 # test 총 갯수 test_acc = np.round(test_pos_count / test_data_count, 4) logs["test_acc"] = test_acc print(" - test_acc: {}".format(test_acc)) def create_model(trainable=False): # pre-trained 된 moblienetv2 아키텍처 이용 model = MobileNetV2(input_shape=(IMAGE_SIZE, IMAGE_SIZE, 3), include_top=False, alpha=ALPHA, weights='imagenet', classes=4) model.summary() # to freeze layers 레이어 동결(가중치 그대로 사용) for layer in model.layers: layer.trainable = trainable # 입력 Task에 맞는 딥러닝 모델 변경 가능 block = model.get_layer("block_16_project_BN").output x = Conv2D(112, padding="same", kernel_size=3, strides=1, activation="relu")(block) x = Conv2D(112, padding="same", kernel_size=3, strides=1, use_bias=False)(x) x = BatchNormalization()(x) x = Activation("relu")(x) x = Conv2D(4, kernel_size=7, name="coords")(x) # 사이즈 224 경우 # x = Conv2D(4, kernel_size=3, 
name="coords")(x) # 사이즈 96 경우 x = Reshape((4,))(x) return Model(inputs=model.input, outputs=x) def main(): model = create_model() model.summary() train_datagen = DataGenerator(TRAIN_CSV) validation_datagen = Validation(generator=DataGenerator(VALIDATION_CSV)) test_datagen = Test_set(generator=DataGenerator(TEST_CSV)) train_acc = Training(generator=DataGenerator(TRAIN_CSV)) model.compile(loss="mean_squared_error", optimizer="adam", metrics=[]) # val_iou 값을 모니터링 하면서 최적 weight일때 저장 checkpoint = ModelCheckpoint("model-{epoch:02d}-{val_iou:.3f}.hdf5", monitor="val_iou", verbose=1, save_best_only=True, mode="max", period=1) # val_iou 값을 모니터링 하면서 stop = EarlyStopping(monitor="val_iou", patience=PATIENCE, mode="max") # val_iou 값을 모니터링 하면서 learning_rate 조절 reduce_lr = ReduceLROnPlateau(monitor="val_iou", factor=0.2, patience=10, min_lr=1e-7, verbose=1, mode="max") # 학습시 loss값 또는 callback 함수에 사용되는 train_acc, val_acc, test_acc 로그기록 남김 cv_csv_logger = CSVLogger('parrot_localization_mobilenetv2.csv') # 모델 학습 model.fit_generator(generator=train_datagen, epochs=EPOCHS, callbacks=[validation_datagen, test_datagen, train_acc, checkpoint, reduce_lr, stop, cv_csv_logger], workers=THREADS, use_multiprocessing=MULTI_PROCESSING, shuffle=True, verbose=1) if __name__ == "__main__": main()
mse = np.round(mse, 4) logs["val_mse"] = mse print(" - val_iou: {} - val_mse: {} - val_acc: {}".format(iou, mse, val_acc))
random_line_split
Training.py
import csv import math import matplotlib.pyplot as plt from PIL import Image import numpy as np from tensorflow.keras import Model from tensorflow.keras.applications.mobilenet_v2 import MobileNetV2, preprocess_input from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau, Callback, CSVLogger from tensorflow.keras.layers import Conv2D, Reshape, Activation, BatchNormalization from tensorflow.keras.utils import Sequence from tensorflow.keras.backend import epsilon from keras.regularizers import l2 import tensorflow as tf tf.compat.v1.disable_eager_execution() # 0.35, 0.5, 0.75, 1.0 ALPHA = 0.35 # 96, 128, 160, 192, 224 IMAGE_SIZE = 224 EPOCHS = 200 BATCH_SIZE = 32 PATIENCE = 10 MULTI_PROCESSING = True THREADS = 4 # train 과 validation 경로 TRAIN_CSV = "train_shuffle.csv" # train data .csv VALIDATION_CSV = "validation_shuffle.csv" # validation data .csv TEST_CSV = "test_uniform.csv" # test data .csv MODEL_SAVE_FOLDER_PATH = './model/' # 저장될 모델 디렉토리 설정 class DataGenerator(Sequence): def __init__(self, csv_file): self.paths = [] with open(csv_file, "r") as file: self.coords = np.zeros((sum(1 for line in file), 4)) file.seek(0) reader = csv.reader(file, delimiter=",") for index, row in enumerate(reader): for i, r in enumerate(row[1:7]): # row[i+1] = int(r) row[i+1] = float(r) path, image_height, image_width, x0, y0, width, height, _, _ = row self.coords[index, 0] = float((x0 * IMAGE_SIZE / image_width) / IMAGE_SIZE) # xmin self.coords[index, 1] = float((y0 * IMAGE_SIZE / image_height) / IMAGE_SIZE) # ymin self.coords[index, 2] = float((width * IMAGE_SIZE / image_width) / IMAGE_SIZE) # width self.coords[index, 3] = float((height * IMAGE_SIZE / image_height) / IMAGE_SIZE) # height # int형 # self.coords[index, 0] = x0 * IMAGE_SIZE / image_width / image_width # xmin # self.coords[index, 1] = y0 * IMAGE_SIZE / image_height # ymin # self.coords[index, 2] = (x1 - x0) * IMAGE_SIZE / image_width #width # self.coords[index, 3] = (y1 - y0) * IMAGE_SIZE / 
image_height #height # self.coords[index, 2] = width * IMAGE_SIZE / image_width # width # self.coords[index, 3] = height * IMAGE_SIZE / image_height # height self.paths.append(path) def __len__(self): return math.ceil(len(self.coords) / BATCH_SIZE) def __getitem__(self, idx): batch_paths = self.paths[idx * BATCH_SIZE:(idx + 1) * BATCH_SIZE] batch_coords = self.coords[idx * BATCH_SIZE:(idx + 1) * BATCH_SIZE] batch_images = np.zeros((len(batch_paths), IMAGE_SIZE, IMAGE_SIZE, 3), dtype=np.float32) for i, f in enumerate(batch_paths): img = Image.open(f) img = img.resize((IMAGE_SIZE, IMAGE_SIZE)) img = img.convert('RGB') batch_images[i] = preprocess_input(np.array(img, dtype=np.float32)) img.close() return batch_images, batch_coords class Training(Callback): def __init__(self, generator): self.generator = generator def on_epoch_end(self, epoch, logs): train_pos_count = 0 train_neg_count = 0 for i in range(len(self.generator)): batch_images, gt = self.generator[i] pred = self.model.predict_on_batch(batch_images) train_gt_len = len(gt) pred = np.maximum(pred, 0) ######################################################################################################## # iou 계산 diff_width = np.minimum(gt[:, 0] + gt[:, 2], pred[:, 0] + pred[:, 2]) - np.maximum(gt[:, 0], pred[:, 0]) diff_height = np.minimum(gt[:, 1] + gt[:, 3], pred[:, 1] + pred[:, 3]) - np.maximum(gt[:, 1], pred[:, 1]) intersection = np.maximum(diff_width, 0) * np.maximum(diff_height, 0) area_gt = gt[:, 2] * gt[:, 3] area_pred = pred[:, 2] * pred[:, 3] union = np.maximum(area_gt + area_pred - intersection, 0) train_iou = np.round(intersection / (union + epsilon()), 4) ######################################################################################################## for q in range(train_gt_len): if train_iou[q] >= 0.5: # iou threshold 0.5 지정 train_pos_count += 1 else: train_neg_count += 1 train_data_count = 59521 # train_총 갯수 train_acc = np.round(train_pos_count / train_data_count, 4) logs["train_acc"] = 
train_acc print(" - train_acc: {}".format(train_acc)) class Validation(Callback): def __init__(self, generator): self.generator = generator def on_epoch_end(self, epoch, logs): mse
intersections = 0 unions = 0 val_pos_count = 0 val_neg_count = 0 for i in range(len(self.generator)): batch_images, gt = self.generator[i] pred = self.model.predict_on_batch(batch_images) mse += np.linalg.norm(gt - pred, ord='fro') / pred.shape[0] val_gt_len = len(gt) pred = np.maximum(pred, 0) ######################################################################################################## # iou 계산 diff_width = np.minimum(gt[:, 0] + gt[:, 2], pred[:, 0] + pred[:, 2]) - np.maximum(gt[:, 0], pred[:, 0]) diff_height = np.minimum(gt[:, 1] + gt[:, 3], pred[:, 1] + pred[:, 3]) - np.maximum(gt[:, 1], pred[:, 1]) intersection = np.maximum(diff_width, 0) * np.maximum(diff_height, 0) area_gt = gt[:, 2] * gt[:, 3] area_pred = pred[:, 2] * pred[:, 3] union = np.maximum(area_gt + area_pred - intersection, 0) val_iou = np.round(intersection / (union + epsilon()), 4) ######################################################################################################## for q in range(val_gt_len): if val_iou[q] >= 0.5: # iou threshold 0.5 지정 val_pos_count += 1 else: val_neg_count += 1 intersections += np.sum(intersection * (union > 0)) unions += np.sum(union) val_data_count = 14879 # validation 총 갯수 val_acc = np.round(val_pos_count / val_data_count, 4) logs["val_acc"] = val_acc iou = np.round(intersections / (unions + epsilon()), 4) logs["val_iou"] = iou mse = np.round(mse, 4) logs["val_mse"] = mse print(" - val_iou: {} - val_mse: {} - val_acc: {}".format(iou, mse, val_acc)) class Test_set(Callback): def __init__(self, generator): self.generator = generator def on_epoch_end(self, epoch, logs): test_pos_count = 0 test_neg_count = 0 for i in range(len(self.generator)): batch_images, gt = self.generator[i] pred = self.model.predict_on_batch(batch_images) test_gt_len = len(gt) pred = np.maximum(pred, 0) ######################################################################################################## # iou 계산 diff_width = np.minimum(gt[:, 0] + gt[:, 2], pred[:, 0] + 
pred[:, 2]) - np.maximum(gt[:, 0], pred[:, 0]) diff_height = np.minimum(gt[:, 1] + gt[:, 3], pred[:, 1] + pred[:, 3]) - np.maximum(gt[:, 1], pred[:, 1]) intersection = np.maximum(diff_width, 0) * np.maximum(diff_height, 0) area_gt = gt[:, 2] * gt[:, 3] area_pred = pred[:, 2] * pred[:, 3] union = np.maximum(area_gt + area_pred - intersection, 0) test_iou = np.round(intersection / (union + epsilon()), 4) ######################################################################################################## for q in range(test_gt_len): if test_iou[q] >= 0.5: # iou threshold 0.5 지정 test_pos_count += 1 else: test_neg_count += 1 test_data_count = 1245 # test 총 갯수 test_acc = np.round(test_pos_count / test_data_count, 4) logs["test_acc"] = test_acc print(" - test_acc: {}".format(test_acc)) def create_model(trainable=False): # pre-trained 된 moblienetv2 아키텍처 이용 model = MobileNetV2(input_shape=(IMAGE_SIZE, IMAGE_SIZE, 3), include_top=False, alpha=ALPHA, weights='imagenet', classes=4) model.summary() # to freeze layers 레이어 동결(가중치 그대로 사용) for layer in model.layers: layer.trainable = trainable # 입력 Task에 맞는 딥러닝 모델 변경 가능 block = model.get_layer("block_16_project_BN").output x = Conv2D(112, padding="same", kernel_size=3, strides=1, activation="relu")(block) x = Conv2D(112, padding="same", kernel_size=3, strides=1, use_bias=False)(x) x = BatchNormalization()(x) x = Activation("relu")(x) x = Conv2D(4, kernel_size=7, name="coords")(x) # 사이즈 224 경우 # x = Conv2D(4, kernel_size=3, name="coords")(x) # 사이즈 96 경우 x = Reshape((4,))(x) return Model(inputs=model.input, outputs=x) def main(): model = create_model() model.summary() train_datagen = DataGenerator(TRAIN_CSV) validation_datagen = Validation(generator=DataGenerator(VALIDATION_CSV)) test_datagen = Test_set(generator=DataGenerator(TEST_CSV)) train_acc = Training(generator=DataGenerator(TRAIN_CSV)) model.compile(loss="mean_squared_error", optimizer="adam", metrics=[]) # val_iou 값을 모니터링 하면서 최적 weight일때 저장 checkpoint = 
ModelCheckpoint("model-{epoch:02d}-{val_iou:.3f}.hdf5", monitor="val_iou", verbose=1, save_best_only=True, mode="max", period=1) # val_iou 값을 모니터링 하면서 stop = EarlyStopping(monitor="val_iou", patience=PATIENCE, mode="max") # val_iou 값을 모니터링 하면서 learning_rate 조절 reduce_lr = ReduceLROnPlateau(monitor="val_iou", factor=0.2, patience=10, min_lr=1e-7, verbose=1, mode="max") # 학습시 loss값 또는 callback 함수에 사용되는 train_acc, val_acc, test_acc 로그기록 남김 cv_csv_logger = CSVLogger('parrot_localization_mobilenetv2.csv') # 모델 학습 model.fit_generator(generator=train_datagen, epochs=EPOCHS, callbacks=[validation_datagen, test_datagen, train_acc, checkpoint, reduce_lr, stop, cv_csv_logger], workers=THREADS, use_multiprocessing=MULTI_PROCESSING, shuffle=True, verbose=1) if __name__ == "__main__": main()
= 0
identifier_name
Training.py
import csv import math import matplotlib.pyplot as plt from PIL import Image import numpy as np from tensorflow.keras import Model from tensorflow.keras.applications.mobilenet_v2 import MobileNetV2, preprocess_input from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau, Callback, CSVLogger from tensorflow.keras.layers import Conv2D, Reshape, Activation, BatchNormalization from tensorflow.keras.utils import Sequence from tensorflow.keras.backend import epsilon from keras.regularizers import l2 import tensorflow as tf tf.compat.v1.disable_eager_execution() # 0.35, 0.5, 0.75, 1.0 ALPHA = 0.35 # 96, 128, 160, 192, 224 IMAGE_SIZE = 224 EPOCHS = 200 BATCH_SIZE = 32 PATIENCE = 10 MULTI_PROCESSING = True THREADS = 4 # train 과 validation 경로 TRAIN_CSV = "train_shuffle.csv" # train data .csv VALIDATION_CSV = "validation_shuffle.csv" # validation data .csv TEST_CSV = "test_uniform.csv" # test data .csv MODEL_SAVE_FOLDER_PATH = './model/' # 저장될 모델 디렉토리 설정 class DataGenerator(Sequence): def __init__(self, csv_file): self.paths = [] with open(csv_file, "r") as file: self.coords = np.zeros((sum(1 for line in file), 4)) file.seek(0) reader = csv.reader(file, delimiter=",") for index, row in enumerate(reader): for i, r in enumerate(row[1:7]): # row[i+1] = int(r) row[i+1] = float(r) path, image_height, image_width, x0, y0, width, height, _, _ = row self.coords[index, 0] = float((x0 * IMAGE_SIZE / image_width) / IMAGE_SIZE) # xmin self.coords[index, 1] = float((y0 * IMAGE_SIZE / image_height) / IMAGE_SIZE) # ymin self.coords[index, 2] = float((width * IMAGE_SIZE / image_width) / IMAGE_SIZE) # width self.coords[index, 3] = float((height * IMAGE_SIZE / image_height) / IMAGE_SIZE) # height # int형 # self.coords[index, 0] = x0 * IMAGE_SIZE / image_width / image_width # xmin # self.coords[index, 1] = y0 * IMAGE_SIZE / image_height # ymin # self.coords[index, 2] = (x1 - x0) * IMAGE_SIZE / image_width #width # self.coords[index, 3] = (y1 - y0) * IMAGE_SIZE / 
image_height #height # self.coords[index, 2] = width * IMAGE_SIZE / image_width # width # self.coords[index, 3] = height * IMAGE_SIZE / image_height # height self.paths.append(path) def __len__(self): return math.ceil(len(self.coords) / BATCH_SIZE) def __getitem__(self, idx): batch_paths = self.paths[idx * BATCH_SIZE:(idx + 1) * BATCH_SIZE] batch_coords = self.coords[idx * BATCH_SIZE:(idx + 1) * BATCH_SIZE] batch_images = np.zeros((len(batch_paths), IMAGE_SIZE, IMAGE_SIZE, 3), dtype=np.float32) for i, f in enumerate(batch_paths): img = Image.open(f) img = img.resize((IMAGE_SIZE, IMAGE_SIZE)) img = img.convert('RGB') batch_images[i] = preprocess_input(np.array(img, dtype=np.float32)) img.close() return batch_images, batch_coords class Training(Callback): def __init__(self, generator): self.generator = generator def on_epoch_end(self, epoch, logs): train_pos_count = 0 train_neg_count = 0 for i in range(len(self.generator)): batch_images, gt = self.generator[i] pred = self.model.predict_on_batch(batch_images) train_gt_len = len(gt) pred = np.maximum(pred, 0) ######################################################################################################## # iou 계산 diff_width = np.minimum(gt[:, 0] + gt[:, 2], pred[:, 0] + pred[:, 2]) - np.maximum(gt[:, 0], pred[:, 0]) diff_height = np.minimum(gt[:, 1] + gt[:, 3], pred[:, 1] + pred[:, 3]) - np.maximum(gt[:, 1], pred[:, 1]) intersection = np.maximum(diff_width, 0) * np.maximum(diff_height, 0) area_gt = gt[:, 2] * gt[:, 3] area_pred = pred[:, 2] * pred[:, 3] union = np.maximum(area_gt + area_pred - intersection, 0) train_iou = np.round(intersection / (union + epsilon()), 4) ######################################################################################################## for q in range(train_gt_len): if train_iou[q] >= 0.5: # iou thre
train_총 갯수 train_acc = np.round(train_pos_count / train_data_count, 4) logs["train_acc"] = train_acc print(" - train_acc: {}".format(train_acc)) class Validation(Callback): def __init__(self, generator): self.generator = generator def on_epoch_end(self, epoch, logs): mse = 0 intersections = 0 unions = 0 val_pos_count = 0 val_neg_count = 0 for i in range(len(self.generator)): batch_images, gt = self.generator[i] pred = self.model.predict_on_batch(batch_images) mse += np.linalg.norm(gt - pred, ord='fro') / pred.shape[0] val_gt_len = len(gt) pred = np.maximum(pred, 0) ######################################################################################################## # iou 계산 diff_width = np.minimum(gt[:, 0] + gt[:, 2], pred[:, 0] + pred[:, 2]) - np.maximum(gt[:, 0], pred[:, 0]) diff_height = np.minimum(gt[:, 1] + gt[:, 3], pred[:, 1] + pred[:, 3]) - np.maximum(gt[:, 1], pred[:, 1]) intersection = np.maximum(diff_width, 0) * np.maximum(diff_height, 0) area_gt = gt[:, 2] * gt[:, 3] area_pred = pred[:, 2] * pred[:, 3] union = np.maximum(area_gt + area_pred - intersection, 0) val_iou = np.round(intersection / (union + epsilon()), 4) ######################################################################################################## for q in range(val_gt_len): if val_iou[q] >= 0.5: # iou threshold 0.5 지정 val_pos_count += 1 else: val_neg_count += 1 intersections += np.sum(intersection * (union > 0)) unions += np.sum(union) val_data_count = 14879 # validation 총 갯수 val_acc = np.round(val_pos_count / val_data_count, 4) logs["val_acc"] = val_acc iou = np.round(intersections / (unions + epsilon()), 4) logs["val_iou"] = iou mse = np.round(mse, 4) logs["val_mse"] = mse print(" - val_iou: {} - val_mse: {} - val_acc: {}".format(iou, mse, val_acc)) class Test_set(Callback): def __init__(self, generator): self.generator = generator def on_epoch_end(self, epoch, logs): test_pos_count = 0 test_neg_count = 0 for i in range(len(self.generator)): batch_images, gt = 
self.generator[i] pred = self.model.predict_on_batch(batch_images) test_gt_len = len(gt) pred = np.maximum(pred, 0) ######################################################################################################## # iou 계산 diff_width = np.minimum(gt[:, 0] + gt[:, 2], pred[:, 0] + pred[:, 2]) - np.maximum(gt[:, 0], pred[:, 0]) diff_height = np.minimum(gt[:, 1] + gt[:, 3], pred[:, 1] + pred[:, 3]) - np.maximum(gt[:, 1], pred[:, 1]) intersection = np.maximum(diff_width, 0) * np.maximum(diff_height, 0) area_gt = gt[:, 2] * gt[:, 3] area_pred = pred[:, 2] * pred[:, 3] union = np.maximum(area_gt + area_pred - intersection, 0) test_iou = np.round(intersection / (union + epsilon()), 4) ######################################################################################################## for q in range(test_gt_len): if test_iou[q] >= 0.5: # iou threshold 0.5 지정 test_pos_count += 1 else: test_neg_count += 1 test_data_count = 1245 # test 총 갯수 test_acc = np.round(test_pos_count / test_data_count, 4) logs["test_acc"] = test_acc print(" - test_acc: {}".format(test_acc)) def create_model(trainable=False): # pre-trained 된 moblienetv2 아키텍처 이용 model = MobileNetV2(input_shape=(IMAGE_SIZE, IMAGE_SIZE, 3), include_top=False, alpha=ALPHA, weights='imagenet', classes=4) model.summary() # to freeze layers 레이어 동결(가중치 그대로 사용) for layer in model.layers: layer.trainable = trainable # 입력 Task에 맞는 딥러닝 모델 변경 가능 block = model.get_layer("block_16_project_BN").output x = Conv2D(112, padding="same", kernel_size=3, strides=1, activation="relu")(block) x = Conv2D(112, padding="same", kernel_size=3, strides=1, use_bias=False)(x) x = BatchNormalization()(x) x = Activation("relu")(x) x = Conv2D(4, kernel_size=7, name="coords")(x) # 사이즈 224 경우 # x = Conv2D(4, kernel_size=3, name="coords")(x) # 사이즈 96 경우 x = Reshape((4,))(x) return Model(inputs=model.input, outputs=x) def main(): model = create_model() model.summary() train_datagen = DataGenerator(TRAIN_CSV) validation_datagen = 
Validation(generator=DataGenerator(VALIDATION_CSV)) test_datagen = Test_set(generator=DataGenerator(TEST_CSV)) train_acc = Training(generator=DataGenerator(TRAIN_CSV)) model.compile(loss="mean_squared_error", optimizer="adam", metrics=[]) # val_iou 값을 모니터링 하면서 최적 weight일때 저장 checkpoint = ModelCheckpoint("model-{epoch:02d}-{val_iou:.3f}.hdf5", monitor="val_iou", verbose=1, save_best_only=True, mode="max", period=1) # val_iou 값을 모니터링 하면서 stop = EarlyStopping(monitor="val_iou", patience=PATIENCE, mode="max") # val_iou 값을 모니터링 하면서 learning_rate 조절 reduce_lr = ReduceLROnPlateau(monitor="val_iou", factor=0.2, patience=10, min_lr=1e-7, verbose=1, mode="max") # 학습시 loss값 또는 callback 함수에 사용되는 train_acc, val_acc, test_acc 로그기록 남김 cv_csv_logger = CSVLogger('parrot_localization_mobilenetv2.csv') # 모델 학습 model.fit_generator(generator=train_datagen, epochs=EPOCHS, callbacks=[validation_datagen, test_datagen, train_acc, checkpoint, reduce_lr, stop, cv_csv_logger], workers=THREADS, use_multiprocessing=MULTI_PROCESSING, shuffle=True, verbose=1) if __name__ == "__main__": main()
shold 0.5 지정 train_pos_count += 1 else: train_neg_count += 1 train_data_count = 59521 #
conditional_block
models.py
from django.db import models from django.urls import reverse from django.conf import settings from django.contrib.auth.models import User from django.utils.translation import ugettext_lazy as _ from django.core.files.storage import default_storage as storage from django.core.validators import MaxValueValidator, MinValueValidator from PIL import Image from datetime import date HOURS = ( (0, '12 AM'), (1, '1 AM'), (2, '2 AM'), (3, '3 AM'), (4, '4 AM'), (5, '5 AM'), (6, '6 AM'), (7, '7 AM'), (8, '8 AM'), (9, '9 AM'), (10, '10 AM'), (11, '11 AM'), (12, '12 PM'), (13, '1 PM'), (14, '2 PM'), (15, '3 PM'), (16, '4 PM'), (17, '5 PM'), (18, '6 PM'), (19, '7 PM'), (20, '8 PM'), (21, '9 PM'), (22, '10 PM'), (23, '11 PM') ) MINUTES = ( (00, '00'), (15, '15'), (30, '30'), (45, '45') ) LANGUAGES = ( ('Albanian', 'Albanian'), ('Arabic', 'Arabic'), ('Austro-Bavarian', 'Austro-Bavarian'), ('Bengali', 'Bengali'), ('Bulgarian', 'Bulgarian'), ('Croatian', 'Croatian'), ('Czech', 'Czech'), ('Danish', 'Danish'), ('Dutch', 'Dutch'), ('English', 'English'), ('Finnish', 'Finnish'), ('French', 'French'), ('German', 'German'), ('Greek', 'Greek'), ('Hindi', 'Hindi'), ('Hungarian', 'Hungarian'), ('Italian', 'Italian'), ('Japanese', 'Japanese'), ('Javanese', 'Javanese'), ('Korean', 'Korean'), ('Lahnda', 'Lahnda'), ('Mandarin', 'Mandarin'), ('Marathi', 'Marathi'), ('Neapolitan', 'Neapolitan'), ('Norwegian', 'Norwegian'), ('Polish', 'Polish'), ('Portuguese', 'Portuguese'), ('Romanian', 'Romanian'), ('Russian', 'Russian'), ('Serbian', 'Serbian'), ('Slovak', 'Slovak'), ('Spanish', 'Spanish'), ('Swedish', 'Swedish'), ('Tamil', 'Tamil'), ('Telugu', 'Telugu'), ('Turkish', 'Turkish'), ('Ukrainian', 'Ukrainian'), ('Urdu', 'Urdu'), ('Vietnamese', 'Vietnamese') ) CITIES = ( ('Abidjan', 'Abidjan'), ('Accra', 'Accra'), ('Addis Ababa', 'Addis Ababa'), ('Ahmedabad', 'Ahmedabad'), ('Albuquerque', 'Albuquerque'), ('Aleppo', 'Aleppo'), ('Alexandria', 'Alexandria'), ('Algiers', 'Algiers'), ('Anaheim', 'Anaheim'), 
('Anchorage', 'Anchorage'), ('Ankara', 'Ankara'), ('Arlington', 'Arlington'), ('Athens', 'Athens'), ('Atlanta', 'Atlanta'), ('Aurora', 'Aurora'), ('Austin', 'Austin'), ('Baghdad', 'Baghdad'), ('Bakersfield', 'Bakersfield'), ('Baku', 'Baku'), ('Baltimore', 'Baltimore'), ('Bandung', 'Bandung'), ('Bangalore', 'Bangalore'), ('Bangkok', 'Bangkok'), ('Barcelona', 'Barcelona'), ('Baton Rouge', 'Baton Rouge'), ('Beijing', 'Beijing'), ('Bekasi', 'Bekasi'), ('Belém', 'Belém'), ('Belo Horizonte', 'Belo Horizonte'), ('Benoni', 'Benoni'), ('Berlin', 'Berlin'), ('Birmingham', 'Birmingham'), ('Bogota', 'Bogota'), ('Boise', 'Boise'), ('Boston', 'Boston'), ('Brasília', 'Brasília'), ('Brooklyn', 'Brooklyn'), ('Buenos Aires', 'Buenos Aires'), ('Buffalo', 'Buffalo'), ('Busan', 'Busan'), ('Cairo', 'Cairo'), ('Cali', 'Cali'), ('Campinas', 'Campinas'), ('Cape Town', 'Cape Town'), ('Caracas', 'Caracas'), ('Casablanca', 'Casablanca'), ('Chandler', 'Chandler'), ('Changchun', 'Changchun'), ('Changsha', 'Changsha'), ('Charlotte', 'Charlotte'), ('Chengdu', 'Chengdu'), ('Chennai', 'Chennai'), ('Chesapeake', 'Chesapeake'), ('Chicago', 'Chicago'), ('Chittagong', 'Chittagong'), ('Chongqing', 'Chongqing'), ('Chula Vista', 'Chula Vista'), ('Cincinnati', 'Cincinnati'), ('Cleveland', 'Cleveland'), ('Colorado Springs', 'Colorado Springs'), ('Columbus', 'Columbus'), ('Corpus Christi', 'Corpus Christi'), ('Curitiba', 'Curitiba'), ('Daegu', 'Daegu'), ('Dakar', 'Dakar'), ('Dalian', 'Dalian'), ('Dallas', 'Dallas'), ('Damascus', 'Damascus'), ('Dar es Salaam', 'Dar es Salaam'), ('Delhi', 'Delhi'), ('Denver', 'Denver'), ('Detroit', 'Detroit'), ('Dhaka', 'Dhaka'), ('Dongguan', 'Dongguan'), ('Durban', 'Durban'), ('Durham', 'Durham'), ('El Giza', 'El Giza'), ('El Paso', 'El Paso'), ('Faisalabad', 'Faisalabad'), ('Fort Wayne', 'Fort Wayne'), ('Fort Worth', 'Fort Worth'), ('Fortaleza', 'Fortaleza'), ('Frankfurt', 'Frankfurt'), ('Fremont', 'Fremont'), ('Fresno', 'Fresno'), ('Fukuoka', 'Fukuoka'), ('Fuzhou', 
'Fuzhou'), ('Garland', 'Garland'), ('George Town', 'George Town'), ('Gilbert', 'Gilbert'), ('Glendale', 'Glendale'), ('Greensboro', 'Greensboro'), ('Guadalajara', 'Guadalajara'), ('Guangzhou', 'Guangzhou'), ('Guayaquil', 'Guayaquil'), ('Guiyang', 'Guiyang'), ('Hangzhou', 'Hangzhou'), ('Hanoi', 'Hanoi'), ('Haora', 'Haora'), ('Harbin', 'Harbin'), ('Havana', 'Havana'), ('Hechi', 'Hechi'), ('Henderson', 'Henderson'), ('Hialeah', 'Hialeah'), ('Ho Chi Minh City', 'Ho Chi Minh City'), ('Hong Kong', 'Hong Kong'), ('Honolulu', 'Honolulu'), ('Houston', 'Houston'), ('Hyderabad', 'Hyderabad'),
('Incheon', 'Incheon'), ('Indianapolis', 'Indianapolis'), ('Irvine', 'Irvine'), ('Irving', 'Irving'), ('Istanbul', 'Istanbul'), ('İzmir', 'İzmir'), ('Jacksonville', 'Jacksonville'), ('Jaipur', 'Jaipur'), ('Jakarta', 'Jakarta'), ('Jeddah', 'Jeddah'), ('Jersey City', 'Jersey City'), ('Jilin', 'Jilin'), ('Jinan', 'Jinan'), ('Jinxi', 'Jinxi'), ('Johannesburg', 'Johannesburg'), ('Kabul', 'Kabul'), ('Kano', 'Kano'), ('Kanpur', 'Kanpur'), ('Kansas City', 'Kansas City'), ('Kaohsiung', 'Kaohsiung'), ('Karachi', 'Karachi'), ('Katowice', 'Katowice'), ('Khartoum', 'Khartoum'), ('Kiev', 'Kiev'), ('Kinshasa', 'Kinshasa'), ('Kolkata', 'Kolkata'), ('Kunming', 'Kunming'), ('Lagos', 'Lagos'), ('Lahore', 'Lahore'), ('Lanzhou', 'Lanzhou'), ('Laredo', 'Laredo'), ('Las Vegas', 'Las Vegas'), ('Lexington', 'Lexington'), ('Lima', 'Lima'), ('Lincoln', 'Lincoln'), ('Lisbon', 'Lisbon'), ('London', 'London'), ('Long Beach', 'Long Beach'), ('Los Angeles', 'Los Angeles'), ('Louisville', 'Louisville'), ('Luanda', 'Luanda'), ('Lubbock', 'Lubbock'), ('Lucknow', 'Lucknow'), ('Madison', 'Madison'), ('Madrid', 'Madrid'), ('Manchester', 'Manchester'), ('Manila', 'Manila'), ('Mannheim', 'Mannheim'), ('Mashhad', 'Mashhad'), ('Medan', 'Medan'), ('Medellín', 'Medellín'), ('Melbourne', 'Melbourne'), ('Memphis', 'Memphis'), ('Mesa', 'Mesa'), ('Mexico City', 'Mexico City'), ('Miami', 'Miami'), ('Milan', 'Milan'), ('Milwaukee', 'Milwaukee'), ('Minneapolis', 'Minneapolis'), ('Monterrey', 'Monterrey'), ('Montréal', 'Montréal'), ('Moscow', 'Moscow'), ('Mumbai', 'Mumbai'), ('Nagoya', 'Nagoya'), ('Nagpur', 'Nagpur'), ('Nairobi', 'Nairobi'), ('Nanchang', 'Nanchang'), ('Nanchong', 'Nanchong'), ('Nanjing', 'Nanjing'), ('Nanning', 'Nanning'), ('Naples', 'Naples'), ('Nashville', 'Nashville'), ('New Orleans', 'New Orleans'), ('New York', 'New York'), ('Newark', 'Newark'), ('Norfolk', 'Norfolk'), ('North Las Vegas', 'North Las Vegas'), ('Oakland', 'Oakland'), ('Oklahoma City', 'Oklahoma City'), ('Omaha', 'Omaha'), 
('Omdurman', 'Omdurman'), ('Orlando', 'Orlando'), ('Ōsaka', 'Ōsaka'), ('Paris', 'Paris'), ('Patna', 'Patna'), ('Philadelphia', 'Philadelphia'), ('Phoenix', 'Phoenix'), ('Pittsburgh', 'Pittsburgh'), ('Plano', 'Plano'), ('Portland', 'Portland'), ('Porto Alegre', 'Porto Alegre'), ('Puebla', 'Puebla'), ('Pune', 'Pune'), ('Pyongyang', 'Pyongyang'), ('Qingdao', 'Qingdao'), ('Queens', 'Queens'), ('Quezon City', 'Quezon City'), ('Raleigh', 'Raleigh'), ('Rangoon', 'Rangoon'), ('Recife', 'Recife'), ('Reno', 'Reno'), ('Richmond', 'Richmond'), ('Rio de Janeiro', 'Rio de Janeiro'), ('Riverside', 'Riverside'), ('Riyadh', 'Riyadh'), ('Rome', 'Rome'), ('Sacramento', 'Sacramento'), ('Saint Paul', 'Saint Paul'), ('Salvador', 'Salvador'), ('San Antonio', 'San Antonio'), ('San Diego', 'San Diego'), ('San Francisco', 'San Francisco'), ('San Jose', 'San Jose'), ('Santa Ana', 'Santa Ana'), ('Santa Cruz', 'Santa Cruz'), ('Santiago', 'Santiago'), ('Santo Domingo', 'Santo Domingo'), ('São Paulo', 'São Paulo'), ('Sapporo', 'Sapporo'), ('Scottsdale', 'Scottsdale'), ('Seattle', 'Seattle'), ('Sendai', 'Sendai'), ('Seoul', 'Seoul'), ('Shanghai', 'Shanghai'), ('Shenyeng', 'Shenyeng'), ('Shenzhen', 'Shenzhen'), ('Shijianzhuang', 'Shijianzhuang'), ('Singapore', 'Singapore'), ('Spokane', 'Spokane'), ('St. Louis', 'St. Louis'), ('St. Petersburg', 'St. 
Petersburg'), ('Stockton', 'Stockton'), ('Stuttgart', 'Stuttgart'), ('Surabaya', 'Surabaya'), ('Surat', 'Surat'), ('Sydney', 'Sydney'), ('Taichung', 'Taichung'), ('Taipei', 'Taipei'), ('Taiyuan', 'Taiyuan'), ('Tampa', 'Tampa'), ('Tashkent', 'Tashkent'), ('Tehran', 'Tehran'), ('Tel Aviv-Yafo', 'Tel Aviv-Yafo'), ('Tianjin', 'Tianjin'), ('Tokyo', 'Tokyo'), ('Toledo', 'Toledo'), ('Toronto', 'Toronto'), ('Tripoli', 'Tripoli'), ('Tucson', 'Tucson'), ('Tulsa', 'Tulsa'), ('Tunis', 'Tunis'), ('Ürümqi', 'Ürümqi'), ('Vancouver', 'Vancouver'), ('Vienna', 'Vienna'), ('Virginia Beach', 'Virginia Beach'), ('Washington', 'Washington'), ('Wenzhou', 'Wenzhou'), ('Wichita', 'Wichita'), ('Winston–Salem', 'Winston–Salem'), ('Wuhan', 'Wuhan'), ('Xiamen', 'Xiamen'), ('Xian', 'Xian'), ('Xiangtan', 'Xiangtan'), ('Yantai', 'Yantai'), ('Yokohama', 'Yokohama'), ('Zaozhuang', 'Zaozhuang'), ('Zhangzhou', 'Zhangzhou'), ('Zhengzhou', 'Zhengzhou'), ('Zibo', 'Zibo') ) CATEGORIES = ( ('Art', 'Art'), ('Food', 'Food'), ('Sports', 'Sports'), ('Adventure', 'Adventure'), ('Workshop', 'Workshop'), ('Other', 'Other') ) #----- PROFILE ------ class Profile(models.Model): user = models.OneToOneField(User, primary_key=True, on_delete=models.CASCADE) url = models.CharField(max_length=200) def __str__(self): return f'{self.user.username} Profile' # ---- EXPERIENCE ------ class Experience(models.Model): title = models.CharField(max_length=100) category = models.CharField(max_length=100, choices=CATEGORIES, default='Food') description = models.TextField(max_length=750) price = models.DecimalField(max_digits=7, decimal_places=2) hours = models.IntegerField(choices=HOURS, default=12) minutes = models.IntegerField(choices=MINUTES, default=0) language = models.CharField(max_length=100, choices=LANGUAGES, default='English') city = models.CharField(max_length=100, choices=CITIES, default='San Francisco') address = models.CharField(max_length=100) zipcode = models.IntegerField(default=99999) # user in this case is equal 
to the experience host user = models.ForeignKey(User, on_delete=models.CASCADE) def __str__(self): return f'{self.title} ({self.id})' def get_absolute_url(self): return reverse('exp_detail', kwargs = { 'pk': self.id }) # ---- BOOKING ------ class Booking(models.Model): # user in this case is equal to the experience participant user = models.ForeignKey(User, on_delete=models.CASCADE) experience = models.ForeignKey(Experience, on_delete=models.CASCADE) date = models.DateField('booking date') def __str__(self): return f'Booking ({self.id}) by {self.user} ({self.user_id}) for Experience ({self.experience_id})' def get_absolute_url(self): return reverse('bkng_show', kwargs = { 'exp_id': self.experience_id, 'bkng_id': self.id }) class Meta: ordering = ['date'] # ---- REVIEW ------ class Review(models.Model): user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, null=True) experience = models.ForeignKey(Experience, on_delete=models.CASCADE) rating = models.IntegerField(validators=[MinValueValidator(1) , MaxValueValidator(5)]) comment = models.TextField(max_length=250) def get_absolute_url(self): return reverse('exp_detail', kwargs = { 'pk': self.experience.id }) def __str__(self): return f'Review by {self.user} ({self.user_id}) for Experience ({self.experience_id})' # ---- PHOTO ------ class Photo(models.Model): url = models.CharField(max_length=200) experience = models.ForeignKey(Experience, on_delete=models.CASCADE) def __str__(self): return f"Photo for exp_id: {self.experience_id} @{self.url} ({self.id})"
('Ibadan', 'Ibadan'),
random_line_split
models.py
from django.db import models from django.urls import reverse from django.conf import settings from django.contrib.auth.models import User from django.utils.translation import ugettext_lazy as _ from django.core.files.storage import default_storage as storage from django.core.validators import MaxValueValidator, MinValueValidator from PIL import Image from datetime import date HOURS = ( (0, '12 AM'), (1, '1 AM'), (2, '2 AM'), (3, '3 AM'), (4, '4 AM'), (5, '5 AM'), (6, '6 AM'), (7, '7 AM'), (8, '8 AM'), (9, '9 AM'), (10, '10 AM'), (11, '11 AM'), (12, '12 PM'), (13, '1 PM'), (14, '2 PM'), (15, '3 PM'), (16, '4 PM'), (17, '5 PM'), (18, '6 PM'), (19, '7 PM'), (20, '8 PM'), (21, '9 PM'), (22, '10 PM'), (23, '11 PM') ) MINUTES = ( (00, '00'), (15, '15'), (30, '30'), (45, '45') ) LANGUAGES = ( ('Albanian', 'Albanian'), ('Arabic', 'Arabic'), ('Austro-Bavarian', 'Austro-Bavarian'), ('Bengali', 'Bengali'), ('Bulgarian', 'Bulgarian'), ('Croatian', 'Croatian'), ('Czech', 'Czech'), ('Danish', 'Danish'), ('Dutch', 'Dutch'), ('English', 'English'), ('Finnish', 'Finnish'), ('French', 'French'), ('German', 'German'), ('Greek', 'Greek'), ('Hindi', 'Hindi'), ('Hungarian', 'Hungarian'), ('Italian', 'Italian'), ('Japanese', 'Japanese'), ('Javanese', 'Javanese'), ('Korean', 'Korean'), ('Lahnda', 'Lahnda'), ('Mandarin', 'Mandarin'), ('Marathi', 'Marathi'), ('Neapolitan', 'Neapolitan'), ('Norwegian', 'Norwegian'), ('Polish', 'Polish'), ('Portuguese', 'Portuguese'), ('Romanian', 'Romanian'), ('Russian', 'Russian'), ('Serbian', 'Serbian'), ('Slovak', 'Slovak'), ('Spanish', 'Spanish'), ('Swedish', 'Swedish'), ('Tamil', 'Tamil'), ('Telugu', 'Telugu'), ('Turkish', 'Turkish'), ('Ukrainian', 'Ukrainian'), ('Urdu', 'Urdu'), ('Vietnamese', 'Vietnamese') ) CITIES = ( ('Abidjan', 'Abidjan'), ('Accra', 'Accra'), ('Addis Ababa', 'Addis Ababa'), ('Ahmedabad', 'Ahmedabad'), ('Albuquerque', 'Albuquerque'), ('Aleppo', 'Aleppo'), ('Alexandria', 'Alexandria'), ('Algiers', 'Algiers'), ('Anaheim', 'Anaheim'), 
('Anchorage', 'Anchorage'), ('Ankara', 'Ankara'), ('Arlington', 'Arlington'), ('Athens', 'Athens'), ('Atlanta', 'Atlanta'), ('Aurora', 'Aurora'), ('Austin', 'Austin'), ('Baghdad', 'Baghdad'), ('Bakersfield', 'Bakersfield'), ('Baku', 'Baku'), ('Baltimore', 'Baltimore'), ('Bandung', 'Bandung'), ('Bangalore', 'Bangalore'), ('Bangkok', 'Bangkok'), ('Barcelona', 'Barcelona'), ('Baton Rouge', 'Baton Rouge'), ('Beijing', 'Beijing'), ('Bekasi', 'Bekasi'), ('Belém', 'Belém'), ('Belo Horizonte', 'Belo Horizonte'), ('Benoni', 'Benoni'), ('Berlin', 'Berlin'), ('Birmingham', 'Birmingham'), ('Bogota', 'Bogota'), ('Boise', 'Boise'), ('Boston', 'Boston'), ('Brasília', 'Brasília'), ('Brooklyn', 'Brooklyn'), ('Buenos Aires', 'Buenos Aires'), ('Buffalo', 'Buffalo'), ('Busan', 'Busan'), ('Cairo', 'Cairo'), ('Cali', 'Cali'), ('Campinas', 'Campinas'), ('Cape Town', 'Cape Town'), ('Caracas', 'Caracas'), ('Casablanca', 'Casablanca'), ('Chandler', 'Chandler'), ('Changchun', 'Changchun'), ('Changsha', 'Changsha'), ('Charlotte', 'Charlotte'), ('Chengdu', 'Chengdu'), ('Chennai', 'Chennai'), ('Chesapeake', 'Chesapeake'), ('Chicago', 'Chicago'), ('Chittagong', 'Chittagong'), ('Chongqing', 'Chongqing'), ('Chula Vista', 'Chula Vista'), ('Cincinnati', 'Cincinnati'), ('Cleveland', 'Cleveland'), ('Colorado Springs', 'Colorado Springs'), ('Columbus', 'Columbus'), ('Corpus Christi', 'Corpus Christi'), ('Curitiba', 'Curitiba'), ('Daegu', 'Daegu'), ('Dakar', 'Dakar'), ('Dalian', 'Dalian'), ('Dallas', 'Dallas'), ('Damascus', 'Damascus'), ('Dar es Salaam', 'Dar es Salaam'), ('Delhi', 'Delhi'), ('Denver', 'Denver'), ('Detroit', 'Detroit'), ('Dhaka', 'Dhaka'), ('Dongguan', 'Dongguan'), ('Durban', 'Durban'), ('Durham', 'Durham'), ('El Giza', 'El Giza'), ('El Paso', 'El Paso'), ('Faisalabad', 'Faisalabad'), ('Fort Wayne', 'Fort Wayne'), ('Fort Worth', 'Fort Worth'), ('Fortaleza', 'Fortaleza'), ('Frankfurt', 'Frankfurt'), ('Fremont', 'Fremont'), ('Fresno', 'Fresno'), ('Fukuoka', 'Fukuoka'), ('Fuzhou', 
'Fuzhou'), ('Garland', 'Garland'), ('George Town', 'George Town'), ('Gilbert', 'Gilbert'), ('Glendale', 'Glendale'), ('Greensboro', 'Greensboro'), ('Guadalajara', 'Guadalajara'), ('Guangzhou', 'Guangzhou'), ('Guayaquil', 'Guayaquil'), ('Guiyang', 'Guiyang'), ('Hangzhou', 'Hangzhou'), ('Hanoi', 'Hanoi'), ('Haora', 'Haora'), ('Harbin', 'Harbin'), ('Havana', 'Havana'), ('Hechi', 'Hechi'), ('Henderson', 'Henderson'), ('Hialeah', 'Hialeah'), ('Ho Chi Minh City', 'Ho Chi Minh City'), ('Hong Kong', 'Hong Kong'), ('Honolulu', 'Honolulu'), ('Houston', 'Houston'), ('Hyderabad', 'Hyderabad'), ('Ibadan', 'Ibadan'), ('Incheon', 'Incheon'), ('Indianapolis', 'Indianapolis'), ('Irvine', 'Irvine'), ('Irving', 'Irving'), ('Istanbul', 'Istanbul'), ('İzmir', 'İzmir'), ('Jacksonville', 'Jacksonville'), ('Jaipur', 'Jaipur'), ('Jakarta', 'Jakarta'), ('Jeddah', 'Jeddah'), ('Jersey City', 'Jersey City'), ('Jilin', 'Jilin'), ('Jinan', 'Jinan'), ('Jinxi', 'Jinxi'), ('Johannesburg', 'Johannesburg'), ('Kabul', 'Kabul'), ('Kano', 'Kano'), ('Kanpur', 'Kanpur'), ('Kansas City', 'Kansas City'), ('Kaohsiung', 'Kaohsiung'), ('Karachi', 'Karachi'), ('Katowice', 'Katowice'), ('Khartoum', 'Khartoum'), ('Kiev', 'Kiev'), ('Kinshasa', 'Kinshasa'), ('Kolkata', 'Kolkata'), ('Kunming', 'Kunming'), ('Lagos', 'Lagos'), ('Lahore', 'Lahore'), ('Lanzhou', 'Lanzhou'), ('Laredo', 'Laredo'), ('Las Vegas', 'Las Vegas'), ('Lexington', 'Lexington'), ('Lima', 'Lima'), ('Lincoln', 'Lincoln'), ('Lisbon', 'Lisbon'), ('London', 'London'), ('Long Beach', 'Long Beach'), ('Los Angeles', 'Los Angeles'), ('Louisville', 'Louisville'), ('Luanda', 'Luanda'), ('Lubbock', 'Lubbock'), ('Lucknow', 'Lucknow'), ('Madison', 'Madison'), ('Madrid', 'Madrid'), ('Manchester', 'Manchester'), ('Manila', 'Manila'), ('Mannheim', 'Mannheim'), ('Mashhad', 'Mashhad'), ('Medan', 'Medan'), ('Medellín', 'Medellín'), ('Melbourne', 'Melbourne'), ('Memphis', 'Memphis'), ('Mesa', 'Mesa'), ('Mexico City', 'Mexico City'), ('Miami', 'Miami'), ('Milan', 
'Milan'), ('Milwaukee', 'Milwaukee'), ('Minneapolis', 'Minneapolis'), ('Monterrey', 'Monterrey'), ('Montréal', 'Montréal'), ('Moscow', 'Moscow'), ('Mumbai', 'Mumbai'), ('Nagoya', 'Nagoya'), ('Nagpur', 'Nagpur'), ('Nairobi', 'Nairobi'), ('Nanchang', 'Nanchang'), ('Nanchong', 'Nanchong'), ('Nanjing', 'Nanjing'), ('Nanning', 'Nanning'), ('Naples', 'Naples'), ('Nashville', 'Nashville'), ('New Orleans', 'New Orleans'), ('New York', 'New York'), ('Newark', 'Newark'), ('Norfolk', 'Norfolk'), ('North Las Vegas', 'North Las Vegas'), ('Oakland', 'Oakland'), ('Oklahoma City', 'Oklahoma City'), ('Omaha', 'Omaha'), ('Omdurman', 'Omdurman'), ('Orlando', 'Orlando'), ('Ōsaka', 'Ōsaka'), ('Paris', 'Paris'), ('Patna', 'Patna'), ('Philadelphia', 'Philadelphia'), ('Phoenix', 'Phoenix'), ('Pittsburgh', 'Pittsburgh'), ('Plano', 'Plano'), ('Portland', 'Portland'), ('Porto Alegre', 'Porto Alegre'), ('Puebla', 'Puebla'), ('Pune', 'Pune'), ('Pyongyang', 'Pyongyang'), ('Qingdao', 'Qingdao'), ('Queens', 'Queens'), ('Quezon City', 'Quezon City'), ('Raleigh', 'Raleigh'), ('Rangoon', 'Rangoon'), ('Recife', 'Recife'), ('Reno', 'Reno'), ('Richmond', 'Richmond'), ('Rio de Janeiro', 'Rio de Janeiro'), ('Riverside', 'Riverside'), ('Riyadh', 'Riyadh'), ('Rome', 'Rome'), ('Sacramento', 'Sacramento'), ('Saint Paul', 'Saint Paul'), ('Salvador', 'Salvador'), ('San Antonio', 'San Antonio'), ('San Diego', 'San Diego'), ('San Francisco', 'San Francisco'), ('San Jose', 'San Jose'), ('Santa Ana', 'Santa Ana'), ('Santa Cruz', 'Santa Cruz'), ('Santiago', 'Santiago'), ('Santo Domingo', 'Santo Domingo'), ('São Paulo', 'São Paulo'), ('Sapporo', 'Sapporo'), ('Scottsdale', 'Scottsdale'), ('Seattle', 'Seattle'), ('Sendai', 'Sendai'), ('Seoul', 'Seoul'), ('Shanghai', 'Shanghai'), ('Shenyeng', 'Shenyeng'), ('Shenzhen', 'Shenzhen'), ('Shijianzhuang', 'Shijianzhuang'), ('Singapore', 'Singapore'), ('Spokane', 'Spokane'), ('St. Louis', 'St. Louis'), ('St. Petersburg', 'St. 
Petersburg'), ('Stockton', 'Stockton'), ('Stuttgart', 'Stuttgart'), ('Surabaya', 'Surabaya'), ('Surat', 'Surat'), ('Sydney', 'Sydney'), ('Taichung', 'Taichung'), ('Taipei', 'Taipei'), ('Taiyuan', 'Taiyuan'), ('Tampa', 'Tampa'), ('Tashkent', 'Tashkent'), ('Tehran', 'Tehran'), ('Tel Aviv-Yafo', 'Tel Aviv-Yafo'), ('Tianjin', 'Tianjin'), ('Tokyo', 'Tokyo'), ('Toledo', 'Toledo'), ('Toronto', 'Toronto'), ('Tripoli', 'Tripoli'), ('Tucson', 'Tucson'), ('Tulsa', 'Tulsa'), ('Tunis', 'Tunis'), ('Ürümqi', 'Ürümqi'), ('Vancouver', 'Vancouver'), ('Vienna', 'Vienna'), ('Virginia Beach', 'Virginia Beach'), ('Washington', 'Washington'), ('Wenzhou', 'Wenzhou'), ('Wichita', 'Wichita'), ('Winston–Salem', 'Winston–Salem'), ('Wuhan', 'Wuhan'), ('Xiamen', 'Xiamen'), ('Xian', 'Xian'), ('Xiangtan', 'Xiangtan'), ('Yantai', 'Yantai'), ('Yokohama', 'Yokohama'), ('Zaozhuang', 'Zaozhuang'), ('Zhangzhou', 'Zhangzhou'), ('Zhengzhou', 'Zhengzhou'), ('Zibo', 'Zibo') ) CATEGORIES = ( ('Art', 'Art'), ('Food', 'Food'), ('Sports', 'Sports'), ('Adventure', 'Adventure'), ('Workshop', 'Workshop'), ('Other', 'Other') ) #----- PROFILE ------ class Profile(models.Model): user = models.OneToOneField(User, primary_key=True, on_delete=models.CASCADE) url = models.CharField(max_length=200) def __str__(self): return f'{self.user.username} Profile' # ---- EXPERIENCE ------ class Experience(models.Model): title = models.CharField(max_length=100) category = models.CharField(max_length=100, choices=CATEGORIES, default='Food') description = models.TextField(max_length=750) price = models.DecimalField(max_digits=7, decimal_places=2) hours = models.IntegerField(choices=HOURS, default=12) minutes = models.IntegerField(choices=MINUTES, default=0) language = models.CharField(max_length=100, choices=LANGUAGES, default='English') city = models.CharField(max_length=100, choices=CITIES, default='San Francisco') address = models.CharField(max_length=100) zipcode = models.IntegerField(default=99999) # user in this case is equal 
to the experience host user = models.ForeignKey(User, on_delete=models.CASCADE) def __str__(self): return f'{self.title} ({self.id})' def get_absolute_url(self)
reverse('exp_detail', kwargs = { 'pk': self.id }) # ---- BOOKING ------ class Booking(models.Model): # user in this case is equal to the experience participant user = models.ForeignKey(User, on_delete=models.CASCADE) experience = models.ForeignKey(Experience, on_delete=models.CASCADE) date = models.DateField('booking date') def __str__(self): return f'Booking ({self.id}) by {self.user} ({self.user_id}) for Experience ({self.experience_id})' def get_absolute_url(self): return reverse('bkng_show', kwargs = { 'exp_id': self.experience_id, 'bkng_id': self.id }) class Meta: ordering = ['date'] # ---- REVIEW ------ class Review(models.Model): user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, null=True) experience = models.ForeignKey(Experience, on_delete=models.CASCADE) rating = models.IntegerField(validators=[MinValueValidator(1) , MaxValueValidator(5)]) comment = models.TextField(max_length=250) def get_absolute_url(self): return reverse('exp_detail', kwargs = { 'pk': self.experience.id }) def __str__(self): return f'Review by {self.user} ({self.user_id}) for Experience ({self.experience_id})' # ---- PHOTO ------ class Photo(models.Model): url = models.CharField(max_length=200) experience = models.ForeignKey(Experience, on_delete=models.CASCADE) def __str__(self): return f"Photo for exp_id: {self.experience_id} @{self.url} ({self.id})"
: return
identifier_name
models.py
from django.db import models from django.urls import reverse from django.conf import settings from django.contrib.auth.models import User from django.utils.translation import ugettext_lazy as _ from django.core.files.storage import default_storage as storage from django.core.validators import MaxValueValidator, MinValueValidator from PIL import Image from datetime import date HOURS = ( (0, '12 AM'), (1, '1 AM'), (2, '2 AM'), (3, '3 AM'), (4, '4 AM'), (5, '5 AM'), (6, '6 AM'), (7, '7 AM'), (8, '8 AM'), (9, '9 AM'), (10, '10 AM'), (11, '11 AM'), (12, '12 PM'), (13, '1 PM'), (14, '2 PM'), (15, '3 PM'), (16, '4 PM'), (17, '5 PM'), (18, '6 PM'), (19, '7 PM'), (20, '8 PM'), (21, '9 PM'), (22, '10 PM'), (23, '11 PM') ) MINUTES = ( (00, '00'), (15, '15'), (30, '30'), (45, '45') ) LANGUAGES = ( ('Albanian', 'Albanian'), ('Arabic', 'Arabic'), ('Austro-Bavarian', 'Austro-Bavarian'), ('Bengali', 'Bengali'), ('Bulgarian', 'Bulgarian'), ('Croatian', 'Croatian'), ('Czech', 'Czech'), ('Danish', 'Danish'), ('Dutch', 'Dutch'), ('English', 'English'), ('Finnish', 'Finnish'), ('French', 'French'), ('German', 'German'), ('Greek', 'Greek'), ('Hindi', 'Hindi'), ('Hungarian', 'Hungarian'), ('Italian', 'Italian'), ('Japanese', 'Japanese'), ('Javanese', 'Javanese'), ('Korean', 'Korean'), ('Lahnda', 'Lahnda'), ('Mandarin', 'Mandarin'), ('Marathi', 'Marathi'), ('Neapolitan', 'Neapolitan'), ('Norwegian', 'Norwegian'), ('Polish', 'Polish'), ('Portuguese', 'Portuguese'), ('Romanian', 'Romanian'), ('Russian', 'Russian'), ('Serbian', 'Serbian'), ('Slovak', 'Slovak'), ('Spanish', 'Spanish'), ('Swedish', 'Swedish'), ('Tamil', 'Tamil'), ('Telugu', 'Telugu'), ('Turkish', 'Turkish'), ('Ukrainian', 'Ukrainian'), ('Urdu', 'Urdu'), ('Vietnamese', 'Vietnamese') ) CITIES = ( ('Abidjan', 'Abidjan'), ('Accra', 'Accra'), ('Addis Ababa', 'Addis Ababa'), ('Ahmedabad', 'Ahmedabad'), ('Albuquerque', 'Albuquerque'), ('Aleppo', 'Aleppo'), ('Alexandria', 'Alexandria'), ('Algiers', 'Algiers'), ('Anaheim', 'Anaheim'), 
('Anchorage', 'Anchorage'), ('Ankara', 'Ankara'), ('Arlington', 'Arlington'), ('Athens', 'Athens'), ('Atlanta', 'Atlanta'), ('Aurora', 'Aurora'), ('Austin', 'Austin'), ('Baghdad', 'Baghdad'), ('Bakersfield', 'Bakersfield'), ('Baku', 'Baku'), ('Baltimore', 'Baltimore'), ('Bandung', 'Bandung'), ('Bangalore', 'Bangalore'), ('Bangkok', 'Bangkok'), ('Barcelona', 'Barcelona'), ('Baton Rouge', 'Baton Rouge'), ('Beijing', 'Beijing'), ('Bekasi', 'Bekasi'), ('Belém', 'Belém'), ('Belo Horizonte', 'Belo Horizonte'), ('Benoni', 'Benoni'), ('Berlin', 'Berlin'), ('Birmingham', 'Birmingham'), ('Bogota', 'Bogota'), ('Boise', 'Boise'), ('Boston', 'Boston'), ('Brasília', 'Brasília'), ('Brooklyn', 'Brooklyn'), ('Buenos Aires', 'Buenos Aires'), ('Buffalo', 'Buffalo'), ('Busan', 'Busan'), ('Cairo', 'Cairo'), ('Cali', 'Cali'), ('Campinas', 'Campinas'), ('Cape Town', 'Cape Town'), ('Caracas', 'Caracas'), ('Casablanca', 'Casablanca'), ('Chandler', 'Chandler'), ('Changchun', 'Changchun'), ('Changsha', 'Changsha'), ('Charlotte', 'Charlotte'), ('Chengdu', 'Chengdu'), ('Chennai', 'Chennai'), ('Chesapeake', 'Chesapeake'), ('Chicago', 'Chicago'), ('Chittagong', 'Chittagong'), ('Chongqing', 'Chongqing'), ('Chula Vista', 'Chula Vista'), ('Cincinnati', 'Cincinnati'), ('Cleveland', 'Cleveland'), ('Colorado Springs', 'Colorado Springs'), ('Columbus', 'Columbus'), ('Corpus Christi', 'Corpus Christi'), ('Curitiba', 'Curitiba'), ('Daegu', 'Daegu'), ('Dakar', 'Dakar'), ('Dalian', 'Dalian'), ('Dallas', 'Dallas'), ('Damascus', 'Damascus'), ('Dar es Salaam', 'Dar es Salaam'), ('Delhi', 'Delhi'), ('Denver', 'Denver'), ('Detroit', 'Detroit'), ('Dhaka', 'Dhaka'), ('Dongguan', 'Dongguan'), ('Durban', 'Durban'), ('Durham', 'Durham'), ('El Giza', 'El Giza'), ('El Paso', 'El Paso'), ('Faisalabad', 'Faisalabad'), ('Fort Wayne', 'Fort Wayne'), ('Fort Worth', 'Fort Worth'), ('Fortaleza', 'Fortaleza'), ('Frankfurt', 'Frankfurt'), ('Fremont', 'Fremont'), ('Fresno', 'Fresno'), ('Fukuoka', 'Fukuoka'), ('Fuzhou', 
'Fuzhou'), ('Garland', 'Garland'), ('George Town', 'George Town'), ('Gilbert', 'Gilbert'), ('Glendale', 'Glendale'), ('Greensboro', 'Greensboro'), ('Guadalajara', 'Guadalajara'), ('Guangzhou', 'Guangzhou'), ('Guayaquil', 'Guayaquil'), ('Guiyang', 'Guiyang'), ('Hangzhou', 'Hangzhou'), ('Hanoi', 'Hanoi'), ('Haora', 'Haora'), ('Harbin', 'Harbin'), ('Havana', 'Havana'), ('Hechi', 'Hechi'), ('Henderson', 'Henderson'), ('Hialeah', 'Hialeah'), ('Ho Chi Minh City', 'Ho Chi Minh City'), ('Hong Kong', 'Hong Kong'), ('Honolulu', 'Honolulu'), ('Houston', 'Houston'), ('Hyderabad', 'Hyderabad'), ('Ibadan', 'Ibadan'), ('Incheon', 'Incheon'), ('Indianapolis', 'Indianapolis'), ('Irvine', 'Irvine'), ('Irving', 'Irving'), ('Istanbul', 'Istanbul'), ('İzmir', 'İzmir'), ('Jacksonville', 'Jacksonville'), ('Jaipur', 'Jaipur'), ('Jakarta', 'Jakarta'), ('Jeddah', 'Jeddah'), ('Jersey City', 'Jersey City'), ('Jilin', 'Jilin'), ('Jinan', 'Jinan'), ('Jinxi', 'Jinxi'), ('Johannesburg', 'Johannesburg'), ('Kabul', 'Kabul'), ('Kano', 'Kano'), ('Kanpur', 'Kanpur'), ('Kansas City', 'Kansas City'), ('Kaohsiung', 'Kaohsiung'), ('Karachi', 'Karachi'), ('Katowice', 'Katowice'), ('Khartoum', 'Khartoum'), ('Kiev', 'Kiev'), ('Kinshasa', 'Kinshasa'), ('Kolkata', 'Kolkata'), ('Kunming', 'Kunming'), ('Lagos', 'Lagos'), ('Lahore', 'Lahore'), ('Lanzhou', 'Lanzhou'), ('Laredo', 'Laredo'), ('Las Vegas', 'Las Vegas'), ('Lexington', 'Lexington'), ('Lima', 'Lima'), ('Lincoln', 'Lincoln'), ('Lisbon', 'Lisbon'), ('London', 'London'), ('Long Beach', 'Long Beach'), ('Los Angeles', 'Los Angeles'), ('Louisville', 'Louisville'), ('Luanda', 'Luanda'), ('Lubbock', 'Lubbock'), ('Lucknow', 'Lucknow'), ('Madison', 'Madison'), ('Madrid', 'Madrid'), ('Manchester', 'Manchester'), ('Manila', 'Manila'), ('Mannheim', 'Mannheim'), ('Mashhad', 'Mashhad'), ('Medan', 'Medan'), ('Medellín', 'Medellín'), ('Melbourne', 'Melbourne'), ('Memphis', 'Memphis'), ('Mesa', 'Mesa'), ('Mexico City', 'Mexico City'), ('Miami', 'Miami'), ('Milan', 
'Milan'), ('Milwaukee', 'Milwaukee'), ('Minneapolis', 'Minneapolis'), ('Monterrey', 'Monterrey'), ('Montréal', 'Montréal'), ('Moscow', 'Moscow'), ('Mumbai', 'Mumbai'), ('Nagoya', 'Nagoya'), ('Nagpur', 'Nagpur'), ('Nairobi', 'Nairobi'), ('Nanchang', 'Nanchang'), ('Nanchong', 'Nanchong'), ('Nanjing', 'Nanjing'), ('Nanning', 'Nanning'), ('Naples', 'Naples'), ('Nashville', 'Nashville'), ('New Orleans', 'New Orleans'), ('New York', 'New York'), ('Newark', 'Newark'), ('Norfolk', 'Norfolk'), ('North Las Vegas', 'North Las Vegas'), ('Oakland', 'Oakland'), ('Oklahoma City', 'Oklahoma City'), ('Omaha', 'Omaha'), ('Omdurman', 'Omdurman'), ('Orlando', 'Orlando'), ('Ōsaka', 'Ōsaka'), ('Paris', 'Paris'), ('Patna', 'Patna'), ('Philadelphia', 'Philadelphia'), ('Phoenix', 'Phoenix'), ('Pittsburgh', 'Pittsburgh'), ('Plano', 'Plano'), ('Portland', 'Portland'), ('Porto Alegre', 'Porto Alegre'), ('Puebla', 'Puebla'), ('Pune', 'Pune'), ('Pyongyang', 'Pyongyang'), ('Qingdao', 'Qingdao'), ('Queens', 'Queens'), ('Quezon City', 'Quezon City'), ('Raleigh', 'Raleigh'), ('Rangoon', 'Rangoon'), ('Recife', 'Recife'), ('Reno', 'Reno'), ('Richmond', 'Richmond'), ('Rio de Janeiro', 'Rio de Janeiro'), ('Riverside', 'Riverside'), ('Riyadh', 'Riyadh'), ('Rome', 'Rome'), ('Sacramento', 'Sacramento'), ('Saint Paul', 'Saint Paul'), ('Salvador', 'Salvador'), ('San Antonio', 'San Antonio'), ('San Diego', 'San Diego'), ('San Francisco', 'San Francisco'), ('San Jose', 'San Jose'), ('Santa Ana', 'Santa Ana'), ('Santa Cruz', 'Santa Cruz'), ('Santiago', 'Santiago'), ('Santo Domingo', 'Santo Domingo'), ('São Paulo', 'São Paulo'), ('Sapporo', 'Sapporo'), ('Scottsdale', 'Scottsdale'), ('Seattle', 'Seattle'), ('Sendai', 'Sendai'), ('Seoul', 'Seoul'), ('Shanghai', 'Shanghai'), ('Shenyeng', 'Shenyeng'), ('Shenzhen', 'Shenzhen'), ('Shijianzhuang', 'Shijianzhuang'), ('Singapore', 'Singapore'), ('Spokane', 'Spokane'), ('St. Louis', 'St. Louis'), ('St. Petersburg', 'St. 
Petersburg'), ('Stockton', 'Stockton'), ('Stuttgart', 'Stuttgart'), ('Surabaya', 'Surabaya'), ('Surat', 'Surat'), ('Sydney', 'Sydney'), ('Taichung', 'Taichung'), ('Taipei', 'Taipei'), ('Taiyuan', 'Taiyuan'), ('Tampa', 'Tampa'), ('Tashkent', 'Tashkent'), ('Tehran', 'Tehran'), ('Tel Aviv-Yafo', 'Tel Aviv-Yafo'), ('Tianjin', 'Tianjin'), ('Tokyo', 'Tokyo'), ('Toledo', 'Toledo'), ('Toronto', 'Toronto'), ('Tripoli', 'Tripoli'), ('Tucson', 'Tucson'), ('Tulsa', 'Tulsa'), ('Tunis', 'Tunis'), ('Ürümqi', 'Ürümqi'), ('Vancouver', 'Vancouver'), ('Vienna', 'Vienna'), ('Virginia Beach', 'Virginia Beach'), ('Washington', 'Washington'), ('Wenzhou', 'Wenzhou'), ('Wichita', 'Wichita'), ('Winston–Salem', 'Winston–Salem'), ('Wuhan', 'Wuhan'), ('Xiamen', 'Xiamen'), ('Xian', 'Xian'), ('Xiangtan', 'Xiangtan'), ('Yantai', 'Yantai'), ('Yokohama', 'Yokohama'), ('Zaozhuang', 'Zaozhuang'), ('Zhangzhou', 'Zhangzhou'), ('Zhengzhou', 'Zhengzhou'), ('Zibo', 'Zibo') ) CATEGORIES = ( ('Art', 'Art'), ('Food', 'Food'), ('Sports', 'Sports'), ('Adventure', 'Adventure'), ('Workshop', 'Workshop'), ('Other', 'Other') ) #----- PROFILE ------ class Profile(models.Model): user = models.OneToOneField(User, primary_key=True, on_delete=models.CASCADE) url = models.CharField(max_length=200) def __str__(self): return f'{self.user.username} Profile' # ---- EXPERIENCE ------ class Experience(models.Model): title = models.CharField(max_length=100) category = models.CharField(max_length=100, choices=CATEGORIES, default='Food') description = models.TextField(max_length=750) price = models.DecimalField(max_digits=7, decimal_places=2) hours = models.IntegerField(choices=HOURS, default=12) minutes = models.IntegerField(choices=MINUTES, default=0) language = models.CharField(max_length=100, choices=LANGUAGES, default='English') city = models.CharField(max_length=100, choices=CITIES, default='San Francisco') address = models.CharField(max_length=100) zipcode = models.IntegerField(default=99999) # user in this case is equal 
to the experience host user = models.ForeignKey(User, on_delete=models.CASCADE) def __str__(self): return f'{self.title} ({self.id})' def get_absolute_url(self): return reverse('exp_de
-- class Booking(models.Model): # user in this case is equal to the experience participant user = models.ForeignKey(User, on_delete=models.CASCADE) experience = models.ForeignKey(Experience, on_delete=models.CASCADE) date = models.DateField('booking date') def __str__(self): return f'Booking ({self.id}) by {self.user} ({self.user_id}) for Experience ({self.experience_id})' def get_absolute_url(self): return reverse('bkng_show', kwargs = { 'exp_id': self.experience_id, 'bkng_id': self.id }) class Meta: ordering = ['date'] # ---- REVIEW ------ class Review(models.Model): user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, null=True) experience = models.ForeignKey(Experience, on_delete=models.CASCADE) rating = models.IntegerField(validators=[MinValueValidator(1) , MaxValueValidator(5)]) comment = models.TextField(max_length=250) def get_absolute_url(self): return reverse('exp_detail', kwargs = { 'pk': self.experience.id }) def __str__(self): return f'Review by {self.user} ({self.user_id}) for Experience ({self.experience_id})' # ---- PHOTO ------ class Photo(models.Model): url = models.CharField(max_length=200) experience = models.ForeignKey(Experience, on_delete=models.CASCADE) def __str__(self): return f"Photo for exp_id: {self.experience_id} @{self.url} ({self.id})"
tail', kwargs = { 'pk': self.id }) # ---- BOOKING ----
identifier_body
github.go
/* Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package flagutil import ( "crypto/rsa" "errors" "flag" "fmt" "net/url" "strconv" "strings" "time" "github.com/dgrijalva/jwt-go/v4" "github.com/sirupsen/logrus" utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/test-infra/prow/config/secret" "k8s.io/test-infra/prow/git" gitv2 "k8s.io/test-infra/prow/git/v2" "k8s.io/test-infra/prow/github" ) // GitHubOptions holds options for interacting with GitHub. // // Set AllowAnonymous to be true if you want to allow anonymous github access. // Set AllowDirectAccess to be true if you want to suppress warnings on direct github access (without ghproxy). 
type GitHubOptions struct { Host string endpoint Strings graphqlEndpoint string TokenPath string AllowAnonymous bool AllowDirectAccess bool AppID string AppPrivateKeyPath string ThrottleHourlyTokens int ThrottleAllowBurst int OrgThrottlers Strings parsedOrgThrottlers map[string]throttlerSettings // These will only be set after a github client was retrieved for the first time tokenGenerator github.TokenGenerator userGenerator github.UserGenerator // the following options determine how the client behaves around retries maxRequestTime time.Duration maxRetries int max404Retries int initialDelay time.Duration maxSleepTime time.Duration } type throttlerSettings struct { hourlyTokens int burst int } // flagParams struct is used indirectly by users of this package to customize // the common flags behavior, such as providing their own default values // or suppressing presence of certain flags. type flagParams struct { defaults GitHubOptions disableThrottlerOptions bool } type FlagParameter func(options *flagParams) // ThrottlerDefaults allows to customize the default values of flags // that control the throttler behavior. Setting `hourlyTokens` to zero // disables throttling by default. func ThrottlerDefaults(hourlyTokens, allowedBursts int) FlagParameter { return func(o *flagParams) { o.defaults.ThrottleHourlyTokens = hourlyTokens o.defaults.ThrottleAllowBurst = allowedBursts } } // DisableThrottlerOptions suppresses the presence of throttler-related flags, // effectively disallowing external users to parametrize default throttling // behavior. This is useful mostly when a program creates multiple GH clients // with different behavior. func DisableThrottlerOptions() FlagParameter { return func(o *flagParams) { o.disableThrottlerOptions = true } } // AddCustomizedFlags injects GitHub options into the given FlagSet. Behavior can be customized // via the functional options. 
func (o *GitHubOptions) AddCustomizedFlags(fs *flag.FlagSet, paramFuncs ...FlagParameter) { o.addFlags(fs, paramFuncs...) } // AddFlags injects GitHub options into the given FlagSet func (o *GitHubOptions) AddFlags(fs *flag.FlagSet) { o.addFlags(fs) } func (o *GitHubOptions) addFlags(fs *flag.FlagSet, paramFuncs ...FlagParameter) { params := flagParams{ defaults: GitHubOptions{ Host: github.DefaultHost, endpoint: NewStrings(github.DefaultAPIEndpoint), graphqlEndpoint: github.DefaultGraphQLEndpoint, }, } for _, parametrize := range paramFuncs { parametrize(&params) } defaults := params.defaults fs.StringVar(&o.Host, "github-host", defaults.Host, "GitHub's default host (may differ for enterprise)") o.endpoint = NewStrings(defaults.endpoint.Strings()...) fs.Var(&o.endpoint, "github-endpoint", "GitHub's API endpoint (may differ for enterprise).") fs.StringVar(&o.graphqlEndpoint, "github-graphql-endpoint", defaults.graphqlEndpoint, "GitHub GraphQL API endpoint (may differ for enterprise).") fs.StringVar(&o.TokenPath, "github-token-path", defaults.TokenPath, "Path to the file containing the GitHub OAuth secret.") fs.StringVar(&o.AppID, "github-app-id", defaults.AppID, "ID of the GitHub app. If set, requires --github-app-private-key-path to be set and --github-token-path to be unset.") fs.StringVar(&o.AppPrivateKeyPath, "github-app-private-key-path", defaults.AppPrivateKeyPath, "Path to the private key of the github app. If set, requires --github-app-id to bet set and --github-token-path to be unset") if !params.disableThrottlerOptions { fs.IntVar(&o.ThrottleHourlyTokens, "github-hourly-tokens", defaults.ThrottleHourlyTokens, "If set to a value larger than zero, enable client-side throttling to limit hourly token consumption. If set, --github-allowed-burst must be positive too.") fs.IntVar(&o.ThrottleAllowBurst, "github-allowed-burst", defaults.ThrottleAllowBurst, "Size of token consumption bursts. 
If set, --github-hourly-tokens must be positive too and set to a higher or equal number.") fs.Var(&o.OrgThrottlers, "github-throttle-org", "Throttler settings for a specific org in org:hourlyTokens:burst format. Can be passed multiple times. Only valid when using github apps auth.") } fs.DurationVar(&o.maxRequestTime, "github-client.request-timeout", github.DefaultMaxSleepTime, "Timeout for any single request to the GitHub API.") fs.IntVar(&o.maxRetries, "github-client.max-retries", github.DefaultMaxRetries, "Maximum number of retries that will be used for a failing request to the GitHub API.") fs.IntVar(&o.max404Retries, "github-client.max-404-retries", github.DefaultMax404Retries, "Maximum number of retries that will be used for a 404-ing request to the GitHub API.") fs.DurationVar(&o.maxSleepTime, "github-client.backoff-timeout", github.DefaultMaxSleepTime, "Largest allowable Retry-After time for requests to the GitHub API.") fs.DurationVar(&o.initialDelay, "github-client.initial-delay", github.DefaultInitialDelay, "Initial delay before retries begin for requests to the GitHub API.") } func (o *GitHubOptions) parseOrgThrottlers() error { if len(o.OrgThrottlers.vals) == 0 { return nil } if o.AppID == "" { return errors.New("--github-throttle-org was passed, but client doesn't use apps auth") } o.parsedOrgThrottlers = make(map[string]throttlerSettings, len(o.OrgThrottlers.vals)) var errs []error for _, orgThrottler := range o.OrgThrottlers.vals { colonSplit := strings.Split(orgThrottler, ":") if len(colonSplit) != 3 { errs = append(errs, fmt.Errorf("-github-throttle-org=%s is not in org:hourlyTokens:burst format", orgThrottler)) continue } org, hourlyTokensString, burstString := colonSplit[0], colonSplit[1], colonSplit[2] hourlyTokens, err := strconv.ParseInt(hourlyTokensString, 10, 32) if err != nil { errs = append(errs, fmt.Errorf("-github-throttle-org=%s is not in org:hourlyTokens:burst format: hourlyTokens is not an int", orgThrottler)) continue } burst, err 
:= strconv.ParseInt(burstString, 10, 32) if err != nil { errs = append(errs, fmt.Errorf("-github-throttle-org=%s is not in org:hourlyTokens:burst format: burst is not an int", orgThrottler)) continue } if hourlyTokens < 1 { errs = append(errs, fmt.Errorf("-github-throttle-org=%s: hourlyTokens must be > 0", orgThrottler)) continue } if burst < 1 { errs = append(errs, fmt.Errorf("-github-throttle-org=%s: burst must be > 0", orgThrottler)) continue } if burst > hourlyTokens { errs = append(errs, fmt.Errorf("-github-throttle-org=%s: burst must not be greater than hourlyTokens", orgThrottler)) continue } if _, alreadyExists := o.parsedOrgThrottlers[org]; alreadyExists { errs = append(errs, fmt.Errorf("got multiple -github-throttle-org for the %s org", org)) continue } o.parsedOrgThrottlers[org] = throttlerSettings{hourlyTokens: int(hourlyTokens), burst: int(burst)} } return utilerrors.NewAggregate(errs) } // Validate validates GitHub options. Note that validate updates the GitHubOptions // to add default values for TokenPath and graphqlEndpoint. func (o *GitHubOptions) Validate(bool) error { endpoints := o.endpoint.Strings() for i, uri := range endpoints { if uri == "" { endpoints[i] = github.DefaultAPIEndpoint } else if _, err := url.ParseRequestURI(uri); err != nil { return fmt.Errorf("invalid -github-endpoint URI: %q", uri) } } if o.TokenPath != "" && (o.AppID != "" || o.AppPrivateKeyPath != "") { return fmt.Errorf("--token-path is mutually exclusive with --app-id and --app-private-key-path") } if o.AppID == "" != (o.AppPrivateKeyPath == "") { return errors.New("--app-id and --app-private-key-path must be set together") } if o.TokenPath != "" && len(endpoints) == 1 && endpoints[0] == github.DefaultAPIEndpoint && !o.AllowDirectAccess { logrus.Warn("It doesn't look like you are using ghproxy to cache API calls to GitHub! 
This has become a required component of Prow and other components will soon be allowed to add features that may rapidly consume API ratelimit without caching. Starting May 1, 2020 use Prow components without ghproxy at your own risk! https://github.com/kubernetes/test-infra/tree/master/ghproxy#ghproxy") } if o.graphqlEndpoint == "" { o.graphqlEndpoint = github.DefaultGraphQLEndpoint } else if _, err := url.Parse(o.graphqlEndpoint); err != nil { return fmt.Errorf("invalid -github-graphql-endpoint URI: %q", o.graphqlEndpoint) } if (o.ThrottleHourlyTokens > 0) != (o.ThrottleAllowBurst > 0) { if o.ThrottleHourlyTokens == 0 { // Tolerate `--github-hourly-tokens=0` alone to disable throttling o.ThrottleAllowBurst = 0 } else { return errors.New("--github-hourly-tokens and --github-allowed-burst must be either both higher than zero or both equal to zero") } } if o.ThrottleAllowBurst > o.ThrottleHourlyTokens { return errors.New("--github-allowed-burst must not be larger than --github-hourly-tokens") } return o.parseOrgThrottlers() } // GitHubClientWithLogFields returns a GitHub client with extra logging fields func (o *GitHubOptions) GitHubClientWithLogFields(dryRun bool, fields logrus.Fields) (github.Client, error) { client, err := o.githubClient(dryRun) if err != nil { return nil, err } return client.WithFields(fields), nil } func (o *GitHubOptions) githubClient(dryRun bool) (github.Client, error) { fields := logrus.Fields{} options := o.baseClientOptions() options.DryRun = dryRun if o.TokenPath == "" && o.AppPrivateKeyPath == "" { logrus.Warn("empty -github-token-path, will use anonymous github client") } if o.TokenPath == "" { options.GetToken = func() []byte { return []byte{} } } else { if err := secret.Add(o.TokenPath); err != nil { return nil, fmt.Errorf("failed to add GitHub token to secret agent: %w", err) } options.GetToken = secret.GetTokenGenerator(o.TokenPath) } if o.AppPrivateKeyPath != "" { apk, err := o.appPrivateKeyGenerator() if err != nil { return nil, 
err } options.AppPrivateKey = apk } optionallyThrottled := func(c github.Client) (github.Client, error) { // Throttle handles zeros as "disable throttling" so we do not need to call it conditionally if err := c.Throttle(o.ThrottleHourlyTokens, o.ThrottleAllowBurst); err != nil { return nil, fmt.Errorf("failed to throttle: %w", err) } for org, settings := range o.parsedOrgThrottlers { if err := c.Throttle(settings.hourlyTokens, settings.burst, org); err != nil { return nil, fmt.Errorf("failed to set up throttling for org %s: %w", org, err) } } return c, nil } tokenGenerator, userGenerator, client, err := github.NewClientFromOptions(fields, options) if err != nil { return nil, fmt.Errorf("failed to construct github client: %w", err) } o.tokenGenerator = tokenGenerator o.userGenerator = userGenerator return optionallyThrottled(client) } // baseClientOptions populates client options that are derived from flags without processing func (o *GitHubOptions) baseClientOptions() github.ClientOptions { return github.ClientOptions{ Censor: secret.Censor, AppID: o.AppID, GraphqlEndpoint: o.graphqlEndpoint, Bases: o.endpoint.Strings(), MaxRequestTime: o.maxRequestTime, InitialDelay: o.initialDelay, MaxSleepTime: o.maxSleepTime, MaxRetries: o.maxRetries, Max404Retries: o.max404Retries, } } // GitHubClient returns a GitHub client. func (o *GitHubOptions) GitHubClient(dryRun bool) (github.Client, error) { return o.GitHubClientWithLogFields(dryRun, logrus.Fields{}) } // GitHubClientWithAccessToken creates a GitHub client from an access token. func (o *GitHubOptions) GitHubClientWithAccessToken(token string) (github.Client, error) { options := o.baseClientOptions() options.GetToken = func() []byte { return []byte(token) } options.AppID = "" // Since we are using a token, we should not use the app auth _, _, client, err := github.NewClientFromOptions(logrus.Fields{}, options) return client, err } // GitClientFactory returns git.ClientFactory. 
Passing non-empty cookieFilePath // will result in git ClientFactory to work with Gerrit. // TODO(chaodaiG): move this logic to somewhere more appropriate instead of in // github.go. func (o *GitHubOptions) GitClientFactory(cookieFilePath string, cacheDir *string, dryRun, persistCache bool) (gitv2.ClientFactory, error) { var gitClientFactory gitv2.ClientFactory if cookieFilePath != "" && o.TokenPath == "" && o.AppPrivateKeyPath == "" { opts := gitv2.ClientFactoryOpts{ CookieFilePath: cookieFilePath, Persist: &persistCache, } if cacheDir != nil && *cacheDir != "" { opts.CacheDirBase = cacheDir } var err error gitClientFactory, err = gitv2.NewClientFactory(opts.Apply) if err != nil { return nil, fmt.Errorf("failed to create git client from cookieFile: %v\n(cookieFile is only for Gerrit)", err) } } else { gitClient, err := o.GitClient(dryRun) if err != nil { return nil, fmt.Errorf("Error getting git client: %w", err) } gitClientFactory = gitv2.ClientFactoryFrom(gitClient) } return gitClientFactory, nil } // GitClient returns a Git client. func (o *GitHubOptions) GitClient(dryRun bool) (client *git.Client, err error) { client, err = git.NewClientWithHost(o.Host) if err != nil { return nil, err } // We must capture the value of client here to prevent issues related // to the use of named return values when an error is encountered. // Without this, we risk a nil pointer dereference. defer func(client *git.Client) { if err != nil { client.Clean() } }(client) user, generator, err := o.getGitAuthentication(dryRun) if err != nil { return nil, fmt.Errorf("failed to get git authentication: %w", err) } client.SetCredentials(user, generator) return client, nil } func (o *GitHubOptions) getGitAuthentication(dryRun bool) (string, git.GitTokenGenerator, error) { // the client must have been created at least once for us to have generators if o.userGenerator == nil
login, err := o.userGenerator() if err != nil { return "", nil, fmt.Errorf("error getting bot name: %w", err) } return login, git.GitTokenGenerator(o.tokenGenerator), nil } func (o *GitHubOptions) appPrivateKeyGenerator() (func() *rsa.PrivateKey, error) { generator, err := secret.AddWithParser( o.AppPrivateKeyPath, func(raw []byte) (*rsa.PrivateKey, error) { privateKey, err := jwt.ParseRSAPrivateKeyFromPEM(raw) if err != nil { return nil, fmt.Errorf("failed to parse rsa key from pem: %w", err) } return privateKey, nil }, ) if err != nil { return nil, fmt.Errorf("failed to add the key from --app-private-key-path to secret agent: %w", err) } return generator, nil }
{ if _, err := o.GitHubClient(dryRun); err != nil { return "", nil, fmt.Errorf("error getting GitHub client: %w", err) } }
conditional_block
github.go
/* Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package flagutil import ( "crypto/rsa" "errors" "flag" "fmt" "net/url" "strconv" "strings" "time" "github.com/dgrijalva/jwt-go/v4" "github.com/sirupsen/logrus" utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/test-infra/prow/config/secret" "k8s.io/test-infra/prow/git" gitv2 "k8s.io/test-infra/prow/git/v2" "k8s.io/test-infra/prow/github" ) // GitHubOptions holds options for interacting with GitHub. // // Set AllowAnonymous to be true if you want to allow anonymous github access. // Set AllowDirectAccess to be true if you want to suppress warnings on direct github access (without ghproxy). 
type GitHubOptions struct { Host string endpoint Strings graphqlEndpoint string TokenPath string AllowAnonymous bool AllowDirectAccess bool AppID string AppPrivateKeyPath string ThrottleHourlyTokens int ThrottleAllowBurst int OrgThrottlers Strings parsedOrgThrottlers map[string]throttlerSettings // These will only be set after a github client was retrieved for the first time tokenGenerator github.TokenGenerator userGenerator github.UserGenerator // the following options determine how the client behaves around retries maxRequestTime time.Duration maxRetries int max404Retries int initialDelay time.Duration maxSleepTime time.Duration } type throttlerSettings struct { hourlyTokens int burst int } // flagParams struct is used indirectly by users of this package to customize // the common flags behavior, such as providing their own default values // or suppressing presence of certain flags. type flagParams struct { defaults GitHubOptions disableThrottlerOptions bool } type FlagParameter func(options *flagParams) // ThrottlerDefaults allows to customize the default values of flags // that control the throttler behavior. Setting `hourlyTokens` to zero // disables throttling by default. func
(hourlyTokens, allowedBursts int) FlagParameter { return func(o *flagParams) { o.defaults.ThrottleHourlyTokens = hourlyTokens o.defaults.ThrottleAllowBurst = allowedBursts } } // DisableThrottlerOptions suppresses the presence of throttler-related flags, // effectively disallowing external users to parametrize default throttling // behavior. This is useful mostly when a program creates multiple GH clients // with different behavior. func DisableThrottlerOptions() FlagParameter { return func(o *flagParams) { o.disableThrottlerOptions = true } } // AddCustomizedFlags injects GitHub options into the given FlagSet. Behavior can be customized // via the functional options. func (o *GitHubOptions) AddCustomizedFlags(fs *flag.FlagSet, paramFuncs ...FlagParameter) { o.addFlags(fs, paramFuncs...) } // AddFlags injects GitHub options into the given FlagSet func (o *GitHubOptions) AddFlags(fs *flag.FlagSet) { o.addFlags(fs) } func (o *GitHubOptions) addFlags(fs *flag.FlagSet, paramFuncs ...FlagParameter) { params := flagParams{ defaults: GitHubOptions{ Host: github.DefaultHost, endpoint: NewStrings(github.DefaultAPIEndpoint), graphqlEndpoint: github.DefaultGraphQLEndpoint, }, } for _, parametrize := range paramFuncs { parametrize(&params) } defaults := params.defaults fs.StringVar(&o.Host, "github-host", defaults.Host, "GitHub's default host (may differ for enterprise)") o.endpoint = NewStrings(defaults.endpoint.Strings()...) fs.Var(&o.endpoint, "github-endpoint", "GitHub's API endpoint (may differ for enterprise).") fs.StringVar(&o.graphqlEndpoint, "github-graphql-endpoint", defaults.graphqlEndpoint, "GitHub GraphQL API endpoint (may differ for enterprise).") fs.StringVar(&o.TokenPath, "github-token-path", defaults.TokenPath, "Path to the file containing the GitHub OAuth secret.") fs.StringVar(&o.AppID, "github-app-id", defaults.AppID, "ID of the GitHub app. 
If set, requires --github-app-private-key-path to be set and --github-token-path to be unset.") fs.StringVar(&o.AppPrivateKeyPath, "github-app-private-key-path", defaults.AppPrivateKeyPath, "Path to the private key of the github app. If set, requires --github-app-id to bet set and --github-token-path to be unset") if !params.disableThrottlerOptions { fs.IntVar(&o.ThrottleHourlyTokens, "github-hourly-tokens", defaults.ThrottleHourlyTokens, "If set to a value larger than zero, enable client-side throttling to limit hourly token consumption. If set, --github-allowed-burst must be positive too.") fs.IntVar(&o.ThrottleAllowBurst, "github-allowed-burst", defaults.ThrottleAllowBurst, "Size of token consumption bursts. If set, --github-hourly-tokens must be positive too and set to a higher or equal number.") fs.Var(&o.OrgThrottlers, "github-throttle-org", "Throttler settings for a specific org in org:hourlyTokens:burst format. Can be passed multiple times. Only valid when using github apps auth.") } fs.DurationVar(&o.maxRequestTime, "github-client.request-timeout", github.DefaultMaxSleepTime, "Timeout for any single request to the GitHub API.") fs.IntVar(&o.maxRetries, "github-client.max-retries", github.DefaultMaxRetries, "Maximum number of retries that will be used for a failing request to the GitHub API.") fs.IntVar(&o.max404Retries, "github-client.max-404-retries", github.DefaultMax404Retries, "Maximum number of retries that will be used for a 404-ing request to the GitHub API.") fs.DurationVar(&o.maxSleepTime, "github-client.backoff-timeout", github.DefaultMaxSleepTime, "Largest allowable Retry-After time for requests to the GitHub API.") fs.DurationVar(&o.initialDelay, "github-client.initial-delay", github.DefaultInitialDelay, "Initial delay before retries begin for requests to the GitHub API.") } func (o *GitHubOptions) parseOrgThrottlers() error { if len(o.OrgThrottlers.vals) == 0 { return nil } if o.AppID == "" { return errors.New("--github-throttle-org was 
passed, but client doesn't use apps auth") } o.parsedOrgThrottlers = make(map[string]throttlerSettings, len(o.OrgThrottlers.vals)) var errs []error for _, orgThrottler := range o.OrgThrottlers.vals { colonSplit := strings.Split(orgThrottler, ":") if len(colonSplit) != 3 { errs = append(errs, fmt.Errorf("-github-throttle-org=%s is not in org:hourlyTokens:burst format", orgThrottler)) continue } org, hourlyTokensString, burstString := colonSplit[0], colonSplit[1], colonSplit[2] hourlyTokens, err := strconv.ParseInt(hourlyTokensString, 10, 32) if err != nil { errs = append(errs, fmt.Errorf("-github-throttle-org=%s is not in org:hourlyTokens:burst format: hourlyTokens is not an int", orgThrottler)) continue } burst, err := strconv.ParseInt(burstString, 10, 32) if err != nil { errs = append(errs, fmt.Errorf("-github-throttle-org=%s is not in org:hourlyTokens:burst format: burst is not an int", orgThrottler)) continue } if hourlyTokens < 1 { errs = append(errs, fmt.Errorf("-github-throttle-org=%s: hourlyTokens must be > 0", orgThrottler)) continue } if burst < 1 { errs = append(errs, fmt.Errorf("-github-throttle-org=%s: burst must be > 0", orgThrottler)) continue } if burst > hourlyTokens { errs = append(errs, fmt.Errorf("-github-throttle-org=%s: burst must not be greater than hourlyTokens", orgThrottler)) continue } if _, alreadyExists := o.parsedOrgThrottlers[org]; alreadyExists { errs = append(errs, fmt.Errorf("got multiple -github-throttle-org for the %s org", org)) continue } o.parsedOrgThrottlers[org] = throttlerSettings{hourlyTokens: int(hourlyTokens), burst: int(burst)} } return utilerrors.NewAggregate(errs) } // Validate validates GitHub options. Note that validate updates the GitHubOptions // to add default values for TokenPath and graphqlEndpoint. 
func (o *GitHubOptions) Validate(bool) error { endpoints := o.endpoint.Strings() for i, uri := range endpoints { if uri == "" { endpoints[i] = github.DefaultAPIEndpoint } else if _, err := url.ParseRequestURI(uri); err != nil { return fmt.Errorf("invalid -github-endpoint URI: %q", uri) } } if o.TokenPath != "" && (o.AppID != "" || o.AppPrivateKeyPath != "") { return fmt.Errorf("--token-path is mutually exclusive with --app-id and --app-private-key-path") } if o.AppID == "" != (o.AppPrivateKeyPath == "") { return errors.New("--app-id and --app-private-key-path must be set together") } if o.TokenPath != "" && len(endpoints) == 1 && endpoints[0] == github.DefaultAPIEndpoint && !o.AllowDirectAccess { logrus.Warn("It doesn't look like you are using ghproxy to cache API calls to GitHub! This has become a required component of Prow and other components will soon be allowed to add features that may rapidly consume API ratelimit without caching. Starting May 1, 2020 use Prow components without ghproxy at your own risk! 
https://github.com/kubernetes/test-infra/tree/master/ghproxy#ghproxy") } if o.graphqlEndpoint == "" { o.graphqlEndpoint = github.DefaultGraphQLEndpoint } else if _, err := url.Parse(o.graphqlEndpoint); err != nil { return fmt.Errorf("invalid -github-graphql-endpoint URI: %q", o.graphqlEndpoint) } if (o.ThrottleHourlyTokens > 0) != (o.ThrottleAllowBurst > 0) { if o.ThrottleHourlyTokens == 0 { // Tolerate `--github-hourly-tokens=0` alone to disable throttling o.ThrottleAllowBurst = 0 } else { return errors.New("--github-hourly-tokens and --github-allowed-burst must be either both higher than zero or both equal to zero") } } if o.ThrottleAllowBurst > o.ThrottleHourlyTokens { return errors.New("--github-allowed-burst must not be larger than --github-hourly-tokens") } return o.parseOrgThrottlers() } // GitHubClientWithLogFields returns a GitHub client with extra logging fields func (o *GitHubOptions) GitHubClientWithLogFields(dryRun bool, fields logrus.Fields) (github.Client, error) { client, err := o.githubClient(dryRun) if err != nil { return nil, err } return client.WithFields(fields), nil } func (o *GitHubOptions) githubClient(dryRun bool) (github.Client, error) { fields := logrus.Fields{} options := o.baseClientOptions() options.DryRun = dryRun if o.TokenPath == "" && o.AppPrivateKeyPath == "" { logrus.Warn("empty -github-token-path, will use anonymous github client") } if o.TokenPath == "" { options.GetToken = func() []byte { return []byte{} } } else { if err := secret.Add(o.TokenPath); err != nil { return nil, fmt.Errorf("failed to add GitHub token to secret agent: %w", err) } options.GetToken = secret.GetTokenGenerator(o.TokenPath) } if o.AppPrivateKeyPath != "" { apk, err := o.appPrivateKeyGenerator() if err != nil { return nil, err } options.AppPrivateKey = apk } optionallyThrottled := func(c github.Client) (github.Client, error) { // Throttle handles zeros as "disable throttling" so we do not need to call it conditionally if err := 
c.Throttle(o.ThrottleHourlyTokens, o.ThrottleAllowBurst); err != nil { return nil, fmt.Errorf("failed to throttle: %w", err) } for org, settings := range o.parsedOrgThrottlers { if err := c.Throttle(settings.hourlyTokens, settings.burst, org); err != nil { return nil, fmt.Errorf("failed to set up throttling for org %s: %w", org, err) } } return c, nil } tokenGenerator, userGenerator, client, err := github.NewClientFromOptions(fields, options) if err != nil { return nil, fmt.Errorf("failed to construct github client: %w", err) } o.tokenGenerator = tokenGenerator o.userGenerator = userGenerator return optionallyThrottled(client) } // baseClientOptions populates client options that are derived from flags without processing func (o *GitHubOptions) baseClientOptions() github.ClientOptions { return github.ClientOptions{ Censor: secret.Censor, AppID: o.AppID, GraphqlEndpoint: o.graphqlEndpoint, Bases: o.endpoint.Strings(), MaxRequestTime: o.maxRequestTime, InitialDelay: o.initialDelay, MaxSleepTime: o.maxSleepTime, MaxRetries: o.maxRetries, Max404Retries: o.max404Retries, } } // GitHubClient returns a GitHub client. func (o *GitHubOptions) GitHubClient(dryRun bool) (github.Client, error) { return o.GitHubClientWithLogFields(dryRun, logrus.Fields{}) } // GitHubClientWithAccessToken creates a GitHub client from an access token. func (o *GitHubOptions) GitHubClientWithAccessToken(token string) (github.Client, error) { options := o.baseClientOptions() options.GetToken = func() []byte { return []byte(token) } options.AppID = "" // Since we are using a token, we should not use the app auth _, _, client, err := github.NewClientFromOptions(logrus.Fields{}, options) return client, err } // GitClientFactory returns git.ClientFactory. Passing non-empty cookieFilePath // will result in git ClientFactory to work with Gerrit. // TODO(chaodaiG): move this logic to somewhere more appropriate instead of in // github.go. 
func (o *GitHubOptions) GitClientFactory(cookieFilePath string, cacheDir *string, dryRun, persistCache bool) (gitv2.ClientFactory, error) { var gitClientFactory gitv2.ClientFactory if cookieFilePath != "" && o.TokenPath == "" && o.AppPrivateKeyPath == "" { opts := gitv2.ClientFactoryOpts{ CookieFilePath: cookieFilePath, Persist: &persistCache, } if cacheDir != nil && *cacheDir != "" { opts.CacheDirBase = cacheDir } var err error gitClientFactory, err = gitv2.NewClientFactory(opts.Apply) if err != nil { return nil, fmt.Errorf("failed to create git client from cookieFile: %v\n(cookieFile is only for Gerrit)", err) } } else { gitClient, err := o.GitClient(dryRun) if err != nil { return nil, fmt.Errorf("Error getting git client: %w", err) } gitClientFactory = gitv2.ClientFactoryFrom(gitClient) } return gitClientFactory, nil } // GitClient returns a Git client. func (o *GitHubOptions) GitClient(dryRun bool) (client *git.Client, err error) { client, err = git.NewClientWithHost(o.Host) if err != nil { return nil, err } // We must capture the value of client here to prevent issues related // to the use of named return values when an error is encountered. // Without this, we risk a nil pointer dereference. 
defer func(client *git.Client) { if err != nil { client.Clean() } }(client) user, generator, err := o.getGitAuthentication(dryRun) if err != nil { return nil, fmt.Errorf("failed to get git authentication: %w", err) } client.SetCredentials(user, generator) return client, nil } func (o *GitHubOptions) getGitAuthentication(dryRun bool) (string, git.GitTokenGenerator, error) { // the client must have been created at least once for us to have generators if o.userGenerator == nil { if _, err := o.GitHubClient(dryRun); err != nil { return "", nil, fmt.Errorf("error getting GitHub client: %w", err) } } login, err := o.userGenerator() if err != nil { return "", nil, fmt.Errorf("error getting bot name: %w", err) } return login, git.GitTokenGenerator(o.tokenGenerator), nil } func (o *GitHubOptions) appPrivateKeyGenerator() (func() *rsa.PrivateKey, error) { generator, err := secret.AddWithParser( o.AppPrivateKeyPath, func(raw []byte) (*rsa.PrivateKey, error) { privateKey, err := jwt.ParseRSAPrivateKeyFromPEM(raw) if err != nil { return nil, fmt.Errorf("failed to parse rsa key from pem: %w", err) } return privateKey, nil }, ) if err != nil { return nil, fmt.Errorf("failed to add the key from --app-private-key-path to secret agent: %w", err) } return generator, nil }
ThrottlerDefaults
identifier_name
github.go
/* Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package flagutil import ( "crypto/rsa" "errors" "flag" "fmt" "net/url" "strconv" "strings" "time" "github.com/dgrijalva/jwt-go/v4" "github.com/sirupsen/logrus" utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/test-infra/prow/config/secret" "k8s.io/test-infra/prow/git" gitv2 "k8s.io/test-infra/prow/git/v2" "k8s.io/test-infra/prow/github" ) // GitHubOptions holds options for interacting with GitHub. // // Set AllowAnonymous to be true if you want to allow anonymous github access. // Set AllowDirectAccess to be true if you want to suppress warnings on direct github access (without ghproxy). 
type GitHubOptions struct { Host string endpoint Strings graphqlEndpoint string TokenPath string AllowAnonymous bool AllowDirectAccess bool AppID string AppPrivateKeyPath string ThrottleHourlyTokens int ThrottleAllowBurst int OrgThrottlers Strings parsedOrgThrottlers map[string]throttlerSettings // These will only be set after a github client was retrieved for the first time tokenGenerator github.TokenGenerator userGenerator github.UserGenerator // the following options determine how the client behaves around retries maxRequestTime time.Duration maxRetries int max404Retries int initialDelay time.Duration maxSleepTime time.Duration } type throttlerSettings struct { hourlyTokens int burst int } // flagParams struct is used indirectly by users of this package to customize // the common flags behavior, such as providing their own default values // or suppressing presence of certain flags. type flagParams struct { defaults GitHubOptions disableThrottlerOptions bool } type FlagParameter func(options *flagParams) // ThrottlerDefaults allows to customize the default values of flags // that control the throttler behavior. Setting `hourlyTokens` to zero // disables throttling by default. func ThrottlerDefaults(hourlyTokens, allowedBursts int) FlagParameter { return func(o *flagParams) { o.defaults.ThrottleHourlyTokens = hourlyTokens o.defaults.ThrottleAllowBurst = allowedBursts } } // DisableThrottlerOptions suppresses the presence of throttler-related flags, // effectively disallowing external users to parametrize default throttling // behavior. This is useful mostly when a program creates multiple GH clients // with different behavior. func DisableThrottlerOptions() FlagParameter { return func(o *flagParams) { o.disableThrottlerOptions = true } } // AddCustomizedFlags injects GitHub options into the given FlagSet. Behavior can be customized // via the functional options. 
func (o *GitHubOptions) AddCustomizedFlags(fs *flag.FlagSet, paramFuncs ...FlagParameter) { o.addFlags(fs, paramFuncs...) } // AddFlags injects GitHub options into the given FlagSet func (o *GitHubOptions) AddFlags(fs *flag.FlagSet) { o.addFlags(fs) } func (o *GitHubOptions) addFlags(fs *flag.FlagSet, paramFuncs ...FlagParameter) { params := flagParams{ defaults: GitHubOptions{ Host: github.DefaultHost, endpoint: NewStrings(github.DefaultAPIEndpoint), graphqlEndpoint: github.DefaultGraphQLEndpoint, }, } for _, parametrize := range paramFuncs { parametrize(&params) } defaults := params.defaults fs.StringVar(&o.Host, "github-host", defaults.Host, "GitHub's default host (may differ for enterprise)") o.endpoint = NewStrings(defaults.endpoint.Strings()...) fs.Var(&o.endpoint, "github-endpoint", "GitHub's API endpoint (may differ for enterprise).") fs.StringVar(&o.graphqlEndpoint, "github-graphql-endpoint", defaults.graphqlEndpoint, "GitHub GraphQL API endpoint (may differ for enterprise).") fs.StringVar(&o.TokenPath, "github-token-path", defaults.TokenPath, "Path to the file containing the GitHub OAuth secret.") fs.StringVar(&o.AppID, "github-app-id", defaults.AppID, "ID of the GitHub app. If set, requires --github-app-private-key-path to be set and --github-token-path to be unset.") fs.StringVar(&o.AppPrivateKeyPath, "github-app-private-key-path", defaults.AppPrivateKeyPath, "Path to the private key of the github app. If set, requires --github-app-id to bet set and --github-token-path to be unset") if !params.disableThrottlerOptions { fs.IntVar(&o.ThrottleHourlyTokens, "github-hourly-tokens", defaults.ThrottleHourlyTokens, "If set to a value larger than zero, enable client-side throttling to limit hourly token consumption. If set, --github-allowed-burst must be positive too.") fs.IntVar(&o.ThrottleAllowBurst, "github-allowed-burst", defaults.ThrottleAllowBurst, "Size of token consumption bursts. 
If set, --github-hourly-tokens must be positive too and set to a higher or equal number.") fs.Var(&o.OrgThrottlers, "github-throttle-org", "Throttler settings for a specific org in org:hourlyTokens:burst format. Can be passed multiple times. Only valid when using github apps auth.") } fs.DurationVar(&o.maxRequestTime, "github-client.request-timeout", github.DefaultMaxSleepTime, "Timeout for any single request to the GitHub API.") fs.IntVar(&o.maxRetries, "github-client.max-retries", github.DefaultMaxRetries, "Maximum number of retries that will be used for a failing request to the GitHub API.") fs.IntVar(&o.max404Retries, "github-client.max-404-retries", github.DefaultMax404Retries, "Maximum number of retries that will be used for a 404-ing request to the GitHub API.") fs.DurationVar(&o.maxSleepTime, "github-client.backoff-timeout", github.DefaultMaxSleepTime, "Largest allowable Retry-After time for requests to the GitHub API.") fs.DurationVar(&o.initialDelay, "github-client.initial-delay", github.DefaultInitialDelay, "Initial delay before retries begin for requests to the GitHub API.") } func (o *GitHubOptions) parseOrgThrottlers() error { if len(o.OrgThrottlers.vals) == 0 { return nil } if o.AppID == "" { return errors.New("--github-throttle-org was passed, but client doesn't use apps auth") } o.parsedOrgThrottlers = make(map[string]throttlerSettings, len(o.OrgThrottlers.vals)) var errs []error for _, orgThrottler := range o.OrgThrottlers.vals { colonSplit := strings.Split(orgThrottler, ":") if len(colonSplit) != 3 { errs = append(errs, fmt.Errorf("-github-throttle-org=%s is not in org:hourlyTokens:burst format", orgThrottler)) continue } org, hourlyTokensString, burstString := colonSplit[0], colonSplit[1], colonSplit[2] hourlyTokens, err := strconv.ParseInt(hourlyTokensString, 10, 32) if err != nil { errs = append(errs, fmt.Errorf("-github-throttle-org=%s is not in org:hourlyTokens:burst format: hourlyTokens is not an int", orgThrottler)) continue } burst, err 
:= strconv.ParseInt(burstString, 10, 32) if err != nil { errs = append(errs, fmt.Errorf("-github-throttle-org=%s is not in org:hourlyTokens:burst format: burst is not an int", orgThrottler)) continue } if hourlyTokens < 1 { errs = append(errs, fmt.Errorf("-github-throttle-org=%s: hourlyTokens must be > 0", orgThrottler)) continue } if burst < 1 { errs = append(errs, fmt.Errorf("-github-throttle-org=%s: burst must be > 0", orgThrottler)) continue } if burst > hourlyTokens { errs = append(errs, fmt.Errorf("-github-throttle-org=%s: burst must not be greater than hourlyTokens", orgThrottler)) continue } if _, alreadyExists := o.parsedOrgThrottlers[org]; alreadyExists { errs = append(errs, fmt.Errorf("got multiple -github-throttle-org for the %s org", org)) continue } o.parsedOrgThrottlers[org] = throttlerSettings{hourlyTokens: int(hourlyTokens), burst: int(burst)} } return utilerrors.NewAggregate(errs) } // Validate validates GitHub options. Note that validate updates the GitHubOptions // to add default values for TokenPath and graphqlEndpoint. func (o *GitHubOptions) Validate(bool) error { endpoints := o.endpoint.Strings() for i, uri := range endpoints { if uri == "" { endpoints[i] = github.DefaultAPIEndpoint } else if _, err := url.ParseRequestURI(uri); err != nil { return fmt.Errorf("invalid -github-endpoint URI: %q", uri) } } if o.TokenPath != "" && (o.AppID != "" || o.AppPrivateKeyPath != "") { return fmt.Errorf("--token-path is mutually exclusive with --app-id and --app-private-key-path") } if o.AppID == "" != (o.AppPrivateKeyPath == "") { return errors.New("--app-id and --app-private-key-path must be set together") } if o.TokenPath != "" && len(endpoints) == 1 && endpoints[0] == github.DefaultAPIEndpoint && !o.AllowDirectAccess { logrus.Warn("It doesn't look like you are using ghproxy to cache API calls to GitHub! 
This has become a required component of Prow and other components will soon be allowed to add features that may rapidly consume API ratelimit without caching. Starting May 1, 2020 use Prow components without ghproxy at your own risk! https://github.com/kubernetes/test-infra/tree/master/ghproxy#ghproxy") } if o.graphqlEndpoint == "" { o.graphqlEndpoint = github.DefaultGraphQLEndpoint } else if _, err := url.Parse(o.graphqlEndpoint); err != nil { return fmt.Errorf("invalid -github-graphql-endpoint URI: %q", o.graphqlEndpoint) } if (o.ThrottleHourlyTokens > 0) != (o.ThrottleAllowBurst > 0) { if o.ThrottleHourlyTokens == 0 { // Tolerate `--github-hourly-tokens=0` alone to disable throttling o.ThrottleAllowBurst = 0 } else { return errors.New("--github-hourly-tokens and --github-allowed-burst must be either both higher than zero or both equal to zero") } } if o.ThrottleAllowBurst > o.ThrottleHourlyTokens { return errors.New("--github-allowed-burst must not be larger than --github-hourly-tokens") } return o.parseOrgThrottlers() } // GitHubClientWithLogFields returns a GitHub client with extra logging fields func (o *GitHubOptions) GitHubClientWithLogFields(dryRun bool, fields logrus.Fields) (github.Client, error) { client, err := o.githubClient(dryRun) if err != nil { return nil, err } return client.WithFields(fields), nil } func (o *GitHubOptions) githubClient(dryRun bool) (github.Client, error) { fields := logrus.Fields{} options := o.baseClientOptions() options.DryRun = dryRun if o.TokenPath == "" && o.AppPrivateKeyPath == "" { logrus.Warn("empty -github-token-path, will use anonymous github client") } if o.TokenPath == "" { options.GetToken = func() []byte { return []byte{} } } else { if err := secret.Add(o.TokenPath); err != nil { return nil, fmt.Errorf("failed to add GitHub token to secret agent: %w", err) } options.GetToken = secret.GetTokenGenerator(o.TokenPath) } if o.AppPrivateKeyPath != "" { apk, err := o.appPrivateKeyGenerator() if err != nil { return nil, 
err } options.AppPrivateKey = apk } optionallyThrottled := func(c github.Client) (github.Client, error) { // Throttle handles zeros as "disable throttling" so we do not need to call it conditionally if err := c.Throttle(o.ThrottleHourlyTokens, o.ThrottleAllowBurst); err != nil { return nil, fmt.Errorf("failed to throttle: %w", err) } for org, settings := range o.parsedOrgThrottlers { if err := c.Throttle(settings.hourlyTokens, settings.burst, org); err != nil { return nil, fmt.Errorf("failed to set up throttling for org %s: %w", org, err) } } return c, nil } tokenGenerator, userGenerator, client, err := github.NewClientFromOptions(fields, options) if err != nil { return nil, fmt.Errorf("failed to construct github client: %w", err) } o.tokenGenerator = tokenGenerator o.userGenerator = userGenerator return optionallyThrottled(client) } // baseClientOptions populates client options that are derived from flags without processing func (o *GitHubOptions) baseClientOptions() github.ClientOptions { return github.ClientOptions{ Censor: secret.Censor, AppID: o.AppID, GraphqlEndpoint: o.graphqlEndpoint, Bases: o.endpoint.Strings(), MaxRequestTime: o.maxRequestTime, InitialDelay: o.initialDelay, MaxSleepTime: o.maxSleepTime, MaxRetries: o.maxRetries, Max404Retries: o.max404Retries, } } // GitHubClient returns a GitHub client. func (o *GitHubOptions) GitHubClient(dryRun bool) (github.Client, error) { return o.GitHubClientWithLogFields(dryRun, logrus.Fields{}) } // GitHubClientWithAccessToken creates a GitHub client from an access token. func (o *GitHubOptions) GitHubClientWithAccessToken(token string) (github.Client, error) { options := o.baseClientOptions() options.GetToken = func() []byte { return []byte(token) } options.AppID = "" // Since we are using a token, we should not use the app auth _, _, client, err := github.NewClientFromOptions(logrus.Fields{}, options) return client, err } // GitClientFactory returns git.ClientFactory. 
Passing non-empty cookieFilePath // will result in git ClientFactory to work with Gerrit. // TODO(chaodaiG): move this logic to somewhere more appropriate instead of in // github.go. func (o *GitHubOptions) GitClientFactory(cookieFilePath string, cacheDir *string, dryRun, persistCache bool) (gitv2.ClientFactory, error)
// GitClient returns a Git client. func (o *GitHubOptions) GitClient(dryRun bool) (client *git.Client, err error) { client, err = git.NewClientWithHost(o.Host) if err != nil { return nil, err } // We must capture the value of client here to prevent issues related // to the use of named return values when an error is encountered. // Without this, we risk a nil pointer dereference. defer func(client *git.Client) { if err != nil { client.Clean() } }(client) user, generator, err := o.getGitAuthentication(dryRun) if err != nil { return nil, fmt.Errorf("failed to get git authentication: %w", err) } client.SetCredentials(user, generator) return client, nil } func (o *GitHubOptions) getGitAuthentication(dryRun bool) (string, git.GitTokenGenerator, error) { // the client must have been created at least once for us to have generators if o.userGenerator == nil { if _, err := o.GitHubClient(dryRun); err != nil { return "", nil, fmt.Errorf("error getting GitHub client: %w", err) } } login, err := o.userGenerator() if err != nil { return "", nil, fmt.Errorf("error getting bot name: %w", err) } return login, git.GitTokenGenerator(o.tokenGenerator), nil } func (o *GitHubOptions) appPrivateKeyGenerator() (func() *rsa.PrivateKey, error) { generator, err := secret.AddWithParser( o.AppPrivateKeyPath, func(raw []byte) (*rsa.PrivateKey, error) { privateKey, err := jwt.ParseRSAPrivateKeyFromPEM(raw) if err != nil { return nil, fmt.Errorf("failed to parse rsa key from pem: %w", err) } return privateKey, nil }, ) if err != nil { return nil, fmt.Errorf("failed to add the key from --app-private-key-path to secret agent: %w", err) } return generator, nil }
{ var gitClientFactory gitv2.ClientFactory if cookieFilePath != "" && o.TokenPath == "" && o.AppPrivateKeyPath == "" { opts := gitv2.ClientFactoryOpts{ CookieFilePath: cookieFilePath, Persist: &persistCache, } if cacheDir != nil && *cacheDir != "" { opts.CacheDirBase = cacheDir } var err error gitClientFactory, err = gitv2.NewClientFactory(opts.Apply) if err != nil { return nil, fmt.Errorf("failed to create git client from cookieFile: %v\n(cookieFile is only for Gerrit)", err) } } else { gitClient, err := o.GitClient(dryRun) if err != nil { return nil, fmt.Errorf("Error getting git client: %w", err) } gitClientFactory = gitv2.ClientFactoryFrom(gitClient) } return gitClientFactory, nil }
identifier_body
github.go
/* Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package flagutil import ( "crypto/rsa" "errors" "flag" "fmt" "net/url" "strconv" "strings" "time" "github.com/dgrijalva/jwt-go/v4" "github.com/sirupsen/logrus" utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/test-infra/prow/config/secret" "k8s.io/test-infra/prow/git" gitv2 "k8s.io/test-infra/prow/git/v2" "k8s.io/test-infra/prow/github" ) // GitHubOptions holds options for interacting with GitHub. // // Set AllowAnonymous to be true if you want to allow anonymous github access. // Set AllowDirectAccess to be true if you want to suppress warnings on direct github access (without ghproxy). 
type GitHubOptions struct { Host string endpoint Strings graphqlEndpoint string TokenPath string AllowAnonymous bool AllowDirectAccess bool AppID string AppPrivateKeyPath string ThrottleHourlyTokens int ThrottleAllowBurst int OrgThrottlers Strings parsedOrgThrottlers map[string]throttlerSettings // These will only be set after a github client was retrieved for the first time tokenGenerator github.TokenGenerator userGenerator github.UserGenerator // the following options determine how the client behaves around retries maxRequestTime time.Duration maxRetries int max404Retries int initialDelay time.Duration maxSleepTime time.Duration } type throttlerSettings struct { hourlyTokens int burst int } // flagParams struct is used indirectly by users of this package to customize // the common flags behavior, such as providing their own default values // or suppressing presence of certain flags. type flagParams struct { defaults GitHubOptions disableThrottlerOptions bool } type FlagParameter func(options *flagParams) // ThrottlerDefaults allows to customize the default values of flags // that control the throttler behavior. Setting `hourlyTokens` to zero // disables throttling by default. func ThrottlerDefaults(hourlyTokens, allowedBursts int) FlagParameter { return func(o *flagParams) { o.defaults.ThrottleHourlyTokens = hourlyTokens o.defaults.ThrottleAllowBurst = allowedBursts } } // DisableThrottlerOptions suppresses the presence of throttler-related flags, // effectively disallowing external users to parametrize default throttling // behavior. This is useful mostly when a program creates multiple GH clients // with different behavior. func DisableThrottlerOptions() FlagParameter { return func(o *flagParams) { o.disableThrottlerOptions = true } } // AddCustomizedFlags injects GitHub options into the given FlagSet. Behavior can be customized // via the functional options. 
func (o *GitHubOptions) AddCustomizedFlags(fs *flag.FlagSet, paramFuncs ...FlagParameter) { o.addFlags(fs, paramFuncs...) } // AddFlags injects GitHub options into the given FlagSet func (o *GitHubOptions) AddFlags(fs *flag.FlagSet) { o.addFlags(fs) } func (o *GitHubOptions) addFlags(fs *flag.FlagSet, paramFuncs ...FlagParameter) { params := flagParams{ defaults: GitHubOptions{ Host: github.DefaultHost, endpoint: NewStrings(github.DefaultAPIEndpoint), graphqlEndpoint: github.DefaultGraphQLEndpoint, }, } for _, parametrize := range paramFuncs { parametrize(&params) } defaults := params.defaults fs.StringVar(&o.Host, "github-host", defaults.Host, "GitHub's default host (may differ for enterprise)") o.endpoint = NewStrings(defaults.endpoint.Strings()...) fs.Var(&o.endpoint, "github-endpoint", "GitHub's API endpoint (may differ for enterprise).") fs.StringVar(&o.graphqlEndpoint, "github-graphql-endpoint", defaults.graphqlEndpoint, "GitHub GraphQL API endpoint (may differ for enterprise).") fs.StringVar(&o.TokenPath, "github-token-path", defaults.TokenPath, "Path to the file containing the GitHub OAuth secret.") fs.StringVar(&o.AppID, "github-app-id", defaults.AppID, "ID of the GitHub app. If set, requires --github-app-private-key-path to be set and --github-token-path to be unset.") fs.StringVar(&o.AppPrivateKeyPath, "github-app-private-key-path", defaults.AppPrivateKeyPath, "Path to the private key of the github app. If set, requires --github-app-id to bet set and --github-token-path to be unset") if !params.disableThrottlerOptions { fs.IntVar(&o.ThrottleHourlyTokens, "github-hourly-tokens", defaults.ThrottleHourlyTokens, "If set to a value larger than zero, enable client-side throttling to limit hourly token consumption. If set, --github-allowed-burst must be positive too.") fs.IntVar(&o.ThrottleAllowBurst, "github-allowed-burst", defaults.ThrottleAllowBurst, "Size of token consumption bursts. 
If set, --github-hourly-tokens must be positive too and set to a higher or equal number.") fs.Var(&o.OrgThrottlers, "github-throttle-org", "Throttler settings for a specific org in org:hourlyTokens:burst format. Can be passed multiple times. Only valid when using github apps auth.") } fs.DurationVar(&o.maxRequestTime, "github-client.request-timeout", github.DefaultMaxSleepTime, "Timeout for any single request to the GitHub API.") fs.IntVar(&o.maxRetries, "github-client.max-retries", github.DefaultMaxRetries, "Maximum number of retries that will be used for a failing request to the GitHub API.") fs.IntVar(&o.max404Retries, "github-client.max-404-retries", github.DefaultMax404Retries, "Maximum number of retries that will be used for a 404-ing request to the GitHub API.") fs.DurationVar(&o.maxSleepTime, "github-client.backoff-timeout", github.DefaultMaxSleepTime, "Largest allowable Retry-After time for requests to the GitHub API.") fs.DurationVar(&o.initialDelay, "github-client.initial-delay", github.DefaultInitialDelay, "Initial delay before retries begin for requests to the GitHub API.") } func (o *GitHubOptions) parseOrgThrottlers() error { if len(o.OrgThrottlers.vals) == 0 { return nil } if o.AppID == "" { return errors.New("--github-throttle-org was passed, but client doesn't use apps auth") } o.parsedOrgThrottlers = make(map[string]throttlerSettings, len(o.OrgThrottlers.vals)) var errs []error for _, orgThrottler := range o.OrgThrottlers.vals { colonSplit := strings.Split(orgThrottler, ":") if len(colonSplit) != 3 { errs = append(errs, fmt.Errorf("-github-throttle-org=%s is not in org:hourlyTokens:burst format", orgThrottler)) continue } org, hourlyTokensString, burstString := colonSplit[0], colonSplit[1], colonSplit[2] hourlyTokens, err := strconv.ParseInt(hourlyTokensString, 10, 32) if err != nil { errs = append(errs, fmt.Errorf("-github-throttle-org=%s is not in org:hourlyTokens:burst format: hourlyTokens is not an int", orgThrottler)) continue } burst, err 
:= strconv.ParseInt(burstString, 10, 32) if err != nil { errs = append(errs, fmt.Errorf("-github-throttle-org=%s is not in org:hourlyTokens:burst format: burst is not an int", orgThrottler)) continue } if hourlyTokens < 1 { errs = append(errs, fmt.Errorf("-github-throttle-org=%s: hourlyTokens must be > 0", orgThrottler)) continue } if burst < 1 { errs = append(errs, fmt.Errorf("-github-throttle-org=%s: burst must be > 0", orgThrottler)) continue } if burst > hourlyTokens { errs = append(errs, fmt.Errorf("-github-throttle-org=%s: burst must not be greater than hourlyTokens", orgThrottler)) continue } if _, alreadyExists := o.parsedOrgThrottlers[org]; alreadyExists { errs = append(errs, fmt.Errorf("got multiple -github-throttle-org for the %s org", org)) continue } o.parsedOrgThrottlers[org] = throttlerSettings{hourlyTokens: int(hourlyTokens), burst: int(burst)} } return utilerrors.NewAggregate(errs) } // Validate validates GitHub options. Note that validate updates the GitHubOptions // to add default values for TokenPath and graphqlEndpoint. func (o *GitHubOptions) Validate(bool) error { endpoints := o.endpoint.Strings() for i, uri := range endpoints { if uri == "" { endpoints[i] = github.DefaultAPIEndpoint } else if _, err := url.ParseRequestURI(uri); err != nil { return fmt.Errorf("invalid -github-endpoint URI: %q", uri) } } if o.TokenPath != "" && (o.AppID != "" || o.AppPrivateKeyPath != "") { return fmt.Errorf("--token-path is mutually exclusive with --app-id and --app-private-key-path") } if o.AppID == "" != (o.AppPrivateKeyPath == "") { return errors.New("--app-id and --app-private-key-path must be set together") } if o.TokenPath != "" && len(endpoints) == 1 && endpoints[0] == github.DefaultAPIEndpoint && !o.AllowDirectAccess { logrus.Warn("It doesn't look like you are using ghproxy to cache API calls to GitHub! 
This has become a required component of Prow and other components will soon be allowed to add features that may rapidly consume API ratelimit without caching. Starting May 1, 2020 use Prow components without ghproxy at your own risk! https://github.com/kubernetes/test-infra/tree/master/ghproxy#ghproxy") } if o.graphqlEndpoint == "" { o.graphqlEndpoint = github.DefaultGraphQLEndpoint } else if _, err := url.Parse(o.graphqlEndpoint); err != nil { return fmt.Errorf("invalid -github-graphql-endpoint URI: %q", o.graphqlEndpoint) } if (o.ThrottleHourlyTokens > 0) != (o.ThrottleAllowBurst > 0) { if o.ThrottleHourlyTokens == 0 { // Tolerate `--github-hourly-tokens=0` alone to disable throttling o.ThrottleAllowBurst = 0 } else { return errors.New("--github-hourly-tokens and --github-allowed-burst must be either both higher than zero or both equal to zero") } } if o.ThrottleAllowBurst > o.ThrottleHourlyTokens { return errors.New("--github-allowed-burst must not be larger than --github-hourly-tokens") } return o.parseOrgThrottlers() } // GitHubClientWithLogFields returns a GitHub client with extra logging fields func (o *GitHubOptions) GitHubClientWithLogFields(dryRun bool, fields logrus.Fields) (github.Client, error) { client, err := o.githubClient(dryRun) if err != nil { return nil, err } return client.WithFields(fields), nil } func (o *GitHubOptions) githubClient(dryRun bool) (github.Client, error) { fields := logrus.Fields{} options := o.baseClientOptions() options.DryRun = dryRun if o.TokenPath == "" && o.AppPrivateKeyPath == "" { logrus.Warn("empty -github-token-path, will use anonymous github client") } if o.TokenPath == "" { options.GetToken = func() []byte { return []byte{} } } else { if err := secret.Add(o.TokenPath); err != nil { return nil, fmt.Errorf("failed to add GitHub token to secret agent: %w", err) } options.GetToken = secret.GetTokenGenerator(o.TokenPath) } if o.AppPrivateKeyPath != "" { apk, err := o.appPrivateKeyGenerator() if err != nil { return nil, 
err } options.AppPrivateKey = apk } optionallyThrottled := func(c github.Client) (github.Client, error) { // Throttle handles zeros as "disable throttling" so we do not need to call it conditionally if err := c.Throttle(o.ThrottleHourlyTokens, o.ThrottleAllowBurst); err != nil { return nil, fmt.Errorf("failed to throttle: %w", err) } for org, settings := range o.parsedOrgThrottlers { if err := c.Throttle(settings.hourlyTokens, settings.burst, org); err != nil { return nil, fmt.Errorf("failed to set up throttling for org %s: %w", org, err) } } return c, nil } tokenGenerator, userGenerator, client, err := github.NewClientFromOptions(fields, options) if err != nil { return nil, fmt.Errorf("failed to construct github client: %w", err) } o.tokenGenerator = tokenGenerator o.userGenerator = userGenerator return optionallyThrottled(client) } // baseClientOptions populates client options that are derived from flags without processing func (o *GitHubOptions) baseClientOptions() github.ClientOptions { return github.ClientOptions{ Censor: secret.Censor, AppID: o.AppID, GraphqlEndpoint: o.graphqlEndpoint, Bases: o.endpoint.Strings(), MaxRequestTime: o.maxRequestTime, InitialDelay: o.initialDelay, MaxSleepTime: o.maxSleepTime, MaxRetries: o.maxRetries, Max404Retries: o.max404Retries, } } // GitHubClient returns a GitHub client. func (o *GitHubOptions) GitHubClient(dryRun bool) (github.Client, error) { return o.GitHubClientWithLogFields(dryRun, logrus.Fields{}) } // GitHubClientWithAccessToken creates a GitHub client from an access token. func (o *GitHubOptions) GitHubClientWithAccessToken(token string) (github.Client, error) { options := o.baseClientOptions() options.GetToken = func() []byte { return []byte(token) } options.AppID = "" // Since we are using a token, we should not use the app auth _, _, client, err := github.NewClientFromOptions(logrus.Fields{}, options) return client, err } // GitClientFactory returns git.ClientFactory. 
Passing non-empty cookieFilePath // will result in git ClientFactory to work with Gerrit. // TODO(chaodaiG): move this logic to somewhere more appropriate instead of in // github.go. func (o *GitHubOptions) GitClientFactory(cookieFilePath string, cacheDir *string, dryRun, persistCache bool) (gitv2.ClientFactory, error) { var gitClientFactory gitv2.ClientFactory if cookieFilePath != "" && o.TokenPath == "" && o.AppPrivateKeyPath == "" { opts := gitv2.ClientFactoryOpts{ CookieFilePath: cookieFilePath, Persist: &persistCache, } if cacheDir != nil && *cacheDir != "" { opts.CacheDirBase = cacheDir } var err error gitClientFactory, err = gitv2.NewClientFactory(opts.Apply) if err != nil { return nil, fmt.Errorf("failed to create git client from cookieFile: %v\n(cookieFile is only for Gerrit)", err) } } else { gitClient, err := o.GitClient(dryRun) if err != nil { return nil, fmt.Errorf("Error getting git client: %w", err) } gitClientFactory = gitv2.ClientFactoryFrom(gitClient) } return gitClientFactory, nil } // GitClient returns a Git client. func (o *GitHubOptions) GitClient(dryRun bool) (client *git.Client, err error) { client, err = git.NewClientWithHost(o.Host) if err != nil { return nil, err } // We must capture the value of client here to prevent issues related // to the use of named return values when an error is encountered. // Without this, we risk a nil pointer dereference. 
defer func(client *git.Client) { if err != nil { client.Clean() } }(client) user, generator, err := o.getGitAuthentication(dryRun) if err != nil { return nil, fmt.Errorf("failed to get git authentication: %w", err) } client.SetCredentials(user, generator) return client, nil } func (o *GitHubOptions) getGitAuthentication(dryRun bool) (string, git.GitTokenGenerator, error) { // the client must have been created at least once for us to have generators if o.userGenerator == nil { if _, err := o.GitHubClient(dryRun); err != nil { return "", nil, fmt.Errorf("error getting GitHub client: %w", err) } } login, err := o.userGenerator() if err != nil { return "", nil, fmt.Errorf("error getting bot name: %w", err) } return login, git.GitTokenGenerator(o.tokenGenerator), nil } func (o *GitHubOptions) appPrivateKeyGenerator() (func() *rsa.PrivateKey, error) { generator, err := secret.AddWithParser( o.AppPrivateKeyPath, func(raw []byte) (*rsa.PrivateKey, error) { privateKey, err := jwt.ParseRSAPrivateKeyFromPEM(raw) if err != nil { return nil, fmt.Errorf("failed to parse rsa key from pem: %w", err) } return privateKey, nil
) if err != nil { return nil, fmt.Errorf("failed to add the key from --app-private-key-path to secret agent: %w", err) } return generator, nil }
},
random_line_split
token.rs
/** * An advanced fungible token implementation. * */ use near_sdk::serde_json::{self, json}; use near_sdk::borsh::{ self, BorshDeserialize, BorshSerialize}; use near_sdk::{ env, near_bindgen, ext_contract, AccountId, Balance, Promise, StorageUsage}; use near_sdk::collections::LookupMap; use near_sdk::json_types::U128; use crate::receiver::{ ext_token_receiver }; use crate::utils::{ is_promise_success }; // TODO: All gas stipends are more or less random - check througfully const SINGLE_CALL_GAS: u64 = 200000000000000; /** * A balance ledger that keeps track of rollbackable promise transactions. * * TODO: Currently we lock balance by account, but this is not very flexible. * What we really want to is lock balancy by a promise chain. However, this * would need to be able to identify the originating transaction in NEAR * and currently I am not sure if this information is exposed * on the smart contract level. * * https://stackoverflow.com/questions/64170363/tracking-promise-chains-in-near-smart-contract-protocol * */ #[derive(BorshDeserialize, BorshSerialize)] pub struct Ledger { // Total balances, including locked, for each user pub balances: LookupMap<AccountId, Balance>, /// Account has a pending promise chain in progress /// and balance locked is this chain cannot be withdawn. /// If a promise chain is succesful free the locked balance. /// If a promise chain fails, then the send() gets undoed pub locked_balances: LookupMap<AccountId, Balance>, /// Total supply of the token pub total_supply: Balance, /// Helper counter for testing to diagnose /// how many rollbacks have occured pub rollbacks: u64, } impl Ledger { /// Helper method to get the account details for `owner_id`. fn get_balance(&self, owner_id: &AccountId) -> u128 { match self.balances.get(owner_id) { Some(x) => return x, None => return 0, } } /// Helper method to set the account details for `owner_id` to the state. 
fn set_balance(&mut self, owner_id: &AccountId, balance: Balance) { assert!(env::is_valid_account_id(owner_id.as_bytes()), "Owner's account ID is invalid"); self.balances.insert(owner_id, &balance); } /// Helper method to get the account details for `owner_id`. fn get_locked_balance(&self, owner_id: &AccountId) -> Balance { match self.locked_balances.get(owner_id) { Some(x) => return x, None => return 0, } } /** * Send tokens to a new owner. * * message is an optional byte data that is passed to the receiving smart contract. * notify is a flag to tell if we are going to call a smart contract, because this cannot be currently resolved run-time * within NEAR smart contract. */ pub fn send(&mut self, owner_id: AccountId, new_owner_id: AccountId, amount: Balance, message: Vec<u8>) { assert!( env::is_valid_account_id(new_owner_id.as_bytes()), "New owner's account ID is invalid" ); let amount = amount.into(); if amount == 0 { env::panic(b"Can't transfer 0 tokens"); } assert_ne!( owner_id, new_owner_id, "The new owner should be different from the current owner" ); // Retrieving the account from the state. let source_balance = self.get_balance(&owner_id); let source_lock = self.get_locked_balance(&owner_id); // Checking and updating unlocked balance if source_balance < amount { env::panic(format!("Not enough balance, need {}, has {}", amount, source_balance).as_bytes()); } // Checking and updating unlocked balance if source_balance < amount + source_lock { env::panic(format!("Cannot send {} tokens, as account has {} and in tx lock {}", amount, source_balance, source_lock).as_bytes()); } self.set_balance(&owner_id, source_balance - amount); // Deposit amount to the new owner and save the new account to the state. 
let target_balance = self.get_balance(&new_owner_id); let new_target_balance = target_balance + amount; self.set_balance(&new_owner_id, new_target_balance); // This much of user balance is lockedup in promise chains self.set_balance(&new_owner_id, new_target_balance); let target_lock = self.get_locked_balance(&new_owner_id); self.locked_balances.insert(&new_owner_id, &(target_lock + amount)); let promise0 = env::promise_create( new_owner_id.clone(), b"is_receiver", &[], 0, SINGLE_CALL_GAS/3, ); let promise1 = env::promise_then( promise0, env::current_account_id(), b"handle_receiver", json!({ "old_owner_id": owner_id, "new_owner_id": new_owner_id, "amount_received": amount.to_string(), "amount_total": new_target_balance.to_string(), "message": message, }).to_string().as_bytes(), 0, SINGLE_CALL_GAS/3, ); env::promise_return(promise1); } /// All promise chains have been successful, release balance from the lock /// and consider the promise chain final. pub fn finalise(&mut self, new_owner_id: AccountId, amount: Balance) { let target_lock = self.get_locked_balance(&new_owner_id); assert!( target_lock >= amount, "Locked balance cannot go to negative" ); let new_amount = target_lock - amount; self.locked_balances.insert(&new_owner_id, &new_amount); } /// Smart contract call failed. 
We need to roll back the balance update pub fn rollback(&mut self, old_owner_id: AccountId, new_owner_id: AccountId, amount: Balance) { let target_lock = self.get_locked_balance(&new_owner_id); let target_balance = self.get_balance(&new_owner_id); let source_balance = self.get_balance(&old_owner_id); env::log(format!("Rolling back back send of {}, from {} to {}, currently locked {}", amount, old_owner_id, new_owner_id, target_lock).as_bytes()); env::log(format!("New owner balance {}, old owner balance {}", target_balance, source_balance).as_bytes()); assert!( target_lock >= amount, "Locked balance cannot go to negative" ); // Roll back lock let new_amount = target_lock - amount; self.locked_balances.insert(&new_owner_id, &new_amount); self.balances.insert(&new_owner_id, &new_amount); // Rollback new owner let new_target_balance = target_balance - amount; self.set_balance(&new_owner_id, new_target_balance); // Rollback old owner let new_source_balance = source_balance + amount; self.set_balance(&old_owner_id, new_source_balance); let target_balance = self.get_balance(&new_owner_id); let source_balance = self.get_balance(&old_owner_id); self.rollbacks += 1; } } /* * Information about the token. * * We hold the name, symbol and homepage readibly available on chain, but other information must be * from the JSON data. This way we do not bloat the chain size and also make upgrading the information * somewhat easier. * * All metadata fields are optional. */ #[derive(BorshDeserialize, BorshSerialize)] pub struct Metadata { // Name of the token pub name: String, // Symbol of the token pub symbol: String, // URL to the human readable page about the token pub web_link: String, // URL to the metadata file with more information about the token, like different icon sets pub metadata_link: String, } /** * Presents on token. 
*/ #[near_bindgen] #[derive(BorshDeserialize, BorshSerialize)] pub struct Token { pub ledger: Ledger, pub metadata: Metadata, } impl Default for Token { fn default() -> Self { panic!("Token should be initialized before usage") } } #[near_bindgen] impl Token { /// Initializes the contract with the given total supply owned by the given `owner_id`. #[init] pub fn new(owner_id: AccountId, total_supply: Balance) -> Self { assert!(!env::state_exists(), "Already initialized"); let total_supply = total_supply.into(); // Initialize the ledger with the initial total supply let ledger = Ledger { balances: LookupMap::new(b"bal".to_vec()), locked_balances: LookupMap::new(b"lck".to_vec()), total_supply, rollbacks: 0, }; // Currently the constructor does not support passing of metadata. // Start with empty metadata, owner needs to initialize this // after the token has been created in another transaction let metadata = Metadata { name: String::from(""), symbol: String::from(""), web_link: String::from(""), metadata_link: String::from(""), }; let mut token = Self { ledger, metadata }; token.ledger.set_balance(&owner_id, total_supply); return token; } /// Returns total supply of tokens. pub fn get_total_supply(&self) -> Balance { self.ledger.total_supply.into() } /// Returns balance of the `owner_id` account. pub fn get_balance(&self, owner_id: AccountId) -> Balance { self.ledger.get_balance(&owner_id).into() } /// Returns balance lockedin pending transactions pub fn get_locked_balance(&self, owner_id: AccountId) -> Balance
//// How many rollbacks we have had pub fn get_rollback_count(&self) -> u64 { self.ledger.rollbacks } /// Returns balance of the `owner_id` account. pub fn get_name(&self) -> &str { return &self.metadata.name; } /// Send owner's tokens to another person or a smart contract #[payable] pub fn send(&mut self, new_owner_id: AccountId, amount: Balance, message: Vec<u8>) { self.ledger.send(env::predecessor_account_id(), new_owner_id, amount, message); } /** * After trying to call receiving smart contract if it reports it can receive tokens. * * We gpt the interface test promise back. If the account was not smart contract, finalise the transaction. * Otherwise trigger the smart contract notifier. */ pub fn handle_receiver(&mut self, old_owner_id: AccountId, new_owner_id: AccountId, amount_received: U128, amount_total: U128, message: Vec<u8>) { // Only callable by self assert_eq!(env::current_account_id(), env::predecessor_account_id()); env::log(b"handle_receiver reached"); let uint_amount_received: u128 = amount_received.into(); let uint_amount_total: u128 = amount_total.into(); if is_promise_success() { // The send() was destined to a compatible receiver smart contract. // Build another promise that notifies the smart contract // that is has received new tokens. 
env::log(b"Constructing smart contract notifier promise"); let promise0 = env::promise_create( new_owner_id.clone(), b"on_token_received", json!({ "sender_id": old_owner_id, "amount_received": amount_received, "amount_total": amount_total, "message": message, }).to_string().as_bytes(), 0, SINGLE_CALL_GAS/10, ); // Construct the promise that calls back the // token contract to finalise the transaction let promise1 = env::promise_then( promise0, env::current_account_id(), b"handle_token_received", json!({ "old_owner_id": old_owner_id, "new_owner_id": new_owner_id, "amount_received": amount_received, }).to_string().as_bytes(), 0, SINGLE_CALL_GAS/10, ); env::promise_return(promise1); } else { // Non-code account // Finalise transaction now. self.ledger.finalise(new_owner_id, uint_amount_received); } } /// Smart contract notify succeed, free up any locked balance /// TODO: Add functionality so that the smart contract that received tokens can trigger a new promise chain here pub fn handle_token_received(&mut self, old_owner_id: AccountId, new_owner_id: AccountId, amount_received: U128) { // Only callable by self assert_eq!(env::current_account_id(), env::predecessor_account_id()); env::log(b"Checking for the need to rollback smart contract transaction"); let amount_received: u128 = amount_received.into(); // TODO: Have some nice error code logic here if is_promise_success() { self.ledger.finalise(new_owner_id, amount_received); } else { self.ledger.rollback(old_owner_id, new_owner_id, amount_received); } } } #[cfg(test)] mod tests { use super::*; use near_sdk::MockedBlockchain; use near_sdk::{testing_env, VMContext}; fn alice() -> AccountId { "alice.near".to_string() } fn bob() -> AccountId { "bob.near".to_string() } fn carol() -> AccountId { "carol.near".to_string() } fn get_context(predecessor_account_id: AccountId) -> VMContext { VMContext { current_account_id: alice(), signer_account_id: bob(), signer_account_pk: vec![0, 1, 2], predecessor_account_id, input: vec![], 
block_index: 0, block_timestamp: 0, account_balance: 1_000_000_000_000_000_000_000_000_000u128, account_locked_balance: 0, storage_usage: 10u64.pow(6), attached_deposit: 0, prepaid_gas: 10u64.pow(18), random_seed: vec![0, 1, 2], is_view: false, output_data_receivers: vec![], epoch_height: 0, } } #[test] fn test_new() { let context = get_context(carol()); testing_env!(context); let total_supply = 1_000_000_000_000_000u128; let contract = Token::new(bob(), total_supply.into()); assert_eq!(contract.get_total_supply(), total_supply); assert_eq!(contract.get_balance(bob()), total_supply); } }
{ self.ledger.get_locked_balance(&owner_id).into() }
identifier_body
token.rs
/** * An advanced fungible token implementation. * */ use near_sdk::serde_json::{self, json}; use near_sdk::borsh::{ self, BorshDeserialize, BorshSerialize}; use near_sdk::{ env, near_bindgen, ext_contract, AccountId, Balance, Promise, StorageUsage}; use near_sdk::collections::LookupMap; use near_sdk::json_types::U128; use crate::receiver::{ ext_token_receiver }; use crate::utils::{ is_promise_success }; // TODO: All gas stipends are more or less random - check througfully const SINGLE_CALL_GAS: u64 = 200000000000000; /** * A balance ledger that keeps track of rollbackable promise transactions. * * TODO: Currently we lock balance by account, but this is not very flexible. * What we really want to is lock balancy by a promise chain. However, this * would need to be able to identify the originating transaction in NEAR * and currently I am not sure if this information is exposed * on the smart contract level. * * https://stackoverflow.com/questions/64170363/tracking-promise-chains-in-near-smart-contract-protocol * */ #[derive(BorshDeserialize, BorshSerialize)] pub struct Ledger { // Total balances, including locked, for each user pub balances: LookupMap<AccountId, Balance>, /// Account has a pending promise chain in progress /// and balance locked is this chain cannot be withdawn. /// If a promise chain is succesful free the locked balance. /// If a promise chain fails, then the send() gets undoed pub locked_balances: LookupMap<AccountId, Balance>, /// Total supply of the token pub total_supply: Balance, /// Helper counter for testing to diagnose /// how many rollbacks have occured pub rollbacks: u64, } impl Ledger { /// Helper method to get the account details for `owner_id`. fn get_balance(&self, owner_id: &AccountId) -> u128 { match self.balances.get(owner_id) { Some(x) => return x, None => return 0, } } /// Helper method to set the account details for `owner_id` to the state. 
fn set_balance(&mut self, owner_id: &AccountId, balance: Balance) { assert!(env::is_valid_account_id(owner_id.as_bytes()), "Owner's account ID is invalid"); self.balances.insert(owner_id, &balance); } /// Helper method to get the account details for `owner_id`. fn get_locked_balance(&self, owner_id: &AccountId) -> Balance { match self.locked_balances.get(owner_id) { Some(x) => return x, None => return 0, } } /** * Send tokens to a new owner. * * message is an optional byte data that is passed to the receiving smart contract. * notify is a flag to tell if we are going to call a smart contract, because this cannot be currently resolved run-time * within NEAR smart contract. */ pub fn send(&mut self, owner_id: AccountId, new_owner_id: AccountId, amount: Balance, message: Vec<u8>) { assert!( env::is_valid_account_id(new_owner_id.as_bytes()), "New owner's account ID is invalid" ); let amount = amount.into(); if amount == 0 { env::panic(b"Can't transfer 0 tokens"); } assert_ne!( owner_id, new_owner_id, "The new owner should be different from the current owner" ); // Retrieving the account from the state. let source_balance = self.get_balance(&owner_id); let source_lock = self.get_locked_balance(&owner_id); // Checking and updating unlocked balance if source_balance < amount { env::panic(format!("Not enough balance, need {}, has {}", amount, source_balance).as_bytes()); } // Checking and updating unlocked balance if source_balance < amount + source_lock { env::panic(format!("Cannot send {} tokens, as account has {} and in tx lock {}", amount, source_balance, source_lock).as_bytes()); } self.set_balance(&owner_id, source_balance - amount); // Deposit amount to the new owner and save the new account to the state. 
let target_balance = self.get_balance(&new_owner_id); let new_target_balance = target_balance + amount; self.set_balance(&new_owner_id, new_target_balance); // This much of user balance is lockedup in promise chains self.set_balance(&new_owner_id, new_target_balance); let target_lock = self.get_locked_balance(&new_owner_id); self.locked_balances.insert(&new_owner_id, &(target_lock + amount)); let promise0 = env::promise_create( new_owner_id.clone(), b"is_receiver", &[], 0, SINGLE_CALL_GAS/3, ); let promise1 = env::promise_then( promise0, env::current_account_id(), b"handle_receiver", json!({ "old_owner_id": owner_id, "new_owner_id": new_owner_id, "amount_received": amount.to_string(), "amount_total": new_target_balance.to_string(), "message": message, }).to_string().as_bytes(), 0, SINGLE_CALL_GAS/3, ); env::promise_return(promise1); } /// All promise chains have been successful, release balance from the lock /// and consider the promise chain final. pub fn finalise(&mut self, new_owner_id: AccountId, amount: Balance) { let target_lock = self.get_locked_balance(&new_owner_id); assert!( target_lock >= amount, "Locked balance cannot go to negative" ); let new_amount = target_lock - amount; self.locked_balances.insert(&new_owner_id, &new_amount); } /// Smart contract call failed. 
We need to roll back the balance update pub fn rollback(&mut self, old_owner_id: AccountId, new_owner_id: AccountId, amount: Balance) { let target_lock = self.get_locked_balance(&new_owner_id); let target_balance = self.get_balance(&new_owner_id); let source_balance = self.get_balance(&old_owner_id); env::log(format!("Rolling back back send of {}, from {} to {}, currently locked {}", amount, old_owner_id, new_owner_id, target_lock).as_bytes()); env::log(format!("New owner balance {}, old owner balance {}", target_balance, source_balance).as_bytes()); assert!( target_lock >= amount, "Locked balance cannot go to negative" ); // Roll back lock let new_amount = target_lock - amount; self.locked_balances.insert(&new_owner_id, &new_amount); self.balances.insert(&new_owner_id, &new_amount); // Rollback new owner let new_target_balance = target_balance - amount; self.set_balance(&new_owner_id, new_target_balance); // Rollback old owner let new_source_balance = source_balance + amount; self.set_balance(&old_owner_id, new_source_balance); let target_balance = self.get_balance(&new_owner_id); let source_balance = self.get_balance(&old_owner_id); self.rollbacks += 1; } } /* * Information about the token. * * We hold the name, symbol and homepage readibly available on chain, but other information must be * from the JSON data. This way we do not bloat the chain size and also make upgrading the information * somewhat easier. * * All metadata fields are optional. */ #[derive(BorshDeserialize, BorshSerialize)] pub struct Metadata { // Name of the token pub name: String, // Symbol of the token pub symbol: String, // URL to the human readable page about the token pub web_link: String, // URL to the metadata file with more information about the token, like different icon sets pub metadata_link: String, } /** * Presents on token. 
*/ #[near_bindgen] #[derive(BorshDeserialize, BorshSerialize)] pub struct Token { pub ledger: Ledger, pub metadata: Metadata, } impl Default for Token { fn default() -> Self { panic!("Token should be initialized before usage") } } #[near_bindgen] impl Token { /// Initializes the contract with the given total supply owned by the given `owner_id`. #[init] pub fn new(owner_id: AccountId, total_supply: Balance) -> Self { assert!(!env::state_exists(), "Already initialized"); let total_supply = total_supply.into(); // Initialize the ledger with the initial total supply let ledger = Ledger { balances: LookupMap::new(b"bal".to_vec()), locked_balances: LookupMap::new(b"lck".to_vec()), total_supply, rollbacks: 0, }; // Currently the constructor does not support passing of metadata. // Start with empty metadata, owner needs to initialize this // after the token has been created in another transaction let metadata = Metadata { name: String::from(""), symbol: String::from(""), web_link: String::from(""), metadata_link: String::from(""), }; let mut token = Self { ledger, metadata }; token.ledger.set_balance(&owner_id, total_supply); return token; } /// Returns total supply of tokens. pub fn get_total_supply(&self) -> Balance { self.ledger.total_supply.into() } /// Returns balance of the `owner_id` account. pub fn get_balance(&self, owner_id: AccountId) -> Balance { self.ledger.get_balance(&owner_id).into() } /// Returns balance lockedin pending transactions pub fn get_locked_balance(&self, owner_id: AccountId) -> Balance { self.ledger.get_locked_balance(&owner_id).into() } //// How many rollbacks we have had pub fn get_rollback_count(&self) -> u64 { self.ledger.rollbacks } /// Returns balance of the `owner_id` account. 
pub fn get_name(&self) -> &str { return &self.metadata.name; } /// Send owner's tokens to another person or a smart contract #[payable] pub fn send(&mut self, new_owner_id: AccountId, amount: Balance, message: Vec<u8>) { self.ledger.send(env::predecessor_account_id(), new_owner_id, amount, message); } /** * After trying to call receiving smart contract if it reports it can receive tokens. * * We gpt the interface test promise back. If the account was not smart contract, finalise the transaction. * Otherwise trigger the smart contract notifier. */ pub fn handle_receiver(&mut self, old_owner_id: AccountId, new_owner_id: AccountId, amount_received: U128, amount_total: U128, message: Vec<u8>) { // Only callable by self assert_eq!(env::current_account_id(), env::predecessor_account_id()); env::log(b"handle_receiver reached"); let uint_amount_received: u128 = amount_received.into(); let uint_amount_total: u128 = amount_total.into(); if is_promise_success()
else { // Non-code account // Finalise transaction now. self.ledger.finalise(new_owner_id, uint_amount_received); } } /// Smart contract notify succeed, free up any locked balance /// TODO: Add functionality so that the smart contract that received tokens can trigger a new promise chain here pub fn handle_token_received(&mut self, old_owner_id: AccountId, new_owner_id: AccountId, amount_received: U128) { // Only callable by self assert_eq!(env::current_account_id(), env::predecessor_account_id()); env::log(b"Checking for the need to rollback smart contract transaction"); let amount_received: u128 = amount_received.into(); // TODO: Have some nice error code logic here if is_promise_success() { self.ledger.finalise(new_owner_id, amount_received); } else { self.ledger.rollback(old_owner_id, new_owner_id, amount_received); } } } #[cfg(test)] mod tests { use super::*; use near_sdk::MockedBlockchain; use near_sdk::{testing_env, VMContext}; fn alice() -> AccountId { "alice.near".to_string() } fn bob() -> AccountId { "bob.near".to_string() } fn carol() -> AccountId { "carol.near".to_string() } fn get_context(predecessor_account_id: AccountId) -> VMContext { VMContext { current_account_id: alice(), signer_account_id: bob(), signer_account_pk: vec![0, 1, 2], predecessor_account_id, input: vec![], block_index: 0, block_timestamp: 0, account_balance: 1_000_000_000_000_000_000_000_000_000u128, account_locked_balance: 0, storage_usage: 10u64.pow(6), attached_deposit: 0, prepaid_gas: 10u64.pow(18), random_seed: vec![0, 1, 2], is_view: false, output_data_receivers: vec![], epoch_height: 0, } } #[test] fn test_new() { let context = get_context(carol()); testing_env!(context); let total_supply = 1_000_000_000_000_000u128; let contract = Token::new(bob(), total_supply.into()); assert_eq!(contract.get_total_supply(), total_supply); assert_eq!(contract.get_balance(bob()), total_supply); } }
{ // The send() was destined to a compatible receiver smart contract. // Build another promise that notifies the smart contract // that is has received new tokens. env::log(b"Constructing smart contract notifier promise"); let promise0 = env::promise_create( new_owner_id.clone(), b"on_token_received", json!({ "sender_id": old_owner_id, "amount_received": amount_received, "amount_total": amount_total, "message": message, }).to_string().as_bytes(), 0, SINGLE_CALL_GAS/10, ); // Construct the promise that calls back the // token contract to finalise the transaction let promise1 = env::promise_then( promise0, env::current_account_id(), b"handle_token_received", json!({ "old_owner_id": old_owner_id, "new_owner_id": new_owner_id, "amount_received": amount_received, }).to_string().as_bytes(), 0, SINGLE_CALL_GAS/10, ); env::promise_return(promise1); }
conditional_block
token.rs
/** * An advanced fungible token implementation. * */ use near_sdk::serde_json::{self, json}; use near_sdk::borsh::{ self, BorshDeserialize, BorshSerialize}; use near_sdk::{ env, near_bindgen, ext_contract, AccountId, Balance, Promise, StorageUsage}; use near_sdk::collections::LookupMap; use near_sdk::json_types::U128; use crate::receiver::{ ext_token_receiver }; use crate::utils::{ is_promise_success }; // TODO: All gas stipends are more or less random - check througfully const SINGLE_CALL_GAS: u64 = 200000000000000; /** * A balance ledger that keeps track of rollbackable promise transactions. * * TODO: Currently we lock balance by account, but this is not very flexible. * What we really want to is lock balancy by a promise chain. However, this * would need to be able to identify the originating transaction in NEAR * and currently I am not sure if this information is exposed * on the smart contract level. * * https://stackoverflow.com/questions/64170363/tracking-promise-chains-in-near-smart-contract-protocol * */ #[derive(BorshDeserialize, BorshSerialize)] pub struct Ledger { // Total balances, including locked, for each user pub balances: LookupMap<AccountId, Balance>, /// Account has a pending promise chain in progress /// and balance locked is this chain cannot be withdawn. /// If a promise chain is succesful free the locked balance. /// If a promise chain fails, then the send() gets undoed pub locked_balances: LookupMap<AccountId, Balance>, /// Total supply of the token pub total_supply: Balance, /// Helper counter for testing to diagnose /// how many rollbacks have occured pub rollbacks: u64, } impl Ledger { /// Helper method to get the account details for `owner_id`. fn get_balance(&self, owner_id: &AccountId) -> u128 { match self.balances.get(owner_id) { Some(x) => return x, None => return 0, } } /// Helper method to set the account details for `owner_id` to the state. 
fn set_balance(&mut self, owner_id: &AccountId, balance: Balance) { assert!(env::is_valid_account_id(owner_id.as_bytes()), "Owner's account ID is invalid"); self.balances.insert(owner_id, &balance); } /// Helper method to get the account details for `owner_id`. fn get_locked_balance(&self, owner_id: &AccountId) -> Balance { match self.locked_balances.get(owner_id) { Some(x) => return x, None => return 0, } } /** * Send tokens to a new owner. * * message is an optional byte data that is passed to the receiving smart contract. * notify is a flag to tell if we are going to call a smart contract, because this cannot be currently resolved run-time * within NEAR smart contract. */ pub fn send(&mut self, owner_id: AccountId, new_owner_id: AccountId, amount: Balance, message: Vec<u8>) { assert!( env::is_valid_account_id(new_owner_id.as_bytes()), "New owner's account ID is invalid" ); let amount = amount.into(); if amount == 0 { env::panic(b"Can't transfer 0 tokens"); } assert_ne!( owner_id, new_owner_id, "The new owner should be different from the current owner" ); // Retrieving the account from the state. let source_balance = self.get_balance(&owner_id); let source_lock = self.get_locked_balance(&owner_id); // Checking and updating unlocked balance if source_balance < amount { env::panic(format!("Not enough balance, need {}, has {}", amount, source_balance).as_bytes()); } // Checking and updating unlocked balance if source_balance < amount + source_lock { env::panic(format!("Cannot send {} tokens, as account has {} and in tx lock {}", amount, source_balance, source_lock).as_bytes()); } self.set_balance(&owner_id, source_balance - amount); // Deposit amount to the new owner and save the new account to the state. 
let target_balance = self.get_balance(&new_owner_id); let new_target_balance = target_balance + amount; self.set_balance(&new_owner_id, new_target_balance); // This much of user balance is lockedup in promise chains self.set_balance(&new_owner_id, new_target_balance); let target_lock = self.get_locked_balance(&new_owner_id); self.locked_balances.insert(&new_owner_id, &(target_lock + amount)); let promise0 = env::promise_create( new_owner_id.clone(), b"is_receiver", &[], 0, SINGLE_CALL_GAS/3, ); let promise1 = env::promise_then( promise0, env::current_account_id(), b"handle_receiver", json!({ "old_owner_id": owner_id, "new_owner_id": new_owner_id, "amount_received": amount.to_string(), "amount_total": new_target_balance.to_string(), "message": message, }).to_string().as_bytes(), 0, SINGLE_CALL_GAS/3, ); env::promise_return(promise1); } /// All promise chains have been successful, release balance from the lock /// and consider the promise chain final. pub fn finalise(&mut self, new_owner_id: AccountId, amount: Balance) { let target_lock = self.get_locked_balance(&new_owner_id); assert!( target_lock >= amount, "Locked balance cannot go to negative" ); let new_amount = target_lock - amount; self.locked_balances.insert(&new_owner_id, &new_amount); } /// Smart contract call failed. 
We need to roll back the balance update pub fn rollback(&mut self, old_owner_id: AccountId, new_owner_id: AccountId, amount: Balance) { let target_lock = self.get_locked_balance(&new_owner_id); let target_balance = self.get_balance(&new_owner_id); let source_balance = self.get_balance(&old_owner_id); env::log(format!("Rolling back back send of {}, from {} to {}, currently locked {}", amount, old_owner_id, new_owner_id, target_lock).as_bytes()); env::log(format!("New owner balance {}, old owner balance {}", target_balance, source_balance).as_bytes()); assert!( target_lock >= amount, "Locked balance cannot go to negative" ); // Roll back lock let new_amount = target_lock - amount; self.locked_balances.insert(&new_owner_id, &new_amount); self.balances.insert(&new_owner_id, &new_amount); // Rollback new owner let new_target_balance = target_balance - amount; self.set_balance(&new_owner_id, new_target_balance); // Rollback old owner let new_source_balance = source_balance + amount; self.set_balance(&old_owner_id, new_source_balance); let target_balance = self.get_balance(&new_owner_id); let source_balance = self.get_balance(&old_owner_id); self.rollbacks += 1; } } /* * Information about the token. * * We hold the name, symbol and homepage readibly available on chain, but other information must be * from the JSON data. This way we do not bloat the chain size and also make upgrading the information * somewhat easier. * * All metadata fields are optional. */ #[derive(BorshDeserialize, BorshSerialize)] pub struct Metadata { // Name of the token pub name: String, // Symbol of the token pub symbol: String, // URL to the human readable page about the token pub web_link: String, // URL to the metadata file with more information about the token, like different icon sets pub metadata_link: String, } /** * Presents on token. 
*/ #[near_bindgen] #[derive(BorshDeserialize, BorshSerialize)] pub struct Token { pub ledger: Ledger, pub metadata: Metadata, } impl Default for Token { fn default() -> Self { panic!("Token should be initialized before usage") } } #[near_bindgen] impl Token { /// Initializes the contract with the given total supply owned by the given `owner_id`. #[init] pub fn new(owner_id: AccountId, total_supply: Balance) -> Self { assert!(!env::state_exists(), "Already initialized"); let total_supply = total_supply.into(); // Initialize the ledger with the initial total supply let ledger = Ledger { balances: LookupMap::new(b"bal".to_vec()), locked_balances: LookupMap::new(b"lck".to_vec()), total_supply, rollbacks: 0, }; // Currently the constructor does not support passing of metadata. // Start with empty metadata, owner needs to initialize this // after the token has been created in another transaction let metadata = Metadata { name: String::from(""), symbol: String::from(""), web_link: String::from(""), metadata_link: String::from(""), }; let mut token = Self { ledger, metadata }; token.ledger.set_balance(&owner_id, total_supply); return token; } /// Returns total supply of tokens. pub fn get_total_supply(&self) -> Balance { self.ledger.total_supply.into() } /// Returns balance of the `owner_id` account. pub fn get_balance(&self, owner_id: AccountId) -> Balance { self.ledger.get_balance(&owner_id).into() } /// Returns balance lockedin pending transactions pub fn get_locked_balance(&self, owner_id: AccountId) -> Balance { self.ledger.get_locked_balance(&owner_id).into() } //// How many rollbacks we have had pub fn get_rollback_count(&self) -> u64 { self.ledger.rollbacks } /// Returns balance of the `owner_id` account. 
pub fn get_name(&self) -> &str { return &self.metadata.name; } /// Send owner's tokens to another person or a smart contract #[payable] pub fn send(&mut self, new_owner_id: AccountId, amount: Balance, message: Vec<u8>) { self.ledger.send(env::predecessor_account_id(), new_owner_id, amount, message); } /** * After trying to call receiving smart contract if it reports it can receive tokens. * * We gpt the interface test promise back. If the account was not smart contract, finalise the transaction. * Otherwise trigger the smart contract notifier. */ pub fn handle_receiver(&mut self, old_owner_id: AccountId, new_owner_id: AccountId, amount_received: U128, amount_total: U128, message: Vec<u8>) { // Only callable by self assert_eq!(env::current_account_id(), env::predecessor_account_id()); env::log(b"handle_receiver reached"); let uint_amount_received: u128 = amount_received.into(); let uint_amount_total: u128 = amount_total.into(); if is_promise_success() { // The send() was destined to a compatible receiver smart contract. // Build another promise that notifies the smart contract // that is has received new tokens. env::log(b"Constructing smart contract notifier promise"); let promise0 = env::promise_create( new_owner_id.clone(), b"on_token_received", json!({ "sender_id": old_owner_id, "amount_received": amount_received, "amount_total": amount_total, "message": message, }).to_string().as_bytes(), 0, SINGLE_CALL_GAS/10, ); // Construct the promise that calls back the // token contract to finalise the transaction let promise1 = env::promise_then( promise0, env::current_account_id(), b"handle_token_received", json!({ "old_owner_id": old_owner_id, "new_owner_id": new_owner_id, "amount_received": amount_received, }).to_string().as_bytes(), 0, SINGLE_CALL_GAS/10, ); env::promise_return(promise1); } else { // Non-code account // Finalise transaction now. 
self.ledger.finalise(new_owner_id, uint_amount_received); } } /// Smart contract notify succeed, free up any locked balance /// TODO: Add functionality so that the smart contract that received tokens can trigger a new promise chain here pub fn handle_token_received(&mut self, old_owner_id: AccountId, new_owner_id: AccountId, amount_received: U128) { // Only callable by self assert_eq!(env::current_account_id(), env::predecessor_account_id()); env::log(b"Checking for the need to rollback smart contract transaction"); let amount_received: u128 = amount_received.into(); // TODO: Have some nice error code logic here if is_promise_success() { self.ledger.finalise(new_owner_id, amount_received); } else { self.ledger.rollback(old_owner_id, new_owner_id, amount_received); } } } #[cfg(test)] mod tests { use super::*; use near_sdk::MockedBlockchain; use near_sdk::{testing_env, VMContext}; fn alice() -> AccountId { "alice.near".to_string() } fn bob() -> AccountId { "bob.near".to_string() } fn carol() -> AccountId { "carol.near".to_string() } fn get_context(predecessor_account_id: AccountId) -> VMContext { VMContext { current_account_id: alice(), signer_account_id: bob(), signer_account_pk: vec![0, 1, 2], predecessor_account_id, input: vec![],
account_balance: 1_000_000_000_000_000_000_000_000_000u128, account_locked_balance: 0, storage_usage: 10u64.pow(6), attached_deposit: 0, prepaid_gas: 10u64.pow(18), random_seed: vec![0, 1, 2], is_view: false, output_data_receivers: vec![], epoch_height: 0, } } #[test] fn test_new() { let context = get_context(carol()); testing_env!(context); let total_supply = 1_000_000_000_000_000u128; let contract = Token::new(bob(), total_supply.into()); assert_eq!(contract.get_total_supply(), total_supply); assert_eq!(contract.get_balance(bob()), total_supply); } }
block_index: 0, block_timestamp: 0,
random_line_split
token.rs
/** * An advanced fungible token implementation. * */ use near_sdk::serde_json::{self, json}; use near_sdk::borsh::{ self, BorshDeserialize, BorshSerialize}; use near_sdk::{ env, near_bindgen, ext_contract, AccountId, Balance, Promise, StorageUsage}; use near_sdk::collections::LookupMap; use near_sdk::json_types::U128; use crate::receiver::{ ext_token_receiver }; use crate::utils::{ is_promise_success }; // TODO: All gas stipends are more or less random - check througfully const SINGLE_CALL_GAS: u64 = 200000000000000; /** * A balance ledger that keeps track of rollbackable promise transactions. * * TODO: Currently we lock balance by account, but this is not very flexible. * What we really want to is lock balancy by a promise chain. However, this * would need to be able to identify the originating transaction in NEAR * and currently I am not sure if this information is exposed * on the smart contract level. * * https://stackoverflow.com/questions/64170363/tracking-promise-chains-in-near-smart-contract-protocol * */ #[derive(BorshDeserialize, BorshSerialize)] pub struct Ledger { // Total balances, including locked, for each user pub balances: LookupMap<AccountId, Balance>, /// Account has a pending promise chain in progress /// and balance locked is this chain cannot be withdawn. /// If a promise chain is succesful free the locked balance. /// If a promise chain fails, then the send() gets undoed pub locked_balances: LookupMap<AccountId, Balance>, /// Total supply of the token pub total_supply: Balance, /// Helper counter for testing to diagnose /// how many rollbacks have occured pub rollbacks: u64, } impl Ledger { /// Helper method to get the account details for `owner_id`. fn get_balance(&self, owner_id: &AccountId) -> u128 { match self.balances.get(owner_id) { Some(x) => return x, None => return 0, } } /// Helper method to set the account details for `owner_id` to the state. 
fn set_balance(&mut self, owner_id: &AccountId, balance: Balance) { assert!(env::is_valid_account_id(owner_id.as_bytes()), "Owner's account ID is invalid"); self.balances.insert(owner_id, &balance); } /// Helper method to get the account details for `owner_id`. fn get_locked_balance(&self, owner_id: &AccountId) -> Balance { match self.locked_balances.get(owner_id) { Some(x) => return x, None => return 0, } } /** * Send tokens to a new owner. * * message is an optional byte data that is passed to the receiving smart contract. * notify is a flag to tell if we are going to call a smart contract, because this cannot be currently resolved run-time * within NEAR smart contract. */ pub fn send(&mut self, owner_id: AccountId, new_owner_id: AccountId, amount: Balance, message: Vec<u8>) { assert!( env::is_valid_account_id(new_owner_id.as_bytes()), "New owner's account ID is invalid" ); let amount = amount.into(); if amount == 0 { env::panic(b"Can't transfer 0 tokens"); } assert_ne!( owner_id, new_owner_id, "The new owner should be different from the current owner" ); // Retrieving the account from the state. let source_balance = self.get_balance(&owner_id); let source_lock = self.get_locked_balance(&owner_id); // Checking and updating unlocked balance if source_balance < amount { env::panic(format!("Not enough balance, need {}, has {}", amount, source_balance).as_bytes()); } // Checking and updating unlocked balance if source_balance < amount + source_lock { env::panic(format!("Cannot send {} tokens, as account has {} and in tx lock {}", amount, source_balance, source_lock).as_bytes()); } self.set_balance(&owner_id, source_balance - amount); // Deposit amount to the new owner and save the new account to the state. 
let target_balance = self.get_balance(&new_owner_id); let new_target_balance = target_balance + amount; self.set_balance(&new_owner_id, new_target_balance); // This much of user balance is lockedup in promise chains self.set_balance(&new_owner_id, new_target_balance); let target_lock = self.get_locked_balance(&new_owner_id); self.locked_balances.insert(&new_owner_id, &(target_lock + amount)); let promise0 = env::promise_create( new_owner_id.clone(), b"is_receiver", &[], 0, SINGLE_CALL_GAS/3, ); let promise1 = env::promise_then( promise0, env::current_account_id(), b"handle_receiver", json!({ "old_owner_id": owner_id, "new_owner_id": new_owner_id, "amount_received": amount.to_string(), "amount_total": new_target_balance.to_string(), "message": message, }).to_string().as_bytes(), 0, SINGLE_CALL_GAS/3, ); env::promise_return(promise1); } /// All promise chains have been successful, release balance from the lock /// and consider the promise chain final. pub fn finalise(&mut self, new_owner_id: AccountId, amount: Balance) { let target_lock = self.get_locked_balance(&new_owner_id); assert!( target_lock >= amount, "Locked balance cannot go to negative" ); let new_amount = target_lock - amount; self.locked_balances.insert(&new_owner_id, &new_amount); } /// Smart contract call failed. 
We need to roll back the balance update pub fn rollback(&mut self, old_owner_id: AccountId, new_owner_id: AccountId, amount: Balance) { let target_lock = self.get_locked_balance(&new_owner_id); let target_balance = self.get_balance(&new_owner_id); let source_balance = self.get_balance(&old_owner_id); env::log(format!("Rolling back back send of {}, from {} to {}, currently locked {}", amount, old_owner_id, new_owner_id, target_lock).as_bytes()); env::log(format!("New owner balance {}, old owner balance {}", target_balance, source_balance).as_bytes()); assert!( target_lock >= amount, "Locked balance cannot go to negative" ); // Roll back lock let new_amount = target_lock - amount; self.locked_balances.insert(&new_owner_id, &new_amount); self.balances.insert(&new_owner_id, &new_amount); // Rollback new owner let new_target_balance = target_balance - amount; self.set_balance(&new_owner_id, new_target_balance); // Rollback old owner let new_source_balance = source_balance + amount; self.set_balance(&old_owner_id, new_source_balance); let target_balance = self.get_balance(&new_owner_id); let source_balance = self.get_balance(&old_owner_id); self.rollbacks += 1; } } /* * Information about the token. * * We hold the name, symbol and homepage readibly available on chain, but other information must be * from the JSON data. This way we do not bloat the chain size and also make upgrading the information * somewhat easier. * * All metadata fields are optional. */ #[derive(BorshDeserialize, BorshSerialize)] pub struct Metadata { // Name of the token pub name: String, // Symbol of the token pub symbol: String, // URL to the human readable page about the token pub web_link: String, // URL to the metadata file with more information about the token, like different icon sets pub metadata_link: String, } /** * Presents on token. 
*/ #[near_bindgen] #[derive(BorshDeserialize, BorshSerialize)] pub struct Token { pub ledger: Ledger, pub metadata: Metadata, } impl Default for Token { fn default() -> Self { panic!("Token should be initialized before usage") } } #[near_bindgen] impl Token { /// Initializes the contract with the given total supply owned by the given `owner_id`. #[init] pub fn new(owner_id: AccountId, total_supply: Balance) -> Self { assert!(!env::state_exists(), "Already initialized"); let total_supply = total_supply.into(); // Initialize the ledger with the initial total supply let ledger = Ledger { balances: LookupMap::new(b"bal".to_vec()), locked_balances: LookupMap::new(b"lck".to_vec()), total_supply, rollbacks: 0, }; // Currently the constructor does not support passing of metadata. // Start with empty metadata, owner needs to initialize this // after the token has been created in another transaction let metadata = Metadata { name: String::from(""), symbol: String::from(""), web_link: String::from(""), metadata_link: String::from(""), }; let mut token = Self { ledger, metadata }; token.ledger.set_balance(&owner_id, total_supply); return token; } /// Returns total supply of tokens. pub fn get_total_supply(&self) -> Balance { self.ledger.total_supply.into() } /// Returns balance of the `owner_id` account. pub fn get_balance(&self, owner_id: AccountId) -> Balance { self.ledger.get_balance(&owner_id).into() } /// Returns balance lockedin pending transactions pub fn get_locked_balance(&self, owner_id: AccountId) -> Balance { self.ledger.get_locked_balance(&owner_id).into() } //// How many rollbacks we have had pub fn get_rollback_count(&self) -> u64 { self.ledger.rollbacks } /// Returns balance of the `owner_id` account. 
pub fn get_name(&self) -> &str { return &self.metadata.name; } /// Send owner's tokens to another person or a smart contract #[payable] pub fn send(&mut self, new_owner_id: AccountId, amount: Balance, message: Vec<u8>) { self.ledger.send(env::predecessor_account_id(), new_owner_id, amount, message); } /** * After trying to call receiving smart contract if it reports it can receive tokens. * * We gpt the interface test promise back. If the account was not smart contract, finalise the transaction. * Otherwise trigger the smart contract notifier. */ pub fn handle_receiver(&mut self, old_owner_id: AccountId, new_owner_id: AccountId, amount_received: U128, amount_total: U128, message: Vec<u8>) { // Only callable by self assert_eq!(env::current_account_id(), env::predecessor_account_id()); env::log(b"handle_receiver reached"); let uint_amount_received: u128 = amount_received.into(); let uint_amount_total: u128 = amount_total.into(); if is_promise_success() { // The send() was destined to a compatible receiver smart contract. // Build another promise that notifies the smart contract // that is has received new tokens. env::log(b"Constructing smart contract notifier promise"); let promise0 = env::promise_create( new_owner_id.clone(), b"on_token_received", json!({ "sender_id": old_owner_id, "amount_received": amount_received, "amount_total": amount_total, "message": message, }).to_string().as_bytes(), 0, SINGLE_CALL_GAS/10, ); // Construct the promise that calls back the // token contract to finalise the transaction let promise1 = env::promise_then( promise0, env::current_account_id(), b"handle_token_received", json!({ "old_owner_id": old_owner_id, "new_owner_id": new_owner_id, "amount_received": amount_received, }).to_string().as_bytes(), 0, SINGLE_CALL_GAS/10, ); env::promise_return(promise1); } else { // Non-code account // Finalise transaction now. 
self.ledger.finalise(new_owner_id, uint_amount_received); } } /// Smart contract notify succeed, free up any locked balance /// TODO: Add functionality so that the smart contract that received tokens can trigger a new promise chain here pub fn handle_token_received(&mut self, old_owner_id: AccountId, new_owner_id: AccountId, amount_received: U128) { // Only callable by self assert_eq!(env::current_account_id(), env::predecessor_account_id()); env::log(b"Checking for the need to rollback smart contract transaction"); let amount_received: u128 = amount_received.into(); // TODO: Have some nice error code logic here if is_promise_success() { self.ledger.finalise(new_owner_id, amount_received); } else { self.ledger.rollback(old_owner_id, new_owner_id, amount_received); } } } #[cfg(test)] mod tests { use super::*; use near_sdk::MockedBlockchain; use near_sdk::{testing_env, VMContext}; fn alice() -> AccountId { "alice.near".to_string() } fn
() -> AccountId { "bob.near".to_string() } fn carol() -> AccountId { "carol.near".to_string() } fn get_context(predecessor_account_id: AccountId) -> VMContext { VMContext { current_account_id: alice(), signer_account_id: bob(), signer_account_pk: vec![0, 1, 2], predecessor_account_id, input: vec![], block_index: 0, block_timestamp: 0, account_balance: 1_000_000_000_000_000_000_000_000_000u128, account_locked_balance: 0, storage_usage: 10u64.pow(6), attached_deposit: 0, prepaid_gas: 10u64.pow(18), random_seed: vec![0, 1, 2], is_view: false, output_data_receivers: vec![], epoch_height: 0, } } #[test] fn test_new() { let context = get_context(carol()); testing_env!(context); let total_supply = 1_000_000_000_000_000u128; let contract = Token::new(bob(), total_supply.into()); assert_eq!(contract.get_total_supply(), total_supply); assert_eq!(contract.get_balance(bob()), total_supply); } }
bob
identifier_name
mp4-tools.ts
import { ElementaryStreamTypes } from '../loader/fragment'; import { logger } from '../utils/logger'; let USER_DATA_REGISTERED_ITU_T_T35 = 4, RBSP_TRAILING_BITS = 128; const UINT32_MAX = Math.pow(2, 32) - 1; export function bin2str (buffer): string { return String.fromCharCode.apply(null, buffer); } export function readUint32 (buffer, offset): number { if (buffer.data) { offset += buffer.start; buffer = buffer.data; } const val = buffer[offset] << 24 | buffer[offset + 1] << 16 | buffer[offset + 2] << 8 | buffer[offset + 3]; return val < 0 ? 4294967296 + val : val; } export function readUint16 (buffer, offset) { if (buffer.data) { offset += buffer.start; buffer = buffer.data; } const val = buffer[offset] << 8 | buffer[offset + 1]; return val < 0 ? 65536 + val : val; } export function writeUint32 (buffer, offset, value) { if (buffer.data) { offset += buffer.start; buffer = buffer.data; } buffer[offset] = value >> 24; buffer[offset + 1] = (value >> 16) & 0xff; buffer[offset + 2] = (value >> 8) & 0xff; buffer[offset + 3] = value & 0xff; } export function probe (data) { // ensure we find a moof box in the first 16 kB return findBox({ data: data, start: 0, end: Math.min(data.length, 16384) }, ['moof']).length > 0; } // Find the data for a box specified by its path export function findBox (data, path): Array<any> { let results = [] as Array<any>; let i; let size; let type; let end; let subresults; let start; let endbox; if (data.data) { start = data.start; end = data.end; data = data.data; } else { start = 0; end = data.byteLength; } if (!path.length) { // short-circuit the search for empty paths return results; } for (i = start; i < end;) { size = readUint32(data, i); type = bin2str(data.subarray(i + 4, i + 8)); endbox = size > 1 ? 
i + size : end; if (type === path[0]) { if (path.length === 1) { // this is the end of the path and we've found the box we were // looking for results.push({ data: data, start: i + 8, end: endbox }); } else { // recursively search for the next box along the path subresults = findBox({ data: data, start: i + 8, end: endbox }, path.slice(1)); if (subresults.length) { results = results.concat(subresults); } } } i = endbox; } // we've finished searching all of data return results; } interface InitDataTrack { timescale: number, id: number, codec: string } type HdlrType = ElementaryStreamTypes.AUDIO | ElementaryStreamTypes.VIDEO; export interface InitData extends Array<any> { [index: number]: { timescale: number, type: HdlrType }; audio?: InitDataTrack video?: InitDataTrack } export function parseInitSegment (initSegment): InitData { const result: InitData = []; const traks = findBox(initSegment, ['moov', 'trak']); traks.forEach(trak => { const tkhd = findBox(trak, ['tkhd'])[0]; if (tkhd) { let version = tkhd.data[tkhd.start]; let index = version === 0 ? 12 : 20; const trackId = readUint32(tkhd, index); const mdhd = findBox(trak, ['mdia', 'mdhd'])[0]; if (mdhd) { version = mdhd.data[mdhd.start]; index = version === 0 ? 12 : 20; const timescale = readUint32(mdhd, index); const hdlr = findBox(trak, ['mdia', 'hdlr'])[0]; if (hdlr) { const hdlrType = bin2str(hdlr.data.subarray(hdlr.start + 8, hdlr.start + 12)); const type: HdlrType = { soun: ElementaryStreamTypes.AUDIO, vide: ElementaryStreamTypes.VIDEO }[hdlrType]; if (type) { // TODO: Parse codec details to be able to build MIME type. 
const codexBoxes = findBox(trak, ['mdia', 'minf', 'stbl', 'stsd']); let codec; if (codexBoxes.length) { const codecBox = codexBoxes[0]; codec = bin2str(codecBox.data.subarray(codecBox.start + 12, codecBox.start + 16)); } result[trackId] = { timescale, type }; result[type] = { timescale, id: trackId, codec }; } } } } }); return result; } /** * see ANSI/SCTE 128-1 (2013), section 8.1 * * This code was ported from the mux.js project at: https://github.com/videojs/mux.js */ export function parseUserData (sei) { // itu_t_t35_contry_code must be 181 (United States) for // captions if (sei.payload[0] !== 181) { return null; } // itu_t_t35_provider_code should be 49 (ATSC) for captions if (((sei.payload[1] << 8) | sei.payload[2]) !== 49) { return null; } // the user_identifier should be "GA94" to indicate ATSC1 data if (String.fromCharCode(sei.payload[3], sei.payload[4], sei.payload[5], sei.payload[6]) !== 'GA94') { return null; } // finally, user_data_type_code should be 0x03 for caption data if (sei.payload[7] !== 0x03) { return null; } // return the user_data_type_structure and strip the trailing // marker bits return sei.payload.subarray(8, sei.payload.length - 1); } /** * Parse a supplemental enhancement information (SEI) NAL unit. * Stops parsing once a message of type ITU T T35 has been found. * * This code was ported from the mux.js project at: * https://github.com/videojs/mux.js * * @param bytes {Uint8Array} the bytes of a SEI NAL unit * @return {object} the parsed SEI payload * @see Rec. 
ITU-T H.264, 7.3.2.3.1 */ export function parseSei (bytes) { let i = 0, result = { payloadType: -1, payloadSize: 0, payload: null }, payloadType = 0, payloadSize = 0; // go through the sei_rbsp parsing each each individual sei_message while (i < bytes.byteLength) { // stop once we have hit the end of the sei_rbsp if (bytes[i] === RBSP_TRAILING_BITS) { break; } // Parse payload type while (bytes[i] === 0xFF) { payloadType += 255; i++; } payloadType += bytes[i++]; // Parse payload size while (bytes[i] === 0xFF) { payloadSize += 255; i++; } payloadSize += bytes[i++]; // this sei_message is a 608/708 caption so save it and break // there can only ever be one caption message in a frame's sei if (!result.payload && payloadType === USER_DATA_REGISTERED_ITU_T_T35) { result.payloadType = payloadType; result.payloadSize = payloadSize; result.payload = bytes.subarray(i, i + payloadSize); break; } // skip the payload and parse the next message i += payloadSize; payloadType = 0; payloadSize = 0; } return result; } /** * Parses text track samples to be used in 608 extraction * * @param data * @param videoTrackId */ export function parseTextTrackSamplesFromVideoSegment (data, videoTrackId) { let captionNals = parseCaptionNals(data, videoTrackId); return captionNals.reduce((acc, nal) => { const seiNal = parseSei(nal.escapedRBSP); if (seiNal.payload) { const userData = parseUserData(seiNal); const sample = { type: 3, trackId: nal.trackId, pts: nal.pts, dts: nal.dts, bytes: userData }; acc.push(sample); } return acc }, []) } /** * Parses out caption nals from an FMP4 segment's video tracks. 
* * This code was ported from the mux.js project at: https://github.com/videojs/mux.js * @param {Uint8Array} segment - The bytes of a single segment * @param {Number} videoTrackId - The trackId of a video track in the segment * @return {Object.<Number, Object[]>} A mapping of video trackId to * a list of seiNals found in that track **/ export function parseCaptionNals (data, videoTrackId) { let captionNals = [] as any; // To get the samples let trafs = findBox(data, ['moof', 'traf']); // To get SEI NAL units let mdats = findBox(data, ['mdat']); let mdatTrafPairs = [] as any; // Pair up each traf with a mdat as moofs and mdats are in pairs mdats.forEach(function (mdat, index) { let matchingTraf = trafs[index]; mdatTrafPairs.push({ mdat: mdat, traf: matchingTraf }); }); mdatTrafPairs.forEach(function (pair) { let mdat = pair.mdat; let mdatBytes = mdat.data.subarray(mdat.start, mdat.end); let traf = pair.traf; let trafBytes = traf.data.subarray(traf.start, traf.end); let tfhd = findBox(trafBytes, ['tfhd']); // Exactly 1 tfhd per traf let headerInfo = parseTfhd(tfhd[0]); let trackId = headerInfo.trackId; let tfdt = findBox(trafBytes, ['tfdt']); // Either 0 or 1 tfdt per traf let baseMediaDecodeTime = (tfdt.length > 0) ? 
parseTfdt(tfdt[0]).baseMediaDecodeTime : 0; let truns = findBox(trafBytes, ['trun']); let samples; let seiNals; // Only parse video data for the chosen video track if (videoTrackId === trackId && truns.length > 0) { samples = parseSamples(truns, baseMediaDecodeTime, headerInfo); seiNals = findSeiNals(mdatBytes, samples, trackId); captionNals = captionNals.concat(seiNals); } }); return captionNals; } export function parseTfhd (tfhd) { const data = tfhd.data.subarray(tfhd.start, tfhd.end); let view = new DataView(data.buffer, data.byteOffset, data.byteLength), result = { version: data[0], flags: new Uint8Array(data.subarray(1, 4)), trackId: view.getUint32(4) } as any, baseDataOffsetPresent = result.flags[2] & 0x01, sampleDescriptionIndexPresent = result.flags[2] & 0x02, defaultSampleDurationPresent = result.flags[2] & 0x08, defaultSampleSizePresent = result.flags[2] & 0x10, defaultSampleFlagsPresent = result.flags[2] & 0x20, durationIsEmpty = result.flags[0] & 0x010000, defaultBaseIsMoof = result.flags[0] & 0x020000, i; i = 8; if (baseDataOffsetPresent) { i += 4; // truncate top 4 bytes // FIXME: should we read the full 64 bits? 
result.baseDataOffset = view.getUint32(12); i += 4; } if (sampleDescriptionIndexPresent) { result.sampleDescriptionIndex = view.getUint32(i); i += 4; } if (defaultSampleDurationPresent) { result.defaultSampleDuration = view.getUint32(i); i += 4; } if (defaultSampleSizePresent) { result.defaultSampleSize = view.getUint32(i); i += 4; } if (defaultSampleFlagsPresent) { result.defaultSampleFlags = view.getUint32(i); } if (durationIsEmpty) { result.durationIsEmpty = true; } if (!baseDataOffsetPresent && defaultBaseIsMoof) { result.baseDataOffsetIsMoof = true; } return result; } export function parseTfdt (tfdt) { const data = tfdt.data.subarray(tfdt.start, tfdt.end); let result = { version: data[0], flags: new Uint8Array(data.subarray(1, 4)), baseMediaDecodeTime: toUnsigned(data[4] << 24 | data[5] << 16 | data[6] << 8 | data[7]) }; if (result.version === 1) { result.baseMediaDecodeTime *= Math.pow(2, 32); result.baseMediaDecodeTime += toUnsigned(data[8] << 24 | data[9] << 16 | data[10] << 8 | data[11]); } return result; } /** * Parses sample information out of Track Run Boxes and calculates * the absolute presentation and decode timestamps of each sample. * * This code was ported from the mux.js project at: https://github.com/videojs/mux.js * * @param {Array<Uint8Array>} truns - The Trun Run boxes to be parsed * @param {Number} baseMediaDecodeTime - base media decode time from tfdt @see ISO-BMFF-12/2015, Section 8.8.12 * @param {Object} tfhd - The parsed Track Fragment Header * @see inspect.parseTfhd * @return {Object[]} the parsed samples * * @see ISO-BMFF-12/2015, Section 8.8.8 **/ export function parseSamples (truns, baseMediaDecodeTime, tfhd) { let currentDts = baseMediaDecodeTime; let defaultSampleDuration = tfhd.defaultSampleDuration || 0; let defaultSampleSize = tfhd.defaultSampleSize || 0; let trackId = tfhd.trackId; let allSamples = [] as any; truns.forEach(function (trun) { // Note: We currently do not parse the sample table as well // as the trun. 
It's possible some sources will require this. // moov > trak > mdia > minf > stbl let trackRun = parseTrun(trun); let samples = trackRun.samples as any[]; samples.forEach(function (sample) { if (sample.duration === undefined) { sample.duration = defaultSampleDuration; } if (sample.size === undefined) { sample.size = defaultSampleSize; } sample.trackId = trackId; sample.dts = currentDts; if (sample.compositionTimeOffset === undefined) { sample.compositionTimeOffset = 0; } sample.pts = currentDts + sample.compositionTimeOffset; currentDts += sample.duration; }); allSamples = allSamples.concat(samples); }); return allSamples; } /** * Finds SEI nal units contained in a Media Data Box. * Assumes that `parseSamples` has been called first. * * This was ported from the mux.js project at: https://github.com/videojs/mux.js * * @param {Uint8Array} avcStream - The bytes of the mdat * @param {Object[]} samples - The samples parsed out by `parseSamples` * @param {Number} trackId - The trackId of this video track * @return {Object[]} seiNals - the parsed SEI NALUs found. * The contents of the seiNal should match what is expected by * CaptionStream.push (nalUnitType, size, data, escapedRBSP, pts, dts) * * @see ISO-BMFF-12/2015, Section 8.1.1 * @see Rec. 
ITU-T H.264, 7.3.2.3.1 **/ export function findSeiNals (avcStream, samples, trackId) { let avcView = new DataView(avcStream.buffer, avcStream.byteOffset, avcStream.byteLength), result = [] as any, seiNal, i, length, lastMatchedSample; for (i = 0; i + 4 < avcStream.length; i += length) { length = avcView.getUint32(i); i += 4; // Bail if this doesn't appear to be an H264 stream if (length <= 0) { continue; } switch (avcStream[i] & 0x1F) { case 0x06: var data = avcStream.subarray(i + 1, i + 1 + length); var matchingSample = mapToSample(i, samples); seiNal = { nalUnitType: 'sei_rbsp', size: length, data: data, escapedRBSP: discardEmulationPreventionBytes(data), trackId: trackId }; if (matchingSample) { seiNal.pts = matchingSample.pts; seiNal.dts = matchingSample.dts; lastMatchedSample = matchingSample; } else if (lastMatchedSample) { // If a matching sample cannot be found, use the last // sample's values as they should be as close as possible seiNal.pts = lastMatchedSample.pts; seiNal.dts = lastMatchedSample.dts; } else
result.push(seiNal); break; default: break; } } return result; } /** * This code was ported from the mux.js project at: https://github.com/videojs/mux.js * * @param trun */ export function parseTrun (trun) { const data = trun.data.subarray(trun.start, trun.end); let result = { version: data[0], flags: new Uint8Array(data.subarray(1, 4)), samples: [] } as any, view = new DataView(data.buffer, data.byteOffset, data.byteLength), // Flag interpretation dataOffsetPresent = result.flags[2] & 0x01, // compare with 2nd byte of 0x1 firstSampleFlagsPresent = result.flags[2] & 0x04, // compare with 2nd byte of 0x4 sampleDurationPresent = result.flags[1] & 0x01, // compare with 2nd byte of 0x100 sampleSizePresent = result.flags[1] & 0x02, // compare with 2nd byte of 0x200 sampleFlagsPresent = result.flags[1] & 0x04, // compare with 2nd byte of 0x400 sampleCompositionTimeOffsetPresent = result.flags[1] & 0x08, // compare with 2nd byte of 0x800 sampleCount = view.getUint32(4), offset = 8, sample; if (dataOffsetPresent) { // 32 bit signed integer result.dataOffset = view.getInt32(offset); offset += 4; } // Overrides the flags for the first sample only. 
The order of // optional values will be: duration, size, compositionTimeOffset if (firstSampleFlagsPresent && sampleCount) { sample = { flags: parseSampleFlags(data.subarray(offset, offset + 4)) }; offset += 4; if (sampleDurationPresent) { sample.duration = view.getUint32(offset); offset += 4; } if (sampleSizePresent) { sample.size = view.getUint32(offset); offset += 4; } if (sampleCompositionTimeOffsetPresent) { // Note: this should be a signed int if version is 1 sample.compositionTimeOffset = view.getUint32(offset); offset += 4; } result.samples.push(sample); sampleCount--; } while (sampleCount--) { sample = {}; if (sampleDurationPresent) { sample.duration = view.getUint32(offset); offset += 4; } if (sampleSizePresent) { sample.size = view.getUint32(offset); offset += 4; } if (sampleFlagsPresent) { sample.flags = parseSampleFlags(data.subarray(offset, offset + 4)); offset += 4; } if (sampleCompositionTimeOffsetPresent) { // Note: this should be a signed int if version is 1 sample.compositionTimeOffset = view.getUint32(offset); offset += 4; } result.samples.push(sample); } return result; } /** * Parses sample information out of Track Run Boxes and calculates * the absolute presentation and decode timestamps of each sample. 
* * This code was ported from the mux.js project at: https://github.com/videojs/mux.js * * @param {Array<Uint8Array>} truns - The Trun Run boxes to be parsed * @param {Number} baseMediaDecodeTime - base media decode time from tfdt @see ISO-BMFF-12/2015, Section 8.8.12 * @param {Object} tfhd - The parsed Track Fragment Header * @see inspect.parseTfhd * @return {Object[]} the parsed samples * * @see ISO-BMFF-12/2015, Section 8.8.8 **/ export function parseSampleFlags (flags) { return { isLeading: (flags[0] & 0x0c) >>> 2, dependsOn: flags[0] & 0x03, isDependedOn: (flags[1] & 0xc0) >>> 6, hasRedundancy: (flags[1] & 0x30) >>> 4, paddingValue: (flags[1] & 0x0e) >>> 1, isNonSyncSample: flags[1] & 0x01, degradationPriority: (flags[2] << 8) | flags[3] }; } /** * Maps an offset in the mdat to a sample based on the the size of the samples. * Assumes that `parseSamples` has been called first. * * This code was ported from the mux.js project at: https://github.com/videojs/mux.js * * @param {Number} offset - The offset into the mdat * @param {Object[]} samples - An array of samples, parsed using `parseSamples` * @return {?Object} The matching sample, or null if no match was found. * * @see ISO-BMFF-12/2015, Section 8.8.8 **/ export function mapToSample (offset, samples) { let approximateOffset = offset; for (let i = 0; i < samples.length; i++) { let sample = samples[i]; if (approximateOffset < sample.size) { return sample; } approximateOffset -= sample.size; } return null; } /** * Determine the base media decode start time, in seconds, for an MP4 * fragment. If multiple fragments are specified, the earliest time is * returned. * * The base media decode time can be parsed from track fragment * metadata: * ``` * moof > traf > tfdt.baseMediaDecodeTime * ``` * It requires the timescale value from the mdhd to interpret. 
* * @param initData {object} containing information about track * @param fragment mp4 fragment data * @return {number} the earliest base media decode start time for the * fragment, in seconds */ export function getStartDTS (initData, fragment) { let trafs, baseTimes, result; // we need info from two childrend of each track fragment box trafs = findBox(fragment, ['moof', 'traf']); // determine the start times for each track baseTimes = [].concat.apply([], trafs.map(function (traf) { return findBox(traf, ['tfhd']).map(function (tfhd) { let id, scale, baseTime; // get the track id from the tfhd id = readUint32(tfhd, 4); // assume a 90kHz clock if no timescale was specified scale = initData[id].timescale || 90e3; // get the base media decode time from the tfdt baseTime = findBox(traf, ['tfdt']).map(function (tfdt) { let version, result; version = tfdt.data[tfdt.start]; result = readUint32(tfdt, 4); if (version === 1) { result *= Math.pow(2, 32); result += readUint32(tfdt, 8); } return result; })[0]; // convert base time to seconds return baseTime / scale; }); })); // return the minimum result = Math.min.apply(null, baseTimes); return isFinite(result) ? 
result : 0; } export function offsetStartDTS (initData, fragment, timeOffset) { findBox(fragment, ['moof', 'traf']).map(function (traf) { return findBox(traf, ['tfhd']).map(function (tfhd) { // get the track id from the tfhd let id = readUint32(tfhd, 4); // assume a 90kHz clock if no timescale was specified let timescale = initData[id].timescale || 90e3; // get the base media decode time from the tfdt findBox(traf, ['tfdt']).map(function (tfdt) { let version = tfdt.data[tfdt.start]; let baseMediaDecodeTime = readUint32(tfdt, 4); if (version === 0) { writeUint32(tfdt, 4, baseMediaDecodeTime - timeOffset * timescale); } else { baseMediaDecodeTime *= Math.pow(2, 32); baseMediaDecodeTime += readUint32(tfdt, 8); baseMediaDecodeTime -= timeOffset * timescale; baseMediaDecodeTime = Math.max(baseMediaDecodeTime, 0); const upper = Math.floor(baseMediaDecodeTime / (UINT32_MAX + 1)); const lower = Math.floor(baseMediaDecodeTime % (UINT32_MAX + 1)); writeUint32(tfdt, 4, upper); writeUint32(tfdt, 8, lower); } }); }); }); } /** * * This code was ported from the mux.js project at: https://github.com/videojs/mux.js */ export function discardEmulationPreventionBytes (data) { let length = data.byteLength, emulationPreventionBytesPositions = [] as any, i = 1, newLength, newData; // Find all `Emulation Prevention Bytes` while (i < length - 2) { if (data[i] === 0 && data[i + 1] === 0 && data[i + 2] === 0x03) { emulationPreventionBytesPositions.push(i + 2); i += 2; } else { i++; } } // If no Emulation Prevention Bytes were found just return the original // array if (emulationPreventionBytesPositions.length === 0) { return data; } // Create a new array to hold the NAL unit data newLength = length - emulationPreventionBytesPositions.length; newData = new Uint8Array(newLength); let sourceIndex = 0; for (i = 0; i < newLength; sourceIndex++, i++) { if (sourceIndex === emulationPreventionBytesPositions[0]) { // Skip this byte sourceIndex++; // Remove this position index 
emulationPreventionBytesPositions.shift(); } newData[i] = data[sourceIndex]; } return newData; } export function parseSegmentIndex (initSegment) { const moov = findBox(initSegment, ['moov'])[0]; const moovEndOffset = moov ? moov.end : null; // we need this in case we need to chop off garbage of the end of current data let index = 0; let sidx = findBox(initSegment, ['sidx']) as any; let references; if (!sidx || !sidx[0]) { return null; } references = []; sidx = sidx[0]; const version = sidx.data[0]; // set initial offset, we skip the reference ID (not needed) index = version === 0 ? 8 : 16; const timescale = readUint32(sidx, index); index += 4; // TODO: parse earliestPresentationTime and firstOffset // usually zero in our case let earliestPresentationTime = 0; let firstOffset = 0; if (version === 0) { index += 8; } else { index += 16; } // skip reserved index += 2; let startByte = sidx.end + firstOffset; const referencesCount = readUint16(sidx, index); index += 2; for (let i = 0; i < referencesCount; i++) { let referenceIndex = index; const referenceInfo = readUint32(sidx, referenceIndex); referenceIndex += 4; const referenceSize = referenceInfo & 0x7FFFFFFF; const referenceType = (referenceInfo & 0x80000000) >>> 31; if (referenceType === 1) { console.warn('SIDX has hierarchical references (not supported)'); return; } const subsegmentDuration = readUint32(sidx, referenceIndex); referenceIndex += 4; references.push({ referenceSize, subsegmentDuration, // unscaled info: { duration: subsegmentDuration / timescale, start: startByte, end: startByte + referenceSize - 1 } }); startByte += referenceSize; // Skipping 1 bit for |startsWithSap|, 3 bits for |sapType|, and 28 bits // for |sapDelta|. referenceIndex += 4; // skip to next ref index = referenceIndex; } return { earliestPresentationTime, timescale, version, referencesCount, references, moovEndOffset }; } export function toUnsigned (value) { return value >>> 0; };
{ logger.log('We\'ve encountered a nal unit without data. See mux.js#233.'); break; }
conditional_block
mp4-tools.ts
import { ElementaryStreamTypes } from '../loader/fragment'; import { logger } from '../utils/logger'; let USER_DATA_REGISTERED_ITU_T_T35 = 4, RBSP_TRAILING_BITS = 128; const UINT32_MAX = Math.pow(2, 32) - 1; export function bin2str (buffer): string { return String.fromCharCode.apply(null, buffer); } export function readUint32 (buffer, offset): number { if (buffer.data) { offset += buffer.start; buffer = buffer.data; } const val = buffer[offset] << 24 | buffer[offset + 1] << 16 | buffer[offset + 2] << 8 | buffer[offset + 3]; return val < 0 ? 4294967296 + val : val; } export function readUint16 (buffer, offset) { if (buffer.data) { offset += buffer.start; buffer = buffer.data; } const val = buffer[offset] << 8 | buffer[offset + 1]; return val < 0 ? 65536 + val : val; } export function writeUint32 (buffer, offset, value) { if (buffer.data) { offset += buffer.start; buffer = buffer.data; } buffer[offset] = value >> 24; buffer[offset + 1] = (value >> 16) & 0xff; buffer[offset + 2] = (value >> 8) & 0xff; buffer[offset + 3] = value & 0xff; } export function probe (data) { // ensure we find a moof box in the first 16 kB return findBox({ data: data, start: 0, end: Math.min(data.length, 16384) }, ['moof']).length > 0; } // Find the data for a box specified by its path export function findBox (data, path): Array<any> { let results = [] as Array<any>; let i; let size; let type; let end; let subresults; let start; let endbox; if (data.data) { start = data.start; end = data.end; data = data.data; } else { start = 0; end = data.byteLength; } if (!path.length) { // short-circuit the search for empty paths return results; } for (i = start; i < end;) { size = readUint32(data, i); type = bin2str(data.subarray(i + 4, i + 8)); endbox = size > 1 ? 
i + size : end; if (type === path[0]) { if (path.length === 1) { // this is the end of the path and we've found the box we were // looking for results.push({ data: data, start: i + 8, end: endbox }); } else { // recursively search for the next box along the path subresults = findBox({ data: data, start: i + 8, end: endbox }, path.slice(1)); if (subresults.length) { results = results.concat(subresults); } } } i = endbox; } // we've finished searching all of data return results; } interface InitDataTrack { timescale: number, id: number, codec: string } type HdlrType = ElementaryStreamTypes.AUDIO | ElementaryStreamTypes.VIDEO; export interface InitData extends Array<any> { [index: number]: { timescale: number, type: HdlrType }; audio?: InitDataTrack video?: InitDataTrack } export function parseInitSegment (initSegment): InitData { const result: InitData = []; const traks = findBox(initSegment, ['moov', 'trak']); traks.forEach(trak => { const tkhd = findBox(trak, ['tkhd'])[0]; if (tkhd) { let version = tkhd.data[tkhd.start]; let index = version === 0 ? 12 : 20; const trackId = readUint32(tkhd, index); const mdhd = findBox(trak, ['mdia', 'mdhd'])[0]; if (mdhd) { version = mdhd.data[mdhd.start]; index = version === 0 ? 12 : 20; const timescale = readUint32(mdhd, index); const hdlr = findBox(trak, ['mdia', 'hdlr'])[0]; if (hdlr) { const hdlrType = bin2str(hdlr.data.subarray(hdlr.start + 8, hdlr.start + 12)); const type: HdlrType = { soun: ElementaryStreamTypes.AUDIO, vide: ElementaryStreamTypes.VIDEO }[hdlrType]; if (type) { // TODO: Parse codec details to be able to build MIME type. 
const codexBoxes = findBox(trak, ['mdia', 'minf', 'stbl', 'stsd']); let codec; if (codexBoxes.length) { const codecBox = codexBoxes[0]; codec = bin2str(codecBox.data.subarray(codecBox.start + 12, codecBox.start + 16)); } result[trackId] = { timescale, type }; result[type] = { timescale, id: trackId, codec }; } } } } }); return result; } /** * see ANSI/SCTE 128-1 (2013), section 8.1 * * This code was ported from the mux.js project at: https://github.com/videojs/mux.js */ export function parseUserData (sei) { // itu_t_t35_contry_code must be 181 (United States) for // captions if (sei.payload[0] !== 181) { return null; } // itu_t_t35_provider_code should be 49 (ATSC) for captions if (((sei.payload[1] << 8) | sei.payload[2]) !== 49) { return null; } // the user_identifier should be "GA94" to indicate ATSC1 data if (String.fromCharCode(sei.payload[3], sei.payload[4], sei.payload[5], sei.payload[6]) !== 'GA94') { return null; } // finally, user_data_type_code should be 0x03 for caption data if (sei.payload[7] !== 0x03) { return null; } // return the user_data_type_structure and strip the trailing // marker bits return sei.payload.subarray(8, sei.payload.length - 1); } /** * Parse a supplemental enhancement information (SEI) NAL unit. * Stops parsing once a message of type ITU T T35 has been found. * * This code was ported from the mux.js project at: * https://github.com/videojs/mux.js * * @param bytes {Uint8Array} the bytes of a SEI NAL unit * @return {object} the parsed SEI payload * @see Rec. 
ITU-T H.264, 7.3.2.3.1 */ export function parseSei (bytes) { let i = 0, result = { payloadType: -1, payloadSize: 0, payload: null }, payloadType = 0, payloadSize = 0; // go through the sei_rbsp parsing each each individual sei_message while (i < bytes.byteLength) { // stop once we have hit the end of the sei_rbsp if (bytes[i] === RBSP_TRAILING_BITS) { break; } // Parse payload type while (bytes[i] === 0xFF) { payloadType += 255; i++; } payloadType += bytes[i++]; // Parse payload size while (bytes[i] === 0xFF) { payloadSize += 255; i++; } payloadSize += bytes[i++]; // this sei_message is a 608/708 caption so save it and break // there can only ever be one caption message in a frame's sei if (!result.payload && payloadType === USER_DATA_REGISTERED_ITU_T_T35) { result.payloadType = payloadType; result.payloadSize = payloadSize; result.payload = bytes.subarray(i, i + payloadSize); break; } // skip the payload and parse the next message i += payloadSize; payloadType = 0; payloadSize = 0; } return result; } /** * Parses text track samples to be used in 608 extraction * * @param data * @param videoTrackId */ export function parseTextTrackSamplesFromVideoSegment (data, videoTrackId) { let captionNals = parseCaptionNals(data, videoTrackId); return captionNals.reduce((acc, nal) => { const seiNal = parseSei(nal.escapedRBSP); if (seiNal.payload) { const userData = parseUserData(seiNal); const sample = { type: 3, trackId: nal.trackId, pts: nal.pts, dts: nal.dts, bytes: userData }; acc.push(sample); } return acc }, []) } /** * Parses out caption nals from an FMP4 segment's video tracks. 
* * This code was ported from the mux.js project at: https://github.com/videojs/mux.js * @param {Uint8Array} segment - The bytes of a single segment * @param {Number} videoTrackId - The trackId of a video track in the segment * @return {Object.<Number, Object[]>} A mapping of video trackId to * a list of seiNals found in that track **/ export function parseCaptionNals (data, videoTrackId) { let captionNals = [] as any; // To get the samples let trafs = findBox(data, ['moof', 'traf']); // To get SEI NAL units let mdats = findBox(data, ['mdat']); let mdatTrafPairs = [] as any; // Pair up each traf with a mdat as moofs and mdats are in pairs mdats.forEach(function (mdat, index) { let matchingTraf = trafs[index]; mdatTrafPairs.push({ mdat: mdat, traf: matchingTraf }); }); mdatTrafPairs.forEach(function (pair) { let mdat = pair.mdat; let mdatBytes = mdat.data.subarray(mdat.start, mdat.end); let traf = pair.traf; let trafBytes = traf.data.subarray(traf.start, traf.end); let tfhd = findBox(trafBytes, ['tfhd']); // Exactly 1 tfhd per traf let headerInfo = parseTfhd(tfhd[0]); let trackId = headerInfo.trackId; let tfdt = findBox(trafBytes, ['tfdt']); // Either 0 or 1 tfdt per traf let baseMediaDecodeTime = (tfdt.length > 0) ? 
parseTfdt(tfdt[0]).baseMediaDecodeTime : 0; let truns = findBox(trafBytes, ['trun']); let samples; let seiNals; // Only parse video data for the chosen video track if (videoTrackId === trackId && truns.length > 0) { samples = parseSamples(truns, baseMediaDecodeTime, headerInfo); seiNals = findSeiNals(mdatBytes, samples, trackId); captionNals = captionNals.concat(seiNals); } }); return captionNals; } export function parseTfhd (tfhd) { const data = tfhd.data.subarray(tfhd.start, tfhd.end); let view = new DataView(data.buffer, data.byteOffset, data.byteLength), result = { version: data[0], flags: new Uint8Array(data.subarray(1, 4)), trackId: view.getUint32(4) } as any, baseDataOffsetPresent = result.flags[2] & 0x01, sampleDescriptionIndexPresent = result.flags[2] & 0x02, defaultSampleDurationPresent = result.flags[2] & 0x08, defaultSampleSizePresent = result.flags[2] & 0x10, defaultSampleFlagsPresent = result.flags[2] & 0x20, durationIsEmpty = result.flags[0] & 0x010000, defaultBaseIsMoof = result.flags[0] & 0x020000, i; i = 8; if (baseDataOffsetPresent) { i += 4; // truncate top 4 bytes // FIXME: should we read the full 64 bits? 
result.baseDataOffset = view.getUint32(12); i += 4; } if (sampleDescriptionIndexPresent) { result.sampleDescriptionIndex = view.getUint32(i); i += 4; } if (defaultSampleDurationPresent) { result.defaultSampleDuration = view.getUint32(i); i += 4; } if (defaultSampleSizePresent) { result.defaultSampleSize = view.getUint32(i); i += 4; } if (defaultSampleFlagsPresent) { result.defaultSampleFlags = view.getUint32(i); } if (durationIsEmpty) { result.durationIsEmpty = true; } if (!baseDataOffsetPresent && defaultBaseIsMoof) { result.baseDataOffsetIsMoof = true; } return result; } export function parseTfdt (tfdt) { const data = tfdt.data.subarray(tfdt.start, tfdt.end); let result = { version: data[0], flags: new Uint8Array(data.subarray(1, 4)), baseMediaDecodeTime: toUnsigned(data[4] << 24 | data[5] << 16 | data[6] << 8 | data[7]) }; if (result.version === 1) { result.baseMediaDecodeTime *= Math.pow(2, 32); result.baseMediaDecodeTime += toUnsigned(data[8] << 24 | data[9] << 16 | data[10] << 8 | data[11]); } return result; } /** * Parses sample information out of Track Run Boxes and calculates * the absolute presentation and decode timestamps of each sample. * * This code was ported from the mux.js project at: https://github.com/videojs/mux.js * * @param {Array<Uint8Array>} truns - The Trun Run boxes to be parsed * @param {Number} baseMediaDecodeTime - base media decode time from tfdt @see ISO-BMFF-12/2015, Section 8.8.12 * @param {Object} tfhd - The parsed Track Fragment Header * @see inspect.parseTfhd * @return {Object[]} the parsed samples * * @see ISO-BMFF-12/2015, Section 8.8.8 **/ export function parseSamples (truns, baseMediaDecodeTime, tfhd) { let currentDts = baseMediaDecodeTime; let defaultSampleDuration = tfhd.defaultSampleDuration || 0; let defaultSampleSize = tfhd.defaultSampleSize || 0; let trackId = tfhd.trackId; let allSamples = [] as any; truns.forEach(function (trun) { // Note: We currently do not parse the sample table as well // as the trun. 
It's possible some sources will require this. // moov > trak > mdia > minf > stbl let trackRun = parseTrun(trun); let samples = trackRun.samples as any[]; samples.forEach(function (sample) { if (sample.duration === undefined) { sample.duration = defaultSampleDuration; } if (sample.size === undefined) { sample.size = defaultSampleSize; } sample.trackId = trackId; sample.dts = currentDts; if (sample.compositionTimeOffset === undefined) { sample.compositionTimeOffset = 0; } sample.pts = currentDts + sample.compositionTimeOffset; currentDts += sample.duration; }); allSamples = allSamples.concat(samples); }); return allSamples; } /** * Finds SEI nal units contained in a Media Data Box. * Assumes that `parseSamples` has been called first. * * This was ported from the mux.js project at: https://github.com/videojs/mux.js * * @param {Uint8Array} avcStream - The bytes of the mdat * @param {Object[]} samples - The samples parsed out by `parseSamples` * @param {Number} trackId - The trackId of this video track * @return {Object[]} seiNals - the parsed SEI NALUs found. * The contents of the seiNal should match what is expected by * CaptionStream.push (nalUnitType, size, data, escapedRBSP, pts, dts) * * @see ISO-BMFF-12/2015, Section 8.1.1 * @see Rec. 
ITU-T H.264, 7.3.2.3.1 **/ export function findSeiNals (avcStream, samples, trackId) { let avcView = new DataView(avcStream.buffer, avcStream.byteOffset, avcStream.byteLength), result = [] as any, seiNal, i, length, lastMatchedSample; for (i = 0; i + 4 < avcStream.length; i += length) { length = avcView.getUint32(i); i += 4; // Bail if this doesn't appear to be an H264 stream if (length <= 0) { continue; } switch (avcStream[i] & 0x1F) { case 0x06: var data = avcStream.subarray(i + 1, i + 1 + length); var matchingSample = mapToSample(i, samples); seiNal = { nalUnitType: 'sei_rbsp', size: length, data: data, escapedRBSP: discardEmulationPreventionBytes(data), trackId: trackId }; if (matchingSample) { seiNal.pts = matchingSample.pts; seiNal.dts = matchingSample.dts; lastMatchedSample = matchingSample; } else if (lastMatchedSample) { // If a matching sample cannot be found, use the last // sample's values as they should be as close as possible seiNal.pts = lastMatchedSample.pts; seiNal.dts = lastMatchedSample.dts; } else { logger.log('We\'ve encountered a nal unit without data. See mux.js#233.'); break; } result.push(seiNal); break; default: break; } } return result; } /** * This code was ported from the mux.js project at: https://github.com/videojs/mux.js * * @param trun */ export function
(trun) { const data = trun.data.subarray(trun.start, trun.end); let result = { version: data[0], flags: new Uint8Array(data.subarray(1, 4)), samples: [] } as any, view = new DataView(data.buffer, data.byteOffset, data.byteLength), // Flag interpretation dataOffsetPresent = result.flags[2] & 0x01, // compare with 2nd byte of 0x1 firstSampleFlagsPresent = result.flags[2] & 0x04, // compare with 2nd byte of 0x4 sampleDurationPresent = result.flags[1] & 0x01, // compare with 2nd byte of 0x100 sampleSizePresent = result.flags[1] & 0x02, // compare with 2nd byte of 0x200 sampleFlagsPresent = result.flags[1] & 0x04, // compare with 2nd byte of 0x400 sampleCompositionTimeOffsetPresent = result.flags[1] & 0x08, // compare with 2nd byte of 0x800 sampleCount = view.getUint32(4), offset = 8, sample; if (dataOffsetPresent) { // 32 bit signed integer result.dataOffset = view.getInt32(offset); offset += 4; } // Overrides the flags for the first sample only. The order of // optional values will be: duration, size, compositionTimeOffset if (firstSampleFlagsPresent && sampleCount) { sample = { flags: parseSampleFlags(data.subarray(offset, offset + 4)) }; offset += 4; if (sampleDurationPresent) { sample.duration = view.getUint32(offset); offset += 4; } if (sampleSizePresent) { sample.size = view.getUint32(offset); offset += 4; } if (sampleCompositionTimeOffsetPresent) { // Note: this should be a signed int if version is 1 sample.compositionTimeOffset = view.getUint32(offset); offset += 4; } result.samples.push(sample); sampleCount--; } while (sampleCount--) { sample = {}; if (sampleDurationPresent) { sample.duration = view.getUint32(offset); offset += 4; } if (sampleSizePresent) { sample.size = view.getUint32(offset); offset += 4; } if (sampleFlagsPresent) { sample.flags = parseSampleFlags(data.subarray(offset, offset + 4)); offset += 4; } if (sampleCompositionTimeOffsetPresent) { // Note: this should be a signed int if version is 1 sample.compositionTimeOffset = 
view.getUint32(offset); offset += 4; } result.samples.push(sample); } return result; } /** * Parses sample information out of Track Run Boxes and calculates * the absolute presentation and decode timestamps of each sample. * * This code was ported from the mux.js project at: https://github.com/videojs/mux.js * * @param {Array<Uint8Array>} truns - The Trun Run boxes to be parsed * @param {Number} baseMediaDecodeTime - base media decode time from tfdt @see ISO-BMFF-12/2015, Section 8.8.12 * @param {Object} tfhd - The parsed Track Fragment Header * @see inspect.parseTfhd * @return {Object[]} the parsed samples * * @see ISO-BMFF-12/2015, Section 8.8.8 **/ export function parseSampleFlags (flags) { return { isLeading: (flags[0] & 0x0c) >>> 2, dependsOn: flags[0] & 0x03, isDependedOn: (flags[1] & 0xc0) >>> 6, hasRedundancy: (flags[1] & 0x30) >>> 4, paddingValue: (flags[1] & 0x0e) >>> 1, isNonSyncSample: flags[1] & 0x01, degradationPriority: (flags[2] << 8) | flags[3] }; } /** * Maps an offset in the mdat to a sample based on the the size of the samples. * Assumes that `parseSamples` has been called first. * * This code was ported from the mux.js project at: https://github.com/videojs/mux.js * * @param {Number} offset - The offset into the mdat * @param {Object[]} samples - An array of samples, parsed using `parseSamples` * @return {?Object} The matching sample, or null if no match was found. * * @see ISO-BMFF-12/2015, Section 8.8.8 **/ export function mapToSample (offset, samples) { let approximateOffset = offset; for (let i = 0; i < samples.length; i++) { let sample = samples[i]; if (approximateOffset < sample.size) { return sample; } approximateOffset -= sample.size; } return null; } /** * Determine the base media decode start time, in seconds, for an MP4 * fragment. If multiple fragments are specified, the earliest time is * returned. 
* * The base media decode time can be parsed from track fragment * metadata: * ``` * moof > traf > tfdt.baseMediaDecodeTime * ``` * It requires the timescale value from the mdhd to interpret. * * @param initData {object} containing information about track * @param fragment mp4 fragment data * @return {number} the earliest base media decode start time for the * fragment, in seconds */ export function getStartDTS (initData, fragment) { let trafs, baseTimes, result; // we need info from two childrend of each track fragment box trafs = findBox(fragment, ['moof', 'traf']); // determine the start times for each track baseTimes = [].concat.apply([], trafs.map(function (traf) { return findBox(traf, ['tfhd']).map(function (tfhd) { let id, scale, baseTime; // get the track id from the tfhd id = readUint32(tfhd, 4); // assume a 90kHz clock if no timescale was specified scale = initData[id].timescale || 90e3; // get the base media decode time from the tfdt baseTime = findBox(traf, ['tfdt']).map(function (tfdt) { let version, result; version = tfdt.data[tfdt.start]; result = readUint32(tfdt, 4); if (version === 1) { result *= Math.pow(2, 32); result += readUint32(tfdt, 8); } return result; })[0]; // convert base time to seconds return baseTime / scale; }); })); // return the minimum result = Math.min.apply(null, baseTimes); return isFinite(result) ? 
result : 0; } export function offsetStartDTS (initData, fragment, timeOffset) { findBox(fragment, ['moof', 'traf']).map(function (traf) { return findBox(traf, ['tfhd']).map(function (tfhd) { // get the track id from the tfhd let id = readUint32(tfhd, 4); // assume a 90kHz clock if no timescale was specified let timescale = initData[id].timescale || 90e3; // get the base media decode time from the tfdt findBox(traf, ['tfdt']).map(function (tfdt) { let version = tfdt.data[tfdt.start]; let baseMediaDecodeTime = readUint32(tfdt, 4); if (version === 0) { writeUint32(tfdt, 4, baseMediaDecodeTime - timeOffset * timescale); } else { baseMediaDecodeTime *= Math.pow(2, 32); baseMediaDecodeTime += readUint32(tfdt, 8); baseMediaDecodeTime -= timeOffset * timescale; baseMediaDecodeTime = Math.max(baseMediaDecodeTime, 0); const upper = Math.floor(baseMediaDecodeTime / (UINT32_MAX + 1)); const lower = Math.floor(baseMediaDecodeTime % (UINT32_MAX + 1)); writeUint32(tfdt, 4, upper); writeUint32(tfdt, 8, lower); } }); }); }); } /** * * This code was ported from the mux.js project at: https://github.com/videojs/mux.js */ export function discardEmulationPreventionBytes (data) { let length = data.byteLength, emulationPreventionBytesPositions = [] as any, i = 1, newLength, newData; // Find all `Emulation Prevention Bytes` while (i < length - 2) { if (data[i] === 0 && data[i + 1] === 0 && data[i + 2] === 0x03) { emulationPreventionBytesPositions.push(i + 2); i += 2; } else { i++; } } // If no Emulation Prevention Bytes were found just return the original // array if (emulationPreventionBytesPositions.length === 0) { return data; } // Create a new array to hold the NAL unit data newLength = length - emulationPreventionBytesPositions.length; newData = new Uint8Array(newLength); let sourceIndex = 0; for (i = 0; i < newLength; sourceIndex++, i++) { if (sourceIndex === emulationPreventionBytesPositions[0]) { // Skip this byte sourceIndex++; // Remove this position index 
emulationPreventionBytesPositions.shift(); } newData[i] = data[sourceIndex]; } return newData; } export function parseSegmentIndex (initSegment) { const moov = findBox(initSegment, ['moov'])[0]; const moovEndOffset = moov ? moov.end : null; // we need this in case we need to chop off garbage of the end of current data let index = 0; let sidx = findBox(initSegment, ['sidx']) as any; let references; if (!sidx || !sidx[0]) { return null; } references = []; sidx = sidx[0]; const version = sidx.data[0]; // set initial offset, we skip the reference ID (not needed) index = version === 0 ? 8 : 16; const timescale = readUint32(sidx, index); index += 4; // TODO: parse earliestPresentationTime and firstOffset // usually zero in our case let earliestPresentationTime = 0; let firstOffset = 0; if (version === 0) { index += 8; } else { index += 16; } // skip reserved index += 2; let startByte = sidx.end + firstOffset; const referencesCount = readUint16(sidx, index); index += 2; for (let i = 0; i < referencesCount; i++) { let referenceIndex = index; const referenceInfo = readUint32(sidx, referenceIndex); referenceIndex += 4; const referenceSize = referenceInfo & 0x7FFFFFFF; const referenceType = (referenceInfo & 0x80000000) >>> 31; if (referenceType === 1) { console.warn('SIDX has hierarchical references (not supported)'); return; } const subsegmentDuration = readUint32(sidx, referenceIndex); referenceIndex += 4; references.push({ referenceSize, subsegmentDuration, // unscaled info: { duration: subsegmentDuration / timescale, start: startByte, end: startByte + referenceSize - 1 } }); startByte += referenceSize; // Skipping 1 bit for |startsWithSap|, 3 bits for |sapType|, and 28 bits // for |sapDelta|. referenceIndex += 4; // skip to next ref index = referenceIndex; } return { earliestPresentationTime, timescale, version, referencesCount, references, moovEndOffset }; } export function toUnsigned (value) { return value >>> 0; };
parseTrun
identifier_name
mp4-tools.ts
import { ElementaryStreamTypes } from '../loader/fragment'; import { logger } from '../utils/logger'; let USER_DATA_REGISTERED_ITU_T_T35 = 4, RBSP_TRAILING_BITS = 128; const UINT32_MAX = Math.pow(2, 32) - 1; export function bin2str (buffer): string { return String.fromCharCode.apply(null, buffer); } export function readUint32 (buffer, offset): number { if (buffer.data) { offset += buffer.start; buffer = buffer.data; } const val = buffer[offset] << 24 | buffer[offset + 1] << 16 | buffer[offset + 2] << 8 | buffer[offset + 3]; return val < 0 ? 4294967296 + val : val; } export function readUint16 (buffer, offset) { if (buffer.data) { offset += buffer.start; buffer = buffer.data; } const val = buffer[offset] << 8 | buffer[offset + 1]; return val < 0 ? 65536 + val : val; } export function writeUint32 (buffer, offset, value) { if (buffer.data) { offset += buffer.start; buffer = buffer.data; } buffer[offset] = value >> 24; buffer[offset + 1] = (value >> 16) & 0xff; buffer[offset + 2] = (value >> 8) & 0xff; buffer[offset + 3] = value & 0xff; } export function probe (data) { // ensure we find a moof box in the first 16 kB return findBox({ data: data, start: 0, end: Math.min(data.length, 16384) }, ['moof']).length > 0; } // Find the data for a box specified by its path export function findBox (data, path): Array<any> { let results = [] as Array<any>; let i; let size; let type; let end; let subresults; let start; let endbox; if (data.data) { start = data.start; end = data.end; data = data.data; } else { start = 0; end = data.byteLength; } if (!path.length) { // short-circuit the search for empty paths return results; } for (i = start; i < end;) { size = readUint32(data, i); type = bin2str(data.subarray(i + 4, i + 8)); endbox = size > 1 ? 
i + size : end; if (type === path[0]) { if (path.length === 1) { // this is the end of the path and we've found the box we were // looking for results.push({ data: data, start: i + 8, end: endbox }); } else { // recursively search for the next box along the path subresults = findBox({ data: data, start: i + 8, end: endbox }, path.slice(1)); if (subresults.length) { results = results.concat(subresults); } } } i = endbox; } // we've finished searching all of data return results; } interface InitDataTrack { timescale: number, id: number, codec: string } type HdlrType = ElementaryStreamTypes.AUDIO | ElementaryStreamTypes.VIDEO; export interface InitData extends Array<any> { [index: number]: { timescale: number, type: HdlrType }; audio?: InitDataTrack video?: InitDataTrack } export function parseInitSegment (initSegment): InitData { const result: InitData = []; const traks = findBox(initSegment, ['moov', 'trak']); traks.forEach(trak => { const tkhd = findBox(trak, ['tkhd'])[0]; if (tkhd) { let version = tkhd.data[tkhd.start]; let index = version === 0 ? 12 : 20; const trackId = readUint32(tkhd, index); const mdhd = findBox(trak, ['mdia', 'mdhd'])[0]; if (mdhd) { version = mdhd.data[mdhd.start]; index = version === 0 ? 12 : 20; const timescale = readUint32(mdhd, index); const hdlr = findBox(trak, ['mdia', 'hdlr'])[0]; if (hdlr) { const hdlrType = bin2str(hdlr.data.subarray(hdlr.start + 8, hdlr.start + 12)); const type: HdlrType = { soun: ElementaryStreamTypes.AUDIO, vide: ElementaryStreamTypes.VIDEO }[hdlrType]; if (type) { // TODO: Parse codec details to be able to build MIME type. 
const codexBoxes = findBox(trak, ['mdia', 'minf', 'stbl', 'stsd']); let codec; if (codexBoxes.length) { const codecBox = codexBoxes[0]; codec = bin2str(codecBox.data.subarray(codecBox.start + 12, codecBox.start + 16)); } result[trackId] = { timescale, type }; result[type] = { timescale, id: trackId, codec }; } } } } }); return result; } /** * see ANSI/SCTE 128-1 (2013), section 8.1 * * This code was ported from the mux.js project at: https://github.com/videojs/mux.js */ export function parseUserData (sei) { // itu_t_t35_contry_code must be 181 (United States) for // captions if (sei.payload[0] !== 181) { return null; } // itu_t_t35_provider_code should be 49 (ATSC) for captions if (((sei.payload[1] << 8) | sei.payload[2]) !== 49) { return null; } // the user_identifier should be "GA94" to indicate ATSC1 data if (String.fromCharCode(sei.payload[3], sei.payload[4], sei.payload[5], sei.payload[6]) !== 'GA94') { return null; } // finally, user_data_type_code should be 0x03 for caption data if (sei.payload[7] !== 0x03) { return null; } // return the user_data_type_structure and strip the trailing // marker bits return sei.payload.subarray(8, sei.payload.length - 1); } /** * Parse a supplemental enhancement information (SEI) NAL unit. * Stops parsing once a message of type ITU T T35 has been found. * * This code was ported from the mux.js project at: * https://github.com/videojs/mux.js * * @param bytes {Uint8Array} the bytes of a SEI NAL unit * @return {object} the parsed SEI payload * @see Rec. 
ITU-T H.264, 7.3.2.3.1 */ export function parseSei (bytes) { let i = 0, result = { payloadType: -1, payloadSize: 0, payload: null }, payloadType = 0, payloadSize = 0; // go through the sei_rbsp parsing each each individual sei_message while (i < bytes.byteLength) { // stop once we have hit the end of the sei_rbsp if (bytes[i] === RBSP_TRAILING_BITS) { break; } // Parse payload type while (bytes[i] === 0xFF) { payloadType += 255; i++; } payloadType += bytes[i++]; // Parse payload size while (bytes[i] === 0xFF) { payloadSize += 255; i++; } payloadSize += bytes[i++]; // this sei_message is a 608/708 caption so save it and break // there can only ever be one caption message in a frame's sei if (!result.payload && payloadType === USER_DATA_REGISTERED_ITU_T_T35) { result.payloadType = payloadType; result.payloadSize = payloadSize; result.payload = bytes.subarray(i, i + payloadSize); break; } // skip the payload and parse the next message i += payloadSize; payloadType = 0; payloadSize = 0; } return result; } /** * Parses text track samples to be used in 608 extraction * * @param data * @param videoTrackId */ export function parseTextTrackSamplesFromVideoSegment (data, videoTrackId) { let captionNals = parseCaptionNals(data, videoTrackId); return captionNals.reduce((acc, nal) => { const seiNal = parseSei(nal.escapedRBSP); if (seiNal.payload) { const userData = parseUserData(seiNal); const sample = { type: 3, trackId: nal.trackId, pts: nal.pts, dts: nal.dts, bytes: userData }; acc.push(sample); } return acc }, []) } /** * Parses out caption nals from an FMP4 segment's video tracks. 
* * This code was ported from the mux.js project at: https://github.com/videojs/mux.js * @param {Uint8Array} segment - The bytes of a single segment * @param {Number} videoTrackId - The trackId of a video track in the segment * @return {Object.<Number, Object[]>} A mapping of video trackId to * a list of seiNals found in that track **/ export function parseCaptionNals (data, videoTrackId) { let captionNals = [] as any; // To get the samples let trafs = findBox(data, ['moof', 'traf']); // To get SEI NAL units let mdats = findBox(data, ['mdat']); let mdatTrafPairs = [] as any; // Pair up each traf with a mdat as moofs and mdats are in pairs mdats.forEach(function (mdat, index) { let matchingTraf = trafs[index]; mdatTrafPairs.push({ mdat: mdat, traf: matchingTraf }); }); mdatTrafPairs.forEach(function (pair) { let mdat = pair.mdat; let mdatBytes = mdat.data.subarray(mdat.start, mdat.end); let traf = pair.traf; let trafBytes = traf.data.subarray(traf.start, traf.end); let tfhd = findBox(trafBytes, ['tfhd']); // Exactly 1 tfhd per traf let headerInfo = parseTfhd(tfhd[0]); let trackId = headerInfo.trackId; let tfdt = findBox(trafBytes, ['tfdt']); // Either 0 or 1 tfdt per traf let baseMediaDecodeTime = (tfdt.length > 0) ? 
parseTfdt(tfdt[0]).baseMediaDecodeTime : 0; let truns = findBox(trafBytes, ['trun']); let samples; let seiNals; // Only parse video data for the chosen video track if (videoTrackId === trackId && truns.length > 0) { samples = parseSamples(truns, baseMediaDecodeTime, headerInfo); seiNals = findSeiNals(mdatBytes, samples, trackId); captionNals = captionNals.concat(seiNals); } }); return captionNals; } export function parseTfhd (tfhd) { const data = tfhd.data.subarray(tfhd.start, tfhd.end); let view = new DataView(data.buffer, data.byteOffset, data.byteLength), result = { version: data[0], flags: new Uint8Array(data.subarray(1, 4)), trackId: view.getUint32(4) } as any, baseDataOffsetPresent = result.flags[2] & 0x01, sampleDescriptionIndexPresent = result.flags[2] & 0x02, defaultSampleDurationPresent = result.flags[2] & 0x08, defaultSampleSizePresent = result.flags[2] & 0x10, defaultSampleFlagsPresent = result.flags[2] & 0x20, durationIsEmpty = result.flags[0] & 0x010000, defaultBaseIsMoof = result.flags[0] & 0x020000, i; i = 8; if (baseDataOffsetPresent) { i += 4; // truncate top 4 bytes // FIXME: should we read the full 64 bits? 
result.baseDataOffset = view.getUint32(12); i += 4; } if (sampleDescriptionIndexPresent) { result.sampleDescriptionIndex = view.getUint32(i); i += 4; } if (defaultSampleDurationPresent) { result.defaultSampleDuration = view.getUint32(i); i += 4; } if (defaultSampleSizePresent) { result.defaultSampleSize = view.getUint32(i); i += 4; } if (defaultSampleFlagsPresent) { result.defaultSampleFlags = view.getUint32(i); } if (durationIsEmpty) { result.durationIsEmpty = true; } if (!baseDataOffsetPresent && defaultBaseIsMoof) { result.baseDataOffsetIsMoof = true; } return result; } export function parseTfdt (tfdt) { const data = tfdt.data.subarray(tfdt.start, tfdt.end); let result = { version: data[0], flags: new Uint8Array(data.subarray(1, 4)), baseMediaDecodeTime: toUnsigned(data[4] << 24 | data[5] << 16 | data[6] << 8 | data[7]) }; if (result.version === 1) { result.baseMediaDecodeTime *= Math.pow(2, 32); result.baseMediaDecodeTime += toUnsigned(data[8] << 24 | data[9] << 16 | data[10] << 8 | data[11]); } return result; } /** * Parses sample information out of Track Run Boxes and calculates * the absolute presentation and decode timestamps of each sample. * * This code was ported from the mux.js project at: https://github.com/videojs/mux.js * * @param {Array<Uint8Array>} truns - The Trun Run boxes to be parsed * @param {Number} baseMediaDecodeTime - base media decode time from tfdt @see ISO-BMFF-12/2015, Section 8.8.12 * @param {Object} tfhd - The parsed Track Fragment Header * @see inspect.parseTfhd * @return {Object[]} the parsed samples * * @see ISO-BMFF-12/2015, Section 8.8.8 **/ export function parseSamples (truns, baseMediaDecodeTime, tfhd) { let currentDts = baseMediaDecodeTime; let defaultSampleDuration = tfhd.defaultSampleDuration || 0; let defaultSampleSize = tfhd.defaultSampleSize || 0; let trackId = tfhd.trackId; let allSamples = [] as any; truns.forEach(function (trun) { // Note: We currently do not parse the sample table as well // as the trun. 
It's possible some sources will require this. // moov > trak > mdia > minf > stbl let trackRun = parseTrun(trun); let samples = trackRun.samples as any[]; samples.forEach(function (sample) { if (sample.duration === undefined) { sample.duration = defaultSampleDuration; } if (sample.size === undefined) { sample.size = defaultSampleSize; } sample.trackId = trackId; sample.dts = currentDts; if (sample.compositionTimeOffset === undefined) { sample.compositionTimeOffset = 0; } sample.pts = currentDts + sample.compositionTimeOffset; currentDts += sample.duration; }); allSamples = allSamples.concat(samples); }); return allSamples; } /** * Finds SEI nal units contained in a Media Data Box. * Assumes that `parseSamples` has been called first. * * This was ported from the mux.js project at: https://github.com/videojs/mux.js * * @param {Uint8Array} avcStream - The bytes of the mdat * @param {Object[]} samples - The samples parsed out by `parseSamples` * @param {Number} trackId - The trackId of this video track * @return {Object[]} seiNals - the parsed SEI NALUs found. * The contents of the seiNal should match what is expected by * CaptionStream.push (nalUnitType, size, data, escapedRBSP, pts, dts) * * @see ISO-BMFF-12/2015, Section 8.1.1 * @see Rec. 
ITU-T H.264, 7.3.2.3.1 **/ export function findSeiNals (avcStream, samples, trackId) { let avcView = new DataView(avcStream.buffer, avcStream.byteOffset, avcStream.byteLength), result = [] as any, seiNal, i, length, lastMatchedSample; for (i = 0; i + 4 < avcStream.length; i += length) { length = avcView.getUint32(i); i += 4; // Bail if this doesn't appear to be an H264 stream if (length <= 0) { continue; } switch (avcStream[i] & 0x1F) { case 0x06: var data = avcStream.subarray(i + 1, i + 1 + length); var matchingSample = mapToSample(i, samples); seiNal = { nalUnitType: 'sei_rbsp', size: length, data: data, escapedRBSP: discardEmulationPreventionBytes(data), trackId: trackId }; if (matchingSample) { seiNal.pts = matchingSample.pts; seiNal.dts = matchingSample.dts; lastMatchedSample = matchingSample; } else if (lastMatchedSample) { // If a matching sample cannot be found, use the last // sample's values as they should be as close as possible seiNal.pts = lastMatchedSample.pts; seiNal.dts = lastMatchedSample.dts; } else { logger.log('We\'ve encountered a nal unit without data. 
See mux.js#233.'); break; } result.push(seiNal); break; default: break; } } return result; } /** * This code was ported from the mux.js project at: https://github.com/videojs/mux.js * * @param trun */ export function parseTrun (trun) { const data = trun.data.subarray(trun.start, trun.end); let result = { version: data[0], flags: new Uint8Array(data.subarray(1, 4)), samples: [] } as any, view = new DataView(data.buffer, data.byteOffset, data.byteLength), // Flag interpretation dataOffsetPresent = result.flags[2] & 0x01, // compare with 2nd byte of 0x1 firstSampleFlagsPresent = result.flags[2] & 0x04, // compare with 2nd byte of 0x4 sampleDurationPresent = result.flags[1] & 0x01, // compare with 2nd byte of 0x100 sampleSizePresent = result.flags[1] & 0x02, // compare with 2nd byte of 0x200 sampleFlagsPresent = result.flags[1] & 0x04, // compare with 2nd byte of 0x400 sampleCompositionTimeOffsetPresent = result.flags[1] & 0x08, // compare with 2nd byte of 0x800 sampleCount = view.getUint32(4), offset = 8, sample; if (dataOffsetPresent) { // 32 bit signed integer result.dataOffset = view.getInt32(offset); offset += 4; } // Overrides the flags for the first sample only. 
The order of // optional values will be: duration, size, compositionTimeOffset if (firstSampleFlagsPresent && sampleCount) { sample = { flags: parseSampleFlags(data.subarray(offset, offset + 4)) }; offset += 4; if (sampleDurationPresent) { sample.duration = view.getUint32(offset); offset += 4; } if (sampleSizePresent) { sample.size = view.getUint32(offset); offset += 4; } if (sampleCompositionTimeOffsetPresent) { // Note: this should be a signed int if version is 1 sample.compositionTimeOffset = view.getUint32(offset); offset += 4; } result.samples.push(sample); sampleCount--; } while (sampleCount--) { sample = {}; if (sampleDurationPresent) { sample.duration = view.getUint32(offset); offset += 4; } if (sampleSizePresent) { sample.size = view.getUint32(offset); offset += 4; } if (sampleFlagsPresent) { sample.flags = parseSampleFlags(data.subarray(offset, offset + 4)); offset += 4; } if (sampleCompositionTimeOffsetPresent) { // Note: this should be a signed int if version is 1 sample.compositionTimeOffset = view.getUint32(offset); offset += 4; } result.samples.push(sample); } return result; } /** * Parses sample information out of Track Run Boxes and calculates * the absolute presentation and decode timestamps of each sample. 
* * This code was ported from the mux.js project at: https://github.com/videojs/mux.js * * @param {Array<Uint8Array>} truns - The Trun Run boxes to be parsed * @param {Number} baseMediaDecodeTime - base media decode time from tfdt @see ISO-BMFF-12/2015, Section 8.8.12 * @param {Object} tfhd - The parsed Track Fragment Header * @see inspect.parseTfhd * @return {Object[]} the parsed samples * * @see ISO-BMFF-12/2015, Section 8.8.8 **/ export function parseSampleFlags (flags) { return { isLeading: (flags[0] & 0x0c) >>> 2, dependsOn: flags[0] & 0x03, isDependedOn: (flags[1] & 0xc0) >>> 6, hasRedundancy: (flags[1] & 0x30) >>> 4, paddingValue: (flags[1] & 0x0e) >>> 1, isNonSyncSample: flags[1] & 0x01, degradationPriority: (flags[2] << 8) | flags[3] }; } /** * Maps an offset in the mdat to a sample based on the the size of the samples. * Assumes that `parseSamples` has been called first. * * This code was ported from the mux.js project at: https://github.com/videojs/mux.js * * @param {Number} offset - The offset into the mdat * @param {Object[]} samples - An array of samples, parsed using `parseSamples` * @return {?Object} The matching sample, or null if no match was found. * * @see ISO-BMFF-12/2015, Section 8.8.8 **/ export function mapToSample (offset, samples) { let approximateOffset = offset; for (let i = 0; i < samples.length; i++) { let sample = samples[i]; if (approximateOffset < sample.size) { return sample; } approximateOffset -= sample.size; } return null; } /** * Determine the base media decode start time, in seconds, for an MP4 * fragment. If multiple fragments are specified, the earliest time is * returned. * * The base media decode time can be parsed from track fragment * metadata: * ``` * moof > traf > tfdt.baseMediaDecodeTime * ``` * It requires the timescale value from the mdhd to interpret. 
* * @param initData {object} containing information about track * @param fragment mp4 fragment data * @return {number} the earliest base media decode start time for the * fragment, in seconds */ export function getStartDTS (initData, fragment)
export function offsetStartDTS (initData, fragment, timeOffset) { findBox(fragment, ['moof', 'traf']).map(function (traf) { return findBox(traf, ['tfhd']).map(function (tfhd) { // get the track id from the tfhd let id = readUint32(tfhd, 4); // assume a 90kHz clock if no timescale was specified let timescale = initData[id].timescale || 90e3; // get the base media decode time from the tfdt findBox(traf, ['tfdt']).map(function (tfdt) { let version = tfdt.data[tfdt.start]; let baseMediaDecodeTime = readUint32(tfdt, 4); if (version === 0) { writeUint32(tfdt, 4, baseMediaDecodeTime - timeOffset * timescale); } else { baseMediaDecodeTime *= Math.pow(2, 32); baseMediaDecodeTime += readUint32(tfdt, 8); baseMediaDecodeTime -= timeOffset * timescale; baseMediaDecodeTime = Math.max(baseMediaDecodeTime, 0); const upper = Math.floor(baseMediaDecodeTime / (UINT32_MAX + 1)); const lower = Math.floor(baseMediaDecodeTime % (UINT32_MAX + 1)); writeUint32(tfdt, 4, upper); writeUint32(tfdt, 8, lower); } }); }); }); } /** * * This code was ported from the mux.js project at: https://github.com/videojs/mux.js */ export function discardEmulationPreventionBytes (data) { let length = data.byteLength, emulationPreventionBytesPositions = [] as any, i = 1, newLength, newData; // Find all `Emulation Prevention Bytes` while (i < length - 2) { if (data[i] === 0 && data[i + 1] === 0 && data[i + 2] === 0x03) { emulationPreventionBytesPositions.push(i + 2); i += 2; } else { i++; } } // If no Emulation Prevention Bytes were found just return the original // array if (emulationPreventionBytesPositions.length === 0) { return data; } // Create a new array to hold the NAL unit data newLength = length - emulationPreventionBytesPositions.length; newData = new Uint8Array(newLength); let sourceIndex = 0; for (i = 0; i < newLength; sourceIndex++, i++) { if (sourceIndex === emulationPreventionBytesPositions[0]) { // Skip this byte sourceIndex++; // Remove this position index 
emulationPreventionBytesPositions.shift(); } newData[i] = data[sourceIndex]; } return newData; } export function parseSegmentIndex (initSegment) { const moov = findBox(initSegment, ['moov'])[0]; const moovEndOffset = moov ? moov.end : null; // we need this in case we need to chop off garbage of the end of current data let index = 0; let sidx = findBox(initSegment, ['sidx']) as any; let references; if (!sidx || !sidx[0]) { return null; } references = []; sidx = sidx[0]; const version = sidx.data[0]; // set initial offset, we skip the reference ID (not needed) index = version === 0 ? 8 : 16; const timescale = readUint32(sidx, index); index += 4; // TODO: parse earliestPresentationTime and firstOffset // usually zero in our case let earliestPresentationTime = 0; let firstOffset = 0; if (version === 0) { index += 8; } else { index += 16; } // skip reserved index += 2; let startByte = sidx.end + firstOffset; const referencesCount = readUint16(sidx, index); index += 2; for (let i = 0; i < referencesCount; i++) { let referenceIndex = index; const referenceInfo = readUint32(sidx, referenceIndex); referenceIndex += 4; const referenceSize = referenceInfo & 0x7FFFFFFF; const referenceType = (referenceInfo & 0x80000000) >>> 31; if (referenceType === 1) { console.warn('SIDX has hierarchical references (not supported)'); return; } const subsegmentDuration = readUint32(sidx, referenceIndex); referenceIndex += 4; references.push({ referenceSize, subsegmentDuration, // unscaled info: { duration: subsegmentDuration / timescale, start: startByte, end: startByte + referenceSize - 1 } }); startByte += referenceSize; // Skipping 1 bit for |startsWithSap|, 3 bits for |sapType|, and 28 bits // for |sapDelta|. referenceIndex += 4; // skip to next ref index = referenceIndex; } return { earliestPresentationTime, timescale, version, referencesCount, references, moovEndOffset }; } export function toUnsigned (value) { return value >>> 0; };
{ let trafs, baseTimes, result; // we need info from two childrend of each track fragment box trafs = findBox(fragment, ['moof', 'traf']); // determine the start times for each track baseTimes = [].concat.apply([], trafs.map(function (traf) { return findBox(traf, ['tfhd']).map(function (tfhd) { let id, scale, baseTime; // get the track id from the tfhd id = readUint32(tfhd, 4); // assume a 90kHz clock if no timescale was specified scale = initData[id].timescale || 90e3; // get the base media decode time from the tfdt baseTime = findBox(traf, ['tfdt']).map(function (tfdt) { let version, result; version = tfdt.data[tfdt.start]; result = readUint32(tfdt, 4); if (version === 1) { result *= Math.pow(2, 32); result += readUint32(tfdt, 8); } return result; })[0]; // convert base time to seconds return baseTime / scale; }); })); // return the minimum result = Math.min.apply(null, baseTimes); return isFinite(result) ? result : 0; }
identifier_body
mp4-tools.ts
import { ElementaryStreamTypes } from '../loader/fragment'; import { logger } from '../utils/logger'; let USER_DATA_REGISTERED_ITU_T_T35 = 4, RBSP_TRAILING_BITS = 128; const UINT32_MAX = Math.pow(2, 32) - 1; export function bin2str (buffer): string { return String.fromCharCode.apply(null, buffer); } export function readUint32 (buffer, offset): number { if (buffer.data) { offset += buffer.start; buffer = buffer.data; } const val = buffer[offset] << 24 | buffer[offset + 1] << 16 | buffer[offset + 2] << 8 | buffer[offset + 3]; return val < 0 ? 4294967296 + val : val; } export function readUint16 (buffer, offset) { if (buffer.data) { offset += buffer.start; buffer = buffer.data; } const val = buffer[offset] << 8 | buffer[offset + 1]; return val < 0 ? 65536 + val : val; } export function writeUint32 (buffer, offset, value) { if (buffer.data) { offset += buffer.start; buffer = buffer.data; } buffer[offset] = value >> 24; buffer[offset + 1] = (value >> 16) & 0xff; buffer[offset + 2] = (value >> 8) & 0xff; buffer[offset + 3] = value & 0xff; } export function probe (data) { // ensure we find a moof box in the first 16 kB return findBox({ data: data, start: 0, end: Math.min(data.length, 16384) }, ['moof']).length > 0; } // Find the data for a box specified by its path export function findBox (data, path): Array<any> { let results = [] as Array<any>; let i; let size; let type; let end; let subresults; let start; let endbox; if (data.data) { start = data.start; end = data.end; data = data.data; } else { start = 0; end = data.byteLength; } if (!path.length) { // short-circuit the search for empty paths return results; } for (i = start; i < end;) { size = readUint32(data, i); type = bin2str(data.subarray(i + 4, i + 8)); endbox = size > 1 ? 
i + size : end; if (type === path[0]) { if (path.length === 1) { // this is the end of the path and we've found the box we were // looking for results.push({ data: data, start: i + 8, end: endbox }); } else { // recursively search for the next box along the path subresults = findBox({ data: data, start: i + 8, end: endbox }, path.slice(1)); if (subresults.length) { results = results.concat(subresults); } } } i = endbox; } // we've finished searching all of data return results; } interface InitDataTrack { timescale: number, id: number, codec: string } type HdlrType = ElementaryStreamTypes.AUDIO | ElementaryStreamTypes.VIDEO; export interface InitData extends Array<any> { [index: number]: { timescale: number, type: HdlrType }; audio?: InitDataTrack video?: InitDataTrack } export function parseInitSegment (initSegment): InitData { const result: InitData = []; const traks = findBox(initSegment, ['moov', 'trak']); traks.forEach(trak => { const tkhd = findBox(trak, ['tkhd'])[0]; if (tkhd) { let version = tkhd.data[tkhd.start]; let index = version === 0 ? 12 : 20; const trackId = readUint32(tkhd, index); const mdhd = findBox(trak, ['mdia', 'mdhd'])[0]; if (mdhd) { version = mdhd.data[mdhd.start]; index = version === 0 ? 12 : 20; const timescale = readUint32(mdhd, index); const hdlr = findBox(trak, ['mdia', 'hdlr'])[0]; if (hdlr) { const hdlrType = bin2str(hdlr.data.subarray(hdlr.start + 8, hdlr.start + 12)); const type: HdlrType = { soun: ElementaryStreamTypes.AUDIO, vide: ElementaryStreamTypes.VIDEO }[hdlrType]; if (type) { // TODO: Parse codec details to be able to build MIME type. 
const codexBoxes = findBox(trak, ['mdia', 'minf', 'stbl', 'stsd']); let codec; if (codexBoxes.length) { const codecBox = codexBoxes[0]; codec = bin2str(codecBox.data.subarray(codecBox.start + 12, codecBox.start + 16)); } result[trackId] = { timescale, type }; result[type] = { timescale, id: trackId, codec }; } } } } }); return result; } /** * see ANSI/SCTE 128-1 (2013), section 8.1 * * This code was ported from the mux.js project at: https://github.com/videojs/mux.js */ export function parseUserData (sei) { // itu_t_t35_contry_code must be 181 (United States) for // captions if (sei.payload[0] !== 181) { return null; } // itu_t_t35_provider_code should be 49 (ATSC) for captions if (((sei.payload[1] << 8) | sei.payload[2]) !== 49) { return null; } // the user_identifier should be "GA94" to indicate ATSC1 data if (String.fromCharCode(sei.payload[3], sei.payload[4], sei.payload[5], sei.payload[6]) !== 'GA94') { return null; } // finally, user_data_type_code should be 0x03 for caption data if (sei.payload[7] !== 0x03) { return null; } // return the user_data_type_structure and strip the trailing // marker bits return sei.payload.subarray(8, sei.payload.length - 1); } /** * Parse a supplemental enhancement information (SEI) NAL unit. * Stops parsing once a message of type ITU T T35 has been found. * * This code was ported from the mux.js project at: * https://github.com/videojs/mux.js * * @param bytes {Uint8Array} the bytes of a SEI NAL unit * @return {object} the parsed SEI payload * @see Rec. 
ITU-T H.264, 7.3.2.3.1 */ export function parseSei (bytes) { let i = 0, result = { payloadType: -1, payloadSize: 0, payload: null }, payloadType = 0, payloadSize = 0; // go through the sei_rbsp parsing each each individual sei_message while (i < bytes.byteLength) { // stop once we have hit the end of the sei_rbsp if (bytes[i] === RBSP_TRAILING_BITS) { break; } // Parse payload type while (bytes[i] === 0xFF) { payloadType += 255; i++; } payloadType += bytes[i++]; // Parse payload size while (bytes[i] === 0xFF) { payloadSize += 255; i++; } payloadSize += bytes[i++]; // this sei_message is a 608/708 caption so save it and break // there can only ever be one caption message in a frame's sei if (!result.payload && payloadType === USER_DATA_REGISTERED_ITU_T_T35) { result.payloadType = payloadType; result.payloadSize = payloadSize; result.payload = bytes.subarray(i, i + payloadSize); break; } // skip the payload and parse the next message i += payloadSize; payloadType = 0; payloadSize = 0; } return result; } /** * Parses text track samples to be used in 608 extraction * * @param data * @param videoTrackId */ export function parseTextTrackSamplesFromVideoSegment (data, videoTrackId) { let captionNals = parseCaptionNals(data, videoTrackId); return captionNals.reduce((acc, nal) => { const seiNal = parseSei(nal.escapedRBSP); if (seiNal.payload) { const userData = parseUserData(seiNal); const sample = { type: 3, trackId: nal.trackId, pts: nal.pts, dts: nal.dts, bytes: userData }; acc.push(sample); } return acc }, []) } /** * Parses out caption nals from an FMP4 segment's video tracks. 
* * This code was ported from the mux.js project at: https://github.com/videojs/mux.js * @param {Uint8Array} segment - The bytes of a single segment * @param {Number} videoTrackId - The trackId of a video track in the segment * @return {Object.<Number, Object[]>} A mapping of video trackId to * a list of seiNals found in that track **/ export function parseCaptionNals (data, videoTrackId) { let captionNals = [] as any; // To get the samples let trafs = findBox(data, ['moof', 'traf']); // To get SEI NAL units let mdats = findBox(data, ['mdat']); let mdatTrafPairs = [] as any; // Pair up each traf with a mdat as moofs and mdats are in pairs mdats.forEach(function (mdat, index) { let matchingTraf = trafs[index]; mdatTrafPairs.push({ mdat: mdat, traf: matchingTraf }); }); mdatTrafPairs.forEach(function (pair) { let mdat = pair.mdat; let mdatBytes = mdat.data.subarray(mdat.start, mdat.end); let traf = pair.traf; let trafBytes = traf.data.subarray(traf.start, traf.end); let tfhd = findBox(trafBytes, ['tfhd']); // Exactly 1 tfhd per traf let headerInfo = parseTfhd(tfhd[0]); let trackId = headerInfo.trackId; let tfdt = findBox(trafBytes, ['tfdt']); // Either 0 or 1 tfdt per traf let baseMediaDecodeTime = (tfdt.length > 0) ? 
parseTfdt(tfdt[0]).baseMediaDecodeTime : 0; let truns = findBox(trafBytes, ['trun']); let samples; let seiNals; // Only parse video data for the chosen video track if (videoTrackId === trackId && truns.length > 0) { samples = parseSamples(truns, baseMediaDecodeTime, headerInfo); seiNals = findSeiNals(mdatBytes, samples, trackId); captionNals = captionNals.concat(seiNals); } }); return captionNals; } export function parseTfhd (tfhd) { const data = tfhd.data.subarray(tfhd.start, tfhd.end); let view = new DataView(data.buffer, data.byteOffset, data.byteLength), result = { version: data[0], flags: new Uint8Array(data.subarray(1, 4)), trackId: view.getUint32(4) } as any, baseDataOffsetPresent = result.flags[2] & 0x01, sampleDescriptionIndexPresent = result.flags[2] & 0x02, defaultSampleDurationPresent = result.flags[2] & 0x08, defaultSampleSizePresent = result.flags[2] & 0x10, defaultSampleFlagsPresent = result.flags[2] & 0x20, durationIsEmpty = result.flags[0] & 0x010000, defaultBaseIsMoof = result.flags[0] & 0x020000, i; i = 8; if (baseDataOffsetPresent) { i += 4; // truncate top 4 bytes // FIXME: should we read the full 64 bits? 
result.baseDataOffset = view.getUint32(12); i += 4; } if (sampleDescriptionIndexPresent) { result.sampleDescriptionIndex = view.getUint32(i); i += 4; } if (defaultSampleDurationPresent) { result.defaultSampleDuration = view.getUint32(i); i += 4; } if (defaultSampleSizePresent) { result.defaultSampleSize = view.getUint32(i); i += 4; } if (defaultSampleFlagsPresent) { result.defaultSampleFlags = view.getUint32(i); } if (durationIsEmpty) { result.durationIsEmpty = true; } if (!baseDataOffsetPresent && defaultBaseIsMoof) { result.baseDataOffsetIsMoof = true; } return result; } export function parseTfdt (tfdt) { const data = tfdt.data.subarray(tfdt.start, tfdt.end); let result = { version: data[0], flags: new Uint8Array(data.subarray(1, 4)), baseMediaDecodeTime: toUnsigned(data[4] << 24 | data[5] << 16 | data[6] << 8 | data[7]) }; if (result.version === 1) { result.baseMediaDecodeTime *= Math.pow(2, 32); result.baseMediaDecodeTime += toUnsigned(data[8] << 24 | data[9] << 16 | data[10] << 8 | data[11]); } return result; } /** * Parses sample information out of Track Run Boxes and calculates * the absolute presentation and decode timestamps of each sample. * * This code was ported from the mux.js project at: https://github.com/videojs/mux.js * * @param {Array<Uint8Array>} truns - The Trun Run boxes to be parsed * @param {Number} baseMediaDecodeTime - base media decode time from tfdt @see ISO-BMFF-12/2015, Section 8.8.12 * @param {Object} tfhd - The parsed Track Fragment Header * @see inspect.parseTfhd * @return {Object[]} the parsed samples * * @see ISO-BMFF-12/2015, Section 8.8.8 **/ export function parseSamples (truns, baseMediaDecodeTime, tfhd) { let currentDts = baseMediaDecodeTime; let defaultSampleDuration = tfhd.defaultSampleDuration || 0; let defaultSampleSize = tfhd.defaultSampleSize || 0; let trackId = tfhd.trackId; let allSamples = [] as any; truns.forEach(function (trun) { // Note: We currently do not parse the sample table as well // as the trun. 
It's possible some sources will require this. // moov > trak > mdia > minf > stbl let trackRun = parseTrun(trun); let samples = trackRun.samples as any[]; samples.forEach(function (sample) { if (sample.duration === undefined) { sample.duration = defaultSampleDuration; } if (sample.size === undefined) { sample.size = defaultSampleSize; } sample.trackId = trackId; sample.dts = currentDts; if (sample.compositionTimeOffset === undefined) { sample.compositionTimeOffset = 0; } sample.pts = currentDts + sample.compositionTimeOffset; currentDts += sample.duration; }); allSamples = allSamples.concat(samples); }); return allSamples; } /** * Finds SEI nal units contained in a Media Data Box. * Assumes that `parseSamples` has been called first. * * This was ported from the mux.js project at: https://github.com/videojs/mux.js * * @param {Uint8Array} avcStream - The bytes of the mdat * @param {Object[]} samples - The samples parsed out by `parseSamples` * @param {Number} trackId - The trackId of this video track * @return {Object[]} seiNals - the parsed SEI NALUs found. * The contents of the seiNal should match what is expected by * CaptionStream.push (nalUnitType, size, data, escapedRBSP, pts, dts) * * @see ISO-BMFF-12/2015, Section 8.1.1 * @see Rec. ITU-T H.264, 7.3.2.3.1 **/ export function findSeiNals (avcStream, samples, trackId) { let avcView = new DataView(avcStream.buffer, avcStream.byteOffset, avcStream.byteLength), result = [] as any, seiNal, i, length, lastMatchedSample; for (i = 0; i + 4 < avcStream.length; i += length) { length = avcView.getUint32(i); i += 4; // Bail if this doesn't appear to be an H264 stream if (length <= 0) { continue; } switch (avcStream[i] & 0x1F) { case 0x06: var data = avcStream.subarray(i + 1, i + 1 + length); var matchingSample = mapToSample(i, samples); seiNal = { nalUnitType: 'sei_rbsp', size: length, data: data,
if (matchingSample) { seiNal.pts = matchingSample.pts; seiNal.dts = matchingSample.dts; lastMatchedSample = matchingSample; } else if (lastMatchedSample) { // If a matching sample cannot be found, use the last // sample's values as they should be as close as possible seiNal.pts = lastMatchedSample.pts; seiNal.dts = lastMatchedSample.dts; } else { logger.log('We\'ve encountered a nal unit without data. See mux.js#233.'); break; } result.push(seiNal); break; default: break; } } return result; } /** * This code was ported from the mux.js project at: https://github.com/videojs/mux.js * * @param trun */ export function parseTrun (trun) { const data = trun.data.subarray(trun.start, trun.end); let result = { version: data[0], flags: new Uint8Array(data.subarray(1, 4)), samples: [] } as any, view = new DataView(data.buffer, data.byteOffset, data.byteLength), // Flag interpretation dataOffsetPresent = result.flags[2] & 0x01, // compare with 2nd byte of 0x1 firstSampleFlagsPresent = result.flags[2] & 0x04, // compare with 2nd byte of 0x4 sampleDurationPresent = result.flags[1] & 0x01, // compare with 2nd byte of 0x100 sampleSizePresent = result.flags[1] & 0x02, // compare with 2nd byte of 0x200 sampleFlagsPresent = result.flags[1] & 0x04, // compare with 2nd byte of 0x400 sampleCompositionTimeOffsetPresent = result.flags[1] & 0x08, // compare with 2nd byte of 0x800 sampleCount = view.getUint32(4), offset = 8, sample; if (dataOffsetPresent) { // 32 bit signed integer result.dataOffset = view.getInt32(offset); offset += 4; } // Overrides the flags for the first sample only. 
The order of // optional values will be: duration, size, compositionTimeOffset if (firstSampleFlagsPresent && sampleCount) { sample = { flags: parseSampleFlags(data.subarray(offset, offset + 4)) }; offset += 4; if (sampleDurationPresent) { sample.duration = view.getUint32(offset); offset += 4; } if (sampleSizePresent) { sample.size = view.getUint32(offset); offset += 4; } if (sampleCompositionTimeOffsetPresent) { // Note: this should be a signed int if version is 1 sample.compositionTimeOffset = view.getUint32(offset); offset += 4; } result.samples.push(sample); sampleCount--; } while (sampleCount--) { sample = {}; if (sampleDurationPresent) { sample.duration = view.getUint32(offset); offset += 4; } if (sampleSizePresent) { sample.size = view.getUint32(offset); offset += 4; } if (sampleFlagsPresent) { sample.flags = parseSampleFlags(data.subarray(offset, offset + 4)); offset += 4; } if (sampleCompositionTimeOffsetPresent) { // Note: this should be a signed int if version is 1 sample.compositionTimeOffset = view.getUint32(offset); offset += 4; } result.samples.push(sample); } return result; } /** * Parses sample information out of Track Run Boxes and calculates * the absolute presentation and decode timestamps of each sample. 
* * This code was ported from the mux.js project at: https://github.com/videojs/mux.js * * @param {Array<Uint8Array>} truns - The Trun Run boxes to be parsed * @param {Number} baseMediaDecodeTime - base media decode time from tfdt @see ISO-BMFF-12/2015, Section 8.8.12 * @param {Object} tfhd - The parsed Track Fragment Header * @see inspect.parseTfhd * @return {Object[]} the parsed samples * * @see ISO-BMFF-12/2015, Section 8.8.8 **/ export function parseSampleFlags (flags) { return { isLeading: (flags[0] & 0x0c) >>> 2, dependsOn: flags[0] & 0x03, isDependedOn: (flags[1] & 0xc0) >>> 6, hasRedundancy: (flags[1] & 0x30) >>> 4, paddingValue: (flags[1] & 0x0e) >>> 1, isNonSyncSample: flags[1] & 0x01, degradationPriority: (flags[2] << 8) | flags[3] }; } /** * Maps an offset in the mdat to a sample based on the the size of the samples. * Assumes that `parseSamples` has been called first. * * This code was ported from the mux.js project at: https://github.com/videojs/mux.js * * @param {Number} offset - The offset into the mdat * @param {Object[]} samples - An array of samples, parsed using `parseSamples` * @return {?Object} The matching sample, or null if no match was found. * * @see ISO-BMFF-12/2015, Section 8.8.8 **/ export function mapToSample (offset, samples) { let approximateOffset = offset; for (let i = 0; i < samples.length; i++) { let sample = samples[i]; if (approximateOffset < sample.size) { return sample; } approximateOffset -= sample.size; } return null; } /** * Determine the base media decode start time, in seconds, for an MP4 * fragment. If multiple fragments are specified, the earliest time is * returned. * * The base media decode time can be parsed from track fragment * metadata: * ``` * moof > traf > tfdt.baseMediaDecodeTime * ``` * It requires the timescale value from the mdhd to interpret. 
* * @param initData {object} containing information about track * @param fragment mp4 fragment data * @return {number} the earliest base media decode start time for the * fragment, in seconds */ export function getStartDTS (initData, fragment) { let trafs, baseTimes, result; // we need info from two childrend of each track fragment box trafs = findBox(fragment, ['moof', 'traf']); // determine the start times for each track baseTimes = [].concat.apply([], trafs.map(function (traf) { return findBox(traf, ['tfhd']).map(function (tfhd) { let id, scale, baseTime; // get the track id from the tfhd id = readUint32(tfhd, 4); // assume a 90kHz clock if no timescale was specified scale = initData[id].timescale || 90e3; // get the base media decode time from the tfdt baseTime = findBox(traf, ['tfdt']).map(function (tfdt) { let version, result; version = tfdt.data[tfdt.start]; result = readUint32(tfdt, 4); if (version === 1) { result *= Math.pow(2, 32); result += readUint32(tfdt, 8); } return result; })[0]; // convert base time to seconds return baseTime / scale; }); })); // return the minimum result = Math.min.apply(null, baseTimes); return isFinite(result) ? 
result : 0; } export function offsetStartDTS (initData, fragment, timeOffset) { findBox(fragment, ['moof', 'traf']).map(function (traf) { return findBox(traf, ['tfhd']).map(function (tfhd) { // get the track id from the tfhd let id = readUint32(tfhd, 4); // assume a 90kHz clock if no timescale was specified let timescale = initData[id].timescale || 90e3; // get the base media decode time from the tfdt findBox(traf, ['tfdt']).map(function (tfdt) { let version = tfdt.data[tfdt.start]; let baseMediaDecodeTime = readUint32(tfdt, 4); if (version === 0) { writeUint32(tfdt, 4, baseMediaDecodeTime - timeOffset * timescale); } else { baseMediaDecodeTime *= Math.pow(2, 32); baseMediaDecodeTime += readUint32(tfdt, 8); baseMediaDecodeTime -= timeOffset * timescale; baseMediaDecodeTime = Math.max(baseMediaDecodeTime, 0); const upper = Math.floor(baseMediaDecodeTime / (UINT32_MAX + 1)); const lower = Math.floor(baseMediaDecodeTime % (UINT32_MAX + 1)); writeUint32(tfdt, 4, upper); writeUint32(tfdt, 8, lower); } }); }); }); } /** * * This code was ported from the mux.js project at: https://github.com/videojs/mux.js */ export function discardEmulationPreventionBytes (data) { let length = data.byteLength, emulationPreventionBytesPositions = [] as any, i = 1, newLength, newData; // Find all `Emulation Prevention Bytes` while (i < length - 2) { if (data[i] === 0 && data[i + 1] === 0 && data[i + 2] === 0x03) { emulationPreventionBytesPositions.push(i + 2); i += 2; } else { i++; } } // If no Emulation Prevention Bytes were found just return the original // array if (emulationPreventionBytesPositions.length === 0) { return data; } // Create a new array to hold the NAL unit data newLength = length - emulationPreventionBytesPositions.length; newData = new Uint8Array(newLength); let sourceIndex = 0; for (i = 0; i < newLength; sourceIndex++, i++) { if (sourceIndex === emulationPreventionBytesPositions[0]) { // Skip this byte sourceIndex++; // Remove this position index 
emulationPreventionBytesPositions.shift(); } newData[i] = data[sourceIndex]; } return newData; } export function parseSegmentIndex (initSegment) { const moov = findBox(initSegment, ['moov'])[0]; const moovEndOffset = moov ? moov.end : null; // we need this in case we need to chop off garbage of the end of current data let index = 0; let sidx = findBox(initSegment, ['sidx']) as any; let references; if (!sidx || !sidx[0]) { return null; } references = []; sidx = sidx[0]; const version = sidx.data[0]; // set initial offset, we skip the reference ID (not needed) index = version === 0 ? 8 : 16; const timescale = readUint32(sidx, index); index += 4; // TODO: parse earliestPresentationTime and firstOffset // usually zero in our case let earliestPresentationTime = 0; let firstOffset = 0; if (version === 0) { index += 8; } else { index += 16; } // skip reserved index += 2; let startByte = sidx.end + firstOffset; const referencesCount = readUint16(sidx, index); index += 2; for (let i = 0; i < referencesCount; i++) { let referenceIndex = index; const referenceInfo = readUint32(sidx, referenceIndex); referenceIndex += 4; const referenceSize = referenceInfo & 0x7FFFFFFF; const referenceType = (referenceInfo & 0x80000000) >>> 31; if (referenceType === 1) { console.warn('SIDX has hierarchical references (not supported)'); return; } const subsegmentDuration = readUint32(sidx, referenceIndex); referenceIndex += 4; references.push({ referenceSize, subsegmentDuration, // unscaled info: { duration: subsegmentDuration / timescale, start: startByte, end: startByte + referenceSize - 1 } }); startByte += referenceSize; // Skipping 1 bit for |startsWithSap|, 3 bits for |sapType|, and 28 bits // for |sapDelta|. referenceIndex += 4; // skip to next ref index = referenceIndex; } return { earliestPresentationTime, timescale, version, referencesCount, references, moovEndOffset }; } export function toUnsigned (value) { return value >>> 0; };
escapedRBSP: discardEmulationPreventionBytes(data), trackId: trackId };
random_line_split
processor.rs
use std::cmp::Ordering; use std::collections::{BTreeMap, BTreeSet}; use std::ffi::CString; use std::hash::{Hash, Hasher}; use std::marker::PhantomData; use std::os::raw::{c_char, c_void}; use std::str::FromStr; use std::{fmt, mem, ptr, slice, str}; use regex::Regex; use uuid::Uuid; use symbolic_common::byteview::ByteView; use symbolic_common::types::{Arch, CpuFamily, DebugId, ParseDebugIdError}; use utils; lazy_static! { static ref LINUX_BUILD_RE: Regex = Regex::new(r"^Linux ([^ ]+) (.*) \w+(?: GNU/Linux)?$").unwrap(); } extern "C" { fn code_module_base_address(module: *const CodeModule) -> u64; fn code_module_size(module: *const CodeModule) -> u64; fn code_module_code_file(module: *const CodeModule) -> *mut c_char; fn code_module_code_identifier(module: *const CodeModule) -> *mut c_char; fn code_module_debug_file(module: *const CodeModule) -> *mut c_char; fn code_module_debug_identifier(module: *const CodeModule) -> *mut c_char; fn stack_frame_return_address(frame: *const StackFrame) -> u64; fn stack_frame_instruction(frame: *const StackFrame) -> u64; fn stack_frame_module(frame: *const StackFrame) -> *const CodeModule; fn stack_frame_trust(frame: *const StackFrame) -> FrameTrust; fn call_stack_thread_id(stack: *const CallStack) -> u32; fn call_stack_frames(stack: *const CallStack, size_out: *mut usize) -> *const *const StackFrame; fn system_info_os_name(info: *const SystemInfo) -> *mut c_char; fn system_info_os_version(info: *const SystemInfo) -> *mut c_char; fn system_info_cpu_family(info: *const SystemInfo) -> *mut c_char; fn system_info_cpu_info(info: *const SystemInfo) -> *mut c_char; fn system_info_cpu_count(info: *const SystemInfo) -> u32; fn process_minidump( buffer: *const c_char, buffer_size: usize, symbols: *const SymbolEntry, symbol_count: usize, result: *mut ProcessResult, ) -> *mut IProcessState; fn process_state_delete(state: *mut IProcessState); fn process_state_threads( state: *const IProcessState, size_out: *mut usize, ) -> *const *const 
CallStack; fn process_state_requesting_thread(state: *const IProcessState) -> i32; fn process_state_timestamp(state: *const IProcessState) -> u64; fn process_state_crashed(state: *const IProcessState) -> bool; fn process_state_crash_address(state: *const IProcessState) -> u64; fn process_state_crash_reason(state: *const IProcessState) -> *mut c_char; fn process_state_assertion(state: *const IProcessState) -> *mut c_char; fn process_state_system_info(state: *const IProcessState) -> *mut SystemInfo; } /// An error returned when parsing invalid `CodeModuleId`s. pub type ParseCodeModuleIdError = ParseDebugIdError; /// Breakpad code module IDs. /// /// **Example:** /// /// ``` /// # extern crate symbolic_common; /// # extern crate symbolic_minidump; /// use std::str::FromStr; /// use symbolic_minidump::processor::CodeModuleId; /// # use symbolic_minidump::processor::ParseCodeModuleIdError; /// /// # fn foo() -> Result<(), ParseCodeModuleIdError> { /// let id = CodeModuleId::from_str("DFB8E43AF2423D73A453AEB6A777EF75a")?; /// assert_eq!("DFB8E43AF2423D73A453AEB6A777EF75a".to_string(), id.to_string()); /// # Ok(()) /// # } /// /// # fn main() { foo().unwrap() } /// ``` #[derive(Debug, Default, Eq, PartialEq, Ord, PartialOrd, Hash, Clone, Copy)] pub struct CodeModuleId { inner: DebugId, } impl CodeModuleId { /// Constructs a `CodeModuleId` from its `uuid` and `age` parts. pub fn from_parts(uuid: Uuid, age: u32) -> CodeModuleId { CodeModuleId { inner: DebugId::from_parts(uuid, age), } } /// Returns the UUID part of the code module id. pub fn uuid(&self) -> Uuid { self.inner.uuid() } /// Returns the appendix part of the code module id. /// /// On Windows, this is an incrementing counter to identify the build. /// On all other platforms, this value will always be zero. pub fn age(&self) -> u32 { self.inner.appendix() } /// Converts this code module id into a debug identifier. 
pub fn as_object_id(&self) -> DebugId { self.inner } } impl From<DebugId> for CodeModuleId { fn from(inner: DebugId) -> Self { CodeModuleId { inner } } } impl Into<DebugId> for CodeModuleId { fn into(self) -> DebugId { self.inner } } impl fmt::Display for CodeModuleId { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { self.inner.breakpad().fmt(f) } } impl str::FromStr for CodeModuleId { type Err = ParseCodeModuleIdError; fn from_str(string: &str) -> Result<CodeModuleId, ParseCodeModuleIdError> { Ok(CodeModuleId { inner: DebugId::from_breakpad(string)?, }) } } #[cfg(feature = "with_serde")] derive_deserialize_from_str!(CodeModuleId, "CodeModuleId"); #[cfg(feature = "with_serde")] derive_serialize_from_display!(CodeModuleId); /// Carries information about a code module loaded into the process during the /// crash. The `debug_identifier` uniquely identifies this module. #[repr(C)] pub struct CodeModule(c_void); impl CodeModule { /// Returns the unique identifier of this `CodeModule`. pub fn id(&self) -> Option<CodeModuleId> { CodeModuleId::from_str(&self.debug_identifier()).ok() } /// Returns the base address of this code module as it was loaded by the /// process. (uint64_t)-1 on error. pub fn base_address(&self) -> u64 { unsafe { code_module_base_address(self) } } /// The size of the code module. 0 on error. pub fn size(&self) -> u64 { unsafe { code_module_size(self) } } /// Returns the path or file name that the code module was loaded from. pub fn code_file(&self) -> String { unsafe { let ptr = code_module_code_file(self); utils::ptr_to_string(ptr) } } /// An identifying string used to discriminate between multiple versions and builds of the same /// code module. /// /// This may contain a UUID, timestamp, version number, or any combination of this or other /// information, in an implementation-defined format. 
pub fn code_identifier(&self) -> String { unsafe { let ptr = code_module_code_identifier(self); utils::ptr_to_string(ptr) } } /// Returns the filename containing debugging information of this code module. /// /// If debugging information is stored in a file separate from the code module itself (as is the /// case when .pdb or .dSYM files are used), this will be different from `code_file`. If /// debugging information is stored in the code module itself (possibly prior to stripping), /// this will be the same as code_file. pub fn debug_file(&self) -> String { unsafe { let ptr = code_module_debug_file(self); utils::ptr_to_string(ptr) } } /// Returns a string identifying the specific version and build of the associated debug file. /// /// This may be the same as `code_identifier` when the `debug_file` and `code_file` are /// identical or when the same identifier is used to identify distinct debug and code files. /// /// It usually comprises the library's UUID and an age field. On Windows, the age field is a /// generation counter, on all other platforms it is mostly zero. 
pub fn debug_identifier(&self) -> String { unsafe { let ptr = code_module_debug_identifier(self); utils::ptr_to_string(ptr) } } } impl Eq for CodeModule {} impl PartialEq for CodeModule { fn eq(&self, other: &Self) -> bool { self.id() == other.id() } } impl Hash for CodeModule { fn hash<H: Hasher>(&self, state: &mut H) { self.id().hash(state) } } impl Ord for CodeModule { fn cmp(&self, other: &Self) -> Ordering { self.id().cmp(&other.id()) } } impl PartialOrd for CodeModule { fn partial_cmp(&self, other: &Self) -> Option<Ordering> { Some(self.cmp(other)) } } impl fmt::Debug for CodeModule { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("CodeModule") .field("id", &self.id()) .field("base_address", &self.base_address()) .field("size", &self.size()) .field("code_file", &self.code_file()) .field("code_identifier", &self.code_identifier()) .field("debug_file", &self.debug_file()) .field("debug_identifier", &self.debug_identifier()) .finish() } } /// Indicates how well the instruction pointer derived during /// stack walking is trusted. Since the stack walker can resort to /// stack scanning, it can wind up with dubious frames. /// /// In rough order of "trust metric". #[repr(u32)] #[derive(Debug)] pub enum FrameTrust { /// Unknown trust. None, /// Scanned the stack, found this (lowest precision). Scan, /// Found while scanning stack using call frame info. CFIScan, /// Derived from frame pointer. FP, /// Derived from call frame info. CFI, /// Explicitly provided by some external stack walker. Prewalked, /// Given as instruction pointer in a context (highest precision). 
Context, } impl fmt::Display for FrameTrust { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let string = match *self { FrameTrust::None => "none", FrameTrust::Scan => "stack scanning", FrameTrust::CFIScan => "call frame info with scanning", FrameTrust::FP => "previous frame's frame pointer", FrameTrust::CFI => "call frame info", FrameTrust::Prewalked => "recovered by external stack walker", FrameTrust::Context => "given as instruction pointer in context", }; write!(f, "{}", string) } } /// Contains information from the memorydump, especially the frame's instruction /// pointer. Also references an optional `CodeModule` that contains the /// instruction of this stack frame. #[repr(C)] pub struct StackFrame(c_void); impl StackFrame { /// Returns the program counter location as an absolute virtual address. /// /// - For the innermost called frame in a stack, this will be an exact /// program counter or instruction pointer value. /// /// - For all other frames, this address is within the instruction that /// caused execution to branch to this frame's callee (although it may /// not point to the exact beginning of that instruction). This ensures /// that, when we look up the source code location for this frame, we /// get the source location of the call, not of the point at which /// control will resume when the call returns, which may be on the next /// line. (If the compiler knows the callee never returns, it may even /// place the call instruction at the very end of the caller's machine /// code, such that the "return address" (which will never be used) /// immediately after the call instruction is in an entirely different /// function, perhaps even from a different source file.) /// /// On some architectures, the return address as saved on the stack or in /// a register is fine for looking up the point of the call. On others, it /// requires adjustment. ReturnAddress returns the address as saved by the /// machine. 
/// /// Use `trust` to obtain how trustworthy this instruction is. pub fn instruction(&self) -> u64 { unsafe { stack_frame_instruction(self) } } // Return the actual return address, as saved on the stack or in a // register. See the comments for `StackFrame::instruction' for // details. pub fn return_address(&self, arch: Arch) -> u64 { let address = unsafe { stack_frame_return_address(self) }; // The return address reported for ARM* frames is actually the // instruction with heuristics from Breakpad applied already. // To resolve the original return address value, compensate // by adding the offsets applied in `StackwalkerARM::GetCallerFrame` // and `StackwalkerARM64::GetCallerFrame`. match arch.cpu_family() { CpuFamily::Arm32 => address + 2, CpuFamily::Arm64 => address + 4, _ => address, } } /// Returns the `CodeModule` that contains this frame's instruction. pub fn module(&self) -> Option<&CodeModule> { unsafe { stack_frame_module(self).as_ref() } } /// Returns how well the instruction pointer is trusted. pub fn trust(&self) -> FrameTrust { unsafe { stack_frame_trust(self) } } } impl fmt::Debug for StackFrame { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("StackFrame") .field("return_address", &self.return_address(Arch::Unknown)) .field("instruction", &self.instruction()) .field("trust", &self.trust()) .field("module", &self.module()) .finish() } } /// Represents a thread of the `ProcessState` which holds a list of `StackFrame`s. #[repr(C)] pub struct CallStack(c_void); impl CallStack { /// Returns the thread identifier of this callstack. pub fn thread_id(&self) -> u32 { unsafe { call_stack_thread_id(self) } } /// Returns the list of `StackFrame`s in the call stack. 
pub fn frames(&self) -> &[&StackFrame] { unsafe { let mut size = 0 as usize; let data = call_stack_frames(self, &mut size); let slice = slice::from_raw_parts(data, size); mem::transmute(slice) } } } impl fmt::Debug for CallStack { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("CallStack") .field("thread_id", &self.thread_id()) .field("frames", &self.frames()) .finish() } } /// Information about the CPU and OS on which a minidump was generated. #[repr(C)] pub struct SystemInfo(c_void); impl SystemInfo { /// A string identifying the operating system, such as "Windows NT", "Mac OS X", or "Linux". /// /// If the information is present in the dump but its value is unknown, this field will contain /// a numeric value. If the information is not present in the dump, this field will be empty. pub fn os_name(&self) -> String { unsafe { let ptr = system_info_os_name(self); utils::ptr_to_string(ptr) } } /// Strings identifying the version and build number of the operating system. /// /// If the dump does not contain either information, the component will be empty. Tries to parse /// the version number from the build if it is not apparent from the version string. pub fn os_parts(&self) -> (String, String) { let string = unsafe { let ptr = system_info_os_version(self); utils::ptr_to_string(ptr) }; let mut parts = string.splitn(2, ' '); let version = parts.next().unwrap_or("0.0.0"); let build = parts.next().unwrap_or(""); if version == "0.0.0" { // Try to parse the Linux build string. Breakpad and Crashpad run // `uname -srvmo` to generate it. This roughtly resembles: // "Linux [version] [build...] [arch] Linux/GNU" if let Some(captures) = LINUX_BUILD_RE.captures(&build) { let version = captures.get(1).unwrap(); // uname -r portion let build = captures.get(2).unwrap(); // uname -v portion return (version.as_str().into(), build.as_str().into()); } } (version.into(), build.into()) } /// A string identifying the version of the operating system. 
/// /// The version will be formatted as three-component semantic version, such as "5.1.2600" or /// "10.4.8". If the dump does not contain this information, this field will contain "0.0.0". pub fn os_version(&self) -> String { self.os_parts().0 } /// A string identifying the build of the operating system. /// /// This build version is platform dependent, such as "Service Pack 2" or "8L2127". If the dump /// does not contain this information, this field will be empty. pub fn os_build(&self) -> String { self.os_parts().1 } /// A string identifying the basic CPU family, such as "x86" or "ppc". /// /// If this information is present in the dump but its value is unknown, /// this field will contain a numeric value. If the information is not /// present in the dump, this field will be empty. pub fn cpu_family(&self) -> String { unsafe { let ptr = system_info_cpu_family(self); utils::ptr_to_string(ptr) } } /// The architecture of the CPU parsed from `ProcessState::cpu_family`. /// /// If this information is present in the dump but its value is unknown /// or if the value is missing, this field will contain `Arch::Unknown`. pub fn cpu_arch(&self) -> Arch { Arch::from_breakpad(&self.cpu_family()).unwrap_or_default() } /// A string further identifying the specific CPU. /// /// This information depends on the CPU vendor, such as "GenuineIntel level 6 model 13 stepping /// 8". If the information is not present in the dump, or additional identifying information is /// not defined for the CPU family, this field will be empty. pub fn cpu_info(&self) -> String { unsafe { let ptr = system_info_cpu_info(self); utils::ptr_to_string(ptr) } } /// The number of processors in the system. /// /// Will be greater than one for multi-core systems. 
pub fn cpu_count(&self) -> u32 { unsafe { system_info_cpu_count(self) } } } impl fmt::Debug for SystemInfo { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("SystemInfo") .field("os_name", &self.os_name()) .field("os_version", &self.os_version()) .field("cpu_family", &self.cpu_family()) .field("cpu_info", &self.cpu_info()) .field("cpu_count", &self.cpu_count()) .finish() } } /// Result of processing a Minidump or Microdump file. /// /// Usually included in `ProcessError` when the file cannot be processed. #[repr(u32)] #[derive(Debug, Eq, PartialEq, Copy, Clone)] pub enum ProcessResult { /// The dump was processed successfully. Ok, /// The minidump file was not found or the buffer was empty. MinidumpNotFound, /// The minidump file had no header. NoMinidumpHeader, /// The minidump file has no thread list. NoThreadList, /// There was an error getting one thread's data from the dump. InvalidThreadIndex, /// There was an error getting a thread id from the thread's data. InvalidThreadId, /// There was more than one requesting thread. DuplicateRequestingThreads, /// The dump processing was interrupted (not fatal). SymbolSupplierInterrupted, } impl fmt::Display for ProcessResult { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let formatted = match self { &ProcessResult::Ok => "dump processed successfully", &ProcessResult::MinidumpNotFound => "file could not be opened", &ProcessResult::NoMinidumpHeader => "minidump header missing", &ProcessResult::NoThreadList => "minidump has no thread list", &ProcessResult::InvalidThreadIndex => "could not get thread data", &ProcessResult::InvalidThreadId => "could not get a thread by id", &ProcessResult::DuplicateRequestingThreads => "multiple requesting threads", &ProcessResult::SymbolSupplierInterrupted => "processing was interrupted (not fatal)", }; write!(f, "{}", formatted) } } /// An error generated when trying to process a minidump. 
#[derive(Debug, Fail, Copy, Clone)] #[fail(display = "minidump processing failed: {}", _0)] pub struct ProcessMinidumpError(ProcessResult); impl ProcessMinidumpError { /// Returns the kind of this error. pub fn kind(&self) -> ProcessResult { self.0 } } /// Internal type used to transfer Breakpad symbols over FFI. #[repr(C)] struct SymbolEntry { debug_identifier: *const c_char, symbol_size: usize, symbol_data: *const u8, } /// Container for call frame information (CFI) of `CodeModules`. /// /// This information is required by the stackwalker in case framepointers are /// missing in the raw stacktraces. Frame information is given as plain ASCII /// text as specified in the Breakpad symbol file specification. pub type FrameInfoMap<'a> = BTreeMap<CodeModuleId, ByteView<'a>>; type IProcessState = c_void; /// Snapshot of the state of a processes during its crash. The object can be /// obtained by processing Minidump or Microdump files. pub struct ProcessState<'a> { internal: *mut IProcessState, _ty: PhantomData<ByteView<'a>>, } impl<'a> ProcessState<'a> { /// Processes a minidump supplied via raw binary data. /// /// Returns a `ProcessState` that contains information about the crashed /// process. The parameter `frame_infos` expects a map of Breakpad symbols /// containing STACK CFI and STACK WIN records to allow stackwalking with /// omitted frame pointers. pub fn from_minidump( buffer: &ByteView<'a>, frame_infos: Option<&FrameInfoMap>, ) -> Result<ProcessState<'a>, ProcessMinidumpError> { let cfi_count = frame_infos.map_or(0, |s| s.len()); let mut result: ProcessResult = ProcessResult::Ok; // Keep a reference to all CStrings to extend their lifetime. let cfi_vec: Vec<_> = frame_infos.map_or(Vec::new(), |s| { s.iter() .map(|(k, v)| (CString::new(k.to_string()), v.len(), v.as_ptr())) .collect() }); // Keep a reference to all symbol entries to extend their lifetime. 
let cfi_entries: Vec<_> = cfi_vec .iter() .map(|&(ref id, size, data)| SymbolEntry { debug_identifier: id.as_ref().map(|i| i.as_ptr()).unwrap_or(ptr::null()), symbol_size: size, symbol_data: data, }) .collect(); let internal = unsafe { process_minidump( buffer.as_ptr() as *const c_char, buffer.len(), cfi_entries.as_ptr(), cfi_count, &mut result, ) }; if result == ProcessResult::Ok && !internal.is_null()
else { Err(ProcessMinidumpError(result)) } } /// The index of the thread that requested a dump be written in the threads vector. /// /// If a dump was produced as a result of a crash, this will point to the thread that crashed. /// If the dump was produced as by user code without crashing, and the dump contains extended /// Breakpad information, this will point to the thread that requested the dump. If the dump was /// not produced as a result of an exception and no extended Breakpad information is present, /// this field will be set to -1, indicating that the dump thread is not available. pub fn requesting_thread(&self) -> i32 { unsafe { process_state_requesting_thread(self.internal) } } /// The time-date stamp of the minidump. pub fn timestamp(&self) -> u64 { unsafe { process_state_timestamp(self.internal) } } /// True if the process crashed, false if the dump was produced outside /// of an exception handler. pub fn crashed(&self) -> bool { unsafe { process_state_crashed(self.internal) } } /// If the process crashed, and if crash_reason implicates memory, the memory address that /// caused the crash. /// /// For data access errors, this will be the data address that caused the fault. For code /// errors, this will be the address of the instruction that caused the fault. pub fn crash_address(&self) -> u64 { unsafe { process_state_crash_address(self.internal) } } /// If the process crashed, the type of crash. /// /// OS- and possibly CPU-specific. For example, "EXCEPTION_ACCESS_VIOLATION" (Windows), /// "EXC_BAD_ACCESS / KERN_INVALID_ADDRESS" (Mac OS X), "SIGSEGV" (other Unix). pub fn crash_reason(&self) -> String { unsafe { let ptr = process_state_crash_reason(self.internal); utils::ptr_to_string(ptr) } } /// If there was an assertion that was hit, a textual representation /// of that assertion, possibly including the file and line at which /// it occurred. 
pub fn assertion(&self) -> String { unsafe { let ptr = process_state_assertion(self.internal); utils::ptr_to_string(ptr) } } /// Returns OS and CPU information. pub fn system_info(&self) -> &SystemInfo { unsafe { process_state_system_info(self.internal).as_ref().unwrap() } } /// Returns a list of `CallStack`s in the minidump. pub fn threads(&self) -> &[&CallStack] { unsafe { let mut size = 0 as usize; let data = process_state_threads(self.internal, &mut size); let slice = slice::from_raw_parts(data, size); mem::transmute(slice) } } /// Returns a list of all `CodeModule`s referenced in one of the `CallStack`s. pub fn referenced_modules(&self) -> BTreeSet<&CodeModule> { self.threads() .iter() .flat_map(|stack| stack.frames().iter()) .filter_map(|frame| frame.module()) .collect() } } impl<'a> Drop for ProcessState<'a> { fn drop(&mut self) { unsafe { process_state_delete(self.internal) }; } } impl<'a> fmt::Debug for ProcessState<'a> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("ProcessState") .field("requesting_thread", &self.requesting_thread()) .field("timestamp", &self.timestamp()) .field("crash_address", &self.crash_address()) .field("crash_reason", &self.crash_reason()) .field("assertion", &self.assertion()) .field("system_info", &self.system_info()) .field("threads", &self.threads()) .finish() } }
{ Ok(ProcessState { internal, _ty: PhantomData, }) }
conditional_block
processor.rs
use std::cmp::Ordering; use std::collections::{BTreeMap, BTreeSet}; use std::ffi::CString; use std::hash::{Hash, Hasher}; use std::marker::PhantomData; use std::os::raw::{c_char, c_void}; use std::str::FromStr; use std::{fmt, mem, ptr, slice, str}; use regex::Regex; use uuid::Uuid; use symbolic_common::byteview::ByteView; use symbolic_common::types::{Arch, CpuFamily, DebugId, ParseDebugIdError}; use utils; lazy_static! { static ref LINUX_BUILD_RE: Regex = Regex::new(r"^Linux ([^ ]+) (.*) \w+(?: GNU/Linux)?$").unwrap(); } extern "C" { fn code_module_base_address(module: *const CodeModule) -> u64; fn code_module_size(module: *const CodeModule) -> u64; fn code_module_code_file(module: *const CodeModule) -> *mut c_char; fn code_module_code_identifier(module: *const CodeModule) -> *mut c_char; fn code_module_debug_file(module: *const CodeModule) -> *mut c_char; fn code_module_debug_identifier(module: *const CodeModule) -> *mut c_char; fn stack_frame_return_address(frame: *const StackFrame) -> u64; fn stack_frame_instruction(frame: *const StackFrame) -> u64; fn stack_frame_module(frame: *const StackFrame) -> *const CodeModule; fn stack_frame_trust(frame: *const StackFrame) -> FrameTrust; fn call_stack_thread_id(stack: *const CallStack) -> u32; fn call_stack_frames(stack: *const CallStack, size_out: *mut usize) -> *const *const StackFrame; fn system_info_os_name(info: *const SystemInfo) -> *mut c_char; fn system_info_os_version(info: *const SystemInfo) -> *mut c_char; fn system_info_cpu_family(info: *const SystemInfo) -> *mut c_char; fn system_info_cpu_info(info: *const SystemInfo) -> *mut c_char; fn system_info_cpu_count(info: *const SystemInfo) -> u32; fn process_minidump( buffer: *const c_char, buffer_size: usize, symbols: *const SymbolEntry, symbol_count: usize, result: *mut ProcessResult, ) -> *mut IProcessState; fn process_state_delete(state: *mut IProcessState); fn process_state_threads( state: *const IProcessState, size_out: *mut usize, ) -> *const *const 
CallStack; fn process_state_requesting_thread(state: *const IProcessState) -> i32; fn process_state_timestamp(state: *const IProcessState) -> u64; fn process_state_crashed(state: *const IProcessState) -> bool; fn process_state_crash_address(state: *const IProcessState) -> u64; fn process_state_crash_reason(state: *const IProcessState) -> *mut c_char; fn process_state_assertion(state: *const IProcessState) -> *mut c_char; fn process_state_system_info(state: *const IProcessState) -> *mut SystemInfo; } /// An error returned when parsing invalid `CodeModuleId`s. pub type ParseCodeModuleIdError = ParseDebugIdError; /// Breakpad code module IDs. /// /// **Example:** /// /// ``` /// # extern crate symbolic_common; /// # extern crate symbolic_minidump; /// use std::str::FromStr; /// use symbolic_minidump::processor::CodeModuleId; /// # use symbolic_minidump::processor::ParseCodeModuleIdError; /// /// # fn foo() -> Result<(), ParseCodeModuleIdError> { /// let id = CodeModuleId::from_str("DFB8E43AF2423D73A453AEB6A777EF75a")?; /// assert_eq!("DFB8E43AF2423D73A453AEB6A777EF75a".to_string(), id.to_string()); /// # Ok(()) /// # } /// /// # fn main() { foo().unwrap() } /// ``` #[derive(Debug, Default, Eq, PartialEq, Ord, PartialOrd, Hash, Clone, Copy)] pub struct CodeModuleId { inner: DebugId, } impl CodeModuleId { /// Constructs a `CodeModuleId` from its `uuid` and `age` parts. pub fn from_parts(uuid: Uuid, age: u32) -> CodeModuleId { CodeModuleId { inner: DebugId::from_parts(uuid, age), } } /// Returns the UUID part of the code module id. pub fn uuid(&self) -> Uuid { self.inner.uuid() } /// Returns the appendix part of the code module id. /// /// On Windows, this is an incrementing counter to identify the build. /// On all other platforms, this value will always be zero. pub fn age(&self) -> u32 { self.inner.appendix() } /// Converts this code module id into a debug identifier. 
pub fn as_object_id(&self) -> DebugId { self.inner } } impl From<DebugId> for CodeModuleId { fn from(inner: DebugId) -> Self { CodeModuleId { inner } } } impl Into<DebugId> for CodeModuleId { fn into(self) -> DebugId { self.inner } } impl fmt::Display for CodeModuleId { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { self.inner.breakpad().fmt(f) } } impl str::FromStr for CodeModuleId { type Err = ParseCodeModuleIdError; fn from_str(string: &str) -> Result<CodeModuleId, ParseCodeModuleIdError> { Ok(CodeModuleId { inner: DebugId::from_breakpad(string)?, }) } } #[cfg(feature = "with_serde")] derive_deserialize_from_str!(CodeModuleId, "CodeModuleId"); #[cfg(feature = "with_serde")] derive_serialize_from_display!(CodeModuleId); /// Carries information about a code module loaded into the process during the /// crash. The `debug_identifier` uniquely identifies this module. #[repr(C)] pub struct CodeModule(c_void); impl CodeModule { /// Returns the unique identifier of this `CodeModule`. pub fn id(&self) -> Option<CodeModuleId> { CodeModuleId::from_str(&self.debug_identifier()).ok() } /// Returns the base address of this code module as it was loaded by the /// process. (uint64_t)-1 on error. pub fn base_address(&self) -> u64 { unsafe { code_module_base_address(self) } } /// The size of the code module. 0 on error. pub fn size(&self) -> u64 { unsafe { code_module_size(self) } } /// Returns the path or file name that the code module was loaded from. pub fn code_file(&self) -> String { unsafe { let ptr = code_module_code_file(self); utils::ptr_to_string(ptr) } } /// An identifying string used to discriminate between multiple versions and builds of the same /// code module. /// /// This may contain a UUID, timestamp, version number, or any combination of this or other /// information, in an implementation-defined format. 
pub fn code_identifier(&self) -> String { unsafe { let ptr = code_module_code_identifier(self); utils::ptr_to_string(ptr) } } /// Returns the filename containing debugging information of this code module. /// /// If debugging information is stored in a file separate from the code module itself (as is the /// case when .pdb or .dSYM files are used), this will be different from `code_file`. If /// debugging information is stored in the code module itself (possibly prior to stripping), /// this will be the same as code_file. pub fn debug_file(&self) -> String { unsafe { let ptr = code_module_debug_file(self); utils::ptr_to_string(ptr) } } /// Returns a string identifying the specific version and build of the associated debug file. /// /// This may be the same as `code_identifier` when the `debug_file` and `code_file` are /// identical or when the same identifier is used to identify distinct debug and code files. /// /// It usually comprises the library's UUID and an age field. On Windows, the age field is a /// generation counter, on all other platforms it is mostly zero. 
pub fn debug_identifier(&self) -> String { unsafe { let ptr = code_module_debug_identifier(self); utils::ptr_to_string(ptr) } } } impl Eq for CodeModule {} impl PartialEq for CodeModule { fn eq(&self, other: &Self) -> bool { self.id() == other.id() } } impl Hash for CodeModule { fn hash<H: Hasher>(&self, state: &mut H) { self.id().hash(state) } } impl Ord for CodeModule { fn cmp(&self, other: &Self) -> Ordering { self.id().cmp(&other.id()) } } impl PartialOrd for CodeModule { fn partial_cmp(&self, other: &Self) -> Option<Ordering> { Some(self.cmp(other)) } } impl fmt::Debug for CodeModule { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("CodeModule") .field("id", &self.id()) .field("base_address", &self.base_address()) .field("size", &self.size()) .field("code_file", &self.code_file()) .field("code_identifier", &self.code_identifier()) .field("debug_file", &self.debug_file()) .field("debug_identifier", &self.debug_identifier()) .finish() } } /// Indicates how well the instruction pointer derived during /// stack walking is trusted. Since the stack walker can resort to /// stack scanning, it can wind up with dubious frames. /// /// In rough order of "trust metric". #[repr(u32)] #[derive(Debug)] pub enum FrameTrust { /// Unknown trust. None, /// Scanned the stack, found this (lowest precision). Scan, /// Found while scanning stack using call frame info. CFIScan, /// Derived from frame pointer. FP, /// Derived from call frame info. CFI, /// Explicitly provided by some external stack walker. Prewalked, /// Given as instruction pointer in a context (highest precision). 
Context, } impl fmt::Display for FrameTrust { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let string = match *self { FrameTrust::None => "none", FrameTrust::Scan => "stack scanning", FrameTrust::CFIScan => "call frame info with scanning", FrameTrust::FP => "previous frame's frame pointer", FrameTrust::CFI => "call frame info", FrameTrust::Prewalked => "recovered by external stack walker", FrameTrust::Context => "given as instruction pointer in context", }; write!(f, "{}", string) } } /// Contains information from the memorydump, especially the frame's instruction /// pointer. Also references an optional `CodeModule` that contains the /// instruction of this stack frame. #[repr(C)] pub struct StackFrame(c_void); impl StackFrame { /// Returns the program counter location as an absolute virtual address. /// /// - For the innermost called frame in a stack, this will be an exact /// program counter or instruction pointer value. /// /// - For all other frames, this address is within the instruction that /// caused execution to branch to this frame's callee (although it may /// not point to the exact beginning of that instruction). This ensures /// that, when we look up the source code location for this frame, we /// get the source location of the call, not of the point at which /// control will resume when the call returns, which may be on the next /// line. (If the compiler knows the callee never returns, it may even /// place the call instruction at the very end of the caller's machine /// code, such that the "return address" (which will never be used) /// immediately after the call instruction is in an entirely different /// function, perhaps even from a different source file.) /// /// On some architectures, the return address as saved on the stack or in /// a register is fine for looking up the point of the call. On others, it /// requires adjustment. ReturnAddress returns the address as saved by the /// machine. 
/// /// Use `trust` to obtain how trustworthy this instruction is. pub fn instruction(&self) -> u64 { unsafe { stack_frame_instruction(self) } } // Return the actual return address, as saved on the stack or in a // register. See the comments for `StackFrame::instruction' for // details. pub fn return_address(&self, arch: Arch) -> u64 { let address = unsafe { stack_frame_return_address(self) }; // The return address reported for ARM* frames is actually the // instruction with heuristics from Breakpad applied already. // To resolve the original return address value, compensate // by adding the offsets applied in `StackwalkerARM::GetCallerFrame` // and `StackwalkerARM64::GetCallerFrame`. match arch.cpu_family() { CpuFamily::Arm32 => address + 2, CpuFamily::Arm64 => address + 4, _ => address, } } /// Returns the `CodeModule` that contains this frame's instruction. pub fn module(&self) -> Option<&CodeModule> { unsafe { stack_frame_module(self).as_ref() } } /// Returns how well the instruction pointer is trusted. pub fn trust(&self) -> FrameTrust { unsafe { stack_frame_trust(self) } } } impl fmt::Debug for StackFrame { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("StackFrame") .field("return_address", &self.return_address(Arch::Unknown)) .field("instruction", &self.instruction()) .field("trust", &self.trust()) .field("module", &self.module()) .finish() } } /// Represents a thread of the `ProcessState` which holds a list of `StackFrame`s. #[repr(C)] pub struct CallStack(c_void); impl CallStack { /// Returns the thread identifier of this callstack. pub fn thread_id(&self) -> u32 { unsafe { call_stack_thread_id(self) } } /// Returns the list of `StackFrame`s in the call stack. 
pub fn frames(&self) -> &[&StackFrame] { unsafe { let mut size = 0 as usize; let data = call_stack_frames(self, &mut size); let slice = slice::from_raw_parts(data, size); mem::transmute(slice) } } } impl fmt::Debug for CallStack { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("CallStack") .field("thread_id", &self.thread_id()) .field("frames", &self.frames()) .finish() } } /// Information about the CPU and OS on which a minidump was generated. #[repr(C)] pub struct SystemInfo(c_void); impl SystemInfo { /// A string identifying the operating system, such as "Windows NT", "Mac OS X", or "Linux". /// /// If the information is present in the dump but its value is unknown, this field will contain /// a numeric value. If the information is not present in the dump, this field will be empty. pub fn os_name(&self) -> String { unsafe { let ptr = system_info_os_name(self); utils::ptr_to_string(ptr) } } /// Strings identifying the version and build number of the operating system. /// /// If the dump does not contain either information, the component will be empty. Tries to parse /// the version number from the build if it is not apparent from the version string. pub fn os_parts(&self) -> (String, String) { let string = unsafe { let ptr = system_info_os_version(self); utils::ptr_to_string(ptr) }; let mut parts = string.splitn(2, ' '); let version = parts.next().unwrap_or("0.0.0"); let build = parts.next().unwrap_or(""); if version == "0.0.0" { // Try to parse the Linux build string. Breakpad and Crashpad run // `uname -srvmo` to generate it. This roughtly resembles: // "Linux [version] [build...] [arch] Linux/GNU" if let Some(captures) = LINUX_BUILD_RE.captures(&build) { let version = captures.get(1).unwrap(); // uname -r portion let build = captures.get(2).unwrap(); // uname -v portion return (version.as_str().into(), build.as_str().into()); } } (version.into(), build.into()) } /// A string identifying the version of the operating system. 
/// /// The version will be formatted as three-component semantic version, such as "5.1.2600" or /// "10.4.8". If the dump does not contain this information, this field will contain "0.0.0". pub fn os_version(&self) -> String { self.os_parts().0 } /// A string identifying the build of the operating system. /// /// This build version is platform dependent, such as "Service Pack 2" or "8L2127". If the dump /// does not contain this information, this field will be empty. pub fn os_build(&self) -> String { self.os_parts().1 } /// A string identifying the basic CPU family, such as "x86" or "ppc". /// /// If this information is present in the dump but its value is unknown, /// this field will contain a numeric value. If the information is not /// present in the dump, this field will be empty. pub fn cpu_family(&self) -> String { unsafe { let ptr = system_info_cpu_family(self); utils::ptr_to_string(ptr) } } /// The architecture of the CPU parsed from `ProcessState::cpu_family`. /// /// If this information is present in the dump but its value is unknown /// or if the value is missing, this field will contain `Arch::Unknown`. pub fn cpu_arch(&self) -> Arch { Arch::from_breakpad(&self.cpu_family()).unwrap_or_default() } /// A string further identifying the specific CPU. /// /// This information depends on the CPU vendor, such as "GenuineIntel level 6 model 13 stepping /// 8". If the information is not present in the dump, or additional identifying information is /// not defined for the CPU family, this field will be empty. pub fn cpu_info(&self) -> String { unsafe { let ptr = system_info_cpu_info(self); utils::ptr_to_string(ptr) } } /// The number of processors in the system. /// /// Will be greater than one for multi-core systems. 
pub fn cpu_count(&self) -> u32 { unsafe { system_info_cpu_count(self) } } } impl fmt::Debug for SystemInfo { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("SystemInfo") .field("os_name", &self.os_name()) .field("os_version", &self.os_version()) .field("cpu_family", &self.cpu_family()) .field("cpu_info", &self.cpu_info()) .field("cpu_count", &self.cpu_count()) .finish() } } /// Result of processing a Minidump or Microdump file. /// /// Usually included in `ProcessError` when the file cannot be processed. #[repr(u32)] #[derive(Debug, Eq, PartialEq, Copy, Clone)] pub enum ProcessResult { /// The dump was processed successfully. Ok, /// The minidump file was not found or the buffer was empty. MinidumpNotFound, /// The minidump file had no header. NoMinidumpHeader, /// The minidump file has no thread list. NoThreadList, /// There was an error getting one thread's data from the dump. InvalidThreadIndex, /// There was an error getting a thread id from the thread's data. InvalidThreadId, /// There was more than one requesting thread. DuplicateRequestingThreads, /// The dump processing was interrupted (not fatal). SymbolSupplierInterrupted, } impl fmt::Display for ProcessResult { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let formatted = match self { &ProcessResult::Ok => "dump processed successfully", &ProcessResult::MinidumpNotFound => "file could not be opened", &ProcessResult::NoMinidumpHeader => "minidump header missing", &ProcessResult::NoThreadList => "minidump has no thread list", &ProcessResult::InvalidThreadIndex => "could not get thread data", &ProcessResult::InvalidThreadId => "could not get a thread by id", &ProcessResult::DuplicateRequestingThreads => "multiple requesting threads", &ProcessResult::SymbolSupplierInterrupted => "processing was interrupted (not fatal)", }; write!(f, "{}", formatted) } } /// An error generated when trying to process a minidump. 
#[derive(Debug, Fail, Copy, Clone)] #[fail(display = "minidump processing failed: {}", _0)] pub struct ProcessMinidumpError(ProcessResult); impl ProcessMinidumpError { /// Returns the kind of this error. pub fn kind(&self) -> ProcessResult { self.0 } } /// Internal type used to transfer Breakpad symbols over FFI. #[repr(C)] struct SymbolEntry { debug_identifier: *const c_char, symbol_size: usize, symbol_data: *const u8, } /// Container for call frame information (CFI) of `CodeModules`. /// /// This information is required by the stackwalker in case framepointers are /// missing in the raw stacktraces. Frame information is given as plain ASCII /// text as specified in the Breakpad symbol file specification. pub type FrameInfoMap<'a> = BTreeMap<CodeModuleId, ByteView<'a>>; type IProcessState = c_void; /// Snapshot of the state of a processes during its crash. The object can be /// obtained by processing Minidump or Microdump files. pub struct
<'a> { internal: *mut IProcessState, _ty: PhantomData<ByteView<'a>>, } impl<'a> ProcessState<'a> { /// Processes a minidump supplied via raw binary data. /// /// Returns a `ProcessState` that contains information about the crashed /// process. The parameter `frame_infos` expects a map of Breakpad symbols /// containing STACK CFI and STACK WIN records to allow stackwalking with /// omitted frame pointers. pub fn from_minidump( buffer: &ByteView<'a>, frame_infos: Option<&FrameInfoMap>, ) -> Result<ProcessState<'a>, ProcessMinidumpError> { let cfi_count = frame_infos.map_or(0, |s| s.len()); let mut result: ProcessResult = ProcessResult::Ok; // Keep a reference to all CStrings to extend their lifetime. let cfi_vec: Vec<_> = frame_infos.map_or(Vec::new(), |s| { s.iter() .map(|(k, v)| (CString::new(k.to_string()), v.len(), v.as_ptr())) .collect() }); // Keep a reference to all symbol entries to extend their lifetime. let cfi_entries: Vec<_> = cfi_vec .iter() .map(|&(ref id, size, data)| SymbolEntry { debug_identifier: id.as_ref().map(|i| i.as_ptr()).unwrap_or(ptr::null()), symbol_size: size, symbol_data: data, }) .collect(); let internal = unsafe { process_minidump( buffer.as_ptr() as *const c_char, buffer.len(), cfi_entries.as_ptr(), cfi_count, &mut result, ) }; if result == ProcessResult::Ok && !internal.is_null() { Ok(ProcessState { internal, _ty: PhantomData, }) } else { Err(ProcessMinidumpError(result)) } } /// The index of the thread that requested a dump be written in the threads vector. /// /// If a dump was produced as a result of a crash, this will point to the thread that crashed. /// If the dump was produced as by user code without crashing, and the dump contains extended /// Breakpad information, this will point to the thread that requested the dump. If the dump was /// not produced as a result of an exception and no extended Breakpad information is present, /// this field will be set to -1, indicating that the dump thread is not available. 
pub fn requesting_thread(&self) -> i32 { unsafe { process_state_requesting_thread(self.internal) } } /// The time-date stamp of the minidump. pub fn timestamp(&self) -> u64 { unsafe { process_state_timestamp(self.internal) } } /// True if the process crashed, false if the dump was produced outside /// of an exception handler. pub fn crashed(&self) -> bool { unsafe { process_state_crashed(self.internal) } } /// If the process crashed, and if crash_reason implicates memory, the memory address that /// caused the crash. /// /// For data access errors, this will be the data address that caused the fault. For code /// errors, this will be the address of the instruction that caused the fault. pub fn crash_address(&self) -> u64 { unsafe { process_state_crash_address(self.internal) } } /// If the process crashed, the type of crash. /// /// OS- and possibly CPU-specific. For example, "EXCEPTION_ACCESS_VIOLATION" (Windows), /// "EXC_BAD_ACCESS / KERN_INVALID_ADDRESS" (Mac OS X), "SIGSEGV" (other Unix). pub fn crash_reason(&self) -> String { unsafe { let ptr = process_state_crash_reason(self.internal); utils::ptr_to_string(ptr) } } /// If there was an assertion that was hit, a textual representation /// of that assertion, possibly including the file and line at which /// it occurred. pub fn assertion(&self) -> String { unsafe { let ptr = process_state_assertion(self.internal); utils::ptr_to_string(ptr) } } /// Returns OS and CPU information. pub fn system_info(&self) -> &SystemInfo { unsafe { process_state_system_info(self.internal).as_ref().unwrap() } } /// Returns a list of `CallStack`s in the minidump. pub fn threads(&self) -> &[&CallStack] { unsafe { let mut size = 0 as usize; let data = process_state_threads(self.internal, &mut size); let slice = slice::from_raw_parts(data, size); mem::transmute(slice) } } /// Returns a list of all `CodeModule`s referenced in one of the `CallStack`s. 
pub fn referenced_modules(&self) -> BTreeSet<&CodeModule> { self.threads() .iter() .flat_map(|stack| stack.frames().iter()) .filter_map(|frame| frame.module()) .collect() } } impl<'a> Drop for ProcessState<'a> { fn drop(&mut self) { unsafe { process_state_delete(self.internal) }; } } impl<'a> fmt::Debug for ProcessState<'a> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("ProcessState") .field("requesting_thread", &self.requesting_thread()) .field("timestamp", &self.timestamp()) .field("crash_address", &self.crash_address()) .field("crash_reason", &self.crash_reason()) .field("assertion", &self.assertion()) .field("system_info", &self.system_info()) .field("threads", &self.threads()) .finish() } }
ProcessState
identifier_name
processor.rs
use std::cmp::Ordering; use std::collections::{BTreeMap, BTreeSet}; use std::ffi::CString; use std::hash::{Hash, Hasher}; use std::marker::PhantomData; use std::os::raw::{c_char, c_void}; use std::str::FromStr; use std::{fmt, mem, ptr, slice, str}; use regex::Regex; use uuid::Uuid; use symbolic_common::byteview::ByteView; use symbolic_common::types::{Arch, CpuFamily, DebugId, ParseDebugIdError}; use utils; lazy_static! { static ref LINUX_BUILD_RE: Regex = Regex::new(r"^Linux ([^ ]+) (.*) \w+(?: GNU/Linux)?$").unwrap(); } extern "C" { fn code_module_base_address(module: *const CodeModule) -> u64; fn code_module_size(module: *const CodeModule) -> u64; fn code_module_code_file(module: *const CodeModule) -> *mut c_char; fn code_module_code_identifier(module: *const CodeModule) -> *mut c_char; fn code_module_debug_file(module: *const CodeModule) -> *mut c_char; fn code_module_debug_identifier(module: *const CodeModule) -> *mut c_char; fn stack_frame_return_address(frame: *const StackFrame) -> u64; fn stack_frame_instruction(frame: *const StackFrame) -> u64; fn stack_frame_module(frame: *const StackFrame) -> *const CodeModule; fn stack_frame_trust(frame: *const StackFrame) -> FrameTrust; fn call_stack_thread_id(stack: *const CallStack) -> u32; fn call_stack_frames(stack: *const CallStack, size_out: *mut usize) -> *const *const StackFrame; fn system_info_os_name(info: *const SystemInfo) -> *mut c_char; fn system_info_os_version(info: *const SystemInfo) -> *mut c_char; fn system_info_cpu_family(info: *const SystemInfo) -> *mut c_char; fn system_info_cpu_info(info: *const SystemInfo) -> *mut c_char; fn system_info_cpu_count(info: *const SystemInfo) -> u32; fn process_minidump( buffer: *const c_char, buffer_size: usize, symbols: *const SymbolEntry, symbol_count: usize, result: *mut ProcessResult, ) -> *mut IProcessState; fn process_state_delete(state: *mut IProcessState); fn process_state_threads( state: *const IProcessState, size_out: *mut usize, ) -> *const *const 
CallStack; fn process_state_requesting_thread(state: *const IProcessState) -> i32; fn process_state_timestamp(state: *const IProcessState) -> u64; fn process_state_crashed(state: *const IProcessState) -> bool; fn process_state_crash_address(state: *const IProcessState) -> u64; fn process_state_crash_reason(state: *const IProcessState) -> *mut c_char; fn process_state_assertion(state: *const IProcessState) -> *mut c_char; fn process_state_system_info(state: *const IProcessState) -> *mut SystemInfo; } /// An error returned when parsing invalid `CodeModuleId`s. pub type ParseCodeModuleIdError = ParseDebugIdError; /// Breakpad code module IDs. /// /// **Example:** /// /// ``` /// # extern crate symbolic_common; /// # extern crate symbolic_minidump; /// use std::str::FromStr; /// use symbolic_minidump::processor::CodeModuleId; /// # use symbolic_minidump::processor::ParseCodeModuleIdError; /// /// # fn foo() -> Result<(), ParseCodeModuleIdError> { /// let id = CodeModuleId::from_str("DFB8E43AF2423D73A453AEB6A777EF75a")?; /// assert_eq!("DFB8E43AF2423D73A453AEB6A777EF75a".to_string(), id.to_string()); /// # Ok(()) /// # } /// /// # fn main() { foo().unwrap() } /// ``` #[derive(Debug, Default, Eq, PartialEq, Ord, PartialOrd, Hash, Clone, Copy)] pub struct CodeModuleId { inner: DebugId, } impl CodeModuleId { /// Constructs a `CodeModuleId` from its `uuid` and `age` parts. pub fn from_parts(uuid: Uuid, age: u32) -> CodeModuleId { CodeModuleId { inner: DebugId::from_parts(uuid, age), } } /// Returns the UUID part of the code module id. pub fn uuid(&self) -> Uuid { self.inner.uuid() } /// Returns the appendix part of the code module id. /// /// On Windows, this is an incrementing counter to identify the build. /// On all other platforms, this value will always be zero. pub fn age(&self) -> u32 { self.inner.appendix() } /// Converts this code module id into a debug identifier. 
pub fn as_object_id(&self) -> DebugId { self.inner } } impl From<DebugId> for CodeModuleId { fn from(inner: DebugId) -> Self { CodeModuleId { inner } } } impl Into<DebugId> for CodeModuleId { fn into(self) -> DebugId { self.inner } } impl fmt::Display for CodeModuleId { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { self.inner.breakpad().fmt(f) } } impl str::FromStr for CodeModuleId { type Err = ParseCodeModuleIdError; fn from_str(string: &str) -> Result<CodeModuleId, ParseCodeModuleIdError> { Ok(CodeModuleId { inner: DebugId::from_breakpad(string)?, }) } } #[cfg(feature = "with_serde")] derive_deserialize_from_str!(CodeModuleId, "CodeModuleId"); #[cfg(feature = "with_serde")] derive_serialize_from_display!(CodeModuleId); /// Carries information about a code module loaded into the process during the /// crash. The `debug_identifier` uniquely identifies this module. #[repr(C)] pub struct CodeModule(c_void); impl CodeModule { /// Returns the unique identifier of this `CodeModule`. pub fn id(&self) -> Option<CodeModuleId> { CodeModuleId::from_str(&self.debug_identifier()).ok() } /// Returns the base address of this code module as it was loaded by the /// process. (uint64_t)-1 on error. pub fn base_address(&self) -> u64 { unsafe { code_module_base_address(self) } } /// The size of the code module. 0 on error. pub fn size(&self) -> u64 { unsafe { code_module_size(self) } } /// Returns the path or file name that the code module was loaded from. pub fn code_file(&self) -> String { unsafe { let ptr = code_module_code_file(self); utils::ptr_to_string(ptr) } } /// An identifying string used to discriminate between multiple versions and builds of the same /// code module. /// /// This may contain a UUID, timestamp, version number, or any combination of this or other /// information, in an implementation-defined format. 
pub fn code_identifier(&self) -> String { unsafe { let ptr = code_module_code_identifier(self); utils::ptr_to_string(ptr) } } /// Returns the filename containing debugging information of this code module. /// /// If debugging information is stored in a file separate from the code module itself (as is the /// case when .pdb or .dSYM files are used), this will be different from `code_file`. If /// debugging information is stored in the code module itself (possibly prior to stripping), /// this will be the same as code_file. pub fn debug_file(&self) -> String { unsafe { let ptr = code_module_debug_file(self); utils::ptr_to_string(ptr) } } /// Returns a string identifying the specific version and build of the associated debug file. /// /// This may be the same as `code_identifier` when the `debug_file` and `code_file` are /// identical or when the same identifier is used to identify distinct debug and code files. /// /// It usually comprises the library's UUID and an age field. On Windows, the age field is a /// generation counter, on all other platforms it is mostly zero. 
pub fn debug_identifier(&self) -> String { unsafe { let ptr = code_module_debug_identifier(self); utils::ptr_to_string(ptr) } } } impl Eq for CodeModule {} impl PartialEq for CodeModule { fn eq(&self, other: &Self) -> bool { self.id() == other.id() } } impl Hash for CodeModule { fn hash<H: Hasher>(&self, state: &mut H) { self.id().hash(state) } } impl Ord for CodeModule { fn cmp(&self, other: &Self) -> Ordering { self.id().cmp(&other.id()) } } impl PartialOrd for CodeModule { fn partial_cmp(&self, other: &Self) -> Option<Ordering> { Some(self.cmp(other)) } } impl fmt::Debug for CodeModule { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("CodeModule") .field("id", &self.id()) .field("base_address", &self.base_address()) .field("size", &self.size()) .field("code_file", &self.code_file()) .field("code_identifier", &self.code_identifier()) .field("debug_file", &self.debug_file()) .field("debug_identifier", &self.debug_identifier()) .finish() } } /// Indicates how well the instruction pointer derived during /// stack walking is trusted. Since the stack walker can resort to /// stack scanning, it can wind up with dubious frames. /// /// In rough order of "trust metric". #[repr(u32)] #[derive(Debug)] pub enum FrameTrust { /// Unknown trust. None, /// Scanned the stack, found this (lowest precision). Scan, /// Found while scanning stack using call frame info. CFIScan, /// Derived from frame pointer. FP, /// Derived from call frame info. CFI, /// Explicitly provided by some external stack walker. Prewalked, /// Given as instruction pointer in a context (highest precision). 
Context, } impl fmt::Display for FrameTrust { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let string = match *self { FrameTrust::None => "none", FrameTrust::Scan => "stack scanning", FrameTrust::CFIScan => "call frame info with scanning", FrameTrust::FP => "previous frame's frame pointer", FrameTrust::CFI => "call frame info", FrameTrust::Prewalked => "recovered by external stack walker", FrameTrust::Context => "given as instruction pointer in context", }; write!(f, "{}", string) } } /// Contains information from the memorydump, especially the frame's instruction /// pointer. Also references an optional `CodeModule` that contains the /// instruction of this stack frame. #[repr(C)] pub struct StackFrame(c_void); impl StackFrame { /// Returns the program counter location as an absolute virtual address. /// /// - For the innermost called frame in a stack, this will be an exact /// program counter or instruction pointer value. /// /// - For all other frames, this address is within the instruction that /// caused execution to branch to this frame's callee (although it may /// not point to the exact beginning of that instruction). This ensures /// that, when we look up the source code location for this frame, we /// get the source location of the call, not of the point at which /// control will resume when the call returns, which may be on the next /// line. (If the compiler knows the callee never returns, it may even /// place the call instruction at the very end of the caller's machine /// code, such that the "return address" (which will never be used) /// immediately after the call instruction is in an entirely different /// function, perhaps even from a different source file.) /// /// On some architectures, the return address as saved on the stack or in /// a register is fine for looking up the point of the call. On others, it /// requires adjustment. ReturnAddress returns the address as saved by the /// machine. 
/// /// Use `trust` to obtain how trustworthy this instruction is. pub fn instruction(&self) -> u64 { unsafe { stack_frame_instruction(self) } } // Return the actual return address, as saved on the stack or in a // register. See the comments for `StackFrame::instruction' for // details. pub fn return_address(&self, arch: Arch) -> u64 { let address = unsafe { stack_frame_return_address(self) }; // The return address reported for ARM* frames is actually the // instruction with heuristics from Breakpad applied already. // To resolve the original return address value, compensate // by adding the offsets applied in `StackwalkerARM::GetCallerFrame` // and `StackwalkerARM64::GetCallerFrame`. match arch.cpu_family() { CpuFamily::Arm32 => address + 2, CpuFamily::Arm64 => address + 4, _ => address, } } /// Returns the `CodeModule` that contains this frame's instruction. pub fn module(&self) -> Option<&CodeModule> { unsafe { stack_frame_module(self).as_ref() } } /// Returns how well the instruction pointer is trusted. pub fn trust(&self) -> FrameTrust { unsafe { stack_frame_trust(self) } } } impl fmt::Debug for StackFrame { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("StackFrame") .field("return_address", &self.return_address(Arch::Unknown)) .field("instruction", &self.instruction()) .field("trust", &self.trust()) .field("module", &self.module()) .finish() } } /// Represents a thread of the `ProcessState` which holds a list of `StackFrame`s. #[repr(C)] pub struct CallStack(c_void); impl CallStack { /// Returns the thread identifier of this callstack. pub fn thread_id(&self) -> u32 { unsafe { call_stack_thread_id(self) } } /// Returns the list of `StackFrame`s in the call stack. 
pub fn frames(&self) -> &[&StackFrame] { unsafe { let mut size = 0 as usize; let data = call_stack_frames(self, &mut size); let slice = slice::from_raw_parts(data, size); mem::transmute(slice) } } } impl fmt::Debug for CallStack { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("CallStack") .field("thread_id", &self.thread_id()) .field("frames", &self.frames()) .finish() } } /// Information about the CPU and OS on which a minidump was generated. #[repr(C)] pub struct SystemInfo(c_void); impl SystemInfo { /// A string identifying the operating system, such as "Windows NT", "Mac OS X", or "Linux". /// /// If the information is present in the dump but its value is unknown, this field will contain /// a numeric value. If the information is not present in the dump, this field will be empty. pub fn os_name(&self) -> String { unsafe { let ptr = system_info_os_name(self); utils::ptr_to_string(ptr) } } /// Strings identifying the version and build number of the operating system. /// /// If the dump does not contain either information, the component will be empty. Tries to parse /// the version number from the build if it is not apparent from the version string. pub fn os_parts(&self) -> (String, String) { let string = unsafe { let ptr = system_info_os_version(self); utils::ptr_to_string(ptr) }; let mut parts = string.splitn(2, ' '); let version = parts.next().unwrap_or("0.0.0"); let build = parts.next().unwrap_or(""); if version == "0.0.0" { // Try to parse the Linux build string. Breakpad and Crashpad run // `uname -srvmo` to generate it. This roughtly resembles: // "Linux [version] [build...] [arch] Linux/GNU" if let Some(captures) = LINUX_BUILD_RE.captures(&build) { let version = captures.get(1).unwrap(); // uname -r portion let build = captures.get(2).unwrap(); // uname -v portion return (version.as_str().into(), build.as_str().into()); } } (version.into(), build.into()) } /// A string identifying the version of the operating system. 
/// /// The version will be formatted as three-component semantic version, such as "5.1.2600" or /// "10.4.8". If the dump does not contain this information, this field will contain "0.0.0". pub fn os_version(&self) -> String { self.os_parts().0 } /// A string identifying the build of the operating system. /// /// This build version is platform dependent, such as "Service Pack 2" or "8L2127". If the dump /// does not contain this information, this field will be empty. pub fn os_build(&self) -> String { self.os_parts().1 } /// A string identifying the basic CPU family, such as "x86" or "ppc". /// /// If this information is present in the dump but its value is unknown, /// this field will contain a numeric value. If the information is not /// present in the dump, this field will be empty. pub fn cpu_family(&self) -> String { unsafe { let ptr = system_info_cpu_family(self); utils::ptr_to_string(ptr) } } /// The architecture of the CPU parsed from `ProcessState::cpu_family`. /// /// If this information is present in the dump but its value is unknown /// or if the value is missing, this field will contain `Arch::Unknown`. pub fn cpu_arch(&self) -> Arch { Arch::from_breakpad(&self.cpu_family()).unwrap_or_default() } /// A string further identifying the specific CPU. /// /// This information depends on the CPU vendor, such as "GenuineIntel level 6 model 13 stepping /// 8". If the information is not present in the dump, or additional identifying information is /// not defined for the CPU family, this field will be empty. pub fn cpu_info(&self) -> String { unsafe { let ptr = system_info_cpu_info(self); utils::ptr_to_string(ptr) } } /// The number of processors in the system. /// /// Will be greater than one for multi-core systems. 
pub fn cpu_count(&self) -> u32 { unsafe { system_info_cpu_count(self) } } } impl fmt::Debug for SystemInfo { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("SystemInfo") .field("os_name", &self.os_name()) .field("os_version", &self.os_version()) .field("cpu_family", &self.cpu_family()) .field("cpu_info", &self.cpu_info()) .field("cpu_count", &self.cpu_count()) .finish() } } /// Result of processing a Minidump or Microdump file. /// /// Usually included in `ProcessError` when the file cannot be processed. #[repr(u32)] #[derive(Debug, Eq, PartialEq, Copy, Clone)] pub enum ProcessResult { /// The dump was processed successfully. Ok, /// The minidump file was not found or the buffer was empty. MinidumpNotFound,
NoMinidumpHeader, /// The minidump file has no thread list. NoThreadList, /// There was an error getting one thread's data from the dump. InvalidThreadIndex, /// There was an error getting a thread id from the thread's data. InvalidThreadId, /// There was more than one requesting thread. DuplicateRequestingThreads, /// The dump processing was interrupted (not fatal). SymbolSupplierInterrupted, } impl fmt::Display for ProcessResult { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let formatted = match self { &ProcessResult::Ok => "dump processed successfully", &ProcessResult::MinidumpNotFound => "file could not be opened", &ProcessResult::NoMinidumpHeader => "minidump header missing", &ProcessResult::NoThreadList => "minidump has no thread list", &ProcessResult::InvalidThreadIndex => "could not get thread data", &ProcessResult::InvalidThreadId => "could not get a thread by id", &ProcessResult::DuplicateRequestingThreads => "multiple requesting threads", &ProcessResult::SymbolSupplierInterrupted => "processing was interrupted (not fatal)", }; write!(f, "{}", formatted) } } /// An error generated when trying to process a minidump. #[derive(Debug, Fail, Copy, Clone)] #[fail(display = "minidump processing failed: {}", _0)] pub struct ProcessMinidumpError(ProcessResult); impl ProcessMinidumpError { /// Returns the kind of this error. pub fn kind(&self) -> ProcessResult { self.0 } } /// Internal type used to transfer Breakpad symbols over FFI. #[repr(C)] struct SymbolEntry { debug_identifier: *const c_char, symbol_size: usize, symbol_data: *const u8, } /// Container for call frame information (CFI) of `CodeModules`. /// /// This information is required by the stackwalker in case framepointers are /// missing in the raw stacktraces. Frame information is given as plain ASCII /// text as specified in the Breakpad symbol file specification. 
pub type FrameInfoMap<'a> = BTreeMap<CodeModuleId, ByteView<'a>>; type IProcessState = c_void; /// Snapshot of the state of a processes during its crash. The object can be /// obtained by processing Minidump or Microdump files. pub struct ProcessState<'a> { internal: *mut IProcessState, _ty: PhantomData<ByteView<'a>>, } impl<'a> ProcessState<'a> { /// Processes a minidump supplied via raw binary data. /// /// Returns a `ProcessState` that contains information about the crashed /// process. The parameter `frame_infos` expects a map of Breakpad symbols /// containing STACK CFI and STACK WIN records to allow stackwalking with /// omitted frame pointers. pub fn from_minidump( buffer: &ByteView<'a>, frame_infos: Option<&FrameInfoMap>, ) -> Result<ProcessState<'a>, ProcessMinidumpError> { let cfi_count = frame_infos.map_or(0, |s| s.len()); let mut result: ProcessResult = ProcessResult::Ok; // Keep a reference to all CStrings to extend their lifetime. let cfi_vec: Vec<_> = frame_infos.map_or(Vec::new(), |s| { s.iter() .map(|(k, v)| (CString::new(k.to_string()), v.len(), v.as_ptr())) .collect() }); // Keep a reference to all symbol entries to extend their lifetime. let cfi_entries: Vec<_> = cfi_vec .iter() .map(|&(ref id, size, data)| SymbolEntry { debug_identifier: id.as_ref().map(|i| i.as_ptr()).unwrap_or(ptr::null()), symbol_size: size, symbol_data: data, }) .collect(); let internal = unsafe { process_minidump( buffer.as_ptr() as *const c_char, buffer.len(), cfi_entries.as_ptr(), cfi_count, &mut result, ) }; if result == ProcessResult::Ok && !internal.is_null() { Ok(ProcessState { internal, _ty: PhantomData, }) } else { Err(ProcessMinidumpError(result)) } } /// The index of the thread that requested a dump be written in the threads vector. /// /// If a dump was produced as a result of a crash, this will point to the thread that crashed. 
/// If the dump was produced as by user code without crashing, and the dump contains extended /// Breakpad information, this will point to the thread that requested the dump. If the dump was /// not produced as a result of an exception and no extended Breakpad information is present, /// this field will be set to -1, indicating that the dump thread is not available. pub fn requesting_thread(&self) -> i32 { unsafe { process_state_requesting_thread(self.internal) } } /// The time-date stamp of the minidump. pub fn timestamp(&self) -> u64 { unsafe { process_state_timestamp(self.internal) } } /// True if the process crashed, false if the dump was produced outside /// of an exception handler. pub fn crashed(&self) -> bool { unsafe { process_state_crashed(self.internal) } } /// If the process crashed, and if crash_reason implicates memory, the memory address that /// caused the crash. /// /// For data access errors, this will be the data address that caused the fault. For code /// errors, this will be the address of the instruction that caused the fault. pub fn crash_address(&self) -> u64 { unsafe { process_state_crash_address(self.internal) } } /// If the process crashed, the type of crash. /// /// OS- and possibly CPU-specific. For example, "EXCEPTION_ACCESS_VIOLATION" (Windows), /// "EXC_BAD_ACCESS / KERN_INVALID_ADDRESS" (Mac OS X), "SIGSEGV" (other Unix). pub fn crash_reason(&self) -> String { unsafe { let ptr = process_state_crash_reason(self.internal); utils::ptr_to_string(ptr) } } /// If there was an assertion that was hit, a textual representation /// of that assertion, possibly including the file and line at which /// it occurred. pub fn assertion(&self) -> String { unsafe { let ptr = process_state_assertion(self.internal); utils::ptr_to_string(ptr) } } /// Returns OS and CPU information. pub fn system_info(&self) -> &SystemInfo { unsafe { process_state_system_info(self.internal).as_ref().unwrap() } } /// Returns a list of `CallStack`s in the minidump. 
pub fn threads(&self) -> &[&CallStack] { unsafe { let mut size = 0 as usize; let data = process_state_threads(self.internal, &mut size); let slice = slice::from_raw_parts(data, size); mem::transmute(slice) } } /// Returns a list of all `CodeModule`s referenced in one of the `CallStack`s. pub fn referenced_modules(&self) -> BTreeSet<&CodeModule> { self.threads() .iter() .flat_map(|stack| stack.frames().iter()) .filter_map(|frame| frame.module()) .collect() } } impl<'a> Drop for ProcessState<'a> { fn drop(&mut self) { unsafe { process_state_delete(self.internal) }; } } impl<'a> fmt::Debug for ProcessState<'a> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("ProcessState") .field("requesting_thread", &self.requesting_thread()) .field("timestamp", &self.timestamp()) .field("crash_address", &self.crash_address()) .field("crash_reason", &self.crash_reason()) .field("assertion", &self.assertion()) .field("system_info", &self.system_info()) .field("threads", &self.threads()) .finish() } }
/// The minidump file had no header.
random_line_split
level_2_optionals_cdsu.py
import sys import time import logging import pandas as pd from traceback import format_exc import level_2_optionals_cdsu_options from level_2_optionals_cdsu_options import project_id from modules.level_1_a_data_acquisition import sql_retrieve_df from modules.level_1_b_data_processing import null_analysis, options_scraping_v2, remove_zero_price_total_vhe, lowercase_column_conversion, remove_rows, remove_columns, string_replacer, color_replacement, new_column_creation, score_calculation, duplicate_removal, total_price, margin_calculation, new_features, column_rename from modules.level_1_d_model_evaluation import data_grouping_by_locals_temp from modules.level_1_e_deployment import sql_inject, sql_delete, sql_date_comparison from modules.level_0_performance_report import performance_info_append, error_upload, log_record, project_dict, performance_info pd.set_option('display.expand_frame_repr', False) logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s %(message)s', datefmt='%H:%M:%S @ %d/%m/%y', filename=level_2_optionals_cdsu_options.log_files['full_log'], filemode='a') logging.Logger('errors') # logging.getLogger().addHandler(logging.StreamHandler(sys.stdout)) # Allows the stdout to be seen in the console logging.getLogger().addHandler(logging.StreamHandler(sys.stderr)) # Allows the stderr to be seen in the console configuration_parameters = level_2_optionals_cdsu_options.selected_configuration_parameters dict_sql_upload_flag = 0 def main(): log_record('Projeto: Sugestão Encomenda CDSU - Viaturas', project_id) query_filters = {'NLR_CODE': '4R0', 'Franchise_Code_DW': '43'} df = data_acquisition(query_filters) control_prints(df, 'after getting data', head=1, date=1) df = data_processing(df) model_choice_message, df, vehicle_count = data_grouping_by_locals_temp(df, configuration_parameters, level_2_optionals_cdsu_options.project_id) control_prints(df, 'before deployment', head=1) deployment(df, level_2_optionals_cdsu_options.sql_info['database'], 
level_2_optionals_cdsu_options.sql_info['final_table']) performance_info(level_2_optionals_cdsu_options.project_id, level_2_optionals_cdsu_options, model_choice_message, vehicle_count) log_record('Conclusão com sucesso - Projeto {}.\n'.format(project_dict[project_id]), project_id) def data_acquisition(query_filters): performance_info_append(time.time(), 'Section_A_Start') log_record('Início Secção A...', project_id) df = sql_retrieve_df(level_2_optionals_cdsu_options.DSN_MLG_PRD, level_2_optionals_cdsu_options.sql_info['database'], level_2_optionals_cdsu_options.sql_info['initial_table'], level_2_optionals_cdsu_options, list(level_2_optionals_cdsu_options.sql_to_code_renaming.keys()), query_filters, column_renaming=1, parse_dates=['Purchase_Date', 'Sell_Date']) # project_units_count_checkup(df, 'Nº Stock', level_2_optionals_cdsu_options, sql_check=0) log_record('Fim Secção A.', project_id) performance_info_append(time.time(), 'Section_A_End') return df def control_prints(df, tag, head=0, save=0, null_analysis_flag=0, date=0): # print('{}\n{}'.format(tag, df.shape)) # try: # print('Unique Vehicles: {}'.format(df['Nº Stock'].nunique())) # # except KeyError: # print('Unique Vehicles: {}'.format(df['VHE_Number'].nunique())) # # if head: # print(df.head()) # if save: # df.to_csv('dbs/cdsu_control_save_tag_{}.csv'.format(tag)) # if null_analysis_flag: # null_analysis(df) # if date: # try: # print('Current Max Sell Date is {}'.format(max(df['Data Venda']))) # except KeyError: # print('Current Max Sell Date is {}'.format(max(df['Sell_Date']))) return def data_processing(df): performance_info_append(time.time(), 'Section_B_Start') log_record('Início Secção B...', project_id) log_record('Checkpoint não encontrado ou demasiado antigo. 
A processar dados...', project_id) df = lowercase_column_conversion(df, ['Opcional', 'Cor', 'Interior', 'Versão']) # Lowercases the strings of these columns dict_strings_to_replace = {('Modelo', ' - não utilizar'): '', ('Interior', '\\|'): '/', ('Cor', '\\|'): '', ('Interior', 'ind.'): '', ('Interior', '\\]'): '/', ('Interior', '\\.'): ' ', ('Interior', '\'merino\''): 'merino', ('Interior', '\' merino\''): 'merino', ('Interior', '\'vernasca\''): 'vernasca', ('Interior', 'leder'): 'leather', ('Interior', 'p '): 'pele', ('Interior', 'pelenevada'): 'pele nevada', ('Opcional', 'bi-xénon'): 'bixénon', ('Opcional', 'bi-xenon'): 'bixénon', ('Opcional', 'vidro'): 'vidros', ('Opcional', 'dacota'): 'dakota', ('Opcional', 'whites'): 'white', ('Opcional', 'beige'): 'bege', ('Interior', '\'dakota\''): 'dakota', ('Interior', 'dacota'): 'dakota', ('Interior', 'mokka'): 'mocha', ('Interior', 'beige'): 'bege', ('Interior', 'dakota\''): 'dakota', ('Interior', 'antracite/cinza/p'): 'antracite/cinza/preto', ('Interior', 'antracite/cinza/pretoreto'): 'antracite/cinza/preto', ('Interior', 'nevada\''): 'nevada', ('Interior', '"nappa"'): 'nappa', ('Interior', 'anthrazit'): 'antracite', ('Interior', 'antracito'): 'antracite', ('Interior', 'preto/laranja/preto/lara'): 'preto/laranja', ('Interior', 'anthtacite'): 'antracite', ('Interior', 'champag'): 'champagne', ('Interior', 'cri'): 'crimson', ('Modelo', 'Enter Model Details'): '', ('Registration_Number', '\.'): '', ('Interior', 'preto/m '): 'preto ', ('Interior', 'congnac/preto'): 'cognac/preto', ('Local da Venda', 'DCN'): 'DCP', ('Cor', 'oceanao'): 'oceano', ('Cor', 'ocenao'): 'oceano', ('Interior', 'reto'): 'preto', ('Cor', 'banco'): 'branco', ('Cor', 'catanho'): 'castanho', ('Cor', 'petrìleo'): 'petróleo', ('Interior', 'ecido'): 'tecido', ('Interior', 'ege'): 'bege', ('Interior', 'inza'): 'cinza', ('Interior', 'inzento'): 'cinzento', ('Interior', 'teciso'): 'tecido', ('Opcional', 'autmático'): 'automático', ('Opcional', 
'esctacionamento'): 'estacionamento', ('Opcional', 'estacionamernto'): 'estacionamento', ('Opcional', 'pct'): 'pacote', ('Opcional', 'navegaçãp'): 'navegação', ('Opcional', '\\+'): '', ('Versão', 'bussiness'): 'business', ('Versão', 'r-line'): 'rline', ('Versão', 'confortl'): 'confortline', ('Versão', 'high'): 'highline', ('Opcional', 'p/dsg'): 'para dsg', ('Opcional', 'dianteirostraseiros'): 'dianteiros traseiros', ('Opcional', 'dianteirostras'): 'dianteiros traseiros', ('Opcional', 'diant'): 'dianteiros', ('Opcional', 'dttras'): 'dianteiros traseiros', ('Opcional', 'dttrpark'): 'dianteiros traseiros park', ('Opcional', 'dianttras'): 'dianteiros traseiros', ('Opcional', 'câmara'): 'camara', ('Opcional', 'camera'): 'camara', ('Opcional', 'câmera'): 'camara', ('Versão', 'trendtline'): 'trendline', ('Versão', 'trendtline'): 'trendline', ('Versão', 'confort'): 'confortline', ('Versão', 'conftl'): 'confortline', ('Versão', 'hightline'): 'highline', ('Versão', 'bluem'): 'bluemotion', ('Versão', 'bmt'): 'bluemotion', ('Versão', 'up!bluemotion'): 'up! bluemotion', ('Versão', 'up!bluem'): 'up! 
bluemotion', ('Versão', 'trendl'): 'trendline', ('Versão', 'conft'): 'confortline', ('Versão', 'highlin'): 'highline', ('Versão', 'confortine'): 'confortline', ('Versão', 'cofrtl'): 'confortline', ('Versão', 'confortlline'): 'confortline', ('Versão', 'highl'): 'highline', ('Modelo', 'up!'): 'up'} control_prints(df, '1', head=1) df = string_replacer(df, dict_strings_to_replace) # Replaces the strings mentioned in dict_strings_to_replace which are typos, useless information, etc control_prints(df, '1b', head=1) df.dropna(subset=['Cor', 'Colour_Ext_Code', 'Modelo', 'Interior'], axis=0, inplace=True) # Removes all remaining NA's control_prints(df, '2') df = new_column_creation(df, [x for x in level_2_optionals_cdsu_options.configuration_parameters_full if x != 'Modelo' and x != 'Combustível'], 0) # Creates new columns filled with zeros, which will be filled in the future df = total_price(df) # Creates a new column with the total cost for each configuration; control_prints(df, '3a', head=0) df = remove_zero_price_total_vhe(df, project_id) # Removes VHE with a price total of 0; ToDo: keep checking up if this is still necessary control_prints(df, '3b', head=0) df = remove_rows(df, [df[df.Franchise_Code.str.contains('X')].index], project_id) # This removes VW Commercials Vehicles that aren't supposed to be in this model df = remove_rows(df, [df[(df.Colour_Ext_Code == ' ') & (df.Cor == ' ')].index], project_id, warning=1) control_prints(df, '3c') df = options_scraping_v2(df, level_2_optionals_cdsu_options, 'Modelo') # Scrapes the optionals columns for information regarding the GPS, Auto Transmission, Posterior Parking Sensors, External and Internal colours, Model and Rim's Size control_prints(df, '3d', head=1, null_analysis_flag=1) df.loc[df['Combustível'].isin(['Elétrico', 'Híbrido']), 'Motor'] = 'N/A' # Defaults the value of motorization for electric/hybrid cars; control_prints(df, '4', head=0, save=1) # df = remove_rows(df, [df[df.Modelo.isnull()].index], project_id, 
warning=1) df = remove_columns(df, ['Colour_Ext_Code'], project_id) # This column was only needed for some very specific cases where no Colour_Ext_Code was available; df.to_csv('dbs/df_cdsu.csv', index=False) control_prints(df, '5', head=0, save=1) # project_units_count_checkup(df, 'Nº Stock', level_2_optionals_cdsu_options, sql_check=1) df = color_replacement(df, level_2_optionals_cdsu_options.colors_to_replace_dict, project_id) # Translates all english colors to portuguese control_prints(df, '6', head=0, save=1) df = duplicate_removal(df, subset_col='Nº Stock') # Removes duplicate rows, based on the Stock number. This leaves one line per configuration; control_prints(df, '7') df = remove_columns(df, ['Cor', 'Interior', 'Opcional', 'Custo', 'Versão', 'Franchise_Code'], project_id) # Remove columns not needed atm; # Will probably need to also remove: stock_days, stock_days_norm, and one of the scores # df = remove_rows(df, [df.loc[df['Local da Venda'] == 'DCV - Viat.Toy Viseu', :].index], project_id) # Removes the vehicles sold here, as they are from another brand (Toyota) df = margin_calculation(df) # Calculates the margin in percentage of the total price control_prints(df, '8') df = score_calculation(df, [level_2_optionals_cdsu_options.stock_days_threshold], level_2_optionals_cdsu_options.margin_threshold, level_2_optionals_cdsu_options.project_id) # Classifies the stockdays and margin based in their respective thresholds in tow classes (0 or 1) and then creates a new_score metric, control_prints(df, '9') # where only configurations with 1 in both dimension, have 1 as new_score # df = new_column_creation(df, ['Local da Venda_v2'], df['Local da Venda']) # control_prints(df, '10') # cols_to_group_layer_2 = ['Local da Venda'] # mapping_dictionaries, _ = sql_mapping_retrieval(level_2_optionals_cdsu_options.DSN_MLG_PRD, level_2_optionals_cdsu_options.sql_info['database'], level_2_optionals_cdsu_options.sql_info['mappings_temp'], 'Mapped_Value', 
level_2_optionals_cdsu_options) # df = sell_place_parametrization(df, 'Local da Venda', 'Local da Venda_Fase2', mapping_dictionaries[2], level_2_optionals_cdsu_options.project_id) # df = col_group(df, cols_to_group_layer_2[0:2], mapping_dictionaries[0:2], project_id) # Based on the information provided by Manuel some entries were grouped as to remove small groups. The columns grouped are mentioned in cols_to_group, and their respective groups are shown in level_2_optionals_cdsu_options control_prints(df, '9b, before new features', null_analysis_flag=1) df = new_features(df, configuration_parameters, project_id) # Creates a series of new features, explained in the provided pdf control_prints(df, '10, after new_features', null_analysis_flag=1) # global_variables_saving(df, level_2_optionals_cdsu_options.project_id) # Small functions to save 2 specific global variables which will be needed later log_record('Checkpoint B.1...', project_id) # performance_info_append(time.time(), 'checkpoint_b1') df = column_rename(df, list(level_2_optionals_cdsu_options.column_checkpoint_sql_renaming.keys()), list(level_2_optionals_cdsu_options.column_checkpoint_sql_renaming.values())) # sql_inject(df, level_2_optionals_cdsu_options.DSN_MLG_PRD, level_2_optionals_cdsu_options.sql_info['database'], level_2_optionals_cdsu_options.sql_info['checkpoint_b_table'], level_2_optionals_cdsu_options, list(level_2_optionals_cdsu_options.column_checkpoint_sql_renaming.values()), truncate=1, check_date=1) df = column_rename(df, list(level_2_optionals_cdsu_options.column_checkpoint_sql_renaming.values()), list(level_2_optionals_cdsu_options.column_checkpoint_sql_renaming.keys())) df = remove_columns(df, ['Date'], project_id) log_record('Fim Secção B.', project_id) performance_info_append(time.time(), 'Section_B_End') return df def deployment(df, db, view): performance_info_append(time.time(), 'Section_E_Start') log_record('Início Secção E...', project_id) if df is not None: df['NLR_Code'] = 
level_2_optionals_cdsu_options.nlr_code # df = column_rename(df, list(level_2_optionals_cdsu_options.column_sql_renaming.keys()), list(level_2_optionals_cdsu_options.column_sql_renaming.values())) df = df.rename(columns=level_2_optionals_cdsu_options.column_sql_renaming) control_prints(df, 'before deployment, after renaming', head=1) sql_delete(level_2_optionals_cdsu_options.DSN_MLG_PRD, db, view, level_2_optionals_cdsu_options, {'NLR_Code': '{}'.format(level_2_optionals_cdsu_options.nlr_code)}) sql_inject(df, level_2_optionals_cdsu_options.DSN_MLG_PRD, db, view, level_2_optionals_cdsu_options, list(level_2_optionals_cdsu_options.column_checkpoint_sql_renaming.values()), check_date=1) log_record('Fim Secção E.', project_id) performance_info_append(time.time(), 'Section_E_End') return if __name__ == '__main__': try: main() except Exception as exception:
project_identifier, exception_desc = level_2_optionals_cdsu_options.project_id, str(sys.exc_info()[1]) log_record(exception_desc, project_identifier, flag=2) error_upload(level_2_optionals_cdsu_options, project_identifier, format_exc(), exception_desc, error_flag=1) log_record('Falhou - Projeto: {}.'.format(str(project_dict[project_identifier])), project_identifier)
conditional_block
level_2_optionals_cdsu.py
import sys import time import logging import pandas as pd from traceback import format_exc import level_2_optionals_cdsu_options from level_2_optionals_cdsu_options import project_id from modules.level_1_a_data_acquisition import sql_retrieve_df from modules.level_1_b_data_processing import null_analysis, options_scraping_v2, remove_zero_price_total_vhe, lowercase_column_conversion, remove_rows, remove_columns, string_replacer, color_replacement, new_column_creation, score_calculation, duplicate_removal, total_price, margin_calculation, new_features, column_rename from modules.level_1_d_model_evaluation import data_grouping_by_locals_temp from modules.level_1_e_deployment import sql_inject, sql_delete, sql_date_comparison from modules.level_0_performance_report import performance_info_append, error_upload, log_record, project_dict, performance_info pd.set_option('display.expand_frame_repr', False) logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s %(message)s', datefmt='%H:%M:%S @ %d/%m/%y', filename=level_2_optionals_cdsu_options.log_files['full_log'], filemode='a') logging.Logger('errors') # logging.getLogger().addHandler(logging.StreamHandler(sys.stdout)) # Allows the stdout to be seen in the console logging.getLogger().addHandler(logging.StreamHandler(sys.stderr)) # Allows the stderr to be seen in the console configuration_parameters = level_2_optionals_cdsu_options.selected_configuration_parameters dict_sql_upload_flag = 0 def main(): log_record('Projeto: Sugestão Encomenda CDSU - Viaturas', project_id) query_filters = {'NLR_CODE': '4R0', 'Franchise_Code_DW': '43'} df = data_acquisition(query_filters) control_prints(df, 'after getting data', head=1, date=1) df = data_processing(df) model_choice_message, df, vehicle_count = data_grouping_by_locals_temp(df, configuration_parameters, level_2_optionals_cdsu_options.project_id) control_prints(df, 'before deployment', head=1) deployment(df, level_2_optionals_cdsu_options.sql_info['database'], 
level_2_optionals_cdsu_options.sql_info['final_table']) performance_info(level_2_optionals_cdsu_options.project_id, level_2_optionals_cdsu_options, model_choice_message, vehicle_count) log_record('Conclusão com sucesso - Projeto {}.\n'.format(project_dict[project_id]), project_id) def data_acquisition(query_filters): performance_info_append(time.time(), 'Section_A_Start') log_record('Início Secção A...', project_id) df = sql_retrieve_df(level_2_optionals_cdsu_options.DSN_MLG_PRD, level_2_optionals_cdsu_options.sql_info['database'], level_2_optionals_cdsu_options.sql_info['initial_table'], level_2_optionals_cdsu_options, list(level_2_optionals_cdsu_options.sql_to_code_renaming.keys()), query_filters, column_renaming=1, parse_dates=['Purchase_Date', 'Sell_Date']) # project_units_count_checkup(df, 'Nº Stock', level_2_optionals_cdsu_options, sql_check=0) log_record('Fim Secção A.', project_id) performance_info_append(time.time(), 'Section_A_End') return df def control_prints(df, tag, head=0, save=0, null_analysis_flag=0, date=0): # print('{}\n{}'.format(tag, df.shape)) # try: # print('Unique Vehicles: {}'.format(df['Nº Stock'].nunique())) # # except KeyError: # print('Unique Vehicles: {}'.format(df['VHE_Number'].nunique())) # # if head: # print(df.head()) # if save: # df.to_csv('dbs/cdsu_control_save_tag_{}.csv'.format(tag)) # if null_analysis_flag: # null_analysis(df) # if date: # try: # print('Current Max Sell Date is {}'.format(max(df['Data Venda']))) # except KeyError: # print('Current Max Sell Date is {}'.format(max(df['Sell_Date']))) return def data_processing(df): performance_info_append(time.time(), 'Section_B_Start') log_record('Início Secção B...', project_id) log_record('Checkpoint não encontrado ou demasiado antigo. 
A processar dados...', project_id) df = lowercase_column_conversion(df, ['Opcional', 'Cor', 'Interior', 'Versão']) # Lowercases the strings of these columns dict_strings_to_replace = {('Modelo', ' - não utilizar'): '', ('Interior', '\\|'): '/', ('Cor', '\\|'): '', ('Interior', 'ind.'): '', ('Interior', '\\]'): '/', ('Interior', '\\.'): ' ', ('Interior', '\'merino\''): 'merino', ('Interior', '\' merino\''): 'merino', ('Interior', '\'vernasca\''): 'vernasca', ('Interior', 'leder'): 'leather', ('Interior', 'p '): 'pele', ('Interior', 'pelenevada'): 'pele nevada', ('Opcional', 'bi-xénon'): 'bixénon', ('Opcional', 'bi-xenon'): 'bixénon', ('Opcional', 'vidro'): 'vidros', ('Opcional', 'dacota'): 'dakota', ('Opcional', 'whites'): 'white', ('Opcional', 'beige'): 'bege', ('Interior', '\'dakota\''): 'dakota', ('Interior', 'dacota'): 'dakota', ('Interior', 'mokka'): 'mocha', ('Interior', 'beige'): 'bege', ('Interior', 'dakota\''): 'dakota', ('Interior', 'antracite/cinza/p'): 'antracite/cinza/preto', ('Interior', 'antracite/cinza/pretoreto'): 'antracite/cinza/preto', ('Interior', 'nevada\''): 'nevada', ('Interior', '"nappa"'): 'nappa', ('Interior', 'anthrazit'): 'antracite', ('Interior', 'antracito'): 'antracite', ('Interior', 'preto/laranja/preto/lara'): 'preto/laranja', ('Interior', 'anthtacite'): 'antracite', ('Interior', 'champag'): 'champagne', ('Interior', 'cri'): 'crimson', ('Modelo', 'Enter Model Details'): '', ('Registration_Number', '\.'): '', ('Interior', 'preto/m '): 'preto ', ('Interior', 'congnac/preto'): 'cognac/preto', ('Local da Venda', 'DCN'): 'DCP', ('Cor', 'oceanao'): 'oceano', ('Cor', 'ocenao'): 'oceano', ('Interior', 'reto'): 'preto', ('Cor', 'banco'): 'branco', ('Cor', 'catanho'): 'castanho', ('Cor', 'petrìleo'): 'petróleo', ('Interior', 'ecido'): 'tecido', ('Interior', 'ege'): 'bege', ('Interior', 'inza'): 'cinza', ('Interior', 'inzento'): 'cinzento', ('Interior', 'teciso'): 'tecido', ('Opcional', 'autmático'): 'automático', ('Opcional', 
'esctacionamento'): 'estacionamento', ('Opcional', 'estacionamernto'): 'estacionamento', ('Opcional', 'pct'): 'pacote', ('Opcional', 'navegaçãp'): 'navegação', ('Opcional', '\\+'): '', ('Versão', 'bussiness'): 'business', ('Versão', 'r-line'): 'rline', ('Versão', 'confortl'): 'confortline', ('Versão', 'high'): 'highline', ('Opcional', 'p/dsg'): 'para dsg', ('Opcional', 'dianteirostraseiros'): 'dianteiros traseiros', ('Opcional', 'dianteirostras'): 'dianteiros traseiros', ('Opcional', 'diant'): 'dianteiros', ('Opcional', 'dttras'): 'dianteiros traseiros', ('Opcional', 'dttrpark'): 'dianteiros traseiros park', ('Opcional', 'dianttras'): 'dianteiros traseiros', ('Opcional', 'câmara'): 'camara', ('Opcional', 'camera'): 'camara', ('Opcional', 'câmera'): 'camara', ('Versão', 'trendtline'): 'trendline', ('Versão', 'trendtline'): 'trendline', ('Versão', 'confort'): 'confortline', ('Versão', 'conftl'): 'confortline', ('Versão', 'hightline'): 'highline', ('Versão', 'bluem'): 'bluemotion', ('Versão', 'bmt'): 'bluemotion', ('Versão', 'up!bluemotion'): 'up! bluemotion', ('Versão', 'up!bluem'): 'up! 
bluemotion', ('Versão', 'trendl'): 'trendline', ('Versão', 'conft'): 'confortline', ('Versão', 'highlin'): 'highline', ('Versão', 'confortine'): 'confortline', ('Versão', 'cofrtl'): 'confortline', ('Versão', 'confortlline'): 'confortline', ('Versão', 'highl'): 'highline', ('Modelo', 'up!'): 'up'} control_prints(df, '1', head=1) df = string_replacer(df, dict_strings_to_replace) # Replaces the strings mentioned in dict_strings_to_replace which are typos, useless information, etc control_prints(df, '1b', head=1) df.dropna(subset=['Cor', 'Colour_Ext_Code', 'Modelo', 'Interior'], axis=0, inplace=True) # Removes all remaining NA's control_prints(df, '2') df = new_column_creation(df, [x for x in level_2_optionals_cdsu_options.configuration_parameters_full if x != 'Modelo' and x != 'Combustível'], 0) # Creates new columns filled with zeros, which will be filled in the future df = total_price(df) # Creates a new column with the total cost for each configuration; control_prints(df, '3a', head=0) df = remove_zero_price_total_vhe(df, project_id) # Removes VHE with a price total of 0; ToDo: keep checking up if this is still necessary control_prints(df, '3b', head=0) df = remove_rows(df, [df[df.Franchise_Code.str.contains('X')].index], project_id) # This removes VW Commercials Vehicles that aren't supposed to be in this model df = remove_rows(df, [df[(df.Colour_Ext_Code == ' ') & (df.Cor == ' ')].index], project_id, warning=1) control_prints(df, '3c') df = options_scraping_v2(df, level_2_optionals_cdsu_options, 'Modelo') # Scrapes the optionals columns for information regarding the GPS, Auto Transmission, Posterior Parking Sensors, External and Internal colours, Model and Rim's Size control_prints(df, '3d', head=1, null_analysis_flag=1) df.loc[df['Combustível'].isin(['Elétrico', 'Híbrido']), 'Motor'] = 'N/A' # Defaults the value of motorization for electric/hybrid cars; control_prints(df, '4', head=0, save=1) # df = remove_rows(df, [df[df.Modelo.isnull()].index], project_id, 
warning=1) df = remove_columns(df, ['Colour_Ext_Code'], project_id) # This column was only needed for some very specific cases where no Colour_Ext_Code was available; df.to_csv('dbs/df_cdsu.csv', index=False) control_prints(df, '5', head=0, save=1) # project_units_count_checkup(df, 'Nº Stock', level_2_optionals_cdsu_options, sql_check=1) df = color_replacement(df, level_2_optionals_cdsu_options.colors_to_replace_dict, project_id) # Translates all english colors to portuguese control_prints(df, '6', head=0, save=1) df = duplicate_removal(df, subset_col='Nº Stock') # Removes duplicate rows, based on the Stock number. This leaves one line per configuration; control_prints(df, '7')
# df = remove_rows(df, [df.loc[df['Local da Venda'] == 'DCV - Viat.Toy Viseu', :].index], project_id) # Removes the vehicles sold here, as they are from another brand (Toyota) df = margin_calculation(df) # Calculates the margin in percentage of the total price control_prints(df, '8') df = score_calculation(df, [level_2_optionals_cdsu_options.stock_days_threshold], level_2_optionals_cdsu_options.margin_threshold, level_2_optionals_cdsu_options.project_id) # Classifies the stockdays and margin based in their respective thresholds in tow classes (0 or 1) and then creates a new_score metric, control_prints(df, '9') # where only configurations with 1 in both dimension, have 1 as new_score # df = new_column_creation(df, ['Local da Venda_v2'], df['Local da Venda']) # control_prints(df, '10') # cols_to_group_layer_2 = ['Local da Venda'] # mapping_dictionaries, _ = sql_mapping_retrieval(level_2_optionals_cdsu_options.DSN_MLG_PRD, level_2_optionals_cdsu_options.sql_info['database'], level_2_optionals_cdsu_options.sql_info['mappings_temp'], 'Mapped_Value', level_2_optionals_cdsu_options) # df = sell_place_parametrization(df, 'Local da Venda', 'Local da Venda_Fase2', mapping_dictionaries[2], level_2_optionals_cdsu_options.project_id) # df = col_group(df, cols_to_group_layer_2[0:2], mapping_dictionaries[0:2], project_id) # Based on the information provided by Manuel some entries were grouped as to remove small groups. 
The columns grouped are mentioned in cols_to_group, and their respective groups are shown in level_2_optionals_cdsu_options control_prints(df, '9b, before new features', null_analysis_flag=1) df = new_features(df, configuration_parameters, project_id) # Creates a series of new features, explained in the provided pdf control_prints(df, '10, after new_features', null_analysis_flag=1) # global_variables_saving(df, level_2_optionals_cdsu_options.project_id) # Small functions to save 2 specific global variables which will be needed later log_record('Checkpoint B.1...', project_id) # performance_info_append(time.time(), 'checkpoint_b1') df = column_rename(df, list(level_2_optionals_cdsu_options.column_checkpoint_sql_renaming.keys()), list(level_2_optionals_cdsu_options.column_checkpoint_sql_renaming.values())) # sql_inject(df, level_2_optionals_cdsu_options.DSN_MLG_PRD, level_2_optionals_cdsu_options.sql_info['database'], level_2_optionals_cdsu_options.sql_info['checkpoint_b_table'], level_2_optionals_cdsu_options, list(level_2_optionals_cdsu_options.column_checkpoint_sql_renaming.values()), truncate=1, check_date=1) df = column_rename(df, list(level_2_optionals_cdsu_options.column_checkpoint_sql_renaming.values()), list(level_2_optionals_cdsu_options.column_checkpoint_sql_renaming.keys())) df = remove_columns(df, ['Date'], project_id) log_record('Fim Secção B.', project_id) performance_info_append(time.time(), 'Section_B_End') return df def deployment(df, db, view): performance_info_append(time.time(), 'Section_E_Start') log_record('Início Secção E...', project_id) if df is not None: df['NLR_Code'] = level_2_optionals_cdsu_options.nlr_code # df = column_rename(df, list(level_2_optionals_cdsu_options.column_sql_renaming.keys()), list(level_2_optionals_cdsu_options.column_sql_renaming.values())) df = df.rename(columns=level_2_optionals_cdsu_options.column_sql_renaming) control_prints(df, 'before deployment, after renaming', head=1) 
sql_delete(level_2_optionals_cdsu_options.DSN_MLG_PRD, db, view, level_2_optionals_cdsu_options, {'NLR_Code': '{}'.format(level_2_optionals_cdsu_options.nlr_code)}) sql_inject(df, level_2_optionals_cdsu_options.DSN_MLG_PRD, db, view, level_2_optionals_cdsu_options, list(level_2_optionals_cdsu_options.column_checkpoint_sql_renaming.values()), check_date=1) log_record('Fim Secção E.', project_id) performance_info_append(time.time(), 'Section_E_End') return if __name__ == '__main__': try: main() except Exception as exception: project_identifier, exception_desc = level_2_optionals_cdsu_options.project_id, str(sys.exc_info()[1]) log_record(exception_desc, project_identifier, flag=2) error_upload(level_2_optionals_cdsu_options, project_identifier, format_exc(), exception_desc, error_flag=1) log_record('Falhou - Projeto: {}.'.format(str(project_dict[project_identifier])), project_identifier)
df = remove_columns(df, ['Cor', 'Interior', 'Opcional', 'Custo', 'Versão', 'Franchise_Code'], project_id) # Remove columns not needed atm; # Will probably need to also remove: stock_days, stock_days_norm, and one of the scores
random_line_split
level_2_optionals_cdsu.py
import sys import time import logging import pandas as pd from traceback import format_exc import level_2_optionals_cdsu_options from level_2_optionals_cdsu_options import project_id from modules.level_1_a_data_acquisition import sql_retrieve_df from modules.level_1_b_data_processing import null_analysis, options_scraping_v2, remove_zero_price_total_vhe, lowercase_column_conversion, remove_rows, remove_columns, string_replacer, color_replacement, new_column_creation, score_calculation, duplicate_removal, total_price, margin_calculation, new_features, column_rename from modules.level_1_d_model_evaluation import data_grouping_by_locals_temp from modules.level_1_e_deployment import sql_inject, sql_delete, sql_date_comparison from modules.level_0_performance_report import performance_info_append, error_upload, log_record, project_dict, performance_info pd.set_option('display.expand_frame_repr', False) logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s %(message)s', datefmt='%H:%M:%S @ %d/%m/%y', filename=level_2_optionals_cdsu_options.log_files['full_log'], filemode='a') logging.Logger('errors') # logging.getLogger().addHandler(logging.StreamHandler(sys.stdout)) # Allows the stdout to be seen in the console logging.getLogger().addHandler(logging.StreamHandler(sys.stderr)) # Allows the stderr to be seen in the console configuration_parameters = level_2_optionals_cdsu_options.selected_configuration_parameters dict_sql_upload_flag = 0 def main(): log_record('Projeto: Sugestão Encomenda CDSU - Viaturas', project_id) query_filters = {'NLR_CODE': '4R0', 'Franchise_Code_DW': '43'} df = data_acquisition(query_filters) control_prints(df, 'after getting data', head=1, date=1) df = data_processing(df) model_choice_message, df, vehicle_count = data_grouping_by_locals_temp(df, configuration_parameters, level_2_optionals_cdsu_options.project_id) control_prints(df, 'before deployment', head=1) deployment(df, level_2_optionals_cdsu_options.sql_info['database'], 
level_2_optionals_cdsu_options.sql_info['final_table']) performance_info(level_2_optionals_cdsu_options.project_id, level_2_optionals_cdsu_options, model_choice_message, vehicle_count) log_record('Conclusão com sucesso - Projeto {}.\n'.format(project_dict[project_id]), project_id) def data_acquisition(query_filters): performance_info_append(time.time(), 'Section_A_Start') log_record('Início Secção A...', project_id) df = sql_retrieve_df(level_2_optionals_cdsu_options.DSN_MLG_PRD, level_2_optionals_cdsu_options.sql_info['database'], level_2_optionals_cdsu_options.sql_info['initial_table'], level_2_optionals_cdsu_options, list(level_2_optionals_cdsu_options.sql_to_code_renaming.keys()), query_filters, column_renaming=1, parse_dates=['Purchase_Date', 'Sell_Date']) # project_units_count_checkup(df, 'Nº Stock', level_2_optionals_cdsu_options, sql_check=0) log_record('Fim Secção A.', project_id) performance_info_append(time.time(), 'Section_A_End') return df def control_prints(df, tag, head=0, save=0, null_analysis_flag=0, date=0): # print('{}\n{}'.format(tag, df.shape)) # try: # print('Unique Vehicles: {}'.format(df['Nº Stock'].nunique())) # # except KeyError: # print('Unique Vehicles: {}'.format(df['VHE_Number'].nunique())) # # if head: # print(df.head()) # if save: # df.to_csv('dbs/cdsu_control_save_tag_{}.csv'.format(tag)) # if null_analysis_flag: # null_analysis(df) # if date: # try: # print('Current Max Sell Date is {}'.format(max(df['Data Venda']))) # except KeyError: # print('Current Max Sell Date is {}'.format(max(df['Sell_Date']))) return def data_processing(df): performance_info_append(time.time(), 'Section_B_Start') log_record('Início Secção B...', project_id) log_record('Checkpoint não encontrado ou demasiado antigo. 
A processar dados...', project_id) df = lowercase_column_conversion(df, ['Opcional', 'Cor', 'Interior', 'Versão']) # Lowercases the strings of these columns dict_strings_to_replace = {('Modelo', ' - não utilizar'): '', ('Interior', '\\|'): '/', ('Cor', '\\|'): '', ('Interior', 'ind.'): '', ('Interior', '\\]'): '/', ('Interior', '\\.'): ' ', ('Interior', '\'merino\''): 'merino', ('Interior', '\' merino\''): 'merino', ('Interior', '\'vernasca\''): 'vernasca', ('Interior', 'leder'): 'leather', ('Interior', 'p '): 'pele', ('Interior', 'pelenevada'): 'pele nevada', ('Opcional', 'bi-xénon'): 'bixénon', ('Opcional', 'bi-xenon'): 'bixénon', ('Opcional', 'vidro'): 'vidros', ('Opcional', 'dacota'): 'dakota', ('Opcional', 'whites'): 'white', ('Opcional', 'beige'): 'bege', ('Interior', '\'dakota\''): 'dakota', ('Interior', 'dacota'): 'dakota', ('Interior', 'mokka'): 'mocha', ('Interior', 'beige'): 'bege', ('Interior', 'dakota\''): 'dakota', ('Interior', 'antracite/cinza/p'): 'antracite/cinza/preto', ('Interior', 'antracite/cinza/pretoreto'): 'antracite/cinza/preto', ('Interior', 'nevada\''): 'nevada', ('Interior', '"nappa"'): 'nappa', ('Interior', 'anthrazit'): 'antracite', ('Interior', 'antracito'): 'antracite', ('Interior', 'preto/laranja/preto/lara'): 'preto/laranja', ('Interior', 'anthtacite'): 'antracite', ('Interior', 'champag'): 'champagne', ('Interior', 'cri'): 'crimson', ('Modelo', 'Enter Model Details'): '', ('Registration_Number', '\.'): '', ('Interior', 'preto/m '): 'preto ', ('Interior', 'congnac/preto'): 'cognac/preto', ('Local da Venda', 'DCN'): 'DCP', ('Cor', 'oceanao'): 'oceano', ('Cor', 'ocenao'): 'oceano', ('Interior', 'reto'): 'preto', ('Cor', 'banco'): 'branco', ('Cor', 'catanho'): 'castanho', ('Cor', 'petrìleo'): 'petróleo', ('Interior', 'ecido'): 'tecido', ('Interior', 'ege'): 'bege', ('Interior', 'inza'): 'cinza', ('Interior', 'inzento'): 'cinzento', ('Interior', 'teciso'): 'tecido', ('Opcional', 'autmático'): 'automático', ('Opcional', 
'esctacionamento'): 'estacionamento', ('Opcional', 'estacionamernto'): 'estacionamento', ('Opcional', 'pct'): 'pacote', ('Opcional', 'navegaçãp'): 'navegação', ('Opcional', '\\+'): '', ('Versão', 'bussiness'): 'business', ('Versão', 'r-line'): 'rline', ('Versão', 'confortl'): 'confortline', ('Versão', 'high'): 'highline', ('Opcional', 'p/dsg'): 'para dsg', ('Opcional', 'dianteirostraseiros'): 'dianteiros traseiros', ('Opcional', 'dianteirostras'): 'dianteiros traseiros', ('Opcional', 'diant'): 'dianteiros', ('Opcional', 'dttras'): 'dianteiros traseiros', ('Opcional', 'dttrpark'): 'dianteiros traseiros park', ('Opcional', 'dianttras'): 'dianteiros traseiros', ('Opcional', 'câmara'): 'camara', ('Opcional', 'camera'): 'camara', ('Opcional', 'câmera'): 'camara', ('Versão', 'trendtline'): 'trendline', ('Versão', 'trendtline'): 'trendline', ('Versão', 'confort'): 'confortline', ('Versão', 'conftl'): 'confortline', ('Versão', 'hightline'): 'highline', ('Versão', 'bluem'): 'bluemotion', ('Versão', 'bmt'): 'bluemotion', ('Versão', 'up!bluemotion'): 'up! bluemotion', ('Versão', 'up!bluem'): 'up! 
bluemotion', ('Versão', 'trendl'): 'trendline', ('Versão', 'conft'): 'confortline', ('Versão', 'highlin'): 'highline', ('Versão', 'confortine'): 'confortline', ('Versão', 'cofrtl'): 'confortline', ('Versão', 'confortlline'): 'confortline', ('Versão', 'highl'): 'highline', ('Modelo', 'up!'): 'up'} control_prints(df, '1', head=1) df = string_replacer(df, dict_strings_to_replace) # Replaces the strings mentioned in dict_strings_to_replace which are typos, useless information, etc control_prints(df, '1b', head=1) df.dropna(subset=['Cor', 'Colour_Ext_Code', 'Modelo', 'Interior'], axis=0, inplace=True) # Removes all remaining NA's control_prints(df, '2') df = new_column_creation(df, [x for x in level_2_optionals_cdsu_options.configuration_parameters_full if x != 'Modelo' and x != 'Combustível'], 0) # Creates new columns filled with zeros, which will be filled in the future df = total_price(df) # Creates a new column with the total cost for each configuration; control_prints(df, '3a', head=0) df = remove_zero_price_total_vhe(df, project_id) # Removes VHE with a price total of 0; ToDo: keep checking up if this is still necessary control_prints(df, '3b', head=0) df = remove_rows(df, [df[df.Franchise_Code.str.contains('X')].index], project_id) # This removes VW Commercials Vehicles that aren't supposed to be in this model df = remove_rows(df, [df[(df.Colour_Ext_Code == ' ') & (df.Cor == ' ')].index], project_id, warning=1) control_prints(df, '3c') df = options_scraping_v2(df, level_2_optionals_cdsu_options, 'Modelo') # Scrapes the optionals columns for information regarding the GPS, Auto Transmission, Posterior Parking Sensors, External and Internal colours, Model and Rim's Size control_prints(df, '3d', head=1, null_analysis_flag=1) df.loc[df['Combustível'].isin(['Elétrico', 'Híbrido']), 'Motor'] = 'N/A' # Defaults the value of motorization for electric/hybrid cars; control_prints(df, '4', head=0, save=1) # df = remove_rows(df, [df[df.Modelo.isnull()].index], project_id, 
warning=1) df = remove_columns(df, ['Colour_Ext_Code'], project_id) # This column was only needed for some very specific cases where no Colour_Ext_Code was available; df.to_csv('dbs/df_cdsu.csv', index=False) control_prints(df, '5', head=0, save=1) # project_units_count_checkup(df, 'Nº Stock', level_2_optionals_cdsu_options, sql_check=1) df = color_replacement(df, level_2_optionals_cdsu_options.colors_to_replace_dict, project_id) # Translates all english colors to portuguese control_prints(df, '6', head=0, save=1) df = duplicate_removal(df, subset_col='Nº Stock') # Removes duplicate rows, based on the Stock number. This leaves one line per configuration; control_prints(df, '7') df = remove_columns(df, ['Cor', 'Interior', 'Opcional', 'Custo', 'Versão', 'Franchise_Code'], project_id) # Remove columns not needed atm; # Will probably need to also remove: stock_days, stock_days_norm, and one of the scores # df = remove_rows(df, [df.loc[df['Local da Venda'] == 'DCV - Viat.Toy Viseu', :].index], project_id) # Removes the vehicles sold here, as they are from another brand (Toyota) df = margin_calculation(df) # Calculates the margin in percentage of the total price control_prints(df, '8') df = score_calculation(df, [level_2_optionals_cdsu_options.stock_days_threshold], level_2_optionals_cdsu_options.margin_threshold, level_2_optionals_cdsu_options.project_id) # Classifies the stockdays and margin based in their respective thresholds in tow classes (0 or 1) and then creates a new_score metric, control_prints(df, '9') # where only configurations with 1 in both dimension, have 1 as new_score # df = new_column_creation(df, ['Local da Venda_v2'], df['Local da Venda']) # control_prints(df, '10') # cols_to_group_layer_2 = ['Local da Venda'] # mapping_dictionaries, _ = sql_mapping_retrieval(level_2_optionals_cdsu_options.DSN_MLG_PRD, level_2_optionals_cdsu_options.sql_info['database'], level_2_optionals_cdsu_options.sql_info['mappings_temp'], 'Mapped_Value', 
level_2_optionals_cdsu_options) # df = sell_place_parametrization(df, 'Local da Venda', 'Local da Venda_Fase2', mapping_dictionaries[2], level_2_optionals_cdsu_options.project_id) # df = col_group(df, cols_to_group_layer_2[0:2], mapping_dictionaries[0:2], project_id) # Based on the information provided by Manuel some entries were grouped as to remove small groups. The columns grouped are mentioned in cols_to_group, and their respective groups are shown in level_2_optionals_cdsu_options control_prints(df, '9b, before new features', null_analysis_flag=1) df = new_features(df, configuration_parameters, project_id) # Creates a series of new features, explained in the provided pdf control_prints(df, '10, after new_features', null_analysis_flag=1) # global_variables_saving(df, level_2_optionals_cdsu_options.project_id) # Small functions to save 2 specific global variables which will be needed later log_record('Checkpoint B.1...', project_id) # performance_info_append(time.time(), 'checkpoint_b1') df = column_rename(df, list(level_2_optionals_cdsu_options.column_checkpoint_sql_renaming.keys()), list(level_2_optionals_cdsu_options.column_checkpoint_sql_renaming.values())) # sql_inject(df, level_2_optionals_cdsu_options.DSN_MLG_PRD, level_2_optionals_cdsu_options.sql_info['database'], level_2_optionals_cdsu_options.sql_info['checkpoint_b_table'], level_2_optionals_cdsu_options, list(level_2_optionals_cdsu_options.column_checkpoint_sql_renaming.values()), truncate=1, check_date=1) df = column_rename(df, list(level_2_optionals_cdsu_options.column_checkpoint_sql_renaming.values()), list(level_2_optionals_cdsu_options.column_checkpoint_sql_renaming.keys())) df = remove_columns(df, ['Date'], project_id) log_record('Fim Secção B.', project_id) performance_info_append(time.time(), 'Section_B_End') return df def deployment(df, db, view): performance_info_append(time.time(), 'Section_E_Start')
pt Exception as exception: project_identifier, exception_desc = level_2_optionals_cdsu_options.project_id, str(sys.exc_info()[1]) log_record(exception_desc, project_identifier, flag=2) error_upload(level_2_optionals_cdsu_options, project_identifier, format_exc(), exception_desc, error_flag=1) log_record('Falhou - Projeto: {}.'.format(str(project_dict[project_identifier])), project_identifier)
log_record('Início Secção E...', project_id) if df is not None: df['NLR_Code'] = level_2_optionals_cdsu_options.nlr_code # df = column_rename(df, list(level_2_optionals_cdsu_options.column_sql_renaming.keys()), list(level_2_optionals_cdsu_options.column_sql_renaming.values())) df = df.rename(columns=level_2_optionals_cdsu_options.column_sql_renaming) control_prints(df, 'before deployment, after renaming', head=1) sql_delete(level_2_optionals_cdsu_options.DSN_MLG_PRD, db, view, level_2_optionals_cdsu_options, {'NLR_Code': '{}'.format(level_2_optionals_cdsu_options.nlr_code)}) sql_inject(df, level_2_optionals_cdsu_options.DSN_MLG_PRD, db, view, level_2_optionals_cdsu_options, list(level_2_optionals_cdsu_options.column_checkpoint_sql_renaming.values()), check_date=1) log_record('Fim Secção E.', project_id) performance_info_append(time.time(), 'Section_E_End') return if __name__ == '__main__': try: main() exce
identifier_body
level_2_optionals_cdsu.py
import sys import time import logging import pandas as pd from traceback import format_exc import level_2_optionals_cdsu_options from level_2_optionals_cdsu_options import project_id from modules.level_1_a_data_acquisition import sql_retrieve_df from modules.level_1_b_data_processing import null_analysis, options_scraping_v2, remove_zero_price_total_vhe, lowercase_column_conversion, remove_rows, remove_columns, string_replacer, color_replacement, new_column_creation, score_calculation, duplicate_removal, total_price, margin_calculation, new_features, column_rename from modules.level_1_d_model_evaluation import data_grouping_by_locals_temp from modules.level_1_e_deployment import sql_inject, sql_delete, sql_date_comparison from modules.level_0_performance_report import performance_info_append, error_upload, log_record, project_dict, performance_info pd.set_option('display.expand_frame_repr', False) logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s %(message)s', datefmt='%H:%M:%S @ %d/%m/%y', filename=level_2_optionals_cdsu_options.log_files['full_log'], filemode='a') logging.Logger('errors') # logging.getLogger().addHandler(logging.StreamHandler(sys.stdout)) # Allows the stdout to be seen in the console logging.getLogger().addHandler(logging.StreamHandler(sys.stderr)) # Allows the stderr to be seen in the console configuration_parameters = level_2_optionals_cdsu_options.selected_configuration_parameters dict_sql_upload_flag = 0 def main(): log_record('Projeto: Sugestão Encomenda CDSU - Viaturas', project_id) query_filters = {'NLR_CODE': '4R0', 'Franchise_Code_DW': '43'} df = data_acquisition(query_filters) control_prints(df, 'after getting data', head=1, date=1) df = data_processing(df) model_choice_message, df, vehicle_count = data_grouping_by_locals_temp(df, configuration_parameters, level_2_optionals_cdsu_options.project_id) control_prints(df, 'before deployment', head=1) deployment(df, level_2_optionals_cdsu_options.sql_info['database'], 
level_2_optionals_cdsu_options.sql_info['final_table']) performance_info(level_2_optionals_cdsu_options.project_id, level_2_optionals_cdsu_options, model_choice_message, vehicle_count) log_record('Conclusão com sucesso - Projeto {}.\n'.format(project_dict[project_id]), project_id) def da
uery_filters): performance_info_append(time.time(), 'Section_A_Start') log_record('Início Secção A...', project_id) df = sql_retrieve_df(level_2_optionals_cdsu_options.DSN_MLG_PRD, level_2_optionals_cdsu_options.sql_info['database'], level_2_optionals_cdsu_options.sql_info['initial_table'], level_2_optionals_cdsu_options, list(level_2_optionals_cdsu_options.sql_to_code_renaming.keys()), query_filters, column_renaming=1, parse_dates=['Purchase_Date', 'Sell_Date']) # project_units_count_checkup(df, 'Nº Stock', level_2_optionals_cdsu_options, sql_check=0) log_record('Fim Secção A.', project_id) performance_info_append(time.time(), 'Section_A_End') return df def control_prints(df, tag, head=0, save=0, null_analysis_flag=0, date=0): # print('{}\n{}'.format(tag, df.shape)) # try: # print('Unique Vehicles: {}'.format(df['Nº Stock'].nunique())) # # except KeyError: # print('Unique Vehicles: {}'.format(df['VHE_Number'].nunique())) # # if head: # print(df.head()) # if save: # df.to_csv('dbs/cdsu_control_save_tag_{}.csv'.format(tag)) # if null_analysis_flag: # null_analysis(df) # if date: # try: # print('Current Max Sell Date is {}'.format(max(df['Data Venda']))) # except KeyError: # print('Current Max Sell Date is {}'.format(max(df['Sell_Date']))) return def data_processing(df): performance_info_append(time.time(), 'Section_B_Start') log_record('Início Secção B...', project_id) log_record('Checkpoint não encontrado ou demasiado antigo. 
A processar dados...', project_id) df = lowercase_column_conversion(df, ['Opcional', 'Cor', 'Interior', 'Versão']) # Lowercases the strings of these columns dict_strings_to_replace = {('Modelo', ' - não utilizar'): '', ('Interior', '\\|'): '/', ('Cor', '\\|'): '', ('Interior', 'ind.'): '', ('Interior', '\\]'): '/', ('Interior', '\\.'): ' ', ('Interior', '\'merino\''): 'merino', ('Interior', '\' merino\''): 'merino', ('Interior', '\'vernasca\''): 'vernasca', ('Interior', 'leder'): 'leather', ('Interior', 'p '): 'pele', ('Interior', 'pelenevada'): 'pele nevada', ('Opcional', 'bi-xénon'): 'bixénon', ('Opcional', 'bi-xenon'): 'bixénon', ('Opcional', 'vidro'): 'vidros', ('Opcional', 'dacota'): 'dakota', ('Opcional', 'whites'): 'white', ('Opcional', 'beige'): 'bege', ('Interior', '\'dakota\''): 'dakota', ('Interior', 'dacota'): 'dakota', ('Interior', 'mokka'): 'mocha', ('Interior', 'beige'): 'bege', ('Interior', 'dakota\''): 'dakota', ('Interior', 'antracite/cinza/p'): 'antracite/cinza/preto', ('Interior', 'antracite/cinza/pretoreto'): 'antracite/cinza/preto', ('Interior', 'nevada\''): 'nevada', ('Interior', '"nappa"'): 'nappa', ('Interior', 'anthrazit'): 'antracite', ('Interior', 'antracito'): 'antracite', ('Interior', 'preto/laranja/preto/lara'): 'preto/laranja', ('Interior', 'anthtacite'): 'antracite', ('Interior', 'champag'): 'champagne', ('Interior', 'cri'): 'crimson', ('Modelo', 'Enter Model Details'): '', ('Registration_Number', '\.'): '', ('Interior', 'preto/m '): 'preto ', ('Interior', 'congnac/preto'): 'cognac/preto', ('Local da Venda', 'DCN'): 'DCP', ('Cor', 'oceanao'): 'oceano', ('Cor', 'ocenao'): 'oceano', ('Interior', 'reto'): 'preto', ('Cor', 'banco'): 'branco', ('Cor', 'catanho'): 'castanho', ('Cor', 'petrìleo'): 'petróleo', ('Interior', 'ecido'): 'tecido', ('Interior', 'ege'): 'bege', ('Interior', 'inza'): 'cinza', ('Interior', 'inzento'): 'cinzento', ('Interior', 'teciso'): 'tecido', ('Opcional', 'autmático'): 'automático', ('Opcional', 
'esctacionamento'): 'estacionamento', ('Opcional', 'estacionamernto'): 'estacionamento', ('Opcional', 'pct'): 'pacote', ('Opcional', 'navegaçãp'): 'navegação', ('Opcional', '\\+'): '', ('Versão', 'bussiness'): 'business', ('Versão', 'r-line'): 'rline', ('Versão', 'confortl'): 'confortline', ('Versão', 'high'): 'highline', ('Opcional', 'p/dsg'): 'para dsg', ('Opcional', 'dianteirostraseiros'): 'dianteiros traseiros', ('Opcional', 'dianteirostras'): 'dianteiros traseiros', ('Opcional', 'diant'): 'dianteiros', ('Opcional', 'dttras'): 'dianteiros traseiros', ('Opcional', 'dttrpark'): 'dianteiros traseiros park', ('Opcional', 'dianttras'): 'dianteiros traseiros', ('Opcional', 'câmara'): 'camara', ('Opcional', 'camera'): 'camara', ('Opcional', 'câmera'): 'camara', ('Versão', 'trendtline'): 'trendline', ('Versão', 'trendtline'): 'trendline', ('Versão', 'confort'): 'confortline', ('Versão', 'conftl'): 'confortline', ('Versão', 'hightline'): 'highline', ('Versão', 'bluem'): 'bluemotion', ('Versão', 'bmt'): 'bluemotion', ('Versão', 'up!bluemotion'): 'up! bluemotion', ('Versão', 'up!bluem'): 'up! 
bluemotion', ('Versão', 'trendl'): 'trendline', ('Versão', 'conft'): 'confortline', ('Versão', 'highlin'): 'highline', ('Versão', 'confortine'): 'confortline', ('Versão', 'cofrtl'): 'confortline', ('Versão', 'confortlline'): 'confortline', ('Versão', 'highl'): 'highline', ('Modelo', 'up!'): 'up'} control_prints(df, '1', head=1) df = string_replacer(df, dict_strings_to_replace) # Replaces the strings mentioned in dict_strings_to_replace which are typos, useless information, etc control_prints(df, '1b', head=1) df.dropna(subset=['Cor', 'Colour_Ext_Code', 'Modelo', 'Interior'], axis=0, inplace=True) # Removes all remaining NA's control_prints(df, '2') df = new_column_creation(df, [x for x in level_2_optionals_cdsu_options.configuration_parameters_full if x != 'Modelo' and x != 'Combustível'], 0) # Creates new columns filled with zeros, which will be filled in the future df = total_price(df) # Creates a new column with the total cost for each configuration; control_prints(df, '3a', head=0) df = remove_zero_price_total_vhe(df, project_id) # Removes VHE with a price total of 0; ToDo: keep checking up if this is still necessary control_prints(df, '3b', head=0) df = remove_rows(df, [df[df.Franchise_Code.str.contains('X')].index], project_id) # This removes VW Commercials Vehicles that aren't supposed to be in this model df = remove_rows(df, [df[(df.Colour_Ext_Code == ' ') & (df.Cor == ' ')].index], project_id, warning=1) control_prints(df, '3c') df = options_scraping_v2(df, level_2_optionals_cdsu_options, 'Modelo') # Scrapes the optionals columns for information regarding the GPS, Auto Transmission, Posterior Parking Sensors, External and Internal colours, Model and Rim's Size control_prints(df, '3d', head=1, null_analysis_flag=1) df.loc[df['Combustível'].isin(['Elétrico', 'Híbrido']), 'Motor'] = 'N/A' # Defaults the value of motorization for electric/hybrid cars; control_prints(df, '4', head=0, save=1) # df = remove_rows(df, [df[df.Modelo.isnull()].index], project_id, 
warning=1) df = remove_columns(df, ['Colour_Ext_Code'], project_id) # This column was only needed for some very specific cases where no Colour_Ext_Code was available; df.to_csv('dbs/df_cdsu.csv', index=False) control_prints(df, '5', head=0, save=1) # project_units_count_checkup(df, 'Nº Stock', level_2_optionals_cdsu_options, sql_check=1) df = color_replacement(df, level_2_optionals_cdsu_options.colors_to_replace_dict, project_id) # Translates all english colors to portuguese control_prints(df, '6', head=0, save=1) df = duplicate_removal(df, subset_col='Nº Stock') # Removes duplicate rows, based on the Stock number. This leaves one line per configuration; control_prints(df, '7') df = remove_columns(df, ['Cor', 'Interior', 'Opcional', 'Custo', 'Versão', 'Franchise_Code'], project_id) # Remove columns not needed atm; # Will probably need to also remove: stock_days, stock_days_norm, and one of the scores # df = remove_rows(df, [df.loc[df['Local da Venda'] == 'DCV - Viat.Toy Viseu', :].index], project_id) # Removes the vehicles sold here, as they are from another brand (Toyota) df = margin_calculation(df) # Calculates the margin in percentage of the total price control_prints(df, '8') df = score_calculation(df, [level_2_optionals_cdsu_options.stock_days_threshold], level_2_optionals_cdsu_options.margin_threshold, level_2_optionals_cdsu_options.project_id) # Classifies the stockdays and margin based in their respective thresholds in tow classes (0 or 1) and then creates a new_score metric, control_prints(df, '9') # where only configurations with 1 in both dimension, have 1 as new_score # df = new_column_creation(df, ['Local da Venda_v2'], df['Local da Venda']) # control_prints(df, '10') # cols_to_group_layer_2 = ['Local da Venda'] # mapping_dictionaries, _ = sql_mapping_retrieval(level_2_optionals_cdsu_options.DSN_MLG_PRD, level_2_optionals_cdsu_options.sql_info['database'], level_2_optionals_cdsu_options.sql_info['mappings_temp'], 'Mapped_Value', 
level_2_optionals_cdsu_options) # df = sell_place_parametrization(df, 'Local da Venda', 'Local da Venda_Fase2', mapping_dictionaries[2], level_2_optionals_cdsu_options.project_id) # df = col_group(df, cols_to_group_layer_2[0:2], mapping_dictionaries[0:2], project_id) # Based on the information provided by Manuel some entries were grouped as to remove small groups. The columns grouped are mentioned in cols_to_group, and their respective groups are shown in level_2_optionals_cdsu_options control_prints(df, '9b, before new features', null_analysis_flag=1) df = new_features(df, configuration_parameters, project_id) # Creates a series of new features, explained in the provided pdf control_prints(df, '10, after new_features', null_analysis_flag=1) # global_variables_saving(df, level_2_optionals_cdsu_options.project_id) # Small functions to save 2 specific global variables which will be needed later log_record('Checkpoint B.1...', project_id) # performance_info_append(time.time(), 'checkpoint_b1') df = column_rename(df, list(level_2_optionals_cdsu_options.column_checkpoint_sql_renaming.keys()), list(level_2_optionals_cdsu_options.column_checkpoint_sql_renaming.values())) # sql_inject(df, level_2_optionals_cdsu_options.DSN_MLG_PRD, level_2_optionals_cdsu_options.sql_info['database'], level_2_optionals_cdsu_options.sql_info['checkpoint_b_table'], level_2_optionals_cdsu_options, list(level_2_optionals_cdsu_options.column_checkpoint_sql_renaming.values()), truncate=1, check_date=1) df = column_rename(df, list(level_2_optionals_cdsu_options.column_checkpoint_sql_renaming.values()), list(level_2_optionals_cdsu_options.column_checkpoint_sql_renaming.keys())) df = remove_columns(df, ['Date'], project_id) log_record('Fim Secção B.', project_id) performance_info_append(time.time(), 'Section_B_End') return df def deployment(df, db, view): performance_info_append(time.time(), 'Section_E_Start') log_record('Início Secção E...', project_id) if df is not None: df['NLR_Code'] = 
level_2_optionals_cdsu_options.nlr_code # df = column_rename(df, list(level_2_optionals_cdsu_options.column_sql_renaming.keys()), list(level_2_optionals_cdsu_options.column_sql_renaming.values())) df = df.rename(columns=level_2_optionals_cdsu_options.column_sql_renaming) control_prints(df, 'before deployment, after renaming', head=1) sql_delete(level_2_optionals_cdsu_options.DSN_MLG_PRD, db, view, level_2_optionals_cdsu_options, {'NLR_Code': '{}'.format(level_2_optionals_cdsu_options.nlr_code)}) sql_inject(df, level_2_optionals_cdsu_options.DSN_MLG_PRD, db, view, level_2_optionals_cdsu_options, list(level_2_optionals_cdsu_options.column_checkpoint_sql_renaming.values()), check_date=1) log_record('Fim Secção E.', project_id) performance_info_append(time.time(), 'Section_E_End') return if __name__ == '__main__': try: main() except Exception as exception: project_identifier, exception_desc = level_2_optionals_cdsu_options.project_id, str(sys.exc_info()[1]) log_record(exception_desc, project_identifier, flag=2) error_upload(level_2_optionals_cdsu_options, project_identifier, format_exc(), exception_desc, error_flag=1) log_record('Falhou - Projeto: {}.'.format(str(project_dict[project_identifier])), project_identifier)
ta_acquisition(q
identifier_name
util.js
function StringUtils() {} String.prototype.afterFirst = function(e) { var c = this.indexOf(e); if (c == -1) { return "" } c += e.length; return this.substr(c) }; String.prototype.afterLast = function(e) { var c = this.lastIndexOf(e); if (c == -1) { return "" } c += e.length; return this.substr(c) }; String.prototype.beginsWith = function(e) { return this.indexOf(e) == 0 }; String.prototype.beforeFirst = function(e) { e = this.indexOf(e); if (e == -1) { return "" } return this.substr(0, e) }; String.prototype.beforeLast = function(e) { e = this.lastIndexOf(e); if (e == -1) { return ""
var a = "", b = this.indexOf(e); if (b != -1) { b += e.length; var d = this.indexOf(c, b); if (d != -1) { a = this.substr(b, d - b) } } return a }; StringUtils.capitalize = function(e, c) { e = StringUtils.trimLeft(e); return c === true ? e.replace(/^.|\s+(.)/, StringUtils._upperCase) : e.replace(/(^\w)/, StringUtils._upperCase) }; String.prototype.capitalize = function() { return StringUtils.capitalize(this) }; String.prototype.ljust = function(e, c) { if (c == null) { c = " " } var a = c.substr(0, 1); return this.length < e ? this + this.repeat(e - this.length, a) : this }; String.prototype.rjust = function(e, c) { if (c == null) { c = " " } var a = c.substr(0, 1); return this.length < e ? this.repeat(e - this.length, a) + this : this }; String.prototype.center = function(e, c) { if (c == null) { c = " " } var a = c.substr(0, 1); if (this.length < e) { var b = e - this.length, d = b % 2 == 0 ? "" : a; a = this.repeat(Math.round(b / 2), a); return a + this + a + d } else { return this } }; String.prototype.repeat = function(e, c) { if (isNaN(e)) { e = 1 } for (var a = ""; e--;) { a += c || this } return a }; String.prototype.base64Encode = function() { for (var e = "", c = 0, a = this.length; c < a;) { var b = this.charCodeAt(c++) & 255; if (c == a) { e += "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/".charAt(b >> 2) + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/".charAt((b & 3) << 4) + "=="; break } var d = this.charCodeAt(c++); if (c == a) { e += "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/".charAt(b >> 2) + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/".charAt((b & 3) << 4 | (d & 240) >> 4) + "="; break } var g = this.charCodeAt(c++); e += "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/".charAt(b >> 2) + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/".charAt((b & 3) << 4 | (d & 240) >> 4) + 
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/".charAt((d & 15) << 2 | (g & 192) >> 6) + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/".charAt(g & 63) } return e }; String.prototype.contains = function(e) { return this.indexOf(e) != -1 }; StringUtils.editDistance = function(e, c) { if (e == null) { e = "" } if (c == null) { c = "" } if (e == c) { return 0 } var a = [], b, d = e.length, g = c.length; if (d == 0) { return g } if (g == 0) { return d } for (var m = 0; m <= d; m++) { a[m] = [] } for (m = 0; m <= d; m++) { a[m][0] = m } for (m = 0; m <= g; m++) { a[0][m] = m } for (m = 1; m <= d; m++) { for (var q = e.charAt(m - 1), s = 1; s <= g; s++) { b = c.charAt(s - 1); b = q == b ? 0 : 1; a[m][s] = Math.min(a[m - 1][s] + 1, a[m][s - 1] + 1, a[m - 1][s - 1] + b) } } return a[d][g] }; String.prototype.editDistance = function(e) { return StringUtils.editDistance(this, e) }; String.prototype.endsWith = function(e) { return RegExp(e + "$").test(this) }; String.prototype.hasText = function() { return !!this.removeExtraWhitespace().length }; String.prototype.isEmpty = function() { return !this.length }; String.prototype.isNumeric = function() { return /^[-+]?\d*\.?\d+(?:[eE][-+]?\d+)?$/.test(this) }; String.prototype.padLeft = function(e, c) { for (var a = this; a.length < c;) { a = e + a } return a }; String.prototype.padRight = function(e, c) { for (var a = this; a.length < c;) { a += e } return a }; String.prototype.properCase = function() { return this.toLowerCase().replace(/\b([^.?;!]+)/, StringUtils.capitalize).replace(/\b[i]\b/, "I") }; String.prototype.quote = function() { return '"' + this.replace(/[\\"\r\n]/g, this._quote) + '"' }; String.prototype.remove = function(e, c) { if (c === null) { c = true } var a = StringUtils.escapePattern(e); return this.replace(RegExp(a, !c ? 
"ig" : "g"), "") }; String.prototype.removeExtraWhitespace = function() { return this.trim(this).replace(/\s+/g, " ") }; String.prototype.reverse = function() { return this.split("").reverse().join("") }; String.prototype.reverseWords = function() { return this.split(/\s+/).reverse().join(" ") }; String.prototype.similarity = function(e) { var c = StringUtils.editDistance(this, e); e = Math.max(this.length, e.length); return e == 0 ? 1 : 1 - c / e }; String.prototype.stripTags = function() { return this.replace(/<\/?[^>]+>/igm, "") }; String.prototype.supplant = function() { var e = this; if (arguments[0] instanceof Object) { for (var c in arguments[0]) { e = e.replace(RegExp("\\{" + c + "\\}", "g"), arguments[0][c]) } } else { c = arguments.length; for (var a = 0; a < c; a++) { e = e.replace(RegExp("\\{" + a + "\\}", "g"), arguments[a]) } } return e }; String.prototype.swapCase = function() { return this.replace(/(\w)/, StringUtils._swapCase) }; String.prototype.trim = function() { return this.replace(/^\s+|\s+$/g, "") }; StringUtils.trimLeft = function(e) { return e.replace(/^\s+/, "") }; String.prototype.trimLeft = function() { return StringUtils.trimLeft(this) }; StringUtils.trimRight = function(e) { return e.replace(/\s+$/, "") }; String.prototype.trimRight = function() { return StringUtils.trimLeft(this) }; String.prototype.truncate = function(e, c) { if (c == null) { c = "..." 
} if (e == 0) { e = this.length } e -= c.length; var a = this; if (a.length > e) { a = a.substr(0, e); if (/[^\s]/.test(a.charAt(e))) { a = StringUtils.trimRight(a.replace(/\w+$|\s+$/, "")) } a += c } return a }; String.prototype.wordCount = function() { return this.match(/\b\w+\b/g).length }; StringUtils.escapePattern = function(e) { return e.replace(/(\]|\[|\{|\}|\(|\)|\*|\+|\?|\.|\\)/g, "\\$1") }; StringUtils.prototype._quote = function() { switch (this) { case "\\": return "\\\\"; case "\r": return "\\r"; case "\n": return "\\n"; case '"': return '\\"' } return null }; StringUtils._upperCase = function(e) { return e.toUpperCase() }; StringUtils._swapCase = function(e) { var c = e.toLowerCase(), a = e.toUpperCase(); switch (e) { case c: return a; case a: return c; default: return e } }; function Rnd() { throw Error("Rnd is static and cannot be instantiated."); } Rnd.randFloat = function(e, c) { if (isNaN(c)) { c = e; e = 0 } return Math.random() * (c - e) + e }; Rnd.randBoolean = function(e) { if (isNaN(e)) { e = 0.5 } return Math.random() < e }; Rnd.randSign = function(e) { if (isNaN(e)) { e = 0.5 } return Math.random() < e ? 1 : -1 }; Rnd.randBit = function(e) { if (isNaN(e)) { e = 0.5 } return Math.random() < e ? 
1 : 0 }; Rnd.randInteger = function(e, c) { if (isNaN(c)) { c = e; e = 0 } return Math.floor(Rnd.randFloat(e, c)) }; Number.prototype.floor = function() { return this | 0 }; Number.prototype.round = function() { return this + 0.5 | 0 }; Number.RADIANS = 180 / Math.PI; Number.prototype.fromRadians = function() { return this * Number.RADIANS }; Number.prototype.toRadians = function() { return this / Number.RADIANS }; Array.prototype.randomSort = function() { var e = this.length; if (e == 0) { return false } for (; e--;) { var c = Math.random() * (e + 1) | 0, a = this[e]; this[e] = this[c]; this[c] = a } return this }; Array.prototype.findRandom = function() { if (this.length == 1) { return this[0] } return this[Math.random() * this.length | 0] }; Array.prototype.removeRandom = function() { return this.splice(Math.random() * this.length | 0, 1)[0] }; Array.prototype.removeItem = function(e) { for (var c = 0, a = this.length; c < a; c++) { if (e == this[c]) { this.splice(c, 1); return true } } return false }; Array.prototype.sum = function() { for (var e = 0, c = 0, a = this.length; c < a; c++) { e += this[c] } return e }; Object.prototype.formatToString = function() { if (arguments == null) { return "[Object object]" } for (var e = [], c = 0, a = arguments.length; c < a; c++) { var b = arguments[c], d = this[b]; if (!isNaN(d) && d << 0 != d) { d = d.toFixed(2) } e.push(b + ":" + d) } return "[" + e.join(", ") + "]" }; Number.prototype.commaDelimit = function() { var e = String(this), c = e.length % 3, a = Math.floor(e.length / 3); if (a > 0) { for (var b = [], d = 0; d < a; d++) { var g = d * 3 + c; d == 0 && c > 0 && b.push(e.substr(0, c)); b.push(e.substr(g, 3)) } e = b.join(",") } return e }; Number.prototype.getOrdinal = function(e) { e = e == true ? 
this.commaDelimit() : this; switch (this % 10) { case 1: return e + "st"; case 2: return e + "nd"; case 3: return e + "rd"; default: return e + "th" } }; function getTimer() { return (new Date).getTime() } Function.prototype.extend = function(e) { if (e.constructor == Function) { this.$ = this.prototype = new e; this.prototype.constructor = this; this.prototype.parent = e.prototype } else { this.prototype = e; this.prototype.constructor = this; this.prototype.parent = e } return this };
} return this.substr(0, e) }; String.prototype.between = function(e, c) {
random_line_split
util.js
function StringUtils() {} String.prototype.afterFirst = function(e) { var c = this.indexOf(e); if (c == -1) { return "" } c += e.length; return this.substr(c) }; String.prototype.afterLast = function(e) { var c = this.lastIndexOf(e); if (c == -1) { return "" } c += e.length; return this.substr(c) }; String.prototype.beginsWith = function(e) { return this.indexOf(e) == 0 }; String.prototype.beforeFirst = function(e) { e = this.indexOf(e); if (e == -1) { return "" } return this.substr(0, e) }; String.prototype.beforeLast = function(e) { e = this.lastIndexOf(e); if (e == -1) { return "" } return this.substr(0, e) }; String.prototype.between = function(e, c) { var a = "", b = this.indexOf(e); if (b != -1) { b += e.length; var d = this.indexOf(c, b); if (d != -1) { a = this.substr(b, d - b) } } return a }; StringUtils.capitalize = function(e, c) { e = StringUtils.trimLeft(e); return c === true ? e.replace(/^.|\s+(.)/, StringUtils._upperCase) : e.replace(/(^\w)/, StringUtils._upperCase) }; String.prototype.capitalize = function() { return StringUtils.capitalize(this) }; String.prototype.ljust = function(e, c) { if (c == null) { c = " " } var a = c.substr(0, 1); return this.length < e ? this + this.repeat(e - this.length, a) : this }; String.prototype.rjust = function(e, c) { if (c == null) { c = " " } var a = c.substr(0, 1); return this.length < e ? this.repeat(e - this.length, a) + this : this }; String.prototype.center = function(e, c) { if (c == null) { c = " " } var a = c.substr(0, 1); if (this.length < e) { var b = e - this.length, d = b % 2 == 0 ? 
"" : a; a = this.repeat(Math.round(b / 2), a); return a + this + a + d } else { return this } }; String.prototype.repeat = function(e, c) { if (isNaN(e)) { e = 1 } for (var a = ""; e--;) { a += c || this } return a }; String.prototype.base64Encode = function() { for (var e = "", c = 0, a = this.length; c < a;) { var b = this.charCodeAt(c++) & 255; if (c == a) { e += "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/".charAt(b >> 2) + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/".charAt((b & 3) << 4) + "=="; break } var d = this.charCodeAt(c++); if (c == a) { e += "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/".charAt(b >> 2) + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/".charAt((b & 3) << 4 | (d & 240) >> 4) + "="; break } var g = this.charCodeAt(c++); e += "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/".charAt(b >> 2) + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/".charAt((b & 3) << 4 | (d & 240) >> 4) + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/".charAt((d & 15) << 2 | (g & 192) >> 6) + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/".charAt(g & 63) } return e }; String.prototype.contains = function(e) { return this.indexOf(e) != -1 }; StringUtils.editDistance = function(e, c) { if (e == null) { e = "" } if (c == null) { c = "" } if (e == c) { return 0 } var a = [], b, d = e.length, g = c.length; if (d == 0) { return g } if (g == 0) { return d } for (var m = 0; m <= d; m++) { a[m] = [] } for (m = 0; m <= d; m++)
for (m = 0; m <= g; m++) { a[0][m] = m } for (m = 1; m <= d; m++) { for (var q = e.charAt(m - 1), s = 1; s <= g; s++) { b = c.charAt(s - 1); b = q == b ? 0 : 1; a[m][s] = Math.min(a[m - 1][s] + 1, a[m][s - 1] + 1, a[m - 1][s - 1] + b) } } return a[d][g] }; String.prototype.editDistance = function(e) { return StringUtils.editDistance(this, e) }; String.prototype.endsWith = function(e) { return RegExp(e + "$").test(this) }; String.prototype.hasText = function() { return !!this.removeExtraWhitespace().length }; String.prototype.isEmpty = function() { return !this.length }; String.prototype.isNumeric = function() { return /^[-+]?\d*\.?\d+(?:[eE][-+]?\d+)?$/.test(this) }; String.prototype.padLeft = function(e, c) { for (var a = this; a.length < c;) { a = e + a } return a }; String.prototype.padRight = function(e, c) { for (var a = this; a.length < c;) { a += e } return a }; String.prototype.properCase = function() { return this.toLowerCase().replace(/\b([^.?;!]+)/, StringUtils.capitalize).replace(/\b[i]\b/, "I") }; String.prototype.quote = function() { return '"' + this.replace(/[\\"\r\n]/g, this._quote) + '"' }; String.prototype.remove = function(e, c) { if (c === null) { c = true } var a = StringUtils.escapePattern(e); return this.replace(RegExp(a, !c ? "ig" : "g"), "") }; String.prototype.removeExtraWhitespace = function() { return this.trim(this).replace(/\s+/g, " ") }; String.prototype.reverse = function() { return this.split("").reverse().join("") }; String.prototype.reverseWords = function() { return this.split(/\s+/).reverse().join(" ") }; String.prototype.similarity = function(e) { var c = StringUtils.editDistance(this, e); e = Math.max(this.length, e.length); return e == 0 ? 
1 : 1 - c / e }; String.prototype.stripTags = function() { return this.replace(/<\/?[^>]+>/igm, "") }; String.prototype.supplant = function() { var e = this; if (arguments[0] instanceof Object) { for (var c in arguments[0]) { e = e.replace(RegExp("\\{" + c + "\\}", "g"), arguments[0][c]) } } else { c = arguments.length; for (var a = 0; a < c; a++) { e = e.replace(RegExp("\\{" + a + "\\}", "g"), arguments[a]) } } return e }; String.prototype.swapCase = function() { return this.replace(/(\w)/, StringUtils._swapCase) }; String.prototype.trim = function() { return this.replace(/^\s+|\s+$/g, "") }; StringUtils.trimLeft = function(e) { return e.replace(/^\s+/, "") }; String.prototype.trimLeft = function() { return StringUtils.trimLeft(this) }; StringUtils.trimRight = function(e) { return e.replace(/\s+$/, "") }; String.prototype.trimRight = function() { return StringUtils.trimLeft(this) }; String.prototype.truncate = function(e, c) { if (c == null) { c = "..." } if (e == 0) { e = this.length } e -= c.length; var a = this; if (a.length > e) { a = a.substr(0, e); if (/[^\s]/.test(a.charAt(e))) { a = StringUtils.trimRight(a.replace(/\w+$|\s+$/, "")) } a += c } return a }; String.prototype.wordCount = function() { return this.match(/\b\w+\b/g).length }; StringUtils.escapePattern = function(e) { return e.replace(/(\]|\[|\{|\}|\(|\)|\*|\+|\?|\.|\\)/g, "\\$1") }; StringUtils.prototype._quote = function() { switch (this) { case "\\": return "\\\\"; case "\r": return "\\r"; case "\n": return "\\n"; case '"': return '\\"' } return null }; StringUtils._upperCase = function(e) { return e.toUpperCase() }; StringUtils._swapCase = function(e) { var c = e.toLowerCase(), a = e.toUpperCase(); switch (e) { case c: return a; case a: return c; default: return e } }; function Rnd() { throw Error("Rnd is static and cannot be instantiated."); } Rnd.randFloat = function(e, c) { if (isNaN(c)) { c = e; e = 0 } return Math.random() * (c - e) + e }; Rnd.randBoolean = function(e) { if (isNaN(e)) { e 
= 0.5 } return Math.random() < e }; Rnd.randSign = function(e) { if (isNaN(e)) { e = 0.5 } return Math.random() < e ? 1 : -1 }; Rnd.randBit = function(e) { if (isNaN(e)) { e = 0.5 } return Math.random() < e ? 1 : 0 }; Rnd.randInteger = function(e, c) { if (isNaN(c)) { c = e; e = 0 } return Math.floor(Rnd.randFloat(e, c)) }; Number.prototype.floor = function() { return this | 0 }; Number.prototype.round = function() { return this + 0.5 | 0 }; Number.RADIANS = 180 / Math.PI; Number.prototype.fromRadians = function() { return this * Number.RADIANS }; Number.prototype.toRadians = function() { return this / Number.RADIANS }; Array.prototype.randomSort = function() { var e = this.length; if (e == 0) { return false } for (; e--;) { var c = Math.random() * (e + 1) | 0, a = this[e]; this[e] = this[c]; this[c] = a } return this }; Array.prototype.findRandom = function() { if (this.length == 1) { return this[0] } return this[Math.random() * this.length | 0] }; Array.prototype.removeRandom = function() { return this.splice(Math.random() * this.length | 0, 1)[0] }; Array.prototype.removeItem = function(e) { for (var c = 0, a = this.length; c < a; c++) { if (e == this[c]) { this.splice(c, 1); return true } } return false }; Array.prototype.sum = function() { for (var e = 0, c = 0, a = this.length; c < a; c++) { e += this[c] } return e }; Object.prototype.formatToString = function() { if (arguments == null) { return "[Object object]" } for (var e = [], c = 0, a = arguments.length; c < a; c++) { var b = arguments[c], d = this[b]; if (!isNaN(d) && d << 0 != d) { d = d.toFixed(2) } e.push(b + ":" + d) } return "[" + e.join(", ") + "]" }; Number.prototype.commaDelimit = function() { var e = String(this), c = e.length % 3, a = Math.floor(e.length / 3); if (a > 0) { for (var b = [], d = 0; d < a; d++) { var g = d * 3 + c; d == 0 && c > 0 && b.push(e.substr(0, c)); b.push(e.substr(g, 3)) } e = b.join(",") } return e }; Number.prototype.getOrdinal = function(e) { e = e == true ? 
this.commaDelimit() : this; switch (this % 10) { case 1: return e + "st"; case 2: return e + "nd"; case 3: return e + "rd"; default: return e + "th" } }; function getTimer() { return (new Date).getTime() } Function.prototype.extend = function(e) { if (e.constructor == Function) { this.$ = this.prototype = new e; this.prototype.constructor = this; this.prototype.parent = e.prototype } else { this.prototype = e; this.prototype.constructor = this; this.prototype.parent = e } return this };
{ a[m][0] = m }
conditional_block
util.js
function StringUtils() {} String.prototype.afterFirst = function(e) { var c = this.indexOf(e); if (c == -1) { return "" } c += e.length; return this.substr(c) }; String.prototype.afterLast = function(e) { var c = this.lastIndexOf(e); if (c == -1) { return "" } c += e.length; return this.substr(c) }; String.prototype.beginsWith = function(e) { return this.indexOf(e) == 0 }; String.prototype.beforeFirst = function(e) { e = this.indexOf(e); if (e == -1) { return "" } return this.substr(0, e) }; String.prototype.beforeLast = function(e) { e = this.lastIndexOf(e); if (e == -1) { return "" } return this.substr(0, e) }; String.prototype.between = function(e, c) { var a = "", b = this.indexOf(e); if (b != -1) { b += e.length; var d = this.indexOf(c, b); if (d != -1) { a = this.substr(b, d - b) } } return a }; StringUtils.capitalize = function(e, c) { e = StringUtils.trimLeft(e); return c === true ? e.replace(/^.|\s+(.)/, StringUtils._upperCase) : e.replace(/(^\w)/, StringUtils._upperCase) }; String.prototype.capitalize = function() { return StringUtils.capitalize(this) }; String.prototype.ljust = function(e, c) { if (c == null) { c = " " } var a = c.substr(0, 1); return this.length < e ? this + this.repeat(e - this.length, a) : this }; String.prototype.rjust = function(e, c) { if (c == null) { c = " " } var a = c.substr(0, 1); return this.length < e ? this.repeat(e - this.length, a) + this : this }; String.prototype.center = function(e, c) { if (c == null) { c = " " } var a = c.substr(0, 1); if (this.length < e) { var b = e - this.length, d = b % 2 == 0 ? 
"" : a; a = this.repeat(Math.round(b / 2), a); return a + this + a + d } else { return this } }; String.prototype.repeat = function(e, c) { if (isNaN(e)) { e = 1 } for (var a = ""; e--;) { a += c || this } return a }; String.prototype.base64Encode = function() { for (var e = "", c = 0, a = this.length; c < a;) { var b = this.charCodeAt(c++) & 255; if (c == a) { e += "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/".charAt(b >> 2) + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/".charAt((b & 3) << 4) + "=="; break } var d = this.charCodeAt(c++); if (c == a) { e += "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/".charAt(b >> 2) + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/".charAt((b & 3) << 4 | (d & 240) >> 4) + "="; break } var g = this.charCodeAt(c++); e += "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/".charAt(b >> 2) + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/".charAt((b & 3) << 4 | (d & 240) >> 4) + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/".charAt((d & 15) << 2 | (g & 192) >> 6) + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/".charAt(g & 63) } return e }; String.prototype.contains = function(e) { return this.indexOf(e) != -1 }; StringUtils.editDistance = function(e, c) { if (e == null) { e = "" } if (c == null) { c = "" } if (e == c) { return 0 } var a = [], b, d = e.length, g = c.length; if (d == 0) { return g } if (g == 0) { return d } for (var m = 0; m <= d; m++) { a[m] = [] } for (m = 0; m <= d; m++) { a[m][0] = m } for (m = 0; m <= g; m++) { a[0][m] = m } for (m = 1; m <= d; m++) { for (var q = e.charAt(m - 1), s = 1; s <= g; s++) { b = c.charAt(s - 1); b = q == b ? 
0 : 1; a[m][s] = Math.min(a[m - 1][s] + 1, a[m][s - 1] + 1, a[m - 1][s - 1] + b) } } return a[d][g] }; String.prototype.editDistance = function(e) { return StringUtils.editDistance(this, e) }; String.prototype.endsWith = function(e) { return RegExp(e + "$").test(this) }; String.prototype.hasText = function() { return !!this.removeExtraWhitespace().length }; String.prototype.isEmpty = function() { return !this.length }; String.prototype.isNumeric = function() { return /^[-+]?\d*\.?\d+(?:[eE][-+]?\d+)?$/.test(this) }; String.prototype.padLeft = function(e, c) { for (var a = this; a.length < c;) { a = e + a } return a }; String.prototype.padRight = function(e, c) { for (var a = this; a.length < c;) { a += e } return a }; String.prototype.properCase = function() { return this.toLowerCase().replace(/\b([^.?;!]+)/, StringUtils.capitalize).replace(/\b[i]\b/, "I") }; String.prototype.quote = function() { return '"' + this.replace(/[\\"\r\n]/g, this._quote) + '"' }; String.prototype.remove = function(e, c) { if (c === null) { c = true } var a = StringUtils.escapePattern(e); return this.replace(RegExp(a, !c ? "ig" : "g"), "") }; String.prototype.removeExtraWhitespace = function() { return this.trim(this).replace(/\s+/g, " ") }; String.prototype.reverse = function() { return this.split("").reverse().join("") }; String.prototype.reverseWords = function() { return this.split(/\s+/).reverse().join(" ") }; String.prototype.similarity = function(e) { var c = StringUtils.editDistance(this, e); e = Math.max(this.length, e.length); return e == 0 ? 
1 : 1 - c / e }; String.prototype.stripTags = function() { return this.replace(/<\/?[^>]+>/igm, "") }; String.prototype.supplant = function() { var e = this; if (arguments[0] instanceof Object) { for (var c in arguments[0]) { e = e.replace(RegExp("\\{" + c + "\\}", "g"), arguments[0][c]) } } else { c = arguments.length; for (var a = 0; a < c; a++) { e = e.replace(RegExp("\\{" + a + "\\}", "g"), arguments[a]) } } return e }; String.prototype.swapCase = function() { return this.replace(/(\w)/, StringUtils._swapCase) }; String.prototype.trim = function() { return this.replace(/^\s+|\s+$/g, "") }; StringUtils.trimLeft = function(e) { return e.replace(/^\s+/, "") }; String.prototype.trimLeft = function() { return StringUtils.trimLeft(this) }; StringUtils.trimRight = function(e) { return e.replace(/\s+$/, "") }; String.prototype.trimRight = function() { return StringUtils.trimLeft(this) }; String.prototype.truncate = function(e, c) { if (c == null) { c = "..." } if (e == 0) { e = this.length } e -= c.length; var a = this; if (a.length > e) { a = a.substr(0, e); if (/[^\s]/.test(a.charAt(e))) { a = StringUtils.trimRight(a.replace(/\w+$|\s+$/, "")) } a += c } return a }; String.prototype.wordCount = function() { return this.match(/\b\w+\b/g).length }; StringUtils.escapePattern = function(e) { return e.replace(/(\]|\[|\{|\}|\(|\)|\*|\+|\?|\.|\\)/g, "\\$1") }; StringUtils.prototype._quote = function() { switch (this) { case "\\": return "\\\\"; case "\r": return "\\r"; case "\n": return "\\n"; case '"': return '\\"' } return null }; StringUtils._upperCase = function(e) { return e.toUpperCase() }; StringUtils._swapCase = function(e) { var c = e.toLowerCase(), a = e.toUpperCase(); switch (e) { case c: return a; case a: return c; default: return e } }; function Rnd() { throw Error("Rnd is static and cannot be instantiated."); } Rnd.randFloat = function(e, c) { if (isNaN(c)) { c = e; e = 0 } return Math.random() * (c - e) + e }; Rnd.randBoolean = function(e) { if (isNaN(e)) { e 
= 0.5 } return Math.random() < e }; Rnd.randSign = function(e) { if (isNaN(e)) { e = 0.5 } return Math.random() < e ? 1 : -1 }; Rnd.randBit = function(e) { if (isNaN(e)) { e = 0.5 } return Math.random() < e ? 1 : 0 }; Rnd.randInteger = function(e, c) { if (isNaN(c)) { c = e; e = 0 } return Math.floor(Rnd.randFloat(e, c)) }; Number.prototype.floor = function() { return this | 0 }; Number.prototype.round = function() { return this + 0.5 | 0 }; Number.RADIANS = 180 / Math.PI; Number.prototype.fromRadians = function() { return this * Number.RADIANS }; Number.prototype.toRadians = function() { return this / Number.RADIANS }; Array.prototype.randomSort = function() { var e = this.length; if (e == 0) { return false } for (; e--;) { var c = Math.random() * (e + 1) | 0, a = this[e]; this[e] = this[c]; this[c] = a } return this }; Array.prototype.findRandom = function() { if (this.length == 1) { return this[0] } return this[Math.random() * this.length | 0] }; Array.prototype.removeRandom = function() { return this.splice(Math.random() * this.length | 0, 1)[0] }; Array.prototype.removeItem = function(e) { for (var c = 0, a = this.length; c < a; c++) { if (e == this[c]) { this.splice(c, 1); return true } } return false }; Array.prototype.sum = function() { for (var e = 0, c = 0, a = this.length; c < a; c++) { e += this[c] } return e }; Object.prototype.formatToString = function() { if (arguments == null) { return "[Object object]" } for (var e = [], c = 0, a = arguments.length; c < a; c++) { var b = arguments[c], d = this[b]; if (!isNaN(d) && d << 0 != d) { d = d.toFixed(2) } e.push(b + ":" + d) } return "[" + e.join(", ") + "]" }; Number.prototype.commaDelimit = function() { var e = String(this), c = e.length % 3, a = Math.floor(e.length / 3); if (a > 0) { for (var b = [], d = 0; d < a; d++) { var g = d * 3 + c; d == 0 && c > 0 && b.push(e.substr(0, c)); b.push(e.substr(g, 3)) } e = b.join(",") } return e }; Number.prototype.getOrdinal = function(e) { e = e == true ? 
this.commaDelimit() : this; switch (this % 10) { case 1: return e + "st"; case 2: return e + "nd"; case 3: return e + "rd"; default: return e + "th" } }; function getTimer()
Function.prototype.extend = function(e) { if (e.constructor == Function) { this.$ = this.prototype = new e; this.prototype.constructor = this; this.prototype.parent = e.prototype } else { this.prototype = e; this.prototype.constructor = this; this.prototype.parent = e } return this };
{ return (new Date).getTime() }
identifier_body
util.js
function StringUtils() {} String.prototype.afterFirst = function(e) { var c = this.indexOf(e); if (c == -1) { return "" } c += e.length; return this.substr(c) }; String.prototype.afterLast = function(e) { var c = this.lastIndexOf(e); if (c == -1) { return "" } c += e.length; return this.substr(c) }; String.prototype.beginsWith = function(e) { return this.indexOf(e) == 0 }; String.prototype.beforeFirst = function(e) { e = this.indexOf(e); if (e == -1) { return "" } return this.substr(0, e) }; String.prototype.beforeLast = function(e) { e = this.lastIndexOf(e); if (e == -1) { return "" } return this.substr(0, e) }; String.prototype.between = function(e, c) { var a = "", b = this.indexOf(e); if (b != -1) { b += e.length; var d = this.indexOf(c, b); if (d != -1) { a = this.substr(b, d - b) } } return a }; StringUtils.capitalize = function(e, c) { e = StringUtils.trimLeft(e); return c === true ? e.replace(/^.|\s+(.)/, StringUtils._upperCase) : e.replace(/(^\w)/, StringUtils._upperCase) }; String.prototype.capitalize = function() { return StringUtils.capitalize(this) }; String.prototype.ljust = function(e, c) { if (c == null) { c = " " } var a = c.substr(0, 1); return this.length < e ? this + this.repeat(e - this.length, a) : this }; String.prototype.rjust = function(e, c) { if (c == null) { c = " " } var a = c.substr(0, 1); return this.length < e ? this.repeat(e - this.length, a) + this : this }; String.prototype.center = function(e, c) { if (c == null) { c = " " } var a = c.substr(0, 1); if (this.length < e) { var b = e - this.length, d = b % 2 == 0 ? 
"" : a; a = this.repeat(Math.round(b / 2), a); return a + this + a + d } else { return this } }; String.prototype.repeat = function(e, c) { if (isNaN(e)) { e = 1 } for (var a = ""; e--;) { a += c || this } return a }; String.prototype.base64Encode = function() { for (var e = "", c = 0, a = this.length; c < a;) { var b = this.charCodeAt(c++) & 255; if (c == a) { e += "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/".charAt(b >> 2) + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/".charAt((b & 3) << 4) + "=="; break } var d = this.charCodeAt(c++); if (c == a) { e += "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/".charAt(b >> 2) + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/".charAt((b & 3) << 4 | (d & 240) >> 4) + "="; break } var g = this.charCodeAt(c++); e += "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/".charAt(b >> 2) + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/".charAt((b & 3) << 4 | (d & 240) >> 4) + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/".charAt((d & 15) << 2 | (g & 192) >> 6) + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/".charAt(g & 63) } return e }; String.prototype.contains = function(e) { return this.indexOf(e) != -1 }; StringUtils.editDistance = function(e, c) { if (e == null) { e = "" } if (c == null) { c = "" } if (e == c) { return 0 } var a = [], b, d = e.length, g = c.length; if (d == 0) { return g } if (g == 0) { return d } for (var m = 0; m <= d; m++) { a[m] = [] } for (m = 0; m <= d; m++) { a[m][0] = m } for (m = 0; m <= g; m++) { a[0][m] = m } for (m = 1; m <= d; m++) { for (var q = e.charAt(m - 1), s = 1; s <= g; s++) { b = c.charAt(s - 1); b = q == b ? 
0 : 1; a[m][s] = Math.min(a[m - 1][s] + 1, a[m][s - 1] + 1, a[m - 1][s - 1] + b) } } return a[d][g] }; String.prototype.editDistance = function(e) { return StringUtils.editDistance(this, e) }; String.prototype.endsWith = function(e) { return RegExp(e + "$").test(this) }; String.prototype.hasText = function() { return !!this.removeExtraWhitespace().length }; String.prototype.isEmpty = function() { return !this.length }; String.prototype.isNumeric = function() { return /^[-+]?\d*\.?\d+(?:[eE][-+]?\d+)?$/.test(this) }; String.prototype.padLeft = function(e, c) { for (var a = this; a.length < c;) { a = e + a } return a }; String.prototype.padRight = function(e, c) { for (var a = this; a.length < c;) { a += e } return a }; String.prototype.properCase = function() { return this.toLowerCase().replace(/\b([^.?;!]+)/, StringUtils.capitalize).replace(/\b[i]\b/, "I") }; String.prototype.quote = function() { return '"' + this.replace(/[\\"\r\n]/g, this._quote) + '"' }; String.prototype.remove = function(e, c) { if (c === null) { c = true } var a = StringUtils.escapePattern(e); return this.replace(RegExp(a, !c ? "ig" : "g"), "") }; String.prototype.removeExtraWhitespace = function() { return this.trim(this).replace(/\s+/g, " ") }; String.prototype.reverse = function() { return this.split("").reverse().join("") }; String.prototype.reverseWords = function() { return this.split(/\s+/).reverse().join(" ") }; String.prototype.similarity = function(e) { var c = StringUtils.editDistance(this, e); e = Math.max(this.length, e.length); return e == 0 ? 
1 : 1 - c / e }; String.prototype.stripTags = function() { return this.replace(/<\/?[^>]+>/igm, "") }; String.prototype.supplant = function() { var e = this; if (arguments[0] instanceof Object) { for (var c in arguments[0]) { e = e.replace(RegExp("\\{" + c + "\\}", "g"), arguments[0][c]) } } else { c = arguments.length; for (var a = 0; a < c; a++) { e = e.replace(RegExp("\\{" + a + "\\}", "g"), arguments[a]) } } return e }; String.prototype.swapCase = function() { return this.replace(/(\w)/, StringUtils._swapCase) }; String.prototype.trim = function() { return this.replace(/^\s+|\s+$/g, "") }; StringUtils.trimLeft = function(e) { return e.replace(/^\s+/, "") }; String.prototype.trimLeft = function() { return StringUtils.trimLeft(this) }; StringUtils.trimRight = function(e) { return e.replace(/\s+$/, "") }; String.prototype.trimRight = function() { return StringUtils.trimLeft(this) }; String.prototype.truncate = function(e, c) { if (c == null) { c = "..." } if (e == 0) { e = this.length } e -= c.length; var a = this; if (a.length > e) { a = a.substr(0, e); if (/[^\s]/.test(a.charAt(e))) { a = StringUtils.trimRight(a.replace(/\w+$|\s+$/, "")) } a += c } return a }; String.prototype.wordCount = function() { return this.match(/\b\w+\b/g).length }; StringUtils.escapePattern = function(e) { return e.replace(/(\]|\[|\{|\}|\(|\)|\*|\+|\?|\.|\\)/g, "\\$1") }; StringUtils.prototype._quote = function() { switch (this) { case "\\": return "\\\\"; case "\r": return "\\r"; case "\n": return "\\n"; case '"': return '\\"' } return null }; StringUtils._upperCase = function(e) { return e.toUpperCase() }; StringUtils._swapCase = function(e) { var c = e.toLowerCase(), a = e.toUpperCase(); switch (e) { case c: return a; case a: return c; default: return e } }; function
() { throw Error("Rnd is static and cannot be instantiated."); } Rnd.randFloat = function(e, c) { if (isNaN(c)) { c = e; e = 0 } return Math.random() * (c - e) + e }; Rnd.randBoolean = function(e) { if (isNaN(e)) { e = 0.5 } return Math.random() < e }; Rnd.randSign = function(e) { if (isNaN(e)) { e = 0.5 } return Math.random() < e ? 1 : -1 }; Rnd.randBit = function(e) { if (isNaN(e)) { e = 0.5 } return Math.random() < e ? 1 : 0 }; Rnd.randInteger = function(e, c) { if (isNaN(c)) { c = e; e = 0 } return Math.floor(Rnd.randFloat(e, c)) }; Number.prototype.floor = function() { return this | 0 }; Number.prototype.round = function() { return this + 0.5 | 0 }; Number.RADIANS = 180 / Math.PI; Number.prototype.fromRadians = function() { return this * Number.RADIANS }; Number.prototype.toRadians = function() { return this / Number.RADIANS }; Array.prototype.randomSort = function() { var e = this.length; if (e == 0) { return false } for (; e--;) { var c = Math.random() * (e + 1) | 0, a = this[e]; this[e] = this[c]; this[c] = a } return this }; Array.prototype.findRandom = function() { if (this.length == 1) { return this[0] } return this[Math.random() * this.length | 0] }; Array.prototype.removeRandom = function() { return this.splice(Math.random() * this.length | 0, 1)[0] }; Array.prototype.removeItem = function(e) { for (var c = 0, a = this.length; c < a; c++) { if (e == this[c]) { this.splice(c, 1); return true } } return false }; Array.prototype.sum = function() { for (var e = 0, c = 0, a = this.length; c < a; c++) { e += this[c] } return e }; Object.prototype.formatToString = function() { if (arguments == null) { return "[Object object]" } for (var e = [], c = 0, a = arguments.length; c < a; c++) { var b = arguments[c], d = this[b]; if (!isNaN(d) && d << 0 != d) { d = d.toFixed(2) } e.push(b + ":" + d) } return "[" + e.join(", ") + "]" }; Number.prototype.commaDelimit = function() { var e = String(this), c = e.length % 3, a = Math.floor(e.length / 3); if (a > 0) { for 
(var b = [], d = 0; d < a; d++) { var g = d * 3 + c; d == 0 && c > 0 && b.push(e.substr(0, c)); b.push(e.substr(g, 3)) } e = b.join(",") } return e }; Number.prototype.getOrdinal = function(e) { e = e == true ? this.commaDelimit() : this; switch (this % 10) { case 1: return e + "st"; case 2: return e + "nd"; case 3: return e + "rd"; default: return e + "th" } }; function getTimer() { return (new Date).getTime() } Function.prototype.extend = function(e) { if (e.constructor == Function) { this.$ = this.prototype = new e; this.prototype.constructor = this; this.prototype.parent = e.prototype } else { this.prototype = e; this.prototype.constructor = this; this.prototype.parent = e } return this };
Rnd
identifier_name
img-touch-clip.js
/* ================================= img-touch-canvas - v0.1 http://github.com/rombdn/img-touch-canvas (c) 2013 Romain BEAUDON This code may be freely distributed under the MIT License ================================= */ (function() { var root = this; //global object var ImgTouchCanvas = function(options) { if( !options || !options.canvas) { throw 'ImgZoom constructor: missing arguments canvas or path'; } this.canvas = options.canvas; this.canvas.width = this.canvas.clientWidth; this.canvas.height = this.canvas.clientHeight; this.context = this.canvas.getContext('2d'); this.desktop = options.desktop || false; //non touch events console.log("canvs-client-w",parseInt(this.canvas.clientWidth));//750 可理解为实际的宽度 // DrawMapInfo( // this.scale.x * this.scaleAdaption, //宽高的放大缩小量 // this.scale.y * this.scaleAdaption, // this.position.x + this.positionAdaption.x, //off_x,x坐标的偏移量 // this.position.y + this.positionAdaption.y); this.position = { x: 0, y: 0 }; this.img_y=0; this.scale = { x: 0.5, y: 0.5 }; this.box_Scale=1; //options.path改写为options.image----->使用传进来的image // this.imgTexture.src = options.path; //从canvas-zoom迁移过来,关于边框等元素的adaption的设置 this.scaleAdaption = 1; var indoormap =options.canvas; var pageWidth = parseInt(indoormap.getAttribute("width")); //750 var pageHeight = parseInt(indoormap.getAttribute("height"));//1180 currentWidth = document.documentElement.clientWidth; //value 414 currentHeight = document.documentElement.clientHeight;//value 736 console.log("currentWidth",currentWidth); console.log("pageWidth",pageWidth); var offsetX = 0; var offsetY = 0; if (pageWidth < pageHeight) {//canvas.width < canvas.height this.scaleAdaption = currentHeight / pageHeight; if (pageWidth * this.scaleAdaption > currentWidth) { this.scaleAdaption = this.scaleAdaption * (currentWidth / (this.scaleAdaption * pageWidth)); } } else {//canvas.width >= canvas.height this.scaleAdaption = currentWidth / pageWidth; if (pageHeight * this.scaleAdaption > currentHeight) { this.scaleAdaption 
= this.scaleAdaption * (currentHeight / (this.scaleAdaption * pageHeight)); } } console.log("scaleAdaption",this.scaleAdaption); //0.552 console.log("currentHeight",currentHeight); //736 this.positionAdaption = { x: (parseInt(currentWidth) - parseInt(indoormap.getAttribute("width"))) / 2, y: (parseInt(currentHeight) - parseInt(indoormap.getAttribute("height"))) / 2 }; console.log("positionada-x:",this.positionAdaption.x); //-168 console.log("positionada-y:",this.positionAdaption.y); //-222 //end this.imgTexture = new Image(); this.lastZoomScale = null; this.lastX = null; this.lastY = null; this.mdown = false; //desktop drag this.init = false; this.checkRequestAnimationFrame(); requestAnimationFrame(this.animate.bind(this)); // requestAnimationFrame(this.draw_box.bind(this)); //this.init_draw(); this.setEventListeners(); }; ImgTouchCanvas.prototype = { _imgBoxSize:750, _imgBox:null, _previewBox:null, _uploadInputBtn:null, _$canvas:null, _$canvasW:0, _$canvasH:0, _$canvas2d:null, _imgScale:0,
_img_sx:0, _img_sy:0, // 图片的高宽 _imgW:0, _imgH:0, init_url: function(url){ this.imgTexture = new Image(); this.imgTexture.src=url; this.init=false; this.box_Scale=1; this.position = { x: 0, y: 0 }; this.img_y=0; this.scale = { x: 0.5, y: 0.5 }; this.context.clearRect(0, 0, this.canvas.width, this.canvas.height); this.animate(); console.log(url); }, animate: function() { //set scale such as image cover all the canvas if(!this.init) { if(this.imgTexture.width) { var scaleRatio = null; //检测图片的宽高比例 var w_h_ratio=this.imgTexture.width/this.imgTexture.height; if(this.canvas.clientWidth <= this.canvas.clientHeight) { scaleRatio = this.canvas.clientWidth / this.imgTexture.width; } else { scaleRatio = this.canvas.clientHeight / this.imgTexture.height; } this.img_y=this.canvas.height/2-this.imgTexture.height*scaleRatio/2; this.position.y=this.img_y; this.scale.x = scaleRatio; this.scale.y = scaleRatio; this.init = true; console.log("init:",this.init ,this.scale.x); } } this.context.clearRect(0, 0, this.canvas.width, this.canvas.height); this.context.drawImage( this.imgTexture, this.position.x, this.position.y, this.scale.x * this.imgTexture.width, this.scale.y * this.imgTexture.height); DrawMapInfo( this.box_Scale, this.scale.y, this.position.x, this.position.y); requestAnimationFrame(this.animate.bind(this)); }, gesturePinchZoom: function(event) { var zoom = false; if( event.targetTouches.length >= 2 ) { var p1 = event.targetTouches[0]; var p2 = event.targetTouches[1]; //两个touch_X坐标的绝对值 var zoomScale = Math.sqrt(Math.pow(p2.pageX - p1.pageX, 2) + Math.pow(p2.pageY - p1.pageY, 2)); //euclidian distance if( this.lastZoomScale ) { zoom = zoomScale - this.lastZoomScale; } this.lastZoomScale = zoomScale; } return zoom; }, doZoom: function(zoom) { if(!zoom) return; //new scale var currentScale = this.scale.x; var newScale = this.scale.x + zoom/100; var box_Scale = this.box_Scale + zoom/100; //var newzoom console.log("zoom",zoom/100); var img_y=this.img_y; //some helpers var 
deltaScale = newScale - currentScale; var currentWidth = (this.imgTexture.width * this.scale.x); var currentHeight = (this.imgTexture.height * this.scale.y); //deltaWidth===》detascale缩放差 var deltaWidth = this.imgTexture.width*deltaScale; var deltaHeight = this.imgTexture.height*deltaScale; console.log("detalwidth",deltaWidth); //by default scale doesnt change position and only add/remove pixel to right and bottom //默认的缩放不会改变定位,只会添加/移除像素,到左边或底部 //so we must move the image to the left to keep the image centered //所以我们必须移动图像到左边以保持图像的中心化 //ex: coefX and coefY = 0.5 when image is centered <=> move image to the left 0.5x pixels added to the right //coefX和coefY赋值为0.5当图像在中心时,移动图像到左边的0.5像素, //canvasmiddleX——取得canvas的中心点 var canvas_middle_X = this.canvas.clientWidth / 2; var canvas_middle_Y = this.canvas.clientHeight / 2; // var xonmap = (-this.position.x) + canvas_middle_X; var yonmap = (-this.position.y) + canvas_middle_Y; var coefX = -xonmap / (currentWidth); var coefY = -yonmap / (currentHeight); var newPosX = this.position.x + deltaWidth*coefX; var newPosY = this.position.y + deltaHeight*coefY; // console.log("new_posy",newPosX); //edges cases var newWidth = currentWidth + deltaWidth; var newHeight = currentHeight + deltaHeight; if( newWidth < this.canvas.clientWidth ) return; if( newPosX > 0 ) { newPosX = 0; } if( newPosX + newWidth < this.canvas.clientWidth ) { newPosX = this.canvas.clientWidth - newWidth; } console.log("new_posy",newPosY); console.log("newHeight",newHeight); console.log("newPosY + newHeight",newPosY + newHeight); // if( newHeight < this.canvas.clientHeight ) return; if( newPosY > this.img_y) { newPosY = this.img_y; } // if( newPosY + newHeight < this.canvas.clientHeight ) { // newPosY = this.imgTexture.height - newHeight; } // console.log("new_posy",newPosX); //最终效果并初始赋值 //finally affectations this.box_Scale = box_Scale; this.scale.x = newScale; this.scale.y = newScale; this.position.x = newPosX; this.position.y = newPosY; 
console.log("newScale:",box_Scale); }, //平移 doMove: function(relativeX, relativeY) { if(this.lastX && this.lastY) { var deltaX = relativeX - this.lastX; var deltaY = relativeY - this.lastY; var currentWidth = (this.imgTexture.width * this.scale.x); var currentHeight = (this.imgTexture.height * this.scale.y); this.position.x += deltaX; this.position.y += deltaY; //domve--->edge cases if( this.position.x > 0 ) { this.position.x = 0; } else if( this.position.x + currentWidth < this.canvas.clientWidth ) { this.position.x = this.canvas.clientWidth - currentWidth; } if(currentHeight >= this.canvas.clientHeight){ if( this.position.y >0 ) { this.position.y = 0; } else if(this.position.y + currentHeight < this.canvas.clientHeight) { this.position.y = this.canvas.clientHeight - currentHeight; } }else{ if(this.position.y >this.img_y){ this.position.y =this.img_y; }else if(this.position.y + currentHeight < (this.canvas.clientHeight-this.img_y)) { this.position.y = this.canvas.clientHeight - currentHeight-this.img_y; } } } this.lastX = relativeX; this.lastY = relativeY; }, setEventListeners: function() { // touch //监听touchstart事件,初始化变量 this.canvas.addEventListener('touchstart', function(e) { this.lastX = null; this.lastY = null; this.lastZoomScale = null; }.bind(this)); this.canvas.addEventListener('touchmove', function(e) { e.preventDefault(); //放大缩小 zoom功能 if(e.targetTouches.length == 2) { //pinch this.doZoom(this.gesturePinchZoom(e)); } //平移功能 ,获取传入 relativeX,relativeY的相对平移量 else if(e.targetTouches.length == 1) { var relativeX = e.targetTouches[0].pageX - this.canvas.getBoundingClientRect().left; var relativeY = e.targetTouches[0].pageY - this.canvas.getBoundingClientRect().top; this.doMove(relativeX, relativeY); } }.bind(this)); if(this.desktop) { // keyboard+mouse window.addEventListener('keyup', function(e) { if(e.keyCode == 187 || e.keyCode == 61) { //+ this.doZoom(5); } else if(e.keyCode == 54) {//- this.doZoom(-5); } }.bind(this)); window.addEventListener('mousedown', 
function(e) { this.mdown = true; this.lastX = null; this.lastY = null; }.bind(this)); window.addEventListener('mouseup', function(e) { this.mdown = false; }.bind(this)); window.addEventListener('mousemove', function(e) { var relativeX = e.pageX - this.canvas.getBoundingClientRect().left; var relativeY = e.pageY - this.canvas.getBoundingClientRect().top; if(e.target == this.canvas && this.mdown) { this.doMove(relativeX, relativeY); } if(relativeX <= 0 || relativeX >= this.canvas.clientWidth || relativeY <= 0 || relativeY >= this.canvas.clientHeight) { this.mdown = false; } }.bind(this)); } }, checkRequestAnimationFrame: function() { var lastTime = 0; var vendors = ['ms', 'moz', 'webkit', 'o']; for(var x = 0; x < vendors.length && !window.requestAnimationFrame; ++x) { window.requestAnimationFrame = window[vendors[x]+'RequestAnimationFrame']; window.cancelAnimationFrame = window[vendors[x]+'CancelAnimationFrame'] || window[vendors[x]+'CancelRequestAnimationFrame']; } if (!window.requestAnimationFrame) { window.requestAnimationFrame = function(callback, element) { var currTime = new Date().getTime(); var timeToCall = Math.max(0, 16 - (currTime - lastTime)); var id = window.setTimeout(function() { callback(currTime + timeToCall); }, timeToCall); lastTime = currTime + timeToCall; return id; }; } if (!window.cancelAnimationFrame) { window.cancelAnimationFrame = function(id) { clearTimeout(id); }; } } }; root.ImgTouchCanvas = ImgTouchCanvas; }).call(this);
// _img:this.imgTexture, //剪裁的x y坐标
random_line_split
IIoT End-to-End (Pt 2).py
# Databricks notebook source # MAGIC %md # End to End Industrial IoT (IIoT) on Azure Databricks # MAGIC ## Part 2 - Machine Learning # MAGIC This notebook demonstrates the following architecture for IIoT Ingest, Processing and Analytics on Azure. The following architecture is implemented for the demo. # MAGIC <img src="https://sguptasa.blob.core.windows.net/random/iiot_blog/end_to_end_architecture.png" width=800> # MAGIC # MAGIC The notebook is broken into sections following these steps: # MAGIC 3. **Machine Learning** - train XGBoost regression models using distributed ML to predict power output and asset remaining life on historical sensor data # MAGIC 4. **Model Deployment** - deploy trained models for real-time serving in Azure ML services # MAGIC 5. **Model Inference** - score real data instantly against hosted models via REST API # COMMAND ---------- # AzureML Workspace info (name, region, resource group and subscription ID) for model deployment dbutils.widgets.text("Subscription ID","<your Azure subscription ID>","Subscription ID") dbutils.widgets.text("Resource Group","<your Azure resource group name>","Resource Group") dbutils.widgets.text("Region","<your Azure region>","Region") dbutils.widgets.text("Storage Account","<your ADLS Gen 2 account name>","Storage Account") # COMMAND ---------- # MAGIC %md ## Step 1 - Environment Setup # MAGIC # MAGIC The pre-requisites are listed below: # MAGIC # MAGIC ### Azure Services Required # MAGIC * ADLS Gen 2 Storage account with a container called `iot` # MAGIC * Azure Machine Learning Workspace called `iot` # MAGIC # MAGIC ### Azure Databricks Configuration Required # MAGIC * 3-node (min) Databricks Cluster running **DBR 7.0ML+** and the following libraries: # MAGIC * **MLflow[AzureML]** - PyPI library `azureml-mlflow` # MAGIC * **Azure Event Hubs Connector for Databricks** - Maven coordinates `com.microsoft.azure:azure-eventhubs-spark_2.12:2.3.16` # MAGIC * The following Secrets defined in scope `iot` # MAGIC * 
`adls_key` - Access Key to ADLS storage account **(Important - use the [Access Key](https://raw.githubusercontent.com/tomatoTomahto/azure_databricks_iot/master/bricks.com/blog/2020/03/27/data-exfiltration-protection-with-azure-databricks.html))** # MAGIC * The following notebook widgets populated: # MAGIC * `Subscription ID` - subscription ID of your Azure ML Workspace # MAGIC * `Resource Group` - resource group name of your Azure ML Workspace # MAGIC * `Region` - Azure region of your Azure ML Workspace # MAGIC * `Storage Account` - Name of your storage account # MAGIC * **Part 1 Notebook Run to generate and process the data** (this can be found [here](https://databricks.com/notebooks/iiot/iiot-end-to-end-part-1.html)). Ensure the following tables have been created: # MAGIC * **turbine_maintenance** - Maintenance dates for each Wind Turbine # MAGIC * **turbine_power** - Hourly power output for each Wind Turbine # MAGIC * **turbine_enriched** - Hourly turbine sensor readinigs (RPM, Angle) enriched with weather readings (temperature, wind speed/direction, humidity) # MAGIC * **gold_readings** - Combined view containing all 3 tables # COMMAND ---------- # Setup access to storage account for temp data when pushing to Synapse storage_account = dbutils.widgets.get("Storage Account") spark.conf.set(f"fs.azure.account.key.{storage_account}.dfs.core.windows.net", dbutils.secrets.get("iot","adls_key")) # Setup storage locations for all data ROOT_PATH = f"abfss://iot@{storage_account}.dfs.core.windows.net/" # Pyspark and ML Imports import os, json, requests from pyspark.sql import functions as F from pyspark.sql.functions import pandas_udf, PandasUDFType import numpy as np import pandas as pd import xgboost as xgb import mlflow.xgboost import mlflow.azureml from azureml.core import Workspace from azureml.core.webservice import AciWebservice, Webservice import random, string # Random String generator for ML models served in AzureML random_string = lambda length: 
''.join(random.SystemRandom().choice(string.ascii_lowercase) for _ in range(length)) # COMMAND ---------- # MAGIC %md ## Step 3 - Machine Learning # MAGIC Now that our data is flowing reliably from our sensor devices into an enriched Delta table in Data Lake storage, we can start to build ML models to predict power output and remaining life of our assets using historical sensor, weather, power and maintenance data. # MAGIC # MAGIC We create two models ***for each Wind Turbine***: # MAGIC 1. Turbine Power Output - using current readings for turbine operating parameters (angle, RPM) and weather (temperature, humidity, etc.), predict the expected power output 6 hours from now # MAGIC 2. Turbine Remaining Life - predict the remaining life in days until the next maintenance event # MAGIC # MAGIC <img src="https://sguptasa.blob.core.windows.net/random/iiot_blog/turbine_models.png" width=800> # MAGIC # MAGIC We will use the XGBoost framework to train regression models. Due to the size of the data and number of Wind Turbines, we will use Spark UDFs to distribute training across all the nodes in our cluster. # COMMAND ---------- # MAGIC %md ### 3a. Feature Engineering # MAGIC In order to predict power output 6 hours ahead, we need to first time-shift our data to create our label column. We can do this easily using Spark Window partitioning. # MAGIC # MAGIC In order to predict remaining life, we need to backtrace the remaining life from the maintenance events. We can do this easily using cross joins. 
The following diagram illustrates the ML Feature Engineering pipeline: # MAGIC # MAGIC <img src="https://sguptasa.blob.core.windows.net/random/iiot_blog/ml_pipeline.png" width=800> # COMMAND ---------- # MAGIC %sql # MAGIC -- Calculate the age of each turbine and the remaining life in days # MAGIC CREATE OR REPLACE VIEW turbine_age AS # MAGIC WITH reading_dates AS (SELECT distinct date, deviceid FROM turbine_power), # MAGIC maintenance_dates AS ( # MAGIC SELECT d.*, datediff(nm.date, d.date) as datediff_next, datediff(d.date, lm.date) as datediff_last # MAGIC FROM reading_dates d LEFT JOIN turbine_maintenance nm ON (d.deviceid=nm.deviceid AND d.date<=nm.date) # MAGIC LEFT JOIN turbine_maintenance lm ON (d.deviceid=lm.deviceid AND d.date>=lm.date )) # MAGIC SELECT date, deviceid, ifnull(min(datediff_last),0) AS age, ifnull(min(datediff_next),0) AS remaining_life # MAGIC FROM maintenance_dates # MAGIC GROUP BY deviceid, date; # MAGIC # MAGIC -- Calculate the power 6 hours ahead using Spark Windowing and build a feature_table to feed into our ML models # MAGIC CREATE OR REPLACE VIEW feature_table AS # MAGIC SELECT r.*, age, remaining_life, # MAGIC LEAD(power, 72, power) OVER (PARTITION BY r.deviceid ORDER BY window) as power_6_hours_ahead # MAGIC FROM gold_readings r JOIN turbine_age a ON (r.date=a.date AND r.deviceid=a.deviceid) # MAGIC WHERE r.date < CURRENT_DATE(); # COMMAND ---------- # MAGIC %sql # MAGIC SELECT window, power, power_6_hours_ahead FROM feature_table WHERE deviceid='WindTurbine-1' # COMMAND ---------- # MAGIC %sql # MAGIC SELECT date, avg(age) as age, avg(remaining_life) as life FROM feature_table WHERE deviceid='WindTurbine-1' GROUP BY date ORDER BY date # COMMAND ---------- # MAGIC %md ### 3b. 
Distributed Model Training - Predict Power Output # MAGIC [Pandas UDFs](https://docs.microsoft.com/en-us/azure/databricks/spark/latest/spark-sql/udf-python-pandas?toc=https%3A%2F%2Fdocs.microsoft.com%2Fen-us%2Fazure%2Fazure-databricks%2Ftoc.json&bc=https%3A%2F%2Fdocs.microsoft.com%2Fen-us%2Fazure%2Fbread%2Ftoc.json) allow us to vectorize Pandas code across multiple nodes in a cluster. Here we create a UDF to train an XGBoost Regressor model against all the historic data for a particular Wind Turbine. We use a Grouped Map UDF as we perform this model training on the Wind Turbine group level. # COMMAND ---------- # Create a function to train a XGBoost Regressor on a turbine's data def train_distributed_xgb(readings_pd, model_type, label_col, prediction_col): mlflow.xgboost.autolog() with mlflow.start_run(): # Log the model type and device ID mlflow.log_param('deviceid', readings_pd['deviceid'][0]) mlflow.log_param('model', model_type) # Train an XGBRegressor on the data for this Turbine alg = xgb.XGBRegressor() train_dmatrix = xgb.DMatrix(data=readings_pd[feature_cols].astype('float'),label=readings_pd[label_col]) params = {'learning_rate': 0.5, 'alpha':10, 'colsample_bytree': 0.5, 'max_depth': 5} model = xgb.train(params=params, dtrain=train_dmatrix, evals=[(train_dmatrix, 'train')]) # Make predictions on the dataset and return the results readings_pd[prediction_col] = model.predict(train_dmatrix) return readings_pd # Create a Spark Dataframe that contains the features and labels we need non_feature_cols = ['date','window','deviceid','winddirection','remaining_life'] feature_cols = ['angle','rpm','temperature','humidity','windspeed','power','age'] label_col = 'power_6_hours_ahead' prediction_col = label_col + '_predicted' # Read in our feature table and select the columns of interest feature_df = spark.table('feature_table').selectExpr(non_feature_cols + feature_cols + [label_col] + [f'0 as {prediction_col}']) # Register a Pandas UDF to distribute XGB model training 
using Spark @pandas_udf(feature_df.schema, PandasUDFType.GROUPED_MAP) def
(readings_pd): return train_distributed_xgb(readings_pd, 'power_prediction', label_col, prediction_col) # Run the Pandas UDF against our feature dataset - this will train 1 model for each turbine power_predictions = feature_df.groupBy('deviceid').apply(train_power_models) # Save predictions to storage power_predictions.write.format("delta").mode("overwrite").partitionBy("date").saveAsTable("turbine_power_predictions") # COMMAND ---------- # MAGIC %sql # MAGIC -- Plot actuals vs. predicted # MAGIC SELECT date, deviceid, avg(power_6_hours_ahead) as actual, avg(power_6_hours_ahead_predicted) as predicted FROM turbine_power_predictions GROUP BY date, deviceid # COMMAND ---------- # MAGIC %md #### Automated Model Tracking in Databricks # MAGIC As you train the models, notice how Databricks-managed MLflow automatically tracks each run in the "Runs" tab of the notebook. You can open each run and view the parameters, metrics, models and model artifacts that are captured by MLflow Autologging. For XGBoost Regression models, MLflow tracks: # MAGIC 1. Any model parameters (alpha, colsample, learning rate, etc.) passed to the `params` variable # MAGIC 2. Metrics specified in `evals` (RMSE by default) # MAGIC 3. The trained XGBoost model file # MAGIC 4. Feature importances # MAGIC # MAGIC <img src="https://sguptasa.blob.core.windows.net/random/iiot_blog/iiot_mlflow_tracking.gif" width=800> # COMMAND ---------- # MAGIC %md ### 3c. Distributed Model Training - Predict Remaining Life # MAGIC Our second model predicts the remaining useful life of each Wind Turbine based on the current operating conditions. We have historical maintenance data that indicates when a replacement activity occured - this will be used to calculate the remaining life as our training label. 
# MAGIC # MAGIC Once again, we train an XGBoost model for each Wind Turbine to predict the remaining life given a set of operating parameters and weather conditions # COMMAND ---------- # Create a Spark Dataframe that contains the features and labels we need non_feature_cols = ['date','window','deviceid','winddirection','power_6_hours_ahead_predicted'] label_col = 'remaining_life' prediction_col = label_col + '_predicted' # Read in our feature table and select the columns of interest feature_df = spark.table('turbine_power_predictions').selectExpr(non_feature_cols + feature_cols + [label_col] + [f'0 as {prediction_col}']) # Register a Pandas UDF to distribute XGB model training using Spark @pandas_udf(feature_df.schema, PandasUDFType.GROUPED_MAP) def train_life_models(readings_pd): return train_distributed_xgb(readings_pd, 'life_prediction', label_col, prediction_col) # Run the Pandas UDF against our feature dataset - this will train 1 model per turbine and write the predictions to a table life_predictions = ( feature_df.groupBy('deviceid').apply(train_life_models) .write.format("delta").mode("overwrite") .partitionBy("date") .saveAsTable("turbine_life_predictions") ) # COMMAND ---------- # MAGIC %sql # MAGIC SELECT date, avg(remaining_life) as Actual_Life, avg(remaining_life_predicted) as Predicted_Life # MAGIC FROM turbine_life_predictions # MAGIC WHERE deviceid='WindTurbine-1' # MAGIC GROUP BY date ORDER BY date # COMMAND ---------- # MAGIC %md The models to predict remaining useful life have been trained and logged by MLflow. We can now move on to model deployment in AzureML. # COMMAND ---------- # MAGIC %md ## Step 4 - Model Deployment to AzureML # MAGIC Now that our models have been trained, we can deploy them in an automated way directly to a model serving environment like Azure ML. Below, we connect to an AzureML workspace, build a container image for the model, and deploy that image to Azure Container Instances (ACI) to be hosted for REST API calls. 
# MAGIC # MAGIC **Note:** This step can take up to 10 minutes to run due to images being created and deplyed in Azure ML. # MAGIC # MAGIC **Important:** This step requires authentication to Azure - open the link provided in the output of the cell in a new browser tab and use the code provided. # COMMAND ---------- # AML Workspace Information - replace with your workspace info aml_resource_group = dbutils.widgets.get("Resource Group") aml_subscription_id = dbutils.widgets.get("Subscription ID") aml_region = dbutils.widgets.get("Region") aml_workspace_name = "iot" turbine = "WindTurbine-1" power_model = "power_prediction" life_model = "life_prediction" # Connect to a workspace (replace widgets with your own workspace info) workspace = Workspace.create(name = aml_workspace_name, subscription_id = aml_subscription_id, resource_group = aml_resource_group, location = aml_region, exist_ok=True) # Retrieve the remaining_life and power_output experiments on WindTurbine-1, and get the best performing model (min RMSE) best_life_model = mlflow.search_runs(filter_string=f'params.deviceid="{turbine}" and params.model="{life_model}"')\ .dropna().sort_values("metrics.train-rmse")['artifact_uri'].iloc[0] + '/model' best_power_model = mlflow.search_runs(filter_string=f'params.deviceid="{turbine}" and params.model="{power_model}"')\ .dropna().sort_values("metrics.train-rmse")['artifact_uri'].iloc[0] + '/model' scoring_uris = {} for model, path in [('life',best_life_model),('power',best_power_model)]: # Build images for each of our two models in Azure Container Instances print(f"-----Building image for {model} model-----") model_image, azure_model = mlflow.azureml.build_image(model_uri=path, workspace=workspace, model_name=model, image_name=model, description=f"XGBoost model to predict {model} of a turbine", synchronous=True) model_image.wait_for_creation(show_output=True) # Deploy web services to host each model as a REST API print(f"-----Deploying image for {model} model-----") 
dev_webservice_name = model + random_string(10) dev_webservice_deployment_config = AciWebservice.deploy_configuration() dev_webservice = Webservice.deploy_from_image(name=dev_webservice_name, image=model_image, deployment_config=dev_webservice_deployment_config, workspace=workspace) dev_webservice.wait_for_deployment() # Get the URI for sending REST requests to scoring_uris[model] = dev_webservice.scoring_uri # COMMAND ---------- print(f"-----Model URIs for Scoring:-----") print(f"Life Prediction URL: {scoring_uris['life']}") print(f"Power Prediction URL: {scoring_uris['power']}") # COMMAND ---------- # MAGIC %md You can view your model, it's deployments and URL endpoints by navigating to https://ml.azure.com/. # MAGIC # MAGIC <img src="https://sguptasa.blob.core.windows.net/random/iiot_blog/iiot_azureml.gif" width=800> # COMMAND ---------- # MAGIC %md ## Step 5 - Model Inference: Real-time Scoring # MAGIC We can now make HTTP REST calls from a web app, PowerBI, or directly from Databricks to the hosted model URI to score data directly # COMMAND ---------- # Retrieve the Scoring URL provided by AzureML power_uri = scoring_uris['power'] life_uri = scoring_uris['life'] # Construct a payload to send with the request payload = { 'angle':8, 'rpm':6, 'temperature':25, 'humidity':50, 'windspeed':5, 'power':150, 'age':10 } def score_data(uri, payload): rest_payload = json.dumps({"data": [list(payload.values())]}) response = requests.post(uri, data=rest_payload, headers={"Content-Type": "application/json"}) return json.loads(response.text) print(f'Current Operating Parameters: {payload}') print(f'Predicted power (in kwh) from model: {score_data(power_uri, payload)}') print(f'Predicted remaining life (in days) from model: {score_data(life_uri, payload)}') # COMMAND ---------- # MAGIC %md ### Step 6: Asset Optimization # MAGIC We can now identify the optimal operating conditions for maximizing power output while also maximizing asset useful life. 
# MAGIC # MAGIC \\(Revenue = Price\displaystyle\sum_1^{365} Power_t\\) # MAGIC # MAGIC \\(Cost = {365 \over Life_{rpm}} Price \displaystyle\sum_1^{24} Power_t \\) # MAGIC # MAGIC Price\displaystyle\sum_{t=1}^{24})\\) # MAGIC # MAGIC \\(Profit = Revenue - Cost\\) # MAGIC # MAGIC \\(Power_t\\) and \\(Life\\) will be calculated by scoring many different RPM values in AzureML. The results can be visualized to identify the RPM that yields the highest profit. # COMMAND ---------- # Construct a payload to send with the request payload = { 'angle':8, 'rpm':6, 'temperature':25, 'humidity':50, 'windspeed':5, 'power':150, 'age':10 } # Iterate through 50 different RPM configurations and capture the predicted power and remaining life at each RPM results = [] for rpm in range(1,15): payload['rpm'] = rpm expected_power = score_data(power_uri, payload)[0] payload['power'] = expected_power expected_life = -score_data(life_uri, payload)[0] results.append((rpm, expected_power, expected_life)) # Calculalte the Revenue, Cost and Profit generated for each RPM configuration optimization_df = pd.DataFrame(results, columns=['RPM', 'Expected Power', 'Expected Life']) optimization_df['Revenue'] = optimization_df['Expected Power'] * 24 * 365 optimization_df['Cost'] = optimization_df['Expected Power'] * 24 * 365 / optimization_df['Expected Life'] optimization_df['Profit'] = optimization_df['Revenue'] + optimization_df['Cost'] display(optimization_df) # COMMAND ---------- # MAGIC %md The optimal operating parameters for **WindTurbine-1** given the specified weather conditions is **11 RPM** for generating a maximum profit of **$1.4M**! Your results may vary due to the random nature of the sensor readings. 
# COMMAND ---------- # MAGIC %md # MAGIC ### Step 7: Data Serving and Visualization (not included in notebook) # MAGIC Now that our models are created and the data is scored, we can use Azure Synapse with PowerBI to perform data warehousing and analyltic reporting to generate a report like the one below. # MAGIC <img src="https://sguptasa.blob.core.windows.net/random/iiot_blog/PBI_report.gif" width=800>
train_power_models
identifier_name
IIoT End-to-End (Pt 2).py
# Databricks notebook source # MAGIC %md # End to End Industrial IoT (IIoT) on Azure Databricks # MAGIC ## Part 2 - Machine Learning # MAGIC This notebook demonstrates the following architecture for IIoT Ingest, Processing and Analytics on Azure. The following architecture is implemented for the demo. # MAGIC <img src="https://sguptasa.blob.core.windows.net/random/iiot_blog/end_to_end_architecture.png" width=800> # MAGIC # MAGIC The notebook is broken into sections following these steps: # MAGIC 3. **Machine Learning** - train XGBoost regression models using distributed ML to predict power output and asset remaining life on historical sensor data # MAGIC 4. **Model Deployment** - deploy trained models for real-time serving in Azure ML services # MAGIC 5. **Model Inference** - score real data instantly against hosted models via REST API # COMMAND ---------- # AzureML Workspace info (name, region, resource group and subscription ID) for model deployment dbutils.widgets.text("Subscription ID","<your Azure subscription ID>","Subscription ID") dbutils.widgets.text("Resource Group","<your Azure resource group name>","Resource Group") dbutils.widgets.text("Region","<your Azure region>","Region") dbutils.widgets.text("Storage Account","<your ADLS Gen 2 account name>","Storage Account") # COMMAND ---------- # MAGIC %md ## Step 1 - Environment Setup # MAGIC # MAGIC The pre-requisites are listed below: # MAGIC # MAGIC ### Azure Services Required # MAGIC * ADLS Gen 2 Storage account with a container called `iot` # MAGIC * Azure Machine Learning Workspace called `iot` # MAGIC # MAGIC ### Azure Databricks Configuration Required # MAGIC * 3-node (min) Databricks Cluster running **DBR 7.0ML+** and the following libraries: # MAGIC * **MLflow[AzureML]** - PyPI library `azureml-mlflow` # MAGIC * **Azure Event Hubs Connector for Databricks** - Maven coordinates `com.microsoft.azure:azure-eventhubs-spark_2.12:2.3.16` # MAGIC * The following Secrets defined in scope `iot` # MAGIC * 
`adls_key` - Access Key to ADLS storage account **(Important - use the [Access Key](https://raw.githubusercontent.com/tomatoTomahto/azure_databricks_iot/master/bricks.com/blog/2020/03/27/data-exfiltration-protection-with-azure-databricks.html))** # MAGIC * The following notebook widgets populated: # MAGIC * `Subscription ID` - subscription ID of your Azure ML Workspace # MAGIC * `Resource Group` - resource group name of your Azure ML Workspace # MAGIC * `Region` - Azure region of your Azure ML Workspace # MAGIC * `Storage Account` - Name of your storage account # MAGIC * **Part 1 Notebook Run to generate and process the data** (this can be found [here](https://databricks.com/notebooks/iiot/iiot-end-to-end-part-1.html)). Ensure the following tables have been created: # MAGIC * **turbine_maintenance** - Maintenance dates for each Wind Turbine # MAGIC * **turbine_power** - Hourly power output for each Wind Turbine # MAGIC * **turbine_enriched** - Hourly turbine sensor readinigs (RPM, Angle) enriched with weather readings (temperature, wind speed/direction, humidity) # MAGIC * **gold_readings** - Combined view containing all 3 tables # COMMAND ---------- # Setup access to storage account for temp data when pushing to Synapse storage_account = dbutils.widgets.get("Storage Account") spark.conf.set(f"fs.azure.account.key.{storage_account}.dfs.core.windows.net", dbutils.secrets.get("iot","adls_key")) # Setup storage locations for all data ROOT_PATH = f"abfss://iot@{storage_account}.dfs.core.windows.net/" # Pyspark and ML Imports import os, json, requests from pyspark.sql import functions as F from pyspark.sql.functions import pandas_udf, PandasUDFType import numpy as np import pandas as pd import xgboost as xgb import mlflow.xgboost import mlflow.azureml from azureml.core import Workspace from azureml.core.webservice import AciWebservice, Webservice import random, string # Random String generator for ML models served in AzureML random_string = lambda length: 
''.join(random.SystemRandom().choice(string.ascii_lowercase) for _ in range(length)) # COMMAND ---------- # MAGIC %md ## Step 3 - Machine Learning # MAGIC Now that our data is flowing reliably from our sensor devices into an enriched Delta table in Data Lake storage, we can start to build ML models to predict power output and remaining life of our assets using historical sensor, weather, power and maintenance data. # MAGIC # MAGIC We create two models ***for each Wind Turbine***: # MAGIC 1. Turbine Power Output - using current readings for turbine operating parameters (angle, RPM) and weather (temperature, humidity, etc.), predict the expected power output 6 hours from now # MAGIC 2. Turbine Remaining Life - predict the remaining life in days until the next maintenance event # MAGIC # MAGIC <img src="https://sguptasa.blob.core.windows.net/random/iiot_blog/turbine_models.png" width=800> # MAGIC # MAGIC We will use the XGBoost framework to train regression models. Due to the size of the data and number of Wind Turbines, we will use Spark UDFs to distribute training across all the nodes in our cluster. # COMMAND ---------- # MAGIC %md ### 3a. Feature Engineering # MAGIC In order to predict power output 6 hours ahead, we need to first time-shift our data to create our label column. We can do this easily using Spark Window partitioning. # MAGIC # MAGIC In order to predict remaining life, we need to backtrace the remaining life from the maintenance events. We can do this easily using cross joins. 
The following diagram illustrates the ML Feature Engineering pipeline: # MAGIC # MAGIC <img src="https://sguptasa.blob.core.windows.net/random/iiot_blog/ml_pipeline.png" width=800> # COMMAND ---------- # MAGIC %sql # MAGIC -- Calculate the age of each turbine and the remaining life in days # MAGIC CREATE OR REPLACE VIEW turbine_age AS # MAGIC WITH reading_dates AS (SELECT distinct date, deviceid FROM turbine_power), # MAGIC maintenance_dates AS ( # MAGIC SELECT d.*, datediff(nm.date, d.date) as datediff_next, datediff(d.date, lm.date) as datediff_last # MAGIC FROM reading_dates d LEFT JOIN turbine_maintenance nm ON (d.deviceid=nm.deviceid AND d.date<=nm.date) # MAGIC LEFT JOIN turbine_maintenance lm ON (d.deviceid=lm.deviceid AND d.date>=lm.date )) # MAGIC SELECT date, deviceid, ifnull(min(datediff_last),0) AS age, ifnull(min(datediff_next),0) AS remaining_life # MAGIC FROM maintenance_dates # MAGIC GROUP BY deviceid, date; # MAGIC # MAGIC -- Calculate the power 6 hours ahead using Spark Windowing and build a feature_table to feed into our ML models # MAGIC CREATE OR REPLACE VIEW feature_table AS # MAGIC SELECT r.*, age, remaining_life, # MAGIC LEAD(power, 72, power) OVER (PARTITION BY r.deviceid ORDER BY window) as power_6_hours_ahead # MAGIC FROM gold_readings r JOIN turbine_age a ON (r.date=a.date AND r.deviceid=a.deviceid) # MAGIC WHERE r.date < CURRENT_DATE(); # COMMAND ---------- # MAGIC %sql # MAGIC SELECT window, power, power_6_hours_ahead FROM feature_table WHERE deviceid='WindTurbine-1' # COMMAND ---------- # MAGIC %sql # MAGIC SELECT date, avg(age) as age, avg(remaining_life) as life FROM feature_table WHERE deviceid='WindTurbine-1' GROUP BY date ORDER BY date # COMMAND ---------- # MAGIC %md ### 3b. 
Distributed Model Training - Predict Power Output # MAGIC [Pandas UDFs](https://docs.microsoft.com/en-us/azure/databricks/spark/latest/spark-sql/udf-python-pandas?toc=https%3A%2F%2Fdocs.microsoft.com%2Fen-us%2Fazure%2Fazure-databricks%2Ftoc.json&bc=https%3A%2F%2Fdocs.microsoft.com%2Fen-us%2Fazure%2Fbread%2Ftoc.json) allow us to vectorize Pandas code across multiple nodes in a cluster. Here we create a UDF to train an XGBoost Regressor model against all the historic data for a particular Wind Turbine. We use a Grouped Map UDF as we perform this model training on the Wind Turbine group level. # COMMAND ---------- # Create a function to train a XGBoost Regressor on a turbine's data def train_distributed_xgb(readings_pd, model_type, label_col, prediction_col): mlflow.xgboost.autolog() with mlflow.start_run(): # Log the model type and device ID mlflow.log_param('deviceid', readings_pd['deviceid'][0]) mlflow.log_param('model', model_type) # Train an XGBRegressor on the data for this Turbine alg = xgb.XGBRegressor() train_dmatrix = xgb.DMatrix(data=readings_pd[feature_cols].astype('float'),label=readings_pd[label_col]) params = {'learning_rate': 0.5, 'alpha':10, 'colsample_bytree': 0.5, 'max_depth': 5} model = xgb.train(params=params, dtrain=train_dmatrix, evals=[(train_dmatrix, 'train')]) # Make predictions on the dataset and return the results readings_pd[prediction_col] = model.predict(train_dmatrix) return readings_pd # Create a Spark Dataframe that contains the features and labels we need non_feature_cols = ['date','window','deviceid','winddirection','remaining_life'] feature_cols = ['angle','rpm','temperature','humidity','windspeed','power','age'] label_col = 'power_6_hours_ahead' prediction_col = label_col + '_predicted' # Read in our feature table and select the columns of interest feature_df = spark.table('feature_table').selectExpr(non_feature_cols + feature_cols + [label_col] + [f'0 as {prediction_col}']) # Register a Pandas UDF to distribute XGB model training 
using Spark @pandas_udf(feature_df.schema, PandasUDFType.GROUPED_MAP) def train_power_models(readings_pd): return train_distributed_xgb(readings_pd, 'power_prediction', label_col, prediction_col) # Run the Pandas UDF against our feature dataset - this will train 1 model for each turbine power_predictions = feature_df.groupBy('deviceid').apply(train_power_models) # Save predictions to storage power_predictions.write.format("delta").mode("overwrite").partitionBy("date").saveAsTable("turbine_power_predictions") # COMMAND ---------- # MAGIC %sql # MAGIC -- Plot actuals vs. predicted # MAGIC SELECT date, deviceid, avg(power_6_hours_ahead) as actual, avg(power_6_hours_ahead_predicted) as predicted FROM turbine_power_predictions GROUP BY date, deviceid # COMMAND ---------- # MAGIC %md #### Automated Model Tracking in Databricks # MAGIC As you train the models, notice how Databricks-managed MLflow automatically tracks each run in the "Runs" tab of the notebook. You can open each run and view the parameters, metrics, models and model artifacts that are captured by MLflow Autologging. For XGBoost Regression models, MLflow tracks: # MAGIC 1. Any model parameters (alpha, colsample, learning rate, etc.) passed to the `params` variable # MAGIC 2. Metrics specified in `evals` (RMSE by default) # MAGIC 3. The trained XGBoost model file # MAGIC 4. Feature importances # MAGIC # MAGIC <img src="https://sguptasa.blob.core.windows.net/random/iiot_blog/iiot_mlflow_tracking.gif" width=800> # COMMAND ---------- # MAGIC %md ### 3c. Distributed Model Training - Predict Remaining Life # MAGIC Our second model predicts the remaining useful life of each Wind Turbine based on the current operating conditions. We have historical maintenance data that indicates when a replacement activity occured - this will be used to calculate the remaining life as our training label. 
# MAGIC # MAGIC Once again, we train an XGBoost model for each Wind Turbine to predict the remaining life given a set of operating parameters and weather conditions # COMMAND ---------- # Create a Spark Dataframe that contains the features and labels we need non_feature_cols = ['date','window','deviceid','winddirection','power_6_hours_ahead_predicted'] label_col = 'remaining_life' prediction_col = label_col + '_predicted' # Read in our feature table and select the columns of interest feature_df = spark.table('turbine_power_predictions').selectExpr(non_feature_cols + feature_cols + [label_col] + [f'0 as {prediction_col}']) # Register a Pandas UDF to distribute XGB model training using Spark @pandas_udf(feature_df.schema, PandasUDFType.GROUPED_MAP) def train_life_models(readings_pd):
# Run the Pandas UDF against our feature dataset - this will train 1 model per turbine and write the predictions to a table life_predictions = ( feature_df.groupBy('deviceid').apply(train_life_models) .write.format("delta").mode("overwrite") .partitionBy("date") .saveAsTable("turbine_life_predictions") ) # COMMAND ---------- # MAGIC %sql # MAGIC SELECT date, avg(remaining_life) as Actual_Life, avg(remaining_life_predicted) as Predicted_Life # MAGIC FROM turbine_life_predictions # MAGIC WHERE deviceid='WindTurbine-1' # MAGIC GROUP BY date ORDER BY date # COMMAND ---------- # MAGIC %md The models to predict remaining useful life have been trained and logged by MLflow. We can now move on to model deployment in AzureML. # COMMAND ---------- # MAGIC %md ## Step 4 - Model Deployment to AzureML # MAGIC Now that our models have been trained, we can deploy them in an automated way directly to a model serving environment like Azure ML. Below, we connect to an AzureML workspace, build a container image for the model, and deploy that image to Azure Container Instances (ACI) to be hosted for REST API calls. # MAGIC # MAGIC **Note:** This step can take up to 10 minutes to run due to images being created and deplyed in Azure ML. # MAGIC # MAGIC **Important:** This step requires authentication to Azure - open the link provided in the output of the cell in a new browser tab and use the code provided. 
# COMMAND ---------- # AML Workspace Information - replace with your workspace info aml_resource_group = dbutils.widgets.get("Resource Group") aml_subscription_id = dbutils.widgets.get("Subscription ID") aml_region = dbutils.widgets.get("Region") aml_workspace_name = "iot" turbine = "WindTurbine-1" power_model = "power_prediction" life_model = "life_prediction" # Connect to a workspace (replace widgets with your own workspace info) workspace = Workspace.create(name = aml_workspace_name, subscription_id = aml_subscription_id, resource_group = aml_resource_group, location = aml_region, exist_ok=True) # Retrieve the remaining_life and power_output experiments on WindTurbine-1, and get the best performing model (min RMSE) best_life_model = mlflow.search_runs(filter_string=f'params.deviceid="{turbine}" and params.model="{life_model}"')\ .dropna().sort_values("metrics.train-rmse")['artifact_uri'].iloc[0] + '/model' best_power_model = mlflow.search_runs(filter_string=f'params.deviceid="{turbine}" and params.model="{power_model}"')\ .dropna().sort_values("metrics.train-rmse")['artifact_uri'].iloc[0] + '/model' scoring_uris = {} for model, path in [('life',best_life_model),('power',best_power_model)]: # Build images for each of our two models in Azure Container Instances print(f"-----Building image for {model} model-----") model_image, azure_model = mlflow.azureml.build_image(model_uri=path, workspace=workspace, model_name=model, image_name=model, description=f"XGBoost model to predict {model} of a turbine", synchronous=True) model_image.wait_for_creation(show_output=True) # Deploy web services to host each model as a REST API print(f"-----Deploying image for {model} model-----") dev_webservice_name = model + random_string(10) dev_webservice_deployment_config = AciWebservice.deploy_configuration() dev_webservice = Webservice.deploy_from_image(name=dev_webservice_name, image=model_image, deployment_config=dev_webservice_deployment_config, workspace=workspace) 
dev_webservice.wait_for_deployment() # Get the URI for sending REST requests to scoring_uris[model] = dev_webservice.scoring_uri # COMMAND ---------- print(f"-----Model URIs for Scoring:-----") print(f"Life Prediction URL: {scoring_uris['life']}") print(f"Power Prediction URL: {scoring_uris['power']}") # COMMAND ---------- # MAGIC %md You can view your model, it's deployments and URL endpoints by navigating to https://ml.azure.com/. # MAGIC # MAGIC <img src="https://sguptasa.blob.core.windows.net/random/iiot_blog/iiot_azureml.gif" width=800> # COMMAND ---------- # MAGIC %md ## Step 5 - Model Inference: Real-time Scoring # MAGIC We can now make HTTP REST calls from a web app, PowerBI, or directly from Databricks to the hosted model URI to score data directly # COMMAND ---------- # Retrieve the Scoring URL provided by AzureML power_uri = scoring_uris['power'] life_uri = scoring_uris['life'] # Construct a payload to send with the request payload = { 'angle':8, 'rpm':6, 'temperature':25, 'humidity':50, 'windspeed':5, 'power':150, 'age':10 } def score_data(uri, payload): rest_payload = json.dumps({"data": [list(payload.values())]}) response = requests.post(uri, data=rest_payload, headers={"Content-Type": "application/json"}) return json.loads(response.text) print(f'Current Operating Parameters: {payload}') print(f'Predicted power (in kwh) from model: {score_data(power_uri, payload)}') print(f'Predicted remaining life (in days) from model: {score_data(life_uri, payload)}') # COMMAND ---------- # MAGIC %md ### Step 6: Asset Optimization # MAGIC We can now identify the optimal operating conditions for maximizing power output while also maximizing asset useful life. 
# MAGIC # MAGIC \\(Revenue = Price\displaystyle\sum_1^{365} Power_t\\) # MAGIC # MAGIC \\(Cost = {365 \over Life_{rpm}} Price \displaystyle\sum_1^{24} Power_t \\) # MAGIC # MAGIC Price\displaystyle\sum_{t=1}^{24})\\) # MAGIC # MAGIC \\(Profit = Revenue - Cost\\) # MAGIC # MAGIC \\(Power_t\\) and \\(Life\\) will be calculated by scoring many different RPM values in AzureML. The results can be visualized to identify the RPM that yields the highest profit. # COMMAND ---------- # Construct a payload to send with the request payload = { 'angle':8, 'rpm':6, 'temperature':25, 'humidity':50, 'windspeed':5, 'power':150, 'age':10 } # Iterate through 50 different RPM configurations and capture the predicted power and remaining life at each RPM results = [] for rpm in range(1,15): payload['rpm'] = rpm expected_power = score_data(power_uri, payload)[0] payload['power'] = expected_power expected_life = -score_data(life_uri, payload)[0] results.append((rpm, expected_power, expected_life)) # Calculalte the Revenue, Cost and Profit generated for each RPM configuration optimization_df = pd.DataFrame(results, columns=['RPM', 'Expected Power', 'Expected Life']) optimization_df['Revenue'] = optimization_df['Expected Power'] * 24 * 365 optimization_df['Cost'] = optimization_df['Expected Power'] * 24 * 365 / optimization_df['Expected Life'] optimization_df['Profit'] = optimization_df['Revenue'] + optimization_df['Cost'] display(optimization_df) # COMMAND ---------- # MAGIC %md The optimal operating parameters for **WindTurbine-1** given the specified weather conditions is **11 RPM** for generating a maximum profit of **$1.4M**! Your results may vary due to the random nature of the sensor readings. 
# COMMAND ---------- # MAGIC %md # MAGIC ### Step 7: Data Serving and Visualization (not included in notebook) # MAGIC Now that our models are created and the data is scored, we can use Azure Synapse with PowerBI to perform data warehousing and analyltic reporting to generate a report like the one below. # MAGIC <img src="https://sguptasa.blob.core.windows.net/random/iiot_blog/PBI_report.gif" width=800>
return train_distributed_xgb(readings_pd, 'life_prediction', label_col, prediction_col)
identifier_body
IIoT End-to-End (Pt 2).py
# Databricks notebook source # MAGIC %md # End to End Industrial IoT (IIoT) on Azure Databricks # MAGIC ## Part 2 - Machine Learning # MAGIC This notebook demonstrates the following architecture for IIoT Ingest, Processing and Analytics on Azure. The following architecture is implemented for the demo. # MAGIC <img src="https://sguptasa.blob.core.windows.net/random/iiot_blog/end_to_end_architecture.png" width=800> # MAGIC # MAGIC The notebook is broken into sections following these steps: # MAGIC 3. **Machine Learning** - train XGBoost regression models using distributed ML to predict power output and asset remaining life on historical sensor data # MAGIC 4. **Model Deployment** - deploy trained models for real-time serving in Azure ML services # MAGIC 5. **Model Inference** - score real data instantly against hosted models via REST API # COMMAND ---------- # AzureML Workspace info (name, region, resource group and subscription ID) for model deployment dbutils.widgets.text("Subscription ID","<your Azure subscription ID>","Subscription ID") dbutils.widgets.text("Resource Group","<your Azure resource group name>","Resource Group") dbutils.widgets.text("Region","<your Azure region>","Region") dbutils.widgets.text("Storage Account","<your ADLS Gen 2 account name>","Storage Account") # COMMAND ---------- # MAGIC %md ## Step 1 - Environment Setup # MAGIC # MAGIC The pre-requisites are listed below: # MAGIC # MAGIC ### Azure Services Required # MAGIC * ADLS Gen 2 Storage account with a container called `iot` # MAGIC * Azure Machine Learning Workspace called `iot` # MAGIC # MAGIC ### Azure Databricks Configuration Required # MAGIC * 3-node (min) Databricks Cluster running **DBR 7.0ML+** and the following libraries: # MAGIC * **MLflow[AzureML]** - PyPI library `azureml-mlflow` # MAGIC * **Azure Event Hubs Connector for Databricks** - Maven coordinates `com.microsoft.azure:azure-eventhubs-spark_2.12:2.3.16` # MAGIC * The following Secrets defined in scope `iot` # MAGIC * 
`adls_key` - Access Key to ADLS storage account **(Important - use the [Access Key](https://raw.githubusercontent.com/tomatoTomahto/azure_databricks_iot/master/bricks.com/blog/2020/03/27/data-exfiltration-protection-with-azure-databricks.html))** # MAGIC * The following notebook widgets populated: # MAGIC * `Subscription ID` - subscription ID of your Azure ML Workspace # MAGIC * `Resource Group` - resource group name of your Azure ML Workspace # MAGIC * `Region` - Azure region of your Azure ML Workspace # MAGIC * `Storage Account` - Name of your storage account # MAGIC * **Part 1 Notebook Run to generate and process the data** (this can be found [here](https://databricks.com/notebooks/iiot/iiot-end-to-end-part-1.html)). Ensure the following tables have been created: # MAGIC * **turbine_maintenance** - Maintenance dates for each Wind Turbine # MAGIC * **turbine_power** - Hourly power output for each Wind Turbine # MAGIC * **turbine_enriched** - Hourly turbine sensor readinigs (RPM, Angle) enriched with weather readings (temperature, wind speed/direction, humidity) # MAGIC * **gold_readings** - Combined view containing all 3 tables # COMMAND ---------- # Setup access to storage account for temp data when pushing to Synapse storage_account = dbutils.widgets.get("Storage Account") spark.conf.set(f"fs.azure.account.key.{storage_account}.dfs.core.windows.net", dbutils.secrets.get("iot","adls_key")) # Setup storage locations for all data ROOT_PATH = f"abfss://iot@{storage_account}.dfs.core.windows.net/" # Pyspark and ML Imports import os, json, requests from pyspark.sql import functions as F from pyspark.sql.functions import pandas_udf, PandasUDFType import numpy as np import pandas as pd import xgboost as xgb import mlflow.xgboost import mlflow.azureml from azureml.core import Workspace from azureml.core.webservice import AciWebservice, Webservice import random, string # Random String generator for ML models served in AzureML random_string = lambda length: 
''.join(random.SystemRandom().choice(string.ascii_lowercase) for _ in range(length)) # COMMAND ---------- # MAGIC %md ## Step 3 - Machine Learning # MAGIC Now that our data is flowing reliably from our sensor devices into an enriched Delta table in Data Lake storage, we can start to build ML models to predict power output and remaining life of our assets using historical sensor, weather, power and maintenance data. # MAGIC # MAGIC We create two models ***for each Wind Turbine***: # MAGIC 1. Turbine Power Output - using current readings for turbine operating parameters (angle, RPM) and weather (temperature, humidity, etc.), predict the expected power output 6 hours from now # MAGIC 2. Turbine Remaining Life - predict the remaining life in days until the next maintenance event # MAGIC # MAGIC <img src="https://sguptasa.blob.core.windows.net/random/iiot_blog/turbine_models.png" width=800> # MAGIC # MAGIC We will use the XGBoost framework to train regression models. Due to the size of the data and number of Wind Turbines, we will use Spark UDFs to distribute training across all the nodes in our cluster. # COMMAND ---------- # MAGIC %md ### 3a. Feature Engineering # MAGIC In order to predict power output 6 hours ahead, we need to first time-shift our data to create our label column. We can do this easily using Spark Window partitioning. # MAGIC # MAGIC In order to predict remaining life, we need to backtrace the remaining life from the maintenance events. We can do this easily using cross joins. 
The following diagram illustrates the ML Feature Engineering pipeline: # MAGIC # MAGIC <img src="https://sguptasa.blob.core.windows.net/random/iiot_blog/ml_pipeline.png" width=800> # COMMAND ---------- # MAGIC %sql # MAGIC -- Calculate the age of each turbine and the remaining life in days # MAGIC CREATE OR REPLACE VIEW turbine_age AS # MAGIC WITH reading_dates AS (SELECT distinct date, deviceid FROM turbine_power), # MAGIC maintenance_dates AS ( # MAGIC SELECT d.*, datediff(nm.date, d.date) as datediff_next, datediff(d.date, lm.date) as datediff_last # MAGIC FROM reading_dates d LEFT JOIN turbine_maintenance nm ON (d.deviceid=nm.deviceid AND d.date<=nm.date) # MAGIC LEFT JOIN turbine_maintenance lm ON (d.deviceid=lm.deviceid AND d.date>=lm.date )) # MAGIC SELECT date, deviceid, ifnull(min(datediff_last),0) AS age, ifnull(min(datediff_next),0) AS remaining_life # MAGIC FROM maintenance_dates # MAGIC GROUP BY deviceid, date; # MAGIC # MAGIC -- Calculate the power 6 hours ahead using Spark Windowing and build a feature_table to feed into our ML models # MAGIC CREATE OR REPLACE VIEW feature_table AS # MAGIC SELECT r.*, age, remaining_life, # MAGIC LEAD(power, 72, power) OVER (PARTITION BY r.deviceid ORDER BY window) as power_6_hours_ahead # MAGIC FROM gold_readings r JOIN turbine_age a ON (r.date=a.date AND r.deviceid=a.deviceid) # MAGIC WHERE r.date < CURRENT_DATE(); # COMMAND ---------- # MAGIC %sql # MAGIC SELECT window, power, power_6_hours_ahead FROM feature_table WHERE deviceid='WindTurbine-1' # COMMAND ---------- # MAGIC %sql # MAGIC SELECT date, avg(age) as age, avg(remaining_life) as life FROM feature_table WHERE deviceid='WindTurbine-1' GROUP BY date ORDER BY date # COMMAND ---------- # MAGIC %md ### 3b. 
Distributed Model Training - Predict Power Output # MAGIC [Pandas UDFs](https://docs.microsoft.com/en-us/azure/databricks/spark/latest/spark-sql/udf-python-pandas?toc=https%3A%2F%2Fdocs.microsoft.com%2Fen-us%2Fazure%2Fazure-databricks%2Ftoc.json&bc=https%3A%2F%2Fdocs.microsoft.com%2Fen-us%2Fazure%2Fbread%2Ftoc.json) allow us to vectorize Pandas code across multiple nodes in a cluster. Here we create a UDF to train an XGBoost Regressor model against all the historic data for a particular Wind Turbine. We use a Grouped Map UDF as we perform this model training on the Wind Turbine group level. # COMMAND ---------- # Create a function to train a XGBoost Regressor on a turbine's data def train_distributed_xgb(readings_pd, model_type, label_col, prediction_col): mlflow.xgboost.autolog() with mlflow.start_run(): # Log the model type and device ID mlflow.log_param('deviceid', readings_pd['deviceid'][0]) mlflow.log_param('model', model_type) # Train an XGBRegressor on the data for this Turbine alg = xgb.XGBRegressor() train_dmatrix = xgb.DMatrix(data=readings_pd[feature_cols].astype('float'),label=readings_pd[label_col]) params = {'learning_rate': 0.5, 'alpha':10, 'colsample_bytree': 0.5, 'max_depth': 5} model = xgb.train(params=params, dtrain=train_dmatrix, evals=[(train_dmatrix, 'train')]) # Make predictions on the dataset and return the results readings_pd[prediction_col] = model.predict(train_dmatrix) return readings_pd # Create a Spark Dataframe that contains the features and labels we need non_feature_cols = ['date','window','deviceid','winddirection','remaining_life'] feature_cols = ['angle','rpm','temperature','humidity','windspeed','power','age'] label_col = 'power_6_hours_ahead' prediction_col = label_col + '_predicted' # Read in our feature table and select the columns of interest feature_df = spark.table('feature_table').selectExpr(non_feature_cols + feature_cols + [label_col] + [f'0 as {prediction_col}']) # Register a Pandas UDF to distribute XGB model training 
using Spark @pandas_udf(feature_df.schema, PandasUDFType.GROUPED_MAP) def train_power_models(readings_pd): return train_distributed_xgb(readings_pd, 'power_prediction', label_col, prediction_col) # Run the Pandas UDF against our feature dataset - this will train 1 model for each turbine power_predictions = feature_df.groupBy('deviceid').apply(train_power_models) # Save predictions to storage power_predictions.write.format("delta").mode("overwrite").partitionBy("date").saveAsTable("turbine_power_predictions") # COMMAND ---------- # MAGIC %sql # MAGIC -- Plot actuals vs. predicted # MAGIC SELECT date, deviceid, avg(power_6_hours_ahead) as actual, avg(power_6_hours_ahead_predicted) as predicted FROM turbine_power_predictions GROUP BY date, deviceid # COMMAND ---------- # MAGIC %md #### Automated Model Tracking in Databricks # MAGIC As you train the models, notice how Databricks-managed MLflow automatically tracks each run in the "Runs" tab of the notebook. You can open each run and view the parameters, metrics, models and model artifacts that are captured by MLflow Autologging. For XGBoost Regression models, MLflow tracks: # MAGIC 1. Any model parameters (alpha, colsample, learning rate, etc.) passed to the `params` variable # MAGIC 2. Metrics specified in `evals` (RMSE by default) # MAGIC 3. The trained XGBoost model file # MAGIC 4. Feature importances # MAGIC # MAGIC <img src="https://sguptasa.blob.core.windows.net/random/iiot_blog/iiot_mlflow_tracking.gif" width=800> # COMMAND ---------- # MAGIC %md ### 3c. Distributed Model Training - Predict Remaining Life # MAGIC Our second model predicts the remaining useful life of each Wind Turbine based on the current operating conditions. We have historical maintenance data that indicates when a replacement activity occured - this will be used to calculate the remaining life as our training label. 
# MAGIC # MAGIC Once again, we train an XGBoost model for each Wind Turbine to predict the remaining life given a set of operating parameters and weather conditions # COMMAND ---------- # Create a Spark Dataframe that contains the features and labels we need non_feature_cols = ['date','window','deviceid','winddirection','power_6_hours_ahead_predicted'] label_col = 'remaining_life' prediction_col = label_col + '_predicted' # Read in our feature table and select the columns of interest feature_df = spark.table('turbine_power_predictions').selectExpr(non_feature_cols + feature_cols + [label_col] + [f'0 as {prediction_col}']) # Register a Pandas UDF to distribute XGB model training using Spark @pandas_udf(feature_df.schema, PandasUDFType.GROUPED_MAP) def train_life_models(readings_pd): return train_distributed_xgb(readings_pd, 'life_prediction', label_col, prediction_col) # Run the Pandas UDF against our feature dataset - this will train 1 model per turbine and write the predictions to a table life_predictions = ( feature_df.groupBy('deviceid').apply(train_life_models) .write.format("delta").mode("overwrite") .partitionBy("date") .saveAsTable("turbine_life_predictions") ) # COMMAND ---------- # MAGIC %sql # MAGIC SELECT date, avg(remaining_life) as Actual_Life, avg(remaining_life_predicted) as Predicted_Life # MAGIC FROM turbine_life_predictions # MAGIC WHERE deviceid='WindTurbine-1' # MAGIC GROUP BY date ORDER BY date # COMMAND ---------- # MAGIC %md The models to predict remaining useful life have been trained and logged by MLflow. We can now move on to model deployment in AzureML. # COMMAND ---------- # MAGIC %md ## Step 4 - Model Deployment to AzureML # MAGIC Now that our models have been trained, we can deploy them in an automated way directly to a model serving environment like Azure ML. Below, we connect to an AzureML workspace, build a container image for the model, and deploy that image to Azure Container Instances (ACI) to be hosted for REST API calls. 
# MAGIC # MAGIC **Note:** This step can take up to 10 minutes to run due to images being created and deplyed in Azure ML. # MAGIC # MAGIC **Important:** This step requires authentication to Azure - open the link provided in the output of the cell in a new browser tab and use the code provided. # COMMAND ---------- # AML Workspace Information - replace with your workspace info aml_resource_group = dbutils.widgets.get("Resource Group") aml_subscription_id = dbutils.widgets.get("Subscription ID") aml_region = dbutils.widgets.get("Region") aml_workspace_name = "iot" turbine = "WindTurbine-1" power_model = "power_prediction" life_model = "life_prediction" # Connect to a workspace (replace widgets with your own workspace info) workspace = Workspace.create(name = aml_workspace_name, subscription_id = aml_subscription_id, resource_group = aml_resource_group, location = aml_region, exist_ok=True) # Retrieve the remaining_life and power_output experiments on WindTurbine-1, and get the best performing model (min RMSE) best_life_model = mlflow.search_runs(filter_string=f'params.deviceid="{turbine}" and params.model="{life_model}"')\ .dropna().sort_values("metrics.train-rmse")['artifact_uri'].iloc[0] + '/model' best_power_model = mlflow.search_runs(filter_string=f'params.deviceid="{turbine}" and params.model="{power_model}"')\ .dropna().sort_values("metrics.train-rmse")['artifact_uri'].iloc[0] + '/model' scoring_uris = {} for model, path in [('life',best_life_model),('power',best_power_model)]: # Build images for each of our two models in Azure Container Instances
# COMMAND ---------- print(f"-----Model URIs for Scoring:-----") print(f"Life Prediction URL: {scoring_uris['life']}") print(f"Power Prediction URL: {scoring_uris['power']}") # COMMAND ---------- # MAGIC %md You can view your model, it's deployments and URL endpoints by navigating to https://ml.azure.com/. # MAGIC # MAGIC <img src="https://sguptasa.blob.core.windows.net/random/iiot_blog/iiot_azureml.gif" width=800> # COMMAND ---------- # MAGIC %md ## Step 5 - Model Inference: Real-time Scoring # MAGIC We can now make HTTP REST calls from a web app, PowerBI, or directly from Databricks to the hosted model URI to score data directly # COMMAND ---------- # Retrieve the Scoring URL provided by AzureML power_uri = scoring_uris['power'] life_uri = scoring_uris['life'] # Construct a payload to send with the request payload = { 'angle':8, 'rpm':6, 'temperature':25, 'humidity':50, 'windspeed':5, 'power':150, 'age':10 } def score_data(uri, payload): rest_payload = json.dumps({"data": [list(payload.values())]}) response = requests.post(uri, data=rest_payload, headers={"Content-Type": "application/json"}) return json.loads(response.text) print(f'Current Operating Parameters: {payload}') print(f'Predicted power (in kwh) from model: {score_data(power_uri, payload)}') print(f'Predicted remaining life (in days) from model: {score_data(life_uri, payload)}') # COMMAND ---------- # MAGIC %md ### Step 6: Asset Optimization # MAGIC We can now identify the optimal operating conditions for maximizing power output while also maximizing asset useful life. # MAGIC # MAGIC \\(Revenue = Price\displaystyle\sum_1^{365} Power_t\\) # MAGIC # MAGIC \\(Cost = {365 \over Life_{rpm}} Price \displaystyle\sum_1^{24} Power_t \\) # MAGIC # MAGIC Price\displaystyle\sum_{t=1}^{24})\\) # MAGIC # MAGIC \\(Profit = Revenue - Cost\\) # MAGIC # MAGIC \\(Power_t\\) and \\(Life\\) will be calculated by scoring many different RPM values in AzureML. 
The results can be visualized to identify the RPM that yields the highest profit. # COMMAND ---------- # Construct a payload to send with the request payload = { 'angle':8, 'rpm':6, 'temperature':25, 'humidity':50, 'windspeed':5, 'power':150, 'age':10 } # Iterate through 50 different RPM configurations and capture the predicted power and remaining life at each RPM results = [] for rpm in range(1,15): payload['rpm'] = rpm expected_power = score_data(power_uri, payload)[0] payload['power'] = expected_power expected_life = -score_data(life_uri, payload)[0] results.append((rpm, expected_power, expected_life)) # Calculalte the Revenue, Cost and Profit generated for each RPM configuration optimization_df = pd.DataFrame(results, columns=['RPM', 'Expected Power', 'Expected Life']) optimization_df['Revenue'] = optimization_df['Expected Power'] * 24 * 365 optimization_df['Cost'] = optimization_df['Expected Power'] * 24 * 365 / optimization_df['Expected Life'] optimization_df['Profit'] = optimization_df['Revenue'] + optimization_df['Cost'] display(optimization_df) # COMMAND ---------- # MAGIC %md The optimal operating parameters for **WindTurbine-1** given the specified weather conditions is **11 RPM** for generating a maximum profit of **$1.4M**! Your results may vary due to the random nature of the sensor readings. # COMMAND ---------- # MAGIC %md # MAGIC ### Step 7: Data Serving and Visualization (not included in notebook) # MAGIC Now that our models are created and the data is scored, we can use Azure Synapse with PowerBI to perform data warehousing and analyltic reporting to generate a report like the one below. # MAGIC <img src="https://sguptasa.blob.core.windows.net/random/iiot_blog/PBI_report.gif" width=800>
# Body of the per-model loop opened in the previous cell: `model` is the model
# name ('life'/'power') and `path` its MLflow artifact URI.
# Build an Azure Container Image for the model via MLflow's AzureML integration.
print(f"-----Building image for {model} model-----")
model_image, azure_model = mlflow.azureml.build_image(model_uri=path,
                                                      workspace=workspace,
                                                      model_name=model,
                                                      image_name=model,
                                                      description=f"XGBoost model to predict {model} of a turbine",
                                                      synchronous=True)
# Block until the image build completes (can take several minutes).
model_image.wait_for_creation(show_output=True)

# Deploy web services to host each model as a REST API
print(f"-----Deploying image for {model} model-----")
# Random suffix avoids ACI name collisions with earlier deployments of the same model.
dev_webservice_name = model + random_string(10)
dev_webservice_deployment_config = AciWebservice.deploy_configuration()
dev_webservice = Webservice.deploy_from_image(name=dev_webservice_name, image=model_image, deployment_config=dev_webservice_deployment_config, workspace=workspace)
dev_webservice.wait_for_deployment()

# Get the URI for sending REST requests to
# (collected into scoring_uris for the inference cells below)
scoring_uris[model] = dev_webservice.scoring_uri
conditional_block
IIoT End-to-End (Pt 2).py
# Databricks notebook source # MAGIC %md # End to End Industrial IoT (IIoT) on Azure Databricks # MAGIC ## Part 2 - Machine Learning # MAGIC This notebook demonstrates the following architecture for IIoT Ingest, Processing and Analytics on Azure. The following architecture is implemented for the demo. # MAGIC <img src="https://sguptasa.blob.core.windows.net/random/iiot_blog/end_to_end_architecture.png" width=800> # MAGIC # MAGIC The notebook is broken into sections following these steps: # MAGIC 3. **Machine Learning** - train XGBoost regression models using distributed ML to predict power output and asset remaining life on historical sensor data # MAGIC 4. **Model Deployment** - deploy trained models for real-time serving in Azure ML services # MAGIC 5. **Model Inference** - score real data instantly against hosted models via REST API # COMMAND ---------- # AzureML Workspace info (name, region, resource group and subscription ID) for model deployment dbutils.widgets.text("Subscription ID","<your Azure subscription ID>","Subscription ID") dbutils.widgets.text("Resource Group","<your Azure resource group name>","Resource Group") dbutils.widgets.text("Region","<your Azure region>","Region") dbutils.widgets.text("Storage Account","<your ADLS Gen 2 account name>","Storage Account") # COMMAND ---------- # MAGIC %md ## Step 1 - Environment Setup # MAGIC # MAGIC The pre-requisites are listed below: # MAGIC # MAGIC ### Azure Services Required # MAGIC * ADLS Gen 2 Storage account with a container called `iot` # MAGIC * Azure Machine Learning Workspace called `iot` # MAGIC # MAGIC ### Azure Databricks Configuration Required # MAGIC * 3-node (min) Databricks Cluster running **DBR 7.0ML+** and the following libraries: # MAGIC * **MLflow[AzureML]** - PyPI library `azureml-mlflow` # MAGIC * **Azure Event Hubs Connector for Databricks** - Maven coordinates `com.microsoft.azure:azure-eventhubs-spark_2.12:2.3.16` # MAGIC * The following Secrets defined in scope `iot` # MAGIC * 
`adls_key` - Access Key to ADLS storage account **(Important - use the [Access Key](https://raw.githubusercontent.com/tomatoTomahto/azure_databricks_iot/master/bricks.com/blog/2020/03/27/data-exfiltration-protection-with-azure-databricks.html))** # MAGIC * The following notebook widgets populated: # MAGIC * `Subscription ID` - subscription ID of your Azure ML Workspace # MAGIC * `Resource Group` - resource group name of your Azure ML Workspace # MAGIC * `Region` - Azure region of your Azure ML Workspace # MAGIC * `Storage Account` - Name of your storage account # MAGIC * **Part 1 Notebook Run to generate and process the data** (this can be found [here](https://databricks.com/notebooks/iiot/iiot-end-to-end-part-1.html)). Ensure the following tables have been created: # MAGIC * **turbine_maintenance** - Maintenance dates for each Wind Turbine # MAGIC * **turbine_power** - Hourly power output for each Wind Turbine # MAGIC * **turbine_enriched** - Hourly turbine sensor readinigs (RPM, Angle) enriched with weather readings (temperature, wind speed/direction, humidity) # MAGIC * **gold_readings** - Combined view containing all 3 tables # COMMAND ---------- # Setup access to storage account for temp data when pushing to Synapse storage_account = dbutils.widgets.get("Storage Account") spark.conf.set(f"fs.azure.account.key.{storage_account}.dfs.core.windows.net", dbutils.secrets.get("iot","adls_key")) # Setup storage locations for all data ROOT_PATH = f"abfss://iot@{storage_account}.dfs.core.windows.net/" # Pyspark and ML Imports import os, json, requests from pyspark.sql import functions as F from pyspark.sql.functions import pandas_udf, PandasUDFType import numpy as np import pandas as pd import xgboost as xgb import mlflow.xgboost import mlflow.azureml from azureml.core import Workspace from azureml.core.webservice import AciWebservice, Webservice import random, string # Random String generator for ML models served in AzureML random_string = lambda length: 
''.join(random.SystemRandom().choice(string.ascii_lowercase) for _ in range(length)) # COMMAND ---------- # MAGIC %md ## Step 3 - Machine Learning # MAGIC Now that our data is flowing reliably from our sensor devices into an enriched Delta table in Data Lake storage, we can start to build ML models to predict power output and remaining life of our assets using historical sensor, weather, power and maintenance data. # MAGIC # MAGIC We create two models ***for each Wind Turbine***: # MAGIC 1. Turbine Power Output - using current readings for turbine operating parameters (angle, RPM) and weather (temperature, humidity, etc.), predict the expected power output 6 hours from now # MAGIC 2. Turbine Remaining Life - predict the remaining life in days until the next maintenance event # MAGIC # MAGIC <img src="https://sguptasa.blob.core.windows.net/random/iiot_blog/turbine_models.png" width=800> # MAGIC # MAGIC We will use the XGBoost framework to train regression models. Due to the size of the data and number of Wind Turbines, we will use Spark UDFs to distribute training across all the nodes in our cluster. # COMMAND ---------- # MAGIC %md ### 3a. Feature Engineering # MAGIC In order to predict power output 6 hours ahead, we need to first time-shift our data to create our label column. We can do this easily using Spark Window partitioning. # MAGIC # MAGIC In order to predict remaining life, we need to backtrace the remaining life from the maintenance events. We can do this easily using cross joins. 
The following diagram illustrates the ML Feature Engineering pipeline: # MAGIC # MAGIC <img src="https://sguptasa.blob.core.windows.net/random/iiot_blog/ml_pipeline.png" width=800> # COMMAND ---------- # MAGIC %sql # MAGIC -- Calculate the age of each turbine and the remaining life in days # MAGIC CREATE OR REPLACE VIEW turbine_age AS # MAGIC WITH reading_dates AS (SELECT distinct date, deviceid FROM turbine_power), # MAGIC maintenance_dates AS ( # MAGIC SELECT d.*, datediff(nm.date, d.date) as datediff_next, datediff(d.date, lm.date) as datediff_last # MAGIC FROM reading_dates d LEFT JOIN turbine_maintenance nm ON (d.deviceid=nm.deviceid AND d.date<=nm.date) # MAGIC LEFT JOIN turbine_maintenance lm ON (d.deviceid=lm.deviceid AND d.date>=lm.date )) # MAGIC SELECT date, deviceid, ifnull(min(datediff_last),0) AS age, ifnull(min(datediff_next),0) AS remaining_life # MAGIC FROM maintenance_dates # MAGIC GROUP BY deviceid, date; # MAGIC # MAGIC -- Calculate the power 6 hours ahead using Spark Windowing and build a feature_table to feed into our ML models # MAGIC CREATE OR REPLACE VIEW feature_table AS # MAGIC SELECT r.*, age, remaining_life, # MAGIC LEAD(power, 72, power) OVER (PARTITION BY r.deviceid ORDER BY window) as power_6_hours_ahead # MAGIC FROM gold_readings r JOIN turbine_age a ON (r.date=a.date AND r.deviceid=a.deviceid) # MAGIC WHERE r.date < CURRENT_DATE(); # COMMAND ---------- # MAGIC %sql # MAGIC SELECT window, power, power_6_hours_ahead FROM feature_table WHERE deviceid='WindTurbine-1' # COMMAND ---------- # MAGIC %sql # MAGIC SELECT date, avg(age) as age, avg(remaining_life) as life FROM feature_table WHERE deviceid='WindTurbine-1' GROUP BY date ORDER BY date # COMMAND ---------- # MAGIC %md ### 3b. 
Distributed Model Training - Predict Power Output # MAGIC [Pandas UDFs](https://docs.microsoft.com/en-us/azure/databricks/spark/latest/spark-sql/udf-python-pandas?toc=https%3A%2F%2Fdocs.microsoft.com%2Fen-us%2Fazure%2Fazure-databricks%2Ftoc.json&bc=https%3A%2F%2Fdocs.microsoft.com%2Fen-us%2Fazure%2Fbread%2Ftoc.json) allow us to vectorize Pandas code across multiple nodes in a cluster. Here we create a UDF to train an XGBoost Regressor model against all the historic data for a particular Wind Turbine. We use a Grouped Map UDF as we perform this model training on the Wind Turbine group level. # COMMAND ---------- # Create a function to train a XGBoost Regressor on a turbine's data def train_distributed_xgb(readings_pd, model_type, label_col, prediction_col): mlflow.xgboost.autolog() with mlflow.start_run(): # Log the model type and device ID mlflow.log_param('deviceid', readings_pd['deviceid'][0]) mlflow.log_param('model', model_type) # Train an XGBRegressor on the data for this Turbine alg = xgb.XGBRegressor() train_dmatrix = xgb.DMatrix(data=readings_pd[feature_cols].astype('float'),label=readings_pd[label_col]) params = {'learning_rate': 0.5, 'alpha':10, 'colsample_bytree': 0.5, 'max_depth': 5} model = xgb.train(params=params, dtrain=train_dmatrix, evals=[(train_dmatrix, 'train')]) # Make predictions on the dataset and return the results readings_pd[prediction_col] = model.predict(train_dmatrix) return readings_pd # Create a Spark Dataframe that contains the features and labels we need non_feature_cols = ['date','window','deviceid','winddirection','remaining_life'] feature_cols = ['angle','rpm','temperature','humidity','windspeed','power','age'] label_col = 'power_6_hours_ahead' prediction_col = label_col + '_predicted' # Read in our feature table and select the columns of interest feature_df = spark.table('feature_table').selectExpr(non_feature_cols + feature_cols + [label_col] + [f'0 as {prediction_col}']) # Register a Pandas UDF to distribute XGB model training 
using Spark @pandas_udf(feature_df.schema, PandasUDFType.GROUPED_MAP) def train_power_models(readings_pd): return train_distributed_xgb(readings_pd, 'power_prediction', label_col, prediction_col) # Run the Pandas UDF against our feature dataset - this will train 1 model for each turbine power_predictions = feature_df.groupBy('deviceid').apply(train_power_models) # Save predictions to storage power_predictions.write.format("delta").mode("overwrite").partitionBy("date").saveAsTable("turbine_power_predictions") # COMMAND ---------- # MAGIC %sql
# MAGIC %md #### Automated Model Tracking in Databricks # MAGIC As you train the models, notice how Databricks-managed MLflow automatically tracks each run in the "Runs" tab of the notebook. You can open each run and view the parameters, metrics, models and model artifacts that are captured by MLflow Autologging. For XGBoost Regression models, MLflow tracks: # MAGIC 1. Any model parameters (alpha, colsample, learning rate, etc.) passed to the `params` variable # MAGIC 2. Metrics specified in `evals` (RMSE by default) # MAGIC 3. The trained XGBoost model file # MAGIC 4. Feature importances # MAGIC # MAGIC <img src="https://sguptasa.blob.core.windows.net/random/iiot_blog/iiot_mlflow_tracking.gif" width=800> # COMMAND ---------- # MAGIC %md ### 3c. Distributed Model Training - Predict Remaining Life # MAGIC Our second model predicts the remaining useful life of each Wind Turbine based on the current operating conditions. We have historical maintenance data that indicates when a replacement activity occured - this will be used to calculate the remaining life as our training label. 
# MAGIC # MAGIC Once again, we train an XGBoost model for each Wind Turbine to predict the remaining life given a set of operating parameters and weather conditions # COMMAND ---------- # Create a Spark Dataframe that contains the features and labels we need non_feature_cols = ['date','window','deviceid','winddirection','power_6_hours_ahead_predicted'] label_col = 'remaining_life' prediction_col = label_col + '_predicted' # Read in our feature table and select the columns of interest feature_df = spark.table('turbine_power_predictions').selectExpr(non_feature_cols + feature_cols + [label_col] + [f'0 as {prediction_col}']) # Register a Pandas UDF to distribute XGB model training using Spark @pandas_udf(feature_df.schema, PandasUDFType.GROUPED_MAP) def train_life_models(readings_pd): return train_distributed_xgb(readings_pd, 'life_prediction', label_col, prediction_col) # Run the Pandas UDF against our feature dataset - this will train 1 model per turbine and write the predictions to a table life_predictions = ( feature_df.groupBy('deviceid').apply(train_life_models) .write.format("delta").mode("overwrite") .partitionBy("date") .saveAsTable("turbine_life_predictions") ) # COMMAND ---------- # MAGIC %sql # MAGIC SELECT date, avg(remaining_life) as Actual_Life, avg(remaining_life_predicted) as Predicted_Life # MAGIC FROM turbine_life_predictions # MAGIC WHERE deviceid='WindTurbine-1' # MAGIC GROUP BY date ORDER BY date # COMMAND ---------- # MAGIC %md The models to predict remaining useful life have been trained and logged by MLflow. We can now move on to model deployment in AzureML. # COMMAND ---------- # MAGIC %md ## Step 4 - Model Deployment to AzureML # MAGIC Now that our models have been trained, we can deploy them in an automated way directly to a model serving environment like Azure ML. Below, we connect to an AzureML workspace, build a container image for the model, and deploy that image to Azure Container Instances (ACI) to be hosted for REST API calls. 
# MAGIC # MAGIC **Note:** This step can take up to 10 minutes to run due to images being created and deplyed in Azure ML. # MAGIC # MAGIC **Important:** This step requires authentication to Azure - open the link provided in the output of the cell in a new browser tab and use the code provided. # COMMAND ---------- # AML Workspace Information - replace with your workspace info aml_resource_group = dbutils.widgets.get("Resource Group") aml_subscription_id = dbutils.widgets.get("Subscription ID") aml_region = dbutils.widgets.get("Region") aml_workspace_name = "iot" turbine = "WindTurbine-1" power_model = "power_prediction" life_model = "life_prediction" # Connect to a workspace (replace widgets with your own workspace info) workspace = Workspace.create(name = aml_workspace_name, subscription_id = aml_subscription_id, resource_group = aml_resource_group, location = aml_region, exist_ok=True) # Retrieve the remaining_life and power_output experiments on WindTurbine-1, and get the best performing model (min RMSE) best_life_model = mlflow.search_runs(filter_string=f'params.deviceid="{turbine}" and params.model="{life_model}"')\ .dropna().sort_values("metrics.train-rmse")['artifact_uri'].iloc[0] + '/model' best_power_model = mlflow.search_runs(filter_string=f'params.deviceid="{turbine}" and params.model="{power_model}"')\ .dropna().sort_values("metrics.train-rmse")['artifact_uri'].iloc[0] + '/model' scoring_uris = {} for model, path in [('life',best_life_model),('power',best_power_model)]: # Build images for each of our two models in Azure Container Instances print(f"-----Building image for {model} model-----") model_image, azure_model = mlflow.azureml.build_image(model_uri=path, workspace=workspace, model_name=model, image_name=model, description=f"XGBoost model to predict {model} of a turbine", synchronous=True) model_image.wait_for_creation(show_output=True) # Deploy web services to host each model as a REST API print(f"-----Deploying image for {model} model-----") 
dev_webservice_name = model + random_string(10) dev_webservice_deployment_config = AciWebservice.deploy_configuration() dev_webservice = Webservice.deploy_from_image(name=dev_webservice_name, image=model_image, deployment_config=dev_webservice_deployment_config, workspace=workspace) dev_webservice.wait_for_deployment() # Get the URI for sending REST requests to scoring_uris[model] = dev_webservice.scoring_uri # COMMAND ---------- print(f"-----Model URIs for Scoring:-----") print(f"Life Prediction URL: {scoring_uris['life']}") print(f"Power Prediction URL: {scoring_uris['power']}") # COMMAND ---------- # MAGIC %md You can view your model, it's deployments and URL endpoints by navigating to https://ml.azure.com/. # MAGIC # MAGIC <img src="https://sguptasa.blob.core.windows.net/random/iiot_blog/iiot_azureml.gif" width=800> # COMMAND ---------- # MAGIC %md ## Step 5 - Model Inference: Real-time Scoring # MAGIC We can now make HTTP REST calls from a web app, PowerBI, or directly from Databricks to the hosted model URI to score data directly # COMMAND ---------- # Retrieve the Scoring URL provided by AzureML power_uri = scoring_uris['power'] life_uri = scoring_uris['life'] # Construct a payload to send with the request payload = { 'angle':8, 'rpm':6, 'temperature':25, 'humidity':50, 'windspeed':5, 'power':150, 'age':10 } def score_data(uri, payload): rest_payload = json.dumps({"data": [list(payload.values())]}) response = requests.post(uri, data=rest_payload, headers={"Content-Type": "application/json"}) return json.loads(response.text) print(f'Current Operating Parameters: {payload}') print(f'Predicted power (in kwh) from model: {score_data(power_uri, payload)}') print(f'Predicted remaining life (in days) from model: {score_data(life_uri, payload)}') # COMMAND ---------- # MAGIC %md ### Step 6: Asset Optimization # MAGIC We can now identify the optimal operating conditions for maximizing power output while also maximizing asset useful life. 
# MAGIC # MAGIC \\(Revenue = Price\displaystyle\sum_1^{365} Power_t\\) # MAGIC # MAGIC \\(Cost = {365 \over Life_{rpm}} Price \displaystyle\sum_1^{24} Power_t \\) # MAGIC # MAGIC Price\displaystyle\sum_{t=1}^{24})\\) # MAGIC # MAGIC \\(Profit = Revenue - Cost\\) # MAGIC # MAGIC \\(Power_t\\) and \\(Life\\) will be calculated by scoring many different RPM values in AzureML. The results can be visualized to identify the RPM that yields the highest profit. # COMMAND ---------- # Construct a payload to send with the request payload = { 'angle':8, 'rpm':6, 'temperature':25, 'humidity':50, 'windspeed':5, 'power':150, 'age':10 } # Iterate through 50 different RPM configurations and capture the predicted power and remaining life at each RPM results = [] for rpm in range(1,15): payload['rpm'] = rpm expected_power = score_data(power_uri, payload)[0] payload['power'] = expected_power expected_life = -score_data(life_uri, payload)[0] results.append((rpm, expected_power, expected_life)) # Calculalte the Revenue, Cost and Profit generated for each RPM configuration optimization_df = pd.DataFrame(results, columns=['RPM', 'Expected Power', 'Expected Life']) optimization_df['Revenue'] = optimization_df['Expected Power'] * 24 * 365 optimization_df['Cost'] = optimization_df['Expected Power'] * 24 * 365 / optimization_df['Expected Life'] optimization_df['Profit'] = optimization_df['Revenue'] + optimization_df['Cost'] display(optimization_df) # COMMAND ---------- # MAGIC %md The optimal operating parameters for **WindTurbine-1** given the specified weather conditions is **11 RPM** for generating a maximum profit of **$1.4M**! Your results may vary due to the random nature of the sensor readings. 
# COMMAND ---------- # MAGIC %md # MAGIC ### Step 7: Data Serving and Visualization (not included in notebook) # MAGIC Now that our models are created and the data is scored, we can use Azure Synapse with PowerBI to perform data warehousing and analyltic reporting to generate a report like the one below. # MAGIC <img src="https://sguptasa.blob.core.windows.net/random/iiot_blog/PBI_report.gif" width=800>
# MAGIC -- Plot actuals vs. predicted # MAGIC SELECT date, deviceid, avg(power_6_hours_ahead) as actual, avg(power_6_hours_ahead_predicted) as predicted FROM turbine_power_predictions GROUP BY date, deviceid # COMMAND ----------
random_line_split
builders.rs
//! Builder types used for patches and other complex data structures. //! //! These types do not usually need to be imported, but the methods available //! on them are very relevant to where they are used. use serde_json::Value; use chrono::offset::FixedOffset; use chrono::DateTime; use model::*; use Object; macro_rules! builder { ($(#[$attr:meta] $name:ident($inner:ty);)*) => { $( #[$attr] #[derive(Serialize, Deserialize)] pub struct $name($inner); impl $name { #[doc(hidden)] #[inline(always)] pub fn __build<F: FnOnce($name) -> $name>(f: F) -> $inner where $inner: Default { Self::__apply(f, Default::default()) } #[doc(hidden)] pub fn __apply<F: FnOnce($name) -> $name>(f: F, inp: $inner) -> $inner { f($name(inp)).0 } /// Merge this builder's contents with another of the same type. /// Keys in `other` will override those in `self`. /// /// This method is intended to be used with deserialized /// instances. Note that deserialization *does not* check that /// the keys are valid for the relevant API call. /// /// ```ignore /// discord.edit_server(|b| b /// .merge(serde_json::from_str(r#"{"name":"My Server"}"#)?)) /// ``` pub fn merge(mut self, other: $name) -> $name { self.0.extend(other.0); self } } )* } } builder! { /// Patch content for the `edit_server` call. EditServer(Object); /// Patch content for the `edit_channel` call. EditChannel(Object); /// Patch content for the `edit_member` call. EditMember(Object); /// Patch content for the `edit_profile` call. EditProfile(Object); /// Patch content for the `edit_user_profile` call. EditUserProfile(Object); /// Patch content for the `edit_role` call. EditRole(Object); /// Content for the `send_message` call. SendMessage(Object); /// `allowed_mentions` object for use within `send_message`. AllowedMentions(Object); /// Patch content for the `send_embed` call. EmbedBuilder(Object); /// Inner patch content for the `send_embed` call. EmbedFooterBuilder(Object); /// Inner patch content for the `send_embed` call. 
EmbedAuthorBuilder(Object); /// Inner patch content for the `send_embed` call. EmbedFieldsBuilder(Vec<Value>); } macro_rules! set { ($self:ident, $key:expr, $($rest:tt)*) => {{ {let mut s = $self; s.0.insert($key.into(), json!($($rest)*)); s} }} } impl EditServer { /// Edit the server's name. pub fn name(self, name: &str) -> Self { set!(self, "name", name) } /// Edit the server's voice region. pub fn region(self, region: &str) -> Self { set!(self, "region", region) } /// Edit the server's icon. Use `None` to remove the icon. pub fn icon(self, icon: Option<&str>) -> Self { set!(self, "icon", icon) } /// Edit the server's AFK channel. Use `None` to select no AFK channel. pub fn afk_channel(self, channel: Option<ChannelId>) -> Self { set!(self, "afk_channel_id", channel) } /// Edit the server's AFK timeout. pub fn afk_timeout(self, timeout: u64) -> Self { set!(self, "afk_timeout", timeout) } /// Transfer ownership of the server to a new owner. pub fn owner(self, owner: UserId) -> Self { set!(self, "owner_id", owner.0) } /// Edit the verification level of the server. pub fn verification_level(self, verification_level: VerificationLevel) -> Self { set!(self, "verification_level", verification_level) } /// Edit the server's splash. Use `None` to remove the splash. pub fn splash(self, splash: Option<&str>) -> Self { set!(self, "splash", splash) } } impl EditChannel { /// Edit the channel's name. pub fn name(self, name: &str) -> Self { set!(self, "name", name) } /// Edit the text channel's topic. pub fn topic(self, topic: &str) -> Self { set!(self, "topic", topic) } /// Edit the channel's position in the list. pub fn position(self, position: u64) -> Self { set!(self, "position", position) } /// Edit the voice channel's bitrate. pub fn
(self, bitrate: u64) -> Self { set!(self, "bitrate", bitrate) } /// Edit the voice channel's user limit. Zero (`0`) means unlimited. pub fn user_limit(self, user_limit: u64) -> Self { set!(self, "user_limit", user_limit) } } impl EditMember { /// Edit the member's nickname. Supply the empty string to remove a nickname. pub fn nickname(self, nick: &str) -> Self { set!(self, "nick", nick) } /// Edit whether the member is server-muted. pub fn mute(self, mute: bool) -> Self { set!(self, "mute", mute) } /// Edit whether the member is server-deafened. pub fn deaf(self, deafen: bool) -> Self { set!(self, "deaf", deafen) } /// Edit the member's assigned roles. pub fn roles(self, roles: &[RoleId]) -> Self { set!(self, "roles", roles) } /// Move the member to another voice channel. pub fn channel(self, channel: ChannelId) -> Self { set!(self, "channel_id", channel.0) } } impl EditProfile { /// Edit the user's username. Must be between 2 and 32 characters long. pub fn username(self, username: &str) -> Self { set!(self, "username", username) } /// Edit the user's avatar. Use `None` to remove the avatar. pub fn avatar(self, icon: Option<&str>) -> Self { set!(self, "avatar", icon) } } impl EditUserProfile { /// Provide the user's current password for authentication. Required if /// the email or password is being changed. pub fn password(self, password: &str) -> Self { set!(self, "password", password) } /// Edit the user's email address. pub fn email(self, email: &str) -> Self { set!(self, "email", email) } /// Edit the user's password. pub fn new_password(self, password: &str) -> Self { set!(self, "new_password", password) } /// Edit the user's username. Must be between 2 and 32 characters long. pub fn username(self, username: &str) -> Self { set!(self, "username", username) } /// Edit the user's avatar. Use `None` to remove the avatar. pub fn avatar(self, icon: Option<&str>) -> Self { set!(self, "avatar", icon) } } impl EditRole { /// Edit the role's name. 
Supply the empty string to remove a name. pub fn name(self, name: &str) -> Self { set!(self, "name", name) } /// Edit the role's permissions. pub fn permissions(self, permissions: Permissions) -> Self { set!(self, "permissions", permissions) } /// Edit the role's color. Set to zero for default. pub fn color(self, color: u64) -> Self { set!(self, "color", color) } /// Edit the role's hoist status (whether the role should be displayed separately in the sidebar). pub fn hoist(self, hoist: bool) -> Self { set!(self, "hoist", hoist) } /// Edit the role's mentionability, if the role can be mentioned. pub fn mentionable(self, mentionable: bool) -> Self { set!(self, "mentionable", mentionable) } } impl SendMessage { /// Set the text content of the message. pub fn content(self, content: &str) -> Self { set!(self, "content", content) } /// Set a nonce that can be used for optimistic message sending. pub fn nonce(self, nonce: &str) -> Self { set!(self, "nonce", nonce) } /// Set to true to use text-to-speech. pub fn tts(self, tts: bool) -> Self { set!(self, "tts", tts) } /// Embed rich content. pub fn embed<F: FnOnce(EmbedBuilder) -> EmbedBuilder>(self, f: F) -> Self { set!(self, "embed", EmbedBuilder::__build(f)) } /// Restrict allowed mentions for this message. pub fn allowed_mentions<F: FnOnce(AllowedMentions) -> AllowedMentions>(self, f: F) -> Self { set!(self, "allowed_mentions", AllowedMentions::__build(f)) } /// Reply to the given message, optionally mentioning the sender. /// /// The given `message_id` must be in the same channel that this message is /// being sent to. pub fn reply(self, message_id: MessageId, mention: bool) -> Self { set!(self, "message_reference", json! {{ "message_id": message_id, }}).allowed_mentions(|b| b.replied_user(mention)) } /// Change the message's flags. /// /// Can only be set while editing. Only `SUPPRESS_EMBEDS` can be edited on /// request. 
pub fn flags(self, flags: MessageFlags) -> Self { set!(self, "flags", flags) } // TODO: file, payload_json, message_reference } impl AllowedMentions { // TODO: parse, roles, users /// Set to `false` to disable mentioning a replied-to user. pub fn replied_user(self, replied_user: bool) -> Self { set!(self, "replied_user", replied_user) } } impl EmbedBuilder { /// Add the "title of embed". pub fn title(self, title: &str) -> Self { set!(self, "title", title) } /// Add the "description of embed". pub fn description(self, description: &str) -> Self { set!(self, "description", description) } /// Add the "url of embed". pub fn url(self, url: &str) -> Self { set!(self, "url", url) } /// Add the "timestamp of embed content". pub fn timestamp(self, timestamp: DateTime<FixedOffset>) -> Self { set!(self, "timestamp", timestamp.to_rfc3339()) } /// Add the "color code of the embed". pub fn color(self, color: u64) -> Self { set!(self, "color", color) } /// Add "footer information". See the `EmbedFooterBuilder` struct for the editable fields. pub fn footer<F: FnOnce(EmbedFooterBuilder) -> EmbedFooterBuilder>(self, f: F) -> Self { set!(self, "footer", EmbedFooterBuilder::__build(f)) } /// Add "source url of image". Only supports http(s). pub fn image(self, url: &str) -> Self { set!(self, "image", { "url": url }) } /// Add "source url of thumbnail". Only supports http(s). pub fn thumbnail(self, url: &str) -> Self { set!(self, "thumbnail", { "url": url }) } /// Add "author information". See the `EmbedAuthorBuilder` struct for the editable fields. pub fn author<F: FnOnce(EmbedAuthorBuilder) -> EmbedAuthorBuilder>(self, f: F) -> Self { set!(self, "author", EmbedAuthorBuilder::__build(f)) } /// Add "fields information". See the `EmbedFieldsBuilder` struct for the editable fields. pub fn fields<F: FnOnce(EmbedFieldsBuilder) -> EmbedFieldsBuilder>(self, f: F) -> Self { set!(self, "fields", EmbedFieldsBuilder::__build(f)) } } impl EmbedFooterBuilder { /// Add the "footer text". 
pub fn text(self, text: &str) -> Self { set!(self, "text", text) } /// Add the "url of footer icon". Only the http(s) protocols are supported. pub fn icon_url(self, icon_url: &str) -> Self { set!(self, "icon_url", icon_url) } } impl EmbedAuthorBuilder { /// Add the "name of author". pub fn name(self, name: &str) -> Self { set!(self, "name", name) } /// Add the "url of author". pub fn url(self, url: &str) -> Self { set!(self, "url", url) } /// Add the "url of author icon". Only the http(s) protocols are supported. pub fn icon_url(self, icon_url: &str) -> Self { set!(self, "icon_url", icon_url) } } impl EmbedFieldsBuilder { /// Add an entire field structure, representing a mapping from `name` to `value`. /// /// `inline` determines "whether or not this field should display inline". pub fn field(mut self, name: &str, value: &str, inline: bool) -> Self { self.0.push(json! {{ "name": name, "value": value, "inline": inline, }}); self } }
bitrate
identifier_name
builders.rs
//! Builder types used for patches and other complex data structures. //! //! These types do not usually need to be imported, but the methods available //! on them are very relevant to where they are used. use serde_json::Value; use chrono::offset::FixedOffset; use chrono::DateTime; use model::*; use Object; macro_rules! builder { ($(#[$attr:meta] $name:ident($inner:ty);)*) => { $( #[$attr] #[derive(Serialize, Deserialize)] pub struct $name($inner);
#[inline(always)] pub fn __build<F: FnOnce($name) -> $name>(f: F) -> $inner where $inner: Default { Self::__apply(f, Default::default()) } #[doc(hidden)] pub fn __apply<F: FnOnce($name) -> $name>(f: F, inp: $inner) -> $inner { f($name(inp)).0 } /// Merge this builder's contents with another of the same type. /// Keys in `other` will override those in `self`. /// /// This method is intended to be used with deserialized /// instances. Note that deserialization *does not* check that /// the keys are valid for the relevant API call. /// /// ```ignore /// discord.edit_server(|b| b /// .merge(serde_json::from_str(r#"{"name":"My Server"}"#)?)) /// ``` pub fn merge(mut self, other: $name) -> $name { self.0.extend(other.0); self } } )* } } builder! { /// Patch content for the `edit_server` call. EditServer(Object); /// Patch content for the `edit_channel` call. EditChannel(Object); /// Patch content for the `edit_member` call. EditMember(Object); /// Patch content for the `edit_profile` call. EditProfile(Object); /// Patch content for the `edit_user_profile` call. EditUserProfile(Object); /// Patch content for the `edit_role` call. EditRole(Object); /// Content for the `send_message` call. SendMessage(Object); /// `allowed_mentions` object for use within `send_message`. AllowedMentions(Object); /// Patch content for the `send_embed` call. EmbedBuilder(Object); /// Inner patch content for the `send_embed` call. EmbedFooterBuilder(Object); /// Inner patch content for the `send_embed` call. EmbedAuthorBuilder(Object); /// Inner patch content for the `send_embed` call. EmbedFieldsBuilder(Vec<Value>); } macro_rules! set { ($self:ident, $key:expr, $($rest:tt)*) => {{ {let mut s = $self; s.0.insert($key.into(), json!($($rest)*)); s} }} } impl EditServer { /// Edit the server's name. pub fn name(self, name: &str) -> Self { set!(self, "name", name) } /// Edit the server's voice region. 
pub fn region(self, region: &str) -> Self { set!(self, "region", region) } /// Edit the server's icon. Use `None` to remove the icon. pub fn icon(self, icon: Option<&str>) -> Self { set!(self, "icon", icon) } /// Edit the server's AFK channel. Use `None` to select no AFK channel. pub fn afk_channel(self, channel: Option<ChannelId>) -> Self { set!(self, "afk_channel_id", channel) } /// Edit the server's AFK timeout. pub fn afk_timeout(self, timeout: u64) -> Self { set!(self, "afk_timeout", timeout) } /// Transfer ownership of the server to a new owner. pub fn owner(self, owner: UserId) -> Self { set!(self, "owner_id", owner.0) } /// Edit the verification level of the server. pub fn verification_level(self, verification_level: VerificationLevel) -> Self { set!(self, "verification_level", verification_level) } /// Edit the server's splash. Use `None` to remove the splash. pub fn splash(self, splash: Option<&str>) -> Self { set!(self, "splash", splash) } } impl EditChannel { /// Edit the channel's name. pub fn name(self, name: &str) -> Self { set!(self, "name", name) } /// Edit the text channel's topic. pub fn topic(self, topic: &str) -> Self { set!(self, "topic", topic) } /// Edit the channel's position in the list. pub fn position(self, position: u64) -> Self { set!(self, "position", position) } /// Edit the voice channel's bitrate. pub fn bitrate(self, bitrate: u64) -> Self { set!(self, "bitrate", bitrate) } /// Edit the voice channel's user limit. Zero (`0`) means unlimited. pub fn user_limit(self, user_limit: u64) -> Self { set!(self, "user_limit", user_limit) } } impl EditMember { /// Edit the member's nickname. Supply the empty string to remove a nickname. pub fn nickname(self, nick: &str) -> Self { set!(self, "nick", nick) } /// Edit whether the member is server-muted. pub fn mute(self, mute: bool) -> Self { set!(self, "mute", mute) } /// Edit whether the member is server-deafened. 
pub fn deaf(self, deafen: bool) -> Self { set!(self, "deaf", deafen) } /// Edit the member's assigned roles. pub fn roles(self, roles: &[RoleId]) -> Self { set!(self, "roles", roles) } /// Move the member to another voice channel. pub fn channel(self, channel: ChannelId) -> Self { set!(self, "channel_id", channel.0) } } impl EditProfile { /// Edit the user's username. Must be between 2 and 32 characters long. pub fn username(self, username: &str) -> Self { set!(self, "username", username) } /// Edit the user's avatar. Use `None` to remove the avatar. pub fn avatar(self, icon: Option<&str>) -> Self { set!(self, "avatar", icon) } } impl EditUserProfile { /// Provide the user's current password for authentication. Required if /// the email or password is being changed. pub fn password(self, password: &str) -> Self { set!(self, "password", password) } /// Edit the user's email address. pub fn email(self, email: &str) -> Self { set!(self, "email", email) } /// Edit the user's password. pub fn new_password(self, password: &str) -> Self { set!(self, "new_password", password) } /// Edit the user's username. Must be between 2 and 32 characters long. pub fn username(self, username: &str) -> Self { set!(self, "username", username) } /// Edit the user's avatar. Use `None` to remove the avatar. pub fn avatar(self, icon: Option<&str>) -> Self { set!(self, "avatar", icon) } } impl EditRole { /// Edit the role's name. Supply the empty string to remove a name. pub fn name(self, name: &str) -> Self { set!(self, "name", name) } /// Edit the role's permissions. pub fn permissions(self, permissions: Permissions) -> Self { set!(self, "permissions", permissions) } /// Edit the role's color. Set to zero for default. pub fn color(self, color: u64) -> Self { set!(self, "color", color) } /// Edit the role's hoist status (whether the role should be displayed separately in the sidebar). 
pub fn hoist(self, hoist: bool) -> Self { set!(self, "hoist", hoist) } /// Edit the role's mentionability, if the role can be mentioned. pub fn mentionable(self, mentionable: bool) -> Self { set!(self, "mentionable", mentionable) } } impl SendMessage { /// Set the text content of the message. pub fn content(self, content: &str) -> Self { set!(self, "content", content) } /// Set a nonce that can be used for optimistic message sending. pub fn nonce(self, nonce: &str) -> Self { set!(self, "nonce", nonce) } /// Set to true to use text-to-speech. pub fn tts(self, tts: bool) -> Self { set!(self, "tts", tts) } /// Embed rich content. pub fn embed<F: FnOnce(EmbedBuilder) -> EmbedBuilder>(self, f: F) -> Self { set!(self, "embed", EmbedBuilder::__build(f)) } /// Restrict allowed mentions for this message. pub fn allowed_mentions<F: FnOnce(AllowedMentions) -> AllowedMentions>(self, f: F) -> Self { set!(self, "allowed_mentions", AllowedMentions::__build(f)) } /// Reply to the given message, optionally mentioning the sender. /// /// The given `message_id` must be in the same channel that this message is /// being sent to. pub fn reply(self, message_id: MessageId, mention: bool) -> Self { set!(self, "message_reference", json! {{ "message_id": message_id, }}).allowed_mentions(|b| b.replied_user(mention)) } /// Change the message's flags. /// /// Can only be set while editing. Only `SUPPRESS_EMBEDS` can be edited on /// request. pub fn flags(self, flags: MessageFlags) -> Self { set!(self, "flags", flags) } // TODO: file, payload_json, message_reference } impl AllowedMentions { // TODO: parse, roles, users /// Set to `false` to disable mentioning a replied-to user. pub fn replied_user(self, replied_user: bool) -> Self { set!(self, "replied_user", replied_user) } } impl EmbedBuilder { /// Add the "title of embed". pub fn title(self, title: &str) -> Self { set!(self, "title", title) } /// Add the "description of embed". 
pub fn description(self, description: &str) -> Self { set!(self, "description", description) } /// Add the "url of embed". pub fn url(self, url: &str) -> Self { set!(self, "url", url) } /// Add the "timestamp of embed content". pub fn timestamp(self, timestamp: DateTime<FixedOffset>) -> Self { set!(self, "timestamp", timestamp.to_rfc3339()) } /// Add the "color code of the embed". pub fn color(self, color: u64) -> Self { set!(self, "color", color) } /// Add "footer information". See the `EmbedFooterBuilder` struct for the editable fields. pub fn footer<F: FnOnce(EmbedFooterBuilder) -> EmbedFooterBuilder>(self, f: F) -> Self { set!(self, "footer", EmbedFooterBuilder::__build(f)) } /// Add "source url of image". Only supports http(s). pub fn image(self, url: &str) -> Self { set!(self, "image", { "url": url }) } /// Add "source url of thumbnail". Only supports http(s). pub fn thumbnail(self, url: &str) -> Self { set!(self, "thumbnail", { "url": url }) } /// Add "author information". See the `EmbedAuthorBuilder` struct for the editable fields. pub fn author<F: FnOnce(EmbedAuthorBuilder) -> EmbedAuthorBuilder>(self, f: F) -> Self { set!(self, "author", EmbedAuthorBuilder::__build(f)) } /// Add "fields information". See the `EmbedFieldsBuilder` struct for the editable fields. pub fn fields<F: FnOnce(EmbedFieldsBuilder) -> EmbedFieldsBuilder>(self, f: F) -> Self { set!(self, "fields", EmbedFieldsBuilder::__build(f)) } } impl EmbedFooterBuilder { /// Add the "footer text". pub fn text(self, text: &str) -> Self { set!(self, "text", text) } /// Add the "url of footer icon". Only the http(s) protocols are supported. pub fn icon_url(self, icon_url: &str) -> Self { set!(self, "icon_url", icon_url) } } impl EmbedAuthorBuilder { /// Add the "name of author". pub fn name(self, name: &str) -> Self { set!(self, "name", name) } /// Add the "url of author". pub fn url(self, url: &str) -> Self { set!(self, "url", url) } /// Add the "url of author icon". 
Only the http(s) protocols are supported. pub fn icon_url(self, icon_url: &str) -> Self { set!(self, "icon_url", icon_url) } } impl EmbedFieldsBuilder { /// Add an entire field structure, representing a mapping from `name` to `value`. /// /// `inline` determines "whether or not this field should display inline". pub fn field(mut self, name: &str, value: &str, inline: bool) -> Self { self.0.push(json! {{ "name": name, "value": value, "inline": inline, }}); self } }
impl $name { #[doc(hidden)]
random_line_split
builders.rs
//! Builder types used for patches and other complex data structures. //! //! These types do not usually need to be imported, but the methods available //! on them are very relevant to where they are used. use serde_json::Value; use chrono::offset::FixedOffset; use chrono::DateTime; use model::*; use Object; macro_rules! builder { ($(#[$attr:meta] $name:ident($inner:ty);)*) => { $( #[$attr] #[derive(Serialize, Deserialize)] pub struct $name($inner); impl $name { #[doc(hidden)] #[inline(always)] pub fn __build<F: FnOnce($name) -> $name>(f: F) -> $inner where $inner: Default { Self::__apply(f, Default::default()) } #[doc(hidden)] pub fn __apply<F: FnOnce($name) -> $name>(f: F, inp: $inner) -> $inner { f($name(inp)).0 } /// Merge this builder's contents with another of the same type. /// Keys in `other` will override those in `self`. /// /// This method is intended to be used with deserialized /// instances. Note that deserialization *does not* check that /// the keys are valid for the relevant API call. /// /// ```ignore /// discord.edit_server(|b| b /// .merge(serde_json::from_str(r#"{"name":"My Server"}"#)?)) /// ``` pub fn merge(mut self, other: $name) -> $name { self.0.extend(other.0); self } } )* } } builder! { /// Patch content for the `edit_server` call. EditServer(Object); /// Patch content for the `edit_channel` call. EditChannel(Object); /// Patch content for the `edit_member` call. EditMember(Object); /// Patch content for the `edit_profile` call. EditProfile(Object); /// Patch content for the `edit_user_profile` call. EditUserProfile(Object); /// Patch content for the `edit_role` call. EditRole(Object); /// Content for the `send_message` call. SendMessage(Object); /// `allowed_mentions` object for use within `send_message`. AllowedMentions(Object); /// Patch content for the `send_embed` call. EmbedBuilder(Object); /// Inner patch content for the `send_embed` call. EmbedFooterBuilder(Object); /// Inner patch content for the `send_embed` call. 
EmbedAuthorBuilder(Object); /// Inner patch content for the `send_embed` call. EmbedFieldsBuilder(Vec<Value>); } macro_rules! set { ($self:ident, $key:expr, $($rest:tt)*) => {{ {let mut s = $self; s.0.insert($key.into(), json!($($rest)*)); s} }} } impl EditServer { /// Edit the server's name. pub fn name(self, name: &str) -> Self { set!(self, "name", name) } /// Edit the server's voice region. pub fn region(self, region: &str) -> Self { set!(self, "region", region) } /// Edit the server's icon. Use `None` to remove the icon. pub fn icon(self, icon: Option<&str>) -> Self { set!(self, "icon", icon) } /// Edit the server's AFK channel. Use `None` to select no AFK channel. pub fn afk_channel(self, channel: Option<ChannelId>) -> Self { set!(self, "afk_channel_id", channel) } /// Edit the server's AFK timeout. pub fn afk_timeout(self, timeout: u64) -> Self
/// Transfer ownership of the server to a new owner. pub fn owner(self, owner: UserId) -> Self { set!(self, "owner_id", owner.0) } /// Edit the verification level of the server. pub fn verification_level(self, verification_level: VerificationLevel) -> Self { set!(self, "verification_level", verification_level) } /// Edit the server's splash. Use `None` to remove the splash. pub fn splash(self, splash: Option<&str>) -> Self { set!(self, "splash", splash) } } impl EditChannel { /// Edit the channel's name. pub fn name(self, name: &str) -> Self { set!(self, "name", name) } /// Edit the text channel's topic. pub fn topic(self, topic: &str) -> Self { set!(self, "topic", topic) } /// Edit the channel's position in the list. pub fn position(self, position: u64) -> Self { set!(self, "position", position) } /// Edit the voice channel's bitrate. pub fn bitrate(self, bitrate: u64) -> Self { set!(self, "bitrate", bitrate) } /// Edit the voice channel's user limit. Zero (`0`) means unlimited. pub fn user_limit(self, user_limit: u64) -> Self { set!(self, "user_limit", user_limit) } } impl EditMember { /// Edit the member's nickname. Supply the empty string to remove a nickname. pub fn nickname(self, nick: &str) -> Self { set!(self, "nick", nick) } /// Edit whether the member is server-muted. pub fn mute(self, mute: bool) -> Self { set!(self, "mute", mute) } /// Edit whether the member is server-deafened. pub fn deaf(self, deafen: bool) -> Self { set!(self, "deaf", deafen) } /// Edit the member's assigned roles. pub fn roles(self, roles: &[RoleId]) -> Self { set!(self, "roles", roles) } /// Move the member to another voice channel. pub fn channel(self, channel: ChannelId) -> Self { set!(self, "channel_id", channel.0) } } impl EditProfile { /// Edit the user's username. Must be between 2 and 32 characters long. pub fn username(self, username: &str) -> Self { set!(self, "username", username) } /// Edit the user's avatar. Use `None` to remove the avatar. 
pub fn avatar(self, icon: Option<&str>) -> Self { set!(self, "avatar", icon) } } impl EditUserProfile { /// Provide the user's current password for authentication. Required if /// the email or password is being changed. pub fn password(self, password: &str) -> Self { set!(self, "password", password) } /// Edit the user's email address. pub fn email(self, email: &str) -> Self { set!(self, "email", email) } /// Edit the user's password. pub fn new_password(self, password: &str) -> Self { set!(self, "new_password", password) } /// Edit the user's username. Must be between 2 and 32 characters long. pub fn username(self, username: &str) -> Self { set!(self, "username", username) } /// Edit the user's avatar. Use `None` to remove the avatar. pub fn avatar(self, icon: Option<&str>) -> Self { set!(self, "avatar", icon) } } impl EditRole { /// Edit the role's name. Supply the empty string to remove a name. pub fn name(self, name: &str) -> Self { set!(self, "name", name) } /// Edit the role's permissions. pub fn permissions(self, permissions: Permissions) -> Self { set!(self, "permissions", permissions) } /// Edit the role's color. Set to zero for default. pub fn color(self, color: u64) -> Self { set!(self, "color", color) } /// Edit the role's hoist status (whether the role should be displayed separately in the sidebar). pub fn hoist(self, hoist: bool) -> Self { set!(self, "hoist", hoist) } /// Edit the role's mentionability, if the role can be mentioned. pub fn mentionable(self, mentionable: bool) -> Self { set!(self, "mentionable", mentionable) } } impl SendMessage { /// Set the text content of the message. pub fn content(self, content: &str) -> Self { set!(self, "content", content) } /// Set a nonce that can be used for optimistic message sending. pub fn nonce(self, nonce: &str) -> Self { set!(self, "nonce", nonce) } /// Set to true to use text-to-speech. pub fn tts(self, tts: bool) -> Self { set!(self, "tts", tts) } /// Embed rich content. 
pub fn embed<F: FnOnce(EmbedBuilder) -> EmbedBuilder>(self, f: F) -> Self { set!(self, "embed", EmbedBuilder::__build(f)) } /// Restrict allowed mentions for this message. pub fn allowed_mentions<F: FnOnce(AllowedMentions) -> AllowedMentions>(self, f: F) -> Self { set!(self, "allowed_mentions", AllowedMentions::__build(f)) } /// Reply to the given message, optionally mentioning the sender. /// /// The given `message_id` must be in the same channel that this message is /// being sent to. pub fn reply(self, message_id: MessageId, mention: bool) -> Self { set!(self, "message_reference", json! {{ "message_id": message_id, }}).allowed_mentions(|b| b.replied_user(mention)) } /// Change the message's flags. /// /// Can only be set while editing. Only `SUPPRESS_EMBEDS` can be edited on /// request. pub fn flags(self, flags: MessageFlags) -> Self { set!(self, "flags", flags) } // TODO: file, payload_json, message_reference } impl AllowedMentions { // TODO: parse, roles, users /// Set to `false` to disable mentioning a replied-to user. pub fn replied_user(self, replied_user: bool) -> Self { set!(self, "replied_user", replied_user) } } impl EmbedBuilder { /// Add the "title of embed". pub fn title(self, title: &str) -> Self { set!(self, "title", title) } /// Add the "description of embed". pub fn description(self, description: &str) -> Self { set!(self, "description", description) } /// Add the "url of embed". pub fn url(self, url: &str) -> Self { set!(self, "url", url) } /// Add the "timestamp of embed content". pub fn timestamp(self, timestamp: DateTime<FixedOffset>) -> Self { set!(self, "timestamp", timestamp.to_rfc3339()) } /// Add the "color code of the embed". pub fn color(self, color: u64) -> Self { set!(self, "color", color) } /// Add "footer information". See the `EmbedFooterBuilder` struct for the editable fields. 
pub fn footer<F: FnOnce(EmbedFooterBuilder) -> EmbedFooterBuilder>(self, f: F) -> Self { set!(self, "footer", EmbedFooterBuilder::__build(f)) } /// Add "source url of image". Only supports http(s). pub fn image(self, url: &str) -> Self { set!(self, "image", { "url": url }) } /// Add "source url of thumbnail". Only supports http(s). pub fn thumbnail(self, url: &str) -> Self { set!(self, "thumbnail", { "url": url }) } /// Add "author information". See the `EmbedAuthorBuilder` struct for the editable fields. pub fn author<F: FnOnce(EmbedAuthorBuilder) -> EmbedAuthorBuilder>(self, f: F) -> Self { set!(self, "author", EmbedAuthorBuilder::__build(f)) } /// Add "fields information". See the `EmbedFieldsBuilder` struct for the editable fields. pub fn fields<F: FnOnce(EmbedFieldsBuilder) -> EmbedFieldsBuilder>(self, f: F) -> Self { set!(self, "fields", EmbedFieldsBuilder::__build(f)) } } impl EmbedFooterBuilder { /// Add the "footer text". pub fn text(self, text: &str) -> Self { set!(self, "text", text) } /// Add the "url of footer icon". Only the http(s) protocols are supported. pub fn icon_url(self, icon_url: &str) -> Self { set!(self, "icon_url", icon_url) } } impl EmbedAuthorBuilder { /// Add the "name of author". pub fn name(self, name: &str) -> Self { set!(self, "name", name) } /// Add the "url of author". pub fn url(self, url: &str) -> Self { set!(self, "url", url) } /// Add the "url of author icon". Only the http(s) protocols are supported. pub fn icon_url(self, icon_url: &str) -> Self { set!(self, "icon_url", icon_url) } } impl EmbedFieldsBuilder { /// Add an entire field structure, representing a mapping from `name` to `value`. /// /// `inline` determines "whether or not this field should display inline". pub fn field(mut self, name: &str, value: &str, inline: bool) -> Self { self.0.push(json! {{ "name": name, "value": value, "inline": inline, }}); self } }
{ set!(self, "afk_timeout", timeout) }
identifier_body
philo2.go
/* ----- This is an alternative implementation of the dining philosophers problem, where eating is controlled by a host. A philosopher is allowed to eat when the following criteria are fulfilled: // * The maximum number of parallelly eating philosophers has not yet been reached. // * The philosopher is not already eating, do not exceed the number of allowed dinners and // there's enough time elapsed since the philosophers last dinner. // * Both chopsticks corresponding to the philosopher's seat are free. // Note: seats are randomized. ----- */ package main import ( "fmt" "math/rand" "sort" "strconv" "strings" "sync" "time" ) func main() { var waitGrp sync.WaitGroup names := []string{"John", "Robert", "Lilla", "Charles", "Stella"} tableCount := len(names) host := NewDinnerHostPtr(tableCount, 2, 3) requestChannel, finishChannel := host.AskChannels() go host.Listen() for _, name := range names { phi := NewPhilosopherPtr(name) accepted := host.Add(*phi) if accepted { waitGrp.Add(1) go phi.GoToDinner(&waitGrp, requestChannel, finishChannel) } } waitGrp.Wait() fmt.Println("===== EVERYBODY LEFT THE TABLE. =====") fmt.Println() time.Sleep(time.Duration(2) * time.Second) } /* --- The host of the dinner. --- */ // DinnerHost is the main data structure for the host of the dinner. type DinnerHost struct { phiData map[string]*philosopherData requestChannel chan string finishChannel chan string maxParallel int maxDinner int currentlyEating int tableCount int chopsticksFree []bool freeSeats []int } // philosopherData contains philosopher specific data. It is used within DinnerHost. type philosopherData struct { respChannel chan string eating bool dinnersSpent int seat int leftChopstick int rightChopstick int finishedAt time.Time } // NewDinnerHostPtr creates a new, initialized DinnerHost object and returns a pointer to it. 
func NewDinnerHostPtr(tableCount, maxParallel, maxDinner int) *DinnerHost { host := new(DinnerHost) host.Init(tableCount, maxParallel, maxDinner) return host } // Init is used to initialize the DinnerHost. Note: seats are randomized. func (host *DinnerHost) Init(tableCount, maxParallel, maxDinner int) { host.phiData = make(map[string]*philosopherData) host.requestChannel = make(chan string) host.finishChannel = make(chan string) host.maxParallel = maxParallel if host.maxParallel > tableCount { host.maxParallel = tableCount } host.maxDinner = maxDinner host.currentlyEating = 0 host.tableCount = tableCount host.chopsticksFree = make([]bool, 5) for i := range host.chopsticksFree { host.chopsticksFree[i] = true } rand.Seed(time.Now().Unix()) host.freeSeats = rand.Perm(tableCount) } // newPhilosopherDataPtr creates and initializes a philosopherData object and returns a pointer to it. func newPhilosopherDataPtr(respChannel chan string) *philosopherData { pd := new(philosopherData) pd.Init(respChannel) return pd } // Init is used to initialize the philosopherData. func (pd *philosopherData) Init(respChannel chan string) { pd.respChannel = respChannel pd.eating = false pd.dinnersSpent = 0 pd.seat = -1 pd.leftChopstick = -1 pd.rightChopstick = -1 } // ===== DinnerHost methods ===== // AskChannels can be used to obtain two common channels of the host, the first used to request dinner, // the second used to indicate that someone finished eating. func (host *DinnerHost) AskChannels() (chan string, chan string) { return host.requestChannel, host.finishChannel } // Add registers the philosopher at the host. 
It first checks if they can join (table full, already at // the table), then creates a new philosopher data record and assigns a seat to the func (host *DinnerHost) Add(newPhilosopher Philosopher) bool { newName := newPhilosopher.Name() fmt.Println(newName + " WANTS TO JOIN THE TABLE.") if len(host.phiData) >= host.tableCount { fmt.Println(newName + " CANNOT JOIN: THE TABLE IS FULL.") fmt.Println() return false } if host.phiData[newName] != nil { fmt.Println(newName + " CANNOT JOIN: ALREADY ON THE HOST'S LIST.") fmt.Println() return false } host.phiData[newName] = newPhilosopherDataPtr(newPhilosopher.RespChannel()) host.phiData[newName].TakeSeat(host.freeSeats[0]) host.freeSeats = host.freeSeats[1:] fmt.Println(newName + " JOINED THE TABLE.") fmt.Println() return true } // Listen is the main function of the host, which handles dinner requests and finish // indications coming from the philosophers on _requestChannel_ and _finishChannel_. // Dinner request is authorized with a proper reply to a philosopher on its own // dedicated response channel. 
func (host *DinnerHost) Listen() { name := "" for { select { case name = <-host.requestChannel: fmt.Println(name + " WOULD LIKE TO EAT.") response := host.AllowEating(name) kickOut := false switch response { case "OK": fmt.Println(name + " STARTS EATING.") case "E:CHOPSTICKS": fmt.Println(name + " CANNOT EAT: REQUIRED CHOPSTICKS ARE NOT AVAILABLE.") case "E:FULL": fmt.Println(name + " CANNOT EAT: TWO OTHER PHILOSOPHERS ARE ALREADY EATING.") case "E:JUSTFINISHED": fmt.Println(name + " CANNOT EAT: JUST FINISHED THE PREVIOUS MEAL.") case "E:EATING": fmt.Println(name + " CANNOT EAT: ALREADY EATING.") case "E:LIMIT": fmt.Println(name + " CANNOT EAT: ALREADY HAD THREE DINNERS; MUST LEAVE.") host.freeSeats = append(host.freeSeats, host.phiData[name].Seat()) kickOut = true } fmt.Println() host.phiData[name].RespChannel() <- response if kickOut { delete(host.phiData, name) } case name = <-host.finishChannel: host.SomeoneFinished(name) } host.PrintReport(false) } } // AllowEating checks if the philosopher is allowed to have dinner. Criteria: // * No more than _maxParallel_ philosophers can eat in parallel. // * The philosopher is not already eating, do not exceed the number of allowed dinners and // there's enough time elapsed since the philosopher's last dinner. // * Both chopsticks corresponding to the philosopher's seat are free. // The function also takes care of chopstick reservation. Note: when only either of the // chopsticks is free, it is reserved in spite the philosopher cannot start eating. func (host *DinnerHost)
(name string) string { if host.currentlyEating >= host.maxParallel { return "E:FULL" } data := host.phiData[name] canEat := data.CanEat(host.maxDinner) if canEat != "OK" { return canEat } seatNumber := data.Seat() leftChop := seatNumber rightChop := (seatNumber + 1) % host.tableCount if host.chopsticksFree[leftChop] { host.chopsticksFree[leftChop] = false data.SetLeftChop(leftChop) } if host.chopsticksFree[rightChop] { host.chopsticksFree[rightChop] = false data.SetRightChop(rightChop) } if !data.HasBothChopsticks() { return "E:CHOPSTICKS" } host.currentlyEating++ data.StartedEating() return "OK" } // SomeoneFinished takes the necessary actions when a philosopher finished eating. func (host *DinnerHost) SomeoneFinished(name string) { if host.currentlyEating > 0 { host.currentlyEating-- } host.chopsticksFree[host.phiData[name].LeftChopstick()] = true host.chopsticksFree[host.phiData[name].RightChopstick()] = true host.phiData[name].FinishedEating() fmt.Println(name + " FINISHED EATING.") fmt.Println() } // PrintReport shows the status of the philosophers in a verbose format. func (host *DinnerHost) PrintReport(additionalInfo bool) { names := make([]string, 0, len(host.phiData)) maxNameLen := 0 for i := range host.phiData { names = append(names, i) if len(i) > maxNameLen { maxNameLen = len(i) } } sort.Strings(names) fmt.Printf("%*s | SEAT | LEFTCH. | RIGHTCH. 
| DINNERS | STATUS", maxNameLen, "NAME") fmt.Println() for _, name := range names { data := host.phiData[name] status := "waiting" if data.eating == true { status = "eating" } leftChopStr := strings.Replace(strconv.Itoa(data.LeftChopstick()), "-1", "X", 1) rightChopStr := strings.Replace(strconv.Itoa(data.RightChopstick()), "-1", "X", 1) repLine := fmt.Sprintf("%*s | %*d | %*s | %*s | %*d | %s", maxNameLen, name, 4, data.seat, 7, leftChopStr, 8, rightChopStr, 7, data.dinnersSpent, status) fmt.Println(repLine) } if additionalInfo { freeChops := fmt.Sprintf("CHOPSTICKS:") for chopInd, chopStat := range host.chopsticksFree { status := "FREE" if chopStat == false { status = "RESERVED" } freeChops += fmt.Sprintf(" %d[%s]", chopInd, status) } fmt.Println(freeChops) } fmt.Println() } // ===== philosopherData methods // CanEat checks if the philosopher specific criteria of eating is fulfilled. func (pd *philosopherData) CanEat(maxDinner int) string { switch { case pd.eating: return "E:EATING" case pd.dinnersSpent >= maxDinner: return "E:LIMIT" case time.Now().Sub(pd.finishedAt) < (time.Duration(150) * time.Millisecond): return "E:JUSTFINISHED" } return "OK" } // StartedEating updates philosopher specific data when the philosopher starts eating. func (pd *philosopherData) StartedEating() { pd.eating = true pd.dinnersSpent++ } // FinishedEating updates philosopher specific data when the philosopher finished eating. func (pd *philosopherData) FinishedEating() { pd.eating = false pd.leftChopstick = -1 pd.rightChopstick = -1 pd.finishedAt = time.Now() } // RespChannel returns the philosopher's response channel. func (pd *philosopherData) RespChannel() chan string { return pd.respChannel } // LeftChopstick returns the ID of the philosopher's currently reserved left chopstick. // If no left chopstick is reserved, then -1 is returned. 
func (pd *philosopherData) LeftChopstick() int { return pd.leftChopstick } // RightChopstick returns the ID of the philosopher's currently reserved right chopstick. // If no right chopstick is reserved, then -1 is returned. func (pd *philosopherData) RightChopstick() int { return pd.rightChopstick } // HasBothChopsticks returns true if both of the chopstics are reserved for the philosopher. func (pd *philosopherData) HasBothChopsticks() bool { return (pd.leftChopstick >= 0) && (pd.rightChopstick >= 0) } // SetLeftChop can be used to set the left chopstick ID for the philosopher. func (pd *philosopherData) SetLeftChop(leftChop int) { pd.leftChopstick = leftChop } // SetRightChop can be used to set the right chopstick ID for the philosopher. func (pd *philosopherData) SetRightChop(rightChop int) { pd.rightChopstick = rightChop } // TakeSeat can be used to set the seat number for the philosopher. func (pd *philosopherData) TakeSeat(seatNumber int) { pd.seat = seatNumber } // Seat returns the seat number of the philosopher. func (pd *philosopherData) Seat() int { return pd.seat } /* --- The philosophers. --- */ // Philosopher represents an individual philosopher. type Philosopher struct { name string respChannel chan string } // NewPhilosopherPtr creates and initializes a Philosopher object and returns a pointer to it. func NewPhilosopherPtr(name string) *Philosopher { phi := new(Philosopher) phi.Init(name) return phi } // Init can be used to initialize a Philosopher. func (phi *Philosopher) Init(name string) { phi.name = name phi.respChannel = make(chan string) } // Name returns the name of the philosopher. func (phi *Philosopher) Name() string { return phi.name } // RespChannel returns the philosopher's dedicated response channel. func (phi *Philosopher) RespChannel() chan string { return phi.respChannel } // GoToDinner is the philosopher's main task. They periodically issue eat requests to the host, unless // not already eating. 
When asked so by the host, the philosopher leaves. func (phi *Philosopher) GoToDinner(waitGrp *sync.WaitGroup, requestChannel, finishChannel chan string) { defer waitGrp.Done() retryInterval := time.Duration(2000) * time.Millisecond eatingDuration := time.Duration(5000) * time.Millisecond for { requestChannel <- phi.name switch <-phi.respChannel { case "OK": time.Sleep(eatingDuration) finishChannel <- phi.name case "E:LIMIT": fmt.Println(strings.ToUpper("----- " + phi.name + " LEFT THE TABLE. -----")) fmt.Println() return default: time.Sleep(retryInterval) } } }
AllowEating
identifier_name
philo2.go
/* ----- This is an alternative implementation of the dining philosophers problem, where eating is controlled by a host. A philosopher is allowed to eat when the following criteria are fulfilled: // * The maximum number of parallelly eating philosophers has not yet been reached. // * The philosopher is not already eating, do not exceed the number of allowed dinners and // there's enough time elapsed since the philosophers last dinner. // * Both chopsticks corresponding to the philosopher's seat are free. // Note: seats are randomized. ----- */ package main import ( "fmt" "math/rand" "sort" "strconv" "strings" "sync" "time" ) func main() { var waitGrp sync.WaitGroup names := []string{"John", "Robert", "Lilla", "Charles", "Stella"} tableCount := len(names) host := NewDinnerHostPtr(tableCount, 2, 3) requestChannel, finishChannel := host.AskChannels() go host.Listen() for _, name := range names { phi := NewPhilosopherPtr(name) accepted := host.Add(*phi) if accepted { waitGrp.Add(1) go phi.GoToDinner(&waitGrp, requestChannel, finishChannel) } } waitGrp.Wait() fmt.Println("===== EVERYBODY LEFT THE TABLE. =====") fmt.Println() time.Sleep(time.Duration(2) * time.Second) } /* --- The host of the dinner. --- */ // DinnerHost is the main data structure for the host of the dinner. type DinnerHost struct { phiData map[string]*philosopherData requestChannel chan string finishChannel chan string maxParallel int maxDinner int currentlyEating int tableCount int chopsticksFree []bool freeSeats []int } // philosopherData contains philosopher specific data. It is used within DinnerHost. type philosopherData struct { respChannel chan string eating bool dinnersSpent int seat int leftChopstick int rightChopstick int finishedAt time.Time } // NewDinnerHostPtr creates a new, initialized DinnerHost object and returns a pointer to it. 
func NewDinnerHostPtr(tableCount, maxParallel, maxDinner int) *DinnerHost { host := new(DinnerHost) host.Init(tableCount, maxParallel, maxDinner) return host } // Init is used to initialize the DinnerHost. Note: seats are randomized. func (host *DinnerHost) Init(tableCount, maxParallel, maxDinner int) { host.phiData = make(map[string]*philosopherData) host.requestChannel = make(chan string) host.finishChannel = make(chan string) host.maxParallel = maxParallel if host.maxParallel > tableCount { host.maxParallel = tableCount } host.maxDinner = maxDinner host.currentlyEating = 0 host.tableCount = tableCount host.chopsticksFree = make([]bool, 5) for i := range host.chopsticksFree { host.chopsticksFree[i] = true } rand.Seed(time.Now().Unix()) host.freeSeats = rand.Perm(tableCount) } // newPhilosopherDataPtr creates and initializes a philosopherData object and returns a pointer to it. func newPhilosopherDataPtr(respChannel chan string) *philosopherData { pd := new(philosopherData) pd.Init(respChannel) return pd } // Init is used to initialize the philosopherData. func (pd *philosopherData) Init(respChannel chan string) { pd.respChannel = respChannel pd.eating = false pd.dinnersSpent = 0 pd.seat = -1 pd.leftChopstick = -1 pd.rightChopstick = -1 } // ===== DinnerHost methods ===== // AskChannels can be used to obtain two common channels of the host, the first used to request dinner, // the second used to indicate that someone finished eating. func (host *DinnerHost) AskChannels() (chan string, chan string) { return host.requestChannel, host.finishChannel } // Add registers the philosopher at the host. 
It first checks if they can join (table full, already at // the table), then creates a new philosopher data record and assigns a seat to the func (host *DinnerHost) Add(newPhilosopher Philosopher) bool { newName := newPhilosopher.Name() fmt.Println(newName + " WANTS TO JOIN THE TABLE.") if len(host.phiData) >= host.tableCount { fmt.Println(newName + " CANNOT JOIN: THE TABLE IS FULL.") fmt.Println() return false } if host.phiData[newName] != nil { fmt.Println(newName + " CANNOT JOIN: ALREADY ON THE HOST'S LIST.") fmt.Println() return false } host.phiData[newName] = newPhilosopherDataPtr(newPhilosopher.RespChannel()) host.phiData[newName].TakeSeat(host.freeSeats[0]) host.freeSeats = host.freeSeats[1:] fmt.Println(newName + " JOINED THE TABLE.") fmt.Println() return true } // Listen is the main function of the host, which handles dinner requests and finish // indications coming from the philosophers on _requestChannel_ and _finishChannel_. // Dinner request is authorized with a proper reply to a philosopher on its own // dedicated response channel. 
func (host *DinnerHost) Listen() { name := "" for { select { case name = <-host.requestChannel: fmt.Println(name + " WOULD LIKE TO EAT.") response := host.AllowEating(name) kickOut := false switch response { case "OK": fmt.Println(name + " STARTS EATING.") case "E:CHOPSTICKS": fmt.Println(name + " CANNOT EAT: REQUIRED CHOPSTICKS ARE NOT AVAILABLE.") case "E:FULL": fmt.Println(name + " CANNOT EAT: TWO OTHER PHILOSOPHERS ARE ALREADY EATING.") case "E:JUSTFINISHED": fmt.Println(name + " CANNOT EAT: JUST FINISHED THE PREVIOUS MEAL.") case "E:EATING": fmt.Println(name + " CANNOT EAT: ALREADY EATING.") case "E:LIMIT": fmt.Println(name + " CANNOT EAT: ALREADY HAD THREE DINNERS; MUST LEAVE.") host.freeSeats = append(host.freeSeats, host.phiData[name].Seat()) kickOut = true } fmt.Println() host.phiData[name].RespChannel() <- response if kickOut { delete(host.phiData, name) } case name = <-host.finishChannel: host.SomeoneFinished(name) } host.PrintReport(false) } } // AllowEating checks if the philosopher is allowed to have dinner. Criteria: // * No more than _maxParallel_ philosophers can eat in parallel. // * The philosopher is not already eating, do not exceed the number of allowed dinners and // there's enough time elapsed since the philosopher's last dinner. // * Both chopsticks corresponding to the philosopher's seat are free. // The function also takes care of chopstick reservation. Note: when only either of the // chopsticks is free, it is reserved in spite the philosopher cannot start eating. 
func (host *DinnerHost) AllowEating(name string) string { if host.currentlyEating >= host.maxParallel { return "E:FULL" } data := host.phiData[name] canEat := data.CanEat(host.maxDinner) if canEat != "OK" { return canEat } seatNumber := data.Seat() leftChop := seatNumber rightChop := (seatNumber + 1) % host.tableCount if host.chopsticksFree[leftChop] { host.chopsticksFree[leftChop] = false data.SetLeftChop(leftChop) } if host.chopsticksFree[rightChop] { host.chopsticksFree[rightChop] = false data.SetRightChop(rightChop) } if !data.HasBothChopsticks() { return "E:CHOPSTICKS" } host.currentlyEating++ data.StartedEating() return "OK" } // SomeoneFinished takes the necessary actions when a philosopher finished eating. func (host *DinnerHost) SomeoneFinished(name string) { if host.currentlyEating > 0
host.chopsticksFree[host.phiData[name].LeftChopstick()] = true host.chopsticksFree[host.phiData[name].RightChopstick()] = true host.phiData[name].FinishedEating() fmt.Println(name + " FINISHED EATING.") fmt.Println() } // PrintReport shows the status of the philosophers in a verbose format. func (host *DinnerHost) PrintReport(additionalInfo bool) { names := make([]string, 0, len(host.phiData)) maxNameLen := 0 for i := range host.phiData { names = append(names, i) if len(i) > maxNameLen { maxNameLen = len(i) } } sort.Strings(names) fmt.Printf("%*s | SEAT | LEFTCH. | RIGHTCH. | DINNERS | STATUS", maxNameLen, "NAME") fmt.Println() for _, name := range names { data := host.phiData[name] status := "waiting" if data.eating == true { status = "eating" } leftChopStr := strings.Replace(strconv.Itoa(data.LeftChopstick()), "-1", "X", 1) rightChopStr := strings.Replace(strconv.Itoa(data.RightChopstick()), "-1", "X", 1) repLine := fmt.Sprintf("%*s | %*d | %*s | %*s | %*d | %s", maxNameLen, name, 4, data.seat, 7, leftChopStr, 8, rightChopStr, 7, data.dinnersSpent, status) fmt.Println(repLine) } if additionalInfo { freeChops := fmt.Sprintf("CHOPSTICKS:") for chopInd, chopStat := range host.chopsticksFree { status := "FREE" if chopStat == false { status = "RESERVED" } freeChops += fmt.Sprintf(" %d[%s]", chopInd, status) } fmt.Println(freeChops) } fmt.Println() } // ===== philosopherData methods // CanEat checks if the philosopher specific criteria of eating is fulfilled. func (pd *philosopherData) CanEat(maxDinner int) string { switch { case pd.eating: return "E:EATING" case pd.dinnersSpent >= maxDinner: return "E:LIMIT" case time.Now().Sub(pd.finishedAt) < (time.Duration(150) * time.Millisecond): return "E:JUSTFINISHED" } return "OK" } // StartedEating updates philosopher specific data when the philosopher starts eating. 
func (pd *philosopherData) StartedEating() { pd.eating = true pd.dinnersSpent++ } // FinishedEating updates philosopher specific data when the philosopher finished eating. func (pd *philosopherData) FinishedEating() { pd.eating = false pd.leftChopstick = -1 pd.rightChopstick = -1 pd.finishedAt = time.Now() } // RespChannel returns the philosopher's response channel. func (pd *philosopherData) RespChannel() chan string { return pd.respChannel } // LeftChopstick returns the ID of the philosopher's currently reserved left chopstick. // If no left chopstick is reserved, then -1 is returned. func (pd *philosopherData) LeftChopstick() int { return pd.leftChopstick } // RightChopstick returns the ID of the philosopher's currently reserved right chopstick. // If no right chopstick is reserved, then -1 is returned. func (pd *philosopherData) RightChopstick() int { return pd.rightChopstick } // HasBothChopsticks returns true if both of the chopstics are reserved for the philosopher. func (pd *philosopherData) HasBothChopsticks() bool { return (pd.leftChopstick >= 0) && (pd.rightChopstick >= 0) } // SetLeftChop can be used to set the left chopstick ID for the philosopher. func (pd *philosopherData) SetLeftChop(leftChop int) { pd.leftChopstick = leftChop } // SetRightChop can be used to set the right chopstick ID for the philosopher. func (pd *philosopherData) SetRightChop(rightChop int) { pd.rightChopstick = rightChop } // TakeSeat can be used to set the seat number for the philosopher. func (pd *philosopherData) TakeSeat(seatNumber int) { pd.seat = seatNumber } // Seat returns the seat number of the philosopher. func (pd *philosopherData) Seat() int { return pd.seat } /* --- The philosophers. --- */ // Philosopher represents an individual philosopher. type Philosopher struct { name string respChannel chan string } // NewPhilosopherPtr creates and initializes a Philosopher object and returns a pointer to it. 
func NewPhilosopherPtr(name string) *Philosopher { phi := new(Philosopher) phi.Init(name) return phi } // Init can be used to initialize a Philosopher. func (phi *Philosopher) Init(name string) { phi.name = name phi.respChannel = make(chan string) } // Name returns the name of the philosopher. func (phi *Philosopher) Name() string { return phi.name } // RespChannel returns the philosopher's dedicated response channel. func (phi *Philosopher) RespChannel() chan string { return phi.respChannel } // GoToDinner is the philosopher's main task. They periodically issue eat requests to the host, unless // not already eating. When asked so by the host, the philosopher leaves. func (phi *Philosopher) GoToDinner(waitGrp *sync.WaitGroup, requestChannel, finishChannel chan string) { defer waitGrp.Done() retryInterval := time.Duration(2000) * time.Millisecond eatingDuration := time.Duration(5000) * time.Millisecond for { requestChannel <- phi.name switch <-phi.respChannel { case "OK": time.Sleep(eatingDuration) finishChannel <- phi.name case "E:LIMIT": fmt.Println(strings.ToUpper("----- " + phi.name + " LEFT THE TABLE. -----")) fmt.Println() return default: time.Sleep(retryInterval) } } }
{ host.currentlyEating-- }
conditional_block
philo2.go
/* ----- This is an alternative implementation of the dining philosophers problem, where eating is controlled by a host. A philosopher is allowed to eat when the following criteria are fulfilled: // * The maximum number of parallelly eating philosophers has not yet been reached. // * The philosopher is not already eating, do not exceed the number of allowed dinners and // there's enough time elapsed since the philosophers last dinner. // * Both chopsticks corresponding to the philosopher's seat are free. // Note: seats are randomized. ----- */ package main import ( "fmt" "math/rand" "sort" "strconv" "strings" "sync" "time" ) func main() { var waitGrp sync.WaitGroup names := []string{"John", "Robert", "Lilla", "Charles", "Stella"} tableCount := len(names) host := NewDinnerHostPtr(tableCount, 2, 3) requestChannel, finishChannel := host.AskChannels() go host.Listen() for _, name := range names { phi := NewPhilosopherPtr(name) accepted := host.Add(*phi) if accepted { waitGrp.Add(1) go phi.GoToDinner(&waitGrp, requestChannel, finishChannel) } } waitGrp.Wait() fmt.Println("===== EVERYBODY LEFT THE TABLE. =====") fmt.Println() time.Sleep(time.Duration(2) * time.Second) }
The host of the dinner. --- */ // DinnerHost is the main data structure for the host of the dinner. type DinnerHost struct { phiData map[string]*philosopherData requestChannel chan string finishChannel chan string maxParallel int maxDinner int currentlyEating int tableCount int chopsticksFree []bool freeSeats []int } // philosopherData contains philosopher specific data. It is used within DinnerHost. type philosopherData struct { respChannel chan string eating bool dinnersSpent int seat int leftChopstick int rightChopstick int finishedAt time.Time } // NewDinnerHostPtr creates a new, initialized DinnerHost object and returns a pointer to it. func NewDinnerHostPtr(tableCount, maxParallel, maxDinner int) *DinnerHost { host := new(DinnerHost) host.Init(tableCount, maxParallel, maxDinner) return host } // Init is used to initialize the DinnerHost. Note: seats are randomized. func (host *DinnerHost) Init(tableCount, maxParallel, maxDinner int) { host.phiData = make(map[string]*philosopherData) host.requestChannel = make(chan string) host.finishChannel = make(chan string) host.maxParallel = maxParallel if host.maxParallel > tableCount { host.maxParallel = tableCount } host.maxDinner = maxDinner host.currentlyEating = 0 host.tableCount = tableCount host.chopsticksFree = make([]bool, 5) for i := range host.chopsticksFree { host.chopsticksFree[i] = true } rand.Seed(time.Now().Unix()) host.freeSeats = rand.Perm(tableCount) } // newPhilosopherDataPtr creates and initializes a philosopherData object and returns a pointer to it. func newPhilosopherDataPtr(respChannel chan string) *philosopherData { pd := new(philosopherData) pd.Init(respChannel) return pd } // Init is used to initialize the philosopherData. 
func (pd *philosopherData) Init(respChannel chan string) { pd.respChannel = respChannel pd.eating = false pd.dinnersSpent = 0 pd.seat = -1 pd.leftChopstick = -1 pd.rightChopstick = -1 } // ===== DinnerHost methods ===== // AskChannels can be used to obtain two common channels of the host, the first used to request dinner, // the second used to indicate that someone finished eating. func (host *DinnerHost) AskChannels() (chan string, chan string) { return host.requestChannel, host.finishChannel } // Add registers the philosopher at the host. It first checks if they can join (table full, already at // the table), then creates a new philosopher data record and assigns a seat to the func (host *DinnerHost) Add(newPhilosopher Philosopher) bool { newName := newPhilosopher.Name() fmt.Println(newName + " WANTS TO JOIN THE TABLE.") if len(host.phiData) >= host.tableCount { fmt.Println(newName + " CANNOT JOIN: THE TABLE IS FULL.") fmt.Println() return false } if host.phiData[newName] != nil { fmt.Println(newName + " CANNOT JOIN: ALREADY ON THE HOST'S LIST.") fmt.Println() return false } host.phiData[newName] = newPhilosopherDataPtr(newPhilosopher.RespChannel()) host.phiData[newName].TakeSeat(host.freeSeats[0]) host.freeSeats = host.freeSeats[1:] fmt.Println(newName + " JOINED THE TABLE.") fmt.Println() return true } // Listen is the main function of the host, which handles dinner requests and finish // indications coming from the philosophers on _requestChannel_ and _finishChannel_. // Dinner request is authorized with a proper reply to a philosopher on its own // dedicated response channel. 
func (host *DinnerHost) Listen() { name := "" for { select { case name = <-host.requestChannel: fmt.Println(name + " WOULD LIKE TO EAT.") response := host.AllowEating(name) kickOut := false switch response { case "OK": fmt.Println(name + " STARTS EATING.") case "E:CHOPSTICKS": fmt.Println(name + " CANNOT EAT: REQUIRED CHOPSTICKS ARE NOT AVAILABLE.") case "E:FULL": fmt.Println(name + " CANNOT EAT: TWO OTHER PHILOSOPHERS ARE ALREADY EATING.") case "E:JUSTFINISHED": fmt.Println(name + " CANNOT EAT: JUST FINISHED THE PREVIOUS MEAL.") case "E:EATING": fmt.Println(name + " CANNOT EAT: ALREADY EATING.") case "E:LIMIT": fmt.Println(name + " CANNOT EAT: ALREADY HAD THREE DINNERS; MUST LEAVE.") host.freeSeats = append(host.freeSeats, host.phiData[name].Seat()) kickOut = true } fmt.Println() host.phiData[name].RespChannel() <- response if kickOut { delete(host.phiData, name) } case name = <-host.finishChannel: host.SomeoneFinished(name) } host.PrintReport(false) } } // AllowEating checks if the philosopher is allowed to have dinner. Criteria: // * No more than _maxParallel_ philosophers can eat in parallel. // * The philosopher is not already eating, do not exceed the number of allowed dinners and // there's enough time elapsed since the philosopher's last dinner. // * Both chopsticks corresponding to the philosopher's seat are free. // The function also takes care of chopstick reservation. Note: when only either of the // chopsticks is free, it is reserved in spite the philosopher cannot start eating. 
func (host *DinnerHost) AllowEating(name string) string { if host.currentlyEating >= host.maxParallel { return "E:FULL" } data := host.phiData[name] canEat := data.CanEat(host.maxDinner) if canEat != "OK" { return canEat } seatNumber := data.Seat() leftChop := seatNumber rightChop := (seatNumber + 1) % host.tableCount if host.chopsticksFree[leftChop] { host.chopsticksFree[leftChop] = false data.SetLeftChop(leftChop) } if host.chopsticksFree[rightChop] { host.chopsticksFree[rightChop] = false data.SetRightChop(rightChop) } if !data.HasBothChopsticks() { return "E:CHOPSTICKS" } host.currentlyEating++ data.StartedEating() return "OK" } // SomeoneFinished takes the necessary actions when a philosopher finished eating. func (host *DinnerHost) SomeoneFinished(name string) { if host.currentlyEating > 0 { host.currentlyEating-- } host.chopsticksFree[host.phiData[name].LeftChopstick()] = true host.chopsticksFree[host.phiData[name].RightChopstick()] = true host.phiData[name].FinishedEating() fmt.Println(name + " FINISHED EATING.") fmt.Println() } // PrintReport shows the status of the philosophers in a verbose format. func (host *DinnerHost) PrintReport(additionalInfo bool) { names := make([]string, 0, len(host.phiData)) maxNameLen := 0 for i := range host.phiData { names = append(names, i) if len(i) > maxNameLen { maxNameLen = len(i) } } sort.Strings(names) fmt.Printf("%*s | SEAT | LEFTCH. | RIGHTCH. 
| DINNERS | STATUS", maxNameLen, "NAME") fmt.Println() for _, name := range names { data := host.phiData[name] status := "waiting" if data.eating == true { status = "eating" } leftChopStr := strings.Replace(strconv.Itoa(data.LeftChopstick()), "-1", "X", 1) rightChopStr := strings.Replace(strconv.Itoa(data.RightChopstick()), "-1", "X", 1) repLine := fmt.Sprintf("%*s | %*d | %*s | %*s | %*d | %s", maxNameLen, name, 4, data.seat, 7, leftChopStr, 8, rightChopStr, 7, data.dinnersSpent, status) fmt.Println(repLine) } if additionalInfo { freeChops := fmt.Sprintf("CHOPSTICKS:") for chopInd, chopStat := range host.chopsticksFree { status := "FREE" if chopStat == false { status = "RESERVED" } freeChops += fmt.Sprintf(" %d[%s]", chopInd, status) } fmt.Println(freeChops) } fmt.Println() } // ===== philosopherData methods // CanEat checks if the philosopher specific criteria of eating is fulfilled. func (pd *philosopherData) CanEat(maxDinner int) string { switch { case pd.eating: return "E:EATING" case pd.dinnersSpent >= maxDinner: return "E:LIMIT" case time.Now().Sub(pd.finishedAt) < (time.Duration(150) * time.Millisecond): return "E:JUSTFINISHED" } return "OK" } // StartedEating updates philosopher specific data when the philosopher starts eating. func (pd *philosopherData) StartedEating() { pd.eating = true pd.dinnersSpent++ } // FinishedEating updates philosopher specific data when the philosopher finished eating. func (pd *philosopherData) FinishedEating() { pd.eating = false pd.leftChopstick = -1 pd.rightChopstick = -1 pd.finishedAt = time.Now() } // RespChannel returns the philosopher's response channel. func (pd *philosopherData) RespChannel() chan string { return pd.respChannel } // LeftChopstick returns the ID of the philosopher's currently reserved left chopstick. // If no left chopstick is reserved, then -1 is returned. 
func (pd *philosopherData) LeftChopstick() int { return pd.leftChopstick } // RightChopstick returns the ID of the philosopher's currently reserved right chopstick. // If no right chopstick is reserved, then -1 is returned. func (pd *philosopherData) RightChopstick() int { return pd.rightChopstick } // HasBothChopsticks returns true if both of the chopstics are reserved for the philosopher. func (pd *philosopherData) HasBothChopsticks() bool { return (pd.leftChopstick >= 0) && (pd.rightChopstick >= 0) } // SetLeftChop can be used to set the left chopstick ID for the philosopher. func (pd *philosopherData) SetLeftChop(leftChop int) { pd.leftChopstick = leftChop } // SetRightChop can be used to set the right chopstick ID for the philosopher. func (pd *philosopherData) SetRightChop(rightChop int) { pd.rightChopstick = rightChop } // TakeSeat can be used to set the seat number for the philosopher. func (pd *philosopherData) TakeSeat(seatNumber int) { pd.seat = seatNumber } // Seat returns the seat number of the philosopher. func (pd *philosopherData) Seat() int { return pd.seat } /* --- The philosophers. --- */ // Philosopher represents an individual philosopher. type Philosopher struct { name string respChannel chan string } // NewPhilosopherPtr creates and initializes a Philosopher object and returns a pointer to it. func NewPhilosopherPtr(name string) *Philosopher { phi := new(Philosopher) phi.Init(name) return phi } // Init can be used to initialize a Philosopher. func (phi *Philosopher) Init(name string) { phi.name = name phi.respChannel = make(chan string) } // Name returns the name of the philosopher. func (phi *Philosopher) Name() string { return phi.name } // RespChannel returns the philosopher's dedicated response channel. func (phi *Philosopher) RespChannel() chan string { return phi.respChannel } // GoToDinner is the philosopher's main task. They periodically issue eat requests to the host, unless // not already eating. 
When asked so by the host, the philosopher leaves. func (phi *Philosopher) GoToDinner(waitGrp *sync.WaitGroup, requestChannel, finishChannel chan string) { defer waitGrp.Done() retryInterval := time.Duration(2000) * time.Millisecond eatingDuration := time.Duration(5000) * time.Millisecond for { requestChannel <- phi.name switch <-phi.respChannel { case "OK": time.Sleep(eatingDuration) finishChannel <- phi.name case "E:LIMIT": fmt.Println(strings.ToUpper("----- " + phi.name + " LEFT THE TABLE. -----")) fmt.Println() return default: time.Sleep(retryInterval) } } }
/* ---
random_line_split
philo2.go
/* ----- This is an alternative implementation of the dining philosophers problem, where eating is controlled by a host. A philosopher is allowed to eat when the following criteria are fulfilled: // * The maximum number of parallelly eating philosophers has not yet been reached. // * The philosopher is not already eating, do not exceed the number of allowed dinners and // there's enough time elapsed since the philosophers last dinner. // * Both chopsticks corresponding to the philosopher's seat are free. // Note: seats are randomized. ----- */ package main import ( "fmt" "math/rand" "sort" "strconv" "strings" "sync" "time" ) func main() { var waitGrp sync.WaitGroup names := []string{"John", "Robert", "Lilla", "Charles", "Stella"} tableCount := len(names) host := NewDinnerHostPtr(tableCount, 2, 3) requestChannel, finishChannel := host.AskChannels() go host.Listen() for _, name := range names { phi := NewPhilosopherPtr(name) accepted := host.Add(*phi) if accepted { waitGrp.Add(1) go phi.GoToDinner(&waitGrp, requestChannel, finishChannel) } } waitGrp.Wait() fmt.Println("===== EVERYBODY LEFT THE TABLE. =====") fmt.Println() time.Sleep(time.Duration(2) * time.Second) } /* --- The host of the dinner. --- */ // DinnerHost is the main data structure for the host of the dinner. type DinnerHost struct { phiData map[string]*philosopherData requestChannel chan string finishChannel chan string maxParallel int maxDinner int currentlyEating int tableCount int chopsticksFree []bool freeSeats []int } // philosopherData contains philosopher specific data. It is used within DinnerHost. type philosopherData struct { respChannel chan string eating bool dinnersSpent int seat int leftChopstick int rightChopstick int finishedAt time.Time } // NewDinnerHostPtr creates a new, initialized DinnerHost object and returns a pointer to it. 
func NewDinnerHostPtr(tableCount, maxParallel, maxDinner int) *DinnerHost { host := new(DinnerHost) host.Init(tableCount, maxParallel, maxDinner) return host } // Init is used to initialize the DinnerHost. Note: seats are randomized. func (host *DinnerHost) Init(tableCount, maxParallel, maxDinner int) { host.phiData = make(map[string]*philosopherData) host.requestChannel = make(chan string) host.finishChannel = make(chan string) host.maxParallel = maxParallel if host.maxParallel > tableCount { host.maxParallel = tableCount } host.maxDinner = maxDinner host.currentlyEating = 0 host.tableCount = tableCount host.chopsticksFree = make([]bool, 5) for i := range host.chopsticksFree { host.chopsticksFree[i] = true } rand.Seed(time.Now().Unix()) host.freeSeats = rand.Perm(tableCount) } // newPhilosopherDataPtr creates and initializes a philosopherData object and returns a pointer to it. func newPhilosopherDataPtr(respChannel chan string) *philosopherData { pd := new(philosopherData) pd.Init(respChannel) return pd } // Init is used to initialize the philosopherData. func (pd *philosopherData) Init(respChannel chan string) { pd.respChannel = respChannel pd.eating = false pd.dinnersSpent = 0 pd.seat = -1 pd.leftChopstick = -1 pd.rightChopstick = -1 } // ===== DinnerHost methods ===== // AskChannels can be used to obtain two common channels of the host, the first used to request dinner, // the second used to indicate that someone finished eating. func (host *DinnerHost) AskChannels() (chan string, chan string) { return host.requestChannel, host.finishChannel } // Add registers the philosopher at the host. 
It first checks if they can join (table full, already at // the table), then creates a new philosopher data record and assigns a seat to the func (host *DinnerHost) Add(newPhilosopher Philosopher) bool { newName := newPhilosopher.Name() fmt.Println(newName + " WANTS TO JOIN THE TABLE.") if len(host.phiData) >= host.tableCount { fmt.Println(newName + " CANNOT JOIN: THE TABLE IS FULL.") fmt.Println() return false } if host.phiData[newName] != nil { fmt.Println(newName + " CANNOT JOIN: ALREADY ON THE HOST'S LIST.") fmt.Println() return false } host.phiData[newName] = newPhilosopherDataPtr(newPhilosopher.RespChannel()) host.phiData[newName].TakeSeat(host.freeSeats[0]) host.freeSeats = host.freeSeats[1:] fmt.Println(newName + " JOINED THE TABLE.") fmt.Println() return true } // Listen is the main function of the host, which handles dinner requests and finish // indications coming from the philosophers on _requestChannel_ and _finishChannel_. // Dinner request is authorized with a proper reply to a philosopher on its own // dedicated response channel. 
func (host *DinnerHost) Listen() { name := "" for { select { case name = <-host.requestChannel: fmt.Println(name + " WOULD LIKE TO EAT.") response := host.AllowEating(name) kickOut := false switch response { case "OK": fmt.Println(name + " STARTS EATING.") case "E:CHOPSTICKS": fmt.Println(name + " CANNOT EAT: REQUIRED CHOPSTICKS ARE NOT AVAILABLE.") case "E:FULL": fmt.Println(name + " CANNOT EAT: TWO OTHER PHILOSOPHERS ARE ALREADY EATING.") case "E:JUSTFINISHED": fmt.Println(name + " CANNOT EAT: JUST FINISHED THE PREVIOUS MEAL.") case "E:EATING": fmt.Println(name + " CANNOT EAT: ALREADY EATING.") case "E:LIMIT": fmt.Println(name + " CANNOT EAT: ALREADY HAD THREE DINNERS; MUST LEAVE.") host.freeSeats = append(host.freeSeats, host.phiData[name].Seat()) kickOut = true } fmt.Println() host.phiData[name].RespChannel() <- response if kickOut { delete(host.phiData, name) } case name = <-host.finishChannel: host.SomeoneFinished(name) } host.PrintReport(false) } } // AllowEating checks if the philosopher is allowed to have dinner. Criteria: // * No more than _maxParallel_ philosophers can eat in parallel. // * The philosopher is not already eating, do not exceed the number of allowed dinners and // there's enough time elapsed since the philosopher's last dinner. // * Both chopsticks corresponding to the philosopher's seat are free. // The function also takes care of chopstick reservation. Note: when only either of the // chopsticks is free, it is reserved in spite the philosopher cannot start eating. 
func (host *DinnerHost) AllowEating(name string) string { if host.currentlyEating >= host.maxParallel { return "E:FULL" } data := host.phiData[name] canEat := data.CanEat(host.maxDinner) if canEat != "OK" { return canEat } seatNumber := data.Seat() leftChop := seatNumber rightChop := (seatNumber + 1) % host.tableCount if host.chopsticksFree[leftChop] { host.chopsticksFree[leftChop] = false data.SetLeftChop(leftChop) } if host.chopsticksFree[rightChop] { host.chopsticksFree[rightChop] = false data.SetRightChop(rightChop) } if !data.HasBothChopsticks() { return "E:CHOPSTICKS" } host.currentlyEating++ data.StartedEating() return "OK" } // SomeoneFinished takes the necessary actions when a philosopher finished eating. func (host *DinnerHost) SomeoneFinished(name string) { if host.currentlyEating > 0 { host.currentlyEating-- } host.chopsticksFree[host.phiData[name].LeftChopstick()] = true host.chopsticksFree[host.phiData[name].RightChopstick()] = true host.phiData[name].FinishedEating() fmt.Println(name + " FINISHED EATING.") fmt.Println() } // PrintReport shows the status of the philosophers in a verbose format. func (host *DinnerHost) PrintReport(additionalInfo bool) { names := make([]string, 0, len(host.phiData)) maxNameLen := 0 for i := range host.phiData { names = append(names, i) if len(i) > maxNameLen { maxNameLen = len(i) } } sort.Strings(names) fmt.Printf("%*s | SEAT | LEFTCH. | RIGHTCH. 
| DINNERS | STATUS", maxNameLen, "NAME") fmt.Println() for _, name := range names { data := host.phiData[name] status := "waiting" if data.eating == true { status = "eating" } leftChopStr := strings.Replace(strconv.Itoa(data.LeftChopstick()), "-1", "X", 1) rightChopStr := strings.Replace(strconv.Itoa(data.RightChopstick()), "-1", "X", 1) repLine := fmt.Sprintf("%*s | %*d | %*s | %*s | %*d | %s", maxNameLen, name, 4, data.seat, 7, leftChopStr, 8, rightChopStr, 7, data.dinnersSpent, status) fmt.Println(repLine) } if additionalInfo { freeChops := fmt.Sprintf("CHOPSTICKS:") for chopInd, chopStat := range host.chopsticksFree { status := "FREE" if chopStat == false { status = "RESERVED" } freeChops += fmt.Sprintf(" %d[%s]", chopInd, status) } fmt.Println(freeChops) } fmt.Println() } // ===== philosopherData methods // CanEat checks if the philosopher specific criteria of eating is fulfilled. func (pd *philosopherData) CanEat(maxDinner int) string { switch { case pd.eating: return "E:EATING" case pd.dinnersSpent >= maxDinner: return "E:LIMIT" case time.Now().Sub(pd.finishedAt) < (time.Duration(150) * time.Millisecond): return "E:JUSTFINISHED" } return "OK" } // StartedEating updates philosopher specific data when the philosopher starts eating. func (pd *philosopherData) StartedEating() { pd.eating = true pd.dinnersSpent++ } // FinishedEating updates philosopher specific data when the philosopher finished eating. func (pd *philosopherData) FinishedEating() { pd.eating = false pd.leftChopstick = -1 pd.rightChopstick = -1 pd.finishedAt = time.Now() } // RespChannel returns the philosopher's response channel. func (pd *philosopherData) RespChannel() chan string { return pd.respChannel } // LeftChopstick returns the ID of the philosopher's currently reserved left chopstick. // If no left chopstick is reserved, then -1 is returned. 
func (pd *philosopherData) LeftChopstick() int { return pd.leftChopstick } // RightChopstick returns the ID of the philosopher's currently reserved right chopstick. // If no right chopstick is reserved, then -1 is returned. func (pd *philosopherData) RightChopstick() int
// HasBothChopsticks returns true if both of the chopstics are reserved for the philosopher. func (pd *philosopherData) HasBothChopsticks() bool { return (pd.leftChopstick >= 0) && (pd.rightChopstick >= 0) } // SetLeftChop can be used to set the left chopstick ID for the philosopher. func (pd *philosopherData) SetLeftChop(leftChop int) { pd.leftChopstick = leftChop } // SetRightChop can be used to set the right chopstick ID for the philosopher. func (pd *philosopherData) SetRightChop(rightChop int) { pd.rightChopstick = rightChop } // TakeSeat can be used to set the seat number for the philosopher. func (pd *philosopherData) TakeSeat(seatNumber int) { pd.seat = seatNumber } // Seat returns the seat number of the philosopher. func (pd *philosopherData) Seat() int { return pd.seat } /* --- The philosophers. --- */ // Philosopher represents an individual philosopher. type Philosopher struct { name string respChannel chan string } // NewPhilosopherPtr creates and initializes a Philosopher object and returns a pointer to it. func NewPhilosopherPtr(name string) *Philosopher { phi := new(Philosopher) phi.Init(name) return phi } // Init can be used to initialize a Philosopher. func (phi *Philosopher) Init(name string) { phi.name = name phi.respChannel = make(chan string) } // Name returns the name of the philosopher. func (phi *Philosopher) Name() string { return phi.name } // RespChannel returns the philosopher's dedicated response channel. func (phi *Philosopher) RespChannel() chan string { return phi.respChannel } // GoToDinner is the philosopher's main task. They periodically issue eat requests to the host, unless // not already eating. When asked so by the host, the philosopher leaves. 
func (phi *Philosopher) GoToDinner(waitGrp *sync.WaitGroup, requestChannel, finishChannel chan string) { defer waitGrp.Done() retryInterval := time.Duration(2000) * time.Millisecond eatingDuration := time.Duration(5000) * time.Millisecond for { requestChannel <- phi.name switch <-phi.respChannel { case "OK": time.Sleep(eatingDuration) finishChannel <- phi.name case "E:LIMIT": fmt.Println(strings.ToUpper("----- " + phi.name + " LEFT THE TABLE. -----")) fmt.Println() return default: time.Sleep(retryInterval) } } }
{ return pd.rightChopstick }
identifier_body
verify_cert.rs
// Copyright 2015 Brian Smith. // // Permission to use, copy, modify, and/or distribute this software for any // purpose with or without fee is hereby granted, provided that the above // copyright notice and this permission notice appear in all copies. // // THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHORS DISCLAIM ALL WARRANTIES // WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF // MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR // ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES // WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN // ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF // OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. use crate::{ cert::{self, Cert, EndEntityOrCa}, der, name, signed_data, time, Error, SignatureAlgorithm, TrustAnchor, }; pub fn build_chain( required_eku_if_present: KeyPurposeId, supported_sig_algs: &[&SignatureAlgorithm], trust_anchors: &[TrustAnchor], intermediate_certs: &[&[u8]], cert: &Cert, time: time::Time, sub_ca_count: usize, ) -> Result<(), Error> { let used_as_ca = used_as_ca(&cert.ee_or_ca); check_issuer_independent_properties( cert, time, used_as_ca, sub_ca_count, required_eku_if_present, )?; // TODO: HPKP checks. match used_as_ca { UsedAsCa::Yes => { const MAX_SUB_CA_COUNT: usize = 6; if sub_ca_count >= MAX_SUB_CA_COUNT { return Err(Error::UnknownIssuer); } } UsedAsCa::No => { assert_eq!(0, sub_ca_count); } } // TODO: revocation. 
match loop_while_non_fatal_error(trust_anchors, |trust_anchor: &TrustAnchor| { let trust_anchor_subject = untrusted::Input::from(trust_anchor.subject); if cert.issuer != trust_anchor_subject { return Err(Error::UnknownIssuer); } let name_constraints = trust_anchor.name_constraints.map(untrusted::Input::from); untrusted::read_all_optional(name_constraints, Error::BadDer, |value| { name::check_name_constraints(value, &cert) })?; let trust_anchor_spki = untrusted::Input::from(trust_anchor.spki); // TODO: check_distrust(trust_anchor_subject, trust_anchor_spki)?; check_signatures(supported_sig_algs, cert, trust_anchor_spki)?; Ok(()) }) { Ok(()) => { return Ok(()); } Err(..) => { // If the error is not fatal, then keep going. } } loop_while_non_fatal_error(intermediate_certs, |cert_der| { let potential_issuer = cert::parse_cert(untrusted::Input::from(*cert_der), EndEntityOrCa::Ca(&cert))?; if potential_issuer.subject != cert.issuer { return Err(Error::UnknownIssuer); } // Prevent loops; see RFC 4158 section 5.2. 
let mut prev = cert; loop { if potential_issuer.spki.value() == prev.spki.value() && potential_issuer.subject == prev.subject { return Err(Error::UnknownIssuer); } match &prev.ee_or_ca { EndEntityOrCa::EndEntity => { break; } EndEntityOrCa::Ca(child_cert) => { prev = child_cert; } } } untrusted::read_all_optional(potential_issuer.name_constraints, Error::BadDer, |value| { name::check_name_constraints(value, &cert) })?; let next_sub_ca_count = match used_as_ca { UsedAsCa::No => sub_ca_count, UsedAsCa::Yes => sub_ca_count + 1, }; build_chain( required_eku_if_present, supported_sig_algs, trust_anchors, intermediate_certs, &potential_issuer, time, next_sub_ca_count, ) }) } fn check_signatures( supported_sig_algs: &[&SignatureAlgorithm], cert_chain: &Cert, trust_anchor_key: untrusted::Input, ) -> Result<(), Error> { let mut spki_value = trust_anchor_key; let mut cert = cert_chain; loop { signed_data::verify_signed_data(supported_sig_algs, spki_value, &cert.signed_data)?; // TODO: check revocation match &cert.ee_or_ca { EndEntityOrCa::Ca(child_cert) => { spki_value = cert.spki.value(); cert = child_cert; } EndEntityOrCa::EndEntity => { break; } } } Ok(()) } fn check_issuer_independent_properties( cert: &Cert, time: time::Time, used_as_ca: UsedAsCa, sub_ca_count: usize, required_eku_if_present: KeyPurposeId, ) -> Result<(), Error> { // TODO: check_distrust(trust_anchor_subject, trust_anchor_spki)?; // TODO: Check signature algorithm like mozilla::pkix. // TODO: Check SPKI like mozilla::pkix. // TODO: check for active distrust like mozilla::pkix. // See the comment in `remember_extension` for why we don't check the // KeyUsage extension. 
cert.validity .read_all(Error::BadDer, |value| check_validity(value, time))?; untrusted::read_all_optional(cert.basic_constraints, Error::BadDer, |value| { check_basic_constraints(value, used_as_ca, sub_ca_count) })?; untrusted::read_all_optional(cert.eku, Error::BadDer, |value| { check_eku(value, required_eku_if_present) })?; Ok(()) } // https://tools.ietf.org/html/rfc5280#section-4.1.2.5 fn check_validity(input: &mut untrusted::Reader, time: time::Time) -> Result<(), Error> { let not_before = der::time_choice(input)?; let not_after = der::time_choice(input)?; if not_before > not_after { return Err(Error::InvalidCertValidity); } if time < not_before { return Err(Error::CertNotValidYet); } if time > not_after { return Err(Error::CertExpired); } // TODO: mozilla::pkix allows the TrustDomain to check not_before and // not_after, to enforce things like a maximum validity period. We should // do something similar. Ok(()) } #[derive(Clone, Copy)] enum UsedAsCa { Yes, No, } fn used_as_ca(ee_or_ca: &EndEntityOrCa) -> UsedAsCa { match ee_or_ca { EndEntityOrCa::EndEntity => UsedAsCa::No, EndEntityOrCa::Ca(..) => UsedAsCa::Yes, } } // https://tools.ietf.org/html/rfc5280#section-4.2.1.9 fn
( input: Option<&mut untrusted::Reader>, used_as_ca: UsedAsCa, sub_ca_count: usize, ) -> Result<(), Error> { let (is_ca, path_len_constraint) = match input { Some(input) => { let is_ca = der::optional_boolean(input)?; // https://bugzilla.mozilla.org/show_bug.cgi?id=985025: RFC 5280 // says that a certificate must not have pathLenConstraint unless // it is a CA certificate, but some real-world end-entity // certificates have pathLenConstraint. let path_len_constraint = if !input.at_end() { let value = der::small_nonnegative_integer(input)?; Some(usize::from(value)) } else { None }; (is_ca, path_len_constraint) } None => (false, None), }; match (used_as_ca, is_ca, path_len_constraint) { (UsedAsCa::No, true, _) => Err(Error::CaUsedAsEndEntity), (UsedAsCa::Yes, false, _) => Err(Error::EndEntityUsedAsCa), (UsedAsCa::Yes, true, Some(len)) if sub_ca_count > len => { Err(Error::PathLenConstraintViolated) } _ => Ok(()), } } #[derive(Clone, Copy)] pub struct KeyPurposeId { oid_value: untrusted::Input<'static>, } // id-pkix OBJECT IDENTIFIER ::= { 1 3 6 1 5 5 7 } // id-kp OBJECT IDENTIFIER ::= { id-pkix 3 } // id-kp-serverAuth OBJECT IDENTIFIER ::= { id-kp 1 } #[allow(clippy::identity_op)] // TODO: Make this clearer pub static EKU_SERVER_AUTH: KeyPurposeId = KeyPurposeId { oid_value: untrusted::Input::from(&[(40 * 1) + 3, 6, 1, 5, 5, 7, 3, 1]), }; // id-kp-clientAuth OBJECT IDENTIFIER ::= { id-kp 2 } #[allow(clippy::identity_op)] // TODO: Make this clearer pub static EKU_CLIENT_AUTH: KeyPurposeId = KeyPurposeId { oid_value: untrusted::Input::from(&[(40 * 1) + 3, 6, 1, 5, 5, 7, 3, 2]), }; // id-kp-OCSPSigning OBJECT IDENTIFIER ::= { id-kp 9 } #[allow(clippy::identity_op)] // TODO: Make this clearer pub static EKU_OCSP_SIGNING: KeyPurposeId = KeyPurposeId { oid_value: untrusted::Input::from(&[(40 * 1) + 3, 6, 1, 5, 5, 7, 3, 9]), }; // https://tools.ietf.org/html/rfc5280#section-4.2.1.12 // // Notable Differences from RFC 5280: // // * We follow the convention established by 
Microsoft's implementation and // mozilla::pkix of treating the EKU extension in a CA certificate as a // restriction on the allowable EKUs for certificates issued by that CA. RFC // 5280 doesn't prescribe any meaning to the EKU extension when a certificate // is being used as a CA certificate. // // * We do not recognize anyExtendedKeyUsage. NSS and mozilla::pkix do not // recognize it either. // // * We treat id-Netscape-stepUp as being equivalent to id-kp-serverAuth in CA // certificates (only). Comodo has issued certificates that require this // behavior that don't expire until June 2020. See https://bugzilla.mozilla.org/show_bug.cgi?id=982292. fn check_eku( input: Option<&mut untrusted::Reader>, required_eku_if_present: KeyPurposeId, ) -> Result<(), Error> { match input { Some(input) => { loop { let value = der::expect_tag_and_get_value(input, der::Tag::OID)?; if value == required_eku_if_present.oid_value { input.skip_to_end(); break; } if input.at_end() { return Err(Error::RequiredEkuNotFound); } } Ok(()) } None => { // http://tools.ietf.org/html/rfc6960#section-4.2.2.2: // "OCSP signing delegation SHALL be designated by the inclusion of // id-kp-OCSPSigning in an extended key usage certificate extension // included in the OCSP response signer's certificate." // // A missing EKU extension generally means "any EKU", but it is // important that id-kp-OCSPSigning is explicit so that a normal // end-entity certificate isn't able to sign trusted OCSP responses // for itself or for other certificates issued by its issuing CA. if required_eku_if_present.oid_value == EKU_OCSP_SIGNING.oid_value { return Err(Error::RequiredEkuNotFound); } Ok(()) } } } fn loop_while_non_fatal_error<V>( values: V, f: impl Fn(V::Item) -> Result<(), Error>, ) -> Result<(), Error> where V: IntoIterator, { for v in values { match f(v) { Ok(()) => { return Ok(()); } Err(..) => { // If the error is not fatal, then keep going. } } } Err(Error::UnknownIssuer) }
check_basic_constraints
identifier_name
verify_cert.rs
// Copyright 2015 Brian Smith. // // Permission to use, copy, modify, and/or distribute this software for any // purpose with or without fee is hereby granted, provided that the above // copyright notice and this permission notice appear in all copies. // // THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHORS DISCLAIM ALL WARRANTIES // WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF // MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR // ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES // WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN // ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF // OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. use crate::{ cert::{self, Cert, EndEntityOrCa}, der, name, signed_data, time, Error, SignatureAlgorithm, TrustAnchor, }; pub fn build_chain( required_eku_if_present: KeyPurposeId, supported_sig_algs: &[&SignatureAlgorithm], trust_anchors: &[TrustAnchor], intermediate_certs: &[&[u8]], cert: &Cert, time: time::Time, sub_ca_count: usize, ) -> Result<(), Error> { let used_as_ca = used_as_ca(&cert.ee_or_ca); check_issuer_independent_properties( cert, time, used_as_ca, sub_ca_count, required_eku_if_present, )?; // TODO: HPKP checks. match used_as_ca { UsedAsCa::Yes => { const MAX_SUB_CA_COUNT: usize = 6; if sub_ca_count >= MAX_SUB_CA_COUNT { return Err(Error::UnknownIssuer); } } UsedAsCa::No => { assert_eq!(0, sub_ca_count); } } // TODO: revocation. 
match loop_while_non_fatal_error(trust_anchors, |trust_anchor: &TrustAnchor| { let trust_anchor_subject = untrusted::Input::from(trust_anchor.subject); if cert.issuer != trust_anchor_subject { return Err(Error::UnknownIssuer); } let name_constraints = trust_anchor.name_constraints.map(untrusted::Input::from); untrusted::read_all_optional(name_constraints, Error::BadDer, |value| { name::check_name_constraints(value, &cert) })?; let trust_anchor_spki = untrusted::Input::from(trust_anchor.spki); // TODO: check_distrust(trust_anchor_subject, trust_anchor_spki)?; check_signatures(supported_sig_algs, cert, trust_anchor_spki)?; Ok(()) }) { Ok(()) => { return Ok(()); } Err(..) => { // If the error is not fatal, then keep going. } } loop_while_non_fatal_error(intermediate_certs, |cert_der| { let potential_issuer = cert::parse_cert(untrusted::Input::from(*cert_der), EndEntityOrCa::Ca(&cert))?; if potential_issuer.subject != cert.issuer { return Err(Error::UnknownIssuer); } // Prevent loops; see RFC 4158 section 5.2. 
let mut prev = cert; loop { if potential_issuer.spki.value() == prev.spki.value() && potential_issuer.subject == prev.subject { return Err(Error::UnknownIssuer); } match &prev.ee_or_ca { EndEntityOrCa::EndEntity => { break; } EndEntityOrCa::Ca(child_cert) => { prev = child_cert; } } } untrusted::read_all_optional(potential_issuer.name_constraints, Error::BadDer, |value| { name::check_name_constraints(value, &cert) })?; let next_sub_ca_count = match used_as_ca { UsedAsCa::No => sub_ca_count, UsedAsCa::Yes => sub_ca_count + 1, }; build_chain( required_eku_if_present, supported_sig_algs, trust_anchors, intermediate_certs, &potential_issuer, time, next_sub_ca_count, ) }) } fn check_signatures( supported_sig_algs: &[&SignatureAlgorithm], cert_chain: &Cert, trust_anchor_key: untrusted::Input, ) -> Result<(), Error> { let mut spki_value = trust_anchor_key; let mut cert = cert_chain; loop { signed_data::verify_signed_data(supported_sig_algs, spki_value, &cert.signed_data)?; // TODO: check revocation match &cert.ee_or_ca { EndEntityOrCa::Ca(child_cert) => { spki_value = cert.spki.value(); cert = child_cert; } EndEntityOrCa::EndEntity => { break; } } } Ok(()) } fn check_issuer_independent_properties( cert: &Cert, time: time::Time, used_as_ca: UsedAsCa, sub_ca_count: usize, required_eku_if_present: KeyPurposeId, ) -> Result<(), Error>
// https://tools.ietf.org/html/rfc5280#section-4.1.2.5 fn check_validity(input: &mut untrusted::Reader, time: time::Time) -> Result<(), Error> { let not_before = der::time_choice(input)?; let not_after = der::time_choice(input)?; if not_before > not_after { return Err(Error::InvalidCertValidity); } if time < not_before { return Err(Error::CertNotValidYet); } if time > not_after { return Err(Error::CertExpired); } // TODO: mozilla::pkix allows the TrustDomain to check not_before and // not_after, to enforce things like a maximum validity period. We should // do something similar. Ok(()) } #[derive(Clone, Copy)] enum UsedAsCa { Yes, No, } fn used_as_ca(ee_or_ca: &EndEntityOrCa) -> UsedAsCa { match ee_or_ca { EndEntityOrCa::EndEntity => UsedAsCa::No, EndEntityOrCa::Ca(..) => UsedAsCa::Yes, } } // https://tools.ietf.org/html/rfc5280#section-4.2.1.9 fn check_basic_constraints( input: Option<&mut untrusted::Reader>, used_as_ca: UsedAsCa, sub_ca_count: usize, ) -> Result<(), Error> { let (is_ca, path_len_constraint) = match input { Some(input) => { let is_ca = der::optional_boolean(input)?; // https://bugzilla.mozilla.org/show_bug.cgi?id=985025: RFC 5280 // says that a certificate must not have pathLenConstraint unless // it is a CA certificate, but some real-world end-entity // certificates have pathLenConstraint. 
let path_len_constraint = if !input.at_end() { let value = der::small_nonnegative_integer(input)?; Some(usize::from(value)) } else { None }; (is_ca, path_len_constraint) } None => (false, None), }; match (used_as_ca, is_ca, path_len_constraint) { (UsedAsCa::No, true, _) => Err(Error::CaUsedAsEndEntity), (UsedAsCa::Yes, false, _) => Err(Error::EndEntityUsedAsCa), (UsedAsCa::Yes, true, Some(len)) if sub_ca_count > len => { Err(Error::PathLenConstraintViolated) } _ => Ok(()), } } #[derive(Clone, Copy)] pub struct KeyPurposeId { oid_value: untrusted::Input<'static>, } // id-pkix OBJECT IDENTIFIER ::= { 1 3 6 1 5 5 7 } // id-kp OBJECT IDENTIFIER ::= { id-pkix 3 } // id-kp-serverAuth OBJECT IDENTIFIER ::= { id-kp 1 } #[allow(clippy::identity_op)] // TODO: Make this clearer pub static EKU_SERVER_AUTH: KeyPurposeId = KeyPurposeId { oid_value: untrusted::Input::from(&[(40 * 1) + 3, 6, 1, 5, 5, 7, 3, 1]), }; // id-kp-clientAuth OBJECT IDENTIFIER ::= { id-kp 2 } #[allow(clippy::identity_op)] // TODO: Make this clearer pub static EKU_CLIENT_AUTH: KeyPurposeId = KeyPurposeId { oid_value: untrusted::Input::from(&[(40 * 1) + 3, 6, 1, 5, 5, 7, 3, 2]), }; // id-kp-OCSPSigning OBJECT IDENTIFIER ::= { id-kp 9 } #[allow(clippy::identity_op)] // TODO: Make this clearer pub static EKU_OCSP_SIGNING: KeyPurposeId = KeyPurposeId { oid_value: untrusted::Input::from(&[(40 * 1) + 3, 6, 1, 5, 5, 7, 3, 9]), }; // https://tools.ietf.org/html/rfc5280#section-4.2.1.12 // // Notable Differences from RFC 5280: // // * We follow the convention established by Microsoft's implementation and // mozilla::pkix of treating the EKU extension in a CA certificate as a // restriction on the allowable EKUs for certificates issued by that CA. RFC // 5280 doesn't prescribe any meaning to the EKU extension when a certificate // is being used as a CA certificate. // // * We do not recognize anyExtendedKeyUsage. NSS and mozilla::pkix do not // recognize it either. 
// // * We treat id-Netscape-stepUp as being equivalent to id-kp-serverAuth in CA // certificates (only). Comodo has issued certificates that require this // behavior that don't expire until June 2020. See https://bugzilla.mozilla.org/show_bug.cgi?id=982292. fn check_eku( input: Option<&mut untrusted::Reader>, required_eku_if_present: KeyPurposeId, ) -> Result<(), Error> { match input { Some(input) => { loop { let value = der::expect_tag_and_get_value(input, der::Tag::OID)?; if value == required_eku_if_present.oid_value { input.skip_to_end(); break; } if input.at_end() { return Err(Error::RequiredEkuNotFound); } } Ok(()) } None => { // http://tools.ietf.org/html/rfc6960#section-4.2.2.2: // "OCSP signing delegation SHALL be designated by the inclusion of // id-kp-OCSPSigning in an extended key usage certificate extension // included in the OCSP response signer's certificate." // // A missing EKU extension generally means "any EKU", but it is // important that id-kp-OCSPSigning is explicit so that a normal // end-entity certificate isn't able to sign trusted OCSP responses // for itself or for other certificates issued by its issuing CA. if required_eku_if_present.oid_value == EKU_OCSP_SIGNING.oid_value { return Err(Error::RequiredEkuNotFound); } Ok(()) } } } fn loop_while_non_fatal_error<V>( values: V, f: impl Fn(V::Item) -> Result<(), Error>, ) -> Result<(), Error> where V: IntoIterator, { for v in values { match f(v) { Ok(()) => { return Ok(()); } Err(..) => { // If the error is not fatal, then keep going. } } } Err(Error::UnknownIssuer) }
{ // TODO: check_distrust(trust_anchor_subject, trust_anchor_spki)?; // TODO: Check signature algorithm like mozilla::pkix. // TODO: Check SPKI like mozilla::pkix. // TODO: check for active distrust like mozilla::pkix. // See the comment in `remember_extension` for why we don't check the // KeyUsage extension. cert.validity .read_all(Error::BadDer, |value| check_validity(value, time))?; untrusted::read_all_optional(cert.basic_constraints, Error::BadDer, |value| { check_basic_constraints(value, used_as_ca, sub_ca_count) })?; untrusted::read_all_optional(cert.eku, Error::BadDer, |value| { check_eku(value, required_eku_if_present) })?; Ok(()) }
identifier_body
verify_cert.rs
// Copyright 2015 Brian Smith. // // Permission to use, copy, modify, and/or distribute this software for any // purpose with or without fee is hereby granted, provided that the above // copyright notice and this permission notice appear in all copies. // // THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHORS DISCLAIM ALL WARRANTIES // WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF // MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR // ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES // WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN // ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF // OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. use crate::{ cert::{self, Cert, EndEntityOrCa}, der, name, signed_data, time, Error, SignatureAlgorithm, TrustAnchor, }; pub fn build_chain( required_eku_if_present: KeyPurposeId, supported_sig_algs: &[&SignatureAlgorithm], trust_anchors: &[TrustAnchor], intermediate_certs: &[&[u8]], cert: &Cert, time: time::Time, sub_ca_count: usize, ) -> Result<(), Error> { let used_as_ca = used_as_ca(&cert.ee_or_ca); check_issuer_independent_properties( cert, time, used_as_ca, sub_ca_count, required_eku_if_present, )?; // TODO: HPKP checks. match used_as_ca { UsedAsCa::Yes => { const MAX_SUB_CA_COUNT: usize = 6; if sub_ca_count >= MAX_SUB_CA_COUNT { return Err(Error::UnknownIssuer); } } UsedAsCa::No => { assert_eq!(0, sub_ca_count); } } // TODO: revocation. 
match loop_while_non_fatal_error(trust_anchors, |trust_anchor: &TrustAnchor| { let trust_anchor_subject = untrusted::Input::from(trust_anchor.subject); if cert.issuer != trust_anchor_subject { return Err(Error::UnknownIssuer); } let name_constraints = trust_anchor.name_constraints.map(untrusted::Input::from); untrusted::read_all_optional(name_constraints, Error::BadDer, |value| { name::check_name_constraints(value, &cert) })?; let trust_anchor_spki = untrusted::Input::from(trust_anchor.spki); // TODO: check_distrust(trust_anchor_subject, trust_anchor_spki)?; check_signatures(supported_sig_algs, cert, trust_anchor_spki)?; Ok(()) }) { Ok(()) => { return Ok(()); } Err(..) => { // If the error is not fatal, then keep going. } } loop_while_non_fatal_error(intermediate_certs, |cert_der| { let potential_issuer = cert::parse_cert(untrusted::Input::from(*cert_der), EndEntityOrCa::Ca(&cert))?; if potential_issuer.subject != cert.issuer { return Err(Error::UnknownIssuer); } // Prevent loops; see RFC 4158 section 5.2. 
let mut prev = cert; loop { if potential_issuer.spki.value() == prev.spki.value() && potential_issuer.subject == prev.subject { return Err(Error::UnknownIssuer); } match &prev.ee_or_ca { EndEntityOrCa::EndEntity => { break; } EndEntityOrCa::Ca(child_cert) => { prev = child_cert; } } } untrusted::read_all_optional(potential_issuer.name_constraints, Error::BadDer, |value| { name::check_name_constraints(value, &cert) })?; let next_sub_ca_count = match used_as_ca { UsedAsCa::No => sub_ca_count, UsedAsCa::Yes => sub_ca_count + 1, }; build_chain( required_eku_if_present, supported_sig_algs, trust_anchors, intermediate_certs, &potential_issuer, time, next_sub_ca_count, ) }) } fn check_signatures( supported_sig_algs: &[&SignatureAlgorithm], cert_chain: &Cert, trust_anchor_key: untrusted::Input, ) -> Result<(), Error> { let mut spki_value = trust_anchor_key; let mut cert = cert_chain; loop { signed_data::verify_signed_data(supported_sig_algs, spki_value, &cert.signed_data)?; // TODO: check revocation match &cert.ee_or_ca { EndEntityOrCa::Ca(child_cert) => { spki_value = cert.spki.value(); cert = child_cert; } EndEntityOrCa::EndEntity => { break; } } } Ok(()) } fn check_issuer_independent_properties( cert: &Cert, time: time::Time, used_as_ca: UsedAsCa, sub_ca_count: usize, required_eku_if_present: KeyPurposeId, ) -> Result<(), Error> { // TODO: check_distrust(trust_anchor_subject, trust_anchor_spki)?; // TODO: Check signature algorithm like mozilla::pkix. // TODO: Check SPKI like mozilla::pkix. // TODO: check for active distrust like mozilla::pkix. // See the comment in `remember_extension` for why we don't check the // KeyUsage extension. 
cert.validity .read_all(Error::BadDer, |value| check_validity(value, time))?; untrusted::read_all_optional(cert.basic_constraints, Error::BadDer, |value| { check_basic_constraints(value, used_as_ca, sub_ca_count) })?; untrusted::read_all_optional(cert.eku, Error::BadDer, |value| { check_eku(value, required_eku_if_present) })?; Ok(()) } // https://tools.ietf.org/html/rfc5280#section-4.1.2.5 fn check_validity(input: &mut untrusted::Reader, time: time::Time) -> Result<(), Error> { let not_before = der::time_choice(input)?; let not_after = der::time_choice(input)?; if not_before > not_after { return Err(Error::InvalidCertValidity); } if time < not_before { return Err(Error::CertNotValidYet); } if time > not_after { return Err(Error::CertExpired); } // TODO: mozilla::pkix allows the TrustDomain to check not_before and // not_after, to enforce things like a maximum validity period. We should // do something similar. Ok(()) } #[derive(Clone, Copy)] enum UsedAsCa { Yes, No, } fn used_as_ca(ee_or_ca: &EndEntityOrCa) -> UsedAsCa { match ee_or_ca { EndEntityOrCa::EndEntity => UsedAsCa::No, EndEntityOrCa::Ca(..) => UsedAsCa::Yes, } } // https://tools.ietf.org/html/rfc5280#section-4.2.1.9 fn check_basic_constraints( input: Option<&mut untrusted::Reader>, used_as_ca: UsedAsCa, sub_ca_count: usize, ) -> Result<(), Error> { let (is_ca, path_len_constraint) = match input { Some(input) => { let is_ca = der::optional_boolean(input)?; // https://bugzilla.mozilla.org/show_bug.cgi?id=985025: RFC 5280 // says that a certificate must not have pathLenConstraint unless // it is a CA certificate, but some real-world end-entity // certificates have pathLenConstraint. 
let path_len_constraint = if !input.at_end() { let value = der::small_nonnegative_integer(input)?; Some(usize::from(value)) } else { None }; (is_ca, path_len_constraint) } None => (false, None), }; match (used_as_ca, is_ca, path_len_constraint) { (UsedAsCa::No, true, _) => Err(Error::CaUsedAsEndEntity), (UsedAsCa::Yes, false, _) => Err(Error::EndEntityUsedAsCa), (UsedAsCa::Yes, true, Some(len)) if sub_ca_count > len => { Err(Error::PathLenConstraintViolated) } _ => Ok(()), } } #[derive(Clone, Copy)] pub struct KeyPurposeId { oid_value: untrusted::Input<'static>, } // id-pkix OBJECT IDENTIFIER ::= { 1 3 6 1 5 5 7 } // id-kp OBJECT IDENTIFIER ::= { id-pkix 3 } // id-kp-serverAuth OBJECT IDENTIFIER ::= { id-kp 1 } #[allow(clippy::identity_op)] // TODO: Make this clearer pub static EKU_SERVER_AUTH: KeyPurposeId = KeyPurposeId { oid_value: untrusted::Input::from(&[(40 * 1) + 3, 6, 1, 5, 5, 7, 3, 1]), }; // id-kp-clientAuth OBJECT IDENTIFIER ::= { id-kp 2 } #[allow(clippy::identity_op)] // TODO: Make this clearer pub static EKU_CLIENT_AUTH: KeyPurposeId = KeyPurposeId { oid_value: untrusted::Input::from(&[(40 * 1) + 3, 6, 1, 5, 5, 7, 3, 2]), }; // id-kp-OCSPSigning OBJECT IDENTIFIER ::= { id-kp 9 } #[allow(clippy::identity_op)] // TODO: Make this clearer pub static EKU_OCSP_SIGNING: KeyPurposeId = KeyPurposeId { oid_value: untrusted::Input::from(&[(40 * 1) + 3, 6, 1, 5, 5, 7, 3, 9]), }; // https://tools.ietf.org/html/rfc5280#section-4.2.1.12 //
// * We follow the convention established by Microsoft's implementation and // mozilla::pkix of treating the EKU extension in a CA certificate as a // restriction on the allowable EKUs for certificates issued by that CA. RFC // 5280 doesn't prescribe any meaning to the EKU extension when a certificate // is being used as a CA certificate. // // * We do not recognize anyExtendedKeyUsage. NSS and mozilla::pkix do not // recognize it either. // // * We treat id-Netscape-stepUp as being equivalent to id-kp-serverAuth in CA // certificates (only). Comodo has issued certificates that require this // behavior that don't expire until June 2020. See https://bugzilla.mozilla.org/show_bug.cgi?id=982292. fn check_eku( input: Option<&mut untrusted::Reader>, required_eku_if_present: KeyPurposeId, ) -> Result<(), Error> { match input { Some(input) => { loop { let value = der::expect_tag_and_get_value(input, der::Tag::OID)?; if value == required_eku_if_present.oid_value { input.skip_to_end(); break; } if input.at_end() { return Err(Error::RequiredEkuNotFound); } } Ok(()) } None => { // http://tools.ietf.org/html/rfc6960#section-4.2.2.2: // "OCSP signing delegation SHALL be designated by the inclusion of // id-kp-OCSPSigning in an extended key usage certificate extension // included in the OCSP response signer's certificate." // // A missing EKU extension generally means "any EKU", but it is // important that id-kp-OCSPSigning is explicit so that a normal // end-entity certificate isn't able to sign trusted OCSP responses // for itself or for other certificates issued by its issuing CA. if required_eku_if_present.oid_value == EKU_OCSP_SIGNING.oid_value { return Err(Error::RequiredEkuNotFound); } Ok(()) } } } fn loop_while_non_fatal_error<V>( values: V, f: impl Fn(V::Item) -> Result<(), Error>, ) -> Result<(), Error> where V: IntoIterator, { for v in values { match f(v) { Ok(()) => { return Ok(()); } Err(..) => { // If the error is not fatal, then keep going. 
} } } Err(Error::UnknownIssuer) }
// Notable Differences from RFC 5280: //
random_line_split
move_vm.go
/* Copyright (c) 2016 Thomas Findelkind # # This program is free software: you can redistribute it and/or modify it under # the terms of the GNU General Public License as published by the Free Software # Foundation, either version 3 of the License, or (at your option) any later # version. # # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more # details. # # You should have received a copy of the GNU General Public License along with # this program. If not, see <http://www.gnu.org/licenses/>. # # MORE ABOUT THIS SCRIPT AVAILABLE IN THE README AND AT: # # http://tfindelkind.com # # ---------------------------------------------------------------------------- */ package main import ( log "github.com/Sirupsen/logrus" ntnxAPI "github.com/Tfindelkind/acropolis-sdk-go" "bufio" "flag" "fmt" "os" "strconv" "strings" ) const appVersion = "0.9 beta" var ( host *string username *string password *string vmName *string newVMName *string vdiskMapping *string container *string listMapping *bool keepImages *bool debug *bool delete *bool overwrite *bool help *bool version *bool ) func init() { host = flag.String("host", "", "a string") username = flag.String("username", "", "a string") password = flag.String("password", "", "a string") vmName = flag.String("vm-name", "", "a string") newVMName = flag.String("new-vm-name", "", "a string") vdiskMapping = flag.String("vdisk-mapping", "", "a string") container = flag.String("container", "", "a string") listMapping = flag.Bool("list-mapping", false, "a bool") debug = flag.Bool("debug", false, "a bool") keepImages = flag.Bool("keep-images", false, "a bool") delete = flag.Bool("delete", false, "a bool") overwrite = flag.Bool("overwrite", false, "a bool") help = flag.Bool("help", false, "a bool") version = flag.Bool("version", false, "a bool") } func printHelp() { 
fmt.Println("Usage: move_vm [OPTIONS]") fmt.Println("move_vm [ --help | --version ]") fmt.Println("") fmt.Println("FOR NUTANIX AHV ONLY- clones a VM from one container to another") fmt.Println("vNic MAC Addresses will change unless --delete is used") fmt.Println("") fmt.Println("Options:") fmt.Println("") fmt.Println("--host Specify CVM host or Cluster IP") fmt.Println("--username Specify username for connect to host") fmt.Println("--password Specify password for user") fmt.Println("--vm-name Specify Virtual Machine name which will be moved") fmt.Println("--new-vm-name New Virtual Machine name if specified (clone)") fmt.Println("--vdisk-mapping Specify the container mapping for each vdisk - ORDER IS IMPORTANT") fmt.Println("--list-mapping Shows the actual vdisk-mapping") fmt.Println("--container Specify the container where vm will be moved to") fmt.Println("--debug Enables debug mode") fmt.Println("--keep-images If enabled clones to image service will not be deleted") fmt.Println("--delete Deletes source VM - ARE YOU REALLY SURE?") fmt.Println("--overwrite Overwrites target VM/Images (delete and creates new one)") fmt.Println("--help List this help") fmt.Println("--version Show the move_vm version") fmt.Println("") fmt.Println("Example:") fmt.Println("") fmt.Println("move_vm --host=NTNX-CVM --username=admin --password=nutanix/4u --vm-name=MyVM --container=ISO") fmt.Println("move_vm --host=NTNX-CVM --username=admin --password=nutanix/4u --vm-name=MyVM --vdisk-mapping=scsi.0/ISO,scsi.1/Prod2") fmt.Println("") } // parse --vdisk-mapping or --container and checks if all container exist func parseVdiskMapping(n *ntnxAPI.NTNXConnection) ([]ntnxAPI.VMDisks, error) { defer func() { if err := recover(); err != nil { log.Fatal("--vdisk-mapping seems not to have right format") os.Exit(1) } }() var vdiskMappings []ntnxAPI.VMDisks var VMDisk ntnxAPI.VMDisks result := strings.Split(*vdiskMapping, ",") // add Mappings for i := range result { res := strings.Split(result[i], "/") 
resAddr := strings.Split(res[0], ".") VMDisk.Addr.DeviceBus = resAddr[0] VMDisk.Addr.DeviceIndex, _ = strconv.Atoi(resAddr[1]) // check if right format is used if !(VMDisk.Addr.DeviceBus == "scsi" || VMDisk.Addr.DeviceBus == "pci" || VMDisk.Addr.DeviceBus == "ide") { log.Error("--vdisk-mapping seems not to have right format") os.Exit(1) } if !(VMDisk.Addr.DeviceIndex >= 0 && VMDisk.Addr.DeviceIndex <= 255) { log.Error("--vdisk-mapping seems not to have right format") os.Exit(1) } if res[1] != "EMPTY" { containerUUID, err := ntnxAPI.GetContainerUUIDbyName(n, res[1]) if err != nil { os.Exit(1) } VMDisk.ContainerUUID = containerUUID } vdiskMappings = append(vdiskMappings, VMDisk) } return vdiskMappings, nil } func checkVdiskMapping(v ntnxAPI.VMJSONAHV, VdiskMapping []ntnxAPI.VMDisks)
func evaluateFlags() (ntnxAPI.NTNXConnection, ntnxAPI.VMJSONAHV, ntnxAPI.VMJSONAHV, []ntnxAPI.VMDisks) { //help if *help { printHelp() os.Exit(0) } //version if *version { fmt.Println("Version: " + appVersion) os.Exit(0) } //debug if *debug { log.SetLevel(log.DebugLevel) } else { log.SetLevel(log.InfoLevel) } //delete if *delete { reader := bufio.NewReader(os.Stdin) fmt.Println("THIS WILL DELETE source VM: " + *vmName) fmt.Print("If you want to continue type YES: ") text, _ := reader.ReadString('\n') fmt.Println(text) if strings.TrimRight(text, "\n") != "YES" { os.Exit(0) } } //host if *host == "" { log.Warn("mandatory option '--host=' is not set") os.Exit(0) } //username if *username == "" { log.Warn("option '--username=' is not set Default: admin is used") *username = "admin" } //password if *password == "" { log.Warn("option '--password=' is not set Default: nutanix/4u is used") *password = "nutanix/4u" } //vm-name if *vmName == "" { log.Warn("mandatory option '--vm-name=' is not set") os.Exit(0) } var vm ntnxAPI.VMJSONAHV vm.Config.Name = *vmName if *newVMName == "" { *newVMName = *vmName } var vNew ntnxAPI.VMJSONAHV vNew.Config.Name = *newVMName var n ntnxAPI.NTNXConnection n.NutanixHost = *host n.Username = *username n.Password = *password ntnxAPI.EncodeCredentials(&n) ntnxAPI.CreateHTTPClient(&n) ntnxAPI.NutanixCheckCredentials(&n) // list mapping if specified if *listMapping { var listMappingStr string exist, _ := ntnxAPI.VMExist(&n, vm.Config.Name) if exist { vm, _ = ntnxAPI.GetVMbyName(&n, &vm) for i, elem := range vm.Config.VMDisks { if !elem.IsEmpty { containerName, _ := ntnxAPI.GetContainerNamebyUUID(&n, elem.ContainerUUID) listMappingStr = listMappingStr + elem.Addr.DeviceBus + "." + strconv.Itoa(elem.Addr.DeviceIndex) + "/" + containerName } else { listMappingStr = listMappingStr + elem.Addr.DeviceBus + "." 
+ strconv.Itoa(elem.Addr.DeviceIndex) + "/EMPTY" } if i < len(vm.Config.VMDisks)-1 { listMappingStr = listMappingStr + "," } } fmt.Println(listMappingStr) os.Exit(0) } } // both options set container and vdisk-mapping if *container != "" && *vdiskMapping != "" { log.Warn("Option --container and --vdisk-mapping are set. Only one of them is allowed") os.Exit(0) } // none options set container and vdisk-mapping if *container == "" && *vdiskMapping == "" { log.Warn("None of --container or --vdisk-mapping is set. One is mandatory") os.Exit(0) } // If container is not found exit if *container != "" { _, err := ntnxAPI.GetContainerUUIDbyName(&n, *container) if err != nil { os.Exit(1) } } var VdiskMapping []ntnxAPI.VMDisks var err error // If container is not found exit if *vdiskMapping != "" { VdiskMapping, err = parseVdiskMapping(&n) if err != nil { os.Exit(1) } } return n, vm, vNew, VdiskMapping } func main() { flag.Usage = printHelp flag.Parse() customFormatter := new(log.TextFormatter) customFormatter.TimestampFormat = "2006-01-02 15:04:05" log.SetFormatter(customFormatter) customFormatter.FullTimestamp = true var n ntnxAPI.NTNXConnection var v ntnxAPI.VMJSONAHV var vNew ntnxAPI.VMJSONAHV var d ntnxAPI.VDiskJSONREST var net ntnxAPI.NetworkREST var im ntnxAPI.ImageJSONAHV var taskUUID ntnxAPI.TaskUUID var VdiskMapping []ntnxAPI.VMDisks var existV bool var existVNew bool n, v, vNew, VdiskMapping = evaluateFlags() /* Short description what will be done 1. Upload vDisk from Source VM to Image Service. This is needed while a direct copy is not possible and wait 2. Create VM and wait 3. Clone Images to Disks and wait 4. Add network 5. delete images */ /*To-DO: 2. 
show_progress */ //check if source VM exists existV, _ = ntnxAPI.VMExist(&n, v.Config.Name) if existV { //check if new VM exists existVNew, _ = ntnxAPI.VMExist(&n, vNew.Config.Name) if existVNew { if vNew.Config.Name != v.Config.Name { log.Warn("VM " + vNew.Config.Name + " already exists") if !*overwrite { os.Exit(0) } else { vNew, _ = ntnxAPI.GetVMbyName(&n, &vNew) } } } v, _ = ntnxAPI.GetVMbyName(&n, &v) // check if Source VM is running state := ntnxAPI.GetVMState(&n, &v) if state != "off" { log.Warn("VM " + vNew.Config.Name + " is not powered off") os.Exit(0) } //check if all disks have been specified if *vdiskMapping != "" { checkVdiskMapping(v, VdiskMapping) } var taskUUIDS []ntnxAPI.TaskUUID // clone vDisk from source VM to image service for i, elem := range v.Config.VMDisks { if !elem.IsEmpty { var containerName string var containerUUID string d.VdiskUUID = elem.VMDiskUUID d.ContainerID = elem.ContainerUUID im.Name = v.Config.Name + "-" + elem.Addr.DeviceBus + "." + strconv.Itoa(elem.Addr.DeviceIndex) im.Annotation = "vm_move helper Image" // set containerName dependent on --container or --vdisk-mapping if *container != "" { containerName = *container } else { containerName, _ = ntnxAPI.GetContainerNamebyUUID(&n, VdiskMapping[i].ContainerUUID) } var uploadNeeded = true // make sure Images don't exist and overwrite if flag is enabled else WARN and continue // let all Upload take place in parallel and save taskUUID in a Array if ntnxAPI.ImageExistbyName(&n, &im) { if *overwrite { task, _ := ntnxAPI.DeleteImagebyName(&n, im.Name) ntnxAPI.WrappWaitUntilTaskFinished(&n, task.TaskUUID, "Previos existing Image "+im.Name+" deleted") // if overwrite is disabled } else { log.Info("Image " + im.Name + " already exists - will use existing one instead") uploadNeeded = false } } if uploadNeeded { // let all Upload take place in parallel and save taskUUID in a Array // if container stays the same clone local if *vdiskMapping != "" { containerUUID = 
VdiskMapping[i].ContainerUUID } else { containerUUID, _ = ntnxAPI.GetContainerIDbyName(&n, containerName) } if containerUUID == elem.ContainerUUID { taskUUID, _ = ntnxAPI.CreateImageFromVdisk(&n, &d, &im) log.Info("Start cloning " + im.Name + " to image service") } else { taskUUID, _ = ntnxAPI.CreateImageFromURL(&n, &d, &im, containerName) log.Info("Start cloning " + im.Name + " to image service from URL") } } taskUUIDS = append(taskUUIDS, taskUUID) } } //Wait that all disks have been clone to new container- may take a while for _, task := range taskUUIDS { ntnxAPI.WrappWaitUntilTaskFinished(&n, task.TaskUUID, "Image from disk created") } //copy all VM settings vNew.Config.Description = v.Config.Description vNew.Config.MemoryMb = v.Config.MemoryMb vNew.Config.NumVcpus = v.Config.NumVcpus vNew.Config.NumCoresPerVcpu = v.Config.NumCoresPerVcpu // Delete target VM if overwrite mode if *overwrite && existVNew { taskUUID, _ = ntnxAPI.DeleteVM(&n, &vNew) ntnxAPI.WrappWaitUntilTaskFinished(&n, taskUUID.TaskUUID, "New VM successfull deleted") } // Create target VM taskUUID, _ = ntnxAPI.CreateVMAHV(&n, &vNew) task, err := ntnxAPI.WaitUntilTaskFinished(&n, taskUUID.TaskUUID) if err != nil { log.Fatal("Task does not exist") } else { log.Info("VM " + vNew.Config.Name + " created") } vNew.UUID = ntnxAPI.GetVMIDbyTask(&n, &task) // Create vdisk on new VM from images for _, elem := range v.Config.VMDisks { if elem.IsEmpty { taskUUID, _ = ntnxAPI.CreateCDforVMwithDetails(&n, &vNew, elem.Addr.DeviceBus, strconv.Itoa(elem.Addr.DeviceIndex)) ntnxAPI.WrappWaitUntilTaskFinished(&n, taskUUID.TaskUUID, "CD successfully created") } else { im, _ = ntnxAPI.GetImagebyName(&n, v.Config.Name+"-"+elem.Addr.DeviceBus+"."+strconv.Itoa(elem.Addr.DeviceIndex)) if elem.IsCdrom { taskUUID, _ = ntnxAPI.CloneCDforVMwithDetails(&n, &vNew, &im, elem.Addr.DeviceBus) } else { taskUUID, _ = ntnxAPI.CloneDiskforVMwithDetails(&n, &vNew, &im, elem.Addr.DeviceBus) } ntnxAPI.WrappWaitUntilTaskFinished(&n, 
taskUUID.TaskUUID, "Disk ID"+v.UUID+" cloned") } } // Create vNics for _, elem := range v.Config.VMNics { if *delete { // delete nic from source VM because only one NIC with same MAC may exist taskUUID, _ = ntnxAPI.DelteVNicforVM(&n, &v, elem.MacAddress) ntnxAPI.WrappWaitUntilTaskFinished(&n, taskUUID.TaskUUID, "Nic with MAC "+elem.MacAddress+" created") // Create nic net.UUID = elem.NetworkUUID taskUUID, _ = ntnxAPI.CreateVNicforVMwithMAC(&n, &vNew, &net, elem.MacAddress) ntnxAPI.WrappWaitUntilTaskFinished(&n, taskUUID.TaskUUID, "Nic with MAC "+elem.MacAddress+" created") } else { net.UUID = elem.NetworkUUID taskUUID, _ = ntnxAPI.CreateVNicforVM(&n, &vNew, &net) ntnxAPI.WrappWaitUntilTaskFinished(&n, taskUUID.TaskUUID, "Nic with MAC "+elem.MacAddress+" created") } } // delete images if !*keepImages { for _, elem := range v.Config.VMDisks { if !elem.IsEmpty { d.VdiskUUID = elem.VMDiskUUID d.ContainerID = elem.ContainerUUID im.Name = v.Config.Name + "-" + elem.Addr.DeviceBus + "." + strconv.Itoa(elem.Addr.DeviceIndex) im.Annotation = "vm_move helper Image" task, _ := ntnxAPI.DeleteImagebyName(&n, im.Name) ntnxAPI.WrappWaitUntilTaskFinished(&n, task.TaskUUID, "Image "+im.Name+" deleted") } } } // delete VM if flag is set if *delete { taskUUID, _ = ntnxAPI.DeleteVM(&n, &v) ntnxAPI.WrappWaitUntilTaskFinished(&n, taskUUID.TaskUUID, "VM "+v.Config.Name+" successfull deleted") } } else { log.Warn("VM vm-name=" + v.Config.Name + " does not exist") } }
{ defer func() { if err := recover(); err != nil { log.Fatal("--vdisk-mapping is not correct") os.Exit(1) } }() for i, elem := range v.Config.VMDisks { if elem.Addr.DeviceBus != VdiskMapping[i].Addr.DeviceBus || elem.Addr.DeviceIndex != VdiskMapping[i].Addr.DeviceIndex { log.Error("--vdisk-mapping some source vdisks are not mapped") os.Exit(1) } } if len(v.Config.VMDisks) != len(VdiskMapping) { log.Error("--vdisk-mapping some source vdisks are not mapped") os.Exit(1) } }
identifier_body
move_vm.go
/* Copyright (c) 2016 Thomas Findelkind # # This program is free software: you can redistribute it and/or modify it under # the terms of the GNU General Public License as published by the Free Software # Foundation, either version 3 of the License, or (at your option) any later # version. # # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more # details. # # You should have received a copy of the GNU General Public License along with # this program. If not, see <http://www.gnu.org/licenses/>. # # MORE ABOUT THIS SCRIPT AVAILABLE IN THE README AND AT: # # http://tfindelkind.com # # ---------------------------------------------------------------------------- */ package main import ( log "github.com/Sirupsen/logrus" ntnxAPI "github.com/Tfindelkind/acropolis-sdk-go" "bufio" "flag" "fmt" "os" "strconv" "strings" ) const appVersion = "0.9 beta" var ( host *string username *string password *string vmName *string newVMName *string vdiskMapping *string container *string listMapping *bool keepImages *bool debug *bool delete *bool overwrite *bool help *bool version *bool ) func init() { host = flag.String("host", "", "a string") username = flag.String("username", "", "a string") password = flag.String("password", "", "a string") vmName = flag.String("vm-name", "", "a string") newVMName = flag.String("new-vm-name", "", "a string") vdiskMapping = flag.String("vdisk-mapping", "", "a string") container = flag.String("container", "", "a string") listMapping = flag.Bool("list-mapping", false, "a bool") debug = flag.Bool("debug", false, "a bool") keepImages = flag.Bool("keep-images", false, "a bool") delete = flag.Bool("delete", false, "a bool") overwrite = flag.Bool("overwrite", false, "a bool") help = flag.Bool("help", false, "a bool") version = flag.Bool("version", false, "a bool") } func printHelp() { 
fmt.Println("Usage: move_vm [OPTIONS]") fmt.Println("move_vm [ --help | --version ]") fmt.Println("") fmt.Println("FOR NUTANIX AHV ONLY- clones a VM from one container to another") fmt.Println("vNic MAC Addresses will change unless --delete is used") fmt.Println("") fmt.Println("Options:") fmt.Println("") fmt.Println("--host Specify CVM host or Cluster IP") fmt.Println("--username Specify username for connect to host") fmt.Println("--password Specify password for user") fmt.Println("--vm-name Specify Virtual Machine name which will be moved") fmt.Println("--new-vm-name New Virtual Machine name if specified (clone)") fmt.Println("--vdisk-mapping Specify the container mapping for each vdisk - ORDER IS IMPORTANT") fmt.Println("--list-mapping Shows the actual vdisk-mapping") fmt.Println("--container Specify the container where vm will be moved to") fmt.Println("--debug Enables debug mode") fmt.Println("--keep-images If enabled clones to image service will not be deleted") fmt.Println("--delete Deletes source VM - ARE YOU REALLY SURE?") fmt.Println("--overwrite Overwrites target VM/Images (delete and creates new one)") fmt.Println("--help List this help") fmt.Println("--version Show the move_vm version") fmt.Println("") fmt.Println("Example:") fmt.Println("") fmt.Println("move_vm --host=NTNX-CVM --username=admin --password=nutanix/4u --vm-name=MyVM --container=ISO") fmt.Println("move_vm --host=NTNX-CVM --username=admin --password=nutanix/4u --vm-name=MyVM --vdisk-mapping=scsi.0/ISO,scsi.1/Prod2") fmt.Println("") } // parse --vdisk-mapping or --container and checks if all container exist func parseVdiskMapping(n *ntnxAPI.NTNXConnection) ([]ntnxAPI.VMDisks, error) { defer func() { if err := recover(); err != nil { log.Fatal("--vdisk-mapping seems not to have right format") os.Exit(1) } }() var vdiskMappings []ntnxAPI.VMDisks var VMDisk ntnxAPI.VMDisks result := strings.Split(*vdiskMapping, ",") // add Mappings for i := range result { res := strings.Split(result[i], "/") 
resAddr := strings.Split(res[0], ".") VMDisk.Addr.DeviceBus = resAddr[0] VMDisk.Addr.DeviceIndex, _ = strconv.Atoi(resAddr[1]) // check if right format is used if !(VMDisk.Addr.DeviceBus == "scsi" || VMDisk.Addr.DeviceBus == "pci" || VMDisk.Addr.DeviceBus == "ide") { log.Error("--vdisk-mapping seems not to have right format") os.Exit(1) } if !(VMDisk.Addr.DeviceIndex >= 0 && VMDisk.Addr.DeviceIndex <= 255) { log.Error("--vdisk-mapping seems not to have right format") os.Exit(1) } if res[1] != "EMPTY" { containerUUID, err := ntnxAPI.GetContainerUUIDbyName(n, res[1]) if err != nil { os.Exit(1) } VMDisk.ContainerUUID = containerUUID } vdiskMappings = append(vdiskMappings, VMDisk) } return vdiskMappings, nil } func checkVdiskMapping(v ntnxAPI.VMJSONAHV, VdiskMapping []ntnxAPI.VMDisks) { defer func() { if err := recover(); err != nil { log.Fatal("--vdisk-mapping is not correct") os.Exit(1) } }() for i, elem := range v.Config.VMDisks { if elem.Addr.DeviceBus != VdiskMapping[i].Addr.DeviceBus || elem.Addr.DeviceIndex != VdiskMapping[i].Addr.DeviceIndex { log.Error("--vdisk-mapping some source vdisks are not mapped") os.Exit(1) } } if len(v.Config.VMDisks) != len(VdiskMapping) { log.Error("--vdisk-mapping some source vdisks are not mapped") os.Exit(1) } } func evaluateFlags() (ntnxAPI.NTNXConnection, ntnxAPI.VMJSONAHV, ntnxAPI.VMJSONAHV, []ntnxAPI.VMDisks) { //help if *help { printHelp() os.Exit(0) } //version if *version { fmt.Println("Version: " + appVersion) os.Exit(0) } //debug if *debug { log.SetLevel(log.DebugLevel) } else { log.SetLevel(log.InfoLevel) } //delete if *delete { reader := bufio.NewReader(os.Stdin) fmt.Println("THIS WILL DELETE source VM: " + *vmName) fmt.Print("If you want to continue type YES: ") text, _ := reader.ReadString('\n') fmt.Println(text) if strings.TrimRight(text, "\n") != "YES" { os.Exit(0) } } //host if *host == "" { log.Warn("mandatory option '--host=' is not set") os.Exit(0) } //username if *username == "" { log.Warn("option 
'--username=' is not set Default: admin is used") *username = "admin" } //password if *password == "" { log.Warn("option '--password=' is not set Default: nutanix/4u is used") *password = "nutanix/4u" } //vm-name if *vmName == "" { log.Warn("mandatory option '--vm-name=' is not set") os.Exit(0) } var vm ntnxAPI.VMJSONAHV vm.Config.Name = *vmName if *newVMName == "" { *newVMName = *vmName } var vNew ntnxAPI.VMJSONAHV vNew.Config.Name = *newVMName var n ntnxAPI.NTNXConnection n.NutanixHost = *host n.Username = *username n.Password = *password ntnxAPI.EncodeCredentials(&n) ntnxAPI.CreateHTTPClient(&n) ntnxAPI.NutanixCheckCredentials(&n) // list mapping if specified if *listMapping { var listMappingStr string exist, _ := ntnxAPI.VMExist(&n, vm.Config.Name) if exist { vm, _ = ntnxAPI.GetVMbyName(&n, &vm) for i, elem := range vm.Config.VMDisks { if !elem.IsEmpty { containerName, _ := ntnxAPI.GetContainerNamebyUUID(&n, elem.ContainerUUID) listMappingStr = listMappingStr + elem.Addr.DeviceBus + "." + strconv.Itoa(elem.Addr.DeviceIndex) + "/" + containerName } else { listMappingStr = listMappingStr + elem.Addr.DeviceBus + "." + strconv.Itoa(elem.Addr.DeviceIndex) + "/EMPTY" } if i < len(vm.Config.VMDisks)-1 { listMappingStr = listMappingStr + "," } } fmt.Println(listMappingStr) os.Exit(0) } } // both options set container and vdisk-mapping if *container != "" && *vdiskMapping != "" { log.Warn("Option --container and --vdisk-mapping are set. Only one of them is allowed") os.Exit(0) } // none options set container and vdisk-mapping if *container == "" && *vdiskMapping == "" { log.Warn("None of --container or --vdisk-mapping is set. One is mandatory") os.Exit(0) } // If container is not found exit if *container != "" { _, err := ntnxAPI.GetContainerUUIDbyName(&n, *container) if err != nil { os.Exit(1) } } var VdiskMapping []ntnxAPI.VMDisks var err error // If container is not found exit if *vdiskMapping != ""
return n, vm, vNew, VdiskMapping } func main() { flag.Usage = printHelp flag.Parse() customFormatter := new(log.TextFormatter) customFormatter.TimestampFormat = "2006-01-02 15:04:05" log.SetFormatter(customFormatter) customFormatter.FullTimestamp = true var n ntnxAPI.NTNXConnection var v ntnxAPI.VMJSONAHV var vNew ntnxAPI.VMJSONAHV var d ntnxAPI.VDiskJSONREST var net ntnxAPI.NetworkREST var im ntnxAPI.ImageJSONAHV var taskUUID ntnxAPI.TaskUUID var VdiskMapping []ntnxAPI.VMDisks var existV bool var existVNew bool n, v, vNew, VdiskMapping = evaluateFlags() /* Short description what will be done 1. Upload vDisk from Source VM to Image Service. This is needed while a direct copy is not possible and wait 2. Create VM and wait 3. Clone Images to Disks and wait 4. Add network 5. delete images */ /*To-DO: 2. show_progress */ //check if source VM exists existV, _ = ntnxAPI.VMExist(&n, v.Config.Name) if existV { //check if new VM exists existVNew, _ = ntnxAPI.VMExist(&n, vNew.Config.Name) if existVNew { if vNew.Config.Name != v.Config.Name { log.Warn("VM " + vNew.Config.Name + " already exists") if !*overwrite { os.Exit(0) } else { vNew, _ = ntnxAPI.GetVMbyName(&n, &vNew) } } } v, _ = ntnxAPI.GetVMbyName(&n, &v) // check if Source VM is running state := ntnxAPI.GetVMState(&n, &v) if state != "off" { log.Warn("VM " + vNew.Config.Name + " is not powered off") os.Exit(0) } //check if all disks have been specified if *vdiskMapping != "" { checkVdiskMapping(v, VdiskMapping) } var taskUUIDS []ntnxAPI.TaskUUID // clone vDisk from source VM to image service for i, elem := range v.Config.VMDisks { if !elem.IsEmpty { var containerName string var containerUUID string d.VdiskUUID = elem.VMDiskUUID d.ContainerID = elem.ContainerUUID im.Name = v.Config.Name + "-" + elem.Addr.DeviceBus + "." 
+ strconv.Itoa(elem.Addr.DeviceIndex) im.Annotation = "vm_move helper Image" // set containerName dependent on --container or --vdisk-mapping if *container != "" { containerName = *container } else { containerName, _ = ntnxAPI.GetContainerNamebyUUID(&n, VdiskMapping[i].ContainerUUID) } var uploadNeeded = true // make sure Images don't exist and overwrite if flag is enabled else WARN and continue // let all Upload take place in parallel and save taskUUID in a Array if ntnxAPI.ImageExistbyName(&n, &im) { if *overwrite { task, _ := ntnxAPI.DeleteImagebyName(&n, im.Name) ntnxAPI.WrappWaitUntilTaskFinished(&n, task.TaskUUID, "Previos existing Image "+im.Name+" deleted") // if overwrite is disabled } else { log.Info("Image " + im.Name + " already exists - will use existing one instead") uploadNeeded = false } } if uploadNeeded { // let all Upload take place in parallel and save taskUUID in a Array // if container stays the same clone local if *vdiskMapping != "" { containerUUID = VdiskMapping[i].ContainerUUID } else { containerUUID, _ = ntnxAPI.GetContainerIDbyName(&n, containerName) } if containerUUID == elem.ContainerUUID { taskUUID, _ = ntnxAPI.CreateImageFromVdisk(&n, &d, &im) log.Info("Start cloning " + im.Name + " to image service") } else { taskUUID, _ = ntnxAPI.CreateImageFromURL(&n, &d, &im, containerName) log.Info("Start cloning " + im.Name + " to image service from URL") } } taskUUIDS = append(taskUUIDS, taskUUID) } } //Wait that all disks have been clone to new container- may take a while for _, task := range taskUUIDS { ntnxAPI.WrappWaitUntilTaskFinished(&n, task.TaskUUID, "Image from disk created") } //copy all VM settings vNew.Config.Description = v.Config.Description vNew.Config.MemoryMb = v.Config.MemoryMb vNew.Config.NumVcpus = v.Config.NumVcpus vNew.Config.NumCoresPerVcpu = v.Config.NumCoresPerVcpu // Delete target VM if overwrite mode if *overwrite && existVNew { taskUUID, _ = ntnxAPI.DeleteVM(&n, &vNew) ntnxAPI.WrappWaitUntilTaskFinished(&n, 
taskUUID.TaskUUID, "New VM successfull deleted") } // Create target VM taskUUID, _ = ntnxAPI.CreateVMAHV(&n, &vNew) task, err := ntnxAPI.WaitUntilTaskFinished(&n, taskUUID.TaskUUID) if err != nil { log.Fatal("Task does not exist") } else { log.Info("VM " + vNew.Config.Name + " created") } vNew.UUID = ntnxAPI.GetVMIDbyTask(&n, &task) // Create vdisk on new VM from images for _, elem := range v.Config.VMDisks { if elem.IsEmpty { taskUUID, _ = ntnxAPI.CreateCDforVMwithDetails(&n, &vNew, elem.Addr.DeviceBus, strconv.Itoa(elem.Addr.DeviceIndex)) ntnxAPI.WrappWaitUntilTaskFinished(&n, taskUUID.TaskUUID, "CD successfully created") } else { im, _ = ntnxAPI.GetImagebyName(&n, v.Config.Name+"-"+elem.Addr.DeviceBus+"."+strconv.Itoa(elem.Addr.DeviceIndex)) if elem.IsCdrom { taskUUID, _ = ntnxAPI.CloneCDforVMwithDetails(&n, &vNew, &im, elem.Addr.DeviceBus) } else { taskUUID, _ = ntnxAPI.CloneDiskforVMwithDetails(&n, &vNew, &im, elem.Addr.DeviceBus) } ntnxAPI.WrappWaitUntilTaskFinished(&n, taskUUID.TaskUUID, "Disk ID"+v.UUID+" cloned") } } // Create vNics for _, elem := range v.Config.VMNics { if *delete { // delete nic from source VM because only one NIC with same MAC may exist taskUUID, _ = ntnxAPI.DelteVNicforVM(&n, &v, elem.MacAddress) ntnxAPI.WrappWaitUntilTaskFinished(&n, taskUUID.TaskUUID, "Nic with MAC "+elem.MacAddress+" created") // Create nic net.UUID = elem.NetworkUUID taskUUID, _ = ntnxAPI.CreateVNicforVMwithMAC(&n, &vNew, &net, elem.MacAddress) ntnxAPI.WrappWaitUntilTaskFinished(&n, taskUUID.TaskUUID, "Nic with MAC "+elem.MacAddress+" created") } else { net.UUID = elem.NetworkUUID taskUUID, _ = ntnxAPI.CreateVNicforVM(&n, &vNew, &net) ntnxAPI.WrappWaitUntilTaskFinished(&n, taskUUID.TaskUUID, "Nic with MAC "+elem.MacAddress+" created") } } // delete images if !*keepImages { for _, elem := range v.Config.VMDisks { if !elem.IsEmpty { d.VdiskUUID = elem.VMDiskUUID d.ContainerID = elem.ContainerUUID im.Name = v.Config.Name + "-" + elem.Addr.DeviceBus + "." 
+ strconv.Itoa(elem.Addr.DeviceIndex) im.Annotation = "vm_move helper Image" task, _ := ntnxAPI.DeleteImagebyName(&n, im.Name) ntnxAPI.WrappWaitUntilTaskFinished(&n, task.TaskUUID, "Image "+im.Name+" deleted") } } } // delete VM if flag is set if *delete { taskUUID, _ = ntnxAPI.DeleteVM(&n, &v) ntnxAPI.WrappWaitUntilTaskFinished(&n, taskUUID.TaskUUID, "VM "+v.Config.Name+" successfull deleted") } } else { log.Warn("VM vm-name=" + v.Config.Name + " does not exist") } }
{ VdiskMapping, err = parseVdiskMapping(&n) if err != nil { os.Exit(1) } }
conditional_block
move_vm.go
/* Copyright (c) 2016 Thomas Findelkind # # This program is free software: you can redistribute it and/or modify it under # the terms of the GNU General Public License as published by the Free Software # Foundation, either version 3 of the License, or (at your option) any later # version. # # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more # details. # # You should have received a copy of the GNU General Public License along with # this program. If not, see <http://www.gnu.org/licenses/>. # # MORE ABOUT THIS SCRIPT AVAILABLE IN THE README AND AT: # # http://tfindelkind.com # # ---------------------------------------------------------------------------- */ package main import ( log "github.com/Sirupsen/logrus" ntnxAPI "github.com/Tfindelkind/acropolis-sdk-go" "bufio" "flag" "fmt" "os" "strconv" "strings" ) const appVersion = "0.9 beta" var ( host *string username *string password *string vmName *string newVMName *string vdiskMapping *string container *string listMapping *bool keepImages *bool debug *bool delete *bool overwrite *bool help *bool version *bool ) func init() { host = flag.String("host", "", "a string") username = flag.String("username", "", "a string") password = flag.String("password", "", "a string") vmName = flag.String("vm-name", "", "a string") newVMName = flag.String("new-vm-name", "", "a string") vdiskMapping = flag.String("vdisk-mapping", "", "a string") container = flag.String("container", "", "a string") listMapping = flag.Bool("list-mapping", false, "a bool") debug = flag.Bool("debug", false, "a bool") keepImages = flag.Bool("keep-images", false, "a bool") delete = flag.Bool("delete", false, "a bool") overwrite = flag.Bool("overwrite", false, "a bool") help = flag.Bool("help", false, "a bool") version = flag.Bool("version", false, "a bool") } func printHelp() { 
fmt.Println("Usage: move_vm [OPTIONS]") fmt.Println("move_vm [ --help | --version ]") fmt.Println("") fmt.Println("FOR NUTANIX AHV ONLY- clones a VM from one container to another") fmt.Println("vNic MAC Addresses will change unless --delete is used") fmt.Println("") fmt.Println("Options:") fmt.Println("") fmt.Println("--host Specify CVM host or Cluster IP") fmt.Println("--username Specify username for connect to host") fmt.Println("--password Specify password for user") fmt.Println("--vm-name Specify Virtual Machine name which will be moved") fmt.Println("--new-vm-name New Virtual Machine name if specified (clone)") fmt.Println("--vdisk-mapping Specify the container mapping for each vdisk - ORDER IS IMPORTANT") fmt.Println("--list-mapping Shows the actual vdisk-mapping") fmt.Println("--container Specify the container where vm will be moved to") fmt.Println("--debug Enables debug mode") fmt.Println("--keep-images If enabled clones to image service will not be deleted") fmt.Println("--delete Deletes source VM - ARE YOU REALLY SURE?") fmt.Println("--overwrite Overwrites target VM/Images (delete and creates new one)") fmt.Println("--help List this help") fmt.Println("--version Show the move_vm version") fmt.Println("") fmt.Println("Example:") fmt.Println("") fmt.Println("move_vm --host=NTNX-CVM --username=admin --password=nutanix/4u --vm-name=MyVM --container=ISO") fmt.Println("move_vm --host=NTNX-CVM --username=admin --password=nutanix/4u --vm-name=MyVM --vdisk-mapping=scsi.0/ISO,scsi.1/Prod2") fmt.Println("") } // parse --vdisk-mapping or --container and checks if all container exist func
(n *ntnxAPI.NTNXConnection) ([]ntnxAPI.VMDisks, error) { defer func() { if err := recover(); err != nil { log.Fatal("--vdisk-mapping seems not to have right format") os.Exit(1) } }() var vdiskMappings []ntnxAPI.VMDisks var VMDisk ntnxAPI.VMDisks result := strings.Split(*vdiskMapping, ",") // add Mappings for i := range result { res := strings.Split(result[i], "/") resAddr := strings.Split(res[0], ".") VMDisk.Addr.DeviceBus = resAddr[0] VMDisk.Addr.DeviceIndex, _ = strconv.Atoi(resAddr[1]) // check if right format is used if !(VMDisk.Addr.DeviceBus == "scsi" || VMDisk.Addr.DeviceBus == "pci" || VMDisk.Addr.DeviceBus == "ide") { log.Error("--vdisk-mapping seems not to have right format") os.Exit(1) } if !(VMDisk.Addr.DeviceIndex >= 0 && VMDisk.Addr.DeviceIndex <= 255) { log.Error("--vdisk-mapping seems not to have right format") os.Exit(1) } if res[1] != "EMPTY" { containerUUID, err := ntnxAPI.GetContainerUUIDbyName(n, res[1]) if err != nil { os.Exit(1) } VMDisk.ContainerUUID = containerUUID } vdiskMappings = append(vdiskMappings, VMDisk) } return vdiskMappings, nil } func checkVdiskMapping(v ntnxAPI.VMJSONAHV, VdiskMapping []ntnxAPI.VMDisks) { defer func() { if err := recover(); err != nil { log.Fatal("--vdisk-mapping is not correct") os.Exit(1) } }() for i, elem := range v.Config.VMDisks { if elem.Addr.DeviceBus != VdiskMapping[i].Addr.DeviceBus || elem.Addr.DeviceIndex != VdiskMapping[i].Addr.DeviceIndex { log.Error("--vdisk-mapping some source vdisks are not mapped") os.Exit(1) } } if len(v.Config.VMDisks) != len(VdiskMapping) { log.Error("--vdisk-mapping some source vdisks are not mapped") os.Exit(1) } } func evaluateFlags() (ntnxAPI.NTNXConnection, ntnxAPI.VMJSONAHV, ntnxAPI.VMJSONAHV, []ntnxAPI.VMDisks) { //help if *help { printHelp() os.Exit(0) } //version if *version { fmt.Println("Version: " + appVersion) os.Exit(0) } //debug if *debug { log.SetLevel(log.DebugLevel) } else { log.SetLevel(log.InfoLevel) } //delete if *delete { reader := 
bufio.NewReader(os.Stdin) fmt.Println("THIS WILL DELETE source VM: " + *vmName) fmt.Print("If you want to continue type YES: ") text, _ := reader.ReadString('\n') fmt.Println(text) if strings.TrimRight(text, "\n") != "YES" { os.Exit(0) } } //host if *host == "" { log.Warn("mandatory option '--host=' is not set") os.Exit(0) } //username if *username == "" { log.Warn("option '--username=' is not set Default: admin is used") *username = "admin" } //password if *password == "" { log.Warn("option '--password=' is not set Default: nutanix/4u is used") *password = "nutanix/4u" } //vm-name if *vmName == "" { log.Warn("mandatory option '--vm-name=' is not set") os.Exit(0) } var vm ntnxAPI.VMJSONAHV vm.Config.Name = *vmName if *newVMName == "" { *newVMName = *vmName } var vNew ntnxAPI.VMJSONAHV vNew.Config.Name = *newVMName var n ntnxAPI.NTNXConnection n.NutanixHost = *host n.Username = *username n.Password = *password ntnxAPI.EncodeCredentials(&n) ntnxAPI.CreateHTTPClient(&n) ntnxAPI.NutanixCheckCredentials(&n) // list mapping if specified if *listMapping { var listMappingStr string exist, _ := ntnxAPI.VMExist(&n, vm.Config.Name) if exist { vm, _ = ntnxAPI.GetVMbyName(&n, &vm) for i, elem := range vm.Config.VMDisks { if !elem.IsEmpty { containerName, _ := ntnxAPI.GetContainerNamebyUUID(&n, elem.ContainerUUID) listMappingStr = listMappingStr + elem.Addr.DeviceBus + "." + strconv.Itoa(elem.Addr.DeviceIndex) + "/" + containerName } else { listMappingStr = listMappingStr + elem.Addr.DeviceBus + "." + strconv.Itoa(elem.Addr.DeviceIndex) + "/EMPTY" } if i < len(vm.Config.VMDisks)-1 { listMappingStr = listMappingStr + "," } } fmt.Println(listMappingStr) os.Exit(0) } } // both options set container and vdisk-mapping if *container != "" && *vdiskMapping != "" { log.Warn("Option --container and --vdisk-mapping are set. 
Only one of them is allowed") os.Exit(0) } // none options set container and vdisk-mapping if *container == "" && *vdiskMapping == "" { log.Warn("None of --container or --vdisk-mapping is set. One is mandatory") os.Exit(0) } // If container is not found exit if *container != "" { _, err := ntnxAPI.GetContainerUUIDbyName(&n, *container) if err != nil { os.Exit(1) } } var VdiskMapping []ntnxAPI.VMDisks var err error // If container is not found exit if *vdiskMapping != "" { VdiskMapping, err = parseVdiskMapping(&n) if err != nil { os.Exit(1) } } return n, vm, vNew, VdiskMapping } func main() { flag.Usage = printHelp flag.Parse() customFormatter := new(log.TextFormatter) customFormatter.TimestampFormat = "2006-01-02 15:04:05" log.SetFormatter(customFormatter) customFormatter.FullTimestamp = true var n ntnxAPI.NTNXConnection var v ntnxAPI.VMJSONAHV var vNew ntnxAPI.VMJSONAHV var d ntnxAPI.VDiskJSONREST var net ntnxAPI.NetworkREST var im ntnxAPI.ImageJSONAHV var taskUUID ntnxAPI.TaskUUID var VdiskMapping []ntnxAPI.VMDisks var existV bool var existVNew bool n, v, vNew, VdiskMapping = evaluateFlags() /* Short description what will be done 1. Upload vDisk from Source VM to Image Service. This is needed while a direct copy is not possible and wait 2. Create VM and wait 3. Clone Images to Disks and wait 4. Add network 5. delete images */ /*To-DO: 2. 
show_progress */ //check if source VM exists existV, _ = ntnxAPI.VMExist(&n, v.Config.Name) if existV { //check if new VM exists existVNew, _ = ntnxAPI.VMExist(&n, vNew.Config.Name) if existVNew { if vNew.Config.Name != v.Config.Name { log.Warn("VM " + vNew.Config.Name + " already exists") if !*overwrite { os.Exit(0) } else { vNew, _ = ntnxAPI.GetVMbyName(&n, &vNew) } } } v, _ = ntnxAPI.GetVMbyName(&n, &v) // check if Source VM is running state := ntnxAPI.GetVMState(&n, &v) if state != "off" { log.Warn("VM " + vNew.Config.Name + " is not powered off") os.Exit(0) } //check if all disks have been specified if *vdiskMapping != "" { checkVdiskMapping(v, VdiskMapping) } var taskUUIDS []ntnxAPI.TaskUUID // clone vDisk from source VM to image service for i, elem := range v.Config.VMDisks { if !elem.IsEmpty { var containerName string var containerUUID string d.VdiskUUID = elem.VMDiskUUID d.ContainerID = elem.ContainerUUID im.Name = v.Config.Name + "-" + elem.Addr.DeviceBus + "." + strconv.Itoa(elem.Addr.DeviceIndex) im.Annotation = "vm_move helper Image" // set containerName dependent on --container or --vdisk-mapping if *container != "" { containerName = *container } else { containerName, _ = ntnxAPI.GetContainerNamebyUUID(&n, VdiskMapping[i].ContainerUUID) } var uploadNeeded = true // make sure Images don't exist and overwrite if flag is enabled else WARN and continue // let all Upload take place in parallel and save taskUUID in a Array if ntnxAPI.ImageExistbyName(&n, &im) { if *overwrite { task, _ := ntnxAPI.DeleteImagebyName(&n, im.Name) ntnxAPI.WrappWaitUntilTaskFinished(&n, task.TaskUUID, "Previos existing Image "+im.Name+" deleted") // if overwrite is disabled } else { log.Info("Image " + im.Name + " already exists - will use existing one instead") uploadNeeded = false } } if uploadNeeded { // let all Upload take place in parallel and save taskUUID in a Array // if container stays the same clone local if *vdiskMapping != "" { containerUUID = 
VdiskMapping[i].ContainerUUID } else { containerUUID, _ = ntnxAPI.GetContainerIDbyName(&n, containerName) } if containerUUID == elem.ContainerUUID { taskUUID, _ = ntnxAPI.CreateImageFromVdisk(&n, &d, &im) log.Info("Start cloning " + im.Name + " to image service") } else { taskUUID, _ = ntnxAPI.CreateImageFromURL(&n, &d, &im, containerName) log.Info("Start cloning " + im.Name + " to image service from URL") } } taskUUIDS = append(taskUUIDS, taskUUID) } } //Wait that all disks have been clone to new container- may take a while for _, task := range taskUUIDS { ntnxAPI.WrappWaitUntilTaskFinished(&n, task.TaskUUID, "Image from disk created") } //copy all VM settings vNew.Config.Description = v.Config.Description vNew.Config.MemoryMb = v.Config.MemoryMb vNew.Config.NumVcpus = v.Config.NumVcpus vNew.Config.NumCoresPerVcpu = v.Config.NumCoresPerVcpu // Delete target VM if overwrite mode if *overwrite && existVNew { taskUUID, _ = ntnxAPI.DeleteVM(&n, &vNew) ntnxAPI.WrappWaitUntilTaskFinished(&n, taskUUID.TaskUUID, "New VM successfull deleted") } // Create target VM taskUUID, _ = ntnxAPI.CreateVMAHV(&n, &vNew) task, err := ntnxAPI.WaitUntilTaskFinished(&n, taskUUID.TaskUUID) if err != nil { log.Fatal("Task does not exist") } else { log.Info("VM " + vNew.Config.Name + " created") } vNew.UUID = ntnxAPI.GetVMIDbyTask(&n, &task) // Create vdisk on new VM from images for _, elem := range v.Config.VMDisks { if elem.IsEmpty { taskUUID, _ = ntnxAPI.CreateCDforVMwithDetails(&n, &vNew, elem.Addr.DeviceBus, strconv.Itoa(elem.Addr.DeviceIndex)) ntnxAPI.WrappWaitUntilTaskFinished(&n, taskUUID.TaskUUID, "CD successfully created") } else { im, _ = ntnxAPI.GetImagebyName(&n, v.Config.Name+"-"+elem.Addr.DeviceBus+"."+strconv.Itoa(elem.Addr.DeviceIndex)) if elem.IsCdrom { taskUUID, _ = ntnxAPI.CloneCDforVMwithDetails(&n, &vNew, &im, elem.Addr.DeviceBus) } else { taskUUID, _ = ntnxAPI.CloneDiskforVMwithDetails(&n, &vNew, &im, elem.Addr.DeviceBus) } ntnxAPI.WrappWaitUntilTaskFinished(&n, 
taskUUID.TaskUUID, "Disk ID"+v.UUID+" cloned") } } // Create vNics for _, elem := range v.Config.VMNics { if *delete { // delete nic from source VM because only one NIC with same MAC may exist taskUUID, _ = ntnxAPI.DelteVNicforVM(&n, &v, elem.MacAddress) ntnxAPI.WrappWaitUntilTaskFinished(&n, taskUUID.TaskUUID, "Nic with MAC "+elem.MacAddress+" created") // Create nic net.UUID = elem.NetworkUUID taskUUID, _ = ntnxAPI.CreateVNicforVMwithMAC(&n, &vNew, &net, elem.MacAddress) ntnxAPI.WrappWaitUntilTaskFinished(&n, taskUUID.TaskUUID, "Nic with MAC "+elem.MacAddress+" created") } else { net.UUID = elem.NetworkUUID taskUUID, _ = ntnxAPI.CreateVNicforVM(&n, &vNew, &net) ntnxAPI.WrappWaitUntilTaskFinished(&n, taskUUID.TaskUUID, "Nic with MAC "+elem.MacAddress+" created") } } // delete images if !*keepImages { for _, elem := range v.Config.VMDisks { if !elem.IsEmpty { d.VdiskUUID = elem.VMDiskUUID d.ContainerID = elem.ContainerUUID im.Name = v.Config.Name + "-" + elem.Addr.DeviceBus + "." + strconv.Itoa(elem.Addr.DeviceIndex) im.Annotation = "vm_move helper Image" task, _ := ntnxAPI.DeleteImagebyName(&n, im.Name) ntnxAPI.WrappWaitUntilTaskFinished(&n, task.TaskUUID, "Image "+im.Name+" deleted") } } } // delete VM if flag is set if *delete { taskUUID, _ = ntnxAPI.DeleteVM(&n, &v) ntnxAPI.WrappWaitUntilTaskFinished(&n, taskUUID.TaskUUID, "VM "+v.Config.Name+" successfull deleted") } } else { log.Warn("VM vm-name=" + v.Config.Name + " does not exist") } }
parseVdiskMapping
identifier_name
move_vm.go
/* Copyright (c) 2016 Thomas Findelkind # # This program is free software: you can redistribute it and/or modify it under # the terms of the GNU General Public License as published by the Free Software # Foundation, either version 3 of the License, or (at your option) any later # version. # # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more # details. # # You should have received a copy of the GNU General Public License along with # this program. If not, see <http://www.gnu.org/licenses/>. # # MORE ABOUT THIS SCRIPT AVAILABLE IN THE README AND AT: # # http://tfindelkind.com # # ---------------------------------------------------------------------------- */ package main import ( log "github.com/Sirupsen/logrus" ntnxAPI "github.com/Tfindelkind/acropolis-sdk-go" "bufio" "flag" "fmt" "os" "strconv" "strings" ) const appVersion = "0.9 beta" var ( host *string username *string password *string vmName *string newVMName *string vdiskMapping *string container *string listMapping *bool keepImages *bool debug *bool delete *bool overwrite *bool help *bool version *bool ) func init() { host = flag.String("host", "", "a string") username = flag.String("username", "", "a string") password = flag.String("password", "", "a string") vmName = flag.String("vm-name", "", "a string") newVMName = flag.String("new-vm-name", "", "a string") vdiskMapping = flag.String("vdisk-mapping", "", "a string") container = flag.String("container", "", "a string") listMapping = flag.Bool("list-mapping", false, "a bool") debug = flag.Bool("debug", false, "a bool") keepImages = flag.Bool("keep-images", false, "a bool") delete = flag.Bool("delete", false, "a bool") overwrite = flag.Bool("overwrite", false, "a bool") help = flag.Bool("help", false, "a bool") version = flag.Bool("version", false, "a bool") } func printHelp() { 
fmt.Println("Usage: move_vm [OPTIONS]") fmt.Println("move_vm [ --help | --version ]") fmt.Println("") fmt.Println("FOR NUTANIX AHV ONLY- clones a VM from one container to another") fmt.Println("vNic MAC Addresses will change unless --delete is used") fmt.Println("") fmt.Println("Options:") fmt.Println("") fmt.Println("--host Specify CVM host or Cluster IP") fmt.Println("--username Specify username for connect to host") fmt.Println("--password Specify password for user") fmt.Println("--vm-name Specify Virtual Machine name which will be moved") fmt.Println("--new-vm-name New Virtual Machine name if specified (clone)") fmt.Println("--vdisk-mapping Specify the container mapping for each vdisk - ORDER IS IMPORTANT") fmt.Println("--list-mapping Shows the actual vdisk-mapping") fmt.Println("--container Specify the container where vm will be moved to") fmt.Println("--debug Enables debug mode") fmt.Println("--keep-images If enabled clones to image service will not be deleted") fmt.Println("--delete Deletes source VM - ARE YOU REALLY SURE?") fmt.Println("--overwrite Overwrites target VM/Images (delete and creates new one)") fmt.Println("--help List this help") fmt.Println("--version Show the move_vm version") fmt.Println("") fmt.Println("Example:") fmt.Println("") fmt.Println("move_vm --host=NTNX-CVM --username=admin --password=nutanix/4u --vm-name=MyVM --container=ISO") fmt.Println("move_vm --host=NTNX-CVM --username=admin --password=nutanix/4u --vm-name=MyVM --vdisk-mapping=scsi.0/ISO,scsi.1/Prod2") fmt.Println("") } // parse --vdisk-mapping or --container and checks if all container exist func parseVdiskMapping(n *ntnxAPI.NTNXConnection) ([]ntnxAPI.VMDisks, error) { defer func() { if err := recover(); err != nil { log.Fatal("--vdisk-mapping seems not to have right format") os.Exit(1) } }() var vdiskMappings []ntnxAPI.VMDisks var VMDisk ntnxAPI.VMDisks result := strings.Split(*vdiskMapping, ",") // add Mappings for i := range result { res := strings.Split(result[i], "/") 
resAddr := strings.Split(res[0], ".") VMDisk.Addr.DeviceBus = resAddr[0] VMDisk.Addr.DeviceIndex, _ = strconv.Atoi(resAddr[1])
log.Error("--vdisk-mapping seems not to have right format") os.Exit(1) } if !(VMDisk.Addr.DeviceIndex >= 0 && VMDisk.Addr.DeviceIndex <= 255) { log.Error("--vdisk-mapping seems not to have right format") os.Exit(1) } if res[1] != "EMPTY" { containerUUID, err := ntnxAPI.GetContainerUUIDbyName(n, res[1]) if err != nil { os.Exit(1) } VMDisk.ContainerUUID = containerUUID } vdiskMappings = append(vdiskMappings, VMDisk) } return vdiskMappings, nil } func checkVdiskMapping(v ntnxAPI.VMJSONAHV, VdiskMapping []ntnxAPI.VMDisks) { defer func() { if err := recover(); err != nil { log.Fatal("--vdisk-mapping is not correct") os.Exit(1) } }() for i, elem := range v.Config.VMDisks { if elem.Addr.DeviceBus != VdiskMapping[i].Addr.DeviceBus || elem.Addr.DeviceIndex != VdiskMapping[i].Addr.DeviceIndex { log.Error("--vdisk-mapping some source vdisks are not mapped") os.Exit(1) } } if len(v.Config.VMDisks) != len(VdiskMapping) { log.Error("--vdisk-mapping some source vdisks are not mapped") os.Exit(1) } } func evaluateFlags() (ntnxAPI.NTNXConnection, ntnxAPI.VMJSONAHV, ntnxAPI.VMJSONAHV, []ntnxAPI.VMDisks) { //help if *help { printHelp() os.Exit(0) } //version if *version { fmt.Println("Version: " + appVersion) os.Exit(0) } //debug if *debug { log.SetLevel(log.DebugLevel) } else { log.SetLevel(log.InfoLevel) } //delete if *delete { reader := bufio.NewReader(os.Stdin) fmt.Println("THIS WILL DELETE source VM: " + *vmName) fmt.Print("If you want to continue type YES: ") text, _ := reader.ReadString('\n') fmt.Println(text) if strings.TrimRight(text, "\n") != "YES" { os.Exit(0) } } //host if *host == "" { log.Warn("mandatory option '--host=' is not set") os.Exit(0) } //username if *username == "" { log.Warn("option '--username=' is not set Default: admin is used") *username = "admin" } //password if *password == "" { log.Warn("option '--password=' is not set Default: nutanix/4u is used") *password = "nutanix/4u" } //vm-name if *vmName == "" { log.Warn("mandatory option '--vm-name=' is not 
set") os.Exit(0) } var vm ntnxAPI.VMJSONAHV vm.Config.Name = *vmName if *newVMName == "" { *newVMName = *vmName } var vNew ntnxAPI.VMJSONAHV vNew.Config.Name = *newVMName var n ntnxAPI.NTNXConnection n.NutanixHost = *host n.Username = *username n.Password = *password ntnxAPI.EncodeCredentials(&n) ntnxAPI.CreateHTTPClient(&n) ntnxAPI.NutanixCheckCredentials(&n) // list mapping if specified if *listMapping { var listMappingStr string exist, _ := ntnxAPI.VMExist(&n, vm.Config.Name) if exist { vm, _ = ntnxAPI.GetVMbyName(&n, &vm) for i, elem := range vm.Config.VMDisks { if !elem.IsEmpty { containerName, _ := ntnxAPI.GetContainerNamebyUUID(&n, elem.ContainerUUID) listMappingStr = listMappingStr + elem.Addr.DeviceBus + "." + strconv.Itoa(elem.Addr.DeviceIndex) + "/" + containerName } else { listMappingStr = listMappingStr + elem.Addr.DeviceBus + "." + strconv.Itoa(elem.Addr.DeviceIndex) + "/EMPTY" } if i < len(vm.Config.VMDisks)-1 { listMappingStr = listMappingStr + "," } } fmt.Println(listMappingStr) os.Exit(0) } } // both options set container and vdisk-mapping if *container != "" && *vdiskMapping != "" { log.Warn("Option --container and --vdisk-mapping are set. Only one of them is allowed") os.Exit(0) } // none options set container and vdisk-mapping if *container == "" && *vdiskMapping == "" { log.Warn("None of --container or --vdisk-mapping is set. 
One is mandatory") os.Exit(0) } // If container is not found exit if *container != "" { _, err := ntnxAPI.GetContainerUUIDbyName(&n, *container) if err != nil { os.Exit(1) } } var VdiskMapping []ntnxAPI.VMDisks var err error // If container is not found exit if *vdiskMapping != "" { VdiskMapping, err = parseVdiskMapping(&n) if err != nil { os.Exit(1) } } return n, vm, vNew, VdiskMapping } func main() { flag.Usage = printHelp flag.Parse() customFormatter := new(log.TextFormatter) customFormatter.TimestampFormat = "2006-01-02 15:04:05" log.SetFormatter(customFormatter) customFormatter.FullTimestamp = true var n ntnxAPI.NTNXConnection var v ntnxAPI.VMJSONAHV var vNew ntnxAPI.VMJSONAHV var d ntnxAPI.VDiskJSONREST var net ntnxAPI.NetworkREST var im ntnxAPI.ImageJSONAHV var taskUUID ntnxAPI.TaskUUID var VdiskMapping []ntnxAPI.VMDisks var existV bool var existVNew bool n, v, vNew, VdiskMapping = evaluateFlags() /* Short description what will be done 1. Upload vDisk from Source VM to Image Service. This is needed while a direct copy is not possible and wait 2. Create VM and wait 3. Clone Images to Disks and wait 4. Add network 5. delete images */ /*To-DO: 2. 
show_progress */ //check if source VM exists existV, _ = ntnxAPI.VMExist(&n, v.Config.Name) if existV { //check if new VM exists existVNew, _ = ntnxAPI.VMExist(&n, vNew.Config.Name) if existVNew { if vNew.Config.Name != v.Config.Name { log.Warn("VM " + vNew.Config.Name + " already exists") if !*overwrite { os.Exit(0) } else { vNew, _ = ntnxAPI.GetVMbyName(&n, &vNew) } } } v, _ = ntnxAPI.GetVMbyName(&n, &v) // check if Source VM is running state := ntnxAPI.GetVMState(&n, &v) if state != "off" { log.Warn("VM " + vNew.Config.Name + " is not powered off") os.Exit(0) } //check if all disks have been specified if *vdiskMapping != "" { checkVdiskMapping(v, VdiskMapping) } var taskUUIDS []ntnxAPI.TaskUUID // clone vDisk from source VM to image service for i, elem := range v.Config.VMDisks { if !elem.IsEmpty { var containerName string var containerUUID string d.VdiskUUID = elem.VMDiskUUID d.ContainerID = elem.ContainerUUID im.Name = v.Config.Name + "-" + elem.Addr.DeviceBus + "." + strconv.Itoa(elem.Addr.DeviceIndex) im.Annotation = "vm_move helper Image" // set containerName dependent on --container or --vdisk-mapping if *container != "" { containerName = *container } else { containerName, _ = ntnxAPI.GetContainerNamebyUUID(&n, VdiskMapping[i].ContainerUUID) } var uploadNeeded = true // make sure Images don't exist and overwrite if flag is enabled else WARN and continue // let all Upload take place in parallel and save taskUUID in a Array if ntnxAPI.ImageExistbyName(&n, &im) { if *overwrite { task, _ := ntnxAPI.DeleteImagebyName(&n, im.Name) ntnxAPI.WrappWaitUntilTaskFinished(&n, task.TaskUUID, "Previos existing Image "+im.Name+" deleted") // if overwrite is disabled } else { log.Info("Image " + im.Name + " already exists - will use existing one instead") uploadNeeded = false } } if uploadNeeded { // let all Upload take place in parallel and save taskUUID in a Array // if container stays the same clone local if *vdiskMapping != "" { containerUUID = 
VdiskMapping[i].ContainerUUID } else { containerUUID, _ = ntnxAPI.GetContainerIDbyName(&n, containerName) } if containerUUID == elem.ContainerUUID { taskUUID, _ = ntnxAPI.CreateImageFromVdisk(&n, &d, &im) log.Info("Start cloning " + im.Name + " to image service") } else { taskUUID, _ = ntnxAPI.CreateImageFromURL(&n, &d, &im, containerName) log.Info("Start cloning " + im.Name + " to image service from URL") } } taskUUIDS = append(taskUUIDS, taskUUID) } } //Wait that all disks have been clone to new container- may take a while for _, task := range taskUUIDS { ntnxAPI.WrappWaitUntilTaskFinished(&n, task.TaskUUID, "Image from disk created") } //copy all VM settings vNew.Config.Description = v.Config.Description vNew.Config.MemoryMb = v.Config.MemoryMb vNew.Config.NumVcpus = v.Config.NumVcpus vNew.Config.NumCoresPerVcpu = v.Config.NumCoresPerVcpu // Delete target VM if overwrite mode if *overwrite && existVNew { taskUUID, _ = ntnxAPI.DeleteVM(&n, &vNew) ntnxAPI.WrappWaitUntilTaskFinished(&n, taskUUID.TaskUUID, "New VM successfull deleted") } // Create target VM taskUUID, _ = ntnxAPI.CreateVMAHV(&n, &vNew) task, err := ntnxAPI.WaitUntilTaskFinished(&n, taskUUID.TaskUUID) if err != nil { log.Fatal("Task does not exist") } else { log.Info("VM " + vNew.Config.Name + " created") } vNew.UUID = ntnxAPI.GetVMIDbyTask(&n, &task) // Create vdisk on new VM from images for _, elem := range v.Config.VMDisks { if elem.IsEmpty { taskUUID, _ = ntnxAPI.CreateCDforVMwithDetails(&n, &vNew, elem.Addr.DeviceBus, strconv.Itoa(elem.Addr.DeviceIndex)) ntnxAPI.WrappWaitUntilTaskFinished(&n, taskUUID.TaskUUID, "CD successfully created") } else { im, _ = ntnxAPI.GetImagebyName(&n, v.Config.Name+"-"+elem.Addr.DeviceBus+"."+strconv.Itoa(elem.Addr.DeviceIndex)) if elem.IsCdrom { taskUUID, _ = ntnxAPI.CloneCDforVMwithDetails(&n, &vNew, &im, elem.Addr.DeviceBus) } else { taskUUID, _ = ntnxAPI.CloneDiskforVMwithDetails(&n, &vNew, &im, elem.Addr.DeviceBus) } ntnxAPI.WrappWaitUntilTaskFinished(&n, 
taskUUID.TaskUUID, "Disk ID"+v.UUID+" cloned") } } // Create vNics for _, elem := range v.Config.VMNics { if *delete { // delete nic from source VM because only one NIC with same MAC may exist taskUUID, _ = ntnxAPI.DelteVNicforVM(&n, &v, elem.MacAddress) ntnxAPI.WrappWaitUntilTaskFinished(&n, taskUUID.TaskUUID, "Nic with MAC "+elem.MacAddress+" created") // Create nic net.UUID = elem.NetworkUUID taskUUID, _ = ntnxAPI.CreateVNicforVMwithMAC(&n, &vNew, &net, elem.MacAddress) ntnxAPI.WrappWaitUntilTaskFinished(&n, taskUUID.TaskUUID, "Nic with MAC "+elem.MacAddress+" created") } else { net.UUID = elem.NetworkUUID taskUUID, _ = ntnxAPI.CreateVNicforVM(&n, &vNew, &net) ntnxAPI.WrappWaitUntilTaskFinished(&n, taskUUID.TaskUUID, "Nic with MAC "+elem.MacAddress+" created") } } // delete images if !*keepImages { for _, elem := range v.Config.VMDisks { if !elem.IsEmpty { d.VdiskUUID = elem.VMDiskUUID d.ContainerID = elem.ContainerUUID im.Name = v.Config.Name + "-" + elem.Addr.DeviceBus + "." + strconv.Itoa(elem.Addr.DeviceIndex) im.Annotation = "vm_move helper Image" task, _ := ntnxAPI.DeleteImagebyName(&n, im.Name) ntnxAPI.WrappWaitUntilTaskFinished(&n, task.TaskUUID, "Image "+im.Name+" deleted") } } } // delete VM if flag is set if *delete { taskUUID, _ = ntnxAPI.DeleteVM(&n, &v) ntnxAPI.WrappWaitUntilTaskFinished(&n, taskUUID.TaskUUID, "VM "+v.Config.Name+" successfull deleted") } } else { log.Warn("VM vm-name=" + v.Config.Name + " does not exist") } }
// check if right format is used if !(VMDisk.Addr.DeviceBus == "scsi" || VMDisk.Addr.DeviceBus == "pci" || VMDisk.Addr.DeviceBus == "ide") {
random_line_split
model.py
import json import os from typing import * import numpy as np import pandas as pd from gensim.models import LdaModel from sklearn.linear_model import LinearRegression from .info_extractor import InfoExtractor from .utils import ( loadDocumentIntoSpacy, getAllTokensAndChunks, loadDefaultNLP, ) class RatingModel: class RatingModelError(Exception): pass def __init__(self,_type: Optional[str] = None, pre_trained_model_json: Optional[str] = None, spacy_nlp: Optional[pd.DataFrame] = None): """ Initialize a pre-trained or empty model """ if _type is None: # empty model self.model = None self.keywords = None elif _type == "fixed": if pre_trained_model_json is None: raise RatingModel.RatingModel.Error("pre_trained_model_json is None") self.loadModelFixed(pre_trained_model_json) elif _type == "lda": if pre_trained_model_json is None: raise RatingModel.RatingModel.Error("pre_trained_model_json is None") self.loadModelLDA(pre_trained_model_json) else: raise RatingModel.RatingModelError( "type of test not valid. 
Either 'fixed' or 'lda'") print("Loading nlp tools...") if spacy_nlp is None: # load default model self.nlp = loadDefaultNLP() else: self.nlp = spacy_nlp print("Loading pdf parser...") # takes some time from tika import parser self.parser = parser def loadModelLDA(self, model_json: str) -> None: """ Function to load a pre-trained ;da model :param model_csv: the json filename of the model """ dirname = os.path.dirname(model_json) try: with open(model_json, "r") as f: j = json.load(f) except Exception as e: print(e) raise RatingModel.RatingModelError( "model_json %s is not a valid path" % model_json ) try: path = os.path.join(dirname, j["model_csv"]) self.model = pd.read_csv(path) except Exception as e: print(e) raise RatingModel.RatingModelError( "model_csv %s in model_json is not a valid path" % path ) try: path = os.path.join(dirname, j["lda"]) self.lda = LdaModel.load(path) self.dictionary = self.lda.id2word except Exception as e: print(e) raise RatingModel.RatingModelError("lda %s in model_json is not a valid path" % path) try: path = os.path.join(dirname, j["top_k_words"]) self.top_k_words = [] with open(path, "r") as f: for line in f: if line: self.top_k_words.append(line.strip()) except Exception as e: print(e) raise RatingModel.RatingModelError("top_k_words %s in model_json is not a valid path" % path) self._type = "lda" def __keep_top_k_words(self, text):
def __trainKMWM(self,seen_chunks_words: List[str],all_tokens_chunks: List[Any], keywords: List[str]) -> Optional[Tuple[List[float], List[float]]]: """ Hidden function to obtain KM and WM scores from keywords :param seen_chunks_words: n-grams of words in doc :param all_tokens_chunks: list of all tokens and chunks :param keywords: keywords to train on :return: Optional[Tuple[List[float], List[float]]]: kmscores, wmscores if no errors. Else None """ # get word2vec correlation matrix of all tokens + keyword_tokens keywords_tokenized = self.nlp(" ".join(keywords)) # prepare word embedding matrix pd_series_all = [] # convert tokens and chunks into word embeddings and put them into a pd.Series for tc in all_tokens_chunks: name = tc.lemma_.lower() pd_series_all.append(pd.Series(tc.vector, name=name)) # convert keywords into word embeddings and put them into a pd.Series for kwt in keywords_tokenized: name = kwt.text.lower() if name not in seen_chunks_words: pd_series_all.append(pd.Series(kwt.vector, name=name)) seen_chunks_words.append(name) # get embedding matrix by concatenating all pd.Series embedd_mat_df = pd.concat(pd_series_all, axis=1).reset_index() corrmat = embedd_mat_df.corr() # top n words correlated to keyword top_n = list(range(10, 100, 10)) km_scores = [] wm_scores = [] try: for kw in keywords: km_similarities = [] wm_similarities = [] # for top n words based on correlation to kw for n in top_n: cols = np.append( corrmat[kw] .drop(keywords) .sort_values(ascending=False) .index.values[: n - 1], kw, ) cm = np.corrcoef(embedd_mat_df[cols].values.T) # KM score # avg of top n correlations wrt kw (less the keyword # itself since it has corr = 1) avg_sim = np.mean(cm[0, :][1:]) km_similarities.append(avg_sim) # WM score # avg of top n correlations (without kw) # amongst each other len_minus = ( cm.shape[0] - 1 ) # cm.shape to remove all the self correlations len_minus_sq = len_minus ** 2 # 1. sum the correlations less the # correlations with the keyword # 2. 
subtract len_minus since there are # len_minus autocorrelations # 3. get mean by dividing the size of the rest # i.e. (len_minus_sq - len_minus) avg_wm = (np.sum(cm[1:, 1:]) - len_minus) / ( len_minus_sq - len_minus ) wm_similarities.append(avg_wm) # get 8th degree of X and perform LR to get intercept X = np.array(top_n) Xes = [X] # for i in range(2, 9): # Xes.append(X ** i) X_transformed = np.array(Xes).T lm = LinearRegression() # KM score y = np.array(km_similarities) lm.fit(X_transformed, y) km_scores.append(lm.intercept_) # WM score y = np.array(wm_similarities) lm.fit(X_transformed, y) wm_scores.append(lm.intercept_) except Exception as e: print(e) return None return km_scores, wm_scores def test(self, filename: str, info_extractor: Optional[InfoExtractor]): """ Test a document and print the extracted information and rating :param filename: name of resume file :param info_extractor: InfoExtractor object """ if self.model is None: raise RatingModel.RatingModelError("model is not loaded or trained yet") doc, _ = loadDocumentIntoSpacy(filename, self.parser, self.nlp) print("Getting rating...") if self._type == "fixed": print("working on fixed model") if self.keywords is None: raise RatingModel.RatingModelError("Keywords not found") seen_chunks_words, all_tokens_chunks = getAllTokensAndChunks(doc) # scoring temp_out = self.__trainKMWM(list(seen_chunks_words), list(all_tokens_chunks), self.keywords) if temp_out is None: raise RatingModel.RatingModelError( "Either parser cannot detect text or too few words in resume for analysis. Most usually the former." 
) km_scores, wm_scores = temp_out # average of km/wm scores for all keywords km_score = np.mean(km_scores) wm_score = np.mean(wm_scores) final_score = km_score * wm_score elif self._type == "lda": if self.lda is None or self.dictionary is None or self.top_k_words is None: raise RatingModel.RatingModelError("No LDA found") seen_chunks_words, all_tokens_chunks = getAllTokensAndChunks(doc) seen_chunks_words, all_tokens_chunks = ( list(seen_chunks_words), list(all_tokens_chunks), ) # scoring new_seen_chunks_words = self.__keep_top_k_words(seen_chunks_words) bow = self.dictionary.doc2bow(new_seen_chunks_words) doc_distribution = np.array( [tup[1] for tup in self.lda.get_document_topics(bow=bow)] ) # get keywords and weights keywords = [] all_pair_scores = [] all_topic_scores = [] all_diff_scores = [] # take top 5 topics for j in doc_distribution.argsort()[-5:][::-1]: topic_prob = doc_distribution[j] # take top 5 words for each topic st = self.lda.show_topic(topicid=j, topn=5) sum_st = np.sum(list(map(lambda x: x[1], st))) pair_scores = [] for pair in st: keywords.append(pair[0]) pair_scores.append(pair[1]) all_pair_scores.append(np.array(pair_scores)) all_topic_scores.append(np.array(topic_prob)) all_pair_scores = np.array(all_pair_scores) norm_all_pair_scores = all_pair_scores.T / np.sum(all_pair_scores, axis=1) norm_all_topic_scores = all_topic_scores / np.sum(all_topic_scores) all_diff_scores = (norm_all_pair_scores * norm_all_topic_scores).flatten() weights = pd.Series(all_diff_scores, index=keywords) weights.sort_values(ascending=False, inplace=True) temp_out = self.__trainKMWM(seen_chunks_words, all_tokens_chunks, keywords) if temp_out is None: print( "Either parser cannot detect text or too few words in resume for analysis. Most usually the former. Skip document." 
) km_scores, wm_scores = temp_out # average of km/wm scores for all keywords km_score = np.dot(weights.values, km_scores) wm_score = np.dot(weights.values, wm_scores) final_score = km_score * wm_score # max_score = self.model["score"].iloc[0] - np.std(self.model["score"]) # min_score = self.model["score"].iloc[-1] mean = np.mean(self.model["score"]) sd = np.std(self.model["score"]) rating = min(10, max(0, round(5 + (final_score-mean)/sd, 2))) if info_extractor is not None: print("-" * 20) # info_extractor.extractFromFile(filename) output= info_extractor.extractFromFile(filename) print("output:----",output) print("-" * 20) print("Rating: %.1f" % rating) # if info_extractor is not None: # print("info extractor is not working") # env = os.environ # subprocess.call([sys.executable, filename], env=env) return output
return [word for word in text if word in self.top_k_words]
identifier_body
model.py
import json import os from typing import * import numpy as np import pandas as pd from gensim.models import LdaModel from sklearn.linear_model import LinearRegression from .info_extractor import InfoExtractor from .utils import ( loadDocumentIntoSpacy, getAllTokensAndChunks, loadDefaultNLP, ) class RatingModel: class RatingModelError(Exception): pass def __init__(self,_type: Optional[str] = None, pre_trained_model_json: Optional[str] = None, spacy_nlp: Optional[pd.DataFrame] = None): """ Initialize a pre-trained or empty model """ if _type is None: # empty model self.model = None self.keywords = None elif _type == "fixed": if pre_trained_model_json is None: raise RatingModel.RatingModel.Error("pre_trained_model_json is None") self.loadModelFixed(pre_trained_model_json) elif _type == "lda": if pre_trained_model_json is None: raise RatingModel.RatingModel.Error("pre_trained_model_json is None") self.loadModelLDA(pre_trained_model_json) else: raise RatingModel.RatingModelError( "type of test not valid. 
Either 'fixed' or 'lda'") print("Loading nlp tools...") if spacy_nlp is None: # load default model self.nlp = loadDefaultNLP() else: self.nlp = spacy_nlp print("Loading pdf parser...") # takes some time from tika import parser self.parser = parser def loadModelLDA(self, model_json: str) -> None: """ Function to load a pre-trained ;da model :param model_csv: the json filename of the model """ dirname = os.path.dirname(model_json) try: with open(model_json, "r") as f: j = json.load(f) except Exception as e: print(e) raise RatingModel.RatingModelError( "model_json %s is not a valid path" % model_json ) try: path = os.path.join(dirname, j["model_csv"]) self.model = pd.read_csv(path) except Exception as e: print(e) raise RatingModel.RatingModelError( "model_csv %s in model_json is not a valid path" % path ) try: path = os.path.join(dirname, j["lda"]) self.lda = LdaModel.load(path) self.dictionary = self.lda.id2word except Exception as e: print(e) raise RatingModel.RatingModelError("lda %s in model_json is not a valid path" % path) try: path = os.path.join(dirname, j["top_k_words"]) self.top_k_words = [] with open(path, "r") as f: for line in f: if line: self.top_k_words.append(line.strip()) except Exception as e: print(e) raise RatingModel.RatingModelError("top_k_words %s in model_json is not a valid path" % path) self._type = "lda" def __keep_top_k_words(self, text): return [word for word in text if word in self.top_k_words] def __trainKMWM(self,seen_chunks_words: List[str],all_tokens_chunks: List[Any], keywords: List[str]) -> Optional[Tuple[List[float], List[float]]]: """ Hidden function to obtain KM and WM scores from keywords :param seen_chunks_words: n-grams of words in doc :param all_tokens_chunks: list of all tokens and chunks :param keywords: keywords to train on :return: Optional[Tuple[List[float], List[float]]]: kmscores, wmscores if no errors. 
Else None """ # get word2vec correlation matrix of all tokens + keyword_tokens keywords_tokenized = self.nlp(" ".join(keywords)) # prepare word embedding matrix pd_series_all = [] # convert tokens and chunks into word embeddings and put them into a pd.Series for tc in all_tokens_chunks: name = tc.lemma_.lower() pd_series_all.append(pd.Series(tc.vector, name=name)) # convert keywords into word embeddings and put them into a pd.Series for kwt in keywords_tokenized: name = kwt.text.lower() if name not in seen_chunks_words: pd_series_all.append(pd.Series(kwt.vector, name=name)) seen_chunks_words.append(name) # get embedding matrix by concatenating all pd.Series embedd_mat_df = pd.concat(pd_series_all, axis=1).reset_index() corrmat = embedd_mat_df.corr() # top n words correlated to keyword top_n = list(range(10, 100, 10)) km_scores = [] wm_scores = [] try: for kw in keywords: km_similarities = [] wm_similarities = [] # for top n words based on correlation to kw for n in top_n: cols = np.append( corrmat[kw] .drop(keywords) .sort_values(ascending=False) .index.values[: n - 1], kw, ) cm = np.corrcoef(embedd_mat_df[cols].values.T) # KM score # avg of top n correlations wrt kw (less the keyword # itself since it has corr = 1) avg_sim = np.mean(cm[0, :][1:]) km_similarities.append(avg_sim) # WM score # avg of top n correlations (without kw) # amongst each other len_minus = ( cm.shape[0] - 1 ) # cm.shape to remove all the self correlations len_minus_sq = len_minus ** 2 # 1. sum the correlations less the # correlations with the keyword # 2. subtract len_minus since there are # len_minus autocorrelations # 3. get mean by dividing the size of the rest # i.e. 
(len_minus_sq - len_minus) avg_wm = (np.sum(cm[1:, 1:]) - len_minus) / ( len_minus_sq - len_minus ) wm_similarities.append(avg_wm) # get 8th degree of X and perform LR to get intercept X = np.array(top_n) Xes = [X] # for i in range(2, 9): # Xes.append(X ** i) X_transformed = np.array(Xes).T lm = LinearRegression() # KM score y = np.array(km_similarities) lm.fit(X_transformed, y) km_scores.append(lm.intercept_) # WM score y = np.array(wm_similarities) lm.fit(X_transformed, y) wm_scores.append(lm.intercept_) except Exception as e: print(e) return None return km_scores, wm_scores def test(self, filename: str, info_extractor: Optional[InfoExtractor]): """ Test a document and print the extracted information and rating :param filename: name of resume file :param info_extractor: InfoExtractor object """ if self.model is None: raise RatingModel.RatingModelError("model is not loaded or trained yet") doc, _ = loadDocumentIntoSpacy(filename, self.parser, self.nlp) print("Getting rating...") if self._type == "fixed":
elif self._type == "lda": if self.lda is None or self.dictionary is None or self.top_k_words is None: raise RatingModel.RatingModelError("No LDA found") seen_chunks_words, all_tokens_chunks = getAllTokensAndChunks(doc) seen_chunks_words, all_tokens_chunks = ( list(seen_chunks_words), list(all_tokens_chunks), ) # scoring new_seen_chunks_words = self.__keep_top_k_words(seen_chunks_words) bow = self.dictionary.doc2bow(new_seen_chunks_words) doc_distribution = np.array( [tup[1] for tup in self.lda.get_document_topics(bow=bow)] ) # get keywords and weights keywords = [] all_pair_scores = [] all_topic_scores = [] all_diff_scores = [] # take top 5 topics for j in doc_distribution.argsort()[-5:][::-1]: topic_prob = doc_distribution[j] # take top 5 words for each topic st = self.lda.show_topic(topicid=j, topn=5) sum_st = np.sum(list(map(lambda x: x[1], st))) pair_scores = [] for pair in st: keywords.append(pair[0]) pair_scores.append(pair[1]) all_pair_scores.append(np.array(pair_scores)) all_topic_scores.append(np.array(topic_prob)) all_pair_scores = np.array(all_pair_scores) norm_all_pair_scores = all_pair_scores.T / np.sum(all_pair_scores, axis=1) norm_all_topic_scores = all_topic_scores / np.sum(all_topic_scores) all_diff_scores = (norm_all_pair_scores * norm_all_topic_scores).flatten() weights = pd.Series(all_diff_scores, index=keywords) weights.sort_values(ascending=False, inplace=True) temp_out = self.__trainKMWM(seen_chunks_words, all_tokens_chunks, keywords) if temp_out is None: print( "Either parser cannot detect text or too few words in resume for analysis. Most usually the former. Skip document." 
) km_scores, wm_scores = temp_out # average of km/wm scores for all keywords km_score = np.dot(weights.values, km_scores) wm_score = np.dot(weights.values, wm_scores) final_score = km_score * wm_score # max_score = self.model["score"].iloc[0] - np.std(self.model["score"]) # min_score = self.model["score"].iloc[-1] mean = np.mean(self.model["score"]) sd = np.std(self.model["score"]) rating = min(10, max(0, round(5 + (final_score-mean)/sd, 2))) if info_extractor is not None: print("-" * 20) # info_extractor.extractFromFile(filename) output= info_extractor.extractFromFile(filename) print("output:----",output) print("-" * 20) print("Rating: %.1f" % rating) # if info_extractor is not None: # print("info extractor is not working") # env = os.environ # subprocess.call([sys.executable, filename], env=env) return output
print("working on fixed model") if self.keywords is None: raise RatingModel.RatingModelError("Keywords not found") seen_chunks_words, all_tokens_chunks = getAllTokensAndChunks(doc) # scoring temp_out = self.__trainKMWM(list(seen_chunks_words), list(all_tokens_chunks), self.keywords) if temp_out is None: raise RatingModel.RatingModelError( "Either parser cannot detect text or too few words in resume for analysis. Most usually the former." ) km_scores, wm_scores = temp_out # average of km/wm scores for all keywords km_score = np.mean(km_scores) wm_score = np.mean(wm_scores) final_score = km_score * wm_score
conditional_block
model.py
import json import os from typing import * import numpy as np import pandas as pd from gensim.models import LdaModel from sklearn.linear_model import LinearRegression from .info_extractor import InfoExtractor from .utils import ( loadDocumentIntoSpacy, getAllTokensAndChunks, loadDefaultNLP, ) class RatingModel: class RatingModelError(Exception): pass def __init__(self,_type: Optional[str] = None, pre_trained_model_json: Optional[str] = None, spacy_nlp: Optional[pd.DataFrame] = None): """ Initialize a pre-trained or empty model """ if _type is None: # empty model self.model = None self.keywords = None elif _type == "fixed": if pre_trained_model_json is None: raise RatingModel.RatingModel.Error("pre_trained_model_json is None") self.loadModelFixed(pre_trained_model_json) elif _type == "lda": if pre_trained_model_json is None: raise RatingModel.RatingModel.Error("pre_trained_model_json is None") self.loadModelLDA(pre_trained_model_json) else: raise RatingModel.RatingModelError( "type of test not valid. 
Either 'fixed' or 'lda'") print("Loading nlp tools...") if spacy_nlp is None: # load default model self.nlp = loadDefaultNLP() else: self.nlp = spacy_nlp print("Loading pdf parser...") # takes some time from tika import parser self.parser = parser def loadModelLDA(self, model_json: str) -> None: """ Function to load a pre-trained ;da model :param model_csv: the json filename of the model """ dirname = os.path.dirname(model_json) try: with open(model_json, "r") as f: j = json.load(f) except Exception as e: print(e) raise RatingModel.RatingModelError( "model_json %s is not a valid path" % model_json ) try: path = os.path.join(dirname, j["model_csv"]) self.model = pd.read_csv(path) except Exception as e: print(e) raise RatingModel.RatingModelError( "model_csv %s in model_json is not a valid path" % path ) try: path = os.path.join(dirname, j["lda"]) self.lda = LdaModel.load(path) self.dictionary = self.lda.id2word except Exception as e: print(e) raise RatingModel.RatingModelError("lda %s in model_json is not a valid path" % path) try: path = os.path.join(dirname, j["top_k_words"]) self.top_k_words = [] with open(path, "r") as f: for line in f: if line: self.top_k_words.append(line.strip()) except Exception as e: print(e) raise RatingModel.RatingModelError("top_k_words %s in model_json is not a valid path" % path) self._type = "lda" def __keep_top_k_words(self, text): return [word for word in text if word in self.top_k_words] def __trainKMWM(self,seen_chunks_words: List[str],all_tokens_chunks: List[Any], keywords: List[str]) -> Optional[Tuple[List[float], List[float]]]: """ Hidden function to obtain KM and WM scores from keywords :param seen_chunks_words: n-grams of words in doc :param all_tokens_chunks: list of all tokens and chunks :param keywords: keywords to train on :return: Optional[Tuple[List[float], List[float]]]: kmscores, wmscores if no errors. 
Else None """ # get word2vec correlation matrix of all tokens + keyword_tokens keywords_tokenized = self.nlp(" ".join(keywords)) # prepare word embedding matrix pd_series_all = [] # convert tokens and chunks into word embeddings and put them into a pd.Series for tc in all_tokens_chunks: name = tc.lemma_.lower() pd_series_all.append(pd.Series(tc.vector, name=name)) # convert keywords into word embeddings and put them into a pd.Series for kwt in keywords_tokenized: name = kwt.text.lower() if name not in seen_chunks_words: pd_series_all.append(pd.Series(kwt.vector, name=name)) seen_chunks_words.append(name) # get embedding matrix by concatenating all pd.Series embedd_mat_df = pd.concat(pd_series_all, axis=1).reset_index() corrmat = embedd_mat_df.corr() # top n words correlated to keyword top_n = list(range(10, 100, 10)) km_scores = [] wm_scores = [] try: for kw in keywords: km_similarities = [] wm_similarities = [] # for top n words based on correlation to kw for n in top_n: cols = np.append( corrmat[kw] .drop(keywords) .sort_values(ascending=False) .index.values[: n - 1], kw, ) cm = np.corrcoef(embedd_mat_df[cols].values.T) # KM score # avg of top n correlations wrt kw (less the keyword # itself since it has corr = 1) avg_sim = np.mean(cm[0, :][1:]) km_similarities.append(avg_sim) # WM score # avg of top n correlations (without kw) # amongst each other len_minus = ( cm.shape[0] - 1 ) # cm.shape to remove all the self correlations len_minus_sq = len_minus ** 2 # 1. sum the correlations less the # correlations with the keyword # 2. subtract len_minus since there are # len_minus autocorrelations # 3. get mean by dividing the size of the rest # i.e. 
(len_minus_sq - len_minus) avg_wm = (np.sum(cm[1:, 1:]) - len_minus) / ( len_minus_sq - len_minus ) wm_similarities.append(avg_wm) # get 8th degree of X and perform LR to get intercept X = np.array(top_n) Xes = [X] # for i in range(2, 9): # Xes.append(X ** i) X_transformed = np.array(Xes).T lm = LinearRegression() # KM score y = np.array(km_similarities) lm.fit(X_transformed, y) km_scores.append(lm.intercept_) # WM score y = np.array(wm_similarities) lm.fit(X_transformed, y) wm_scores.append(lm.intercept_) except Exception as e: print(e) return None return km_scores, wm_scores def test(self, filename: str, info_extractor: Optional[InfoExtractor]): """ Test a document and print the extracted information and rating :param filename: name of resume file :param info_extractor: InfoExtractor object """ if self.model is None: raise RatingModel.RatingModelError("model is not loaded or trained yet") doc, _ = loadDocumentIntoSpacy(filename, self.parser, self.nlp) print("Getting rating...") if self._type == "fixed": print("working on fixed model") if self.keywords is None: raise RatingModel.RatingModelError("Keywords not found") seen_chunks_words, all_tokens_chunks = getAllTokensAndChunks(doc) # scoring temp_out = self.__trainKMWM(list(seen_chunks_words), list(all_tokens_chunks), self.keywords) if temp_out is None: raise RatingModel.RatingModelError( "Either parser cannot detect text or too few words in resume for analysis. Most usually the former." ) km_scores, wm_scores = temp_out # average of km/wm scores for all keywords km_score = np.mean(km_scores) wm_score = np.mean(wm_scores)
elif self._type == "lda": if self.lda is None or self.dictionary is None or self.top_k_words is None: raise RatingModel.RatingModelError("No LDA found") seen_chunks_words, all_tokens_chunks = getAllTokensAndChunks(doc) seen_chunks_words, all_tokens_chunks = ( list(seen_chunks_words), list(all_tokens_chunks), ) # scoring new_seen_chunks_words = self.__keep_top_k_words(seen_chunks_words) bow = self.dictionary.doc2bow(new_seen_chunks_words) doc_distribution = np.array( [tup[1] for tup in self.lda.get_document_topics(bow=bow)] ) # get keywords and weights keywords = [] all_pair_scores = [] all_topic_scores = [] all_diff_scores = [] # take top 5 topics for j in doc_distribution.argsort()[-5:][::-1]: topic_prob = doc_distribution[j] # take top 5 words for each topic st = self.lda.show_topic(topicid=j, topn=5) sum_st = np.sum(list(map(lambda x: x[1], st))) pair_scores = [] for pair in st: keywords.append(pair[0]) pair_scores.append(pair[1]) all_pair_scores.append(np.array(pair_scores)) all_topic_scores.append(np.array(topic_prob)) all_pair_scores = np.array(all_pair_scores) norm_all_pair_scores = all_pair_scores.T / np.sum(all_pair_scores, axis=1) norm_all_topic_scores = all_topic_scores / np.sum(all_topic_scores) all_diff_scores = (norm_all_pair_scores * norm_all_topic_scores).flatten() weights = pd.Series(all_diff_scores, index=keywords) weights.sort_values(ascending=False, inplace=True) temp_out = self.__trainKMWM(seen_chunks_words, all_tokens_chunks, keywords) if temp_out is None: print( "Either parser cannot detect text or too few words in resume for analysis. Most usually the former. Skip document." 
) km_scores, wm_scores = temp_out # average of km/wm scores for all keywords km_score = np.dot(weights.values, km_scores) wm_score = np.dot(weights.values, wm_scores) final_score = km_score * wm_score # max_score = self.model["score"].iloc[0] - np.std(self.model["score"]) # min_score = self.model["score"].iloc[-1] mean = np.mean(self.model["score"]) sd = np.std(self.model["score"]) rating = min(10, max(0, round(5 + (final_score-mean)/sd, 2))) if info_extractor is not None: print("-" * 20) # info_extractor.extractFromFile(filename) output= info_extractor.extractFromFile(filename) print("output:----",output) print("-" * 20) print("Rating: %.1f" % rating) # if info_extractor is not None: # print("info extractor is not working") # env = os.environ # subprocess.call([sys.executable, filename], env=env) return output
final_score = km_score * wm_score
random_line_split
model.py
import json import os from typing import * import numpy as np import pandas as pd from gensim.models import LdaModel from sklearn.linear_model import LinearRegression from .info_extractor import InfoExtractor from .utils import ( loadDocumentIntoSpacy, getAllTokensAndChunks, loadDefaultNLP, ) class RatingModel: class RatingModelError(Exception): pass def __init__(self,_type: Optional[str] = None, pre_trained_model_json: Optional[str] = None, spacy_nlp: Optional[pd.DataFrame] = None): """ Initialize a pre-trained or empty model """ if _type is None: # empty model self.model = None self.keywords = None elif _type == "fixed": if pre_trained_model_json is None: raise RatingModel.RatingModel.Error("pre_trained_model_json is None") self.loadModelFixed(pre_trained_model_json) elif _type == "lda": if pre_trained_model_json is None: raise RatingModel.RatingModel.Error("pre_trained_model_json is None") self.loadModelLDA(pre_trained_model_json) else: raise RatingModel.RatingModelError( "type of test not valid. 
Either 'fixed' or 'lda'") print("Loading nlp tools...") if spacy_nlp is None: # load default model self.nlp = loadDefaultNLP() else: self.nlp = spacy_nlp print("Loading pdf parser...") # takes some time from tika import parser self.parser = parser def loadModelLDA(self, model_json: str) -> None: """ Function to load a pre-trained ;da model :param model_csv: the json filename of the model """ dirname = os.path.dirname(model_json) try: with open(model_json, "r") as f: j = json.load(f) except Exception as e: print(e) raise RatingModel.RatingModelError( "model_json %s is not a valid path" % model_json ) try: path = os.path.join(dirname, j["model_csv"]) self.model = pd.read_csv(path) except Exception as e: print(e) raise RatingModel.RatingModelError( "model_csv %s in model_json is not a valid path" % path ) try: path = os.path.join(dirname, j["lda"]) self.lda = LdaModel.load(path) self.dictionary = self.lda.id2word except Exception as e: print(e) raise RatingModel.RatingModelError("lda %s in model_json is not a valid path" % path) try: path = os.path.join(dirname, j["top_k_words"]) self.top_k_words = [] with open(path, "r") as f: for line in f: if line: self.top_k_words.append(line.strip()) except Exception as e: print(e) raise RatingModel.RatingModelError("top_k_words %s in model_json is not a valid path" % path) self._type = "lda" def __keep_top_k_words(self, text): return [word for word in text if word in self.top_k_words] def
(self,seen_chunks_words: List[str],all_tokens_chunks: List[Any], keywords: List[str]) -> Optional[Tuple[List[float], List[float]]]: """ Hidden function to obtain KM and WM scores from keywords :param seen_chunks_words: n-grams of words in doc :param all_tokens_chunks: list of all tokens and chunks :param keywords: keywords to train on :return: Optional[Tuple[List[float], List[float]]]: kmscores, wmscores if no errors. Else None """ # get word2vec correlation matrix of all tokens + keyword_tokens keywords_tokenized = self.nlp(" ".join(keywords)) # prepare word embedding matrix pd_series_all = [] # convert tokens and chunks into word embeddings and put them into a pd.Series for tc in all_tokens_chunks: name = tc.lemma_.lower() pd_series_all.append(pd.Series(tc.vector, name=name)) # convert keywords into word embeddings and put them into a pd.Series for kwt in keywords_tokenized: name = kwt.text.lower() if name not in seen_chunks_words: pd_series_all.append(pd.Series(kwt.vector, name=name)) seen_chunks_words.append(name) # get embedding matrix by concatenating all pd.Series embedd_mat_df = pd.concat(pd_series_all, axis=1).reset_index() corrmat = embedd_mat_df.corr() # top n words correlated to keyword top_n = list(range(10, 100, 10)) km_scores = [] wm_scores = [] try: for kw in keywords: km_similarities = [] wm_similarities = [] # for top n words based on correlation to kw for n in top_n: cols = np.append( corrmat[kw] .drop(keywords) .sort_values(ascending=False) .index.values[: n - 1], kw, ) cm = np.corrcoef(embedd_mat_df[cols].values.T) # KM score # avg of top n correlations wrt kw (less the keyword # itself since it has corr = 1) avg_sim = np.mean(cm[0, :][1:]) km_similarities.append(avg_sim) # WM score # avg of top n correlations (without kw) # amongst each other len_minus = ( cm.shape[0] - 1 ) # cm.shape to remove all the self correlations len_minus_sq = len_minus ** 2 # 1. sum the correlations less the # correlations with the keyword # 2. 
subtract len_minus since there are # len_minus autocorrelations # 3. get mean by dividing the size of the rest # i.e. (len_minus_sq - len_minus) avg_wm = (np.sum(cm[1:, 1:]) - len_minus) / ( len_minus_sq - len_minus ) wm_similarities.append(avg_wm) # get 8th degree of X and perform LR to get intercept X = np.array(top_n) Xes = [X] # for i in range(2, 9): # Xes.append(X ** i) X_transformed = np.array(Xes).T lm = LinearRegression() # KM score y = np.array(km_similarities) lm.fit(X_transformed, y) km_scores.append(lm.intercept_) # WM score y = np.array(wm_similarities) lm.fit(X_transformed, y) wm_scores.append(lm.intercept_) except Exception as e: print(e) return None return km_scores, wm_scores def test(self, filename: str, info_extractor: Optional[InfoExtractor]): """ Test a document and print the extracted information and rating :param filename: name of resume file :param info_extractor: InfoExtractor object """ if self.model is None: raise RatingModel.RatingModelError("model is not loaded or trained yet") doc, _ = loadDocumentIntoSpacy(filename, self.parser, self.nlp) print("Getting rating...") if self._type == "fixed": print("working on fixed model") if self.keywords is None: raise RatingModel.RatingModelError("Keywords not found") seen_chunks_words, all_tokens_chunks = getAllTokensAndChunks(doc) # scoring temp_out = self.__trainKMWM(list(seen_chunks_words), list(all_tokens_chunks), self.keywords) if temp_out is None: raise RatingModel.RatingModelError( "Either parser cannot detect text or too few words in resume for analysis. Most usually the former." 
) km_scores, wm_scores = temp_out # average of km/wm scores for all keywords km_score = np.mean(km_scores) wm_score = np.mean(wm_scores) final_score = km_score * wm_score elif self._type == "lda": if self.lda is None or self.dictionary is None or self.top_k_words is None: raise RatingModel.RatingModelError("No LDA found") seen_chunks_words, all_tokens_chunks = getAllTokensAndChunks(doc) seen_chunks_words, all_tokens_chunks = ( list(seen_chunks_words), list(all_tokens_chunks), ) # scoring new_seen_chunks_words = self.__keep_top_k_words(seen_chunks_words) bow = self.dictionary.doc2bow(new_seen_chunks_words) doc_distribution = np.array( [tup[1] for tup in self.lda.get_document_topics(bow=bow)] ) # get keywords and weights keywords = [] all_pair_scores = [] all_topic_scores = [] all_diff_scores = [] # take top 5 topics for j in doc_distribution.argsort()[-5:][::-1]: topic_prob = doc_distribution[j] # take top 5 words for each topic st = self.lda.show_topic(topicid=j, topn=5) sum_st = np.sum(list(map(lambda x: x[1], st))) pair_scores = [] for pair in st: keywords.append(pair[0]) pair_scores.append(pair[1]) all_pair_scores.append(np.array(pair_scores)) all_topic_scores.append(np.array(topic_prob)) all_pair_scores = np.array(all_pair_scores) norm_all_pair_scores = all_pair_scores.T / np.sum(all_pair_scores, axis=1) norm_all_topic_scores = all_topic_scores / np.sum(all_topic_scores) all_diff_scores = (norm_all_pair_scores * norm_all_topic_scores).flatten() weights = pd.Series(all_diff_scores, index=keywords) weights.sort_values(ascending=False, inplace=True) temp_out = self.__trainKMWM(seen_chunks_words, all_tokens_chunks, keywords) if temp_out is None: print( "Either parser cannot detect text or too few words in resume for analysis. Most usually the former. Skip document." 
) km_scores, wm_scores = temp_out # average of km/wm scores for all keywords km_score = np.dot(weights.values, km_scores) wm_score = np.dot(weights.values, wm_scores) final_score = km_score * wm_score # max_score = self.model["score"].iloc[0] - np.std(self.model["score"]) # min_score = self.model["score"].iloc[-1] mean = np.mean(self.model["score"]) sd = np.std(self.model["score"]) rating = min(10, max(0, round(5 + (final_score-mean)/sd, 2))) if info_extractor is not None: print("-" * 20) # info_extractor.extractFromFile(filename) output= info_extractor.extractFromFile(filename) print("output:----",output) print("-" * 20) print("Rating: %.1f" % rating) # if info_extractor is not None: # print("info extractor is not working") # env = os.environ # subprocess.call([sys.executable, filename], env=env) return output
__trainKMWM
identifier_name
ng-typeview.ts
import {writeFileSync, unlinkSync} from "fs"; import {sync} from "glob"; import {HashMap, Vector} from "prelude-ts"; import {parse} from "path"; import {parseView, collectionKeepDefined } from "./view-parser" import {AttributeDirectiveHandler, TagDirectiveHandler, defaultTagDirectiveHandlers, defaultAttrDirectiveHandlers} from "./ng-directives" export {AttributeDirectiveHandler, TagDirectiveHandler, defaultTagDirectiveHandlers, defaultAttrDirectiveHandlers} from "./ng-directives" import {extractControllerScopeInfo, extractCtrlViewConnsAngularModule, ControllerViewInfo, ControllerScopeInfo, ControllerViewConnector, defaultCtrlViewConnectors, CtrlViewFragmentExtractor, defaultCtrlViewFragmentExtractors, ModelViewConnector, defaultModelViewConnectors} from "./controller-parser" import {NgFilter, defaultNgFilters} from "./filters" export {ControllerViewInfo} from "./controller-parser"; // we only repeat the imports, type synonyms and custom interfaces // if there is a module, because otherwise those are dumped in the // global namespace anyway function wrapInModule(moduleName: string, scopeInfo: ControllerScopeInfo, contents: string): string
function getViewTestFilename(ctrlFname: string, viewFname: string): string { return `${ctrlFname}_${viewFname}_viewtest.ts`; } async function processControllerView(prjSettings: ProjectSettings, controllerPath: string, viewPath: string, ngFilters: NgFilter[], tagDirectives: TagDirectiveHandler[], attributeDirectives: AttributeDirectiveHandler[]) { const scopeContents: ControllerScopeInfo = await extractControllerScopeInfo( controllerPath, prjSettings.ctrlViewFragmentExtractors); if (scopeContents.scopeInfo.isNone()) { // no point of writing anything if there is no scope block return; } const viewExprs = await parseView( prjSettings.resolveImportsAsNonScope || false, viewPath, scopeContents.viewFragments, scopeContents.importNames, Vector.ofIterable(tagDirectives), Vector.ofIterable(attributeDirectives), Vector.ofIterable(ngFilters)); const pathInfo = parse(controllerPath); const viewPathInfo = parse(viewPath); // putting both controller & view name in the output, as one controller // may be used for several views. const outputFname = pathInfo.dir + "/" + getViewTestFilename(pathInfo.name, viewPathInfo.name); const moduleWrap = (x:string) => scopeContents.tsModuleName .map(n => wrapInModule(n, scopeContents, x)) .getOrElse(x); const filterParams = ngFilters.map(f => `f__${f.name}:${f.type}`).join(",\n ") const typeParams = scopeContents.scopeTypeParams.getOrElse(""); writeFileSync(outputFname, moduleWrap( scopeContents.scopeInfo.getOrThrow() + `\n\nfunction ___f${typeParams}($scope: Scope${ typeParams}, ${filterParams}) {\n` + viewExprs + "\n}\n") + "\n"); } /** * Configuration for a ng-typeview project. */ export interface ProjectSettings { /** * The path for the project on disk (root folder) */ path: string; /** * Folders within the project to exclude from analysis * (for instance external JS libraries, the folder where * your typescript is compiled to javascript, and so on). 
*/ blacklistedPaths: string[]; /** * List of angular filters to handle during the analysis. * You can use [[defaultNgFilters]], add to that list, or specify your own. */ ngFilters: NgFilter[]; /** * List of controller-view connectors to use. * [[defaultCtrlViewConnectors]] contains a default list; you can use * that, add to that list, or specify your own. */ ctrlViewConnectors: ControllerViewConnector[]; /** * Hardcoded controller/view connections that'll be added * to the ones which were autodetected through ctrlViewConnectors. * Useful in case it's too hard to parse some connections * from source. */ extraCtrlViewConnections: ControllerViewInfo[]; /** * List of model-view connectors to use. * These tie model files to views. * This allows to express non-controller models, such * as directive models for instance. * [[defaultModelViewConnectors]] contains a default list; you can use * that, add to that list, or specify your own. */ modelViewConnectors: ModelViewConnector[]; /** * List of tag-bound angular directives to handle during the analysis. * [[defaultTagDirectiveHandlers]] contains a default list; you can use * that, add to that list, or specify your own. */ tagDirectives: TagDirectiveHandler[]; /** * List of attribute-bound angular directives to handle during the analysis. * [[defaultAttrDirectiveHandlers]] contains a default list; you can use * that, add to that list, or specify your own. */ attributeDirectives: AttributeDirectiveHandler[]; /** * Controller view fragment extractors. For instance, you may have * view fragments present in your controllers, for instance ng-grid has * 'cell templates' which typeview can also type-check through this mechanism. * Extractors allows you to tell ng-typeview about those. */ ctrlViewFragmentExtractors: CtrlViewFragmentExtractor[]; /** * When resolving the scope for variables in the view, we prefix "$scope." * for all variables except those defined in the view. For instance, a * `ng-repeat` will define local variables. 
For these, we do not prefix with * "$scope.". 99% of the time, that works great. * One issue that can come up though, is if you have static fields for * instance. If you read `MyClass.MY_STATIC_FIELD`... That'll work in javascript * and angular, due to the TS->JS transpilation. But in ng-typeview, we * can't declare on the scope a field of type [class of MyClass], so that * field.MY_STATIC_FIELD would work. * So a workaround is to specify in your controller: * `import MyClass = api.MyClass;` * In that case, if you enable this `resolveImportsAsNonScope` option * (disabled by default), ng-typeview will not resolve * `MyClass.MY_STATIC_FIELD` as `$scope.MyClass.MY_STATIC_FIELD` anymore, * but as `MyClass.MY_STATIC_FIELD`. And since we copy the imports in the * viewtest, it should work. * But it's pretty messy, so we rather encourage you to avoid statics if * at all possible. */ resolveImportsAsNonScope?: boolean; } function deletePreviouslyGeneratedFiles(prjSettings: ProjectSettings): void { const files = sync(prjSettings.path + "/**/" + getViewTestFilename("*", "*"), {nodir:true, ignore: prjSettings.blacklistedPaths}); files.forEach(f => unlinkSync(f)); } /** * Will go through the views and controllers in the project folder and * generate viewtest typescript files to ascertain type-safety of the views. * NOTE: The function returns a promise but is not fully async: a good part of its * runtime is spend running synchronous functions. 
*/ export async function processProject(prjSettings: ProjectSettings): Promise<any> { deletePreviouslyGeneratedFiles(prjSettings); const files = sync(prjSettings.path + "/**/*.@(js|ts)", {nodir:true, ignore: prjSettings.blacklistedPaths}); const viewInfos = await Promise.all( files.map(f => extractCtrlViewConnsAngularModule( f, prjSettings.path, prjSettings.ctrlViewConnectors, prjSettings.modelViewConnectors))); const viewFilenameToControllerNames: HashMap<string,Vector<ControllerViewInfo>> = Vector.ofIterable(viewInfos) .flatMap(vi => Vector.ofIterable(vi.controllerViewInfos)) .appendAll(prjSettings.extraCtrlViewConnections) .groupBy(cvi => cvi.viewPath); const controllerNameToFilename = Vector.ofIterable(viewInfos) .filter(vi => vi.controllerName.isSome()) // JS files are not going to have a scope interface // definition so they're not helpful. Also, we can // get twice the same file: original TS & compiled JS. // => keep only the original TS in that case. .filter(vi => vi.fileName.toLowerCase().endsWith(".ts")) .toMap(vi => [vi.controllerName.getOrThrow(), vi.fileName]); const viewFilenameToCtrlFilenamesViewConns = viewFilenameToControllerNames .map<string,Vector<string>>( (viewFname,ctrlViewInfos) => [viewFname, collectionKeepDefined( ctrlViewInfos.map(cvi => controllerNameToFilename.get(cvi.controllerName).getOrUndefined()))]); const viewFilenameToCtrlFilenamesModelConns = Vector.ofIterable(viewInfos) .flatMap(vi => Vector.ofIterable(vi.modelViewInfos)) .groupBy(mvi => mvi.viewPath) .mapValues(mvis => mvis.map(mvi => mvi.modelPath)); const viewFilenameToCtrlFilenames = viewFilenameToCtrlFilenamesViewConns.mergeWith( viewFilenameToCtrlFilenamesModelConns, (views1, views2) => views1.appendAll(views2)); return Promise.all(viewFilenameToCtrlFilenames.toVector().map( ([viewName, ctrlNames]) => Promise.all(ctrlNames.map( ctrlName => processControllerView(prjSettings, ctrlName, prjSettings.path + "/" + viewName, prjSettings.ngFilters, prjSettings.tagDirectives, 
prjSettings.attributeDirectives)).toArray())).toArray()); } try { processProject({ path: process.argv[2], blacklistedPaths: process.argv.slice(3), ngFilters: defaultNgFilters, ctrlViewConnectors: defaultCtrlViewConnectors, modelViewConnectors: defaultModelViewConnectors, extraCtrlViewConnections: [], tagDirectives: defaultTagDirectiveHandlers, attributeDirectives: defaultAttrDirectiveHandlers, ctrlViewFragmentExtractors: defaultCtrlViewFragmentExtractors }); } catch (e) { console.log(e); }
{ return "module " + moduleName + " {\n" + scopeInfo.imports.join("\n") + "\n" + scopeInfo.typeAliases.join("\n") + "\n" + scopeInfo.nonExportedDeclarations.join("\n") + "\n" + contents + "}\n"; }
identifier_body
ng-typeview.ts
import {writeFileSync, unlinkSync} from "fs"; import {sync} from "glob"; import {HashMap, Vector} from "prelude-ts"; import {parse} from "path"; import {parseView, collectionKeepDefined } from "./view-parser" import {AttributeDirectiveHandler, TagDirectiveHandler, defaultTagDirectiveHandlers, defaultAttrDirectiveHandlers} from "./ng-directives" export {AttributeDirectiveHandler, TagDirectiveHandler, defaultTagDirectiveHandlers, defaultAttrDirectiveHandlers} from "./ng-directives" import {extractControllerScopeInfo, extractCtrlViewConnsAngularModule, ControllerViewInfo, ControllerScopeInfo, ControllerViewConnector, defaultCtrlViewConnectors, CtrlViewFragmentExtractor, defaultCtrlViewFragmentExtractors, ModelViewConnector, defaultModelViewConnectors} from "./controller-parser" import {NgFilter, defaultNgFilters} from "./filters" export {ControllerViewInfo} from "./controller-parser"; // we only repeat the imports, type synonyms and custom interfaces // if there is a module, because otherwise those are dumped in the // global namespace anyway function wrapInModule(moduleName: string, scopeInfo: ControllerScopeInfo, contents: string): string { return "module " + moduleName + " {\n" + scopeInfo.imports.join("\n") + "\n" + scopeInfo.typeAliases.join("\n") + "\n" + scopeInfo.nonExportedDeclarations.join("\n") + "\n" + contents + "}\n"; } function getViewTestFilename(ctrlFname: string, viewFname: string): string { return `${ctrlFname}_${viewFname}_viewtest.ts`; } async function processControllerView(prjSettings: ProjectSettings, controllerPath: string, viewPath: string, ngFilters: NgFilter[], tagDirectives: TagDirectiveHandler[], attributeDirectives: AttributeDirectiveHandler[]) { const scopeContents: ControllerScopeInfo = await extractControllerScopeInfo( controllerPath, prjSettings.ctrlViewFragmentExtractors); if (scopeContents.scopeInfo.isNone()) { // no point of writing anything if there is no scope block return; } const viewExprs = await parseView( 
prjSettings.resolveImportsAsNonScope || false, viewPath, scopeContents.viewFragments, scopeContents.importNames, Vector.ofIterable(tagDirectives), Vector.ofIterable(attributeDirectives), Vector.ofIterable(ngFilters)); const pathInfo = parse(controllerPath); const viewPathInfo = parse(viewPath); // putting both controller & view name in the output, as one controller // may be used for several views. const outputFname = pathInfo.dir + "/" + getViewTestFilename(pathInfo.name, viewPathInfo.name); const moduleWrap = (x:string) => scopeContents.tsModuleName .map(n => wrapInModule(n, scopeContents, x)) .getOrElse(x); const filterParams = ngFilters.map(f => `f__${f.name}:${f.type}`).join(",\n ") const typeParams = scopeContents.scopeTypeParams.getOrElse(""); writeFileSync(outputFname, moduleWrap( scopeContents.scopeInfo.getOrThrow() + `\n\nfunction ___f${typeParams}($scope: Scope${ typeParams}, ${filterParams}) {\n` + viewExprs + "\n}\n") + "\n"); } /** * Configuration for a ng-typeview project. */ export interface ProjectSettings { /** * The path for the project on disk (root folder) */ path: string; /** * Folders within the project to exclude from analysis * (for instance external JS libraries, the folder where * your typescript is compiled to javascript, and so on). */ blacklistedPaths: string[]; /** * List of angular filters to handle during the analysis. * You can use [[defaultNgFilters]], add to that list, or specify your own. */
ngFilters: NgFilter[]; /** * List of controller-view connectors to use. * [[defaultCtrlViewConnectors]] contains a default list; you can use * that, add to that list, or specify your own. */ ctrlViewConnectors: ControllerViewConnector[]; /** * Hardcoded controller/view connections that'll be added * to the ones which were autodetected through ctrlViewConnectors. * Useful in case it's too hard to parse some connections * from source. */ extraCtrlViewConnections: ControllerViewInfo[]; /** * List of model-view connectors to use. * These tie model files to views. * This allows to express non-controller models, such * as directive models for instance. * [[defaultModelViewConnectors]] contains a default list; you can use * that, add to that list, or specify your own. */ modelViewConnectors: ModelViewConnector[]; /** * List of tag-bound angular directives to handle during the analysis. * [[defaultTagDirectiveHandlers]] contains a default list; you can use * that, add to that list, or specify your own. */ tagDirectives: TagDirectiveHandler[]; /** * List of attribute-bound angular directives to handle during the analysis. * [[defaultAttrDirectiveHandlers]] contains a default list; you can use * that, add to that list, or specify your own. */ attributeDirectives: AttributeDirectiveHandler[]; /** * Controller view fragment extractors. For instance, you may have * view fragments present in your controllers, for instance ng-grid has * 'cell templates' which typeview can also type-check through this mechanism. * Extractors allows you to tell ng-typeview about those. */ ctrlViewFragmentExtractors: CtrlViewFragmentExtractor[]; /** * When resolving the scope for variables in the view, we prefix "$scope." * for all variables except those defined in the view. For instance, a * `ng-repeat` will define local variables. For these, we do not prefix with * "$scope.". 99% of the time, that works great. * One issue that can come up though, is if you have static fields for * instance. 
If you read `MyClass.MY_STATIC_FIELD`... That'll work in javascript * and angular, due to the TS->JS transpilation. But in ng-typeview, we * can't declare on the scope a field of type [class of MyClass], so that * field.MY_STATIC_FIELD would work. * So a workaround is to specify in your controller: * `import MyClass = api.MyClass;` * In that case, if you enable this `resolveImportsAsNonScope` option * (disabled by default), ng-typeview will not resolve * `MyClass.MY_STATIC_FIELD` as `$scope.MyClass.MY_STATIC_FIELD` anymore, * but as `MyClass.MY_STATIC_FIELD`. And since we copy the imports in the * viewtest, it should work. * But it's pretty messy, so we rather encourage you to avoid statics if * at all possible. */ resolveImportsAsNonScope?: boolean; } function deletePreviouslyGeneratedFiles(prjSettings: ProjectSettings): void { const files = sync(prjSettings.path + "/**/" + getViewTestFilename("*", "*"), {nodir:true, ignore: prjSettings.blacklistedPaths}); files.forEach(f => unlinkSync(f)); } /** * Will go through the views and controllers in the project folder and * generate viewtest typescript files to ascertain type-safety of the views. * NOTE: The function returns a promise but is not fully async: a good part of its * runtime is spend running synchronous functions. 
*/ export async function processProject(prjSettings: ProjectSettings): Promise<any> { deletePreviouslyGeneratedFiles(prjSettings); const files = sync(prjSettings.path + "/**/*.@(js|ts)", {nodir:true, ignore: prjSettings.blacklistedPaths}); const viewInfos = await Promise.all( files.map(f => extractCtrlViewConnsAngularModule( f, prjSettings.path, prjSettings.ctrlViewConnectors, prjSettings.modelViewConnectors))); const viewFilenameToControllerNames: HashMap<string,Vector<ControllerViewInfo>> = Vector.ofIterable(viewInfos) .flatMap(vi => Vector.ofIterable(vi.controllerViewInfos)) .appendAll(prjSettings.extraCtrlViewConnections) .groupBy(cvi => cvi.viewPath); const controllerNameToFilename = Vector.ofIterable(viewInfos) .filter(vi => vi.controllerName.isSome()) // JS files are not going to have a scope interface // definition so they're not helpful. Also, we can // get twice the same file: original TS & compiled JS. // => keep only the original TS in that case. .filter(vi => vi.fileName.toLowerCase().endsWith(".ts")) .toMap(vi => [vi.controllerName.getOrThrow(), vi.fileName]); const viewFilenameToCtrlFilenamesViewConns = viewFilenameToControllerNames .map<string,Vector<string>>( (viewFname,ctrlViewInfos) => [viewFname, collectionKeepDefined( ctrlViewInfos.map(cvi => controllerNameToFilename.get(cvi.controllerName).getOrUndefined()))]); const viewFilenameToCtrlFilenamesModelConns = Vector.ofIterable(viewInfos) .flatMap(vi => Vector.ofIterable(vi.modelViewInfos)) .groupBy(mvi => mvi.viewPath) .mapValues(mvis => mvis.map(mvi => mvi.modelPath)); const viewFilenameToCtrlFilenames = viewFilenameToCtrlFilenamesViewConns.mergeWith( viewFilenameToCtrlFilenamesModelConns, (views1, views2) => views1.appendAll(views2)); return Promise.all(viewFilenameToCtrlFilenames.toVector().map( ([viewName, ctrlNames]) => Promise.all(ctrlNames.map( ctrlName => processControllerView(prjSettings, ctrlName, prjSettings.path + "/" + viewName, prjSettings.ngFilters, prjSettings.tagDirectives, 
prjSettings.attributeDirectives)).toArray())).toArray()); } try { processProject({ path: process.argv[2], blacklistedPaths: process.argv.slice(3), ngFilters: defaultNgFilters, ctrlViewConnectors: defaultCtrlViewConnectors, modelViewConnectors: defaultModelViewConnectors, extraCtrlViewConnections: [], tagDirectives: defaultTagDirectiveHandlers, attributeDirectives: defaultAttrDirectiveHandlers, ctrlViewFragmentExtractors: defaultCtrlViewFragmentExtractors }); } catch (e) { console.log(e); }
random_line_split
ng-typeview.ts
import {writeFileSync, unlinkSync} from "fs"; import {sync} from "glob"; import {HashMap, Vector} from "prelude-ts"; import {parse} from "path"; import {parseView, collectionKeepDefined } from "./view-parser" import {AttributeDirectiveHandler, TagDirectiveHandler, defaultTagDirectiveHandlers, defaultAttrDirectiveHandlers} from "./ng-directives" export {AttributeDirectiveHandler, TagDirectiveHandler, defaultTagDirectiveHandlers, defaultAttrDirectiveHandlers} from "./ng-directives" import {extractControllerScopeInfo, extractCtrlViewConnsAngularModule, ControllerViewInfo, ControllerScopeInfo, ControllerViewConnector, defaultCtrlViewConnectors, CtrlViewFragmentExtractor, defaultCtrlViewFragmentExtractors, ModelViewConnector, defaultModelViewConnectors} from "./controller-parser" import {NgFilter, defaultNgFilters} from "./filters" export {ControllerViewInfo} from "./controller-parser"; // we only repeat the imports, type synonyms and custom interfaces // if there is a module, because otherwise those are dumped in the // global namespace anyway function wrapInModule(moduleName: string, scopeInfo: ControllerScopeInfo, contents: string): string { return "module " + moduleName + " {\n" + scopeInfo.imports.join("\n") + "\n" + scopeInfo.typeAliases.join("\n") + "\n" + scopeInfo.nonExportedDeclarations.join("\n") + "\n" + contents + "}\n"; } function getViewTestFilename(ctrlFname: string, viewFname: string): string { return `${ctrlFname}_${viewFname}_viewtest.ts`; } async function processControllerView(prjSettings: ProjectSettings, controllerPath: string, viewPath: string, ngFilters: NgFilter[], tagDirectives: TagDirectiveHandler[], attributeDirectives: AttributeDirectiveHandler[]) { const scopeContents: ControllerScopeInfo = await extractControllerScopeInfo( controllerPath, prjSettings.ctrlViewFragmentExtractors); if (scopeContents.scopeInfo.isNone())
const viewExprs = await parseView( prjSettings.resolveImportsAsNonScope || false, viewPath, scopeContents.viewFragments, scopeContents.importNames, Vector.ofIterable(tagDirectives), Vector.ofIterable(attributeDirectives), Vector.ofIterable(ngFilters)); const pathInfo = parse(controllerPath); const viewPathInfo = parse(viewPath); // putting both controller & view name in the output, as one controller // may be used for several views. const outputFname = pathInfo.dir + "/" + getViewTestFilename(pathInfo.name, viewPathInfo.name); const moduleWrap = (x:string) => scopeContents.tsModuleName .map(n => wrapInModule(n, scopeContents, x)) .getOrElse(x); const filterParams = ngFilters.map(f => `f__${f.name}:${f.type}`).join(",\n ") const typeParams = scopeContents.scopeTypeParams.getOrElse(""); writeFileSync(outputFname, moduleWrap( scopeContents.scopeInfo.getOrThrow() + `\n\nfunction ___f${typeParams}($scope: Scope${ typeParams}, ${filterParams}) {\n` + viewExprs + "\n}\n") + "\n"); } /** * Configuration for a ng-typeview project. */ export interface ProjectSettings { /** * The path for the project on disk (root folder) */ path: string; /** * Folders within the project to exclude from analysis * (for instance external JS libraries, the folder where * your typescript is compiled to javascript, and so on). */ blacklistedPaths: string[]; /** * List of angular filters to handle during the analysis. * You can use [[defaultNgFilters]], add to that list, or specify your own. */ ngFilters: NgFilter[]; /** * List of controller-view connectors to use. * [[defaultCtrlViewConnectors]] contains a default list; you can use * that, add to that list, or specify your own. */ ctrlViewConnectors: ControllerViewConnector[]; /** * Hardcoded controller/view connections that'll be added * to the ones which were autodetected through ctrlViewConnectors. * Useful in case it's too hard to parse some connections * from source. 
*/ extraCtrlViewConnections: ControllerViewInfo[]; /** * List of model-view connectors to use. * These tie model files to views. * This allows to express non-controller models, such * as directive models for instance. * [[defaultModelViewConnectors]] contains a default list; you can use * that, add to that list, or specify your own. */ modelViewConnectors: ModelViewConnector[]; /** * List of tag-bound angular directives to handle during the analysis. * [[defaultTagDirectiveHandlers]] contains a default list; you can use * that, add to that list, or specify your own. */ tagDirectives: TagDirectiveHandler[]; /** * List of attribute-bound angular directives to handle during the analysis. * [[defaultAttrDirectiveHandlers]] contains a default list; you can use * that, add to that list, or specify your own. */ attributeDirectives: AttributeDirectiveHandler[]; /** * Controller view fragment extractors. For instance, you may have * view fragments present in your controllers, for instance ng-grid has * 'cell templates' which typeview can also type-check through this mechanism. * Extractors allows you to tell ng-typeview about those. */ ctrlViewFragmentExtractors: CtrlViewFragmentExtractor[]; /** * When resolving the scope for variables in the view, we prefix "$scope." * for all variables except those defined in the view. For instance, a * `ng-repeat` will define local variables. For these, we do not prefix with * "$scope.". 99% of the time, that works great. * One issue that can come up though, is if you have static fields for * instance. If you read `MyClass.MY_STATIC_FIELD`... That'll work in javascript * and angular, due to the TS->JS transpilation. But in ng-typeview, we * can't declare on the scope a field of type [class of MyClass], so that * field.MY_STATIC_FIELD would work. 
* So a workaround is to specify in your controller: * `import MyClass = api.MyClass;` * In that case, if you enable this `resolveImportsAsNonScope` option * (disabled by default), ng-typeview will not resolve * `MyClass.MY_STATIC_FIELD` as `$scope.MyClass.MY_STATIC_FIELD` anymore, * but as `MyClass.MY_STATIC_FIELD`. And since we copy the imports in the * viewtest, it should work. * But it's pretty messy, so we rather encourage you to avoid statics if * at all possible. */ resolveImportsAsNonScope?: boolean; } function deletePreviouslyGeneratedFiles(prjSettings: ProjectSettings): void { const files = sync(prjSettings.path + "/**/" + getViewTestFilename("*", "*"), {nodir:true, ignore: prjSettings.blacklistedPaths}); files.forEach(f => unlinkSync(f)); } /** * Will go through the views and controllers in the project folder and * generate viewtest typescript files to ascertain type-safety of the views. * NOTE: The function returns a promise but is not fully async: a good part of its * runtime is spend running synchronous functions. */ export async function processProject(prjSettings: ProjectSettings): Promise<any> { deletePreviouslyGeneratedFiles(prjSettings); const files = sync(prjSettings.path + "/**/*.@(js|ts)", {nodir:true, ignore: prjSettings.blacklistedPaths}); const viewInfos = await Promise.all( files.map(f => extractCtrlViewConnsAngularModule( f, prjSettings.path, prjSettings.ctrlViewConnectors, prjSettings.modelViewConnectors))); const viewFilenameToControllerNames: HashMap<string,Vector<ControllerViewInfo>> = Vector.ofIterable(viewInfos) .flatMap(vi => Vector.ofIterable(vi.controllerViewInfos)) .appendAll(prjSettings.extraCtrlViewConnections) .groupBy(cvi => cvi.viewPath); const controllerNameToFilename = Vector.ofIterable(viewInfos) .filter(vi => vi.controllerName.isSome()) // JS files are not going to have a scope interface // definition so they're not helpful. Also, we can // get twice the same file: original TS & compiled JS. 
// => keep only the original TS in that case. .filter(vi => vi.fileName.toLowerCase().endsWith(".ts")) .toMap(vi => [vi.controllerName.getOrThrow(), vi.fileName]); const viewFilenameToCtrlFilenamesViewConns = viewFilenameToControllerNames .map<string,Vector<string>>( (viewFname,ctrlViewInfos) => [viewFname, collectionKeepDefined( ctrlViewInfos.map(cvi => controllerNameToFilename.get(cvi.controllerName).getOrUndefined()))]); const viewFilenameToCtrlFilenamesModelConns = Vector.ofIterable(viewInfos) .flatMap(vi => Vector.ofIterable(vi.modelViewInfos)) .groupBy(mvi => mvi.viewPath) .mapValues(mvis => mvis.map(mvi => mvi.modelPath)); const viewFilenameToCtrlFilenames = viewFilenameToCtrlFilenamesViewConns.mergeWith( viewFilenameToCtrlFilenamesModelConns, (views1, views2) => views1.appendAll(views2)); return Promise.all(viewFilenameToCtrlFilenames.toVector().map( ([viewName, ctrlNames]) => Promise.all(ctrlNames.map( ctrlName => processControllerView(prjSettings, ctrlName, prjSettings.path + "/" + viewName, prjSettings.ngFilters, prjSettings.tagDirectives, prjSettings.attributeDirectives)).toArray())).toArray()); } try { processProject({ path: process.argv[2], blacklistedPaths: process.argv.slice(3), ngFilters: defaultNgFilters, ctrlViewConnectors: defaultCtrlViewConnectors, modelViewConnectors: defaultModelViewConnectors, extraCtrlViewConnections: [], tagDirectives: defaultTagDirectiveHandlers, attributeDirectives: defaultAttrDirectiveHandlers, ctrlViewFragmentExtractors: defaultCtrlViewFragmentExtractors }); } catch (e) { console.log(e); }
{ // no point of writing anything if there is no scope block return; }
conditional_block
ng-typeview.ts
import {writeFileSync, unlinkSync} from "fs"; import {sync} from "glob"; import {HashMap, Vector} from "prelude-ts"; import {parse} from "path"; import {parseView, collectionKeepDefined } from "./view-parser" import {AttributeDirectiveHandler, TagDirectiveHandler, defaultTagDirectiveHandlers, defaultAttrDirectiveHandlers} from "./ng-directives" export {AttributeDirectiveHandler, TagDirectiveHandler, defaultTagDirectiveHandlers, defaultAttrDirectiveHandlers} from "./ng-directives" import {extractControllerScopeInfo, extractCtrlViewConnsAngularModule, ControllerViewInfo, ControllerScopeInfo, ControllerViewConnector, defaultCtrlViewConnectors, CtrlViewFragmentExtractor, defaultCtrlViewFragmentExtractors, ModelViewConnector, defaultModelViewConnectors} from "./controller-parser" import {NgFilter, defaultNgFilters} from "./filters" export {ControllerViewInfo} from "./controller-parser"; // we only repeat the imports, type synonyms and custom interfaces // if there is a module, because otherwise those are dumped in the // global namespace anyway function wrapInModule(moduleName: string, scopeInfo: ControllerScopeInfo, contents: string): string { return "module " + moduleName + " {\n" + scopeInfo.imports.join("\n") + "\n" + scopeInfo.typeAliases.join("\n") + "\n" + scopeInfo.nonExportedDeclarations.join("\n") + "\n" + contents + "}\n"; } function
(ctrlFname: string, viewFname: string): string { return `${ctrlFname}_${viewFname}_viewtest.ts`; } async function processControllerView(prjSettings: ProjectSettings, controllerPath: string, viewPath: string, ngFilters: NgFilter[], tagDirectives: TagDirectiveHandler[], attributeDirectives: AttributeDirectiveHandler[]) { const scopeContents: ControllerScopeInfo = await extractControllerScopeInfo( controllerPath, prjSettings.ctrlViewFragmentExtractors); if (scopeContents.scopeInfo.isNone()) { // no point of writing anything if there is no scope block return; } const viewExprs = await parseView( prjSettings.resolveImportsAsNonScope || false, viewPath, scopeContents.viewFragments, scopeContents.importNames, Vector.ofIterable(tagDirectives), Vector.ofIterable(attributeDirectives), Vector.ofIterable(ngFilters)); const pathInfo = parse(controllerPath); const viewPathInfo = parse(viewPath); // putting both controller & view name in the output, as one controller // may be used for several views. const outputFname = pathInfo.dir + "/" + getViewTestFilename(pathInfo.name, viewPathInfo.name); const moduleWrap = (x:string) => scopeContents.tsModuleName .map(n => wrapInModule(n, scopeContents, x)) .getOrElse(x); const filterParams = ngFilters.map(f => `f__${f.name}:${f.type}`).join(",\n ") const typeParams = scopeContents.scopeTypeParams.getOrElse(""); writeFileSync(outputFname, moduleWrap( scopeContents.scopeInfo.getOrThrow() + `\n\nfunction ___f${typeParams}($scope: Scope${ typeParams}, ${filterParams}) {\n` + viewExprs + "\n}\n") + "\n"); } /** * Configuration for a ng-typeview project. */ export interface ProjectSettings { /** * The path for the project on disk (root folder) */ path: string; /** * Folders within the project to exclude from analysis * (for instance external JS libraries, the folder where * your typescript is compiled to javascript, and so on). */ blacklistedPaths: string[]; /** * List of angular filters to handle during the analysis. 
* You can use [[defaultNgFilters]], add to that list, or specify your own. */ ngFilters: NgFilter[]; /** * List of controller-view connectors to use. * [[defaultCtrlViewConnectors]] contains a default list; you can use * that, add to that list, or specify your own. */ ctrlViewConnectors: ControllerViewConnector[]; /** * Hardcoded controller/view connections that'll be added * to the ones which were autodetected through ctrlViewConnectors. * Useful in case it's too hard to parse some connections * from source. */ extraCtrlViewConnections: ControllerViewInfo[]; /** * List of model-view connectors to use. * These tie model files to views. * This allows to express non-controller models, such * as directive models for instance. * [[defaultModelViewConnectors]] contains a default list; you can use * that, add to that list, or specify your own. */ modelViewConnectors: ModelViewConnector[]; /** * List of tag-bound angular directives to handle during the analysis. * [[defaultTagDirectiveHandlers]] contains a default list; you can use * that, add to that list, or specify your own. */ tagDirectives: TagDirectiveHandler[]; /** * List of attribute-bound angular directives to handle during the analysis. * [[defaultAttrDirectiveHandlers]] contains a default list; you can use * that, add to that list, or specify your own. */ attributeDirectives: AttributeDirectiveHandler[]; /** * Controller view fragment extractors. For instance, you may have * view fragments present in your controllers, for instance ng-grid has * 'cell templates' which typeview can also type-check through this mechanism. * Extractors allows you to tell ng-typeview about those. */ ctrlViewFragmentExtractors: CtrlViewFragmentExtractor[]; /** * When resolving the scope for variables in the view, we prefix "$scope." * for all variables except those defined in the view. For instance, a * `ng-repeat` will define local variables. For these, we do not prefix with * "$scope.". 99% of the time, that works great. 
* One issue that can come up though, is if you have static fields for * instance. If you read `MyClass.MY_STATIC_FIELD`... That'll work in javascript * and angular, due to the TS->JS transpilation. But in ng-typeview, we * can't declare on the scope a field of type [class of MyClass], so that * field.MY_STATIC_FIELD would work. * So a workaround is to specify in your controller: * `import MyClass = api.MyClass;` * In that case, if you enable this `resolveImportsAsNonScope` option * (disabled by default), ng-typeview will not resolve * `MyClass.MY_STATIC_FIELD` as `$scope.MyClass.MY_STATIC_FIELD` anymore, * but as `MyClass.MY_STATIC_FIELD`. And since we copy the imports in the * viewtest, it should work. * But it's pretty messy, so we rather encourage you to avoid statics if * at all possible. */ resolveImportsAsNonScope?: boolean; } function deletePreviouslyGeneratedFiles(prjSettings: ProjectSettings): void { const files = sync(prjSettings.path + "/**/" + getViewTestFilename("*", "*"), {nodir:true, ignore: prjSettings.blacklistedPaths}); files.forEach(f => unlinkSync(f)); } /** * Will go through the views and controllers in the project folder and * generate viewtest typescript files to ascertain type-safety of the views. * NOTE: The function returns a promise but is not fully async: a good part of its * runtime is spend running synchronous functions. 
*/ export async function processProject(prjSettings: ProjectSettings): Promise<any> { deletePreviouslyGeneratedFiles(prjSettings); const files = sync(prjSettings.path + "/**/*.@(js|ts)", {nodir:true, ignore: prjSettings.blacklistedPaths}); const viewInfos = await Promise.all( files.map(f => extractCtrlViewConnsAngularModule( f, prjSettings.path, prjSettings.ctrlViewConnectors, prjSettings.modelViewConnectors))); const viewFilenameToControllerNames: HashMap<string,Vector<ControllerViewInfo>> = Vector.ofIterable(viewInfos) .flatMap(vi => Vector.ofIterable(vi.controllerViewInfos)) .appendAll(prjSettings.extraCtrlViewConnections) .groupBy(cvi => cvi.viewPath); const controllerNameToFilename = Vector.ofIterable(viewInfos) .filter(vi => vi.controllerName.isSome()) // JS files are not going to have a scope interface // definition so they're not helpful. Also, we can // get twice the same file: original TS & compiled JS. // => keep only the original TS in that case. .filter(vi => vi.fileName.toLowerCase().endsWith(".ts")) .toMap(vi => [vi.controllerName.getOrThrow(), vi.fileName]); const viewFilenameToCtrlFilenamesViewConns = viewFilenameToControllerNames .map<string,Vector<string>>( (viewFname,ctrlViewInfos) => [viewFname, collectionKeepDefined( ctrlViewInfos.map(cvi => controllerNameToFilename.get(cvi.controllerName).getOrUndefined()))]); const viewFilenameToCtrlFilenamesModelConns = Vector.ofIterable(viewInfos) .flatMap(vi => Vector.ofIterable(vi.modelViewInfos)) .groupBy(mvi => mvi.viewPath) .mapValues(mvis => mvis.map(mvi => mvi.modelPath)); const viewFilenameToCtrlFilenames = viewFilenameToCtrlFilenamesViewConns.mergeWith( viewFilenameToCtrlFilenamesModelConns, (views1, views2) => views1.appendAll(views2)); return Promise.all(viewFilenameToCtrlFilenames.toVector().map( ([viewName, ctrlNames]) => Promise.all(ctrlNames.map( ctrlName => processControllerView(prjSettings, ctrlName, prjSettings.path + "/" + viewName, prjSettings.ngFilters, prjSettings.tagDirectives, 
prjSettings.attributeDirectives)).toArray())).toArray()); } try { processProject({ path: process.argv[2], blacklistedPaths: process.argv.slice(3), ngFilters: defaultNgFilters, ctrlViewConnectors: defaultCtrlViewConnectors, modelViewConnectors: defaultModelViewConnectors, extraCtrlViewConnections: [], tagDirectives: defaultTagDirectiveHandlers, attributeDirectives: defaultAttrDirectiveHandlers, ctrlViewFragmentExtractors: defaultCtrlViewFragmentExtractors }); } catch (e) { console.log(e); }
getViewTestFilename
identifier_name
main_glcn.py
import argparse from torchvision import datasets, transforms import torch.optim as optim from model import * from model_glcn import GLCN from utils import * import os import torch num_labeled = 1000 num_valid = 1000 eval_freq = 10 lr = 0.005 cuda_device = "0" parser = argparse.ArgumentParser() parser.add_argument('--dataset', required=True, help='cifar10 | svhn') parser.add_argument('--dataroot', required=True, help='path to dataset') parser.add_argument('--num_epochs', type=int, default=5000) parser.add_argument('--epoch_decay_start', type=int, default=80) parser.add_argument('--epsilon', type=float, default=2.5) parser.add_argument('--top_bn', type=bool, default=True) parser.add_argument('--method', default='vat') parser.add_argument('--lr', type=float, default=0.1) parser.add_argument('--in_channels', type=int, default=3) parser.add_argument('--out_channels', type=int, default=7) parser.add_argument('--topk', type=int, default=10) parser.add_argument('--ngcn_layers', type=int, default=30) parser.add_argument('--nclass', type=int, default=10) parser.add_argument('--gamma_reg', type=float, default=0.01) parser.add_argument('--lamda_reg', type=float, default=0.00001) parser.add_argument('--dropout', type=float, default=0.0) parser.add_argument('--cuda', dest='cuda', default='0', type=str) parser.add_argument('--mode', default='gpu', help='cpu/gpu') parser.add_argument('--train', default=True, action='store_false') opt = parser.parse_args() os.environ["CUDA_VISIBLE_DEVICES"] = cuda_device # set up gpu if opt.mode == 'gpu': os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" os.environ['CUDA_VISIBLE_DEVICES'] = str(opt.cuda) print('Using GPU {}'.format(os.environ['CUDA_VISIBLE_DEVICES']), flush= True) else: os.environ['CUDA_VISIBLE_DEVICES'] = '-1' print('Using CPU', flush= True) opt.device = torch.device('cuda:0' if opt.mode == 'gpu' else 'cpu') def tocuda(x): if opt.use_cuda: return x.cuda() return x def train(model, x, y, optimizer, lamda_reg=0.0):
def eval(y_pred, y): # print(semi_outputs.shape) # y_pred = semi_outputs[num_labeled:(num_labeled+num_valid)] prob, idx = torch.max(y_pred, dim=1) return torch.eq(idx, y).float().mean() # Several Ways to initialize the weights # 1. initialize different weights using different initialization def weights_init(m): """ Usage: model.apply(weights_init) :param m: :return: """ classname = m.__class__.__name__ if classname.find('Conv') != -1: m.weight.data.normal_(0.0, 0.02) elif classname.find('BatchNorm') != -1: m.weight.data.normal_(1.0, 0.02) m.bias.data.fill_(0) elif classname.find('Linear') != -1: m.bias.data.fill_(0) # 2. weight different weights using different torch.nn methods def init_all(model, init_funcs): """ Usage: init_all(model, init_funcs) :param model: :param init_funcs: :return: """ for p in model.parameters(): init_func = init_funcs.get(len(p.shape), init_funcs["default"]) init_func(p) init_funcs = { 1: lambda x: torch.nn.init.normal_(x, mean=0., std=1.), # can be bias 2: lambda x: torch.nn.init.xavier_normal_(x, gain=1.), # can be weight 3: lambda x: torch.nn.init.xavier_uniform_(x, gain=1.), # can be conv1D filter 4: lambda x: torch.nn.init.xavier_uniform_(x, gain=1.), # can be conv2D filter "default": lambda x: torch.nn.init.xavier_uniform_(x, gain=1.), # everything else } if opt.dataset == 'svhn': train_loader = torch.utils.data.DataLoader( datasets.SVHN(root=opt.dataroot, split='train', download=True, transform=transforms.Compose([ transforms.ToTensor(), transforms.Normalize((0.4377, 0.4438, 0.4728), (0.1980, 0.2010, 0.1970)) ])), batch_size=100, shuffle=True) elif opt.dataset == 'cifar10': num_labeled = 1000 train_loader = torch.utils.data.DataLoader( datasets.CIFAR10(root=opt.dataroot, train=True, download=True, transform=transforms.Compose([ transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)) ])), batch_size=100, shuffle=True) elif opt.dataset == 'mnist': # num_labeled = 1000 opt.in_channels = 1 
train_loader = torch.utils.data.DataLoader( datasets.MNIST(root=opt.dataroot, train=True, download=True, transform=transforms.Compose([ transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,)) ])), batch_size=100, shuffle=True) else: raise NotImplementedError train_data = [] train_target = [] for (data, target) in train_loader: train_data.append(data) train_target.append(target) train_data = torch.cat(train_data, dim=0) train_target = torch.cat(train_target, dim=0) print(f"Total number of dataset {opt.dataset} is {train_data.shape}") unique_labels = np.unique(train_target) print("Unique Labels: ", unique_labels) n_class = len(unique_labels) nSamples_per_class_train = 100 nSamples_per_class_val = 100 nSamples_per_unlabel = 1000 - nSamples_per_class_train - nSamples_per_class_val select_train_data = [] select_train_label = [] select_val_data = [] select_val_label = [] unlabeled_train_data = [] unlabeled_train_label = [] for label in unique_labels: label_mask = (train_target == label) current_label_X = train_data[label_mask] current_label_y = train_target[label_mask] select_train_data.append(current_label_X[:nSamples_per_class_train]) select_train_label.append(current_label_y[:nSamples_per_class_train]) select_val_data.append(current_label_X[nSamples_per_class_train:nSamples_per_class_train+nSamples_per_class_val]) select_val_label.append(current_label_y[nSamples_per_class_train:nSamples_per_class_train + nSamples_per_class_val]) unlabeled_train_data.append(current_label_X[nSamples_per_class_train + nSamples_per_class_val:1000]) unlabeled_train_label.append(current_label_y[nSamples_per_class_train + nSamples_per_class_val:1000]) train_data = torch.cat(select_train_data, dim=0).to(opt.device) train_target = torch.cat(select_train_label, dim=0).to(opt.device) valid_data = torch.cat(select_val_data, dim=0).to(opt.device) valid_target = torch.cat(select_val_label, dim=0).to(opt.device) test_data = torch.cat(unlabeled_train_data, dim=0).to(opt.device) test_target 
= torch.cat(unlabeled_train_label, dim=0).to(opt.device) # random shuffle the data train_random_ind = np.arange(nSamples_per_class_train * n_class) val_random_ind = np.arange(nSamples_per_class_val * n_class) test_random_ind = np.arange(nSamples_per_unlabel * n_class) np.random.shuffle(train_random_ind) np.random.shuffle(val_random_ind) np.random.shuffle(test_random_ind) train_data = train_data[train_random_ind] train_target = train_target[train_random_ind] valid_data = valid_data[val_random_ind] valid_target = valid_target[val_random_ind] test_data = test_data[test_random_ind] test_target = test_target[test_random_ind] all_data = torch.cat([train_data, valid_data, test_data], dim=0) all_data = torch.reshape(all_data, (1000*n_class, -1)) print(all_data.shape) path_best_model = f'./saved_models/{opt.dataset}/glcn_best_models' if not os.path.exists(os.path.dirname(path_best_model)): os.mkdir(os.path.dirname(path_best_model)) opt.in_channels = all_data.shape[1] model = GLCN(opt.in_channels, opt.out_channels, opt.ngcn_layers, opt.nclass, opt.gamma_reg, opt.dropout, opt.topk).to(opt.device) # model.apply(weights_init) init_all(model, init_funcs) optimizer = optim.Adam(model.parameters(), lr=opt.lr) if os.path.exists(path_best_model): # original saved file with DataParallel state_dict = torch.load(path_best_model) model.load_state_dict(state_dict) min_valid_acc = 0.0 no_increase_step = 0 final_output = None # train the network if opt.train: for epoch in range(opt.num_epochs): if epoch > opt.epoch_decay_start: decayed_lr = (opt.num_epochs - epoch) * opt.lr / (opt.num_epochs - opt.epoch_decay_start) optimizer.lr = decayed_lr optimizer.betas = (0.5, 0.999) # training semi_outputs, v_loss, ce_loss = train(model, all_data, train_target, optimizer, opt.lamda_reg) print("Epoch :", epoch, "GLCN Loss :", v_loss.item(), "CE Loss :", ce_loss.item(), flush=True) # evaluating if epoch % eval_freq == 0 or epoch + 1 == opt.num_epochs: train_preds = semi_outputs[:num_labeled] 
train_accuracy = eval(train_preds, train_target) print("Train accuracy :", train_accuracy.item(), flush=True) val_preds = semi_outputs[num_labeled:num_valid+num_labeled] val_accuracy = eval(val_preds, valid_target) print("Valid accuracy :", val_accuracy.item(), flush=True) print(semi_outputs.shape) if val_accuracy > min_valid_acc: min_valid_acc = val_accuracy no_increase_step = 0 torch.save(model.state_dict(), path_best_model) else: no_increase_step += 1 if no_increase_step == 100: final_output = semi_outputs break final_output = semi_outputs if os.path.exists(path_best_model): # original saved file with DataParallel state_dict = torch.load(path_best_model) model.load_state_dict(state_dict) model.eval() final_output, loss_GL, S = model(all_data) test_preds = final_output[num_valid+num_labeled:] test_accuracy = eval(test_preds, test_target) print("Test accuracy :", test_accuracy.item(), flush=True)
model.train() # ce = nn.CrossEntropyLoss() # This criterion combines nn.LogSoftmax() and nn.NLLLoss() in one single class. # semi_outputs have been log_softmax, so only NLLLoss() here nll_loss = nn.NLLLoss() semi_outputs, loss_GL, S = model(x) # print("The learned S is ", torch.sum(S, dim=-1)) ce_loss = nll_loss(semi_outputs[:num_labeled], y) loss = ce_loss + lamda_reg * loss_GL optimizer.zero_grad() loss.backward() optimizer.step() # print("First Row of X") # print(x[0]) # print("Adj Matrix....") # print(S[S > 0]) return semi_outputs, loss, ce_loss
identifier_body
main_glcn.py
import argparse from torchvision import datasets, transforms import torch.optim as optim from model import * from model_glcn import GLCN from utils import * import os import torch num_labeled = 1000 num_valid = 1000 eval_freq = 10 lr = 0.005 cuda_device = "0" parser = argparse.ArgumentParser() parser.add_argument('--dataset', required=True, help='cifar10 | svhn') parser.add_argument('--dataroot', required=True, help='path to dataset') parser.add_argument('--num_epochs', type=int, default=5000) parser.add_argument('--epoch_decay_start', type=int, default=80) parser.add_argument('--epsilon', type=float, default=2.5) parser.add_argument('--top_bn', type=bool, default=True) parser.add_argument('--method', default='vat') parser.add_argument('--lr', type=float, default=0.1) parser.add_argument('--in_channels', type=int, default=3) parser.add_argument('--out_channels', type=int, default=7) parser.add_argument('--topk', type=int, default=10) parser.add_argument('--ngcn_layers', type=int, default=30) parser.add_argument('--nclass', type=int, default=10) parser.add_argument('--gamma_reg', type=float, default=0.01) parser.add_argument('--lamda_reg', type=float, default=0.00001) parser.add_argument('--dropout', type=float, default=0.0) parser.add_argument('--cuda', dest='cuda', default='0', type=str) parser.add_argument('--mode', default='gpu', help='cpu/gpu') parser.add_argument('--train', default=True, action='store_false') opt = parser.parse_args() os.environ["CUDA_VISIBLE_DEVICES"] = cuda_device # set up gpu if opt.mode == 'gpu': os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" os.environ['CUDA_VISIBLE_DEVICES'] = str(opt.cuda) print('Using GPU {}'.format(os.environ['CUDA_VISIBLE_DEVICES']), flush= True) else: os.environ['CUDA_VISIBLE_DEVICES'] = '-1' print('Using CPU', flush= True) opt.device = torch.device('cuda:0' if opt.mode == 'gpu' else 'cpu') def tocuda(x): if opt.use_cuda: return x.cuda() return x def train(model, x, y, optimizer, lamda_reg=0.0): model.train() # ce = 
nn.CrossEntropyLoss() # This criterion combines nn.LogSoftmax() and nn.NLLLoss() in one single class. # semi_outputs have been log_softmax, so only NLLLoss() here nll_loss = nn.NLLLoss() semi_outputs, loss_GL, S = model(x) # print("The learned S is ", torch.sum(S, dim=-1)) ce_loss = nll_loss(semi_outputs[:num_labeled], y) loss = ce_loss + lamda_reg * loss_GL optimizer.zero_grad()
# print("First Row of X") # print(x[0]) # print("Adj Matrix....") # print(S[S > 0]) return semi_outputs, loss, ce_loss def eval(y_pred, y): # print(semi_outputs.shape) # y_pred = semi_outputs[num_labeled:(num_labeled+num_valid)] prob, idx = torch.max(y_pred, dim=1) return torch.eq(idx, y).float().mean() # Several Ways to initialize the weights # 1. initialize different weights using different initialization def weights_init(m): """ Usage: model.apply(weights_init) :param m: :return: """ classname = m.__class__.__name__ if classname.find('Conv') != -1: m.weight.data.normal_(0.0, 0.02) elif classname.find('BatchNorm') != -1: m.weight.data.normal_(1.0, 0.02) m.bias.data.fill_(0) elif classname.find('Linear') != -1: m.bias.data.fill_(0) # 2. weight different weights using different torch.nn methods def init_all(model, init_funcs): """ Usage: init_all(model, init_funcs) :param model: :param init_funcs: :return: """ for p in model.parameters(): init_func = init_funcs.get(len(p.shape), init_funcs["default"]) init_func(p) init_funcs = { 1: lambda x: torch.nn.init.normal_(x, mean=0., std=1.), # can be bias 2: lambda x: torch.nn.init.xavier_normal_(x, gain=1.), # can be weight 3: lambda x: torch.nn.init.xavier_uniform_(x, gain=1.), # can be conv1D filter 4: lambda x: torch.nn.init.xavier_uniform_(x, gain=1.), # can be conv2D filter "default": lambda x: torch.nn.init.xavier_uniform_(x, gain=1.), # everything else } if opt.dataset == 'svhn': train_loader = torch.utils.data.DataLoader( datasets.SVHN(root=opt.dataroot, split='train', download=True, transform=transforms.Compose([ transforms.ToTensor(), transforms.Normalize((0.4377, 0.4438, 0.4728), (0.1980, 0.2010, 0.1970)) ])), batch_size=100, shuffle=True) elif opt.dataset == 'cifar10': num_labeled = 1000 train_loader = torch.utils.data.DataLoader( datasets.CIFAR10(root=opt.dataroot, train=True, download=True, transform=transforms.Compose([ transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 
0.2010)) ])), batch_size=100, shuffle=True) elif opt.dataset == 'mnist': # num_labeled = 1000 opt.in_channels = 1 train_loader = torch.utils.data.DataLoader( datasets.MNIST(root=opt.dataroot, train=True, download=True, transform=transforms.Compose([ transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,)) ])), batch_size=100, shuffle=True) else: raise NotImplementedError train_data = [] train_target = [] for (data, target) in train_loader: train_data.append(data) train_target.append(target) train_data = torch.cat(train_data, dim=0) train_target = torch.cat(train_target, dim=0) print(f"Total number of dataset {opt.dataset} is {train_data.shape}") unique_labels = np.unique(train_target) print("Unique Labels: ", unique_labels) n_class = len(unique_labels) nSamples_per_class_train = 100 nSamples_per_class_val = 100 nSamples_per_unlabel = 1000 - nSamples_per_class_train - nSamples_per_class_val select_train_data = [] select_train_label = [] select_val_data = [] select_val_label = [] unlabeled_train_data = [] unlabeled_train_label = [] for label in unique_labels: label_mask = (train_target == label) current_label_X = train_data[label_mask] current_label_y = train_target[label_mask] select_train_data.append(current_label_X[:nSamples_per_class_train]) select_train_label.append(current_label_y[:nSamples_per_class_train]) select_val_data.append(current_label_X[nSamples_per_class_train:nSamples_per_class_train+nSamples_per_class_val]) select_val_label.append(current_label_y[nSamples_per_class_train:nSamples_per_class_train + nSamples_per_class_val]) unlabeled_train_data.append(current_label_X[nSamples_per_class_train + nSamples_per_class_val:1000]) unlabeled_train_label.append(current_label_y[nSamples_per_class_train + nSamples_per_class_val:1000]) train_data = torch.cat(select_train_data, dim=0).to(opt.device) train_target = torch.cat(select_train_label, dim=0).to(opt.device) valid_data = torch.cat(select_val_data, dim=0).to(opt.device) valid_target = 
torch.cat(select_val_label, dim=0).to(opt.device) test_data = torch.cat(unlabeled_train_data, dim=0).to(opt.device) test_target = torch.cat(unlabeled_train_label, dim=0).to(opt.device) # random shuffle the data train_random_ind = np.arange(nSamples_per_class_train * n_class) val_random_ind = np.arange(nSamples_per_class_val * n_class) test_random_ind = np.arange(nSamples_per_unlabel * n_class) np.random.shuffle(train_random_ind) np.random.shuffle(val_random_ind) np.random.shuffle(test_random_ind) train_data = train_data[train_random_ind] train_target = train_target[train_random_ind] valid_data = valid_data[val_random_ind] valid_target = valid_target[val_random_ind] test_data = test_data[test_random_ind] test_target = test_target[test_random_ind] all_data = torch.cat([train_data, valid_data, test_data], dim=0) all_data = torch.reshape(all_data, (1000*n_class, -1)) print(all_data.shape) path_best_model = f'./saved_models/{opt.dataset}/glcn_best_models' if not os.path.exists(os.path.dirname(path_best_model)): os.mkdir(os.path.dirname(path_best_model)) opt.in_channels = all_data.shape[1] model = GLCN(opt.in_channels, opt.out_channels, opt.ngcn_layers, opt.nclass, opt.gamma_reg, opt.dropout, opt.topk).to(opt.device) # model.apply(weights_init) init_all(model, init_funcs) optimizer = optim.Adam(model.parameters(), lr=opt.lr) if os.path.exists(path_best_model): # original saved file with DataParallel state_dict = torch.load(path_best_model) model.load_state_dict(state_dict) min_valid_acc = 0.0 no_increase_step = 0 final_output = None # train the network if opt.train: for epoch in range(opt.num_epochs): if epoch > opt.epoch_decay_start: decayed_lr = (opt.num_epochs - epoch) * opt.lr / (opt.num_epochs - opt.epoch_decay_start) optimizer.lr = decayed_lr optimizer.betas = (0.5, 0.999) # training semi_outputs, v_loss, ce_loss = train(model, all_data, train_target, optimizer, opt.lamda_reg) print("Epoch :", epoch, "GLCN Loss :", v_loss.item(), "CE Loss :", ce_loss.item(), 
flush=True) # evaluating if epoch % eval_freq == 0 or epoch + 1 == opt.num_epochs: train_preds = semi_outputs[:num_labeled] train_accuracy = eval(train_preds, train_target) print("Train accuracy :", train_accuracy.item(), flush=True) val_preds = semi_outputs[num_labeled:num_valid+num_labeled] val_accuracy = eval(val_preds, valid_target) print("Valid accuracy :", val_accuracy.item(), flush=True) print(semi_outputs.shape) if val_accuracy > min_valid_acc: min_valid_acc = val_accuracy no_increase_step = 0 torch.save(model.state_dict(), path_best_model) else: no_increase_step += 1 if no_increase_step == 100: final_output = semi_outputs break final_output = semi_outputs if os.path.exists(path_best_model): # original saved file with DataParallel state_dict = torch.load(path_best_model) model.load_state_dict(state_dict) model.eval() final_output, loss_GL, S = model(all_data) test_preds = final_output[num_valid+num_labeled:] test_accuracy = eval(test_preds, test_target) print("Test accuracy :", test_accuracy.item(), flush=True)
loss.backward() optimizer.step()
random_line_split
main_glcn.py
import argparse from torchvision import datasets, transforms import torch.optim as optim from model import * from model_glcn import GLCN from utils import * import os import torch num_labeled = 1000 num_valid = 1000 eval_freq = 10 lr = 0.005 cuda_device = "0" parser = argparse.ArgumentParser() parser.add_argument('--dataset', required=True, help='cifar10 | svhn') parser.add_argument('--dataroot', required=True, help='path to dataset') parser.add_argument('--num_epochs', type=int, default=5000) parser.add_argument('--epoch_decay_start', type=int, default=80) parser.add_argument('--epsilon', type=float, default=2.5) parser.add_argument('--top_bn', type=bool, default=True) parser.add_argument('--method', default='vat') parser.add_argument('--lr', type=float, default=0.1) parser.add_argument('--in_channels', type=int, default=3) parser.add_argument('--out_channels', type=int, default=7) parser.add_argument('--topk', type=int, default=10) parser.add_argument('--ngcn_layers', type=int, default=30) parser.add_argument('--nclass', type=int, default=10) parser.add_argument('--gamma_reg', type=float, default=0.01) parser.add_argument('--lamda_reg', type=float, default=0.00001) parser.add_argument('--dropout', type=float, default=0.0) parser.add_argument('--cuda', dest='cuda', default='0', type=str) parser.add_argument('--mode', default='gpu', help='cpu/gpu') parser.add_argument('--train', default=True, action='store_false') opt = parser.parse_args() os.environ["CUDA_VISIBLE_DEVICES"] = cuda_device # set up gpu if opt.mode == 'gpu': os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" os.environ['CUDA_VISIBLE_DEVICES'] = str(opt.cuda) print('Using GPU {}'.format(os.environ['CUDA_VISIBLE_DEVICES']), flush= True) else: os.environ['CUDA_VISIBLE_DEVICES'] = '-1' print('Using CPU', flush= True) opt.device = torch.device('cuda:0' if opt.mode == 'gpu' else 'cpu') def tocuda(x): if opt.use_cuda: return x.cuda() return x def train(model, x, y, optimizer, lamda_reg=0.0): model.train() # ce = 
nn.CrossEntropyLoss() # This criterion combines nn.LogSoftmax() and nn.NLLLoss() in one single class. # semi_outputs have been log_softmax, so only NLLLoss() here nll_loss = nn.NLLLoss() semi_outputs, loss_GL, S = model(x) # print("The learned S is ", torch.sum(S, dim=-1)) ce_loss = nll_loss(semi_outputs[:num_labeled], y) loss = ce_loss + lamda_reg * loss_GL optimizer.zero_grad() loss.backward() optimizer.step() # print("First Row of X") # print(x[0]) # print("Adj Matrix....") # print(S[S > 0]) return semi_outputs, loss, ce_loss def eval(y_pred, y): # print(semi_outputs.shape) # y_pred = semi_outputs[num_labeled:(num_labeled+num_valid)] prob, idx = torch.max(y_pred, dim=1) return torch.eq(idx, y).float().mean() # Several Ways to initialize the weights # 1. initialize different weights using different initialization def
(m): """ Usage: model.apply(weights_init) :param m: :return: """ classname = m.__class__.__name__ if classname.find('Conv') != -1: m.weight.data.normal_(0.0, 0.02) elif classname.find('BatchNorm') != -1: m.weight.data.normal_(1.0, 0.02) m.bias.data.fill_(0) elif classname.find('Linear') != -1: m.bias.data.fill_(0) # 2. weight different weights using different torch.nn methods def init_all(model, init_funcs): """ Usage: init_all(model, init_funcs) :param model: :param init_funcs: :return: """ for p in model.parameters(): init_func = init_funcs.get(len(p.shape), init_funcs["default"]) init_func(p) init_funcs = { 1: lambda x: torch.nn.init.normal_(x, mean=0., std=1.), # can be bias 2: lambda x: torch.nn.init.xavier_normal_(x, gain=1.), # can be weight 3: lambda x: torch.nn.init.xavier_uniform_(x, gain=1.), # can be conv1D filter 4: lambda x: torch.nn.init.xavier_uniform_(x, gain=1.), # can be conv2D filter "default": lambda x: torch.nn.init.xavier_uniform_(x, gain=1.), # everything else } if opt.dataset == 'svhn': train_loader = torch.utils.data.DataLoader( datasets.SVHN(root=opt.dataroot, split='train', download=True, transform=transforms.Compose([ transforms.ToTensor(), transforms.Normalize((0.4377, 0.4438, 0.4728), (0.1980, 0.2010, 0.1970)) ])), batch_size=100, shuffle=True) elif opt.dataset == 'cifar10': num_labeled = 1000 train_loader = torch.utils.data.DataLoader( datasets.CIFAR10(root=opt.dataroot, train=True, download=True, transform=transforms.Compose([ transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)) ])), batch_size=100, shuffle=True) elif opt.dataset == 'mnist': # num_labeled = 1000 opt.in_channels = 1 train_loader = torch.utils.data.DataLoader( datasets.MNIST(root=opt.dataroot, train=True, download=True, transform=transforms.Compose([ transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,)) ])), batch_size=100, shuffle=True) else: raise NotImplementedError train_data = [] train_target = [] for (data, 
target) in train_loader: train_data.append(data) train_target.append(target) train_data = torch.cat(train_data, dim=0) train_target = torch.cat(train_target, dim=0) print(f"Total number of dataset {opt.dataset} is {train_data.shape}") unique_labels = np.unique(train_target) print("Unique Labels: ", unique_labels) n_class = len(unique_labels) nSamples_per_class_train = 100 nSamples_per_class_val = 100 nSamples_per_unlabel = 1000 - nSamples_per_class_train - nSamples_per_class_val select_train_data = [] select_train_label = [] select_val_data = [] select_val_label = [] unlabeled_train_data = [] unlabeled_train_label = [] for label in unique_labels: label_mask = (train_target == label) current_label_X = train_data[label_mask] current_label_y = train_target[label_mask] select_train_data.append(current_label_X[:nSamples_per_class_train]) select_train_label.append(current_label_y[:nSamples_per_class_train]) select_val_data.append(current_label_X[nSamples_per_class_train:nSamples_per_class_train+nSamples_per_class_val]) select_val_label.append(current_label_y[nSamples_per_class_train:nSamples_per_class_train + nSamples_per_class_val]) unlabeled_train_data.append(current_label_X[nSamples_per_class_train + nSamples_per_class_val:1000]) unlabeled_train_label.append(current_label_y[nSamples_per_class_train + nSamples_per_class_val:1000]) train_data = torch.cat(select_train_data, dim=0).to(opt.device) train_target = torch.cat(select_train_label, dim=0).to(opt.device) valid_data = torch.cat(select_val_data, dim=0).to(opt.device) valid_target = torch.cat(select_val_label, dim=0).to(opt.device) test_data = torch.cat(unlabeled_train_data, dim=0).to(opt.device) test_target = torch.cat(unlabeled_train_label, dim=0).to(opt.device) # random shuffle the data train_random_ind = np.arange(nSamples_per_class_train * n_class) val_random_ind = np.arange(nSamples_per_class_val * n_class) test_random_ind = np.arange(nSamples_per_unlabel * n_class) np.random.shuffle(train_random_ind) 
np.random.shuffle(val_random_ind) np.random.shuffle(test_random_ind) train_data = train_data[train_random_ind] train_target = train_target[train_random_ind] valid_data = valid_data[val_random_ind] valid_target = valid_target[val_random_ind] test_data = test_data[test_random_ind] test_target = test_target[test_random_ind] all_data = torch.cat([train_data, valid_data, test_data], dim=0) all_data = torch.reshape(all_data, (1000*n_class, -1)) print(all_data.shape) path_best_model = f'./saved_models/{opt.dataset}/glcn_best_models' if not os.path.exists(os.path.dirname(path_best_model)): os.mkdir(os.path.dirname(path_best_model)) opt.in_channels = all_data.shape[1] model = GLCN(opt.in_channels, opt.out_channels, opt.ngcn_layers, opt.nclass, opt.gamma_reg, opt.dropout, opt.topk).to(opt.device) # model.apply(weights_init) init_all(model, init_funcs) optimizer = optim.Adam(model.parameters(), lr=opt.lr) if os.path.exists(path_best_model): # original saved file with DataParallel state_dict = torch.load(path_best_model) model.load_state_dict(state_dict) min_valid_acc = 0.0 no_increase_step = 0 final_output = None # train the network if opt.train: for epoch in range(opt.num_epochs): if epoch > opt.epoch_decay_start: decayed_lr = (opt.num_epochs - epoch) * opt.lr / (opt.num_epochs - opt.epoch_decay_start) optimizer.lr = decayed_lr optimizer.betas = (0.5, 0.999) # training semi_outputs, v_loss, ce_loss = train(model, all_data, train_target, optimizer, opt.lamda_reg) print("Epoch :", epoch, "GLCN Loss :", v_loss.item(), "CE Loss :", ce_loss.item(), flush=True) # evaluating if epoch % eval_freq == 0 or epoch + 1 == opt.num_epochs: train_preds = semi_outputs[:num_labeled] train_accuracy = eval(train_preds, train_target) print("Train accuracy :", train_accuracy.item(), flush=True) val_preds = semi_outputs[num_labeled:num_valid+num_labeled] val_accuracy = eval(val_preds, valid_target) print("Valid accuracy :", val_accuracy.item(), flush=True) print(semi_outputs.shape) if val_accuracy 
> min_valid_acc: min_valid_acc = val_accuracy no_increase_step = 0 torch.save(model.state_dict(), path_best_model) else: no_increase_step += 1 if no_increase_step == 100: final_output = semi_outputs break final_output = semi_outputs if os.path.exists(path_best_model): # original saved file with DataParallel state_dict = torch.load(path_best_model) model.load_state_dict(state_dict) model.eval() final_output, loss_GL, S = model(all_data) test_preds = final_output[num_valid+num_labeled:] test_accuracy = eval(test_preds, test_target) print("Test accuracy :", test_accuracy.item(), flush=True)
weights_init
identifier_name
main_glcn.py
import argparse from torchvision import datasets, transforms import torch.optim as optim from model import * from model_glcn import GLCN from utils import * import os import torch num_labeled = 1000 num_valid = 1000 eval_freq = 10 lr = 0.005 cuda_device = "0" parser = argparse.ArgumentParser() parser.add_argument('--dataset', required=True, help='cifar10 | svhn') parser.add_argument('--dataroot', required=True, help='path to dataset') parser.add_argument('--num_epochs', type=int, default=5000) parser.add_argument('--epoch_decay_start', type=int, default=80) parser.add_argument('--epsilon', type=float, default=2.5) parser.add_argument('--top_bn', type=bool, default=True) parser.add_argument('--method', default='vat') parser.add_argument('--lr', type=float, default=0.1) parser.add_argument('--in_channels', type=int, default=3) parser.add_argument('--out_channels', type=int, default=7) parser.add_argument('--topk', type=int, default=10) parser.add_argument('--ngcn_layers', type=int, default=30) parser.add_argument('--nclass', type=int, default=10) parser.add_argument('--gamma_reg', type=float, default=0.01) parser.add_argument('--lamda_reg', type=float, default=0.00001) parser.add_argument('--dropout', type=float, default=0.0) parser.add_argument('--cuda', dest='cuda', default='0', type=str) parser.add_argument('--mode', default='gpu', help='cpu/gpu') parser.add_argument('--train', default=True, action='store_false') opt = parser.parse_args() os.environ["CUDA_VISIBLE_DEVICES"] = cuda_device # set up gpu if opt.mode == 'gpu': os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" os.environ['CUDA_VISIBLE_DEVICES'] = str(opt.cuda) print('Using GPU {}'.format(os.environ['CUDA_VISIBLE_DEVICES']), flush= True) else: os.environ['CUDA_VISIBLE_DEVICES'] = '-1' print('Using CPU', flush= True) opt.device = torch.device('cuda:0' if opt.mode == 'gpu' else 'cpu') def tocuda(x): if opt.use_cuda: return x.cuda() return x def train(model, x, y, optimizer, lamda_reg=0.0): model.train() # ce = 
nn.CrossEntropyLoss() # This criterion combines nn.LogSoftmax() and nn.NLLLoss() in one single class. # semi_outputs have been log_softmax, so only NLLLoss() here nll_loss = nn.NLLLoss() semi_outputs, loss_GL, S = model(x) # print("The learned S is ", torch.sum(S, dim=-1)) ce_loss = nll_loss(semi_outputs[:num_labeled], y) loss = ce_loss + lamda_reg * loss_GL optimizer.zero_grad() loss.backward() optimizer.step() # print("First Row of X") # print(x[0]) # print("Adj Matrix....") # print(S[S > 0]) return semi_outputs, loss, ce_loss def eval(y_pred, y): # print(semi_outputs.shape) # y_pred = semi_outputs[num_labeled:(num_labeled+num_valid)] prob, idx = torch.max(y_pred, dim=1) return torch.eq(idx, y).float().mean() # Several Ways to initialize the weights # 1. initialize different weights using different initialization def weights_init(m): """ Usage: model.apply(weights_init) :param m: :return: """ classname = m.__class__.__name__ if classname.find('Conv') != -1: m.weight.data.normal_(0.0, 0.02) elif classname.find('BatchNorm') != -1: m.weight.data.normal_(1.0, 0.02) m.bias.data.fill_(0) elif classname.find('Linear') != -1: m.bias.data.fill_(0) # 2. 
weight different weights using different torch.nn methods def init_all(model, init_funcs): """ Usage: init_all(model, init_funcs) :param model: :param init_funcs: :return: """ for p in model.parameters(): init_func = init_funcs.get(len(p.shape), init_funcs["default"]) init_func(p) init_funcs = { 1: lambda x: torch.nn.init.normal_(x, mean=0., std=1.), # can be bias 2: lambda x: torch.nn.init.xavier_normal_(x, gain=1.), # can be weight 3: lambda x: torch.nn.init.xavier_uniform_(x, gain=1.), # can be conv1D filter 4: lambda x: torch.nn.init.xavier_uniform_(x, gain=1.), # can be conv2D filter "default": lambda x: torch.nn.init.xavier_uniform_(x, gain=1.), # everything else } if opt.dataset == 'svhn': train_loader = torch.utils.data.DataLoader( datasets.SVHN(root=opt.dataroot, split='train', download=True, transform=transforms.Compose([ transforms.ToTensor(), transforms.Normalize((0.4377, 0.4438, 0.4728), (0.1980, 0.2010, 0.1970)) ])), batch_size=100, shuffle=True) elif opt.dataset == 'cifar10': num_labeled = 1000 train_loader = torch.utils.data.DataLoader( datasets.CIFAR10(root=opt.dataroot, train=True, download=True, transform=transforms.Compose([ transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)) ])), batch_size=100, shuffle=True) elif opt.dataset == 'mnist': # num_labeled = 1000 opt.in_channels = 1 train_loader = torch.utils.data.DataLoader( datasets.MNIST(root=opt.dataroot, train=True, download=True, transform=transforms.Compose([ transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,)) ])), batch_size=100, shuffle=True) else: raise NotImplementedError train_data = [] train_target = [] for (data, target) in train_loader: train_data.append(data) train_target.append(target) train_data = torch.cat(train_data, dim=0) train_target = torch.cat(train_target, dim=0) print(f"Total number of dataset {opt.dataset} is {train_data.shape}") unique_labels = np.unique(train_target) print("Unique Labels: ", unique_labels) 
n_class = len(unique_labels) nSamples_per_class_train = 100 nSamples_per_class_val = 100 nSamples_per_unlabel = 1000 - nSamples_per_class_train - nSamples_per_class_val select_train_data = [] select_train_label = [] select_val_data = [] select_val_label = [] unlabeled_train_data = [] unlabeled_train_label = [] for label in unique_labels:
train_data = torch.cat(select_train_data, dim=0).to(opt.device) train_target = torch.cat(select_train_label, dim=0).to(opt.device) valid_data = torch.cat(select_val_data, dim=0).to(opt.device) valid_target = torch.cat(select_val_label, dim=0).to(opt.device) test_data = torch.cat(unlabeled_train_data, dim=0).to(opt.device) test_target = torch.cat(unlabeled_train_label, dim=0).to(opt.device) # random shuffle the data train_random_ind = np.arange(nSamples_per_class_train * n_class) val_random_ind = np.arange(nSamples_per_class_val * n_class) test_random_ind = np.arange(nSamples_per_unlabel * n_class) np.random.shuffle(train_random_ind) np.random.shuffle(val_random_ind) np.random.shuffle(test_random_ind) train_data = train_data[train_random_ind] train_target = train_target[train_random_ind] valid_data = valid_data[val_random_ind] valid_target = valid_target[val_random_ind] test_data = test_data[test_random_ind] test_target = test_target[test_random_ind] all_data = torch.cat([train_data, valid_data, test_data], dim=0) all_data = torch.reshape(all_data, (1000*n_class, -1)) print(all_data.shape) path_best_model = f'./saved_models/{opt.dataset}/glcn_best_models' if not os.path.exists(os.path.dirname(path_best_model)): os.mkdir(os.path.dirname(path_best_model)) opt.in_channels = all_data.shape[1] model = GLCN(opt.in_channels, opt.out_channels, opt.ngcn_layers, opt.nclass, opt.gamma_reg, opt.dropout, opt.topk).to(opt.device) # model.apply(weights_init) init_all(model, init_funcs) optimizer = optim.Adam(model.parameters(), lr=opt.lr) if os.path.exists(path_best_model): # original saved file with DataParallel state_dict = torch.load(path_best_model) model.load_state_dict(state_dict) min_valid_acc = 0.0 no_increase_step = 0 final_output = None # train the network if opt.train: for epoch in range(opt.num_epochs): if epoch > opt.epoch_decay_start: decayed_lr = (opt.num_epochs - epoch) * opt.lr / (opt.num_epochs - opt.epoch_decay_start) optimizer.lr = decayed_lr optimizer.betas = 
(0.5, 0.999) # training semi_outputs, v_loss, ce_loss = train(model, all_data, train_target, optimizer, opt.lamda_reg) print("Epoch :", epoch, "GLCN Loss :", v_loss.item(), "CE Loss :", ce_loss.item(), flush=True) # evaluating if epoch % eval_freq == 0 or epoch + 1 == opt.num_epochs: train_preds = semi_outputs[:num_labeled] train_accuracy = eval(train_preds, train_target) print("Train accuracy :", train_accuracy.item(), flush=True) val_preds = semi_outputs[num_labeled:num_valid+num_labeled] val_accuracy = eval(val_preds, valid_target) print("Valid accuracy :", val_accuracy.item(), flush=True) print(semi_outputs.shape) if val_accuracy > min_valid_acc: min_valid_acc = val_accuracy no_increase_step = 0 torch.save(model.state_dict(), path_best_model) else: no_increase_step += 1 if no_increase_step == 100: final_output = semi_outputs break final_output = semi_outputs if os.path.exists(path_best_model): # original saved file with DataParallel state_dict = torch.load(path_best_model) model.load_state_dict(state_dict) model.eval() final_output, loss_GL, S = model(all_data) test_preds = final_output[num_valid+num_labeled:] test_accuracy = eval(test_preds, test_target) print("Test accuracy :", test_accuracy.item(), flush=True)
label_mask = (train_target == label) current_label_X = train_data[label_mask] current_label_y = train_target[label_mask] select_train_data.append(current_label_X[:nSamples_per_class_train]) select_train_label.append(current_label_y[:nSamples_per_class_train]) select_val_data.append(current_label_X[nSamples_per_class_train:nSamples_per_class_train+nSamples_per_class_val]) select_val_label.append(current_label_y[nSamples_per_class_train:nSamples_per_class_train + nSamples_per_class_val]) unlabeled_train_data.append(current_label_X[nSamples_per_class_train + nSamples_per_class_val:1000]) unlabeled_train_label.append(current_label_y[nSamples_per_class_train + nSamples_per_class_val:1000])
conditional_block
plot.go
package plot import ( "fmt" "math" "os" "time" "trex-helpers/pkg/analytics" "trex-helpers/pkg/packet" "github.com/signintech/gopdf" ) //const xPaperSize float64 = 842.0 * 3 //const yPaperSize float64 = 595.0 type plotter struct { xPaperSize float64 yPaperSize float64 xLeftMargin float64 xRightMargin float64 yTopMargin float64 yBottomMargin float64 titlePrefix string inputFilename string xMin int64 xMax int64 yMin float64 yMax float64 xScale float64 yScale float64 yZeroAt float64 xLineStep int64 yLineStep int64 } func (plot plotter) width() float64 { return plot.xPaperSize - plot.xLeftMargin - plot.xRightMargin } func (plot plotter) height() float64 { return plot.yPaperSize - plot.yTopMargin - plot.yBottomMargin } func (plot *plotter) fromPackets(packets []packet.Packet) { plot.xMin, plot.xMax, plot.yMin, plot.yMax = maxPacketsValue(packets) plot.xScale, plot.yScale = plot.width()/float64(plot.xMax-plot.xMin), plot.height()/(plot.yMax-plot.yMin) plot.yZeroAt = plot.yPaperSize - plot.yBottomMargin + plot.yMin*plot.yScale } type stats struct {
averageLatency float64 periodicLatencies []analytics.PeriodicAvgLatency } func (sts *stats) fromPackets(packets []packet.Packet) { sts.averageLatency = analytics.CalcPositiveAverageLatency(packets) sts.periodicLatencies = analytics.CalcPeriodicAverageLatency(packets) } func maxPacketsValue(packets []packet.Packet) (xMin int64, xMax int64, yMin float64, yMax float64) { xMin, xMax = int64(1<<63-1), -int64(1<<63-1) yMin, yMax = float64(xMin), float64(xMax) for _, pkt := range packets { x := pkt.ReceivedAt().UnixNano() y := pkt.Value() if x < xMin { xMin = x } if x > xMax { xMax = x } if y < yMin { yMin = y } if y > yMax { yMax = y } } //fmt.Printf("boundaries: x = %v .. %v, y = %v .. %v\n", xMin, xMax, yMin, yMax) return } func SavePDF(packets []packet.Packet, inputFilename string, filename string, verbose bool) (err error) { plot := plotter{ xPaperSize: 842 * 4, yPaperSize: 595, xLeftMargin: 12, xRightMargin: 12, yTopMargin: 24, yBottomMargin: 12, titlePrefix: "TRex Packets Chart", inputFilename: inputFilename, //outputFilename: filename, } plot.fromPackets(packets) stats := stats{averageLatency: 0} stats.fromPackets(packets) pdf, err := preparePdf(&plot, stats) if err != nil { return } drawPackets(&pdf, packets, &plot) drawAnalytics(&pdf, stats, &plot) drawAxis(&pdf, &plot) f, err := os.Create(filename) if err != nil { return err } defer f.Close() err = pdf.Write(f) if err != nil { return err } return nil } func preparePdf(plot *plotter, sts stats) (pdf gopdf.GoPdf, err error) { pdf = gopdf.GoPdf{} pdf.Start(gopdf.Config{PageSize: gopdf.Rect{W: plot.xPaperSize, H: plot.yPaperSize}, Unit: gopdf.Unit_PT}) pdf.SetInfo(gopdf.PdfInfo{ Title: fmt.Sprintf("%v for %v", plot.titlePrefix, plot.inputFilename), Subject: plot.titlePrefix, Creator: "trex-helpers", Producer: "https://github.com/signintech/gopdf", CreationDate: time.Now(), }) pdf.AddPage() err = pdf.AddTTFFont("FiraSans-Book", "/usr/share/fonts/TTF/FiraSans-Book.ttf") if err != nil { return } err = 
pdf.AddTTFFont("FiraSans-Medium", "/usr/share/fonts/TTF/FiraSans-Medium.ttf") if err != nil { return } err = makeTitle(&pdf, plot.inputFilename) if err != nil { return } err = makeFootnote(&pdf, plot) if err != nil { return } // due to some bug(?) in gopdf one cannot reliably write text on already “drawn” PDF page err = makeAxisAnnotations(&pdf, plot) if err != nil { return } err = makeStatsAnnotations(&pdf, sts, plot) if err != nil { return } return pdf, err } func makeTitle(pdf *gopdf.GoPdf, inputFilename string) (err error) { pdf.SetTextColor(0x00, 0x00, 0x00) err = pdf.SetFont("FiraSans-Book", "", 18) if err != nil { return } pdf.SetX(4) pdf.SetY(22) err = pdf.Text("TRex Packets Chart for ") if err != nil { return } err = pdf.SetFont("FiraSans-Medium", "", 18) if err != nil { return } err = pdf.Text(inputFilename) if err != nil { return } return nil } func makeFootnote(pdf *gopdf.GoPdf, plot *plotter) (err error) { err = pdf.SetFont("FiraSans-Book", "", 8) if err != nil { return } pdf.SetX(4) pdf.SetY(plot.yPaperSize - 3) err = pdf.Text(fmt.Sprintf("%v", time.Now())) if err != nil { return err } pdf.SetX(plot.xPaperSize - 106) err = pdf.Text("generated with trex-helpers") if err != nil { return err } pdf.AddExternalLink("https://github.com/mateumann/trex-helpers", plot.xPaperSize-106.5, plot.yPaperSize-10.5, 105, 10) return nil } func makeAxisAnnotations(pdf *gopdf.GoPdf, plot *plotter) (err error) { pdf.SetTextColor(0, 0, 0) err = pdf.SetFont("FiraSans-Book", "", 12) if err != nil { return } for _, y := range verticalSteps(plot) { yOnPaper := plot.yPaperSize - plot.yBottomMargin - (y-plot.yMin)*plot.yScale err = makeAnnotation(pdf, plot.xLeftMargin, yOnPaper-4, 0, 0, 0, fmt.Sprintf("%v µs", y)) if err != nil { return } } for _, x := range horizontalSteps(true, plot) { xOnPaper := plot.xLeftMargin + x*plot.xScale err = makeAnnotation(pdf, xOnPaper-6, plot.yZeroAt+16, 0, 0, 0, fmt.Sprintf("%v s", x/1000/1000/1000)) if err != nil { return } } return nil } func 
verticalSteps(plot *plotter) (steps []float64) { plot.yLineStep = int64(math.Pow10(int(math.Ceil(math.Log10((plot.yMax-plot.yMin)/2))) - 1)) lo := plot.yLineStep * (int64(plot.yMin) / plot.yLineStep) hi := plot.yLineStep * (int64(plot.yMax) / plot.yLineStep) for y := lo; y <= hi; y += plot.yLineStep { steps = append(steps, float64(y)) } return } func horizontalSteps(forAnnotations bool, plot *plotter) (steps []float64) { if forAnnotations { plot.xLineStep = int64(math.Pow10(int(math.Ceil(math.Log10(float64(plot.xMax-plot.xMin)))) - 1)) } else { plot.xLineStep = int64(math.Pow10(int(math.Ceil(math.Log10(float64(plot.xMax-plot.xMin)))) - 2)) } //fmt.Printf("forAnnotations = %5v, xLineStep = %v\n", forAnnotations, plot.xLineStep) hi := plot.xLineStep * (plot.xMax / plot.xLineStep) for x := plot.xMin + plot.xLineStep; x <= hi+plot.xLineStep; x += plot.xLineStep { steps = append(steps, float64(x-plot.xMin)) } return } func makeAnnotation(pdf *gopdf.GoPdf, x, y float64, r, g, b uint8, text string) (err error) { pdf.SetTextColor(r, g, b) pdf.SetX(x) pdf.SetY(y) err = pdf.Text(text) if err != nil { return } return nil } func makeStatsAnnotations(pdf *gopdf.GoPdf, sts stats, plot *plotter) (err error) { yOnPaper := plot.yPaperSize - plot.yBottomMargin - (sts.averageLatency-plot.yMin)*plot.yScale err = pdf.SetFont("FiraSans-Book", "", 12) if err != nil { return err } err = makeAnnotation(pdf, plot.xLeftMargin+20, yOnPaper-5, 0xd3, 0x86, 0x9b, fmt.Sprintf("avg. lat. 
%.2f µs", sts.averageLatency)) if err != nil { return err } pdf.SetStrokeColor(0xfb, 0x49, 0x34) for _, periodicData := range sts.periodicLatencies { x0 := float64(periodicData.StartTimestamp.UnixNano() - plot.xMin) x0OnPaper := plot.xLeftMargin + x0*plot.xScale yOnPaper := plot.yPaperSize - plot.yBottomMargin - (periodicData.Value-plot.yMin)*plot.yScale if periodicData.Value < 0 { yOnPaper += 20 } err = makeAnnotation(pdf, x0OnPaper+5, yOnPaper-5, 0xfb, 0x49, 0x34, fmt.Sprintf("%.2f µs", periodicData.Value)) if err != nil { return err } } return nil } func drawAnalytics(pdf *gopdf.GoPdf, sts stats, plot *plotter) { yOnPaper := plot.yPaperSize - plot.yBottomMargin - (sts.averageLatency-plot.yMin)*plot.yScale pdf.SetStrokeColor(0xd3, 0x86, 0x9b) pdf.SetLineWidth(1) pdf.SetLineType("dashed") pdf.Line(plot.xLeftMargin, yOnPaper, plot.xPaperSize-plot.xRightMargin, yOnPaper) //pdf.SetStrokeColor(0xd3, 0x86, 0x9b) pdf.SetStrokeColor(0xfb, 0x49, 0x34) for _, periodicData := range sts.periodicLatencies { x0 := float64(periodicData.StartTimestamp.UnixNano() - plot.xMin) x1 := float64(periodicData.EndTimestamp.UnixNano() - plot.xMin) x0OnPaper := plot.xLeftMargin + x0*plot.xScale x1OnPaper := plot.xLeftMargin + x1*plot.xScale yOnPaper := plot.yPaperSize - plot.yBottomMargin - (periodicData.Value-plot.yMin)*plot.yScale pdf.Line(x0OnPaper, yOnPaper, x1OnPaper, yOnPaper) } } func drawPackets(pdf *gopdf.GoPdf, packets []packet.Packet, plot *plotter) { // first draw "other" packets, then the rest pdf.SetLineWidth(plot.xScale) pdf.SetLineType("solid") for _, pkt := range packets { if pkt.Type() != packet.TypeOther { continue } makeLine(pdf, pkt, plot) } for _, pkt := range packets { if pkt.Type() == packet.TypeOther { continue } makeLine(pdf, pkt, plot) } } func makeLine(pdf *gopdf.GoPdf, pkt packet.Packet, plot *plotter) { x := float64(pkt.ReceivedAt().UnixNano() - plot.xMin) y := pkt.Value() pdf.SetStrokeColor(pktColor(pkt)) xOnPaper := plot.xLeftMargin + x*plot.xScale yOnPaper 
:= plot.yPaperSize - plot.yBottomMargin - (y-plot.yMin)*plot.yScale pdf.Line(xOnPaper, plot.yZeroAt, xOnPaper, yOnPaper) } func pktColor(pkt packet.Packet) (r uint8, g uint8, b uint8) { switch pkt.Type() { case packet.TypeLatency: return 0xb8, 0xbb, 0x26 case packet.TypePTP: return 0xcc, 0x24, 0x1d case packet.TypeOther: return 0xeb, 0xdb, 0xb2 } return 0xff, 0x00, 0x0 } func drawAxis(pdf *gopdf.GoPdf, plot *plotter) { // main X axis pdf.SetStrokeColor(0, 0, 0) pdf.SetLineWidth(1) pdf.SetLineType("solid") pdf.Line(plot.xLeftMargin, plot.yZeroAt, plot.xPaperSize-plot.xRightMargin, plot.yZeroAt) // X axis marks for _, x := range horizontalSteps(false, plot) { xOnPaper := plot.xLeftMargin + x*plot.xScale pdf.Line(xOnPaper, plot.yZeroAt-4, xOnPaper, plot.yZeroAt+4) } // helper X lines pdf.SetStrokeColor(0x66, 0x66, 0x66) pdf.SetLineWidth(0.01) pdf.SetLineType("dotted") for _, y := range verticalSteps(plot) { yOnPaper := plot.yPaperSize - plot.yBottomMargin - (y-plot.yMin)*plot.yScale pdf.Line(plot.xLeftMargin, yOnPaper, plot.xPaperSize-plot.xRightMargin, yOnPaper) } }
random_line_split
plot.go
package plot import ( "fmt" "math" "os" "time" "trex-helpers/pkg/analytics" "trex-helpers/pkg/packet" "github.com/signintech/gopdf" ) //const xPaperSize float64 = 842.0 * 3 //const yPaperSize float64 = 595.0 type plotter struct { xPaperSize float64 yPaperSize float64 xLeftMargin float64 xRightMargin float64 yTopMargin float64 yBottomMargin float64 titlePrefix string inputFilename string xMin int64 xMax int64 yMin float64 yMax float64 xScale float64 yScale float64 yZeroAt float64 xLineStep int64 yLineStep int64 } func (plot plotter) width() float64 { return plot.xPaperSize - plot.xLeftMargin - plot.xRightMargin } func (plot plotter) height() float64 { return plot.yPaperSize - plot.yTopMargin - plot.yBottomMargin } func (plot *plotter) fromPackets(packets []packet.Packet) { plot.xMin, plot.xMax, plot.yMin, plot.yMax = maxPacketsValue(packets) plot.xScale, plot.yScale = plot.width()/float64(plot.xMax-plot.xMin), plot.height()/(plot.yMax-plot.yMin) plot.yZeroAt = plot.yPaperSize - plot.yBottomMargin + plot.yMin*plot.yScale } type stats struct { averageLatency float64 periodicLatencies []analytics.PeriodicAvgLatency } func (sts *stats)
(packets []packet.Packet) { sts.averageLatency = analytics.CalcPositiveAverageLatency(packets) sts.periodicLatencies = analytics.CalcPeriodicAverageLatency(packets) } func maxPacketsValue(packets []packet.Packet) (xMin int64, xMax int64, yMin float64, yMax float64) { xMin, xMax = int64(1<<63-1), -int64(1<<63-1) yMin, yMax = float64(xMin), float64(xMax) for _, pkt := range packets { x := pkt.ReceivedAt().UnixNano() y := pkt.Value() if x < xMin { xMin = x } if x > xMax { xMax = x } if y < yMin { yMin = y } if y > yMax { yMax = y } } //fmt.Printf("boundaries: x = %v .. %v, y = %v .. %v\n", xMin, xMax, yMin, yMax) return } func SavePDF(packets []packet.Packet, inputFilename string, filename string, verbose bool) (err error) { plot := plotter{ xPaperSize: 842 * 4, yPaperSize: 595, xLeftMargin: 12, xRightMargin: 12, yTopMargin: 24, yBottomMargin: 12, titlePrefix: "TRex Packets Chart", inputFilename: inputFilename, //outputFilename: filename, } plot.fromPackets(packets) stats := stats{averageLatency: 0} stats.fromPackets(packets) pdf, err := preparePdf(&plot, stats) if err != nil { return } drawPackets(&pdf, packets, &plot) drawAnalytics(&pdf, stats, &plot) drawAxis(&pdf, &plot) f, err := os.Create(filename) if err != nil { return err } defer f.Close() err = pdf.Write(f) if err != nil { return err } return nil } func preparePdf(plot *plotter, sts stats) (pdf gopdf.GoPdf, err error) { pdf = gopdf.GoPdf{} pdf.Start(gopdf.Config{PageSize: gopdf.Rect{W: plot.xPaperSize, H: plot.yPaperSize}, Unit: gopdf.Unit_PT}) pdf.SetInfo(gopdf.PdfInfo{ Title: fmt.Sprintf("%v for %v", plot.titlePrefix, plot.inputFilename), Subject: plot.titlePrefix, Creator: "trex-helpers", Producer: "https://github.com/signintech/gopdf", CreationDate: time.Now(), }) pdf.AddPage() err = pdf.AddTTFFont("FiraSans-Book", "/usr/share/fonts/TTF/FiraSans-Book.ttf") if err != nil { return } err = pdf.AddTTFFont("FiraSans-Medium", "/usr/share/fonts/TTF/FiraSans-Medium.ttf") if err != nil { return } err = 
makeTitle(&pdf, plot.inputFilename) if err != nil { return } err = makeFootnote(&pdf, plot) if err != nil { return } // due to some bug(?) in gopdf one cannot reliably write text on already “drawn” PDF page err = makeAxisAnnotations(&pdf, plot) if err != nil { return } err = makeStatsAnnotations(&pdf, sts, plot) if err != nil { return } return pdf, err } func makeTitle(pdf *gopdf.GoPdf, inputFilename string) (err error) { pdf.SetTextColor(0x00, 0x00, 0x00) err = pdf.SetFont("FiraSans-Book", "", 18) if err != nil { return } pdf.SetX(4) pdf.SetY(22) err = pdf.Text("TRex Packets Chart for ") if err != nil { return } err = pdf.SetFont("FiraSans-Medium", "", 18) if err != nil { return } err = pdf.Text(inputFilename) if err != nil { return } return nil } func makeFootnote(pdf *gopdf.GoPdf, plot *plotter) (err error) { err = pdf.SetFont("FiraSans-Book", "", 8) if err != nil { return } pdf.SetX(4) pdf.SetY(plot.yPaperSize - 3) err = pdf.Text(fmt.Sprintf("%v", time.Now())) if err != nil { return err } pdf.SetX(plot.xPaperSize - 106) err = pdf.Text("generated with trex-helpers") if err != nil { return err } pdf.AddExternalLink("https://github.com/mateumann/trex-helpers", plot.xPaperSize-106.5, plot.yPaperSize-10.5, 105, 10) return nil } func makeAxisAnnotations(pdf *gopdf.GoPdf, plot *plotter) (err error) { pdf.SetTextColor(0, 0, 0) err = pdf.SetFont("FiraSans-Book", "", 12) if err != nil { return } for _, y := range verticalSteps(plot) { yOnPaper := plot.yPaperSize - plot.yBottomMargin - (y-plot.yMin)*plot.yScale err = makeAnnotation(pdf, plot.xLeftMargin, yOnPaper-4, 0, 0, 0, fmt.Sprintf("%v µs", y)) if err != nil { return } } for _, x := range horizontalSteps(true, plot) { xOnPaper := plot.xLeftMargin + x*plot.xScale err = makeAnnotation(pdf, xOnPaper-6, plot.yZeroAt+16, 0, 0, 0, fmt.Sprintf("%v s", x/1000/1000/1000)) if err != nil { return } } return nil } func verticalSteps(plot *plotter) (steps []float64) { plot.yLineStep = 
int64(math.Pow10(int(math.Ceil(math.Log10((plot.yMax-plot.yMin)/2))) - 1)) lo := plot.yLineStep * (int64(plot.yMin) / plot.yLineStep) hi := plot.yLineStep * (int64(plot.yMax) / plot.yLineStep) for y := lo; y <= hi; y += plot.yLineStep { steps = append(steps, float64(y)) } return } func horizontalSteps(forAnnotations bool, plot *plotter) (steps []float64) { if forAnnotations { plot.xLineStep = int64(math.Pow10(int(math.Ceil(math.Log10(float64(plot.xMax-plot.xMin)))) - 1)) } else { plot.xLineStep = int64(math.Pow10(int(math.Ceil(math.Log10(float64(plot.xMax-plot.xMin)))) - 2)) } //fmt.Printf("forAnnotations = %5v, xLineStep = %v\n", forAnnotations, plot.xLineStep) hi := plot.xLineStep * (plot.xMax / plot.xLineStep) for x := plot.xMin + plot.xLineStep; x <= hi+plot.xLineStep; x += plot.xLineStep { steps = append(steps, float64(x-plot.xMin)) } return } func makeAnnotation(pdf *gopdf.GoPdf, x, y float64, r, g, b uint8, text string) (err error) { pdf.SetTextColor(r, g, b) pdf.SetX(x) pdf.SetY(y) err = pdf.Text(text) if err != nil { return } return nil } func makeStatsAnnotations(pdf *gopdf.GoPdf, sts stats, plot *plotter) (err error) { yOnPaper := plot.yPaperSize - plot.yBottomMargin - (sts.averageLatency-plot.yMin)*plot.yScale err = pdf.SetFont("FiraSans-Book", "", 12) if err != nil { return err } err = makeAnnotation(pdf, plot.xLeftMargin+20, yOnPaper-5, 0xd3, 0x86, 0x9b, fmt.Sprintf("avg. lat. 
%.2f µs", sts.averageLatency)) if err != nil { return err } pdf.SetStrokeColor(0xfb, 0x49, 0x34) for _, periodicData := range sts.periodicLatencies { x0 := float64(periodicData.StartTimestamp.UnixNano() - plot.xMin) x0OnPaper := plot.xLeftMargin + x0*plot.xScale yOnPaper := plot.yPaperSize - plot.yBottomMargin - (periodicData.Value-plot.yMin)*plot.yScale if periodicData.Value < 0 { yOnPaper += 20 } err = makeAnnotation(pdf, x0OnPaper+5, yOnPaper-5, 0xfb, 0x49, 0x34, fmt.Sprintf("%.2f µs", periodicData.Value)) if err != nil { return err } } return nil } func drawAnalytics(pdf *gopdf.GoPdf, sts stats, plot *plotter) { yOnPaper := plot.yPaperSize - plot.yBottomMargin - (sts.averageLatency-plot.yMin)*plot.yScale pdf.SetStrokeColor(0xd3, 0x86, 0x9b) pdf.SetLineWidth(1) pdf.SetLineType("dashed") pdf.Line(plot.xLeftMargin, yOnPaper, plot.xPaperSize-plot.xRightMargin, yOnPaper) //pdf.SetStrokeColor(0xd3, 0x86, 0x9b) pdf.SetStrokeColor(0xfb, 0x49, 0x34) for _, periodicData := range sts.periodicLatencies { x0 := float64(periodicData.StartTimestamp.UnixNano() - plot.xMin) x1 := float64(periodicData.EndTimestamp.UnixNano() - plot.xMin) x0OnPaper := plot.xLeftMargin + x0*plot.xScale x1OnPaper := plot.xLeftMargin + x1*plot.xScale yOnPaper := plot.yPaperSize - plot.yBottomMargin - (periodicData.Value-plot.yMin)*plot.yScale pdf.Line(x0OnPaper, yOnPaper, x1OnPaper, yOnPaper) } } func drawPackets(pdf *gopdf.GoPdf, packets []packet.Packet, plot *plotter) { // first draw "other" packets, then the rest pdf.SetLineWidth(plot.xScale) pdf.SetLineType("solid") for _, pkt := range packets { if pkt.Type() != packet.TypeOther { continue } makeLine(pdf, pkt, plot) } for _, pkt := range packets { if pkt.Type() == packet.TypeOther { continue } makeLine(pdf, pkt, plot) } } func makeLine(pdf *gopdf.GoPdf, pkt packet.Packet, plot *plotter) { x := float64(pkt.ReceivedAt().UnixNano() - plot.xMin) y := pkt.Value() pdf.SetStrokeColor(pktColor(pkt)) xOnPaper := plot.xLeftMargin + x*plot.xScale yOnPaper 
:= plot.yPaperSize - plot.yBottomMargin - (y-plot.yMin)*plot.yScale pdf.Line(xOnPaper, plot.yZeroAt, xOnPaper, yOnPaper) } func pktColor(pkt packet.Packet) (r uint8, g uint8, b uint8) { switch pkt.Type() { case packet.TypeLatency: return 0xb8, 0xbb, 0x26 case packet.TypePTP: return 0xcc, 0x24, 0x1d case packet.TypeOther: return 0xeb, 0xdb, 0xb2 } return 0xff, 0x00, 0x0 } func drawAxis(pdf *gopdf.GoPdf, plot *plotter) { // main X axis pdf.SetStrokeColor(0, 0, 0) pdf.SetLineWidth(1) pdf.SetLineType("solid") pdf.Line(plot.xLeftMargin, plot.yZeroAt, plot.xPaperSize-plot.xRightMargin, plot.yZeroAt) // X axis marks for _, x := range horizontalSteps(false, plot) { xOnPaper := plot.xLeftMargin + x*plot.xScale pdf.Line(xOnPaper, plot.yZeroAt-4, xOnPaper, plot.yZeroAt+4) } // helper X lines pdf.SetStrokeColor(0x66, 0x66, 0x66) pdf.SetLineWidth(0.01) pdf.SetLineType("dotted") for _, y := range verticalSteps(plot) { yOnPaper := plot.yPaperSize - plot.yBottomMargin - (y-plot.yMin)*plot.yScale pdf.Line(plot.xLeftMargin, yOnPaper, plot.xPaperSize-plot.xRightMargin, yOnPaper) } }
fromPackets
identifier_name
plot.go
package plot import ( "fmt" "math" "os" "time" "trex-helpers/pkg/analytics" "trex-helpers/pkg/packet" "github.com/signintech/gopdf" ) //const xPaperSize float64 = 842.0 * 3 //const yPaperSize float64 = 595.0 type plotter struct { xPaperSize float64 yPaperSize float64 xLeftMargin float64 xRightMargin float64 yTopMargin float64 yBottomMargin float64 titlePrefix string inputFilename string xMin int64 xMax int64 yMin float64 yMax float64 xScale float64 yScale float64 yZeroAt float64 xLineStep int64 yLineStep int64 } func (plot plotter) width() float64 { return plot.xPaperSize - plot.xLeftMargin - plot.xRightMargin } func (plot plotter) height() float64 { return plot.yPaperSize - plot.yTopMargin - plot.yBottomMargin } func (plot *plotter) fromPackets(packets []packet.Packet) { plot.xMin, plot.xMax, plot.yMin, plot.yMax = maxPacketsValue(packets) plot.xScale, plot.yScale = plot.width()/float64(plot.xMax-plot.xMin), plot.height()/(plot.yMax-plot.yMin) plot.yZeroAt = plot.yPaperSize - plot.yBottomMargin + plot.yMin*plot.yScale } type stats struct { averageLatency float64 periodicLatencies []analytics.PeriodicAvgLatency } func (sts *stats) fromPackets(packets []packet.Packet) { sts.averageLatency = analytics.CalcPositiveAverageLatency(packets) sts.periodicLatencies = analytics.CalcPeriodicAverageLatency(packets) } func maxPacketsValue(packets []packet.Packet) (xMin int64, xMax int64, yMin float64, yMax float64) { xMin, xMax = int64(1<<63-1), -int64(1<<63-1) yMin, yMax = float64(xMin), float64(xMax) for _, pkt := range packets { x := pkt.ReceivedAt().UnixNano() y := pkt.Value() if x < xMin { xMin = x } if x > xMax { xMax = x } if y < yMin { yMin = y } if y > yMax { yMax = y } } //fmt.Printf("boundaries: x = %v .. %v, y = %v .. 
%v\n", xMin, xMax, yMin, yMax) return } func SavePDF(packets []packet.Packet, inputFilename string, filename string, verbose bool) (err error) { plot := plotter{ xPaperSize: 842 * 4, yPaperSize: 595, xLeftMargin: 12, xRightMargin: 12, yTopMargin: 24, yBottomMargin: 12, titlePrefix: "TRex Packets Chart", inputFilename: inputFilename, //outputFilename: filename, } plot.fromPackets(packets) stats := stats{averageLatency: 0} stats.fromPackets(packets) pdf, err := preparePdf(&plot, stats) if err != nil { return } drawPackets(&pdf, packets, &plot) drawAnalytics(&pdf, stats, &plot) drawAxis(&pdf, &plot) f, err := os.Create(filename) if err != nil { return err } defer f.Close() err = pdf.Write(f) if err != nil { return err } return nil } func preparePdf(plot *plotter, sts stats) (pdf gopdf.GoPdf, err error) { pdf = gopdf.GoPdf{} pdf.Start(gopdf.Config{PageSize: gopdf.Rect{W: plot.xPaperSize, H: plot.yPaperSize}, Unit: gopdf.Unit_PT}) pdf.SetInfo(gopdf.PdfInfo{ Title: fmt.Sprintf("%v for %v", plot.titlePrefix, plot.inputFilename), Subject: plot.titlePrefix, Creator: "trex-helpers", Producer: "https://github.com/signintech/gopdf", CreationDate: time.Now(), }) pdf.AddPage() err = pdf.AddTTFFont("FiraSans-Book", "/usr/share/fonts/TTF/FiraSans-Book.ttf") if err != nil { return } err = pdf.AddTTFFont("FiraSans-Medium", "/usr/share/fonts/TTF/FiraSans-Medium.ttf") if err != nil { return } err = makeTitle(&pdf, plot.inputFilename) if err != nil { return } err = makeFootnote(&pdf, plot) if err != nil { return } // due to some bug(?) 
in gopdf one cannot reliably write text on already “drawn” PDF page err = makeAxisAnnotations(&pdf, plot) if err != nil { return } err = makeStatsAnnotations(&pdf, sts, plot) if err != nil { return } return pdf, err } func makeTitle(pdf *gopdf.GoPdf, inputFilename string) (err error) { pdf.SetTextColor(0x00, 0x00, 0x00) err = pdf.SetFont("FiraSans-Book", "", 18) if err != nil { return } pdf.SetX(4) pdf.SetY(22) err = pdf.Text("TRex Packets Chart for ") if err != nil { return } err = pdf.SetFont("FiraSans-Medium", "", 18) if err != nil { return } err = pdf.Text(inputFilename) if err != nil { return } return nil } func makeFootnote(pdf *gopdf.GoPdf, plot *plotter) (err error) { err = pdf.SetFont("FiraSans-Book", "", 8) if err != nil { return } pdf.SetX(4) pdf.SetY(plot.yPaperSize - 3) err = pdf.Text(fmt.Sprintf("%v", time.Now())) if err != nil { return err } pdf.SetX(plot.xPaperSize - 106) err = pdf.Text("generated with trex-helpers") if err != nil { return err } pdf.AddExternalLink("https://github.com/mateumann/trex-helpers", plot.xPaperSize-106.5, plot.yPaperSize-10.5, 105, 10) return nil } func makeAxisAnnotations(pdf *gopdf.GoPdf, plot *plotter) (err error) { pdf.SetTextColor(0, 0, 0) err = pdf.SetFont("FiraSans-Book", "", 12) if err != nil { return } for _, y := range verticalSteps(plot) { yOnPaper := plot.yPaperSize - plot.yBottomMargin - (y-plot.yMin)*plot.yScale err = makeAnnotation(pdf, plot.xLeftMargin, yOnPaper-4, 0, 0, 0, fmt.Sprintf("%v µs", y)) if err != nil { return } } for _, x := range horizontalSteps(true, plot) { xOnPaper := plot.xLeftMargin + x*plot.xScale err = makeAnnotation(pdf, xOnPaper-6, plot.yZeroAt+16, 0, 0, 0, fmt.Sprintf("%v s", x/1000/1000/1000)) if err != nil {
return nil } func verticalSteps(plot *plotter) (steps []float64) { plot.yLineStep = int64(math.Pow10(int(math.Ceil(math.Log10((plot.yMax-plot.yMin)/2))) - 1)) lo := plot.yLineStep * (int64(plot.yMin) / plot.yLineStep) hi := plot.yLineStep * (int64(plot.yMax) / plot.yLineStep) for y := lo; y <= hi; y += plot.yLineStep { steps = append(steps, float64(y)) } return } func horizontalSteps(forAnnotations bool, plot *plotter) (steps []float64) { if forAnnotations { plot.xLineStep = int64(math.Pow10(int(math.Ceil(math.Log10(float64(plot.xMax-plot.xMin)))) - 1)) } else { plot.xLineStep = int64(math.Pow10(int(math.Ceil(math.Log10(float64(plot.xMax-plot.xMin)))) - 2)) } //fmt.Printf("forAnnotations = %5v, xLineStep = %v\n", forAnnotations, plot.xLineStep) hi := plot.xLineStep * (plot.xMax / plot.xLineStep) for x := plot.xMin + plot.xLineStep; x <= hi+plot.xLineStep; x += plot.xLineStep { steps = append(steps, float64(x-plot.xMin)) } return } func makeAnnotation(pdf *gopdf.GoPdf, x, y float64, r, g, b uint8, text string) (err error) { pdf.SetTextColor(r, g, b) pdf.SetX(x) pdf.SetY(y) err = pdf.Text(text) if err != nil { return } return nil } func makeStatsAnnotations(pdf *gopdf.GoPdf, sts stats, plot *plotter) (err error) { yOnPaper := plot.yPaperSize - plot.yBottomMargin - (sts.averageLatency-plot.yMin)*plot.yScale err = pdf.SetFont("FiraSans-Book", "", 12) if err != nil { return err } err = makeAnnotation(pdf, plot.xLeftMargin+20, yOnPaper-5, 0xd3, 0x86, 0x9b, fmt.Sprintf("avg. lat. 
%.2f µs", sts.averageLatency)) if err != nil { return err } pdf.SetStrokeColor(0xfb, 0x49, 0x34) for _, periodicData := range sts.periodicLatencies { x0 := float64(periodicData.StartTimestamp.UnixNano() - plot.xMin) x0OnPaper := plot.xLeftMargin + x0*plot.xScale yOnPaper := plot.yPaperSize - plot.yBottomMargin - (periodicData.Value-plot.yMin)*plot.yScale if periodicData.Value < 0 { yOnPaper += 20 } err = makeAnnotation(pdf, x0OnPaper+5, yOnPaper-5, 0xfb, 0x49, 0x34, fmt.Sprintf("%.2f µs", periodicData.Value)) if err != nil { return err } } return nil } func drawAnalytics(pdf *gopdf.GoPdf, sts stats, plot *plotter) { yOnPaper := plot.yPaperSize - plot.yBottomMargin - (sts.averageLatency-plot.yMin)*plot.yScale pdf.SetStrokeColor(0xd3, 0x86, 0x9b) pdf.SetLineWidth(1) pdf.SetLineType("dashed") pdf.Line(plot.xLeftMargin, yOnPaper, plot.xPaperSize-plot.xRightMargin, yOnPaper) //pdf.SetStrokeColor(0xd3, 0x86, 0x9b) pdf.SetStrokeColor(0xfb, 0x49, 0x34) for _, periodicData := range sts.periodicLatencies { x0 := float64(periodicData.StartTimestamp.UnixNano() - plot.xMin) x1 := float64(periodicData.EndTimestamp.UnixNano() - plot.xMin) x0OnPaper := plot.xLeftMargin + x0*plot.xScale x1OnPaper := plot.xLeftMargin + x1*plot.xScale yOnPaper := plot.yPaperSize - plot.yBottomMargin - (periodicData.Value-plot.yMin)*plot.yScale pdf.Line(x0OnPaper, yOnPaper, x1OnPaper, yOnPaper) } } func drawPackets(pdf *gopdf.GoPdf, packets []packet.Packet, plot *plotter) { // first draw "other" packets, then the rest pdf.SetLineWidth(plot.xScale) pdf.SetLineType("solid") for _, pkt := range packets { if pkt.Type() != packet.TypeOther { continue } makeLine(pdf, pkt, plot) } for _, pkt := range packets { if pkt.Type() == packet.TypeOther { continue } makeLine(pdf, pkt, plot) } } func makeLine(pdf *gopdf.GoPdf, pkt packet.Packet, plot *plotter) { x := float64(pkt.ReceivedAt().UnixNano() - plot.xMin) y := pkt.Value() pdf.SetStrokeColor(pktColor(pkt)) xOnPaper := plot.xLeftMargin + x*plot.xScale yOnPaper 
:= plot.yPaperSize - plot.yBottomMargin - (y-plot.yMin)*plot.yScale pdf.Line(xOnPaper, plot.yZeroAt, xOnPaper, yOnPaper) } func pktColor(pkt packet.Packet) (r uint8, g uint8, b uint8) { switch pkt.Type() { case packet.TypeLatency: return 0xb8, 0xbb, 0x26 case packet.TypePTP: return 0xcc, 0x24, 0x1d case packet.TypeOther: return 0xeb, 0xdb, 0xb2 } return 0xff, 0x00, 0x0 } func drawAxis(pdf *gopdf.GoPdf, plot *plotter) { // main X axis pdf.SetStrokeColor(0, 0, 0) pdf.SetLineWidth(1) pdf.SetLineType("solid") pdf.Line(plot.xLeftMargin, plot.yZeroAt, plot.xPaperSize-plot.xRightMargin, plot.yZeroAt) // X axis marks for _, x := range horizontalSteps(false, plot) { xOnPaper := plot.xLeftMargin + x*plot.xScale pdf.Line(xOnPaper, plot.yZeroAt-4, xOnPaper, plot.yZeroAt+4) } // helper X lines pdf.SetStrokeColor(0x66, 0x66, 0x66) pdf.SetLineWidth(0.01) pdf.SetLineType("dotted") for _, y := range verticalSteps(plot) { yOnPaper := plot.yPaperSize - plot.yBottomMargin - (y-plot.yMin)*plot.yScale pdf.Line(plot.xLeftMargin, yOnPaper, plot.xPaperSize-plot.xRightMargin, yOnPaper) } }
return } }
conditional_block
plot.go
package plot import ( "fmt" "math" "os" "time" "trex-helpers/pkg/analytics" "trex-helpers/pkg/packet" "github.com/signintech/gopdf" ) //const xPaperSize float64 = 842.0 * 3 //const yPaperSize float64 = 595.0 type plotter struct { xPaperSize float64 yPaperSize float64 xLeftMargin float64 xRightMargin float64 yTopMargin float64 yBottomMargin float64 titlePrefix string inputFilename string xMin int64 xMax int64 yMin float64 yMax float64 xScale float64 yScale float64 yZeroAt float64 xLineStep int64 yLineStep int64 } func (plot plotter) width() float64 { return plot.xPaperSize - plot.xLeftMargin - plot.xRightMargin } func (plot plotter) height() float64 { return plot.yPaperSize - plot.yTopMargin - plot.yBottomMargin } func (plot *plotter) fromPackets(packets []packet.Packet) { plot.xMin, plot.xMax, plot.yMin, plot.yMax = maxPacketsValue(packets) plot.xScale, plot.yScale = plot.width()/float64(plot.xMax-plot.xMin), plot.height()/(plot.yMax-plot.yMin) plot.yZeroAt = plot.yPaperSize - plot.yBottomMargin + plot.yMin*plot.yScale } type stats struct { averageLatency float64 periodicLatencies []analytics.PeriodicAvgLatency } func (sts *stats) fromPackets(packets []packet.Packet) { sts.averageLatency = analytics.CalcPositiveAverageLatency(packets) sts.periodicLatencies = analytics.CalcPeriodicAverageLatency(packets) } func maxPacketsValue(packets []packet.Packet) (xMin int64, xMax int64, yMin float64, yMax float64) { xMin, xMax = int64(1<<63-1), -int64(1<<63-1) yMin, yMax = float64(xMin), float64(xMax) for _, pkt := range packets { x := pkt.ReceivedAt().UnixNano() y := pkt.Value() if x < xMin { xMin = x } if x > xMax { xMax = x } if y < yMin { yMin = y } if y > yMax { yMax = y } } //fmt.Printf("boundaries: x = %v .. %v, y = %v .. 
%v\n", xMin, xMax, yMin, yMax) return } func SavePDF(packets []packet.Packet, inputFilename string, filename string, verbose bool) (err error) { plot := plotter{ xPaperSize: 842 * 4, yPaperSize: 595, xLeftMargin: 12, xRightMargin: 12, yTopMargin: 24, yBottomMargin: 12, titlePrefix: "TRex Packets Chart", inputFilename: inputFilename, //outputFilename: filename, } plot.fromPackets(packets) stats := stats{averageLatency: 0} stats.fromPackets(packets) pdf, err := preparePdf(&plot, stats) if err != nil { return } drawPackets(&pdf, packets, &plot) drawAnalytics(&pdf, stats, &plot) drawAxis(&pdf, &plot) f, err := os.Create(filename) if err != nil { return err } defer f.Close() err = pdf.Write(f) if err != nil { return err } return nil } func preparePdf(plot *plotter, sts stats) (pdf gopdf.GoPdf, err error) { pdf = gopdf.GoPdf{} pdf.Start(gopdf.Config{PageSize: gopdf.Rect{W: plot.xPaperSize, H: plot.yPaperSize}, Unit: gopdf.Unit_PT}) pdf.SetInfo(gopdf.PdfInfo{ Title: fmt.Sprintf("%v for %v", plot.titlePrefix, plot.inputFilename), Subject: plot.titlePrefix, Creator: "trex-helpers", Producer: "https://github.com/signintech/gopdf", CreationDate: time.Now(), }) pdf.AddPage() err = pdf.AddTTFFont("FiraSans-Book", "/usr/share/fonts/TTF/FiraSans-Book.ttf") if err != nil { return } err = pdf.AddTTFFont("FiraSans-Medium", "/usr/share/fonts/TTF/FiraSans-Medium.ttf") if err != nil { return } err = makeTitle(&pdf, plot.inputFilename) if err != nil { return } err = makeFootnote(&pdf, plot) if err != nil { return } // due to some bug(?) 
in gopdf one cannot reliably write text on already “drawn” PDF page err = makeAxisAnnotations(&pdf, plot) if err != nil { return } err = makeStatsAnnotations(&pdf, sts, plot) if err != nil { return } return pdf, err } func makeTitle(pdf *gopdf.GoPdf, inputFilename string) (err error) { pdf.SetTextColor(0x00, 0x00, 0x00) err = pdf.SetFont("FiraSans-Book", "", 18) if err != nil { return } pdf.SetX(4) pdf.SetY(22) err = pdf.Text("TRex Packets Chart for ") if err != nil { return } err = pdf.SetFont("FiraSans-Medium", "", 18) if err != nil { return } err = pdf.Text(inputFilename) if err != nil { return } return nil } func makeFootnote(pdf *gopdf.GoPdf, plot *plotter) (err error) { err = pdf.SetFont("FiraSans-Book", "", 8) if err != nil { return } pdf.SetX(4) pdf.SetY(plot.yPaperSize - 3) err = pdf.Text(fmt.Sprintf("%v", time.Now())) if err != nil { return err } pdf.SetX(plot.xPaperSize - 106) err = pdf.Text("generated with trex-helpers") if err != nil { return err } pdf.AddExternalLink("https://github.com/mateumann/trex-helpers", plot.xPaperSize-106.5, plot.yPaperSize-10.5, 105, 10) return nil } func makeAxisAnnotations(pdf *gopdf.GoPdf, plot *plotter) (err error) { pdf.SetTextColor(0, 0, 0) err = pdf.SetFont("FiraSans-Book", "", 12) if err != nil { return } for _, y := range verticalSteps(plot) { yOnPaper := plot.yPaperSize - plot.yBottomMargin - (y-plot.yMin)*plot.yScale err = makeAnnotation(pdf, plot.xLeftMargin, yOnPaper-4, 0, 0, 0, fmt.Sprintf("%v µs", y)) if err != nil { return } } for _, x := range horizontalSteps(true, plot) { xOnPaper := plot.xLeftMargin + x*plot.xScale err = makeAnnotation(pdf, xOnPaper-6, plot.yZeroAt+16, 0, 0, 0, fmt.Sprintf("%v s", x/1000/1000/1000)) if err != nil { return } } return nil } func verticalSteps(plot *plotter) (steps []float64) { plot.yLineStep = int64(math.Pow10(int(math.Ceil(math.Log10((plot.yMax-plot.yMin)/2))) - 1)) lo := plot.yLineStep * (int64(plot.yMin) / plot.yLineStep) hi := plot.yLineStep * (int64(plot.yMax) / 
plot.yLineStep) for y := lo; y <= hi; y += plot.yLineStep { steps = append(steps, float64(y)) } return } func horizontalSteps(forAnnotations bool, plot *plotter) (steps []float64) { if forAnnotations { plot.xLineStep = int64(math.Pow10(int(math.Ceil(math.Log10(float64(plot.xMax-plot.xMin)))) - 1)) } else { plot.xLineStep = int64(math.Pow10(int(math.Ceil(math.Log10(float64(plot.xMax-plot.xMin)))) - 2)) } //fmt.Printf("forAnnotations = %5v, xLineStep = %v\n", forAnnotations, plot.xLineStep) hi := plot.xLineStep * (plot.xMax / plot.xLineStep) for x := plot.xMin + plot.xLineStep; x <= hi+plot.xLineStep; x += plot.xLineStep { steps = append(steps, float64(x-plot.xMin)) } return } func makeAnnotation(pdf *gopdf.GoPdf, x, y float64, r, g, b uint8, text string) (err error) { pdf.SetTextColor(r, g, b) pdf.SetX(x) pdf.SetY(y) err = pdf.Text(text) if err != nil { return } return nil } func makeStatsAnnotations(pdf *gopdf.GoPdf, sts stats, plot *plotter) (err error) { yOnPaper := plot.yPaperSize - plot.yBottomMargin - (sts.averageLatency-plot.yMin)*plot.yScale err = pdf.SetFont("FiraSans-Book", "", 12) if err != nil { return err } err = makeAnnotation(pdf, plot.xLeftMargin+20, yOnPaper-5, 0xd3, 0x86, 0x9b, fmt.Sprintf("avg. lat. 
%.2f µs", sts.averageLatency)) if err != nil { return err } pdf.SetStrokeColor(0xfb, 0x49, 0x34) for _, periodicData := range sts.periodicLatencies { x0 := float64(periodicData.StartTimestamp.UnixNano() - plot.xMin) x0OnPaper := plot.xLeftMargin + x0*plot.xScale yOnPaper := plot.yPaperSize - plot.yBottomMargin - (periodicData.Value-plot.yMin)*plot.yScale if periodicData.Value < 0 { yOnPaper += 20 } err = makeAnnotation(pdf, x0OnPaper+5, yOnPaper-5, 0xfb, 0x49, 0x34, fmt.Sprintf("%.2f µs", periodicData.Value)) if err != nil { return err } } return nil } func drawAnalytics(pdf *gopdf.GoPdf, sts stats, plot *plotter) { yOnPaper := plot.yPaperSize - plot.yBottomMargin - (sts.averageLatency-plot.yMin)*plot.yScale pdf.SetStrokeColor(0xd3, 0x86, 0x9b) pdf.SetLineWidth(1) pdf.SetLineType("dashed") pdf.Line(plot.xLeftMargin, yOnPaper, plot.xPaperSize-plot.xRightMargin, yOnPaper) //pdf.SetStrokeColor(0xd3, 0x86, 0x9b) pdf.SetStrokeColor(0xfb, 0x49, 0x34) for _, periodicData := range sts.periodicLatencies { x0 := float64(periodicData.StartTimestamp.UnixNano() - plot.xMin) x1 := float64(periodicData.EndTimestamp.UnixNano() - plot.xMin) x0OnPaper := plot.xLeftMargin + x0*plot.xScale x1OnPaper := plot.xLeftMargin + x1*plot.xScale yOnPaper := plot.yPaperSize - plot.yBottomMargin - (periodicData.Value-plot.yMin)*plot.yScale pdf.Line(x0OnPaper, yOnPaper, x1OnPaper, yOnPaper) } } func drawPackets(pdf *gopdf.GoPdf, packets []packet.Packet, plot *plotter) { // first draw "other" packets, then the rest pdf.SetLineWidth(plot.xScale) pdf.SetLineType("solid") for _, pkt := range packets { if pkt.Type() != packet.TypeOther { continue } makeLine(pdf, pkt, plot) } for _, pkt := range packets { if pkt.Type() == packet.TypeOther { continue } makeLine(pdf, pkt, plot) } } func makeLine(pdf *gopdf.GoPdf, pkt packet.Packet, plot *plotter) { x :=
pktColor(pkt packet.Packet) (r uint8, g uint8, b uint8) { switch pkt.Type() { case packet.TypeLatency: return 0xb8, 0xbb, 0x26 case packet.TypePTP: return 0xcc, 0x24, 0x1d case packet.TypeOther: return 0xeb, 0xdb, 0xb2 } return 0xff, 0x00, 0x0 } func drawAxis(pdf *gopdf.GoPdf, plot *plotter) { // main X axis pdf.SetStrokeColor(0, 0, 0) pdf.SetLineWidth(1) pdf.SetLineType("solid") pdf.Line(plot.xLeftMargin, plot.yZeroAt, plot.xPaperSize-plot.xRightMargin, plot.yZeroAt) // X axis marks for _, x := range horizontalSteps(false, plot) { xOnPaper := plot.xLeftMargin + x*plot.xScale pdf.Line(xOnPaper, plot.yZeroAt-4, xOnPaper, plot.yZeroAt+4) } // helper X lines pdf.SetStrokeColor(0x66, 0x66, 0x66) pdf.SetLineWidth(0.01) pdf.SetLineType("dotted") for _, y := range verticalSteps(plot) { yOnPaper := plot.yPaperSize - plot.yBottomMargin - (y-plot.yMin)*plot.yScale pdf.Line(plot.xLeftMargin, yOnPaper, plot.xPaperSize-plot.xRightMargin, yOnPaper) } }
float64(pkt.ReceivedAt().UnixNano() - plot.xMin) y := pkt.Value() pdf.SetStrokeColor(pktColor(pkt)) xOnPaper := plot.xLeftMargin + x*plot.xScale yOnPaper := plot.yPaperSize - plot.yBottomMargin - (y-plot.yMin)*plot.yScale pdf.Line(xOnPaper, plot.yZeroAt, xOnPaper, yOnPaper) } func
identifier_body
cc.rs
use crate::collect; use crate::collect::AbstractObjectSpace; use crate::collect::ObjectSpace; use crate::debug; use crate::ref_count::RefCount; use crate::trace::Trace; use crate::trace::Tracer; use std::cell::UnsafeCell; use std::mem; use std::mem::ManuallyDrop; use std::ops::Deref; use std::ops::DerefMut; use std::panic::UnwindSafe; use std::ptr::NonNull; // Types not tracked by the cycle collector: // // CcBox<T> // +-----------+ <---+--- Cc<T> (pointer) // | ref_count | | // +-----------+ +--- Cc<T> (pointer) // | T (data) | // +-----------+ // // Types tracked by the cycle collector: // // CcBoxWithHeader<T> // +----------------------+ // | GcHeader | next | (GcHeader is in a linked list) // | | prev | // | | vptr<T> | // +----------------------+ <---+--- Cc<T> (pointer) // | CcBox<T> | ref_count | | // | | T (data) | +--- Cc<T> (pointer) // +----------------------+ /// The data shared by multiple `RawCc<T, O>` pointers. #[repr(C)] pub(crate) struct RawCcBox<T: ?Sized, O: AbstractObjectSpace> { pub(crate) ref_count: O::RefCount, #[cfg(test)] pub(crate) name: String, value: UnsafeCell<ManuallyDrop<T>>, } /// The real layout if `T` is tracked by the collector. The main APIs still use /// the `CcBox` type. This type is only used for allocation and deallocation. /// /// This is a private type. #[repr(C)] pub struct RawCcBoxWithGcHeader<T: ?Sized, O: AbstractObjectSpace> { header: O::Header, cc_box: RawCcBox<T, O>, } /// A single-threaded reference-counting pointer that integrates /// with cyclic garbage collection. /// /// See [module level documentation](index.html) for more details. /// /// [`Cc`](type.Cc.html) is not thread-safe. It does not implement `Send` /// or `Sync`: /// /// ```compile_fail /// use std::ops::Deref; /// use gcmodule::Cc; /// let cc = Cc::new(5); /// std::thread::spawn(move || { /// println!("{}", cc.deref()); /// }); /// ``` pub type Cc<T> = RawCc<T, ObjectSpace>; /// Weak reference of [`Cc`](type.Cc.html). 
pub type Weak<T> = RawWeak<T, ObjectSpace>; /// Low-level type for [`Cc<T>`](type.Cc.html). pub struct RawCc<T: ?Sized, O: AbstractObjectSpace>(NonNull<RawCcBox<T, O>>); /// Low-level type for [`Weak<T>`](type.Weak.html). pub struct RawWeak<T: ?Sized, O: AbstractObjectSpace>(NonNull<RawCcBox<T, O>>); // `ManuallyDrop<T>` does not implement `UnwindSafe`. But `CcBox::drop` does // make sure `T` is dropped. If `T` is unwind-safe, so does `CcBox<T>`. impl<T: UnwindSafe + ?Sized> UnwindSafe for RawCcBox<T, ObjectSpace> {} // `NonNull` does not implement `UnwindSafe`. But `Cc` and `Weak` only use it // as a "const" pointer. If `T` is unwind-safe, so does `Cc<T>`. impl<T: UnwindSafe + ?Sized, O: AbstractObjectSpace> UnwindSafe for RawCc<T, O> {} impl<T: UnwindSafe + ?Sized, O: AbstractObjectSpace> UnwindSafe for RawWeak<T, O> {} /// Type-erased `Cc<T>` with interfaces needed by GC. /// /// This is a private type. pub trait CcDyn { /// Returns the reference count for cycle detection. fn gc_ref_count(&self) -> usize; /// Visit referents for cycle detection. fn gc_traverse(&self, tracer: &mut Tracer); /// Get an cloned `Cc<dyn Trace>`. This has 2 purposes: /// - Keep a reference so `CcBox<T>` is not released in the next step. /// So metadata like `ref_count` can still be read. /// - Operate on the object. fn gc_clone(&self) -> Box<dyn GcClone>; #[cfg(feature = "debug")] /// Name used in collect.rs. fn gc_debug_name(&self) -> String { "?".to_string() } } /// Type-erased gc_clone result. /// /// This is a private type. pub trait GcClone { /// Force drop the value T. fn gc_drop_t(&self); /// Returns the reference count. This is useful for verification. fn gc_ref_count(&self) -> usize; } /// A dummy implementation without drop side-effects. pub(crate) struct CcDummy; impl CcDummy { pub(crate) fn ccdyn_vptr() -> *mut () { let mut dummy = CcDummy; // safety: To access vtable pointer. Stable API cannot do it. 
let fat_ptr: [*mut (); 2] = unsafe { mem::transmute(&mut dummy as &mut dyn CcDyn) }; fat_ptr[1] } } impl CcDyn for CcDummy { fn gc_ref_count(&self) -> usize { 1 } fn gc_traverse(&self, _tracer: &mut Tracer) {} fn gc_clone(&self) -> Box<dyn GcClone> { panic!("bug: CcDummy::gc_clone should never be called"); } } impl<T: Trace> Cc<T> { /// Constructs a new [`Cc<T>`](type.Cc.html) in a thread-local storage. /// /// To collect cycles, use [`collect_thread_cycles`](fn.collect_thread_cycles.html). pub fn new(value: T) -> Cc<T> { collect::THREAD_OBJECT_SPACE.with(|space| Self::new_in_space(value, space)) } } impl<T: Trace, O: AbstractObjectSpace> RawCc<T, O> { /// Constructs a new [`Cc<T>`](type.Cc.html) in the given /// [`ObjectSpace`](struct.ObjectSpace.html). /// /// To collect cycles, call `ObjectSpace::collect_cycles()`. pub(crate) fn new_in_space(value: T, space: &O) -> Self { let is_tracked = T::is_type_tracked(); let cc_box = RawCcBox { ref_count: space.new_ref_count(is_tracked), value: UnsafeCell::new(ManuallyDrop::new(value)), #[cfg(test)] name: debug::NEXT_DEBUG_NAME.with(|n| n.get().to_string()), }; let ccbox_ptr: *mut RawCcBox<T, O> = if is_tracked { // Create a GcHeader before the CcBox. This is similar to cpython. let header = space.empty_header(); let cc_box_with_header = RawCcBoxWithGcHeader { header, cc_box }; let mut boxed = Box::new(cc_box_with_header); // Fix-up fields in GcHeader. This is done after the creation of the // Box so the memory addresses are stable. space.insert(&mut boxed.header, &boxed.cc_box); debug_assert_eq!( mem::size_of::<O::Header>() + mem::size_of::<RawCcBox<T, O>>(), mem::size_of::<RawCcBoxWithGcHeader<T, O>>() ); let ptr: *mut RawCcBox<T, O> = &mut boxed.cc_box; Box::leak(boxed); ptr } else
; // safety: ccbox_ptr cannot be null from the above code. let non_null = unsafe { NonNull::new_unchecked(ccbox_ptr) }; let result = Self(non_null); if is_tracked { debug::log(|| (result.debug_name(), "new (CcBoxWithGcHeader)")); } else { debug::log(|| (result.debug_name(), "new (CcBox)")); } debug_assert_eq!(result.ref_count(), 1); result } /// Convert to `RawCc<dyn Trace>`. pub fn into_dyn(self) -> RawCc<dyn Trace, O> { #[cfg(feature = "nightly")] { // Requires CoerceUnsized, which is currently unstable. self } // safety: Trait object magic. Test by test_dyn_downcast. #[cfg(not(feature = "nightly"))] unsafe { // XXX: This depends on rust internals. But it works on stable. // Replace this with CoerceUnsized once that becomes stable. // Cc<dyn Trace> has 2 usize values: The first one is the same // as Cc<T>. The second one is the vtable. The vtable pointer // is the same as the second pointer of `&dyn Trace`. let mut fat_ptr: [usize; 2] = mem::transmute(self.inner().deref() as &dyn Trace); let self_ptr: usize = mem::transmute(self); fat_ptr[0] = self_ptr; mem::transmute(fat_ptr) } } } impl<T: Trace + Clone> Cc<T> { /// Update the value `T` in a copy-on-write way. /// /// If the ref count is 1, the value is updated in-place. /// Otherwise a new `Cc<T>` will be created. pub fn update_with(&mut self, mut update_func: impl FnMut(&mut T)) { let need_clone = self.ref_count() > 1; if need_clone { let mut value = <Cc<T>>::deref(self).clone(); update_func(&mut value); *self = Cc::new(value); } else { let value_ptr: *mut ManuallyDrop<T> = self.inner().value.get(); let value_mut: &mut T = unsafe { &mut *value_ptr }.deref_mut(); update_func(value_mut); } } } impl<T: ?Sized, O: AbstractObjectSpace> RawCcBox<T, O> { #[inline] fn header_ptr(&self) -> *const () { self.header() as *const _ as _ } #[inline] fn header(&self) -> &O::Header { debug_assert!(self.is_tracked()); // safety: See `Cc::new`. GcHeader is before CcBox for tracked objects. 
unsafe { cast_ref(self, -(mem::size_of::<O::Header>() as isize)) } } #[inline] fn is_tracked(&self) -> bool { self.ref_count.is_tracked() } #[inline] fn is_dropped(&self) -> bool { self.ref_count.is_dropped() } #[inline] fn inc_ref(&self) -> usize { self.ref_count.inc_ref() } #[inline] fn dec_ref(&self) -> usize { self.ref_count.dec_ref() } #[inline] fn ref_count(&self) -> usize { self.ref_count.ref_count() } #[inline] fn weak_count(&self) -> usize { self.ref_count.weak_count() } #[inline] fn set_dropped(&self) -> bool { self.ref_count.set_dropped() } #[inline] pub(crate) fn drop_t(&self) { let already_dropped = self.set_dropped(); if !already_dropped { debug::log(|| (self.debug_name(), "drop (T)")); // safety: is_dropped() check ensures T is only dropped once. Other // places (ex. gc collector) ensure that T is no longer accessed. unsafe { ManuallyDrop::drop(&mut *(self.value.get())) }; } } pub(crate) fn trace_t(&self, tracer: &mut Tracer) { if !self.is_tracked() { return; } debug::log(|| (self.debug_name(), "trace")); // For other non-`Cc<T>` container types, `trace` visit referents, // is recursive, and does not call `tracer` directly. For `Cc<T>`, // `trace` stops here, is non-recursive, and does apply `tracer` // to the actual `GcHeader`. It's expected that the upper layer // calls `gc_traverse` on everything (not just roots). 
tracer(self.header_ptr()); } pub(crate) fn debug_name(&self) -> String { #[cfg(test)] { self.name.clone() } #[cfg(not(test))] { #[allow(unused_mut)] let mut result = format!("{} at {:p}", std::any::type_name::<T>(), &self.value); #[cfg(all(feature = "debug", feature = "nightly"))] { if !self.is_dropped() && crate::debug::GC_DROPPING.with(|t| !t.get()) { let debug = self.deref().optional_debug(); if !debug.is_empty() { result += &format!(" {}", debug); } } } return result; } } } #[cfg(all(feature = "debug", feature = "nightly"))] pub(crate) trait OptionalDebug { fn optional_debug(&self) -> String; } #[cfg(all(feature = "debug", feature = "nightly"))] impl<T: ?Sized> OptionalDebug for T { default fn optional_debug(&self) -> String { "".to_string() } } #[cfg(all(feature = "debug", feature = "nightly"))] impl<T: std::fmt::Debug + ?Sized> OptionalDebug for T { fn optional_debug(&self) -> String { format!("{:?}", self) } } impl<T: ?Sized, O: AbstractObjectSpace> RawCc<T, O> { /// Obtains a "weak reference", a non-owning pointer. pub fn downgrade(&self) -> RawWeak<T, O> { let inner = self.inner(); inner.ref_count.inc_weak(); debug::log(|| { ( inner.debug_name(), format!("new-weak ({})", inner.ref_count.weak_count()), ) }); RawWeak(self.0) } /// Gets the reference count not considering weak references. #[inline] pub fn strong_count(&self) -> usize { self.ref_count() } } impl<T: ?Sized, O: AbstractObjectSpace> RawWeak<T, O> { /// Attempts to obtain a "strong reference". /// /// Returns `None` if the value has already been dropped. pub fn upgrade(&self) -> Option<RawCc<T, O>> { let inner = self.inner(); // Make the below operation "atomic". let _locked = inner.ref_count.locked(); if inner.is_dropped() { None } else { inner.inc_ref(); debug::log(|| { ( inner.debug_name(), format!("new-strong ({})", inner.ref_count.ref_count()), ) }); Some(RawCc(self.0)) } } /// Gets the reference count not considering weak references. 
#[inline] pub fn strong_count(&self) -> usize { self.inner().ref_count() } /// Get the weak (non-owning) reference count. #[inline] pub fn weak_count(&self) -> usize { self.inner().weak_count() } } impl<T: ?Sized, O: AbstractObjectSpace> RawCc<T, O> { #[inline] pub(crate) fn inner(&self) -> &RawCcBox<T, O> { // safety: CcBox lifetime maintained by ref count. Pointer is valid. unsafe { self.0.as_ref() } } /// `trace` without `T: Trace` bound. /// /// Useful for structures with `Cc<T>` fields where `T` does not implement /// `Trace`. For example, `struct S(Cc<Box<dyn MyTrait>>)`. To implement /// `Trace` for `S`, it can use `Cc::trace(&self.0, tracer)`. #[inline] pub fn trace(&self, tracer: &mut Tracer) { self.inner().trace_t(tracer); } #[inline] fn inc_ref(&self) -> usize { self.inner().inc_ref() } #[inline] fn dec_ref(&self) -> usize { self.inner().dec_ref() } #[inline] pub(crate) fn ref_count(&self) -> usize { self.inner().ref_count() } /// Get the weak (non-owning) reference count. #[inline] pub fn weak_count(&self) -> usize { self.inner().weak_count() } pub(crate) fn debug_name(&self) -> String { self.inner().debug_name() } } impl<T: ?Sized, O: AbstractObjectSpace> RawWeak<T, O> { #[inline] fn inner(&self) -> &RawCcBox<T, O> { // safety: CcBox lifetime maintained by ref count. Pointer is valid. unsafe { self.0.as_ref() } } } impl<T: ?Sized, O: AbstractObjectSpace> Clone for RawCc<T, O> { #[inline] fn clone(&self) -> Self { // In theory self.inner().ref_count.locked() is needed. // Practically this is an atomic operation that cannot be split so locking // becomes optional. 
// let _locked = self.inner().ref_count.locked(); self.inc_ref(); debug::log(|| (self.debug_name(), format!("clone ({})", self.ref_count()))); Self(self.0) } } impl<T: ?Sized, O: AbstractObjectSpace> Clone for RawWeak<T, O> { #[inline] fn clone(&self) -> Self { let inner = self.inner(); let ref_count = &inner.ref_count; ref_count.inc_weak(); debug::log(|| { ( inner.debug_name(), format!("clone-weak ({})", ref_count.weak_count()), ) }); Self(self.0) } } impl<T: ?Sized> Deref for Cc<T> { type Target = T; #[inline] fn deref(&self) -> &Self::Target { self.inner().deref() } } impl<T: ?Sized, O: AbstractObjectSpace> Deref for RawCcBox<T, O> { type Target = T; #[inline] fn deref(&self) -> &Self::Target { debug_assert!( !self.is_dropped(), concat!( "bug: accessing a dropped CcBox detected\n", "This usually happens after ignoring another panic triggered by the collector." ) ); // safety: CcBox (and its value) lifetime maintained by ref count. // If `Trace` is implemented correctly then the GC won't drop_t() // incorrectly and this pointer is valid. Otherwise the above // assertion can prevent UBs on debug build. unsafe { &*self.value.get() } } } fn drop_ccbox<T: ?Sized, O: AbstractObjectSpace>(cc_box: *mut RawCcBox<T, O>) { // safety: See Cc::new. The pointer was created by Box::into_raw. let cc_box: Box<RawCcBox<T, O>> = unsafe { Box::from_raw(cc_box) }; let is_tracked = cc_box.is_tracked(); if is_tracked { // The real object is CcBoxWithGcHeader. Drop that instead. // safety: See Cc::new for CcBoxWithGcHeader. let gc_box: Box<RawCcBoxWithGcHeader<T, O>> = unsafe { cast_box(cc_box) }; O::remove(&gc_box.header); // Drop T if it hasn't been dropped yet. // This needs to be after O::remove so the collector won't have a // chance to read dropped content. gc_box.cc_box.drop_t(); debug::log(|| (gc_box.cc_box.debug_name(), "drop (CcBoxWithGcHeader)")); drop(gc_box); } else { // Drop T if it hasn't been dropped yet. 
cc_box.drop_t(); debug::log(|| (cc_box.debug_name(), "drop (CcBox)")); drop(cc_box); } } impl<T: ?Sized, O: AbstractObjectSpace> Drop for RawCc<T, O> { fn drop(&mut self) { let ptr: *mut RawCcBox<T, O> = self.0.as_ptr(); let inner = self.inner(); // Block threaded collector. This is needed because "drop()" is a // complex operation. The whole operation needs to be "atomic". let _locked = inner.ref_count.locked(); let old_ref_count = self.dec_ref(); debug::log(|| (self.debug_name(), format!("drop ({})", self.ref_count()))); debug_assert!(old_ref_count >= 1); if old_ref_count == 1 { if self.weak_count() == 0 { // safety: CcBox lifetime maintained by ref count. drop_ccbox(ptr); } else { inner.drop_t(); } } } } impl<T: ?Sized, O: AbstractObjectSpace> Drop for RawWeak<T, O> { fn drop(&mut self) { let ptr: *mut RawCcBox<T, O> = self.0.as_ptr(); let inner = self.inner(); let ref_count = &inner.ref_count; // Block threaded collector to "freeze" the ref count, for safety. let _locked = ref_count.locked(); let old_ref_count = ref_count.ref_count(); let old_weak_count = ref_count.dec_weak(); debug::log(|| { ( inner.debug_name(), format!("drop-weak ({})", ref_count.weak_count()), ) }); debug_assert!(old_weak_count >= 1); if old_ref_count == 0 && old_weak_count == 1 { // safety: CcBox lifetime maintained by ref count. drop_ccbox(ptr); } } } impl<T: Trace + ?Sized, O: AbstractObjectSpace> CcDyn for RawCcBox<T, O> { fn gc_ref_count(&self) -> usize { self.ref_count() } fn gc_traverse(&self, tracer: &mut Tracer) { debug::log(|| (self.debug_name(), "gc_traverse")); T::trace(self.deref(), tracer) } fn gc_clone(&self) -> Box<dyn GcClone> { self.ref_count.inc_ref(); debug::log(|| { let msg = format!("gc_clone ({})", self.ref_count()); (self.debug_name(), msg) }); // safety: The pointer is compatible. The mutability is different only // to satisfy NonNull (NonNull::new requires &mut). The returned value // is still "immutable". &self can also never be nonnull. 
let ptr: NonNull<RawCcBox<T, O>> = unsafe { NonNull::new_unchecked(self as *const _ as *mut _) }; let cc = RawCc::<T, O>(ptr); Box::new(cc) } #[cfg(feature = "debug")] fn gc_debug_name(&self) -> String { self.debug_name() } } impl<T: Trace + ?Sized, O: AbstractObjectSpace> GcClone for RawCc<T, O> { fn gc_ref_count(&self) -> usize { self.ref_count() } fn gc_drop_t(&self) { self.inner().drop_t() } } impl<T: Trace> Trace for Cc<T> { fn trace(&self, tracer: &mut Tracer) { Cc::<T>::trace(self, tracer) } #[inline] fn is_type_tracked() -> bool { T::is_type_tracked() } } impl Trace for Cc<dyn Trace> { fn trace(&self, tracer: &mut Tracer) { Cc::<dyn Trace>::trace(self, tracer) } #[inline] fn is_type_tracked() -> bool { // Trait objects can be anything. true } } #[cfg(feature = "nightly")] impl<T: ?Sized + std::marker::Unsize<U>, U: ?Sized, O: AbstractObjectSpace> std::ops::CoerceUnsized<RawCc<U, O>> for RawCc<T, O> { } #[inline] unsafe fn cast_ref<T: ?Sized, R>(value: &T, offset_bytes: isize) -> &R { let ptr: *const T = value; let ptr: *const u8 = ptr as _; let ptr = ptr.offset(offset_bytes); &*(ptr as *const R) } #[inline] unsafe fn cast_box<T: ?Sized, O: AbstractObjectSpace>( value: Box<RawCcBox<T, O>>, ) -> Box<RawCcBoxWithGcHeader<T, O>> { let mut ptr: *const RawCcBox<T, O> = Box::into_raw(value); // ptr can be "thin" (1 pointer) or "fat" (2 pointers). // Change the first byte to point to the GcHeader. let pptr: *mut *const RawCcBox<T, O> = &mut ptr; let pptr: *mut *const O::Header = pptr as _; *pptr = (*pptr).offset(-1); let ptr: *mut RawCcBoxWithGcHeader<T, O> = mem::transmute(ptr); Box::from_raw(ptr) } #[cfg(test)] mod tests { use super::*; use crate::collect::Linked; /// Check that `GcHeader::value()` returns a working trait object. 
#[test] fn test_gc_header_value() { let v1: Cc<Box<dyn Trace>> = Cc::new(Box::new(1)); assert_eq!(v1.ref_count(), 1); let v2 = v1.clone(); assert_eq!(v1.ref_count(), 2); assert_eq!(v2.ref_count(), 2); let v3: &dyn CcDyn = v1.inner() as &dyn CcDyn; assert_eq!(v3.gc_ref_count(), 2); let v4: &dyn CcDyn = v2.inner().header().value(); assert_eq!(v4.gc_ref_count(), 2); } #[cfg(feature = "nightly")] #[test] fn test_unsize_coerce() { let _v: Cc<dyn Trace> = Cc::new(vec![1u8, 2, 3]); } }
{ Box::into_raw(Box::new(cc_box)) }
conditional_block
cc.rs
use crate::collect; use crate::collect::AbstractObjectSpace; use crate::collect::ObjectSpace; use crate::debug; use crate::ref_count::RefCount; use crate::trace::Trace; use crate::trace::Tracer; use std::cell::UnsafeCell; use std::mem; use std::mem::ManuallyDrop; use std::ops::Deref; use std::ops::DerefMut; use std::panic::UnwindSafe; use std::ptr::NonNull; // Types not tracked by the cycle collector: // // CcBox<T> // +-----------+ <---+--- Cc<T> (pointer) // | ref_count | | // +-----------+ +--- Cc<T> (pointer) // | T (data) | // +-----------+ // // Types tracked by the cycle collector: // // CcBoxWithHeader<T> // +----------------------+ // | GcHeader | next | (GcHeader is in a linked list) // | | prev | // | | vptr<T> | // +----------------------+ <---+--- Cc<T> (pointer) // | CcBox<T> | ref_count | | // | | T (data) | +--- Cc<T> (pointer) // +----------------------+ /// The data shared by multiple `RawCc<T, O>` pointers. #[repr(C)] pub(crate) struct RawCcBox<T: ?Sized, O: AbstractObjectSpace> { pub(crate) ref_count: O::RefCount, #[cfg(test)] pub(crate) name: String, value: UnsafeCell<ManuallyDrop<T>>, } /// The real layout if `T` is tracked by the collector. The main APIs still use /// the `CcBox` type. This type is only used for allocation and deallocation. /// /// This is a private type. #[repr(C)] pub struct RawCcBoxWithGcHeader<T: ?Sized, O: AbstractObjectSpace> { header: O::Header, cc_box: RawCcBox<T, O>, } /// A single-threaded reference-counting pointer that integrates /// with cyclic garbage collection. /// /// See [module level documentation](index.html) for more details. /// /// [`Cc`](type.Cc.html) is not thread-safe. It does not implement `Send` /// or `Sync`: /// /// ```compile_fail /// use std::ops::Deref; /// use gcmodule::Cc; /// let cc = Cc::new(5); /// std::thread::spawn(move || { /// println!("{}", cc.deref()); /// }); /// ``` pub type Cc<T> = RawCc<T, ObjectSpace>; /// Weak reference of [`Cc`](type.Cc.html). 
pub type Weak<T> = RawWeak<T, ObjectSpace>; /// Low-level type for [`Cc<T>`](type.Cc.html). pub struct RawCc<T: ?Sized, O: AbstractObjectSpace>(NonNull<RawCcBox<T, O>>); /// Low-level type for [`Weak<T>`](type.Weak.html). pub struct RawWeak<T: ?Sized, O: AbstractObjectSpace>(NonNull<RawCcBox<T, O>>); // `ManuallyDrop<T>` does not implement `UnwindSafe`. But `CcBox::drop` does // make sure `T` is dropped. If `T` is unwind-safe, so does `CcBox<T>`. impl<T: UnwindSafe + ?Sized> UnwindSafe for RawCcBox<T, ObjectSpace> {} // `NonNull` does not implement `UnwindSafe`. But `Cc` and `Weak` only use it // as a "const" pointer. If `T` is unwind-safe, so does `Cc<T>`. impl<T: UnwindSafe + ?Sized, O: AbstractObjectSpace> UnwindSafe for RawCc<T, O> {} impl<T: UnwindSafe + ?Sized, O: AbstractObjectSpace> UnwindSafe for RawWeak<T, O> {} /// Type-erased `Cc<T>` with interfaces needed by GC. /// /// This is a private type. pub trait CcDyn { /// Returns the reference count for cycle detection. fn gc_ref_count(&self) -> usize; /// Visit referents for cycle detection. fn gc_traverse(&self, tracer: &mut Tracer); /// Get an cloned `Cc<dyn Trace>`. This has 2 purposes: /// - Keep a reference so `CcBox<T>` is not released in the next step. /// So metadata like `ref_count` can still be read. /// - Operate on the object. fn gc_clone(&self) -> Box<dyn GcClone>; #[cfg(feature = "debug")] /// Name used in collect.rs. fn gc_debug_name(&self) -> String { "?".to_string() } } /// Type-erased gc_clone result. /// /// This is a private type. pub trait GcClone { /// Force drop the value T. fn gc_drop_t(&self); /// Returns the reference count. This is useful for verification. fn gc_ref_count(&self) -> usize; } /// A dummy implementation without drop side-effects. pub(crate) struct CcDummy; impl CcDummy { pub(crate) fn ccdyn_vptr() -> *mut () { let mut dummy = CcDummy; // safety: To access vtable pointer. Stable API cannot do it. 
let fat_ptr: [*mut (); 2] = unsafe { mem::transmute(&mut dummy as &mut dyn CcDyn) }; fat_ptr[1] } } impl CcDyn for CcDummy { fn gc_ref_count(&self) -> usize { 1 } fn gc_traverse(&self, _tracer: &mut Tracer) {} fn gc_clone(&self) -> Box<dyn GcClone> { panic!("bug: CcDummy::gc_clone should never be called"); } } impl<T: Trace> Cc<T> { /// Constructs a new [`Cc<T>`](type.Cc.html) in a thread-local storage. /// /// To collect cycles, use [`collect_thread_cycles`](fn.collect_thread_cycles.html). pub fn new(value: T) -> Cc<T> { collect::THREAD_OBJECT_SPACE.with(|space| Self::new_in_space(value, space)) } } impl<T: Trace, O: AbstractObjectSpace> RawCc<T, O> { /// Constructs a new [`Cc<T>`](type.Cc.html) in the given /// [`ObjectSpace`](struct.ObjectSpace.html). /// /// To collect cycles, call `ObjectSpace::collect_cycles()`. pub(crate) fn new_in_space(value: T, space: &O) -> Self { let is_tracked = T::is_type_tracked(); let cc_box = RawCcBox { ref_count: space.new_ref_count(is_tracked), value: UnsafeCell::new(ManuallyDrop::new(value)), #[cfg(test)] name: debug::NEXT_DEBUG_NAME.with(|n| n.get().to_string()), }; let ccbox_ptr: *mut RawCcBox<T, O> = if is_tracked { // Create a GcHeader before the CcBox. This is similar to cpython. let header = space.empty_header(); let cc_box_with_header = RawCcBoxWithGcHeader { header, cc_box }; let mut boxed = Box::new(cc_box_with_header); // Fix-up fields in GcHeader. This is done after the creation of the // Box so the memory addresses are stable. space.insert(&mut boxed.header, &boxed.cc_box); debug_assert_eq!( mem::size_of::<O::Header>() + mem::size_of::<RawCcBox<T, O>>(), mem::size_of::<RawCcBoxWithGcHeader<T, O>>() ); let ptr: *mut RawCcBox<T, O> = &mut boxed.cc_box; Box::leak(boxed); ptr } else { Box::into_raw(Box::new(cc_box)) }; // safety: ccbox_ptr cannot be null from the above code. 
let non_null = unsafe { NonNull::new_unchecked(ccbox_ptr) }; let result = Self(non_null); if is_tracked { debug::log(|| (result.debug_name(), "new (CcBoxWithGcHeader)")); } else { debug::log(|| (result.debug_name(), "new (CcBox)")); } debug_assert_eq!(result.ref_count(), 1); result } /// Convert to `RawCc<dyn Trace>`. pub fn into_dyn(self) -> RawCc<dyn Trace, O> { #[cfg(feature = "nightly")] { // Requires CoerceUnsized, which is currently unstable. self } // safety: Trait object magic. Test by test_dyn_downcast. #[cfg(not(feature = "nightly"))] unsafe { // XXX: This depends on rust internals. But it works on stable. // Replace this with CoerceUnsized once that becomes stable. // Cc<dyn Trace> has 2 usize values: The first one is the same // as Cc<T>. The second one is the vtable. The vtable pointer // is the same as the second pointer of `&dyn Trace`. let mut fat_ptr: [usize; 2] = mem::transmute(self.inner().deref() as &dyn Trace); let self_ptr: usize = mem::transmute(self); fat_ptr[0] = self_ptr; mem::transmute(fat_ptr) } } } impl<T: Trace + Clone> Cc<T> { /// Update the value `T` in a copy-on-write way. /// /// If the ref count is 1, the value is updated in-place. /// Otherwise a new `Cc<T>` will be created. pub fn update_with(&mut self, mut update_func: impl FnMut(&mut T)) { let need_clone = self.ref_count() > 1; if need_clone { let mut value = <Cc<T>>::deref(self).clone(); update_func(&mut value); *self = Cc::new(value); } else { let value_ptr: *mut ManuallyDrop<T> = self.inner().value.get(); let value_mut: &mut T = unsafe { &mut *value_ptr }.deref_mut(); update_func(value_mut); } } } impl<T: ?Sized, O: AbstractObjectSpace> RawCcBox<T, O> { #[inline] fn header_ptr(&self) -> *const () { self.header() as *const _ as _ } #[inline] fn header(&self) -> &O::Header { debug_assert!(self.is_tracked()); // safety: See `Cc::new`. GcHeader is before CcBox for tracked objects. 
unsafe { cast_ref(self, -(mem::size_of::<O::Header>() as isize)) } } #[inline] fn is_tracked(&self) -> bool { self.ref_count.is_tracked() } #[inline] fn is_dropped(&self) -> bool { self.ref_count.is_dropped() } #[inline] fn inc_ref(&self) -> usize { self.ref_count.inc_ref() } #[inline] fn dec_ref(&self) -> usize { self.ref_count.dec_ref() } #[inline] fn ref_count(&self) -> usize { self.ref_count.ref_count() } #[inline] fn weak_count(&self) -> usize { self.ref_count.weak_count() } #[inline] fn set_dropped(&self) -> bool { self.ref_count.set_dropped() } #[inline] pub(crate) fn drop_t(&self) { let already_dropped = self.set_dropped(); if !already_dropped { debug::log(|| (self.debug_name(), "drop (T)")); // safety: is_dropped() check ensures T is only dropped once. Other // places (ex. gc collector) ensure that T is no longer accessed. unsafe { ManuallyDrop::drop(&mut *(self.value.get())) }; } } pub(crate) fn trace_t(&self, tracer: &mut Tracer) { if !self.is_tracked() { return; } debug::log(|| (self.debug_name(), "trace")); // For other non-`Cc<T>` container types, `trace` visit referents, // is recursive, and does not call `tracer` directly. For `Cc<T>`, // `trace` stops here, is non-recursive, and does apply `tracer` // to the actual `GcHeader`. It's expected that the upper layer // calls `gc_traverse` on everything (not just roots). 
tracer(self.header_ptr()); } pub(crate) fn debug_name(&self) -> String { #[cfg(test)] { self.name.clone() } #[cfg(not(test))] { #[allow(unused_mut)] let mut result = format!("{} at {:p}", std::any::type_name::<T>(), &self.value); #[cfg(all(feature = "debug", feature = "nightly"))] { if !self.is_dropped() && crate::debug::GC_DROPPING.with(|t| !t.get()) { let debug = self.deref().optional_debug(); if !debug.is_empty() { result += &format!(" {}", debug); } } } return result; } } } #[cfg(all(feature = "debug", feature = "nightly"))] pub(crate) trait OptionalDebug { fn optional_debug(&self) -> String; } #[cfg(all(feature = "debug", feature = "nightly"))] impl<T: ?Sized> OptionalDebug for T { default fn optional_debug(&self) -> String { "".to_string() } } #[cfg(all(feature = "debug", feature = "nightly"))] impl<T: std::fmt::Debug + ?Sized> OptionalDebug for T { fn optional_debug(&self) -> String { format!("{:?}", self) } } impl<T: ?Sized, O: AbstractObjectSpace> RawCc<T, O> { /// Obtains a "weak reference", a non-owning pointer. pub fn downgrade(&self) -> RawWeak<T, O> { let inner = self.inner(); inner.ref_count.inc_weak(); debug::log(|| { ( inner.debug_name(), format!("new-weak ({})", inner.ref_count.weak_count()), ) }); RawWeak(self.0) } /// Gets the reference count not considering weak references. #[inline] pub fn strong_count(&self) -> usize { self.ref_count() } } impl<T: ?Sized, O: AbstractObjectSpace> RawWeak<T, O> { /// Attempts to obtain a "strong reference". /// /// Returns `None` if the value has already been dropped. pub fn upgrade(&self) -> Option<RawCc<T, O>> { let inner = self.inner(); // Make the below operation "atomic". let _locked = inner.ref_count.locked(); if inner.is_dropped() { None } else { inner.inc_ref(); debug::log(|| { ( inner.debug_name(), format!("new-strong ({})", inner.ref_count.ref_count()), ) }); Some(RawCc(self.0)) } } /// Gets the reference count not considering weak references. 
#[inline] pub fn strong_count(&self) -> usize { self.inner().ref_count() } /// Get the weak (non-owning) reference count. #[inline] pub fn weak_count(&self) -> usize { self.inner().weak_count() } } impl<T: ?Sized, O: AbstractObjectSpace> RawCc<T, O> { #[inline] pub(crate) fn inner(&self) -> &RawCcBox<T, O> { // safety: CcBox lifetime maintained by ref count. Pointer is valid. unsafe { self.0.as_ref() } } /// `trace` without `T: Trace` bound. /// /// Useful for structures with `Cc<T>` fields where `T` does not implement /// `Trace`. For example, `struct S(Cc<Box<dyn MyTrait>>)`. To implement /// `Trace` for `S`, it can use `Cc::trace(&self.0, tracer)`. #[inline] pub fn trace(&self, tracer: &mut Tracer) { self.inner().trace_t(tracer); } #[inline] fn inc_ref(&self) -> usize { self.inner().inc_ref() } #[inline] fn dec_ref(&self) -> usize { self.inner().dec_ref() } #[inline] pub(crate) fn ref_count(&self) -> usize { self.inner().ref_count() } /// Get the weak (non-owning) reference count. #[inline] pub fn weak_count(&self) -> usize { self.inner().weak_count() } pub(crate) fn debug_name(&self) -> String { self.inner().debug_name() } } impl<T: ?Sized, O: AbstractObjectSpace> RawWeak<T, O> { #[inline] fn inner(&self) -> &RawCcBox<T, O> { // safety: CcBox lifetime maintained by ref count. Pointer is valid. unsafe { self.0.as_ref() } } } impl<T: ?Sized, O: AbstractObjectSpace> Clone for RawCc<T, O> { #[inline] fn clone(&self) -> Self { // In theory self.inner().ref_count.locked() is needed. // Practically this is an atomic operation that cannot be split so locking // becomes optional. 
// let _locked = self.inner().ref_count.locked(); self.inc_ref(); debug::log(|| (self.debug_name(), format!("clone ({})", self.ref_count()))); Self(self.0) } } impl<T: ?Sized, O: AbstractObjectSpace> Clone for RawWeak<T, O> { #[inline] fn clone(&self) -> Self { let inner = self.inner(); let ref_count = &inner.ref_count; ref_count.inc_weak(); debug::log(|| { ( inner.debug_name(), format!("clone-weak ({})", ref_count.weak_count()), ) }); Self(self.0) } } impl<T: ?Sized> Deref for Cc<T> { type Target = T; #[inline] fn deref(&self) -> &Self::Target { self.inner().deref() } } impl<T: ?Sized, O: AbstractObjectSpace> Deref for RawCcBox<T, O> { type Target = T; #[inline] fn deref(&self) -> &Self::Target { debug_assert!( !self.is_dropped(), concat!( "bug: accessing a dropped CcBox detected\n", "This usually happens after ignoring another panic triggered by the collector." ) ); // safety: CcBox (and its value) lifetime maintained by ref count. // If `Trace` is implemented correctly then the GC won't drop_t() // incorrectly and this pointer is valid. Otherwise the above // assertion can prevent UBs on debug build. unsafe { &*self.value.get() } } } fn drop_ccbox<T: ?Sized, O: AbstractObjectSpace>(cc_box: *mut RawCcBox<T, O>) { // safety: See Cc::new. The pointer was created by Box::into_raw. let cc_box: Box<RawCcBox<T, O>> = unsafe { Box::from_raw(cc_box) }; let is_tracked = cc_box.is_tracked(); if is_tracked { // The real object is CcBoxWithGcHeader. Drop that instead. // safety: See Cc::new for CcBoxWithGcHeader. let gc_box: Box<RawCcBoxWithGcHeader<T, O>> = unsafe { cast_box(cc_box) }; O::remove(&gc_box.header); // Drop T if it hasn't been dropped yet. // This needs to be after O::remove so the collector won't have a // chance to read dropped content. gc_box.cc_box.drop_t(); debug::log(|| (gc_box.cc_box.debug_name(), "drop (CcBoxWithGcHeader)")); drop(gc_box); } else { // Drop T if it hasn't been dropped yet. 
cc_box.drop_t(); debug::log(|| (cc_box.debug_name(), "drop (CcBox)")); drop(cc_box); } } impl<T: ?Sized, O: AbstractObjectSpace> Drop for RawCc<T, O> { fn drop(&mut self) { let ptr: *mut RawCcBox<T, O> = self.0.as_ptr(); let inner = self.inner(); // Block threaded collector. This is needed because "drop()" is a // complex operation. The whole operation needs to be "atomic". let _locked = inner.ref_count.locked(); let old_ref_count = self.dec_ref(); debug::log(|| (self.debug_name(), format!("drop ({})", self.ref_count()))); debug_assert!(old_ref_count >= 1); if old_ref_count == 1 { if self.weak_count() == 0 { // safety: CcBox lifetime maintained by ref count. drop_ccbox(ptr); } else { inner.drop_t(); } } } } impl<T: ?Sized, O: AbstractObjectSpace> Drop for RawWeak<T, O> { fn drop(&mut self) { let ptr: *mut RawCcBox<T, O> = self.0.as_ptr(); let inner = self.inner(); let ref_count = &inner.ref_count; // Block threaded collector to "freeze" the ref count, for safety. let _locked = ref_count.locked(); let old_ref_count = ref_count.ref_count(); let old_weak_count = ref_count.dec_weak(); debug::log(|| { ( inner.debug_name(), format!("drop-weak ({})", ref_count.weak_count()), ) }); debug_assert!(old_weak_count >= 1); if old_ref_count == 0 && old_weak_count == 1 { // safety: CcBox lifetime maintained by ref count. drop_ccbox(ptr); } } } impl<T: Trace + ?Sized, O: AbstractObjectSpace> CcDyn for RawCcBox<T, O> { fn gc_ref_count(&self) -> usize { self.ref_count() } fn gc_traverse(&self, tracer: &mut Tracer) { debug::log(|| (self.debug_name(), "gc_traverse")); T::trace(self.deref(), tracer) } fn gc_clone(&self) -> Box<dyn GcClone> { self.ref_count.inc_ref(); debug::log(|| { let msg = format!("gc_clone ({})", self.ref_count()); (self.debug_name(), msg) }); // safety: The pointer is compatible. The mutability is different only // to satisfy NonNull (NonNull::new requires &mut). The returned value // is still "immutable". &self can also never be nonnull. 
let ptr: NonNull<RawCcBox<T, O>> = unsafe { NonNull::new_unchecked(self as *const _ as *mut _) }; let cc = RawCc::<T, O>(ptr); Box::new(cc) } #[cfg(feature = "debug")] fn gc_debug_name(&self) -> String { self.debug_name() } } impl<T: Trace + ?Sized, O: AbstractObjectSpace> GcClone for RawCc<T, O> { fn gc_ref_count(&self) -> usize { self.ref_count() } fn gc_drop_t(&self) { self.inner().drop_t() } } impl<T: Trace> Trace for Cc<T> { fn trace(&self, tracer: &mut Tracer) { Cc::<T>::trace(self, tracer) } #[inline] fn is_type_tracked() -> bool { T::is_type_tracked() } } impl Trace for Cc<dyn Trace> { fn trace(&self, tracer: &mut Tracer) { Cc::<dyn Trace>::trace(self, tracer) } #[inline] fn is_type_tracked() -> bool { // Trait objects can be anything. true } } #[cfg(feature = "nightly")] impl<T: ?Sized + std::marker::Unsize<U>, U: ?Sized, O: AbstractObjectSpace> std::ops::CoerceUnsized<RawCc<U, O>> for RawCc<T, O> { } #[inline] unsafe fn
<T: ?Sized, R>(value: &T, offset_bytes: isize) -> &R { let ptr: *const T = value; let ptr: *const u8 = ptr as _; let ptr = ptr.offset(offset_bytes); &*(ptr as *const R) } #[inline] unsafe fn cast_box<T: ?Sized, O: AbstractObjectSpace>( value: Box<RawCcBox<T, O>>, ) -> Box<RawCcBoxWithGcHeader<T, O>> { let mut ptr: *const RawCcBox<T, O> = Box::into_raw(value); // ptr can be "thin" (1 pointer) or "fat" (2 pointers). // Change the first byte to point to the GcHeader. let pptr: *mut *const RawCcBox<T, O> = &mut ptr; let pptr: *mut *const O::Header = pptr as _; *pptr = (*pptr).offset(-1); let ptr: *mut RawCcBoxWithGcHeader<T, O> = mem::transmute(ptr); Box::from_raw(ptr) } #[cfg(test)] mod tests { use super::*; use crate::collect::Linked; /// Check that `GcHeader::value()` returns a working trait object. #[test] fn test_gc_header_value() { let v1: Cc<Box<dyn Trace>> = Cc::new(Box::new(1)); assert_eq!(v1.ref_count(), 1); let v2 = v1.clone(); assert_eq!(v1.ref_count(), 2); assert_eq!(v2.ref_count(), 2); let v3: &dyn CcDyn = v1.inner() as &dyn CcDyn; assert_eq!(v3.gc_ref_count(), 2); let v4: &dyn CcDyn = v2.inner().header().value(); assert_eq!(v4.gc_ref_count(), 2); } #[cfg(feature = "nightly")] #[test] fn test_unsize_coerce() { let _v: Cc<dyn Trace> = Cc::new(vec![1u8, 2, 3]); } }
cast_ref
identifier_name
cc.rs
use crate::collect; use crate::collect::AbstractObjectSpace; use crate::collect::ObjectSpace; use crate::debug; use crate::ref_count::RefCount; use crate::trace::Trace; use crate::trace::Tracer; use std::cell::UnsafeCell; use std::mem; use std::mem::ManuallyDrop; use std::ops::Deref; use std::ops::DerefMut; use std::panic::UnwindSafe; use std::ptr::NonNull; // Types not tracked by the cycle collector: // // CcBox<T> // +-----------+ <---+--- Cc<T> (pointer) // | ref_count | | // +-----------+ +--- Cc<T> (pointer) // | T (data) | // +-----------+ // // Types tracked by the cycle collector: // // CcBoxWithHeader<T> // +----------------------+ // | GcHeader | next | (GcHeader is in a linked list) // | | prev | // | | vptr<T> | // +----------------------+ <---+--- Cc<T> (pointer) // | CcBox<T> | ref_count | | // | | T (data) | +--- Cc<T> (pointer) // +----------------------+ /// The data shared by multiple `RawCc<T, O>` pointers. #[repr(C)] pub(crate) struct RawCcBox<T: ?Sized, O: AbstractObjectSpace> { pub(crate) ref_count: O::RefCount, #[cfg(test)] pub(crate) name: String, value: UnsafeCell<ManuallyDrop<T>>, } /// The real layout if `T` is tracked by the collector. The main APIs still use /// the `CcBox` type. This type is only used for allocation and deallocation. /// /// This is a private type. #[repr(C)] pub struct RawCcBoxWithGcHeader<T: ?Sized, O: AbstractObjectSpace> { header: O::Header, cc_box: RawCcBox<T, O>, } /// A single-threaded reference-counting pointer that integrates /// with cyclic garbage collection. /// /// See [module level documentation](index.html) for more details. /// /// [`Cc`](type.Cc.html) is not thread-safe. It does not implement `Send` /// or `Sync`: /// /// ```compile_fail /// use std::ops::Deref; /// use gcmodule::Cc; /// let cc = Cc::new(5); /// std::thread::spawn(move || { /// println!("{}", cc.deref()); /// }); /// ``` pub type Cc<T> = RawCc<T, ObjectSpace>; /// Weak reference of [`Cc`](type.Cc.html). 
pub type Weak<T> = RawWeak<T, ObjectSpace>; /// Low-level type for [`Cc<T>`](type.Cc.html). pub struct RawCc<T: ?Sized, O: AbstractObjectSpace>(NonNull<RawCcBox<T, O>>); /// Low-level type for [`Weak<T>`](type.Weak.html). pub struct RawWeak<T: ?Sized, O: AbstractObjectSpace>(NonNull<RawCcBox<T, O>>); // `ManuallyDrop<T>` does not implement `UnwindSafe`. But `CcBox::drop` does // make sure `T` is dropped. If `T` is unwind-safe, so does `CcBox<T>`. impl<T: UnwindSafe + ?Sized> UnwindSafe for RawCcBox<T, ObjectSpace> {} // `NonNull` does not implement `UnwindSafe`. But `Cc` and `Weak` only use it // as a "const" pointer. If `T` is unwind-safe, so does `Cc<T>`. impl<T: UnwindSafe + ?Sized, O: AbstractObjectSpace> UnwindSafe for RawCc<T, O> {} impl<T: UnwindSafe + ?Sized, O: AbstractObjectSpace> UnwindSafe for RawWeak<T, O> {} /// Type-erased `Cc<T>` with interfaces needed by GC. /// /// This is a private type. pub trait CcDyn { /// Returns the reference count for cycle detection. fn gc_ref_count(&self) -> usize; /// Visit referents for cycle detection. fn gc_traverse(&self, tracer: &mut Tracer); /// Get an cloned `Cc<dyn Trace>`. This has 2 purposes: /// - Keep a reference so `CcBox<T>` is not released in the next step. /// So metadata like `ref_count` can still be read. /// - Operate on the object. fn gc_clone(&self) -> Box<dyn GcClone>; #[cfg(feature = "debug")] /// Name used in collect.rs. fn gc_debug_name(&self) -> String { "?".to_string() } } /// Type-erased gc_clone result. /// /// This is a private type. pub trait GcClone { /// Force drop the value T. fn gc_drop_t(&self); /// Returns the reference count. This is useful for verification. fn gc_ref_count(&self) -> usize; } /// A dummy implementation without drop side-effects. pub(crate) struct CcDummy; impl CcDummy { pub(crate) fn ccdyn_vptr() -> *mut () { let mut dummy = CcDummy; // safety: To access vtable pointer. Stable API cannot do it. 
let fat_ptr: [*mut (); 2] = unsafe { mem::transmute(&mut dummy as &mut dyn CcDyn) }; fat_ptr[1] } } impl CcDyn for CcDummy { fn gc_ref_count(&self) -> usize { 1 } fn gc_traverse(&self, _tracer: &mut Tracer) {} fn gc_clone(&self) -> Box<dyn GcClone> { panic!("bug: CcDummy::gc_clone should never be called"); } } impl<T: Trace> Cc<T> { /// Constructs a new [`Cc<T>`](type.Cc.html) in a thread-local storage. /// /// To collect cycles, use [`collect_thread_cycles`](fn.collect_thread_cycles.html). pub fn new(value: T) -> Cc<T>
} impl<T: Trace, O: AbstractObjectSpace> RawCc<T, O> { /// Constructs a new [`Cc<T>`](type.Cc.html) in the given /// [`ObjectSpace`](struct.ObjectSpace.html). /// /// To collect cycles, call `ObjectSpace::collect_cycles()`. pub(crate) fn new_in_space(value: T, space: &O) -> Self { let is_tracked = T::is_type_tracked(); let cc_box = RawCcBox { ref_count: space.new_ref_count(is_tracked), value: UnsafeCell::new(ManuallyDrop::new(value)), #[cfg(test)] name: debug::NEXT_DEBUG_NAME.with(|n| n.get().to_string()), }; let ccbox_ptr: *mut RawCcBox<T, O> = if is_tracked { // Create a GcHeader before the CcBox. This is similar to cpython. let header = space.empty_header(); let cc_box_with_header = RawCcBoxWithGcHeader { header, cc_box }; let mut boxed = Box::new(cc_box_with_header); // Fix-up fields in GcHeader. This is done after the creation of the // Box so the memory addresses are stable. space.insert(&mut boxed.header, &boxed.cc_box); debug_assert_eq!( mem::size_of::<O::Header>() + mem::size_of::<RawCcBox<T, O>>(), mem::size_of::<RawCcBoxWithGcHeader<T, O>>() ); let ptr: *mut RawCcBox<T, O> = &mut boxed.cc_box; Box::leak(boxed); ptr } else { Box::into_raw(Box::new(cc_box)) }; // safety: ccbox_ptr cannot be null from the above code. let non_null = unsafe { NonNull::new_unchecked(ccbox_ptr) }; let result = Self(non_null); if is_tracked { debug::log(|| (result.debug_name(), "new (CcBoxWithGcHeader)")); } else { debug::log(|| (result.debug_name(), "new (CcBox)")); } debug_assert_eq!(result.ref_count(), 1); result } /// Convert to `RawCc<dyn Trace>`. pub fn into_dyn(self) -> RawCc<dyn Trace, O> { #[cfg(feature = "nightly")] { // Requires CoerceUnsized, which is currently unstable. self } // safety: Trait object magic. Test by test_dyn_downcast. #[cfg(not(feature = "nightly"))] unsafe { // XXX: This depends on rust internals. But it works on stable. // Replace this with CoerceUnsized once that becomes stable. 
// Cc<dyn Trace> has 2 usize values: The first one is the same // as Cc<T>. The second one is the vtable. The vtable pointer // is the same as the second pointer of `&dyn Trace`. let mut fat_ptr: [usize; 2] = mem::transmute(self.inner().deref() as &dyn Trace); let self_ptr: usize = mem::transmute(self); fat_ptr[0] = self_ptr; mem::transmute(fat_ptr) } } } impl<T: Trace + Clone> Cc<T> { /// Update the value `T` in a copy-on-write way. /// /// If the ref count is 1, the value is updated in-place. /// Otherwise a new `Cc<T>` will be created. pub fn update_with(&mut self, mut update_func: impl FnMut(&mut T)) { let need_clone = self.ref_count() > 1; if need_clone { let mut value = <Cc<T>>::deref(self).clone(); update_func(&mut value); *self = Cc::new(value); } else { let value_ptr: *mut ManuallyDrop<T> = self.inner().value.get(); let value_mut: &mut T = unsafe { &mut *value_ptr }.deref_mut(); update_func(value_mut); } } } impl<T: ?Sized, O: AbstractObjectSpace> RawCcBox<T, O> { #[inline] fn header_ptr(&self) -> *const () { self.header() as *const _ as _ } #[inline] fn header(&self) -> &O::Header { debug_assert!(self.is_tracked()); // safety: See `Cc::new`. GcHeader is before CcBox for tracked objects. unsafe { cast_ref(self, -(mem::size_of::<O::Header>() as isize)) } } #[inline] fn is_tracked(&self) -> bool { self.ref_count.is_tracked() } #[inline] fn is_dropped(&self) -> bool { self.ref_count.is_dropped() } #[inline] fn inc_ref(&self) -> usize { self.ref_count.inc_ref() } #[inline] fn dec_ref(&self) -> usize { self.ref_count.dec_ref() } #[inline] fn ref_count(&self) -> usize { self.ref_count.ref_count() } #[inline] fn weak_count(&self) -> usize { self.ref_count.weak_count() } #[inline] fn set_dropped(&self) -> bool { self.ref_count.set_dropped() } #[inline] pub(crate) fn drop_t(&self) { let already_dropped = self.set_dropped(); if !already_dropped { debug::log(|| (self.debug_name(), "drop (T)")); // safety: is_dropped() check ensures T is only dropped once. 
Other // places (ex. gc collector) ensure that T is no longer accessed. unsafe { ManuallyDrop::drop(&mut *(self.value.get())) }; } } pub(crate) fn trace_t(&self, tracer: &mut Tracer) { if !self.is_tracked() { return; } debug::log(|| (self.debug_name(), "trace")); // For other non-`Cc<T>` container types, `trace` visit referents, // is recursive, and does not call `tracer` directly. For `Cc<T>`, // `trace` stops here, is non-recursive, and does apply `tracer` // to the actual `GcHeader`. It's expected that the upper layer // calls `gc_traverse` on everything (not just roots). tracer(self.header_ptr()); } pub(crate) fn debug_name(&self) -> String { #[cfg(test)] { self.name.clone() } #[cfg(not(test))] { #[allow(unused_mut)] let mut result = format!("{} at {:p}", std::any::type_name::<T>(), &self.value); #[cfg(all(feature = "debug", feature = "nightly"))] { if !self.is_dropped() && crate::debug::GC_DROPPING.with(|t| !t.get()) { let debug = self.deref().optional_debug(); if !debug.is_empty() { result += &format!(" {}", debug); } } } return result; } } } #[cfg(all(feature = "debug", feature = "nightly"))] pub(crate) trait OptionalDebug { fn optional_debug(&self) -> String; } #[cfg(all(feature = "debug", feature = "nightly"))] impl<T: ?Sized> OptionalDebug for T { default fn optional_debug(&self) -> String { "".to_string() } } #[cfg(all(feature = "debug", feature = "nightly"))] impl<T: std::fmt::Debug + ?Sized> OptionalDebug for T { fn optional_debug(&self) -> String { format!("{:?}", self) } } impl<T: ?Sized, O: AbstractObjectSpace> RawCc<T, O> { /// Obtains a "weak reference", a non-owning pointer. pub fn downgrade(&self) -> RawWeak<T, O> { let inner = self.inner(); inner.ref_count.inc_weak(); debug::log(|| { ( inner.debug_name(), format!("new-weak ({})", inner.ref_count.weak_count()), ) }); RawWeak(self.0) } /// Gets the reference count not considering weak references. 
#[inline] pub fn strong_count(&self) -> usize { self.ref_count() } } impl<T: ?Sized, O: AbstractObjectSpace> RawWeak<T, O> { /// Attempts to obtain a "strong reference". /// /// Returns `None` if the value has already been dropped. pub fn upgrade(&self) -> Option<RawCc<T, O>> { let inner = self.inner(); // Make the below operation "atomic". let _locked = inner.ref_count.locked(); if inner.is_dropped() { None } else { inner.inc_ref(); debug::log(|| { ( inner.debug_name(), format!("new-strong ({})", inner.ref_count.ref_count()), ) }); Some(RawCc(self.0)) } } /// Gets the reference count not considering weak references. #[inline] pub fn strong_count(&self) -> usize { self.inner().ref_count() } /// Get the weak (non-owning) reference count. #[inline] pub fn weak_count(&self) -> usize { self.inner().weak_count() } } impl<T: ?Sized, O: AbstractObjectSpace> RawCc<T, O> { #[inline] pub(crate) fn inner(&self) -> &RawCcBox<T, O> { // safety: CcBox lifetime maintained by ref count. Pointer is valid. unsafe { self.0.as_ref() } } /// `trace` without `T: Trace` bound. /// /// Useful for structures with `Cc<T>` fields where `T` does not implement /// `Trace`. For example, `struct S(Cc<Box<dyn MyTrait>>)`. To implement /// `Trace` for `S`, it can use `Cc::trace(&self.0, tracer)`. #[inline] pub fn trace(&self, tracer: &mut Tracer) { self.inner().trace_t(tracer); } #[inline] fn inc_ref(&self) -> usize { self.inner().inc_ref() } #[inline] fn dec_ref(&self) -> usize { self.inner().dec_ref() } #[inline] pub(crate) fn ref_count(&self) -> usize { self.inner().ref_count() } /// Get the weak (non-owning) reference count. #[inline] pub fn weak_count(&self) -> usize { self.inner().weak_count() } pub(crate) fn debug_name(&self) -> String { self.inner().debug_name() } } impl<T: ?Sized, O: AbstractObjectSpace> RawWeak<T, O> { #[inline] fn inner(&self) -> &RawCcBox<T, O> { // safety: CcBox lifetime maintained by ref count. Pointer is valid. 
unsafe { self.0.as_ref() } } } impl<T: ?Sized, O: AbstractObjectSpace> Clone for RawCc<T, O> { #[inline] fn clone(&self) -> Self { // In theory self.inner().ref_count.locked() is needed. // Practically this is an atomic operation that cannot be split so locking // becomes optional. // let _locked = self.inner().ref_count.locked(); self.inc_ref(); debug::log(|| (self.debug_name(), format!("clone ({})", self.ref_count()))); Self(self.0) } } impl<T: ?Sized, O: AbstractObjectSpace> Clone for RawWeak<T, O> { #[inline] fn clone(&self) -> Self { let inner = self.inner(); let ref_count = &inner.ref_count; ref_count.inc_weak(); debug::log(|| { ( inner.debug_name(), format!("clone-weak ({})", ref_count.weak_count()), ) }); Self(self.0) } } impl<T: ?Sized> Deref for Cc<T> { type Target = T; #[inline] fn deref(&self) -> &Self::Target { self.inner().deref() } } impl<T: ?Sized, O: AbstractObjectSpace> Deref for RawCcBox<T, O> { type Target = T; #[inline] fn deref(&self) -> &Self::Target { debug_assert!( !self.is_dropped(), concat!( "bug: accessing a dropped CcBox detected\n", "This usually happens after ignoring another panic triggered by the collector." ) ); // safety: CcBox (and its value) lifetime maintained by ref count. // If `Trace` is implemented correctly then the GC won't drop_t() // incorrectly and this pointer is valid. Otherwise the above // assertion can prevent UBs on debug build. unsafe { &*self.value.get() } } } fn drop_ccbox<T: ?Sized, O: AbstractObjectSpace>(cc_box: *mut RawCcBox<T, O>) { // safety: See Cc::new. The pointer was created by Box::into_raw. let cc_box: Box<RawCcBox<T, O>> = unsafe { Box::from_raw(cc_box) }; let is_tracked = cc_box.is_tracked(); if is_tracked { // The real object is CcBoxWithGcHeader. Drop that instead. // safety: See Cc::new for CcBoxWithGcHeader. let gc_box: Box<RawCcBoxWithGcHeader<T, O>> = unsafe { cast_box(cc_box) }; O::remove(&gc_box.header); // Drop T if it hasn't been dropped yet. 
// This needs to be after O::remove so the collector won't have a // chance to read dropped content. gc_box.cc_box.drop_t(); debug::log(|| (gc_box.cc_box.debug_name(), "drop (CcBoxWithGcHeader)")); drop(gc_box); } else { // Drop T if it hasn't been dropped yet. cc_box.drop_t(); debug::log(|| (cc_box.debug_name(), "drop (CcBox)")); drop(cc_box); } } impl<T: ?Sized, O: AbstractObjectSpace> Drop for RawCc<T, O> { fn drop(&mut self) { let ptr: *mut RawCcBox<T, O> = self.0.as_ptr(); let inner = self.inner(); // Block threaded collector. This is needed because "drop()" is a // complex operation. The whole operation needs to be "atomic". let _locked = inner.ref_count.locked(); let old_ref_count = self.dec_ref(); debug::log(|| (self.debug_name(), format!("drop ({})", self.ref_count()))); debug_assert!(old_ref_count >= 1); if old_ref_count == 1 { if self.weak_count() == 0 { // safety: CcBox lifetime maintained by ref count. drop_ccbox(ptr); } else { inner.drop_t(); } } } } impl<T: ?Sized, O: AbstractObjectSpace> Drop for RawWeak<T, O> { fn drop(&mut self) { let ptr: *mut RawCcBox<T, O> = self.0.as_ptr(); let inner = self.inner(); let ref_count = &inner.ref_count; // Block threaded collector to "freeze" the ref count, for safety. let _locked = ref_count.locked(); let old_ref_count = ref_count.ref_count(); let old_weak_count = ref_count.dec_weak(); debug::log(|| { ( inner.debug_name(), format!("drop-weak ({})", ref_count.weak_count()), ) }); debug_assert!(old_weak_count >= 1); if old_ref_count == 0 && old_weak_count == 1 { // safety: CcBox lifetime maintained by ref count. 
drop_ccbox(ptr); } } } impl<T: Trace + ?Sized, O: AbstractObjectSpace> CcDyn for RawCcBox<T, O> { fn gc_ref_count(&self) -> usize { self.ref_count() } fn gc_traverse(&self, tracer: &mut Tracer) { debug::log(|| (self.debug_name(), "gc_traverse")); T::trace(self.deref(), tracer) } fn gc_clone(&self) -> Box<dyn GcClone> { self.ref_count.inc_ref(); debug::log(|| { let msg = format!("gc_clone ({})", self.ref_count()); (self.debug_name(), msg) }); // safety: The pointer is compatible. The mutability is different only // to satisfy NonNull (NonNull::new requires &mut). The returned value // is still "immutable". &self can also never be nonnull. let ptr: NonNull<RawCcBox<T, O>> = unsafe { NonNull::new_unchecked(self as *const _ as *mut _) }; let cc = RawCc::<T, O>(ptr); Box::new(cc) } #[cfg(feature = "debug")] fn gc_debug_name(&self) -> String { self.debug_name() } } impl<T: Trace + ?Sized, O: AbstractObjectSpace> GcClone for RawCc<T, O> { fn gc_ref_count(&self) -> usize { self.ref_count() } fn gc_drop_t(&self) { self.inner().drop_t() } } impl<T: Trace> Trace for Cc<T> { fn trace(&self, tracer: &mut Tracer) { Cc::<T>::trace(self, tracer) } #[inline] fn is_type_tracked() -> bool { T::is_type_tracked() } } impl Trace for Cc<dyn Trace> { fn trace(&self, tracer: &mut Tracer) { Cc::<dyn Trace>::trace(self, tracer) } #[inline] fn is_type_tracked() -> bool { // Trait objects can be anything. 
true } } #[cfg(feature = "nightly")] impl<T: ?Sized + std::marker::Unsize<U>, U: ?Sized, O: AbstractObjectSpace> std::ops::CoerceUnsized<RawCc<U, O>> for RawCc<T, O> { } #[inline] unsafe fn cast_ref<T: ?Sized, R>(value: &T, offset_bytes: isize) -> &R { let ptr: *const T = value; let ptr: *const u8 = ptr as _; let ptr = ptr.offset(offset_bytes); &*(ptr as *const R) } #[inline] unsafe fn cast_box<T: ?Sized, O: AbstractObjectSpace>( value: Box<RawCcBox<T, O>>, ) -> Box<RawCcBoxWithGcHeader<T, O>> { let mut ptr: *const RawCcBox<T, O> = Box::into_raw(value); // ptr can be "thin" (1 pointer) or "fat" (2 pointers). // Change the first byte to point to the GcHeader. let pptr: *mut *const RawCcBox<T, O> = &mut ptr; let pptr: *mut *const O::Header = pptr as _; *pptr = (*pptr).offset(-1); let ptr: *mut RawCcBoxWithGcHeader<T, O> = mem::transmute(ptr); Box::from_raw(ptr) } #[cfg(test)] mod tests { use super::*; use crate::collect::Linked; /// Check that `GcHeader::value()` returns a working trait object. #[test] fn test_gc_header_value() { let v1: Cc<Box<dyn Trace>> = Cc::new(Box::new(1)); assert_eq!(v1.ref_count(), 1); let v2 = v1.clone(); assert_eq!(v1.ref_count(), 2); assert_eq!(v2.ref_count(), 2); let v3: &dyn CcDyn = v1.inner() as &dyn CcDyn; assert_eq!(v3.gc_ref_count(), 2); let v4: &dyn CcDyn = v2.inner().header().value(); assert_eq!(v4.gc_ref_count(), 2); } #[cfg(feature = "nightly")] #[test] fn test_unsize_coerce() { let _v: Cc<dyn Trace> = Cc::new(vec![1u8, 2, 3]); } }
{ collect::THREAD_OBJECT_SPACE.with(|space| Self::new_in_space(value, space)) }
identifier_body
cc.rs
use crate::collect; use crate::collect::AbstractObjectSpace; use crate::collect::ObjectSpace; use crate::debug; use crate::ref_count::RefCount; use crate::trace::Trace; use crate::trace::Tracer; use std::cell::UnsafeCell; use std::mem; use std::mem::ManuallyDrop; use std::ops::Deref; use std::ops::DerefMut; use std::panic::UnwindSafe; use std::ptr::NonNull; // Types not tracked by the cycle collector: // // CcBox<T> // +-----------+ <---+--- Cc<T> (pointer) // | ref_count | | // +-----------+ +--- Cc<T> (pointer) // | T (data) | // +-----------+ // // Types tracked by the cycle collector: // // CcBoxWithHeader<T> // +----------------------+ // | GcHeader | next | (GcHeader is in a linked list) // | | prev | // | | vptr<T> | // +----------------------+ <---+--- Cc<T> (pointer) // | CcBox<T> | ref_count | | // | | T (data) | +--- Cc<T> (pointer) // +----------------------+ /// The data shared by multiple `RawCc<T, O>` pointers. #[repr(C)] pub(crate) struct RawCcBox<T: ?Sized, O: AbstractObjectSpace> { pub(crate) ref_count: O::RefCount, #[cfg(test)] pub(crate) name: String, value: UnsafeCell<ManuallyDrop<T>>, } /// The real layout if `T` is tracked by the collector. The main APIs still use /// the `CcBox` type. This type is only used for allocation and deallocation. /// /// This is a private type. #[repr(C)] pub struct RawCcBoxWithGcHeader<T: ?Sized, O: AbstractObjectSpace> { header: O::Header, cc_box: RawCcBox<T, O>, } /// A single-threaded reference-counting pointer that integrates /// with cyclic garbage collection. /// /// See [module level documentation](index.html) for more details. /// /// [`Cc`](type.Cc.html) is not thread-safe. It does not implement `Send` /// or `Sync`: /// /// ```compile_fail /// use std::ops::Deref; /// use gcmodule::Cc; /// let cc = Cc::new(5); /// std::thread::spawn(move || { /// println!("{}", cc.deref()); /// }); /// ``` pub type Cc<T> = RawCc<T, ObjectSpace>; /// Weak reference of [`Cc`](type.Cc.html). 
pub type Weak<T> = RawWeak<T, ObjectSpace>; /// Low-level type for [`Cc<T>`](type.Cc.html). pub struct RawCc<T: ?Sized, O: AbstractObjectSpace>(NonNull<RawCcBox<T, O>>); /// Low-level type for [`Weak<T>`](type.Weak.html). pub struct RawWeak<T: ?Sized, O: AbstractObjectSpace>(NonNull<RawCcBox<T, O>>); // `ManuallyDrop<T>` does not implement `UnwindSafe`. But `CcBox::drop` does // make sure `T` is dropped. If `T` is unwind-safe, so does `CcBox<T>`. impl<T: UnwindSafe + ?Sized> UnwindSafe for RawCcBox<T, ObjectSpace> {} // `NonNull` does not implement `UnwindSafe`. But `Cc` and `Weak` only use it // as a "const" pointer. If `T` is unwind-safe, so does `Cc<T>`. impl<T: UnwindSafe + ?Sized, O: AbstractObjectSpace> UnwindSafe for RawCc<T, O> {} impl<T: UnwindSafe + ?Sized, O: AbstractObjectSpace> UnwindSafe for RawWeak<T, O> {} /// Type-erased `Cc<T>` with interfaces needed by GC. /// /// This is a private type. pub trait CcDyn { /// Returns the reference count for cycle detection. fn gc_ref_count(&self) -> usize; /// Visit referents for cycle detection. fn gc_traverse(&self, tracer: &mut Tracer); /// Get an cloned `Cc<dyn Trace>`. This has 2 purposes: /// - Keep a reference so `CcBox<T>` is not released in the next step. /// So metadata like `ref_count` can still be read. /// - Operate on the object. fn gc_clone(&self) -> Box<dyn GcClone>; #[cfg(feature = "debug")] /// Name used in collect.rs. fn gc_debug_name(&self) -> String { "?".to_string() } } /// Type-erased gc_clone result. /// /// This is a private type. pub trait GcClone {
/// Force drop the value T. fn gc_drop_t(&self); /// Returns the reference count. This is useful for verification. fn gc_ref_count(&self) -> usize; } /// A dummy implementation without drop side-effects. pub(crate) struct CcDummy; impl CcDummy { pub(crate) fn ccdyn_vptr() -> *mut () { let mut dummy = CcDummy; // safety: To access vtable pointer. Stable API cannot do it. let fat_ptr: [*mut (); 2] = unsafe { mem::transmute(&mut dummy as &mut dyn CcDyn) }; fat_ptr[1] } } impl CcDyn for CcDummy { fn gc_ref_count(&self) -> usize { 1 } fn gc_traverse(&self, _tracer: &mut Tracer) {} fn gc_clone(&self) -> Box<dyn GcClone> { panic!("bug: CcDummy::gc_clone should never be called"); } } impl<T: Trace> Cc<T> { /// Constructs a new [`Cc<T>`](type.Cc.html) in a thread-local storage. /// /// To collect cycles, use [`collect_thread_cycles`](fn.collect_thread_cycles.html). pub fn new(value: T) -> Cc<T> { collect::THREAD_OBJECT_SPACE.with(|space| Self::new_in_space(value, space)) } } impl<T: Trace, O: AbstractObjectSpace> RawCc<T, O> { /// Constructs a new [`Cc<T>`](type.Cc.html) in the given /// [`ObjectSpace`](struct.ObjectSpace.html). /// /// To collect cycles, call `ObjectSpace::collect_cycles()`. pub(crate) fn new_in_space(value: T, space: &O) -> Self { let is_tracked = T::is_type_tracked(); let cc_box = RawCcBox { ref_count: space.new_ref_count(is_tracked), value: UnsafeCell::new(ManuallyDrop::new(value)), #[cfg(test)] name: debug::NEXT_DEBUG_NAME.with(|n| n.get().to_string()), }; let ccbox_ptr: *mut RawCcBox<T, O> = if is_tracked { // Create a GcHeader before the CcBox. This is similar to cpython. let header = space.empty_header(); let cc_box_with_header = RawCcBoxWithGcHeader { header, cc_box }; let mut boxed = Box::new(cc_box_with_header); // Fix-up fields in GcHeader. This is done after the creation of the // Box so the memory addresses are stable. 
space.insert(&mut boxed.header, &boxed.cc_box); debug_assert_eq!( mem::size_of::<O::Header>() + mem::size_of::<RawCcBox<T, O>>(), mem::size_of::<RawCcBoxWithGcHeader<T, O>>() ); let ptr: *mut RawCcBox<T, O> = &mut boxed.cc_box; Box::leak(boxed); ptr } else { Box::into_raw(Box::new(cc_box)) }; // safety: ccbox_ptr cannot be null from the above code. let non_null = unsafe { NonNull::new_unchecked(ccbox_ptr) }; let result = Self(non_null); if is_tracked { debug::log(|| (result.debug_name(), "new (CcBoxWithGcHeader)")); } else { debug::log(|| (result.debug_name(), "new (CcBox)")); } debug_assert_eq!(result.ref_count(), 1); result } /// Convert to `RawCc<dyn Trace>`. pub fn into_dyn(self) -> RawCc<dyn Trace, O> { #[cfg(feature = "nightly")] { // Requires CoerceUnsized, which is currently unstable. self } // safety: Trait object magic. Test by test_dyn_downcast. #[cfg(not(feature = "nightly"))] unsafe { // XXX: This depends on rust internals. But it works on stable. // Replace this with CoerceUnsized once that becomes stable. // Cc<dyn Trace> has 2 usize values: The first one is the same // as Cc<T>. The second one is the vtable. The vtable pointer // is the same as the second pointer of `&dyn Trace`. let mut fat_ptr: [usize; 2] = mem::transmute(self.inner().deref() as &dyn Trace); let self_ptr: usize = mem::transmute(self); fat_ptr[0] = self_ptr; mem::transmute(fat_ptr) } } } impl<T: Trace + Clone> Cc<T> { /// Update the value `T` in a copy-on-write way. /// /// If the ref count is 1, the value is updated in-place. /// Otherwise a new `Cc<T>` will be created. 
pub fn update_with(&mut self, mut update_func: impl FnMut(&mut T)) { let need_clone = self.ref_count() > 1; if need_clone { let mut value = <Cc<T>>::deref(self).clone(); update_func(&mut value); *self = Cc::new(value); } else { let value_ptr: *mut ManuallyDrop<T> = self.inner().value.get(); let value_mut: &mut T = unsafe { &mut *value_ptr }.deref_mut(); update_func(value_mut); } } } impl<T: ?Sized, O: AbstractObjectSpace> RawCcBox<T, O> { #[inline] fn header_ptr(&self) -> *const () { self.header() as *const _ as _ } #[inline] fn header(&self) -> &O::Header { debug_assert!(self.is_tracked()); // safety: See `Cc::new`. GcHeader is before CcBox for tracked objects. unsafe { cast_ref(self, -(mem::size_of::<O::Header>() as isize)) } } #[inline] fn is_tracked(&self) -> bool { self.ref_count.is_tracked() } #[inline] fn is_dropped(&self) -> bool { self.ref_count.is_dropped() } #[inline] fn inc_ref(&self) -> usize { self.ref_count.inc_ref() } #[inline] fn dec_ref(&self) -> usize { self.ref_count.dec_ref() } #[inline] fn ref_count(&self) -> usize { self.ref_count.ref_count() } #[inline] fn weak_count(&self) -> usize { self.ref_count.weak_count() } #[inline] fn set_dropped(&self) -> bool { self.ref_count.set_dropped() } #[inline] pub(crate) fn drop_t(&self) { let already_dropped = self.set_dropped(); if !already_dropped { debug::log(|| (self.debug_name(), "drop (T)")); // safety: is_dropped() check ensures T is only dropped once. Other // places (ex. gc collector) ensure that T is no longer accessed. unsafe { ManuallyDrop::drop(&mut *(self.value.get())) }; } } pub(crate) fn trace_t(&self, tracer: &mut Tracer) { if !self.is_tracked() { return; } debug::log(|| (self.debug_name(), "trace")); // For other non-`Cc<T>` container types, `trace` visit referents, // is recursive, and does not call `tracer` directly. For `Cc<T>`, // `trace` stops here, is non-recursive, and does apply `tracer` // to the actual `GcHeader`. 
It's expected that the upper layer // calls `gc_traverse` on everything (not just roots). tracer(self.header_ptr()); } pub(crate) fn debug_name(&self) -> String { #[cfg(test)] { self.name.clone() } #[cfg(not(test))] { #[allow(unused_mut)] let mut result = format!("{} at {:p}", std::any::type_name::<T>(), &self.value); #[cfg(all(feature = "debug", feature = "nightly"))] { if !self.is_dropped() && crate::debug::GC_DROPPING.with(|t| !t.get()) { let debug = self.deref().optional_debug(); if !debug.is_empty() { result += &format!(" {}", debug); } } } return result; } } } #[cfg(all(feature = "debug", feature = "nightly"))] pub(crate) trait OptionalDebug { fn optional_debug(&self) -> String; } #[cfg(all(feature = "debug", feature = "nightly"))] impl<T: ?Sized> OptionalDebug for T { default fn optional_debug(&self) -> String { "".to_string() } } #[cfg(all(feature = "debug", feature = "nightly"))] impl<T: std::fmt::Debug + ?Sized> OptionalDebug for T { fn optional_debug(&self) -> String { format!("{:?}", self) } } impl<T: ?Sized, O: AbstractObjectSpace> RawCc<T, O> { /// Obtains a "weak reference", a non-owning pointer. pub fn downgrade(&self) -> RawWeak<T, O> { let inner = self.inner(); inner.ref_count.inc_weak(); debug::log(|| { ( inner.debug_name(), format!("new-weak ({})", inner.ref_count.weak_count()), ) }); RawWeak(self.0) } /// Gets the reference count not considering weak references. #[inline] pub fn strong_count(&self) -> usize { self.ref_count() } } impl<T: ?Sized, O: AbstractObjectSpace> RawWeak<T, O> { /// Attempts to obtain a "strong reference". /// /// Returns `None` if the value has already been dropped. pub fn upgrade(&self) -> Option<RawCc<T, O>> { let inner = self.inner(); // Make the below operation "atomic". 
let _locked = inner.ref_count.locked(); if inner.is_dropped() { None } else { inner.inc_ref(); debug::log(|| { ( inner.debug_name(), format!("new-strong ({})", inner.ref_count.ref_count()), ) }); Some(RawCc(self.0)) } } /// Gets the reference count not considering weak references. #[inline] pub fn strong_count(&self) -> usize { self.inner().ref_count() } /// Get the weak (non-owning) reference count. #[inline] pub fn weak_count(&self) -> usize { self.inner().weak_count() } } impl<T: ?Sized, O: AbstractObjectSpace> RawCc<T, O> { #[inline] pub(crate) fn inner(&self) -> &RawCcBox<T, O> { // safety: CcBox lifetime maintained by ref count. Pointer is valid. unsafe { self.0.as_ref() } } /// `trace` without `T: Trace` bound. /// /// Useful for structures with `Cc<T>` fields where `T` does not implement /// `Trace`. For example, `struct S(Cc<Box<dyn MyTrait>>)`. To implement /// `Trace` for `S`, it can use `Cc::trace(&self.0, tracer)`. #[inline] pub fn trace(&self, tracer: &mut Tracer) { self.inner().trace_t(tracer); } #[inline] fn inc_ref(&self) -> usize { self.inner().inc_ref() } #[inline] fn dec_ref(&self) -> usize { self.inner().dec_ref() } #[inline] pub(crate) fn ref_count(&self) -> usize { self.inner().ref_count() } /// Get the weak (non-owning) reference count. #[inline] pub fn weak_count(&self) -> usize { self.inner().weak_count() } pub(crate) fn debug_name(&self) -> String { self.inner().debug_name() } } impl<T: ?Sized, O: AbstractObjectSpace> RawWeak<T, O> { #[inline] fn inner(&self) -> &RawCcBox<T, O> { // safety: CcBox lifetime maintained by ref count. Pointer is valid. unsafe { self.0.as_ref() } } } impl<T: ?Sized, O: AbstractObjectSpace> Clone for RawCc<T, O> { #[inline] fn clone(&self) -> Self { // In theory self.inner().ref_count.locked() is needed. // Practically this is an atomic operation that cannot be split so locking // becomes optional. 
// let _locked = self.inner().ref_count.locked(); self.inc_ref(); debug::log(|| (self.debug_name(), format!("clone ({})", self.ref_count()))); Self(self.0) } } impl<T: ?Sized, O: AbstractObjectSpace> Clone for RawWeak<T, O> { #[inline] fn clone(&self) -> Self { let inner = self.inner(); let ref_count = &inner.ref_count; ref_count.inc_weak(); debug::log(|| { ( inner.debug_name(), format!("clone-weak ({})", ref_count.weak_count()), ) }); Self(self.0) } } impl<T: ?Sized> Deref for Cc<T> { type Target = T; #[inline] fn deref(&self) -> &Self::Target { self.inner().deref() } } impl<T: ?Sized, O: AbstractObjectSpace> Deref for RawCcBox<T, O> { type Target = T; #[inline] fn deref(&self) -> &Self::Target { debug_assert!( !self.is_dropped(), concat!( "bug: accessing a dropped CcBox detected\n", "This usually happens after ignoring another panic triggered by the collector." ) ); // safety: CcBox (and its value) lifetime maintained by ref count. // If `Trace` is implemented correctly then the GC won't drop_t() // incorrectly and this pointer is valid. Otherwise the above // assertion can prevent UBs on debug build. unsafe { &*self.value.get() } } } fn drop_ccbox<T: ?Sized, O: AbstractObjectSpace>(cc_box: *mut RawCcBox<T, O>) { // safety: See Cc::new. The pointer was created by Box::into_raw. let cc_box: Box<RawCcBox<T, O>> = unsafe { Box::from_raw(cc_box) }; let is_tracked = cc_box.is_tracked(); if is_tracked { // The real object is CcBoxWithGcHeader. Drop that instead. // safety: See Cc::new for CcBoxWithGcHeader. let gc_box: Box<RawCcBoxWithGcHeader<T, O>> = unsafe { cast_box(cc_box) }; O::remove(&gc_box.header); // Drop T if it hasn't been dropped yet. // This needs to be after O::remove so the collector won't have a // chance to read dropped content. gc_box.cc_box.drop_t(); debug::log(|| (gc_box.cc_box.debug_name(), "drop (CcBoxWithGcHeader)")); drop(gc_box); } else { // Drop T if it hasn't been dropped yet. 
cc_box.drop_t(); debug::log(|| (cc_box.debug_name(), "drop (CcBox)")); drop(cc_box); } } impl<T: ?Sized, O: AbstractObjectSpace> Drop for RawCc<T, O> { fn drop(&mut self) { let ptr: *mut RawCcBox<T, O> = self.0.as_ptr(); let inner = self.inner(); // Block threaded collector. This is needed because "drop()" is a // complex operation. The whole operation needs to be "atomic". let _locked = inner.ref_count.locked(); let old_ref_count = self.dec_ref(); debug::log(|| (self.debug_name(), format!("drop ({})", self.ref_count()))); debug_assert!(old_ref_count >= 1); if old_ref_count == 1 { if self.weak_count() == 0 { // safety: CcBox lifetime maintained by ref count. drop_ccbox(ptr); } else { inner.drop_t(); } } } } impl<T: ?Sized, O: AbstractObjectSpace> Drop for RawWeak<T, O> { fn drop(&mut self) { let ptr: *mut RawCcBox<T, O> = self.0.as_ptr(); let inner = self.inner(); let ref_count = &inner.ref_count; // Block threaded collector to "freeze" the ref count, for safety. let _locked = ref_count.locked(); let old_ref_count = ref_count.ref_count(); let old_weak_count = ref_count.dec_weak(); debug::log(|| { ( inner.debug_name(), format!("drop-weak ({})", ref_count.weak_count()), ) }); debug_assert!(old_weak_count >= 1); if old_ref_count == 0 && old_weak_count == 1 { // safety: CcBox lifetime maintained by ref count. drop_ccbox(ptr); } } } impl<T: Trace + ?Sized, O: AbstractObjectSpace> CcDyn for RawCcBox<T, O> { fn gc_ref_count(&self) -> usize { self.ref_count() } fn gc_traverse(&self, tracer: &mut Tracer) { debug::log(|| (self.debug_name(), "gc_traverse")); T::trace(self.deref(), tracer) } fn gc_clone(&self) -> Box<dyn GcClone> { self.ref_count.inc_ref(); debug::log(|| { let msg = format!("gc_clone ({})", self.ref_count()); (self.debug_name(), msg) }); // safety: The pointer is compatible. The mutability is different only // to satisfy NonNull (NonNull::new requires &mut). The returned value // is still "immutable". &self can also never be nonnull. 
let ptr: NonNull<RawCcBox<T, O>> = unsafe { NonNull::new_unchecked(self as *const _ as *mut _) }; let cc = RawCc::<T, O>(ptr); Box::new(cc) } #[cfg(feature = "debug")] fn gc_debug_name(&self) -> String { self.debug_name() } } impl<T: Trace + ?Sized, O: AbstractObjectSpace> GcClone for RawCc<T, O> { fn gc_ref_count(&self) -> usize { self.ref_count() } fn gc_drop_t(&self) { self.inner().drop_t() } } impl<T: Trace> Trace for Cc<T> { fn trace(&self, tracer: &mut Tracer) { Cc::<T>::trace(self, tracer) } #[inline] fn is_type_tracked() -> bool { T::is_type_tracked() } } impl Trace for Cc<dyn Trace> { fn trace(&self, tracer: &mut Tracer) { Cc::<dyn Trace>::trace(self, tracer) } #[inline] fn is_type_tracked() -> bool { // Trait objects can be anything. true } } #[cfg(feature = "nightly")] impl<T: ?Sized + std::marker::Unsize<U>, U: ?Sized, O: AbstractObjectSpace> std::ops::CoerceUnsized<RawCc<U, O>> for RawCc<T, O> { } #[inline] unsafe fn cast_ref<T: ?Sized, R>(value: &T, offset_bytes: isize) -> &R { let ptr: *const T = value; let ptr: *const u8 = ptr as _; let ptr = ptr.offset(offset_bytes); &*(ptr as *const R) } #[inline] unsafe fn cast_box<T: ?Sized, O: AbstractObjectSpace>( value: Box<RawCcBox<T, O>>, ) -> Box<RawCcBoxWithGcHeader<T, O>> { let mut ptr: *const RawCcBox<T, O> = Box::into_raw(value); // ptr can be "thin" (1 pointer) or "fat" (2 pointers). // Change the first byte to point to the GcHeader. let pptr: *mut *const RawCcBox<T, O> = &mut ptr; let pptr: *mut *const O::Header = pptr as _; *pptr = (*pptr).offset(-1); let ptr: *mut RawCcBoxWithGcHeader<T, O> = mem::transmute(ptr); Box::from_raw(ptr) } #[cfg(test)] mod tests { use super::*; use crate::collect::Linked; /// Check that `GcHeader::value()` returns a working trait object. 
#[test] fn test_gc_header_value() { let v1: Cc<Box<dyn Trace>> = Cc::new(Box::new(1)); assert_eq!(v1.ref_count(), 1); let v2 = v1.clone(); assert_eq!(v1.ref_count(), 2); assert_eq!(v2.ref_count(), 2); let v3: &dyn CcDyn = v1.inner() as &dyn CcDyn; assert_eq!(v3.gc_ref_count(), 2); let v4: &dyn CcDyn = v2.inner().header().value(); assert_eq!(v4.gc_ref_count(), 2); } #[cfg(feature = "nightly")] #[test] fn test_unsize_coerce() { let _v: Cc<dyn Trace> = Cc::new(vec![1u8, 2, 3]); } }
random_line_split
agent_test.py
""" This file contains test cases to verify the correct implementation of the functions required for this project including minimax, alphabeta, and iterative deepening. The heuristic function is tested for conformance to the expected interface, but cannot be automatically assessed for correctness. """ import unittest import timeit import signal import isolation import game_agent from collections import Counter from copy import deepcopy from copy import copy from functools import wraps WRONG_MOVE = "Your {} search returned an invalid move at search depth {}." + \ "\nValid choices: {}\nYour selection: {}" WRONG_NUM_EXPLORED = "Your {} search visited the wrong nodes at search " + \ "depth {}. If the number of visits is too large, " + \ "make sure that iterative deepening is only running " + \ "when the `iterative` flag is set in the agent " + \ "constructor.\nMax explored size: {}\nNumber you " + \ "explored: {}" UNEXPECTED_VISIT = "Your {} search did not visit the number of expected " + \ "unique nodes at search depth {}.\nMax explored size: " + \ "{}\nNumber you explored: {}" ID_ERROR = "Your ID search returned the wrong move at a depth of {} with " + \ "a {}ms time limit. {} {} {}" ID_FAIL = "Your agent did not explore enough nodes during the search; it " + \ "did not finish the first layer of available moves." TIMER_MARGIN = 15 # time (in ms) to leave on the timer to avoid timeout def curr_time_millis(): return 1000 * timeit.default_timer() def timeout(time_limit): """ Function decorator for unittest test cases to specify test case timeout. """ class TimeoutException(Exception): """ Subclass Exception to catch timer expiration during search """ pass def handler(*args, **kwargs): """ Generic handler to raise an exception when a timer expires """ raise TimeoutException("Test aborted due to timeout. 
Test was " + "expected to finish in less than {} second(s).".format(time_limit)) def wrapUnitTest(testcase): @wraps(testcase) def testWrapper(self, *args, **kwargs): signal.signal(signal.SIGALRM, handler) signal.alarm(time_limit) try: return testcase(self, *args, **kwargs) finally: signal.alarm(0) return testWrapper return wrapUnitTest class EvalTable(): def __init__(self, table): self.table = table def score(self, game, player): row, col = game.get_player_location(player) return self.table[row][col] class CounterBoard(isolation.Board): def __init__(self, *args, **kwargs): super(CounterBoard, self).__init__(*args, **kwargs) self.counter = Counter() self.visited = set() def copy(self): new_board = CounterBoard(self.__player_1__, self.__player_2__, width=self.width, height=self.height) new_board.move_count = self.move_count new_board.__active_player__ = self.__active_player__ new_board.__inactive_player__ = self.__inactive_player__ new_board.__last_player_move__ = copy(self.__last_player_move__) new_board.__player_symbols__ = copy(self.__player_symbols__) new_board.__board_state__ = deepcopy(self.__board_state__) new_board.counter = self.counter new_board.visited = self.visited return new_board def forecast_move(self, move): self.counter[move] += 1 self.visited.add(move) new_board = self.copy() new_board.apply_move(move) return new_board @property def counts(self): """ Return counts of (total, unique) nodes visited """ return sum(self.counter.values()), len(self.visited) class Project1Test(unittest.TestCase): def initAUT(self, depth, eval_fn, iterative=False, method="minimax", loc1=(3, 3), loc2=(0, 0), w=7, h=7): reload(game_agent) agentUT = game_agent.CustomPlayer(depth, eval_fn, iterative, method) board = CounterBoard(agentUT, 'null_agent', w, h) board.apply_move(loc1) board.apply_move(loc2) return agentUT, board @timeout(1) # @unittest.skip("Skip minimax test.") # Uncomment this line to skip test def test_minimax(self): """ Test CustomPlayer.minimax """ h, w = 7, 
7 method = "minimax" value_table = [[0] * w for _ in range(h)] value_table[1][5] = 1 value_table[4][3] = 2 value_table[6][6] = 3 eval_fn = EvalTable(value_table) expected_moves = [set([(1, 5)]), set([(3, 1), (3, 5)]), set([(3, 5), (4, 2)])] counts = [(8, 8), (92, 27), (1650, 43)] for idx, depth in enumerate([1, 3, 5]):
@timeout(1) # @unittest.skip("Skip alpha-beta test.") # Uncomment this line to skip test def test_alphabeta(self): """ Test CustomPlayer.alphabeta """ h, w = 7, 7 method = "alphabeta" value_table = [[0] * w for _ in range(h)] value_table[2][5] = 1 value_table[0][4] = 2 value_table[1][0] = 3 value_table[5][5] = 4 eval_fn = EvalTable(value_table) expected_moves = [set([(2, 5)]), set([(2, 5)]), set([(1, 4)]), set([(1, 4), (2, 5)])] counts = [(2, 2), (26, 13), (552, 36), (10564, 47)] for idx, depth in enumerate([1, 3, 5, 7]): agentUT, board = self.initAUT(depth, eval_fn, False, method, loc1=(0, 6), loc2=(0, 0)) move = agentUT.get_move(board, board.get_legal_moves(), lambda: 1e4) num_explored_valid = board.counts[0] <= counts[idx][0] num_unique_valid = board.counts[1] <= counts[idx][1] self.assertTrue(num_explored_valid, WRONG_NUM_EXPLORED.format(method, depth, counts[idx][0], board.counts[0])) self.assertTrue(num_unique_valid, UNEXPECTED_VISIT.format(method, depth, counts[idx][1], board.counts[1])) self.assertIn(move, expected_moves[idx], WRONG_MOVE.format(method, depth, expected_moves[idx], move)) @timeout(1) # @unittest.skip("Skip alpha-beta pruning test.") # Uncomment this line to skip test def test_alphabeta_pruning(self): """ Test pruning in CustomPlayer.alphabeta """ h, w = 15, 15 depth = 6 method = "alphabeta" value_table = [[0] * w for _ in range(h)] value_table[3][14] = 1 eval_fn = EvalTable(value_table) blocked_cells = [(0, 9), (0, 13), (0, 14), (1, 8), (1, 9), (1, 14), (2, 9), (2, 11), (3, 8), (3, 10), (3, 11), (3, 12), (4, 9), (4, 11), (4, 13), (5, 10), (5, 12), (5, 13), (5, 14), (6, 11), (6, 13), (9, 0), (9, 2), (10, 3), (11, 3), (12, 0), (12, 1), (12, 3), (12, 4), (12, 5)] agentUT, board = self.initAUT(depth, eval_fn, False, method, (0, 14), (14, 0), w, h) for r, c in blocked_cells: board.__board_state__[r][c] = "X" move = agentUT.get_move(board, board.get_legal_moves(), lambda: 1e4) expected_move = (2, 13) max_visits = (40, 18) num_explored_valid = 
board.counts[0] < max_visits[0] num_unique_valid = board.counts[1] <= max_visits[1] self.assertTrue(num_explored_valid, WRONG_NUM_EXPLORED.format(method, depth, max_visits[0], board.counts[0])) self.assertTrue(num_unique_valid, UNEXPECTED_VISIT.format(method, depth, max_visits[1], board.counts[1])) self.assertEqual(move, expected_move, WRONG_MOVE.format(method, depth, expected_move, move)) @timeout(10) # @unittest.skip("Skip iterative deepening test.") # Uncomment this line to skip test def test_id(self): """ Test iterative deepening for CustomPlayer.minimax """ w, h = 11, 11 method = "minimax" value_table = [[0] * w for _ in range(h)] value_table[3][0] = 1 value_table[2][3] = 1 value_table[4][4] = 2 value_table[7][2] = 3 eval_fn = EvalTable(value_table) depths = ["7+", "6", "5", "4", "3", "2", "1"] exact_counts = [((4, 4), set([(2, 3), (3, 0)])), ((16, 6), set([(2, 3), (3, 0)])), ((68, 20), set([(2, 3), (3, 2)])), ((310, 21), set([(2, 3), (3, 2)])), ((1582, 45), set([(3, 0), (3, 2)])), ((7534, 45), set([(3, 0), (3, 2)])), ((38366, 74), set([(0, 3), (2, 3), (3, 0), (3, 2)]))] time_limit = 3200 while time_limit >= TIMER_MARGIN: agentUT, board = self.initAUT(-1, eval_fn, True, method, (1, 1), (0, 0), w, h) legal_moves = board.get_legal_moves() timer_start = curr_time_millis() time_left = lambda : time_limit - (curr_time_millis() - timer_start) move = agentUT.get_move(board, legal_moves, time_left) finish_time = time_left() self.assertTrue(len(board.visited) > 4, ID_FAIL) self.assertTrue(finish_time > 0, "Your search failed iterative deepening due to timeout.") # print time_limit, board.counts, move time_limit /= 2 # Skip testing if the search exceeded 7 move horizon if (board.counts[0] > exact_counts[-1][0][0] or board.counts[1] > exact_counts[-1][0][1] or finish_time < 5): continue for idx, ((n, m), c) in enumerate(exact_counts[::-1]): if n > board.counts[0]: continue self.assertIn(move, c, ID_ERROR.format(depths[idx], 2 * time_limit, move, *board.counts)) break 
@timeout(1) # @unittest.skip("Skip eval function test.") # Uncomment this line to skip test def test_custom_eval(self): """ Test output interface of CustomEval """ player1 = "Player1" player2 = "Player2" game = isolation.Board(player1, player2) heuristic = game_agent.CustomEval() self.assertIsInstance(heuristic.score(game, player1), float, "The heuristic function should return a floating point") if __name__ == '__main__': unittest.main()
agentUT, board = self.initAUT(depth, eval_fn, False, method, loc1=(2, 3), loc2=(0, 0)) move = agentUT.get_move(board, board.get_legal_moves(), lambda: 1e3) num_explored_valid = board.counts[0] == counts[idx][0] num_unique_valid = board.counts[1] == counts[idx][1] self.assertTrue(num_explored_valid, WRONG_NUM_EXPLORED.format(method, depth, counts[idx][0], board.counts[0])) self.assertTrue(num_unique_valid, UNEXPECTED_VISIT.format(method, depth, counts[idx][1], board.counts[1])) self.assertIn(move, expected_moves[idx], WRONG_MOVE.format(method, depth, expected_moves[idx], move))
conditional_block
agent_test.py
""" This file contains test cases to verify the correct implementation of the functions required for this project including minimax, alphabeta, and iterative deepening. The heuristic function is tested for conformance to the expected interface, but cannot be automatically assessed for correctness. """ import unittest import timeit import signal import isolation import game_agent from collections import Counter from copy import deepcopy from copy import copy from functools import wraps WRONG_MOVE = "Your {} search returned an invalid move at search depth {}." + \ "\nValid choices: {}\nYour selection: {}" WRONG_NUM_EXPLORED = "Your {} search visited the wrong nodes at search " + \ "depth {}. If the number of visits is too large, " + \ "make sure that iterative deepening is only running " + \ "when the `iterative` flag is set in the agent " + \ "constructor.\nMax explored size: {}\nNumber you " + \ "explored: {}" UNEXPECTED_VISIT = "Your {} search did not visit the number of expected " + \ "unique nodes at search depth {}.\nMax explored size: " + \ "{}\nNumber you explored: {}" ID_ERROR = "Your ID search returned the wrong move at a depth of {} with " + \ "a {}ms time limit. {} {} {}" ID_FAIL = "Your agent did not explore enough nodes during the search; it " + \ "did not finish the first layer of available moves." TIMER_MARGIN = 15 # time (in ms) to leave on the timer to avoid timeout def curr_time_millis(): return 1000 * timeit.default_timer() def timeout(time_limit): """ Function decorator for unittest test cases to specify test case timeout. """ class TimeoutException(Exception): """ Subclass Exception to catch timer expiration during search """ pass def handler(*args, **kwargs): """ Generic handler to raise an exception when a timer expires """ raise TimeoutException("Test aborted due to timeout. 
Test was " + "expected to finish in less than {} second(s).".format(time_limit)) def wrapUnitTest(testcase): @wraps(testcase) def testWrapper(self, *args, **kwargs): signal.signal(signal.SIGALRM, handler) signal.alarm(time_limit) try: return testcase(self, *args, **kwargs) finally: signal.alarm(0) return testWrapper return wrapUnitTest class EvalTable(): def __init__(self, table): self.table = table def score(self, game, player): row, col = game.get_player_location(player) return self.table[row][col] class CounterBoard(isolation.Board): def __init__(self, *args, **kwargs): super(CounterBoard, self).__init__(*args, **kwargs) self.counter = Counter() self.visited = set() def copy(self): new_board = CounterBoard(self.__player_1__, self.__player_2__, width=self.width, height=self.height) new_board.move_count = self.move_count new_board.__active_player__ = self.__active_player__ new_board.__inactive_player__ = self.__inactive_player__ new_board.__last_player_move__ = copy(self.__last_player_move__) new_board.__player_symbols__ = copy(self.__player_symbols__) new_board.__board_state__ = deepcopy(self.__board_state__) new_board.counter = self.counter new_board.visited = self.visited return new_board def forecast_move(self, move): self.counter[move] += 1 self.visited.add(move) new_board = self.copy() new_board.apply_move(move) return new_board @property def counts(self): """ Return counts of (total, unique) nodes visited """ return sum(self.counter.values()), len(self.visited) class Project1Test(unittest.TestCase): def initAUT(self, depth, eval_fn, iterative=False, method="minimax", loc1=(3, 3), loc2=(0, 0), w=7, h=7): reload(game_agent) agentUT = game_agent.CustomPlayer(depth, eval_fn, iterative, method) board = CounterBoard(agentUT, 'null_agent', w, h) board.apply_move(loc1) board.apply_move(loc2) return agentUT, board @timeout(1) # @unittest.skip("Skip minimax test.") # Uncomment this line to skip test def test_minimax(self): """ Test CustomPlayer.minimax """ h, w = 7, 
7 method = "minimax" value_table = [[0] * w for _ in range(h)] value_table[1][5] = 1 value_table[4][3] = 2 value_table[6][6] = 3 eval_fn = EvalTable(value_table) expected_moves = [set([(1, 5)]), set([(3, 1), (3, 5)]), set([(3, 5), (4, 2)])] counts = [(8, 8), (92, 27), (1650, 43)] for idx, depth in enumerate([1, 3, 5]): agentUT, board = self.initAUT(depth, eval_fn, False, method, loc1=(2, 3), loc2=(0, 0)) move = agentUT.get_move(board, board.get_legal_moves(), lambda: 1e3) num_explored_valid = board.counts[0] == counts[idx][0] num_unique_valid = board.counts[1] == counts[idx][1] self.assertTrue(num_explored_valid, WRONG_NUM_EXPLORED.format(method, depth, counts[idx][0], board.counts[0])) self.assertTrue(num_unique_valid, UNEXPECTED_VISIT.format(method, depth, counts[idx][1], board.counts[1])) self.assertIn(move, expected_moves[idx], WRONG_MOVE.format(method, depth, expected_moves[idx], move)) @timeout(1) # @unittest.skip("Skip alpha-beta test.") # Uncomment this line to skip test def test_alphabeta(self): """ Test CustomPlayer.alphabeta """ h, w = 7, 7 method = "alphabeta" value_table = [[0] * w for _ in range(h)] value_table[2][5] = 1 value_table[0][4] = 2 value_table[1][0] = 3 value_table[5][5] = 4 eval_fn = EvalTable(value_table) expected_moves = [set([(2, 5)]), set([(2, 5)]),
set([(1, 4)]), set([(1, 4), (2, 5)])] counts = [(2, 2), (26, 13), (552, 36), (10564, 47)] for idx, depth in enumerate([1, 3, 5, 7]): agentUT, board = self.initAUT(depth, eval_fn, False, method, loc1=(0, 6), loc2=(0, 0)) move = agentUT.get_move(board, board.get_legal_moves(), lambda: 1e4) num_explored_valid = board.counts[0] <= counts[idx][0] num_unique_valid = board.counts[1] <= counts[idx][1] self.assertTrue(num_explored_valid, WRONG_NUM_EXPLORED.format(method, depth, counts[idx][0], board.counts[0])) self.assertTrue(num_unique_valid, UNEXPECTED_VISIT.format(method, depth, counts[idx][1], board.counts[1])) self.assertIn(move, expected_moves[idx], WRONG_MOVE.format(method, depth, expected_moves[idx], move)) @timeout(1) # @unittest.skip("Skip alpha-beta pruning test.") # Uncomment this line to skip test def test_alphabeta_pruning(self): """ Test pruning in CustomPlayer.alphabeta """ h, w = 15, 15 depth = 6 method = "alphabeta" value_table = [[0] * w for _ in range(h)] value_table[3][14] = 1 eval_fn = EvalTable(value_table) blocked_cells = [(0, 9), (0, 13), (0, 14), (1, 8), (1, 9), (1, 14), (2, 9), (2, 11), (3, 8), (3, 10), (3, 11), (3, 12), (4, 9), (4, 11), (4, 13), (5, 10), (5, 12), (5, 13), (5, 14), (6, 11), (6, 13), (9, 0), (9, 2), (10, 3), (11, 3), (12, 0), (12, 1), (12, 3), (12, 4), (12, 5)] agentUT, board = self.initAUT(depth, eval_fn, False, method, (0, 14), (14, 0), w, h) for r, c in blocked_cells: board.__board_state__[r][c] = "X" move = agentUT.get_move(board, board.get_legal_moves(), lambda: 1e4) expected_move = (2, 13) max_visits = (40, 18) num_explored_valid = board.counts[0] < max_visits[0] num_unique_valid = board.counts[1] <= max_visits[1] self.assertTrue(num_explored_valid, WRONG_NUM_EXPLORED.format(method, depth, max_visits[0], board.counts[0])) self.assertTrue(num_unique_valid, UNEXPECTED_VISIT.format(method, depth, max_visits[1], board.counts[1])) self.assertEqual(move, expected_move, WRONG_MOVE.format(method, depth, expected_move, move)) 
@timeout(10) # @unittest.skip("Skip iterative deepening test.") # Uncomment this line to skip test def test_id(self): """ Test iterative deepening for CustomPlayer.minimax """ w, h = 11, 11 method = "minimax" value_table = [[0] * w for _ in range(h)] value_table[3][0] = 1 value_table[2][3] = 1 value_table[4][4] = 2 value_table[7][2] = 3 eval_fn = EvalTable(value_table) depths = ["7+", "6", "5", "4", "3", "2", "1"] exact_counts = [((4, 4), set([(2, 3), (3, 0)])), ((16, 6), set([(2, 3), (3, 0)])), ((68, 20), set([(2, 3), (3, 2)])), ((310, 21), set([(2, 3), (3, 2)])), ((1582, 45), set([(3, 0), (3, 2)])), ((7534, 45), set([(3, 0), (3, 2)])), ((38366, 74), set([(0, 3), (2, 3), (3, 0), (3, 2)]))] time_limit = 3200 while time_limit >= TIMER_MARGIN: agentUT, board = self.initAUT(-1, eval_fn, True, method, (1, 1), (0, 0), w, h) legal_moves = board.get_legal_moves() timer_start = curr_time_millis() time_left = lambda : time_limit - (curr_time_millis() - timer_start) move = agentUT.get_move(board, legal_moves, time_left) finish_time = time_left() self.assertTrue(len(board.visited) > 4, ID_FAIL) self.assertTrue(finish_time > 0, "Your search failed iterative deepening due to timeout.") # print time_limit, board.counts, move time_limit /= 2 # Skip testing if the search exceeded 7 move horizon if (board.counts[0] > exact_counts[-1][0][0] or board.counts[1] > exact_counts[-1][0][1] or finish_time < 5): continue for idx, ((n, m), c) in enumerate(exact_counts[::-1]): if n > board.counts[0]: continue self.assertIn(move, c, ID_ERROR.format(depths[idx], 2 * time_limit, move, *board.counts)) break @timeout(1) # @unittest.skip("Skip eval function test.") # Uncomment this line to skip test def test_custom_eval(self): """ Test output interface of CustomEval """ player1 = "Player1" player2 = "Player2" game = isolation.Board(player1, player2) heuristic = game_agent.CustomEval() self.assertIsInstance(heuristic.score(game, player1), float, "The heuristic function should return a floating 
point") if __name__ == '__main__': unittest.main()
random_line_split
agent_test.py
""" This file contains test cases to verify the correct implementation of the functions required for this project including minimax, alphabeta, and iterative deepening. The heuristic function is tested for conformance to the expected interface, but cannot be automatically assessed for correctness. """ import unittest import timeit import signal import isolation import game_agent from collections import Counter from copy import deepcopy from copy import copy from functools import wraps WRONG_MOVE = "Your {} search returned an invalid move at search depth {}." + \ "\nValid choices: {}\nYour selection: {}" WRONG_NUM_EXPLORED = "Your {} search visited the wrong nodes at search " + \ "depth {}. If the number of visits is too large, " + \ "make sure that iterative deepening is only running " + \ "when the `iterative` flag is set in the agent " + \ "constructor.\nMax explored size: {}\nNumber you " + \ "explored: {}" UNEXPECTED_VISIT = "Your {} search did not visit the number of expected " + \ "unique nodes at search depth {}.\nMax explored size: " + \ "{}\nNumber you explored: {}" ID_ERROR = "Your ID search returned the wrong move at a depth of {} with " + \ "a {}ms time limit. {} {} {}" ID_FAIL = "Your agent did not explore enough nodes during the search; it " + \ "did not finish the first layer of available moves." TIMER_MARGIN = 15 # time (in ms) to leave on the timer to avoid timeout def curr_time_millis(): return 1000 * timeit.default_timer() def timeout(time_limit): """ Function decorator for unittest test cases to specify test case timeout. """ class TimeoutException(Exception): """ Subclass Exception to catch timer expiration during search """ pass def handler(*args, **kwargs): """ Generic handler to raise an exception when a timer expires """ raise TimeoutException("Test aborted due to timeout. 
Test was " + "expected to finish in less than {} second(s).".format(time_limit)) def wrapUnitTest(testcase): @wraps(testcase) def testWrapper(self, *args, **kwargs): signal.signal(signal.SIGALRM, handler) signal.alarm(time_limit) try: return testcase(self, *args, **kwargs) finally: signal.alarm(0) return testWrapper return wrapUnitTest class EvalTable(): def __init__(self, table): self.table = table def score(self, game, player): row, col = game.get_player_location(player) return self.table[row][col] class CounterBoard(isolation.Board): def __init__(self, *args, **kwargs): super(CounterBoard, self).__init__(*args, **kwargs) self.counter = Counter() self.visited = set() def copy(self): new_board = CounterBoard(self.__player_1__, self.__player_2__, width=self.width, height=self.height) new_board.move_count = self.move_count new_board.__active_player__ = self.__active_player__ new_board.__inactive_player__ = self.__inactive_player__ new_board.__last_player_move__ = copy(self.__last_player_move__) new_board.__player_symbols__ = copy(self.__player_symbols__) new_board.__board_state__ = deepcopy(self.__board_state__) new_board.counter = self.counter new_board.visited = self.visited return new_board def forecast_move(self, move): self.counter[move] += 1 self.visited.add(move) new_board = self.copy() new_board.apply_move(move) return new_board @property def counts(self): """ Return counts of (total, unique) nodes visited """ return sum(self.counter.values()), len(self.visited) class Project1Test(unittest.TestCase): def initAUT(self, depth, eval_fn, iterative=False, method="minimax", loc1=(3, 3), loc2=(0, 0), w=7, h=7): reload(game_agent) agentUT = game_agent.CustomPlayer(depth, eval_fn, iterative, method) board = CounterBoard(agentUT, 'null_agent', w, h) board.apply_move(loc1) board.apply_move(loc2) return agentUT, board @timeout(1) # @unittest.skip("Skip minimax test.") # Uncomment this line to skip test def test_minimax(self): """ Test CustomPlayer.minimax """ h, w = 7, 
7 method = "minimax" value_table = [[0] * w for _ in range(h)] value_table[1][5] = 1 value_table[4][3] = 2 value_table[6][6] = 3 eval_fn = EvalTable(value_table) expected_moves = [set([(1, 5)]), set([(3, 1), (3, 5)]), set([(3, 5), (4, 2)])] counts = [(8, 8), (92, 27), (1650, 43)] for idx, depth in enumerate([1, 3, 5]): agentUT, board = self.initAUT(depth, eval_fn, False, method, loc1=(2, 3), loc2=(0, 0)) move = agentUT.get_move(board, board.get_legal_moves(), lambda: 1e3) num_explored_valid = board.counts[0] == counts[idx][0] num_unique_valid = board.counts[1] == counts[idx][1] self.assertTrue(num_explored_valid, WRONG_NUM_EXPLORED.format(method, depth, counts[idx][0], board.counts[0])) self.assertTrue(num_unique_valid, UNEXPECTED_VISIT.format(method, depth, counts[idx][1], board.counts[1])) self.assertIn(move, expected_moves[idx], WRONG_MOVE.format(method, depth, expected_moves[idx], move)) @timeout(1) # @unittest.skip("Skip alpha-beta test.") # Uncomment this line to skip test def test_alphabeta(self):
@timeout(1) # @unittest.skip("Skip alpha-beta pruning test.") # Uncomment this line to skip test def test_alphabeta_pruning(self): """ Test pruning in CustomPlayer.alphabeta """ h, w = 15, 15 depth = 6 method = "alphabeta" value_table = [[0] * w for _ in range(h)] value_table[3][14] = 1 eval_fn = EvalTable(value_table) blocked_cells = [(0, 9), (0, 13), (0, 14), (1, 8), (1, 9), (1, 14), (2, 9), (2, 11), (3, 8), (3, 10), (3, 11), (3, 12), (4, 9), (4, 11), (4, 13), (5, 10), (5, 12), (5, 13), (5, 14), (6, 11), (6, 13), (9, 0), (9, 2), (10, 3), (11, 3), (12, 0), (12, 1), (12, 3), (12, 4), (12, 5)] agentUT, board = self.initAUT(depth, eval_fn, False, method, (0, 14), (14, 0), w, h) for r, c in blocked_cells: board.__board_state__[r][c] = "X" move = agentUT.get_move(board, board.get_legal_moves(), lambda: 1e4) expected_move = (2, 13) max_visits = (40, 18) num_explored_valid = board.counts[0] < max_visits[0] num_unique_valid = board.counts[1] <= max_visits[1] self.assertTrue(num_explored_valid, WRONG_NUM_EXPLORED.format(method, depth, max_visits[0], board.counts[0])) self.assertTrue(num_unique_valid, UNEXPECTED_VISIT.format(method, depth, max_visits[1], board.counts[1])) self.assertEqual(move, expected_move, WRONG_MOVE.format(method, depth, expected_move, move)) @timeout(10) # @unittest.skip("Skip iterative deepening test.") # Uncomment this line to skip test def test_id(self): """ Test iterative deepening for CustomPlayer.minimax """ w, h = 11, 11 method = "minimax" value_table = [[0] * w for _ in range(h)] value_table[3][0] = 1 value_table[2][3] = 1 value_table[4][4] = 2 value_table[7][2] = 3 eval_fn = EvalTable(value_table) depths = ["7+", "6", "5", "4", "3", "2", "1"] exact_counts = [((4, 4), set([(2, 3), (3, 0)])), ((16, 6), set([(2, 3), (3, 0)])), ((68, 20), set([(2, 3), (3, 2)])), ((310, 21), set([(2, 3), (3, 2)])), ((1582, 45), set([(3, 0), (3, 2)])), ((7534, 45), set([(3, 0), (3, 2)])), ((38366, 74), set([(0, 3), (2, 3), (3, 0), (3, 2)]))] time_limit = 3200 while 
time_limit >= TIMER_MARGIN: agentUT, board = self.initAUT(-1, eval_fn, True, method, (1, 1), (0, 0), w, h) legal_moves = board.get_legal_moves() timer_start = curr_time_millis() time_left = lambda : time_limit - (curr_time_millis() - timer_start) move = agentUT.get_move(board, legal_moves, time_left) finish_time = time_left() self.assertTrue(len(board.visited) > 4, ID_FAIL) self.assertTrue(finish_time > 0, "Your search failed iterative deepening due to timeout.") # print time_limit, board.counts, move time_limit /= 2 # Skip testing if the search exceeded 7 move horizon if (board.counts[0] > exact_counts[-1][0][0] or board.counts[1] > exact_counts[-1][0][1] or finish_time < 5): continue for idx, ((n, m), c) in enumerate(exact_counts[::-1]): if n > board.counts[0]: continue self.assertIn(move, c, ID_ERROR.format(depths[idx], 2 * time_limit, move, *board.counts)) break @timeout(1) # @unittest.skip("Skip eval function test.") # Uncomment this line to skip test def test_custom_eval(self): """ Test output interface of CustomEval """ player1 = "Player1" player2 = "Player2" game = isolation.Board(player1, player2) heuristic = game_agent.CustomEval() self.assertIsInstance(heuristic.score(game, player1), float, "The heuristic function should return a floating point") if __name__ == '__main__': unittest.main()
""" Test CustomPlayer.alphabeta """ h, w = 7, 7 method = "alphabeta" value_table = [[0] * w for _ in range(h)] value_table[2][5] = 1 value_table[0][4] = 2 value_table[1][0] = 3 value_table[5][5] = 4 eval_fn = EvalTable(value_table) expected_moves = [set([(2, 5)]), set([(2, 5)]), set([(1, 4)]), set([(1, 4), (2, 5)])] counts = [(2, 2), (26, 13), (552, 36), (10564, 47)] for idx, depth in enumerate([1, 3, 5, 7]): agentUT, board = self.initAUT(depth, eval_fn, False, method, loc1=(0, 6), loc2=(0, 0)) move = agentUT.get_move(board, board.get_legal_moves(), lambda: 1e4) num_explored_valid = board.counts[0] <= counts[idx][0] num_unique_valid = board.counts[1] <= counts[idx][1] self.assertTrue(num_explored_valid, WRONG_NUM_EXPLORED.format(method, depth, counts[idx][0], board.counts[0])) self.assertTrue(num_unique_valid, UNEXPECTED_VISIT.format(method, depth, counts[idx][1], board.counts[1])) self.assertIn(move, expected_moves[idx], WRONG_MOVE.format(method, depth, expected_moves[idx], move))
identifier_body
agent_test.py
""" This file contains test cases to verify the correct implementation of the functions required for this project including minimax, alphabeta, and iterative deepening. The heuristic function is tested for conformance to the expected interface, but cannot be automatically assessed for correctness. """ import unittest import timeit import signal import isolation import game_agent from collections import Counter from copy import deepcopy from copy import copy from functools import wraps WRONG_MOVE = "Your {} search returned an invalid move at search depth {}." + \ "\nValid choices: {}\nYour selection: {}" WRONG_NUM_EXPLORED = "Your {} search visited the wrong nodes at search " + \ "depth {}. If the number of visits is too large, " + \ "make sure that iterative deepening is only running " + \ "when the `iterative` flag is set in the agent " + \ "constructor.\nMax explored size: {}\nNumber you " + \ "explored: {}" UNEXPECTED_VISIT = "Your {} search did not visit the number of expected " + \ "unique nodes at search depth {}.\nMax explored size: " + \ "{}\nNumber you explored: {}" ID_ERROR = "Your ID search returned the wrong move at a depth of {} with " + \ "a {}ms time limit. {} {} {}" ID_FAIL = "Your agent did not explore enough nodes during the search; it " + \ "did not finish the first layer of available moves." TIMER_MARGIN = 15 # time (in ms) to leave on the timer to avoid timeout def curr_time_millis(): return 1000 * timeit.default_timer() def timeout(time_limit): """ Function decorator for unittest test cases to specify test case timeout. """ class TimeoutException(Exception): """ Subclass Exception to catch timer expiration during search """ pass def handler(*args, **kwargs): """ Generic handler to raise an exception when a timer expires """ raise TimeoutException("Test aborted due to timeout. 
Test was " + "expected to finish in less than {} second(s).".format(time_limit)) def wrapUnitTest(testcase): @wraps(testcase) def testWrapper(self, *args, **kwargs): signal.signal(signal.SIGALRM, handler) signal.alarm(time_limit) try: return testcase(self, *args, **kwargs) finally: signal.alarm(0) return testWrapper return wrapUnitTest class EvalTable(): def __init__(self, table): self.table = table def score(self, game, player): row, col = game.get_player_location(player) return self.table[row][col] class CounterBoard(isolation.Board): def __init__(self, *args, **kwargs): super(CounterBoard, self).__init__(*args, **kwargs) self.counter = Counter() self.visited = set() def
(self): new_board = CounterBoard(self.__player_1__, self.__player_2__, width=self.width, height=self.height) new_board.move_count = self.move_count new_board.__active_player__ = self.__active_player__ new_board.__inactive_player__ = self.__inactive_player__ new_board.__last_player_move__ = copy(self.__last_player_move__) new_board.__player_symbols__ = copy(self.__player_symbols__) new_board.__board_state__ = deepcopy(self.__board_state__) new_board.counter = self.counter new_board.visited = self.visited return new_board def forecast_move(self, move): self.counter[move] += 1 self.visited.add(move) new_board = self.copy() new_board.apply_move(move) return new_board @property def counts(self): """ Return counts of (total, unique) nodes visited """ return sum(self.counter.values()), len(self.visited) class Project1Test(unittest.TestCase): def initAUT(self, depth, eval_fn, iterative=False, method="minimax", loc1=(3, 3), loc2=(0, 0), w=7, h=7): reload(game_agent) agentUT = game_agent.CustomPlayer(depth, eval_fn, iterative, method) board = CounterBoard(agentUT, 'null_agent', w, h) board.apply_move(loc1) board.apply_move(loc2) return agentUT, board @timeout(1) # @unittest.skip("Skip minimax test.") # Uncomment this line to skip test def test_minimax(self): """ Test CustomPlayer.minimax """ h, w = 7, 7 method = "minimax" value_table = [[0] * w for _ in range(h)] value_table[1][5] = 1 value_table[4][3] = 2 value_table[6][6] = 3 eval_fn = EvalTable(value_table) expected_moves = [set([(1, 5)]), set([(3, 1), (3, 5)]), set([(3, 5), (4, 2)])] counts = [(8, 8), (92, 27), (1650, 43)] for idx, depth in enumerate([1, 3, 5]): agentUT, board = self.initAUT(depth, eval_fn, False, method, loc1=(2, 3), loc2=(0, 0)) move = agentUT.get_move(board, board.get_legal_moves(), lambda: 1e3) num_explored_valid = board.counts[0] == counts[idx][0] num_unique_valid = board.counts[1] == counts[idx][1] self.assertTrue(num_explored_valid, WRONG_NUM_EXPLORED.format(method, depth, counts[idx][0], 
board.counts[0])) self.assertTrue(num_unique_valid, UNEXPECTED_VISIT.format(method, depth, counts[idx][1], board.counts[1])) self.assertIn(move, expected_moves[idx], WRONG_MOVE.format(method, depth, expected_moves[idx], move)) @timeout(1) # @unittest.skip("Skip alpha-beta test.") # Uncomment this line to skip test def test_alphabeta(self): """ Test CustomPlayer.alphabeta """ h, w = 7, 7 method = "alphabeta" value_table = [[0] * w for _ in range(h)] value_table[2][5] = 1 value_table[0][4] = 2 value_table[1][0] = 3 value_table[5][5] = 4 eval_fn = EvalTable(value_table) expected_moves = [set([(2, 5)]), set([(2, 5)]), set([(1, 4)]), set([(1, 4), (2, 5)])] counts = [(2, 2), (26, 13), (552, 36), (10564, 47)] for idx, depth in enumerate([1, 3, 5, 7]): agentUT, board = self.initAUT(depth, eval_fn, False, method, loc1=(0, 6), loc2=(0, 0)) move = agentUT.get_move(board, board.get_legal_moves(), lambda: 1e4) num_explored_valid = board.counts[0] <= counts[idx][0] num_unique_valid = board.counts[1] <= counts[idx][1] self.assertTrue(num_explored_valid, WRONG_NUM_EXPLORED.format(method, depth, counts[idx][0], board.counts[0])) self.assertTrue(num_unique_valid, UNEXPECTED_VISIT.format(method, depth, counts[idx][1], board.counts[1])) self.assertIn(move, expected_moves[idx], WRONG_MOVE.format(method, depth, expected_moves[idx], move)) @timeout(1) # @unittest.skip("Skip alpha-beta pruning test.") # Uncomment this line to skip test def test_alphabeta_pruning(self): """ Test pruning in CustomPlayer.alphabeta """ h, w = 15, 15 depth = 6 method = "alphabeta" value_table = [[0] * w for _ in range(h)] value_table[3][14] = 1 eval_fn = EvalTable(value_table) blocked_cells = [(0, 9), (0, 13), (0, 14), (1, 8), (1, 9), (1, 14), (2, 9), (2, 11), (3, 8), (3, 10), (3, 11), (3, 12), (4, 9), (4, 11), (4, 13), (5, 10), (5, 12), (5, 13), (5, 14), (6, 11), (6, 13), (9, 0), (9, 2), (10, 3), (11, 3), (12, 0), (12, 1), (12, 3), (12, 4), (12, 5)] agentUT, board = self.initAUT(depth, eval_fn, False, method, 
(0, 14), (14, 0), w, h) for r, c in blocked_cells: board.__board_state__[r][c] = "X" move = agentUT.get_move(board, board.get_legal_moves(), lambda: 1e4) expected_move = (2, 13) max_visits = (40, 18) num_explored_valid = board.counts[0] < max_visits[0] num_unique_valid = board.counts[1] <= max_visits[1] self.assertTrue(num_explored_valid, WRONG_NUM_EXPLORED.format(method, depth, max_visits[0], board.counts[0])) self.assertTrue(num_unique_valid, UNEXPECTED_VISIT.format(method, depth, max_visits[1], board.counts[1])) self.assertEqual(move, expected_move, WRONG_MOVE.format(method, depth, expected_move, move)) @timeout(10) # @unittest.skip("Skip iterative deepening test.") # Uncomment this line to skip test def test_id(self): """ Test iterative deepening for CustomPlayer.minimax """ w, h = 11, 11 method = "minimax" value_table = [[0] * w for _ in range(h)] value_table[3][0] = 1 value_table[2][3] = 1 value_table[4][4] = 2 value_table[7][2] = 3 eval_fn = EvalTable(value_table) depths = ["7+", "6", "5", "4", "3", "2", "1"] exact_counts = [((4, 4), set([(2, 3), (3, 0)])), ((16, 6), set([(2, 3), (3, 0)])), ((68, 20), set([(2, 3), (3, 2)])), ((310, 21), set([(2, 3), (3, 2)])), ((1582, 45), set([(3, 0), (3, 2)])), ((7534, 45), set([(3, 0), (3, 2)])), ((38366, 74), set([(0, 3), (2, 3), (3, 0), (3, 2)]))] time_limit = 3200 while time_limit >= TIMER_MARGIN: agentUT, board = self.initAUT(-1, eval_fn, True, method, (1, 1), (0, 0), w, h) legal_moves = board.get_legal_moves() timer_start = curr_time_millis() time_left = lambda : time_limit - (curr_time_millis() - timer_start) move = agentUT.get_move(board, legal_moves, time_left) finish_time = time_left() self.assertTrue(len(board.visited) > 4, ID_FAIL) self.assertTrue(finish_time > 0, "Your search failed iterative deepening due to timeout.") # print time_limit, board.counts, move time_limit /= 2 # Skip testing if the search exceeded 7 move horizon if (board.counts[0] > exact_counts[-1][0][0] or board.counts[1] > 
exact_counts[-1][0][1] or finish_time < 5): continue for idx, ((n, m), c) in enumerate(exact_counts[::-1]): if n > board.counts[0]: continue self.assertIn(move, c, ID_ERROR.format(depths[idx], 2 * time_limit, move, *board.counts)) break @timeout(1) # @unittest.skip("Skip eval function test.") # Uncomment this line to skip test def test_custom_eval(self): """ Test output interface of CustomEval """ player1 = "Player1" player2 = "Player2" game = isolation.Board(player1, player2) heuristic = game_agent.CustomEval() self.assertIsInstance(heuristic.score(game, player1), float, "The heuristic function should return a floating point") if __name__ == '__main__': unittest.main()
copy
identifier_name
zoekmachine.py
import sys import json from elasticsearch import Elasticsearch from wordcloud import WordCloud from datetime import datetime import collections as c import numpy as np import matplotlib.pyplot as plt #Connect to elastic cloud HOST = 'https://558a85fda07b4bbea2ff78028c0f63a1.europe-west1.gcp.cloud.es.io:9243/' es = Elasticsearch(hosts=[HOST], http_auth=('elastic','UgYdqcaJjmsYDaU5HfNwlDyL'), verify_certs=False) file_directory = "Data1.json" query={ "query": { "match": { "content": "term" } } } #Makes the results into a SERP def wrapStringInHTMLWindows(term, program, list_results, body): import datetime from webbrowser import open_new_tab now = datetime.datetime.today().strftime("%Y%m%d-%H%M%S") filename = program + '.html' f = open(filename,'w') # Fill in blocks with results from queries wrapper = """<html> <head> <title>%s output - %s</title> </head> <body> <div style="text-align:center"> <h1>Your top 10 searchresults for: %s</h1> </div> <div style="background:grey"> <p> %s </p> </div> <div style="background:grey"> <p> %s </p> </div> <div style="background:grey"> <p> %s </p> </div> <div style="background:grey"> <p> %s </p> </div> <div style="background:grey"> <p> %s </p> </div> <div style="background:grey"> <p> %s </p> </div> <div style="background:grey"> <p> %s </p> </div> <div style="background:grey"> <p> %s </p> </div> <div style="background:grey"> <p> %s </p> </div> <div style="background:grey"> <p> %s </p> </div> </body> </html>""" #Write it into html file and open once finished whole = wrapper % (program, now, term, list_results[0], list_results[1], list_results[2], list_results[3], list_results[4], list_results[5], list_results[6], list_results[7], list_results[8], list_results[9]) f.write(whole) f.close() open_new_tab(filename) #Wrap results into hrefs to place in result blocks def wrapinresults(url, question): wrapper = """<p><a href=%s>%s</a></p>""" whole = wrapper % (url, question) return(whole) #Makes wordcloud, removing stopwords, showing image def 
makeWordCloud(term ,text): text = text.lower() text = text.replace(term, '') text = text.replace('de', '') text = text.replace('het', '') text = text.replace('een', '') text = text.replace('zijn', '') wordcloud = WordCloud().generate(text) image = wordcloud.to_image() image.show() #Shows in which year the most hits have been made def showTimeLine(res): from datetime import datetime import matplotlib.pyplot as plt timeline = [] #finds date of creatio of the questions for doc in res['hits']['hits']: date = doc['_source']['Date'] date = date.split(" ", 1)[0] datetime_object = datetime.strptime(date, '%Y-%m-%d').date() timeline.append(datetime_object) #creates figure x = timeline y = range(len(timeline)) fig, ax = plt.subplots() ax.bar(timeline, y, width = 10) fig.autofmt_xdate() plt.show() #Performs actual search def search(term, filter): text = '' quest = es.get(index="zoekmachine", doc_type="question", id=5)['_source'] #Connect to cloud and find results results = [] res = es.search(index="zoekmachine", doc_type="question", body={"query": {"match": {"Question": term}}}) print("%d documents found" % res['hits']['total']) #Create the results to print on SERP for doc in res['hits']['hits']: url = "https://www.startpagina.nl/v/vraag/" + doc['_source']['Number'] + "/" title = doc['_source']['Question'] text = text + title date = doc['_source']['Date'] date = date.split("-", 1)[0] if int(date) >= int(filter): results.append(wrapinresults(url, title)) #Show all work to user makeWordCloud(term, text) docscount = len(results) if docscount < 10: for x in range(0, (10 - docscount)): results.append("") wrapStringInHTMLWindows(term, "serp", results, "body") showTimeLine(res) #Ask user for query def simple(): print("What are you looking for?") term = sys.stdin.readline() search(term, 0000) #Ask user for query in which time setting def advanced(): print("What are you looking for? 
(ADV)") term = sys.stdin.readline() print("From what year on?") year = sys.stdin.readline() search(term, year) #Return that the choice was not available def invalid(): print("Not a valid choice") #List of all categories in data set switcherGetCat = { "1" : "Alle categorieën", "2" : "Persoon & Gezondheid", "3" : "Maatschappij", "4" : "Financiën & Werk", "5" : "Vervoer", "6" : "Computers & Internet", "7" : "Elektronica", "8" : "Entertainment & Muziek", "9" : "Eten & Drinken", "10": "Sport, Spel & Recreatie", '11': "Huis & Tuin", "12": "Wetenschap", "13": "Vakantie & Reizen", "14": "Kunst & Cultuur", "15": "Overig", '16': "Biologie", "17": "Wiskunde", "18": "Natuur- en scheikunde", "19": "Psychologie", "20": "Sociale wetenschap", "21": "Overig", "22": "Auto's", "23" : "Vliegtuigen", "24" : "Boten", "25" : "Openbaar vervoer", "26" : "Motorfietsen", "27" : "Fietsen", "28": 'Overig', "29": 'Spellen', "30": 'Computergames', "31": "Hobby\'s", "32": "Sporten", "33": "Overig", "34": 'Caraïben', "35": "Noord-Amerika", "36": 'Zuid-Amerika', "37": "Afrika", "38": 'Antarctica', "39": 'Azië', '40': 'Europa', "41": "Midden-Amerika", '42': "Midden-Oosten", "43": "Oceanië", "44": "Overig", "45": "Overig", "46": "Mode & Accessoires", "47": "Familie & Relatie", "48": "Gezondheid", "49": "Zwangerschap", "50": 'Onderwijs', "51": 'Milieu', "52": "Politiek & Overheid", "53": "Samenleving", "54": 'Overig', "55": "Boeken & Auteurs", "56": "Genealogie", "57": 'Geschiedenis', "58": 'Filosofie', "59": 'Poëzie', "60": "Beeldende kunst", "61": "Overig", "62": "Schoonmaken & Wassen", "63": 'Interieur', "64": 'Doe-Het-Zelf', "65": 'Tuin', "66": 'Huisdieren', "67": 'Overig', "68": 'Dranken', "69": 'Koken & Recepten', "70": 'Vegetarisch & Veganistisch', "71": "Uit eten", "72": "Overig", "73": 'Beroemdheden', "74": 'Stripboeken & Tekenfilms', "75": 'Tijdschriften', "76": 'Horoscoop', "77": 'Films', "78": 'Muziek', "79": 'Radio', "80": 'Televisie', "81": 'Overig', "82": 'Videocameras', "83": 
"Camera\'s", "84": "Telefoon & Abonnementen", "85": 'Spelcomputers', "86": 'Audio', "87": "Handhelds & Smartphones", "88": "Televisies", "89": 'Overig', "90": 'Hardware', "91": 'Software', "92": 'Internet', "93": 'Programmeren & Design', "94": 'Veiligheid', "95": 'Overig', "96": 'Carrière & Werk', "97": 'Financiën', "98": 'Huren & Vastgoed', "100": 'Belasting', "101": 'Overig', "103": 'Ondernemen', "104": "Religie", "106": 'Vrachtwagens & Transport', "107": 'Treinen', "108": "Taal", "109": "Spiritualiteit", "110": 'Ruimtevaart & Sterrenkunde', "111": "Besturingssystemen", "113": "Voetbal", "114": 'Wielrennen', "115": 'Tennis', "116": "Formule 1", "117": "Hockey", "118": 'Schaatsen', "119": 'Overig', "120": 'Vragen aan mannen', "121": 'Vragen aan vrouwen', "122": "GoeieVraag.nl", "123": "Ouderschap & Opvoeding", "124": 'Wetgeving', "125": 'Wintersport', "126": 'Feestdagen', "127": "Sinterklaas", "128": 'Kerst', "129": 'Pasen', "130": "Andere feestdagen", "131": "Seksualiteit", "132": "Aardrijkskunde & Aardwetenschappen", "133": "Energie", "134": "Verzekeringen", "135": "Sparen & Beleggen", "136": "Overig", "137": "Alternatieve geneeswijzen", "138": "Gebit", "139": 'Psyche', "140": 'Voeding', "141": "Ziekten", "142": "Optiek", "143": "Lichamelijke klachten", "144": "Mannelijk lichaam", "145": "Vrouwelijk lichaam", "146": "Overig", "147": 'Kinderen', "148": "Reparaties", "149": "Banden", "150": "Brom- & Snorfietsen", "151": "Weblogs", "152": 'Webshops', "156": "Meteorologie", "157": "Lenen", "158": "Sparen", "159": "Hypotheek", "160": "Economie", "161": "Techniek", "162": "Landbouw & Veeteelt", "163": "Medicijnen", "164": "Huid-, haarverzorging en Make-up", "165": "Fotografie", "166": 'Winkels', "167": "Huishoudelijke apparaten", "168": "Sociale Media" } #Search for documents containing the query and make a list of categories def searchFAC(term): text = '' quest = es.get(index="zoekmachine", doc_type="question", id=5)['_source'] results = [] cat = c.Counter() res = 
es.search(index="zoekmachine", doc_type="question", body={"query": {"match": {"Question": term}}}) print("%d documents found" % res['hits']['total']) for doc in res['hits']['hits']: url = "https://www.startpagina.nl/v/vraag/" + doc['_source']['Number'] + "/" title = doc['_source']['Question'] text = text + title results.append(wrapinresults(url, title)) caterogynr= doc['_source']['Category'] cat[switcherGetCat.get(caterogynr, "None")] += 1 return cat, res #Search for documents containing the query and filter out unwanted categories def searchFAC2(term, catNr): text = '' quest = es.get(index="zoekmachine", doc_type="question", id=5)['_source'] results = [] res = es.search(index="zoekmachine", doc_type="question", body={"query": {"match": {"Question": term}}}) print("%d documents found" % res['hits']['total']) for doc in res['hits']['hits']: url = "h
keWordCloud(term, text) docscount = len(results) if docscount < 10: for x in range(0, (10 - docscount)): results.append("") wrapStringInHTMLWindows(term, "serp", results, "body") # makeWordCloud(text) wrapStringInHTMLWindows(term, "serp", results, "body") #Show the user all possible categories and make them choose one def getUserCat(cat): for c in cat: print(c, "("+str(cat[c])+")") print("\nPlease, type categorie:") dingenCat = sys.stdin.readline() # dingenCat = dingenCat catNr = "geen categorie" for number in switcherGetCat: if switcherGetCat.get(number) + "\n" == dingenCat: catNr = int(number) if not isinstance(catNr, int): print("Category doesn't exist\n") getUserCat(cat) else: return catNr #Let user choose in which category to search for query def faceted(): print("What are you looking for? (FAC)") dingen = sys.stdin.readline() cat, res = searchFAC(dingen) catNr = getUserCat(cat) res = searchFAC2(dingen, catNr) #Let user choose which type of search to execute, then forward to that search def getUserInput(): print("Welcome to our search engine in terminal") print("What kind of search would you like?") print("Input a for simple search") print("Input b for advanced search") print("Input c for faceted search") choise = sys.stdin.readline().split()[0] #Forward to right search method switcher = { 'a': simple, "b": advanced, "c": faceted, } switcher.get(choise, invalid)() ##Put information into the cloud #json_data=open(file_directory) #counter = 0 #for line in json_data: # data = json.loads(line) # if('index' not in data.keys()) : # counter += 1 # Test if succesfull in writing # resp = es.index(index='zoekmachine', doc_type='question', id=counter, body=data) # print(counter) #Get the searchterm from the user getUserInput()
ttps://www.startpagina.nl/v/vraag/" + doc['_source']['Number'] + "/" title = doc['_source']['Question'] text = text + title if int(doc['_source']['Category']) == catNr: results.append(wrapinresults(url, title)) ma
conditional_block
zoekmachine.py
import sys import json from elasticsearch import Elasticsearch from wordcloud import WordCloud from datetime import datetime import collections as c import numpy as np import matplotlib.pyplot as plt #Connect to elastic cloud HOST = 'https://558a85fda07b4bbea2ff78028c0f63a1.europe-west1.gcp.cloud.es.io:9243/' es = Elasticsearch(hosts=[HOST], http_auth=('elastic','UgYdqcaJjmsYDaU5HfNwlDyL'), verify_certs=False) file_directory = "Data1.json" query={ "query": { "match": { "content": "term" } } } #Makes the results into a SERP def wrapStringInHTMLWindows(term, program, list_results, body): import datetime from webbrowser import open_new_tab now = datetime.datetime.today().strftime("%Y%m%d-%H%M%S") filename = program + '.html' f = open(filename,'w') # Fill in blocks with results from queries wrapper = """<html> <head> <title>%s output - %s</title> </head> <body> <div style="text-align:center"> <h1>Your top 10 searchresults for: %s</h1> </div> <div style="background:grey"> <p> %s </p> </div> <div style="background:grey"> <p> %s </p> </div> <div style="background:grey"> <p> %s </p> </div> <div style="background:grey"> <p> %s </p> </div> <div style="background:grey"> <p> %s </p> </div> <div style="background:grey"> <p> %s </p> </div> <div style="background:grey"> <p> %s </p> </div> <div style="background:grey"> <p> %s </p> </div> <div style="background:grey"> <p> %s </p> </div> <div style="background:grey"> <p> %s </p> </div> </body> </html>""" #Write it into html file and open once finished whole = wrapper % (program, now, term, list_results[0], list_results[1], list_results[2], list_results[3], list_results[4], list_results[5], list_results[6], list_results[7], list_results[8], list_results[9]) f.write(whole) f.close() open_new_tab(filename) #Wrap results into hrefs to place in result blocks def wrapinresults(url, question): wrapper = """<p><a href=%s>%s</a></p>""" whole = wrapper % (url, question) return(whole) #Makes wordcloud, removing stopwords, showing image def 
makeWordCloud(term ,text): text = text.lower() text = text.replace(term, '') text = text.replace('de', '') text = text.replace('het', '') text = text.replace('een', '') text = text.replace('zijn', '') wordcloud = WordCloud().generate(text) image = wordcloud.to_image() image.show() #Shows in which year the most hits have been made def showTimeLine(res): from datetime import datetime import matplotlib.pyplot as plt timeline = [] #finds date of creatio of the questions for doc in res['hits']['hits']: date = doc['_source']['Date'] date = date.split(" ", 1)[0] datetime_object = datetime.strptime(date, '%Y-%m-%d').date() timeline.append(datetime_object) #creates figure x = timeline y = range(len(timeline)) fig, ax = plt.subplots() ax.bar(timeline, y, width = 10) fig.autofmt_xdate() plt.show() #Performs actual search def search(term, filter): text = '' quest = es.get(index="zoekmachine", doc_type="question", id=5)['_source'] #Connect to cloud and find results results = [] res = es.search(index="zoekmachine", doc_type="question", body={"query": {"match": {"Question": term}}}) print("%d documents found" % res['hits']['total']) #Create the results to print on SERP for doc in res['hits']['hits']: url = "https://www.startpagina.nl/v/vraag/" + doc['_source']['Number'] + "/" title = doc['_source']['Question'] text = text + title date = doc['_source']['Date'] date = date.split("-", 1)[0] if int(date) >= int(filter): results.append(wrapinresults(url, title)) #Show all work to user makeWordCloud(term, text) docscount = len(results) if docscount < 10: for x in range(0, (10 - docscount)): results.append("") wrapStringInHTMLWindows(term, "serp", results, "body") showTimeLine(res) #Ask user for query def simple(): print("What are you looking for?") term = sys.stdin.readline() search(term, 0000) #Ask user for query in which time setting def advanced(): print("What are you looking for? 
(ADV)") term = sys.stdin.readline() print("From what year on?") year = sys.stdin.readline() search(term, year) #Return that the choice was not available def invalid(): print("Not a valid choice") #List of all categories in data set switcherGetCat = { "1" : "Alle categorieën", "2" : "Persoon & Gezondheid", "3" : "Maatschappij", "4" : "Financiën & Werk", "5" : "Vervoer", "6" : "Computers & Internet", "7" : "Elektronica", "8" : "Entertainment & Muziek", "9" : "Eten & Drinken", "10": "Sport, Spel & Recreatie", '11': "Huis & Tuin", "12": "Wetenschap", "13": "Vakantie & Reizen", "14": "Kunst & Cultuur", "15": "Overig", '16': "Biologie", "17": "Wiskunde", "18": "Natuur- en scheikunde", "19": "Psychologie", "20": "Sociale wetenschap", "21": "Overig", "22": "Auto's", "23" : "Vliegtuigen", "24" : "Boten", "25" : "Openbaar vervoer", "26" : "Motorfietsen", "27" : "Fietsen", "28": 'Overig', "29": 'Spellen', "30": 'Computergames', "31": "Hobby\'s", "32": "Sporten", "33": "Overig", "34": 'Caraïben', "35": "Noord-Amerika", "36": 'Zuid-Amerika', "37": "Afrika", "38": 'Antarctica', "39": 'Azië', '40': 'Europa', "41": "Midden-Amerika", '42': "Midden-Oosten", "43": "Oceanië", "44": "Overig", "45": "Overig", "46": "Mode & Accessoires", "47": "Familie & Relatie", "48": "Gezondheid", "49": "Zwangerschap", "50": 'Onderwijs', "51": 'Milieu', "52": "Politiek & Overheid", "53": "Samenleving", "54": 'Overig', "55": "Boeken & Auteurs", "56": "Genealogie", "57": 'Geschiedenis', "58": 'Filosofie', "59": 'Poëzie', "60": "Beeldende kunst", "61": "Overig", "62": "Schoonmaken & Wassen", "63": 'Interieur', "64": 'Doe-Het-Zelf', "65": 'Tuin', "66": 'Huisdieren', "67": 'Overig', "68": 'Dranken', "69": 'Koken & Recepten', "70": 'Vegetarisch & Veganistisch', "71": "Uit eten", "72": "Overig", "73": 'Beroemdheden', "74": 'Stripboeken & Tekenfilms', "75": 'Tijdschriften', "76": 'Horoscoop', "77": 'Films', "78": 'Muziek', "79": 'Radio', "80": 'Televisie', "81": 'Overig', "82": 'Videocameras', "83": 
"Camera\'s", "84": "Telefoon & Abonnementen", "85": 'Spelcomputers', "86": 'Audio', "87": "Handhelds & Smartphones", "88": "Televisies", "89": 'Overig', "90": 'Hardware', "91": 'Software', "92": 'Internet', "93": 'Programmeren & Design', "94": 'Veiligheid', "95": 'Overig', "96": 'Carrière & Werk', "97": 'Financiën', "98": 'Huren & Vastgoed', "100": 'Belasting', "101": 'Overig', "103": 'Ondernemen', "104": "Religie", "106": 'Vrachtwagens & Transport', "107": 'Treinen', "108": "Taal", "109": "Spiritualiteit", "110": 'Ruimtevaart & Sterrenkunde', "111": "Besturingssystemen", "113": "Voetbal", "114": 'Wielrennen', "115": 'Tennis', "116": "Formule 1", "117": "Hockey", "118": 'Schaatsen', "119": 'Overig', "120": 'Vragen aan mannen', "121": 'Vragen aan vrouwen', "122": "GoeieVraag.nl", "123": "Ouderschap & Opvoeding", "124": 'Wetgeving', "125": 'Wintersport', "126": 'Feestdagen', "127": "Sinterklaas", "128": 'Kerst', "129": 'Pasen', "130": "Andere feestdagen", "131": "Seksualiteit", "132": "Aardrijkskunde & Aardwetenschappen", "133": "Energie", "134": "Verzekeringen", "135": "Sparen & Beleggen", "136": "Overig", "137": "Alternatieve geneeswijzen", "138": "Gebit", "139": 'Psyche', "140": 'Voeding', "141": "Ziekten", "142": "Optiek", "143": "Lichamelijke klachten", "144": "Mannelijk lichaam", "145": "Vrouwelijk lichaam", "146": "Overig", "147": 'Kinderen', "148": "Reparaties", "149": "Banden", "150": "Brom- & Snorfietsen", "151": "Weblogs", "152": 'Webshops', "156": "Meteorologie", "157": "Lenen", "158": "Sparen", "159": "Hypotheek", "160": "Economie", "161": "Techniek", "162": "Landbouw & Veeteelt", "163": "Medicijnen", "164": "Huid-, haarverzorging en Make-up", "165": "Fotografie", "166": 'Winkels', "167": "Huishoudelijke apparaten", "168": "Sociale Media" } #Search for documents containing the query and make a list of categories def searchFAC(term): text = '' quest = es.get(index="zoekmachine", doc_type="question", id=5)['_source'] results = [] cat = c.Counter() res = 
es.search(index="zoekmachine", doc_type="question", body={"query": {"match": {"Question": term}}}) print("%d documents found" % res['hits']['total']) for doc in res['hits']['hits']: url = "https://www.startpagina.nl/v/vraag/" + doc['_source']['Number'] + "/" title = doc['_source']['Question'] text = text + title results.append(wrapinresults(url, title)) caterogynr= doc['_source']['Category'] cat[switcherGetCat.get(caterogynr, "None")] += 1 return cat, res #Search for documents containing the query and filter out unwanted categories def searchFA
atNr): text = '' quest = es.get(index="zoekmachine", doc_type="question", id=5)['_source'] results = [] res = es.search(index="zoekmachine", doc_type="question", body={"query": {"match": {"Question": term}}}) print("%d documents found" % res['hits']['total']) for doc in res['hits']['hits']: url = "https://www.startpagina.nl/v/vraag/" + doc['_source']['Number'] + "/" title = doc['_source']['Question'] text = text + title if int(doc['_source']['Category']) == catNr: results.append(wrapinresults(url, title)) makeWordCloud(term, text) docscount = len(results) if docscount < 10: for x in range(0, (10 - docscount)): results.append("") wrapStringInHTMLWindows(term, "serp", results, "body") # makeWordCloud(text) wrapStringInHTMLWindows(term, "serp", results, "body") #Show the user all possible categories and make them choose one def getUserCat(cat): for c in cat: print(c, "("+str(cat[c])+")") print("\nPlease, type categorie:") dingenCat = sys.stdin.readline() # dingenCat = dingenCat catNr = "geen categorie" for number in switcherGetCat: if switcherGetCat.get(number) + "\n" == dingenCat: catNr = int(number) if not isinstance(catNr, int): print("Category doesn't exist\n") getUserCat(cat) else: return catNr #Let user choose in which category to search for query def faceted(): print("What are you looking for? 
(FAC)") dingen = sys.stdin.readline() cat, res = searchFAC(dingen) catNr = getUserCat(cat) res = searchFAC2(dingen, catNr) #Let user choose which type of search to execute, then forward to that search def getUserInput(): print("Welcome to our search engine in terminal") print("What kind of search would you like?") print("Input a for simple search") print("Input b for advanced search") print("Input c for faceted search") choise = sys.stdin.readline().split()[0] #Forward to right search method switcher = { 'a': simple, "b": advanced, "c": faceted, } switcher.get(choise, invalid)() ##Put information into the cloud #json_data=open(file_directory) #counter = 0 #for line in json_data: # data = json.loads(line) # if('index' not in data.keys()) : # counter += 1 # Test if succesfull in writing # resp = es.index(index='zoekmachine', doc_type='question', id=counter, body=data) # print(counter) #Get the searchterm from the user getUserInput()
C2(term, c
identifier_name
zoekmachine.py
import sys import json from elasticsearch import Elasticsearch from wordcloud import WordCloud from datetime import datetime import collections as c import numpy as np import matplotlib.pyplot as plt #Connect to elastic cloud HOST = 'https://558a85fda07b4bbea2ff78028c0f63a1.europe-west1.gcp.cloud.es.io:9243/' es = Elasticsearch(hosts=[HOST], http_auth=('elastic','UgYdqcaJjmsYDaU5HfNwlDyL'), verify_certs=False) file_directory = "Data1.json" query={ "query": { "match": { "content": "term" } } } #Makes the results into a SERP def wrapStringInHTMLWindows(term, program, list_results, body): import datetime from webbrowser import open_new_tab now = datetime.datetime.today().strftime("%Y%m%d-%H%M%S") filename = program + '.html' f = open(filename,'w') # Fill in blocks with results from queries wrapper = """<html> <head> <title>%s output - %s</title> </head> <body> <div style="text-align:center"> <h1>Your top 10 searchresults for: %s</h1> </div> <div style="background:grey"> <p> %s </p> </div> <div style="background:grey"> <p> %s </p> </div> <div style="background:grey"> <p> %s </p> </div> <div style="background:grey"> <p> %s </p> </div> <div style="background:grey"> <p> %s </p> </div> <div style="background:grey"> <p> %s </p> </div> <div style="background:grey"> <p> %s </p> </div> <div style="background:grey"> <p> %s </p> </div> <div style="background:grey"> <p> %s </p> </div> <div style="background:grey"> <p> %s </p> </div> </body> </html>""" #Write it into html file and open once finished whole = wrapper % (program, now, term, list_results[0], list_results[1], list_results[2], list_results[3], list_results[4], list_results[5], list_results[6], list_results[7], list_results[8], list_results[9]) f.write(whole) f.close() open_new_tab(filename) #Wrap results into hrefs to place in result blocks def wrapinresults(url, question): wrapper = """<p><a href=%s>%s</a></p>""" whole = wrapper % (url, question) return(whole) #Makes wordcloud, removing stopwords, showing image def 
makeWordCloud(term ,text): text = text.lower() text = text.replace(term, '') text = text.replace('de', '') text = text.replace('het', '') text = text.replace('een', '') text = text.replace('zijn', '') wordcloud = WordCloud().generate(text) image = wordcloud.to_image() image.show() #Shows in which year the most hits have been made def showTimeLine(res): from datetime import datetime import matplotlib.pyplot as plt timeline = [] #finds date of creatio of the questions for doc in res['hits']['hits']: date = doc['_source']['Date'] date = date.split(" ", 1)[0] datetime_object = datetime.strptime(date, '%Y-%m-%d').date() timeline.append(datetime_object) #creates figure x = timeline
fig, ax = plt.subplots() ax.bar(timeline, y, width = 10) fig.autofmt_xdate() plt.show() #Performs actual search def search(term, filter): text = '' quest = es.get(index="zoekmachine", doc_type="question", id=5)['_source'] #Connect to cloud and find results results = [] res = es.search(index="zoekmachine", doc_type="question", body={"query": {"match": {"Question": term}}}) print("%d documents found" % res['hits']['total']) #Create the results to print on SERP for doc in res['hits']['hits']: url = "https://www.startpagina.nl/v/vraag/" + doc['_source']['Number'] + "/" title = doc['_source']['Question'] text = text + title date = doc['_source']['Date'] date = date.split("-", 1)[0] if int(date) >= int(filter): results.append(wrapinresults(url, title)) #Show all work to user makeWordCloud(term, text) docscount = len(results) if docscount < 10: for x in range(0, (10 - docscount)): results.append("") wrapStringInHTMLWindows(term, "serp", results, "body") showTimeLine(res) #Ask user for query def simple(): print("What are you looking for?") term = sys.stdin.readline() search(term, 0000) #Ask user for query in which time setting def advanced(): print("What are you looking for? 
(ADV)") term = sys.stdin.readline() print("From what year on?") year = sys.stdin.readline() search(term, year) #Return that the choice was not available def invalid(): print("Not a valid choice") #List of all categories in data set switcherGetCat = { "1" : "Alle categorieën", "2" : "Persoon & Gezondheid", "3" : "Maatschappij", "4" : "Financiën & Werk", "5" : "Vervoer", "6" : "Computers & Internet", "7" : "Elektronica", "8" : "Entertainment & Muziek", "9" : "Eten & Drinken", "10": "Sport, Spel & Recreatie", '11': "Huis & Tuin", "12": "Wetenschap", "13": "Vakantie & Reizen", "14": "Kunst & Cultuur", "15": "Overig", '16': "Biologie", "17": "Wiskunde", "18": "Natuur- en scheikunde", "19": "Psychologie", "20": "Sociale wetenschap", "21": "Overig", "22": "Auto's", "23" : "Vliegtuigen", "24" : "Boten", "25" : "Openbaar vervoer", "26" : "Motorfietsen", "27" : "Fietsen", "28": 'Overig', "29": 'Spellen', "30": 'Computergames', "31": "Hobby\'s", "32": "Sporten", "33": "Overig", "34": 'Caraïben', "35": "Noord-Amerika", "36": 'Zuid-Amerika', "37": "Afrika", "38": 'Antarctica', "39": 'Azië', '40': 'Europa', "41": "Midden-Amerika", '42': "Midden-Oosten", "43": "Oceanië", "44": "Overig", "45": "Overig", "46": "Mode & Accessoires", "47": "Familie & Relatie", "48": "Gezondheid", "49": "Zwangerschap", "50": 'Onderwijs', "51": 'Milieu', "52": "Politiek & Overheid", "53": "Samenleving", "54": 'Overig', "55": "Boeken & Auteurs", "56": "Genealogie", "57": 'Geschiedenis', "58": 'Filosofie', "59": 'Poëzie', "60": "Beeldende kunst", "61": "Overig", "62": "Schoonmaken & Wassen", "63": 'Interieur', "64": 'Doe-Het-Zelf', "65": 'Tuin', "66": 'Huisdieren', "67": 'Overig', "68": 'Dranken', "69": 'Koken & Recepten', "70": 'Vegetarisch & Veganistisch', "71": "Uit eten", "72": "Overig", "73": 'Beroemdheden', "74": 'Stripboeken & Tekenfilms', "75": 'Tijdschriften', "76": 'Horoscoop', "77": 'Films', "78": 'Muziek', "79": 'Radio', "80": 'Televisie', "81": 'Overig', "82": 'Videocameras', "83": 
"Camera\'s", "84": "Telefoon & Abonnementen", "85": 'Spelcomputers', "86": 'Audio', "87": "Handhelds & Smartphones", "88": "Televisies", "89": 'Overig', "90": 'Hardware', "91": 'Software', "92": 'Internet', "93": 'Programmeren & Design', "94": 'Veiligheid', "95": 'Overig', "96": 'Carrière & Werk', "97": 'Financiën', "98": 'Huren & Vastgoed', "100": 'Belasting', "101": 'Overig', "103": 'Ondernemen', "104": "Religie", "106": 'Vrachtwagens & Transport', "107": 'Treinen', "108": "Taal", "109": "Spiritualiteit", "110": 'Ruimtevaart & Sterrenkunde', "111": "Besturingssystemen", "113": "Voetbal", "114": 'Wielrennen', "115": 'Tennis', "116": "Formule 1", "117": "Hockey", "118": 'Schaatsen', "119": 'Overig', "120": 'Vragen aan mannen', "121": 'Vragen aan vrouwen', "122": "GoeieVraag.nl", "123": "Ouderschap & Opvoeding", "124": 'Wetgeving', "125": 'Wintersport', "126": 'Feestdagen', "127": "Sinterklaas", "128": 'Kerst', "129": 'Pasen', "130": "Andere feestdagen", "131": "Seksualiteit", "132": "Aardrijkskunde & Aardwetenschappen", "133": "Energie", "134": "Verzekeringen", "135": "Sparen & Beleggen", "136": "Overig", "137": "Alternatieve geneeswijzen", "138": "Gebit", "139": 'Psyche', "140": 'Voeding', "141": "Ziekten", "142": "Optiek", "143": "Lichamelijke klachten", "144": "Mannelijk lichaam", "145": "Vrouwelijk lichaam", "146": "Overig", "147": 'Kinderen', "148": "Reparaties", "149": "Banden", "150": "Brom- & Snorfietsen", "151": "Weblogs", "152": 'Webshops', "156": "Meteorologie", "157": "Lenen", "158": "Sparen", "159": "Hypotheek", "160": "Economie", "161": "Techniek", "162": "Landbouw & Veeteelt", "163": "Medicijnen", "164": "Huid-, haarverzorging en Make-up", "165": "Fotografie", "166": 'Winkels', "167": "Huishoudelijke apparaten", "168": "Sociale Media" } #Search for documents containing the query and make a list of categories def searchFAC(term): text = '' quest = es.get(index="zoekmachine", doc_type="question", id=5)['_source'] results = [] cat = c.Counter() res = 
es.search(index="zoekmachine", doc_type="question", body={"query": {"match": {"Question": term}}}) print("%d documents found" % res['hits']['total']) for doc in res['hits']['hits']: url = "https://www.startpagina.nl/v/vraag/" + doc['_source']['Number'] + "/" title = doc['_source']['Question'] text = text + title results.append(wrapinresults(url, title)) caterogynr= doc['_source']['Category'] cat[switcherGetCat.get(caterogynr, "None")] += 1 return cat, res #Search for documents containing the query and filter out unwanted categories def searchFAC2(term, catNr): text = '' quest = es.get(index="zoekmachine", doc_type="question", id=5)['_source'] results = [] res = es.search(index="zoekmachine", doc_type="question", body={"query": {"match": {"Question": term}}}) print("%d documents found" % res['hits']['total']) for doc in res['hits']['hits']: url = "https://www.startpagina.nl/v/vraag/" + doc['_source']['Number'] + "/" title = doc['_source']['Question'] text = text + title if int(doc['_source']['Category']) == catNr: results.append(wrapinresults(url, title)) makeWordCloud(term, text) docscount = len(results) if docscount < 10: for x in range(0, (10 - docscount)): results.append("") wrapStringInHTMLWindows(term, "serp", results, "body") # makeWordCloud(text) wrapStringInHTMLWindows(term, "serp", results, "body") #Show the user all possible categories and make them choose one def getUserCat(cat): for c in cat: print(c, "("+str(cat[c])+")") print("\nPlease, type categorie:") dingenCat = sys.stdin.readline() # dingenCat = dingenCat catNr = "geen categorie" for number in switcherGetCat: if switcherGetCat.get(number) + "\n" == dingenCat: catNr = int(number) if not isinstance(catNr, int): print("Category doesn't exist\n") getUserCat(cat) else: return catNr #Let user choose in which category to search for query def faceted(): print("What are you looking for? 
(FAC)") dingen = sys.stdin.readline() cat, res = searchFAC(dingen) catNr = getUserCat(cat) res = searchFAC2(dingen, catNr) #Let user choose which type of search to execute, then forward to that search def getUserInput(): print("Welcome to our search engine in terminal") print("What kind of search would you like?") print("Input a for simple search") print("Input b for advanced search") print("Input c for faceted search") choise = sys.stdin.readline().split()[0] #Forward to right search method switcher = { 'a': simple, "b": advanced, "c": faceted, } switcher.get(choise, invalid)() ##Put information into the cloud #json_data=open(file_directory) #counter = 0 #for line in json_data: # data = json.loads(line) # if('index' not in data.keys()) : # counter += 1 # Test if succesfull in writing # resp = es.index(index='zoekmachine', doc_type='question', id=counter, body=data) # print(counter) #Get the searchterm from the user getUserInput()
y = range(len(timeline))
random_line_split
zoekmachine.py
import sys import json from elasticsearch import Elasticsearch from wordcloud import WordCloud from datetime import datetime import collections as c import numpy as np import matplotlib.pyplot as plt #Connect to elastic cloud HOST = 'https://558a85fda07b4bbea2ff78028c0f63a1.europe-west1.gcp.cloud.es.io:9243/' es = Elasticsearch(hosts=[HOST], http_auth=('elastic','UgYdqcaJjmsYDaU5HfNwlDyL'), verify_certs=False) file_directory = "Data1.json" query={ "query": { "match": { "content": "term" } } } #Makes the results into a SERP def wrapStringInHTMLWindows(term, program, list_results, body): import datetime from webbrowser import open_new_tab now = datetime.datetime.today().strftime("%Y%m%d-%H%M%S") filename = program + '.html' f = open(filename,'w') # Fill in blocks with results from queries wrapper = """<html> <head> <title>%s output - %s</title> </head> <body> <div style="text-align:center"> <h1>Your top 10 searchresults for: %s</h1> </div> <div style="background:grey"> <p> %s </p> </div> <div style="background:grey"> <p> %s </p> </div> <div style="background:grey"> <p> %s </p> </div> <div style="background:grey"> <p> %s </p> </div> <div style="background:grey"> <p> %s </p> </div> <div style="background:grey"> <p> %s </p> </div> <div style="background:grey"> <p> %s </p> </div> <div style="background:grey"> <p> %s </p> </div> <div style="background:grey"> <p> %s </p> </div> <div style="background:grey"> <p> %s </p> </div> </body> </html>""" #Write it into html file and open once finished whole = wrapper % (program, now, term, list_results[0], list_results[1], list_results[2], list_results[3], list_results[4], list_results[5], list_results[6], list_results[7], list_results[8], list_results[9]) f.write(whole) f.close() open_new_tab(filename) #Wrap results into hrefs to place in result blocks def wrapinresults(url, question): wrapper = """<p><a href=%s>%s</a></p>""" whole = wrapper % (url, question) return(whole) #Makes wordcloud, removing stopwords, showing image def 
makeWordCloud(term ,text): text = text.lower() text = text.replace(term, '') text = text.replace('de', '') text = text.replace('het', '') text = text.replace('een', '') text = text.replace('zijn', '') wordcloud = WordCloud().generate(text) image = wordcloud.to_image() image.show() #Shows in which year the most hits have been made def showTimeLine(res): from datetime import datetime import matplotlib.pyplot as plt timeline = [] #finds date of creatio of the questions for doc in res['hits']['hits']: date = doc['_source']['Date'] date = date.split(" ", 1)[0] datetime_object = datetime.strptime(date, '%Y-%m-%d').date() timeline.append(datetime_object) #creates figure x = timeline y = range(len(timeline)) fig, ax = plt.subplots() ax.bar(timeline, y, width = 10) fig.autofmt_xdate() plt.show() #Performs actual search def search(term, filter): text = '' quest = es.get(index="zoekmachine", doc_type="question", id=5)['_source'] #Connect to cloud and find results results = [] res = es.search(index="zoekmachine", doc_type="question", body={"query": {"match": {"Question": term}}}) print("%d documents found" % res['hits']['total']) #Create the results to print on SERP for doc in res['hits']['hits']: url = "https://www.startpagina.nl/v/vraag/" + doc['_source']['Number'] + "/" title = doc['_source']['Question'] text = text + title date = doc['_source']['Date'] date = date.split("-", 1)[0] if int(date) >= int(filter): results.append(wrapinresults(url, title)) #Show all work to user makeWordCloud(term, text) docscount = len(results) if docscount < 10: for x in range(0, (10 - docscount)): results.append("") wrapStringInHTMLWindows(term, "serp", results, "body") showTimeLine(res) #Ask user for query def simple():
#Ask user for query in which time setting def advanced(): print("What are you looking for? (ADV)") term = sys.stdin.readline() print("From what year on?") year = sys.stdin.readline() search(term, year) #Return that the choice was not available def invalid(): print("Not a valid choice") #List of all categories in data set switcherGetCat = { "1" : "Alle categorieën", "2" : "Persoon & Gezondheid", "3" : "Maatschappij", "4" : "Financiën & Werk", "5" : "Vervoer", "6" : "Computers & Internet", "7" : "Elektronica", "8" : "Entertainment & Muziek", "9" : "Eten & Drinken", "10": "Sport, Spel & Recreatie", '11': "Huis & Tuin", "12": "Wetenschap", "13": "Vakantie & Reizen", "14": "Kunst & Cultuur", "15": "Overig", '16': "Biologie", "17": "Wiskunde", "18": "Natuur- en scheikunde", "19": "Psychologie", "20": "Sociale wetenschap", "21": "Overig", "22": "Auto's", "23" : "Vliegtuigen", "24" : "Boten", "25" : "Openbaar vervoer", "26" : "Motorfietsen", "27" : "Fietsen", "28": 'Overig', "29": 'Spellen', "30": 'Computergames', "31": "Hobby\'s", "32": "Sporten", "33": "Overig", "34": 'Caraïben', "35": "Noord-Amerika", "36": 'Zuid-Amerika', "37": "Afrika", "38": 'Antarctica', "39": 'Azië', '40': 'Europa', "41": "Midden-Amerika", '42': "Midden-Oosten", "43": "Oceanië", "44": "Overig", "45": "Overig", "46": "Mode & Accessoires", "47": "Familie & Relatie", "48": "Gezondheid", "49": "Zwangerschap", "50": 'Onderwijs', "51": 'Milieu', "52": "Politiek & Overheid", "53": "Samenleving", "54": 'Overig', "55": "Boeken & Auteurs", "56": "Genealogie", "57": 'Geschiedenis', "58": 'Filosofie', "59": 'Poëzie', "60": "Beeldende kunst", "61": "Overig", "62": "Schoonmaken & Wassen", "63": 'Interieur', "64": 'Doe-Het-Zelf', "65": 'Tuin', "66": 'Huisdieren', "67": 'Overig', "68": 'Dranken', "69": 'Koken & Recepten', "70": 'Vegetarisch & Veganistisch', "71": "Uit eten", "72": "Overig", "73": 'Beroemdheden', "74": 'Stripboeken & Tekenfilms', "75": 'Tijdschriften', "76": 'Horoscoop', "77": 'Films', "78": 
'Muziek', "79": 'Radio', "80": 'Televisie', "81": 'Overig', "82": 'Videocameras', "83": "Camera\'s", "84": "Telefoon & Abonnementen", "85": 'Spelcomputers', "86": 'Audio', "87": "Handhelds & Smartphones", "88": "Televisies", "89": 'Overig', "90": 'Hardware', "91": 'Software', "92": 'Internet', "93": 'Programmeren & Design', "94": 'Veiligheid', "95": 'Overig', "96": 'Carrière & Werk', "97": 'Financiën', "98": 'Huren & Vastgoed', "100": 'Belasting', "101": 'Overig', "103": 'Ondernemen', "104": "Religie", "106": 'Vrachtwagens & Transport', "107": 'Treinen', "108": "Taal", "109": "Spiritualiteit", "110": 'Ruimtevaart & Sterrenkunde', "111": "Besturingssystemen", "113": "Voetbal", "114": 'Wielrennen', "115": 'Tennis', "116": "Formule 1", "117": "Hockey", "118": 'Schaatsen', "119": 'Overig', "120": 'Vragen aan mannen', "121": 'Vragen aan vrouwen', "122": "GoeieVraag.nl", "123": "Ouderschap & Opvoeding", "124": 'Wetgeving', "125": 'Wintersport', "126": 'Feestdagen', "127": "Sinterklaas", "128": 'Kerst', "129": 'Pasen', "130": "Andere feestdagen", "131": "Seksualiteit", "132": "Aardrijkskunde & Aardwetenschappen", "133": "Energie", "134": "Verzekeringen", "135": "Sparen & Beleggen", "136": "Overig", "137": "Alternatieve geneeswijzen", "138": "Gebit", "139": 'Psyche', "140": 'Voeding', "141": "Ziekten", "142": "Optiek", "143": "Lichamelijke klachten", "144": "Mannelijk lichaam", "145": "Vrouwelijk lichaam", "146": "Overig", "147": 'Kinderen', "148": "Reparaties", "149": "Banden", "150": "Brom- & Snorfietsen", "151": "Weblogs", "152": 'Webshops', "156": "Meteorologie", "157": "Lenen", "158": "Sparen", "159": "Hypotheek", "160": "Economie", "161": "Techniek", "162": "Landbouw & Veeteelt", "163": "Medicijnen", "164": "Huid-, haarverzorging en Make-up", "165": "Fotografie", "166": 'Winkels', "167": "Huishoudelijke apparaten", "168": "Sociale Media" } #Search for documents containing the query and make a list of categories def searchFAC(term): text = '' quest = 
es.get(index="zoekmachine", doc_type="question", id=5)['_source'] results = [] cat = c.Counter() res = es.search(index="zoekmachine", doc_type="question", body={"query": {"match": {"Question": term}}}) print("%d documents found" % res['hits']['total']) for doc in res['hits']['hits']: url = "https://www.startpagina.nl/v/vraag/" + doc['_source']['Number'] + "/" title = doc['_source']['Question'] text = text + title results.append(wrapinresults(url, title)) caterogynr= doc['_source']['Category'] cat[switcherGetCat.get(caterogynr, "None")] += 1 return cat, res #Search for documents containing the query and filter out unwanted categories def searchFAC2(term, catNr): text = '' quest = es.get(index="zoekmachine", doc_type="question", id=5)['_source'] results = [] res = es.search(index="zoekmachine", doc_type="question", body={"query": {"match": {"Question": term}}}) print("%d documents found" % res['hits']['total']) for doc in res['hits']['hits']: url = "https://www.startpagina.nl/v/vraag/" + doc['_source']['Number'] + "/" title = doc['_source']['Question'] text = text + title if int(doc['_source']['Category']) == catNr: results.append(wrapinresults(url, title)) makeWordCloud(term, text) docscount = len(results) if docscount < 10: for x in range(0, (10 - docscount)): results.append("") wrapStringInHTMLWindows(term, "serp", results, "body") # makeWordCloud(text) wrapStringInHTMLWindows(term, "serp", results, "body") #Show the user all possible categories and make them choose one def getUserCat(cat): for c in cat: print(c, "("+str(cat[c])+")") print("\nPlease, type categorie:") dingenCat = sys.stdin.readline() # dingenCat = dingenCat catNr = "geen categorie" for number in switcherGetCat: if switcherGetCat.get(number) + "\n" == dingenCat: catNr = int(number) if not isinstance(catNr, int): print("Category doesn't exist\n") getUserCat(cat) else: return catNr #Let user choose in which category to search for query def faceted(): print("What are you looking for? 
(FAC)") dingen = sys.stdin.readline() cat, res = searchFAC(dingen) catNr = getUserCat(cat) res = searchFAC2(dingen, catNr) #Let user choose which type of search to execute, then forward to that search def getUserInput(): print("Welcome to our search engine in terminal") print("What kind of search would you like?") print("Input a for simple search") print("Input b for advanced search") print("Input c for faceted search") choise = sys.stdin.readline().split()[0] #Forward to right search method switcher = { 'a': simple, "b": advanced, "c": faceted, } switcher.get(choise, invalid)() ##Put information into the cloud #json_data=open(file_directory) #counter = 0 #for line in json_data: # data = json.loads(line) # if('index' not in data.keys()) : # counter += 1 # Test if succesfull in writing # resp = es.index(index='zoekmachine', doc_type='question', id=counter, body=data) # print(counter) #Get the searchterm from the user getUserInput()
print("What are you looking for?") term = sys.stdin.readline() search(term, 0000)
identifier_body
bayes.py
# coding=utf-8 from numpy import * """ 1 函数loadDataSet()创建了一些实验样本。 该函数返回的第一个变量是进行词条切分后的文档集合, 这些文档来自斑点犬爱好者留言 板。 这些留言文本被切分成一系列的词条集合, 标点符号从文本中去掉, loadDataSet( )函数返回的第二个 变量是一个类别标签的集合。 这里有两类, 侮辱性和非侮辱性。 这些文本的类别由人工标注, 这些标注信息用于训练程序以便自动检测侮辱性留言 """ def loadDataSet(): postingList=[['my', 'dog', 'has', 'flea', 'problems', 'help', 'please'], ['maybe', 'not', 'take', 'him', 'to', 'dog', 'park', 'stupid'], ['my', 'dalmation', 'is', 'so', 'cute', 'I', 'love', 'him'], ['stop', 'posting', 'stupid', 'worthless', 'garbage'], ['mr', 'licks', 'ate', 'my', 'steak', 'how', 'to', 'stop', 'him'], ['quit', 'buying', 'worthless', 'dog', 'food', 'stupid']] classVec = [0,1,0,1,0,1] #1 is abusive, 0 not return postingList,classVec """ 2 函数createVocabList()会创建一个包含在所有文档中出现的不重复词的列表, 为此使用了Python的set数据类型。 将词条列表输给 set构造函数, set就会返回一个不重复词表。 首先, 创建一个空集合❶, 然后将每篇文档返回的新词集合添加到该集合中❷。 操作符|用于 求两个集合的并集, 这也是一个按位或(OR) 操作符在数学符号表示上, 按位或操作与集合求并操作使用相同记号 """ def createVocabList(dataSet): vocabSet = set([]) #create empty set for document in dataSet: vocabSet = vocabSet | set(document) #union of the two sets return list(vocabSet) """ 3 获得词汇表后, 便可以使用函数setOfWords2Vec(), 该函数的输入参数为词汇表及某个文档, 输出的是文档向量, 向量的每一元素为1或0, 分别表示词汇表中的单词在输入文档中是否出现。 函数首先创建一个和词汇表等长的向量, 并将其元素都设置为0❸。 接着, 遍历文档中的所有单词, 如果出现了词汇表中的单词, 则将输出的文档向量中的对应值 设为1。 一切都顺利的话, 就不需要检查某个词是否还在vocabList中, 后边可能会用到这一操作 """ def setOfWords2Vec(vocabList, inputSet): returnVec = [0]*len(vocabList) for word in inputSet: if word in vocabList: returnVec[vocabList.index(word)] = 1 return returnVec """ 先看看前三个函数的执行效果 """ def test1(): listPosts, listClass = loadDataSet() mVocabList = createVocabList(listPosts) print mVocabList setOfWords2Vec(mVocabList, listPosts[0]) # test1() # ---------------------------训练算法: 从词向量计算概率-------------------------- """ 函数中的输入参数为文档矩阵trainMatrix, 以及由每篇文档类别标签所构成的向量trainCategory。 首先, 计算文档属于侮辱性文档 (class=1) 的概率, 即P(1)。 因为这是一个二类分类问题, 所以可以通过1-P(1)得到P(0)。 对于多于两类的分类问题, 则需要对代码稍加 修改。计算p(wi|c1) 和p(wi|c0), 需要初始化程序中的分子变量和分母变量❶。 由于w中元素如此众多, 因此可以使用NumPy数组快速计算这些 值。 
上述程序中的分母变量是一个元素个数等于词汇表大小的NumPy数组。 在for循环中, 要遍历训练集trainMatrix中的所有文档。 一旦某 个词语(侮辱性或正常词语) 在某一文档中出现, 则该词对应的个数(p1Num或者p0Num) 就加1, 而且在所有的文档中, 该文档的总词数也 相应加1❷。 对于两个类别都要进行同样的计算处理。最后, 对每个元素除以该类别中的总词数❸。 利用NumPy可以很好实现, 用一个数组除以浮点数即可, 若使用常规的Python列表则难以完成这种任务, 读者可以自己尝试一下。 最后, 函数会返回两个向量和一个概率。 """ def trainNB0(trainMatrix,trainCategory): numTrainDocs = len(trainMatrix) numWords = len(trainMatrix[0]) pAbusive = sum(trainCategory)/float(numTrainDocs) # 1. (以下两行) 初始化概率 """ 利用贝叶斯分类器对文档进行分类时, 要计算多个概率的乘积以获得文档属于某个类别的概率, 即计算p(w0|1)p(w1|1)p(w2|1)。 如果其中一 个概率值为0, 那么最后的乘积也为0。 为降低这种影响, 可以将所有词的出现数初始化为1, 并将分母初始化为2 """ p0Num = ones(numWords); p1Num = ones(numWords) # change to ones() p0Denom = 2.0; p1Denom = 2.0 # change to 2.0 for i in range(numTrainDocs): if trainCategory[i] == 1: # 2. (以下两行) 向量相加 p1Num += trainMatrix[i] p1Denom += sum(trainMatrix[i]) else: p0Num += trainMatrix[i] p0Denom += sum(trainMatrix[i]) # 3. 对每个元素做除法 """ 另一个遇到的问题是下溢出, 这是由于太多很小的数相乘造成的。 当计算乘积p(w0|ci)p(w1|ci)p(w2|ci)...p(wN|ci)时, 由于大部分因子都 非常小, 所以程序会下溢出或者得到不正确的答案。 (读者可以用Python尝试相乘许多很小的数, 最后四舍五入后会得到0。 ) 一种解决 办法是对乘积取自然对数。 在代数中有ln(a*b) = ln(a)+ln(b), 于是通过求对数可以避免下溢出或者浮点数舍入导致的错误。 同时, 采用 自然对数进行处理不会有任何损失。 图4-4给出函数f(x)与ln(f(x))的曲线。 检查这两条曲线, 就会发现它们在相同区域内同时增加或者减 少, 并且在相同点上取到极值。 它们的取值虽然不同, 但不影响最终结果。 通过修改return前的两行代码, 将上述做法用到分类器中: """ p1Vect = log(p1Num/p1Denom) # change to log() p0Vect = log(p0Num/p0Denom) # change to log() return p0Vect,p1Vect,pAbusive def test2(): listPosts, listClass = loadDataSet() # 构建了一个包含所有词的列表mVocabList mVocabList = createVocabList(listPosts) setOfWords2Vec(mVocabList, listPosts[0]) trainMat = [] for postinDoc in listPosts: temp = setOfWords2Vec(mVocabList, postinDoc) trainMat.append(temp) # 文档属于侮辱类的概率pAb p0v, p1v, pAb = trainNB0(trainMat, listClass) print pAb """ 接下来看一看在给定文档类别条件下词汇表中单词的出现概率, 看看是否正确。 词汇表中的第一个词是cute, 其在类别0中出现1次, 而在类别1中 从未出现。 对应的条件概率分别为0.041 666 67与0.0。 该计算是正确的。 我们找找所有概率中的最大值, 该值出现在P(1)数组第26个下标位 置, 大小为0.157 894 74。 在myVocabList的第26个下标位置上可以查到该单词是stupid。 这意味着stupid是最能表征类别1(侮辱性文档类)的单词。 """ """ 
代码有4个输入: 要分类的向量vec2Classify以及使用函数trainNB0()计算得到的三个概率。 使用NumPy的数组来计算两个 向量相乘的结果❶。 这里的相乘是指对应元素相乘, 即先将两个向量中的第1个元素相乘, 然后将第2个元素相乘, 以此类推。 接下来将词汇表 中所有词的对应值相加, 然后将该值加到类别的对数概率上。 最后, 比较类别的概率返回大概率对应的类别标签。 这一切不是很难, 对吧? """ def classifyNB(vec2Classify, p0Vec, p1Vec, pClass1): # 1. 元素相乘 分类计算的核心 p1 = sum(vec2Classify * p1Vec) + log(pClass1) # element-wise mult p0 = sum(vec2Classify * p0Vec) + log(1.0 - pClass1) if p1 > p0: return 1 else: return 0 """ 对文本做一些修改, 看看分类器会输出什么结果。 这个例子非常简单,但是它展示了朴素贝叶斯分类器的工作原理。 接下来,我们会对代码做些修改, 使分类器工作得更好。 函数setOfWords2Vec()稍加修改, 修改后的函数称为bagOfWords2Vec() -----------------------------------准备数据: 文档词袋模型--------------------------------------- """ def bagOfWords2VecMN(vocabList, inputSet): returnVec = [0]*len(vocabList) for word in inputSet: if word in vocabList: # todo 这个词的操作 returnVec[vocabList.index(word)] += 1 return returnVec """ 函数是一个便利函数(convenience function) , 该函数封装所有操作, 以节省输入 """ def testingNB(): listOPosts,listClasses = loadDataSet() myVocabList = createVocabList(listOPosts) trainMat=[] for postinDoc in listOPosts: trainMat.append(setOfWords2Vec(myVocabList, postinDoc)) p0V,p1V,pAb = trainNB0(array(trainMat),array(listClasses)) testEntry = ['love', 'my', 'dalmation'] thisDoc = array(setOfWords2Vec(myVocabList, testEntry)) print (testEntry,'classified as: ',classifyNB(thisDoc,p0V,p1V,pAb)) testEntry = ['stupid', 'garbage'] thisDoc = array(setOfWords2Vec(myVocabList, testEntry)) print (testEntry,'classified as: ',classifyNB(thisDoc,p0V,p1V,pAb)) # testingNB() # -----------------------------------使用朴素贝叶斯过滤垃圾邮件---------------------------- """ 准备数据: 切分文本 可以看到, 切分的结果不错, 但是标点符号也被当成了词的一部分。 可以使用正则表示式来切分句子, 其中分隔符是除单词、 数字外的任意字符串 """ def textParse(bigString): #input is big string, #output is word list import re listOfTokens = re.split(r'\W*', bigString) return [tok.lower() for tok in listOfTokens if len(tok) > 2] """" 函数spamTest()对贝叶斯垃圾邮件分类器进行自动化处理。 导入文件夹spam与ham下的文本文件, 并将它们解析为词列表❶。 接下来 构建一个测试集与一个训练集, 两个集合中的邮件都是随机选出的。 本例中共有50封电子邮件, 并不是很多, 其中的10封电子邮件被随机选择 为测试集。 
分类器所需要的概率计算只利用训练集中的文档来完成。Python变量trainingSet是一个整数列表, 其中的值从0到49。 接下 来, 随机选择其中10个文件❷。 选择出的数字所对应的文档被添加到测试集, 同时也将其从训练集中剔除。 这种随机选择数据的一部分作为训 练集, 而剩余部分作为测试集的过程称为留存交叉验证(hold-out crossvalidation) 。 假定现在只完成了一次迭代, 那么为了更精确地估计分类 器的错误率, 就应该进行多次迭代后求出平均错误率。接下来的for循环遍历训练集的所有文档, 对每封邮件基于词汇表并使 用setOfWords2Vec()函数来构建词向量。 这些词在traindNB0()函数中用于计算分类所需的概率。 然后遍历测试集, 对其中每封电子邮件进 行分类❸。 如果邮件分类错误, 则错误数加1, 最后给出总的错误百分比 """ def spamTest(): docList=[]; classList = []; fullText =[] for i in range(1,26): wordList = textParse(open('email/spam/%d.txt' % i).read()) docList.append(wordList) fullText.extend(wordList) classList.append(1) wordList = textParse(open('email/ham/%d.txt' % i).read()) docList.append(wordList) fullText.extend(wordList) classList.append(0) vocabList = createVocabList(docList)# create vocabulary
for i in range(10): randIndex = int(random.uniform(0,len(trainingSet))) testSet.append(trainingSet[randIndex]) # todo del这个操作步骤 del(trainingSet[randIndex]) trainMat=[]; trainClasses = [] # (以下四行) 对测试集分类 for docIndex in trainingSet:#train the classifier (get probs) trainNB0 trainMat.append(bagOfWords2VecMN(vocabList, docList[docIndex])) trainClasses.append(classList[docIndex]) # todo 入参出参的计算方法 p0V,p1V,pSpam = trainNB0(array(trainMat),array(trainClasses)) errorCount = 0 for docIndex in testSet: #classify the remaining items wordVector = bagOfWords2VecMN(vocabList, docList[docIndex]) if classifyNB(array(wordVector),p0V,p1V,pSpam) != classList[docIndex]: errorCount += 1 print ("classification error",docList[docIndex]) print ('the error rate is: ',float(errorCount)/len(testSet)) #return vocabList,fullText # ------------------------自动化处理---------------------------------------- spamTest() # ----------------------------- 4.7. 示例: 使用朴素贝叶斯分类器从个人广告中获取区域倾向--------------------------- """" RSS源分类器及高频词去除函数 函数calcMostFreq() ❶。 该函数遍历词汇表中的每个词并统计它在文本中出现的次数, 然后根据出现次数从高到低对词典进行排序, 最后返回排序最高的30个单词。 你很快就会明白这个函数的重要性 以下四行) 计算出现频率 """ def calcMostFreq(vocabList,fullText): import operator freqDict = {} for token in vocabList: freqDict[token]=fullText.count(token) sortedFreq = sorted(freqDict.iteritems(), key=operator.itemgetter(1), reverse=True) return sortedFreq[:30] """" 函数localWords()使用两个RSS源作为参数。 RSS源要在函数外 导入, 这样做的原因是RSS源会随时间而改变。 如果想通过改变代码来 比较程序执行的差异, 就应该使用相同的输入。 重新加载RSS源就会得 到新的数据, 但很难确定是代码原因还是输入原因导致输出结果的改 变。 函数localWords()与程序清单4-5中的spamTest()函数几乎相 同, 区别在于这里访问的是RSS源❷而不是文件。 然后调用函 数calcMostFreq()来获得排序最高的30个单词并随后将它们移除❸。 函数的剩余部分与spamTest()基本类似, 不同的是最后一行要返回下 面要用到的值。 """ def localWords(feed1,feed0): import feedparser docList=[]; classList = []; fullText =[] minLen = min(len(feed1['entries']),len(feed0['entries'])) for i in range(minLen): # 2 每次访问一条RSS源 wordList = textParse(feed1['entries'][i]['summary']) docList.append(wordList) fullText.extend(wordList) classList.append(1) #NY is class 1 
wordList = textParse(feed0['entries'][i]['summary']) docList.append(wordList) fullText.extend(wordList) classList.append(0) # (以下四行) 去掉出现次数最高的那些词 vocabList = createVocabList(docList)#create vocabulary top30Words = calcMostFreq(vocabList,fullText) #remove top 30 words for pairW in top30Words: if pairW[0] in vocabList: vocabList.remove(pairW[0]) trainingSet = range(2*minLen); testSet=[] #create test set for i in range(20): randIndex = int(random.uniform(0,len(trainingSet))) testSet.append(trainingSet[randIndex]) del(trainingSet[randIndex]) trainMat=[]; trainClasses = [] for docIndex in trainingSet:#train the classifier (get probs) trainNB0 trainMat.append(bagOfWords2VecMN(vocabList, docList[docIndex])) trainClasses.append(classList[docIndex]) p0V,p1V,pSpam = trainNB0(array(trainMat),array(trainClasses)) errorCount = 0 for docIndex in testSet: #classify the remaining items wordVector = bagOfWords2VecMN(vocabList, docList[docIndex]) if classifyNB(array(wordVector),p0V,p1V,pSpam) != classList[docIndex]: errorCount += 1 print ('the error rate is: ',float(errorCount)/len(testSet)) return vocabList,p0V,p1V def getTopWords(ny,sf): import operator vocabList,p0V,p1V=localWords(ny,sf) topNY=[]; topSF=[] for i in range(len(p0V)): if p0V[i] > -6.0 : topSF.append((vocabList[i],p0V[i])) if p1V[i] > -6.0 : topNY.append((vocabList[i],p1V[i])) sortedSF = sorted(topSF, key=lambda pair: pair[1], reverse=True) print ("SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**") for item in sortedSF: print (item[0]) sortedNY = sorted(topNY, key=lambda pair: pair[1], reverse=True) print ("NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**") for item in sortedNY: print (item[0])
trainingSet = range(50); testSet=[] # create test set # (以下四行) 随机构建训练集
random_line_split
bayes.py
# coding=utf-8 from numpy import * """ 1 函数loadDataSet()创建了一些实验样本。 该函数返回的第一个变量是进行词条切分后的文档集合, 这些文档来自斑点犬爱好者留言 板。 这些留言文本被切分成一系列的词条集合, 标点符号从文本中去掉, loadDataSet( )函数返回的第二个 变量是一个类别标签的集合。 这里有两类, 侮辱性和非侮辱性。 这些文本的类别由人工标注, 这些标注信息用于训练程序以便自动检测侮辱性留言 """ def loadDataSet(): postingList=[['my', 'dog', 'has', 'flea', 'problems', 'help', 'please'], ['maybe', 'not', 'take', 'him', 'to', 'dog', 'park', 'stupid'], ['my', 'dalmation', 'is', 'so', 'cute', 'I', 'love', 'him'], ['stop', 'posting', 'stupid', 'worthless', 'garbage'], ['mr', 'licks', 'ate', 'my', 'steak', 'how', 'to', 'stop', 'him'], ['quit', 'buying', 'worthless', 'dog', 'food', 'stupid']] classVec = [0,1,0,1,0,1] #1 is abusive, 0 not return postingList,classVec """ 2 函数createVocabList()会创建一个包含在所有文档中出现的不重复词的列表, 为此使用了Python的set数据类型。 将词条列表输给 set构造函数, set就会返回一个不重复词表。 首先, 创建一个空集合❶, 然后将每篇文档返回的新词集合添加到该集合中❷。 操作符|用于 求两个集合的并集, 这也是一个按位或(OR) 操作符在数学符号表示上, 按位或操作与集合求并操作使用相同记号 """ def createVocabList(dataSet): vocabSet = set([]) #create empty set for document in dataSet: vocabSet = vocabSet | set(document) #union of the two sets return list(vocabSet) """ 3 获得词汇表后, 便可以使用函数setOfWords2Vec(), 该函数的输入参数为词汇表及某个文档, 输出的是文档向量, 向量的每一元素为1或0, 分别表示词汇表中的单词在输入文档中是否出现。 函数首先创建一个和词汇表等长的向量, 并将其元素都设置为0❸。 接着, 遍历文档中的所有单词, 如果出现了词汇表中的单词, 则将输出的文档向量中的对应值 设为1。 一切都顺利的话, 就不需要检查某个词是否还在vocabList中, 后边可能会用到这一操作 """ def setOfWords2Vec(vocabList, inputSet): returnVec = [0]*len(vocabList) for word in inputSet: if word in vocabList: returnVec[vocabList.index(word)] = 1 return returnVec """ 先看看前三个函数的执行效果 """ def test1(): listPosts, listClass = loadDataSet() mVocabList = createVocabList(listPosts) print mVocabList setOfWords2Vec(mVocabList, listPosts[0]) # test1() # ---------------------------训练算法: 从词向量计算概率-------------------------- """ 函数中的输入参数为文档矩阵trainMatrix, 以及由每篇文档类别标签所构成的向量trainCategory。 首先, 计算文档属于侮辱性文档 (class=1) 的概率, 即P(1)。 因为这是一个二类分类问题, 所以可以通过1-P(1)得到P(0)。 对于多于两类的分类问题, 则需要对代码稍加 修改。计算p(wi|c1) 和p(wi|c0), 需要初始化程序中的分子变量和分母变量❶。 由于w中元素如此众多, 因此可以使用NumPy数组快速计算这些 值。 
上述程序中的分母变量是一个元素个数等于词汇表大小的NumPy数组。 在for循环中, 要遍历训练集trainMatrix中的所有文档。 一旦某 个词语(侮辱性或正常词语) 在某一文档中出现, 则该词对应的个数(p1Num或者p0Num) 就加1, 而且在所有的文档中, 该文档的总词数也 相应加1❷。 对于两个类别都要进行同样的计算处理。最后, 对每个元素除以该类别中的总词数❸。 利用NumPy可以很好实现, 用一个数组除以浮点数即可, 若使用常规的Python列表则难以完成这种任务, 读者可以自己尝试一下。 最后, 函数会返回两个向量和一个概率。 """ def trainNB0(trainMatrix,trainCategory): numTrainDocs = len(trainMatrix) numWords = len(trainMatrix[0]) pAbusive = sum(trainCategory)/float(numTrainDocs) # 1. (以下两行) 初始化概率 """ 利用贝叶斯分类器对文档进行分类时, 要计算多个概率的乘积以获得文档属于某个类别的概率, 即计算p(w0|1)p(w1|1)p(w2|1)。 如果其中一 个概率值为0, 那么最后的乘积也为0。 为降低这种影响, 可以将所有词的出现数初始化为1, 并将分母初始化为2 """ p0Num = ones(numWords); p1Num = ones(numWords) # change to ones() p0Denom = 2.0; p1Denom = 2.0 # change to 2.0 for i in range(numTrainDocs): if trainCategory[i] == 1: # 2. (以下两行) 向量相加 p1Num += trainMatrix[i] p1Denom += sum(trainMatrix[i]) else: p0Num += trainMatrix[i] p0Denom += sum(trainMatrix[i]) # 3. 对每个元素做除法 """ 另一个遇到的问题是下溢出, 这是由于太多很小的数相乘造成的。 当计算乘积p(w0|ci)p(w1|ci)p(w2|ci)...p(wN|ci)时, 由于大部分因子都 非常小, 所以程序会下溢出或者得到不正确的答案。 (读者可以用Python尝试相乘许多很小的数, 最后四舍五入后会得到0。 ) 一种解决 办法是对乘积取自然对数。 在代数中有ln(a*b) = ln(a)+ln(b), 于是通过求对数可以避免下溢出或者浮点数舍入导致的错误。 同时, 采用 自然对数进行处理不会有任何损失。 图4-4给出函数f(x)与ln(f(x))的曲线。 检查这两条曲线, 就会发现它们在相同区域内同时增加或者减 少, 并且在相同点上取到极值。 它们的取值虽然不同, 但不影响最终结果。 通过修改return前的两行代码, 将上述做法用到分类器中: """ p1Vect = log(p1Num/p1Denom) # change to log() p0Vect = log(p0Num/p0Denom) # change to log() return p0Vect,p1Vect,pAbusive def test2(): listPosts, listClass = loadDataSet() # 构建了一个包含所有词的列表mVocabList mVocabList = createVocabList(listPosts) setOfWords2Vec(mVocabList, listPosts[0]) trainMat = [] for postinDoc in listPosts: temp = setOfWords2Vec(mVocabList, postinDoc) trainMat.append(temp) # 文档属于侮辱类的概率pAb p0v, p1v, pAb = trainNB0(trainMat, listClass) print pAb """ 接下来看一看在给定文档类别条件下词汇表中单词的出现概率, 看看是否正确。 词汇表中的第一个词是cute, 其在类别0中出现1次, 而在类别1中 从未出现。 对应的条件概率分别为0.041 666 67与0.0。 该计算是正确的。 我们找找所有概率中的最大值, 该值出现在P(1)数组第26个下标位 置, 大小为0.157 894 74。 在myVocabList的第26个下标位置上可以查到该单词是stupid。 这意味着stupid是最能表征类别1(侮辱性文档类)的单词。 """ """ 
代码有4个输入: 要分类的向量vec2Classify以及使用函数trainNB0()计算得到的三个概率。 使用NumPy的数组来计算两个 向量相乘的结果❶。 这里的相乘是指对应元素相乘, 即先将两个向量中的第1个元素相乘, 然后将第2个元素相乘, 以此类推。 接下来将词汇表 中所有词的对应值相加, 然后将该值加到类别的对数概率上。 最后, 比较类别的概率返回大概率对应的类别标签。 这一切不是很难, 对吧? """ def classifyNB(vec2Classify, p0Vec, p1Vec, pClass1): # 1. 元素相乘 分类计算的核心 p1 = sum(vec2Classify * p1Vec) + log(pClass1) # element-wise mult p0 = sum(vec2Classify * p0Vec) + log(1.0 - pClass1) if p1 > p0: return 1 else: return 0 """ 对文本做一些修改, 看看分类器会输出什么结果。 这个例子非常简单,但是它展示了朴素贝叶斯分类器的工作原理。 接下来,我们会对代码做些修改, 使分类器工作得更好。 函数setOfWords2Vec()稍加修改, 修改后的函数称为bagOfWords2Vec() -----------------------------------准备数据: 文档词袋模型--------------------------------------- """ def bagOfWords2VecMN(vocabList, inputSet): returnVec = [0]*len(vocabList) for word in inputSet: if word in vocabList: # todo 这个词的操作 returnVec[vocabList.index(word)] += 1 return returnVec """ 函数是一个便利函数(convenience function) , 该函数封装所有操作, 以节省输入 """ def testingNB(): listOPosts,listClasses = loadDataSet() myVocabList = createVocabList(listOPosts) trainMat=[] for postinDoc in listOPosts: trainMat.append(setOfWords2Vec(myVocabList, postinDoc)) p0V,p1V,pAb = trainNB0(array(trainMat),array(listClasses)) testEntry = ['love', 'my', 'dalmation'] thisDoc = array(setOfWords2Vec(myVocabList, testEntry)) print (testEntry,'classified as: ',classifyNB(thisDoc,p0V,p1V,pAb)) testEntry = ['stupid', 'garbage'] thisDoc = array(setOfWords2Vec(myVocabList, testEntry)) print (testEntry,'classified as: ',classifyNB(thisDoc,p0V,p1V,pAb)) # testingNB() # -----------------------------------使用朴素贝叶斯过滤垃圾邮件---------------------------- """ 准备数据: 切分文本 可以看到, 切分的结果不错, 但是标点符号也被当成了词的一部分。 可以使用正则表示式来切分句子, 其中分隔符是除单词、 数字外的任意字符串 """ def textParse(bigString): #input is big string, #output is word list import re listOfTokens = re.split(r'\W*', bigString) return [tok.lower() for tok in listOfTokens if len(tok) > 2] """" 函数spamTest()对贝叶斯垃圾邮件分类器进行自动化处理。 导入文件夹spam与ham下的文本文件, 并将它们解析为词列表❶。 接下来 构建一个测试集与一个训练集, 两个集合中的邮件都是随机选出的。 本例中共有50封电子邮件, 并不是很多, 其中的10封电子邮件被随机选择 为测试集。 
分类器所需要的概率计算只利用训练集中的文档来完成。Python变量trainingSet是一个整数列表, 其中的值从0到49。 接下 来, 随机选择其中10个文件❷。 选择出的数字所对应的文档被添加到测试集, 同时也将其从训练集中剔除。 这种随机选择数据的一部分作为训 练集, 而剩余部分作为测试集的过程称为留存交叉验证(hold-out crossvalidation) 。 假定现在只完成了一次迭代, 那么为了更精确地估计分类 器的错误率, 就应该进行多次迭代后求出平均错误率。接下来的for循环遍历训练集的所有文档, 对每封邮件基于词汇表并使 用setOfWords2Vec()函数来构建词向量。 这些词在traindNB0()函数中用于计算分类所需的概率。 然后遍历测试集, 对其中每封电子邮件进 行分类❸。 如果邮件分类错误, 则错误数加1, 最后给出总的错误百分比 """ def spamTest(): docList=[]; classList = []; fullText =[] for i in range(1,26): wordList = textParse(open('email/spam/%d.txt' % i).read()) docList.append(wordList) fullText.extend(wordList) classList.append(1) wordList = textParse(open('email/ham/%d.txt' % i).read()) docList.append(wordList) fullText.extend(wordList) classList.append(0) vocabList = createVocabList(docList)# create vocabulary trainingSet = range(50); testSet=[] # create test set # (以下四行) 随机构建训练集 for i in range(10): randIndex = int(random.uniform(0,len(trainingSet))) testSet.append(trainingSet[randIndex]) # todo del这个操作步骤 del(trainingSet[randIndex]) trainMat=[]; trainClasses = [] # (以下四行) 对测试集分类 for docIndex in trainingSet:#train the classifier (get probs) trainNB0 trainMat.append(bagOfWords2VecMN(vocabList, docList[docIndex])) trainClasses.append(classList[docIndex]) # todo 入参出参的计算方法 p0V,p1V,pSpam = trainNB0(array(trainMat),array(trainClasses)) errorCount = 0 for docIndex in testSet: #classify the remaining items wordVector = bagOfWords2VecMN(vocabList, docList[docIndex]) if classifyNB(array(wordVector),p0V,p1V,pSpam) != classList[docIndex]: errorCount += 1 print ("classification error",docList[docIndex]) print ('the error rate is: ',float(errorCount)/len(testSet)) #return vocabList,fullText # ------------------------自动化处理---------------------------------------- spamTest() # ----------------------------- 4.7. 
示例: 使用朴素贝叶斯分类器从个人广告中获取区域倾向--------------------------- """" RSS源分类器及高频词去除函数 函数calcMostFreq() ❶。 该函数遍历词汇表中的每个词并统计它在文本中出现的次数, 然后根据出现次数从高到低对词典进行排序, 最后返回排序最高的30个单词。 你很快就会明白这个函数的重要性 以下四行) 计算出现频率 """ def calcMostFreq(vocabList,fullText): import operator freqDict = {} for token in vocabList: freqDict[token]=fullText.count(token) sortedFreq = sorted(freqDict.iteritems(), key=operator.itemgetter(1), reverse=True) return sortedFreq[:30] """" 函数localWords()使用两个RSS源作为参数。 RSS源要在函数外 导入, 这样做的原因是RSS源会随时间而改变。 如果想通过改变代码来 比较程序执行的差异, 就应该使用相同的输入。 重新加载RSS源就会得 到新的数据, 但很难确定是代码原因还是输入原因导致输出结果的改 变。 函数localWords()与程序清单4-5中的spamTest()函数几乎相 同, 区别在于这里访问的是RSS源❷而不是文件。 然后调用函 数calcMostFreq()来获得排序最高的30个单词并随后将它们移除❸。 函数的剩余部分与spamTest()基本类似, 不同的是最后一行要返回下 面要用到的值。 """ def localWords(feed1,feed0): import feedparser docList=[]; classList = []; fullText =[] minLen = min(len(feed1['entries']),len(feed0['entries'])) for i in range(minLen): # 2 每次访问一条RSS源 wordList = textParse(feed1['entries'][i]['summary']) docList.append(wordList) fullText.extend(wordList) classList.append(1) #NY is class 1 wordList = textParse(feed0['entries'][i]['summary']) docList.append(wordList) fullText.extend(wordList) classList.app
# (以下四行) 去掉出现次数最高的那些词 vocabList = createVocabList(docList)#create vocabulary top30Words = calcMostFreq(vocabList,fullText) #remove top 30 words for pairW in top30Words: if pairW[0] in vocabList: vocabList.remove(pairW[0]) trainingSet = range(2*minLen); testSet=[] #create test set for i in range(20): randIndex = int(random.uniform(0,len(trainingSet))) testSet.append(trainingSet[randIndex]) del(trainingSet[randIndex]) trainMat=[]; trainClasses = [] for docIndex in trainingSet:#train the classifier (get probs) trainNB0 trainMat.append(bagOfWords2VecMN(vocabList, docList[docIndex])) trainClasses.append(classList[docIndex]) p0V,p1V,pSpam = trainNB0(array(trainMat),array(trainClasses)) errorCount = 0 for docIndex in testSet: #classify the remaining items wordVector = bagOfWords2VecMN(vocabList, docList[docIndex]) if classifyNB(array(wordVector),p0V,p1V,pSpam) != classList[docIndex]: errorCount += 1 print ('the error rate is: ',float(errorCount)/len(testSet)) return vocabList,p0V,p1V def getTopWords(ny,sf): import operator vocabList,p0V,p1V=localWords(ny,sf) topNY=[]; topSF=[] for i in range(len(p0V)): if p0V[i] > -6.0 : topSF.append((vocabList[i],p0V[i])) if p1V[i] > -6.0 : topNY.append((vocabList[i],p1V[i])) sortedSF = sorted(topSF, key=lambda pair: pair[1], reverse=True) print ("SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**") for item in sortedSF: print (item[0]) sortedNY = sorted(topNY, key=lambda pair: pair[1], reverse=True) print ("NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**") for item in sortedNY: print (item[0])
end(0)
identifier_name
bayes.py
# coding=utf-8
"""Naive Bayes text classifiers, adapted from "Machine Learning in Action" ch. 4.

Contains three demos built on the same set-of-words / bag-of-words model:
  * an abusive-post detector on a tiny hand-labelled corpus,
  * a spam filter with hold-out cross-validation over email/spam and email/ham,
  * a regional-word extractor over two RSS feeds.
"""
# Star-import kept from the original so callers that rely on numpy names
# re-exported from this module (array, ones, log, random, ...) keep working.
from numpy import *
import operator
import re


def loadDataSet():
    """Return a toy corpus of tokenized posts and their labels.

    Returns:
        postingList: list of token lists (pre-split message-board posts,
            punctuation already removed).
        classVec: parallel labels; 1 = abusive, 0 = not abusive
            (hand-labelled, used to train the classifier).
    """
    postingList = [['my', 'dog', 'has', 'flea', 'problems', 'help', 'please'],
                   ['maybe', 'not', 'take', 'him', 'to', 'dog', 'park', 'stupid'],
                   ['my', 'dalmation', 'is', 'so', 'cute', 'I', 'love', 'him'],
                   ['stop', 'posting', 'stupid', 'worthless', 'garbage'],
                   ['mr', 'licks', 'ate', 'my', 'steak', 'how', 'to', 'stop', 'him'],
                   ['quit', 'buying', 'worthless', 'dog', 'food', 'stupid']]
    classVec = [0, 1, 0, 1, 0, 1]  # 1 is abusive, 0 not
    return postingList, classVec


def createVocabList(dataSet):
    """Return the list of unique words appearing in any document of dataSet."""
    vocabSet = set()
    for document in dataSet:
        # Set union (|) accumulates every distinct word exactly once.
        vocabSet = vocabSet | set(document)
    return list(vocabSet)


def setOfWords2Vec(vocabList, inputSet):
    """Set-of-words model: 0/1 vector marking which vocab words occur in inputSet.

    Words of inputSet that are absent from vocabList are silently ignored.
    """
    returnVec = [0] * len(vocabList)
    for word in inputSet:
        if word in vocabList:
            returnVec[vocabList.index(word)] = 1
    return returnVec


def test1():
    """Smoke-test loadDataSet / createVocabList / setOfWords2Vec."""
    listPosts, listClass = loadDataSet()
    mVocabList = createVocabList(listPosts)
    print(mVocabList)
    setOfWords2Vec(mVocabList, listPosts[0])


def trainNB0(trainMatrix, trainCategory):
    """Train a two-class naive Bayes model.

    Args:
        trainMatrix: document matrix — one word-indicator/count row per document.
        trainCategory: per-document labels (0 or 1).

    Returns:
        (p0Vect, p1Vect, pAbusive): per-word LOG conditional probability
        vectors for class 0 and class 1, and the prior P(class == 1).
        Two-class only: P(0) is obtained as 1 - pAbusive.
    """
    numTrainDocs = len(trainMatrix)
    numWords = len(trainMatrix[0])
    pAbusive = sum(trainCategory) / float(numTrainDocs)
    # Laplace smoothing: counts start at 1 and denominators at 2 so a word
    # never seen in one class cannot zero out the whole product
    # p(w0|c)p(w1|c)...p(wN|c).
    p0Num = ones(numWords); p1Num = ones(numWords)
    p0Denom = 2.0; p1Denom = 2.0
    for i in range(numTrainDocs):
        if trainCategory[i] == 1:
            p1Num += trainMatrix[i]          # vector add: per-word counts
            p1Denom += sum(trainMatrix[i])   # total words seen in this class
        else:
            p0Num += trainMatrix[i]
            p0Denom += sum(trainMatrix[i])
    # Work in log space: multiplying many tiny probabilities underflows, and
    # ln(a*b) = ln(a) + ln(b) preserves the argmax, so classification is
    # unaffected.
    p1Vect = log(p1Num / p1Denom)
    p0Vect = log(p0Num / p0Denom)
    return p0Vect, p1Vect, pAbusive


def test2():
    """Train on the toy corpus and print the abusive-class prior."""
    listPosts, listClass = loadDataSet()
    mVocabList = createVocabList(listPosts)
    trainMat = []
    for postinDoc in listPosts:
        trainMat.append(setOfWords2Vec(mVocabList, postinDoc))
    p0v, p1v, pAb = trainNB0(trainMat, listClass)
    print(pAb)


def classifyNB(vec2Classify, p0Vec, p1Vec, pClass1):
    """Classify one word vector; return 1 or 0, whichever log-posterior is larger.

    Args:
        vec2Classify: word vector of the document to classify.
        p0Vec, p1Vec, pClass1: the three values produced by trainNB0().
    """
    # Element-wise multiply selects the log-probabilities of the words that
    # actually occur; summing them adds the logs, then the class log-prior.
    p1 = sum(vec2Classify * p1Vec) + log(pClass1)
    p0 = sum(vec2Classify * p0Vec) + log(1.0 - pClass1)
    if p1 > p0:
        return 1
    else:
        return 0


def bagOfWords2VecMN(vocabList, inputSet):
    """Bag-of-words model: like setOfWords2Vec but counts repeated occurrences."""
    returnVec = [0] * len(vocabList)
    for word in inputSet:
        if word in vocabList:
            returnVec[vocabList.index(word)] += 1
    return returnVec


def testingNB():
    """Convenience end-to-end demo: train on the toy corpus, classify two posts."""
    listOPosts, listClasses = loadDataSet()
    myVocabList = createVocabList(listOPosts)
    trainMat = []
    for postinDoc in listOPosts:
        trainMat.append(setOfWords2Vec(myVocabList, postinDoc))
    p0V, p1V, pAb = trainNB0(array(trainMat), array(listClasses))
    testEntry = ['love', 'my', 'dalmation']
    thisDoc = array(setOfWords2Vec(myVocabList, testEntry))
    print(testEntry, 'classified as: ', classifyNB(thisDoc, p0V, p1V, pAb))
    testEntry = ['stupid', 'garbage']
    thisDoc = array(setOfWords2Vec(myVocabList, testEntry))
    print(testEntry, 'classified as: ', classifyNB(thisDoc, p0V, p1V, pAb))


def textParse(bigString):
    """Tokenize a raw string: split on non-word runs, keep lowercased tokens
    longer than 2 characters (drops punctuation and short stop-ish words).

    Fix: the original pattern r'\\W*' can match the empty string, which
    produces spurious empty tokens and is rejected by re.split in modern
    Python; r'\\W+' is the intended separator.
    """
    listOfTokens = re.split(r'\W+', bigString)
    return [tok.lower() for tok in listOfTokens if len(tok) > 2]


def spamTest():
    """Hold-out cross-validation of the spam filter.

    Reads 25 spam and 25 ham messages from email/spam/ and email/ham/,
    randomly holds out 10 documents as a test set ("hold-out cross
    validation"), trains trainNB0 on the rest, and prints the error rate.
    For a stable estimate this should be run several times and averaged.
    """
    docList = []; classList = []; fullText = []
    for i in range(1, 26):
        wordList = textParse(open('email/spam/%d.txt' % i).read())
        docList.append(wordList)
        fullText.extend(wordList)
        classList.append(1)
        wordList = textParse(open('email/ham/%d.txt' % i).read())
        docList.append(wordList)
        fullText.extend(wordList)
        classList.append(0)
    vocabList = createVocabList(docList)  # create vocabulary
    # range() is lazy in Python 3; materialize so del works below.
    trainingSet = list(range(50)); testSet = []
    # Randomly move 10 document indices from the training set to the test set.
    for i in range(10):
        randIndex = int(random.uniform(0, len(trainingSet)))
        testSet.append(trainingSet[randIndex])
        del trainingSet[randIndex]
    trainMat = []; trainClasses = []
    for docIndex in trainingSet:  # train the classifier (get probs) via trainNB0
        trainMat.append(bagOfWords2VecMN(vocabList, docList[docIndex]))
        trainClasses.append(classList[docIndex])
    p0V, p1V, pSpam = trainNB0(array(trainMat), array(trainClasses))
    errorCount = 0
    for docIndex in testSet:  # classify the held-out items
        wordVector = bagOfWords2VecMN(vocabList, docList[docIndex])
        if classifyNB(array(wordVector), p0V, p1V, pSpam) != classList[docIndex]:
            errorCount += 1
            print("classification error", docList[docIndex])
    print('the error rate is: ', float(errorCount) / len(testSet))
    # return vocabList, fullText


def calcMostFreq(vocabList, fullText):
    """Return the 30 (word, count) pairs occurring most often in fullText,
    sorted by count, descending. Used to strip high-frequency (stop) words."""
    freqDict = {}
    for token in vocabList:
        freqDict[token] = fullText.count(token)
    sortedFreq = sorted(freqDict.items(), key=operator.itemgetter(1), reverse=True)
    return sortedFreq[:30]


def localWords(feed1, feed0):
    """Train/test a classifier that distinguishes two pre-fetched RSS feeds.

    Mirrors spamTest() but reads entry summaries from the feed dicts instead
    of files, and strips the 30 most frequent words (mostly stop words that
    swamp the class-specific signal) before training.

    Args:
        feed1, feed0: parsed feed dicts (e.g. from feedparser) with an
            'entries' list whose items carry a 'summary' string.
            feed1 entries are labelled class 1 (NY), feed0 class 0.

    Returns:
        (vocabList, p0V, p1V) for use by getTopWords().
    """
    docList = []; classList = []; fullText = []
    minLen = min(len(feed1['entries']), len(feed0['entries']))
    for i in range(minLen):
        # One entry from each feed per iteration keeps the classes balanced.
        wordList = textParse(feed1['entries'][i]['summary'])
        docList.append(wordList)
        fullText.extend(wordList)
        classList.append(1)  # NY is class 1
        wordList = textParse(feed0['entries'][i]['summary'])
        docList.append(wordList)
        fullText.extend(wordList)
        classList.append(0)
    vocabList = createVocabList(docList)  # create vocabulary
    # Remove the 30 most frequent words before training.
    top30Words = calcMostFreq(vocabList, fullText)
    for pairW in top30Words:
        if pairW[0] in vocabList:
            vocabList.remove(pairW[0])
    trainingSet = list(range(2 * minLen)); testSet = []  # create test set
    for i in range(20):
        randIndex = int(random.uniform(0, len(trainingSet)))
        testSet.append(trainingSet[randIndex])
        del trainingSet[randIndex]
    trainMat = []; trainClasses = []
    for docIndex in trainingSet:  # train the classifier (get probs) via trainNB0
        trainMat.append(bagOfWords2VecMN(vocabList, docList[docIndex]))
        trainClasses.append(classList[docIndex])
    p0V, p1V, pSpam = trainNB0(array(trainMat), array(trainClasses))
    errorCount = 0
    for docIndex in testSet:  # classify the remaining items
        wordVector = bagOfWords2VecMN(vocabList, docList[docIndex])
        if classifyNB(array(wordVector), p0V, p1V, pSpam) != classList[docIndex]:
            errorCount += 1
    print('the error rate is: ', float(errorCount) / len(testSet))
    return vocabList, p0V, p1V


def getTopWords(ny, sf):
    """Print the most indicative words (log-probability > -6.0) for each feed,
    most probable first."""
    vocabList, p0V, p1V = localWords(ny, sf)
    topNY = []; topSF = []
    for i in range(len(p0V)):
        if p0V[i] > -6.0:
            topSF.append((vocabList[i], p0V[i]))
        if p1V[i] > -6.0:
            topNY.append((vocabList[i], p1V[i]))
    sortedSF = sorted(topSF, key=lambda pair: pair[1], reverse=True)
    print("SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**")
    for item in sortedSF:
        print(item[0])
    sortedNY = sorted(topNY, key=lambda pair: pair[1], reverse=True)
    print("NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**")
    for item in sortedNY:
        print(item[0])


if __name__ == "__main__":
    # Previously spamTest() ran unconditionally at import time, which required
    # the email/ data files just to import this module; guarded so the demo
    # only runs when the file is executed as a script.
    spamTest()
conditional_block