file_name large_stringlengths 4 140 | prefix large_stringlengths 0 12.1k | suffix large_stringlengths 0 12k | middle large_stringlengths 0 7.51k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
main.rs | }
fn mul_sub(x: Self, y: Self, z: Self) -> Self {
Self(unsafe { _mm256_fmsub_ps(x.0, y.0, z.0) })
}
}
impl Add for WideF32 {
type Output = Self;
fn add(self, other: Self) -> Self {
Self(unsafe { _mm256_add_ps(self.0, other.0) })
}
}
impl AddAssign for WideF32 {
fn add_assign(&mut self, other: Self) {
self.0 = unsafe { _mm256_add_ps(self.0, other.0) }
}
}
impl BitAnd for WideF32 {
type Output = Self;
fn bitand(self, other: Self) -> Self {
Self(unsafe { _mm256_and_ps(self.0, other.0) })
}
}
impl BitOr for WideF32 {
type Output = Self;
fn bitor(self, other: Self) -> Self |
}
impl Div for WideF32 {
type Output = Self;
fn div(self, other: Self) -> Self {
Self(unsafe { _mm256_div_ps(self.0, other.0) })
}
}
impl Sub for WideF32 {
type Output = Self;
fn sub(self, other: Self) -> Self {
Self(unsafe { _mm256_sub_ps(self.0, other.0) })
}
}
impl Mul for WideF32 {
type Output = Self;
fn mul(self, other: Self) -> Self {
Self(unsafe { _mm256_mul_ps(self.0, other.0) })
}
}
impl MulAssign for WideF32 {
fn mul_assign(&mut self, other: Self) {
self.0 = unsafe { _mm256_mul_ps(self.0, other.0) }
}
}
impl Neg for WideF32 {
type Output = Self;
fn neg(self) -> Self {
Self(unsafe { _mm256_xor_ps(self.0, _mm256_set1_ps(-0.0)) })
}
}
#[derive(Debug, Copy, Clone, PartialEq)]
struct V3(f32, f32, f32);
impl V3 {
fn dot(self, other: V3) -> f32 {
self.0 * other.0 + self.1 * other.1 + self.2 * other.2
}
fn cross(self, other: V3) -> V3 {
V3(
self.1 * other.2 - self.2 * other.1,
self.2 * other.0 - self.0 * other.2,
self.0 * other.1 - self.1 * other.0,
)
}
fn normalize(self) -> V3 {
self * (1.0 / self.len())
}
fn reflect(self, normal: V3) -> V3 {
self - normal * self.dot(normal) * 2.0
}
fn len(self) -> f32 {
self.dot(self).sqrt()
}
fn is_unit_vector(self) -> bool {
(self.dot(self) - 1.0).abs() < TOLERANCE
}
}
impl Add for V3 {
type Output = Self;
fn add(self, other: Self) -> Self {
Self(self.0 + other.0, self.1 + other.1, self.2 + other.2)
}
}
impl Add<f32> for V3 {
type Output = Self;
fn add(self, rhs: f32) -> Self {
Self(self.0 + rhs, self.1 + rhs, self.2 + rhs)
}
}
impl AddAssign for V3 {
fn add_assign(&mut self, other: Self) {
*self = Self(self.0 + other.0, self.1 + other.1, self.2 + other.2)
}
}
impl Div<f32> for V3 {
type Output = Self;
fn div(self, rhs: f32) -> Self {
Self(self.0 / rhs, self.1 / rhs, self.2 / rhs)
}
}
impl Sub for V3 {
type Output = Self;
fn sub(self, other: Self) -> Self {
Self(self.0 - other.0, self.1 - other.1, self.2 - other.2)
}
}
impl Sub<f32> for V3 {
type Output = Self;
fn sub(self, rhs: f32) -> Self {
Self(self.0 - rhs, self.1 - rhs, self.2 - rhs)
}
}
impl Mul for V3 {
type Output = Self;
fn mul(self, other: Self) -> Self {
Self(self.0 * other.0, self.1 * other.1, self.2 * other.2)
}
}
impl Mul<f32> for V3 {
type Output = Self;
fn mul(self, rhs: f32) -> Self {
Self(self.0 * rhs, self.1 * rhs, self.2 * rhs)
}
}
impl MulAssign<f32> for V3 {
fn mul_assign(&mut self, rhs: f32) {
*self = Self(self.0 * rhs, self.1 * rhs, self.2 * rhs)
}
}
impl MulAssign for V3 {
fn mul_assign(&mut self, other: Self) {
*self = Self(self.0 * other.0, self.1 * other.1, self.2 * other.2)
}
}
#[derive(Debug)]
struct Camera {
origin: V3,
x: V3,
y: V3,
z: V3,
film_lower_left: V3,
film_width: f32,
film_height: f32,
}
impl Camera {
fn new(look_from: V3, look_at: V3, aspect_ratio: f32) -> Camera {
assert!(aspect_ratio > 1.0, "width must be greater than height");
let origin = look_from - look_at;
let z = origin.normalize();
let x = V3(0.0, 0.0, 1.0).cross(z).normalize();
let y = z.cross(x).normalize();
let film_height = 1.0;
let film_width = film_height * aspect_ratio;
let film_lower_left = origin - z - y * 0.5 * film_height - x * 0.5 * film_width;
Camera {
origin,
x,
y,
z,
film_lower_left,
film_width,
film_height,
}
}
}
#[derive(Debug, Clone, PartialEq)]
enum MaterialType {
Diffuse,
Specular,
}
#[derive(Debug, Clone, PartialEq)]
struct Material {
emit_color: V3,
reflect_color: V3,
t: MaterialType,
}
struct Sphere {
p: V3,
rsqrd: f32,
m: Material,
}
impl Sphere {
fn new(p: V3, r: f32, m: Material) -> Sphere {
Sphere { p, rsqrd: r * r, m }
}
}
struct Spheres {
xs: Vec<f32>,
ys: Vec<f32>,
zs: Vec<f32>,
rsqrds: Vec<f32>,
mats: Vec<Material>,
}
impl Spheres {
fn new(spheres: Vec<Sphere>) -> Self {
let len = (spheres.len() + SIMD_WIDTH - 1) / SIMD_WIDTH * SIMD_WIDTH;
let mut me = Self {
xs: Vec::with_capacity(len),
ys: Vec::with_capacity(len),
zs: Vec::with_capacity(len),
rsqrds: Vec::with_capacity(len),
mats: Vec::with_capacity(len),
};
for s in spheres {
me.xs.push(s.p.0);
me.ys.push(s.p.1);
me.zs.push(s.p.2);
me.rsqrds.push(s.rsqrd);
me.mats.push(s.m);
}
// pad everything out to the simd width
me.xs.resize(len, 0.0);
me.ys.resize(len, 0.0);
me.zs.resize(len, 0.0);
me.rsqrds.resize(len, 0.0);
let default_mat = Material {
emit_color: V3(0.0, 0.0, 0.0),
reflect_color: V3(0.0, 0.0, 0.0),
t: MaterialType::Specular,
};
me.mats.resize(len, default_mat);
me
}
fn len(&self) -> usize {
self.xs.len()
}
}
// https://entropymine.com/imageworsener/srgbformula/
fn linear_to_srgb(x: f32) -> f32 {
if x < 0.0 {
0.0
} else if x > | {
Self(unsafe { _mm256_or_ps(self.0, other.0) })
} | identifier_body |
main.rs | }
fn mul_sub(x: Self, y: Self, z: Self) -> Self {
Self(unsafe { _mm256_fmsub_ps(x.0, y.0, z.0) })
}
}
impl Add for WideF32 {
type Output = Self;
fn add(self, other: Self) -> Self {
Self(unsafe { _mm256_add_ps(self.0, other.0) })
}
}
impl AddAssign for WideF32 {
fn add_assign(&mut self, other: Self) {
self.0 = unsafe { _mm256_add_ps(self.0, other.0) }
}
}
impl BitAnd for WideF32 {
type Output = Self;
fn bitand(self, other: Self) -> Self {
Self(unsafe { _mm256_and_ps(self.0, other.0) })
}
}
impl BitOr for WideF32 {
type Output = Self;
fn bitor(self, other: Self) -> Self {
Self(unsafe { _mm256_or_ps(self.0, other.0) })
}
}
impl Div for WideF32 {
type Output = Self;
fn div(self, other: Self) -> Self {
Self(unsafe { _mm256_div_ps(self.0, other.0) })
}
}
impl Sub for WideF32 {
type Output = Self;
fn sub(self, other: Self) -> Self {
Self(unsafe { _mm256_sub_ps(self.0, other.0) })
}
}
impl Mul for WideF32 {
type Output = Self;
fn mul(self, other: Self) -> Self {
Self(unsafe { _mm256_mul_ps(self.0, other.0) })
}
}
impl MulAssign for WideF32 {
fn mul_assign(&mut self, other: Self) {
self.0 = unsafe { _mm256_mul_ps(self.0, other.0) }
}
}
impl Neg for WideF32 {
type Output = Self;
fn neg(self) -> Self {
Self(unsafe { _mm256_xor_ps(self.0, _mm256_set1_ps(-0.0)) })
}
}
#[derive(Debug, Copy, Clone, PartialEq)]
struct V3(f32, f32, f32);
impl V3 {
fn dot(self, other: V3) -> f32 {
self.0 * other.0 + self.1 * other.1 + self.2 * other.2
}
fn cross(self, other: V3) -> V3 {
V3(
self.1 * other.2 - self.2 * other.1,
self.2 * other.0 - self.0 * other.2,
self.0 * other.1 - self.1 * other.0,
)
}
fn normalize(self) -> V3 {
self * (1.0 / self.len())
}
fn reflect(self, normal: V3) -> V3 {
self - normal * self.dot(normal) * 2.0
}
fn len(self) -> f32 {
self.dot(self).sqrt()
}
fn is_unit_vector(self) -> bool {
(self.dot(self) - 1.0).abs() < TOLERANCE
}
}
impl Add for V3 {
type Output = Self;
fn add(self, other: Self) -> Self {
Self(self.0 + other.0, self.1 + other.1, self.2 + other.2)
}
}
impl Add<f32> for V3 {
type Output = Self;
fn add(self, rhs: f32) -> Self {
Self(self.0 + rhs, self.1 + rhs, self.2 + rhs)
}
}
impl AddAssign for V3 {
fn add_assign(&mut self, other: Self) {
*self = Self(self.0 + other.0, self.1 + other.1, self.2 + other.2)
}
}
impl Div<f32> for V3 {
type Output = Self;
fn div(self, rhs: f32) -> Self {
Self(self.0 / rhs, self.1 / rhs, self.2 / rhs)
}
}
impl Sub for V3 {
type Output = Self;
fn sub(self, other: Self) -> Self {
Self(self.0 - other.0, self.1 - other.1, self.2 - other.2)
}
}
impl Sub<f32> for V3 {
type Output = Self;
fn sub(self, rhs: f32) -> Self {
Self(self.0 - rhs, self.1 - rhs, self.2 - rhs)
}
}
impl Mul for V3 {
type Output = Self;
fn mul(self, other: Self) -> Self {
Self(self.0 * other.0, self.1 * other.1, self.2 * other.2)
}
}
impl Mul<f32> for V3 {
type Output = Self;
fn mul(self, rhs: f32) -> Self {
Self(self.0 * rhs, self.1 * rhs, self.2 * rhs)
}
}
impl MulAssign<f32> for V3 {
fn mul_assign(&mut self, rhs: f32) {
*self = Self(self.0 * rhs, self.1 * rhs, self.2 * rhs)
}
}
impl MulAssign for V3 {
fn mul_assign(&mut self, other: Self) {
*self = Self(self.0 * other.0, self.1 * other.1, self.2 * other.2)
}
}
#[derive(Debug)]
struct Camera {
origin: V3,
x: V3,
y: V3,
z: V3,
film_lower_left: V3,
film_width: f32,
film_height: f32,
}
impl Camera {
fn new(look_from: V3, look_at: V3, aspect_ratio: f32) -> Camera {
assert!(aspect_ratio > 1.0, "width must be greater than height");
let origin = look_from - look_at;
let z = origin.normalize();
let x = V3(0.0, 0.0, 1.0).cross(z).normalize();
let y = z.cross(x).normalize();
let film_height = 1.0;
let film_width = film_height * aspect_ratio;
let film_lower_left = origin - z - y * 0.5 * film_height - x * 0.5 * film_width;
Camera {
origin,
x,
y,
z,
film_lower_left,
film_width,
film_height,
}
}
}
#[derive(Debug, Clone, PartialEq)]
enum MaterialType {
Diffuse,
Specular,
}
#[derive(Debug, Clone, PartialEq)]
struct Material {
emit_color: V3,
reflect_color: V3,
t: MaterialType,
}
struct Sphere {
p: V3,
rsqrd: f32,
m: Material,
}
impl Sphere {
fn new(p: V3, r: f32, m: Material) -> Sphere {
Sphere { p, rsqrd: r * r, m }
}
}
struct | {
xs: Vec<f32>,
ys: Vec<f32>,
zs: Vec<f32>,
rsqrds: Vec<f32>,
mats: Vec<Material>,
}
impl Spheres {
fn new(spheres: Vec<Sphere>) -> Self {
let len = (spheres.len() + SIMD_WIDTH - 1) / SIMD_WIDTH * SIMD_WIDTH;
let mut me = Self {
xs: Vec::with_capacity(len),
ys: Vec::with_capacity(len),
zs: Vec::with_capacity(len),
rsqrds: Vec::with_capacity(len),
mats: Vec::with_capacity(len),
};
for s in spheres {
me.xs.push(s.p.0);
me.ys.push(s.p.1);
me.zs.push(s.p.2);
me.rsqrds.push(s.rsqrd);
me.mats.push(s.m);
}
// pad everything out to the simd width
me.xs.resize(len, 0.0);
me.ys.resize(len, 0.0);
me.zs.resize(len, 0.0);
me.rsqrds.resize(len, 0.0);
let default_mat = Material {
emit_color: V3(0.0, 0.0, 0.0),
reflect_color: V3(0.0, 0.0, 0.0),
t: MaterialType::Specular,
};
me.mats.resize(len, default_mat);
me
}
fn len(&self) -> usize {
self.xs.len()
}
}
// https://entropymine.com/imageworsener/srgbformula/
fn linear_to_srgb(x: f32) -> f32 {
if x < 0.0 {
0.0
} else if x > 1 | Spheres | identifier_name |
main.rs | unsafe { _mm256_movemask_ps(self.0) }
}
fn hmin(&self) -> f32 {
unsafe {
/*
This can be done entirely in avx with permute2f128, but that is allegedly very
slow on AMD prior to Zen2 (and is anecdotally slower on my Intels as well)
initial m256
1 2 3 4 5 6 7 8
extract half, cast the other half down to m128, min
1 2 3 4
5 6 7 8
= 1 2 3 4
permute backwards, min
1 2 3 4
4 3 2 1
= 1 2 2 1
unpack hi, min
1 2 2 1
1 1 2 2
= 1 1 2 1
*/
let x = self.0;
let y = _mm256_extractf128_ps(x, 1);
let m1 = _mm_min_ps(_mm256_castps256_ps128(x), y);
let m2 = _mm_permute_ps(m1, 27);
let m2 = _mm_min_ps(m1, m2);
let m3 = _mm_unpackhi_ps(m2, m2);
let m = _mm_min_ps(m2, m3);
_mm_cvtss_f32(m)
}
}
fn splat(x: f32) -> Self {
Self(unsafe { _mm256_set1_ps(x) })
}
fn select(x: WideF32, y: WideF32, mask: WideF32) -> Self {
Self(unsafe { _mm256_blendv_ps(x.0, y.0, mask.0) })
}
fn sqrt(&self) -> Self {
Self(unsafe { _mm256_sqrt_ps(self.0) })
}
#[allow(dead_code)]
fn rsqrt(&self) -> Self {
Self(unsafe { _mm256_rsqrt_ps(self.0) })
}
// approximate a sqrt using an inverse sqrt and one iteration of Newton-Raphson
// https://en.wikipedia.org/wiki/Methods_of_computing_square_roots#Iterative_methods_for_reciprocal_square_roots
// Note: on many architectures this is significantly faster than the sqrt intrinsic. But this is not so on Skylake
// for this program: approx_sqrt crowds the ports with additional mul/subs so is net slower
#[allow(dead_code)]
fn approx_sqrt(self) -> Self {
let half = WideF32::splat(0.5);
let three = WideF32::splat(3.0);
let rsqrt = self.rsqrt();
let x = three - rsqrt * rsqrt * self;
rsqrt * half * x * self
}
fn gt(&self, other: Self) -> Self {
Self(unsafe { _mm256_cmp_ps(self.0, other.0, _CMP_GT_OQ) })
}
fn lt(&self, other: Self) -> Self {
Self(unsafe { _mm256_cmp_ps(self.0, other.0, _CMP_LT_OQ) })
}
fn eq(&self, other: Self) -> Self {
Self(unsafe { _mm256_cmp_ps(self.0, other.0, _CMP_EQ_OQ) })
}
fn mul_add(x: Self, y: Self, z: Self) -> Self {
Self(unsafe { _mm256_fmadd_ps(x.0, y.0, z.0) })
}
fn mul_sub(x: Self, y: Self, z: Self) -> Self {
Self(unsafe { _mm256_fmsub_ps(x.0, y.0, z.0) })
}
}
impl Add for WideF32 {
type Output = Self;
fn add(self, other: Self) -> Self {
Self(unsafe { _mm256_add_ps(self.0, other.0) })
}
}
impl AddAssign for WideF32 {
fn add_assign(&mut self, other: Self) {
self.0 = unsafe { _mm256_add_ps(self.0, other.0) }
}
}
impl BitAnd for WideF32 {
type Output = Self;
fn bitand(self, other: Self) -> Self {
Self(unsafe { _mm256_and_ps(self.0, other.0) })
}
}
impl BitOr for WideF32 {
type Output = Self;
fn bitor(self, other: Self) -> Self {
Self(unsafe { _mm256_or_ps(self.0, other.0) })
}
}
impl Div for WideF32 {
type Output = Self;
fn div(self, other: Self) -> Self {
Self(unsafe { _mm256_div_ps(self.0, other.0) })
}
}
impl Sub for WideF32 {
type Output = Self;
fn sub(self, other: Self) -> Self {
Self(unsafe { _mm256_sub_ps(self.0, other.0) })
}
}
impl Mul for WideF32 {
type Output = Self;
fn mul(self, other: Self) -> Self {
Self(unsafe { _mm256_mul_ps(self.0, other.0) })
}
}
impl MulAssign for WideF32 {
fn mul_assign(&mut self, other: Self) {
self.0 = unsafe { _mm256_mul_ps(self.0, other.0) }
}
}
impl Neg for WideF32 {
type Output = Self;
fn neg(self) -> Self {
Self(unsafe { _mm256_xor_ps(self.0, _mm256_set1_ps(-0.0)) })
}
}
#[derive(Debug, Copy, Clone, PartialEq)]
struct V3(f32, f32, f32);
impl V3 {
fn dot(self, other: V3) -> f32 {
self.0 * other.0 + self.1 * other.1 + self.2 * other.2
}
fn cross(self, other: V3) -> V3 {
V3(
self.1 * other.2 - self.2 * other.1,
self.2 * other.0 - self.0 * other.2,
self.0 * other.1 - self.1 * other.0,
)
}
fn normalize(self) -> V3 {
self * (1.0 / self.len())
}
fn reflect(self, normal: V3) -> V3 {
self - normal * self.dot(normal) * 2.0
}
fn len(self) -> f32 {
self.dot(self).sqrt()
}
fn is_unit_vector(self) -> bool {
(self.dot(self) - 1.0).abs() < TOLERANCE
}
}
impl Add for V3 {
type Output = Self;
fn add(self, other: Self) -> Self {
Self(self.0 + other.0, self.1 + other.1, self.2 + other.2)
}
}
impl Add<f32> for V3 {
type Output = Self;
fn add(self, rhs: f32) -> Self {
Self(self.0 + rhs, self.1 + rhs, self.2 + rhs)
}
}
impl AddAssign for V3 {
fn add_assign(&mut self, other: Self) {
*self = Self(self.0 + other.0, self.1 + other.1, self.2 + other.2)
}
}
impl Div<f32> for V3 {
type Output = Self;
fn div(self, rhs: f32) -> Self {
Self(self.0 / rhs, self.1 / rhs, self.2 / rhs)
}
}
impl Sub for V3 {
type Output = Self;
fn sub(self, other: Self) -> Self {
Self(self.0 - other.0, self.1 - other.1, self.2 - other.2)
}
}
impl Sub<f32> for V3 {
type Output = Self;
fn sub(self, rhs: f32) -> Self {
Self(self.0 - rhs, self.1 - rhs, self.2 - rhs)
}
}
impl Mul for V3 {
type Output = Self;
fn mul(self, other: Self) -> Self {
Self(self.0 * other.0, self.1 * other.1, self.2 * other.2)
}
}
impl Mul<f32> for V3 {
type Output = Self;
fn mul(self, rhs: f32) -> Self {
Self(self.0 * rhs, self.1 * | self.mask() != 0
}
fn mask(&self) -> i32 { | random_line_split | |
CLCDcurve.py | 1))
masstot = mass + passmass + fuelblock
Weight_CL = [(masstot - FU_CL[i])*g for i in range(len(FU_CL))]
CLgraph_mat = Weight_CL/(0.5 * VTAS_CL**2 * rho1_CL * S)
print(min(Mach_CL), max(Mach_CL))
#find linear relation for CL measurements
clalpha_mat,ma_mat = np.polyfit(AOA_CL[:,0],CLgraph_mat[:,0],1)
CLline_CL = clalpha_mat*AOA_CL[:,0] + ma_mat
print('Cl_alpha =', clalpha_mat, clalpha_mat*(180/pi))
##Calculate CD##
CDgraph_mat = CD0 + (CLgraph_mat) ** 2 / (pi * A * e)
#From Numerical Model#
AOAstat = np.array(AOAlist)
linecl_stat = CLalpha*AOAstat + b | #Plots CL and CD##
# plt.grid()
# plt.scatter(AOA_CL,CLgraph_mat,marker= '.', label='Measure point')
# # plt.plot(AOAstat,linecl_stat, label='Stationary Flight Measurements')
# plt.plot(AOA_CL[:,0],CLline_CL,c='darkorange', label= 'Least Squares of Flightdata')
# plt.ylabel('Lift Coefficient [-]')
# plt.xlabel('Angle of Attack [deg]')
# plt.legend()
# # plt.savefig('CLalphacompare.jpg')
# plt.show()
#
plt.grid()
plt.scatter(CDgraph_mat,CLgraph_mat, marker='.', label='Measure Point Flightdata')
# plt.plot(CDstat,CLlist,c='orange', label='Stationary Flight Measurements')
plt.ylabel('Lift Coefficient [-]')
plt.xlabel('Drag Coefficient [-]')
plt.legend()
plt.savefig('CLCDcompare.jpg')
plt.show()
##------------Reynolds Number Range-----------##
# b = 1.458*10**(-6) #kg/msK^1/2
# St = 110.4 #K
# Tst = np.array(SAT[16710:23911])
# mu = (b * Tst ** (3/2))/(Tst + St)
# Reyn = np.array([(rho1_CL[i] * VTAS_CL[i] * c/mu[i]) for i in range(len(mu))])
# print('Reynoldsnumber Range =', max(Reyn), min(Reyn))
##_______________________________________Stationary Flight Data_________________________________________##
##------------Calculate Cmdelta and Cmalpha using Post Flight Data-------------------------##
dde1 = [i.de for i in CGshift]
dde = (dde1[1] - dde1[0])*(pi/180)
dxcg = shiftxcg[1]-shiftxcg[0]
hp = CGshift[1].height
Vias = CGshift[1].IAS
Tm = float(CGshift[1].TAT) + 273.15
VTAS = Vequi(hp,Vias,Tm)[0]
rhoTAS = Vequi(hp,Vias,Tm)[1]
Fused = CGshift[1].Fused
Weight = (mass + passmass + fuelblock - Fused)*g
CN = Weight/(0.5*rhoTAS*(VTAS**2)*S)
print('CN =', CN)
Cmdelta = -(1/dde) * CN * dxcg/c
print('Cmdelta =', Cmdelta)
##--------------Elevator Trim Curve Ve-----------------##
height = np.array([i.height for i in EleTrimCurve])
V_ias = np.array([i.IAS for i in EleTrimCurve])
Temp = np.array([(i.TAT + 273.15) for i in EleTrimCurve])
# Vtasele = Vequi(height,V_ias,Temp)[0]
# rhoele = Vequi(height,V_ias,Temp)[1]
V_e = Vequi(height,V_ias,Temp)[2]
Fusedele = np.array([i.Fused for i in EleTrimCurve])
mtot_el = mass + passmass + fuelblock - Fusedele
Wele = mtot_el * g
Ws = 60500 #N
Ve_e = V_e * np.sqrt(Ws/Wele)
##-----------Elevator Trim Curve Ele defl eq-----------##
Cmtc = -.0064 #reader appendix
eledefl = np.array([i.de for i in EleTrimCurve])
aoa = np.array([i.AoA for i in EleTrimCurve])
d_eng = 0.686 #m
Tc = totalthrustele/(0.5*rho0*Ve_e**2*S)
print(Tc)
Tcs = totalthrustelestand/(0.5*rho0*Ve_e**2*d_eng**2)
print(Tcs)
Cmdelta_veri = -0.41532
deleq = eledefl - (1/Cmdelta *Cmtc * (Tcs - Tc))
##-------Plotting AoA against Ele delfection and determine Cmalpha------##
deda, q = np.polyfit(aoa,deleq,1)
line = deda*aoa+q
print('deda =', deda)
plt.grid()
plt.scatter(aoa,deleq, label='Measure Point')
plt.plot(aoa,line, c='orange', label='Least Squares')
plt.ylim(1.2,-0.5)
plt.ylabel('Reduced Elevator Deflection [deg]')
plt.xlabel('Angle of Attack [deg]')
plt.legend()
plt.savefig('DedAOA_verification.jpg')
plt.show()
Cmalpha = -deda * Cmdelta
print('Cmalpha =', Cmalpha)
#-------------------Plotting Ele defl against Ve----##
Ve_e_dde1 = np.column_stack([Ve_e,deleq])
Ve_e_dde = Ve_e_dde1[Ve_e_dde1[:,0].argsort()]
d, f, j = np.polyfit(Ve_e_dde[:,0],Ve_e_dde[:,1],2)
line_eleV = d*Ve_e_dde[:,0]**2 + f*Ve_e_dde[:,0] + j
# plt.grid()
# plt.scatter(Ve_e_dde[:,0],Ve_e_dde[:,1], label='Measure Point')
# plt.plot(Ve_e_dde[:,0],d*Ve_e_dde[:,0]**2 + f*Ve_e_dde[:,0] + j, c='orange', label='Least Squares')
# plt.ylim(1.2,-0.4)
# plt.ylabel('Reduced Elevator Deflection [deg]')
# plt.xlabel('Reduced Equivalent Airspeed [m/s]')
# plt.legend()
# plt.savefig('DedV.jpg')
# plt.show()
##------------Reduced Elevator control Curve----------##
Femea = np.array([i.Fe for i in EleTrimCurve])
Fe = Femea * (Ws/Wele)
Ve_e_Fe1 = np.column_stack([Ve_e,Fe])
Ve_e_Fe = Ve_e_Fe1[Ve_e_Fe1[:,0].argsort()]
d, f, j = np.polyfit(Ve_e_Fe[:,0],Ve_e_Fe[:,1],2)
line_feele = d*Ve_e_Fe[:,0]**2 + f*Ve_e_Fe[:,0] + j
# plt.grid()
# plt.scatter(Ve_e_Fe[:,0],Ve_e_Fe[:,1], label='Measure Point')
# plt.plot(Ve_e_Fe[:,0],d*Ve_e_Fe[:,0]**2 + f*Ve_e_Fe[:,0] + j, c='orange', label='Least Squares')
# plt.ylim(70,-40)
# plt.ylabel('Reduced Force on Elevator Control Wheel [N]')
# plt.xlabel('Reduced Equivalent Speed [m/s]')
# plt.legend()
# plt.savefig('FeV.jpg')
# plt.show()
##_______________________________________Flight test DATA_______________________________________##
##---------------------Cmdelta determination of matlab data-----------------------------------##
time_cg = time[33510:35911]
xcg_cg = np.array(xcg[33510:35911])
dxcg_cg1 = np.array([xcg_cg[i] - xcg_cg[i-1] for i in range(1,len(xcg_cg))])
dxcg_cg = min(dxcg_cg1)
de_cg = np.array(de[33510:35911])
dde_cg = (de_cg[2000] - de_cg[399]) * pi/180 #determined by exact time of interval stationary data
FUtot_cg = FUtot[33510:35911]
index = np.where(dxcg_cg1 == np.amin(dxcg_cg1))
W_cg = (masstot - FUtot_cg[2000])*g
h_cg = alt2[35512]
rho_cg = rho0 * pow((1 + (Tempgrad*h_cg)/Temp0),(-g/(R*Tempgrad) - 1))
Vtas_cg = TAS2[35512]
CN_cg = W_cg/(0.5*rho_cg*Vtas_cg**2*S)
Cmdelta_mat | CDstat = CD0 + linecl_stat/(pi * A * e)
| random_line_split |
mod.rs | step).
Ok(self.iterate(heap)?.iter().collect())
}
/// Produce an iterable from a value.
///
/// Errors if the underlying [`StarlarkValue`] does not support iteration.
pub fn iterate(self, heap: &'v Heap) -> anyhow::Result<RefIterable<'v>> {
let me: ARef<'v, dyn StarlarkValue> = self.get_aref();
// Probe once so a non-iterable value surfaces its error here, up front...
me.iterate()?;
Ok(RefIterable::new(
heap,
// ...which is why the `unwrap` on the second call below is expected to be
// safe (assumes `iterate` gives the same result both times — TODO confirm).
ARef::map(me, |e| e.iterate().unwrap()),
))
}
/// Get the [`Hashed`] version of this [`Value`].
pub fn get_hashed(self) -> anyhow::Result<Hashed<Self>> {
// UFCS call to the `ValueLike` trait method of the same name.
ValueLike::get_hashed(self)
}
/// Get a reference to underlying data or [`None`]
/// if contained object has different type than requested.
///
/// This function panics if the [`Value`] is borrowed mutably.
///
/// In many cases you may wish to call [`FromValue`] instead, as that can
/// get a non-frozen value from an underlying frozen value.
pub fn downcast_ref<T: AnyLifetime<'v>>(self) -> Option<ARef<'v, T>> {
// UFCS call to the `ValueLike` trait method of the same name.
ValueLike::downcast_ref(self)
}
/// Are two values equal. If the values are of different types it will
/// return [`false`]. It will only error if there is excessive recursion.
pub fn equals(self, other: Value<'v>) -> anyhow::Result<bool> {
// UFCS call to the `ValueLike` trait method of the same name.
ValueLike::equals(self, other)
}
/// How are two values comparable. For values of different types will return [`Err`].
pub fn compare(self, other: Value<'v>) -> anyhow::Result<Ordering> {
// UFCS call to the `ValueLike` trait method of the same name.
ValueLike::compare(self, other)
}
/// Get a mutable reference to underlying data or [`None`]
/// if contained object has different type than requested.
///
/// This function returns an [`Err`] if the [`Value`] is already borrowed, is frozen,
/// or frozen for iteration.
///
/// While this reference is active, any [`get_aref`](Value::get_aref) or similar on the value will
/// _cause a panic_. Therefore, it's super important not to call any Starlark operations,
/// even as simple as equality, while holding the [`RefMut`].
pub fn downcast_mut<T: AnyLifetime<'v>>(
self,
heap: &'v Heap,
) -> anyhow::Result<Option<RefMut<'_, T>>> {
// The mutable borrow is the fallible step (frozen / already borrowed => Err).
let vref = self.get_ref_mut(heap)?;
// Erase the concrete type so we can do a checked runtime downcast below.
let any: RefMut<'_, dyn AnyLifetime<'v>> = RefMut::map(vref, |v| v.as_dyn_any_mut());
Ok(if any.is::<T>() {
// The `unwrap` cannot fail: we just checked `is::<T>` on the same value.
Some(RefMut::map(any, |any| any.downcast_mut::<T>().unwrap()))
} else {
None
})
}
/// Describe the value, in order to get its metadata in a way that could be used
/// to generate prototypes, help information or whatever other descriptive text
/// is required.
/// Plan is to make this return a data type at some point in the future, possibly
/// move on to `StarlarkValue` and include data from members.
pub fn describe(self, name: &str) -> String {
    if self.get_type() != FUNCTION_TYPE {
        // Non-functions are described as a commented-out assignment.
        format!("# {} = {}", name, self.to_repr())
    } else {
        // Functions are presented as a Python-style stub definition.
        let signature = self.to_repr().replace(" = ...", " = None");
        format!("def {}: pass", signature)
    }
}
/// Call `export_as` on the underlying value, but only if the type is mutable.
/// Otherwise, does nothing.
pub fn export_as(self, name: &str, heap: &'v Heap) {
    // `get_ref_mut_already` yields `None` for immutable values, in which
    // case the export is silently skipped.
    match self.get_ref_mut_already() {
        Some(mut value) => value.export_as(heap, name),
        None => {}
    }
}
/// Return the attribute with the given name. Returns a pair of a boolean and the value.
///
/// The type is [`AttrType::Method`] if the attribute was defined via [`StarlarkValue::get_methods`]
/// and should be used as a signal that if the attribute is subsequently called,
/// e.g. `object.attribute(argument)` then the `object` should be passed as the first
/// argument to the function, e.g. `object.attribute(object, argument)`.
pub fn get_attr(
    self,
    attribute: &str,
    heap: &'v Heap,
) -> anyhow::Result<(AttrType, Value<'v>)> {
    let aref = self.get_aref();
    // Methods registered via `get_methods` take precedence over plain attributes.
    let method = aref.get_methods().and_then(|methods| methods.get(attribute));
    match method {
        Some(v) => Ok((AttrType::Method, v)),
        None => aref.get_attr(attribute, heap).map(|v| (AttrType::Field, v)),
    }
}
/// Query whether an attribute exists on a type. Should be equivalent to whether
/// [`get_attr`](Value::get_attr) succeeds, but potentially more efficient.
pub fn has_attr(self, attribute: &str) -> bool {
    let aref = self.get_aref();
    // A method registered via `get_methods` counts as an attribute; otherwise
    // defer to the value's own `has_attr`.
    match aref.get_methods() {
        Some(methods) if methods.get(attribute).is_some() => true,
        _ => aref.has_attr(attribute),
    }
}
/// Get a list of all the attributes this function supports, used to implement the
/// `dir()` function.
pub fn dir_attr(self) -> Vec<String> {
    let aref = self.get_aref();
    // Combine method names (if any) with the value's own attribute names.
    let mut names = match aref.get_methods() {
        Some(methods) => {
            let mut ns = methods.names();
            ns.extend(aref.dir_attr());
            ns
        }
        None => aref.dir_attr(),
    };
    // `dir()` output is conventionally sorted.
    names.sort();
    names
}
}
/// Methods that just forward to the underlying [`StarlarkValue`].
impl<'v> Value<'v> {
/// The string name of this value's type.
pub fn get_type(self) -> &'static str {
self.get_aref().get_type()
}
/// Truthiness of the value.
pub fn to_bool(self) -> bool {
// Fast path for the common case
if let Some(x) = self.unpack_bool() {
x
} else {
self.get_aref().to_bool()
}
}
/// Convert the value to an `i32`, failing if it is not int-like.
pub fn to_int(self) -> anyhow::Result<i32> {
// Fast path for the common case
if let Some(x) = self.unpack_int() {
Ok(x)
} else {
self.get_aref().to_int()
}
}
/// Indexing operation, `self[index]`.
pub fn at(self, index: Value<'v>, heap: &'v Heap) -> anyhow::Result<Value<'v>> {
self.get_aref().at(index, heap)
}
/// Slice operation, `self[start:stop:stride]`; any component may be omitted.
pub fn slice(
self,
start: Option<Value<'v>>,
stop: Option<Value<'v>>,
stride: Option<Value<'v>>,
heap: &'v Heap,
) -> anyhow::Result<Value<'v>> {
self.get_aref().slice(start, stop, stride, heap)
}
/// Length of the value, as used by `len()`.
pub fn length(self) -> anyhow::Result<i32> {
self.get_aref().length()
}
/// Membership test, `self in other`.
pub fn is_in(self, other: Value<'v>) -> anyhow::Result<bool> {
self.get_aref().is_in(other)
}
/// Unary plus, `+self`.
pub fn plus(self, heap: &'v Heap) -> anyhow::Result<Value<'v>> {
self.get_aref().plus(heap)
}
/// Unary minus, `-self`.
pub fn minus(self, heap: &'v Heap) -> anyhow::Result<Value<'v>> {
self.get_aref().minus(heap)
}
/// Subtraction, `self - other`.
pub fn sub(self, other: Value<'v>, heap: &'v Heap) -> anyhow::Result<Value<'v>> {
self.get_aref().sub(other, heap)
}
/// Multiplication, `self * other`.
pub fn mul(self, other: Value<'v>, heap: &'v Heap) -> anyhow::Result<Value<'v>> {
self.get_aref().mul(other, heap)
}
/// Percent operation, `self % other`.
pub fn percent(self, other: Value<'v>, heap: &'v Heap) -> anyhow::Result<Value<'v>> {
self.get_aref().percent(other, heap)
}
/// Floor division, `self // other`.
pub fn floor_div(self, other: Value<'v>, heap: &'v Heap) -> anyhow::Result<Value<'v>> {
self.get_aref().floor_div(other, heap)
}
/// Bitwise and, `self & other`.
pub fn bit_and(self, other: Value<'v>) -> anyhow::Result<Value<'v>> {
self.get_aref().bit_and(other)
}
/// Bitwise or, `self | other`.
pub fn bit_or(self, other: Value<'v>) -> anyhow::Result<Value<'v>> {
self.get_aref().bit_or(other)
}
/// Bitwise xor, `self ^ other`.
pub fn bit_xor(self, other: Value<'v>) -> anyhow::Result<Value<'v>> {
self.get_aref().bit_xor(other)
}
/// Left shift, `self << other`.
pub fn left_shift(self, other: Value<'v>) -> anyhow::Result<Value<'v>> {
self.get_aref().left_shift(other)
}
/// Right shift, `self >> other`.
pub fn right_shift(self, other: Value<'v>) -> anyhow::Result<Value<'v>> {
self.get_aref().right_shift(other)
}
/// Create a [`FunctionInvoker`] so this value can be called as a function.
pub fn new_invoker(self, eval: &mut Evaluator<'v, '_>) -> anyhow::Result<FunctionInvoker<'v>> {
self.get_aref().new_invoker(self, eval)
}
/// Forwards to [`StarlarkValue::get_type_value`].
pub fn get_type_value(self) -> &'static ConstFrozenValue {
self.get_aref().get_type_value()
}
/// Append this value's `repr` onto `collector`.
fn collect_repr(self, collector: &mut String) {
self.get_aref().collect_repr(collector);
}
/// JSON representation of the value, if it has one.
fn to_json(self) -> anyhow::Result<String> {
self.get_aref().to_json()
}
/// Equality, with a pointer-equality fast path and a recursion guard.
fn equals(self, other: Value<'v>) -> anyhow::Result<bool> {
if self.to_value().ptr_eq(other) {
// Identical pointers are always equal; skip the potentially deep comparison.
Ok(true)
} else {
// Guard against unbounded recursion through self-referential values.
let _guard = stack_guard::stack_guard()?;
self.get_aref().equals(other)
}
}
/// Ordering comparison, guarded against excessive recursion.
fn compare(self, other: Value<'v>) -> anyhow::Result<Ordering> {
let _guard = stack_guard::stack_guard()?;
self.get_aref().compare(other)
}
/// Borrow the underlying data as `T`, or `None` if the runtime type differs.
fn downcast_ref<T: AnyLifetime<'v>>(self) -> Option<ARef<'v, T>> {
// Erase to `dyn AnyLifetime` so a checked runtime downcast is possible.
let any = ARef::map(self.get_aref(), |e| e.as_dyn_any());
if any.is::<T>() {
// The `unwrap` cannot fail: `is::<T>` was just checked on the same value.
Some(ARef::map(any, |any| any.downcast_ref::<T>().unwrap()))
} else {
None
}
}
}
impl<'v, V: ValueLike<'v>> Hashed<V> {
/// Convert a [`Hashed`] over any value-like key into one over [`Value`],
/// reusing the already-computed hash.
pub(crate) fn to_hashed_value(&self) -> Hashed<Value<'v>> {
// Safe because we know frozen values have the same hash as non-frozen ones
Hashed::new_unchecked(self.hash(), self.key().to_value())
}
}
impl<'v> Hashed<Value<'v>> {
/// Freeze the key while carrying over the already-computed hash.
fn freeze(&self, freezer: &Freezer) -> Hashed<FrozenValue> {
// Safe because we know frozen values have the same hash as non-frozen ones
let key = self.key().freeze(freezer);
// But it's an easy mistake to make, so actually check it in debug
debug_assert_eq!(Some(self.hash()), key.get_hashed().ok().map(|x| x.hash()));
Hashed::new_unchecked(self.hash(), key)
}
}
impl<'v> ValueLike<'v> for Value<'v> {
fn get_aref(self) -> ARef<'v, dyn StarlarkValue<'v>> {
// UFCS call to the inherent `Value::get_aref` method.
Value::get_aref(self)
}
fn to_value(self) -> Value<'v> {
// Already a `Value`; the conversion is the identity.
self
}
}
impl<'v> ValueLike<'v> for FrozenValue {
fn get_aref(self) -> ARef<'v, dyn StarlarkValue<'v>> {
// Wrap the underlying reference directly via `ARef::new_ptr`.
ARef::new_ptr(self.get_ref())
}
fn to_value(self) -> Value<'v> {
// Re-wrap the frozen value in the unfrozen `Value` representation.
Value::new_frozen(self)
}
}
impl FrozenValue {
/// Convert a [`FrozenValue`] back to a [`Value`].
///
/// Simply wraps via [`Value::new_frozen`]; the resulting value may carry
/// any lifetime `'v`.
pub fn to_value<'v>(self) -> Value<'v> {
Value::new_frozen(self)
}
}
/// How an attribute (e.g. `x.f`) should behave.
///
/// Returned (paired with the attribute's value) by [`Value::get_attr`].
#[derive(Clone, Copy, Dupe, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
pub enum AttrType {
/// The attribute is a field, a direct value with no special behaviour.
Field,
/// The attribute is a method, which should be called passing the `x` value
/// as its first argument. It will either be a function (which is transformed
/// into a [`WrappedMethod`](crate::values::function::WrappedMethod)) or a
/// [`NativeAttribute`](crate::values::function::NativeAttribute)
/// (which is evaluated immediately).
Method,
}
impl<'v> Value<'v> {
/// Add two [`Value`]s together. Will first try using [`radd`](StarlarkValue::radd),
/// before falling back to [`add`](StarlarkValue::add).
pub fn add(self, other: Value<'v>, heap: &'v Heap) -> anyhow::Result<Value<'v>> {
    let me = self.to_value();
    // Give the right-hand side first refusal via `radd`; if it declines
    // (returns `None`), fall back to the left-hand side's `add`.
    match other.get_aref().radd(me, heap) {
        Some(result) => result,
        None => self.get_aref().add(other, heap),
    }
}
/// Convert a value to a [`FrozenValue`] using a supplied [`Freezer`].
pub fn freeze(self, freezer: &Freezer) -> FrozenValue {
// The freezer owns the actual conversion logic; this is a convenience forwarder.
freezer.freeze(self)
}
/// Implement the `str()` function - converts a string value to itself,
/// otherwise uses `repr()`.
pub fn to_str(self) -> String {
    // Strings are returned verbatim (no quoting); everything else uses `repr`.
    if let Some(s) = self.unpack_str() {
        s.to_owned()
    } else {
        self.to_repr()
    }
}
/// Implement the `repr()` function.
pub fn to_repr(self) -> String {
    // Accumulate into a single buffer via `collect_repr`.
    let mut buffer = String::new();
    self.collect_repr(&mut buffer);
    buffer
}
/// Forwards to [`ComplexValue::set_attr`].
pub fn set_attr(
    self,
    attribute: &str,
    alloc_value: Value<'v>,
    heap: &'v Heap,
) -> anyhow::Result<()> {
    // A mutable borrow is required first; this errors for frozen or
    // already-borrowed values.
    let mut target = self.get_ref_mut(heap)?;
    target.set_attr(attribute, alloc_value)
}
/// Forwards to [`ComplexValue::set_at`].
pub fn set_at(
    self,
    index: Value<'v>,
    alloc_value: Value<'v>,
    heap: &'v Heap,
) -> anyhow::Result<()> {
    // A mutable borrow is required first; this errors for frozen or
    // already-borrowed values.
    let mut target = self.get_ref_mut(heap)?;
    target.set_at(index, alloc_value)
}
/// Return the contents of an iterable collection, as an owned vector.
pub fn iterate_collect(self, heap: &'v Heap) -> anyhow::Result<Vec<Value<'v>>> {
    // Deliberately no list fast path: measurements showed the type check costs
    // about as much as it saves per step, so generic iteration is used for all
    // types.
    let iterable = self.iterate(heap)?;
    Ok(iterable.iter().collect())
}
/// Produce an iterable from a value.
pub fn iterate(self, heap: &'v Heap) -> anyhow::Result<RefIterable<'v>> {
let me: ARef<'v, dyn StarlarkValue> = self.get_aref();
me.iterate()?;
Ok(RefIterable::new(
heap,
ARef::map(me, |e| e.iterate().unwrap()),
))
}
/// Get the [`Hashed`] version of this [`Value`].
pub fn get_hashed(self) -> anyhow::Result<Hashed<Self>> {
ValueLike::get_hashed(self)
}
/// Get a reference to underlying data or [`None`]
/// if contained object has different type than requested.
///
/// This function panics if the [`Value`] is borrowed mutably.
///
/// In many cases you may wish to call [`FromValue`] instead, as that can
/// get a non-frozen value from an underlying frozen value.
pub fn downcast_ref<T: AnyLifetime<'v>>(self) -> Option<ARef<'v, T>> {
ValueLike::downcast_ref(self)
}
/// Are two values equal. If the values are of different types it will
/// return [`false`]. It will only error if there is excessive recursion.
pub fn equals(self, other: Value<'v>) -> anyhow::Result<bool> {
ValueLike::equals(self, other)
}
/// How are two values comparable. For values of different types will return [`Err`].
pub fn compare(self, other: Value<'v>) -> anyhow::Result<Ordering> {
ValueLike::compare(self, other)
}
/// Get a mutable reference to underlying data or [`None`]
/// if contained object has different type than requested.
///
/// This function returns an [`Err`] if the [`Value`] is already borrowed, is frozen,
/// or frozen for iteration.
///
/// While this reference is active, any [`get_aref`](Value::get_aref) or similar on the value will
/// _cause a panic_. Therefore, it's super important not to call any Starlark operations,
/// even as simple as equality, while holding the [`RefMut`].
pub fn downcast_mut<T: AnyLifetime<'v>>(
self,
heap: &'v Heap,
) -> anyhow::Result<Option<RefMut<'_, T>>> {
let vref = self.get_ref_mut(heap)?;
let any: RefMut<'_, dyn AnyLifetime<'v>> = RefMut::map(vref, |v| v.as_dyn_any_mut());
Ok(if any.is::<T>() {
Some(RefMut::map(any, |any| any.downcast_mut::<T>().unwrap()))
} else {
None
})
}
/// Describe the value, in order to get its metadata in a way that could be used
/// to generate prototypes, help information or whatever other descriptive text
/// is required.
/// Plan is to make this return a data type at some point in the future, possibly
/// move on to `StarlarkValue` and include data from members.
pub fn describe(self, name: &str) -> String {
if self.get_type() == FUNCTION_TYPE {
format!("def {}: pass", self.to_repr().replace(" = ...", " = None"))
} else {
format!("# {} = {}", name, self.to_repr())
}
}
/// Call `export_as` on the underlying value, but only if the type is mutable.
/// Otherwise, does nothing.
pub fn | export_as | identifier_name | |
mod.rs | for FrozenValue {
fn eq(&self, other: &FrozenValue) -> bool {
let v: Value = Value::new_frozen(*self);
let other: Value = Value::new_frozen(*other);
v.equals(other).ok() == Some(true)
}
}
impl Eq for Value<'_> {}
impl Eq for FrozenValue {}
impl Equivalent<FrozenValue> for Value<'_> {
fn equivalent(&self, key: &FrozenValue) -> bool {
key.equals(*self).unwrap()
}
}
impl Equivalent<Value<'_>> for FrozenValue {
fn equivalent(&self, key: &Value) -> bool {
self.equals(*key).unwrap()
}
}
/// Trait for things that can be allocated on a [`Heap`] producing a [`Value`].
pub trait AllocValue<'v> {
fn alloc_value(self, heap: &'v Heap) -> Value<'v>;
}
impl<'v> AllocValue<'v> for Value<'v> {
fn alloc_value(self, _heap: &'v Heap) -> Value<'v> {
self
}
}
/// Trait for things that can be allocated on a [`FrozenHeap`] producing a [`FrozenValue`].
pub trait AllocFrozenValue {
fn alloc_frozen_value(self, heap: &FrozenHeap) -> FrozenValue;
}
impl FrozenHeap {
/// Allocate a new value on a [`FrozenHeap`].
pub fn alloc<T: AllocFrozenValue>(&self, val: T) -> FrozenValue {
val.alloc_frozen_value(self)
}
}
impl Heap {
/// Allocate a new value on a [`Heap`].
pub fn alloc<'v, T: AllocValue<'v>>(&'v self, x: T) -> Value<'v> {
x.alloc_value(self)
}
}
/// Abstract over [`Value`] and [`FrozenValue`].
///
/// The methods on this trait are those required to implement containers,
/// allowing implementations of [`ComplexValue`] to be agnostic of their contained type.
/// For details about each function, see the documentation for [`Value`],
/// which provides the same functions (and more).
pub trait ValueLike<'v>: Eq + Copy + Debug {
/// Produce a [`Value`] regardless of the type you are starting with.
fn to_value(self) -> Value<'v>;
fn get_aref(self) -> ARef<'v, dyn StarlarkValue<'v>>;
fn new_invoker(self, eval: &mut Evaluator<'v, '_>) -> anyhow::Result<FunctionInvoker<'v>> {
self.to_value().new_invoker(eval)
}
fn get_hash(self) -> anyhow::Result<u64> {
self.get_aref().get_hash()
}
fn get_hashed(self) -> anyhow::Result<Hashed<Self>> {
Ok(Hashed::new_unchecked(
SmallHashResult::new_unchecked(self.get_hash()?),
self,
))
}
fn collect_repr(self, collector: &mut String) {
self.get_aref().collect_repr(collector);
}
fn to_json(self) -> anyhow::Result<String> {
self.get_aref().to_json()
}
fn equals(self, other: Value<'v>) -> anyhow::Result<bool> {
if self.to_value().ptr_eq(other) {
Ok(true)
} else |
}
fn compare(self, other: Value<'v>) -> anyhow::Result<Ordering> {
let _guard = stack_guard::stack_guard()?;
self.get_aref().compare(other)
}
fn downcast_ref<T: AnyLifetime<'v>>(self) -> Option<ARef<'v, T>> {
let any = ARef::map(self.get_aref(), |e| e.as_dyn_any());
if any.is::<T>() {
Some(ARef::map(any, |any| any.downcast_ref::<T>().unwrap()))
} else {
None
}
}
}
impl<'v, V: ValueLike<'v>> Hashed<V> {
pub(crate) fn to_hashed_value(&self) -> Hashed<Value<'v>> {
// Safe because we know frozen values have the same hash as non-frozen ones
Hashed::new_unchecked(self.hash(), self.key().to_value())
}
}
impl<'v> Hashed<Value<'v>> {
fn freeze(&self, freezer: &Freezer) -> Hashed<FrozenValue> {
// Safe because we know frozen values have the same hash as non-frozen ones
let key = self.key().freeze(freezer);
// But it's an easy mistake to make, so actually check it in debug
debug_assert_eq!(Some(self.hash()), key.get_hashed().ok().map(|x| x.hash()));
Hashed::new_unchecked(self.hash(), key)
}
}
impl<'v> ValueLike<'v> for Value<'v> {
fn get_aref(self) -> ARef<'v, dyn StarlarkValue<'v>> {
Value::get_aref(self)
}
fn to_value(self) -> Value<'v> {
self
}
}
impl<'v> ValueLike<'v> for FrozenValue {
fn get_aref(self) -> ARef<'v, dyn StarlarkValue<'v>> {
ARef::new_ptr(self.get_ref())
}
fn to_value(self) -> Value<'v> {
Value::new_frozen(self)
}
}
impl FrozenValue {
/// Convert a [`FrozenValue`] back to a [`Value`].
pub fn to_value<'v>(self) -> Value<'v> {
Value::new_frozen(self)
}
}
/// How an attribute (e.g. `x.f`) should behave.
#[derive(Clone, Copy, Dupe, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
pub enum AttrType {
/// The attribute is a field, a direct value with no special behaviour.
Field,
/// The attribute is a method, which should be called passing the `x` value
/// as its first argument. It will either be a function (which is transformed
/// into a [`WrappedMethod`](crate::values::function::WrappedMethod)) or a
/// [`NativeAttribute`](crate::values::function::NativeAttribute)
/// (which is evaluated immediately).
Method,
}
impl<'v> Value<'v> {
/// Add two [`Value`]s together. Will first try using [`radd`](StarlarkValue::radd),
/// before falling back to [`add`](StarlarkValue::add).
pub fn add(self, other: Value<'v>, heap: &'v Heap) -> anyhow::Result<Value<'v>> {
let me = self.to_value();
if let Some(v) = other.get_aref().radd(me, heap) {
v
} else {
self.get_aref().add(other, heap)
}
}
/// Convert a value to a [`FrozenValue`] using a supplied [`Freezer`].
pub fn freeze(self, freezer: &Freezer) -> FrozenValue {
freezer.freeze(self)
}
/// Implement the `str()` function - converts a string value to itself,
/// otherwise uses `repr()`.
pub fn to_str(self) -> String {
match self.unpack_str() {
None => self.to_repr(),
Some(s) => s.to_owned(),
}
}
/// Implement the `repr()` function.
pub fn to_repr(self) -> String {
let mut s = String::new();
self.collect_repr(&mut s);
s
}
/// Forwards to [`ComplexValue::set_attr`].
pub fn set_attr(
self,
attribute: &str,
alloc_value: Value<'v>,
heap: &'v Heap,
) -> anyhow::Result<()> {
self.get_ref_mut(heap)?.set_attr(attribute, alloc_value)
}
/// Forwards to [`ComplexValue::set_at`].
pub fn set_at(
self,
index: Value<'v>,
alloc_value: Value<'v>,
heap: &'v Heap,
) -> anyhow::Result<()> {
self.get_ref_mut(heap)?.set_at(index, alloc_value)
}
/// Return the contents of an iterable collection, as an owned vector.
pub fn iterate_collect(self, heap: &'v Heap) -> anyhow::Result<Vec<Value<'v>>> {
// You might reasonably think this is mostly called on lists (I think it is),
// and thus that a fast-path here would speed things up. But in my experiments
// it's completely irrelevant (you pay a bit for the check, you save a bit on each step).
Ok(self.iterate(heap)?.iter().collect())
}
/// Produce an iterable from a value.
pub fn iterate(self, heap: &'v Heap) -> anyhow::Result<RefIterable<'v>> {
let me: ARef<'v, dyn StarlarkValue> = self.get_aref();
me.iterate()?;
Ok(RefIterable::new(
heap,
ARef::map(me, |e| e.iterate().unwrap()),
))
}
/// Get the [`Hashed`] version of this [`Value`].
pub fn get_hashed(self) -> anyhow::Result<Hashed<Self>> {
ValueLike::get_hashed(self)
}
/// Get a reference to underlying data or [`None`]
/// if contained object has different type | {
let _guard = stack_guard::stack_guard()?;
self.get_aref().equals(other)
} | conditional_block |
mod.rs | }
}
impl Debug for FrozenValue {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
debug_value("FrozenValue", Value::new_frozen(*self), f)
}
}
impl<'v> PartialEq for Value<'v> {
fn eq(&self, other: &Value<'v>) -> bool {
self.equals(*other).ok() == Some(true)
}
}
impl PartialEq for FrozenValue {
fn eq(&self, other: &FrozenValue) -> bool {
let v: Value = Value::new_frozen(*self);
let other: Value = Value::new_frozen(*other);
v.equals(other).ok() == Some(true)
}
}
impl Eq for Value<'_> {}
impl Eq for FrozenValue {}
impl Equivalent<FrozenValue> for Value<'_> {
fn equivalent(&self, key: &FrozenValue) -> bool {
key.equals(*self).unwrap()
}
}
impl Equivalent<Value<'_>> for FrozenValue {
fn equivalent(&self, key: &Value) -> bool {
self.equals(*key).unwrap()
}
}
/// Trait for things that can be allocated on a [`Heap`] producing a [`Value`].
pub trait AllocValue<'v> {
fn alloc_value(self, heap: &'v Heap) -> Value<'v>;
}
impl<'v> AllocValue<'v> for Value<'v> {
fn alloc_value(self, _heap: &'v Heap) -> Value<'v> {
self
}
}
/// Trait for things that can be allocated on a [`FrozenHeap`] producing a [`FrozenValue`].
pub trait AllocFrozenValue {
fn alloc_frozen_value(self, heap: &FrozenHeap) -> FrozenValue;
}
impl FrozenHeap {
/// Allocate a new value on a [`FrozenHeap`].
pub fn alloc<T: AllocFrozenValue>(&self, val: T) -> FrozenValue {
val.alloc_frozen_value(self)
}
}
impl Heap {
/// Allocate a new value on a [`Heap`].
pub fn alloc<'v, T: AllocValue<'v>>(&'v self, x: T) -> Value<'v> {
x.alloc_value(self)
}
}
/// Abstract over [`Value`] and [`FrozenValue`].
///
/// The methods on this trait are those required to implement containers,
/// allowing implementations of [`ComplexValue`] to be agnostic of their contained type.
/// For details about each function, see the documentation for [`Value`],
/// which provides the same functions (and more).
pub trait ValueLike<'v>: Eq + Copy + Debug {
/// Produce a [`Value`] regardless of the type you are starting with.
fn to_value(self) -> Value<'v>;
fn get_aref(self) -> ARef<'v, dyn StarlarkValue<'v>>;
fn new_invoker(self, eval: &mut Evaluator<'v, '_>) -> anyhow::Result<FunctionInvoker<'v>> {
self.to_value().new_invoker(eval)
}
fn get_hash(self) -> anyhow::Result<u64> {
self.get_aref().get_hash()
}
fn get_hashed(self) -> anyhow::Result<Hashed<Self>> {
Ok(Hashed::new_unchecked(
SmallHashResult::new_unchecked(self.get_hash()?),
self,
))
}
fn collect_repr(self, collector: &mut String) {
self.get_aref().collect_repr(collector);
}
fn to_json(self) -> anyhow::Result<String> {
self.get_aref().to_json()
}
fn equals(self, other: Value<'v>) -> anyhow::Result<bool> {
if self.to_value().ptr_eq(other) {
Ok(true)
} else {
let _guard = stack_guard::stack_guard()?;
self.get_aref().equals(other)
}
}
fn compare(self, other: Value<'v>) -> anyhow::Result<Ordering> {
let _guard = stack_guard::stack_guard()?;
self.get_aref().compare(other)
}
fn downcast_ref<T: AnyLifetime<'v>>(self) -> Option<ARef<'v, T>> {
let any = ARef::map(self.get_aref(), |e| e.as_dyn_any());
if any.is::<T>() {
Some(ARef::map(any, |any| any.downcast_ref::<T>().unwrap()))
} else {
None
}
}
}
impl<'v, V: ValueLike<'v>> Hashed<V> {
pub(crate) fn to_hashed_value(&self) -> Hashed<Value<'v>> {
// Safe because we know frozen values have the same hash as non-frozen ones
Hashed::new_unchecked(self.hash(), self.key().to_value())
}
}
impl<'v> Hashed<Value<'v>> {
fn freeze(&self, freezer: &Freezer) -> Hashed<FrozenValue> {
// Safe because we know frozen values have the same hash as non-frozen ones
let key = self.key().freeze(freezer);
// But it's an easy mistake to make, so actually check it in debug
debug_assert_eq!(Some(self.hash()), key.get_hashed().ok().map(|x| x.hash()));
Hashed::new_unchecked(self.hash(), key)
}
}
impl<'v> ValueLike<'v> for Value<'v> {
fn get_aref(self) -> ARef<'v, dyn StarlarkValue<'v>> {
Value::get_aref(self)
}
fn to_value(self) -> Value<'v> {
self
}
}
impl<'v> ValueLike<'v> for FrozenValue {
fn get_aref(self) -> ARef<'v, dyn StarlarkValue<'v>> {
ARef::new_ptr(self.get_ref())
}
fn to_value(self) -> Value<'v> {
Value::new_frozen(self)
}
}
impl FrozenValue {
/// Convert a [`FrozenValue`] back to a [`Value`].
pub fn to_value<'v>(self) -> Value<'v> {
Value::new_frozen(self)
}
}
/// How an attribute (e.g. `x.f`) should behave.
#[derive(Clone, Copy, Dupe, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
pub enum AttrType {
/// The attribute is a field, a direct value with no special behaviour.
Field,
/// The attribute is a method, which should be called passing the `x` value
/// as its first argument. It will either be a function (which is transformed
/// into a [`WrappedMethod`](crate::values::function::WrappedMethod)) or a
/// [`NativeAttribute`](crate::values::function::NativeAttribute)
/// (which is evaluated immediately).
Method,
}
impl<'v> Value<'v> {
/// Add two [`Value`]s together. Will first try using [`radd`](StarlarkValue::radd),
/// before falling back to [`add`](StarlarkValue::add).
pub fn add(self, other: Value<'v>, heap: &'v Heap) -> anyhow::Result<Value<'v>> {
let me = self.to_value();
if let Some(v) = other.get_aref().radd(me, heap) {
v
} else {
self.get_aref().add(other, heap)
}
}
/// Convert a value to a [`FrozenValue`] using a supplied [`Freezer`].
pub fn freeze(self, freezer: &Freezer) -> FrozenValue {
freezer.freeze(self)
}
/// Implement the `str()` function - converts a string value to itself,
/// otherwise uses `repr()`.
pub fn to_str(self) -> String {
match self.unpack_str() {
None => self.to_repr(),
Some(s) => s.to_owned(),
}
}
/// Implement the `repr()` function.
pub fn to_repr(self) -> String {
let mut s = String::new();
self.collect_repr(&mut s);
s
}
/// Forwards to [`ComplexValue::set_attr`].
pub fn set_attr(
self,
attribute: &str,
alloc_value: Value<'v>,
heap: &'v Heap,
) -> anyhow::Result<()> {
self.get_ref_mut(heap)?.set_attr(attribute, alloc_value)
}
/// Forwards to [`ComplexValue::set_at`].
pub fn set_at(
self,
index: Value<'v>,
alloc_value: Value<'v>,
heap: &'v Heap,
) -> anyhow::Result<()> {
self.get_ref_mut(heap)?.set_at(index, alloc_value)
}
/// Return the contents of an iterable collection, as an owned vector.
pub fn iterate_collect(self, heap: &'v Heap) -> anyhow::Result<Vec<Value<'v>>> {
// You might reasonably think this is mostly called on lists (I think it is),
// and thus that a fast-path here would speed things up. But in my experiments
// it's completely irrelevant (you pay a bit for the check, you save a bit on each step).
Ok(self.iterate(heap)?.iter().collect())
}
/// Produce an iterable from a value.
pub fn iterate(self, heap: &'v Heap) -> anyhow::Result<RefIterable<'v>> {
let me: ARef<'v, dyn StarlarkValue> = self.get_aref();
| debug_value("Value", *self, f) | random_line_split | |
encode.go | ) encoderFunc {
if fi, ok := encoderCache.Load(t); ok {
return fi.(encoderFunc)
}
var (
wg sync.WaitGroup
f encoderFunc
)
wg.Add(1)
fi, loaded := encoderCache.LoadOrStore(t, encoderFunc(func(e *encodeState, v reflect.Value) {
wg.Wait()
f(e, v)
}))
if loaded {
return fi.(encoderFunc)
}
f = newTypeEncoder(t, true)
wg.Done()
encoderCache.Store(t, f)
return f
}
var (
// marshalerType = reflect.TypeOf(new(Marshaler)).Elem()
// binaryMarshalerType = reflect.TypeOf(new(encoding.BinaryMarshaler)).Elem()
)
func newTypeEncoder(t reflect.Type, allowAddr bool) encoderFunc {
switch t.Kind() {
case reflect.Bool:
return boolEncoder
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
return uintEncoder
case reflect.Interface:
return interfaceEncoder
case reflect.String:
return stringEncoder
case reflect.Struct:
return newStructEncoder(t)
case reflect.Slice:
return newSliceEncoder(t)
case reflect.Array:
return newArrayEncoder(t)
case reflect.Ptr:
return newPtrEncoder(t)
default:
return unsupportedTypeEncoder
}
}
func invalidValueEncoder(e *encodeState, v reflect.Value) {
e.error(&InvalidValueError{})
}
func boolEncoder(e *encodeState, v reflect.Value) {
if size, ok := e.tagCache["size"]; ok {
// @todo TagDefinitionRequiredError
switch size {
case "uint8":
if v.Bool() {
uintEncoder(e, reflect.ValueOf(uint8(0x01)))
return
}
uintEncoder(e, reflect.ValueOf(uint8(0x00)))
case "uint32":
if v.Bool() {
uintEncoder(e, reflect.ValueOf(uint32(0x01)))
return
}
uintEncoder(e, reflect.ValueOf(uint32(0x00)))
default:
e.error(&InvalidTagValueError{Expected: "uint8 or uint32", Value: size})
}
} else {
e.error(&TagDefinitionRequiredError{Tag: "size"})
}
}
func uintEncoder(e *encodeState, v reflect.Value) {
_, bigendian := e.tagCache["bigendian"]
switch v.Kind() {
case reflect.Uint8:
e.buf.WriteByte(uint8(v.Uint()))
case reflect.Uint16:
b := []byte{0x00, 0x00}
if bigendian {
binary.BigEndian.PutUint16(b, uint16(v.Uint()))
} else {
binary.LittleEndian.PutUint16(b, uint16(v.Uint()))
}
e.buf.Write(b)
case reflect.Uint32:
b := []byte{0x00, 0x00, 0x00, 0x00}
if bigendian {
binary.BigEndian.PutUint32(b, uint32(v.Uint()))
} else {
binary.LittleEndian.PutUint32(b, uint32(v.Uint()))
}
e.buf.Write(b)
case reflect.Uint64:
b := []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
if bigendian {
binary.BigEndian.PutUint64(b, v.Uint())
} else {
binary.LittleEndian.PutUint64(b, v.Uint())
}
e.buf.Write(b)
}
}
func stringEncoder(e *encodeState, v reflect.Value) {
e.buf.WriteString(v.String())
e.buf.WriteByte(0x00)
}
func interfaceEncoder(e *encodeState, v reflect.Value) {
if v.IsNil() {
return
}
e.reflectValue(v.Elem())
}
func unsupportedTypeEncoder(e *encodeState, v reflect.Value) {
e.error(&UnsupportedTypeError{v.Type()})
}
type structEncoder struct {
fields []field
fieldEncs []encoderFunc
}
func (se *structEncoder) encode(e *encodeState, v reflect.Value) {
for i, f := range se.fields {
fv, err := fieldByIndex(v, f.index)
if err != nil {
e.error(err)
}
if !fv.IsValid() || isEmptyValue(*fv) {
// @todo InvalidValue
e.error(&InvalidValueError{})
}
e.tagCache = se.fields[i].tags
se.fieldEncs[i](e, *fv)
e.tagCache = nil
}
}
func newStructEncoder(t reflect.Type) encoderFunc {
fields := cachedTypeFields(t)
se := &structEncoder{
fields: fields,
fieldEncs: make([]encoderFunc, len(fields)),
}
for i, f := range fields {
se.fieldEncs[i] = typeEncoder(typeByIndex(t, f.index))
}
return se.encode
}
func encodeStringSlice(e *encodeState, v reflect.Value) {
n := v.Len()
for i := 0; i < n; i++ {
stringEncoder(e, v.Index(i))
}
e.buf.WriteByte(0x00)
}
type sliceEncoder struct {
arrayEnc encoderFunc
}
func (se *sliceEncoder) encode(e *encodeState, v reflect.Value) {
if v.IsNil() {
return
}
se.arrayEnc(e, v)
}
func newSliceEncoder(t reflect.Type) encoderFunc {
if t.Elem().Kind() == reflect.String {
return encodeStringSlice
}
enc := &sliceEncoder{newArrayEncoder(t)}
return enc.encode
}
type arrayEncoder struct {
elemEnc encoderFunc
}
func (ae *arrayEncoder) encode(e *encodeState, v reflect.Value) {
n := v.Len()
for i := 0; i < n; i++ {
ae.elemEnc(e, v.Index(i))
}
}
func newArrayEncoder(t reflect.Type) encoderFunc {
enc := &arrayEncoder{typeEncoder(t.Elem())}
return enc.encode
}
func newPtrEncoder(t reflect.Type) encoderFunc {
enc := &ptrEncoder{typeEncoder(t.Elem())}
return enc.encode
}
type ptrEncoder struct {
ptrEnc encoderFunc
}
func (pe *ptrEncoder) encode(e *encodeState, v reflect.Value) {
if v.IsNil() {
return
}
pe.ptrEnc(e, v.Elem())
}
func fieldByIndex(v reflect.Value, index []int) (*reflect.Value, error) {
for _, i := range index {
if v.Kind() == reflect.Ptr {
if v.IsNil() {
return nil, &NilPointerError{}
}
v = v.Elem()
}
v = v.Field(i)
}
return &v, nil
}
func typeByIndex(t reflect.Type, index []int) reflect.Type {
for _, i := range index {
if t.Kind() == reflect.Ptr {
t = t.Elem()
}
t = t.Field(i).Type
}
return t
}
type field struct {
name string
tags map[string]string
index []int
typ reflect.Type
}
// byIndex sorts field by index sequence.
type byIndex []field
func (x byIndex) Len() int { return len(x) }
func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x byIndex) Less(i, j int) bool {
for k, xik := range x[i].index {
if k >= len(x[j].index) {
return false
}
if xik != x[j].index[k] {
return xik < x[j].index[k]
}
}
return len(x[i].index) < len(x[j].index)
}
func dominantField(fields []field) (field, bool) {
// The fields are sorted in increasing index-length order. The winner
// must therefore be one with the shortest index length. Drop all
// longer entries, which is easy: just truncate the slice.
length := len(fields[0].index)
tagged := -1 // Index of first tagged field.
for i, f := range fields {
if len(f.index) > length {
fields = fields[:i]
break
}
}
if tagged >= 0 {
return fields[tagged], true
}
// All remaining fields have the same length. If there's more than one,
// we have a conflict (two fields named "X" at the same level) and we
// return no field.
if len(fields) > 1 {
return field{}, false
}
return fields[0], true
}
var fieldCache struct {
value atomic.Value
mu sync.Mutex
}
func typeFields(t reflect.Type) []field {
// Anonymous fields to explore at the current level and the next.
current := []field{}
next := []field{{typ: t}}
// Count of queued names for current level and the next.
var count map[reflect.Type]int
nextCount := map[reflect.Type]int{}
// Types already visited at an earlier level.
visited := map[reflect.Type]bool{}
// Fields found.
var fields []field | random_line_split | ||
encode.go | return invalidValueEncoder
}
return typeEncoder(v.Type())
}
func typeEncoder(t reflect.Type) encoderFunc {
if fi, ok := encoderCache.Load(t); ok {
return fi.(encoderFunc)
}
var (
wg sync.WaitGroup
f encoderFunc
)
wg.Add(1)
fi, loaded := encoderCache.LoadOrStore(t, encoderFunc(func(e *encodeState, v reflect.Value) {
wg.Wait()
f(e, v)
}))
if loaded {
return fi.(encoderFunc)
}
f = newTypeEncoder(t, true)
wg.Done()
encoderCache.Store(t, f)
return f
}
var (
// marshalerType = reflect.TypeOf(new(Marshaler)).Elem()
// binaryMarshalerType = reflect.TypeOf(new(encoding.BinaryMarshaler)).Elem()
)
func newTypeEncoder(t reflect.Type, allowAddr bool) encoderFunc {
switch t.Kind() {
case reflect.Bool:
return boolEncoder
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
return uintEncoder
case reflect.Interface:
return interfaceEncoder
case reflect.String:
return stringEncoder
case reflect.Struct:
return newStructEncoder(t)
case reflect.Slice:
return newSliceEncoder(t)
case reflect.Array:
return newArrayEncoder(t)
case reflect.Ptr:
return newPtrEncoder(t)
default:
return unsupportedTypeEncoder
}
}
func invalidValueEncoder(e *encodeState, v reflect.Value) {
e.error(&InvalidValueError{})
}
func boolEncoder(e *encodeState, v reflect.Value) {
if size, ok := e.tagCache["size"]; ok {
// @todo TagDefinitionRequiredError
switch size {
case "uint8":
if v.Bool() {
uintEncoder(e, reflect.ValueOf(uint8(0x01)))
return
}
uintEncoder(e, reflect.ValueOf(uint8(0x00)))
case "uint32":
if v.Bool() {
uintEncoder(e, reflect.ValueOf(uint32(0x01)))
return
}
uintEncoder(e, reflect.ValueOf(uint32(0x00)))
default:
e.error(&InvalidTagValueError{Expected: "uint8 or uint32", Value: size})
}
} else {
e.error(&TagDefinitionRequiredError{Tag: "size"})
}
}
func uintEncoder(e *encodeState, v reflect.Value) {
_, bigendian := e.tagCache["bigendian"]
switch v.Kind() {
case reflect.Uint8:
e.buf.WriteByte(uint8(v.Uint()))
case reflect.Uint16:
b := []byte{0x00, 0x00}
if bigendian {
binary.BigEndian.PutUint16(b, uint16(v.Uint()))
} else {
binary.LittleEndian.PutUint16(b, uint16(v.Uint()))
}
e.buf.Write(b)
case reflect.Uint32:
b := []byte{0x00, 0x00, 0x00, 0x00}
if bigendian {
binary.BigEndian.PutUint32(b, uint32(v.Uint()))
} else {
binary.LittleEndian.PutUint32(b, uint32(v.Uint()))
}
e.buf.Write(b)
case reflect.Uint64:
b := []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
if bigendian {
binary.BigEndian.PutUint64(b, v.Uint())
} else {
binary.LittleEndian.PutUint64(b, v.Uint())
}
e.buf.Write(b)
}
}
func stringEncoder(e *encodeState, v reflect.Value) {
e.buf.WriteString(v.String())
e.buf.WriteByte(0x00)
}
func interfaceEncoder(e *encodeState, v reflect.Value) {
if v.IsNil() {
return
}
e.reflectValue(v.Elem())
}
func unsupportedTypeEncoder(e *encodeState, v reflect.Value) {
e.error(&UnsupportedTypeError{v.Type()})
}
type structEncoder struct {
fields []field
fieldEncs []encoderFunc
}
func (se *structEncoder) encode(e *encodeState, v reflect.Value) {
for i, f := range se.fields {
fv, err := fieldByIndex(v, f.index)
if err != nil {
e.error(err)
}
if !fv.IsValid() || isEmptyValue(*fv) {
// @todo InvalidValue
e.error(&InvalidValueError{})
}
e.tagCache = se.fields[i].tags
se.fieldEncs[i](e, *fv)
e.tagCache = nil
}
}
func newStructEncoder(t reflect.Type) encoderFunc {
fields := cachedTypeFields(t)
se := &structEncoder{
fields: fields,
fieldEncs: make([]encoderFunc, len(fields)),
}
for i, f := range fields {
se.fieldEncs[i] = typeEncoder(typeByIndex(t, f.index))
}
return se.encode
}
func encodeStringSlice(e *encodeState, v reflect.Value) {
n := v.Len()
for i := 0; i < n; i++ {
stringEncoder(e, v.Index(i))
}
e.buf.WriteByte(0x00)
}
type sliceEncoder struct {
arrayEnc encoderFunc
}
func (se *sliceEncoder) encode(e *encodeState, v reflect.Value) {
if v.IsNil() {
return
}
se.arrayEnc(e, v)
}
func newSliceEncoder(t reflect.Type) encoderFunc {
if t.Elem().Kind() == reflect.String {
return encodeStringSlice
}
enc := &sliceEncoder{newArrayEncoder(t)}
return enc.encode
}
type arrayEncoder struct {
elemEnc encoderFunc
}
func (ae *arrayEncoder) encode(e *encodeState, v reflect.Value) {
n := v.Len()
for i := 0; i < n; i++ {
ae.elemEnc(e, v.Index(i))
}
}
func newArrayEncoder(t reflect.Type) encoderFunc {
enc := &arrayEncoder{typeEncoder(t.Elem())}
return enc.encode
}
func newPtrEncoder(t reflect.Type) encoderFunc {
enc := &ptrEncoder{typeEncoder(t.Elem())}
return enc.encode
}
type ptrEncoder struct {
ptrEnc encoderFunc
}
func (pe *ptrEncoder) encode(e *encodeState, v reflect.Value) {
if v.IsNil() {
return
}
pe.ptrEnc(e, v.Elem())
}
func fieldByIndex(v reflect.Value, index []int) (*reflect.Value, error) {
for _, i := range index {
if v.Kind() == reflect.Ptr {
if v.IsNil() {
return nil, &NilPointerError{}
}
v = v.Elem()
}
v = v.Field(i)
}
return &v, nil
}
func | (t reflect.Type, index []int) reflect.Type {
for _, i := range index {
if t.Kind() == reflect.Ptr {
t = t.Elem()
}
t = t.Field(i).Type
}
return t
}
type field struct {
name string
tags map[string]string
index []int
typ reflect.Type
}
// byIndex sorts field by index sequence.
type byIndex []field
func (x byIndex) Len() int { return len(x) }
func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x byIndex) Less(i, j int) bool {
for k, xik := range x[i].index {
if k >= len(x[j].index) {
return false
}
if xik != x[j].index[k] {
return xik < x[j].index[k]
}
}
return len(x[i].index) < len(x[j].index)
}
func dominantField(fields []field) (field, bool) {
// The fields are sorted in increasing index-length order. The winner
// must therefore be one with the shortest index length. Drop all
// longer entries, which is easy: just truncate the slice.
length := len(fields[0].index)
tagged := -1 // Index of first tagged field.
for i, f := range fields {
if len(f.index) > length {
fields = fields[:i]
break
}
}
if tagged >= 0 {
return fields[tagged], true
}
// All remaining fields have the same length. If there's more than one,
// we have a conflict (two fields named "X" at the same level) and we
// return no field.
if len(fields) > 1 {
return field{}, false
}
return fields[0], true
}
var fieldCache struct {
value atomic.Value
mu sync.Mutex
}
func typeFields(t reflect.Type) []field {
// Anonymous fields to explore at the current level and the next.
current := []field{}
next := []field{{typ: t}}
// Count of queued names for current level and the next.
var count map[reflect.Type]int
nextCount := map[reflect.Type]int{}
// Types already visited at | typeByIndex | identifier_name |
encode.go |
return e.Bytes(), nil
}
// Marshaler is the interface implemented by types that can marshal
// themselves into valid BNET.
type Marshaler interface {
MarshalBNet() ([]byte, error)
}
// An UnsupportedTypeError occurs when attempting to marshal an
// unsupported type.
type UnsupportedTypeError struct {
Type reflect.Type
}
func (e *UnsupportedTypeError) Error() string {
return "bnet: unsupported type: " + e.Type.String()
}
type encodeState struct {
buf bytes.Buffer
tagCache map[string]string
}
func (e *encodeState) Bytes() []byte {
return e.buf.Bytes()
}
func (e *encodeState) marshal(v interface{}) (err error) {
defer func() {
if r := recover(); r != nil {
if _, ok := r.(runtime.Error); ok {
panic(r)
}
if s, ok := r.(string); ok {
panic(s)
}
err = r.(error)
}
}()
e.reflectValue(reflect.ValueOf(v))
return nil
}
func (e *encodeState) error(err error) {
panic(err)
}
func isEmptyValue(v reflect.Value) bool {
switch v.Kind() {
case reflect.Array, reflect.Slice:
return v.Len() == 0
case reflect.Interface, reflect.Ptr:
return v.IsNil()
}
return false
}
func (e *encodeState) reflectValue(v reflect.Value) {
valueEncoder(v)(e, v)
}
type encoderFunc func(e *encodeState, v reflect.Value)
var encoderCache sync.Map
func valueEncoder(v reflect.Value) encoderFunc {
if !v.IsValid() {
return invalidValueEncoder
}
return typeEncoder(v.Type())
}
func typeEncoder(t reflect.Type) encoderFunc {
if fi, ok := encoderCache.Load(t); ok {
return fi.(encoderFunc)
}
var (
wg sync.WaitGroup
f encoderFunc
)
wg.Add(1)
fi, loaded := encoderCache.LoadOrStore(t, encoderFunc(func(e *encodeState, v reflect.Value) {
wg.Wait()
f(e, v)
}))
if loaded {
return fi.(encoderFunc)
}
f = newTypeEncoder(t, true)
wg.Done()
encoderCache.Store(t, f)
return f
}
var (
// marshalerType = reflect.TypeOf(new(Marshaler)).Elem()
// binaryMarshalerType = reflect.TypeOf(new(encoding.BinaryMarshaler)).Elem()
)
func newTypeEncoder(t reflect.Type, allowAddr bool) encoderFunc {
switch t.Kind() {
case reflect.Bool:
return boolEncoder
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
return uintEncoder
case reflect.Interface:
return interfaceEncoder
case reflect.String:
return stringEncoder
case reflect.Struct:
return newStructEncoder(t)
case reflect.Slice:
return newSliceEncoder(t)
case reflect.Array:
return newArrayEncoder(t)
case reflect.Ptr:
return newPtrEncoder(t)
default:
return unsupportedTypeEncoder
}
}
func invalidValueEncoder(e *encodeState, v reflect.Value) {
e.error(&InvalidValueError{})
}
func boolEncoder(e *encodeState, v reflect.Value) {
if size, ok := e.tagCache["size"]; ok {
// @todo TagDefinitionRequiredError
switch size {
case "uint8":
if v.Bool() {
uintEncoder(e, reflect.ValueOf(uint8(0x01)))
return
}
uintEncoder(e, reflect.ValueOf(uint8(0x00)))
case "uint32":
if v.Bool() {
uintEncoder(e, reflect.ValueOf(uint32(0x01)))
return
}
uintEncoder(e, reflect.ValueOf(uint32(0x00)))
default:
e.error(&InvalidTagValueError{Expected: "uint8 or uint32", Value: size})
}
} else {
e.error(&TagDefinitionRequiredError{Tag: "size"})
}
}
func uintEncoder(e *encodeState, v reflect.Value) {
_, bigendian := e.tagCache["bigendian"]
switch v.Kind() {
case reflect.Uint8:
e.buf.WriteByte(uint8(v.Uint()))
case reflect.Uint16:
b := []byte{0x00, 0x00}
if bigendian {
binary.BigEndian.PutUint16(b, uint16(v.Uint()))
} else {
binary.LittleEndian.PutUint16(b, uint16(v.Uint()))
}
e.buf.Write(b)
case reflect.Uint32:
b := []byte{0x00, 0x00, 0x00, 0x00}
if bigendian {
binary.BigEndian.PutUint32(b, uint32(v.Uint()))
} else {
binary.LittleEndian.PutUint32(b, uint32(v.Uint()))
}
e.buf.Write(b)
case reflect.Uint64:
b := []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
if bigendian {
binary.BigEndian.PutUint64(b, v.Uint())
} else {
binary.LittleEndian.PutUint64(b, v.Uint())
}
e.buf.Write(b)
}
}
func stringEncoder(e *encodeState, v reflect.Value) {
e.buf.WriteString(v.String())
e.buf.WriteByte(0x00)
}
func interfaceEncoder(e *encodeState, v reflect.Value) {
if v.IsNil() {
return
}
e.reflectValue(v.Elem())
}
func unsupportedTypeEncoder(e *encodeState, v reflect.Value) {
e.error(&UnsupportedTypeError{v.Type()})
}
type structEncoder struct {
fields []field
fieldEncs []encoderFunc
}
func (se *structEncoder) encode(e *encodeState, v reflect.Value) {
for i, f := range se.fields {
fv, err := fieldByIndex(v, f.index)
if err != nil {
e.error(err)
}
if !fv.IsValid() || isEmptyValue(*fv) {
// @todo InvalidValue
e.error(&InvalidValueError{})
}
e.tagCache = se.fields[i].tags
se.fieldEncs[i](e, *fv)
e.tagCache = nil
}
}
func newStructEncoder(t reflect.Type) encoderFunc {
fields := cachedTypeFields(t)
se := &structEncoder{
fields: fields,
fieldEncs: make([]encoderFunc, len(fields)),
}
for i, f := range fields {
se.fieldEncs[i] = typeEncoder(typeByIndex(t, f.index))
}
return se.encode
}
func encodeStringSlice(e *encodeState, v reflect.Value) {
n := v.Len()
for i := 0; i < n; i++ {
stringEncoder(e, v.Index(i))
}
e.buf.WriteByte(0x00)
}
type sliceEncoder struct {
arrayEnc encoderFunc
}
func (se *sliceEncoder) encode(e *encodeState, v reflect.Value) {
if v.IsNil() {
return
}
se.arrayEnc(e, v)
}
func newSliceEncoder(t reflect.Type) encoderFunc {
if t.Elem().Kind() == reflect.String {
return encodeStringSlice
}
enc := &sliceEncoder{newArrayEncoder(t)}
return enc.encode
}
type arrayEncoder struct {
elemEnc encoderFunc
}
func (ae *arrayEncoder) encode(e *encodeState, v reflect.Value) {
n := v.Len()
for i := 0; i < n; i++ {
ae.elemEnc(e, v.Index(i))
}
}
func newArrayEncoder(t reflect.Type) encoderFunc {
enc := &arrayEncoder{typeEncoder(t.Elem())}
return enc.encode
}
func newPtrEncoder(t reflect.Type) encoderFunc {
enc := &ptrEncoder{typeEncoder(t.Elem())}
return enc.encode
}
type ptrEncoder struct {
ptrEnc encoderFunc
}
func (pe *ptrEncoder) encode(e *encodeState, v reflect.Value) {
if v.IsNil() {
return
}
pe.ptrEnc(e, v.Elem())
}
func fieldByIndex(v reflect.Value, index []int) (*reflect.Value, error) {
for _, i := range index {
if v.Kind() == reflect.Ptr {
if v.IsNil() {
return nil, &NilPointerError{}
}
v = v.Elem()
}
v = v.Field(i)
}
return &v, nil
}
func typeByIndex(t reflect.Type, index []int) reflect.Type {
for _, i := range index {
if t.Kind() == reflect.Ptr {
t = t.Elem()
}
t = t.Field(i).Type
}
return t
}
type field struct {
name string
tags map[string]string
index []int
typ reflect.Type
}
// byIndex sorts field by index sequence.
type byIndex []field
func (x byIndex) Len() int { return len(x) }
func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x byIndex) Less(i, j int) bool {
for k, x | {
return nil, err
} | conditional_block | |
encode.go | return invalidValueEncoder
}
return typeEncoder(v.Type())
}
func typeEncoder(t reflect.Type) encoderFunc {
if fi, ok := encoderCache.Load(t); ok {
return fi.(encoderFunc)
}
var (
wg sync.WaitGroup
f encoderFunc
)
wg.Add(1)
fi, loaded := encoderCache.LoadOrStore(t, encoderFunc(func(e *encodeState, v reflect.Value) {
wg.Wait()
f(e, v)
}))
if loaded {
return fi.(encoderFunc)
}
f = newTypeEncoder(t, true)
wg.Done()
encoderCache.Store(t, f)
return f
}
var (
// marshalerType = reflect.TypeOf(new(Marshaler)).Elem()
// binaryMarshalerType = reflect.TypeOf(new(encoding.BinaryMarshaler)).Elem()
)
func newTypeEncoder(t reflect.Type, allowAddr bool) encoderFunc {
switch t.Kind() {
case reflect.Bool:
return boolEncoder
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
return uintEncoder
case reflect.Interface:
return interfaceEncoder
case reflect.String:
return stringEncoder
case reflect.Struct:
return newStructEncoder(t)
case reflect.Slice:
return newSliceEncoder(t)
case reflect.Array:
return newArrayEncoder(t)
case reflect.Ptr:
return newPtrEncoder(t)
default:
return unsupportedTypeEncoder
}
}
func invalidValueEncoder(e *encodeState, v reflect.Value) {
e.error(&InvalidValueError{})
}
func boolEncoder(e *encodeState, v reflect.Value) {
if size, ok := e.tagCache["size"]; ok {
// @todo TagDefinitionRequiredError
switch size {
case "uint8":
if v.Bool() {
uintEncoder(e, reflect.ValueOf(uint8(0x01)))
return
}
uintEncoder(e, reflect.ValueOf(uint8(0x00)))
case "uint32":
if v.Bool() {
uintEncoder(e, reflect.ValueOf(uint32(0x01)))
return
}
uintEncoder(e, reflect.ValueOf(uint32(0x00)))
default:
e.error(&InvalidTagValueError{Expected: "uint8 or uint32", Value: size})
}
} else {
e.error(&TagDefinitionRequiredError{Tag: "size"})
}
}
func uintEncoder(e *encodeState, v reflect.Value) {
_, bigendian := e.tagCache["bigendian"]
switch v.Kind() {
case reflect.Uint8:
e.buf.WriteByte(uint8(v.Uint()))
case reflect.Uint16:
b := []byte{0x00, 0x00}
if bigendian {
binary.BigEndian.PutUint16(b, uint16(v.Uint()))
} else {
binary.LittleEndian.PutUint16(b, uint16(v.Uint()))
}
e.buf.Write(b)
case reflect.Uint32:
b := []byte{0x00, 0x00, 0x00, 0x00}
if bigendian {
binary.BigEndian.PutUint32(b, uint32(v.Uint()))
} else {
binary.LittleEndian.PutUint32(b, uint32(v.Uint()))
}
e.buf.Write(b)
case reflect.Uint64:
b := []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
if bigendian {
binary.BigEndian.PutUint64(b, v.Uint())
} else {
binary.LittleEndian.PutUint64(b, v.Uint())
}
e.buf.Write(b)
}
}
func stringEncoder(e *encodeState, v reflect.Value) {
e.buf.WriteString(v.String())
e.buf.WriteByte(0x00)
}
func interfaceEncoder(e *encodeState, v reflect.Value) {
if v.IsNil() {
return
}
e.reflectValue(v.Elem())
}
func unsupportedTypeEncoder(e *encodeState, v reflect.Value) {
e.error(&UnsupportedTypeError{v.Type()})
}
type structEncoder struct {
fields []field
fieldEncs []encoderFunc
}
func (se *structEncoder) encode(e *encodeState, v reflect.Value) {
for i, f := range se.fields {
fv, err := fieldByIndex(v, f.index)
if err != nil {
e.error(err)
}
if !fv.IsValid() || isEmptyValue(*fv) {
// @todo InvalidValue
e.error(&InvalidValueError{})
}
e.tagCache = se.fields[i].tags
se.fieldEncs[i](e, *fv)
e.tagCache = nil
}
}
func newStructEncoder(t reflect.Type) encoderFunc |
func encodeStringSlice(e *encodeState, v reflect.Value) {
n := v.Len()
for i := 0; i < n; i++ {
stringEncoder(e, v.Index(i))
}
e.buf.WriteByte(0x00)
}
type sliceEncoder struct {
arrayEnc encoderFunc
}
func (se *sliceEncoder) encode(e *encodeState, v reflect.Value) {
if v.IsNil() {
return
}
se.arrayEnc(e, v)
}
func newSliceEncoder(t reflect.Type) encoderFunc {
if t.Elem().Kind() == reflect.String {
return encodeStringSlice
}
enc := &sliceEncoder{newArrayEncoder(t)}
return enc.encode
}
type arrayEncoder struct {
elemEnc encoderFunc
}
func (ae *arrayEncoder) encode(e *encodeState, v reflect.Value) {
n := v.Len()
for i := 0; i < n; i++ {
ae.elemEnc(e, v.Index(i))
}
}
func newArrayEncoder(t reflect.Type) encoderFunc {
enc := &arrayEncoder{typeEncoder(t.Elem())}
return enc.encode
}
func newPtrEncoder(t reflect.Type) encoderFunc {
enc := &ptrEncoder{typeEncoder(t.Elem())}
return enc.encode
}
type ptrEncoder struct {
ptrEnc encoderFunc
}
func (pe *ptrEncoder) encode(e *encodeState, v reflect.Value) {
if v.IsNil() {
return
}
pe.ptrEnc(e, v.Elem())
}
func fieldByIndex(v reflect.Value, index []int) (*reflect.Value, error) {
for _, i := range index {
if v.Kind() == reflect.Ptr {
if v.IsNil() {
return nil, &NilPointerError{}
}
v = v.Elem()
}
v = v.Field(i)
}
return &v, nil
}
func typeByIndex(t reflect.Type, index []int) reflect.Type {
for _, i := range index {
if t.Kind() == reflect.Ptr {
t = t.Elem()
}
t = t.Field(i).Type
}
return t
}
type field struct {
name string
tags map[string]string
index []int
typ reflect.Type
}
// byIndex sorts field by index sequence.
type byIndex []field
func (x byIndex) Len() int { return len(x) }
func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x byIndex) Less(i, j int) bool {
for k, xik := range x[i].index {
if k >= len(x[j].index) {
return false
}
if xik != x[j].index[k] {
return xik < x[j].index[k]
}
}
return len(x[i].index) < len(x[j].index)
}
func dominantField(fields []field) (field, bool) {
// The fields are sorted in increasing index-length order. The winner
// must therefore be one with the shortest index length. Drop all
// longer entries, which is easy: just truncate the slice.
length := len(fields[0].index)
tagged := -1 // Index of first tagged field.
for i, f := range fields {
if len(f.index) > length {
fields = fields[:i]
break
}
}
if tagged >= 0 {
return fields[tagged], true
}
// All remaining fields have the same length. If there's more than one,
// we have a conflict (two fields named "X" at the same level) and we
// return no field.
if len(fields) > 1 {
return field{}, false
}
return fields[0], true
}
var fieldCache struct {
value atomic.Value
mu sync.Mutex
}
func typeFields(t reflect.Type) []field {
// Anonymous fields to explore at the current level and the next.
current := []field{}
next := []field{{typ: t}}
// Count of queued names for current level and the next.
var count map[reflect.Type]int
nextCount := map[reflect.Type]int{}
// Types already visited | {
fields := cachedTypeFields(t)
se := &structEncoder{
fields: fields,
fieldEncs: make([]encoderFunc, len(fields)),
}
for i, f := range fields {
se.fieldEncs[i] = typeEncoder(typeByIndex(t, f.index))
}
return se.encode
} | identifier_body |
backend.rs | Box<dyn TargetIsa>,
symbols: HashMap<String, *const u8>,
libcall_names: Box<dyn Fn(ir::LibCall) -> String>,
code_memory: Memory,
readonly_memory: Memory,
writable_memory: Memory,
}
/// A record of a relocation to perform.
struct RelocRecord {
offset: CodeOffset,
reloc: Reloc,
name: ir::ExternalName,
addend: Addend,
}
pub struct SimpleJITCompiledFunction {
code: *mut u8,
size: usize,
relocs: Vec<RelocRecord>,
}
pub struct SimpleJITCompiledData {
storage: *mut u8,
size: usize,
relocs: Vec<RelocRecord>,
}
impl SimpleJITBackend {
fn lookup_symbol(&self, name: &str) -> *const u8 {
match self.symbols.get(name) {
Some(&ptr) => ptr,
None => lookup_with_dlsym(name),
}
}
fn get_definition(
&self,
namespace: &ModuleNamespace<Self>,
name: &ir::ExternalName,
) -> *const u8 {
match *name {
ir::ExternalName::User { .. } => {
if namespace.is_function(name) {
let (def, name_str, _signature) = namespace.get_function_definition(&name);
match def {
Some(compiled) => compiled.code,
None => self.lookup_symbol(name_str),
}
} else {
let (def, name_str, _writable) = namespace.get_data_definition(&name);
match def {
Some(compiled) => compiled.storage,
None => self.lookup_symbol(name_str),
}
}
}
ir::ExternalName::LibCall(ref libcall) => {
let sym = (self.libcall_names)(*libcall);
self.lookup_symbol(&sym)
}
_ => panic!("invalid ExternalName {}", name),
}
}
}
impl<'simple_jit_backend> Backend for SimpleJITBackend {
type Builder = SimpleJITBuilder;
/// SimpleJIT compiled function and data objects may have outstanding
/// relocations that need to be performed before the memory can be used.
/// These relocations are performed within `finalize_function` and
/// `finalize_data`.
type CompiledFunction = SimpleJITCompiledFunction;
type CompiledData = SimpleJITCompiledData;
/// SimpleJIT emits code and data into memory, and provides raw pointers
/// to them.
type FinalizedFunction = *const u8;
type FinalizedData = (*mut u8, usize);
/// SimpleJIT emits code and data into memory as it processes them, so it
/// doesn't need to provide anything after the `Module` is complete.
type Product = ();
/// Create a new `SimpleJITBackend`.
fn new(builder: SimpleJITBuilder) -> Self {
Self {
isa: builder.isa,
symbols: builder.symbols,
libcall_names: builder.libcall_names,
code_memory: Memory::new(),
readonly_memory: Memory::new(),
writable_memory: Memory::new(),
}
}
fn isa(&self) -> &dyn TargetIsa {
&*self.isa
}
fn declare_function(&mut self, _name: &str, _linkage: Linkage) {
// Nothing to do.
}
fn declare_data(
&mut self,
_name: &str,
_linkage: Linkage,
_writable: bool,
_align: Option<u8>,
) {
// Nothing to do.
}
fn define_function(
&mut self,
name: &str,
ctx: &cranelift_codegen::Context,
_namespace: &ModuleNamespace<Self>,
code_size: u32,
) -> ModuleResult<Self::CompiledFunction> {
let size = code_size as usize;
let ptr = self
.code_memory
.allocate(size, EXECUTABLE_DATA_ALIGNMENT)
.expect("TODO: handle OOM etc.");
if cfg!(target_os = "linux") && ::std::env::var_os("PERF_BUILDID_DIR").is_some() {
let mut map_file = ::std::fs::OpenOptions::new()
.create(true)
.append(true)
.open(format!("/tmp/perf-{}.map", ::std::process::id()))
.unwrap();
let _ = writeln!(map_file, "{:x} {:x} {}", ptr as usize, code_size, name);
}
let mut reloc_sink = SimpleJITRelocSink::new();
// Ignore traps for now. For now, frontends should just avoid generating code
// that traps.
let mut trap_sink = NullTrapSink {};
unsafe { ctx.emit_to_memory(&*self.isa, ptr, &mut reloc_sink, &mut trap_sink) };
Ok(Self::CompiledFunction {
code: ptr,
size,
relocs: reloc_sink.relocs,
})
}
fn define_data(
&mut self,
_name: &str,
writable: bool,
align: Option<u8>,
data: &DataContext,
_namespace: &ModuleNamespace<Self>,
) -> ModuleResult<Self::CompiledData> {
let &DataDescription {
ref init,
ref function_decls,
ref data_decls,
ref function_relocs,
ref data_relocs,
} = data.description();
let size = init.size();
let storage = if writable {
self.writable_memory
.allocate(size, align.unwrap_or(WRITABLE_DATA_ALIGNMENT))
.expect("TODO: handle OOM etc.")
} else {
self.readonly_memory
.allocate(size, align.unwrap_or(READONLY_DATA_ALIGNMENT))
.expect("TODO: handle OOM etc.")
};
match *init {
Init::Uninitialized => {
panic!("data is not initialized yet");
}
Init::Zeros { .. } => {
unsafe { ptr::write_bytes(storage, 0, size) };
}
Init::Bytes { ref contents } => {
let src = contents.as_ptr();
unsafe { ptr::copy_nonoverlapping(src, storage, size) };
}
}
let reloc = match self.isa.triple().pointer_width().unwrap() {
PointerWidth::U16 => panic!(),
PointerWidth::U32 => Reloc::Abs4,
PointerWidth::U64 => Reloc::Abs8,
};
let mut relocs = Vec::new();
for &(offset, id) in function_relocs {
relocs.push(RelocRecord {
reloc,
offset,
name: function_decls[id].clone(),
addend: 0,
});
}
for &(offset, id, addend) in data_relocs {
relocs.push(RelocRecord {
reloc,
offset,
name: data_decls[id].clone(),
addend,
});
}
Ok(Self::CompiledData {
storage,
size,
relocs,
})
}
fn write_data_funcaddr(
&mut self,
_data: &mut Self::CompiledData,
_offset: usize,
_what: ir::FuncRef,
) {
unimplemented!();
}
fn write_data_dataaddr(
&mut self,
_data: &mut Self::CompiledData,
_offset: usize,
_what: ir::GlobalValue,
_usize: Addend,
) {
unimplemented!();
}
fn finalize_function(
&mut self,
func: &Self::CompiledFunction,
namespace: &ModuleNamespace<Self>,
) -> Self::FinalizedFunction {
use std::ptr::write_unaligned;
for &RelocRecord {
reloc,
offset,
ref name,
addend,
} in &func.relocs
{
let ptr = func.code;
debug_assert!((offset as usize) < func.size);
let at = unsafe { ptr.offset(offset as isize) };
let base = self.get_definition(namespace, name);
// TODO: Handle overflow.
let what = unsafe { base.offset(addend as isize) };
match reloc {
Reloc::Abs4 => {
// TODO: Handle overflow.
#[cfg_attr(feature = "cargo-clippy", allow(clippy::cast_ptr_alignment))]
unsafe {
write_unaligned(at as *mut u32, what as u32)
};
}
Reloc::Abs8 => {
#[cfg_attr(feature = "cargo-clippy", allow(clippy::cast_ptr_alignment))]
unsafe {
write_unaligned(at as *mut u64, what as u64)
};
}
Reloc::X86PCRel4 | Reloc::X86CallPCRel4 => {
// TODO: Handle overflow.
let pcrel = ((what as isize) - (at as isize)) as i32;
#[cfg_attr(feature = "cargo-clippy", allow(clippy::cast_ptr_alignment))]
unsafe {
write_unaligned(at as *mut i32, pcrel)
};
}
Reloc::X86GOTPCRel4 | Reloc::X86CallPLTRel4 => panic!("unexpected PIC relocation"),
_ => unimplemented!(),
}
}
func.code
}
fn | get_finalized_function | identifier_name | |
backend.rs | .code,
None => self.lookup_symbol(name_str),
}
} else {
let (def, name_str, _writable) = namespace.get_data_definition(&name);
match def {
Some(compiled) => compiled.storage,
None => self.lookup_symbol(name_str),
}
}
}
ir::ExternalName::LibCall(ref libcall) => {
let sym = (self.libcall_names)(*libcall);
self.lookup_symbol(&sym)
}
_ => panic!("invalid ExternalName {}", name),
}
}
}
impl<'simple_jit_backend> Backend for SimpleJITBackend {
type Builder = SimpleJITBuilder;
/// SimpleJIT compiled function and data objects may have outstanding
/// relocations that need to be performed before the memory can be used.
/// These relocations are performed within `finalize_function` and
/// `finalize_data`.
type CompiledFunction = SimpleJITCompiledFunction;
type CompiledData = SimpleJITCompiledData;
/// SimpleJIT emits code and data into memory, and provides raw pointers
/// to them.
type FinalizedFunction = *const u8;
type FinalizedData = (*mut u8, usize);
/// SimpleJIT emits code and data into memory as it processes them, so it
/// doesn't need to provide anything after the `Module` is complete.
type Product = ();
/// Create a new `SimpleJITBackend`.
fn new(builder: SimpleJITBuilder) -> Self {
Self {
isa: builder.isa,
symbols: builder.symbols,
libcall_names: builder.libcall_names,
code_memory: Memory::new(),
readonly_memory: Memory::new(),
writable_memory: Memory::new(),
}
}
fn isa(&self) -> &dyn TargetIsa {
&*self.isa
}
fn declare_function(&mut self, _name: &str, _linkage: Linkage) {
// Nothing to do.
}
fn declare_data(
&mut self,
_name: &str,
_linkage: Linkage,
_writable: bool,
_align: Option<u8>,
) {
// Nothing to do.
}
fn define_function(
&mut self,
name: &str,
ctx: &cranelift_codegen::Context,
_namespace: &ModuleNamespace<Self>,
code_size: u32,
) -> ModuleResult<Self::CompiledFunction> {
let size = code_size as usize;
let ptr = self
.code_memory
.allocate(size, EXECUTABLE_DATA_ALIGNMENT)
.expect("TODO: handle OOM etc.");
if cfg!(target_os = "linux") && ::std::env::var_os("PERF_BUILDID_DIR").is_some() {
let mut map_file = ::std::fs::OpenOptions::new()
.create(true)
.append(true)
.open(format!("/tmp/perf-{}.map", ::std::process::id()))
.unwrap();
let _ = writeln!(map_file, "{:x} {:x} {}", ptr as usize, code_size, name);
}
let mut reloc_sink = SimpleJITRelocSink::new();
// Ignore traps for now. For now, frontends should just avoid generating code
// that traps.
let mut trap_sink = NullTrapSink {};
unsafe { ctx.emit_to_memory(&*self.isa, ptr, &mut reloc_sink, &mut trap_sink) };
Ok(Self::CompiledFunction {
code: ptr,
size,
relocs: reloc_sink.relocs,
})
}
fn define_data(
&mut self,
_name: &str,
writable: bool,
align: Option<u8>,
data: &DataContext,
_namespace: &ModuleNamespace<Self>,
) -> ModuleResult<Self::CompiledData> {
let &DataDescription {
ref init,
ref function_decls,
ref data_decls,
ref function_relocs,
ref data_relocs,
} = data.description();
let size = init.size();
let storage = if writable {
self.writable_memory
.allocate(size, align.unwrap_or(WRITABLE_DATA_ALIGNMENT))
.expect("TODO: handle OOM etc.")
} else {
self.readonly_memory
.allocate(size, align.unwrap_or(READONLY_DATA_ALIGNMENT))
.expect("TODO: handle OOM etc.")
};
match *init {
Init::Uninitialized => {
panic!("data is not initialized yet");
}
Init::Zeros { .. } => {
unsafe { ptr::write_bytes(storage, 0, size) };
}
Init::Bytes { ref contents } => {
let src = contents.as_ptr();
unsafe { ptr::copy_nonoverlapping(src, storage, size) };
}
}
let reloc = match self.isa.triple().pointer_width().unwrap() {
PointerWidth::U16 => panic!(),
PointerWidth::U32 => Reloc::Abs4,
PointerWidth::U64 => Reloc::Abs8,
};
let mut relocs = Vec::new();
for &(offset, id) in function_relocs {
relocs.push(RelocRecord {
reloc,
offset,
name: function_decls[id].clone(),
addend: 0,
});
}
for &(offset, id, addend) in data_relocs {
relocs.push(RelocRecord {
reloc,
offset,
name: data_decls[id].clone(),
addend,
});
}
Ok(Self::CompiledData {
storage,
size,
relocs,
})
}
fn write_data_funcaddr(
&mut self,
_data: &mut Self::CompiledData,
_offset: usize,
_what: ir::FuncRef,
) {
unimplemented!();
}
fn write_data_dataaddr(
&mut self,
_data: &mut Self::CompiledData,
_offset: usize,
_what: ir::GlobalValue,
_usize: Addend,
) {
unimplemented!();
}
fn finalize_function(
&mut self,
func: &Self::CompiledFunction,
namespace: &ModuleNamespace<Self>,
) -> Self::FinalizedFunction {
use std::ptr::write_unaligned;
for &RelocRecord {
reloc,
offset,
ref name,
addend,
} in &func.relocs
{
let ptr = func.code;
debug_assert!((offset as usize) < func.size);
let at = unsafe { ptr.offset(offset as isize) };
let base = self.get_definition(namespace, name);
// TODO: Handle overflow.
let what = unsafe { base.offset(addend as isize) };
match reloc {
Reloc::Abs4 => {
// TODO: Handle overflow.
#[cfg_attr(feature = "cargo-clippy", allow(clippy::cast_ptr_alignment))]
unsafe {
write_unaligned(at as *mut u32, what as u32)
};
}
Reloc::Abs8 => {
#[cfg_attr(feature = "cargo-clippy", allow(clippy::cast_ptr_alignment))]
unsafe {
write_unaligned(at as *mut u64, what as u64)
};
}
Reloc::X86PCRel4 | Reloc::X86CallPCRel4 => {
// TODO: Handle overflow.
let pcrel = ((what as isize) - (at as isize)) as i32;
#[cfg_attr(feature = "cargo-clippy", allow(clippy::cast_ptr_alignment))]
unsafe {
write_unaligned(at as *mut i32, pcrel)
};
}
Reloc::X86GOTPCRel4 | Reloc::X86CallPLTRel4 => panic!("unexpected PIC relocation"),
_ => unimplemented!(),
}
}
func.code
}
fn get_finalized_function(&self, func: &Self::CompiledFunction) -> Self::FinalizedFunction {
func.code
}
fn finalize_data(
&mut self,
data: &Self::CompiledData,
namespace: &ModuleNamespace<Self>,
) -> Self::FinalizedData {
use std::ptr::write_unaligned;
for &RelocRecord {
reloc,
offset,
ref name,
addend,
} in &data.relocs
{
let ptr = data.storage;
debug_assert!((offset as usize) < data.size);
let at = unsafe { ptr.offset(offset as isize) };
let base = self.get_definition(namespace, name);
// TODO: Handle overflow.
let what = unsafe { base.offset(addend as isize) };
match reloc {
Reloc::Abs4 => {
// TODO: Handle overflow.
#[cfg_attr(feature = "cargo-clippy", allow(clippy::cast_ptr_alignment))]
unsafe {
write_unaligned(at as *mut u32, what as u32)
};
}
Reloc::Abs8 => {
#[cfg_attr(feature = "cargo-clippy", allow(clippy::cast_ptr_alignment))]
unsafe {
write_unaligned(at as *mut u64, what as u64)
};
}
Reloc::X86PCRel4 | | Reloc::X86CallPCRel4 | random_line_split | |
backend.rs | {
debug_assert!(!isa.flags().is_pic(), "SimpleJIT requires non-PIC code");
let symbols = HashMap::new();
Self {
isa,
symbols,
libcall_names,
}
}
/// Define a symbol in the internal symbol table.
///
/// The JIT will use the symbol table to resolve names that are declared,
/// but not defined, in the module being compiled. A common example is
/// external functions. With this method, functions and data can be exposed
/// to the code being compiled which are defined by the host.
///
/// If a symbol is defined more than once, the most recent definition will
/// be retained.
///
/// If the JIT fails to find a symbol in its internal table, it will fall
/// back to a platform-specific search (this typically involves searching
/// the current process for public symbols, followed by searching the
/// platform's C runtime).
pub fn symbol<K>(&mut self, name: K, ptr: *const u8) -> &Self
where
K: Into<String>,
{
self.symbols.insert(name.into(), ptr);
self
}
/// Define multiple symbols in the internal symbol table.
///
/// Using this is equivalent to calling `symbol` on each element.
pub fn symbols<It, K>(&mut self, symbols: It) -> &Self
where
It: IntoIterator<Item = (K, *const u8)>,
K: Into<String>,
{
for (name, ptr) in symbols {
self.symbols.insert(name.into(), ptr);
}
self
}
}
/// A `SimpleJITBackend` implements `Backend` and emits code and data into memory where it can be
/// directly called and accessed.
///
/// See the `SimpleJITBuilder` for a convenient way to construct `SimpleJITBackend` instances.
pub struct SimpleJITBackend {
isa: Box<dyn TargetIsa>,
symbols: HashMap<String, *const u8>,
libcall_names: Box<dyn Fn(ir::LibCall) -> String>,
code_memory: Memory,
readonly_memory: Memory,
writable_memory: Memory,
}
/// A record of a relocation to perform.
struct RelocRecord {
offset: CodeOffset,
reloc: Reloc,
name: ir::ExternalName,
addend: Addend,
}
pub struct SimpleJITCompiledFunction {
code: *mut u8,
size: usize,
relocs: Vec<RelocRecord>,
}
pub struct SimpleJITCompiledData {
storage: *mut u8,
size: usize,
relocs: Vec<RelocRecord>,
}
impl SimpleJITBackend {
fn lookup_symbol(&self, name: &str) -> *const u8 {
match self.symbols.get(name) {
Some(&ptr) => ptr,
None => lookup_with_dlsym(name),
}
}
fn get_definition(
&self,
namespace: &ModuleNamespace<Self>,
name: &ir::ExternalName,
) -> *const u8 {
match *name {
ir::ExternalName::User { .. } => {
if namespace.is_function(name) {
let (def, name_str, _signature) = namespace.get_function_definition(&name);
match def {
Some(compiled) => compiled.code,
None => self.lookup_symbol(name_str),
}
} else {
let (def, name_str, _writable) = namespace.get_data_definition(&name);
match def {
Some(compiled) => compiled.storage,
None => self.lookup_symbol(name_str),
}
}
}
ir::ExternalName::LibCall(ref libcall) => {
let sym = (self.libcall_names)(*libcall);
self.lookup_symbol(&sym)
}
_ => panic!("invalid ExternalName {}", name),
}
}
}
impl<'simple_jit_backend> Backend for SimpleJITBackend {
type Builder = SimpleJITBuilder;
/// SimpleJIT compiled function and data objects may have outstanding
/// relocations that need to be performed before the memory can be used.
/// These relocations are performed within `finalize_function` and
/// `finalize_data`.
type CompiledFunction = SimpleJITCompiledFunction;
type CompiledData = SimpleJITCompiledData;
/// SimpleJIT emits code and data into memory, and provides raw pointers
/// to them.
type FinalizedFunction = *const u8;
type FinalizedData = (*mut u8, usize);
/// SimpleJIT emits code and data into memory as it processes them, so it
/// doesn't need to provide anything after the `Module` is complete.
type Product = ();
/// Create a new `SimpleJITBackend`.
fn new(builder: SimpleJITBuilder) -> Self {
Self {
isa: builder.isa,
symbols: builder.symbols,
libcall_names: builder.libcall_names,
code_memory: Memory::new(),
readonly_memory: Memory::new(),
writable_memory: Memory::new(),
}
}
fn isa(&self) -> &dyn TargetIsa |
fn declare_function(&mut self, _name: &str, _linkage: Linkage) {
// Nothing to do.
}
fn declare_data(
&mut self,
_name: &str,
_linkage: Linkage,
_writable: bool,
_align: Option<u8>,
) {
// Nothing to do.
}
fn define_function(
&mut self,
name: &str,
ctx: &cranelift_codegen::Context,
_namespace: &ModuleNamespace<Self>,
code_size: u32,
) -> ModuleResult<Self::CompiledFunction> {
let size = code_size as usize;
let ptr = self
.code_memory
.allocate(size, EXECUTABLE_DATA_ALIGNMENT)
.expect("TODO: handle OOM etc.");
if cfg!(target_os = "linux") && ::std::env::var_os("PERF_BUILDID_DIR").is_some() {
let mut map_file = ::std::fs::OpenOptions::new()
.create(true)
.append(true)
.open(format!("/tmp/perf-{}.map", ::std::process::id()))
.unwrap();
let _ = writeln!(map_file, "{:x} {:x} {}", ptr as usize, code_size, name);
}
let mut reloc_sink = SimpleJITRelocSink::new();
// Ignore traps for now. For now, frontends should just avoid generating code
// that traps.
let mut trap_sink = NullTrapSink {};
unsafe { ctx.emit_to_memory(&*self.isa, ptr, &mut reloc_sink, &mut trap_sink) };
Ok(Self::CompiledFunction {
code: ptr,
size,
relocs: reloc_sink.relocs,
})
}
fn define_data(
&mut self,
_name: &str,
writable: bool,
align: Option<u8>,
data: &DataContext,
_namespace: &ModuleNamespace<Self>,
) -> ModuleResult<Self::CompiledData> {
let &DataDescription {
ref init,
ref function_decls,
ref data_decls,
ref function_relocs,
ref data_relocs,
} = data.description();
let size = init.size();
let storage = if writable {
self.writable_memory
.allocate(size, align.unwrap_or(WRITABLE_DATA_ALIGNMENT))
.expect("TODO: handle OOM etc.")
} else {
self.readonly_memory
.allocate(size, align.unwrap_or(READONLY_DATA_ALIGNMENT))
.expect("TODO: handle OOM etc.")
};
match *init {
Init::Uninitialized => {
panic!("data is not initialized yet");
}
Init::Zeros { .. } => {
unsafe { ptr::write_bytes(storage, 0, size) };
}
Init::Bytes { ref contents } => {
let src = contents.as_ptr();
unsafe { ptr::copy_nonoverlapping(src, storage, size) };
}
}
let reloc = match self.isa.triple().pointer_width().unwrap() {
PointerWidth::U16 => panic!(),
PointerWidth::U32 => Reloc::Abs4,
PointerWidth::U64 => Reloc::Abs8,
};
let mut relocs = Vec::new();
for &(offset, id) in function_relocs {
relocs.push(RelocRecord {
reloc,
offset,
name: function_decls[id].clone(),
addend: 0,
});
}
for &(offset, id, addend) in data_relocs {
relocs.push(RelocRecord {
reloc,
offset,
name: data_decls[id].clone(),
addend,
});
}
Ok(Self::CompiledData {
storage,
size,
relocs,
})
}
fn write_data_funcaddr(
&mut self,
_data: &mut Self::CompiledData,
_offset: usize,
_what: ir::FuncRef,
) {
unimplemented!();
}
fn write_data_dataaddr(
&mut self,
_data: &mut Self::CompiledData,
_offset: usize,
_what: ir::Global | {
&*self.isa
} | identifier_body |
composition_patches.go | // Default
PatchTypePatchSet PatchType = "PatchSet"
PatchTypeToCompositeFieldPath PatchType = "ToCompositeFieldPath"
PatchTypeCombineFromComposite PatchType = "CombineFromComposite"
PatchTypeCombineToComposite PatchType = "CombineToComposite"
)
// A FromFieldPathPolicy determines how to patch from a field path.
type FromFieldPathPolicy string
// FromFieldPath patch policies.
const (
FromFieldPathPolicyOptional FromFieldPathPolicy = "Optional"
FromFieldPathPolicyRequired FromFieldPathPolicy = "Required"
)
// A PatchPolicy configures the specifics of patching behaviour.
type PatchPolicy struct {
// FromFieldPath specifies how to patch from a field path. The default is
// 'Optional', which means the patch will be a no-op if the specified
// fromFieldPath does not exist. Use 'Required' if the patch should fail if
// the specified path does not exist.
// +kubebuilder:validation:Enum=Optional;Required
// +optional
FromFieldPath *FromFieldPathPolicy `json:"fromFieldPath,omitempty"`
MergeOptions *xpv1.MergeOptions `json:"mergeOptions,omitempty"`
}
// Patch objects are applied between composite and composed resources. Their
// behaviour depends on the Type selected. The default Type,
// FromCompositeFieldPath, copies a value from the composite resource to
// the composed resource, applying any defined transformers.
type Patch struct {
// Type sets the patching behaviour to be used. Each patch type may require
// its' own fields to be set on the Patch object.
// +optional
// +kubebuilder:validation:Enum=FromCompositeFieldPath;PatchSet;ToCompositeFieldPath;CombineFromComposite;CombineToComposite
// +kubebuilder:default=FromCompositeFieldPath
Type PatchType `json:"type,omitempty"`
// FromFieldPath is the path of the field on the resource whose value is
// to be used as input. Required when type is FromCompositeFieldPath or
// ToCompositeFieldPath.
// +optional
FromFieldPath *string `json:"fromFieldPath,omitempty"`
// Combine is the patch configuration for a CombineFromComposite or
// CombineToComposite patch.
// +optional
Combine *Combine `json:"combine,omitempty"`
// ToFieldPath is the path of the field on the resource whose value will
// be changed with the result of transforms. Leave empty if you'd like to
// propagate to the same path as fromFieldPath.
// +optional
ToFieldPath *string `json:"toFieldPath,omitempty"`
// PatchSetName to include patches from. Required when type is PatchSet.
// +optional
PatchSetName *string `json:"patchSetName,omitempty"`
// Transforms are the list of functions that are used as a FIFO pipe for the
// input to be transformed.
// +optional
Transforms []Transform `json:"transforms,omitempty"`
// Policy configures the specifics of patching behaviour.
// +optional
Policy *PatchPolicy `json:"policy,omitempty"`
}
// Apply executes a patching operation between the from and to resources.
// Applies all patch types unless an 'only' filter is supplied.
func (c *Patch) Apply(cp, cd runtime.Object, only ...PatchType) error {
if c.filterPatch(only...) {
return nil
}
switch c.Type {
case PatchTypeFromCompositeFieldPath:
return c.applyFromFieldPathPatch(cp, cd)
case PatchTypeToCompositeFieldPath:
return c.applyFromFieldPathPatch(cd, cp)
case PatchTypeCombineFromComposite:
return c.applyCombineFromVariablesPatch(cp, cd)
case PatchTypeCombineToComposite:
return c.applyCombineFromVariablesPatch(cd, cp)
case PatchTypePatchSet:
// Already resolved - nothing to do.
}
return errors.Errorf(errFmtInvalidPatchType, c.Type)
}
// filterPatch returns true if patch should be filtered (not applied)
func (c *Patch) filterPatch(only ...PatchType) bool {
// filter does not apply if not set
if len(only) == 0 {
return false
}
for _, patchType := range only {
if patchType == c.Type {
return false
}
}
return true
}
// applyTransforms applies a list of transforms to a patch value.
func (c *Patch) applyTransforms(input interface{}) (interface{}, error) |
// patchFieldValueToObject, given a path, value and "to" object, will
// apply the value to the "to" object at the given path, returning
// any errors as they occur.
func patchFieldValueToObject(fieldPath string, value interface{}, to runtime.Object, mo *xpv1.MergeOptions) error {
paved, err := fieldpath.PaveObject(to)
if err != nil {
return err
}
if err := paved.MergeValue(fieldPath, value, mo); err != nil {
return err
}
return runtime.DefaultUnstructuredConverter.FromUnstructured(paved.UnstructuredContent(), to)
}
// applyFromFieldPathPatch patches the "to" resource, using a source field
// on the "from" resource. Values may be transformed if any are defined on
// the patch.
func (c *Patch) applyFromFieldPathPatch(from, to runtime.Object) error {
if c.FromFieldPath == nil {
return errors.Errorf(errFmtRequiredField, "FromFieldPath", c.Type)
}
// Default to patching the same field on the composed resource.
if c.ToFieldPath == nil {
c.ToFieldPath = c.FromFieldPath
}
fromMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(from)
if err != nil {
return err
}
in, err := fieldpath.Pave(fromMap).GetValue(*c.FromFieldPath)
if IsOptionalFieldPathNotFound(err, c.Policy) {
return nil
}
if err != nil {
return err
}
var mo *xpv1.MergeOptions
if c.Policy != nil {
mo = c.Policy.MergeOptions
}
// Apply transform pipeline
out, err := c.applyTransforms(in)
if err != nil {
return err
}
return patchFieldValueToObject(*c.ToFieldPath, out, to, mo)
}
// applyCombineFromVariablesPatch patches the "to" resource, taking a list of
// input variables and combining them into a single output value.
// The single output value may then be further transformed if they are defined
// on the patch.
func (c *Patch) applyCombineFromVariablesPatch(from, to runtime.Object) error {
// Combine patch requires configuration
if c.Combine == nil {
return errors.Errorf(errFmtRequiredField, "Combine", c.Type)
}
// Destination field path is required since we can't default to multiple
// fields.
if c.ToFieldPath == nil {
return errors.Errorf(errFmtRequiredField, "ToFieldPath", c.Type)
}
vl := len(c.Combine.Variables)
if vl < 1 {
return errors.New(errCombineRequiresVariables)
}
fromMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(from)
if err != nil {
return err
}
in := make([]interface{}, vl)
// Get value of each variable
// NOTE: This currently assumes all variables define a 'fromFieldPath'
// value. If we add new variable types, this may not be the case and
// this code may be better served split out into a dedicated function.
for i, sp := range c.Combine.Variables {
iv, err := fieldpath.Pave(fromMap).GetValue(sp.FromFieldPath)
// If any source field is not found, we will not
// apply the patch. This is to avoid situations
// where a combine patch is expecting a fixed
// number of inputs (e.g. a string format
// expecting 3 fields '%s-%s-%s' but only
// receiving 2 values).
if IsOptionalFieldPathNotFound(err, c.Policy) {
return nil
}
if err != nil {
return err
}
in[i] = iv
}
// Combine input values
cb, err := c.Combine.Combine(in)
if err != nil {
return err
}
// Apply transform pipeline
out, err := c.applyTransforms(cb)
if err != nil {
return err
}
return patchFieldValueToObject(*c.ToFieldPath, out, to, nil)
}
// IsOptionalFieldPathNotFound returns true if the supplied error indicates a
// field path was not found, and the supplied policy indicates a patch from that
// field path was optional.
func IsOptionalFieldPathNotFound(err error, s *PatchPolicy) bool {
switch {
case s == nil:
fallthrough
case s.FromFieldPath == nil:
fallthrough
case *s.FromFieldPath == FromFieldPathPolicyOptional:
return fieldpath.IsNotFound(err)
default:
return false
}
}
// A CombineVariable defines the source of a value that is combined with
// others to form and patch an output value. Currently, this | {
var err error
for i, t := range c.Transforms {
if input, err = t.Transform(input); err != nil {
return nil, errors.Wrapf(err, errFmtTransformAtIndex, i)
}
}
return input, nil
} | identifier_body |
composition_patches.go | Type) error {
if c.filterPatch(only...) {
return nil
}
switch c.Type {
case PatchTypeFromCompositeFieldPath:
return c.applyFromFieldPathPatch(cp, cd)
case PatchTypeToCompositeFieldPath:
return c.applyFromFieldPathPatch(cd, cp)
case PatchTypeCombineFromComposite:
return c.applyCombineFromVariablesPatch(cp, cd)
case PatchTypeCombineToComposite:
return c.applyCombineFromVariablesPatch(cd, cp)
case PatchTypePatchSet:
// Already resolved - nothing to do.
}
return errors.Errorf(errFmtInvalidPatchType, c.Type)
}
// filterPatch returns true if patch should be filtered (not applied)
func (c *Patch) filterPatch(only ...PatchType) bool {
// filter does not apply if not set
if len(only) == 0 {
return false
}
for _, patchType := range only {
if patchType == c.Type {
return false
}
}
return true
}
// applyTransforms applies a list of transforms to a patch value.
func (c *Patch) applyTransforms(input interface{}) (interface{}, error) {
var err error
for i, t := range c.Transforms {
if input, err = t.Transform(input); err != nil {
return nil, errors.Wrapf(err, errFmtTransformAtIndex, i)
}
}
return input, nil
}
// patchFieldValueToObject, given a path, value and "to" object, will
// apply the value to the "to" object at the given path, returning
// any errors as they occur.
func patchFieldValueToObject(fieldPath string, value interface{}, to runtime.Object, mo *xpv1.MergeOptions) error {
paved, err := fieldpath.PaveObject(to)
if err != nil {
return err
}
if err := paved.MergeValue(fieldPath, value, mo); err != nil {
return err
}
return runtime.DefaultUnstructuredConverter.FromUnstructured(paved.UnstructuredContent(), to)
}
// applyFromFieldPathPatch patches the "to" resource, using a source field
// on the "from" resource. Values may be transformed if any are defined on
// the patch.
func (c *Patch) applyFromFieldPathPatch(from, to runtime.Object) error {
if c.FromFieldPath == nil {
return errors.Errorf(errFmtRequiredField, "FromFieldPath", c.Type)
}
// Default to patching the same field on the composed resource.
if c.ToFieldPath == nil {
c.ToFieldPath = c.FromFieldPath
}
fromMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(from)
if err != nil {
return err
}
in, err := fieldpath.Pave(fromMap).GetValue(*c.FromFieldPath)
if IsOptionalFieldPathNotFound(err, c.Policy) {
return nil
}
if err != nil {
return err
}
var mo *xpv1.MergeOptions
if c.Policy != nil {
mo = c.Policy.MergeOptions
}
// Apply transform pipeline
out, err := c.applyTransforms(in)
if err != nil {
return err
}
return patchFieldValueToObject(*c.ToFieldPath, out, to, mo)
}
// applyCombineFromVariablesPatch patches the "to" resource, taking a list of
// input variables and combining them into a single output value.
// The single output value may then be further transformed if they are defined
// on the patch.
func (c *Patch) applyCombineFromVariablesPatch(from, to runtime.Object) error {
// Combine patch requires configuration
if c.Combine == nil {
return errors.Errorf(errFmtRequiredField, "Combine", c.Type)
}
// Destination field path is required since we can't default to multiple
// fields.
if c.ToFieldPath == nil {
return errors.Errorf(errFmtRequiredField, "ToFieldPath", c.Type)
}
vl := len(c.Combine.Variables)
if vl < 1 {
return errors.New(errCombineRequiresVariables)
}
fromMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(from)
if err != nil {
return err
}
in := make([]interface{}, vl)
// Get value of each variable
// NOTE: This currently assumes all variables define a 'fromFieldPath'
// value. If we add new variable types, this may not be the case and
// this code may be better served split out into a dedicated function.
for i, sp := range c.Combine.Variables {
iv, err := fieldpath.Pave(fromMap).GetValue(sp.FromFieldPath)
// If any source field is not found, we will not
// apply the patch. This is to avoid situations
// where a combine patch is expecting a fixed
// number of inputs (e.g. a string format
// expecting 3 fields '%s-%s-%s' but only
// receiving 2 values).
if IsOptionalFieldPathNotFound(err, c.Policy) {
return nil
}
if err != nil {
return err
}
in[i] = iv
}
// Combine input values
cb, err := c.Combine.Combine(in)
if err != nil {
return err
}
// Apply transform pipeline
out, err := c.applyTransforms(cb)
if err != nil {
return err
}
return patchFieldValueToObject(*c.ToFieldPath, out, to, nil)
}
// IsOptionalFieldPathNotFound returns true if the supplied error indicates a
// field path was not found, and the supplied policy indicates a patch from that
// field path was optional.
func IsOptionalFieldPathNotFound(err error, s *PatchPolicy) bool {
switch {
case s == nil:
fallthrough
case s.FromFieldPath == nil:
fallthrough
case *s.FromFieldPath == FromFieldPathPolicyOptional:
return fieldpath.IsNotFound(err)
default:
return false
}
}
// A CombineVariable defines the source of a value that is combined with
// others to form and patch an output value. Currently, this only supports
// retrieving values from a field path.
type CombineVariable struct {
// FromFieldPath is the path of the field on the source whose value is
// to be used as input.
FromFieldPath string `json:"fromFieldPath"`
}
// A CombineStrategy determines what strategy will be applied to combine
// variables.
type CombineStrategy string
// CombineStrategy strategy definitions.
const (
CombineStrategyString CombineStrategy = "string"
)
// A Combine configures a patch that combines more than
// one input field into a single output field.
type Combine struct {
// Variables are the list of variables whose values will be retrieved and
// combined.
// +kubebuilder:validation:MinItems=1
Variables []CombineVariable `json:"variables"`
// Strategy defines the strategy to use to combine the input variable values.
// Currently only string is supported.
// +kubebuilder:validation:Enum=string
Strategy CombineStrategy `json:"strategy"`
// String declares that input variables should be combined into a single
// string, using the relevant settings for formatting purposes.
// +optional
String *StringCombine `json:"string,omitempty"`
}
// A StringCombine combines multiple input values into a single string.
type StringCombine struct {
// Format the input using a Go format string. See
// https://golang.org/pkg/fmt/ for details.
Format string `json:"fmt"`
}
// Combine returns a single output by running a string format
// with all of its' input variables.
func (s *StringCombine) Combine(vars []interface{}) (interface{}, error) {
return fmt.Sprintf(s.Format, vars...), nil
}
// Combine calls the appropriate combiner.
func (c *Combine) Combine(vars []interface{}) (interface{}, error) {
var combiner interface {
Combine(vars []interface{}) (interface{}, error)
}
switch c.Strategy {
case CombineStrategyString:
combiner = c.String
default:
return nil, errors.Errorf(errFmtCombineStrategyNotSupported, string(c.Strategy))
}
// Check for nil interface requires reflection.
if reflect.ValueOf(combiner).IsNil() {
return nil, errors.Errorf(errFmtCombineConfigMissing, string(c.Strategy))
}
out, err := combiner.Combine(vars)
// Note: There are currently no tests or triggers to exercise this error as
// our only strategy ("String") uses fmt.Sprintf, which cannot return an error.
return out, errors.Wrapf(err, errFmtCombineStrategyFailed, string(c.Strategy))
}
// ComposedTemplates returns a revision's composed resource templates with any
// patchsets dereferenced.
func (rs *CompositionSpec) ComposedTemplates() ([]ComposedTemplate, error) {
pn := make(map[string][]Patch)
for _, s := range rs.PatchSets {
for _, p := range s.Patches {
if p.Type == PatchTypePatchSet {
return nil, errors.New(errPatchSetType)
}
}
pn[s.Name] = s.Patches
}
ct := make([]ComposedTemplate, len(rs.Resources))
for i, r := range rs.Resources {
po := []Patch{}
for _, p := range r.Patches {
if p.Type != PatchTypePatchSet {
po = append(po, p)
continue | }
if p.PatchSetName == nil { | random_line_split | |
composition_patches.go | // Default
PatchTypePatchSet PatchType = "PatchSet"
PatchTypeToCompositeFieldPath PatchType = "ToCompositeFieldPath"
PatchTypeCombineFromComposite PatchType = "CombineFromComposite"
PatchTypeCombineToComposite PatchType = "CombineToComposite"
)
// A FromFieldPathPolicy determines how to patch from a field path.
type FromFieldPathPolicy string
// FromFieldPath patch policies.
const (
FromFieldPathPolicyOptional FromFieldPathPolicy = "Optional"
FromFieldPathPolicyRequired FromFieldPathPolicy = "Required"
)
// A PatchPolicy configures the specifics of patching behaviour.
type PatchPolicy struct {
// FromFieldPath specifies how to patch from a field path. The default is
// 'Optional', which means the patch will be a no-op if the specified
// fromFieldPath does not exist. Use 'Required' if the patch should fail if
// the specified path does not exist.
// +kubebuilder:validation:Enum=Optional;Required
// +optional
FromFieldPath *FromFieldPathPolicy `json:"fromFieldPath,omitempty"`
MergeOptions *xpv1.MergeOptions `json:"mergeOptions,omitempty"`
}
// Patch objects are applied between composite and composed resources. Their
// behaviour depends on the Type selected. The default Type,
// FromCompositeFieldPath, copies a value from the composite resource to
// the composed resource, applying any defined transformers.
type Patch struct {
// Type sets the patching behaviour to be used. Each patch type may require
// its' own fields to be set on the Patch object.
// +optional
// +kubebuilder:validation:Enum=FromCompositeFieldPath;PatchSet;ToCompositeFieldPath;CombineFromComposite;CombineToComposite
// +kubebuilder:default=FromCompositeFieldPath
Type PatchType `json:"type,omitempty"`
// FromFieldPath is the path of the field on the resource whose value is
// to be used as input. Required when type is FromCompositeFieldPath or
// ToCompositeFieldPath.
// +optional
FromFieldPath *string `json:"fromFieldPath,omitempty"`
// Combine is the patch configuration for a CombineFromComposite or
// CombineToComposite patch.
// +optional
Combine *Combine `json:"combine,omitempty"`
// ToFieldPath is the path of the field on the resource whose value will
// be changed with the result of transforms. Leave empty if you'd like to
// propagate to the same path as fromFieldPath.
// +optional
ToFieldPath *string `json:"toFieldPath,omitempty"`
// PatchSetName to include patches from. Required when type is PatchSet.
// +optional
PatchSetName *string `json:"patchSetName,omitempty"`
// Transforms are the list of functions that are used as a FIFO pipe for the
// input to be transformed.
// +optional
Transforms []Transform `json:"transforms,omitempty"`
// Policy configures the specifics of patching behaviour.
// +optional
Policy *PatchPolicy `json:"policy,omitempty"`
}
// Apply executes a patching operation between the from and to resources.
// Applies all patch types unless an 'only' filter is supplied.
func (c *Patch) Apply(cp, cd runtime.Object, only ...PatchType) error {
if c.filterPatch(only...) |
switch c.Type {
case PatchTypeFromCompositeFieldPath:
return c.applyFromFieldPathPatch(cp, cd)
case PatchTypeToCompositeFieldPath:
return c.applyFromFieldPathPatch(cd, cp)
case PatchTypeCombineFromComposite:
return c.applyCombineFromVariablesPatch(cp, cd)
case PatchTypeCombineToComposite:
return c.applyCombineFromVariablesPatch(cd, cp)
case PatchTypePatchSet:
// Already resolved - nothing to do.
}
return errors.Errorf(errFmtInvalidPatchType, c.Type)
}
// filterPatch returns true if patch should be filtered (not applied)
func (c *Patch) filterPatch(only ...PatchType) bool {
// filter does not apply if not set
if len(only) == 0 {
return false
}
for _, patchType := range only {
if patchType == c.Type {
return false
}
}
return true
}
// applyTransforms applies a list of transforms to a patch value.
func (c *Patch) applyTransforms(input interface{}) (interface{}, error) {
var err error
for i, t := range c.Transforms {
if input, err = t.Transform(input); err != nil {
return nil, errors.Wrapf(err, errFmtTransformAtIndex, i)
}
}
return input, nil
}
// patchFieldValueToObject, given a path, value and "to" object, will
// apply the value to the "to" object at the given path, returning
// any errors as they occur.
func patchFieldValueToObject(fieldPath string, value interface{}, to runtime.Object, mo *xpv1.MergeOptions) error {
paved, err := fieldpath.PaveObject(to)
if err != nil {
return err
}
if err := paved.MergeValue(fieldPath, value, mo); err != nil {
return err
}
return runtime.DefaultUnstructuredConverter.FromUnstructured(paved.UnstructuredContent(), to)
}
// applyFromFieldPathPatch patches the "to" resource, using a source field
// on the "from" resource. Values may be transformed if any are defined on
// the patch.
func (c *Patch) applyFromFieldPathPatch(from, to runtime.Object) error {
if c.FromFieldPath == nil {
return errors.Errorf(errFmtRequiredField, "FromFieldPath", c.Type)
}
// Default to patching the same field on the composed resource.
if c.ToFieldPath == nil {
c.ToFieldPath = c.FromFieldPath
}
fromMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(from)
if err != nil {
return err
}
in, err := fieldpath.Pave(fromMap).GetValue(*c.FromFieldPath)
if IsOptionalFieldPathNotFound(err, c.Policy) {
return nil
}
if err != nil {
return err
}
var mo *xpv1.MergeOptions
if c.Policy != nil {
mo = c.Policy.MergeOptions
}
// Apply transform pipeline
out, err := c.applyTransforms(in)
if err != nil {
return err
}
return patchFieldValueToObject(*c.ToFieldPath, out, to, mo)
}
// applyCombineFromVariablesPatch patches the "to" resource, taking a list of
// input variables and combining them into a single output value.
// The single output value may then be further transformed if they are defined
// on the patch.
func (c *Patch) applyCombineFromVariablesPatch(from, to runtime.Object) error {
// Combine patch requires configuration
if c.Combine == nil {
return errors.Errorf(errFmtRequiredField, "Combine", c.Type)
}
// Destination field path is required since we can't default to multiple
// fields.
if c.ToFieldPath == nil {
return errors.Errorf(errFmtRequiredField, "ToFieldPath", c.Type)
}
vl := len(c.Combine.Variables)
if vl < 1 {
return errors.New(errCombineRequiresVariables)
}
fromMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(from)
if err != nil {
return err
}
in := make([]interface{}, vl)
// Get value of each variable
// NOTE: This currently assumes all variables define a 'fromFieldPath'
// value. If we add new variable types, this may not be the case and
// this code may be better served split out into a dedicated function.
for i, sp := range c.Combine.Variables {
iv, err := fieldpath.Pave(fromMap).GetValue(sp.FromFieldPath)
// If any source field is not found, we will not
// apply the patch. This is to avoid situations
// where a combine patch is expecting a fixed
// number of inputs (e.g. a string format
// expecting 3 fields '%s-%s-%s' but only
// receiving 2 values).
if IsOptionalFieldPathNotFound(err, c.Policy) {
return nil
}
if err != nil {
return err
}
in[i] = iv
}
// Combine input values
cb, err := c.Combine.Combine(in)
if err != nil {
return err
}
// Apply transform pipeline
out, err := c.applyTransforms(cb)
if err != nil {
return err
}
return patchFieldValueToObject(*c.ToFieldPath, out, to, nil)
}
// IsOptionalFieldPathNotFound returns true if the supplied error indicates a
// field path was not found, and the supplied policy indicates a patch from that
// field path was optional.
func IsOptionalFieldPathNotFound(err error, s *PatchPolicy) bool {
switch {
case s == nil:
fallthrough
case s.FromFieldPath == nil:
fallthrough
case *s.FromFieldPath == FromFieldPathPolicyOptional:
return fieldpath.IsNotFound(err)
default:
return false
}
}
// A CombineVariable defines the source of a value that is combined with
// others to form and patch an output value. Currently, this | {
return nil
} | conditional_block |
composition_patches.go | PatchSet;ToCompositeFieldPath;CombineFromComposite;CombineToComposite
// +kubebuilder:default=FromCompositeFieldPath
Type PatchType `json:"type,omitempty"`
// FromFieldPath is the path of the field on the resource whose value is
// to be used as input. Required when type is FromCompositeFieldPath or
// ToCompositeFieldPath.
// +optional
FromFieldPath *string `json:"fromFieldPath,omitempty"`
// Combine is the patch configuration for a CombineFromComposite or
// CombineToComposite patch.
// +optional
Combine *Combine `json:"combine,omitempty"`
// ToFieldPath is the path of the field on the resource whose value will
// be changed with the result of transforms. Leave empty if you'd like to
// propagate to the same path as fromFieldPath.
// +optional
ToFieldPath *string `json:"toFieldPath,omitempty"`
// PatchSetName to include patches from. Required when type is PatchSet.
// +optional
PatchSetName *string `json:"patchSetName,omitempty"`
// Transforms are the list of functions that are used as a FIFO pipe for the
// input to be transformed.
// +optional
Transforms []Transform `json:"transforms,omitempty"`
// Policy configures the specifics of patching behaviour.
// +optional
Policy *PatchPolicy `json:"policy,omitempty"`
}
// Apply executes a patching operation between the from and to resources.
// Applies all patch types unless an 'only' filter is supplied.
func (c *Patch) Apply(cp, cd runtime.Object, only ...PatchType) error {
if c.filterPatch(only...) {
return nil
}
switch c.Type {
case PatchTypeFromCompositeFieldPath:
return c.applyFromFieldPathPatch(cp, cd)
case PatchTypeToCompositeFieldPath:
return c.applyFromFieldPathPatch(cd, cp)
case PatchTypeCombineFromComposite:
return c.applyCombineFromVariablesPatch(cp, cd)
case PatchTypeCombineToComposite:
return c.applyCombineFromVariablesPatch(cd, cp)
case PatchTypePatchSet:
// Already resolved - nothing to do.
}
return errors.Errorf(errFmtInvalidPatchType, c.Type)
}
// filterPatch returns true if patch should be filtered (not applied)
func (c *Patch) filterPatch(only ...PatchType) bool {
// filter does not apply if not set
if len(only) == 0 {
return false
}
for _, patchType := range only {
if patchType == c.Type {
return false
}
}
return true
}
// applyTransforms applies a list of transforms to a patch value.
func (c *Patch) applyTransforms(input interface{}) (interface{}, error) {
var err error
for i, t := range c.Transforms {
if input, err = t.Transform(input); err != nil {
return nil, errors.Wrapf(err, errFmtTransformAtIndex, i)
}
}
return input, nil
}
// patchFieldValueToObject, given a path, value and "to" object, will
// apply the value to the "to" object at the given path, returning
// any errors as they occur.
func patchFieldValueToObject(fieldPath string, value interface{}, to runtime.Object, mo *xpv1.MergeOptions) error {
paved, err := fieldpath.PaveObject(to)
if err != nil {
return err
}
if err := paved.MergeValue(fieldPath, value, mo); err != nil {
return err
}
return runtime.DefaultUnstructuredConverter.FromUnstructured(paved.UnstructuredContent(), to)
}
// applyFromFieldPathPatch patches the "to" resource, using a source field
// on the "from" resource. Values may be transformed if any are defined on
// the patch.
func (c *Patch) applyFromFieldPathPatch(from, to runtime.Object) error {
if c.FromFieldPath == nil {
return errors.Errorf(errFmtRequiredField, "FromFieldPath", c.Type)
}
// Default to patching the same field on the composed resource.
if c.ToFieldPath == nil {
c.ToFieldPath = c.FromFieldPath
}
fromMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(from)
if err != nil {
return err
}
in, err := fieldpath.Pave(fromMap).GetValue(*c.FromFieldPath)
if IsOptionalFieldPathNotFound(err, c.Policy) {
return nil
}
if err != nil {
return err
}
var mo *xpv1.MergeOptions
if c.Policy != nil {
mo = c.Policy.MergeOptions
}
// Apply transform pipeline
out, err := c.applyTransforms(in)
if err != nil {
return err
}
return patchFieldValueToObject(*c.ToFieldPath, out, to, mo)
}
// applyCombineFromVariablesPatch patches the "to" resource, taking a list of
// input variables and combining them into a single output value.
// The single output value may then be further transformed if they are defined
// on the patch.
func (c *Patch) applyCombineFromVariablesPatch(from, to runtime.Object) error {
// Combine patch requires configuration
if c.Combine == nil {
return errors.Errorf(errFmtRequiredField, "Combine", c.Type)
}
// Destination field path is required since we can't default to multiple
// fields.
if c.ToFieldPath == nil {
return errors.Errorf(errFmtRequiredField, "ToFieldPath", c.Type)
}
vl := len(c.Combine.Variables)
if vl < 1 {
return errors.New(errCombineRequiresVariables)
}
fromMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(from)
if err != nil {
return err
}
in := make([]interface{}, vl)
// Get value of each variable
// NOTE: This currently assumes all variables define a 'fromFieldPath'
// value. If we add new variable types, this may not be the case and
// this code may be better served split out into a dedicated function.
for i, sp := range c.Combine.Variables {
iv, err := fieldpath.Pave(fromMap).GetValue(sp.FromFieldPath)
// If any source field is not found, we will not
// apply the patch. This is to avoid situations
// where a combine patch is expecting a fixed
// number of inputs (e.g. a string format
// expecting 3 fields '%s-%s-%s' but only
// receiving 2 values).
if IsOptionalFieldPathNotFound(err, c.Policy) {
return nil
}
if err != nil {
return err
}
in[i] = iv
}
// Combine input values
cb, err := c.Combine.Combine(in)
if err != nil {
return err
}
// Apply transform pipeline
out, err := c.applyTransforms(cb)
if err != nil {
return err
}
return patchFieldValueToObject(*c.ToFieldPath, out, to, nil)
}
// IsOptionalFieldPathNotFound returns true if the supplied error indicates a
// field path was not found, and the supplied policy indicates a patch from that
// field path was optional.
func IsOptionalFieldPathNotFound(err error, s *PatchPolicy) bool {
switch {
case s == nil:
fallthrough
case s.FromFieldPath == nil:
fallthrough
case *s.FromFieldPath == FromFieldPathPolicyOptional:
return fieldpath.IsNotFound(err)
default:
return false
}
}
// A CombineVariable defines the source of a value that is combined with
// others to form and patch an output value. Currently, this only supports
// retrieving values from a field path.
type CombineVariable struct {
// FromFieldPath is the path of the field on the source whose value is
// to be used as input.
FromFieldPath string `json:"fromFieldPath"`
}
// A CombineStrategy determines what strategy will be applied to combine
// variables.
type CombineStrategy string
// CombineStrategy strategy definitions.
const (
CombineStrategyString CombineStrategy = "string"
)
// A Combine configures a patch that combines more than
// one input field into a single output field.
type Combine struct {
// Variables are the list of variables whose values will be retrieved and
// combined.
// +kubebuilder:validation:MinItems=1
Variables []CombineVariable `json:"variables"`
// Strategy defines the strategy to use to combine the input variable values.
// Currently only string is supported.
// +kubebuilder:validation:Enum=string
Strategy CombineStrategy `json:"strategy"`
// String declares that input variables should be combined into a single
// string, using the relevant settings for formatting purposes.
// +optional
String *StringCombine `json:"string,omitempty"`
}
// A StringCombine combines multiple input values into a single string.
type StringCombine struct {
// Format the input using a Go format string. See
// https://golang.org/pkg/fmt/ for details.
Format string `json:"fmt"`
}
// Combine returns a single output by running a string format
// with all of its' input variables.
func (s *StringCombine) Combine(vars []interface{}) (interface{}, error) {
return fmt.Sprintf(s.Format, vars...), nil
}
// Combine calls the appropriate combiner.
func (c *Combine) | Combine | identifier_name | |
Training.py | 경로
TRAIN_CSV = "train_shuffle.csv" # train data .csv
VALIDATION_CSV = "validation_shuffle.csv" # validation data .csv
TEST_CSV = "test_uniform.csv" # test data .csv
MODEL_SAVE_FOLDER_PATH = './model/' # 저장될 모델 디렉토리 설정
class DataGenerator(Sequence):
def __init__(self, csv_file):
self.paths = []
with open(csv_file, "r") as file:
self.coords = np.zeros((sum(1 for line in file), 4))
file.seek(0)
reader = csv.reader(file, delimiter=",")
for index, row in enumerate(reader):
for i, r in enumerate(row[1:7]):
# row[i+1] = int(r)
row[i+1] = float(r)
path, image_height, image_width, x0, y0, width, height, _, _ = row
self.coords[index, 0] = float((x0 * IMAGE_SIZE / image_width) / IMAGE_SIZE) # xmin
self.coords[index, 1] = float((y0 * IMAGE_SIZE / image_height) / IMAGE_SIZE) # ymin
self.coords[index, 2] = float((width * IMAGE_SIZE / image_width) / IMAGE_SIZE) # width
self.coords[index, 3] = float((height * IMAGE_SIZE / image_height) / IMAGE_SIZE) # height
# int형
# self.coords[index, 0] = x0 * IMAGE_SIZE / image_width / image_width # xmin
# self.coords[index, 1] = y0 * IMAGE_SIZE / image_height # ymin
# self.coords[index, 2] = (x1 - x0) * IMAGE_SIZE / image_width #width
# self.coords[index, 3] = (y1 - y0) * IMAGE_SIZE / image_height #height
# self.coords[index, 2] = width * IMAGE_SIZE / image_width # width
# self.coords[index, 3] = height * IMAGE_SIZE / image_height # height
self.paths.append(path)
def __len__(self):
return math.ceil(len(self.coords) / BATCH_SIZE)
def __getitem__(self, idx):
batch_paths = self.paths[idx * BATCH_SIZE:(idx + 1) * BATCH_SIZE]
batch_coords = self.coords[idx * BATCH_SIZE:(idx + 1) * BATCH_SIZE]
batch_images = np.zeros((len(batch_paths), IMAGE_SIZE, IMAGE_SIZE, 3), dtype=np.float32)
for i, f in enumerate(batch_paths):
img = Image.open(f)
img = img.resize((IMAGE_SIZE, IMAGE_SIZE))
img = img.convert('RGB')
batch_images[i] = preprocess_input(np.array(img, dtype=np.float32))
img.close()
return batch_images, batch_coords
class Training(Callback):
def __init__(self, generator):
self.generator = generator
| poch, logs):
train_pos_count = 0
train_neg_count = 0
for i in range(len(self.generator)):
batch_images, gt = self.generator[i]
pred = self.model.predict_on_batch(batch_images)
train_gt_len = len(gt)
pred = np.maximum(pred, 0)
########################################################################################################
# iou 계산
diff_width = np.minimum(gt[:, 0] + gt[:, 2], pred[:, 0] + pred[:, 2]) - np.maximum(gt[:, 0], pred[:, 0])
diff_height = np.minimum(gt[:, 1] + gt[:, 3], pred[:, 1] + pred[:, 3]) - np.maximum(gt[:, 1], pred[:, 1])
intersection = np.maximum(diff_width, 0) * np.maximum(diff_height, 0)
area_gt = gt[:, 2] * gt[:, 3]
area_pred = pred[:, 2] * pred[:, 3]
union = np.maximum(area_gt + area_pred - intersection, 0)
train_iou = np.round(intersection / (union + epsilon()), 4)
########################################################################################################
for q in range(train_gt_len):
if train_iou[q] >= 0.5: # iou threshold 0.5 지정
train_pos_count += 1
else:
train_neg_count += 1
train_data_count = 59521 # train_총 갯수
train_acc = np.round(train_pos_count / train_data_count, 4)
logs["train_acc"] = train_acc
print(" - train_acc: {}".format(train_acc))
class Validation(Callback):
def __init__(self, generator):
self.generator = generator
def on_epoch_end(self, epoch, logs):
mse = 0
intersections = 0
unions = 0
val_pos_count = 0
val_neg_count = 0
for i in range(len(self.generator)):
batch_images, gt = self.generator[i]
pred = self.model.predict_on_batch(batch_images)
mse += np.linalg.norm(gt - pred, ord='fro') / pred.shape[0]
val_gt_len = len(gt)
pred = np.maximum(pred, 0)
########################################################################################################
# iou 계산
diff_width = np.minimum(gt[:, 0] + gt[:, 2], pred[:, 0] + pred[:, 2]) - np.maximum(gt[:, 0], pred[:, 0])
diff_height = np.minimum(gt[:, 1] + gt[:, 3], pred[:, 1] + pred[:, 3]) - np.maximum(gt[:, 1], pred[:, 1])
intersection = np.maximum(diff_width, 0) * np.maximum(diff_height, 0)
area_gt = gt[:, 2] * gt[:, 3]
area_pred = pred[:, 2] * pred[:, 3]
union = np.maximum(area_gt + area_pred - intersection, 0)
val_iou = np.round(intersection / (union + epsilon()), 4)
########################################################################################################
for q in range(val_gt_len):
if val_iou[q] >= 0.5: # iou threshold 0.5 지정
val_pos_count += 1
else:
val_neg_count += 1
intersections += np.sum(intersection * (union > 0))
unions += np.sum(union)
val_data_count = 14879 # validation 총 갯수
val_acc = np.round(val_pos_count / val_data_count, 4)
logs["val_acc"] = val_acc
iou = np.round(intersections / (unions + epsilon()), 4)
logs["val_iou"] = iou
mse = np.round(mse, 4)
logs["val_mse"] = mse
print(" - val_iou: {} - val_mse: {} - val_acc: {}".format(iou, mse, val_acc))
class Test_set(Callback):
def __init__(self, generator):
self.generator = generator
def on_epoch_end(self, epoch, logs):
test_pos_count = 0
test_neg_count = 0
for i in range(len(self.generator)):
batch_images, gt = self.generator[i]
pred = self.model.predict_on_batch(batch_images)
test_gt_len = len(gt)
pred = np.maximum(pred, 0)
########################################################################################################
# iou 계산
diff_width = np.minimum(gt[:, 0] + gt[:, 2], pred[:, 0] + pred[:, 2]) - np.maximum(gt[:, 0], pred[:, 0])
diff_height = np.minimum(gt[:, 1] + gt[:, 3], pred[:, 1] + pred[:, 3]) - np.maximum(gt[:, 1], pred[:, 1])
intersection = np.maximum(diff_width, 0) * np.maximum(diff_height, 0)
area_gt = gt[:, 2] * gt[:, 3]
area_pred = pred[:, 2] * pred[:, 3]
union = np.maximum(area_gt + area_pred - intersection, 0)
test_iou = np.round(intersection / (union + epsilon()), 4)
########################################################################################################
for q in range(test_gt_len):
if test_iou[q] >= 0.5: # iou threshold 0.5 지정
test_pos_count += 1
else:
test_neg_count += 1
test_data_count = 1245 # test 총 갯수
test_acc = np.round(test_pos_count / test_data_count, 4)
logs["test_acc"] = test_acc
print(" - test_acc: {}".format(test_acc))
def create_model(trainable=False):
# pre-trained 된 moblienetv2 아키텍처 이용
model = MobileNetV2(input_shape=(IMAGE_SIZE, IMAGE_SIZE, 3), include_top=False, alpha=ALPHA, weights='imagenet', classes=4)
model.summary()
# to freeze layers 레이어 동결(가중치 그대로 사용)
for layer in model.layers:
layer.trainable = trainable
# 입력 Task에 맞는 딥러닝 모델 변경 가능
block = model.get_layer("block_16 | def on_epoch_end(self, e | identifier_body |
Training.py | validation 경로
TRAIN_CSV = "train_shuffle.csv" # train data .csv
VALIDATION_CSV = "validation_shuffle.csv" # validation data .csv
TEST_CSV = "test_uniform.csv" # test data .csv
MODEL_SAVE_FOLDER_PATH = './model/' # 저장될 모델 디렉토리 설정
class DataGenerator(Sequence):
def __init__(self, csv_file):
self.paths = []
with open(csv_file, "r") as file:
self.coords = np.zeros((sum(1 for line in file), 4))
file.seek(0)
reader = csv.reader(file, delimiter=",")
for index, row in enumerate(reader):
for i, r in enumerate(row[1:7]):
# row[i+1] = int(r)
row[i+1] = float(r)
path, image_height, image_width, x0, y0, width, height, _, _ = row
self.coords[index, 0] = float((x0 * IMAGE_SIZE / image_width) / IMAGE_SIZE) # xmin
self.coords[index, 1] = float((y0 * IMAGE_SIZE / image_height) / IMAGE_SIZE) # ymin
self.coords[index, 2] = float((width * IMAGE_SIZE / image_width) / IMAGE_SIZE) # width
self.coords[index, 3] = float((height * IMAGE_SIZE / image_height) / IMAGE_SIZE) # height
# int형
# self.coords[index, 0] = x0 * IMAGE_SIZE / image_width / image_width # xmin
# self.coords[index, 1] = y0 * IMAGE_SIZE / image_height # ymin
# self.coords[index, 2] = (x1 - x0) * IMAGE_SIZE / image_width #width
# self.coords[index, 3] = (y1 - y0) * IMAGE_SIZE / image_height #height
# self.coords[index, 2] = width * IMAGE_SIZE / image_width # width
# self.coords[index, 3] = height * IMAGE_SIZE / image_height # height
self.paths.append(path)
def __len__(self):
return math.ceil(len(self.coords) / BATCH_SIZE)
def __getitem__(self, idx):
batch_paths = self.paths[idx * BATCH_SIZE:(idx + 1) * BATCH_SIZE]
batch_coords = self.coords[idx * BATCH_SIZE:(idx + 1) * BATCH_SIZE]
batch_images = np.zeros((len(batch_paths), IMAGE_SIZE, IMAGE_SIZE, 3), dtype=np.float32)
for i, f in enumerate(batch_paths):
img = Image.open(f)
img = img.resize((IMAGE_SIZE, IMAGE_SIZE))
img = img.convert('RGB')
batch_images[i] = preprocess_input(np.array(img, dtype=np.float32))
img.close()
return batch_images, batch_coords
class Training(Callback):
def __init__(self, generator):
self.generator = generator
def on_epoch_end(self, epoch, logs):
train_pos_count = 0
train_neg_count = 0
for i in range(len(self.generator)):
batch_images, gt = self.generator[i]
pred = self.model.predict_on_batch(batch_images)
train_gt_len = len(gt)
pred = np.maximum(pred, 0)
########################################################################################################
# iou 계산
diff_width = np.minimum(gt[:, 0] + gt[:, 2], pred[:, 0] + pred[:, 2]) - np.maximum(gt[:, 0], pred[:, 0])
diff_height = np.minimum(gt[:, 1] + gt[:, 3], pred[:, 1] + pred[:, 3]) - np.maximum(gt[:, 1], pred[:, 1])
intersection = np.maximum(diff_width, 0) * np.maximum(diff_height, 0)
area_gt = gt[:, 2] * gt[:, 3]
area_pred = pred[:, 2] * pred[:, 3]
union = np.maximum(area_gt + area_pred - intersection, 0)
train_iou = np.round(intersection / (union + epsilon()), 4)
########################################################################################################
for q in range(train_gt_len):
if train_iou[q] >= 0.5: # iou threshold 0.5 지정
train_pos_count += 1
else:
train_neg_count += 1
train_data_count = 59521 # train_총 갯수
train_acc = np.round(train_pos_count / train_data_count, 4)
logs["train_acc"] = train_acc
print(" - train_acc: {}".format(train_acc))
class Validation(Callback):
def __init__(self, generator):
self.generator = generator
def on_epoch_end(self, epoch, logs):
mse = 0
intersections = 0
unions = 0
val_pos_count = 0
val_neg_count = 0
for i in range(len(self.generator)):
batch_images, gt = self.generator[i]
pred = self.model.predict_on_batch(batch_images)
mse += np.linalg.norm(gt - pred, ord='fro') / pred.shape[0]
val_gt_len = len(gt)
pred = np.maximum(pred, 0)
########################################################################################################
# iou 계산
diff_width = np.minimum(gt[:, 0] + gt[:, 2], pred[:, 0] + pred[:, 2]) - np.maximum(gt[:, 0], pred[:, 0])
diff_height = np.minimum(gt[:, 1] + gt[:, 3], pred[:, 1] + pred[:, 3]) - np.maximum(gt[:, 1], pred[:, 1])
intersection = np.maximum(diff_width, 0) * np.maximum(diff_height, 0)
area_gt = gt[:, 2] * gt[:, 3]
area_pred = pred[:, 2] * pred[:, 3]
union = np.maximum(area_gt + area_pred - intersection, 0)
val_iou = np.round(intersection / (union + epsilon()), 4)
########################################################################################################
for q in range(val_gt_len):
if val_iou[q] >= 0.5: # iou threshold 0.5 지정
val_pos_count += 1
else:
val_neg_count += 1
intersections += np.sum(intersection * (union > 0))
unions += np.sum(union)
val_data_count = 14879 # validation 총 갯수
val_acc = np.round(val_pos_count / val_data_count, 4)
logs["val_acc"] = val_acc
iou = np.round(intersections / (unions + epsilon()), 4)
logs["val_iou"] = iou |
class Test_set(Callback):
def __init__(self, generator):
self.generator = generator
def on_epoch_end(self, epoch, logs):
test_pos_count = 0
test_neg_count = 0
for i in range(len(self.generator)):
batch_images, gt = self.generator[i]
pred = self.model.predict_on_batch(batch_images)
test_gt_len = len(gt)
pred = np.maximum(pred, 0)
########################################################################################################
# iou 계산
diff_width = np.minimum(gt[:, 0] + gt[:, 2], pred[:, 0] + pred[:, 2]) - np.maximum(gt[:, 0], pred[:, 0])
diff_height = np.minimum(gt[:, 1] + gt[:, 3], pred[:, 1] + pred[:, 3]) - np.maximum(gt[:, 1], pred[:, 1])
intersection = np.maximum(diff_width, 0) * np.maximum(diff_height, 0)
area_gt = gt[:, 2] * gt[:, 3]
area_pred = pred[:, 2] * pred[:, 3]
union = np.maximum(area_gt + area_pred - intersection, 0)
test_iou = np.round(intersection / (union + epsilon()), 4)
########################################################################################################
for q in range(test_gt_len):
if test_iou[q] >= 0.5: # iou threshold 0.5 지정
test_pos_count += 1
else:
test_neg_count += 1
test_data_count = 1245 # test 총 갯수
test_acc = np.round(test_pos_count / test_data_count, 4)
logs["test_acc"] = test_acc
print(" - test_acc: {}".format(test_acc))
def create_model(trainable=False):
# pre-trained 된 moblienetv2 아키텍처 이용
model = MobileNetV2(input_shape=(IMAGE_SIZE, IMAGE_SIZE, 3), include_top=False, alpha=ALPHA, weights='imagenet', classes=4)
model.summary()
# to freeze layers 레이어 동결(가중치 그대로 사용)
for layer in model.layers:
layer.trainable = trainable
# 입력 Task에 맞는 딥러닝 모델 변경 가능
block = model.get_layer("block_16_project |
mse = np.round(mse, 4)
logs["val_mse"] = mse
print(" - val_iou: {} - val_mse: {} - val_acc: {}".format(iou, mse, val_acc)) | random_line_split |
Training.py | validation 경로
TRAIN_CSV = "train_shuffle.csv" # train data .csv
VALIDATION_CSV = "validation_shuffle.csv" # validation data .csv
TEST_CSV = "test_uniform.csv" # test data .csv
MODEL_SAVE_FOLDER_PATH = './model/' # 저장될 모델 디렉토리 설정
class DataGenerator(Sequence):
def __init__(self, csv_file):
self.paths = []
with open(csv_file, "r") as file:
self.coords = np.zeros((sum(1 for line in file), 4))
file.seek(0)
reader = csv.reader(file, delimiter=",")
for index, row in enumerate(reader):
for i, r in enumerate(row[1:7]):
# row[i+1] = int(r)
row[i+1] = float(r)
path, image_height, image_width, x0, y0, width, height, _, _ = row
self.coords[index, 0] = float((x0 * IMAGE_SIZE / image_width) / IMAGE_SIZE) # xmin
self.coords[index, 1] = float((y0 * IMAGE_SIZE / image_height) / IMAGE_SIZE) # ymin
self.coords[index, 2] = float((width * IMAGE_SIZE / image_width) / IMAGE_SIZE) # width
self.coords[index, 3] = float((height * IMAGE_SIZE / image_height) / IMAGE_SIZE) # height
# int형
# self.coords[index, 0] = x0 * IMAGE_SIZE / image_width / image_width # xmin
# self.coords[index, 1] = y0 * IMAGE_SIZE / image_height # ymin
# self.coords[index, 2] = (x1 - x0) * IMAGE_SIZE / image_width #width
# self.coords[index, 3] = (y1 - y0) * IMAGE_SIZE / image_height #height
# self.coords[index, 2] = width * IMAGE_SIZE / image_width # width
# self.coords[index, 3] = height * IMAGE_SIZE / image_height # height
self.paths.append(path)
def __len__(self):
return math.ceil(len(self.coords) / BATCH_SIZE)
def __getitem__(self, idx):
batch_paths = self.paths[idx * BATCH_SIZE:(idx + 1) * BATCH_SIZE]
batch_coords = self.coords[idx * BATCH_SIZE:(idx + 1) * BATCH_SIZE]
batch_images = np.zeros((len(batch_paths), IMAGE_SIZE, IMAGE_SIZE, 3), dtype=np.float32)
for i, f in enumerate(batch_paths):
img = Image.open(f)
img = img.resize((IMAGE_SIZE, IMAGE_SIZE))
img = img.convert('RGB')
batch_images[i] = preprocess_input(np.array(img, dtype=np.float32))
img.close()
return batch_images, batch_coords
class Training(Callback):
def __init__(self, generator):
self.generator = generator
def on_epoch_end(self, epoch, logs):
train_pos_count = 0
train_neg_count = 0
for i in range(len(self.generator)):
batch_images, gt = self.generator[i]
pred = self.model.predict_on_batch(batch_images)
train_gt_len = len(gt)
pred = np.maximum(pred, 0)
########################################################################################################
# iou 계산
diff_width = np.minimum(gt[:, 0] + gt[:, 2], pred[:, 0] + pred[:, 2]) - np.maximum(gt[:, 0], pred[:, 0])
diff_height = np.minimum(gt[:, 1] + gt[:, 3], pred[:, 1] + pred[:, 3]) - np.maximum(gt[:, 1], pred[:, 1])
intersection = np.maximum(diff_width, 0) * np.maximum(diff_height, 0)
area_gt = gt[:, 2] * gt[:, 3]
area_pred = pred[:, 2] * pred[:, 3]
union = np.maximum(area_gt + area_pred - intersection, 0)
train_iou = np.round(intersection / (union + epsilon()), 4)
########################################################################################################
for q in range(train_gt_len):
if train_iou[q] >= 0.5: # iou threshold 0.5 지정
train_pos_count += 1
else:
train_neg_count += 1
train_data_count = 59521 # train_총 갯수
train_acc = np.round(train_pos_count / train_data_count, 4)
logs["train_acc"] = train_acc
print(" - train_acc: {}".format(train_acc))
class Validation(Callback):
def __init__(self, generator):
self.generator = generator
def on_epoch_end(self, epoch, logs):
mse | intersections = 0
unions = 0
val_pos_count = 0
val_neg_count = 0
for i in range(len(self.generator)):
batch_images, gt = self.generator[i]
pred = self.model.predict_on_batch(batch_images)
mse += np.linalg.norm(gt - pred, ord='fro') / pred.shape[0]
val_gt_len = len(gt)
pred = np.maximum(pred, 0)
########################################################################################################
# iou 계산
diff_width = np.minimum(gt[:, 0] + gt[:, 2], pred[:, 0] + pred[:, 2]) - np.maximum(gt[:, 0], pred[:, 0])
diff_height = np.minimum(gt[:, 1] + gt[:, 3], pred[:, 1] + pred[:, 3]) - np.maximum(gt[:, 1], pred[:, 1])
intersection = np.maximum(diff_width, 0) * np.maximum(diff_height, 0)
area_gt = gt[:, 2] * gt[:, 3]
area_pred = pred[:, 2] * pred[:, 3]
union = np.maximum(area_gt + area_pred - intersection, 0)
val_iou = np.round(intersection / (union + epsilon()), 4)
########################################################################################################
for q in range(val_gt_len):
if val_iou[q] >= 0.5: # iou threshold 0.5 지정
val_pos_count += 1
else:
val_neg_count += 1
intersections += np.sum(intersection * (union > 0))
unions += np.sum(union)
val_data_count = 14879 # validation 총 갯수
val_acc = np.round(val_pos_count / val_data_count, 4)
logs["val_acc"] = val_acc
iou = np.round(intersections / (unions + epsilon()), 4)
logs["val_iou"] = iou
mse = np.round(mse, 4)
logs["val_mse"] = mse
print(" - val_iou: {} - val_mse: {} - val_acc: {}".format(iou, mse, val_acc))
class Test_set(Callback):
def __init__(self, generator):
self.generator = generator
def on_epoch_end(self, epoch, logs):
test_pos_count = 0
test_neg_count = 0
for i in range(len(self.generator)):
batch_images, gt = self.generator[i]
pred = self.model.predict_on_batch(batch_images)
test_gt_len = len(gt)
pred = np.maximum(pred, 0)
########################################################################################################
# iou 계산
diff_width = np.minimum(gt[:, 0] + gt[:, 2], pred[:, 0] + pred[:, 2]) - np.maximum(gt[:, 0], pred[:, 0])
diff_height = np.minimum(gt[:, 1] + gt[:, 3], pred[:, 1] + pred[:, 3]) - np.maximum(gt[:, 1], pred[:, 1])
intersection = np.maximum(diff_width, 0) * np.maximum(diff_height, 0)
area_gt = gt[:, 2] * gt[:, 3]
area_pred = pred[:, 2] * pred[:, 3]
union = np.maximum(area_gt + area_pred - intersection, 0)
test_iou = np.round(intersection / (union + epsilon()), 4)
########################################################################################################
for q in range(test_gt_len):
if test_iou[q] >= 0.5: # iou threshold 0.5 지정
test_pos_count += 1
else:
test_neg_count += 1
test_data_count = 1245 # test 총 갯수
test_acc = np.round(test_pos_count / test_data_count, 4)
logs["test_acc"] = test_acc
print(" - test_acc: {}".format(test_acc))
def create_model(trainable=False):
# pre-trained 된 moblienetv2 아키텍처 이용
model = MobileNetV2(input_shape=(IMAGE_SIZE, IMAGE_SIZE, 3), include_top=False, alpha=ALPHA, weights='imagenet', classes=4)
model.summary()
# to freeze layers 레이어 동결(가중치 그대로 사용)
for layer in model.layers:
layer.trainable = trainable
# 입력 Task에 맞는 딥러닝 모델 변경 가능
block = model.get_layer("block_16_project_B | = 0
| identifier_name |
Training.py | 경로
TRAIN_CSV = "train_shuffle.csv" # train data .csv
VALIDATION_CSV = "validation_shuffle.csv" # validation data .csv
TEST_CSV = "test_uniform.csv" # test data .csv
MODEL_SAVE_FOLDER_PATH = './model/' # 저장될 모델 디렉토리 설정
class DataGenerator(Sequence):
def __init__(self, csv_file):
self.paths = []
with open(csv_file, "r") as file:
self.coords = np.zeros((sum(1 for line in file), 4))
file.seek(0)
reader = csv.reader(file, delimiter=",")
for index, row in enumerate(reader):
for i, r in enumerate(row[1:7]):
# row[i+1] = int(r)
row[i+1] = float(r)
path, image_height, image_width, x0, y0, width, height, _, _ = row
self.coords[index, 0] = float((x0 * IMAGE_SIZE / image_width) / IMAGE_SIZE) # xmin
self.coords[index, 1] = float((y0 * IMAGE_SIZE / image_height) / IMAGE_SIZE) # ymin
self.coords[index, 2] = float((width * IMAGE_SIZE / image_width) / IMAGE_SIZE) # width
self.coords[index, 3] = float((height * IMAGE_SIZE / image_height) / IMAGE_SIZE) # height
# int형
# self.coords[index, 0] = x0 * IMAGE_SIZE / image_width / image_width # xmin
# self.coords[index, 1] = y0 * IMAGE_SIZE / image_height # ymin
# self.coords[index, 2] = (x1 - x0) * IMAGE_SIZE / image_width #width
# self.coords[index, 3] = (y1 - y0) * IMAGE_SIZE / image_height #height
# self.coords[index, 2] = width * IMAGE_SIZE / image_width # width
# self.coords[index, 3] = height * IMAGE_SIZE / image_height # height
self.paths.append(path)
def __len__(self):
return math.ceil(len(self.coords) / BATCH_SIZE)
def __getitem__(self, idx):
batch_paths = self.paths[idx * BATCH_SIZE:(idx + 1) * BATCH_SIZE]
batch_coords = self.coords[idx * BATCH_SIZE:(idx + 1) * BATCH_SIZE]
batch_images = np.zeros((len(batch_paths), IMAGE_SIZE, IMAGE_SIZE, 3), dtype=np.float32)
for i, f in enumerate(batch_paths):
img = Image.open(f)
img = img.resize((IMAGE_SIZE, IMAGE_SIZE))
img = img.convert('RGB')
batch_images[i] = preprocess_input(np.array(img, dtype=np.float32))
img.close()
return batch_images, batch_coords
class Training(Callback):
def __init__(self, generator):
self.generator = generator
def on_epoch_end(self, epoch, logs):
train_pos_count = 0
train_neg_count = 0
for i in range(len(self.generator)):
batch_images, gt = self.generator[i]
pred = self.model.predict_on_batch(batch_images)
train_gt_len = len(gt)
pred = np.maximum(pred, 0)
########################################################################################################
# iou 계산
diff_width = np.minimum(gt[:, 0] + gt[:, 2], pred[:, 0] + pred[:, 2]) - np.maximum(gt[:, 0], pred[:, 0])
diff_height = np.minimum(gt[:, 1] + gt[:, 3], pred[:, 1] + pred[:, 3]) - np.maximum(gt[:, 1], pred[:, 1])
intersection = np.maximum(diff_width, 0) * np.maximum(diff_height, 0)
area_gt = gt[:, 2] * gt[:, 3]
area_pred = pred[:, 2] * pred[:, 3]
union = np.maximum(area_gt + area_pred - intersection, 0)
train_iou = np.round(intersection / (union + epsilon()), 4)
########################################################################################################
for q in range(train_gt_len):
if train_iou[q] >= 0.5: # iou thre | train_총 갯수
train_acc = np.round(train_pos_count / train_data_count, 4)
logs["train_acc"] = train_acc
print(" - train_acc: {}".format(train_acc))
class Validation(Callback):
def __init__(self, generator):
self.generator = generator
def on_epoch_end(self, epoch, logs):
mse = 0
intersections = 0
unions = 0
val_pos_count = 0
val_neg_count = 0
for i in range(len(self.generator)):
batch_images, gt = self.generator[i]
pred = self.model.predict_on_batch(batch_images)
mse += np.linalg.norm(gt - pred, ord='fro') / pred.shape[0]
val_gt_len = len(gt)
pred = np.maximum(pred, 0)
########################################################################################################
# iou 계산
diff_width = np.minimum(gt[:, 0] + gt[:, 2], pred[:, 0] + pred[:, 2]) - np.maximum(gt[:, 0], pred[:, 0])
diff_height = np.minimum(gt[:, 1] + gt[:, 3], pred[:, 1] + pred[:, 3]) - np.maximum(gt[:, 1], pred[:, 1])
intersection = np.maximum(diff_width, 0) * np.maximum(diff_height, 0)
area_gt = gt[:, 2] * gt[:, 3]
area_pred = pred[:, 2] * pred[:, 3]
union = np.maximum(area_gt + area_pred - intersection, 0)
val_iou = np.round(intersection / (union + epsilon()), 4)
########################################################################################################
for q in range(val_gt_len):
if val_iou[q] >= 0.5: # iou threshold 0.5 지정
val_pos_count += 1
else:
val_neg_count += 1
intersections += np.sum(intersection * (union > 0))
unions += np.sum(union)
val_data_count = 14879 # validation 총 갯수
val_acc = np.round(val_pos_count / val_data_count, 4)
logs["val_acc"] = val_acc
iou = np.round(intersections / (unions + epsilon()), 4)
logs["val_iou"] = iou
mse = np.round(mse, 4)
logs["val_mse"] = mse
print(" - val_iou: {} - val_mse: {} - val_acc: {}".format(iou, mse, val_acc))
class Test_set(Callback):
def __init__(self, generator):
self.generator = generator
def on_epoch_end(self, epoch, logs):
test_pos_count = 0
test_neg_count = 0
for i in range(len(self.generator)):
batch_images, gt = self.generator[i]
pred = self.model.predict_on_batch(batch_images)
test_gt_len = len(gt)
pred = np.maximum(pred, 0)
########################################################################################################
# iou 계산
diff_width = np.minimum(gt[:, 0] + gt[:, 2], pred[:, 0] + pred[:, 2]) - np.maximum(gt[:, 0], pred[:, 0])
diff_height = np.minimum(gt[:, 1] + gt[:, 3], pred[:, 1] + pred[:, 3]) - np.maximum(gt[:, 1], pred[:, 1])
intersection = np.maximum(diff_width, 0) * np.maximum(diff_height, 0)
area_gt = gt[:, 2] * gt[:, 3]
area_pred = pred[:, 2] * pred[:, 3]
union = np.maximum(area_gt + area_pred - intersection, 0)
test_iou = np.round(intersection / (union + epsilon()), 4)
########################################################################################################
for q in range(test_gt_len):
if test_iou[q] >= 0.5: # iou threshold 0.5 지정
test_pos_count += 1
else:
test_neg_count += 1
test_data_count = 1245 # test 총 갯수
test_acc = np.round(test_pos_count / test_data_count, 4)
logs["test_acc"] = test_acc
print(" - test_acc: {}".format(test_acc))
def create_model(trainable=False):
# pre-trained 된 moblienetv2 아키텍처 이용
model = MobileNetV2(input_shape=(IMAGE_SIZE, IMAGE_SIZE, 3), include_top=False, alpha=ALPHA, weights='imagenet', classes=4)
model.summary()
# to freeze layers 레이어 동결(가중치 그대로 사용)
for layer in model.layers:
layer.trainable = trainable
# 입력 Task에 맞는 딥러닝 모델 변경 가능
block = model.get_layer("block_1 | shold 0.5 지정
train_pos_count += 1
else:
train_neg_count += 1
train_data_count = 59521 # | conditional_block |
models.py | 'Spanish'),
('Swedish', 'Swedish'),
('Tamil', 'Tamil'),
('Telugu', 'Telugu'),
('Turkish', 'Turkish'),
('Ukrainian', 'Ukrainian'),
('Urdu', 'Urdu'),
('Vietnamese', 'Vietnamese')
)
CITIES = (
('Abidjan', 'Abidjan'),
('Accra', 'Accra'),
('Addis Ababa', 'Addis Ababa'),
('Ahmedabad', 'Ahmedabad'),
('Albuquerque', 'Albuquerque'),
('Aleppo', 'Aleppo'),
('Alexandria', 'Alexandria'),
('Algiers', 'Algiers'),
('Anaheim', 'Anaheim'),
('Anchorage', 'Anchorage'),
('Ankara', 'Ankara'),
('Arlington', 'Arlington'),
('Athens', 'Athens'),
('Atlanta', 'Atlanta'),
('Aurora', 'Aurora'),
('Austin', 'Austin'),
('Baghdad', 'Baghdad'),
('Bakersfield', 'Bakersfield'),
('Baku', 'Baku'),
('Baltimore', 'Baltimore'),
('Bandung', 'Bandung'),
('Bangalore', 'Bangalore'),
('Bangkok', 'Bangkok'),
('Barcelona', 'Barcelona'),
('Baton Rouge', 'Baton Rouge'),
('Beijing', 'Beijing'),
('Bekasi', 'Bekasi'),
('Belém', 'Belém'),
('Belo Horizonte', 'Belo Horizonte'),
('Benoni', 'Benoni'),
('Berlin', 'Berlin'),
('Birmingham', 'Birmingham'),
('Bogota', 'Bogota'),
('Boise', 'Boise'),
('Boston', 'Boston'),
('Brasília', 'Brasília'),
('Brooklyn', 'Brooklyn'),
('Buenos Aires', 'Buenos Aires'),
('Buffalo', 'Buffalo'),
('Busan', 'Busan'),
('Cairo', 'Cairo'),
('Cali', 'Cali'),
('Campinas', 'Campinas'),
('Cape Town', 'Cape Town'),
('Caracas', 'Caracas'),
('Casablanca', 'Casablanca'),
('Chandler', 'Chandler'),
('Changchun', 'Changchun'),
('Changsha', 'Changsha'),
('Charlotte', 'Charlotte'),
('Chengdu', 'Chengdu'),
('Chennai', 'Chennai'),
('Chesapeake', 'Chesapeake'),
('Chicago', 'Chicago'),
('Chittagong', 'Chittagong'),
('Chongqing', 'Chongqing'),
('Chula Vista', 'Chula Vista'),
('Cincinnati', 'Cincinnati'),
('Cleveland', 'Cleveland'),
('Colorado Springs', 'Colorado Springs'),
('Columbus', 'Columbus'),
('Corpus Christi', 'Corpus Christi'),
('Curitiba', 'Curitiba'),
('Daegu', 'Daegu'),
('Dakar', 'Dakar'),
('Dalian', 'Dalian'),
('Dallas', 'Dallas'),
('Damascus', 'Damascus'),
('Dar es Salaam', 'Dar es Salaam'),
('Delhi', 'Delhi'),
('Denver', 'Denver'),
('Detroit', 'Detroit'),
('Dhaka', 'Dhaka'),
('Dongguan', 'Dongguan'),
('Durban', 'Durban'),
('Durham', 'Durham'),
('El Giza', 'El Giza'),
('El Paso', 'El Paso'),
('Faisalabad', 'Faisalabad'),
('Fort Wayne', 'Fort Wayne'),
('Fort Worth', 'Fort Worth'),
('Fortaleza', 'Fortaleza'),
('Frankfurt', 'Frankfurt'),
('Fremont', 'Fremont'),
('Fresno', 'Fresno'),
('Fukuoka', 'Fukuoka'),
('Fuzhou', 'Fuzhou'),
('Garland', 'Garland'),
('George Town', 'George Town'),
('Gilbert', 'Gilbert'),
('Glendale', 'Glendale'),
('Greensboro', 'Greensboro'),
('Guadalajara', 'Guadalajara'),
('Guangzhou', 'Guangzhou'),
('Guayaquil', 'Guayaquil'),
('Guiyang', 'Guiyang'),
('Hangzhou', 'Hangzhou'),
('Hanoi', 'Hanoi'),
('Haora', 'Haora'),
('Harbin', 'Harbin'),
('Havana', 'Havana'),
('Hechi', 'Hechi'),
('Henderson', 'Henderson'),
('Hialeah', 'Hialeah'),
('Ho Chi Minh City', 'Ho Chi Minh City'),
('Hong Kong', 'Hong Kong'),
('Honolulu', 'Honolulu'),
('Houston', 'Houston'),
('Hyderabad', 'Hyderabad'), | ('Incheon', 'Incheon'),
('Indianapolis', 'Indianapolis'),
('Irvine', 'Irvine'),
('Irving', 'Irving'),
('Istanbul', 'Istanbul'),
('İzmir', 'İzmir'),
('Jacksonville', 'Jacksonville'),
('Jaipur', 'Jaipur'),
('Jakarta', 'Jakarta'),
('Jeddah', 'Jeddah'),
('Jersey City', 'Jersey City'),
('Jilin', 'Jilin'),
('Jinan', 'Jinan'),
('Jinxi', 'Jinxi'),
('Johannesburg', 'Johannesburg'),
('Kabul', 'Kabul'),
('Kano', 'Kano'),
('Kanpur', 'Kanpur'),
('Kansas City', 'Kansas City'),
('Kaohsiung', 'Kaohsiung'),
('Karachi', 'Karachi'),
('Katowice', 'Katowice'),
('Khartoum', 'Khartoum'),
('Kiev', 'Kiev'),
('Kinshasa', 'Kinshasa'),
('Kolkata', 'Kolkata'),
('Kunming', 'Kunming'),
('Lagos', 'Lagos'),
('Lahore', 'Lahore'),
('Lanzhou', 'Lanzhou'),
('Laredo', 'Laredo'),
('Las Vegas', 'Las Vegas'),
('Lexington', 'Lexington'),
('Lima', 'Lima'),
('Lincoln', 'Lincoln'),
('Lisbon', 'Lisbon'),
('London', 'London'),
('Long Beach', 'Long Beach'),
('Los Angeles', 'Los Angeles'),
('Louisville', 'Louisville'),
('Luanda', 'Luanda'),
('Lubbock', 'Lubbock'),
('Lucknow', 'Lucknow'),
('Madison', 'Madison'),
('Madrid', 'Madrid'),
('Manchester', 'Manchester'),
('Manila', 'Manila'),
('Mannheim', 'Mannheim'),
('Mashhad', 'Mashhad'),
('Medan', 'Medan'),
('Medellín', 'Medellín'),
('Melbourne', 'Melbourne'),
('Memphis', 'Memphis'),
('Mesa', 'Mesa'),
('Mexico City', 'Mexico City'),
('Miami', 'Miami'),
('Milan', 'Milan'),
('Milwaukee', 'Milwaukee'),
('Minneapolis', 'Minneapolis'),
('Monterrey', 'Monterrey'),
('Montréal', 'Montréal'),
('Moscow', 'Moscow'),
('Mumbai', 'Mumbai'),
('Nagoya', 'Nagoya'),
('Nagpur', 'Nagpur'),
('Nairobi', 'Nairobi'),
('Nanchang', 'Nanchang'),
('Nanchong', 'Nanchong'),
('Nanjing', 'Nanjing'),
('Nanning', 'Nanning'),
('Naples', 'Naples'),
('Nashville', 'Nashville'),
('New Orleans', 'New Orleans'),
('New York', 'New York'),
('Newark', 'Newark'),
('Norfolk', 'Norfolk'),
('North Las Vegas', 'North Las Vegas'),
('Oakland', 'Oakland'),
('Oklahoma City', 'Oklahoma City'),
('Omaha', 'Omaha'),
('Omdurman', 'Omdurman'),
('Orlando', 'Orlando'),
('Ōsaka', 'Ōsaka'),
('Paris', 'Paris'),
('Patna', 'Patna'),
('Philadelphia', 'Philadelphia'),
('Phoenix', 'Phoenix'),
('Pittsburgh', 'Pittsburgh'),
('Plano', 'Plano'),
('Portland', 'Portland'),
('Porto Alegre', 'Porto | ('Ibadan', 'Ibadan'), | random_line_split |
models.py | 'Hong Kong'),
('Honolulu', 'Honolulu'),
('Houston', 'Houston'),
('Hyderabad', 'Hyderabad'),
('Ibadan', 'Ibadan'),
('Incheon', 'Incheon'),
('Indianapolis', 'Indianapolis'),
('Irvine', 'Irvine'),
('Irving', 'Irving'),
('Istanbul', 'Istanbul'),
('İzmir', 'İzmir'),
('Jacksonville', 'Jacksonville'),
('Jaipur', 'Jaipur'),
('Jakarta', 'Jakarta'),
('Jeddah', 'Jeddah'),
('Jersey City', 'Jersey City'),
('Jilin', 'Jilin'),
('Jinan', 'Jinan'),
('Jinxi', 'Jinxi'),
('Johannesburg', 'Johannesburg'),
('Kabul', 'Kabul'),
('Kano', 'Kano'),
('Kanpur', 'Kanpur'),
('Kansas City', 'Kansas City'),
('Kaohsiung', 'Kaohsiung'),
('Karachi', 'Karachi'),
('Katowice', 'Katowice'),
('Khartoum', 'Khartoum'),
('Kiev', 'Kiev'),
('Kinshasa', 'Kinshasa'),
('Kolkata', 'Kolkata'),
('Kunming', 'Kunming'),
('Lagos', 'Lagos'),
('Lahore', 'Lahore'),
('Lanzhou', 'Lanzhou'),
('Laredo', 'Laredo'),
('Las Vegas', 'Las Vegas'),
('Lexington', 'Lexington'),
('Lima', 'Lima'),
('Lincoln', 'Lincoln'),
('Lisbon', 'Lisbon'),
('London', 'London'),
('Long Beach', 'Long Beach'),
('Los Angeles', 'Los Angeles'),
('Louisville', 'Louisville'),
('Luanda', 'Luanda'),
('Lubbock', 'Lubbock'),
('Lucknow', 'Lucknow'),
('Madison', 'Madison'),
('Madrid', 'Madrid'),
('Manchester', 'Manchester'),
('Manila', 'Manila'),
('Mannheim', 'Mannheim'),
('Mashhad', 'Mashhad'),
('Medan', 'Medan'),
('Medellín', 'Medellín'),
('Melbourne', 'Melbourne'),
('Memphis', 'Memphis'),
('Mesa', 'Mesa'),
('Mexico City', 'Mexico City'),
('Miami', 'Miami'),
('Milan', 'Milan'),
('Milwaukee', 'Milwaukee'),
('Minneapolis', 'Minneapolis'),
('Monterrey', 'Monterrey'),
('Montréal', 'Montréal'),
('Moscow', 'Moscow'),
('Mumbai', 'Mumbai'),
('Nagoya', 'Nagoya'),
('Nagpur', 'Nagpur'),
('Nairobi', 'Nairobi'),
('Nanchang', 'Nanchang'),
('Nanchong', 'Nanchong'),
('Nanjing', 'Nanjing'),
('Nanning', 'Nanning'),
('Naples', 'Naples'),
('Nashville', 'Nashville'),
('New Orleans', 'New Orleans'),
('New York', 'New York'),
('Newark', 'Newark'),
('Norfolk', 'Norfolk'),
('North Las Vegas', 'North Las Vegas'),
('Oakland', 'Oakland'),
('Oklahoma City', 'Oklahoma City'),
('Omaha', 'Omaha'),
('Omdurman', 'Omdurman'),
('Orlando', 'Orlando'),
('Ōsaka', 'Ōsaka'),
('Paris', 'Paris'),
('Patna', 'Patna'),
('Philadelphia', 'Philadelphia'),
('Phoenix', 'Phoenix'),
('Pittsburgh', 'Pittsburgh'),
('Plano', 'Plano'),
('Portland', 'Portland'),
('Porto Alegre', 'Porto Alegre'),
('Puebla', 'Puebla'),
('Pune', 'Pune'),
('Pyongyang', 'Pyongyang'),
('Qingdao', 'Qingdao'),
('Queens', 'Queens'),
('Quezon City', 'Quezon City'),
('Raleigh', 'Raleigh'),
('Rangoon', 'Rangoon'),
('Recife', 'Recife'),
('Reno', 'Reno'),
('Richmond', 'Richmond'),
('Rio de Janeiro', 'Rio de Janeiro'),
('Riverside', 'Riverside'),
('Riyadh', 'Riyadh'),
('Rome', 'Rome'),
('Sacramento', 'Sacramento'),
('Saint Paul', 'Saint Paul'),
('Salvador', 'Salvador'),
('San Antonio', 'San Antonio'),
('San Diego', 'San Diego'),
('San Francisco', 'San Francisco'),
('San Jose', 'San Jose'),
('Santa Ana', 'Santa Ana'),
('Santa Cruz', 'Santa Cruz'),
('Santiago', 'Santiago'),
('Santo Domingo', 'Santo Domingo'),
('São Paulo', 'São Paulo'),
('Sapporo', 'Sapporo'),
('Scottsdale', 'Scottsdale'),
('Seattle', 'Seattle'),
('Sendai', 'Sendai'),
('Seoul', 'Seoul'),
('Shanghai', 'Shanghai'),
('Shenyeng', 'Shenyeng'),
('Shenzhen', 'Shenzhen'),
('Shijianzhuang', 'Shijianzhuang'),
('Singapore', 'Singapore'),
('Spokane', 'Spokane'),
('St. Louis', 'St. Louis'),
('St. Petersburg', 'St. Petersburg'),
('Stockton', 'Stockton'),
('Stuttgart', 'Stuttgart'),
('Surabaya', 'Surabaya'),
('Surat', 'Surat'),
('Sydney', 'Sydney'),
('Taichung', 'Taichung'),
('Taipei', 'Taipei'),
('Taiyuan', 'Taiyuan'),
('Tampa', 'Tampa'),
('Tashkent', 'Tashkent'),
('Tehran', 'Tehran'),
('Tel Aviv-Yafo', 'Tel Aviv-Yafo'),
('Tianjin', 'Tianjin'),
('Tokyo', 'Tokyo'),
('Toledo', 'Toledo'),
('Toronto', 'Toronto'),
('Tripoli', 'Tripoli'),
('Tucson', 'Tucson'),
('Tulsa', 'Tulsa'),
('Tunis', 'Tunis'),
('Ürümqi', 'Ürümqi'),
('Vancouver', 'Vancouver'),
('Vienna', 'Vienna'),
('Virginia Beach', 'Virginia Beach'),
('Washington', 'Washington'),
('Wenzhou', 'Wenzhou'),
('Wichita', 'Wichita'),
('Winston–Salem', 'Winston–Salem'),
('Wuhan', 'Wuhan'),
('Xiamen', 'Xiamen'),
('Xian', 'Xian'),
('Xiangtan', 'Xiangtan'),
('Yantai', 'Yantai'),
('Yokohama', 'Yokohama'),
('Zaozhuang', 'Zaozhuang'),
('Zhangzhou', 'Zhangzhou'),
('Zhengzhou', 'Zhengzhou'),
('Zibo', 'Zibo')
)
CATEGORIES = (
('Art', 'Art'),
('Food', 'Food'),
('Sports', 'Sports'),
('Adventure', 'Adventure'),
('Workshop', 'Workshop'),
('Other', 'Other')
)
#----- PROFILE ------
class Profile(models.Model):
user = models.OneToOneField(User, primary_key=True, on_delete=models.CASCADE)
url = models.CharField(max_length=200)
def __str__(self):
return f'{self.user.username} Profile'
# ---- EXPERIENCE ------
class Experience(models.Model):
title = models.CharField(max_length=100)
category = models.CharField(max_length=100, choices=CATEGORIES, default='Food')
description = models.TextField(max_length=750)
price = models.DecimalField(max_digits=7, decimal_places=2)
hours = models.IntegerField(choices=HOURS, default=12)
minutes = models.IntegerField(choices=MINUTES, default=0)
language = models.CharField(max_length=100, choices=LANGUAGES, default='English')
city = models.CharField(max_length=100, choices=CITIES, default='San Francisco')
address = models.CharField(max_length=100)
zipcode = models.IntegerField(default=99999)
# user in this case is equal to the experience host
user = models.ForeignKey(User, on_delete=models.CASCADE)
def __str__(self):
return f'{self.title} ({self.id})'
def get_absolute_url(self) | :
return | identifier_name | |
models.py | '),
('Hyderabad', 'Hyderabad'),
('Ibadan', 'Ibadan'),
('Incheon', 'Incheon'),
('Indianapolis', 'Indianapolis'),
('Irvine', 'Irvine'),
('Irving', 'Irving'),
('Istanbul', 'Istanbul'),
('İzmir', 'İzmir'),
('Jacksonville', 'Jacksonville'),
('Jaipur', 'Jaipur'),
('Jakarta', 'Jakarta'),
('Jeddah', 'Jeddah'),
('Jersey City', 'Jersey City'),
('Jilin', 'Jilin'),
('Jinan', 'Jinan'),
('Jinxi', 'Jinxi'),
('Johannesburg', 'Johannesburg'),
('Kabul', 'Kabul'),
('Kano', 'Kano'),
('Kanpur', 'Kanpur'),
('Kansas City', 'Kansas City'),
('Kaohsiung', 'Kaohsiung'),
('Karachi', 'Karachi'),
('Katowice', 'Katowice'),
('Khartoum', 'Khartoum'),
('Kiev', 'Kiev'),
('Kinshasa', 'Kinshasa'),
('Kolkata', 'Kolkata'),
('Kunming', 'Kunming'),
('Lagos', 'Lagos'),
('Lahore', 'Lahore'),
('Lanzhou', 'Lanzhou'),
('Laredo', 'Laredo'),
('Las Vegas', 'Las Vegas'),
('Lexington', 'Lexington'),
('Lima', 'Lima'),
('Lincoln', 'Lincoln'),
('Lisbon', 'Lisbon'),
('London', 'London'),
('Long Beach', 'Long Beach'),
('Los Angeles', 'Los Angeles'),
('Louisville', 'Louisville'),
('Luanda', 'Luanda'),
('Lubbock', 'Lubbock'),
('Lucknow', 'Lucknow'),
('Madison', 'Madison'),
('Madrid', 'Madrid'),
('Manchester', 'Manchester'),
('Manila', 'Manila'),
('Mannheim', 'Mannheim'),
('Mashhad', 'Mashhad'),
('Medan', 'Medan'),
('Medellín', 'Medellín'),
('Melbourne', 'Melbourne'),
('Memphis', 'Memphis'),
('Mesa', 'Mesa'),
('Mexico City', 'Mexico City'),
('Miami', 'Miami'),
('Milan', 'Milan'),
('Milwaukee', 'Milwaukee'),
('Minneapolis', 'Minneapolis'),
('Monterrey', 'Monterrey'),
('Montréal', 'Montréal'),
('Moscow', 'Moscow'),
('Mumbai', 'Mumbai'),
('Nagoya', 'Nagoya'),
('Nagpur', 'Nagpur'),
('Nairobi', 'Nairobi'),
('Nanchang', 'Nanchang'),
('Nanchong', 'Nanchong'),
('Nanjing', 'Nanjing'),
('Nanning', 'Nanning'),
('Naples', 'Naples'),
('Nashville', 'Nashville'),
('New Orleans', 'New Orleans'),
('New York', 'New York'),
('Newark', 'Newark'),
('Norfolk', 'Norfolk'),
('North Las Vegas', 'North Las Vegas'),
('Oakland', 'Oakland'),
('Oklahoma City', 'Oklahoma City'),
('Omaha', 'Omaha'),
('Omdurman', 'Omdurman'),
('Orlando', 'Orlando'),
('Ōsaka', 'Ōsaka'),
('Paris', 'Paris'),
('Patna', 'Patna'),
('Philadelphia', 'Philadelphia'),
('Phoenix', 'Phoenix'),
('Pittsburgh', 'Pittsburgh'),
('Plano', 'Plano'),
('Portland', 'Portland'),
('Porto Alegre', 'Porto Alegre'),
('Puebla', 'Puebla'),
('Pune', 'Pune'),
('Pyongyang', 'Pyongyang'),
('Qingdao', 'Qingdao'),
('Queens', 'Queens'),
('Quezon City', 'Quezon City'),
('Raleigh', 'Raleigh'),
('Rangoon', 'Rangoon'),
('Recife', 'Recife'),
('Reno', 'Reno'),
('Richmond', 'Richmond'),
('Rio de Janeiro', 'Rio de Janeiro'),
('Riverside', 'Riverside'),
('Riyadh', 'Riyadh'),
('Rome', 'Rome'),
('Sacramento', 'Sacramento'),
('Saint Paul', 'Saint Paul'),
('Salvador', 'Salvador'),
('San Antonio', 'San Antonio'),
('San Diego', 'San Diego'),
('San Francisco', 'San Francisco'),
('San Jose', 'San Jose'),
('Santa Ana', 'Santa Ana'),
('Santa Cruz', 'Santa Cruz'),
('Santiago', 'Santiago'),
('Santo Domingo', 'Santo Domingo'),
('São Paulo', 'São Paulo'),
('Sapporo', 'Sapporo'),
('Scottsdale', 'Scottsdale'),
('Seattle', 'Seattle'),
('Sendai', 'Sendai'),
('Seoul', 'Seoul'),
('Shanghai', 'Shanghai'),
('Shenyeng', 'Shenyeng'),
('Shenzhen', 'Shenzhen'),
('Shijianzhuang', 'Shijianzhuang'),
('Singapore', 'Singapore'),
('Spokane', 'Spokane'),
('St. Louis', 'St. Louis'),
('St. Petersburg', 'St. Petersburg'),
('Stockton', 'Stockton'),
('Stuttgart', 'Stuttgart'),
('Surabaya', 'Surabaya'),
('Surat', 'Surat'),
('Sydney', 'Sydney'),
('Taichung', 'Taichung'),
('Taipei', 'Taipei'),
('Taiyuan', 'Taiyuan'),
('Tampa', 'Tampa'),
('Tashkent', 'Tashkent'),
('Tehran', 'Tehran'),
('Tel Aviv-Yafo', 'Tel Aviv-Yafo'),
('Tianjin', 'Tianjin'),
('Tokyo', 'Tokyo'),
('Toledo', 'Toledo'),
('Toronto', 'Toronto'),
('Tripoli', 'Tripoli'),
('Tucson', 'Tucson'),
('Tulsa', 'Tulsa'),
('Tunis', 'Tunis'),
('Ürümqi', 'Ürümqi'),
('Vancouver', 'Vancouver'),
('Vienna', 'Vienna'),
('Virginia Beach', 'Virginia Beach'),
('Washington', 'Washington'),
('Wenzhou', 'Wenzhou'),
('Wichita', 'Wichita'),
('Winston–Salem', 'Winston–Salem'),
('Wuhan', 'Wuhan'),
('Xiamen', 'Xiamen'),
('Xian', 'Xian'),
('Xiangtan', 'Xiangtan'),
('Yantai', 'Yantai'),
('Yokohama', 'Yokohama'),
('Zaozhuang', 'Zaozhuang'),
('Zhangzhou', 'Zhangzhou'),
('Zhengzhou', 'Zhengzhou'),
('Zibo', 'Zibo')
)
CATEGORIES = (
('Art', 'Art'),
('Food', 'Food'),
('Sports', 'Sports'),
('Adventure', 'Adventure'),
('Workshop', 'Workshop'),
('Other', 'Other')
)
#----- PROFILE ------
class Profile(models.Model):
user = models.OneToOneField(User, primary_key=True, on_delete=models.CASCADE)
url = models.CharField(max_length=200)
def __str__(self):
return f'{self.user.username} Profile'
# ---- EXPERIENCE ------
class Experience(models.Model):
title = models.CharField(max_length=100)
category = models.CharField(max_length=100, choices=CATEGORIES, default='Food')
description = models.TextField(max_length=750)
price = models.DecimalField(max_digits=7, decimal_places=2)
hours = models.IntegerField(choices=HOURS, default=12)
minutes = models.IntegerField(choices=MINUTES, default=0)
language = models.CharField(max_length=100, choices=LANGUAGES, default='English')
city = models.CharField(max_length=100, choices=CITIES, default='San Francisco')
address = models.CharField(max_length=100)
zipcode = models.IntegerField(default=99999)
# user in this case is equal to the experience host
user = models.ForeignKey(User, on_delete=models.CASCADE)
def __str__(self):
return f'{self.title} ({self.id})'
def get_absolute_url(self):
return reverse('exp_de | tail', kwargs = { 'pk': self.id })
# ---- BOOKING ---- | identifier_body | |
github.go | ", orgThrottler))
continue
}
if burst > hourlyTokens {
errs = append(errs, fmt.Errorf("-github-throttle-org=%s: burst must not be greater than hourlyTokens", orgThrottler))
continue
}
if _, alreadyExists := o.parsedOrgThrottlers[org]; alreadyExists {
errs = append(errs, fmt.Errorf("got multiple -github-throttle-org for the %s org", org))
continue
}
o.parsedOrgThrottlers[org] = throttlerSettings{hourlyTokens: int(hourlyTokens), burst: int(burst)}
}
return utilerrors.NewAggregate(errs)
}
// Validate validates GitHub options. Note that validate updates the GitHubOptions
// to add default values for TokenPath and graphqlEndpoint.
func (o *GitHubOptions) Validate(bool) error {
endpoints := o.endpoint.Strings()
for i, uri := range endpoints {
if uri == "" {
endpoints[i] = github.DefaultAPIEndpoint
} else if _, err := url.ParseRequestURI(uri); err != nil {
return fmt.Errorf("invalid -github-endpoint URI: %q", uri)
}
}
if o.TokenPath != "" && (o.AppID != "" || o.AppPrivateKeyPath != "") {
return fmt.Errorf("--token-path is mutually exclusive with --app-id and --app-private-key-path")
}
if o.AppID == "" != (o.AppPrivateKeyPath == "") {
return errors.New("--app-id and --app-private-key-path must be set together")
}
if o.TokenPath != "" && len(endpoints) == 1 && endpoints[0] == github.DefaultAPIEndpoint && !o.AllowDirectAccess {
logrus.Warn("It doesn't look like you are using ghproxy to cache API calls to GitHub! This has become a required component of Prow and other components will soon be allowed to add features that may rapidly consume API ratelimit without caching. Starting May 1, 2020 use Prow components without ghproxy at your own risk! https://github.com/kubernetes/test-infra/tree/master/ghproxy#ghproxy")
}
if o.graphqlEndpoint == "" {
o.graphqlEndpoint = github.DefaultGraphQLEndpoint
} else if _, err := url.Parse(o.graphqlEndpoint); err != nil {
return fmt.Errorf("invalid -github-graphql-endpoint URI: %q", o.graphqlEndpoint)
}
if (o.ThrottleHourlyTokens > 0) != (o.ThrottleAllowBurst > 0) {
if o.ThrottleHourlyTokens == 0 {
// Tolerate `--github-hourly-tokens=0` alone to disable throttling
o.ThrottleAllowBurst = 0
} else {
return errors.New("--github-hourly-tokens and --github-allowed-burst must be either both higher than zero or both equal to zero")
}
}
if o.ThrottleAllowBurst > o.ThrottleHourlyTokens {
return errors.New("--github-allowed-burst must not be larger than --github-hourly-tokens")
}
return o.parseOrgThrottlers()
}
// GitHubClientWithLogFields returns a GitHub client with extra logging fields
func (o *GitHubOptions) GitHubClientWithLogFields(dryRun bool, fields logrus.Fields) (github.Client, error) {
client, err := o.githubClient(dryRun)
if err != nil {
return nil, err
}
return client.WithFields(fields), nil
}
func (o *GitHubOptions) githubClient(dryRun bool) (github.Client, error) {
fields := logrus.Fields{}
options := o.baseClientOptions()
options.DryRun = dryRun
if o.TokenPath == "" && o.AppPrivateKeyPath == "" {
logrus.Warn("empty -github-token-path, will use anonymous github client")
}
if o.TokenPath == "" {
options.GetToken = func() []byte {
return []byte{}
}
} else {
if err := secret.Add(o.TokenPath); err != nil {
return nil, fmt.Errorf("failed to add GitHub token to secret agent: %w", err)
}
options.GetToken = secret.GetTokenGenerator(o.TokenPath)
}
if o.AppPrivateKeyPath != "" {
apk, err := o.appPrivateKeyGenerator()
if err != nil {
return nil, err
}
options.AppPrivateKey = apk
}
optionallyThrottled := func(c github.Client) (github.Client, error) {
// Throttle handles zeros as "disable throttling" so we do not need to call it conditionally
if err := c.Throttle(o.ThrottleHourlyTokens, o.ThrottleAllowBurst); err != nil {
return nil, fmt.Errorf("failed to throttle: %w", err)
}
for org, settings := range o.parsedOrgThrottlers {
if err := c.Throttle(settings.hourlyTokens, settings.burst, org); err != nil {
return nil, fmt.Errorf("failed to set up throttling for org %s: %w", org, err)
}
}
return c, nil
}
tokenGenerator, userGenerator, client, err := github.NewClientFromOptions(fields, options)
if err != nil {
return nil, fmt.Errorf("failed to construct github client: %w", err)
}
o.tokenGenerator = tokenGenerator
o.userGenerator = userGenerator
return optionallyThrottled(client)
}
// baseClientOptions populates client options that are derived from flags without processing
func (o *GitHubOptions) baseClientOptions() github.ClientOptions {
return github.ClientOptions{
Censor: secret.Censor,
AppID: o.AppID,
GraphqlEndpoint: o.graphqlEndpoint,
Bases: o.endpoint.Strings(),
MaxRequestTime: o.maxRequestTime,
InitialDelay: o.initialDelay,
MaxSleepTime: o.maxSleepTime,
MaxRetries: o.maxRetries,
Max404Retries: o.max404Retries,
}
}
// GitHubClient returns a GitHub client.
func (o *GitHubOptions) GitHubClient(dryRun bool) (github.Client, error) {
return o.GitHubClientWithLogFields(dryRun, logrus.Fields{})
}
// GitHubClientWithAccessToken creates a GitHub client from an access token.
func (o *GitHubOptions) GitHubClientWithAccessToken(token string) (github.Client, error) {
options := o.baseClientOptions()
options.GetToken = func() []byte { return []byte(token) }
options.AppID = "" // Since we are using a token, we should not use the app auth
_, _, client, err := github.NewClientFromOptions(logrus.Fields{}, options)
return client, err
}
// GitClientFactory returns git.ClientFactory. Passing non-empty cookieFilePath
// will result in git ClientFactory to work with Gerrit.
// TODO(chaodaiG): move this logic to somewhere more appropriate instead of in
// github.go.
func (o *GitHubOptions) GitClientFactory(cookieFilePath string, cacheDir *string, dryRun, persistCache bool) (gitv2.ClientFactory, error) {
var gitClientFactory gitv2.ClientFactory
if cookieFilePath != "" && o.TokenPath == "" && o.AppPrivateKeyPath == "" {
opts := gitv2.ClientFactoryOpts{
CookieFilePath: cookieFilePath,
Persist: &persistCache,
}
if cacheDir != nil && *cacheDir != "" {
opts.CacheDirBase = cacheDir
}
var err error
gitClientFactory, err = gitv2.NewClientFactory(opts.Apply)
if err != nil {
return nil, fmt.Errorf("failed to create git client from cookieFile: %v\n(cookieFile is only for Gerrit)", err)
}
} else {
gitClient, err := o.GitClient(dryRun)
if err != nil {
return nil, fmt.Errorf("Error getting git client: %w", err)
}
gitClientFactory = gitv2.ClientFactoryFrom(gitClient)
}
return gitClientFactory, nil
}
// GitClient returns a Git client.
func (o *GitHubOptions) GitClient(dryRun bool) (client *git.Client, err error) {
client, err = git.NewClientWithHost(o.Host)
if err != nil {
return nil, err
}
// We must capture the value of client here to prevent issues related
// to the use of named return values when an error is encountered.
// Without this, we risk a nil pointer dereference.
defer func(client *git.Client) {
if err != nil {
client.Clean()
}
}(client)
user, generator, err := o.getGitAuthentication(dryRun)
if err != nil {
return nil, fmt.Errorf("failed to get git authentication: %w", err)
}
client.SetCredentials(user, generator)
return client, nil
}
func (o *GitHubOptions) getGitAuthentication(dryRun bool) (string, git.GitTokenGenerator, error) {
// the client must have been created at least once for us to have generators
if o.userGenerator == nil | {
if _, err := o.GitHubClient(dryRun); err != nil {
return "", nil, fmt.Errorf("error getting GitHub client: %w", err)
}
} | conditional_block | |
github.go | (hourlyTokens, allowedBursts int) FlagParameter {
return func(o *flagParams) {
o.defaults.ThrottleHourlyTokens = hourlyTokens
o.defaults.ThrottleAllowBurst = allowedBursts
}
}
// DisableThrottlerOptions suppresses the presence of throttler-related flags,
// effectively disallowing external users to parametrize default throttling
// behavior. This is useful mostly when a program creates multiple GH clients
// with different behavior.
func DisableThrottlerOptions() FlagParameter {
return func(o *flagParams) {
o.disableThrottlerOptions = true
}
}
// AddCustomizedFlags injects GitHub options into the given FlagSet. Behavior can be customized
// via the functional options.
func (o *GitHubOptions) AddCustomizedFlags(fs *flag.FlagSet, paramFuncs ...FlagParameter) {
o.addFlags(fs, paramFuncs...)
}
// AddFlags injects GitHub options into the given FlagSet
func (o *GitHubOptions) AddFlags(fs *flag.FlagSet) {
o.addFlags(fs)
}
func (o *GitHubOptions) addFlags(fs *flag.FlagSet, paramFuncs ...FlagParameter) {
params := flagParams{
defaults: GitHubOptions{
Host: github.DefaultHost,
endpoint: NewStrings(github.DefaultAPIEndpoint),
graphqlEndpoint: github.DefaultGraphQLEndpoint,
},
}
for _, parametrize := range paramFuncs {
parametrize(¶ms)
}
defaults := params.defaults
fs.StringVar(&o.Host, "github-host", defaults.Host, "GitHub's default host (may differ for enterprise)")
o.endpoint = NewStrings(defaults.endpoint.Strings()...)
fs.Var(&o.endpoint, "github-endpoint", "GitHub's API endpoint (may differ for enterprise).")
fs.StringVar(&o.graphqlEndpoint, "github-graphql-endpoint", defaults.graphqlEndpoint, "GitHub GraphQL API endpoint (may differ for enterprise).")
fs.StringVar(&o.TokenPath, "github-token-path", defaults.TokenPath, "Path to the file containing the GitHub OAuth secret.")
fs.StringVar(&o.AppID, "github-app-id", defaults.AppID, "ID of the GitHub app. If set, requires --github-app-private-key-path to be set and --github-token-path to be unset.")
fs.StringVar(&o.AppPrivateKeyPath, "github-app-private-key-path", defaults.AppPrivateKeyPath, "Path to the private key of the github app. If set, requires --github-app-id to bet set and --github-token-path to be unset")
if !params.disableThrottlerOptions {
fs.IntVar(&o.ThrottleHourlyTokens, "github-hourly-tokens", defaults.ThrottleHourlyTokens, "If set to a value larger than zero, enable client-side throttling to limit hourly token consumption. If set, --github-allowed-burst must be positive too.")
fs.IntVar(&o.ThrottleAllowBurst, "github-allowed-burst", defaults.ThrottleAllowBurst, "Size of token consumption bursts. If set, --github-hourly-tokens must be positive too and set to a higher or equal number.")
fs.Var(&o.OrgThrottlers, "github-throttle-org", "Throttler settings for a specific org in org:hourlyTokens:burst format. Can be passed multiple times. Only valid when using github apps auth.")
}
fs.DurationVar(&o.maxRequestTime, "github-client.request-timeout", github.DefaultMaxSleepTime, "Timeout for any single request to the GitHub API.")
fs.IntVar(&o.maxRetries, "github-client.max-retries", github.DefaultMaxRetries, "Maximum number of retries that will be used for a failing request to the GitHub API.")
fs.IntVar(&o.max404Retries, "github-client.max-404-retries", github.DefaultMax404Retries, "Maximum number of retries that will be used for a 404-ing request to the GitHub API.")
fs.DurationVar(&o.maxSleepTime, "github-client.backoff-timeout", github.DefaultMaxSleepTime, "Largest allowable Retry-After time for requests to the GitHub API.")
fs.DurationVar(&o.initialDelay, "github-client.initial-delay", github.DefaultInitialDelay, "Initial delay before retries begin for requests to the GitHub API.")
}
func (o *GitHubOptions) parseOrgThrottlers() error {
if len(o.OrgThrottlers.vals) == 0 {
return nil
}
if o.AppID == "" {
return errors.New("--github-throttle-org was passed, but client doesn't use apps auth")
}
o.parsedOrgThrottlers = make(map[string]throttlerSettings, len(o.OrgThrottlers.vals))
var errs []error
for _, orgThrottler := range o.OrgThrottlers.vals {
colonSplit := strings.Split(orgThrottler, ":")
if len(colonSplit) != 3 {
errs = append(errs, fmt.Errorf("-github-throttle-org=%s is not in org:hourlyTokens:burst format", orgThrottler))
continue
}
org, hourlyTokensString, burstString := colonSplit[0], colonSplit[1], colonSplit[2]
hourlyTokens, err := strconv.ParseInt(hourlyTokensString, 10, 32)
if err != nil {
errs = append(errs, fmt.Errorf("-github-throttle-org=%s is not in org:hourlyTokens:burst format: hourlyTokens is not an int", orgThrottler))
continue
}
burst, err := strconv.ParseInt(burstString, 10, 32)
if err != nil {
errs = append(errs, fmt.Errorf("-github-throttle-org=%s is not in org:hourlyTokens:burst format: burst is not an int", orgThrottler))
continue
}
if hourlyTokens < 1 {
errs = append(errs, fmt.Errorf("-github-throttle-org=%s: hourlyTokens must be > 0", orgThrottler))
continue
}
if burst < 1 {
errs = append(errs, fmt.Errorf("-github-throttle-org=%s: burst must be > 0", orgThrottler))
continue
}
if burst > hourlyTokens {
errs = append(errs, fmt.Errorf("-github-throttle-org=%s: burst must not be greater than hourlyTokens", orgThrottler))
continue
}
if _, alreadyExists := o.parsedOrgThrottlers[org]; alreadyExists {
errs = append(errs, fmt.Errorf("got multiple -github-throttle-org for the %s org", org))
continue
}
o.parsedOrgThrottlers[org] = throttlerSettings{hourlyTokens: int(hourlyTokens), burst: int(burst)}
}
return utilerrors.NewAggregate(errs)
}
// Validate validates GitHub options. Note that validate updates the GitHubOptions
// to add default values for TokenPath and graphqlEndpoint.
func (o *GitHubOptions) Validate(bool) error {
endpoints := o.endpoint.Strings()
for i, uri := range endpoints {
if uri == "" {
endpoints[i] = github.DefaultAPIEndpoint
} else if _, err := url.ParseRequestURI(uri); err != nil {
return fmt.Errorf("invalid -github-endpoint URI: %q", uri)
}
}
if o.TokenPath != "" && (o.AppID != "" || o.AppPrivateKeyPath != "") {
return fmt.Errorf("--token-path is mutually exclusive with --app-id and --app-private-key-path")
}
if o.AppID == "" != (o.AppPrivateKeyPath == "") {
return errors.New("--app-id and --app-private-key-path must be set together")
}
if o.TokenPath != "" && len(endpoints) == 1 && endpoints[0] == github.DefaultAPIEndpoint && !o.AllowDirectAccess {
logrus.Warn("It doesn't look like you are using ghproxy to cache API calls to GitHub! This has become a required component of Prow and other components will soon be allowed to add features that may rapidly consume API ratelimit without caching. Starting May 1, 2020 use Prow components without ghproxy at your own risk! https://github.com/kubernetes/test-infra/tree/master/ghproxy#ghproxy")
}
if o.graphqlEndpoint == "" {
o.graphqlEndpoint = github.DefaultGraphQLEndpoint
} else if _, err := url.Parse(o.graphqlEndpoint); err != nil {
return fmt.Errorf("invalid -github-graphql-endpoint URI: %q", o.graphqlEndpoint)
}
if (o.ThrottleHourlyTokens > 0) != (o.ThrottleAllowBurst > 0) {
if o.ThrottleHourlyTokens == 0 {
// Tolerate `--github-hourly-tokens=0` alone to disable throttling
o.ThrottleAllowBurst = 0
} else {
return errors.New("--github-hourly-tokens and --github-allowed-burst must be either both higher than zero or both equal to zero")
}
}
if o.ThrottleAllowBurst > o.ThrottleHourlyTokens {
return errors.New("--github-allowed-burst must not be larger than --github-hourly-tokens")
}
return o.parseOrgThrottlers()
}
// GitHubClientWithLogFields returns a GitHub client with extra logging fields
func | ThrottlerDefaults | identifier_name | |
github.go | != 3 {
errs = append(errs, fmt.Errorf("-github-throttle-org=%s is not in org:hourlyTokens:burst format", orgThrottler))
continue
}
org, hourlyTokensString, burstString := colonSplit[0], colonSplit[1], colonSplit[2]
hourlyTokens, err := strconv.ParseInt(hourlyTokensString, 10, 32)
if err != nil {
errs = append(errs, fmt.Errorf("-github-throttle-org=%s is not in org:hourlyTokens:burst format: hourlyTokens is not an int", orgThrottler))
continue
}
burst, err := strconv.ParseInt(burstString, 10, 32)
if err != nil {
errs = append(errs, fmt.Errorf("-github-throttle-org=%s is not in org:hourlyTokens:burst format: burst is not an int", orgThrottler))
continue
}
if hourlyTokens < 1 {
errs = append(errs, fmt.Errorf("-github-throttle-org=%s: hourlyTokens must be > 0", orgThrottler))
continue
}
if burst < 1 {
errs = append(errs, fmt.Errorf("-github-throttle-org=%s: burst must be > 0", orgThrottler))
continue
}
if burst > hourlyTokens {
errs = append(errs, fmt.Errorf("-github-throttle-org=%s: burst must not be greater than hourlyTokens", orgThrottler))
continue
}
if _, alreadyExists := o.parsedOrgThrottlers[org]; alreadyExists {
errs = append(errs, fmt.Errorf("got multiple -github-throttle-org for the %s org", org))
continue
}
o.parsedOrgThrottlers[org] = throttlerSettings{hourlyTokens: int(hourlyTokens), burst: int(burst)}
}
return utilerrors.NewAggregate(errs)
}
// Validate validates GitHub options. Note that validate updates the GitHubOptions
// to add default values for TokenPath and graphqlEndpoint.
func (o *GitHubOptions) Validate(bool) error {
endpoints := o.endpoint.Strings()
for i, uri := range endpoints {
if uri == "" {
endpoints[i] = github.DefaultAPIEndpoint
} else if _, err := url.ParseRequestURI(uri); err != nil {
return fmt.Errorf("invalid -github-endpoint URI: %q", uri)
}
}
if o.TokenPath != "" && (o.AppID != "" || o.AppPrivateKeyPath != "") {
return fmt.Errorf("--token-path is mutually exclusive with --app-id and --app-private-key-path")
}
if o.AppID == "" != (o.AppPrivateKeyPath == "") {
return errors.New("--app-id and --app-private-key-path must be set together")
}
if o.TokenPath != "" && len(endpoints) == 1 && endpoints[0] == github.DefaultAPIEndpoint && !o.AllowDirectAccess {
logrus.Warn("It doesn't look like you are using ghproxy to cache API calls to GitHub! This has become a required component of Prow and other components will soon be allowed to add features that may rapidly consume API ratelimit without caching. Starting May 1, 2020 use Prow components without ghproxy at your own risk! https://github.com/kubernetes/test-infra/tree/master/ghproxy#ghproxy")
}
if o.graphqlEndpoint == "" {
o.graphqlEndpoint = github.DefaultGraphQLEndpoint
} else if _, err := url.Parse(o.graphqlEndpoint); err != nil {
return fmt.Errorf("invalid -github-graphql-endpoint URI: %q", o.graphqlEndpoint)
}
if (o.ThrottleHourlyTokens > 0) != (o.ThrottleAllowBurst > 0) {
if o.ThrottleHourlyTokens == 0 {
// Tolerate `--github-hourly-tokens=0` alone to disable throttling
o.ThrottleAllowBurst = 0
} else {
return errors.New("--github-hourly-tokens and --github-allowed-burst must be either both higher than zero or both equal to zero")
}
}
if o.ThrottleAllowBurst > o.ThrottleHourlyTokens {
return errors.New("--github-allowed-burst must not be larger than --github-hourly-tokens")
}
return o.parseOrgThrottlers()
}
// GitHubClientWithLogFields returns a GitHub client with extra logging fields
func (o *GitHubOptions) GitHubClientWithLogFields(dryRun bool, fields logrus.Fields) (github.Client, error) {
client, err := o.githubClient(dryRun)
if err != nil {
return nil, err
}
return client.WithFields(fields), nil
}
func (o *GitHubOptions) githubClient(dryRun bool) (github.Client, error) {
fields := logrus.Fields{}
options := o.baseClientOptions()
options.DryRun = dryRun
if o.TokenPath == "" && o.AppPrivateKeyPath == "" {
logrus.Warn("empty -github-token-path, will use anonymous github client")
}
if o.TokenPath == "" {
options.GetToken = func() []byte {
return []byte{}
}
} else {
if err := secret.Add(o.TokenPath); err != nil {
return nil, fmt.Errorf("failed to add GitHub token to secret agent: %w", err)
}
options.GetToken = secret.GetTokenGenerator(o.TokenPath)
}
if o.AppPrivateKeyPath != "" {
apk, err := o.appPrivateKeyGenerator()
if err != nil {
return nil, err
}
options.AppPrivateKey = apk
}
optionallyThrottled := func(c github.Client) (github.Client, error) {
// Throttle handles zeros as "disable throttling" so we do not need to call it conditionally
if err := c.Throttle(o.ThrottleHourlyTokens, o.ThrottleAllowBurst); err != nil {
return nil, fmt.Errorf("failed to throttle: %w", err)
}
for org, settings := range o.parsedOrgThrottlers {
if err := c.Throttle(settings.hourlyTokens, settings.burst, org); err != nil {
return nil, fmt.Errorf("failed to set up throttling for org %s: %w", org, err)
}
}
return c, nil
}
tokenGenerator, userGenerator, client, err := github.NewClientFromOptions(fields, options)
if err != nil {
return nil, fmt.Errorf("failed to construct github client: %w", err)
}
o.tokenGenerator = tokenGenerator
o.userGenerator = userGenerator
return optionallyThrottled(client)
}
// baseClientOptions populates client options that are derived from flags without processing
func (o *GitHubOptions) baseClientOptions() github.ClientOptions {
return github.ClientOptions{
Censor: secret.Censor,
AppID: o.AppID,
GraphqlEndpoint: o.graphqlEndpoint,
Bases: o.endpoint.Strings(),
MaxRequestTime: o.maxRequestTime,
InitialDelay: o.initialDelay,
MaxSleepTime: o.maxSleepTime,
MaxRetries: o.maxRetries,
Max404Retries: o.max404Retries,
}
}
// GitHubClient returns a GitHub client.
func (o *GitHubOptions) GitHubClient(dryRun bool) (github.Client, error) {
return o.GitHubClientWithLogFields(dryRun, logrus.Fields{})
}
// GitHubClientWithAccessToken creates a GitHub client from an access token.
func (o *GitHubOptions) GitHubClientWithAccessToken(token string) (github.Client, error) {
options := o.baseClientOptions()
options.GetToken = func() []byte { return []byte(token) }
options.AppID = "" // Since we are using a token, we should not use the app auth
_, _, client, err := github.NewClientFromOptions(logrus.Fields{}, options)
return client, err
}
// GitClientFactory returns git.ClientFactory. Passing non-empty cookieFilePath
// will result in git ClientFactory to work with Gerrit.
// TODO(chaodaiG): move this logic to somewhere more appropriate instead of in
// github.go.
func (o *GitHubOptions) GitClientFactory(cookieFilePath string, cacheDir *string, dryRun, persistCache bool) (gitv2.ClientFactory, error) | {
var gitClientFactory gitv2.ClientFactory
if cookieFilePath != "" && o.TokenPath == "" && o.AppPrivateKeyPath == "" {
opts := gitv2.ClientFactoryOpts{
CookieFilePath: cookieFilePath,
Persist: &persistCache,
}
if cacheDir != nil && *cacheDir != "" {
opts.CacheDirBase = cacheDir
}
var err error
gitClientFactory, err = gitv2.NewClientFactory(opts.Apply)
if err != nil {
return nil, fmt.Errorf("failed to create git client from cookieFile: %v\n(cookieFile is only for Gerrit)", err)
}
} else {
gitClient, err := o.GitClient(dryRun)
if err != nil {
return nil, fmt.Errorf("Error getting git client: %w", err)
} | identifier_body | |
github.go | options. Note that validate updates the GitHubOptions
// to add default values for TokenPath and graphqlEndpoint.
func (o *GitHubOptions) Validate(bool) error {
endpoints := o.endpoint.Strings()
for i, uri := range endpoints {
if uri == "" {
endpoints[i] = github.DefaultAPIEndpoint
} else if _, err := url.ParseRequestURI(uri); err != nil {
return fmt.Errorf("invalid -github-endpoint URI: %q", uri)
}
}
if o.TokenPath != "" && (o.AppID != "" || o.AppPrivateKeyPath != "") {
return fmt.Errorf("--token-path is mutually exclusive with --app-id and --app-private-key-path")
}
if o.AppID == "" != (o.AppPrivateKeyPath == "") {
return errors.New("--app-id and --app-private-key-path must be set together")
}
if o.TokenPath != "" && len(endpoints) == 1 && endpoints[0] == github.DefaultAPIEndpoint && !o.AllowDirectAccess {
logrus.Warn("It doesn't look like you are using ghproxy to cache API calls to GitHub! This has become a required component of Prow and other components will soon be allowed to add features that may rapidly consume API ratelimit without caching. Starting May 1, 2020 use Prow components without ghproxy at your own risk! https://github.com/kubernetes/test-infra/tree/master/ghproxy#ghproxy")
}
if o.graphqlEndpoint == "" {
o.graphqlEndpoint = github.DefaultGraphQLEndpoint
} else if _, err := url.Parse(o.graphqlEndpoint); err != nil {
return fmt.Errorf("invalid -github-graphql-endpoint URI: %q", o.graphqlEndpoint)
}
if (o.ThrottleHourlyTokens > 0) != (o.ThrottleAllowBurst > 0) {
if o.ThrottleHourlyTokens == 0 {
// Tolerate `--github-hourly-tokens=0` alone to disable throttling
o.ThrottleAllowBurst = 0
} else {
return errors.New("--github-hourly-tokens and --github-allowed-burst must be either both higher than zero or both equal to zero")
}
}
if o.ThrottleAllowBurst > o.ThrottleHourlyTokens {
return errors.New("--github-allowed-burst must not be larger than --github-hourly-tokens")
}
return o.parseOrgThrottlers()
}
// GitHubClientWithLogFields returns a GitHub client with extra logging fields
func (o *GitHubOptions) GitHubClientWithLogFields(dryRun bool, fields logrus.Fields) (github.Client, error) {
client, err := o.githubClient(dryRun)
if err != nil {
return nil, err
}
return client.WithFields(fields), nil
}
func (o *GitHubOptions) githubClient(dryRun bool) (github.Client, error) {
fields := logrus.Fields{}
options := o.baseClientOptions()
options.DryRun = dryRun
if o.TokenPath == "" && o.AppPrivateKeyPath == "" {
logrus.Warn("empty -github-token-path, will use anonymous github client")
}
if o.TokenPath == "" {
options.GetToken = func() []byte {
return []byte{}
}
} else {
if err := secret.Add(o.TokenPath); err != nil {
return nil, fmt.Errorf("failed to add GitHub token to secret agent: %w", err)
}
options.GetToken = secret.GetTokenGenerator(o.TokenPath)
}
if o.AppPrivateKeyPath != "" {
apk, err := o.appPrivateKeyGenerator()
if err != nil {
return nil, err
}
options.AppPrivateKey = apk
}
optionallyThrottled := func(c github.Client) (github.Client, error) {
// Throttle handles zeros as "disable throttling" so we do not need to call it conditionally
if err := c.Throttle(o.ThrottleHourlyTokens, o.ThrottleAllowBurst); err != nil {
return nil, fmt.Errorf("failed to throttle: %w", err)
}
for org, settings := range o.parsedOrgThrottlers {
if err := c.Throttle(settings.hourlyTokens, settings.burst, org); err != nil {
return nil, fmt.Errorf("failed to set up throttling for org %s: %w", org, err)
}
}
return c, nil
}
tokenGenerator, userGenerator, client, err := github.NewClientFromOptions(fields, options)
if err != nil {
return nil, fmt.Errorf("failed to construct github client: %w", err)
}
o.tokenGenerator = tokenGenerator
o.userGenerator = userGenerator
return optionallyThrottled(client)
}
// baseClientOptions populates client options that are derived from flags without processing
func (o *GitHubOptions) baseClientOptions() github.ClientOptions {
return github.ClientOptions{
Censor: secret.Censor,
AppID: o.AppID,
GraphqlEndpoint: o.graphqlEndpoint,
Bases: o.endpoint.Strings(),
MaxRequestTime: o.maxRequestTime,
InitialDelay: o.initialDelay,
MaxSleepTime: o.maxSleepTime,
MaxRetries: o.maxRetries,
Max404Retries: o.max404Retries,
}
}
// GitHubClient returns a GitHub client.
func (o *GitHubOptions) GitHubClient(dryRun bool) (github.Client, error) {
return o.GitHubClientWithLogFields(dryRun, logrus.Fields{})
}
// GitHubClientWithAccessToken creates a GitHub client from an access token.
func (o *GitHubOptions) GitHubClientWithAccessToken(token string) (github.Client, error) {
options := o.baseClientOptions()
options.GetToken = func() []byte { return []byte(token) }
options.AppID = "" // Since we are using a token, we should not use the app auth
_, _, client, err := github.NewClientFromOptions(logrus.Fields{}, options)
return client, err
}
// GitClientFactory returns git.ClientFactory. Passing non-empty cookieFilePath
// will result in git ClientFactory to work with Gerrit.
// TODO(chaodaiG): move this logic to somewhere more appropriate instead of in
// github.go.
func (o *GitHubOptions) GitClientFactory(cookieFilePath string, cacheDir *string, dryRun, persistCache bool) (gitv2.ClientFactory, error) {
var gitClientFactory gitv2.ClientFactory
if cookieFilePath != "" && o.TokenPath == "" && o.AppPrivateKeyPath == "" {
opts := gitv2.ClientFactoryOpts{
CookieFilePath: cookieFilePath,
Persist: &persistCache,
}
if cacheDir != nil && *cacheDir != "" {
opts.CacheDirBase = cacheDir
}
var err error
gitClientFactory, err = gitv2.NewClientFactory(opts.Apply)
if err != nil {
return nil, fmt.Errorf("failed to create git client from cookieFile: %v\n(cookieFile is only for Gerrit)", err)
}
} else {
gitClient, err := o.GitClient(dryRun)
if err != nil {
return nil, fmt.Errorf("Error getting git client: %w", err)
}
gitClientFactory = gitv2.ClientFactoryFrom(gitClient)
}
return gitClientFactory, nil
}
// GitClient returns a Git client.
func (o *GitHubOptions) GitClient(dryRun bool) (client *git.Client, err error) {
client, err = git.NewClientWithHost(o.Host)
if err != nil {
return nil, err
}
// We must capture the value of client here to prevent issues related
// to the use of named return values when an error is encountered.
// Without this, we risk a nil pointer dereference.
defer func(client *git.Client) {
if err != nil {
client.Clean()
}
}(client)
user, generator, err := o.getGitAuthentication(dryRun)
if err != nil {
return nil, fmt.Errorf("failed to get git authentication: %w", err)
}
client.SetCredentials(user, generator)
return client, nil
}
func (o *GitHubOptions) getGitAuthentication(dryRun bool) (string, git.GitTokenGenerator, error) {
// the client must have been created at least once for us to have generators
if o.userGenerator == nil {
if _, err := o.GitHubClient(dryRun); err != nil {
return "", nil, fmt.Errorf("error getting GitHub client: %w", err)
}
}
login, err := o.userGenerator()
if err != nil {
return "", nil, fmt.Errorf("error getting bot name: %w", err)
}
return login, git.GitTokenGenerator(o.tokenGenerator), nil
}
func (o *GitHubOptions) appPrivateKeyGenerator() (func() *rsa.PrivateKey, error) {
generator, err := secret.AddWithParser(
o.AppPrivateKeyPath,
func(raw []byte) (*rsa.PrivateKey, error) {
privateKey, err := jwt.ParseRSAPrivateKeyFromPEM(raw)
if err != nil {
return nil, fmt.Errorf("failed to parse rsa key from pem: %w", err)
}
return privateKey, nil | }, | random_line_split | |
token.rs | Helper counter for testing to diagnose
/// how many rollbacks have occured
pub rollbacks: u64,
}
impl Ledger {
/// Helper method to get the account details for `owner_id`.
fn get_balance(&self, owner_id: &AccountId) -> u128 {
match self.balances.get(owner_id) {
Some(x) => return x,
None => return 0,
}
}
/// Helper method to set the account details for `owner_id` to the state.
fn set_balance(&mut self, owner_id: &AccountId, balance: Balance) {
assert!(env::is_valid_account_id(owner_id.as_bytes()), "Owner's account ID is invalid");
self.balances.insert(owner_id, &balance);
}
/// Helper method to get the account details for `owner_id`.
fn get_locked_balance(&self, owner_id: &AccountId) -> Balance {
match self.locked_balances.get(owner_id) {
Some(x) => return x,
None => return 0,
}
}
/**
* Send tokens to a new owner.
*
* message is an optional byte data that is passed to the receiving smart contract.
* notify is a flag to tell if we are going to call a smart contract, because this cannot be currently resolved run-time
* within NEAR smart contract.
*/
pub fn send(&mut self, owner_id: AccountId, new_owner_id: AccountId, amount: Balance, message: Vec<u8>) {
assert!(
env::is_valid_account_id(new_owner_id.as_bytes()),
"New owner's account ID is invalid"
);
let amount = amount.into();
if amount == 0 {
env::panic(b"Can't transfer 0 tokens");
}
assert_ne!(
owner_id, new_owner_id,
"The new owner should be different from the current owner"
);
// Retrieving the account from the state.
let source_balance = self.get_balance(&owner_id);
let source_lock = self.get_locked_balance(&owner_id);
// Checking and updating unlocked balance
if source_balance < amount {
env::panic(format!("Not enough balance, need {}, has {}", amount, source_balance).as_bytes());
}
// Checking and updating unlocked balance
if source_balance < amount + source_lock {
env::panic(format!("Cannot send {} tokens, as account has {} and in tx lock {}", amount, source_balance, source_lock).as_bytes());
}
self.set_balance(&owner_id, source_balance - amount);
// Deposit amount to the new owner and save the new account to the state.
let target_balance = self.get_balance(&new_owner_id);
let new_target_balance = target_balance + amount;
self.set_balance(&new_owner_id, new_target_balance);
// This much of user balance is lockedup in promise chains
self.set_balance(&new_owner_id, new_target_balance);
let target_lock = self.get_locked_balance(&new_owner_id);
self.locked_balances.insert(&new_owner_id, &(target_lock + amount));
let promise0 = env::promise_create(
new_owner_id.clone(),
b"is_receiver",
&[],
0,
SINGLE_CALL_GAS/3,
);
let promise1 = env::promise_then(
promise0,
env::current_account_id(),
b"handle_receiver",
json!({
"old_owner_id": owner_id,
"new_owner_id": new_owner_id,
"amount_received": amount.to_string(),
"amount_total": new_target_balance.to_string(),
"message": message,
}).to_string().as_bytes(),
0,
SINGLE_CALL_GAS/3,
);
env::promise_return(promise1);
}
/// All promise chains have been successful, release balance from the lock
/// and consider the promise chain final.
pub fn finalise(&mut self, new_owner_id: AccountId, amount: Balance) {
let target_lock = self.get_locked_balance(&new_owner_id);
assert!(
target_lock >= amount,
"Locked balance cannot go to negative"
);
let new_amount = target_lock - amount;
self.locked_balances.insert(&new_owner_id, &new_amount);
}
/// Smart contract call failed. We need to roll back the balance update
pub fn rollback(&mut self, old_owner_id: AccountId, new_owner_id: AccountId, amount: Balance) {
let target_lock = self.get_locked_balance(&new_owner_id);
let target_balance = self.get_balance(&new_owner_id);
let source_balance = self.get_balance(&old_owner_id);
env::log(format!("Rolling back back send of {}, from {} to {}, currently locked {}", amount, old_owner_id, new_owner_id, target_lock).as_bytes());
env::log(format!("New owner balance {}, old owner balance {}", target_balance, source_balance).as_bytes());
assert!(
target_lock >= amount,
"Locked balance cannot go to negative"
);
// Roll back lock
let new_amount = target_lock - amount;
self.locked_balances.insert(&new_owner_id, &new_amount);
self.balances.insert(&new_owner_id, &new_amount);
// Rollback new owner
let new_target_balance = target_balance - amount;
self.set_balance(&new_owner_id, new_target_balance);
// Rollback old owner
let new_source_balance = source_balance + amount;
self.set_balance(&old_owner_id, new_source_balance);
let target_balance = self.get_balance(&new_owner_id);
let source_balance = self.get_balance(&old_owner_id);
self.rollbacks += 1;
}
}
/*
* Information about the token.
*
* We hold the name, symbol and homepage readibly available on chain, but other information must be
* from the JSON data. This way we do not bloat the chain size and also make upgrading the information
* somewhat easier.
*
* All metadata fields are optional.
*/
#[derive(BorshDeserialize, BorshSerialize)]
pub struct Metadata {
// Name of the token
pub name: String,
// Symbol of the token
pub symbol: String,
// URL to the human readable page about the token
pub web_link: String,
// URL to the metadata file with more information about the token, like different icon sets
pub metadata_link: String,
}
/**
* Presents on token.
*/
#[near_bindgen]
#[derive(BorshDeserialize, BorshSerialize)]
pub struct Token {
pub ledger: Ledger,
pub metadata: Metadata,
}
impl Default for Token {
fn default() -> Self {
panic!("Token should be initialized before usage")
}
}
#[near_bindgen]
impl Token {
/// Initializes the contract with the given total supply owned by the given `owner_id`.
#[init]
pub fn new(owner_id: AccountId, total_supply: Balance) -> Self {
assert!(!env::state_exists(), "Already initialized");
let total_supply = total_supply.into();
// Initialize the ledger with the initial total supply
let ledger = Ledger {
balances: LookupMap::new(b"bal".to_vec()),
locked_balances: LookupMap::new(b"lck".to_vec()),
total_supply,
rollbacks: 0,
};
// Currently the constructor does not support passing of metadata.
// Start with empty metadata, owner needs to initialize this
// after the token has been created in another transaction
let metadata = Metadata {
name: String::from(""),
symbol: String::from(""),
web_link: String::from(""),
metadata_link: String::from(""),
};
let mut token = Self {
ledger,
metadata
};
token.ledger.set_balance(&owner_id, total_supply);
return token;
}
/// Returns total supply of tokens.
pub fn get_total_supply(&self) -> Balance {
self.ledger.total_supply.into()
}
/// Returns balance of the `owner_id` account.
pub fn get_balance(&self, owner_id: AccountId) -> Balance {
self.ledger.get_balance(&owner_id).into()
}
/// Returns balance lockedin pending transactions
pub fn get_locked_balance(&self, owner_id: AccountId) -> Balance |
//// How many rollbacks we have had
pub fn get_rollback_count(&self) -> u64 {
self.ledger.rollbacks
}
/// Returns balance of the `owner_id` account.
pub fn get_name(&self) -> &str {
return &self.metadata.name;
}
/// Send owner's tokens to another person or a smart contract
#[payable]
pub fn send(&mut self, new_owner_id: AccountId, amount: Balance, message: Vec<u8>) {
self.ledger.send(env::predecessor_account_id(), new_owner_id, amount, message);
}
/**
* After trying to call receiving smart contract if it reports it can receive tokens.
*
* We gpt the interface test promise back. If the account was not smart contract, finalise the transaction.
* Otherwise trigger the smart contract notifier.
*/
pub fn handle_receiver(&mut self, old_owner_id: AccountId, new_owner_id: AccountId, amount_received: U128, amount_total: U128, message: Vec<u8>) {
// Only callable by self
| {
self.ledger.get_locked_balance(&owner_id).into()
} | identifier_body |
token.rs | ) -> Balance {
match self.locked_balances.get(owner_id) {
Some(x) => return x,
None => return 0,
}
}
/**
* Send tokens to a new owner.
*
* message is an optional byte data that is passed to the receiving smart contract.
* notify is a flag to tell if we are going to call a smart contract, because this cannot be currently resolved run-time
* within NEAR smart contract.
*/
pub fn send(&mut self, owner_id: AccountId, new_owner_id: AccountId, amount: Balance, message: Vec<u8>) {
assert!(
env::is_valid_account_id(new_owner_id.as_bytes()),
"New owner's account ID is invalid"
);
let amount = amount.into();
if amount == 0 {
env::panic(b"Can't transfer 0 tokens");
}
assert_ne!(
owner_id, new_owner_id,
"The new owner should be different from the current owner"
);
// Retrieving the account from the state.
let source_balance = self.get_balance(&owner_id);
let source_lock = self.get_locked_balance(&owner_id);
// Checking and updating unlocked balance
if source_balance < amount {
env::panic(format!("Not enough balance, need {}, has {}", amount, source_balance).as_bytes());
}
// Checking and updating unlocked balance
if source_balance < amount + source_lock {
env::panic(format!("Cannot send {} tokens, as account has {} and in tx lock {}", amount, source_balance, source_lock).as_bytes());
}
self.set_balance(&owner_id, source_balance - amount);
// Deposit amount to the new owner and save the new account to the state.
let target_balance = self.get_balance(&new_owner_id);
let new_target_balance = target_balance + amount;
self.set_balance(&new_owner_id, new_target_balance);
// This much of user balance is lockedup in promise chains
self.set_balance(&new_owner_id, new_target_balance);
let target_lock = self.get_locked_balance(&new_owner_id);
self.locked_balances.insert(&new_owner_id, &(target_lock + amount));
let promise0 = env::promise_create(
new_owner_id.clone(),
b"is_receiver",
&[],
0,
SINGLE_CALL_GAS/3,
);
let promise1 = env::promise_then(
promise0,
env::current_account_id(),
b"handle_receiver",
json!({
"old_owner_id": owner_id,
"new_owner_id": new_owner_id,
"amount_received": amount.to_string(),
"amount_total": new_target_balance.to_string(),
"message": message,
}).to_string().as_bytes(),
0,
SINGLE_CALL_GAS/3,
);
env::promise_return(promise1);
}
/// All promise chains have been successful, release balance from the lock
/// and consider the promise chain final.
pub fn finalise(&mut self, new_owner_id: AccountId, amount: Balance) {
let target_lock = self.get_locked_balance(&new_owner_id);
assert!(
target_lock >= amount,
"Locked balance cannot go to negative"
);
let new_amount = target_lock - amount;
self.locked_balances.insert(&new_owner_id, &new_amount);
}
/// Smart contract call failed. We need to roll back the balance update
pub fn rollback(&mut self, old_owner_id: AccountId, new_owner_id: AccountId, amount: Balance) {
let target_lock = self.get_locked_balance(&new_owner_id);
let target_balance = self.get_balance(&new_owner_id);
let source_balance = self.get_balance(&old_owner_id);
env::log(format!("Rolling back back send of {}, from {} to {}, currently locked {}", amount, old_owner_id, new_owner_id, target_lock).as_bytes());
env::log(format!("New owner balance {}, old owner balance {}", target_balance, source_balance).as_bytes());
assert!(
target_lock >= amount,
"Locked balance cannot go to negative"
);
// Roll back lock
let new_amount = target_lock - amount;
self.locked_balances.insert(&new_owner_id, &new_amount);
self.balances.insert(&new_owner_id, &new_amount);
// Rollback new owner
let new_target_balance = target_balance - amount;
self.set_balance(&new_owner_id, new_target_balance);
// Rollback old owner
let new_source_balance = source_balance + amount;
self.set_balance(&old_owner_id, new_source_balance);
let target_balance = self.get_balance(&new_owner_id);
let source_balance = self.get_balance(&old_owner_id);
self.rollbacks += 1;
}
}
/*
* Information about the token.
*
* We hold the name, symbol and homepage readibly available on chain, but other information must be
* from the JSON data. This way we do not bloat the chain size and also make upgrading the information
* somewhat easier.
*
* All metadata fields are optional.
*/
#[derive(BorshDeserialize, BorshSerialize)]
pub struct Metadata {
// Name of the token
pub name: String,
// Symbol of the token
pub symbol: String,
// URL to the human readable page about the token
pub web_link: String,
// URL to the metadata file with more information about the token, like different icon sets
pub metadata_link: String,
}
/**
* Presents on token.
*/
#[near_bindgen]
#[derive(BorshDeserialize, BorshSerialize)]
pub struct Token {
pub ledger: Ledger,
pub metadata: Metadata,
}
impl Default for Token {
fn default() -> Self {
panic!("Token should be initialized before usage")
}
}
#[near_bindgen]
impl Token {
/// Initializes the contract with the given total supply owned by the given `owner_id`.
#[init]
pub fn new(owner_id: AccountId, total_supply: Balance) -> Self {
assert!(!env::state_exists(), "Already initialized");
let total_supply = total_supply.into();
// Initialize the ledger with the initial total supply
let ledger = Ledger {
balances: LookupMap::new(b"bal".to_vec()),
locked_balances: LookupMap::new(b"lck".to_vec()),
total_supply,
rollbacks: 0,
};
// Currently the constructor does not support passing of metadata.
// Start with empty metadata, owner needs to initialize this
// after the token has been created in another transaction
let metadata = Metadata {
name: String::from(""),
symbol: String::from(""),
web_link: String::from(""),
metadata_link: String::from(""),
};
let mut token = Self {
ledger,
metadata
};
token.ledger.set_balance(&owner_id, total_supply);
return token;
}
/// Returns total supply of tokens.
pub fn get_total_supply(&self) -> Balance {
self.ledger.total_supply.into()
}
/// Returns balance of the `owner_id` account.
pub fn get_balance(&self, owner_id: AccountId) -> Balance {
self.ledger.get_balance(&owner_id).into()
}
/// Returns balance lockedin pending transactions
pub fn get_locked_balance(&self, owner_id: AccountId) -> Balance {
self.ledger.get_locked_balance(&owner_id).into()
}
//// How many rollbacks we have had
pub fn get_rollback_count(&self) -> u64 {
self.ledger.rollbacks
}
/// Returns balance of the `owner_id` account.
pub fn get_name(&self) -> &str {
return &self.metadata.name;
}
/// Send owner's tokens to another person or a smart contract
#[payable]
pub fn send(&mut self, new_owner_id: AccountId, amount: Balance, message: Vec<u8>) {
self.ledger.send(env::predecessor_account_id(), new_owner_id, amount, message);
}
/**
* After trying to call receiving smart contract if it reports it can receive tokens.
*
* We gpt the interface test promise back. If the account was not smart contract, finalise the transaction.
* Otherwise trigger the smart contract notifier.
*/
pub fn handle_receiver(&mut self, old_owner_id: AccountId, new_owner_id: AccountId, amount_received: U128, amount_total: U128, message: Vec<u8>) {
// Only callable by self
assert_eq!(env::current_account_id(), env::predecessor_account_id());
env::log(b"handle_receiver reached");
let uint_amount_received: u128 = amount_received.into();
let uint_amount_total: u128 = amount_total.into();
if is_promise_success() | {
// The send() was destined to a compatible receiver smart contract.
// Build another promise that notifies the smart contract
// that is has received new tokens.
env::log(b"Constructing smart contract notifier promise");
let promise0 = env::promise_create(
new_owner_id.clone(),
b"on_token_received",
json!({
"sender_id": old_owner_id,
"amount_received": amount_received,
"amount_total": amount_total,
"message": message,
}).to_string().as_bytes(),
0,
SINGLE_CALL_GAS/10,
); | conditional_block | |
token.rs | &[],
0,
SINGLE_CALL_GAS/3,
);
let promise1 = env::promise_then(
promise0,
env::current_account_id(),
b"handle_receiver",
json!({
"old_owner_id": owner_id,
"new_owner_id": new_owner_id,
"amount_received": amount.to_string(),
"amount_total": new_target_balance.to_string(),
"message": message,
}).to_string().as_bytes(),
0,
SINGLE_CALL_GAS/3,
);
env::promise_return(promise1);
}
/// All promise chains have been successful, release balance from the lock
/// and consider the promise chain final.
pub fn finalise(&mut self, new_owner_id: AccountId, amount: Balance) {
let target_lock = self.get_locked_balance(&new_owner_id);
assert!(
target_lock >= amount,
"Locked balance cannot go to negative"
);
let new_amount = target_lock - amount;
self.locked_balances.insert(&new_owner_id, &new_amount);
}
/// Smart contract call failed. We need to roll back the balance update
pub fn rollback(&mut self, old_owner_id: AccountId, new_owner_id: AccountId, amount: Balance) {
let target_lock = self.get_locked_balance(&new_owner_id);
let target_balance = self.get_balance(&new_owner_id);
let source_balance = self.get_balance(&old_owner_id);
env::log(format!("Rolling back back send of {}, from {} to {}, currently locked {}", amount, old_owner_id, new_owner_id, target_lock).as_bytes());
env::log(format!("New owner balance {}, old owner balance {}", target_balance, source_balance).as_bytes());
assert!(
target_lock >= amount,
"Locked balance cannot go to negative"
);
// Roll back lock
let new_amount = target_lock - amount;
self.locked_balances.insert(&new_owner_id, &new_amount);
self.balances.insert(&new_owner_id, &new_amount);
// Rollback new owner
let new_target_balance = target_balance - amount;
self.set_balance(&new_owner_id, new_target_balance);
// Rollback old owner
let new_source_balance = source_balance + amount;
self.set_balance(&old_owner_id, new_source_balance);
let target_balance = self.get_balance(&new_owner_id);
let source_balance = self.get_balance(&old_owner_id);
self.rollbacks += 1;
}
}
/*
* Information about the token.
*
* We hold the name, symbol and homepage readibly available on chain, but other information must be
* from the JSON data. This way we do not bloat the chain size and also make upgrading the information
* somewhat easier.
*
* All metadata fields are optional.
*/
#[derive(BorshDeserialize, BorshSerialize)]
pub struct Metadata {
// Name of the token
pub name: String,
// Symbol of the token
pub symbol: String,
// URL to the human readable page about the token
pub web_link: String,
// URL to the metadata file with more information about the token, like different icon sets
pub metadata_link: String,
}
/**
* Presents on token.
*/
#[near_bindgen]
#[derive(BorshDeserialize, BorshSerialize)]
pub struct Token {
pub ledger: Ledger,
pub metadata: Metadata,
}
impl Default for Token {
fn default() -> Self {
panic!("Token should be initialized before usage")
}
}
#[near_bindgen]
impl Token {
/// Initializes the contract with the given total supply owned by the given `owner_id`.
#[init]
pub fn new(owner_id: AccountId, total_supply: Balance) -> Self {
assert!(!env::state_exists(), "Already initialized");
let total_supply = total_supply.into();
// Initialize the ledger with the initial total supply
let ledger = Ledger {
balances: LookupMap::new(b"bal".to_vec()),
locked_balances: LookupMap::new(b"lck".to_vec()),
total_supply,
rollbacks: 0,
};
// Currently the constructor does not support passing of metadata.
// Start with empty metadata, owner needs to initialize this
// after the token has been created in another transaction
let metadata = Metadata {
name: String::from(""),
symbol: String::from(""),
web_link: String::from(""),
metadata_link: String::from(""),
};
let mut token = Self {
ledger,
metadata
};
token.ledger.set_balance(&owner_id, total_supply);
return token;
}
/// Returns total supply of tokens.
pub fn get_total_supply(&self) -> Balance {
self.ledger.total_supply.into()
}
/// Returns balance of the `owner_id` account.
pub fn get_balance(&self, owner_id: AccountId) -> Balance {
self.ledger.get_balance(&owner_id).into()
}
/// Returns balance lockedin pending transactions
pub fn get_locked_balance(&self, owner_id: AccountId) -> Balance {
self.ledger.get_locked_balance(&owner_id).into()
}
//// How many rollbacks we have had
pub fn get_rollback_count(&self) -> u64 {
self.ledger.rollbacks
}
/// Returns balance of the `owner_id` account.
pub fn get_name(&self) -> &str {
return &self.metadata.name;
}
/// Send owner's tokens to another person or a smart contract
#[payable]
pub fn send(&mut self, new_owner_id: AccountId, amount: Balance, message: Vec<u8>) {
self.ledger.send(env::predecessor_account_id(), new_owner_id, amount, message);
}
/**
* After trying to call receiving smart contract if it reports it can receive tokens.
*
* We gpt the interface test promise back. If the account was not smart contract, finalise the transaction.
* Otherwise trigger the smart contract notifier.
*/
pub fn handle_receiver(&mut self, old_owner_id: AccountId, new_owner_id: AccountId, amount_received: U128, amount_total: U128, message: Vec<u8>) {
// Only callable by self
assert_eq!(env::current_account_id(), env::predecessor_account_id());
env::log(b"handle_receiver reached");
let uint_amount_received: u128 = amount_received.into();
let uint_amount_total: u128 = amount_total.into();
if is_promise_success() {
// The send() was destined to a compatible receiver smart contract.
// Build another promise that notifies the smart contract
// that is has received new tokens.
env::log(b"Constructing smart contract notifier promise");
let promise0 = env::promise_create(
new_owner_id.clone(),
b"on_token_received",
json!({
"sender_id": old_owner_id,
"amount_received": amount_received,
"amount_total": amount_total,
"message": message,
}).to_string().as_bytes(),
0,
SINGLE_CALL_GAS/10,
);
// Construct the promise that calls back the
// token contract to finalise the transaction
let promise1 = env::promise_then(
promise0,
env::current_account_id(),
b"handle_token_received",
json!({
"old_owner_id": old_owner_id,
"new_owner_id": new_owner_id,
"amount_received": amount_received,
}).to_string().as_bytes(),
0,
SINGLE_CALL_GAS/10,
);
env::promise_return(promise1);
} else {
// Non-code account
// Finalise transaction now.
self.ledger.finalise(new_owner_id, uint_amount_received);
}
}
/// Smart contract notify succeed, free up any locked balance
/// TODO: Add functionality so that the smart contract that received tokens can trigger a new promise chain here
pub fn handle_token_received(&mut self, old_owner_id: AccountId, new_owner_id: AccountId, amount_received: U128) {
// Only callable by self
assert_eq!(env::current_account_id(), env::predecessor_account_id());
env::log(b"Checking for the need to rollback smart contract transaction");
let amount_received: u128 = amount_received.into();
// TODO: Have some nice error code logic here
if is_promise_success() {
self.ledger.finalise(new_owner_id, amount_received);
} else {
self.ledger.rollback(old_owner_id, new_owner_id, amount_received);
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use near_sdk::MockedBlockchain;
use near_sdk::{testing_env, VMContext};
fn alice() -> AccountId {
"alice.near".to_string()
}
fn bob() -> AccountId {
"bob.near".to_string()
}
fn carol() -> AccountId {
"carol.near".to_string()
}
fn get_context(predecessor_account_id: AccountId) -> VMContext {
VMContext {
current_account_id: alice(),
signer_account_id: bob(),
signer_account_pk: vec![0, 1, 2],
predecessor_account_id,
input: vec![], | block_index: 0,
block_timestamp: 0, | random_line_split | |
token.rs | (&new_owner_id);
let new_target_balance = target_balance + amount;
self.set_balance(&new_owner_id, new_target_balance);
// This much of user balance is lockedup in promise chains
self.set_balance(&new_owner_id, new_target_balance);
let target_lock = self.get_locked_balance(&new_owner_id);
self.locked_balances.insert(&new_owner_id, &(target_lock + amount));
let promise0 = env::promise_create(
new_owner_id.clone(),
b"is_receiver",
&[],
0,
SINGLE_CALL_GAS/3,
);
let promise1 = env::promise_then(
promise0,
env::current_account_id(),
b"handle_receiver",
json!({
"old_owner_id": owner_id,
"new_owner_id": new_owner_id,
"amount_received": amount.to_string(),
"amount_total": new_target_balance.to_string(),
"message": message,
}).to_string().as_bytes(),
0,
SINGLE_CALL_GAS/3,
);
env::promise_return(promise1);
}
/// All promise chains have been successful, release balance from the lock
/// and consider the promise chain final.
pub fn finalise(&mut self, new_owner_id: AccountId, amount: Balance) {
let target_lock = self.get_locked_balance(&new_owner_id);
assert!(
target_lock >= amount,
"Locked balance cannot go to negative"
);
let new_amount = target_lock - amount;
self.locked_balances.insert(&new_owner_id, &new_amount);
}
/// Smart contract call failed. We need to roll back the balance update
pub fn rollback(&mut self, old_owner_id: AccountId, new_owner_id: AccountId, amount: Balance) {
let target_lock = self.get_locked_balance(&new_owner_id);
let target_balance = self.get_balance(&new_owner_id);
let source_balance = self.get_balance(&old_owner_id);
env::log(format!("Rolling back back send of {}, from {} to {}, currently locked {}", amount, old_owner_id, new_owner_id, target_lock).as_bytes());
env::log(format!("New owner balance {}, old owner balance {}", target_balance, source_balance).as_bytes());
assert!(
target_lock >= amount,
"Locked balance cannot go to negative"
);
// Roll back lock
let new_amount = target_lock - amount;
self.locked_balances.insert(&new_owner_id, &new_amount);
self.balances.insert(&new_owner_id, &new_amount);
// Rollback new owner
let new_target_balance = target_balance - amount;
self.set_balance(&new_owner_id, new_target_balance);
// Rollback old owner
let new_source_balance = source_balance + amount;
self.set_balance(&old_owner_id, new_source_balance);
let target_balance = self.get_balance(&new_owner_id);
let source_balance = self.get_balance(&old_owner_id);
self.rollbacks += 1;
}
}
/*
* Information about the token.
*
* We hold the name, symbol and homepage readibly available on chain, but other information must be
* from the JSON data. This way we do not bloat the chain size and also make upgrading the information
* somewhat easier.
*
* All metadata fields are optional.
*/
#[derive(BorshDeserialize, BorshSerialize)]
pub struct Metadata {
// Name of the token
pub name: String,
// Symbol of the token
pub symbol: String,
// URL to the human readable page about the token
pub web_link: String,
// URL to the metadata file with more information about the token, like different icon sets
pub metadata_link: String,
}
/**
* Presents on token.
*/
#[near_bindgen]
#[derive(BorshDeserialize, BorshSerialize)]
pub struct Token {
pub ledger: Ledger,
pub metadata: Metadata,
}
impl Default for Token {
fn default() -> Self {
panic!("Token should be initialized before usage")
}
}
#[near_bindgen]
impl Token {
/// Initializes the contract with the given total supply owned by the given `owner_id`.
#[init]
pub fn new(owner_id: AccountId, total_supply: Balance) -> Self {
assert!(!env::state_exists(), "Already initialized");
let total_supply = total_supply.into();
// Initialize the ledger with the initial total supply
let ledger = Ledger {
balances: LookupMap::new(b"bal".to_vec()),
locked_balances: LookupMap::new(b"lck".to_vec()),
total_supply,
rollbacks: 0,
};
// Currently the constructor does not support passing of metadata.
// Start with empty metadata, owner needs to initialize this
// after the token has been created in another transaction
let metadata = Metadata {
name: String::from(""),
symbol: String::from(""),
web_link: String::from(""),
metadata_link: String::from(""),
};
let mut token = Self {
ledger,
metadata
};
token.ledger.set_balance(&owner_id, total_supply);
return token;
}
/// Returns total supply of tokens.
pub fn get_total_supply(&self) -> Balance {
self.ledger.total_supply.into()
}
/// Returns balance of the `owner_id` account.
pub fn get_balance(&self, owner_id: AccountId) -> Balance {
self.ledger.get_balance(&owner_id).into()
}
/// Returns balance lockedin pending transactions
pub fn get_locked_balance(&self, owner_id: AccountId) -> Balance {
self.ledger.get_locked_balance(&owner_id).into()
}
//// How many rollbacks we have had
pub fn get_rollback_count(&self) -> u64 {
self.ledger.rollbacks
}
/// Returns balance of the `owner_id` account.
pub fn get_name(&self) -> &str {
return &self.metadata.name;
}
/// Send owner's tokens to another person or a smart contract
#[payable]
pub fn send(&mut self, new_owner_id: AccountId, amount: Balance, message: Vec<u8>) {
self.ledger.send(env::predecessor_account_id(), new_owner_id, amount, message);
}
/**
* After trying to call receiving smart contract if it reports it can receive tokens.
*
* We gpt the interface test promise back. If the account was not smart contract, finalise the transaction.
* Otherwise trigger the smart contract notifier.
*/
pub fn handle_receiver(&mut self, old_owner_id: AccountId, new_owner_id: AccountId, amount_received: U128, amount_total: U128, message: Vec<u8>) {
// Only callable by self
assert_eq!(env::current_account_id(), env::predecessor_account_id());
env::log(b"handle_receiver reached");
let uint_amount_received: u128 = amount_received.into();
let uint_amount_total: u128 = amount_total.into();
if is_promise_success() {
// The send() was destined to a compatible receiver smart contract.
// Build another promise that notifies the smart contract
// that is has received new tokens.
env::log(b"Constructing smart contract notifier promise");
let promise0 = env::promise_create(
new_owner_id.clone(),
b"on_token_received",
json!({
"sender_id": old_owner_id,
"amount_received": amount_received,
"amount_total": amount_total,
"message": message,
}).to_string().as_bytes(),
0,
SINGLE_CALL_GAS/10,
);
// Construct the promise that calls back the
// token contract to finalise the transaction
let promise1 = env::promise_then(
promise0,
env::current_account_id(),
b"handle_token_received",
json!({
"old_owner_id": old_owner_id,
"new_owner_id": new_owner_id,
"amount_received": amount_received,
}).to_string().as_bytes(),
0,
SINGLE_CALL_GAS/10,
);
env::promise_return(promise1);
} else {
// Non-code account
// Finalise transaction now.
self.ledger.finalise(new_owner_id, uint_amount_received);
}
}
/// Smart contract notify succeed, free up any locked balance
/// TODO: Add functionality so that the smart contract that received tokens can trigger a new promise chain here
pub fn handle_token_received(&mut self, old_owner_id: AccountId, new_owner_id: AccountId, amount_received: U128) {
// Only callable by self
assert_eq!(env::current_account_id(), env::predecessor_account_id());
env::log(b"Checking for the need to rollback smart contract transaction");
let amount_received: u128 = amount_received.into();
// TODO: Have some nice error code logic here
if is_promise_success() {
self.ledger.finalise(new_owner_id, amount_received);
} else {
self.ledger.rollback(old_owner_id, new_owner_id, amount_received);
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use near_sdk::MockedBlockchain;
use near_sdk::{testing_env, VMContext};
fn alice() -> AccountId {
"alice.near".to_string()
}
fn | bob | identifier_name | |
mp4-tools.ts | {
let matchingTraf = trafs[index];
mdatTrafPairs.push({
mdat: mdat,
traf: matchingTraf
});
});
mdatTrafPairs.forEach(function (pair) {
let mdat = pair.mdat;
let mdatBytes = mdat.data.subarray(mdat.start, mdat.end);
let traf = pair.traf;
let trafBytes = traf.data.subarray(traf.start, traf.end);
let tfhd = findBox(trafBytes, ['tfhd']);
// Exactly 1 tfhd per traf
let headerInfo = parseTfhd(tfhd[0]);
let trackId = headerInfo.trackId;
let tfdt = findBox(trafBytes, ['tfdt']);
// Either 0 or 1 tfdt per traf
let baseMediaDecodeTime = (tfdt.length > 0) ? parseTfdt(tfdt[0]).baseMediaDecodeTime : 0;
let truns = findBox(trafBytes, ['trun']);
let samples;
let seiNals;
// Only parse video data for the chosen video track
if (videoTrackId === trackId && truns.length > 0) {
samples = parseSamples(truns, baseMediaDecodeTime, headerInfo);
seiNals = findSeiNals(mdatBytes, samples, trackId);
captionNals = captionNals.concat(seiNals);
}
});
return captionNals;
}
export function parseTfhd (tfhd) {
const data = tfhd.data.subarray(tfhd.start, tfhd.end);
let
view = new DataView(data.buffer, data.byteOffset, data.byteLength),
result = {
version: data[0],
flags: new Uint8Array(data.subarray(1, 4)),
trackId: view.getUint32(4)
} as any,
baseDataOffsetPresent = result.flags[2] & 0x01,
sampleDescriptionIndexPresent = result.flags[2] & 0x02,
defaultSampleDurationPresent = result.flags[2] & 0x08,
defaultSampleSizePresent = result.flags[2] & 0x10,
defaultSampleFlagsPresent = result.flags[2] & 0x20,
durationIsEmpty = result.flags[0] & 0x010000,
defaultBaseIsMoof = result.flags[0] & 0x020000,
i;
i = 8;
if (baseDataOffsetPresent) {
i += 4; // truncate top 4 bytes
// FIXME: should we read the full 64 bits?
result.baseDataOffset = view.getUint32(12);
i += 4;
}
if (sampleDescriptionIndexPresent) {
result.sampleDescriptionIndex = view.getUint32(i);
i += 4;
}
if (defaultSampleDurationPresent) {
result.defaultSampleDuration = view.getUint32(i);
i += 4;
}
if (defaultSampleSizePresent) {
result.defaultSampleSize = view.getUint32(i);
i += 4;
}
if (defaultSampleFlagsPresent) {
result.defaultSampleFlags = view.getUint32(i);
}
if (durationIsEmpty) {
result.durationIsEmpty = true;
}
if (!baseDataOffsetPresent && defaultBaseIsMoof) {
result.baseDataOffsetIsMoof = true;
}
return result;
}
export function parseTfdt (tfdt) {
const data = tfdt.data.subarray(tfdt.start, tfdt.end);
let result = {
version: data[0],
flags: new Uint8Array(data.subarray(1, 4)),
baseMediaDecodeTime: toUnsigned(data[4] << 24 | data[5] << 16 | data[6] << 8 | data[7])
};
if (result.version === 1) {
result.baseMediaDecodeTime *= Math.pow(2, 32);
result.baseMediaDecodeTime += toUnsigned(data[8] << 24 | data[9] << 16 | data[10] << 8 | data[11]);
}
return result;
}
/**
* Parses sample information out of Track Run Boxes and calculates
* the absolute presentation and decode timestamps of each sample.
*
* This code was ported from the mux.js project at: https://github.com/videojs/mux.js
*
* @param {Array<Uint8Array>} truns - The Trun Run boxes to be parsed
* @param {Number} baseMediaDecodeTime - base media decode time from tfdt
@see ISO-BMFF-12/2015, Section 8.8.12
* @param {Object} tfhd - The parsed Track Fragment Header
* @see inspect.parseTfhd
* @return {Object[]} the parsed samples
*
* @see ISO-BMFF-12/2015, Section 8.8.8
**/
export function parseSamples (truns, baseMediaDecodeTime, tfhd) {
let currentDts = baseMediaDecodeTime;
let defaultSampleDuration = tfhd.defaultSampleDuration || 0;
let defaultSampleSize = tfhd.defaultSampleSize || 0;
let trackId = tfhd.trackId;
let allSamples = [] as any;
truns.forEach(function (trun) {
// Note: We currently do not parse the sample table as well
// as the trun. It's possible some sources will require this.
// moov > trak > mdia > minf > stbl
let trackRun = parseTrun(trun);
let samples = trackRun.samples as any[];
samples.forEach(function (sample) {
if (sample.duration === undefined) {
sample.duration = defaultSampleDuration;
}
if (sample.size === undefined) {
sample.size = defaultSampleSize;
}
sample.trackId = trackId;
sample.dts = currentDts;
if (sample.compositionTimeOffset === undefined) {
sample.compositionTimeOffset = 0;
}
sample.pts = currentDts + sample.compositionTimeOffset;
currentDts += sample.duration;
});
allSamples = allSamples.concat(samples);
});
return allSamples;
}
/**
* Finds SEI nal units contained in a Media Data Box.
* Assumes that `parseSamples` has been called first.
*
* This was ported from the mux.js project at: https://github.com/videojs/mux.js
*
* @param {Uint8Array} avcStream - The bytes of the mdat
* @param {Object[]} samples - The samples parsed out by `parseSamples`
* @param {Number} trackId - The trackId of this video track
* @return {Object[]} seiNals - the parsed SEI NALUs found.
* The contents of the seiNal should match what is expected by
* CaptionStream.push (nalUnitType, size, data, escapedRBSP, pts, dts)
*
* @see ISO-BMFF-12/2015, Section 8.1.1
* @see Rec. ITU-T H.264, 7.3.2.3.1
**/
export function findSeiNals (avcStream, samples, trackId) {
let
avcView = new DataView(avcStream.buffer, avcStream.byteOffset, avcStream.byteLength),
result = [] as any,
seiNal,
i,
length,
lastMatchedSample;
for (i = 0; i + 4 < avcStream.length; i += length) {
length = avcView.getUint32(i);
i += 4;
// Bail if this doesn't appear to be an H264 stream
if (length <= 0) {
continue;
}
switch (avcStream[i] & 0x1F) {
case 0x06:
var data = avcStream.subarray(i + 1, i + 1 + length);
var matchingSample = mapToSample(i, samples);
seiNal = {
nalUnitType: 'sei_rbsp',
size: length,
data: data,
escapedRBSP: discardEmulationPreventionBytes(data),
trackId: trackId
};
if (matchingSample) {
seiNal.pts = matchingSample.pts;
seiNal.dts = matchingSample.dts;
lastMatchedSample = matchingSample;
} else if (lastMatchedSample) {
// If a matching sample cannot be found, use the last
// sample's values as they should be as close as possible
seiNal.pts = lastMatchedSample.pts;
seiNal.dts = lastMatchedSample.dts;
} else | {
logger.log('We\'ve encountered a nal unit without data. See mux.js#233.');
break;
} | conditional_block | |
mp4-tools.ts | = mdat.data.subarray(mdat.start, mdat.end);
let traf = pair.traf;
let trafBytes = traf.data.subarray(traf.start, traf.end);
let tfhd = findBox(trafBytes, ['tfhd']);
// Exactly 1 tfhd per traf
let headerInfo = parseTfhd(tfhd[0]);
let trackId = headerInfo.trackId;
let tfdt = findBox(trafBytes, ['tfdt']);
// Either 0 or 1 tfdt per traf
let baseMediaDecodeTime = (tfdt.length > 0) ? parseTfdt(tfdt[0]).baseMediaDecodeTime : 0;
let truns = findBox(trafBytes, ['trun']);
let samples;
let seiNals;
// Only parse video data for the chosen video track
if (videoTrackId === trackId && truns.length > 0) {
samples = parseSamples(truns, baseMediaDecodeTime, headerInfo);
seiNals = findSeiNals(mdatBytes, samples, trackId);
captionNals = captionNals.concat(seiNals);
}
});
return captionNals;
}
export function parseTfhd (tfhd) {
const data = tfhd.data.subarray(tfhd.start, tfhd.end);
let
view = new DataView(data.buffer, data.byteOffset, data.byteLength),
result = {
version: data[0],
flags: new Uint8Array(data.subarray(1, 4)),
trackId: view.getUint32(4)
} as any,
baseDataOffsetPresent = result.flags[2] & 0x01,
sampleDescriptionIndexPresent = result.flags[2] & 0x02,
defaultSampleDurationPresent = result.flags[2] & 0x08,
defaultSampleSizePresent = result.flags[2] & 0x10,
defaultSampleFlagsPresent = result.flags[2] & 0x20,
durationIsEmpty = result.flags[0] & 0x010000,
defaultBaseIsMoof = result.flags[0] & 0x020000,
i;
i = 8;
if (baseDataOffsetPresent) {
i += 4; // truncate top 4 bytes
// FIXME: should we read the full 64 bits?
result.baseDataOffset = view.getUint32(12);
i += 4;
}
if (sampleDescriptionIndexPresent) {
result.sampleDescriptionIndex = view.getUint32(i);
i += 4;
}
if (defaultSampleDurationPresent) {
result.defaultSampleDuration = view.getUint32(i);
i += 4;
}
if (defaultSampleSizePresent) {
result.defaultSampleSize = view.getUint32(i);
i += 4;
}
if (defaultSampleFlagsPresent) {
result.defaultSampleFlags = view.getUint32(i);
}
if (durationIsEmpty) {
result.durationIsEmpty = true;
}
if (!baseDataOffsetPresent && defaultBaseIsMoof) {
result.baseDataOffsetIsMoof = true;
}
return result;
}
export function parseTfdt (tfdt) {
const data = tfdt.data.subarray(tfdt.start, tfdt.end);
let result = {
version: data[0],
flags: new Uint8Array(data.subarray(1, 4)),
baseMediaDecodeTime: toUnsigned(data[4] << 24 | data[5] << 16 | data[6] << 8 | data[7])
};
if (result.version === 1) {
result.baseMediaDecodeTime *= Math.pow(2, 32);
result.baseMediaDecodeTime += toUnsigned(data[8] << 24 | data[9] << 16 | data[10] << 8 | data[11]);
}
return result;
}
/**
* Parses sample information out of Track Run Boxes and calculates
* the absolute presentation and decode timestamps of each sample.
*
* This code was ported from the mux.js project at: https://github.com/videojs/mux.js
*
* @param {Array<Uint8Array>} truns - The Trun Run boxes to be parsed
* @param {Number} baseMediaDecodeTime - base media decode time from tfdt
@see ISO-BMFF-12/2015, Section 8.8.12
* @param {Object} tfhd - The parsed Track Fragment Header
* @see inspect.parseTfhd
* @return {Object[]} the parsed samples
*
* @see ISO-BMFF-12/2015, Section 8.8.8
**/
export function parseSamples (truns, baseMediaDecodeTime, tfhd) {
let currentDts = baseMediaDecodeTime;
let defaultSampleDuration = tfhd.defaultSampleDuration || 0;
let defaultSampleSize = tfhd.defaultSampleSize || 0;
let trackId = tfhd.trackId;
let allSamples = [] as any;
truns.forEach(function (trun) {
// Note: We currently do not parse the sample table as well
// as the trun. It's possible some sources will require this.
// moov > trak > mdia > minf > stbl
let trackRun = parseTrun(trun);
let samples = trackRun.samples as any[];
samples.forEach(function (sample) {
if (sample.duration === undefined) {
sample.duration = defaultSampleDuration;
}
if (sample.size === undefined) {
sample.size = defaultSampleSize;
}
sample.trackId = trackId;
sample.dts = currentDts;
if (sample.compositionTimeOffset === undefined) {
sample.compositionTimeOffset = 0;
}
sample.pts = currentDts + sample.compositionTimeOffset;
currentDts += sample.duration;
});
allSamples = allSamples.concat(samples);
});
return allSamples;
}
/**
* Finds SEI nal units contained in a Media Data Box.
* Assumes that `parseSamples` has been called first.
*
* This was ported from the mux.js project at: https://github.com/videojs/mux.js
*
* @param {Uint8Array} avcStream - The bytes of the mdat
* @param {Object[]} samples - The samples parsed out by `parseSamples`
* @param {Number} trackId - The trackId of this video track
* @return {Object[]} seiNals - the parsed SEI NALUs found.
* The contents of the seiNal should match what is expected by
* CaptionStream.push (nalUnitType, size, data, escapedRBSP, pts, dts)
*
* @see ISO-BMFF-12/2015, Section 8.1.1
* @see Rec. ITU-T H.264, 7.3.2.3.1
**/
export function findSeiNals (avcStream, samples, trackId) {
let
avcView = new DataView(avcStream.buffer, avcStream.byteOffset, avcStream.byteLength),
result = [] as any,
seiNal,
i,
length,
lastMatchedSample;
for (i = 0; i + 4 < avcStream.length; i += length) {
length = avcView.getUint32(i);
i += 4;
// Bail if this doesn't appear to be an H264 stream
if (length <= 0) {
continue;
}
switch (avcStream[i] & 0x1F) {
case 0x06:
var data = avcStream.subarray(i + 1, i + 1 + length);
var matchingSample = mapToSample(i, samples);
seiNal = {
nalUnitType: 'sei_rbsp',
size: length,
data: data,
escapedRBSP: discardEmulationPreventionBytes(data),
trackId: trackId
};
if (matchingSample) {
seiNal.pts = matchingSample.pts;
seiNal.dts = matchingSample.dts;
lastMatchedSample = matchingSample;
} else if (lastMatchedSample) {
// If a matching sample cannot be found, use the last
// sample's values as they should be as close as possible
seiNal.pts = lastMatchedSample.pts;
seiNal.dts = lastMatchedSample.dts;
} else {
logger.log('We\'ve encountered a nal unit without data. See mux.js#233.');
break;
}
result.push(seiNal);
break;
default:
break;
}
}
return result;
}
/**
* This code was ported from the mux.js project at: https://github.com/videojs/mux.js
*
* @param trun
*/
export function | parseTrun | identifier_name | |
mp4-tools.ts | 5, Section 8.1.1
* @see Rec. ITU-T H.264, 7.3.2.3.1
**/
export function findSeiNals (avcStream, samples, trackId) {
let
avcView = new DataView(avcStream.buffer, avcStream.byteOffset, avcStream.byteLength),
result = [] as any,
seiNal,
i,
length,
lastMatchedSample;
for (i = 0; i + 4 < avcStream.length; i += length) {
length = avcView.getUint32(i);
i += 4;
// Bail if this doesn't appear to be an H264 stream
if (length <= 0) {
continue;
}
switch (avcStream[i] & 0x1F) {
case 0x06:
var data = avcStream.subarray(i + 1, i + 1 + length);
var matchingSample = mapToSample(i, samples);
seiNal = {
nalUnitType: 'sei_rbsp',
size: length,
data: data,
escapedRBSP: discardEmulationPreventionBytes(data),
trackId: trackId
};
if (matchingSample) {
seiNal.pts = matchingSample.pts;
seiNal.dts = matchingSample.dts;
lastMatchedSample = matchingSample;
} else if (lastMatchedSample) {
// If a matching sample cannot be found, use the last
// sample's values as they should be as close as possible
seiNal.pts = lastMatchedSample.pts;
seiNal.dts = lastMatchedSample.dts;
} else {
logger.log('We\'ve encountered a nal unit without data. See mux.js#233.');
break;
}
result.push(seiNal);
break;
default:
break;
}
}
return result;
}
/**
* This code was ported from the mux.js project at: https://github.com/videojs/mux.js
*
* @param trun
*/
export function parseTrun (trun) {
const data = trun.data.subarray(trun.start, trun.end);
let
result = {
version: data[0],
flags: new Uint8Array(data.subarray(1, 4)),
samples: []
} as any,
view = new DataView(data.buffer, data.byteOffset, data.byteLength),
// Flag interpretation
dataOffsetPresent = result.flags[2] & 0x01, // compare with 2nd byte of 0x1
firstSampleFlagsPresent = result.flags[2] & 0x04, // compare with 2nd byte of 0x4
sampleDurationPresent = result.flags[1] & 0x01, // compare with 2nd byte of 0x100
sampleSizePresent = result.flags[1] & 0x02, // compare with 2nd byte of 0x200
sampleFlagsPresent = result.flags[1] & 0x04, // compare with 2nd byte of 0x400
sampleCompositionTimeOffsetPresent = result.flags[1] & 0x08, // compare with 2nd byte of 0x800
sampleCount = view.getUint32(4),
offset = 8,
sample;
if (dataOffsetPresent) {
// 32 bit signed integer
result.dataOffset = view.getInt32(offset);
offset += 4;
}
// Overrides the flags for the first sample only. The order of
// optional values will be: duration, size, compositionTimeOffset
if (firstSampleFlagsPresent && sampleCount) {
sample = {
flags: parseSampleFlags(data.subarray(offset, offset + 4))
};
offset += 4;
if (sampleDurationPresent) {
sample.duration = view.getUint32(offset);
offset += 4;
}
if (sampleSizePresent) {
sample.size = view.getUint32(offset);
offset += 4;
}
if (sampleCompositionTimeOffsetPresent) {
// Note: this should be a signed int if version is 1
sample.compositionTimeOffset = view.getUint32(offset);
offset += 4;
}
result.samples.push(sample);
sampleCount--;
}
while (sampleCount--) {
sample = {};
if (sampleDurationPresent) {
sample.duration = view.getUint32(offset);
offset += 4;
}
if (sampleSizePresent) {
sample.size = view.getUint32(offset);
offset += 4;
}
if (sampleFlagsPresent) {
sample.flags = parseSampleFlags(data.subarray(offset, offset + 4));
offset += 4;
}
if (sampleCompositionTimeOffsetPresent) {
// Note: this should be a signed int if version is 1
sample.compositionTimeOffset = view.getUint32(offset);
offset += 4;
}
result.samples.push(sample);
}
return result;
}
/**
* Parses sample information out of Track Run Boxes and calculates
* the absolute presentation and decode timestamps of each sample.
*
* This code was ported from the mux.js project at: https://github.com/videojs/mux.js
*
* @param {Array<Uint8Array>} truns - The Trun Run boxes to be parsed
* @param {Number} baseMediaDecodeTime - base media decode time from tfdt
@see ISO-BMFF-12/2015, Section 8.8.12
* @param {Object} tfhd - The parsed Track Fragment Header
* @see inspect.parseTfhd
* @return {Object[]} the parsed samples
*
* @see ISO-BMFF-12/2015, Section 8.8.8
**/
export function parseSampleFlags (flags) {
return {
isLeading: (flags[0] & 0x0c) >>> 2,
dependsOn: flags[0] & 0x03,
isDependedOn: (flags[1] & 0xc0) >>> 6,
hasRedundancy: (flags[1] & 0x30) >>> 4,
paddingValue: (flags[1] & 0x0e) >>> 1,
isNonSyncSample: flags[1] & 0x01,
degradationPriority: (flags[2] << 8) | flags[3]
};
}
/**
* Maps an offset in the mdat to a sample based on the the size of the samples.
* Assumes that `parseSamples` has been called first.
*
* This code was ported from the mux.js project at: https://github.com/videojs/mux.js
*
* @param {Number} offset - The offset into the mdat
* @param {Object[]} samples - An array of samples, parsed using `parseSamples`
* @return {?Object} The matching sample, or null if no match was found.
*
* @see ISO-BMFF-12/2015, Section 8.8.8
**/
export function mapToSample (offset, samples) {
let approximateOffset = offset;
for (let i = 0; i < samples.length; i++) {
let sample = samples[i];
if (approximateOffset < sample.size) {
return sample;
}
approximateOffset -= sample.size;
}
return null;
}
/**
* Determine the base media decode start time, in seconds, for an MP4
* fragment. If multiple fragments are specified, the earliest time is
* returned.
*
* The base media decode time can be parsed from track fragment
* metadata:
* ```
* moof > traf > tfdt.baseMediaDecodeTime
* ```
* It requires the timescale value from the mdhd to interpret.
*
* @param initData {object} containing information about track
* @param fragment mp4 fragment data
* @return {number} the earliest base media decode start time for the
* fragment, in seconds
*/
export function getStartDTS (initData, fragment) | {
let trafs, baseTimes, result;
// we need info from two childrend of each track fragment box
trafs = findBox(fragment, ['moof', 'traf']);
// determine the start times for each track
baseTimes = [].concat.apply([], trafs.map(function (traf) {
return findBox(traf, ['tfhd']).map(function (tfhd) {
let id, scale, baseTime;
// get the track id from the tfhd
id = readUint32(tfhd, 4);
// assume a 90kHz clock if no timescale was specified
scale = initData[id].timescale || 90e3;
// get the base media decode time from the tfdt
baseTime = findBox(traf, ['tfdt']).map(function (tfdt) {
let version, result;
| identifier_body | |
mp4-tools.ts | Object[]>} A mapping of video trackId to
* a list of seiNals found in that track
**/
export function parseCaptionNals (data, videoTrackId) {
let captionNals = [] as any;
// To get the samples
let trafs = findBox(data, ['moof', 'traf']);
// To get SEI NAL units
let mdats = findBox(data, ['mdat']);
let mdatTrafPairs = [] as any;
// Pair up each traf with a mdat as moofs and mdats are in pairs
mdats.forEach(function (mdat, index) {
let matchingTraf = trafs[index];
mdatTrafPairs.push({
mdat: mdat,
traf: matchingTraf
});
});
mdatTrafPairs.forEach(function (pair) {
let mdat = pair.mdat;
let mdatBytes = mdat.data.subarray(mdat.start, mdat.end);
let traf = pair.traf;
let trafBytes = traf.data.subarray(traf.start, traf.end);
let tfhd = findBox(trafBytes, ['tfhd']);
// Exactly 1 tfhd per traf
let headerInfo = parseTfhd(tfhd[0]);
let trackId = headerInfo.trackId;
let tfdt = findBox(trafBytes, ['tfdt']);
// Either 0 or 1 tfdt per traf
let baseMediaDecodeTime = (tfdt.length > 0) ? parseTfdt(tfdt[0]).baseMediaDecodeTime : 0;
let truns = findBox(trafBytes, ['trun']);
let samples;
let seiNals;
// Only parse video data for the chosen video track
if (videoTrackId === trackId && truns.length > 0) {
samples = parseSamples(truns, baseMediaDecodeTime, headerInfo);
seiNals = findSeiNals(mdatBytes, samples, trackId);
captionNals = captionNals.concat(seiNals);
}
});
return captionNals;
}
export function parseTfhd (tfhd) {
const data = tfhd.data.subarray(tfhd.start, tfhd.end);
let
view = new DataView(data.buffer, data.byteOffset, data.byteLength),
result = {
version: data[0],
flags: new Uint8Array(data.subarray(1, 4)),
trackId: view.getUint32(4)
} as any,
baseDataOffsetPresent = result.flags[2] & 0x01,
sampleDescriptionIndexPresent = result.flags[2] & 0x02,
defaultSampleDurationPresent = result.flags[2] & 0x08,
defaultSampleSizePresent = result.flags[2] & 0x10,
defaultSampleFlagsPresent = result.flags[2] & 0x20,
durationIsEmpty = result.flags[0] & 0x010000,
defaultBaseIsMoof = result.flags[0] & 0x020000,
i;
i = 8;
if (baseDataOffsetPresent) {
i += 4; // truncate top 4 bytes
// FIXME: should we read the full 64 bits?
result.baseDataOffset = view.getUint32(12);
i += 4;
}
if (sampleDescriptionIndexPresent) {
result.sampleDescriptionIndex = view.getUint32(i);
i += 4;
}
if (defaultSampleDurationPresent) {
result.defaultSampleDuration = view.getUint32(i);
i += 4;
}
if (defaultSampleSizePresent) {
result.defaultSampleSize = view.getUint32(i);
i += 4;
}
if (defaultSampleFlagsPresent) {
result.defaultSampleFlags = view.getUint32(i);
}
if (durationIsEmpty) {
result.durationIsEmpty = true;
}
if (!baseDataOffsetPresent && defaultBaseIsMoof) {
result.baseDataOffsetIsMoof = true;
}
return result;
}
export function parseTfdt (tfdt) {
const data = tfdt.data.subarray(tfdt.start, tfdt.end);
let result = {
version: data[0],
flags: new Uint8Array(data.subarray(1, 4)),
baseMediaDecodeTime: toUnsigned(data[4] << 24 | data[5] << 16 | data[6] << 8 | data[7])
};
if (result.version === 1) {
result.baseMediaDecodeTime *= Math.pow(2, 32);
result.baseMediaDecodeTime += toUnsigned(data[8] << 24 | data[9] << 16 | data[10] << 8 | data[11]);
}
return result;
}
/**
* Parses sample information out of Track Run Boxes and calculates
* the absolute presentation and decode timestamps of each sample.
*
* This code was ported from the mux.js project at: https://github.com/videojs/mux.js
*
* @param {Array<Uint8Array>} truns - The Trun Run boxes to be parsed
* @param {Number} baseMediaDecodeTime - base media decode time from tfdt
@see ISO-BMFF-12/2015, Section 8.8.12
* @param {Object} tfhd - The parsed Track Fragment Header
* @see inspect.parseTfhd
* @return {Object[]} the parsed samples
*
* @see ISO-BMFF-12/2015, Section 8.8.8
**/
export function parseSamples (truns, baseMediaDecodeTime, tfhd) {
let currentDts = baseMediaDecodeTime;
let defaultSampleDuration = tfhd.defaultSampleDuration || 0;
let defaultSampleSize = tfhd.defaultSampleSize || 0;
let trackId = tfhd.trackId;
let allSamples = [] as any;
truns.forEach(function (trun) {
// Note: We currently do not parse the sample table as well
// as the trun. It's possible some sources will require this.
// moov > trak > mdia > minf > stbl
let trackRun = parseTrun(trun);
let samples = trackRun.samples as any[];
samples.forEach(function (sample) {
if (sample.duration === undefined) {
sample.duration = defaultSampleDuration;
}
if (sample.size === undefined) {
sample.size = defaultSampleSize;
}
sample.trackId = trackId;
sample.dts = currentDts;
if (sample.compositionTimeOffset === undefined) {
sample.compositionTimeOffset = 0;
}
sample.pts = currentDts + sample.compositionTimeOffset;
currentDts += sample.duration;
});
allSamples = allSamples.concat(samples);
});
return allSamples;
}
/**
* Finds SEI nal units contained in a Media Data Box.
* Assumes that `parseSamples` has been called first.
*
* This was ported from the mux.js project at: https://github.com/videojs/mux.js
*
* @param {Uint8Array} avcStream - The bytes of the mdat
* @param {Object[]} samples - The samples parsed out by `parseSamples`
* @param {Number} trackId - The trackId of this video track
* @return {Object[]} seiNals - the parsed SEI NALUs found.
* The contents of the seiNal should match what is expected by
* CaptionStream.push (nalUnitType, size, data, escapedRBSP, pts, dts)
*
* @see ISO-BMFF-12/2015, Section 8.1.1
* @see Rec. ITU-T H.264, 7.3.2.3.1
**/
export function findSeiNals (avcStream, samples, trackId) {
let
avcView = new DataView(avcStream.buffer, avcStream.byteOffset, avcStream.byteLength),
result = [] as any,
seiNal,
i,
length,
lastMatchedSample;
for (i = 0; i + 4 < avcStream.length; i += length) {
length = avcView.getUint32(i);
i += 4;
// Bail if this doesn't appear to be an H264 stream
if (length <= 0) {
continue;
}
switch (avcStream[i] & 0x1F) {
case 0x06:
var data = avcStream.subarray(i + 1, i + 1 + length);
var matchingSample = mapToSample(i, samples);
seiNal = {
nalUnitType: 'sei_rbsp',
size: length,
data: data, | escapedRBSP: discardEmulationPreventionBytes(data),
trackId: trackId
}; | random_line_split | |
processor.rs | system, such as "Windows NT", "Mac OS X", or "Linux".
///
/// If the information is present in the dump but its value is unknown, this field will contain
/// a numeric value. If the information is not present in the dump, this field will be empty.
pub fn os_name(&self) -> String {
unsafe {
let ptr = system_info_os_name(self);
utils::ptr_to_string(ptr)
}
}
/// Strings identifying the version and build number of the operating system.
///
/// If the dump does not contain either information, the component will be empty. Tries to parse
/// the version number from the build if it is not apparent from the version string.
pub fn os_parts(&self) -> (String, String) {
let string = unsafe {
let ptr = system_info_os_version(self);
utils::ptr_to_string(ptr)
};
let mut parts = string.splitn(2, ' ');
let version = parts.next().unwrap_or("0.0.0");
let build = parts.next().unwrap_or("");
if version == "0.0.0" {
// Try to parse the Linux build string. Breakpad and Crashpad run
// `uname -srvmo` to generate it. This roughtly resembles:
// "Linux [version] [build...] [arch] Linux/GNU"
if let Some(captures) = LINUX_BUILD_RE.captures(&build) {
let version = captures.get(1).unwrap(); // uname -r portion
let build = captures.get(2).unwrap(); // uname -v portion
return (version.as_str().into(), build.as_str().into());
}
}
(version.into(), build.into())
}
/// A string identifying the version of the operating system.
///
/// The version will be formatted as three-component semantic version, such as "5.1.2600" or
/// "10.4.8". If the dump does not contain this information, this field will contain "0.0.0".
pub fn os_version(&self) -> String {
self.os_parts().0
}
/// A string identifying the build of the operating system.
///
/// This build version is platform dependent, such as "Service Pack 2" or "8L2127". If the dump
/// does not contain this information, this field will be empty.
pub fn os_build(&self) -> String {
self.os_parts().1
}
/// A string identifying the basic CPU family, such as "x86" or "ppc".
///
/// If this information is present in the dump but its value is unknown,
/// this field will contain a numeric value. If the information is not
/// present in the dump, this field will be empty.
pub fn cpu_family(&self) -> String {
unsafe {
let ptr = system_info_cpu_family(self);
utils::ptr_to_string(ptr)
}
}
/// The architecture of the CPU parsed from `ProcessState::cpu_family`.
///
/// If this information is present in the dump but its value is unknown
/// or if the value is missing, this field will contain `Arch::Unknown`.
pub fn cpu_arch(&self) -> Arch {
Arch::from_breakpad(&self.cpu_family()).unwrap_or_default()
}
/// A string further identifying the specific CPU.
///
/// This information depends on the CPU vendor, such as "GenuineIntel level 6 model 13 stepping
/// 8". If the information is not present in the dump, or additional identifying information is
/// not defined for the CPU family, this field will be empty.
pub fn cpu_info(&self) -> String {
unsafe {
let ptr = system_info_cpu_info(self);
utils::ptr_to_string(ptr)
}
}
/// The number of processors in the system.
///
/// Will be greater than one for multi-core systems.
pub fn cpu_count(&self) -> u32 {
unsafe { system_info_cpu_count(self) }
}
}
impl fmt::Debug for SystemInfo {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("SystemInfo")
.field("os_name", &self.os_name())
.field("os_version", &self.os_version())
.field("cpu_family", &self.cpu_family())
.field("cpu_info", &self.cpu_info())
.field("cpu_count", &self.cpu_count())
.finish()
}
}
/// Result of processing a Minidump or Microdump file.
///
/// Usually included in `ProcessError` when the file cannot be processed.
#[repr(u32)]
#[derive(Debug, Eq, PartialEq, Copy, Clone)]
pub enum ProcessResult {
/// The dump was processed successfully.
Ok,
/// The minidump file was not found or the buffer was empty.
MinidumpNotFound,
/// The minidump file had no header.
NoMinidumpHeader,
/// The minidump file has no thread list.
NoThreadList,
/// There was an error getting one thread's data from the dump.
InvalidThreadIndex,
/// There was an error getting a thread id from the thread's data.
InvalidThreadId,
/// There was more than one requesting thread.
DuplicateRequestingThreads,
/// The dump processing was interrupted (not fatal).
SymbolSupplierInterrupted,
}
impl fmt::Display for ProcessResult {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let formatted = match self {
&ProcessResult::Ok => "dump processed successfully",
&ProcessResult::MinidumpNotFound => "file could not be opened",
&ProcessResult::NoMinidumpHeader => "minidump header missing",
&ProcessResult::NoThreadList => "minidump has no thread list",
&ProcessResult::InvalidThreadIndex => "could not get thread data",
&ProcessResult::InvalidThreadId => "could not get a thread by id",
&ProcessResult::DuplicateRequestingThreads => "multiple requesting threads",
&ProcessResult::SymbolSupplierInterrupted => "processing was interrupted (not fatal)",
};
write!(f, "{}", formatted)
}
}
/// An error generated when trying to process a minidump.
#[derive(Debug, Fail, Copy, Clone)]
#[fail(display = "minidump processing failed: {}", _0)]
pub struct ProcessMinidumpError(ProcessResult);
impl ProcessMinidumpError {
/// Returns the kind of this error.
pub fn kind(&self) -> ProcessResult {
self.0
}
}
/// Internal type used to transfer Breakpad symbols over FFI.
#[repr(C)]
struct SymbolEntry {
debug_identifier: *const c_char,
symbol_size: usize,
symbol_data: *const u8,
}
/// Container for call frame information (CFI) of `CodeModules`.
///
/// This information is required by the stackwalker in case framepointers are
/// missing in the raw stacktraces. Frame information is given as plain ASCII
/// text as specified in the Breakpad symbol file specification.
pub type FrameInfoMap<'a> = BTreeMap<CodeModuleId, ByteView<'a>>;
type IProcessState = c_void;
/// Snapshot of the state of a processes during its crash. The object can be
/// obtained by processing Minidump or Microdump files.
pub struct ProcessState<'a> {
internal: *mut IProcessState,
_ty: PhantomData<ByteView<'a>>,
}
impl<'a> ProcessState<'a> {
/// Processes a minidump supplied via raw binary data.
///
/// Returns a `ProcessState` that contains information about the crashed
/// process. The parameter `frame_infos` expects a map of Breakpad symbols
/// containing STACK CFI and STACK WIN records to allow stackwalking with
/// omitted frame pointers.
pub fn from_minidump(
buffer: &ByteView<'a>,
frame_infos: Option<&FrameInfoMap>,
) -> Result<ProcessState<'a>, ProcessMinidumpError> {
let cfi_count = frame_infos.map_or(0, |s| s.len());
let mut result: ProcessResult = ProcessResult::Ok;
// Keep a reference to all CStrings to extend their lifetime.
let cfi_vec: Vec<_> = frame_infos.map_or(Vec::new(), |s| {
s.iter()
.map(|(k, v)| (CString::new(k.to_string()), v.len(), v.as_ptr()))
.collect()
});
// Keep a reference to all symbol entries to extend their lifetime.
let cfi_entries: Vec<_> = cfi_vec
.iter()
.map(|&(ref id, size, data)| SymbolEntry {
debug_identifier: id.as_ref().map(|i| i.as_ptr()).unwrap_or(ptr::null()),
symbol_size: size,
symbol_data: data,
})
.collect();
let internal = unsafe {
process_minidump(
buffer.as_ptr() as *const c_char,
buffer.len(),
cfi_entries.as_ptr(),
cfi_count,
&mut result,
)
};
if result == ProcessResult::Ok && !internal.is_null() | {
Ok(ProcessState {
internal,
_ty: PhantomData,
})
} | conditional_block | |
processor.rs | /// Returns the `CodeModule` that contains this frame's instruction.
pub fn module(&self) -> Option<&CodeModule> {
unsafe { stack_frame_module(self).as_ref() }
}
/// Returns how well the instruction pointer is trusted.
pub fn trust(&self) -> FrameTrust {
unsafe { stack_frame_trust(self) }
}
}
impl fmt::Debug for StackFrame {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("StackFrame")
.field("return_address", &self.return_address(Arch::Unknown))
.field("instruction", &self.instruction())
.field("trust", &self.trust())
.field("module", &self.module())
.finish()
}
}
/// Represents a thread of the `ProcessState` which holds a list of `StackFrame`s.
#[repr(C)]
pub struct CallStack(c_void);
impl CallStack {
/// Returns the thread identifier of this callstack.
pub fn thread_id(&self) -> u32 {
unsafe { call_stack_thread_id(self) }
}
/// Returns the list of `StackFrame`s in the call stack.
pub fn frames(&self) -> &[&StackFrame] {
unsafe {
let mut size = 0 as usize;
let data = call_stack_frames(self, &mut size);
let slice = slice::from_raw_parts(data, size);
mem::transmute(slice)
}
}
}
impl fmt::Debug for CallStack {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("CallStack")
.field("thread_id", &self.thread_id())
.field("frames", &self.frames())
.finish()
}
}
/// Information about the CPU and OS on which a minidump was generated.
#[repr(C)]
pub struct SystemInfo(c_void);
impl SystemInfo {
/// A string identifying the operating system, such as "Windows NT", "Mac OS X", or "Linux".
///
/// If the information is present in the dump but its value is unknown, this field will contain
/// a numeric value. If the information is not present in the dump, this field will be empty.
pub fn os_name(&self) -> String {
unsafe {
let ptr = system_info_os_name(self);
utils::ptr_to_string(ptr)
}
}
/// Strings identifying the version and build number of the operating system.
///
/// If the dump does not contain either information, the component will be empty. Tries to parse
/// the version number from the build if it is not apparent from the version string.
pub fn os_parts(&self) -> (String, String) {
let string = unsafe {
let ptr = system_info_os_version(self);
utils::ptr_to_string(ptr)
};
let mut parts = string.splitn(2, ' ');
let version = parts.next().unwrap_or("0.0.0");
let build = parts.next().unwrap_or("");
if version == "0.0.0" {
// Try to parse the Linux build string. Breakpad and Crashpad run
// `uname -srvmo` to generate it. This roughtly resembles:
// "Linux [version] [build...] [arch] Linux/GNU"
if let Some(captures) = LINUX_BUILD_RE.captures(&build) {
let version = captures.get(1).unwrap(); // uname -r portion
let build = captures.get(2).unwrap(); // uname -v portion
return (version.as_str().into(), build.as_str().into());
}
}
(version.into(), build.into())
}
/// A string identifying the version of the operating system.
///
/// The version will be formatted as three-component semantic version, such as "5.1.2600" or
/// "10.4.8". If the dump does not contain this information, this field will contain "0.0.0".
pub fn os_version(&self) -> String {
self.os_parts().0
}
/// A string identifying the build of the operating system.
///
/// This build version is platform dependent, such as "Service Pack 2" or "8L2127". If the dump
/// does not contain this information, this field will be empty.
pub fn os_build(&self) -> String {
self.os_parts().1
}
/// A string identifying the basic CPU family, such as "x86" or "ppc".
///
/// If this information is present in the dump but its value is unknown,
/// this field will contain a numeric value. If the information is not
/// present in the dump, this field will be empty.
pub fn cpu_family(&self) -> String {
unsafe {
let ptr = system_info_cpu_family(self);
utils::ptr_to_string(ptr)
}
}
/// The architecture of the CPU parsed from `ProcessState::cpu_family`.
///
/// If this information is present in the dump but its value is unknown
/// or if the value is missing, this field will contain `Arch::Unknown`.
pub fn cpu_arch(&self) -> Arch {
Arch::from_breakpad(&self.cpu_family()).unwrap_or_default()
}
/// A string further identifying the specific CPU.
///
/// This information depends on the CPU vendor, such as "GenuineIntel level 6 model 13 stepping
/// 8". If the information is not present in the dump, or additional identifying information is
/// not defined for the CPU family, this field will be empty.
pub fn cpu_info(&self) -> String {
unsafe {
let ptr = system_info_cpu_info(self);
utils::ptr_to_string(ptr)
}
}
/// The number of processors in the system.
///
/// Will be greater than one for multi-core systems.
pub fn cpu_count(&self) -> u32 {
unsafe { system_info_cpu_count(self) }
}
}
impl fmt::Debug for SystemInfo {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("SystemInfo")
.field("os_name", &self.os_name())
.field("os_version", &self.os_version())
.field("cpu_family", &self.cpu_family())
.field("cpu_info", &self.cpu_info())
.field("cpu_count", &self.cpu_count())
.finish()
}
}
/// Result of processing a Minidump or Microdump file.
///
/// Usually included in `ProcessError` when the file cannot be processed.
#[repr(u32)]
#[derive(Debug, Eq, PartialEq, Copy, Clone)]
pub enum ProcessResult {
/// The dump was processed successfully.
Ok,
/// The minidump file was not found or the buffer was empty.
MinidumpNotFound,
/// The minidump file had no header.
NoMinidumpHeader,
/// The minidump file has no thread list.
NoThreadList,
/// There was an error getting one thread's data from the dump.
InvalidThreadIndex,
/// There was an error getting a thread id from the thread's data.
InvalidThreadId,
/// There was more than one requesting thread.
DuplicateRequestingThreads,
/// The dump processing was interrupted (not fatal).
SymbolSupplierInterrupted,
}
impl fmt::Display for ProcessResult {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let formatted = match self {
&ProcessResult::Ok => "dump processed successfully",
&ProcessResult::MinidumpNotFound => "file could not be opened",
&ProcessResult::NoMinidumpHeader => "minidump header missing",
&ProcessResult::NoThreadList => "minidump has no thread list",
&ProcessResult::InvalidThreadIndex => "could not get thread data",
&ProcessResult::InvalidThreadId => "could not get a thread by id",
&ProcessResult::DuplicateRequestingThreads => "multiple requesting threads",
&ProcessResult::SymbolSupplierInterrupted => "processing was interrupted (not fatal)",
};
write!(f, "{}", formatted)
}
}
/// An error generated when trying to process a minidump.
#[derive(Debug, Fail, Copy, Clone)]
#[fail(display = "minidump processing failed: {}", _0)]
pub struct ProcessMinidumpError(ProcessResult);
impl ProcessMinidumpError {
/// Returns the kind of this error.
pub fn kind(&self) -> ProcessResult {
self.0
}
}
/// Internal type used to transfer Breakpad symbols over FFI.
#[repr(C)]
struct SymbolEntry {
debug_identifier: *const c_char,
symbol_size: usize,
symbol_data: *const u8,
}
/// Container for call frame information (CFI) of `CodeModules`.
///
/// This information is required by the stackwalker in case framepointers are
/// missing in the raw stacktraces. Frame information is given as plain ASCII
/// text as specified in the Breakpad symbol file specification.
pub type FrameInfoMap<'a> = BTreeMap<CodeModuleId, ByteView<'a>>;
type IProcessState = c_void;
/// Snapshot of the state of a processes during its crash. The object can be
/// obtained by processing Minidump or Microdump files.
pub struct | ProcessState | identifier_name | |
processor.rs | instruction of this stack frame.
#[repr(C)]
pub struct StackFrame(c_void);
impl StackFrame {
/// Returns the program counter location as an absolute virtual address.
///
/// - For the innermost called frame in a stack, this will be an exact
/// program counter or instruction pointer value.
///
/// - For all other frames, this address is within the instruction that
/// caused execution to branch to this frame's callee (although it may
/// not point to the exact beginning of that instruction). This ensures
/// that, when we look up the source code location for this frame, we
/// get the source location of the call, not of the point at which
/// control will resume when the call returns, which may be on the next
/// line. (If the compiler knows the callee never returns, it may even
/// place the call instruction at the very end of the caller's machine
/// code, such that the "return address" (which will never be used)
/// immediately after the call instruction is in an entirely different
/// function, perhaps even from a different source file.)
///
/// On some architectures, the return address as saved on the stack or in
/// a register is fine for looking up the point of the call. On others, it
/// requires adjustment. ReturnAddress returns the address as saved by the
/// machine.
///
/// Use `trust` to obtain how trustworthy this instruction is.
pub fn instruction(&self) -> u64 {
unsafe { stack_frame_instruction(self) }
}
// Return the actual return address, as saved on the stack or in a
// register. See the comments for `StackFrame::instruction' for
// details.
pub fn return_address(&self, arch: Arch) -> u64 {
let address = unsafe { stack_frame_return_address(self) };
// The return address reported for ARM* frames is actually the
// instruction with heuristics from Breakpad applied already.
// To resolve the original return address value, compensate
// by adding the offsets applied in `StackwalkerARM::GetCallerFrame`
// and `StackwalkerARM64::GetCallerFrame`.
match arch.cpu_family() {
CpuFamily::Arm32 => address + 2,
CpuFamily::Arm64 => address + 4,
_ => address,
}
}
/// Returns the `CodeModule` that contains this frame's instruction.
pub fn module(&self) -> Option<&CodeModule> {
unsafe { stack_frame_module(self).as_ref() }
}
/// Returns how well the instruction pointer is trusted.
pub fn trust(&self) -> FrameTrust {
unsafe { stack_frame_trust(self) }
}
}
impl fmt::Debug for StackFrame {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("StackFrame")
.field("return_address", &self.return_address(Arch::Unknown))
.field("instruction", &self.instruction())
.field("trust", &self.trust())
.field("module", &self.module())
.finish()
}
}
/// Represents a thread of the `ProcessState` which holds a list of `StackFrame`s.
#[repr(C)]
pub struct CallStack(c_void);
impl CallStack {
/// Returns the thread identifier of this callstack.
pub fn thread_id(&self) -> u32 {
unsafe { call_stack_thread_id(self) }
}
/// Returns the list of `StackFrame`s in the call stack.
pub fn frames(&self) -> &[&StackFrame] {
unsafe {
let mut size = 0 as usize;
let data = call_stack_frames(self, &mut size);
let slice = slice::from_raw_parts(data, size);
mem::transmute(slice)
}
}
}
impl fmt::Debug for CallStack {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("CallStack")
.field("thread_id", &self.thread_id())
.field("frames", &self.frames())
.finish()
}
}
/// Information about the CPU and OS on which a minidump was generated.
#[repr(C)]
pub struct SystemInfo(c_void);
impl SystemInfo {
/// A string identifying the operating system, such as "Windows NT", "Mac OS X", or "Linux".
///
/// If the information is present in the dump but its value is unknown, this field will contain
/// a numeric value. If the information is not present in the dump, this field will be empty.
pub fn os_name(&self) -> String {
unsafe {
let ptr = system_info_os_name(self);
utils::ptr_to_string(ptr)
}
}
/// Strings identifying the version and build number of the operating system.
///
/// If the dump does not contain either information, the component will be empty. Tries to parse
/// the version number from the build if it is not apparent from the version string.
pub fn os_parts(&self) -> (String, String) {
let string = unsafe {
let ptr = system_info_os_version(self);
utils::ptr_to_string(ptr)
};
let mut parts = string.splitn(2, ' ');
let version = parts.next().unwrap_or("0.0.0");
let build = parts.next().unwrap_or("");
if version == "0.0.0" {
// Try to parse the Linux build string. Breakpad and Crashpad run
// `uname -srvmo` to generate it. This roughtly resembles:
// "Linux [version] [build...] [arch] Linux/GNU"
if let Some(captures) = LINUX_BUILD_RE.captures(&build) {
let version = captures.get(1).unwrap(); // uname -r portion
let build = captures.get(2).unwrap(); // uname -v portion
return (version.as_str().into(), build.as_str().into());
}
}
(version.into(), build.into())
}
/// A string identifying the version of the operating system.
///
/// The version will be formatted as three-component semantic version, such as "5.1.2600" or
/// "10.4.8". If the dump does not contain this information, this field will contain "0.0.0".
pub fn os_version(&self) -> String {
self.os_parts().0
}
/// A string identifying the build of the operating system.
///
/// This build version is platform dependent, such as "Service Pack 2" or "8L2127". If the dump
/// does not contain this information, this field will be empty.
pub fn os_build(&self) -> String {
self.os_parts().1
}
/// A string identifying the basic CPU family, such as "x86" or "ppc".
///
/// If this information is present in the dump but its value is unknown,
/// this field will contain a numeric value. If the information is not
/// present in the dump, this field will be empty.
pub fn cpu_family(&self) -> String {
unsafe {
let ptr = system_info_cpu_family(self);
utils::ptr_to_string(ptr)
}
}
/// The architecture of the CPU parsed from `ProcessState::cpu_family`.
///
/// If this information is present in the dump but its value is unknown
/// or if the value is missing, this field will contain `Arch::Unknown`.
pub fn cpu_arch(&self) -> Arch {
Arch::from_breakpad(&self.cpu_family()).unwrap_or_default()
}
/// A string further identifying the specific CPU.
///
/// This information depends on the CPU vendor, such as "GenuineIntel level 6 model 13 stepping
/// 8". If the information is not present in the dump, or additional identifying information is
/// not defined for the CPU family, this field will be empty.
pub fn cpu_info(&self) -> String {
unsafe {
let ptr = system_info_cpu_info(self);
utils::ptr_to_string(ptr)
}
}
/// The number of processors in the system.
///
/// Will be greater than one for multi-core systems.
pub fn cpu_count(&self) -> u32 {
unsafe { system_info_cpu_count(self) }
}
}
impl fmt::Debug for SystemInfo {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("SystemInfo")
.field("os_name", &self.os_name())
.field("os_version", &self.os_version())
.field("cpu_family", &self.cpu_family())
.field("cpu_info", &self.cpu_info())
.field("cpu_count", &self.cpu_count())
.finish()
}
}
/// Result of processing a Minidump or Microdump file.
///
/// Usually included in `ProcessError` when the file cannot be processed.
#[repr(u32)]
#[derive(Debug, Eq, PartialEq, Copy, Clone)]
pub enum ProcessResult {
/// The dump was processed successfully.
Ok,
/// The minidump file was not found or the buffer was empty.
MinidumpNotFound, |
/// The minidump file had no header. | random_line_split | |
level_2_optionals_cdsu.py | ('Versão', 'trendl'): 'trendline', ('Versão', 'conft'): 'confortline', ('Versão', 'highlin'): 'highline',
('Versão', 'confortine'): 'confortline', ('Versão', 'cofrtl'): 'confortline', ('Versão', 'confortlline'): 'confortline', ('Versão', 'highl'): 'highline', ('Modelo', 'up!'): 'up'}
control_prints(df, '1', head=1)
df = string_replacer(df, dict_strings_to_replace) # Replaces the strings mentioned in dict_strings_to_replace which are typos, useless information, etc
control_prints(df, '1b', head=1)
df.dropna(subset=['Cor', 'Colour_Ext_Code', 'Modelo', 'Interior'], axis=0, inplace=True) # Removes all remaining NA's
control_prints(df, '2')
df = new_column_creation(df, [x for x in level_2_optionals_cdsu_options.configuration_parameters_full if x != 'Modelo' and x != 'Combustível'], 0) # Creates new columns filled with zeros, which will be filled in the future
df = total_price(df) # Creates a new column with the total cost for each configuration;
control_prints(df, '3a', head=0)
df = remove_zero_price_total_vhe(df, project_id) # Removes VHE with a price total of 0; ToDo: keep checking up if this is still necessary
control_prints(df, '3b', head=0)
df = remove_rows(df, [df[df.Franchise_Code.str.contains('X')].index], project_id) # This removes VW Commercials Vehicles that aren't supposed to be in this model
df = remove_rows(df, [df[(df.Colour_Ext_Code == ' ') & (df.Cor == ' ')].index], project_id, warning=1)
control_prints(df, '3c')
df = options_scraping_v2(df, level_2_optionals_cdsu_options, 'Modelo') # Scrapes the optionals columns for information regarding the GPS, Auto Transmission, Posterior Parking Sensors, External and Internal colours, Model and Rim's Size
control_prints(df, '3d', head=1, null_analysis_flag=1)
df.loc[df['Combustível'].isin(['Elétrico', 'Híbrido']), 'Motor'] = 'N/A' # Defaults the value of motorization for electric/hybrid cars;
control_prints(df, '4', head=0, save=1)
# df = remove_rows(df, [df[df.Modelo.isnull()].index], project_id, warning=1)
df = remove_columns(df, ['Colour_Ext_Code'], project_id) # This column was only needed for some very specific cases where no Colour_Ext_Code was available;
df.to_csv('dbs/df_cdsu.csv', index=False)
control_prints(df, '5', head=0, save=1)
# project_units_count_checkup(df, 'Nº Stock', level_2_optionals_cdsu_options, sql_check=1)
df = color_replacement(df, level_2_optionals_cdsu_options.colors_to_replace_dict, project_id) # Translates all english colors to portuguese
control_prints(df, '6', head=0, save=1)
df = duplicate_removal(df, subset_col='Nº Stock') # Removes duplicate rows, based on the Stock number. This leaves one line per configuration;
control_prints(df, '7')
df = remove_columns(df, ['Cor', 'Interior', 'Opcional', 'Custo', 'Versão', 'Franchise_Code'], project_id) # Remove columns not needed atm;
# Will probably need to also remove: stock_days, stock_days_norm, and one of the scores
# df = remove_rows(df, [df.loc[df['Local da Venda'] == 'DCV - Viat.Toy Viseu', :].index], project_id) # Removes the vehicles sold here, as they are from another brand (Toyota)
df = margin_calculation(df) # Calculates the margin in percentage of the total price
control_prints(df, '8')
df = score_calculation(df, [level_2_optionals_cdsu_options.stock_days_threshold], level_2_optionals_cdsu_options.margin_threshold, level_2_optionals_cdsu_options.project_id) # Classifies the stockdays and margin based in their respective thresholds in tow classes (0 or 1) and then creates a new_score metric,
control_prints(df, '9')
# where only configurations with 1 in both dimension, have 1 as new_score
# df = new_column_creation(df, ['Local da Venda_v2'], df['Local da Venda'])
# control_prints(df, '10')
# cols_to_group_layer_2 = ['Local da Venda']
# mapping_dictionaries, _ = sql_mapping_retrieval(level_2_optionals_cdsu_options.DSN_MLG_PRD, level_2_optionals_cdsu_options.sql_info['database'], level_2_optionals_cdsu_options.sql_info['mappings_temp'], 'Mapped_Value', level_2_optionals_cdsu_options)
# df = sell_place_parametrization(df, 'Local da Venda', 'Local da Venda_Fase2', mapping_dictionaries[2], level_2_optionals_cdsu_options.project_id)
# df = col_group(df, cols_to_group_layer_2[0:2], mapping_dictionaries[0:2], project_id) # Based on the information provided by Manuel some entries were grouped as to remove small groups. The columns grouped are mentioned in cols_to_group, and their respective groups are shown in level_2_optionals_cdsu_options
control_prints(df, '9b, before new features', null_analysis_flag=1)
df = new_features(df, configuration_parameters, project_id) # Creates a series of new features, explained in the provided pdf
control_prints(df, '10, after new_features', null_analysis_flag=1)
# global_variables_saving(df, level_2_optionals_cdsu_options.project_id) # Small functions to save 2 specific global variables which will be needed later
log_record('Checkpoint B.1...', project_id)
# performance_info_append(time.time(), 'checkpoint_b1')
df = column_rename(df, list(level_2_optionals_cdsu_options.column_checkpoint_sql_renaming.keys()), list(level_2_optionals_cdsu_options.column_checkpoint_sql_renaming.values()))
# sql_inject(df, level_2_optionals_cdsu_options.DSN_MLG_PRD, level_2_optionals_cdsu_options.sql_info['database'], level_2_optionals_cdsu_options.sql_info['checkpoint_b_table'], level_2_optionals_cdsu_options, list(level_2_optionals_cdsu_options.column_checkpoint_sql_renaming.values()), truncate=1, check_date=1)
df = column_rename(df, list(level_2_optionals_cdsu_options.column_checkpoint_sql_renaming.values()), list(level_2_optionals_cdsu_options.column_checkpoint_sql_renaming.keys()))
df = remove_columns(df, ['Date'], project_id)
log_record('Fim Secção B.', project_id)
performance_info_append(time.time(), 'Section_B_End')
return df
def deployment(df, db, view):
performance_info_append(time.time(), 'Section_E_Start')
log_record('Início Secção E...', project_id)
if df is not None:
df['NLR_Code'] = level_2_optionals_cdsu_options.nlr_code
# df = column_rename(df, list(level_2_optionals_cdsu_options.column_sql_renaming.keys()), list(level_2_optionals_cdsu_options.column_sql_renaming.values()))
df = df.rename(columns=level_2_optionals_cdsu_options.column_sql_renaming)
control_prints(df, 'before deployment, after renaming', head=1)
sql_delete(level_2_optionals_cdsu_options.DSN_MLG_PRD, db, view, level_2_optionals_cdsu_options, {'NLR_Code': '{}'.format(level_2_optionals_cdsu_options.nlr_code)})
sql_inject(df, level_2_optionals_cdsu_options.DSN_MLG_PRD, db, view, level_2_optionals_cdsu_options, list(level_2_optionals_cdsu_options.column_checkpoint_sql_renaming.values()), check_date=1)
log_record('Fim Secção E.', project_id)
performance_info_append(time.time(), 'Section_E_End')
return
if __name__ == '__main__':
try:
main()
except Exception as exception:
| project_identifier, exception_desc = level_2_optionals_cdsu_options.project_id, str(sys.exc_info()[1])
log_record(exception_desc, project_identifier, flag=2)
error_upload(level_2_optionals_cdsu_options, project_identifier, format_exc(), exception_desc, error_flag=1)
log_record('Falhou - Projeto: {}.'.format(str(project_dict[project_identifier])), project_identifier)
| conditional_block | |
level_2_optionals_cdsu.py | is {}'.format(max(df['Sell_Date'])))
return
def data_processing(df):
performance_info_append(time.time(), 'Section_B_Start')
log_record('Início Secção B...', project_id)
log_record('Checkpoint não encontrado ou demasiado antigo. A processar dados...', project_id)
df = lowercase_column_conversion(df, ['Opcional', 'Cor', 'Interior', 'Versão']) # Lowercases the strings of these columns
dict_strings_to_replace = {('Modelo', ' - não utilizar'): '', ('Interior', '\\|'): '/', ('Cor', '\\|'): '', ('Interior', 'ind.'): '', ('Interior', '\\]'): '/', ('Interior', '\\.'): ' ', ('Interior', '\'merino\''): 'merino', ('Interior', '\' merino\''): 'merino', ('Interior', '\'vernasca\''): 'vernasca', ('Interior', 'leder'): 'leather',
('Interior', 'p '): 'pele', ('Interior', 'pelenevada'): 'pele nevada', ('Opcional', 'bi-xénon'): 'bixénon', ('Opcional', 'bi-xenon'): 'bixénon', ('Opcional', 'vidro'): 'vidros', ('Opcional', 'dacota'): 'dakota', ('Opcional', 'whites'): 'white', ('Opcional', 'beige'): 'bege', ('Interior', '\'dakota\''): 'dakota', ('Interior', 'dacota'): 'dakota',
('Interior', 'mokka'): 'mocha', ('Interior', 'beige'): 'bege', ('Interior', 'dakota\''): 'dakota', ('Interior', 'antracite/cinza/p'): 'antracite/cinza/preto', ('Interior', 'antracite/cinza/pretoreto'): 'antracite/cinza/preto', ('Interior', 'nevada\''): 'nevada',
('Interior', '"nappa"'): 'nappa', ('Interior', 'anthrazit'): 'antracite', ('Interior', 'antracito'): 'antracite', ('Interior', 'preto/laranja/preto/lara'): 'preto/laranja', ('Interior', 'anthtacite'): 'antracite',
('Interior', 'champag'): 'champagne', ('Interior', 'cri'): 'crimson', ('Modelo', 'Enter Model Details'): '', ('Registration_Number', '\.'): '', ('Interior', 'preto/m '): 'preto ', ('Interior', 'congnac/preto'): 'cognac/preto',
('Local da Venda', 'DCN'): 'DCP', ('Cor', 'oceanao'): 'oceano', ('Cor', 'ocenao'): 'oceano', ('Interior', 'reto'): 'preto', ('Cor', 'banco'): 'branco', ('Cor', 'catanho'): 'castanho', ('Cor', 'petrìleo'): 'petróleo', ('Interior', 'ecido'): 'tecido',
('Interior', 'ege'): 'bege', ('Interior', 'inza'): 'cinza', ('Interior', 'inzento'): 'cinzento', ('Interior', 'teciso'): 'tecido', ('Opcional', 'autmático'): 'automático', ('Opcional', 'esctacionamento'): 'estacionamento',
('Opcional', 'estacionamernto'): 'estacionamento', ('Opcional', 'pct'): 'pacote', ('Opcional', 'navegaçãp'): 'navegação', ('Opcional', '\\+'): '', ('Versão', 'bussiness'): 'business', ('Versão', 'r-line'): 'rline', ('Versão', 'confortl'): 'confortline',
('Versão', 'high'): 'highline', ('Opcional', 'p/dsg'): 'para dsg', ('Opcional', 'dianteirostraseiros'): 'dianteiros traseiros', ('Opcional', 'dianteirostras'): 'dianteiros traseiros', ('Opcional', 'diant'): 'dianteiros',
('Opcional', 'dttras'): 'dianteiros traseiros', ('Opcional', 'dttrpark'): 'dianteiros traseiros park', ('Opcional', 'dianttras'): 'dianteiros traseiros', ('Opcional', 'câmara'): 'camara', ('Opcional', 'camera'): 'camara',
('Opcional', 'câmera'): 'camara', ('Versão', 'trendtline'): 'trendline', ('Versão', 'trendtline'): 'trendline', ('Versão', 'confort'): 'confortline', ('Versão', 'conftl'): 'confortline', ('Versão', 'hightline'): 'highline', ('Versão', 'bluem'): 'bluemotion',
('Versão', 'bmt'): 'bluemotion', ('Versão', 'up!bluemotion'): 'up! bluemotion', ('Versão', 'up!bluem'): 'up! bluemotion', ('Versão', 'trendl'): 'trendline', ('Versão', 'conft'): 'confortline', ('Versão', 'highlin'): 'highline',
('Versão', 'confortine'): 'confortline', ('Versão', 'cofrtl'): 'confortline', ('Versão', 'confortlline'): 'confortline', ('Versão', 'highl'): 'highline', ('Modelo', 'up!'): 'up'}
control_prints(df, '1', head=1)
df = string_replacer(df, dict_strings_to_replace) # Replaces the strings mentioned in dict_strings_to_replace which are typos, useless information, etc
control_prints(df, '1b', head=1)
df.dropna(subset=['Cor', 'Colour_Ext_Code', 'Modelo', 'Interior'], axis=0, inplace=True) # Removes all remaining NA's
control_prints(df, '2')
df = new_column_creation(df, [x for x in level_2_optionals_cdsu_options.configuration_parameters_full if x != 'Modelo' and x != 'Combustível'], 0) # Creates new columns filled with zeros, which will be filled in the future
df = total_price(df) # Creates a new column with the total cost for each configuration;
control_prints(df, '3a', head=0)
df = remove_zero_price_total_vhe(df, project_id) # Removes VHE with a price total of 0; ToDo: keep checking up if this is still necessary
control_prints(df, '3b', head=0)
df = remove_rows(df, [df[df.Franchise_Code.str.contains('X')].index], project_id) # This removes VW Commercials Vehicles that aren't supposed to be in this model
df = remove_rows(df, [df[(df.Colour_Ext_Code == ' ') & (df.Cor == ' ')].index], project_id, warning=1)
control_prints(df, '3c')
df = options_scraping_v2(df, level_2_optionals_cdsu_options, 'Modelo') # Scrapes the optionals columns for information regarding the GPS, Auto Transmission, Posterior Parking Sensors, External and Internal colours, Model and Rim's Size
control_prints(df, '3d', head=1, null_analysis_flag=1)
df.loc[df['Combustível'].isin(['Elétrico', 'Híbrido']), 'Motor'] = 'N/A' # Defaults the value of motorization for electric/hybrid cars;
control_prints(df, '4', head=0, save=1)
# df = remove_rows(df, [df[df.Modelo.isnull()].index], project_id, warning=1)
df = remove_columns(df, ['Colour_Ext_Code'], project_id) # This column was only needed for some very specific cases where no Colour_Ext_Code was available;
df.to_csv('dbs/df_cdsu.csv', index=False)
control_prints(df, '5', head=0, save=1)
# project_units_count_checkup(df, 'Nº Stock', level_2_optionals_cdsu_options, sql_check=1)
df = color_replacement(df, level_2_optionals_cdsu_options.colors_to_replace_dict, project_id) # Translates all english colors to portuguese
control_prints(df, '6', head=0, save=1)
df = duplicate_removal(df, subset_col='Nº Stock') # Removes duplicate rows, based on the Stock number. This leaves one line per configuration;
control_prints(df, '7')
| df = remove_columns(df, ['Cor', 'Interior', 'Opcional', 'Custo', 'Versão', 'Franchise_Code'], project_id) # Remove columns not needed atm;
# Will probably need to also remove: stock_days, stock_days_norm, and one of the scores | random_line_split | |
level_2_optionals_cdsu.py | fortline', ('Versão', 'conftl'): 'confortline', ('Versão', 'hightline'): 'highline', ('Versão', 'bluem'): 'bluemotion',
('Versão', 'bmt'): 'bluemotion', ('Versão', 'up!bluemotion'): 'up! bluemotion', ('Versão', 'up!bluem'): 'up! bluemotion', ('Versão', 'trendl'): 'trendline', ('Versão', 'conft'): 'confortline', ('Versão', 'highlin'): 'highline',
('Versão', 'confortine'): 'confortline', ('Versão', 'cofrtl'): 'confortline', ('Versão', 'confortlline'): 'confortline', ('Versão', 'highl'): 'highline', ('Modelo', 'up!'): 'up'}
control_prints(df, '1', head=1)
df = string_replacer(df, dict_strings_to_replace) # Replaces the strings mentioned in dict_strings_to_replace which are typos, useless information, etc
control_prints(df, '1b', head=1)
df.dropna(subset=['Cor', 'Colour_Ext_Code', 'Modelo', 'Interior'], axis=0, inplace=True) # Removes all remaining NA's
control_prints(df, '2')
df = new_column_creation(df, [x for x in level_2_optionals_cdsu_options.configuration_parameters_full if x != 'Modelo' and x != 'Combustível'], 0) # Creates new columns filled with zeros, which will be filled in the future
df = total_price(df) # Creates a new column with the total cost for each configuration;
control_prints(df, '3a', head=0)
df = remove_zero_price_total_vhe(df, project_id) # Removes VHE with a price total of 0; ToDo: keep checking up if this is still necessary
control_prints(df, '3b', head=0)
df = remove_rows(df, [df[df.Franchise_Code.str.contains('X')].index], project_id) # This removes VW Commercials Vehicles that aren't supposed to be in this model
df = remove_rows(df, [df[(df.Colour_Ext_Code == ' ') & (df.Cor == ' ')].index], project_id, warning=1)
control_prints(df, '3c')
df = options_scraping_v2(df, level_2_optionals_cdsu_options, 'Modelo') # Scrapes the optionals columns for information regarding the GPS, Auto Transmission, Posterior Parking Sensors, External and Internal colours, Model and Rim's Size
control_prints(df, '3d', head=1, null_analysis_flag=1)
df.loc[df['Combustível'].isin(['Elétrico', 'Híbrido']), 'Motor'] = 'N/A' # Defaults the value of motorization for electric/hybrid cars;
control_prints(df, '4', head=0, save=1)
# df = remove_rows(df, [df[df.Modelo.isnull()].index], project_id, warning=1)
df = remove_columns(df, ['Colour_Ext_Code'], project_id) # This column was only needed for some very specific cases where no Colour_Ext_Code was available;
df.to_csv('dbs/df_cdsu.csv', index=False)
control_prints(df, '5', head=0, save=1)
# project_units_count_checkup(df, 'Nº Stock', level_2_optionals_cdsu_options, sql_check=1)
df = color_replacement(df, level_2_optionals_cdsu_options.colors_to_replace_dict, project_id) # Translates all english colors to portuguese
control_prints(df, '6', head=0, save=1)
df = duplicate_removal(df, subset_col='Nº Stock') # Removes duplicate rows, based on the Stock number. This leaves one line per configuration;
control_prints(df, '7')
df = remove_columns(df, ['Cor', 'Interior', 'Opcional', 'Custo', 'Versão', 'Franchise_Code'], project_id) # Remove columns not needed atm;
# Will probably need to also remove: stock_days, stock_days_norm, and one of the scores
# df = remove_rows(df, [df.loc[df['Local da Venda'] == 'DCV - Viat.Toy Viseu', :].index], project_id) # Removes the vehicles sold here, as they are from another brand (Toyota)
df = margin_calculation(df) # Calculates the margin in percentage of the total price
control_prints(df, '8')
df = score_calculation(df, [level_2_optionals_cdsu_options.stock_days_threshold], level_2_optionals_cdsu_options.margin_threshold, level_2_optionals_cdsu_options.project_id) # Classifies the stockdays and margin based in their respective thresholds in tow classes (0 or 1) and then creates a new_score metric,
control_prints(df, '9')
# where only configurations with 1 in both dimension, have 1 as new_score
# df = new_column_creation(df, ['Local da Venda_v2'], df['Local da Venda'])
# control_prints(df, '10')
# cols_to_group_layer_2 = ['Local da Venda']
# mapping_dictionaries, _ = sql_mapping_retrieval(level_2_optionals_cdsu_options.DSN_MLG_PRD, level_2_optionals_cdsu_options.sql_info['database'], level_2_optionals_cdsu_options.sql_info['mappings_temp'], 'Mapped_Value', level_2_optionals_cdsu_options)
# df = sell_place_parametrization(df, 'Local da Venda', 'Local da Venda_Fase2', mapping_dictionaries[2], level_2_optionals_cdsu_options.project_id)
# df = col_group(df, cols_to_group_layer_2[0:2], mapping_dictionaries[0:2], project_id) # Based on the information provided by Manuel some entries were grouped as to remove small groups. The columns grouped are mentioned in cols_to_group, and their respective groups are shown in level_2_optionals_cdsu_options
control_prints(df, '9b, before new features', null_analysis_flag=1)
df = new_features(df, configuration_parameters, project_id) # Creates a series of new features, explained in the provided pdf
control_prints(df, '10, after new_features', null_analysis_flag=1)
# global_variables_saving(df, level_2_optionals_cdsu_options.project_id) # Small functions to save 2 specific global variables which will be needed later
log_record('Checkpoint B.1...', project_id)
# performance_info_append(time.time(), 'checkpoint_b1')
df = column_rename(df, list(level_2_optionals_cdsu_options.column_checkpoint_sql_renaming.keys()), list(level_2_optionals_cdsu_options.column_checkpoint_sql_renaming.values()))
# sql_inject(df, level_2_optionals_cdsu_options.DSN_MLG_PRD, level_2_optionals_cdsu_options.sql_info['database'], level_2_optionals_cdsu_options.sql_info['checkpoint_b_table'], level_2_optionals_cdsu_options, list(level_2_optionals_cdsu_options.column_checkpoint_sql_renaming.values()), truncate=1, check_date=1)
df = column_rename(df, list(level_2_optionals_cdsu_options.column_checkpoint_sql_renaming.values()), list(level_2_optionals_cdsu_options.column_checkpoint_sql_renaming.keys()))
df = remove_columns(df, ['Date'], project_id)
log_record('Fim Secção B.', project_id)
performance_info_append(time.time(), 'Section_B_End')
return df
def deployment(df, db, view):
performance_info_append(time.time(), 'Section_E_Start')
| log_record('Início Secção E...', project_id)
if df is not None:
df['NLR_Code'] = level_2_optionals_cdsu_options.nlr_code
# df = column_rename(df, list(level_2_optionals_cdsu_options.column_sql_renaming.keys()), list(level_2_optionals_cdsu_options.column_sql_renaming.values()))
df = df.rename(columns=level_2_optionals_cdsu_options.column_sql_renaming)
control_prints(df, 'before deployment, after renaming', head=1)
sql_delete(level_2_optionals_cdsu_options.DSN_MLG_PRD, db, view, level_2_optionals_cdsu_options, {'NLR_Code': '{}'.format(level_2_optionals_cdsu_options.nlr_code)})
sql_inject(df, level_2_optionals_cdsu_options.DSN_MLG_PRD, db, view, level_2_optionals_cdsu_options, list(level_2_optionals_cdsu_options.column_checkpoint_sql_renaming.values()), check_date=1)
log_record('Fim Secção E.', project_id)
performance_info_append(time.time(), 'Section_E_End')
return
if __name__ == '__main__':
try:
main()
exce | identifier_body | |
level_2_optionals_cdsu.py | uery_filters):
performance_info_append(time.time(), 'Section_A_Start')
log_record('Início Secção A...', project_id)
df = sql_retrieve_df(level_2_optionals_cdsu_options.DSN_MLG_PRD, level_2_optionals_cdsu_options.sql_info['database'], level_2_optionals_cdsu_options.sql_info['initial_table'], level_2_optionals_cdsu_options, list(level_2_optionals_cdsu_options.sql_to_code_renaming.keys()), query_filters, column_renaming=1, parse_dates=['Purchase_Date', 'Sell_Date'])
# project_units_count_checkup(df, 'Nº Stock', level_2_optionals_cdsu_options, sql_check=0)
log_record('Fim Secção A.', project_id)
performance_info_append(time.time(), 'Section_A_End')
return df
def control_prints(df, tag, head=0, save=0, null_analysis_flag=0, date=0):
# print('{}\n{}'.format(tag, df.shape))
# try:
# print('Unique Vehicles: {}'.format(df['Nº Stock'].nunique()))
#
# except KeyError:
# print('Unique Vehicles: {}'.format(df['VHE_Number'].nunique()))
#
# if head:
# print(df.head())
# if save:
# df.to_csv('dbs/cdsu_control_save_tag_{}.csv'.format(tag))
# if null_analysis_flag:
# null_analysis(df)
# if date:
# try:
# print('Current Max Sell Date is {}'.format(max(df['Data Venda'])))
# except KeyError:
# print('Current Max Sell Date is {}'.format(max(df['Sell_Date'])))
return
def data_processing(df):
performance_info_append(time.time(), 'Section_B_Start')
log_record('Início Secção B...', project_id)
log_record('Checkpoint não encontrado ou demasiado antigo. A processar dados...', project_id)
df = lowercase_column_conversion(df, ['Opcional', 'Cor', 'Interior', 'Versão']) # Lowercases the strings of these columns
dict_strings_to_replace = {('Modelo', ' - não utilizar'): '', ('Interior', '\\|'): '/', ('Cor', '\\|'): '', ('Interior', 'ind.'): '', ('Interior', '\\]'): '/', ('Interior', '\\.'): ' ', ('Interior', '\'merino\''): 'merino', ('Interior', '\' merino\''): 'merino', ('Interior', '\'vernasca\''): 'vernasca', ('Interior', 'leder'): 'leather',
('Interior', 'p '): 'pele', ('Interior', 'pelenevada'): 'pele nevada', ('Opcional', 'bi-xénon'): 'bixénon', ('Opcional', 'bi-xenon'): 'bixénon', ('Opcional', 'vidro'): 'vidros', ('Opcional', 'dacota'): 'dakota', ('Opcional', 'whites'): 'white', ('Opcional', 'beige'): 'bege', ('Interior', '\'dakota\''): 'dakota', ('Interior', 'dacota'): 'dakota',
('Interior', 'mokka'): 'mocha', ('Interior', 'beige'): 'bege', ('Interior', 'dakota\''): 'dakota', ('Interior', 'antracite/cinza/p'): 'antracite/cinza/preto', ('Interior', 'antracite/cinza/pretoreto'): 'antracite/cinza/preto', ('Interior', 'nevada\''): 'nevada',
('Interior', '"nappa"'): 'nappa', ('Interior', 'anthrazit'): 'antracite', ('Interior', 'antracito'): 'antracite', ('Interior', 'preto/laranja/preto/lara'): 'preto/laranja', ('Interior', 'anthtacite'): 'antracite',
('Interior', 'champag'): 'champagne', ('Interior', 'cri'): 'crimson', ('Modelo', 'Enter Model Details'): '', ('Registration_Number', '\.'): '', ('Interior', 'preto/m '): 'preto ', ('Interior', 'congnac/preto'): 'cognac/preto',
('Local da Venda', 'DCN'): 'DCP', ('Cor', 'oceanao'): 'oceano', ('Cor', 'ocenao'): 'oceano', ('Interior', 'reto'): 'preto', ('Cor', 'banco'): 'branco', ('Cor', 'catanho'): 'castanho', ('Cor', 'petrìleo'): 'petróleo', ('Interior', 'ecido'): 'tecido',
('Interior', 'ege'): 'bege', ('Interior', 'inza'): 'cinza', ('Interior', 'inzento'): 'cinzento', ('Interior', 'teciso'): 'tecido', ('Opcional', 'autmático'): 'automático', ('Opcional', 'esctacionamento'): 'estacionamento',
('Opcional', 'estacionamernto'): 'estacionamento', ('Opcional', 'pct'): 'pacote', ('Opcional', 'navegaçãp'): 'navegação', ('Opcional', '\\+'): '', ('Versão', 'bussiness'): 'business', ('Versão', 'r-line'): 'rline', ('Versão', 'confortl'): 'confortline',
('Versão', 'high'): 'highline', ('Opcional', 'p/dsg'): 'para dsg', ('Opcional', 'dianteirostraseiros'): 'dianteiros traseiros', ('Opcional', 'dianteirostras'): 'dianteiros traseiros', ('Opcional', 'diant'): 'dianteiros',
('Opcional', 'dttras'): 'dianteiros traseiros', ('Opcional', 'dttrpark'): 'dianteiros traseiros park', ('Opcional', 'dianttras'): 'dianteiros traseiros', ('Opcional', 'câmara'): 'camara', ('Opcional', 'camera'): 'camara',
('Opcional', 'câmera'): 'camara', ('Versão', 'trendtline'): 'trendline', ('Versão', 'trendtline'): 'trendline', ('Versão', 'confort'): 'confortline', ('Versão', 'conftl'): 'confortline', ('Versão', 'hightline'): 'highline', ('Versão', 'bluem'): 'bluemotion',
('Versão', 'bmt'): 'bluemotion', ('Versão', 'up!bluemotion'): 'up! bluemotion', ('Versão', 'up!bluem'): 'up! bluemotion', ('Versão', 'trendl'): 'trendline', ('Versão', 'conft'): 'confortline', ('Versão', 'highlin'): 'highline',
('Versão', 'confortine'): 'confortline', ('Versão', 'cofrtl'): 'confortline', ('Versão', 'confortlline'): 'confortline', ('Versão', 'highl'): 'highline', ('Modelo', 'up!'): 'up'}
control_prints(df, '1', head=1)
df = string_replacer(df, dict_strings_to_replace) # Replaces the strings mentioned in dict_strings_to_replace which are typos, useless information, etc
control_prints(df, '1b', head=1)
df.dropna(subset=['Cor', 'Colour_Ext_Code', 'Modelo', 'Interior'], axis=0, inplace=True) # Removes all remaining NA's
control_prints(df, '2')
df = new_column_creation(df, [x for x in level_2_optionals_cdsu_options.configuration_parameters_full if x != 'Modelo' and x != 'Combustível'], 0) # Creates new columns filled with zeros, which will be filled in the future
df = total_price(df) # Creates a new column with the total cost for each configuration;
control_prints(df, '3a', head=0)
df = remove_zero_price_total_vhe(df, project_id) # Removes VHE with a price total of 0; ToDo: keep checking up if this is still necessary
control_prints(df, '3b', head=0)
df = remove_rows(df, [df[df.Franchise_Code.str.contains('X')].index], project_id) # This removes VW Commercials Vehicles that aren't supposed to be in this model
df = remove_rows(df, [df[(df.Colour_Ext_Code == ' ') & (df.Cor == ' ')].index], project_id, warning=1)
control_prints(df, '3c')
df = options_scraping_v2(df, level_2_optionals_cdsu_options, 'Modelo') # Scrapes the optionals columns for information regarding the GPS, Auto Transmission, Posterior Parking | ta_acquisition(q | identifier_name | |
util.js | var a = "",
b = this.indexOf(e);
if (b != -1) {
b += e.length;
var d = this.indexOf(c, b);
if (d != -1) {
a = this.substr(b, d - b)
}
}
return a
};
StringUtils.capitalize = function(e, c) {
e = StringUtils.trimLeft(e);
return c === true ? e.replace(/^.|\s+(.)/, StringUtils._upperCase) : e.replace(/(^\w)/, StringUtils._upperCase)
};
String.prototype.capitalize = function() {
return StringUtils.capitalize(this)
};
String.prototype.ljust = function(e, c) {
if (c == null) {
c = " "
}
var a = c.substr(0, 1);
return this.length < e ? this + this.repeat(e - this.length, a) : this
};
String.prototype.rjust = function(e, c) {
if (c == null) {
c = " "
}
var a = c.substr(0, 1);
return this.length < e ? this.repeat(e - this.length, a) + this : this
};
String.prototype.center = function(e, c) {
if (c == null) {
c = " "
}
var a = c.substr(0, 1);
if (this.length < e) {
var b = e - this.length,
d = b % 2 == 0 ? "" : a;
a = this.repeat(Math.round(b / 2), a);
return a + this + a + d
} else {
return this
}
};
String.prototype.repeat = function(e, c) {
if (isNaN(e)) {
e = 1
}
for (var a = ""; e--;) {
a += c || this
}
return a
};
String.prototype.base64Encode = function() {
for (var e = "", c = 0, a = this.length; c < a;) {
var b = this.charCodeAt(c++) & 255;
if (c == a) {
e += "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/".charAt(b >> 2) + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/".charAt((b & 3) << 4) + "==";
break
}
var d = this.charCodeAt(c++);
if (c == a) {
e += "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/".charAt(b >> 2) + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/".charAt((b & 3) << 4 | (d & 240) >> 4) + "=";
break
}
var g = this.charCodeAt(c++);
e += "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/".charAt(b >> 2) + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/".charAt((b & 3) << 4 | (d & 240) >> 4) + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/".charAt((d & 15) << 2 | (g & 192) >> 6) + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/".charAt(g & 63)
}
return e
};
String.prototype.contains = function(e) {
return this.indexOf(e) != -1
};
StringUtils.editDistance = function(e, c) {
if (e == null) {
e = ""
}
if (c == null) {
c = ""
}
if (e == c) {
return 0
}
var a = [],
b, d = e.length,
g = c.length;
if (d == 0) {
return g
}
if (g == 0) {
return d
}
for (var m = 0; m <= d; m++) {
a[m] = []
}
for (m = 0; m <= d; m++) {
a[m][0] = m
}
for (m = 0; m <= g; m++) {
a[0][m] = m
}
for (m = 1; m <= d; m++) {
for (var q = e.charAt(m - 1), s = 1; s <= g; s++) {
b = c.charAt(s - 1);
b = q == b ? 0 : 1;
a[m][s] = Math.min(a[m - 1][s] + 1, a[m][s - 1] + 1, a[m - 1][s - 1] + b)
}
}
return a[d][g]
};
String.prototype.editDistance = function(e) {
return StringUtils.editDistance(this, e)
};
String.prototype.endsWith = function(e) {
return RegExp(e + "$").test(this)
};
String.prototype.hasText = function() {
return !!this.removeExtraWhitespace().length
};
String.prototype.isEmpty = function() {
return !this.length
};
String.prototype.isNumeric = function() {
return /^[-+]?\d*\.?\d+(?:[eE][-+]?\d+)?$/.test(this)
};
String.prototype.padLeft = function(e, c) {
for (var a = this; a.length < c;) {
a = e + a
}
return a
};
String.prototype.padRight = function(e, c) {
for (var a = this; a.length < c;) {
a += e
}
return a
};
String.prototype.properCase = function() {
return this.toLowerCase().replace(/\b([^.?;!]+)/, StringUtils.capitalize).replace(/\b[i]\b/, "I")
};
String.prototype.quote = function() {
return '"' + this.replace(/[\\"\r\n]/g, this._quote) + '"'
};
String.prototype.remove = function(e, c) {
if (c === null) {
c = true
}
var a = StringUtils.escapePattern(e);
return this.replace(RegExp(a, !c ? "ig" : "g"), "")
};
String.prototype.removeExtraWhitespace = function() {
return this.trim(this).replace(/\s+/g, " ")
};
String.prototype.reverse = function() {
return this.split("").reverse().join("")
};
String.prototype.reverseWords = function() {
return this.split(/\s+/).reverse().join(" ")
};
String.prototype.similarity = function(e) {
var c = StringUtils.editDistance(this, e);
e = Math.max(this.length, e.length);
return e == 0 ? 1 : 1 - c / e
};
String.prototype.stripTags = function() {
return this.replace(/<\/?[^>]+>/igm, "")
};
String.prototype.supplant = function() {
var e = this;
if (arguments[0] instanceof Object) {
for (var c in arguments[0]) {
e = e.replace(RegExp("\\{" + c + "\\}", "g"), arguments[0][c])
}
} else {
c = arguments.length;
for (var a = 0; a < c; a++) {
e = e.replace(RegExp("\\{" + a + "\\}", "g"), arguments[a])
}
}
return e
};
String.prototype.swapCase = function() {
return this.replace(/(\w)/, StringUtils._swapCase)
};
String.prototype.trim = function() {
return this.replace(/^\s+|\s+$/g, "")
};
StringUtils.trimLeft = function(e) {
return e.replace(/^\s+/, "")
};
String.prototype.trimLeft = function() {
return StringUtils.trimLeft(this)
};
StringUtils.trimRight = function(e) {
return e.replace(/\s+$/, "")
};
String.prototype.trimRight = function() {
return StringUtils.trimLeft(this)
};
String.prototype.truncate = function(e, c) {
if (c == null) {
c = "..."
}
if (e == 0) {
e = this.length
}
e -= c.length;
var a = this;
if (a.length > e) {
a = a.substr(0, e);
if (/[^\s]/.test(a.charAt(e))) {
a = StringUtils.trimRight(a.replace(/\w+$|\s+$/, ""))
}
a += c
}
return a
};
String.prototype.wordCount = function() {
return this.match(/\b\w+\b/g).length
};
StringUtils.escapePattern = function(e) {
return e.replace(/(\]|\[|\{|\}|\(|\)|\*|\+|\?|\.|\\)/g, "\\$1")
};
StringUtils.prototype._quote = function() {
switch (this) {
case "\\":
return "\\\\";
case "\r":
return "\\r";
case "\n":
return "\\n";
case '"':
return '\\"'
}
return null
};
StringUtils._upperCase = function(e) {
return e.toUpperCase()
};
StringUtils._swapCase = function(e) {
var c = e.toLowerCase(),
a = e.toUpperCase();
switch (e) {
case c:
| }
return this.substr(0, e)
};
String.prototype.between = function(e, c) { | random_line_split | |
util.js | if (c == null) {
c = " "
}
var a = c.substr(0, 1);
return this.length < e ? this + this.repeat(e - this.length, a) : this
};
String.prototype.rjust = function(e, c) {
if (c == null) {
c = " "
}
var a = c.substr(0, 1);
return this.length < e ? this.repeat(e - this.length, a) + this : this
};
String.prototype.center = function(e, c) {
if (c == null) {
c = " "
}
var a = c.substr(0, 1);
if (this.length < e) {
var b = e - this.length,
d = b % 2 == 0 ? "" : a;
a = this.repeat(Math.round(b / 2), a);
return a + this + a + d
} else {
return this
}
};
String.prototype.repeat = function(e, c) {
if (isNaN(e)) {
e = 1
}
for (var a = ""; e--;) {
a += c || this
}
return a
};
String.prototype.base64Encode = function() {
for (var e = "", c = 0, a = this.length; c < a;) {
var b = this.charCodeAt(c++) & 255;
if (c == a) {
e += "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/".charAt(b >> 2) + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/".charAt((b & 3) << 4) + "==";
break
}
var d = this.charCodeAt(c++);
if (c == a) {
e += "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/".charAt(b >> 2) + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/".charAt((b & 3) << 4 | (d & 240) >> 4) + "=";
break
}
var g = this.charCodeAt(c++);
e += "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/".charAt(b >> 2) + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/".charAt((b & 3) << 4 | (d & 240) >> 4) + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/".charAt((d & 15) << 2 | (g & 192) >> 6) + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/".charAt(g & 63)
}
return e
};
String.prototype.contains = function(e) {
return this.indexOf(e) != -1
};
StringUtils.editDistance = function(e, c) {
if (e == null) {
e = ""
}
if (c == null) {
c = ""
}
if (e == c) {
return 0
}
var a = [],
b, d = e.length,
g = c.length;
if (d == 0) {
return g
}
if (g == 0) {
return d
}
for (var m = 0; m <= d; m++) {
a[m] = []
}
for (m = 0; m <= d; m++) |
for (m = 0; m <= g; m++) {
a[0][m] = m
}
for (m = 1; m <= d; m++) {
for (var q = e.charAt(m - 1), s = 1; s <= g; s++) {
b = c.charAt(s - 1);
b = q == b ? 0 : 1;
a[m][s] = Math.min(a[m - 1][s] + 1, a[m][s - 1] + 1, a[m - 1][s - 1] + b)
}
}
return a[d][g]
};
String.prototype.editDistance = function(e) {
return StringUtils.editDistance(this, e)
};
String.prototype.endsWith = function(e) {
return RegExp(e + "$").test(this)
};
String.prototype.hasText = function() {
return !!this.removeExtraWhitespace().length
};
String.prototype.isEmpty = function() {
return !this.length
};
String.prototype.isNumeric = function() {
return /^[-+]?\d*\.?\d+(?:[eE][-+]?\d+)?$/.test(this)
};
String.prototype.padLeft = function(e, c) {
for (var a = this; a.length < c;) {
a = e + a
}
return a
};
String.prototype.padRight = function(e, c) {
for (var a = this; a.length < c;) {
a += e
}
return a
};
String.prototype.properCase = function() {
return this.toLowerCase().replace(/\b([^.?;!]+)/, StringUtils.capitalize).replace(/\b[i]\b/, "I")
};
String.prototype.quote = function() {
return '"' + this.replace(/[\\"\r\n]/g, this._quote) + '"'
};
String.prototype.remove = function(e, c) {
if (c === null) {
c = true
}
var a = StringUtils.escapePattern(e);
return this.replace(RegExp(a, !c ? "ig" : "g"), "")
};
String.prototype.removeExtraWhitespace = function() {
return this.trim(this).replace(/\s+/g, " ")
};
String.prototype.reverse = function() {
return this.split("").reverse().join("")
};
String.prototype.reverseWords = function() {
return this.split(/\s+/).reverse().join(" ")
};
String.prototype.similarity = function(e) {
var c = StringUtils.editDistance(this, e);
e = Math.max(this.length, e.length);
return e == 0 ? 1 : 1 - c / e
};
String.prototype.stripTags = function() {
return this.replace(/<\/?[^>]+>/igm, "")
};
String.prototype.supplant = function() {
var e = this;
if (arguments[0] instanceof Object) {
for (var c in arguments[0]) {
e = e.replace(RegExp("\\{" + c + "\\}", "g"), arguments[0][c])
}
} else {
c = arguments.length;
for (var a = 0; a < c; a++) {
e = e.replace(RegExp("\\{" + a + "\\}", "g"), arguments[a])
}
}
return e
};
String.prototype.swapCase = function() {
return this.replace(/(\w)/, StringUtils._swapCase)
};
String.prototype.trim = function() {
return this.replace(/^\s+|\s+$/g, "")
};
StringUtils.trimLeft = function(e) {
return e.replace(/^\s+/, "")
};
String.prototype.trimLeft = function() {
return StringUtils.trimLeft(this)
};
StringUtils.trimRight = function(e) {
return e.replace(/\s+$/, "")
};
String.prototype.trimRight = function() {
return StringUtils.trimLeft(this)
};
String.prototype.truncate = function(e, c) {
if (c == null) {
c = "..."
}
if (e == 0) {
e = this.length
}
e -= c.length;
var a = this;
if (a.length > e) {
a = a.substr(0, e);
if (/[^\s]/.test(a.charAt(e))) {
a = StringUtils.trimRight(a.replace(/\w+$|\s+$/, ""))
}
a += c
}
return a
};
String.prototype.wordCount = function() {
return this.match(/\b\w+\b/g).length
};
StringUtils.escapePattern = function(e) {
return e.replace(/(\]|\[|\{|\}|\(|\)|\*|\+|\?|\.|\\)/g, "\\$1")
};
StringUtils.prototype._quote = function() {
switch (this) {
case "\\":
return "\\\\";
case "\r":
return "\\r";
case "\n":
return "\\n";
case '"':
return '\\"'
}
return null
};
StringUtils._upperCase = function(e) {
return e.toUpperCase()
};
StringUtils._swapCase = function(e) {
var c = e.toLowerCase(),
a = e.toUpperCase();
switch (e) {
case c:
return a;
case a:
return c;
default:
return e
}
};
function Rnd() {
throw Error("Rnd is static and cannot be instantiated.");
}
Rnd.randFloat = function(e, c) {
if (isNaN(c)) {
c = e;
e = 0
}
return Math.random() * (c - e) + e
};
Rnd.randBoolean = function(e) {
if (isNaN(e)) {
e = 0.5
}
return Math.random() < e
};
Rnd.randSign = function(e) {
if (isNaN(e)) {
e = 0.5
}
return Math.random() < e ? 1 : -1
};
Rnd.randBit = function(e) | {
a[m][0] = m
} | conditional_block |
util.js | ] = []
}
for (m = 0; m <= d; m++) {
a[m][0] = m
}
for (m = 0; m <= g; m++) {
a[0][m] = m
}
for (m = 1; m <= d; m++) {
for (var q = e.charAt(m - 1), s = 1; s <= g; s++) {
b = c.charAt(s - 1);
b = q == b ? 0 : 1;
a[m][s] = Math.min(a[m - 1][s] + 1, a[m][s - 1] + 1, a[m - 1][s - 1] + b)
}
}
return a[d][g]
};
String.prototype.editDistance = function(e) {
return StringUtils.editDistance(this, e)
};
String.prototype.endsWith = function(e) {
return RegExp(e + "$").test(this)
};
String.prototype.hasText = function() {
return !!this.removeExtraWhitespace().length
};
String.prototype.isEmpty = function() {
return !this.length
};
String.prototype.isNumeric = function() {
return /^[-+]?\d*\.?\d+(?:[eE][-+]?\d+)?$/.test(this)
};
String.prototype.padLeft = function(e, c) {
for (var a = this; a.length < c;) {
a = e + a
}
return a
};
String.prototype.padRight = function(e, c) {
for (var a = this; a.length < c;) {
a += e
}
return a
};
String.prototype.properCase = function() {
return this.toLowerCase().replace(/\b([^.?;!]+)/, StringUtils.capitalize).replace(/\b[i]\b/, "I")
};
String.prototype.quote = function() {
return '"' + this.replace(/[\\"\r\n]/g, this._quote) + '"'
};
String.prototype.remove = function(e, c) {
if (c === null) {
c = true
}
var a = StringUtils.escapePattern(e);
return this.replace(RegExp(a, !c ? "ig" : "g"), "")
};
String.prototype.removeExtraWhitespace = function() {
return this.trim(this).replace(/\s+/g, " ")
};
String.prototype.reverse = function() {
return this.split("").reverse().join("")
};
String.prototype.reverseWords = function() {
return this.split(/\s+/).reverse().join(" ")
};
String.prototype.similarity = function(e) {
var c = StringUtils.editDistance(this, e);
e = Math.max(this.length, e.length);
return e == 0 ? 1 : 1 - c / e
};
String.prototype.stripTags = function() {
return this.replace(/<\/?[^>]+>/igm, "")
};
String.prototype.supplant = function() {
var e = this;
if (arguments[0] instanceof Object) {
for (var c in arguments[0]) {
e = e.replace(RegExp("\\{" + c + "\\}", "g"), arguments[0][c])
}
} else {
c = arguments.length;
for (var a = 0; a < c; a++) {
e = e.replace(RegExp("\\{" + a + "\\}", "g"), arguments[a])
}
}
return e
};
String.prototype.swapCase = function() {
return this.replace(/(\w)/, StringUtils._swapCase)
};
String.prototype.trim = function() {
return this.replace(/^\s+|\s+$/g, "")
};
StringUtils.trimLeft = function(e) {
return e.replace(/^\s+/, "")
};
String.prototype.trimLeft = function() {
return StringUtils.trimLeft(this)
};
StringUtils.trimRight = function(e) {
return e.replace(/\s+$/, "")
};
String.prototype.trimRight = function() {
return StringUtils.trimLeft(this)
};
String.prototype.truncate = function(e, c) {
if (c == null) {
c = "..."
}
if (e == 0) {
e = this.length
}
e -= c.length;
var a = this;
if (a.length > e) {
a = a.substr(0, e);
if (/[^\s]/.test(a.charAt(e))) {
a = StringUtils.trimRight(a.replace(/\w+$|\s+$/, ""))
}
a += c
}
return a
};
String.prototype.wordCount = function() {
return this.match(/\b\w+\b/g).length
};
StringUtils.escapePattern = function(e) {
return e.replace(/(\]|\[|\{|\}|\(|\)|\*|\+|\?|\.|\\)/g, "\\$1")
};
StringUtils.prototype._quote = function() {
switch (this) {
case "\\":
return "\\\\";
case "\r":
return "\\r";
case "\n":
return "\\n";
case '"':
return '\\"'
}
return null
};
StringUtils._upperCase = function(e) {
return e.toUpperCase()
};
StringUtils._swapCase = function(e) {
var c = e.toLowerCase(),
a = e.toUpperCase();
switch (e) {
case c:
return a;
case a:
return c;
default:
return e
}
};
function Rnd() {
throw Error("Rnd is static and cannot be instantiated.");
}
Rnd.randFloat = function(e, c) {
if (isNaN(c)) {
c = e;
e = 0
}
return Math.random() * (c - e) + e
};
Rnd.randBoolean = function(e) {
if (isNaN(e)) {
e = 0.5
}
return Math.random() < e
};
Rnd.randSign = function(e) {
if (isNaN(e)) {
e = 0.5
}
return Math.random() < e ? 1 : -1
};
Rnd.randBit = function(e) {
if (isNaN(e)) {
e = 0.5
}
return Math.random() < e ? 1 : 0
};
Rnd.randInteger = function(e, c) {
if (isNaN(c)) {
c = e;
e = 0
}
return Math.floor(Rnd.randFloat(e, c))
};
Number.prototype.floor = function() {
return this | 0
};
Number.prototype.round = function() {
return this + 0.5 | 0
};
Number.RADIANS = 180 / Math.PI;
Number.prototype.fromRadians = function() {
return this * Number.RADIANS
};
Number.prototype.toRadians = function() {
return this / Number.RADIANS
};
Array.prototype.randomSort = function() {
var e = this.length;
if (e == 0) {
return false
}
for (; e--;) {
var c = Math.random() * (e + 1) | 0,
a = this[e];
this[e] = this[c];
this[c] = a
}
return this
};
Array.prototype.findRandom = function() {
if (this.length == 1) {
return this[0]
}
return this[Math.random() * this.length | 0]
};
Array.prototype.removeRandom = function() {
return this.splice(Math.random() * this.length | 0, 1)[0]
};
Array.prototype.removeItem = function(e) {
for (var c = 0, a = this.length; c < a; c++) {
if (e == this[c]) {
this.splice(c, 1);
return true
}
}
return false
};
Array.prototype.sum = function() {
for (var e = 0, c = 0, a = this.length; c < a; c++) {
e += this[c]
}
return e
};
Object.prototype.formatToString = function() {
if (arguments == null) {
return "[Object object]"
}
for (var e = [], c = 0, a = arguments.length; c < a; c++) {
var b = arguments[c],
d = this[b];
if (!isNaN(d) && d << 0 != d) {
d = d.toFixed(2)
}
e.push(b + ":" + d)
}
return "[" + e.join(", ") + "]"
};
Number.prototype.commaDelimit = function() {
var e = String(this),
c = e.length % 3,
a = Math.floor(e.length / 3);
if (a > 0) {
for (var b = [], d = 0; d < a; d++) {
var g = d * 3 + c;
d == 0 && c > 0 && b.push(e.substr(0, c));
b.push(e.substr(g, 3))
}
e = b.join(",")
}
return e
};
Number.prototype.getOrdinal = function(e) {
e = e == true ? this.commaDelimit() : this;
switch (this % 10) {
case 1:
return e + "st";
case 2:
return e + "nd";
case 3:
return e + "rd";
default:
return e + "th"
}
};
function getTimer() | {
return (new Date).getTime()
} | identifier_body | |
util.js | if (c == null) {
c = " "
}
var a = c.substr(0, 1);
return this.length < e ? this + this.repeat(e - this.length, a) : this
};
String.prototype.rjust = function(e, c) {
if (c == null) {
c = " "
}
var a = c.substr(0, 1);
return this.length < e ? this.repeat(e - this.length, a) + this : this
};
String.prototype.center = function(e, c) {
if (c == null) {
c = " "
}
var a = c.substr(0, 1);
if (this.length < e) {
var b = e - this.length,
d = b % 2 == 0 ? "" : a;
a = this.repeat(Math.round(b / 2), a);
return a + this + a + d
} else {
return this
}
};
String.prototype.repeat = function(e, c) {
if (isNaN(e)) {
e = 1
}
for (var a = ""; e--;) {
a += c || this
}
return a
};
String.prototype.base64Encode = function() {
for (var e = "", c = 0, a = this.length; c < a;) {
var b = this.charCodeAt(c++) & 255;
if (c == a) {
e += "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/".charAt(b >> 2) + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/".charAt((b & 3) << 4) + "==";
break
}
var d = this.charCodeAt(c++);
if (c == a) {
e += "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/".charAt(b >> 2) + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/".charAt((b & 3) << 4 | (d & 240) >> 4) + "=";
break
}
var g = this.charCodeAt(c++);
e += "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/".charAt(b >> 2) + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/".charAt((b & 3) << 4 | (d & 240) >> 4) + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/".charAt((d & 15) << 2 | (g & 192) >> 6) + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/".charAt(g & 63)
}
return e
};
String.prototype.contains = function(e) {
return this.indexOf(e) != -1
};
StringUtils.editDistance = function(e, c) {
if (e == null) {
e = ""
}
if (c == null) {
c = ""
}
if (e == c) {
return 0
}
var a = [],
b, d = e.length,
g = c.length;
if (d == 0) {
return g
}
if (g == 0) {
return d
}
for (var m = 0; m <= d; m++) {
a[m] = []
}
for (m = 0; m <= d; m++) {
a[m][0] = m
}
for (m = 0; m <= g; m++) {
a[0][m] = m
}
for (m = 1; m <= d; m++) {
for (var q = e.charAt(m - 1), s = 1; s <= g; s++) {
b = c.charAt(s - 1);
b = q == b ? 0 : 1;
a[m][s] = Math.min(a[m - 1][s] + 1, a[m][s - 1] + 1, a[m - 1][s - 1] + b)
}
}
return a[d][g]
};
String.prototype.editDistance = function(e) {
return StringUtils.editDistance(this, e)
};
String.prototype.endsWith = function(e) {
return RegExp(e + "$").test(this)
};
String.prototype.hasText = function() {
return !!this.removeExtraWhitespace().length
};
String.prototype.isEmpty = function() {
return !this.length
};
String.prototype.isNumeric = function() {
return /^[-+]?\d*\.?\d+(?:[eE][-+]?\d+)?$/.test(this)
};
String.prototype.padLeft = function(e, c) {
for (var a = this; a.length < c;) {
a = e + a
}
return a
};
String.prototype.padRight = function(e, c) {
for (var a = this; a.length < c;) {
a += e
}
return a
};
String.prototype.properCase = function() {
return this.toLowerCase().replace(/\b([^.?;!]+)/, StringUtils.capitalize).replace(/\b[i]\b/, "I")
};
String.prototype.quote = function() {
return '"' + this.replace(/[\\"\r\n]/g, this._quote) + '"'
};
String.prototype.remove = function(e, c) {
if (c === null) {
c = true
}
var a = StringUtils.escapePattern(e);
return this.replace(RegExp(a, !c ? "ig" : "g"), "")
};
String.prototype.removeExtraWhitespace = function() {
return this.trim(this).replace(/\s+/g, " ")
};
String.prototype.reverse = function() {
return this.split("").reverse().join("")
};
String.prototype.reverseWords = function() {
return this.split(/\s+/).reverse().join(" ")
};
String.prototype.similarity = function(e) {
var c = StringUtils.editDistance(this, e);
e = Math.max(this.length, e.length);
return e == 0 ? 1 : 1 - c / e
};
String.prototype.stripTags = function() {
return this.replace(/<\/?[^>]+>/igm, "")
};
String.prototype.supplant = function() {
var e = this;
if (arguments[0] instanceof Object) {
for (var c in arguments[0]) {
e = e.replace(RegExp("\\{" + c + "\\}", "g"), arguments[0][c])
}
} else {
c = arguments.length;
for (var a = 0; a < c; a++) {
e = e.replace(RegExp("\\{" + a + "\\}", "g"), arguments[a])
}
}
return e
};
String.prototype.swapCase = function() {
return this.replace(/(\w)/, StringUtils._swapCase)
};
String.prototype.trim = function() {
return this.replace(/^\s+|\s+$/g, "")
};
StringUtils.trimLeft = function(e) {
return e.replace(/^\s+/, "")
};
String.prototype.trimLeft = function() {
return StringUtils.trimLeft(this)
};
StringUtils.trimRight = function(e) {
return e.replace(/\s+$/, "")
};
String.prototype.trimRight = function() {
return StringUtils.trimLeft(this)
};
String.prototype.truncate = function(e, c) {
if (c == null) {
c = "..."
}
if (e == 0) {
e = this.length
}
e -= c.length;
var a = this;
if (a.length > e) {
a = a.substr(0, e);
if (/[^\s]/.test(a.charAt(e))) {
a = StringUtils.trimRight(a.replace(/\w+$|\s+$/, ""))
}
a += c
}
return a
};
String.prototype.wordCount = function() {
return this.match(/\b\w+\b/g).length
};
StringUtils.escapePattern = function(e) {
return e.replace(/(\]|\[|\{|\}|\(|\)|\*|\+|\?|\.|\\)/g, "\\$1")
};
StringUtils.prototype._quote = function() {
switch (this) {
case "\\":
return "\\\\";
case "\r":
return "\\r";
case "\n":
return "\\n";
case '"':
return '\\"'
}
return null
};
StringUtils._upperCase = function(e) {
return e.toUpperCase()
};
StringUtils._swapCase = function(e) {
var c = e.toLowerCase(),
a = e.toUpperCase();
switch (e) {
case c:
return a;
case a:
return c;
default:
return e
}
};
function | () {
throw Error("Rnd is static and cannot be instantiated.");
}
Rnd.randFloat = function(e, c) {
if (isNaN(c)) {
c = e;
e = 0
}
return Math.random() * (c - e) + e
};
Rnd.randBoolean = function(e) {
if (isNaN(e)) {
e = 0.5
}
return Math.random() < e
};
Rnd.randSign = function(e) {
if (isNaN(e)) {
e = 0.5
}
return Math.random() < e ? 1 : -1
};
Rnd.randBit = function(e) {
| Rnd | identifier_name |
img-touch-clip.js | .path;
//从canvas-zoom迁移过来,关于边框等元素的adaption的设置
this.scaleAdaption = 1;
var indoormap =options.canvas;
var pageWidth = parseInt(indoormap.getAttribute("width")); //750
var pageHeight = parseInt(indoormap.getAttribute("height"));//1180
currentWidth = document.documentElement.clientWidth; //value 414
currentHeight = document.documentElement.clientHeight;//value 736
console.log("currentWidth",currentWidth);
console.log("pageWidth",pageWidth);
var offsetX = 0;
var offsetY = 0;
if (pageWidth < pageHeight) {//canvas.width < canvas.height
this.scaleAdaption = currentHeight / pageHeight;
if (pageWidth * this.scaleAdaption > currentWidth) {
this.scaleAdaption = this.scaleAdaption * (currentWidth / (this.scaleAdaption * pageWidth));
}
} else {//canvas.width >= canvas.height
this.scaleAdaption = currentWidth / pageWidth;
if (pageHeight * this.scaleAdaption > currentHeight) {
this.scaleAdaption = this.scaleAdaption * (currentHeight / (this.scaleAdaption * pageHeight));
}
}
console.log("scaleAdaption",this.scaleAdaption); //0.552
console.log("currentHeight",currentHeight); //736
this.positionAdaption = {
x: (parseInt(currentWidth) - parseInt(indoormap.getAttribute("width"))) / 2,
y: (parseInt(currentHeight) - parseInt(indoormap.getAttribute("height"))) / 2
};
console.log("positionada-x:",this.positionAdaption.x); //-168
console.log("positionada-y:",this.positionAdaption.y); //-222
//end
this.imgTexture = new Image();
this.lastZoomScale = null;
this.lastX = null;
this.lastY = null;
this.mdown = false; //desktop drag
this.init = false;
this.checkRequestAnimationFrame();
requestAnimationFrame(this.animate.bind(this));
// requestAnimationFrame(this.draw_box.bind(this));
//this.init_draw();
this.setEventListeners();
};
ImgTouchCanvas.prototype = {
_imgBoxSize:750,
_imgBox:null,
_previewBox:null,
_uploadInputBtn:null,
_$canvas:null,
_$canvasW:0,
_$canvasH:0,
_$canvas2d:null,
_imgScale:0, | _img_sx:0,
_img_sy:0,
// 图片的高宽
_imgW:0,
_imgH:0,
init_url: function(url){
this.imgTexture = new Image();
this.imgTexture.src=url;
this.init=false;
this.box_Scale=1;
this.position = {
x: 0,
y: 0
};
this.img_y=0;
this.scale = {
x: 0.5,
y: 0.5
};
this.context.clearRect(0, 0, this.canvas.width, this.canvas.height);
this.animate();
console.log(url);
},
animate: function() {
//set scale such as image cover all the canvas
if(!this.init) {
if(this.imgTexture.width) {
var scaleRatio = null;
//检测图片的宽高比例
var w_h_ratio=this.imgTexture.width/this.imgTexture.height;
if(this.canvas.clientWidth <= this.canvas.clientHeight) {
scaleRatio = this.canvas.clientWidth / this.imgTexture.width;
}
else {
scaleRatio = this.canvas.clientHeight / this.imgTexture.height;
}
this.img_y=this.canvas.height/2-this.imgTexture.height*scaleRatio/2;
this.position.y=this.img_y;
this.scale.x = scaleRatio;
this.scale.y = scaleRatio;
this.init = true;
console.log("init:",this.init ,this.scale.x);
}
}
this.context.clearRect(0, 0, this.canvas.width, this.canvas.height);
this.context.drawImage(
this.imgTexture,
this.position.x, this.position.y,
this.scale.x * this.imgTexture.width,
this.scale.y * this.imgTexture.height);
DrawMapInfo(
this.box_Scale,
this.scale.y,
this.position.x,
this.position.y);
requestAnimationFrame(this.animate.bind(this));
},
gesturePinchZoom: function(event) {
var zoom = false;
if( event.targetTouches.length >= 2 ) {
var p1 = event.targetTouches[0];
var p2 = event.targetTouches[1];
//两个touch_X坐标的绝对值
var zoomScale = Math.sqrt(Math.pow(p2.pageX - p1.pageX, 2) + Math.pow(p2.pageY - p1.pageY, 2)); //euclidian distance
if( this.lastZoomScale ) {
zoom = zoomScale - this.lastZoomScale;
}
this.lastZoomScale = zoomScale;
}
return zoom;
},
doZoom: function(zoom) {
if(!zoom) return;
//new scale
var currentScale = this.scale.x;
var newScale = this.scale.x + zoom/100;
var box_Scale = this.box_Scale + zoom/100;
//var newzoom
console.log("zoom",zoom/100);
var img_y=this.img_y;
//some helpers
var deltaScale = newScale - currentScale;
var currentWidth = (this.imgTexture.width * this.scale.x);
var currentHeight = (this.imgTexture.height * this.scale.y);
//deltaWidth===》detascale缩放差
var deltaWidth = this.imgTexture.width*deltaScale;
var deltaHeight = this.imgTexture.height*deltaScale;
console.log("detalwidth",deltaWidth);
//by default scale doesnt change position and only add/remove pixel to right and bottom
//默认的缩放不会改变定位,只会添加/移除像素,到左边或底部
//so we must move the image to the left to keep the image centered
//所以我们必须移动图像到左边以保持图像的中心化
//ex: coefX and coefY = 0.5 when image is centered <=> move image to the left 0.5x pixels added to the right
//coefX和coefY赋值为0.5当图像在中心时,移动图像到左边的0.5像素,
//canvasmiddleX——取得canvas的中心点
var canvas_middle_X = this.canvas.clientWidth / 2;
var canvas_middle_Y = this.canvas.clientHeight / 2;
//
var xonmap = (-this.position.x) + canvas_middle_X;
var yonmap = (-this.position.y) + canvas_middle_Y;
var coefX = -xonmap / (currentWidth);
var coefY = -yonmap / (currentHeight);
var newPosX = this.position.x + deltaWidth*coefX;
var newPosY = this.position.y + deltaHeight*coefY;
// console.log("new_posy",newPosX);
//edges cases
var newWidth = currentWidth + deltaWidth;
var newHeight = currentHeight + deltaHeight;
if( newWidth < this.canvas.clientWidth ) return;
if( newPosX > 0 ) { newPosX = 0; }
if( newPosX + newWidth < this.canvas.clientWidth ) {
newPosX = this.canvas.clientWidth - newWidth;
}
console.log("new_posy",newPosY);
console.log("newHeight",newHeight);
console.log("newPosY + newHeight",newPosY + newHeight);
// if( newHeight < this.canvas.clientHeight ) return;
if( newPosY > this.img_y) { newPosY = this.img_y; }
// if( newPosY + newHeight < this.canvas.clientHeight ) {
// newPosY = this.imgTexture.height - newHeight; }
// console.log("new_posy",newPosX);
//最终效果并初始赋值 //finally affectations
this.box_Scale = box_Scale;
this.scale.x = newScale;
this.scale.y = newScale;
this.position.x = newPosX;
this.position.y = newPosY;
console.log("newScale:",box_Scale);
},
//平移
doMove: function(relativeX, relativeY) {
if(this.lastX && this.lastY) {
var deltaX = relativeX - this.lastX;
var deltaY = relativeY - this.lastY;
var currentWidth = (this.imgTexture.width * this.scale.x);
var currentHeight = (this.imgTexture.height * this.scale.y);
this.position.x += deltaX;
this.position.y += deltaY;
//domve--->edge cases
if( this.position.x > 0 ) {
this.position.x = 0;
}
else if( this.position.x + currentWidth < this.canvas.clientWidth ) {
this.position.x = this.canvas.clientWidth - currentWidth;
}
if(currentHeight >= this.canvas.clientHeight){
if( this.position.y >0 ) {
this.position.y = 0;
|
// _img:this.imgTexture,
//剪裁的x y坐标 | random_line_split |
IIoT End-to-End (Pt 2).py | .secrets.get("iot","adls_key"))
# Setup storage locations for all data
ROOT_PATH = f"abfss://iot@{storage_account}.dfs.core.windows.net/"
# Pyspark and ML Imports
import os, json, requests
from pyspark.sql import functions as F
from pyspark.sql.functions import pandas_udf, PandasUDFType
import numpy as np
import pandas as pd
import xgboost as xgb
import mlflow.xgboost
import mlflow.azureml
from azureml.core import Workspace
from azureml.core.webservice import AciWebservice, Webservice
import random, string
# Random String generator for ML models served in AzureML
random_string = lambda length: ''.join(random.SystemRandom().choice(string.ascii_lowercase) for _ in range(length))
# COMMAND ----------
# MAGIC %md ## Step 3 - Machine Learning
# MAGIC Now that our data is flowing reliably from our sensor devices into an enriched Delta table in Data Lake storage, we can start to build ML models to predict power output and remaining life of our assets using historical sensor, weather, power and maintenance data.
# MAGIC
# MAGIC We create two models ***for each Wind Turbine***:
# MAGIC 1. Turbine Power Output - using current readings for turbine operating parameters (angle, RPM) and weather (temperature, humidity, etc.), predict the expected power output 6 hours from now
# MAGIC 2. Turbine Remaining Life - predict the remaining life in days until the next maintenance event
# MAGIC
# MAGIC <img src="https://sguptasa.blob.core.windows.net/random/iiot_blog/turbine_models.png" width=800>
# MAGIC
# MAGIC We will use the XGBoost framework to train regression models. Due to the size of the data and number of Wind Turbines, we will use Spark UDFs to distribute training across all the nodes in our cluster.
# COMMAND ----------
# MAGIC %md ### 3a. Feature Engineering
# MAGIC In order to predict power output 6 hours ahead, we need to first time-shift our data to create our label column. We can do this easily using Spark Window partitioning.
# MAGIC
# MAGIC In order to predict remaining life, we need to backtrace the remaining life from the maintenance events. We can do this easily using cross joins. The following diagram illustrates the ML Feature Engineering pipeline:
# MAGIC
# MAGIC <img src="https://sguptasa.blob.core.windows.net/random/iiot_blog/ml_pipeline.png" width=800>
# COMMAND ----------
# MAGIC %sql
# MAGIC -- Calculate the age of each turbine and the remaining life in days
# MAGIC CREATE OR REPLACE VIEW turbine_age AS
# MAGIC WITH reading_dates AS (SELECT distinct date, deviceid FROM turbine_power),
# MAGIC maintenance_dates AS (
# MAGIC SELECT d.*, datediff(nm.date, d.date) as datediff_next, datediff(d.date, lm.date) as datediff_last
# MAGIC FROM reading_dates d LEFT JOIN turbine_maintenance nm ON (d.deviceid=nm.deviceid AND d.date<=nm.date)
# MAGIC LEFT JOIN turbine_maintenance lm ON (d.deviceid=lm.deviceid AND d.date>=lm.date ))
# MAGIC SELECT date, deviceid, ifnull(min(datediff_last),0) AS age, ifnull(min(datediff_next),0) AS remaining_life
# MAGIC FROM maintenance_dates
# MAGIC GROUP BY deviceid, date;
# MAGIC
# MAGIC -- Calculate the power 6 hours ahead using Spark Windowing and build a feature_table to feed into our ML models
# MAGIC CREATE OR REPLACE VIEW feature_table AS
# MAGIC SELECT r.*, age, remaining_life,
# MAGIC LEAD(power, 72, power) OVER (PARTITION BY r.deviceid ORDER BY window) as power_6_hours_ahead
# MAGIC FROM gold_readings r JOIN turbine_age a ON (r.date=a.date AND r.deviceid=a.deviceid)
# MAGIC WHERE r.date < CURRENT_DATE();
# COMMAND ----------
# MAGIC %sql
# MAGIC SELECT window, power, power_6_hours_ahead FROM feature_table WHERE deviceid='WindTurbine-1'
# COMMAND ----------
# MAGIC %sql
# MAGIC SELECT date, avg(age) as age, avg(remaining_life) as life FROM feature_table WHERE deviceid='WindTurbine-1' GROUP BY date ORDER BY date
# COMMAND ----------
# MAGIC %md ### 3b. Distributed Model Training - Predict Power Output
# MAGIC [Pandas UDFs](https://docs.microsoft.com/en-us/azure/databricks/spark/latest/spark-sql/udf-python-pandas?toc=https%3A%2F%2Fdocs.microsoft.com%2Fen-us%2Fazure%2Fazure-databricks%2Ftoc.json&bc=https%3A%2F%2Fdocs.microsoft.com%2Fen-us%2Fazure%2Fbread%2Ftoc.json) allow us to vectorize Pandas code across multiple nodes in a cluster. Here we create a UDF to train an XGBoost Regressor model against all the historic data for a particular Wind Turbine. We use a Grouped Map UDF as we perform this model training on the Wind Turbine group level.
# COMMAND ----------
# Create a function to train a XGBoost Regressor on a turbine's data
def train_distributed_xgb(readings_pd, model_type, label_col, prediction_col):
mlflow.xgboost.autolog()
with mlflow.start_run():
# Log the model type and device ID
mlflow.log_param('deviceid', readings_pd['deviceid'][0])
mlflow.log_param('model', model_type)
# Train an XGBRegressor on the data for this Turbine
alg = xgb.XGBRegressor()
train_dmatrix = xgb.DMatrix(data=readings_pd[feature_cols].astype('float'),label=readings_pd[label_col])
params = {'learning_rate': 0.5, 'alpha':10, 'colsample_bytree': 0.5, 'max_depth': 5}
model = xgb.train(params=params, dtrain=train_dmatrix, evals=[(train_dmatrix, 'train')])
# Make predictions on the dataset and return the results
readings_pd[prediction_col] = model.predict(train_dmatrix)
return readings_pd
# Create a Spark Dataframe that contains the features and labels we need
non_feature_cols = ['date','window','deviceid','winddirection','remaining_life']
feature_cols = ['angle','rpm','temperature','humidity','windspeed','power','age']
label_col = 'power_6_hours_ahead'
prediction_col = label_col + '_predicted'
# Read in our feature table and select the columns of interest
feature_df = spark.table('feature_table').selectExpr(non_feature_cols + feature_cols + [label_col] + [f'0 as {prediction_col}'])
# Register a Pandas UDF to distribute XGB model training using Spark
@pandas_udf(feature_df.schema, PandasUDFType.GROUPED_MAP)
def | (readings_pd):
return train_distributed_xgb(readings_pd, 'power_prediction', label_col, prediction_col)
# Run the Pandas UDF against our feature dataset - this will train 1 model for each turbine
power_predictions = feature_df.groupBy('deviceid').apply(train_power_models)
# Save predictions to storage
power_predictions.write.format("delta").mode("overwrite").partitionBy("date").saveAsTable("turbine_power_predictions")
# COMMAND ----------
# MAGIC %sql
# MAGIC -- Plot actuals vs. predicted
# MAGIC SELECT date, deviceid, avg(power_6_hours_ahead) as actual, avg(power_6_hours_ahead_predicted) as predicted FROM turbine_power_predictions GROUP BY date, deviceid
# COMMAND ----------
# MAGIC %md #### Automated Model Tracking in Databricks
# MAGIC As you train the models, notice how Databricks-managed MLflow automatically tracks each run in the "Runs" tab of the notebook. You can open each run and view the parameters, metrics, models and model artifacts that are captured by MLflow Autologging. For XGBoost Regression models, MLflow tracks:
# MAGIC 1. Any model parameters (alpha, colsample, learning rate, etc.) passed to the `params` variable
# MAGIC 2. Metrics specified in `evals` (RMSE by default)
# MAGIC 3. The trained XGBoost model file
# MAGIC 4. Feature importances
# MAGIC
# MAGIC <img src="https://sguptasa.blob.core.windows.net/random/iiot_blog/iiot_mlflow_tracking.gif" width=800>
# COMMAND ----------
# MAGIC %md ### 3c. Distributed Model Training - Predict Remaining Life
# MAGIC Our second model predicts the remaining useful life of each Wind Turbine based on the current operating conditions. We have historical maintenance data that indicates when a replacement activity occured - this will be used to calculate the remaining life as our training label.
# MAGIC
# MAGIC Once again, we train an XGBoost model for each Wind Turbine to predict the remaining life given a set of operating parameters and weather conditions
# COMMAND ----------
# Create a Spark Dataframe that contains the features and labels we need
non_feature_cols = ['date','window','deviceid','winddirection','power_6_hours_ahead_predicted']
label_col = 'remaining_life'
prediction_col = label_col + '_predicted'
# Read in our feature table and select the columns of interest | train_power_models | identifier_name |
IIoT End-to-End (Pt 2).py | gboost
import mlflow.azureml
from azureml.core import Workspace
from azureml.core.webservice import AciWebservice, Webservice
import random, string
# Random String generator for ML models served in AzureML
random_string = lambda length: ''.join(random.SystemRandom().choice(string.ascii_lowercase) for _ in range(length))
# COMMAND ----------
# MAGIC %md ## Step 3 - Machine Learning
# MAGIC Now that our data is flowing reliably from our sensor devices into an enriched Delta table in Data Lake storage, we can start to build ML models to predict power output and remaining life of our assets using historical sensor, weather, power and maintenance data.
# MAGIC
# MAGIC We create two models ***for each Wind Turbine***:
# MAGIC 1. Turbine Power Output - using current readings for turbine operating parameters (angle, RPM) and weather (temperature, humidity, etc.), predict the expected power output 6 hours from now
# MAGIC 2. Turbine Remaining Life - predict the remaining life in days until the next maintenance event
# MAGIC
# MAGIC <img src="https://sguptasa.blob.core.windows.net/random/iiot_blog/turbine_models.png" width=800>
# MAGIC
# MAGIC We will use the XGBoost framework to train regression models. Due to the size of the data and number of Wind Turbines, we will use Spark UDFs to distribute training across all the nodes in our cluster.
# COMMAND ----------
# MAGIC %md ### 3a. Feature Engineering
# MAGIC In order to predict power output 6 hours ahead, we need to first time-shift our data to create our label column. We can do this easily using Spark Window partitioning.
# MAGIC
# MAGIC In order to predict remaining life, we need to backtrace the remaining life from the maintenance events. We can do this easily using cross joins. The following diagram illustrates the ML Feature Engineering pipeline:
# MAGIC
# MAGIC <img src="https://sguptasa.blob.core.windows.net/random/iiot_blog/ml_pipeline.png" width=800>
# COMMAND ----------
# MAGIC %sql
# MAGIC -- Calculate the age of each turbine and the remaining life in days
# MAGIC CREATE OR REPLACE VIEW turbine_age AS
# MAGIC WITH reading_dates AS (SELECT distinct date, deviceid FROM turbine_power),
# MAGIC maintenance_dates AS (
# MAGIC SELECT d.*, datediff(nm.date, d.date) as datediff_next, datediff(d.date, lm.date) as datediff_last
# MAGIC FROM reading_dates d LEFT JOIN turbine_maintenance nm ON (d.deviceid=nm.deviceid AND d.date<=nm.date)
# MAGIC LEFT JOIN turbine_maintenance lm ON (d.deviceid=lm.deviceid AND d.date>=lm.date ))
# MAGIC SELECT date, deviceid, ifnull(min(datediff_last),0) AS age, ifnull(min(datediff_next),0) AS remaining_life
# MAGIC FROM maintenance_dates
# MAGIC GROUP BY deviceid, date;
# MAGIC
# MAGIC -- Calculate the power 6 hours ahead using Spark Windowing and build a feature_table to feed into our ML models
# MAGIC CREATE OR REPLACE VIEW feature_table AS
# MAGIC SELECT r.*, age, remaining_life,
# MAGIC LEAD(power, 72, power) OVER (PARTITION BY r.deviceid ORDER BY window) as power_6_hours_ahead
# MAGIC FROM gold_readings r JOIN turbine_age a ON (r.date=a.date AND r.deviceid=a.deviceid)
# MAGIC WHERE r.date < CURRENT_DATE();
# COMMAND ----------
# MAGIC %sql
# MAGIC SELECT window, power, power_6_hours_ahead FROM feature_table WHERE deviceid='WindTurbine-1'
# COMMAND ----------
# MAGIC %sql
# MAGIC SELECT date, avg(age) as age, avg(remaining_life) as life FROM feature_table WHERE deviceid='WindTurbine-1' GROUP BY date ORDER BY date
# COMMAND ----------
# MAGIC %md ### 3b. Distributed Model Training - Predict Power Output
# MAGIC [Pandas UDFs](https://docs.microsoft.com/en-us/azure/databricks/spark/latest/spark-sql/udf-python-pandas?toc=https%3A%2F%2Fdocs.microsoft.com%2Fen-us%2Fazure%2Fazure-databricks%2Ftoc.json&bc=https%3A%2F%2Fdocs.microsoft.com%2Fen-us%2Fazure%2Fbread%2Ftoc.json) allow us to vectorize Pandas code across multiple nodes in a cluster. Here we create a UDF to train an XGBoost Regressor model against all the historic data for a particular Wind Turbine. We use a Grouped Map UDF as we perform this model training on the Wind Turbine group level.
# COMMAND ----------
# Create a function to train a XGBoost Regressor on a turbine's data
def train_distributed_xgb(readings_pd, model_type, label_col, prediction_col):
mlflow.xgboost.autolog()
with mlflow.start_run():
# Log the model type and device ID
mlflow.log_param('deviceid', readings_pd['deviceid'][0])
mlflow.log_param('model', model_type)
# Train an XGBRegressor on the data for this Turbine
alg = xgb.XGBRegressor()
train_dmatrix = xgb.DMatrix(data=readings_pd[feature_cols].astype('float'),label=readings_pd[label_col])
params = {'learning_rate': 0.5, 'alpha':10, 'colsample_bytree': 0.5, 'max_depth': 5}
model = xgb.train(params=params, dtrain=train_dmatrix, evals=[(train_dmatrix, 'train')])
# Make predictions on the dataset and return the results
readings_pd[prediction_col] = model.predict(train_dmatrix)
return readings_pd
# Create a Spark Dataframe that contains the features and labels we need
non_feature_cols = ['date','window','deviceid','winddirection','remaining_life']
feature_cols = ['angle','rpm','temperature','humidity','windspeed','power','age']
label_col = 'power_6_hours_ahead'
prediction_col = label_col + '_predicted'
# Read in our feature table and select the columns of interest
feature_df = spark.table('feature_table').selectExpr(non_feature_cols + feature_cols + [label_col] + [f'0 as {prediction_col}'])
# Register a Pandas UDF to distribute XGB model training using Spark
@pandas_udf(feature_df.schema, PandasUDFType.GROUPED_MAP)
def train_power_models(readings_pd):
return train_distributed_xgb(readings_pd, 'power_prediction', label_col, prediction_col)
# Run the Pandas UDF against our feature dataset - this will train 1 model for each turbine
power_predictions = feature_df.groupBy('deviceid').apply(train_power_models)
# Save predictions to storage
power_predictions.write.format("delta").mode("overwrite").partitionBy("date").saveAsTable("turbine_power_predictions")
# COMMAND ----------
# MAGIC %sql
# MAGIC -- Plot actuals vs. predicted
# MAGIC SELECT date, deviceid, avg(power_6_hours_ahead) as actual, avg(power_6_hours_ahead_predicted) as predicted FROM turbine_power_predictions GROUP BY date, deviceid
# COMMAND ----------
# MAGIC %md #### Automated Model Tracking in Databricks
# MAGIC As you train the models, notice how Databricks-managed MLflow automatically tracks each run in the "Runs" tab of the notebook. You can open each run and view the parameters, metrics, models and model artifacts that are captured by MLflow Autologging. For XGBoost Regression models, MLflow tracks:
# MAGIC 1. Any model parameters (alpha, colsample, learning rate, etc.) passed to the `params` variable
# MAGIC 2. Metrics specified in `evals` (RMSE by default)
# MAGIC 3. The trained XGBoost model file
# MAGIC 4. Feature importances
# MAGIC
# MAGIC <img src="https://sguptasa.blob.core.windows.net/random/iiot_blog/iiot_mlflow_tracking.gif" width=800>
# COMMAND ----------
# MAGIC %md ### 3c. Distributed Model Training - Predict Remaining Life
# MAGIC Our second model predicts the remaining useful life of each Wind Turbine based on the current operating conditions. We have historical maintenance data that indicates when a replacement activity occured - this will be used to calculate the remaining life as our training label.
# MAGIC
# MAGIC Once again, we train an XGBoost model for each Wind Turbine to predict the remaining life given a set of operating parameters and weather conditions
# COMMAND ----------
# Create a Spark Dataframe that contains the features and labels we need
non_feature_cols = ['date','window','deviceid','winddirection','power_6_hours_ahead_predicted']
label_col = 'remaining_life'
prediction_col = label_col + '_predicted'
# Read in our feature table and select the columns of interest
feature_df = spark.table('turbine_power_predictions').selectExpr(non_feature_cols + feature_cols + [label_col] + [f'0 as {prediction_col}'])
# Register a Pandas UDF to distribute XGB model training using Spark
@pandas_udf(feature_df.schema, PandasUDFType.GROUPED_MAP)
def train_life_models(readings_pd):
| return train_distributed_xgb(readings_pd, 'life_prediction', label_col, prediction_col) | identifier_body | |
IIoT End-to-End (Pt 2).py | # MAGIC [Pandas UDFs](https://docs.microsoft.com/en-us/azure/databricks/spark/latest/spark-sql/udf-python-pandas?toc=https%3A%2F%2Fdocs.microsoft.com%2Fen-us%2Fazure%2Fazure-databricks%2Ftoc.json&bc=https%3A%2F%2Fdocs.microsoft.com%2Fen-us%2Fazure%2Fbread%2Ftoc.json) allow us to vectorize Pandas code across multiple nodes in a cluster. Here we create a UDF to train an XGBoost Regressor model against all the historic data for a particular Wind Turbine. We use a Grouped Map UDF as we perform this model training on the Wind Turbine group level.
# COMMAND ----------
# Create a function to train a XGBoost Regressor on a turbine's data
def train_distributed_xgb(readings_pd, model_type, label_col, prediction_col):
mlflow.xgboost.autolog()
with mlflow.start_run():
# Log the model type and device ID
mlflow.log_param('deviceid', readings_pd['deviceid'][0])
mlflow.log_param('model', model_type)
# Train an XGBRegressor on the data for this Turbine
alg = xgb.XGBRegressor()
train_dmatrix = xgb.DMatrix(data=readings_pd[feature_cols].astype('float'),label=readings_pd[label_col])
params = {'learning_rate': 0.5, 'alpha':10, 'colsample_bytree': 0.5, 'max_depth': 5}
model = xgb.train(params=params, dtrain=train_dmatrix, evals=[(train_dmatrix, 'train')])
# Make predictions on the dataset and return the results
readings_pd[prediction_col] = model.predict(train_dmatrix)
return readings_pd
# Create a Spark Dataframe that contains the features and labels we need
non_feature_cols = ['date','window','deviceid','winddirection','remaining_life']
feature_cols = ['angle','rpm','temperature','humidity','windspeed','power','age']
label_col = 'power_6_hours_ahead'
prediction_col = label_col + '_predicted'
# Read in our feature table and select the columns of interest
feature_df = spark.table('feature_table').selectExpr(non_feature_cols + feature_cols + [label_col] + [f'0 as {prediction_col}'])
# Register a Pandas UDF to distribute XGB model training using Spark
@pandas_udf(feature_df.schema, PandasUDFType.GROUPED_MAP)
def train_power_models(readings_pd):
return train_distributed_xgb(readings_pd, 'power_prediction', label_col, prediction_col)
# Run the Pandas UDF against our feature dataset - this will train 1 model for each turbine
power_predictions = feature_df.groupBy('deviceid').apply(train_power_models)
# Save predictions to storage
power_predictions.write.format("delta").mode("overwrite").partitionBy("date").saveAsTable("turbine_power_predictions")
# COMMAND ----------
# MAGIC %sql
# MAGIC -- Plot actuals vs. predicted
# MAGIC SELECT date, deviceid, avg(power_6_hours_ahead) as actual, avg(power_6_hours_ahead_predicted) as predicted FROM turbine_power_predictions GROUP BY date, deviceid
# COMMAND ----------
# MAGIC %md #### Automated Model Tracking in Databricks
# MAGIC As you train the models, notice how Databricks-managed MLflow automatically tracks each run in the "Runs" tab of the notebook. You can open each run and view the parameters, metrics, models and model artifacts that are captured by MLflow Autologging. For XGBoost Regression models, MLflow tracks:
# MAGIC 1. Any model parameters (alpha, colsample, learning rate, etc.) passed to the `params` variable
# MAGIC 2. Metrics specified in `evals` (RMSE by default)
# MAGIC 3. The trained XGBoost model file
# MAGIC 4. Feature importances
# MAGIC
# MAGIC <img src="https://sguptasa.blob.core.windows.net/random/iiot_blog/iiot_mlflow_tracking.gif" width=800>
# COMMAND ----------
# MAGIC %md ### 3c. Distributed Model Training - Predict Remaining Life
# MAGIC Our second model predicts the remaining useful life of each Wind Turbine based on the current operating conditions. We have historical maintenance data that indicates when a replacement activity occured - this will be used to calculate the remaining life as our training label.
# MAGIC
# MAGIC Once again, we train an XGBoost model for each Wind Turbine to predict the remaining life given a set of operating parameters and weather conditions
# COMMAND ----------
# Create a Spark Dataframe that contains the features and labels we need
non_feature_cols = ['date','window','deviceid','winddirection','power_6_hours_ahead_predicted']
label_col = 'remaining_life'
prediction_col = label_col + '_predicted'
# Read in our feature table and select the columns of interest
feature_df = spark.table('turbine_power_predictions').selectExpr(non_feature_cols + feature_cols + [label_col] + [f'0 as {prediction_col}'])
# Register a Pandas UDF to distribute XGB model training using Spark
@pandas_udf(feature_df.schema, PandasUDFType.GROUPED_MAP)
def train_life_models(readings_pd):
return train_distributed_xgb(readings_pd, 'life_prediction', label_col, prediction_col)
# Run the Pandas UDF against our feature dataset - this will train 1 model per turbine and write the predictions to a table
life_predictions = (
feature_df.groupBy('deviceid').apply(train_life_models)
.write.format("delta").mode("overwrite")
.partitionBy("date")
.saveAsTable("turbine_life_predictions")
)
# COMMAND ----------
# MAGIC %sql
# MAGIC SELECT date, avg(remaining_life) as Actual_Life, avg(remaining_life_predicted) as Predicted_Life
# MAGIC FROM turbine_life_predictions
# MAGIC WHERE deviceid='WindTurbine-1'
# MAGIC GROUP BY date ORDER BY date
# COMMAND ----------
# MAGIC %md The models to predict remaining useful life have been trained and logged by MLflow. We can now move on to model deployment in AzureML.
# COMMAND ----------
# MAGIC %md ## Step 4 - Model Deployment to AzureML
# MAGIC Now that our models have been trained, we can deploy them in an automated way directly to a model serving environment like Azure ML. Below, we connect to an AzureML workspace, build a container image for the model, and deploy that image to Azure Container Instances (ACI) to be hosted for REST API calls.
# MAGIC
# MAGIC **Note:** This step can take up to 10 minutes to run due to images being created and deplyed in Azure ML.
# MAGIC
# MAGIC **Important:** This step requires authentication to Azure - open the link provided in the output of the cell in a new browser tab and use the code provided.
# COMMAND ----------
# AML Workspace Information - replace with your workspace info
aml_resource_group = dbutils.widgets.get("Resource Group")
aml_subscription_id = dbutils.widgets.get("Subscription ID")
aml_region = dbutils.widgets.get("Region")
aml_workspace_name = "iot"
turbine = "WindTurbine-1"
power_model = "power_prediction"
life_model = "life_prediction"
# Connect to a workspace (replace widgets with your own workspace info)
workspace = Workspace.create(name = aml_workspace_name,
subscription_id = aml_subscription_id,
resource_group = aml_resource_group,
location = aml_region,
exist_ok=True)
# Retrieve the remaining_life and power_output experiments on WindTurbine-1, and get the best performing model (min RMSE)
best_life_model = mlflow.search_runs(filter_string=f'params.deviceid="{turbine}" and params.model="{life_model}"')\
.dropna().sort_values("metrics.train-rmse")['artifact_uri'].iloc[0] + '/model'
best_power_model = mlflow.search_runs(filter_string=f'params.deviceid="{turbine}" and params.model="{power_model}"')\
.dropna().sort_values("metrics.train-rmse")['artifact_uri'].iloc[0] + '/model'
scoring_uris = {}
for model, path in [('life',best_life_model),('power',best_power_model)]:
# Build images for each of our two models in Azure Container Instances
| print(f"-----Building image for {model} model-----")
model_image, azure_model = mlflow.azureml.build_image(model_uri=path,
workspace=workspace,
model_name=model,
image_name=model,
description=f"XGBoost model to predict {model} of a turbine",
synchronous=True)
model_image.wait_for_creation(show_output=True)
# Deploy web services to host each model as a REST API
print(f"-----Deploying image for {model} model-----")
dev_webservice_name = model + random_string(10)
dev_webservice_deployment_config = AciWebservice.deploy_configuration()
dev_webservice = Webservice.deploy_from_image(name=dev_webservice_name, image=model_image, deployment_config=dev_webservice_deployment_config, workspace=workspace)
dev_webservice.wait_for_deployment()
# Get the URI for sending REST requests to
scoring_uris[model] = dev_webservice.scoring_uri | conditional_block | |
IIoT End-to-End (Pt 2).py | utils.secrets.get("iot","adls_key"))
# Setup storage locations for all data
ROOT_PATH = f"abfss://iot@{storage_account}.dfs.core.windows.net/"
# Pyspark and ML Imports
import os, json, requests
from pyspark.sql import functions as F
from pyspark.sql.functions import pandas_udf, PandasUDFType
import numpy as np
import pandas as pd
import xgboost as xgb
import mlflow.xgboost
import mlflow.azureml
from azureml.core import Workspace
from azureml.core.webservice import AciWebservice, Webservice
import random, string
# Random String generator for ML models served in AzureML
random_string = lambda length: ''.join(random.SystemRandom().choice(string.ascii_lowercase) for _ in range(length))
# COMMAND ----------
# MAGIC %md ## Step 3 - Machine Learning
# MAGIC Now that our data is flowing reliably from our sensor devices into an enriched Delta table in Data Lake storage, we can start to build ML models to predict power output and remaining life of our assets using historical sensor, weather, power and maintenance data.
# MAGIC
# MAGIC We create two models ***for each Wind Turbine***:
# MAGIC 1. Turbine Power Output - using current readings for turbine operating parameters (angle, RPM) and weather (temperature, humidity, etc.), predict the expected power output 6 hours from now
# MAGIC 2. Turbine Remaining Life - predict the remaining life in days until the next maintenance event
# MAGIC
# MAGIC <img src="https://sguptasa.blob.core.windows.net/random/iiot_blog/turbine_models.png" width=800>
# MAGIC
# MAGIC We will use the XGBoost framework to train regression models. Due to the size of the data and number of Wind Turbines, we will use Spark UDFs to distribute training across all the nodes in our cluster.
# COMMAND ----------
# MAGIC %md ### 3a. Feature Engineering
# MAGIC In order to predict power output 6 hours ahead, we need to first time-shift our data to create our label column. We can do this easily using Spark Window partitioning.
# MAGIC
# MAGIC In order to predict remaining life, we need to backtrace the remaining life from the maintenance events. We can do this easily using cross joins. The following diagram illustrates the ML Feature Engineering pipeline:
# MAGIC
# MAGIC <img src="https://sguptasa.blob.core.windows.net/random/iiot_blog/ml_pipeline.png" width=800>
# COMMAND ----------
# MAGIC %sql
# MAGIC -- Calculate the age of each turbine and the remaining life in days
# MAGIC CREATE OR REPLACE VIEW turbine_age AS
# MAGIC WITH reading_dates AS (SELECT distinct date, deviceid FROM turbine_power),
# MAGIC maintenance_dates AS (
# MAGIC SELECT d.*, datediff(nm.date, d.date) as datediff_next, datediff(d.date, lm.date) as datediff_last
# MAGIC FROM reading_dates d LEFT JOIN turbine_maintenance nm ON (d.deviceid=nm.deviceid AND d.date<=nm.date)
# MAGIC LEFT JOIN turbine_maintenance lm ON (d.deviceid=lm.deviceid AND d.date>=lm.date ))
# MAGIC SELECT date, deviceid, ifnull(min(datediff_last),0) AS age, ifnull(min(datediff_next),0) AS remaining_life
# MAGIC FROM maintenance_dates
# MAGIC GROUP BY deviceid, date;
# MAGIC
# MAGIC -- Calculate the power 6 hours ahead using Spark Windowing and build a feature_table to feed into our ML models
# MAGIC CREATE OR REPLACE VIEW feature_table AS
# MAGIC SELECT r.*, age, remaining_life,
# MAGIC LEAD(power, 72, power) OVER (PARTITION BY r.deviceid ORDER BY window) as power_6_hours_ahead
# MAGIC FROM gold_readings r JOIN turbine_age a ON (r.date=a.date AND r.deviceid=a.deviceid)
# MAGIC WHERE r.date < CURRENT_DATE();
# COMMAND ----------
# MAGIC %sql
# MAGIC SELECT window, power, power_6_hours_ahead FROM feature_table WHERE deviceid='WindTurbine-1'
# COMMAND ----------
# MAGIC %sql
# MAGIC SELECT date, avg(age) as age, avg(remaining_life) as life FROM feature_table WHERE deviceid='WindTurbine-1' GROUP BY date ORDER BY date
# COMMAND ----------
# MAGIC %md ### 3b. Distributed Model Training - Predict Power Output
# MAGIC [Pandas UDFs](https://docs.microsoft.com/en-us/azure/databricks/spark/latest/spark-sql/udf-python-pandas?toc=https%3A%2F%2Fdocs.microsoft.com%2Fen-us%2Fazure%2Fazure-databricks%2Ftoc.json&bc=https%3A%2F%2Fdocs.microsoft.com%2Fen-us%2Fazure%2Fbread%2Ftoc.json) allow us to vectorize Pandas code across multiple nodes in a cluster. Here we create a UDF to train an XGBoost Regressor model against all the historic data for a particular Wind Turbine. We use a Grouped Map UDF as we perform this model training on the Wind Turbine group level.
# COMMAND ----------
# Create a function to train a XGBoost Regressor on a turbine's data
def train_distributed_xgb(readings_pd, model_type, label_col, prediction_col):
mlflow.xgboost.autolog()
with mlflow.start_run():
# Log the model type and device ID
mlflow.log_param('deviceid', readings_pd['deviceid'][0])
mlflow.log_param('model', model_type)
# Train an XGBRegressor on the data for this Turbine
alg = xgb.XGBRegressor()
train_dmatrix = xgb.DMatrix(data=readings_pd[feature_cols].astype('float'),label=readings_pd[label_col])
params = {'learning_rate': 0.5, 'alpha':10, 'colsample_bytree': 0.5, 'max_depth': 5}
model = xgb.train(params=params, dtrain=train_dmatrix, evals=[(train_dmatrix, 'train')])
# Make predictions on the dataset and return the results
readings_pd[prediction_col] = model.predict(train_dmatrix)
return readings_pd
# Create a Spark Dataframe that contains the features and labels we need
non_feature_cols = ['date','window','deviceid','winddirection','remaining_life']
feature_cols = ['angle','rpm','temperature','humidity','windspeed','power','age']
label_col = 'power_6_hours_ahead'
prediction_col = label_col + '_predicted'
# Read in our feature table and select the columns of interest
feature_df = spark.table('feature_table').selectExpr(non_feature_cols + feature_cols + [label_col] + [f'0 as {prediction_col}'])
# Register a Pandas UDF to distribute XGB model training using Spark
@pandas_udf(feature_df.schema, PandasUDFType.GROUPED_MAP)
def train_power_models(readings_pd):
return train_distributed_xgb(readings_pd, 'power_prediction', label_col, prediction_col)
# Run the Pandas UDF against our feature dataset - this will train 1 model for each turbine
power_predictions = feature_df.groupBy('deviceid').apply(train_power_models)
# Save predictions to storage
power_predictions.write.format("delta").mode("overwrite").partitionBy("date").saveAsTable("turbine_power_predictions")
# COMMAND ----------
# MAGIC %sql | # MAGIC %md #### Automated Model Tracking in Databricks
# MAGIC As you train the models, notice how Databricks-managed MLflow automatically tracks each run in the "Runs" tab of the notebook. You can open each run and view the parameters, metrics, models and model artifacts that are captured by MLflow Autologging. For XGBoost Regression models, MLflow tracks:
# MAGIC 1. Any model parameters (alpha, colsample, learning rate, etc.) passed to the `params` variable
# MAGIC 2. Metrics specified in `evals` (RMSE by default)
# MAGIC 3. The trained XGBoost model file
# MAGIC 4. Feature importances
# MAGIC
# MAGIC <img src="https://sguptasa.blob.core.windows.net/random/iiot_blog/iiot_mlflow_tracking.gif" width=800>
# COMMAND ----------
# MAGIC %md ### 3c. Distributed Model Training - Predict Remaining Life
# MAGIC Our second model predicts the remaining useful life of each Wind Turbine based on the current operating conditions. We have historical maintenance data that indicates when a replacement activity occured - this will be used to calculate the remaining life as our training label.
# MAGIC
# MAGIC Once again, we train an XGBoost model for each Wind Turbine to predict the remaining life given a set of operating parameters and weather conditions
# COMMAND ----------
# Create a Spark Dataframe that contains the features and labels we need
non_feature_cols = ['date','window','deviceid','winddirection','power_6_hours_ahead_predicted']
label_col = 'remaining_life'
prediction_col = label_col + '_predicted'
# Read in our feature table and select the columns of interest
| # MAGIC -- Plot actuals vs. predicted
# MAGIC SELECT date, deviceid, avg(power_6_hours_ahead) as actual, avg(power_6_hours_ahead_predicted) as predicted FROM turbine_power_predictions GROUP BY date, deviceid
# COMMAND ----------
| random_line_split |
builders.rs | b| b
/// .merge(serde_json::from_str(r#"{"name":"My Server"}"#)?))
/// ```
pub fn merge(mut self, other: $name) -> $name {
self.0.extend(other.0); self
}
}
)*
}
}
builder! {
/// Patch content for the `edit_server` call.
EditServer(Object);
/// Patch content for the `edit_channel` call.
EditChannel(Object);
/// Patch content for the `edit_member` call.
EditMember(Object);
/// Patch content for the `edit_profile` call.
EditProfile(Object);
/// Patch content for the `edit_user_profile` call.
EditUserProfile(Object);
/// Patch content for the `edit_role` call.
EditRole(Object);
/// Content for the `send_message` call.
SendMessage(Object);
/// `allowed_mentions` object for use within `send_message`.
AllowedMentions(Object);
/// Patch content for the `send_embed` call.
EmbedBuilder(Object);
/// Inner patch content for the `send_embed` call.
EmbedFooterBuilder(Object);
/// Inner patch content for the `send_embed` call.
EmbedAuthorBuilder(Object);
/// Inner patch content for the `send_embed` call.
EmbedFieldsBuilder(Vec<Value>);
}
macro_rules! set {
($self:ident, $key:expr, $($rest:tt)*) => {{
{let mut s = $self; s.0.insert($key.into(), json!($($rest)*)); s}
}}
}
impl EditServer {
/// Edit the server's name.
pub fn name(self, name: &str) -> Self {
set!(self, "name", name)
}
/// Edit the server's voice region.
pub fn region(self, region: &str) -> Self {
set!(self, "region", region)
}
/// Edit the server's icon. Use `None` to remove the icon.
pub fn icon(self, icon: Option<&str>) -> Self {
set!(self, "icon", icon)
}
/// Edit the server's AFK channel. Use `None` to select no AFK channel.
pub fn afk_channel(self, channel: Option<ChannelId>) -> Self {
set!(self, "afk_channel_id", channel)
}
/// Edit the server's AFK timeout.
pub fn afk_timeout(self, timeout: u64) -> Self {
set!(self, "afk_timeout", timeout)
}
/// Transfer ownership of the server to a new owner.
pub fn owner(self, owner: UserId) -> Self {
set!(self, "owner_id", owner.0)
}
/// Edit the verification level of the server.
pub fn verification_level(self, verification_level: VerificationLevel) -> Self {
set!(self, "verification_level", verification_level)
}
/// Edit the server's splash. Use `None` to remove the splash.
pub fn splash(self, splash: Option<&str>) -> Self {
set!(self, "splash", splash)
}
}
impl EditChannel {
/// Edit the channel's name.
pub fn name(self, name: &str) -> Self {
set!(self, "name", name)
}
/// Edit the text channel's topic.
pub fn topic(self, topic: &str) -> Self {
set!(self, "topic", topic)
}
/// Edit the channel's position in the list.
pub fn position(self, position: u64) -> Self {
set!(self, "position", position)
}
/// Edit the voice channel's bitrate.
pub fn | (self, bitrate: u64) -> Self {
set!(self, "bitrate", bitrate)
}
/// Edit the voice channel's user limit. Zero (`0`) means unlimited.
pub fn user_limit(self, user_limit: u64) -> Self {
set!(self, "user_limit", user_limit)
}
}
impl EditMember {
/// Edit the member's nickname. Supply the empty string to remove a nickname.
pub fn nickname(self, nick: &str) -> Self {
set!(self, "nick", nick)
}
/// Edit whether the member is server-muted.
pub fn mute(self, mute: bool) -> Self {
set!(self, "mute", mute)
}
/// Edit whether the member is server-deafened.
pub fn deaf(self, deafen: bool) -> Self {
set!(self, "deaf", deafen)
}
/// Edit the member's assigned roles.
pub fn roles(self, roles: &[RoleId]) -> Self {
set!(self, "roles", roles)
}
/// Move the member to another voice channel.
pub fn channel(self, channel: ChannelId) -> Self {
set!(self, "channel_id", channel.0)
}
}
impl EditProfile {
/// Edit the user's username. Must be between 2 and 32 characters long.
pub fn username(self, username: &str) -> Self {
set!(self, "username", username)
}
/// Edit the user's avatar. Use `None` to remove the avatar.
pub fn avatar(self, icon: Option<&str>) -> Self {
set!(self, "avatar", icon)
}
}
impl EditUserProfile {
/// Provide the user's current password for authentication. Required if
/// the email or password is being changed.
pub fn password(self, password: &str) -> Self {
set!(self, "password", password)
}
/// Edit the user's email address.
pub fn email(self, email: &str) -> Self {
set!(self, "email", email)
}
/// Edit the user's password.
pub fn new_password(self, password: &str) -> Self {
set!(self, "new_password", password)
}
/// Edit the user's username. Must be between 2 and 32 characters long.
pub fn username(self, username: &str) -> Self {
set!(self, "username", username)
}
/// Edit the user's avatar. Use `None` to remove the avatar.
pub fn avatar(self, icon: Option<&str>) -> Self {
set!(self, "avatar", icon)
}
}
impl EditRole {
/// Edit the role's name. Supply the empty string to remove a name.
pub fn name(self, name: &str) -> Self {
set!(self, "name", name)
}
/// Edit the role's permissions.
pub fn permissions(self, permissions: Permissions) -> Self {
set!(self, "permissions", permissions)
}
/// Edit the role's color. Set to zero for default.
pub fn color(self, color: u64) -> Self {
set!(self, "color", color)
}
/// Edit the role's hoist status (whether the role should be displayed separately in the sidebar).
pub fn hoist(self, hoist: bool) -> Self {
set!(self, "hoist", hoist)
}
/// Edit the role's mentionability, if the role can be mentioned.
pub fn mentionable(self, mentionable: bool) -> Self {
set!(self, "mentionable", mentionable)
}
}
impl SendMessage {
/// Set the text content of the message.
pub fn content(self, content: &str) -> Self {
set!(self, "content", content)
}
/// Set a nonce that can be used for optimistic message sending.
pub fn nonce(self, nonce: &str) -> Self {
set!(self, "nonce", nonce)
}
/// Set to true to use text-to-speech.
pub fn tts(self, tts: bool) -> Self {
set!(self, "tts", tts)
}
/// Embed rich content.
pub fn embed<F: FnOnce(EmbedBuilder) -> EmbedBuilder>(self, f: F) -> Self {
set!(self, "embed", EmbedBuilder::__build(f))
}
/// Restrict allowed mentions for this message.
pub fn allowed_mentions<F: FnOnce(AllowedMentions) -> AllowedMentions>(self, f: F) -> Self {
set!(self, "allowed_mentions", AllowedMentions::__build(f))
}
/// Reply to the given message, optionally mentioning the sender.
///
/// The given `message_id` must be in the same channel that this message is
/// being sent to.
pub fn reply(self, message_id: MessageId, mention: bool) -> Self {
set!(self, "message_reference", json! {{
"message_id": message_id,
}}).allowed_mentions(|b| b.replied_user(mention))
}
/// Change the message's flags.
///
/// Can only be set while editing. Only `SUPPRESS_EMBEDS` can be edited on
/// request.
pub fn flags(self, flags: MessageFlags) -> Self {
set!(self, "flags", flags)
}
// TODO: file, payload_json, message_reference
}
impl AllowedMentions {
// TODO: parse, roles, users
/// Set to `false` to disable mentioning a replied-to user.
pub fn replied_user(self, replied_user: bool) -> Self {
set!(self, "replied_user", replied_user)
}
}
impl EmbedBuilder {
/// Add the "title of embed".
pub fn title(self, title: &str) -> Self {
set!(self | bitrate | identifier_name |
builders.rs | #[inline(always)]
pub fn __build<F: FnOnce($name) -> $name>(f: F) -> $inner where $inner: Default {
Self::__apply(f, Default::default())
}
#[doc(hidden)]
pub fn __apply<F: FnOnce($name) -> $name>(f: F, inp: $inner) -> $inner {
f($name(inp)).0
}
/// Merge this builder's contents with another of the same type.
/// Keys in `other` will override those in `self`.
///
/// This method is intended to be used with deserialized
/// instances. Note that deserialization *does not* check that
/// the keys are valid for the relevant API call.
///
/// ```ignore
/// discord.edit_server(|b| b
/// .merge(serde_json::from_str(r#"{"name":"My Server"}"#)?))
/// ```
pub fn merge(mut self, other: $name) -> $name {
self.0.extend(other.0); self
}
}
)*
}
}
builder! {
/// Patch content for the `edit_server` call.
EditServer(Object);
/// Patch content for the `edit_channel` call.
EditChannel(Object);
/// Patch content for the `edit_member` call.
EditMember(Object);
/// Patch content for the `edit_profile` call.
EditProfile(Object);
/// Patch content for the `edit_user_profile` call.
EditUserProfile(Object);
/// Patch content for the `edit_role` call.
EditRole(Object);
/// Content for the `send_message` call.
SendMessage(Object);
/// `allowed_mentions` object for use within `send_message`.
AllowedMentions(Object);
/// Patch content for the `send_embed` call.
EmbedBuilder(Object);
/// Inner patch content for the `send_embed` call.
EmbedFooterBuilder(Object);
/// Inner patch content for the `send_embed` call.
EmbedAuthorBuilder(Object);
/// Inner patch content for the `send_embed` call.
EmbedFieldsBuilder(Vec<Value>);
}
macro_rules! set {
($self:ident, $key:expr, $($rest:tt)*) => {{
{let mut s = $self; s.0.insert($key.into(), json!($($rest)*)); s}
}}
}
impl EditServer {
/// Edit the server's name.
pub fn name(self, name: &str) -> Self {
set!(self, "name", name)
}
/// Edit the server's voice region.
pub fn region(self, region: &str) -> Self {
set!(self, "region", region)
}
/// Edit the server's icon. Use `None` to remove the icon.
pub fn icon(self, icon: Option<&str>) -> Self {
set!(self, "icon", icon)
}
/// Edit the server's AFK channel. Use `None` to select no AFK channel.
pub fn afk_channel(self, channel: Option<ChannelId>) -> Self {
set!(self, "afk_channel_id", channel)
}
/// Edit the server's AFK timeout.
pub fn afk_timeout(self, timeout: u64) -> Self {
set!(self, "afk_timeout", timeout)
}
/// Transfer ownership of the server to a new owner.
pub fn owner(self, owner: UserId) -> Self {
set!(self, "owner_id", owner.0)
}
/// Edit the verification level of the server.
pub fn verification_level(self, verification_level: VerificationLevel) -> Self {
set!(self, "verification_level", verification_level)
}
/// Edit the server's splash. Use `None` to remove the splash.
pub fn splash(self, splash: Option<&str>) -> Self {
set!(self, "splash", splash)
}
}
impl EditChannel {
/// Edit the channel's name.
pub fn name(self, name: &str) -> Self {
set!(self, "name", name)
}
/// Edit the text channel's topic.
pub fn topic(self, topic: &str) -> Self {
set!(self, "topic", topic)
}
/// Edit the channel's position in the list.
pub fn position(self, position: u64) -> Self {
set!(self, "position", position)
}
/// Edit the voice channel's bitrate.
pub fn bitrate(self, bitrate: u64) -> Self {
set!(self, "bitrate", bitrate)
}
/// Edit the voice channel's user limit. Zero (`0`) means unlimited.
pub fn user_limit(self, user_limit: u64) -> Self {
set!(self, "user_limit", user_limit)
}
}
impl EditMember {
/// Edit the member's nickname. Supply the empty string to remove a nickname.
pub fn nickname(self, nick: &str) -> Self {
set!(self, "nick", nick)
}
/// Edit whether the member is server-muted.
pub fn mute(self, mute: bool) -> Self {
set!(self, "mute", mute)
}
/// Edit whether the member is server-deafened.
pub fn deaf(self, deafen: bool) -> Self {
set!(self, "deaf", deafen)
}
/// Edit the member's assigned roles.
pub fn roles(self, roles: &[RoleId]) -> Self {
set!(self, "roles", roles)
}
/// Move the member to another voice channel.
pub fn channel(self, channel: ChannelId) -> Self {
set!(self, "channel_id", channel.0)
}
}
impl EditProfile {
/// Edit the user's username. Must be between 2 and 32 characters long.
pub fn username(self, username: &str) -> Self {
set!(self, "username", username)
}
/// Edit the user's avatar. Use `None` to remove the avatar.
pub fn avatar(self, icon: Option<&str>) -> Self {
set!(self, "avatar", icon)
}
}
impl EditUserProfile {
/// Provide the user's current password for authentication. Required if
/// the email or password is being changed.
pub fn password(self, password: &str) -> Self {
set!(self, "password", password)
}
/// Edit the user's email address.
pub fn email(self, email: &str) -> Self {
set!(self, "email", email)
}
/// Edit the user's password.
pub fn new_password(self, password: &str) -> Self {
set!(self, "new_password", password)
}
/// Edit the user's username. Must be between 2 and 32 characters long.
pub fn username(self, username: &str) -> Self {
set!(self, "username", username)
}
/// Edit the user's avatar. Use `None` to remove the avatar.
pub fn avatar(self, icon: Option<&str>) -> Self {
set!(self, "avatar", icon)
}
}
impl EditRole {
/// Edit the role's name. Supply the empty string to remove a name.
pub fn name(self, name: &str) -> Self {
set!(self, "name", name)
}
/// Edit the role's permissions.
pub fn permissions(self, permissions: Permissions) -> Self {
set!(self, "permissions", permissions)
}
/// Edit the role's color. Set to zero for default.
pub fn color(self, color: u64) -> Self {
set!(self, "color", color)
}
/// Edit the role's hoist status (whether the role should be displayed separately in the sidebar).
pub fn hoist(self, hoist: bool) -> Self {
set!(self, "hoist", hoist)
}
/// Edit the role's mentionability, if the role can be mentioned.
pub fn mentionable(self, mentionable: bool) -> Self {
set!(self, "mentionable", mentionable)
}
}
impl SendMessage {
/// Set the text content of the message.
pub fn content(self, content: &str) -> Self {
set!(self, "content", content)
}
/// Set a nonce that can be used for optimistic message sending.
pub fn nonce(self, nonce: &str) -> Self {
set!(self, "nonce", nonce)
}
/// Set to true to use text-to-speech.
pub fn tts(self, tts: bool) -> Self {
set!(self, "tts", tts)
}
/// Embed rich content.
pub fn embed<F: FnOnce(EmbedBuilder) -> EmbedBuilder>(self, f: F) -> Self {
set!(self, "embed", EmbedBuilder::__build(f))
}
/// Restrict allowed mentions for this message.
pub fn allowed_mentions<F: FnOnce(AllowedMentions) -> AllowedMentions>(self, f: F) -> Self {
set!(self, "allowed_mentions", AllowedMentions::__build(f))
}
/// Reply to the given message, optionally mentioning the sender.
///
/// The given `message_id` must be in the same channel that this message is
/// being sent to.
pub fn reply(self, message_id: MessageId, mention: bool) -> Self {
set!(self, |
impl $name {
#[doc(hidden)] | random_line_split | |
builders.rs | b| b
/// .merge(serde_json::from_str(r#"{"name":"My Server"}"#)?))
/// ```
pub fn merge(mut self, other: $name) -> $name {
self.0.extend(other.0); self
}
}
)*
}
}
builder! {
/// Patch content for the `edit_server` call.
EditServer(Object);
/// Patch content for the `edit_channel` call.
EditChannel(Object);
/// Patch content for the `edit_member` call.
EditMember(Object);
/// Patch content for the `edit_profile` call.
EditProfile(Object);
/// Patch content for the `edit_user_profile` call.
EditUserProfile(Object);
/// Patch content for the `edit_role` call.
EditRole(Object);
/// Content for the `send_message` call.
SendMessage(Object);
/// `allowed_mentions` object for use within `send_message`.
AllowedMentions(Object);
/// Patch content for the `send_embed` call.
EmbedBuilder(Object);
/// Inner patch content for the `send_embed` call.
EmbedFooterBuilder(Object);
/// Inner patch content for the `send_embed` call.
EmbedAuthorBuilder(Object);
/// Inner patch content for the `send_embed` call.
EmbedFieldsBuilder(Vec<Value>);
}
macro_rules! set {
($self:ident, $key:expr, $($rest:tt)*) => {{
{let mut s = $self; s.0.insert($key.into(), json!($($rest)*)); s}
}}
}
impl EditServer {
/// Edit the server's name.
pub fn name(self, name: &str) -> Self {
set!(self, "name", name)
}
/// Edit the server's voice region.
pub fn region(self, region: &str) -> Self {
set!(self, "region", region)
}
/// Edit the server's icon. Use `None` to remove the icon.
pub fn icon(self, icon: Option<&str>) -> Self {
set!(self, "icon", icon)
}
/// Edit the server's AFK channel. Use `None` to select no AFK channel.
pub fn afk_channel(self, channel: Option<ChannelId>) -> Self {
set!(self, "afk_channel_id", channel)
}
/// Edit the server's AFK timeout.
pub fn afk_timeout(self, timeout: u64) -> Self |
/// Transfer ownership of the server to a new owner.
pub fn owner(self, owner: UserId) -> Self {
set!(self, "owner_id", owner.0)
}
/// Edit the verification level of the server.
pub fn verification_level(self, verification_level: VerificationLevel) -> Self {
set!(self, "verification_level", verification_level)
}
/// Edit the server's splash. Use `None` to remove the splash.
pub fn splash(self, splash: Option<&str>) -> Self {
set!(self, "splash", splash)
}
}
impl EditChannel {
/// Edit the channel's name.
pub fn name(self, name: &str) -> Self {
set!(self, "name", name)
}
/// Edit the text channel's topic.
pub fn topic(self, topic: &str) -> Self {
set!(self, "topic", topic)
}
/// Edit the channel's position in the list.
pub fn position(self, position: u64) -> Self {
set!(self, "position", position)
}
/// Edit the voice channel's bitrate.
pub fn bitrate(self, bitrate: u64) -> Self {
set!(self, "bitrate", bitrate)
}
/// Edit the voice channel's user limit. Zero (`0`) means unlimited.
pub fn user_limit(self, user_limit: u64) -> Self {
set!(self, "user_limit", user_limit)
}
}
impl EditMember {
/// Edit the member's nickname. Supply the empty string to remove a nickname.
pub fn nickname(self, nick: &str) -> Self {
set!(self, "nick", nick)
}
/// Edit whether the member is server-muted.
pub fn mute(self, mute: bool) -> Self {
set!(self, "mute", mute)
}
/// Edit whether the member is server-deafened.
pub fn deaf(self, deafen: bool) -> Self {
set!(self, "deaf", deafen)
}
/// Edit the member's assigned roles.
pub fn roles(self, roles: &[RoleId]) -> Self {
set!(self, "roles", roles)
}
/// Move the member to another voice channel.
pub fn channel(self, channel: ChannelId) -> Self {
set!(self, "channel_id", channel.0)
}
}
impl EditProfile {
/// Edit the user's username. Must be between 2 and 32 characters long.
pub fn username(self, username: &str) -> Self {
set!(self, "username", username)
}
/// Edit the user's avatar. Use `None` to remove the avatar.
pub fn avatar(self, icon: Option<&str>) -> Self {
set!(self, "avatar", icon)
}
}
impl EditUserProfile {
/// Provide the user's current password for authentication. Required if
/// the email or password is being changed.
pub fn password(self, password: &str) -> Self {
set!(self, "password", password)
}
/// Edit the user's email address.
pub fn email(self, email: &str) -> Self {
set!(self, "email", email)
}
/// Edit the user's password.
pub fn new_password(self, password: &str) -> Self {
set!(self, "new_password", password)
}
/// Edit the user's username. Must be between 2 and 32 characters long.
pub fn username(self, username: &str) -> Self {
set!(self, "username", username)
}
/// Edit the user's avatar. Use `None` to remove the avatar.
pub fn avatar(self, icon: Option<&str>) -> Self {
set!(self, "avatar", icon)
}
}
impl EditRole {
/// Edit the role's name. Supply the empty string to remove a name.
pub fn name(self, name: &str) -> Self {
set!(self, "name", name)
}
/// Edit the role's permissions.
pub fn permissions(self, permissions: Permissions) -> Self {
set!(self, "permissions", permissions)
}
/// Edit the role's color. Set to zero for default.
pub fn color(self, color: u64) -> Self {
set!(self, "color", color)
}
/// Edit the role's hoist status (whether the role should be displayed separately in the sidebar).
pub fn hoist(self, hoist: bool) -> Self {
set!(self, "hoist", hoist)
}
/// Edit the role's mentionability, if the role can be mentioned.
pub fn mentionable(self, mentionable: bool) -> Self {
set!(self, "mentionable", mentionable)
}
}
impl SendMessage {
/// Set the text content of the message.
pub fn content(self, content: &str) -> Self {
set!(self, "content", content)
}
/// Set a nonce that can be used for optimistic message sending.
pub fn nonce(self, nonce: &str) -> Self {
set!(self, "nonce", nonce)
}
/// Set to true to use text-to-speech.
pub fn tts(self, tts: bool) -> Self {
set!(self, "tts", tts)
}
/// Embed rich content.
pub fn embed<F: FnOnce(EmbedBuilder) -> EmbedBuilder>(self, f: F) -> Self {
set!(self, "embed", EmbedBuilder::__build(f))
}
/// Restrict allowed mentions for this message.
pub fn allowed_mentions<F: FnOnce(AllowedMentions) -> AllowedMentions>(self, f: F) -> Self {
set!(self, "allowed_mentions", AllowedMentions::__build(f))
}
/// Reply to the given message, optionally mentioning the sender.
///
/// The given `message_id` must be in the same channel that this message is
/// being sent to.
pub fn reply(self, message_id: MessageId, mention: bool) -> Self {
set!(self, "message_reference", json! {{
"message_id": message_id,
}}).allowed_mentions(|b| b.replied_user(mention))
}
/// Change the message's flags.
///
/// Can only be set while editing. Only `SUPPRESS_EMBEDS` can be edited on
/// request.
pub fn flags(self, flags: MessageFlags) -> Self {
set!(self, "flags", flags)
}
// TODO: file, payload_json, message_reference
}
impl AllowedMentions {
// TODO: parse, roles, users
/// Set to `false` to disable mentioning a replied-to user.
pub fn replied_user(self, replied_user: bool) -> Self {
set!(self, "replied_user", replied_user)
}
}
impl EmbedBuilder {
/// Add the "title of embed".
pub fn title(self, title: &str) -> Self {
set!(self | {
set!(self, "afk_timeout", timeout)
} | identifier_body |
philo2.go | maxDinner int
currentlyEating int
tableCount int
chopsticksFree []bool
freeSeats []int
}
// philosopherData contains philosopher specific data. It is used within DinnerHost.
type philosopherData struct {
respChannel chan string
eating bool
dinnersSpent int
seat int
leftChopstick int
rightChopstick int
finishedAt time.Time
}
// NewDinnerHostPtr creates a new, initialized DinnerHost object and returns a pointer to it.
func NewDinnerHostPtr(tableCount, maxParallel, maxDinner int) *DinnerHost {
host := new(DinnerHost)
host.Init(tableCount, maxParallel, maxDinner)
return host
}
// Init is used to initialize the DinnerHost. Note: seats are randomized.
func (host *DinnerHost) Init(tableCount, maxParallel, maxDinner int) {
host.phiData = make(map[string]*philosopherData)
host.requestChannel = make(chan string)
host.finishChannel = make(chan string)
host.maxParallel = maxParallel
if host.maxParallel > tableCount {
host.maxParallel = tableCount
}
host.maxDinner = maxDinner
host.currentlyEating = 0
host.tableCount = tableCount
host.chopsticksFree = make([]bool, 5)
for i := range host.chopsticksFree {
host.chopsticksFree[i] = true
}
rand.Seed(time.Now().Unix())
host.freeSeats = rand.Perm(tableCount)
}
// newPhilosopherDataPtr creates and initializes a philosopherData object and returns a pointer to it.
func newPhilosopherDataPtr(respChannel chan string) *philosopherData {
pd := new(philosopherData)
pd.Init(respChannel)
return pd
}
// Init is used to initialize the philosopherData.
func (pd *philosopherData) Init(respChannel chan string) {
pd.respChannel = respChannel
pd.eating = false
pd.dinnersSpent = 0
pd.seat = -1
pd.leftChopstick = -1
pd.rightChopstick = -1
}
// ===== DinnerHost methods =====
// AskChannels can be used to obtain two common channels of the host, the first used to request dinner,
// the second used to indicate that someone finished eating.
func (host *DinnerHost) AskChannels() (chan string, chan string) {
return host.requestChannel, host.finishChannel
}
// Add registers the philosopher at the host. It first checks if they can join (table full, already at
// the table), then creates a new philosopher data record and assigns a seat to the
func (host *DinnerHost) Add(newPhilosopher Philosopher) bool {
newName := newPhilosopher.Name()
fmt.Println(newName + " WANTS TO JOIN THE TABLE.")
if len(host.phiData) >= host.tableCount {
fmt.Println(newName + " CANNOT JOIN: THE TABLE IS FULL.")
fmt.Println()
return false
}
if host.phiData[newName] != nil {
fmt.Println(newName + " CANNOT JOIN: ALREADY ON THE HOST'S LIST.")
fmt.Println()
return false
}
host.phiData[newName] = newPhilosopherDataPtr(newPhilosopher.RespChannel())
host.phiData[newName].TakeSeat(host.freeSeats[0])
host.freeSeats = host.freeSeats[1:]
fmt.Println(newName + " JOINED THE TABLE.")
fmt.Println()
return true
}
// Listen is the main function of the host, which handles dinner requests and finish
// indications coming from the philosophers on _requestChannel_ and _finishChannel_.
// Dinner request is authorized with a proper reply to a philosopher on its own
// dedicated response channel.
func (host *DinnerHost) Listen() {
name := ""
for {
select {
case name = <-host.requestChannel:
fmt.Println(name + " WOULD LIKE TO EAT.")
response := host.AllowEating(name)
kickOut := false
switch response {
case "OK":
fmt.Println(name + " STARTS EATING.")
case "E:CHOPSTICKS":
fmt.Println(name + " CANNOT EAT: REQUIRED CHOPSTICKS ARE NOT AVAILABLE.")
case "E:FULL":
fmt.Println(name + " CANNOT EAT: TWO OTHER PHILOSOPHERS ARE ALREADY EATING.")
case "E:JUSTFINISHED":
fmt.Println(name + " CANNOT EAT: JUST FINISHED THE PREVIOUS MEAL.")
case "E:EATING":
fmt.Println(name + " CANNOT EAT: ALREADY EATING.")
case "E:LIMIT":
fmt.Println(name + " CANNOT EAT: ALREADY HAD THREE DINNERS; MUST LEAVE.")
host.freeSeats = append(host.freeSeats, host.phiData[name].Seat())
kickOut = true
}
fmt.Println()
host.phiData[name].RespChannel() <- response
if kickOut {
delete(host.phiData, name)
}
case name = <-host.finishChannel:
host.SomeoneFinished(name)
}
host.PrintReport(false)
}
}
// AllowEating checks if the philosopher is allowed to have dinner. Criteria:
// * No more than _maxParallel_ philosophers can eat in parallel.
// * The philosopher is not already eating, do not exceed the number of allowed dinners and
// there's enough time elapsed since the philosopher's last dinner.
// * Both chopsticks corresponding to the philosopher's seat are free.
// The function also takes care of chopstick reservation. Note: when only either of the
// chopsticks is free, it is reserved in spite the philosopher cannot start eating.
func (host *DinnerHost) | (name string) string {
if host.currentlyEating >= host.maxParallel {
return "E:FULL"
}
data := host.phiData[name]
canEat := data.CanEat(host.maxDinner)
if canEat != "OK" {
return canEat
}
seatNumber := data.Seat()
leftChop := seatNumber
rightChop := (seatNumber + 1) % host.tableCount
if host.chopsticksFree[leftChop] {
host.chopsticksFree[leftChop] = false
data.SetLeftChop(leftChop)
}
if host.chopsticksFree[rightChop] {
host.chopsticksFree[rightChop] = false
data.SetRightChop(rightChop)
}
if !data.HasBothChopsticks() {
return "E:CHOPSTICKS"
}
host.currentlyEating++
data.StartedEating()
return "OK"
}
// SomeoneFinished takes the necessary actions when a philosopher finished eating.
func (host *DinnerHost) SomeoneFinished(name string) {
if host.currentlyEating > 0 {
host.currentlyEating--
}
host.chopsticksFree[host.phiData[name].LeftChopstick()] = true
host.chopsticksFree[host.phiData[name].RightChopstick()] = true
host.phiData[name].FinishedEating()
fmt.Println(name + " FINISHED EATING.")
fmt.Println()
}
// PrintReport shows the status of the philosophers in a verbose format.
func (host *DinnerHost) PrintReport(additionalInfo bool) {
names := make([]string, 0, len(host.phiData))
maxNameLen := 0
for i := range host.phiData {
names = append(names, i)
if len(i) > maxNameLen {
maxNameLen = len(i)
}
}
sort.Strings(names)
fmt.Printf("%*s | SEAT | LEFTCH. | RIGHTCH. | DINNERS | STATUS", maxNameLen, "NAME")
fmt.Println()
for _, name := range names {
data := host.phiData[name]
status := "waiting"
if data.eating == true {
status = "eating"
}
leftChopStr := strings.Replace(strconv.Itoa(data.LeftChopstick()), "-1", "X", 1)
rightChopStr := strings.Replace(strconv.Itoa(data.RightChopstick()), "-1", "X", 1)
repLine := fmt.Sprintf("%*s | %*d | %*s | %*s | %*d | %s",
maxNameLen, name, 4, data.seat, 7, leftChopStr,
8, rightChopStr, 7, data.dinnersSpent, status)
fmt.Println(repLine)
}
if additionalInfo {
freeChops := fmt.Sprintf("CHOPSTICKS:")
for chopInd, chopStat := range host.chopsticksFree {
status := "FREE"
if chopStat == false {
status = "RESERVED"
}
freeChops += fmt.Sprintf(" %d[%s]", chopInd, status)
}
fmt.Println(freeChops)
}
fmt.Println()
}
// ===== philosopherData methods
// CanEat checks if the philosopher specific criteria of eating is fulfilled.
func (pd *philosopherData) CanEat(maxDinner int) string {
switch {
case pd.eating:
return "E:EATING"
case pd.dinnersSpent >= maxDinner:
return "E:LIMIT"
case time.Now().Sub(pd.finishedAt) < (time.Duration(150) * time.Millisecond):
return "E:JUSTFINISHED"
}
return "OK"
| AllowEating | identifier_name |
philo2.go | maxDinner int
currentlyEating int
tableCount int
chopsticksFree []bool
freeSeats []int
}
// philosopherData contains philosopher specific data. It is used within DinnerHost.
type philosopherData struct {
respChannel chan string
eating bool
dinnersSpent int
seat int
leftChopstick int
rightChopstick int
finishedAt time.Time
}
// NewDinnerHostPtr creates a new, initialized DinnerHost object and returns a pointer to it.
func NewDinnerHostPtr(tableCount, maxParallel, maxDinner int) *DinnerHost {
host := new(DinnerHost)
host.Init(tableCount, maxParallel, maxDinner)
return host
}
// Init is used to initialize the DinnerHost. Note: seats are randomized.
func (host *DinnerHost) Init(tableCount, maxParallel, maxDinner int) {
host.phiData = make(map[string]*philosopherData)
host.requestChannel = make(chan string)
host.finishChannel = make(chan string)
host.maxParallel = maxParallel
if host.maxParallel > tableCount {
host.maxParallel = tableCount
}
host.maxDinner = maxDinner
host.currentlyEating = 0
host.tableCount = tableCount
host.chopsticksFree = make([]bool, 5)
for i := range host.chopsticksFree {
host.chopsticksFree[i] = true
}
rand.Seed(time.Now().Unix())
host.freeSeats = rand.Perm(tableCount)
}
// newPhilosopherDataPtr creates and initializes a philosopherData object and returns a pointer to it.
func newPhilosopherDataPtr(respChannel chan string) *philosopherData {
pd := new(philosopherData)
pd.Init(respChannel)
return pd
}
// Init is used to initialize the philosopherData.
func (pd *philosopherData) Init(respChannel chan string) {
pd.respChannel = respChannel
pd.eating = false
pd.dinnersSpent = 0
pd.seat = -1
pd.leftChopstick = -1
pd.rightChopstick = -1
}
// ===== DinnerHost methods =====
// AskChannels can be used to obtain two common channels of the host, the first used to request dinner,
// the second used to indicate that someone finished eating.
func (host *DinnerHost) AskChannels() (chan string, chan string) {
return host.requestChannel, host.finishChannel
}
// Add registers the philosopher at the host. It first checks if they can join (table full, already at
// the table), then creates a new philosopher data record and assigns a seat to the
func (host *DinnerHost) Add(newPhilosopher Philosopher) bool {
newName := newPhilosopher.Name()
fmt.Println(newName + " WANTS TO JOIN THE TABLE.")
if len(host.phiData) >= host.tableCount {
fmt.Println(newName + " CANNOT JOIN: THE TABLE IS FULL.")
fmt.Println()
return false
}
if host.phiData[newName] != nil {
fmt.Println(newName + " CANNOT JOIN: ALREADY ON THE HOST'S LIST.")
fmt.Println()
return false
}
host.phiData[newName] = newPhilosopherDataPtr(newPhilosopher.RespChannel())
host.phiData[newName].TakeSeat(host.freeSeats[0])
host.freeSeats = host.freeSeats[1:]
fmt.Println(newName + " JOINED THE TABLE.")
fmt.Println()
return true
}
// Listen is the main function of the host, which handles dinner requests and finish
// indications coming from the philosophers on _requestChannel_ and _finishChannel_.
// Dinner request is authorized with a proper reply to a philosopher on its own
// dedicated response channel.
func (host *DinnerHost) Listen() {
name := ""
for {
select {
case name = <-host.requestChannel:
fmt.Println(name + " WOULD LIKE TO EAT.")
response := host.AllowEating(name)
kickOut := false
switch response {
case "OK":
fmt.Println(name + " STARTS EATING.")
case "E:CHOPSTICKS":
fmt.Println(name + " CANNOT EAT: REQUIRED CHOPSTICKS ARE NOT AVAILABLE.")
case "E:FULL":
fmt.Println(name + " CANNOT EAT: TWO OTHER PHILOSOPHERS ARE ALREADY EATING.")
case "E:JUSTFINISHED":
fmt.Println(name + " CANNOT EAT: JUST FINISHED THE PREVIOUS MEAL.")
case "E:EATING":
fmt.Println(name + " CANNOT EAT: ALREADY EATING.")
case "E:LIMIT":
fmt.Println(name + " CANNOT EAT: ALREADY HAD THREE DINNERS; MUST LEAVE.")
host.freeSeats = append(host.freeSeats, host.phiData[name].Seat())
kickOut = true
}
fmt.Println()
host.phiData[name].RespChannel() <- response
if kickOut {
delete(host.phiData, name)
}
case name = <-host.finishChannel:
host.SomeoneFinished(name)
}
host.PrintReport(false)
}
}
// AllowEating checks if the philosopher is allowed to have dinner. Criteria:
// * No more than _maxParallel_ philosophers can eat in parallel.
// * The philosopher is not already eating, do not exceed the number of allowed dinners and
// there's enough time elapsed since the philosopher's last dinner.
// * Both chopsticks corresponding to the philosopher's seat are free.
// The function also takes care of chopstick reservation. Note: when only either of the
// chopsticks is free, it is reserved in spite the philosopher cannot start eating.
func (host *DinnerHost) AllowEating(name string) string {
if host.currentlyEating >= host.maxParallel {
return "E:FULL"
}
data := host.phiData[name]
canEat := data.CanEat(host.maxDinner)
if canEat != "OK" {
return canEat
}
seatNumber := data.Seat()
leftChop := seatNumber
rightChop := (seatNumber + 1) % host.tableCount
if host.chopsticksFree[leftChop] {
host.chopsticksFree[leftChop] = false
data.SetLeftChop(leftChop)
}
if host.chopsticksFree[rightChop] {
host.chopsticksFree[rightChop] = false
data.SetRightChop(rightChop)
}
if !data.HasBothChopsticks() {
return "E:CHOPSTICKS"
}
host.currentlyEating++
data.StartedEating()
return "OK"
}
// SomeoneFinished takes the necessary actions when a philosopher finished eating.
func (host *DinnerHost) SomeoneFinished(name string) {
if host.currentlyEating > 0 |
host.chopsticksFree[host.phiData[name].LeftChopstick()] = true
host.chopsticksFree[host.phiData[name].RightChopstick()] = true
host.phiData[name].FinishedEating()
fmt.Println(name + " FINISHED EATING.")
fmt.Println()
}
// PrintReport shows the status of the philosophers in a verbose format.
func (host *DinnerHost) PrintReport(additionalInfo bool) {
names := make([]string, 0, len(host.phiData))
maxNameLen := 0
for i := range host.phiData {
names = append(names, i)
if len(i) > maxNameLen {
maxNameLen = len(i)
}
}
sort.Strings(names)
fmt.Printf("%*s | SEAT | LEFTCH. | RIGHTCH. | DINNERS | STATUS", maxNameLen, "NAME")
fmt.Println()
for _, name := range names {
data := host.phiData[name]
status := "waiting"
if data.eating == true {
status = "eating"
}
leftChopStr := strings.Replace(strconv.Itoa(data.LeftChopstick()), "-1", "X", 1)
rightChopStr := strings.Replace(strconv.Itoa(data.RightChopstick()), "-1", "X", 1)
repLine := fmt.Sprintf("%*s | %*d | %*s | %*s | %*d | %s",
maxNameLen, name, 4, data.seat, 7, leftChopStr,
8, rightChopStr, 7, data.dinnersSpent, status)
fmt.Println(repLine)
}
if additionalInfo {
freeChops := fmt.Sprintf("CHOPSTICKS:")
for chopInd, chopStat := range host.chopsticksFree {
status := "FREE"
if chopStat == false {
status = "RESERVED"
}
freeChops += fmt.Sprintf(" %d[%s]", chopInd, status)
}
fmt.Println(freeChops)
}
fmt.Println()
}
// ===== philosopherData methods
// CanEat checks if the philosopher specific criteria of eating is fulfilled.
func (pd *philosopherData) CanEat(maxDinner int) string {
switch {
case pd.eating:
return "E:EATING"
case pd.dinnersSpent >= maxDinner:
return "E:LIMIT"
case time.Now().Sub(pd.finishedAt) < (time.Duration(150) * time.Millisecond):
return "E:JUSTFINISHED"
}
return "OK | {
host.currentlyEating--
} | conditional_block |
philo2.go | The host of the dinner.
--- */
// DinnerHost is the main data structure for the host of the dinner.
type DinnerHost struct {
phiData map[string]*philosopherData
requestChannel chan string
finishChannel chan string
maxParallel int
maxDinner int
currentlyEating int
tableCount int
chopsticksFree []bool
freeSeats []int
}
// philosopherData contains philosopher specific data. It is used within DinnerHost.
type philosopherData struct {
respChannel chan string
eating bool
dinnersSpent int
seat int
leftChopstick int
rightChopstick int
finishedAt time.Time
}
// NewDinnerHostPtr creates a new, initialized DinnerHost object and returns a pointer to it.
func NewDinnerHostPtr(tableCount, maxParallel, maxDinner int) *DinnerHost {
host := new(DinnerHost)
host.Init(tableCount, maxParallel, maxDinner)
return host
}
// Init is used to initialize the DinnerHost. Note: seats are randomized.
func (host *DinnerHost) Init(tableCount, maxParallel, maxDinner int) {
host.phiData = make(map[string]*philosopherData)
host.requestChannel = make(chan string)
host.finishChannel = make(chan string)
host.maxParallel = maxParallel
if host.maxParallel > tableCount {
host.maxParallel = tableCount
}
host.maxDinner = maxDinner
host.currentlyEating = 0
host.tableCount = tableCount
host.chopsticksFree = make([]bool, 5)
for i := range host.chopsticksFree {
host.chopsticksFree[i] = true
}
rand.Seed(time.Now().Unix())
host.freeSeats = rand.Perm(tableCount)
}
// newPhilosopherDataPtr creates and initializes a philosopherData object and returns a pointer to it.
func newPhilosopherDataPtr(respChannel chan string) *philosopherData {
pd := new(philosopherData)
pd.Init(respChannel)
return pd
}
// Init is used to initialize the philosopherData.
func (pd *philosopherData) Init(respChannel chan string) {
pd.respChannel = respChannel
pd.eating = false
pd.dinnersSpent = 0
pd.seat = -1
pd.leftChopstick = -1
pd.rightChopstick = -1
}
// ===== DinnerHost methods =====
// AskChannels can be used to obtain two common channels of the host, the first used to request dinner,
// the second used to indicate that someone finished eating.
func (host *DinnerHost) AskChannels() (chan string, chan string) {
return host.requestChannel, host.finishChannel
}
// Add registers the philosopher at the host. It first checks if they can join (table full, already at
// the table), then creates a new philosopher data record and assigns a seat to the
func (host *DinnerHost) Add(newPhilosopher Philosopher) bool {
newName := newPhilosopher.Name()
fmt.Println(newName + " WANTS TO JOIN THE TABLE.")
if len(host.phiData) >= host.tableCount {
fmt.Println(newName + " CANNOT JOIN: THE TABLE IS FULL.")
fmt.Println()
return false
}
if host.phiData[newName] != nil {
fmt.Println(newName + " CANNOT JOIN: ALREADY ON THE HOST'S LIST.")
fmt.Println()
return false
}
host.phiData[newName] = newPhilosopherDataPtr(newPhilosopher.RespChannel())
host.phiData[newName].TakeSeat(host.freeSeats[0])
host.freeSeats = host.freeSeats[1:]
fmt.Println(newName + " JOINED THE TABLE.")
fmt.Println()
return true
}
// Listen is the main function of the host, which handles dinner requests and finish
// indications coming from the philosophers on _requestChannel_ and _finishChannel_.
// Dinner request is authorized with a proper reply to a philosopher on its own
// dedicated response channel.
func (host *DinnerHost) Listen() {
name := ""
for {
select {
case name = <-host.requestChannel:
fmt.Println(name + " WOULD LIKE TO EAT.")
response := host.AllowEating(name)
kickOut := false
switch response {
case "OK":
fmt.Println(name + " STARTS EATING.")
case "E:CHOPSTICKS":
fmt.Println(name + " CANNOT EAT: REQUIRED CHOPSTICKS ARE NOT AVAILABLE.")
case "E:FULL":
fmt.Println(name + " CANNOT EAT: TWO OTHER PHILOSOPHERS ARE ALREADY EATING.")
case "E:JUSTFINISHED":
fmt.Println(name + " CANNOT EAT: JUST FINISHED THE PREVIOUS MEAL.")
case "E:EATING":
fmt.Println(name + " CANNOT EAT: ALREADY EATING.")
case "E:LIMIT":
fmt.Println(name + " CANNOT EAT: ALREADY HAD THREE DINNERS; MUST LEAVE.")
host.freeSeats = append(host.freeSeats, host.phiData[name].Seat())
kickOut = true
}
fmt.Println()
host.phiData[name].RespChannel() <- response
if kickOut {
delete(host.phiData, name)
}
case name = <-host.finishChannel:
host.SomeoneFinished(name)
}
host.PrintReport(false)
}
}
// AllowEating checks if the philosopher is allowed to have dinner. Criteria:
// * No more than _maxParallel_ philosophers can eat in parallel.
// * The philosopher is not already eating, do not exceed the number of allowed dinners and
// there's enough time elapsed since the philosopher's last dinner.
// * Both chopsticks corresponding to the philosopher's seat are free.
// The function also takes care of chopstick reservation. Note: when only either of the
// chopsticks is free, it is reserved in spite the philosopher cannot start eating.
func (host *DinnerHost) AllowEating(name string) string {
if host.currentlyEating >= host.maxParallel {
return "E:FULL"
}
data := host.phiData[name]
canEat := data.CanEat(host.maxDinner)
if canEat != "OK" {
return canEat
}
seatNumber := data.Seat()
leftChop := seatNumber
rightChop := (seatNumber + 1) % host.tableCount
if host.chopsticksFree[leftChop] {
host.chopsticksFree[leftChop] = false
data.SetLeftChop(leftChop)
}
if host.chopsticksFree[rightChop] {
host.chopsticksFree[rightChop] = false
data.SetRightChop(rightChop)
}
if !data.HasBothChopsticks() {
return "E:CHOPSTICKS"
}
host.currentlyEating++
data.StartedEating()
return "OK"
}
// SomeoneFinished takes the necessary actions when a philosopher finished eating.
func (host *DinnerHost) SomeoneFinished(name string) {
if host.currentlyEating > 0 {
host.currentlyEating--
}
host.chopsticksFree[host.phiData[name].LeftChopstick()] = true
host.chopsticksFree[host.phiData[name].RightChopstick()] = true
host.phiData[name].FinishedEating()
fmt.Println(name + " FINISHED EATING.")
fmt.Println()
}
// PrintReport shows the status of the philosophers in a verbose format.
func (host *DinnerHost) PrintReport(additionalInfo bool) {
names := make([]string, 0, len(host.phiData))
maxNameLen := 0
for i := range host.phiData {
names = append(names, i)
if len(i) > maxNameLen {
maxNameLen = len(i)
}
}
sort.Strings(names)
fmt.Printf("%*s | SEAT | LEFTCH. | RIGHTCH. | DINNERS | STATUS", maxNameLen, "NAME")
fmt.Println()
for _, name := range names {
data := host.phiData[name]
status := "waiting"
if data.eating == true {
status = "eating"
}
leftChopStr := strings.Replace(strconv.Itoa(data.LeftChopstick()), "-1", "X", 1)
rightChopStr := strings.Replace(strconv.Itoa(data.RightChopstick()), "-1", "X", 1)
repLine := fmt.Sprintf("%*s | %*d | %*s | %*s | %*d | %s",
maxNameLen, name, 4, data.seat, 7, leftChopStr,
8, rightChopStr, 7, data.dinnersSpent, status)
fmt.Println(repLine)
}
if additionalInfo {
freeChops := fmt.Sprintf("CHOPSTICKS:")
for chopInd, chopStat := range host.chopsticksFree {
status := "FREE"
if chopStat == false {
status = "RESERVED"
}
freeChops += fmt.Sprintf(" %d[%s]", chopInd, status)
}
fmt.Println(freeChops)
}
fmt.Println()
}
// ===== philosopherData methods
// CanEat checks if the philosopher specific criteria of eating is fulfilled.
func (pd *philosopherData) CanEat(maxDinner int) string {
switch {
case pd.eating:
return "E |
/* --- | random_line_split | |
philo2.go | Count
}
host.maxDinner = maxDinner
host.currentlyEating = 0
host.tableCount = tableCount
host.chopsticksFree = make([]bool, 5)
for i := range host.chopsticksFree {
host.chopsticksFree[i] = true
}
rand.Seed(time.Now().Unix())
host.freeSeats = rand.Perm(tableCount)
}
// newPhilosopherDataPtr creates and initializes a philosopherData object and returns a pointer to it.
func newPhilosopherDataPtr(respChannel chan string) *philosopherData {
pd := new(philosopherData)
pd.Init(respChannel)
return pd
}
// Init is used to initialize the philosopherData.
func (pd *philosopherData) Init(respChannel chan string) {
pd.respChannel = respChannel
pd.eating = false
pd.dinnersSpent = 0
pd.seat = -1
pd.leftChopstick = -1
pd.rightChopstick = -1
}
// ===== DinnerHost methods =====
// AskChannels can be used to obtain two common channels of the host, the first used to request dinner,
// the second used to indicate that someone finished eating.
func (host *DinnerHost) AskChannels() (chan string, chan string) {
return host.requestChannel, host.finishChannel
}
// Add registers the philosopher at the host. It first checks if they can join (table full, already at
// the table), then creates a new philosopher data record and assigns a seat to the
func (host *DinnerHost) Add(newPhilosopher Philosopher) bool {
newName := newPhilosopher.Name()
fmt.Println(newName + " WANTS TO JOIN THE TABLE.")
if len(host.phiData) >= host.tableCount {
fmt.Println(newName + " CANNOT JOIN: THE TABLE IS FULL.")
fmt.Println()
return false
}
if host.phiData[newName] != nil {
fmt.Println(newName + " CANNOT JOIN: ALREADY ON THE HOST'S LIST.")
fmt.Println()
return false
}
host.phiData[newName] = newPhilosopherDataPtr(newPhilosopher.RespChannel())
host.phiData[newName].TakeSeat(host.freeSeats[0])
host.freeSeats = host.freeSeats[1:]
fmt.Println(newName + " JOINED THE TABLE.")
fmt.Println()
return true
}
// Listen is the main function of the host, which handles dinner requests and finish
// indications coming from the philosophers on _requestChannel_ and _finishChannel_.
// Dinner request is authorized with a proper reply to a philosopher on its own
// dedicated response channel.
func (host *DinnerHost) Listen() {
name := ""
for {
select {
case name = <-host.requestChannel:
fmt.Println(name + " WOULD LIKE TO EAT.")
response := host.AllowEating(name)
kickOut := false
switch response {
case "OK":
fmt.Println(name + " STARTS EATING.")
case "E:CHOPSTICKS":
fmt.Println(name + " CANNOT EAT: REQUIRED CHOPSTICKS ARE NOT AVAILABLE.")
case "E:FULL":
fmt.Println(name + " CANNOT EAT: TWO OTHER PHILOSOPHERS ARE ALREADY EATING.")
case "E:JUSTFINISHED":
fmt.Println(name + " CANNOT EAT: JUST FINISHED THE PREVIOUS MEAL.")
case "E:EATING":
fmt.Println(name + " CANNOT EAT: ALREADY EATING.")
case "E:LIMIT":
fmt.Println(name + " CANNOT EAT: ALREADY HAD THREE DINNERS; MUST LEAVE.")
host.freeSeats = append(host.freeSeats, host.phiData[name].Seat())
kickOut = true
}
fmt.Println()
host.phiData[name].RespChannel() <- response
if kickOut {
delete(host.phiData, name)
}
case name = <-host.finishChannel:
host.SomeoneFinished(name)
}
host.PrintReport(false)
}
}
// AllowEating checks if the philosopher is allowed to have dinner. Criteria:
// * No more than _maxParallel_ philosophers can eat in parallel.
// * The philosopher is not already eating, do not exceed the number of allowed dinners and
// there's enough time elapsed since the philosopher's last dinner.
// * Both chopsticks corresponding to the philosopher's seat are free.
// The function also takes care of chopstick reservation. Note: when only either of the
// chopsticks is free, it is reserved in spite the philosopher cannot start eating.
func (host *DinnerHost) AllowEating(name string) string {
if host.currentlyEating >= host.maxParallel {
return "E:FULL"
}
data := host.phiData[name]
canEat := data.CanEat(host.maxDinner)
if canEat != "OK" {
return canEat
}
seatNumber := data.Seat()
leftChop := seatNumber
rightChop := (seatNumber + 1) % host.tableCount
if host.chopsticksFree[leftChop] {
host.chopsticksFree[leftChop] = false
data.SetLeftChop(leftChop)
}
if host.chopsticksFree[rightChop] {
host.chopsticksFree[rightChop] = false
data.SetRightChop(rightChop)
}
if !data.HasBothChopsticks() {
return "E:CHOPSTICKS"
}
host.currentlyEating++
data.StartedEating()
return "OK"
}
// SomeoneFinished takes the necessary actions when a philosopher finished eating.
func (host *DinnerHost) SomeoneFinished(name string) {
if host.currentlyEating > 0 {
host.currentlyEating--
}
host.chopsticksFree[host.phiData[name].LeftChopstick()] = true
host.chopsticksFree[host.phiData[name].RightChopstick()] = true
host.phiData[name].FinishedEating()
fmt.Println(name + " FINISHED EATING.")
fmt.Println()
}
// PrintReport shows the status of the philosophers in a verbose format.
func (host *DinnerHost) PrintReport(additionalInfo bool) {
names := make([]string, 0, len(host.phiData))
maxNameLen := 0
for i := range host.phiData {
names = append(names, i)
if len(i) > maxNameLen {
maxNameLen = len(i)
}
}
sort.Strings(names)
fmt.Printf("%*s | SEAT | LEFTCH. | RIGHTCH. | DINNERS | STATUS", maxNameLen, "NAME")
fmt.Println()
for _, name := range names {
data := host.phiData[name]
status := "waiting"
if data.eating == true {
status = "eating"
}
leftChopStr := strings.Replace(strconv.Itoa(data.LeftChopstick()), "-1", "X", 1)
rightChopStr := strings.Replace(strconv.Itoa(data.RightChopstick()), "-1", "X", 1)
repLine := fmt.Sprintf("%*s | %*d | %*s | %*s | %*d | %s",
maxNameLen, name, 4, data.seat, 7, leftChopStr,
8, rightChopStr, 7, data.dinnersSpent, status)
fmt.Println(repLine)
}
if additionalInfo {
freeChops := fmt.Sprintf("CHOPSTICKS:")
for chopInd, chopStat := range host.chopsticksFree {
status := "FREE"
if chopStat == false {
status = "RESERVED"
}
freeChops += fmt.Sprintf(" %d[%s]", chopInd, status)
}
fmt.Println(freeChops)
}
fmt.Println()
}
// ===== philosopherData methods
// CanEat checks if the philosopher specific criteria of eating is fulfilled.
func (pd *philosopherData) CanEat(maxDinner int) string {
switch {
case pd.eating:
return "E:EATING"
case pd.dinnersSpent >= maxDinner:
return "E:LIMIT"
case time.Now().Sub(pd.finishedAt) < (time.Duration(150) * time.Millisecond):
return "E:JUSTFINISHED"
}
return "OK"
}
// StartedEating updates philosopher specific data when the philosopher starts eating.
func (pd *philosopherData) StartedEating() {
pd.eating = true
pd.dinnersSpent++
}
// FinishedEating updates philosopher specific data when the philosopher finished eating.
func (pd *philosopherData) FinishedEating() {
pd.eating = false
pd.leftChopstick = -1
pd.rightChopstick = -1
pd.finishedAt = time.Now()
}
// RespChannel returns the philosopher's response channel.
func (pd *philosopherData) RespChannel() chan string {
return pd.respChannel
}
// LeftChopstick returns the ID of the philosopher's currently reserved left chopstick.
// If no left chopstick is reserved, then -1 is returned.
func (pd *philosopherData) LeftChopstick() int {
return pd.leftChopstick
}
// RightChopstick returns the ID of the philosopher's currently reserved right chopstick.
// If no right chopstick is reserved, then -1 is returned.
func (pd *philosopherData) RightChopstick() int | {
return pd.rightChopstick
} | identifier_body | |
verify_cert.rs | cert: &Cert,
time: time::Time,
sub_ca_count: usize,
) -> Result<(), Error> {
let used_as_ca = used_as_ca(&cert.ee_or_ca);
check_issuer_independent_properties(
cert,
time,
used_as_ca,
sub_ca_count,
required_eku_if_present,
)?;
// TODO: HPKP checks.
match used_as_ca {
UsedAsCa::Yes => {
const MAX_SUB_CA_COUNT: usize = 6;
if sub_ca_count >= MAX_SUB_CA_COUNT {
return Err(Error::UnknownIssuer);
}
}
UsedAsCa::No => {
assert_eq!(0, sub_ca_count);
}
}
// TODO: revocation.
match loop_while_non_fatal_error(trust_anchors, |trust_anchor: &TrustAnchor| {
let trust_anchor_subject = untrusted::Input::from(trust_anchor.subject);
if cert.issuer != trust_anchor_subject {
return Err(Error::UnknownIssuer);
}
let name_constraints = trust_anchor.name_constraints.map(untrusted::Input::from);
untrusted::read_all_optional(name_constraints, Error::BadDer, |value| {
name::check_name_constraints(value, &cert)
})?;
let trust_anchor_spki = untrusted::Input::from(trust_anchor.spki);
// TODO: check_distrust(trust_anchor_subject, trust_anchor_spki)?;
check_signatures(supported_sig_algs, cert, trust_anchor_spki)?;
Ok(())
}) {
Ok(()) => {
return Ok(());
}
Err(..) => {
// If the error is not fatal, then keep going.
}
}
loop_while_non_fatal_error(intermediate_certs, |cert_der| {
let potential_issuer =
cert::parse_cert(untrusted::Input::from(*cert_der), EndEntityOrCa::Ca(&cert))?;
if potential_issuer.subject != cert.issuer {
return Err(Error::UnknownIssuer);
}
// Prevent loops; see RFC 4158 section 5.2.
let mut prev = cert;
loop {
if potential_issuer.spki.value() == prev.spki.value()
&& potential_issuer.subject == prev.subject
{
return Err(Error::UnknownIssuer);
}
match &prev.ee_or_ca {
EndEntityOrCa::EndEntity => {
break;
}
EndEntityOrCa::Ca(child_cert) => {
prev = child_cert;
}
}
}
untrusted::read_all_optional(potential_issuer.name_constraints, Error::BadDer, |value| {
name::check_name_constraints(value, &cert)
})?;
let next_sub_ca_count = match used_as_ca {
UsedAsCa::No => sub_ca_count,
UsedAsCa::Yes => sub_ca_count + 1,
};
build_chain(
required_eku_if_present,
supported_sig_algs,
trust_anchors,
intermediate_certs,
&potential_issuer,
time,
next_sub_ca_count,
)
})
}
fn check_signatures(
supported_sig_algs: &[&SignatureAlgorithm],
cert_chain: &Cert,
trust_anchor_key: untrusted::Input,
) -> Result<(), Error> {
let mut spki_value = trust_anchor_key;
let mut cert = cert_chain;
loop {
signed_data::verify_signed_data(supported_sig_algs, spki_value, &cert.signed_data)?;
// TODO: check revocation
match &cert.ee_or_ca {
EndEntityOrCa::Ca(child_cert) => {
spki_value = cert.spki.value();
cert = child_cert;
}
EndEntityOrCa::EndEntity => {
break;
}
}
}
Ok(())
}
fn check_issuer_independent_properties(
cert: &Cert,
time: time::Time,
used_as_ca: UsedAsCa,
sub_ca_count: usize,
required_eku_if_present: KeyPurposeId,
) -> Result<(), Error> {
// TODO: check_distrust(trust_anchor_subject, trust_anchor_spki)?;
// TODO: Check signature algorithm like mozilla::pkix.
// TODO: Check SPKI like mozilla::pkix.
// TODO: check for active distrust like mozilla::pkix.
// See the comment in `remember_extension` for why we don't check the
// KeyUsage extension.
cert.validity
.read_all(Error::BadDer, |value| check_validity(value, time))?;
untrusted::read_all_optional(cert.basic_constraints, Error::BadDer, |value| {
check_basic_constraints(value, used_as_ca, sub_ca_count)
})?;
untrusted::read_all_optional(cert.eku, Error::BadDer, |value| {
check_eku(value, required_eku_if_present)
})?;
Ok(())
}
// https://tools.ietf.org/html/rfc5280#section-4.1.2.5
fn check_validity(input: &mut untrusted::Reader, time: time::Time) -> Result<(), Error> {
let not_before = der::time_choice(input)?;
let not_after = der::time_choice(input)?;
if not_before > not_after {
return Err(Error::InvalidCertValidity);
}
if time < not_before {
return Err(Error::CertNotValidYet);
}
if time > not_after {
return Err(Error::CertExpired);
}
// TODO: mozilla::pkix allows the TrustDomain to check not_before and
// not_after, to enforce things like a maximum validity period. We should
// do something similar.
Ok(())
}
#[derive(Clone, Copy)]
enum UsedAsCa {
Yes,
No,
}
fn used_as_ca(ee_or_ca: &EndEntityOrCa) -> UsedAsCa {
match ee_or_ca {
EndEntityOrCa::EndEntity => UsedAsCa::No,
EndEntityOrCa::Ca(..) => UsedAsCa::Yes,
}
}
// https://tools.ietf.org/html/rfc5280#section-4.2.1.9
fn | (
input: Option<&mut untrusted::Reader>,
used_as_ca: UsedAsCa,
sub_ca_count: usize,
) -> Result<(), Error> {
let (is_ca, path_len_constraint) = match input {
Some(input) => {
let is_ca = der::optional_boolean(input)?;
// https://bugzilla.mozilla.org/show_bug.cgi?id=985025: RFC 5280
// says that a certificate must not have pathLenConstraint unless
// it is a CA certificate, but some real-world end-entity
// certificates have pathLenConstraint.
let path_len_constraint = if !input.at_end() {
let value = der::small_nonnegative_integer(input)?;
Some(usize::from(value))
} else {
None
};
(is_ca, path_len_constraint)
}
None => (false, None),
};
match (used_as_ca, is_ca, path_len_constraint) {
(UsedAsCa::No, true, _) => Err(Error::CaUsedAsEndEntity),
(UsedAsCa::Yes, false, _) => Err(Error::EndEntityUsedAsCa),
(UsedAsCa::Yes, true, Some(len)) if sub_ca_count > len => {
Err(Error::PathLenConstraintViolated)
}
_ => Ok(()),
}
}
#[derive(Clone, Copy)]
pub struct KeyPurposeId {
oid_value: untrusted::Input<'static>,
}
// id-pkix OBJECT IDENTIFIER ::= { 1 3 6 1 5 5 7 }
// id-kp OBJECT IDENTIFIER ::= { id-pkix 3 }
// id-kp-serverAuth OBJECT IDENTIFIER ::= { id-kp 1 }
#[allow(clippy::identity_op)] // TODO: Make this clearer
pub static EKU_SERVER_AUTH: KeyPurposeId = KeyPurposeId {
oid_value: untrusted::Input::from(&[(40 * 1) + 3, 6, 1, 5, 5, 7, 3, 1]),
};
// id-kp-clientAuth OBJECT IDENTIFIER ::= { id-kp 2 }
#[allow(clippy::identity_op)] // TODO: Make this clearer
pub static EKU_CLIENT_AUTH: KeyPurposeId = KeyPurposeId {
oid_value: untrusted::Input::from(&[(40 * 1) + 3, 6, 1, 5, 5, 7, 3, 2]),
};
// id-kp-OCSPSigning OBJECT IDENTIFIER ::= { id-kp 9 }
#[allow(clippy::identity_op)] // TODO: Make this clearer
pub static EKU_OCSP_SIGNING: KeyPurposeId = KeyPurposeId {
oid_value: untrusted::Input::from(&[(40 * 1) + 3, 6, 1, 5, 5, 7, 3, 9]),
};
// https://tools.ietf.org/html/rfc5280#section-4.2.1.12
//
// Notable Differences from RFC 5280:
//
// * We follow the convention established by Microsoft's implementation and
// mozilla::pkix | check_basic_constraints | identifier_name |
verify_cert.rs | cert: &Cert,
time: time::Time,
sub_ca_count: usize,
) -> Result<(), Error> {
let used_as_ca = used_as_ca(&cert.ee_or_ca);
check_issuer_independent_properties(
cert,
time,
used_as_ca,
sub_ca_count,
required_eku_if_present,
)?;
// TODO: HPKP checks.
match used_as_ca {
UsedAsCa::Yes => {
const MAX_SUB_CA_COUNT: usize = 6;
if sub_ca_count >= MAX_SUB_CA_COUNT {
return Err(Error::UnknownIssuer);
}
}
UsedAsCa::No => {
assert_eq!(0, sub_ca_count);
}
}
// TODO: revocation.
match loop_while_non_fatal_error(trust_anchors, |trust_anchor: &TrustAnchor| {
let trust_anchor_subject = untrusted::Input::from(trust_anchor.subject);
if cert.issuer != trust_anchor_subject {
return Err(Error::UnknownIssuer);
}
let name_constraints = trust_anchor.name_constraints.map(untrusted::Input::from);
untrusted::read_all_optional(name_constraints, Error::BadDer, |value| {
name::check_name_constraints(value, &cert)
})?;
let trust_anchor_spki = untrusted::Input::from(trust_anchor.spki);
// TODO: check_distrust(trust_anchor_subject, trust_anchor_spki)?;
check_signatures(supported_sig_algs, cert, trust_anchor_spki)?;
Ok(())
}) {
Ok(()) => {
return Ok(());
}
Err(..) => {
// If the error is not fatal, then keep going.
}
}
loop_while_non_fatal_error(intermediate_certs, |cert_der| {
let potential_issuer =
cert::parse_cert(untrusted::Input::from(*cert_der), EndEntityOrCa::Ca(&cert))?;
if potential_issuer.subject != cert.issuer {
return Err(Error::UnknownIssuer);
}
// Prevent loops; see RFC 4158 section 5.2.
let mut prev = cert;
loop {
if potential_issuer.spki.value() == prev.spki.value()
&& potential_issuer.subject == prev.subject
{
return Err(Error::UnknownIssuer);
}
match &prev.ee_or_ca {
EndEntityOrCa::EndEntity => {
break;
}
EndEntityOrCa::Ca(child_cert) => {
prev = child_cert;
}
}
}
untrusted::read_all_optional(potential_issuer.name_constraints, Error::BadDer, |value| {
name::check_name_constraints(value, &cert)
})?;
let next_sub_ca_count = match used_as_ca {
UsedAsCa::No => sub_ca_count,
UsedAsCa::Yes => sub_ca_count + 1,
};
build_chain(
required_eku_if_present,
supported_sig_algs,
trust_anchors,
intermediate_certs,
&potential_issuer,
time,
next_sub_ca_count,
)
})
}
fn check_signatures(
supported_sig_algs: &[&SignatureAlgorithm],
cert_chain: &Cert,
trust_anchor_key: untrusted::Input,
) -> Result<(), Error> {
let mut spki_value = trust_anchor_key;
let mut cert = cert_chain;
loop {
signed_data::verify_signed_data(supported_sig_algs, spki_value, &cert.signed_data)?;
// TODO: check revocation
match &cert.ee_or_ca {
EndEntityOrCa::Ca(child_cert) => {
spki_value = cert.spki.value();
cert = child_cert;
}
EndEntityOrCa::EndEntity => {
break;
}
}
}
Ok(())
}
fn check_issuer_independent_properties(
cert: &Cert,
time: time::Time,
used_as_ca: UsedAsCa,
sub_ca_count: usize,
required_eku_if_present: KeyPurposeId,
) -> Result<(), Error> |
// https://tools.ietf.org/html/rfc5280#section-4.1.2.5
fn check_validity(input: &mut untrusted::Reader, time: time::Time) -> Result<(), Error> {
let not_before = der::time_choice(input)?;
let not_after = der::time_choice(input)?;
if not_before > not_after {
return Err(Error::InvalidCertValidity);
}
if time < not_before {
return Err(Error::CertNotValidYet);
}
if time > not_after {
return Err(Error::CertExpired);
}
// TODO: mozilla::pkix allows the TrustDomain to check not_before and
// not_after, to enforce things like a maximum validity period. We should
// do something similar.
Ok(())
}
#[derive(Clone, Copy)]
enum UsedAsCa {
Yes,
No,
}
fn used_as_ca(ee_or_ca: &EndEntityOrCa) -> UsedAsCa {
match ee_or_ca {
EndEntityOrCa::EndEntity => UsedAsCa::No,
EndEntityOrCa::Ca(..) => UsedAsCa::Yes,
}
}
// https://tools.ietf.org/html/rfc5280#section-4.2.1.9
fn check_basic_constraints(
input: Option<&mut untrusted::Reader>,
used_as_ca: UsedAsCa,
sub_ca_count: usize,
) -> Result<(), Error> {
let (is_ca, path_len_constraint) = match input {
Some(input) => {
let is_ca = der::optional_boolean(input)?;
// https://bugzilla.mozilla.org/show_bug.cgi?id=985025: RFC 5280
// says that a certificate must not have pathLenConstraint unless
// it is a CA certificate, but some real-world end-entity
// certificates have pathLenConstraint.
let path_len_constraint = if !input.at_end() {
let value = der::small_nonnegative_integer(input)?;
Some(usize::from(value))
} else {
None
};
(is_ca, path_len_constraint)
}
None => (false, None),
};
match (used_as_ca, is_ca, path_len_constraint) {
(UsedAsCa::No, true, _) => Err(Error::CaUsedAsEndEntity),
(UsedAsCa::Yes, false, _) => Err(Error::EndEntityUsedAsCa),
(UsedAsCa::Yes, true, Some(len)) if sub_ca_count > len => {
Err(Error::PathLenConstraintViolated)
}
_ => Ok(()),
}
}
#[derive(Clone, Copy)]
pub struct KeyPurposeId {
oid_value: untrusted::Input<'static>,
}
// id-pkix OBJECT IDENTIFIER ::= { 1 3 6 1 5 5 7 }
// id-kp OBJECT IDENTIFIER ::= { id-pkix 3 }
// id-kp-serverAuth OBJECT IDENTIFIER ::= { id-kp 1 }
#[allow(clippy::identity_op)] // TODO: Make this clearer
pub static EKU_SERVER_AUTH: KeyPurposeId = KeyPurposeId {
oid_value: untrusted::Input::from(&[(40 * 1) + 3, 6, 1, 5, 5, 7, 3, 1]),
};
// id-kp-clientAuth OBJECT IDENTIFIER ::= { id-kp 2 }
#[allow(clippy::identity_op)] // TODO: Make this clearer
pub static EKU_CLIENT_AUTH: KeyPurposeId = KeyPurposeId {
oid_value: untrusted::Input::from(&[(40 * 1) + 3, 6, 1, 5, 5, 7, 3, 2]),
};
// id-kp-OCSPSigning OBJECT IDENTIFIER ::= { id-kp 9 }
#[allow(clippy::identity_op)] // TODO: Make this clearer
pub static EKU_OCSP_SIGNING: KeyPurposeId = KeyPurposeId {
oid_value: untrusted::Input::from(&[(40 * 1) + 3, 6, 1, 5, 5, 7, 3, 9]),
};
// https://tools.ietf.org/html/rfc5280#section-4.2.1.12
//
// Notable Differences from RFC 5280:
//
// * We follow the convention established by Microsoft's implementation and
// mozilla::pk | {
// TODO: check_distrust(trust_anchor_subject, trust_anchor_spki)?;
// TODO: Check signature algorithm like mozilla::pkix.
// TODO: Check SPKI like mozilla::pkix.
// TODO: check for active distrust like mozilla::pkix.
// See the comment in `remember_extension` for why we don't check the
// KeyUsage extension.
cert.validity
.read_all(Error::BadDer, |value| check_validity(value, time))?;
untrusted::read_all_optional(cert.basic_constraints, Error::BadDer, |value| {
check_basic_constraints(value, used_as_ca, sub_ca_count)
})?;
untrusted::read_all_optional(cert.eku, Error::BadDer, |value| {
check_eku(value, required_eku_if_present)
})?;
Ok(())
} | identifier_body |
verify_cert.rs | cert: &Cert,
time: time::Time,
sub_ca_count: usize,
) -> Result<(), Error> {
let used_as_ca = used_as_ca(&cert.ee_or_ca);
check_issuer_independent_properties(
cert,
time,
used_as_ca,
sub_ca_count,
required_eku_if_present,
)?;
// TODO: HPKP checks.
match used_as_ca {
UsedAsCa::Yes => {
const MAX_SUB_CA_COUNT: usize = 6;
if sub_ca_count >= MAX_SUB_CA_COUNT {
return Err(Error::UnknownIssuer);
}
}
UsedAsCa::No => {
assert_eq!(0, sub_ca_count);
}
}
// TODO: revocation.
match loop_while_non_fatal_error(trust_anchors, |trust_anchor: &TrustAnchor| {
let trust_anchor_subject = untrusted::Input::from(trust_anchor.subject);
if cert.issuer != trust_anchor_subject {
return Err(Error::UnknownIssuer);
}
let name_constraints = trust_anchor.name_constraints.map(untrusted::Input::from);
untrusted::read_all_optional(name_constraints, Error::BadDer, |value| {
name::check_name_constraints(value, &cert)
})?;
let trust_anchor_spki = untrusted::Input::from(trust_anchor.spki);
// TODO: check_distrust(trust_anchor_subject, trust_anchor_spki)?;
check_signatures(supported_sig_algs, cert, trust_anchor_spki)?;
Ok(())
}) {
Ok(()) => {
return Ok(());
}
Err(..) => {
// If the error is not fatal, then keep going.
}
}
loop_while_non_fatal_error(intermediate_certs, |cert_der| {
let potential_issuer =
cert::parse_cert(untrusted::Input::from(*cert_der), EndEntityOrCa::Ca(&cert))?;
if potential_issuer.subject != cert.issuer {
return Err(Error::UnknownIssuer);
}
// Prevent loops; see RFC 4158 section 5.2.
let mut prev = cert;
loop {
if potential_issuer.spki.value() == prev.spki.value()
&& potential_issuer.subject == prev.subject
{
return Err(Error::UnknownIssuer);
}
match &prev.ee_or_ca {
EndEntityOrCa::EndEntity => {
break;
}
EndEntityOrCa::Ca(child_cert) => {
prev = child_cert;
}
}
}
untrusted::read_all_optional(potential_issuer.name_constraints, Error::BadDer, |value| {
name::check_name_constraints(value, &cert)
})?;
let next_sub_ca_count = match used_as_ca {
UsedAsCa::No => sub_ca_count,
UsedAsCa::Yes => sub_ca_count + 1,
};
build_chain(
required_eku_if_present,
supported_sig_algs,
trust_anchors,
intermediate_certs,
&potential_issuer,
time,
next_sub_ca_count,
)
})
}
fn check_signatures(
supported_sig_algs: &[&SignatureAlgorithm],
cert_chain: &Cert,
trust_anchor_key: untrusted::Input,
) -> Result<(), Error> {
let mut spki_value = trust_anchor_key;
let mut cert = cert_chain;
loop {
signed_data::verify_signed_data(supported_sig_algs, spki_value, &cert.signed_data)?;
// TODO: check revocation
match &cert.ee_or_ca {
EndEntityOrCa::Ca(child_cert) => {
spki_value = cert.spki.value();
cert = child_cert;
}
EndEntityOrCa::EndEntity => {
break;
}
}
}
Ok(())
}
fn check_issuer_independent_properties(
cert: &Cert,
time: time::Time,
used_as_ca: UsedAsCa,
sub_ca_count: usize,
required_eku_if_present: KeyPurposeId,
) -> Result<(), Error> {
// TODO: check_distrust(trust_anchor_subject, trust_anchor_spki)?;
// TODO: Check signature algorithm like mozilla::pkix.
// TODO: Check SPKI like mozilla::pkix.
// TODO: check for active distrust like mozilla::pkix.
// See the comment in `remember_extension` for why we don't check the
// KeyUsage extension.
cert.validity
.read_all(Error::BadDer, |value| check_validity(value, time))?;
untrusted::read_all_optional(cert.basic_constraints, Error::BadDer, |value| {
check_basic_constraints(value, used_as_ca, sub_ca_count)
})?;
untrusted::read_all_optional(cert.eku, Error::BadDer, |value| {
check_eku(value, required_eku_if_present)
})?;
Ok(())
}
// https://tools.ietf.org/html/rfc5280#section-4.1.2.5
fn check_validity(input: &mut untrusted::Reader, time: time::Time) -> Result<(), Error> {
let not_before = der::time_choice(input)?;
let not_after = der::time_choice(input)?;
if not_before > not_after {
return Err(Error::InvalidCertValidity);
}
if time < not_before {
return Err(Error::CertNotValidYet);
}
if time > not_after {
return Err(Error::CertExpired);
}
// TODO: mozilla::pkix allows the TrustDomain to check not_before and
// not_after, to enforce things like a maximum validity period. We should
// do something similar.
Ok(())
}
#[derive(Clone, Copy)]
enum UsedAsCa {
Yes,
No,
}
fn used_as_ca(ee_or_ca: &EndEntityOrCa) -> UsedAsCa {
match ee_or_ca {
EndEntityOrCa::EndEntity => UsedAsCa::No,
EndEntityOrCa::Ca(..) => UsedAsCa::Yes,
}
}
// https://tools.ietf.org/html/rfc5280#section-4.2.1.9
fn check_basic_constraints(
input: Option<&mut untrusted::Reader>,
used_as_ca: UsedAsCa,
sub_ca_count: usize,
) -> Result<(), Error> {
let (is_ca, path_len_constraint) = match input {
Some(input) => {
let is_ca = der::optional_boolean(input)?;
// https://bugzilla.mozilla.org/show_bug.cgi?id=985025: RFC 5280
// says that a certificate must not have pathLenConstraint unless
// it is a CA certificate, but some real-world end-entity
// certificates have pathLenConstraint.
let path_len_constraint = if !input.at_end() {
let value = der::small_nonnegative_integer(input)?;
Some(usize::from(value))
} else {
None
};
(is_ca, path_len_constraint)
}
None => (false, None),
};
match (used_as_ca, is_ca, path_len_constraint) {
(UsedAsCa::No, true, _) => Err(Error::CaUsedAsEndEntity),
(UsedAsCa::Yes, false, _) => Err(Error::EndEntityUsedAsCa),
(UsedAsCa::Yes, true, Some(len)) if sub_ca_count > len => {
Err(Error::PathLenConstraintViolated)
}
_ => Ok(()),
}
}
#[derive(Clone, Copy)]
pub struct KeyPurposeId {
oid_value: untrusted::Input<'static>,
}
// id-pkix OBJECT IDENTIFIER ::= { 1 3 6 1 5 5 7 }
// id-kp OBJECT IDENTIFIER ::= { id-pkix 3 }
// id-kp-serverAuth OBJECT IDENTIFIER ::= { id-kp 1 }
#[allow(clippy::identity_op)] // TODO: Make this clearer
pub static EKU_SERVER_AUTH: KeyPurposeId = KeyPurposeId {
oid_value: untrusted::Input::from(&[(40 * 1) + 3, 6, 1, 5, 5, 7, 3, 1]),
};
// id-kp-clientAuth OBJECT IDENTIFIER ::= { id-kp 2 }
#[allow(clippy::identity_op)] // TODO: Make this clearer
pub static EKU_CLIENT_AUTH: KeyPurposeId = KeyPurposeId {
oid_value: untrusted::Input::from(&[(40 * 1) + 3, 6, 1, 5, 5, 7, 3, 2]),
};
// id-kp-OCSPSigning OBJECT IDENTIFIER ::= { id-kp 9 }
#[allow(clippy::identity_op)] // TODO: Make this clearer
pub static EKU_OCSP_SIGNING: KeyPurposeId = KeyPurposeId {
oid_value: untrusted::Input::from(&[(40 * 1) + 3, 6, 1, 5, 5, 7, 3, 9]),
};
// https://tools.ietf.org/html/rfc5280#section-4.2.1.12
// | // * We follow the convention established by Microsoft's implementation and
// mozilla::pkix | // Notable Differences from RFC 5280:
// | random_line_split |
move_vm.go | enabled clones to image service will not be deleted")
fmt.Println("--delete Deletes source VM - ARE YOU REALLY SURE?")
fmt.Println("--overwrite Overwrites target VM/Images (delete and creates new one)")
fmt.Println("--help List this help")
fmt.Println("--version Show the move_vm version")
fmt.Println("")
fmt.Println("Example:")
fmt.Println("")
fmt.Println("move_vm --host=NTNX-CVM --username=admin --password=nutanix/4u --vm-name=MyVM --container=ISO")
fmt.Println("move_vm --host=NTNX-CVM --username=admin --password=nutanix/4u --vm-name=MyVM --vdisk-mapping=scsi.0/ISO,scsi.1/Prod2")
fmt.Println("")
}
// parse --vdisk-mapping or --container and checks if all container exist
func parseVdiskMapping(n *ntnxAPI.NTNXConnection) ([]ntnxAPI.VMDisks, error) {
defer func() {
if err := recover(); err != nil {
log.Fatal("--vdisk-mapping seems not to have right format")
os.Exit(1)
}
}()
var vdiskMappings []ntnxAPI.VMDisks
var VMDisk ntnxAPI.VMDisks
result := strings.Split(*vdiskMapping, ",")
// add Mappings
for i := range result {
res := strings.Split(result[i], "/")
resAddr := strings.Split(res[0], ".")
VMDisk.Addr.DeviceBus = resAddr[0]
VMDisk.Addr.DeviceIndex, _ = strconv.Atoi(resAddr[1])
// check if right format is used
if !(VMDisk.Addr.DeviceBus == "scsi" || VMDisk.Addr.DeviceBus == "pci" || VMDisk.Addr.DeviceBus == "ide") {
log.Error("--vdisk-mapping seems not to have right format")
os.Exit(1)
}
if !(VMDisk.Addr.DeviceIndex >= 0 && VMDisk.Addr.DeviceIndex <= 255) {
log.Error("--vdisk-mapping seems not to have right format")
os.Exit(1)
}
if res[1] != "EMPTY" {
containerUUID, err := ntnxAPI.GetContainerUUIDbyName(n, res[1])
if err != nil {
os.Exit(1)
}
VMDisk.ContainerUUID = containerUUID
}
vdiskMappings = append(vdiskMappings, VMDisk)
}
return vdiskMappings, nil
}
func checkVdiskMapping(v ntnxAPI.VMJSONAHV, VdiskMapping []ntnxAPI.VMDisks) |
}
func evaluateFlags() (ntnxAPI.NTNXConnection, ntnxAPI.VMJSONAHV, ntnxAPI.VMJSONAHV, []ntnxAPI.VMDisks) {
//help
if *help {
printHelp()
os.Exit(0)
}
//version
if *version {
fmt.Println("Version: " + appVersion)
os.Exit(0)
}
//debug
if *debug {
log.SetLevel(log.DebugLevel)
} else {
log.SetLevel(log.InfoLevel)
}
//delete
if *delete {
reader := bufio.NewReader(os.Stdin)
fmt.Println("THIS WILL DELETE source VM: " + *vmName)
fmt.Print("If you want to continue type YES: ")
text, _ := reader.ReadString('\n')
fmt.Println(text)
if strings.TrimRight(text, "\n") != "YES" {
os.Exit(0)
}
}
//host
if *host == "" {
log.Warn("mandatory option '--host=' is not set")
os.Exit(0)
}
//username
if *username == "" {
log.Warn("option '--username=' is not set Default: admin is used")
*username = "admin"
}
//password
if *password == "" {
log.Warn("option '--password=' is not set Default: nutanix/4u is used")
*password = "nutanix/4u"
}
//vm-name
if *vmName == "" {
log.Warn("mandatory option '--vm-name=' is not set")
os.Exit(0)
}
var vm ntnxAPI.VMJSONAHV
vm.Config.Name = *vmName
if *newVMName == "" {
*newVMName = *vmName
}
var vNew ntnxAPI.VMJSONAHV
vNew.Config.Name = *newVMName
var n ntnxAPI.NTNXConnection
n.NutanixHost = *host
n.Username = *username
n.Password = *password
ntnxAPI.EncodeCredentials(&n)
ntnxAPI.CreateHTTPClient(&n)
ntnxAPI.NutanixCheckCredentials(&n)
// list mapping if specified
if *listMapping {
var listMappingStr string
exist, _ := ntnxAPI.VMExist(&n, vm.Config.Name)
if exist {
vm, _ = ntnxAPI.GetVMbyName(&n, &vm)
for i, elem := range vm.Config.VMDisks {
if !elem.IsEmpty {
containerName, _ := ntnxAPI.GetContainerNamebyUUID(&n, elem.ContainerUUID)
listMappingStr = listMappingStr + elem.Addr.DeviceBus + "." + strconv.Itoa(elem.Addr.DeviceIndex) + "/" + containerName
} else {
listMappingStr = listMappingStr + elem.Addr.DeviceBus + "." + strconv.Itoa(elem.Addr.DeviceIndex) + "/EMPTY"
}
if i < len(vm.Config.VMDisks)-1 {
listMappingStr = listMappingStr + ","
}
}
fmt.Println(listMappingStr)
os.Exit(0)
}
}
// both options set container and vdisk-mapping
if *container != "" && *vdiskMapping != "" {
log.Warn("Option --container and --vdisk-mapping are set. Only one of them is allowed")
os.Exit(0)
}
// none options set container and vdisk-mapping
if *container == "" && *vdiskMapping == "" {
log.Warn("None of --container or --vdisk-mapping is set. One is mandatory")
os.Exit(0)
}
// If container is not found exit
if *container != "" {
_, err := ntnxAPI.GetContainerUUIDbyName(&n, *container)
if err != nil {
os.Exit(1)
}
}
var VdiskMapping []ntnxAPI.VMDisks
var err error
// If container is not found exit
if *vdiskMapping != "" {
VdiskMapping, err = parseVdiskMapping(&n)
if err != nil {
os.Exit(1)
}
}
return n, vm, vNew, VdiskMapping
}
func main() {
flag.Usage = printHelp
flag.Parse()
customFormatter := new(log.TextFormatter)
customFormatter.TimestampFormat = "2006-01-02 15:04:05"
log.SetFormatter(customFormatter)
customFormatter.FullTimestamp = true
var n ntnxAPI.NTNXConnection
var v ntnxAPI.VMJSONAHV
var vNew ntnxAPI.VMJSONAHV
var d ntnxAPI.VDiskJSONREST
var net ntnxAPI.NetworkREST
var im ntnxAPI.ImageJSONAHV
var taskUUID ntnxAPI.TaskUUID
var VdiskMapping []ntnxAPI.VMDisks
var existV bool
var existVNew bool
n, v, vNew, VdiskMapping = evaluateFlags()
/*
Short description what will be done
1. Upload vDisk from Source VM to Image Service. This is needed while a direct copy is not possible and wait
2. Create VM and wait
3. Clone Images to Disks and wait
4. Add network
5. delete images
*/
/*To-DO:
2. show_progress
*/
//check if source VM exists
existV, _ = ntnxAPI.VMExist(&n, v.Config.Name)
if existV {
//check if new VM exists
existVNew, _ = ntnxAPI.VMExist(&n, vNew.Config.Name)
if existVNew {
if vNew.Config.Name != v.Config.Name {
log.Warn("VM " + vNew.Config.Name + " already exists")
if !*overwrite {
os.Exit(0)
| {
defer func() {
if err := recover(); err != nil {
log.Fatal("--vdisk-mapping is not correct")
os.Exit(1)
}
}()
for i, elem := range v.Config.VMDisks {
if elem.Addr.DeviceBus != VdiskMapping[i].Addr.DeviceBus || elem.Addr.DeviceIndex != VdiskMapping[i].Addr.DeviceIndex {
log.Error("--vdisk-mapping some source vdisks are not mapped")
os.Exit(1)
}
}
if len(v.Config.VMDisks) != len(VdiskMapping) {
log.Error("--vdisk-mapping some source vdisks are not mapped")
os.Exit(1)
} | identifier_body |
move_vm.go | enabled clones to image service will not be deleted")
fmt.Println("--delete Deletes source VM - ARE YOU REALLY SURE?")
fmt.Println("--overwrite Overwrites target VM/Images (delete and creates new one)")
fmt.Println("--help List this help")
fmt.Println("--version Show the move_vm version")
fmt.Println("")
fmt.Println("Example:")
fmt.Println("")
fmt.Println("move_vm --host=NTNX-CVM --username=admin --password=nutanix/4u --vm-name=MyVM --container=ISO")
fmt.Println("move_vm --host=NTNX-CVM --username=admin --password=nutanix/4u --vm-name=MyVM --vdisk-mapping=scsi.0/ISO,scsi.1/Prod2")
fmt.Println("")
}
// parse --vdisk-mapping or --container and checks if all container exist
func parseVdiskMapping(n *ntnxAPI.NTNXConnection) ([]ntnxAPI.VMDisks, error) {
defer func() {
if err := recover(); err != nil {
log.Fatal("--vdisk-mapping seems not to have right format")
os.Exit(1)
}
}()
var vdiskMappings []ntnxAPI.VMDisks
var VMDisk ntnxAPI.VMDisks
result := strings.Split(*vdiskMapping, ",")
// add Mappings
for i := range result {
res := strings.Split(result[i], "/")
resAddr := strings.Split(res[0], ".")
VMDisk.Addr.DeviceBus = resAddr[0]
VMDisk.Addr.DeviceIndex, _ = strconv.Atoi(resAddr[1])
// check if right format is used
if !(VMDisk.Addr.DeviceBus == "scsi" || VMDisk.Addr.DeviceBus == "pci" || VMDisk.Addr.DeviceBus == "ide") {
log.Error("--vdisk-mapping seems not to have right format")
os.Exit(1)
}
if !(VMDisk.Addr.DeviceIndex >= 0 && VMDisk.Addr.DeviceIndex <= 255) {
log.Error("--vdisk-mapping seems not to have right format")
os.Exit(1)
}
if res[1] != "EMPTY" {
containerUUID, err := ntnxAPI.GetContainerUUIDbyName(n, res[1])
if err != nil {
os.Exit(1)
}
VMDisk.ContainerUUID = containerUUID
}
vdiskMappings = append(vdiskMappings, VMDisk)
}
return vdiskMappings, nil
}
func checkVdiskMapping(v ntnxAPI.VMJSONAHV, VdiskMapping []ntnxAPI.VMDisks) {
defer func() {
if err := recover(); err != nil {
log.Fatal("--vdisk-mapping is not correct")
os.Exit(1)
}
}()
for i, elem := range v.Config.VMDisks {
if elem.Addr.DeviceBus != VdiskMapping[i].Addr.DeviceBus || elem.Addr.DeviceIndex != VdiskMapping[i].Addr.DeviceIndex {
log.Error("--vdisk-mapping some source vdisks are not mapped")
os.Exit(1)
}
}
if len(v.Config.VMDisks) != len(VdiskMapping) {
log.Error("--vdisk-mapping some source vdisks are not mapped")
os.Exit(1)
}
}
func evaluateFlags() (ntnxAPI.NTNXConnection, ntnxAPI.VMJSONAHV, ntnxAPI.VMJSONAHV, []ntnxAPI.VMDisks) {
//help
if *help {
printHelp()
os.Exit(0)
}
//version
if *version {
fmt.Println("Version: " + appVersion)
os.Exit(0)
}
//debug
if *debug {
log.SetLevel(log.DebugLevel)
} else {
log.SetLevel(log.InfoLevel)
}
//delete
if *delete {
reader := bufio.NewReader(os.Stdin)
fmt.Println("THIS WILL DELETE source VM: " + *vmName)
fmt.Print("If you want to continue type YES: ")
text, _ := reader.ReadString('\n')
fmt.Println(text)
if strings.TrimRight(text, "\n") != "YES" {
os.Exit(0)
}
}
//host
if *host == "" {
log.Warn("mandatory option '--host=' is not set")
os.Exit(0)
}
//username
if *username == "" {
log.Warn("option '--username=' is not set Default: admin is used")
*username = "admin"
}
//password
if *password == "" {
log.Warn("option '--password=' is not set Default: nutanix/4u is used")
*password = "nutanix/4u"
}
//vm-name
if *vmName == "" {
log.Warn("mandatory option '--vm-name=' is not set")
os.Exit(0)
}
var vm ntnxAPI.VMJSONAHV
vm.Config.Name = *vmName
if *newVMName == "" {
*newVMName = *vmName
}
var vNew ntnxAPI.VMJSONAHV
vNew.Config.Name = *newVMName
var n ntnxAPI.NTNXConnection
n.NutanixHost = *host
n.Username = *username
n.Password = *password
ntnxAPI.EncodeCredentials(&n)
ntnxAPI.CreateHTTPClient(&n)
ntnxAPI.NutanixCheckCredentials(&n)
// list mapping if specified
if *listMapping {
var listMappingStr string
exist, _ := ntnxAPI.VMExist(&n, vm.Config.Name)
if exist {
vm, _ = ntnxAPI.GetVMbyName(&n, &vm)
for i, elem := range vm.Config.VMDisks {
if !elem.IsEmpty {
containerName, _ := ntnxAPI.GetContainerNamebyUUID(&n, elem.ContainerUUID)
listMappingStr = listMappingStr + elem.Addr.DeviceBus + "." + strconv.Itoa(elem.Addr.DeviceIndex) + "/" + containerName
} else {
listMappingStr = listMappingStr + elem.Addr.DeviceBus + "." + strconv.Itoa(elem.Addr.DeviceIndex) + "/EMPTY"
}
if i < len(vm.Config.VMDisks)-1 {
listMappingStr = listMappingStr + ","
}
}
fmt.Println(listMappingStr)
os.Exit(0)
}
}
// both options set container and vdisk-mapping
if *container != "" && *vdiskMapping != "" {
log.Warn("Option --container and --vdisk-mapping are set. Only one of them is allowed")
os.Exit(0)
}
// none options set container and vdisk-mapping
if *container == "" && *vdiskMapping == "" {
log.Warn("None of --container or --vdisk-mapping is set. One is mandatory")
os.Exit(0)
}
// If container is not found exit
if *container != "" {
_, err := ntnxAPI.GetContainerUUIDbyName(&n, *container)
if err != nil {
os.Exit(1)
}
}
var VdiskMapping []ntnxAPI.VMDisks
var err error
// If container is not found exit
if *vdiskMapping != "" |
return n, vm, vNew, VdiskMapping
}
func main() {
flag.Usage = printHelp
flag.Parse()
customFormatter := new(log.TextFormatter)
customFormatter.TimestampFormat = "2006-01-02 15:04:05"
log.SetFormatter(customFormatter)
customFormatter.FullTimestamp = true
var n ntnxAPI.NTNXConnection
var v ntnxAPI.VMJSONAHV
var vNew ntnxAPI.VMJSONAHV
var d ntnxAPI.VDiskJSONREST
var net ntnxAPI.NetworkREST
var im ntnxAPI.ImageJSONAHV
var taskUUID ntnxAPI.TaskUUID
var VdiskMapping []ntnxAPI.VMDisks
var existV bool
var existVNew bool
n, v, vNew, VdiskMapping = evaluateFlags()
/*
Short description what will be done
1. Upload vDisk from Source VM to Image Service. This is needed while a direct copy is not possible and wait
2. Create VM and wait
3. Clone Images to Disks and wait
4. Add network
5. delete images
*/
/*To-DO:
2. show_progress
*/
//check if source VM exists
existV, _ = ntnxAPI.VMExist(&n, v.Config.Name)
if existV {
//check if new VM exists
existVNew, _ = ntnxAPI.VMExist(&n, vNew.Config.Name)
if existVNew {
if vNew.Config.Name != v.Config.Name {
log.Warn("VM " + vNew.Config.Name + " already exists")
if !*overwrite {
os.Exit(0)
| {
VdiskMapping, err = parseVdiskMapping(&n)
if err != nil {
os.Exit(1)
}
} | conditional_block |
move_vm.go | If enabled clones to image service will not be deleted")
fmt.Println("--delete Deletes source VM - ARE YOU REALLY SURE?")
fmt.Println("--overwrite Overwrites target VM/Images (delete and creates new one)")
fmt.Println("--help List this help")
fmt.Println("--version Show the move_vm version")
fmt.Println("")
fmt.Println("Example:")
fmt.Println("")
fmt.Println("move_vm --host=NTNX-CVM --username=admin --password=nutanix/4u --vm-name=MyVM --container=ISO")
fmt.Println("move_vm --host=NTNX-CVM --username=admin --password=nutanix/4u --vm-name=MyVM --vdisk-mapping=scsi.0/ISO,scsi.1/Prod2")
fmt.Println("")
}
// parse --vdisk-mapping or --container and checks if all container exist
func | (n *ntnxAPI.NTNXConnection) ([]ntnxAPI.VMDisks, error) {
defer func() {
if err := recover(); err != nil {
log.Fatal("--vdisk-mapping seems not to have right format")
os.Exit(1)
}
}()
var vdiskMappings []ntnxAPI.VMDisks
var VMDisk ntnxAPI.VMDisks
result := strings.Split(*vdiskMapping, ",")
// add Mappings
for i := range result {
res := strings.Split(result[i], "/")
resAddr := strings.Split(res[0], ".")
VMDisk.Addr.DeviceBus = resAddr[0]
VMDisk.Addr.DeviceIndex, _ = strconv.Atoi(resAddr[1])
// check if right format is used
if !(VMDisk.Addr.DeviceBus == "scsi" || VMDisk.Addr.DeviceBus == "pci" || VMDisk.Addr.DeviceBus == "ide") {
log.Error("--vdisk-mapping seems not to have right format")
os.Exit(1)
}
if !(VMDisk.Addr.DeviceIndex >= 0 && VMDisk.Addr.DeviceIndex <= 255) {
log.Error("--vdisk-mapping seems not to have right format")
os.Exit(1)
}
if res[1] != "EMPTY" {
containerUUID, err := ntnxAPI.GetContainerUUIDbyName(n, res[1])
if err != nil {
os.Exit(1)
}
VMDisk.ContainerUUID = containerUUID
}
vdiskMappings = append(vdiskMappings, VMDisk)
}
return vdiskMappings, nil
}
func checkVdiskMapping(v ntnxAPI.VMJSONAHV, VdiskMapping []ntnxAPI.VMDisks) {
defer func() {
if err := recover(); err != nil {
log.Fatal("--vdisk-mapping is not correct")
os.Exit(1)
}
}()
for i, elem := range v.Config.VMDisks {
if elem.Addr.DeviceBus != VdiskMapping[i].Addr.DeviceBus || elem.Addr.DeviceIndex != VdiskMapping[i].Addr.DeviceIndex {
log.Error("--vdisk-mapping some source vdisks are not mapped")
os.Exit(1)
}
}
if len(v.Config.VMDisks) != len(VdiskMapping) {
log.Error("--vdisk-mapping some source vdisks are not mapped")
os.Exit(1)
}
}
func evaluateFlags() (ntnxAPI.NTNXConnection, ntnxAPI.VMJSONAHV, ntnxAPI.VMJSONAHV, []ntnxAPI.VMDisks) {
//help
if *help {
printHelp()
os.Exit(0)
}
//version
if *version {
fmt.Println("Version: " + appVersion)
os.Exit(0)
}
//debug
if *debug {
log.SetLevel(log.DebugLevel)
} else {
log.SetLevel(log.InfoLevel)
}
//delete
if *delete {
reader := bufio.NewReader(os.Stdin)
fmt.Println("THIS WILL DELETE source VM: " + *vmName)
fmt.Print("If you want to continue type YES: ")
text, _ := reader.ReadString('\n')
fmt.Println(text)
if strings.TrimRight(text, "\n") != "YES" {
os.Exit(0)
}
}
//host
if *host == "" {
log.Warn("mandatory option '--host=' is not set")
os.Exit(0)
}
//username
if *username == "" {
log.Warn("option '--username=' is not set Default: admin is used")
*username = "admin"
}
//password
if *password == "" {
log.Warn("option '--password=' is not set Default: nutanix/4u is used")
*password = "nutanix/4u"
}
//vm-name
if *vmName == "" {
log.Warn("mandatory option '--vm-name=' is not set")
os.Exit(0)
}
var vm ntnxAPI.VMJSONAHV
vm.Config.Name = *vmName
if *newVMName == "" {
*newVMName = *vmName
}
var vNew ntnxAPI.VMJSONAHV
vNew.Config.Name = *newVMName
var n ntnxAPI.NTNXConnection
n.NutanixHost = *host
n.Username = *username
n.Password = *password
ntnxAPI.EncodeCredentials(&n)
ntnxAPI.CreateHTTPClient(&n)
ntnxAPI.NutanixCheckCredentials(&n)
// list mapping if specified
if *listMapping {
var listMappingStr string
exist, _ := ntnxAPI.VMExist(&n, vm.Config.Name)
if exist {
vm, _ = ntnxAPI.GetVMbyName(&n, &vm)
for i, elem := range vm.Config.VMDisks {
if !elem.IsEmpty {
containerName, _ := ntnxAPI.GetContainerNamebyUUID(&n, elem.ContainerUUID)
listMappingStr = listMappingStr + elem.Addr.DeviceBus + "." + strconv.Itoa(elem.Addr.DeviceIndex) + "/" + containerName
} else {
listMappingStr = listMappingStr + elem.Addr.DeviceBus + "." + strconv.Itoa(elem.Addr.DeviceIndex) + "/EMPTY"
}
if i < len(vm.Config.VMDisks)-1 {
listMappingStr = listMappingStr + ","
}
}
fmt.Println(listMappingStr)
os.Exit(0)
}
}
// both options set container and vdisk-mapping
if *container != "" && *vdiskMapping != "" {
log.Warn("Option --container and --vdisk-mapping are set. Only one of them is allowed")
os.Exit(0)
}
// none options set container and vdisk-mapping
if *container == "" && *vdiskMapping == "" {
log.Warn("None of --container or --vdisk-mapping is set. One is mandatory")
os.Exit(0)
}
// If container is not found exit
if *container != "" {
_, err := ntnxAPI.GetContainerUUIDbyName(&n, *container)
if err != nil {
os.Exit(1)
}
}
var VdiskMapping []ntnxAPI.VMDisks
var err error
// If container is not found exit
if *vdiskMapping != "" {
VdiskMapping, err = parseVdiskMapping(&n)
if err != nil {
os.Exit(1)
}
}
return n, vm, vNew, VdiskMapping
}
func main() {
flag.Usage = printHelp
flag.Parse()
customFormatter := new(log.TextFormatter)
customFormatter.TimestampFormat = "2006-01-02 15:04:05"
log.SetFormatter(customFormatter)
customFormatter.FullTimestamp = true
var n ntnxAPI.NTNXConnection
var v ntnxAPI.VMJSONAHV
var vNew ntnxAPI.VMJSONAHV
var d ntnxAPI.VDiskJSONREST
var net ntnxAPI.NetworkREST
var im ntnxAPI.ImageJSONAHV
var taskUUID ntnxAPI.TaskUUID
var VdiskMapping []ntnxAPI.VMDisks
var existV bool
var existVNew bool
n, v, vNew, VdiskMapping = evaluateFlags()
/*
Short description what will be done
1. Upload vDisk from Source VM to Image Service. This is needed while a direct copy is not possible and wait
2. Create VM and wait
3. Clone Images to Disks and wait
4. Add network
5. delete images
*/
/*To-DO:
2. show_progress
*/
//check if source VM exists
existV, _ = ntnxAPI.VMExist(&n, v.Config.Name)
if existV {
//check if new VM exists
existVNew, _ = ntnxAPI.VMExist(&n, vNew.Config.Name)
if existVNew {
if vNew.Config.Name != v.Config.Name {
log.Warn("VM " + vNew.Config.Name + " already exists")
if !*overwrite {
os.Exit(0)
| parseVdiskMapping | identifier_name |
move_vm.go | If enabled clones to image service will not be deleted")
fmt.Println("--delete Deletes source VM - ARE YOU REALLY SURE?")
fmt.Println("--overwrite Overwrites target VM/Images (delete and creates new one)")
fmt.Println("--help List this help")
fmt.Println("--version Show the move_vm version")
fmt.Println("")
fmt.Println("Example:")
fmt.Println("")
fmt.Println("move_vm --host=NTNX-CVM --username=admin --password=nutanix/4u --vm-name=MyVM --container=ISO")
fmt.Println("move_vm --host=NTNX-CVM --username=admin --password=nutanix/4u --vm-name=MyVM --vdisk-mapping=scsi.0/ISO,scsi.1/Prod2")
fmt.Println("")
}
// parse --vdisk-mapping or --container and checks if all container exist
func parseVdiskMapping(n *ntnxAPI.NTNXConnection) ([]ntnxAPI.VMDisks, error) {
defer func() {
if err := recover(); err != nil {
log.Fatal("--vdisk-mapping seems not to have right format")
os.Exit(1)
}
}()
var vdiskMappings []ntnxAPI.VMDisks
var VMDisk ntnxAPI.VMDisks
result := strings.Split(*vdiskMapping, ",")
// add Mappings
for i := range result {
res := strings.Split(result[i], "/")
resAddr := strings.Split(res[0], ".")
VMDisk.Addr.DeviceBus = resAddr[0]
VMDisk.Addr.DeviceIndex, _ = strconv.Atoi(resAddr[1]) | log.Error("--vdisk-mapping seems not to have right format")
os.Exit(1)
}
if !(VMDisk.Addr.DeviceIndex >= 0 && VMDisk.Addr.DeviceIndex <= 255) {
log.Error("--vdisk-mapping seems not to have right format")
os.Exit(1)
}
if res[1] != "EMPTY" {
containerUUID, err := ntnxAPI.GetContainerUUIDbyName(n, res[1])
if err != nil {
os.Exit(1)
}
VMDisk.ContainerUUID = containerUUID
}
vdiskMappings = append(vdiskMappings, VMDisk)
}
return vdiskMappings, nil
}
func checkVdiskMapping(v ntnxAPI.VMJSONAHV, VdiskMapping []ntnxAPI.VMDisks) {
defer func() {
if err := recover(); err != nil {
log.Fatal("--vdisk-mapping is not correct")
os.Exit(1)
}
}()
for i, elem := range v.Config.VMDisks {
if elem.Addr.DeviceBus != VdiskMapping[i].Addr.DeviceBus || elem.Addr.DeviceIndex != VdiskMapping[i].Addr.DeviceIndex {
log.Error("--vdisk-mapping some source vdisks are not mapped")
os.Exit(1)
}
}
if len(v.Config.VMDisks) != len(VdiskMapping) {
log.Error("--vdisk-mapping some source vdisks are not mapped")
os.Exit(1)
}
}
func evaluateFlags() (ntnxAPI.NTNXConnection, ntnxAPI.VMJSONAHV, ntnxAPI.VMJSONAHV, []ntnxAPI.VMDisks) {
//help
if *help {
printHelp()
os.Exit(0)
}
//version
if *version {
fmt.Println("Version: " + appVersion)
os.Exit(0)
}
//debug
if *debug {
log.SetLevel(log.DebugLevel)
} else {
log.SetLevel(log.InfoLevel)
}
//delete
if *delete {
reader := bufio.NewReader(os.Stdin)
fmt.Println("THIS WILL DELETE source VM: " + *vmName)
fmt.Print("If you want to continue type YES: ")
text, _ := reader.ReadString('\n')
fmt.Println(text)
if strings.TrimRight(text, "\n") != "YES" {
os.Exit(0)
}
}
//host
if *host == "" {
log.Warn("mandatory option '--host=' is not set")
os.Exit(0)
}
//username
if *username == "" {
log.Warn("option '--username=' is not set Default: admin is used")
*username = "admin"
}
//password
if *password == "" {
log.Warn("option '--password=' is not set Default: nutanix/4u is used")
*password = "nutanix/4u"
}
//vm-name
if *vmName == "" {
log.Warn("mandatory option '--vm-name=' is not set")
os.Exit(0)
}
var vm ntnxAPI.VMJSONAHV
vm.Config.Name = *vmName
if *newVMName == "" {
*newVMName = *vmName
}
var vNew ntnxAPI.VMJSONAHV
vNew.Config.Name = *newVMName
var n ntnxAPI.NTNXConnection
n.NutanixHost = *host
n.Username = *username
n.Password = *password
ntnxAPI.EncodeCredentials(&n)
ntnxAPI.CreateHTTPClient(&n)
ntnxAPI.NutanixCheckCredentials(&n)
// list mapping if specified
if *listMapping {
var listMappingStr string
exist, _ := ntnxAPI.VMExist(&n, vm.Config.Name)
if exist {
vm, _ = ntnxAPI.GetVMbyName(&n, &vm)
for i, elem := range vm.Config.VMDisks {
if !elem.IsEmpty {
containerName, _ := ntnxAPI.GetContainerNamebyUUID(&n, elem.ContainerUUID)
listMappingStr = listMappingStr + elem.Addr.DeviceBus + "." + strconv.Itoa(elem.Addr.DeviceIndex) + "/" + containerName
} else {
listMappingStr = listMappingStr + elem.Addr.DeviceBus + "." + strconv.Itoa(elem.Addr.DeviceIndex) + "/EMPTY"
}
if i < len(vm.Config.VMDisks)-1 {
listMappingStr = listMappingStr + ","
}
}
fmt.Println(listMappingStr)
os.Exit(0)
}
}
// both options set container and vdisk-mapping
if *container != "" && *vdiskMapping != "" {
log.Warn("Option --container and --vdisk-mapping are set. Only one of them is allowed")
os.Exit(0)
}
// none options set container and vdisk-mapping
if *container == "" && *vdiskMapping == "" {
log.Warn("None of --container or --vdisk-mapping is set. One is mandatory")
os.Exit(0)
}
// If container is not found exit
if *container != "" {
_, err := ntnxAPI.GetContainerUUIDbyName(&n, *container)
if err != nil {
os.Exit(1)
}
}
var VdiskMapping []ntnxAPI.VMDisks
var err error
// If container is not found exit
if *vdiskMapping != "" {
VdiskMapping, err = parseVdiskMapping(&n)
if err != nil {
os.Exit(1)
}
}
return n, vm, vNew, VdiskMapping
}
func main() {
flag.Usage = printHelp
flag.Parse()
customFormatter := new(log.TextFormatter)
customFormatter.TimestampFormat = "2006-01-02 15:04:05"
log.SetFormatter(customFormatter)
customFormatter.FullTimestamp = true
var n ntnxAPI.NTNXConnection
var v ntnxAPI.VMJSONAHV
var vNew ntnxAPI.VMJSONAHV
var d ntnxAPI.VDiskJSONREST
var net ntnxAPI.NetworkREST
var im ntnxAPI.ImageJSONAHV
var taskUUID ntnxAPI.TaskUUID
var VdiskMapping []ntnxAPI.VMDisks
var existV bool
var existVNew bool
n, v, vNew, VdiskMapping = evaluateFlags()
/*
Short description what will be done
1. Upload vDisk from Source VM to Image Service. This is needed while a direct copy is not possible and wait
2. Create VM and wait
3. Clone Images to Disks and wait
4. Add network
5. delete images
*/
/*To-DO:
2. show_progress
*/
//check if source VM exists
existV, _ = ntnxAPI.VMExist(&n, v.Config.Name)
if existV {
//check if new VM exists
existVNew, _ = ntnxAPI.VMExist(&n, vNew.Config.Name)
if existVNew {
if vNew.Config.Name != v.Config.Name {
log.Warn("VM " + vNew.Config.Name + " already exists")
if !*overwrite {
os.Exit(0)
|
// check if right format is used
if !(VMDisk.Addr.DeviceBus == "scsi" || VMDisk.Addr.DeviceBus == "pci" || VMDisk.Addr.DeviceBus == "ide") { | random_line_split |
model.py | None:
# empty model
self.model = None
self.keywords = None
elif _type == "fixed":
if pre_trained_model_json is None:
raise RatingModel.RatingModel.Error("pre_trained_model_json is None")
self.loadModelFixed(pre_trained_model_json)
elif _type == "lda":
if pre_trained_model_json is None:
raise RatingModel.RatingModel.Error("pre_trained_model_json is None")
self.loadModelLDA(pre_trained_model_json)
else:
raise RatingModel.RatingModelError( "type of test not valid. Either 'fixed' or 'lda'")
print("Loading nlp tools...")
if spacy_nlp is None:
# load default model
self.nlp = loadDefaultNLP()
else:
self.nlp = spacy_nlp
print("Loading pdf parser...")
# takes some time
from tika import parser
self.parser = parser
def loadModelLDA(self, model_json: str) -> None:
"""
Function to load a pre-trained ;da model
:param model_csv: the json filename of the model
"""
dirname = os.path.dirname(model_json)
try:
with open(model_json, "r") as f:
j = json.load(f)
except Exception as e:
print(e)
raise RatingModel.RatingModelError(
"model_json %s is not a valid path" % model_json
)
try:
path = os.path.join(dirname, j["model_csv"])
self.model = pd.read_csv(path)
except Exception as e:
print(e)
raise RatingModel.RatingModelError(
"model_csv %s in model_json is not a valid path" % path
)
try:
path = os.path.join(dirname, j["lda"])
self.lda = LdaModel.load(path)
self.dictionary = self.lda.id2word
except Exception as e:
print(e)
raise RatingModel.RatingModelError("lda %s in model_json is not a valid path" % path)
try:
path = os.path.join(dirname, j["top_k_words"])
self.top_k_words = []
with open(path, "r") as f:
for line in f:
if line:
self.top_k_words.append(line.strip())
except Exception as e:
print(e)
raise RatingModel.RatingModelError("top_k_words %s in model_json is not a valid path" % path)
self._type = "lda"
def __keep_top_k_words(self, text):
|
def __trainKMWM(self,seen_chunks_words: List[str],all_tokens_chunks: List[Any],
keywords: List[str]) -> Optional[Tuple[List[float], List[float]]]:
"""
Hidden function to obtain KM and WM scores from keywords
:param seen_chunks_words: n-grams of words in doc
:param all_tokens_chunks: list of all tokens and chunks
:param keywords: keywords to train on
:return: Optional[Tuple[List[float], List[float]]]: kmscores, wmscores
if no errors.
Else None
"""
# get word2vec correlation matrix of all tokens + keyword_tokens
keywords_tokenized = self.nlp(" ".join(keywords))
# prepare word embedding matrix
pd_series_all = []
# convert tokens and chunks into word embeddings and put them into a pd.Series
for tc in all_tokens_chunks:
name = tc.lemma_.lower()
pd_series_all.append(pd.Series(tc.vector, name=name))
# convert keywords into word embeddings and put them into a pd.Series
for kwt in keywords_tokenized:
name = kwt.text.lower()
if name not in seen_chunks_words:
pd_series_all.append(pd.Series(kwt.vector, name=name))
seen_chunks_words.append(name)
# get embedding matrix by concatenating all pd.Series
embedd_mat_df = pd.concat(pd_series_all, axis=1).reset_index()
corrmat = embedd_mat_df.corr()
# top n words correlated to keyword
top_n = list(range(10, 100, 10))
km_scores = []
wm_scores = []
try:
for kw in keywords:
km_similarities = []
wm_similarities = []
# for top n words based on correlation to kw
for n in top_n:
cols = np.append(
corrmat[kw]
.drop(keywords)
.sort_values(ascending=False)
.index.values[: n - 1],
kw,
)
cm = np.corrcoef(embedd_mat_df[cols].values.T)
# KM score
# avg of top n correlations wrt kw (less the keyword
# itself since it has corr = 1)
avg_sim = np.mean(cm[0, :][1:])
km_similarities.append(avg_sim)
# WM score
# avg of top n correlations (without kw)
# amongst each other
len_minus = (
cm.shape[0] - 1
) # cm.shape to remove all the self correlations
len_minus_sq = len_minus ** 2
# 1. sum the correlations less the
# correlations with the keyword
# 2. subtract len_minus since there are
# len_minus autocorrelations
# 3. get mean by dividing the size of the rest
# i.e. (len_minus_sq - len_minus)
avg_wm = (np.sum(cm[1:, 1:]) - len_minus) / (
len_minus_sq - len_minus
)
wm_similarities.append(avg_wm)
# get 8th degree of X and perform LR to get intercept
X = np.array(top_n)
Xes = [X]
# for i in range(2, 9):
# Xes.append(X ** i)
X_transformed = np.array(Xes).T
lm = LinearRegression()
# KM score
y = np.array(km_similarities)
lm.fit(X_transformed, y)
km_scores.append(lm.intercept_)
# WM score
y = np.array(wm_similarities)
lm.fit(X_transformed, y)
wm_scores.append(lm.intercept_)
except Exception as e:
print(e)
return None
return km_scores, wm_scores
def test(self, filename: str, info_extractor: Optional[InfoExtractor]):
"""
Test a document and print the extracted information and rating
:param filename: name of resume file
:param info_extractor: InfoExtractor object
"""
if self.model is None:
raise RatingModel.RatingModelError("model is not loaded or trained yet")
doc, _ = loadDocumentIntoSpacy(filename, self.parser, self.nlp)
print("Getting rating...")
if self._type == "fixed":
print("working on fixed model")
if self.keywords is None:
raise RatingModel.RatingModelError("Keywords not found")
seen_chunks_words, all_tokens_chunks = getAllTokensAndChunks(doc)
# scoring
temp_out = self.__trainKMWM(list(seen_chunks_words), list(all_tokens_chunks), self.keywords)
if temp_out is None:
raise RatingModel.RatingModelError(
"Either parser cannot detect text or too few words in resume for analysis. Most usually the former."
)
km_scores, wm_scores = temp_out
# average of km/wm scores for all keywords
km_score = np.mean(km_scores)
wm_score = np.mean(wm_scores)
final_score = km_score * wm_score
elif self._type == "lda":
if self.lda is None or self.dictionary is None or self.top_k_words is None:
raise RatingModel.RatingModelError("No LDA found")
seen_chunks_words, all_tokens_chunks = getAllTokensAndChunks(doc)
seen_chunks_words, all_tokens_chunks = (
list(seen_chunks_words),
list(all_tokens_chunks),
)
# scoring
new_seen_chunks_words = self.__keep_top_k_words(seen_chunks_words)
bow = self.dictionary.doc2bow(new_seen_chunks_words)
doc_distribution = np.array(
[tup[1] for tup in self.lda.get_document_topics(bow=bow)]
)
# get keywords and weights
keywords = []
all_pair_scores = []
all_topic_scores = []
all_diff_scores = []
# take top 5 topics
for j in doc_distribution.argsort()[-5:][::-1]:
topic_prob = doc_distribution[j]
# take top 5 words for each topic
st = self.lda.show_topic(topicid=j, topn=5)
sum_st = np.sum(list(map(lambda x: x[1], st)))
pair_scores = []
for pair in st:
keywords.append(pair[0])
pair_scores.append(pair[1])
all_pair_scores.append(np.array(pair_scores))
all_topic_scores.append(np.array(topic_prob))
all_pair_scores = np.array(all_pair_scores)
norm_all_pair_scores = all_pair_scores.T / np.sum(all_pair_scores, axis=1)
norm_all_topic_scores = all_topic_scores / np.sum(all_topic_scores)
all_diff_scores = (norm_all_pair_scores | return [word for word in text if word in self.top_k_words] | identifier_body |
model.py | None:
# empty model
self.model = None
self.keywords = None
elif _type == "fixed":
if pre_trained_model_json is None:
raise RatingModel.RatingModel.Error("pre_trained_model_json is None")
self.loadModelFixed(pre_trained_model_json)
elif _type == "lda":
if pre_trained_model_json is None:
raise RatingModel.RatingModel.Error("pre_trained_model_json is None")
self.loadModelLDA(pre_trained_model_json)
else:
raise RatingModel.RatingModelError( "type of test not valid. Either 'fixed' or 'lda'")
print("Loading nlp tools...")
if spacy_nlp is None:
# load default model
self.nlp = loadDefaultNLP()
else:
self.nlp = spacy_nlp
print("Loading pdf parser...")
# takes some time
from tika import parser
self.parser = parser
def loadModelLDA(self, model_json: str) -> None:
"""
Function to load a pre-trained ;da model
:param model_csv: the json filename of the model
"""
dirname = os.path.dirname(model_json)
try:
with open(model_json, "r") as f:
j = json.load(f)
except Exception as e:
print(e)
raise RatingModel.RatingModelError(
"model_json %s is not a valid path" % model_json
)
try:
path = os.path.join(dirname, j["model_csv"])
self.model = pd.read_csv(path)
except Exception as e:
print(e)
raise RatingModel.RatingModelError(
"model_csv %s in model_json is not a valid path" % path
)
try:
path = os.path.join(dirname, j["lda"])
self.lda = LdaModel.load(path)
self.dictionary = self.lda.id2word
except Exception as e:
print(e)
raise RatingModel.RatingModelError("lda %s in model_json is not a valid path" % path)
try:
path = os.path.join(dirname, j["top_k_words"])
self.top_k_words = []
with open(path, "r") as f:
for line in f:
if line:
self.top_k_words.append(line.strip())
except Exception as e:
print(e)
raise RatingModel.RatingModelError("top_k_words %s in model_json is not a valid path" % path)
self._type = "lda"
def __keep_top_k_words(self, text):
return [word for word in text if word in self.top_k_words]
def __trainKMWM(self,seen_chunks_words: List[str],all_tokens_chunks: List[Any],
keywords: List[str]) -> Optional[Tuple[List[float], List[float]]]:
"""
Hidden function to obtain KM and WM scores from keywords
:param seen_chunks_words: n-grams of words in doc
:param all_tokens_chunks: list of all tokens and chunks
:param keywords: keywords to train on
:return: Optional[Tuple[List[float], List[float]]]: kmscores, wmscores
if no errors.
Else None
"""
# get word2vec correlation matrix of all tokens + keyword_tokens
keywords_tokenized = self.nlp(" ".join(keywords))
# prepare word embedding matrix
pd_series_all = []
# convert tokens and chunks into word embeddings and put them into a pd.Series
for tc in all_tokens_chunks:
name = tc.lemma_.lower()
pd_series_all.append(pd.Series(tc.vector, name=name))
# convert keywords into word embeddings and put them into a pd.Series
for kwt in keywords_tokenized:
name = kwt.text.lower()
if name not in seen_chunks_words:
pd_series_all.append(pd.Series(kwt.vector, name=name))
seen_chunks_words.append(name)
# get embedding matrix by concatenating all pd.Series
embedd_mat_df = pd.concat(pd_series_all, axis=1).reset_index()
corrmat = embedd_mat_df.corr()
# top n words correlated to keyword
top_n = list(range(10, 100, 10))
km_scores = []
wm_scores = []
try:
for kw in keywords:
km_similarities = []
wm_similarities = []
# for top n words based on correlation to kw
for n in top_n:
cols = np.append(
corrmat[kw]
.drop(keywords)
.sort_values(ascending=False)
.index.values[: n - 1],
kw,
)
cm = np.corrcoef(embedd_mat_df[cols].values.T)
# KM score
# avg of top n correlations wrt kw (less the keyword
# itself since it has corr = 1)
avg_sim = np.mean(cm[0, :][1:])
km_similarities.append(avg_sim)
# WM score
# avg of top n correlations (without kw)
# amongst each other
len_minus = (
cm.shape[0] - 1
) # cm.shape to remove all the self correlations
len_minus_sq = len_minus ** 2
# 1. sum the correlations less the
# correlations with the keyword
# 2. subtract len_minus since there are
# len_minus autocorrelations
# 3. get mean by dividing the size of the rest
# i.e. (len_minus_sq - len_minus)
avg_wm = (np.sum(cm[1:, 1:]) - len_minus) / (
len_minus_sq - len_minus
)
wm_similarities.append(avg_wm)
# get 8th degree of X and perform LR to get intercept
X = np.array(top_n)
Xes = [X]
# for i in range(2, 9):
# Xes.append(X ** i)
X_transformed = np.array(Xes).T
lm = LinearRegression()
# KM score
y = np.array(km_similarities)
lm.fit(X_transformed, y)
km_scores.append(lm.intercept_)
# WM score
y = np.array(wm_similarities)
lm.fit(X_transformed, y)
wm_scores.append(lm.intercept_)
except Exception as e:
print(e)
return None
return km_scores, wm_scores
def test(self, filename: str, info_extractor: Optional[InfoExtractor]):
"""
Test a document and print the extracted information and rating
:param filename: name of resume file
:param info_extractor: InfoExtractor object
"""
if self.model is None:
raise RatingModel.RatingModelError("model is not loaded or trained yet")
doc, _ = loadDocumentIntoSpacy(filename, self.parser, self.nlp)
print("Getting rating...")
if self._type == "fixed":
|
elif self._type == "lda":
if self.lda is None or self.dictionary is None or self.top_k_words is None:
raise RatingModel.RatingModelError("No LDA found")
seen_chunks_words, all_tokens_chunks = getAllTokensAndChunks(doc)
seen_chunks_words, all_tokens_chunks = (
list(seen_chunks_words),
list(all_tokens_chunks),
)
# scoring
new_seen_chunks_words = self.__keep_top_k_words(seen_chunks_words)
bow = self.dictionary.doc2bow(new_seen_chunks_words)
doc_distribution = np.array(
[tup[1] for tup in self.lda.get_document_topics(bow=bow)]
)
# get keywords and weights
keywords = []
all_pair_scores = []
all_topic_scores = []
all_diff_scores = []
# take top 5 topics
for j in doc_distribution.argsort()[-5:][::-1]:
topic_prob = doc_distribution[j]
# take top 5 words for each topic
st = self.lda.show_topic(topicid=j, topn=5)
sum_st = np.sum(list(map(lambda x: x[1], st)))
pair_scores = []
for pair in st:
keywords.append(pair[0])
pair_scores.append(pair[1])
all_pair_scores.append(np.array(pair_scores))
all_topic_scores.append(np.array(topic_prob))
all_pair_scores = np.array(all_pair_scores)
norm_all_pair_scores = all_pair_scores.T / np.sum(all_pair_scores, axis=1)
norm_all_topic_scores = all_topic_scores / np.sum(all_topic_scores)
all_diff_scores = (norm_all_pair_scores | print("working on fixed model")
if self.keywords is None:
raise RatingModel.RatingModelError("Keywords not found")
seen_chunks_words, all_tokens_chunks = getAllTokensAndChunks(doc)
# scoring
temp_out = self.__trainKMWM(list(seen_chunks_words), list(all_tokens_chunks), self.keywords)
if temp_out is None:
raise RatingModel.RatingModelError(
"Either parser cannot detect text or too few words in resume for analysis. Most usually the former."
)
km_scores, wm_scores = temp_out
# average of km/wm scores for all keywords
km_score = np.mean(km_scores)
wm_score = np.mean(wm_scores)
final_score = km_score * wm_score | conditional_block |
model.py | None:
# empty model
self.model = None
self.keywords = None
elif _type == "fixed":
if pre_trained_model_json is None:
raise RatingModel.RatingModel.Error("pre_trained_model_json is None")
self.loadModelFixed(pre_trained_model_json)
elif _type == "lda":
if pre_trained_model_json is None:
raise RatingModel.RatingModel.Error("pre_trained_model_json is None")
self.loadModelLDA(pre_trained_model_json)
else:
raise RatingModel.RatingModelError( "type of test not valid. Either 'fixed' or 'lda'")
print("Loading nlp tools...")
if spacy_nlp is None:
# load default model
self.nlp = loadDefaultNLP()
else:
self.nlp = spacy_nlp
print("Loading pdf parser...")
# takes some time
from tika import parser
self.parser = parser
def loadModelLDA(self, model_json: str) -> None:
"""
Function to load a pre-trained ;da model
:param model_csv: the json filename of the model
"""
dirname = os.path.dirname(model_json)
try:
with open(model_json, "r") as f:
j = json.load(f)
except Exception as e:
print(e)
raise RatingModel.RatingModelError(
"model_json %s is not a valid path" % model_json
)
try:
path = os.path.join(dirname, j["model_csv"])
self.model = pd.read_csv(path)
except Exception as e:
print(e)
raise RatingModel.RatingModelError(
"model_csv %s in model_json is not a valid path" % path
)
try:
path = os.path.join(dirname, j["lda"])
self.lda = LdaModel.load(path)
self.dictionary = self.lda.id2word
except Exception as e:
print(e)
raise RatingModel.RatingModelError("lda %s in model_json is not a valid path" % path)
try:
path = os.path.join(dirname, j["top_k_words"])
self.top_k_words = []
with open(path, "r") as f:
for line in f:
if line:
self.top_k_words.append(line.strip())
except Exception as e:
print(e)
raise RatingModel.RatingModelError("top_k_words %s in model_json is not a valid path" % path)
self._type = "lda"
def __keep_top_k_words(self, text):
return [word for word in text if word in self.top_k_words]
def __trainKMWM(self,seen_chunks_words: List[str],all_tokens_chunks: List[Any],
keywords: List[str]) -> Optional[Tuple[List[float], List[float]]]:
"""
Hidden function to obtain KM and WM scores from keywords
:param seen_chunks_words: n-grams of words in doc
:param all_tokens_chunks: list of all tokens and chunks
:param keywords: keywords to train on
:return: Optional[Tuple[List[float], List[float]]]: kmscores, wmscores
if no errors.
Else None
"""
# get word2vec correlation matrix of all tokens + keyword_tokens
keywords_tokenized = self.nlp(" ".join(keywords))
# prepare word embedding matrix
pd_series_all = []
# convert tokens and chunks into word embeddings and put them into a pd.Series
for tc in all_tokens_chunks:
name = tc.lemma_.lower()
pd_series_all.append(pd.Series(tc.vector, name=name))
# convert keywords into word embeddings and put them into a pd.Series
for kwt in keywords_tokenized:
name = kwt.text.lower()
if name not in seen_chunks_words:
pd_series_all.append(pd.Series(kwt.vector, name=name))
seen_chunks_words.append(name)
# get embedding matrix by concatenating all pd.Series
embedd_mat_df = pd.concat(pd_series_all, axis=1).reset_index()
corrmat = embedd_mat_df.corr()
# top n words correlated to keyword
top_n = list(range(10, 100, 10))
km_scores = []
wm_scores = []
try:
for kw in keywords:
km_similarities = []
wm_similarities = []
# for top n words based on correlation to kw
for n in top_n:
cols = np.append(
corrmat[kw]
.drop(keywords)
.sort_values(ascending=False)
.index.values[: n - 1],
kw,
)
cm = np.corrcoef(embedd_mat_df[cols].values.T)
# KM score
# avg of top n correlations wrt kw (less the keyword
# itself since it has corr = 1)
avg_sim = np.mean(cm[0, :][1:])
km_similarities.append(avg_sim)
# WM score
# avg of top n correlations (without kw)
# amongst each other
len_minus = (
cm.shape[0] - 1
) # cm.shape to remove all the self correlations
len_minus_sq = len_minus ** 2
# 1. sum the correlations less the
# correlations with the keyword
# 2. subtract len_minus since there are
# len_minus autocorrelations
# 3. get mean by dividing the size of the rest
# i.e. (len_minus_sq - len_minus)
avg_wm = (np.sum(cm[1:, 1:]) - len_minus) / (
len_minus_sq - len_minus
)
wm_similarities.append(avg_wm)
# get 8th degree of X and perform LR to get intercept
X = np.array(top_n)
Xes = [X]
# for i in range(2, 9):
# Xes.append(X ** i)
X_transformed = np.array(Xes).T
lm = LinearRegression()
# KM score
y = np.array(km_similarities)
lm.fit(X_transformed, y)
km_scores.append(lm.intercept_)
# WM score
y = np.array(wm_similarities)
lm.fit(X_transformed, y)
wm_scores.append(lm.intercept_)
except Exception as e:
print(e)
return None
return km_scores, wm_scores
def test(self, filename: str, info_extractor: Optional[InfoExtractor]):
"""
Test a document and print the extracted information and rating
:param filename: name of resume file
:param info_extractor: InfoExtractor object
"""
if self.model is None:
raise RatingModel.RatingModelError("model is not loaded or trained yet")
doc, _ = loadDocumentIntoSpacy(filename, self.parser, self.nlp)
print("Getting rating...")
if self._type == "fixed":
print("working on fixed model")
if self.keywords is None:
raise RatingModel.RatingModelError("Keywords not found")
seen_chunks_words, all_tokens_chunks = getAllTokensAndChunks(doc)
# scoring
temp_out = self.__trainKMWM(list(seen_chunks_words), list(all_tokens_chunks), self.keywords)
if temp_out is None:
raise RatingModel.RatingModelError(
"Either parser cannot detect text or too few words in resume for analysis. Most usually the former."
)
km_scores, wm_scores = temp_out
# average of km/wm scores for all keywords
km_score = np.mean(km_scores)
wm_score = np.mean(wm_scores)
| elif self._type == "lda":
if self.lda is None or self.dictionary is None or self.top_k_words is None:
raise RatingModel.RatingModelError("No LDA found")
seen_chunks_words, all_tokens_chunks = getAllTokensAndChunks(doc)
seen_chunks_words, all_tokens_chunks = (
list(seen_chunks_words),
list(all_tokens_chunks),
)
# scoring
new_seen_chunks_words = self.__keep_top_k_words(seen_chunks_words)
bow = self.dictionary.doc2bow(new_seen_chunks_words)
doc_distribution = np.array(
[tup[1] for tup in self.lda.get_document_topics(bow=bow)]
)
# get keywords and weights
keywords = []
all_pair_scores = []
all_topic_scores = []
all_diff_scores = []
# take top 5 topics
for j in doc_distribution.argsort()[-5:][::-1]:
topic_prob = doc_distribution[j]
# take top 5 words for each topic
st = self.lda.show_topic(topicid=j, topn=5)
sum_st = np.sum(list(map(lambda x: x[1], st)))
pair_scores = []
for pair in st:
keywords.append(pair[0])
pair_scores.append(pair[1])
all_pair_scores.append(np.array(pair_scores))
all_topic_scores.append(np.array(topic_prob))
all_pair_scores = np.array(all_pair_scores)
norm_all_pair_scores = all_pair_scores.T / np.sum(all_pair_scores, axis=1)
norm_all_topic_scores = all_topic_scores / np.sum(all_topic_scores)
all_diff_scores = (norm_all_pair | final_score = km_score * wm_score
| random_line_split |
model.py | :
# empty model
self.model = None
self.keywords = None
elif _type == "fixed":
if pre_trained_model_json is None:
raise RatingModel.RatingModel.Error("pre_trained_model_json is None")
self.loadModelFixed(pre_trained_model_json)
elif _type == "lda":
if pre_trained_model_json is None:
raise RatingModel.RatingModel.Error("pre_trained_model_json is None")
self.loadModelLDA(pre_trained_model_json)
else:
raise RatingModel.RatingModelError( "type of test not valid. Either 'fixed' or 'lda'")
print("Loading nlp tools...")
if spacy_nlp is None:
# load default model
self.nlp = loadDefaultNLP()
else:
self.nlp = spacy_nlp
print("Loading pdf parser...")
# takes some time
from tika import parser
self.parser = parser
def loadModelLDA(self, model_json: str) -> None:
"""
Function to load a pre-trained ;da model
:param model_csv: the json filename of the model
"""
dirname = os.path.dirname(model_json)
try:
with open(model_json, "r") as f:
j = json.load(f)
except Exception as e:
print(e)
raise RatingModel.RatingModelError(
"model_json %s is not a valid path" % model_json
)
try:
path = os.path.join(dirname, j["model_csv"])
self.model = pd.read_csv(path)
except Exception as e:
print(e)
raise RatingModel.RatingModelError(
"model_csv %s in model_json is not a valid path" % path
)
try:
path = os.path.join(dirname, j["lda"])
self.lda = LdaModel.load(path)
self.dictionary = self.lda.id2word
except Exception as e:
print(e)
raise RatingModel.RatingModelError("lda %s in model_json is not a valid path" % path)
try:
path = os.path.join(dirname, j["top_k_words"])
self.top_k_words = []
with open(path, "r") as f:
for line in f:
if line:
self.top_k_words.append(line.strip())
except Exception as e:
print(e)
raise RatingModel.RatingModelError("top_k_words %s in model_json is not a valid path" % path)
self._type = "lda"
def __keep_top_k_words(self, text):
return [word for word in text if word in self.top_k_words]
def | (self,seen_chunks_words: List[str],all_tokens_chunks: List[Any],
keywords: List[str]) -> Optional[Tuple[List[float], List[float]]]:
"""
Hidden function to obtain KM and WM scores from keywords
:param seen_chunks_words: n-grams of words in doc
:param all_tokens_chunks: list of all tokens and chunks
:param keywords: keywords to train on
:return: Optional[Tuple[List[float], List[float]]]: kmscores, wmscores
if no errors.
Else None
"""
# get word2vec correlation matrix of all tokens + keyword_tokens
keywords_tokenized = self.nlp(" ".join(keywords))
# prepare word embedding matrix
pd_series_all = []
# convert tokens and chunks into word embeddings and put them into a pd.Series
for tc in all_tokens_chunks:
name = tc.lemma_.lower()
pd_series_all.append(pd.Series(tc.vector, name=name))
# convert keywords into word embeddings and put them into a pd.Series
for kwt in keywords_tokenized:
name = kwt.text.lower()
if name not in seen_chunks_words:
pd_series_all.append(pd.Series(kwt.vector, name=name))
seen_chunks_words.append(name)
# get embedding matrix by concatenating all pd.Series
embedd_mat_df = pd.concat(pd_series_all, axis=1).reset_index()
corrmat = embedd_mat_df.corr()
# top n words correlated to keyword
top_n = list(range(10, 100, 10))
km_scores = []
wm_scores = []
try:
for kw in keywords:
km_similarities = []
wm_similarities = []
# for top n words based on correlation to kw
for n in top_n:
cols = np.append(
corrmat[kw]
.drop(keywords)
.sort_values(ascending=False)
.index.values[: n - 1],
kw,
)
cm = np.corrcoef(embedd_mat_df[cols].values.T)
# KM score
# avg of top n correlations wrt kw (less the keyword
# itself since it has corr = 1)
avg_sim = np.mean(cm[0, :][1:])
km_similarities.append(avg_sim)
# WM score
# avg of top n correlations (without kw)
# amongst each other
len_minus = (
cm.shape[0] - 1
) # cm.shape to remove all the self correlations
len_minus_sq = len_minus ** 2
# 1. sum the correlations less the
# correlations with the keyword
# 2. subtract len_minus since there are
# len_minus autocorrelations
# 3. get mean by dividing the size of the rest
# i.e. (len_minus_sq - len_minus)
avg_wm = (np.sum(cm[1:, 1:]) - len_minus) / (
len_minus_sq - len_minus
)
wm_similarities.append(avg_wm)
# get 8th degree of X and perform LR to get intercept
X = np.array(top_n)
Xes = [X]
# for i in range(2, 9):
# Xes.append(X ** i)
X_transformed = np.array(Xes).T
lm = LinearRegression()
# KM score
y = np.array(km_similarities)
lm.fit(X_transformed, y)
km_scores.append(lm.intercept_)
# WM score
y = np.array(wm_similarities)
lm.fit(X_transformed, y)
wm_scores.append(lm.intercept_)
except Exception as e:
print(e)
return None
return km_scores, wm_scores
def test(self, filename: str, info_extractor: Optional[InfoExtractor]):
"""
Test a document and print the extracted information and rating
:param filename: name of resume file
:param info_extractor: InfoExtractor object
"""
if self.model is None:
raise RatingModel.RatingModelError("model is not loaded or trained yet")
doc, _ = loadDocumentIntoSpacy(filename, self.parser, self.nlp)
print("Getting rating...")
if self._type == "fixed":
print("working on fixed model")
if self.keywords is None:
raise RatingModel.RatingModelError("Keywords not found")
seen_chunks_words, all_tokens_chunks = getAllTokensAndChunks(doc)
# scoring
temp_out = self.__trainKMWM(list(seen_chunks_words), list(all_tokens_chunks), self.keywords)
if temp_out is None:
raise RatingModel.RatingModelError(
"Either parser cannot detect text or too few words in resume for analysis. Most usually the former."
)
km_scores, wm_scores = temp_out
# average of km/wm scores for all keywords
km_score = np.mean(km_scores)
wm_score = np.mean(wm_scores)
final_score = km_score * wm_score
elif self._type == "lda":
if self.lda is None or self.dictionary is None or self.top_k_words is None:
raise RatingModel.RatingModelError("No LDA found")
seen_chunks_words, all_tokens_chunks = getAllTokensAndChunks(doc)
seen_chunks_words, all_tokens_chunks = (
list(seen_chunks_words),
list(all_tokens_chunks),
)
# scoring
new_seen_chunks_words = self.__keep_top_k_words(seen_chunks_words)
bow = self.dictionary.doc2bow(new_seen_chunks_words)
doc_distribution = np.array(
[tup[1] for tup in self.lda.get_document_topics(bow=bow)]
)
# get keywords and weights
keywords = []
all_pair_scores = []
all_topic_scores = []
all_diff_scores = []
# take top 5 topics
for j in doc_distribution.argsort()[-5:][::-1]:
topic_prob = doc_distribution[j]
# take top 5 words for each topic
st = self.lda.show_topic(topicid=j, topn=5)
sum_st = np.sum(list(map(lambda x: x[1], st)))
pair_scores = []
for pair in st:
keywords.append(pair[0])
pair_scores.append(pair[1])
all_pair_scores.append(np.array(pair_scores))
all_topic_scores.append(np.array(topic_prob))
all_pair_scores = np.array(all_pair_scores)
norm_all_pair_scores = all_pair_scores.T / np.sum(all_pair_scores, axis=1)
norm_all_topic_scores = all_topic_scores / np.sum(all_topic_scores)
all_diff_scores = (norm_all_pair_scores | __trainKMWM | identifier_name |
ng-typeview.ts | Handler,
defaultTagDirectiveHandlers, defaultAttrDirectiveHandlers} from "./ng-directives"
import {extractControllerScopeInfo, extractCtrlViewConnsAngularModule,
ControllerViewInfo, ControllerScopeInfo,
ControllerViewConnector, defaultCtrlViewConnectors,
CtrlViewFragmentExtractor, defaultCtrlViewFragmentExtractors,
ModelViewConnector, defaultModelViewConnectors} from "./controller-parser"
import {NgFilter, defaultNgFilters} from "./filters"
export {ControllerViewInfo} from "./controller-parser";
// we only repeat the imports, type synonyms and custom interfaces
// if there is a module, because otherwise those are dumped in the
// global namespace anyway
function wrapInModule(moduleName: string, scopeInfo: ControllerScopeInfo,
contents: string): string |
function getViewTestFilename(ctrlFname: string, viewFname: string): string {
return `${ctrlFname}_${viewFname}_viewtest.ts`;
}
async function processControllerView(prjSettings: ProjectSettings,
controllerPath: string, viewPath: string, ngFilters: NgFilter[],
tagDirectives: TagDirectiveHandler[],
attributeDirectives: AttributeDirectiveHandler[]) {
const scopeContents: ControllerScopeInfo = await extractControllerScopeInfo(
controllerPath, prjSettings.ctrlViewFragmentExtractors);
if (scopeContents.scopeInfo.isNone()) {
// no point of writing anything if there is no scope block
return;
}
const viewExprs = await parseView(
prjSettings.resolveImportsAsNonScope || false,
viewPath, scopeContents.viewFragments, scopeContents.importNames,
Vector.ofIterable(tagDirectives),
Vector.ofIterable(attributeDirectives),
Vector.ofIterable(ngFilters));
const pathInfo = parse(controllerPath);
const viewPathInfo = parse(viewPath);
// putting both controller & view name in the output, as one controller
// may be used for several views.
const outputFname = pathInfo.dir + "/" +
getViewTestFilename(pathInfo.name, viewPathInfo.name);
const moduleWrap = (x:string) => scopeContents.tsModuleName
.map(n => wrapInModule(n, scopeContents, x))
.getOrElse(x);
const filterParams = ngFilters.map(f => `f__${f.name}:${f.type}`).join(",\n ")
const typeParams = scopeContents.scopeTypeParams.getOrElse("");
writeFileSync(outputFname, moduleWrap(
scopeContents.scopeInfo.getOrThrow() +
`\n\nfunction ___f${typeParams}($scope: Scope${
typeParams}, ${filterParams}) {\n` +
viewExprs +
"\n}\n") + "\n");
}
/**
* Configuration for a ng-typeview project.
*/
export interface ProjectSettings {
/**
* The path for the project on disk (root folder)
*/
path: string;
/**
* Folders within the project to exclude from analysis
* (for instance external JS libraries, the folder where
* your typescript is compiled to javascript, and so on).
*/
blacklistedPaths: string[];
/**
* List of angular filters to handle during the analysis.
* You can use [[defaultNgFilters]], add to that list, or specify your own.
*/
ngFilters: NgFilter[];
/**
* List of controller-view connectors to use.
* [[defaultCtrlViewConnectors]] contains a default list; you can use
* that, add to that list, or specify your own.
*/
ctrlViewConnectors: ControllerViewConnector[];
/**
* Hardcoded controller/view connections that'll be added
* to the ones which were autodetected through ctrlViewConnectors.
* Useful in case it's too hard to parse some connections
* from source.
*/
extraCtrlViewConnections: ControllerViewInfo[];
/**
* List of model-view connectors to use.
* These tie model files to views.
* This allows to express non-controller models, such
* as directive models for instance.
* [[defaultModelViewConnectors]] contains a default list; you can use
* that, add to that list, or specify your own.
*/
modelViewConnectors: ModelViewConnector[];
/**
* List of tag-bound angular directives to handle during the analysis.
* [[defaultTagDirectiveHandlers]] contains a default list; you can use
* that, add to that list, or specify your own.
*/
tagDirectives: TagDirectiveHandler[];
/**
* List of attribute-bound angular directives to handle during the analysis.
* [[defaultAttrDirectiveHandlers]] contains a default list; you can use
* that, add to that list, or specify your own.
*/
attributeDirectives: AttributeDirectiveHandler[];
/**
* Controller view fragment extractors. For instance, you may have
* view fragments present in your controllers, for instance ng-grid has
* 'cell templates' which typeview can also type-check through this mechanism.
* Extractors allows you to tell ng-typeview about those.
*/
ctrlViewFragmentExtractors: CtrlViewFragmentExtractor[];
/**
* When resolving the scope for variables in the view, we prefix "$scope."
* for all variables except those defined in the view. For instance, a
* `ng-repeat` will define local variables. For these, we do not prefix with
* "$scope.". 99% of the time, that works great.
* One issue that can come up though, is if you have static fields for
* instance. If you read `MyClass.MY_STATIC_FIELD`... That'll work in javascript
* and angular, due to the TS->JS transpilation. But in ng-typeview, we
* can't declare on the scope a field of type [class of MyClass], so that
* field.MY_STATIC_FIELD would work.
* So a workaround is to specify in your controller:
* `import MyClass = api.MyClass;`
* In that case, if you enable this `resolveImportsAsNonScope` option
* (disabled by default), ng-typeview will not resolve
* `MyClass.MY_STATIC_FIELD` as `$scope.MyClass.MY_STATIC_FIELD` anymore,
* but as `MyClass.MY_STATIC_FIELD`. And since we copy the imports in the
* viewtest, it should work.
* But it's pretty messy, so we rather encourage you to avoid statics if
* at all possible.
*/
resolveImportsAsNonScope?: boolean;
}
function deletePreviouslyGeneratedFiles(prjSettings: ProjectSettings): void {
const files = sync(prjSettings.path + "/**/" + getViewTestFilename("*", "*"),
{nodir:true, ignore: prjSettings.blacklistedPaths});
files.forEach(f => unlinkSync(f));
}
/**
* Will go through the views and controllers in the project folder and
* generate viewtest typescript files to ascertain type-safety of the views.
* NOTE: The function returns a promise but is not fully async: a good part of its
* runtime is spend running synchronous functions.
*/
export async function processProject(prjSettings: ProjectSettings): Promise<any> {
deletePreviouslyGeneratedFiles(prjSettings);
const files = sync(prjSettings.path + "/**/*.@(js|ts)",
{nodir:true, ignore: prjSettings.blacklistedPaths});
const viewInfos = await Promise.all(
files.map(f => extractCtrlViewConnsAngularModule(
f, prjSettings.path,
prjSettings.ctrlViewConnectors, prjSettings.modelViewConnectors)));
const viewFilenameToControllerNames: HashMap<string,Vector<ControllerViewInfo>> =
Vector.ofIterable(viewInfos)
.flatMap(vi => Vector.ofIterable(vi.controllerViewInfos))
.appendAll(prjSettings.extraCtrlViewConnections)
.groupBy(cvi => cvi.viewPath);
const controllerNameToFilename =
Vector.ofIterable(viewInfos)
.filter(vi => vi.controllerName.isSome())
// JS files are not going to have a scope interface
// definition so they're not helpful. Also, we can
// get twice the same file: original TS & compiled JS.
// => keep only the original TS in that case.
.filter(vi => vi.fileName.toLowerCase().endsWith(".ts"))
.toMap(vi => [vi.controllerName.getOrThrow(), vi.fileName]);
const viewFilenameToCtrlFilenamesViewConns =
viewFilenameToControllerNames
.map<string,Vector<string>>(
(viewFname,ctrlViewInfos) =>
[viewFname, collectionKeepDefined(
ctrlViewInfos.map(cvi => controllerNameToFilename.get(cvi.controllerName).getOrUndefined()))]);
const viewFilenameToCtrlFilenamesModelConns =
Vector.ofIterable(viewInfos)
.flatMap(vi => Vector.ofIterable(vi.modelViewInfos))
.groupBy(mvi => mvi.viewPath)
.mapValues(mvis => mvis.map(mvi => mvi.modelPath));
const viewFilenameToCtrlFilenames = viewFilenameToCtrlFilenamesViewConns.mergeWith(
viewFilenameToCtrlFilenamesModelConns, (views | {
return "module " + moduleName + " {\n" +
scopeInfo.imports.join("\n") + "\n" +
scopeInfo.typeAliases.join("\n") + "\n" +
scopeInfo.nonExportedDeclarations.join("\n") + "\n" +
contents +
"}\n";
} | identifier_body |
ng-typeview.ts | DirectiveHandler,
defaultTagDirectiveHandlers, defaultAttrDirectiveHandlers} from "./ng-directives"
import {extractControllerScopeInfo, extractCtrlViewConnsAngularModule,
ControllerViewInfo, ControllerScopeInfo,
ControllerViewConnector, defaultCtrlViewConnectors,
CtrlViewFragmentExtractor, defaultCtrlViewFragmentExtractors,
ModelViewConnector, defaultModelViewConnectors} from "./controller-parser"
import {NgFilter, defaultNgFilters} from "./filters"
export {ControllerViewInfo} from "./controller-parser";
// we only repeat the imports, type synonyms and custom interfaces
// if there is a module, because otherwise those are dumped in the
// global namespace anyway
function wrapInModule(moduleName: string, scopeInfo: ControllerScopeInfo,
contents: string): string {
return "module " + moduleName + " {\n" +
scopeInfo.imports.join("\n") + "\n" +
scopeInfo.typeAliases.join("\n") + "\n" +
scopeInfo.nonExportedDeclarations.join("\n") + "\n" +
contents +
"}\n";
}
function getViewTestFilename(ctrlFname: string, viewFname: string): string {
return `${ctrlFname}_${viewFname}_viewtest.ts`;
}
async function processControllerView(prjSettings: ProjectSettings,
controllerPath: string, viewPath: string, ngFilters: NgFilter[],
tagDirectives: TagDirectiveHandler[],
attributeDirectives: AttributeDirectiveHandler[]) {
const scopeContents: ControllerScopeInfo = await extractControllerScopeInfo(
controllerPath, prjSettings.ctrlViewFragmentExtractors);
if (scopeContents.scopeInfo.isNone()) {
// no point of writing anything if there is no scope block
return;
}
const viewExprs = await parseView(
prjSettings.resolveImportsAsNonScope || false,
viewPath, scopeContents.viewFragments, scopeContents.importNames,
Vector.ofIterable(tagDirectives),
Vector.ofIterable(attributeDirectives),
Vector.ofIterable(ngFilters));
const pathInfo = parse(controllerPath);
const viewPathInfo = parse(viewPath);
// putting both controller & view name in the output, as one controller
// may be used for several views.
const outputFname = pathInfo.dir + "/" +
getViewTestFilename(pathInfo.name, viewPathInfo.name);
const moduleWrap = (x:string) => scopeContents.tsModuleName
.map(n => wrapInModule(n, scopeContents, x))
.getOrElse(x);
const filterParams = ngFilters.map(f => `f__${f.name}:${f.type}`).join(",\n ")
const typeParams = scopeContents.scopeTypeParams.getOrElse("");
writeFileSync(outputFname, moduleWrap(
scopeContents.scopeInfo.getOrThrow() +
`\n\nfunction ___f${typeParams}($scope: Scope${
typeParams}, ${filterParams}) {\n` +
viewExprs +
"\n}\n") + "\n");
}
/**
* Configuration for a ng-typeview project.
*/
export interface ProjectSettings {
/**
* The path for the project on disk (root folder)
*/
path: string;
/**
* Folders within the project to exclude from analysis
* (for instance external JS libraries, the folder where
* your typescript is compiled to javascript, and so on).
*/
blacklistedPaths: string[];
/**
* List of angular filters to handle during the analysis.
* You can use [[defaultNgFilters]], add to that list, or specify your own.
*/ | ngFilters: NgFilter[];
/**
* List of controller-view connectors to use.
* [[defaultCtrlViewConnectors]] contains a default list; you can use
* that, add to that list, or specify your own.
*/
ctrlViewConnectors: ControllerViewConnector[];
/**
* Hardcoded controller/view connections that'll be added
* to the ones which were autodetected through ctrlViewConnectors.
* Useful in case it's too hard to parse some connections
* from source.
*/
extraCtrlViewConnections: ControllerViewInfo[];
/**
* List of model-view connectors to use.
* These tie model files to views.
* This allows to express non-controller models, such
* as directive models for instance.
* [[defaultModelViewConnectors]] contains a default list; you can use
* that, add to that list, or specify your own.
*/
modelViewConnectors: ModelViewConnector[];
/**
* List of tag-bound angular directives to handle during the analysis.
* [[defaultTagDirectiveHandlers]] contains a default list; you can use
* that, add to that list, or specify your own.
*/
tagDirectives: TagDirectiveHandler[];
/**
* List of attribute-bound angular directives to handle during the analysis.
* [[defaultAttrDirectiveHandlers]] contains a default list; you can use
* that, add to that list, or specify your own.
*/
attributeDirectives: AttributeDirectiveHandler[];
/**
* Controller view fragment extractors. For instance, you may have
* view fragments present in your controllers, for instance ng-grid has
* 'cell templates' which typeview can also type-check through this mechanism.
* Extractors allows you to tell ng-typeview about those.
*/
ctrlViewFragmentExtractors: CtrlViewFragmentExtractor[];
/**
* When resolving the scope for variables in the view, we prefix "$scope."
* for all variables except those defined in the view. For instance, a
* `ng-repeat` will define local variables. For these, we do not prefix with
* "$scope.". 99% of the time, that works great.
* One issue that can come up though, is if you have static fields for
* instance. If you read `MyClass.MY_STATIC_FIELD`... That'll work in javascript
* and angular, due to the TS->JS transpilation. But in ng-typeview, we
* can't declare on the scope a field of type [class of MyClass], so that
* field.MY_STATIC_FIELD would work.
* So a workaround is to specify in your controller:
* `import MyClass = api.MyClass;`
* In that case, if you enable this `resolveImportsAsNonScope` option
* (disabled by default), ng-typeview will not resolve
* `MyClass.MY_STATIC_FIELD` as `$scope.MyClass.MY_STATIC_FIELD` anymore,
* but as `MyClass.MY_STATIC_FIELD`. And since we copy the imports in the
* viewtest, it should work.
* But it's pretty messy, so we rather encourage you to avoid statics if
* at all possible.
*/
resolveImportsAsNonScope?: boolean;
}
function deletePreviouslyGeneratedFiles(prjSettings: ProjectSettings): void {
const files = sync(prjSettings.path + "/**/" + getViewTestFilename("*", "*"),
{nodir:true, ignore: prjSettings.blacklistedPaths});
files.forEach(f => unlinkSync(f));
}
/**
* Will go through the views and controllers in the project folder and
* generate viewtest typescript files to ascertain type-safety of the views.
* NOTE: The function returns a promise but is not fully async: a good part of its
* runtime is spend running synchronous functions.
*/
export async function processProject(prjSettings: ProjectSettings): Promise<any> {
deletePreviouslyGeneratedFiles(prjSettings);
const files = sync(prjSettings.path + "/**/*.@(js|ts)",
{nodir:true, ignore: prjSettings.blacklistedPaths});
const viewInfos = await Promise.all(
files.map(f => extractCtrlViewConnsAngularModule(
f, prjSettings.path,
prjSettings.ctrlViewConnectors, prjSettings.modelViewConnectors)));
const viewFilenameToControllerNames: HashMap<string,Vector<ControllerViewInfo>> =
Vector.ofIterable(viewInfos)
.flatMap(vi => Vector.ofIterable(vi.controllerViewInfos))
.appendAll(prjSettings.extraCtrlViewConnections)
.groupBy(cvi => cvi.viewPath);
const controllerNameToFilename =
Vector.ofIterable(viewInfos)
.filter(vi => vi.controllerName.isSome())
// JS files are not going to have a scope interface
// definition so they're not helpful. Also, we can
// get twice the same file: original TS & compiled JS.
// => keep only the original TS in that case.
.filter(vi => vi.fileName.toLowerCase().endsWith(".ts"))
.toMap(vi => [vi.controllerName.getOrThrow(), vi.fileName]);
const viewFilenameToCtrlFilenamesViewConns =
viewFilenameToControllerNames
.map<string,Vector<string>>(
(viewFname,ctrlViewInfos) =>
[viewFname, collectionKeepDefined(
ctrlViewInfos.map(cvi => controllerNameToFilename.get(cvi.controllerName).getOrUndefined()))]);
const viewFilenameToCtrlFilenamesModelConns =
Vector.ofIterable(viewInfos)
.flatMap(vi => Vector.ofIterable(vi.modelViewInfos))
.groupBy(mvi => mvi.viewPath)
.mapValues(mvis => mvis.map(mvi => mvi.modelPath));
const viewFilenameToCtrlFilenames = viewFilenameToCtrlFilenamesViewConns.mergeWith(
viewFilenameToCtrlFilenamesModelConns, (views1 | random_line_split | |
ng-typeview.ts | Handler,
defaultTagDirectiveHandlers, defaultAttrDirectiveHandlers} from "./ng-directives"
import {extractControllerScopeInfo, extractCtrlViewConnsAngularModule,
ControllerViewInfo, ControllerScopeInfo,
ControllerViewConnector, defaultCtrlViewConnectors,
CtrlViewFragmentExtractor, defaultCtrlViewFragmentExtractors,
ModelViewConnector, defaultModelViewConnectors} from "./controller-parser"
import {NgFilter, defaultNgFilters} from "./filters"
export {ControllerViewInfo} from "./controller-parser";
// we only repeat the imports, type synonyms and custom interfaces
// if there is a module, because otherwise those are dumped in the
// global namespace anyway
function wrapInModule(moduleName: string, scopeInfo: ControllerScopeInfo,
contents: string): string {
return "module " + moduleName + " {\n" +
scopeInfo.imports.join("\n") + "\n" +
scopeInfo.typeAliases.join("\n") + "\n" +
scopeInfo.nonExportedDeclarations.join("\n") + "\n" +
contents +
"}\n";
}
function getViewTestFilename(ctrlFname: string, viewFname: string): string {
return `${ctrlFname}_${viewFname}_viewtest.ts`;
}
async function processControllerView(prjSettings: ProjectSettings,
controllerPath: string, viewPath: string, ngFilters: NgFilter[],
tagDirectives: TagDirectiveHandler[],
attributeDirectives: AttributeDirectiveHandler[]) {
const scopeContents: ControllerScopeInfo = await extractControllerScopeInfo(
controllerPath, prjSettings.ctrlViewFragmentExtractors);
if (scopeContents.scopeInfo.isNone()) |
const viewExprs = await parseView(
prjSettings.resolveImportsAsNonScope || false,
viewPath, scopeContents.viewFragments, scopeContents.importNames,
Vector.ofIterable(tagDirectives),
Vector.ofIterable(attributeDirectives),
Vector.ofIterable(ngFilters));
const pathInfo = parse(controllerPath);
const viewPathInfo = parse(viewPath);
// putting both controller & view name in the output, as one controller
// may be used for several views.
const outputFname = pathInfo.dir + "/" +
getViewTestFilename(pathInfo.name, viewPathInfo.name);
const moduleWrap = (x:string) => scopeContents.tsModuleName
.map(n => wrapInModule(n, scopeContents, x))
.getOrElse(x);
const filterParams = ngFilters.map(f => `f__${f.name}:${f.type}`).join(",\n ")
const typeParams = scopeContents.scopeTypeParams.getOrElse("");
writeFileSync(outputFname, moduleWrap(
scopeContents.scopeInfo.getOrThrow() +
`\n\nfunction ___f${typeParams}($scope: Scope${
typeParams}, ${filterParams}) {\n` +
viewExprs +
"\n}\n") + "\n");
}
/**
* Configuration for a ng-typeview project.
*/
export interface ProjectSettings {
/**
* The path for the project on disk (root folder)
*/
path: string;
/**
* Folders within the project to exclude from analysis
* (for instance external JS libraries, the folder where
* your typescript is compiled to javascript, and so on).
*/
blacklistedPaths: string[];
/**
* List of angular filters to handle during the analysis.
* You can use [[defaultNgFilters]], add to that list, or specify your own.
*/
ngFilters: NgFilter[];
/**
* List of controller-view connectors to use.
* [[defaultCtrlViewConnectors]] contains a default list; you can use
* that, add to that list, or specify your own.
*/
ctrlViewConnectors: ControllerViewConnector[];
/**
* Hardcoded controller/view connections that'll be added
* to the ones which were autodetected through ctrlViewConnectors.
* Useful in case it's too hard to parse some connections
* from source.
*/
extraCtrlViewConnections: ControllerViewInfo[];
/**
* List of model-view connectors to use.
* These tie model files to views.
* This allows to express non-controller models, such
* as directive models for instance.
* [[defaultModelViewConnectors]] contains a default list; you can use
* that, add to that list, or specify your own.
*/
modelViewConnectors: ModelViewConnector[];
/**
* List of tag-bound angular directives to handle during the analysis.
* [[defaultTagDirectiveHandlers]] contains a default list; you can use
* that, add to that list, or specify your own.
*/
tagDirectives: TagDirectiveHandler[];
/**
* List of attribute-bound angular directives to handle during the analysis.
* [[defaultAttrDirectiveHandlers]] contains a default list; you can use
* that, add to that list, or specify your own.
*/
attributeDirectives: AttributeDirectiveHandler[];
/**
* Controller view fragment extractors. For instance, you may have
* view fragments present in your controllers, for instance ng-grid has
* 'cell templates' which typeview can also type-check through this mechanism.
* Extractors allows you to tell ng-typeview about those.
*/
ctrlViewFragmentExtractors: CtrlViewFragmentExtractor[];
/**
* When resolving the scope for variables in the view, we prefix "$scope."
* for all variables except those defined in the view. For instance, a
* `ng-repeat` will define local variables. For these, we do not prefix with
* "$scope.". 99% of the time, that works great.
* One issue that can come up though, is if you have static fields for
* instance. If you read `MyClass.MY_STATIC_FIELD`... That'll work in javascript
* and angular, due to the TS->JS transpilation. But in ng-typeview, we
* can't declare on the scope a field of type [class of MyClass], so that
* field.MY_STATIC_FIELD would work.
* So a workaround is to specify in your controller:
* `import MyClass = api.MyClass;`
* In that case, if you enable this `resolveImportsAsNonScope` option
* (disabled by default), ng-typeview will not resolve
* `MyClass.MY_STATIC_FIELD` as `$scope.MyClass.MY_STATIC_FIELD` anymore,
* but as `MyClass.MY_STATIC_FIELD`. And since we copy the imports in the
* viewtest, it should work.
* But it's pretty messy, so we rather encourage you to avoid statics if
* at all possible.
*/
resolveImportsAsNonScope?: boolean;
}
function deletePreviouslyGeneratedFiles(prjSettings: ProjectSettings): void {
const files = sync(prjSettings.path + "/**/" + getViewTestFilename("*", "*"),
{nodir:true, ignore: prjSettings.blacklistedPaths});
files.forEach(f => unlinkSync(f));
}
/**
* Will go through the views and controllers in the project folder and
* generate viewtest typescript files to ascertain type-safety of the views.
* NOTE: The function returns a promise but is not fully async: a good part of its
* runtime is spend running synchronous functions.
*/
export async function processProject(prjSettings: ProjectSettings): Promise<any> {
deletePreviouslyGeneratedFiles(prjSettings);
const files = sync(prjSettings.path + "/**/*.@(js|ts)",
{nodir:true, ignore: prjSettings.blacklistedPaths});
const viewInfos = await Promise.all(
files.map(f => extractCtrlViewConnsAngularModule(
f, prjSettings.path,
prjSettings.ctrlViewConnectors, prjSettings.modelViewConnectors)));
const viewFilenameToControllerNames: HashMap<string,Vector<ControllerViewInfo>> =
Vector.ofIterable(viewInfos)
.flatMap(vi => Vector.ofIterable(vi.controllerViewInfos))
.appendAll(prjSettings.extraCtrlViewConnections)
.groupBy(cvi => cvi.viewPath);
const controllerNameToFilename =
Vector.ofIterable(viewInfos)
.filter(vi => vi.controllerName.isSome())
// JS files are not going to have a scope interface
// definition so they're not helpful. Also, we can
// get twice the same file: original TS & compiled JS.
// => keep only the original TS in that case.
.filter(vi => vi.fileName.toLowerCase().endsWith(".ts"))
.toMap(vi => [vi.controllerName.getOrThrow(), vi.fileName]);
const viewFilenameToCtrlFilenamesViewConns =
viewFilenameToControllerNames
.map<string,Vector<string>>(
(viewFname,ctrlViewInfos) =>
[viewFname, collectionKeepDefined(
ctrlViewInfos.map(cvi => controllerNameToFilename.get(cvi.controllerName).getOrUndefined()))]);
const viewFilenameToCtrlFilenamesModelConns =
Vector.ofIterable(viewInfos)
.flatMap(vi => Vector.ofIterable(vi.modelViewInfos))
.groupBy(mvi => mvi.viewPath)
.mapValues(mvis => mvis.map(mvi => mvi.modelPath));
const viewFilenameToCtrlFilenames = viewFilenameToCtrlFilenamesViewConns.mergeWith(
viewFilenameToCtrlFilenamesModelConns, (views | {
// no point of writing anything if there is no scope block
return;
} | conditional_block |
ng-typeview.ts | DirectiveHandler,
defaultTagDirectiveHandlers, defaultAttrDirectiveHandlers} from "./ng-directives"
import {extractControllerScopeInfo, extractCtrlViewConnsAngularModule,
ControllerViewInfo, ControllerScopeInfo,
ControllerViewConnector, defaultCtrlViewConnectors,
CtrlViewFragmentExtractor, defaultCtrlViewFragmentExtractors,
ModelViewConnector, defaultModelViewConnectors} from "./controller-parser"
import {NgFilter, defaultNgFilters} from "./filters"
export {ControllerViewInfo} from "./controller-parser";
// we only repeat the imports, type synonyms and custom interfaces
// if there is a module, because otherwise those are dumped in the
// global namespace anyway
function wrapInModule(moduleName: string, scopeInfo: ControllerScopeInfo,
contents: string): string {
return "module " + moduleName + " {\n" +
scopeInfo.imports.join("\n") + "\n" +
scopeInfo.typeAliases.join("\n") + "\n" +
scopeInfo.nonExportedDeclarations.join("\n") + "\n" +
contents +
"}\n";
}
function | (ctrlFname: string, viewFname: string): string {
return `${ctrlFname}_${viewFname}_viewtest.ts`;
}
async function processControllerView(prjSettings: ProjectSettings,
controllerPath: string, viewPath: string, ngFilters: NgFilter[],
tagDirectives: TagDirectiveHandler[],
attributeDirectives: AttributeDirectiveHandler[]) {
const scopeContents: ControllerScopeInfo = await extractControllerScopeInfo(
controllerPath, prjSettings.ctrlViewFragmentExtractors);
if (scopeContents.scopeInfo.isNone()) {
// no point of writing anything if there is no scope block
return;
}
const viewExprs = await parseView(
prjSettings.resolveImportsAsNonScope || false,
viewPath, scopeContents.viewFragments, scopeContents.importNames,
Vector.ofIterable(tagDirectives),
Vector.ofIterable(attributeDirectives),
Vector.ofIterable(ngFilters));
const pathInfo = parse(controllerPath);
const viewPathInfo = parse(viewPath);
// putting both controller & view name in the output, as one controller
// may be used for several views.
const outputFname = pathInfo.dir + "/" +
getViewTestFilename(pathInfo.name, viewPathInfo.name);
const moduleWrap = (x:string) => scopeContents.tsModuleName
.map(n => wrapInModule(n, scopeContents, x))
.getOrElse(x);
const filterParams = ngFilters.map(f => `f__${f.name}:${f.type}`).join(",\n ")
const typeParams = scopeContents.scopeTypeParams.getOrElse("");
writeFileSync(outputFname, moduleWrap(
scopeContents.scopeInfo.getOrThrow() +
`\n\nfunction ___f${typeParams}($scope: Scope${
typeParams}, ${filterParams}) {\n` +
viewExprs +
"\n}\n") + "\n");
}
/**
* Configuration for a ng-typeview project.
*/
export interface ProjectSettings {
/**
* The path for the project on disk (root folder)
*/
path: string;
/**
* Folders within the project to exclude from analysis
* (for instance external JS libraries, the folder where
* your typescript is compiled to javascript, and so on).
*/
blacklistedPaths: string[];
/**
* List of angular filters to handle during the analysis.
* You can use [[defaultNgFilters]], add to that list, or specify your own.
*/
ngFilters: NgFilter[];
/**
* List of controller-view connectors to use.
* [[defaultCtrlViewConnectors]] contains a default list; you can use
* that, add to that list, or specify your own.
*/
ctrlViewConnectors: ControllerViewConnector[];
/**
* Hardcoded controller/view connections that'll be added
* to the ones which were autodetected through ctrlViewConnectors.
* Useful in case it's too hard to parse some connections
* from source.
*/
extraCtrlViewConnections: ControllerViewInfo[];
/**
* List of model-view connectors to use.
* These tie model files to views.
* This allows to express non-controller models, such
* as directive models for instance.
* [[defaultModelViewConnectors]] contains a default list; you can use
* that, add to that list, or specify your own.
*/
modelViewConnectors: ModelViewConnector[];
/**
* List of tag-bound angular directives to handle during the analysis.
* [[defaultTagDirectiveHandlers]] contains a default list; you can use
* that, add to that list, or specify your own.
*/
tagDirectives: TagDirectiveHandler[];
/**
* List of attribute-bound angular directives to handle during the analysis.
* [[defaultAttrDirectiveHandlers]] contains a default list; you can use
* that, add to that list, or specify your own.
*/
attributeDirectives: AttributeDirectiveHandler[];
/**
* Controller view fragment extractors. For instance, you may have
* view fragments present in your controllers, for instance ng-grid has
* 'cell templates' which typeview can also type-check through this mechanism.
* Extractors allows you to tell ng-typeview about those.
*/
ctrlViewFragmentExtractors: CtrlViewFragmentExtractor[];
/**
* When resolving the scope for variables in the view, we prefix "$scope."
* for all variables except those defined in the view. For instance, a
* `ng-repeat` will define local variables. For these, we do not prefix with
* "$scope.". 99% of the time, that works great.
* One issue that can come up though, is if you have static fields for
* instance. If you read `MyClass.MY_STATIC_FIELD`... That'll work in javascript
* and angular, due to the TS->JS transpilation. But in ng-typeview, we
* can't declare on the scope a field of type [class of MyClass], so that
* field.MY_STATIC_FIELD would work.
* So a workaround is to specify in your controller:
* `import MyClass = api.MyClass;`
* In that case, if you enable this `resolveImportsAsNonScope` option
* (disabled by default), ng-typeview will not resolve
* `MyClass.MY_STATIC_FIELD` as `$scope.MyClass.MY_STATIC_FIELD` anymore,
* but as `MyClass.MY_STATIC_FIELD`. And since we copy the imports in the
* viewtest, it should work.
* But it's pretty messy, so we rather encourage you to avoid statics if
* at all possible.
*/
resolveImportsAsNonScope?: boolean;
}
function deletePreviouslyGeneratedFiles(prjSettings: ProjectSettings): void {
const files = sync(prjSettings.path + "/**/" + getViewTestFilename("*", "*"),
{nodir:true, ignore: prjSettings.blacklistedPaths});
files.forEach(f => unlinkSync(f));
}
/**
* Will go through the views and controllers in the project folder and
* generate viewtest typescript files to ascertain type-safety of the views.
* NOTE: The function returns a promise but is not fully async: a good part of its
* runtime is spend running synchronous functions.
*/
export async function processProject(prjSettings: ProjectSettings): Promise<any> {
deletePreviouslyGeneratedFiles(prjSettings);
const files = sync(prjSettings.path + "/**/*.@(js|ts)",
{nodir:true, ignore: prjSettings.blacklistedPaths});
const viewInfos = await Promise.all(
files.map(f => extractCtrlViewConnsAngularModule(
f, prjSettings.path,
prjSettings.ctrlViewConnectors, prjSettings.modelViewConnectors)));
const viewFilenameToControllerNames: HashMap<string,Vector<ControllerViewInfo>> =
Vector.ofIterable(viewInfos)
.flatMap(vi => Vector.ofIterable(vi.controllerViewInfos))
.appendAll(prjSettings.extraCtrlViewConnections)
.groupBy(cvi => cvi.viewPath);
const controllerNameToFilename =
Vector.ofIterable(viewInfos)
.filter(vi => vi.controllerName.isSome())
// JS files are not going to have a scope interface
// definition so they're not helpful. Also, we can
// get twice the same file: original TS & compiled JS.
// => keep only the original TS in that case.
.filter(vi => vi.fileName.toLowerCase().endsWith(".ts"))
.toMap(vi => [vi.controllerName.getOrThrow(), vi.fileName]);
const viewFilenameToCtrlFilenamesViewConns =
viewFilenameToControllerNames
.map<string,Vector<string>>(
(viewFname,ctrlViewInfos) =>
[viewFname, collectionKeepDefined(
ctrlViewInfos.map(cvi => controllerNameToFilename.get(cvi.controllerName).getOrUndefined()))]);
const viewFilenameToCtrlFilenamesModelConns =
Vector.ofIterable(viewInfos)
.flatMap(vi => Vector.ofIterable(vi.modelViewInfos))
.groupBy(mvi => mvi.viewPath)
.mapValues(mvis => mvis.map(mvi => mvi.modelPath));
const viewFilenameToCtrlFilenames = viewFilenameToCtrlFilenamesViewConns.mergeWith(
viewFilenameToCtrlFilenamesModelConns, (views | getViewTestFilename | identifier_name |
main_glcn.py | parser.add_argument('--method', default='vat')
parser.add_argument('--lr', type=float, default=0.1)
parser.add_argument('--in_channels', type=int, default=3)
parser.add_argument('--out_channels', type=int, default=7)
parser.add_argument('--topk', type=int, default=10)
parser.add_argument('--ngcn_layers', type=int, default=30)
parser.add_argument('--nclass', type=int, default=10)
parser.add_argument('--gamma_reg', type=float, default=0.01)
parser.add_argument('--lamda_reg', type=float, default=0.00001)
parser.add_argument('--dropout', type=float, default=0.0)
parser.add_argument('--cuda', dest='cuda', default='0', type=str)
parser.add_argument('--mode', default='gpu', help='cpu/gpu')
parser.add_argument('--train', default=True, action='store_false')
opt = parser.parse_args()
os.environ["CUDA_VISIBLE_DEVICES"] = cuda_device
# set up gpu
if opt.mode == 'gpu':
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ['CUDA_VISIBLE_DEVICES'] = str(opt.cuda)
print('Using GPU {}'.format(os.environ['CUDA_VISIBLE_DEVICES']), flush= True)
else:
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
print('Using CPU', flush= True)
opt.device = torch.device('cuda:0' if opt.mode == 'gpu' else 'cpu')
def tocuda(x):
if opt.use_cuda:
return x.cuda()
return x
def train(model, x, y, optimizer, lamda_reg=0.0):
|
def eval(y_pred, y):
# print(semi_outputs.shape)
# y_pred = semi_outputs[num_labeled:(num_labeled+num_valid)]
prob, idx = torch.max(y_pred, dim=1)
return torch.eq(idx, y).float().mean()
# Several Ways to initialize the weights
# 1. initialize different weights using different initialization
def weights_init(m):
"""
Usage: model.apply(weights_init)
:param m:
:return:
"""
classname = m.__class__.__name__
if classname.find('Conv') != -1:
m.weight.data.normal_(0.0, 0.02)
elif classname.find('BatchNorm') != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
elif classname.find('Linear') != -1:
m.bias.data.fill_(0)
# 2. weight different weights using different torch.nn methods
def init_all(model, init_funcs):
"""
Usage: init_all(model, init_funcs)
:param model:
:param init_funcs:
:return:
"""
for p in model.parameters():
init_func = init_funcs.get(len(p.shape), init_funcs["default"])
init_func(p)
init_funcs = {
1: lambda x: torch.nn.init.normal_(x, mean=0., std=1.), # can be bias
2: lambda x: torch.nn.init.xavier_normal_(x, gain=1.), # can be weight
3: lambda x: torch.nn.init.xavier_uniform_(x, gain=1.), # can be conv1D filter
4: lambda x: torch.nn.init.xavier_uniform_(x, gain=1.), # can be conv2D filter
"default": lambda x: torch.nn.init.xavier_uniform_(x, gain=1.), # everything else
}
if opt.dataset == 'svhn':
train_loader = torch.utils.data.DataLoader(
datasets.SVHN(root=opt.dataroot, split='train', download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4377, 0.4438, 0.4728), (0.1980, 0.2010, 0.1970))
])),
batch_size=100, shuffle=True)
elif opt.dataset == 'cifar10':
num_labeled = 1000
train_loader = torch.utils.data.DataLoader(
datasets.CIFAR10(root=opt.dataroot, train=True, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
])),
batch_size=100, shuffle=True)
elif opt.dataset == 'mnist':
# num_labeled = 1000
opt.in_channels = 1
train_loader = torch.utils.data.DataLoader(
datasets.MNIST(root=opt.dataroot, train=True, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=100, shuffle=True)
else:
raise NotImplementedError
train_data = []
train_target = []
for (data, target) in train_loader:
train_data.append(data)
train_target.append(target)
train_data = torch.cat(train_data, dim=0)
train_target = torch.cat(train_target, dim=0)
print(f"Total number of dataset {opt.dataset} is {train_data.shape}")
unique_labels = np.unique(train_target)
print("Unique Labels: ", unique_labels)
n_class = len(unique_labels)
nSamples_per_class_train = 100
nSamples_per_class_val = 100
nSamples_per_unlabel = 1000 - nSamples_per_class_train - nSamples_per_class_val
select_train_data = []
select_train_label = []
select_val_data = []
select_val_label = []
unlabeled_train_data = []
unlabeled_train_label = []
for label in unique_labels:
label_mask = (train_target == label)
current_label_X = train_data[label_mask]
current_label_y = train_target[label_mask]
select_train_data.append(current_label_X[:nSamples_per_class_train])
select_train_label.append(current_label_y[:nSamples_per_class_train])
select_val_data.append(current_label_X[nSamples_per_class_train:nSamples_per_class_train+nSamples_per_class_val])
select_val_label.append(current_label_y[nSamples_per_class_train:nSamples_per_class_train + nSamples_per_class_val])
unlabeled_train_data.append(current_label_X[nSamples_per_class_train + nSamples_per_class_val:1000])
unlabeled_train_label.append(current_label_y[nSamples_per_class_train + nSamples_per_class_val:1000])
train_data = torch.cat(select_train_data, dim=0).to(opt.device)
train_target = torch.cat(select_train_label, dim=0).to(opt.device)
valid_data = torch.cat(select_val_data, dim=0).to(opt.device)
valid_target = torch.cat(select_val_label, dim=0).to(opt.device)
test_data = torch.cat(unlabeled_train_data, dim=0).to(opt.device)
test_target = torch.cat(unlabeled_train_label, dim=0).to(opt.device)
# random shuffle the data
train_random_ind = np.arange(nSamples_per_class_train * n_class)
val_random_ind = np.arange(nSamples_per_class_val * n_class)
test_random_ind = np.arange(nSamples_per_unlabel * n_class)
np.random.shuffle(train_random_ind)
np.random.shuffle(val_random_ind)
np.random.shuffle(test_random_ind)
train_data = train_data[train_random_ind]
train_target = train_target[train_random_ind]
valid_data = valid_data[val_random_ind]
valid_target = valid_target[val_random_ind]
test_data = test_data[test_random_ind]
test_target = test_target[test_random_ind]
all_data = torch.cat([train_data, valid_data, test_data], dim=0)
all_data = torch.reshape(all_data, (1000*n_class, -1))
print(all_data.shape)
path_best_model = f'./saved_models/{opt.dataset}/glcn_best_models'
if not os.path.exists(os.path.dirname(path_best_model)):
os.mkdir(os.path.dirname(path_best_model))
opt.in_channels = all_data.shape[1]
model = GLCN(opt.in_channels, opt.out_channels, opt.ngcn_layers,
opt.nclass, opt.gamma_reg, opt.dropout, opt.topk).to(opt.device)
# model.apply(weights_init)
init_all(model, init_funcs)
optimizer = optim.Adam(model.parameters(), lr=opt.lr)
if os.path.exists(path_best_model):
# original saved file with DataParallel
state_dict = torch.load(path_best_model)
model.load_state_dict | model.train()
# ce = nn.CrossEntropyLoss() # This criterion combines nn.LogSoftmax() and nn.NLLLoss() in one single class.
# semi_outputs have been log_softmax, so only NLLLoss() here
nll_loss = nn.NLLLoss()
semi_outputs, loss_GL, S = model(x)
# print("The learned S is ", torch.sum(S, dim=-1))
ce_loss = nll_loss(semi_outputs[:num_labeled], y)
loss = ce_loss + lamda_reg * loss_GL
optimizer.zero_grad()
loss.backward()
optimizer.step()
# print("First Row of X")
# print(x[0])
# print("Adj Matrix....")
# print(S[S > 0])
return semi_outputs, loss, ce_loss | identifier_body |
main_glcn.py | parser.add_argument('--method', default='vat')
parser.add_argument('--lr', type=float, default=0.1)
parser.add_argument('--in_channels', type=int, default=3)
parser.add_argument('--out_channels', type=int, default=7)
parser.add_argument('--topk', type=int, default=10)
parser.add_argument('--ngcn_layers', type=int, default=30)
parser.add_argument('--nclass', type=int, default=10)
parser.add_argument('--gamma_reg', type=float, default=0.01)
parser.add_argument('--lamda_reg', type=float, default=0.00001)
parser.add_argument('--dropout', type=float, default=0.0)
parser.add_argument('--cuda', dest='cuda', default='0', type=str)
parser.add_argument('--mode', default='gpu', help='cpu/gpu')
parser.add_argument('--train', default=True, action='store_false')
opt = parser.parse_args()
os.environ["CUDA_VISIBLE_DEVICES"] = cuda_device
# set up gpu
if opt.mode == 'gpu':
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ['CUDA_VISIBLE_DEVICES'] = str(opt.cuda)
print('Using GPU {}'.format(os.environ['CUDA_VISIBLE_DEVICES']), flush= True)
else:
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
print('Using CPU', flush= True)
opt.device = torch.device('cuda:0' if opt.mode == 'gpu' else 'cpu')
def tocuda(x):
if opt.use_cuda:
return x.cuda()
return x
def train(model, x, y, optimizer, lamda_reg=0.0):
model.train()
# ce = nn.CrossEntropyLoss() # This criterion combines nn.LogSoftmax() and nn.NLLLoss() in one single class.
# semi_outputs have been log_softmax, so only NLLLoss() here
nll_loss = nn.NLLLoss()
semi_outputs, loss_GL, S = model(x)
# print("The learned S is ", torch.sum(S, dim=-1))
ce_loss = nll_loss(semi_outputs[:num_labeled], y)
loss = ce_loss + lamda_reg * loss_GL
optimizer.zero_grad() | # print("First Row of X")
# print(x[0])
# print("Adj Matrix....")
# print(S[S > 0])
return semi_outputs, loss, ce_loss
def eval(y_pred, y):
# print(semi_outputs.shape)
# y_pred = semi_outputs[num_labeled:(num_labeled+num_valid)]
prob, idx = torch.max(y_pred, dim=1)
return torch.eq(idx, y).float().mean()
# Several Ways to initialize the weights
# 1. initialize different weights using different initialization
def weights_init(m):
"""
Usage: model.apply(weights_init)
:param m:
:return:
"""
classname = m.__class__.__name__
if classname.find('Conv') != -1:
m.weight.data.normal_(0.0, 0.02)
elif classname.find('BatchNorm') != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
elif classname.find('Linear') != -1:
m.bias.data.fill_(0)
# 2. weight different weights using different torch.nn methods
def init_all(model, init_funcs):
"""
Usage: init_all(model, init_funcs)
:param model:
:param init_funcs:
:return:
"""
for p in model.parameters():
init_func = init_funcs.get(len(p.shape), init_funcs["default"])
init_func(p)
init_funcs = {
1: lambda x: torch.nn.init.normal_(x, mean=0., std=1.), # can be bias
2: lambda x: torch.nn.init.xavier_normal_(x, gain=1.), # can be weight
3: lambda x: torch.nn.init.xavier_uniform_(x, gain=1.), # can be conv1D filter
4: lambda x: torch.nn.init.xavier_uniform_(x, gain=1.), # can be conv2D filter
"default": lambda x: torch.nn.init.xavier_uniform_(x, gain=1.), # everything else
}
if opt.dataset == 'svhn':
train_loader = torch.utils.data.DataLoader(
datasets.SVHN(root=opt.dataroot, split='train', download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4377, 0.4438, 0.4728), (0.1980, 0.2010, 0.1970))
])),
batch_size=100, shuffle=True)
elif opt.dataset == 'cifar10':
num_labeled = 1000
train_loader = torch.utils.data.DataLoader(
datasets.CIFAR10(root=opt.dataroot, train=True, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
])),
batch_size=100, shuffle=True)
elif opt.dataset == 'mnist':
# num_labeled = 1000
opt.in_channels = 1
train_loader = torch.utils.data.DataLoader(
datasets.MNIST(root=opt.dataroot, train=True, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=100, shuffle=True)
else:
raise NotImplementedError
train_data = []
train_target = []
for (data, target) in train_loader:
train_data.append(data)
train_target.append(target)
train_data = torch.cat(train_data, dim=0)
train_target = torch.cat(train_target, dim=0)
print(f"Total number of dataset {opt.dataset} is {train_data.shape}")
unique_labels = np.unique(train_target)
print("Unique Labels: ", unique_labels)
n_class = len(unique_labels)
nSamples_per_class_train = 100
nSamples_per_class_val = 100
nSamples_per_unlabel = 1000 - nSamples_per_class_train - nSamples_per_class_val
select_train_data = []
select_train_label = []
select_val_data = []
select_val_label = []
unlabeled_train_data = []
unlabeled_train_label = []
for label in unique_labels:
label_mask = (train_target == label)
current_label_X = train_data[label_mask]
current_label_y = train_target[label_mask]
select_train_data.append(current_label_X[:nSamples_per_class_train])
select_train_label.append(current_label_y[:nSamples_per_class_train])
select_val_data.append(current_label_X[nSamples_per_class_train:nSamples_per_class_train+nSamples_per_class_val])
select_val_label.append(current_label_y[nSamples_per_class_train:nSamples_per_class_train + nSamples_per_class_val])
unlabeled_train_data.append(current_label_X[nSamples_per_class_train + nSamples_per_class_val:1000])
unlabeled_train_label.append(current_label_y[nSamples_per_class_train + nSamples_per_class_val:1000])
train_data = torch.cat(select_train_data, dim=0).to(opt.device)
train_target = torch.cat(select_train_label, dim=0).to(opt.device)
valid_data = torch.cat(select_val_data, dim=0).to(opt.device)
valid_target = torch.cat(select_val_label, dim=0).to(opt.device)
test_data = torch.cat(unlabeled_train_data, dim=0).to(opt.device)
test_target = torch.cat(unlabeled_train_label, dim=0).to(opt.device)
# random shuffle the data
train_random_ind = np.arange(nSamples_per_class_train * n_class)
val_random_ind = np.arange(nSamples_per_class_val * n_class)
test_random_ind = np.arange(nSamples_per_unlabel * n_class)
np.random.shuffle(train_random_ind)
np.random.shuffle(val_random_ind)
np.random.shuffle(test_random_ind)
train_data = train_data[train_random_ind]
train_target = train_target[train_random_ind]
valid_data = valid_data[val_random_ind]
valid_target = valid_target[val_random_ind]
test_data = test_data[test_random_ind]
test_target = test_target[test_random_ind]
all_data = torch.cat([train_data, valid_data, test_data], dim=0)
all_data = torch.reshape(all_data, (1000*n_class, -1))
print(all_data.shape)
path_best_model = f'./saved_models/{opt.dataset}/glcn_best_models'
if not os.path.exists(os.path.dirname(path_best_model)):
os.mkdir(os.path.dirname(path_best_model))
opt.in_channels = all_data.shape[1]
model = GLCN(opt.in_channels, opt.out_channels, opt.ngcn_layers,
opt.nclass, opt.gamma_reg, opt.dropout, opt.topk).to(opt.device)
# model.apply(weights_init)
init_all(model, init_funcs)
optimizer = optim.Adam(model.parameters(), lr=opt.lr)
if os.path.exists(path_best_model):
# original saved file with DataParallel
state_dict = torch.load(path_best_model)
model.load_state_dict | loss.backward()
optimizer.step()
| random_line_split |
main_glcn.py | parser.add_argument('--method', default='vat')
parser.add_argument('--lr', type=float, default=0.1)
parser.add_argument('--in_channels', type=int, default=3)
parser.add_argument('--out_channels', type=int, default=7)
parser.add_argument('--topk', type=int, default=10)
parser.add_argument('--ngcn_layers', type=int, default=30)
parser.add_argument('--nclass', type=int, default=10)
parser.add_argument('--gamma_reg', type=float, default=0.01)
parser.add_argument('--lamda_reg', type=float, default=0.00001)
parser.add_argument('--dropout', type=float, default=0.0)
parser.add_argument('--cuda', dest='cuda', default='0', type=str)
parser.add_argument('--mode', default='gpu', help='cpu/gpu')
parser.add_argument('--train', default=True, action='store_false')
opt = parser.parse_args()
os.environ["CUDA_VISIBLE_DEVICES"] = cuda_device
# set up gpu
if opt.mode == 'gpu':
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ['CUDA_VISIBLE_DEVICES'] = str(opt.cuda)
print('Using GPU {}'.format(os.environ['CUDA_VISIBLE_DEVICES']), flush= True)
else:
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
print('Using CPU', flush= True)
opt.device = torch.device('cuda:0' if opt.mode == 'gpu' else 'cpu')
def tocuda(x):
if opt.use_cuda:
return x.cuda()
return x
def train(model, x, y, optimizer, lamda_reg=0.0):
model.train()
# ce = nn.CrossEntropyLoss() # This criterion combines nn.LogSoftmax() and nn.NLLLoss() in one single class.
# semi_outputs have been log_softmax, so only NLLLoss() here
nll_loss = nn.NLLLoss()
semi_outputs, loss_GL, S = model(x)
# print("The learned S is ", torch.sum(S, dim=-1))
ce_loss = nll_loss(semi_outputs[:num_labeled], y)
loss = ce_loss + lamda_reg * loss_GL
optimizer.zero_grad()
loss.backward()
optimizer.step()
# print("First Row of X")
# print(x[0])
# print("Adj Matrix....")
# print(S[S > 0])
return semi_outputs, loss, ce_loss
def eval(y_pred, y):
# print(semi_outputs.shape)
# y_pred = semi_outputs[num_labeled:(num_labeled+num_valid)]
prob, idx = torch.max(y_pred, dim=1)
return torch.eq(idx, y).float().mean()
# Several Ways to initialize the weights
# 1. initialize different weights using different initialization
def | (m):
"""
Usage: model.apply(weights_init)
:param m:
:return:
"""
classname = m.__class__.__name__
if classname.find('Conv') != -1:
m.weight.data.normal_(0.0, 0.02)
elif classname.find('BatchNorm') != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
elif classname.find('Linear') != -1:
m.bias.data.fill_(0)
# 2. weight different weights using different torch.nn methods
def init_all(model, init_funcs):
"""
Usage: init_all(model, init_funcs)
:param model:
:param init_funcs:
:return:
"""
for p in model.parameters():
init_func = init_funcs.get(len(p.shape), init_funcs["default"])
init_func(p)
init_funcs = {
1: lambda x: torch.nn.init.normal_(x, mean=0., std=1.), # can be bias
2: lambda x: torch.nn.init.xavier_normal_(x, gain=1.), # can be weight
3: lambda x: torch.nn.init.xavier_uniform_(x, gain=1.), # can be conv1D filter
4: lambda x: torch.nn.init.xavier_uniform_(x, gain=1.), # can be conv2D filter
"default": lambda x: torch.nn.init.xavier_uniform_(x, gain=1.), # everything else
}
if opt.dataset == 'svhn':
train_loader = torch.utils.data.DataLoader(
datasets.SVHN(root=opt.dataroot, split='train', download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4377, 0.4438, 0.4728), (0.1980, 0.2010, 0.1970))
])),
batch_size=100, shuffle=True)
elif opt.dataset == 'cifar10':
num_labeled = 1000
train_loader = torch.utils.data.DataLoader(
datasets.CIFAR10(root=opt.dataroot, train=True, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
])),
batch_size=100, shuffle=True)
elif opt.dataset == 'mnist':
# num_labeled = 1000
opt.in_channels = 1
train_loader = torch.utils.data.DataLoader(
datasets.MNIST(root=opt.dataroot, train=True, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=100, shuffle=True)
else:
raise NotImplementedError
train_data = []
train_target = []
for (data, target) in train_loader:
train_data.append(data)
train_target.append(target)
train_data = torch.cat(train_data, dim=0)
train_target = torch.cat(train_target, dim=0)
print(f"Total number of dataset {opt.dataset} is {train_data.shape}")
unique_labels = np.unique(train_target)
print("Unique Labels: ", unique_labels)
n_class = len(unique_labels)
nSamples_per_class_train = 100
nSamples_per_class_val = 100
nSamples_per_unlabel = 1000 - nSamples_per_class_train - nSamples_per_class_val
select_train_data = []
select_train_label = []
select_val_data = []
select_val_label = []
unlabeled_train_data = []
unlabeled_train_label = []
for label in unique_labels:
label_mask = (train_target == label)
current_label_X = train_data[label_mask]
current_label_y = train_target[label_mask]
select_train_data.append(current_label_X[:nSamples_per_class_train])
select_train_label.append(current_label_y[:nSamples_per_class_train])
select_val_data.append(current_label_X[nSamples_per_class_train:nSamples_per_class_train+nSamples_per_class_val])
select_val_label.append(current_label_y[nSamples_per_class_train:nSamples_per_class_train + nSamples_per_class_val])
unlabeled_train_data.append(current_label_X[nSamples_per_class_train + nSamples_per_class_val:1000])
unlabeled_train_label.append(current_label_y[nSamples_per_class_train + nSamples_per_class_val:1000])
train_data = torch.cat(select_train_data, dim=0).to(opt.device)
train_target = torch.cat(select_train_label, dim=0).to(opt.device)
valid_data = torch.cat(select_val_data, dim=0).to(opt.device)
valid_target = torch.cat(select_val_label, dim=0).to(opt.device)
test_data = torch.cat(unlabeled_train_data, dim=0).to(opt.device)
test_target = torch.cat(unlabeled_train_label, dim=0).to(opt.device)
# random shuffle the data
train_random_ind = np.arange(nSamples_per_class_train * n_class)
val_random_ind = np.arange(nSamples_per_class_val * n_class)
test_random_ind = np.arange(nSamples_per_unlabel * n_class)
np.random.shuffle(train_random_ind)
np.random.shuffle(val_random_ind)
np.random.shuffle(test_random_ind)
train_data = train_data[train_random_ind]
train_target = train_target[train_random_ind]
valid_data = valid_data[val_random_ind]
valid_target = valid_target[val_random_ind]
test_data = test_data[test_random_ind]
test_target = test_target[test_random_ind]
all_data = torch.cat([train_data, valid_data, test_data], dim=0)
all_data = torch.reshape(all_data, (1000*n_class, -1))
print(all_data.shape)
path_best_model = f'./saved_models/{opt.dataset}/glcn_best_models'
if not os.path.exists(os.path.dirname(path_best_model)):
os.mkdir(os.path.dirname(path_best_model))
opt.in_channels = all_data.shape[1]
model = GLCN(opt.in_channels, opt.out_channels, opt.ngcn_layers,
opt.nclass, opt.gamma_reg, opt.dropout, opt.topk).to(opt.device)
# model.apply(weights_init)
init_all(model, init_funcs)
optimizer = optim.Adam(model.parameters(), lr=opt.lr)
if os.path.exists(path_best_model):
# original saved file with DataParallel
state_dict = torch.load(path_best_model)
model.load_state | weights_init | identifier_name |
main_glcn.py | parser.add_argument('--method', default='vat')
parser.add_argument('--lr', type=float, default=0.1)
parser.add_argument('--in_channels', type=int, default=3)
parser.add_argument('--out_channels', type=int, default=7)
parser.add_argument('--topk', type=int, default=10)
parser.add_argument('--ngcn_layers', type=int, default=30)
parser.add_argument('--nclass', type=int, default=10)
parser.add_argument('--gamma_reg', type=float, default=0.01)
parser.add_argument('--lamda_reg', type=float, default=0.00001)
parser.add_argument('--dropout', type=float, default=0.0)
parser.add_argument('--cuda', dest='cuda', default='0', type=str)
parser.add_argument('--mode', default='gpu', help='cpu/gpu')
parser.add_argument('--train', default=True, action='store_false')
opt = parser.parse_args()
os.environ["CUDA_VISIBLE_DEVICES"] = cuda_device
# set up gpu
if opt.mode == 'gpu':
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ['CUDA_VISIBLE_DEVICES'] = str(opt.cuda)
print('Using GPU {}'.format(os.environ['CUDA_VISIBLE_DEVICES']), flush= True)
else:
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
print('Using CPU', flush= True)
opt.device = torch.device('cuda:0' if opt.mode == 'gpu' else 'cpu')
def tocuda(x):
if opt.use_cuda:
return x.cuda()
return x
def train(model, x, y, optimizer, lamda_reg=0.0):
model.train()
# ce = nn.CrossEntropyLoss() # This criterion combines nn.LogSoftmax() and nn.NLLLoss() in one single class.
# semi_outputs have been log_softmax, so only NLLLoss() here
nll_loss = nn.NLLLoss()
semi_outputs, loss_GL, S = model(x)
# print("The learned S is ", torch.sum(S, dim=-1))
ce_loss = nll_loss(semi_outputs[:num_labeled], y)
loss = ce_loss + lamda_reg * loss_GL
optimizer.zero_grad()
loss.backward()
optimizer.step()
# print("First Row of X")
# print(x[0])
# print("Adj Matrix....")
# print(S[S > 0])
return semi_outputs, loss, ce_loss
def eval(y_pred, y):
# print(semi_outputs.shape)
# y_pred = semi_outputs[num_labeled:(num_labeled+num_valid)]
prob, idx = torch.max(y_pred, dim=1)
return torch.eq(idx, y).float().mean()
# Several Ways to initialize the weights
# 1. initialize different weights using different initialization
def weights_init(m):
"""
Usage: model.apply(weights_init)
:param m:
:return:
"""
classname = m.__class__.__name__
if classname.find('Conv') != -1:
m.weight.data.normal_(0.0, 0.02)
elif classname.find('BatchNorm') != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
elif classname.find('Linear') != -1:
m.bias.data.fill_(0)
# 2. weight different weights using different torch.nn methods
def init_all(model, init_funcs):
"""
Usage: init_all(model, init_funcs)
:param model:
:param init_funcs:
:return:
"""
for p in model.parameters():
init_func = init_funcs.get(len(p.shape), init_funcs["default"])
init_func(p)
init_funcs = {
1: lambda x: torch.nn.init.normal_(x, mean=0., std=1.), # can be bias
2: lambda x: torch.nn.init.xavier_normal_(x, gain=1.), # can be weight
3: lambda x: torch.nn.init.xavier_uniform_(x, gain=1.), # can be conv1D filter
4: lambda x: torch.nn.init.xavier_uniform_(x, gain=1.), # can be conv2D filter
"default": lambda x: torch.nn.init.xavier_uniform_(x, gain=1.), # everything else
}
if opt.dataset == 'svhn':
train_loader = torch.utils.data.DataLoader(
datasets.SVHN(root=opt.dataroot, split='train', download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4377, 0.4438, 0.4728), (0.1980, 0.2010, 0.1970))
])),
batch_size=100, shuffle=True)
elif opt.dataset == 'cifar10':
num_labeled = 1000
train_loader = torch.utils.data.DataLoader(
datasets.CIFAR10(root=opt.dataroot, train=True, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
])),
batch_size=100, shuffle=True)
elif opt.dataset == 'mnist':
# num_labeled = 1000
opt.in_channels = 1
train_loader = torch.utils.data.DataLoader(
datasets.MNIST(root=opt.dataroot, train=True, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=100, shuffle=True)
else:
raise NotImplementedError
train_data = []
train_target = []
for (data, target) in train_loader:
train_data.append(data)
train_target.append(target)
train_data = torch.cat(train_data, dim=0)
train_target = torch.cat(train_target, dim=0)
print(f"Total number of dataset {opt.dataset} is {train_data.shape}")
unique_labels = np.unique(train_target)
print("Unique Labels: ", unique_labels)
n_class = len(unique_labels)
nSamples_per_class_train = 100
nSamples_per_class_val = 100
nSamples_per_unlabel = 1000 - nSamples_per_class_train - nSamples_per_class_val
select_train_data = []
select_train_label = []
select_val_data = []
select_val_label = []
unlabeled_train_data = []
unlabeled_train_label = []
for label in unique_labels:
|
train_data = torch.cat(select_train_data, dim=0).to(opt.device)
train_target = torch.cat(select_train_label, dim=0).to(opt.device)
valid_data = torch.cat(select_val_data, dim=0).to(opt.device)
valid_target = torch.cat(select_val_label, dim=0).to(opt.device)
test_data = torch.cat(unlabeled_train_data, dim=0).to(opt.device)
test_target = torch.cat(unlabeled_train_label, dim=0).to(opt.device)
# random shuffle the data
train_random_ind = np.arange(nSamples_per_class_train * n_class)
val_random_ind = np.arange(nSamples_per_class_val * n_class)
test_random_ind = np.arange(nSamples_per_unlabel * n_class)
np.random.shuffle(train_random_ind)
np.random.shuffle(val_random_ind)
np.random.shuffle(test_random_ind)
train_data = train_data[train_random_ind]
train_target = train_target[train_random_ind]
valid_data = valid_data[val_random_ind]
valid_target = valid_target[val_random_ind]
test_data = test_data[test_random_ind]
test_target = test_target[test_random_ind]
all_data = torch.cat([train_data, valid_data, test_data], dim=0)
all_data = torch.reshape(all_data, (1000*n_class, -1))
print(all_data.shape)
path_best_model = f'./saved_models/{opt.dataset}/glcn_best_models'
if not os.path.exists(os.path.dirname(path_best_model)):
os.mkdir(os.path.dirname(path_best_model))
opt.in_channels = all_data.shape[1]
model = GLCN(opt.in_channels, opt.out_channels, opt.ngcn_layers,
opt.nclass, opt.gamma_reg, opt.dropout, opt.topk).to(opt.device)
# model.apply(weights_init)
init_all(model, init_funcs)
optimizer = optim.Adam(model.parameters(), lr=opt.lr)
if os.path.exists(path_best_model):
# original saved file with DataParallel
state_dict = torch.load(path_best_model)
model.load_state | label_mask = (train_target == label)
current_label_X = train_data[label_mask]
current_label_y = train_target[label_mask]
select_train_data.append(current_label_X[:nSamples_per_class_train])
select_train_label.append(current_label_y[:nSamples_per_class_train])
select_val_data.append(current_label_X[nSamples_per_class_train:nSamples_per_class_train+nSamples_per_class_val])
select_val_label.append(current_label_y[nSamples_per_class_train:nSamples_per_class_train + nSamples_per_class_val])
unlabeled_train_data.append(current_label_X[nSamples_per_class_train + nSamples_per_class_val:1000])
unlabeled_train_label.append(current_label_y[nSamples_per_class_train + nSamples_per_class_val:1000]) | conditional_block |
plot.go | averageLatency float64
periodicLatencies []analytics.PeriodicAvgLatency
}
func (sts *stats) fromPackets(packets []packet.Packet) {
sts.averageLatency = analytics.CalcPositiveAverageLatency(packets)
sts.periodicLatencies = analytics.CalcPeriodicAverageLatency(packets)
}
func maxPacketsValue(packets []packet.Packet) (xMin int64, xMax int64, yMin float64, yMax float64) {
xMin, xMax = int64(1<<63-1), -int64(1<<63-1)
yMin, yMax = float64(xMin), float64(xMax)
for _, pkt := range packets {
x := pkt.ReceivedAt().UnixNano()
y := pkt.Value()
if x < xMin {
xMin = x
}
if x > xMax {
xMax = x
}
if y < yMin {
yMin = y
}
if y > yMax {
yMax = y
}
}
//fmt.Printf("boundaries: x = %v .. %v, y = %v .. %v\n", xMin, xMax, yMin, yMax)
return
}
func SavePDF(packets []packet.Packet, inputFilename string, filename string, verbose bool) (err error) {
plot := plotter{
xPaperSize: 842 * 4,
yPaperSize: 595,
xLeftMargin: 12,
xRightMargin: 12,
yTopMargin: 24,
yBottomMargin: 12,
titlePrefix: "TRex Packets Chart",
inputFilename: inputFilename,
//outputFilename: filename,
}
plot.fromPackets(packets)
stats := stats{averageLatency: 0}
stats.fromPackets(packets)
pdf, err := preparePdf(&plot, stats)
if err != nil {
return
}
drawPackets(&pdf, packets, &plot)
drawAnalytics(&pdf, stats, &plot)
drawAxis(&pdf, &plot)
f, err := os.Create(filename)
if err != nil {
return err
}
defer f.Close()
err = pdf.Write(f)
if err != nil {
return err
}
return nil
}
func preparePdf(plot *plotter, sts stats) (pdf gopdf.GoPdf, err error) {
pdf = gopdf.GoPdf{}
pdf.Start(gopdf.Config{PageSize: gopdf.Rect{W: plot.xPaperSize, H: plot.yPaperSize}, Unit: gopdf.Unit_PT})
pdf.SetInfo(gopdf.PdfInfo{
Title: fmt.Sprintf("%v for %v", plot.titlePrefix, plot.inputFilename),
Subject: plot.titlePrefix,
Creator: "trex-helpers",
Producer: "https://github.com/signintech/gopdf",
CreationDate: time.Now(),
})
pdf.AddPage()
err = pdf.AddTTFFont("FiraSans-Book", "/usr/share/fonts/TTF/FiraSans-Book.ttf")
if err != nil {
return
}
err = pdf.AddTTFFont("FiraSans-Medium", "/usr/share/fonts/TTF/FiraSans-Medium.ttf")
if err != nil {
return
}
err = makeTitle(&pdf, plot.inputFilename)
if err != nil {
return
}
err = makeFootnote(&pdf, plot)
if err != nil {
return
}
// due to some bug(?) in gopdf one cannot reliably write text on already “drawn” PDF page
err = makeAxisAnnotations(&pdf, plot)
if err != nil {
return
}
err = makeStatsAnnotations(&pdf, sts, plot)
if err != nil {
return
}
return pdf, err
}
func makeTitle(pdf *gopdf.GoPdf, inputFilename string) (err error) {
pdf.SetTextColor(0x00, 0x00, 0x00)
err = pdf.SetFont("FiraSans-Book", "", 18)
if err != nil {
return
}
pdf.SetX(4)
pdf.SetY(22)
err = pdf.Text("TRex Packets Chart for ")
if err != nil {
return
}
err = pdf.SetFont("FiraSans-Medium", "", 18)
if err != nil {
return
}
err = pdf.Text(inputFilename)
if err != nil {
return
}
return nil
}
func makeFootnote(pdf *gopdf.GoPdf, plot *plotter) (err error) {
err = pdf.SetFont("FiraSans-Book", "", 8)
if err != nil {
return
}
pdf.SetX(4)
pdf.SetY(plot.yPaperSize - 3)
err = pdf.Text(fmt.Sprintf("%v", time.Now()))
if err != nil {
return err
}
pdf.SetX(plot.xPaperSize - 106)
err = pdf.Text("generated with trex-helpers")
if err != nil {
return err
}
pdf.AddExternalLink("https://github.com/mateumann/trex-helpers",
plot.xPaperSize-106.5, plot.yPaperSize-10.5, 105, 10)
return nil
}
func makeAxisAnnotations(pdf *gopdf.GoPdf, plot *plotter) (err error) {
pdf.SetTextColor(0, 0, 0)
err = pdf.SetFont("FiraSans-Book", "", 12)
if err != nil {
return
}
for _, y := range verticalSteps(plot) {
yOnPaper := plot.yPaperSize - plot.yBottomMargin - (y-plot.yMin)*plot.yScale
err = makeAnnotation(pdf, plot.xLeftMargin, yOnPaper-4, 0, 0, 0, fmt.Sprintf("%v µs", y))
if err != nil {
return
}
}
for _, x := range horizontalSteps(true, plot) {
xOnPaper := plot.xLeftMargin + x*plot.xScale
err = makeAnnotation(pdf, xOnPaper-6, plot.yZeroAt+16, 0, 0, 0, fmt.Sprintf("%v s", x/1000/1000/1000))
if err != nil {
return
}
}
return nil
}
func verticalSteps(plot *plotter) (steps []float64) {
plot.yLineStep = int64(math.Pow10(int(math.Ceil(math.Log10((plot.yMax-plot.yMin)/2))) - 1))
lo := plot.yLineStep * (int64(plot.yMin) / plot.yLineStep)
hi := plot.yLineStep * (int64(plot.yMax) / plot.yLineStep)
for y := lo; y <= hi; y += plot.yLineStep {
steps = append(steps, float64(y))
}
return
}
func horizontalSteps(forAnnotations bool, plot *plotter) (steps []float64) {
if forAnnotations {
plot.xLineStep = int64(math.Pow10(int(math.Ceil(math.Log10(float64(plot.xMax-plot.xMin)))) - 1))
} else {
plot.xLineStep = int64(math.Pow10(int(math.Ceil(math.Log10(float64(plot.xMax-plot.xMin)))) - 2))
}
//fmt.Printf("forAnnotations = %5v, xLineStep = %v\n", forAnnotations, plot.xLineStep)
hi := plot.xLineStep * (plot.xMax / plot.xLineStep)
for x := plot.xMin + plot.xLineStep; x <= hi+plot.xLineStep; x += plot.xLineStep {
steps = append(steps, float64(x-plot.xMin))
}
return
}
func makeAnnotation(pdf *gopdf.GoPdf, x, y float64, r, g, b uint8, text string) (err error) {
pdf.SetTextColor(r, g, b)
pdf.SetX(x)
pdf.SetY(y)
err = pdf.Text(text)
if err != nil {
return
}
return nil
}
func makeStatsAnnotations(pdf *gopdf.GoPdf, sts stats, plot *plotter) (err error) {
yOnPaper := plot.yPaperSize - plot.yBottomMargin - (sts.averageLatency-plot.yMin)*plot.yScale
err = pdf.SetFont("FiraSans-Book", "", 12)
if err != nil {
return err
}
err = makeAnnotation(pdf, plot.xLeftMargin+20, yOnPaper-5, 0xd3, 0x86, 0x9b,
fmt.Sprintf("avg. lat. %.2f µs", sts.averageLatency))
if err != nil {
return err
}
pdf.SetStrokeColor(0xfb, 0x49, 0x34)
for _, periodicData := range sts.periodicLatencies {
x0 := float64 | random_line_split | ||
plot.go | (packets []packet.Packet) {
sts.averageLatency = analytics.CalcPositiveAverageLatency(packets)
sts.periodicLatencies = analytics.CalcPeriodicAverageLatency(packets)
}
func maxPacketsValue(packets []packet.Packet) (xMin int64, xMax int64, yMin float64, yMax float64) {
xMin, xMax = int64(1<<63-1), -int64(1<<63-1)
yMin, yMax = float64(xMin), float64(xMax)
for _, pkt := range packets {
x := pkt.ReceivedAt().UnixNano()
y := pkt.Value()
if x < xMin {
xMin = x
}
if x > xMax {
xMax = x
}
if y < yMin {
yMin = y
}
if y > yMax {
yMax = y
}
}
//fmt.Printf("boundaries: x = %v .. %v, y = %v .. %v\n", xMin, xMax, yMin, yMax)
return
}
func SavePDF(packets []packet.Packet, inputFilename string, filename string, verbose bool) (err error) {
plot := plotter{
xPaperSize: 842 * 4,
yPaperSize: 595,
xLeftMargin: 12,
xRightMargin: 12,
yTopMargin: 24,
yBottomMargin: 12,
titlePrefix: "TRex Packets Chart",
inputFilename: inputFilename,
//outputFilename: filename,
}
plot.fromPackets(packets)
stats := stats{averageLatency: 0}
stats.fromPackets(packets)
pdf, err := preparePdf(&plot, stats)
if err != nil {
return
}
drawPackets(&pdf, packets, &plot)
drawAnalytics(&pdf, stats, &plot)
drawAxis(&pdf, &plot)
f, err := os.Create(filename)
if err != nil {
return err
}
defer f.Close()
err = pdf.Write(f)
if err != nil {
return err
}
return nil
}
func preparePdf(plot *plotter, sts stats) (pdf gopdf.GoPdf, err error) {
pdf = gopdf.GoPdf{}
pdf.Start(gopdf.Config{PageSize: gopdf.Rect{W: plot.xPaperSize, H: plot.yPaperSize}, Unit: gopdf.Unit_PT})
pdf.SetInfo(gopdf.PdfInfo{
Title: fmt.Sprintf("%v for %v", plot.titlePrefix, plot.inputFilename),
Subject: plot.titlePrefix,
Creator: "trex-helpers",
Producer: "https://github.com/signintech/gopdf",
CreationDate: time.Now(),
})
pdf.AddPage()
err = pdf.AddTTFFont("FiraSans-Book", "/usr/share/fonts/TTF/FiraSans-Book.ttf")
if err != nil {
return
}
err = pdf.AddTTFFont("FiraSans-Medium", "/usr/share/fonts/TTF/FiraSans-Medium.ttf")
if err != nil {
return
}
err = makeTitle(&pdf, plot.inputFilename)
if err != nil {
return
}
err = makeFootnote(&pdf, plot)
if err != nil {
return
}
// due to some bug(?) in gopdf one cannot reliably write text on already “drawn” PDF page
err = makeAxisAnnotations(&pdf, plot)
if err != nil {
return
}
err = makeStatsAnnotations(&pdf, sts, plot)
if err != nil {
return
}
return pdf, err
}
func makeTitle(pdf *gopdf.GoPdf, inputFilename string) (err error) {
pdf.SetTextColor(0x00, 0x00, 0x00)
err = pdf.SetFont("FiraSans-Book", "", 18)
if err != nil {
return
}
pdf.SetX(4)
pdf.SetY(22)
err = pdf.Text("TRex Packets Chart for ")
if err != nil {
return
}
err = pdf.SetFont("FiraSans-Medium", "", 18)
if err != nil {
return
}
err = pdf.Text(inputFilename)
if err != nil {
return
}
return nil
}
func makeFootnote(pdf *gopdf.GoPdf, plot *plotter) (err error) {
err = pdf.SetFont("FiraSans-Book", "", 8)
if err != nil {
return
}
pdf.SetX(4)
pdf.SetY(plot.yPaperSize - 3)
err = pdf.Text(fmt.Sprintf("%v", time.Now()))
if err != nil {
return err
}
pdf.SetX(plot.xPaperSize - 106)
err = pdf.Text("generated with trex-helpers")
if err != nil {
return err
}
pdf.AddExternalLink("https://github.com/mateumann/trex-helpers",
plot.xPaperSize-106.5, plot.yPaperSize-10.5, 105, 10)
return nil
}
func makeAxisAnnotations(pdf *gopdf.GoPdf, plot *plotter) (err error) {
pdf.SetTextColor(0, 0, 0)
err = pdf.SetFont("FiraSans-Book", "", 12)
if err != nil {
return
}
for _, y := range verticalSteps(plot) {
yOnPaper := plot.yPaperSize - plot.yBottomMargin - (y-plot.yMin)*plot.yScale
err = makeAnnotation(pdf, plot.xLeftMargin, yOnPaper-4, 0, 0, 0, fmt.Sprintf("%v µs", y))
if err != nil {
return
}
}
for _, x := range horizontalSteps(true, plot) {
xOnPaper := plot.xLeftMargin + x*plot.xScale
err = makeAnnotation(pdf, xOnPaper-6, plot.yZeroAt+16, 0, 0, 0, fmt.Sprintf("%v s", x/1000/1000/1000))
if err != nil {
return
}
}
return nil
}
func verticalSteps(plot *plotter) (steps []float64) {
plot.yLineStep = int64(math.Pow10(int(math.Ceil(math.Log10((plot.yMax-plot.yMin)/2))) - 1))
lo := plot.yLineStep * (int64(plot.yMin) / plot.yLineStep)
hi := plot.yLineStep * (int64(plot.yMax) / plot.yLineStep)
for y := lo; y <= hi; y += plot.yLineStep {
steps = append(steps, float64(y))
}
return
}
func horizontalSteps(forAnnotations bool, plot *plotter) (steps []float64) {
if forAnnotations {
plot.xLineStep = int64(math.Pow10(int(math.Ceil(math.Log10(float64(plot.xMax-plot.xMin)))) - 1))
} else {
plot.xLineStep = int64(math.Pow10(int(math.Ceil(math.Log10(float64(plot.xMax-plot.xMin)))) - 2))
}
//fmt.Printf("forAnnotations = %5v, xLineStep = %v\n", forAnnotations, plot.xLineStep)
hi := plot.xLineStep * (plot.xMax / plot.xLineStep)
for x := plot.xMin + plot.xLineStep; x <= hi+plot.xLineStep; x += plot.xLineStep {
steps = append(steps, float64(x-plot.xMin))
}
return
}
func makeAnnotation(pdf *gopdf.GoPdf, x, y float64, r, g, b uint8, text string) (err error) {
pdf.SetTextColor(r, g, b)
pdf.SetX(x)
pdf.SetY(y)
err = pdf.Text(text)
if err != nil {
return
}
return nil
}
func makeStatsAnnotations(pdf *gopdf.GoPdf, sts stats, plot *plotter) (err error) {
yOnPaper := plot.yPaperSize - plot.yBottomMargin - (sts.averageLatency-plot.yMin)*plot.yScale
err = pdf.SetFont("FiraSans-Book", "", 12)
if err != nil {
return err
}
err = makeAnnotation(pdf, plot.xLeftMargin+20, yOnPaper-5, 0xd3, 0x86, 0x9b,
fmt.Sprintf("avg. lat. %.2f µs", sts.averageLatency))
if err != nil {
return err
}
pdf.SetStrokeColor(0xfb, 0x49, 0x34)
for _, periodicData := range sts.periodicLatencies {
x0 := float64(periodicData.StartTimestamp.UnixNano() - plot.xMin)
x0OnPaper := plot.xLeftMargin + x0*plot.x | fromPackets | identifier_name | |
plot.go | ackets)
sts.periodicLatencies = analytics.CalcPeriodicAverageLatency(packets)
}
func maxPacketsValue(packets []packet.Packet) (xMin int64, xMax int64, yMin float64, yMax float64) {
xMin, xMax = int64(1<<63-1), -int64(1<<63-1)
yMin, yMax = float64(xMin), float64(xMax)
for _, pkt := range packets {
x := pkt.ReceivedAt().UnixNano()
y := pkt.Value()
if x < xMin {
xMin = x
}
if x > xMax {
xMax = x
}
if y < yMin {
yMin = y
}
if y > yMax {
yMax = y
}
}
//fmt.Printf("boundaries: x = %v .. %v, y = %v .. %v\n", xMin, xMax, yMin, yMax)
return
}
func SavePDF(packets []packet.Packet, inputFilename string, filename string, verbose bool) (err error) {
plot := plotter{
xPaperSize: 842 * 4,
yPaperSize: 595,
xLeftMargin: 12,
xRightMargin: 12,
yTopMargin: 24,
yBottomMargin: 12,
titlePrefix: "TRex Packets Chart",
inputFilename: inputFilename,
//outputFilename: filename,
}
plot.fromPackets(packets)
stats := stats{averageLatency: 0}
stats.fromPackets(packets)
pdf, err := preparePdf(&plot, stats)
if err != nil {
return
}
drawPackets(&pdf, packets, &plot)
drawAnalytics(&pdf, stats, &plot)
drawAxis(&pdf, &plot)
f, err := os.Create(filename)
if err != nil {
return err
}
defer f.Close()
err = pdf.Write(f)
if err != nil {
return err
}
return nil
}
func preparePdf(plot *plotter, sts stats) (pdf gopdf.GoPdf, err error) {
pdf = gopdf.GoPdf{}
pdf.Start(gopdf.Config{PageSize: gopdf.Rect{W: plot.xPaperSize, H: plot.yPaperSize}, Unit: gopdf.Unit_PT})
pdf.SetInfo(gopdf.PdfInfo{
Title: fmt.Sprintf("%v for %v", plot.titlePrefix, plot.inputFilename),
Subject: plot.titlePrefix,
Creator: "trex-helpers",
Producer: "https://github.com/signintech/gopdf",
CreationDate: time.Now(),
})
pdf.AddPage()
err = pdf.AddTTFFont("FiraSans-Book", "/usr/share/fonts/TTF/FiraSans-Book.ttf")
if err != nil {
return
}
err = pdf.AddTTFFont("FiraSans-Medium", "/usr/share/fonts/TTF/FiraSans-Medium.ttf")
if err != nil {
return
}
err = makeTitle(&pdf, plot.inputFilename)
if err != nil {
return
}
err = makeFootnote(&pdf, plot)
if err != nil {
return
}
// due to some bug(?) in gopdf one cannot reliably write text on already “drawn” PDF page
err = makeAxisAnnotations(&pdf, plot)
if err != nil {
return
}
err = makeStatsAnnotations(&pdf, sts, plot)
if err != nil {
return
}
return pdf, err
}
func makeTitle(pdf *gopdf.GoPdf, inputFilename string) (err error) {
pdf.SetTextColor(0x00, 0x00, 0x00)
err = pdf.SetFont("FiraSans-Book", "", 18)
if err != nil {
return
}
pdf.SetX(4)
pdf.SetY(22)
err = pdf.Text("TRex Packets Chart for ")
if err != nil {
return
}
err = pdf.SetFont("FiraSans-Medium", "", 18)
if err != nil {
return
}
err = pdf.Text(inputFilename)
if err != nil {
return
}
return nil
}
func makeFootnote(pdf *gopdf.GoPdf, plot *plotter) (err error) {
err = pdf.SetFont("FiraSans-Book", "", 8)
if err != nil {
return
}
pdf.SetX(4)
pdf.SetY(plot.yPaperSize - 3)
err = pdf.Text(fmt.Sprintf("%v", time.Now()))
if err != nil {
return err
}
pdf.SetX(plot.xPaperSize - 106)
err = pdf.Text("generated with trex-helpers")
if err != nil {
return err
}
pdf.AddExternalLink("https://github.com/mateumann/trex-helpers",
plot.xPaperSize-106.5, plot.yPaperSize-10.5, 105, 10)
return nil
}
func makeAxisAnnotations(pdf *gopdf.GoPdf, plot *plotter) (err error) {
pdf.SetTextColor(0, 0, 0)
err = pdf.SetFont("FiraSans-Book", "", 12)
if err != nil {
return
}
for _, y := range verticalSteps(plot) {
yOnPaper := plot.yPaperSize - plot.yBottomMargin - (y-plot.yMin)*plot.yScale
err = makeAnnotation(pdf, plot.xLeftMargin, yOnPaper-4, 0, 0, 0, fmt.Sprintf("%v µs", y))
if err != nil {
return
}
}
for _, x := range horizontalSteps(true, plot) {
xOnPaper := plot.xLeftMargin + x*plot.xScale
err = makeAnnotation(pdf, xOnPaper-6, plot.yZeroAt+16, 0, 0, 0, fmt.Sprintf("%v s", x/1000/1000/1000))
if err != nil {
| return nil
}
func verticalSteps(plot *plotter) (steps []float64) {
plot.yLineStep = int64(math.Pow10(int(math.Ceil(math.Log10((plot.yMax-plot.yMin)/2))) - 1))
lo := plot.yLineStep * (int64(plot.yMin) / plot.yLineStep)
hi := plot.yLineStep * (int64(plot.yMax) / plot.yLineStep)
for y := lo; y <= hi; y += plot.yLineStep {
steps = append(steps, float64(y))
}
return
}
func horizontalSteps(forAnnotations bool, plot *plotter) (steps []float64) {
if forAnnotations {
plot.xLineStep = int64(math.Pow10(int(math.Ceil(math.Log10(float64(plot.xMax-plot.xMin)))) - 1))
} else {
plot.xLineStep = int64(math.Pow10(int(math.Ceil(math.Log10(float64(plot.xMax-plot.xMin)))) - 2))
}
//fmt.Printf("forAnnotations = %5v, xLineStep = %v\n", forAnnotations, plot.xLineStep)
hi := plot.xLineStep * (plot.xMax / plot.xLineStep)
for x := plot.xMin + plot.xLineStep; x <= hi+plot.xLineStep; x += plot.xLineStep {
steps = append(steps, float64(x-plot.xMin))
}
return
}
func makeAnnotation(pdf *gopdf.GoPdf, x, y float64, r, g, b uint8, text string) (err error) {
pdf.SetTextColor(r, g, b)
pdf.SetX(x)
pdf.SetY(y)
err = pdf.Text(text)
if err != nil {
return
}
return nil
}
func makeStatsAnnotations(pdf *gopdf.GoPdf, sts stats, plot *plotter) (err error) {
yOnPaper := plot.yPaperSize - plot.yBottomMargin - (sts.averageLatency-plot.yMin)*plot.yScale
err = pdf.SetFont("FiraSans-Book", "", 12)
if err != nil {
return err
}
err = makeAnnotation(pdf, plot.xLeftMargin+20, yOnPaper-5, 0xd3, 0x86, 0x9b,
fmt.Sprintf("avg. lat. %.2f µs", sts.averageLatency))
if err != nil {
return err
}
pdf.SetStrokeColor(0xfb, 0x49, 0x34)
for _, periodicData := range sts.periodicLatencies {
x0 := float64(periodicData.StartTimestamp.UnixNano() - plot.xMin)
x0OnPaper := plot.xLeftMargin + x0*plot.xScale
yOnPaper := plot.yPaperSize - plot.yBottomMargin - (periodicData.Value- | return
}
}
| conditional_block |
plot.go | pdf.AddTTFFont("FiraSans-Book", "/usr/share/fonts/TTF/FiraSans-Book.ttf")
if err != nil {
return
}
err = pdf.AddTTFFont("FiraSans-Medium", "/usr/share/fonts/TTF/FiraSans-Medium.ttf")
if err != nil {
return
}
err = makeTitle(&pdf, plot.inputFilename)
if err != nil {
return
}
err = makeFootnote(&pdf, plot)
if err != nil {
return
}
// due to some bug(?) in gopdf one cannot reliably write text on already “drawn” PDF page
err = makeAxisAnnotations(&pdf, plot)
if err != nil {
return
}
err = makeStatsAnnotations(&pdf, sts, plot)
if err != nil {
return
}
return pdf, err
}
func makeTitle(pdf *gopdf.GoPdf, inputFilename string) (err error) {
pdf.SetTextColor(0x00, 0x00, 0x00)
err = pdf.SetFont("FiraSans-Book", "", 18)
if err != nil {
return
}
pdf.SetX(4)
pdf.SetY(22)
err = pdf.Text("TRex Packets Chart for ")
if err != nil {
return
}
err = pdf.SetFont("FiraSans-Medium", "", 18)
if err != nil {
return
}
err = pdf.Text(inputFilename)
if err != nil {
return
}
return nil
}
func makeFootnote(pdf *gopdf.GoPdf, plot *plotter) (err error) {
err = pdf.SetFont("FiraSans-Book", "", 8)
if err != nil {
return
}
pdf.SetX(4)
pdf.SetY(plot.yPaperSize - 3)
err = pdf.Text(fmt.Sprintf("%v", time.Now()))
if err != nil {
return err
}
pdf.SetX(plot.xPaperSize - 106)
err = pdf.Text("generated with trex-helpers")
if err != nil {
return err
}
pdf.AddExternalLink("https://github.com/mateumann/trex-helpers",
plot.xPaperSize-106.5, plot.yPaperSize-10.5, 105, 10)
return nil
}
func makeAxisAnnotations(pdf *gopdf.GoPdf, plot *plotter) (err error) {
pdf.SetTextColor(0, 0, 0)
err = pdf.SetFont("FiraSans-Book", "", 12)
if err != nil {
return
}
for _, y := range verticalSteps(plot) {
yOnPaper := plot.yPaperSize - plot.yBottomMargin - (y-plot.yMin)*plot.yScale
err = makeAnnotation(pdf, plot.xLeftMargin, yOnPaper-4, 0, 0, 0, fmt.Sprintf("%v µs", y))
if err != nil {
return
}
}
for _, x := range horizontalSteps(true, plot) {
xOnPaper := plot.xLeftMargin + x*plot.xScale
err = makeAnnotation(pdf, xOnPaper-6, plot.yZeroAt+16, 0, 0, 0, fmt.Sprintf("%v s", x/1000/1000/1000))
if err != nil {
return
}
}
return nil
}
func verticalSteps(plot *plotter) (steps []float64) {
plot.yLineStep = int64(math.Pow10(int(math.Ceil(math.Log10((plot.yMax-plot.yMin)/2))) - 1))
lo := plot.yLineStep * (int64(plot.yMin) / plot.yLineStep)
hi := plot.yLineStep * (int64(plot.yMax) / plot.yLineStep)
for y := lo; y <= hi; y += plot.yLineStep {
steps = append(steps, float64(y))
}
return
}
func horizontalSteps(forAnnotations bool, plot *plotter) (steps []float64) {
if forAnnotations {
plot.xLineStep = int64(math.Pow10(int(math.Ceil(math.Log10(float64(plot.xMax-plot.xMin)))) - 1))
} else {
plot.xLineStep = int64(math.Pow10(int(math.Ceil(math.Log10(float64(plot.xMax-plot.xMin)))) - 2))
}
//fmt.Printf("forAnnotations = %5v, xLineStep = %v\n", forAnnotations, plot.xLineStep)
hi := plot.xLineStep * (plot.xMax / plot.xLineStep)
for x := plot.xMin + plot.xLineStep; x <= hi+plot.xLineStep; x += plot.xLineStep {
steps = append(steps, float64(x-plot.xMin))
}
return
}
func makeAnnotation(pdf *gopdf.GoPdf, x, y float64, r, g, b uint8, text string) (err error) {
pdf.SetTextColor(r, g, b)
pdf.SetX(x)
pdf.SetY(y)
err = pdf.Text(text)
if err != nil {
return
}
return nil
}
func makeStatsAnnotations(pdf *gopdf.GoPdf, sts stats, plot *plotter) (err error) {
yOnPaper := plot.yPaperSize - plot.yBottomMargin - (sts.averageLatency-plot.yMin)*plot.yScale
err = pdf.SetFont("FiraSans-Book", "", 12)
if err != nil {
return err
}
err = makeAnnotation(pdf, plot.xLeftMargin+20, yOnPaper-5, 0xd3, 0x86, 0x9b,
fmt.Sprintf("avg. lat. %.2f µs", sts.averageLatency))
if err != nil {
return err
}
pdf.SetStrokeColor(0xfb, 0x49, 0x34)
for _, periodicData := range sts.periodicLatencies {
x0 := float64(periodicData.StartTimestamp.UnixNano() - plot.xMin)
x0OnPaper := plot.xLeftMargin + x0*plot.xScale
yOnPaper := plot.yPaperSize - plot.yBottomMargin - (periodicData.Value-plot.yMin)*plot.yScale
if periodicData.Value < 0 {
yOnPaper += 20
}
err = makeAnnotation(pdf, x0OnPaper+5, yOnPaper-5, 0xfb, 0x49, 0x34,
fmt.Sprintf("%.2f µs", periodicData.Value))
if err != nil {
return err
}
}
return nil
}
func drawAnalytics(pdf *gopdf.GoPdf, sts stats, plot *plotter) {
yOnPaper := plot.yPaperSize - plot.yBottomMargin - (sts.averageLatency-plot.yMin)*plot.yScale
pdf.SetStrokeColor(0xd3, 0x86, 0x9b)
pdf.SetLineWidth(1)
pdf.SetLineType("dashed")
pdf.Line(plot.xLeftMargin, yOnPaper, plot.xPaperSize-plot.xRightMargin, yOnPaper)
//pdf.SetStrokeColor(0xd3, 0x86, 0x9b)
pdf.SetStrokeColor(0xfb, 0x49, 0x34)
for _, periodicData := range sts.periodicLatencies {
x0 := float64(periodicData.StartTimestamp.UnixNano() - plot.xMin)
x1 := float64(periodicData.EndTimestamp.UnixNano() - plot.xMin)
x0OnPaper := plot.xLeftMargin + x0*plot.xScale
x1OnPaper := plot.xLeftMargin + x1*plot.xScale
yOnPaper := plot.yPaperSize - plot.yBottomMargin - (periodicData.Value-plot.yMin)*plot.yScale
pdf.Line(x0OnPaper, yOnPaper, x1OnPaper, yOnPaper)
}
}
func drawPackets(pdf *gopdf.GoPdf, packets []packet.Packet, plot *plotter) {
// first draw "other" packets, then the rest
pdf.SetLineWidth(plot.xScale)
pdf.SetLineType("solid")
for _, pkt := range packets {
if pkt.Type() != packet.TypeOther {
continue
}
makeLine(pdf, pkt, plot)
}
for _, pkt := range packets {
if pkt.Type() == packet.TypeOther {
continue
}
makeLine(pdf, pkt, plot)
}
}
func makeLine(pdf *gopdf.GoPdf, pkt packet.Packet, plot *plotter) {
x := | float64(pkt.ReceivedAt().UnixNano() - plot.xMin)
y := pkt.Value()
pdf.SetStrokeColor(pktColor(pkt))
xOnPaper := plot.xLeftMargin + x*plot.xScale
yOnPaper := plot.yPaperSize - plot.yBottomMargin - (y-plot.yMin)*plot.yScale
pdf.Line(xOnPaper, plot.yZeroAt, xOnPaper, yOnPaper)
}
func | identifier_body | |
cc.rs | it.
let fat_ptr: [*mut (); 2] = unsafe { mem::transmute(&mut dummy as &mut dyn CcDyn) };
fat_ptr[1]
}
}
impl CcDyn for CcDummy {
fn gc_ref_count(&self) -> usize {
1
}
fn gc_traverse(&self, _tracer: &mut Tracer) {}
fn gc_clone(&self) -> Box<dyn GcClone> {
panic!("bug: CcDummy::gc_clone should never be called");
}
}
impl<T: Trace> Cc<T> {
/// Constructs a new [`Cc<T>`](type.Cc.html) in a thread-local storage.
///
/// To collect cycles, use [`collect_thread_cycles`](fn.collect_thread_cycles.html).
pub fn new(value: T) -> Cc<T> {
collect::THREAD_OBJECT_SPACE.with(|space| Self::new_in_space(value, space))
}
}
impl<T: Trace, O: AbstractObjectSpace> RawCc<T, O> {
/// Constructs a new [`Cc<T>`](type.Cc.html) in the given
/// [`ObjectSpace`](struct.ObjectSpace.html).
///
/// To collect cycles, call `ObjectSpace::collect_cycles()`.
pub(crate) fn new_in_space(value: T, space: &O) -> Self {
let is_tracked = T::is_type_tracked();
let cc_box = RawCcBox {
ref_count: space.new_ref_count(is_tracked),
value: UnsafeCell::new(ManuallyDrop::new(value)),
#[cfg(test)]
name: debug::NEXT_DEBUG_NAME.with(|n| n.get().to_string()),
};
let ccbox_ptr: *mut RawCcBox<T, O> = if is_tracked {
// Create a GcHeader before the CcBox. This is similar to cpython.
let header = space.empty_header();
let cc_box_with_header = RawCcBoxWithGcHeader { header, cc_box };
let mut boxed = Box::new(cc_box_with_header);
// Fix-up fields in GcHeader. This is done after the creation of the
// Box so the memory addresses are stable.
space.insert(&mut boxed.header, &boxed.cc_box);
debug_assert_eq!(
mem::size_of::<O::Header>() + mem::size_of::<RawCcBox<T, O>>(),
mem::size_of::<RawCcBoxWithGcHeader<T, O>>()
);
let ptr: *mut RawCcBox<T, O> = &mut boxed.cc_box;
Box::leak(boxed);
ptr
} else | ;
// safety: ccbox_ptr cannot be null from the above code.
let non_null = unsafe { NonNull::new_unchecked(ccbox_ptr) };
let result = Self(non_null);
if is_tracked {
debug::log(|| (result.debug_name(), "new (CcBoxWithGcHeader)"));
} else {
debug::log(|| (result.debug_name(), "new (CcBox)"));
}
debug_assert_eq!(result.ref_count(), 1);
result
}
/// Convert to `RawCc<dyn Trace>`.
pub fn into_dyn(self) -> RawCc<dyn Trace, O> {
#[cfg(feature = "nightly")]
{
// Requires CoerceUnsized, which is currently unstable.
self
}
// safety: Trait object magic. Test by test_dyn_downcast.
#[cfg(not(feature = "nightly"))]
unsafe {
// XXX: This depends on rust internals. But it works on stable.
// Replace this with CoerceUnsized once that becomes stable.
// Cc<dyn Trace> has 2 usize values: The first one is the same
// as Cc<T>. The second one is the vtable. The vtable pointer
// is the same as the second pointer of `&dyn Trace`.
let mut fat_ptr: [usize; 2] = mem::transmute(self.inner().deref() as &dyn Trace);
let self_ptr: usize = mem::transmute(self);
fat_ptr[0] = self_ptr;
mem::transmute(fat_ptr)
}
}
}
impl<T: Trace + Clone> Cc<T> {
/// Update the value `T` in a copy-on-write way.
///
/// If the ref count is 1, the value is updated in-place.
/// Otherwise a new `Cc<T>` will be created.
pub fn update_with(&mut self, mut update_func: impl FnMut(&mut T)) {
let need_clone = self.ref_count() > 1;
if need_clone {
let mut value = <Cc<T>>::deref(self).clone();
update_func(&mut value);
*self = Cc::new(value);
} else {
let value_ptr: *mut ManuallyDrop<T> = self.inner().value.get();
let value_mut: &mut T = unsafe { &mut *value_ptr }.deref_mut();
update_func(value_mut);
}
}
}
impl<T: ?Sized, O: AbstractObjectSpace> RawCcBox<T, O> {
#[inline]
fn header_ptr(&self) -> *const () {
self.header() as *const _ as _
}
#[inline]
fn header(&self) -> &O::Header {
debug_assert!(self.is_tracked());
// safety: See `Cc::new`. GcHeader is before CcBox for tracked objects.
unsafe { cast_ref(self, -(mem::size_of::<O::Header>() as isize)) }
}
#[inline]
fn is_tracked(&self) -> bool {
self.ref_count.is_tracked()
}
#[inline]
fn is_dropped(&self) -> bool {
self.ref_count.is_dropped()
}
#[inline]
fn inc_ref(&self) -> usize {
self.ref_count.inc_ref()
}
#[inline]
fn dec_ref(&self) -> usize {
self.ref_count.dec_ref()
}
#[inline]
fn ref_count(&self) -> usize {
self.ref_count.ref_count()
}
#[inline]
fn weak_count(&self) -> usize {
self.ref_count.weak_count()
}
#[inline]
fn set_dropped(&self) -> bool {
self.ref_count.set_dropped()
}
#[inline]
pub(crate) fn drop_t(&self) {
let already_dropped = self.set_dropped();
if !already_dropped {
debug::log(|| (self.debug_name(), "drop (T)"));
// safety: is_dropped() check ensures T is only dropped once. Other
// places (ex. gc collector) ensure that T is no longer accessed.
unsafe { ManuallyDrop::drop(&mut *(self.value.get())) };
}
}
pub(crate) fn trace_t(&self, tracer: &mut Tracer) {
if !self.is_tracked() {
return;
}
debug::log(|| (self.debug_name(), "trace"));
// For other non-`Cc<T>` container types, `trace` visit referents,
// is recursive, and does not call `tracer` directly. For `Cc<T>`,
// `trace` stops here, is non-recursive, and does apply `tracer`
// to the actual `GcHeader`. It's expected that the upper layer
// calls `gc_traverse` on everything (not just roots).
tracer(self.header_ptr());
}
pub(crate) fn debug_name(&self) -> String {
#[cfg(test)]
{
self.name.clone()
}
#[cfg(not(test))]
{
#[allow(unused_mut)]
let mut result = format!("{} at {:p}", std::any::type_name::<T>(), &self.value);
#[cfg(all(feature = "debug", feature = "nightly"))]
{
if !self.is_dropped() && crate::debug::GC_DROPPING.with(|t| !t.get()) {
let debug = self.deref().optional_debug();
if !debug.is_empty() {
result += &format!(" {}", debug);
}
}
}
return result;
}
}
}
#[cfg(all(feature = "debug", feature = "nightly"))]
pub(crate) trait OptionalDebug {
fn optional_debug(&self) -> String;
}
#[cfg(all(feature = "debug", feature = "nightly"))]
impl<T: ?Sized> OptionalDebug for T {
default fn optional_debug(&self) -> String {
"".to_string()
}
}
#[cfg(all(feature = "debug", feature = "nightly"))]
impl<T: std::fmt::Debug + ?Sized> OptionalDebug for T {
fn optional_debug(&self) -> String {
format!("{:?}", self)
}
}
impl<T: ?Sized, O: AbstractObjectSpace> RawCc<T, O> {
/// Obtains a "weak reference", a non-owning pointer.
pub fn downgrade(&self) -> RawWeak<T, O> {
let inner = self.inner();
inner.ref_count.inc_weak();
debug::log(|| {
(
inner.debug_name(),
format!("new-weak ({})", inner.ref_count.weak_count()),
)
});
RawWeak(self.0)
}
/// | {
Box::into_raw(Box::new(cc_box))
} | conditional_block |
cc.rs | #[inline]
pub fn weak_count(&self) -> usize {
self.inner().weak_count()
}
}
impl<T: ?Sized, O: AbstractObjectSpace> RawCc<T, O> {
#[inline]
pub(crate) fn inner(&self) -> &RawCcBox<T, O> {
// safety: CcBox lifetime maintained by ref count. Pointer is valid.
unsafe { self.0.as_ref() }
}
/// `trace` without `T: Trace` bound.
///
/// Useful for structures with `Cc<T>` fields where `T` does not implement
/// `Trace`. For example, `struct S(Cc<Box<dyn MyTrait>>)`. To implement
/// `Trace` for `S`, it can use `Cc::trace(&self.0, tracer)`.
#[inline]
pub fn trace(&self, tracer: &mut Tracer) {
self.inner().trace_t(tracer);
}
#[inline]
fn inc_ref(&self) -> usize {
self.inner().inc_ref()
}
#[inline]
fn dec_ref(&self) -> usize {
self.inner().dec_ref()
}
#[inline]
pub(crate) fn ref_count(&self) -> usize {
self.inner().ref_count()
}
/// Get the weak (non-owning) reference count.
#[inline]
pub fn weak_count(&self) -> usize {
self.inner().weak_count()
}
pub(crate) fn debug_name(&self) -> String {
self.inner().debug_name()
}
}
impl<T: ?Sized, O: AbstractObjectSpace> RawWeak<T, O> {
#[inline]
fn inner(&self) -> &RawCcBox<T, O> {
// safety: CcBox lifetime maintained by ref count. Pointer is valid.
unsafe { self.0.as_ref() }
}
}
impl<T: ?Sized, O: AbstractObjectSpace> Clone for RawCc<T, O> {
#[inline]
fn clone(&self) -> Self {
// In theory self.inner().ref_count.locked() is needed.
// Practically this is an atomic operation that cannot be split so locking
// becomes optional.
// let _locked = self.inner().ref_count.locked();
self.inc_ref();
debug::log(|| (self.debug_name(), format!("clone ({})", self.ref_count())));
Self(self.0)
}
}
impl<T: ?Sized, O: AbstractObjectSpace> Clone for RawWeak<T, O> {
#[inline]
fn clone(&self) -> Self {
let inner = self.inner();
let ref_count = &inner.ref_count;
ref_count.inc_weak();
debug::log(|| {
(
inner.debug_name(),
format!("clone-weak ({})", ref_count.weak_count()),
)
});
Self(self.0)
}
}
impl<T: ?Sized> Deref for Cc<T> {
type Target = T;
#[inline]
fn deref(&self) -> &Self::Target {
self.inner().deref()
}
}
impl<T: ?Sized, O: AbstractObjectSpace> Deref for RawCcBox<T, O> {
type Target = T;
#[inline]
fn deref(&self) -> &Self::Target {
debug_assert!(
!self.is_dropped(),
concat!(
"bug: accessing a dropped CcBox detected\n",
"This usually happens after ignoring another panic triggered by the collector."
)
);
// safety: CcBox (and its value) lifetime maintained by ref count.
// If `Trace` is implemented correctly then the GC won't drop_t()
// incorrectly and this pointer is valid. Otherwise the above
// assertion can prevent UBs on debug build.
unsafe { &*self.value.get() }
}
}
fn drop_ccbox<T: ?Sized, O: AbstractObjectSpace>(cc_box: *mut RawCcBox<T, O>) {
// safety: See Cc::new. The pointer was created by Box::into_raw.
let cc_box: Box<RawCcBox<T, O>> = unsafe { Box::from_raw(cc_box) };
let is_tracked = cc_box.is_tracked();
if is_tracked {
// The real object is CcBoxWithGcHeader. Drop that instead.
// safety: See Cc::new for CcBoxWithGcHeader.
let gc_box: Box<RawCcBoxWithGcHeader<T, O>> = unsafe { cast_box(cc_box) };
O::remove(&gc_box.header);
// Drop T if it hasn't been dropped yet.
// This needs to be after O::remove so the collector won't have a
// chance to read dropped content.
gc_box.cc_box.drop_t();
debug::log(|| (gc_box.cc_box.debug_name(), "drop (CcBoxWithGcHeader)"));
drop(gc_box);
} else {
// Drop T if it hasn't been dropped yet.
cc_box.drop_t();
debug::log(|| (cc_box.debug_name(), "drop (CcBox)"));
drop(cc_box);
}
}
impl<T: ?Sized, O: AbstractObjectSpace> Drop for RawCc<T, O> {
fn drop(&mut self) {
let ptr: *mut RawCcBox<T, O> = self.0.as_ptr();
let inner = self.inner();
// Block threaded collector. This is needed because "drop()" is a
// complex operation. The whole operation needs to be "atomic".
let _locked = inner.ref_count.locked();
let old_ref_count = self.dec_ref();
debug::log(|| (self.debug_name(), format!("drop ({})", self.ref_count())));
debug_assert!(old_ref_count >= 1);
if old_ref_count == 1 {
if self.weak_count() == 0 {
// safety: CcBox lifetime maintained by ref count.
drop_ccbox(ptr);
} else {
inner.drop_t();
}
}
}
}
impl<T: ?Sized, O: AbstractObjectSpace> Drop for RawWeak<T, O> {
fn drop(&mut self) {
let ptr: *mut RawCcBox<T, O> = self.0.as_ptr();
let inner = self.inner();
let ref_count = &inner.ref_count;
// Block threaded collector to "freeze" the ref count, for safety.
let _locked = ref_count.locked();
let old_ref_count = ref_count.ref_count();
let old_weak_count = ref_count.dec_weak();
debug::log(|| {
(
inner.debug_name(),
format!("drop-weak ({})", ref_count.weak_count()),
)
});
debug_assert!(old_weak_count >= 1);
if old_ref_count == 0 && old_weak_count == 1 {
// safety: CcBox lifetime maintained by ref count.
drop_ccbox(ptr);
}
}
}
impl<T: Trace + ?Sized, O: AbstractObjectSpace> CcDyn for RawCcBox<T, O> {
fn gc_ref_count(&self) -> usize {
self.ref_count()
}
fn gc_traverse(&self, tracer: &mut Tracer) {
debug::log(|| (self.debug_name(), "gc_traverse"));
T::trace(self.deref(), tracer)
}
fn gc_clone(&self) -> Box<dyn GcClone> {
self.ref_count.inc_ref();
debug::log(|| {
let msg = format!("gc_clone ({})", self.ref_count());
(self.debug_name(), msg)
});
// safety: The pointer is compatible. The mutability is different only
// to satisfy NonNull (NonNull::new requires &mut). The returned value
// is still "immutable". &self can also never be nonnull.
let ptr: NonNull<RawCcBox<T, O>> =
unsafe { NonNull::new_unchecked(self as *const _ as *mut _) };
let cc = RawCc::<T, O>(ptr);
Box::new(cc)
}
#[cfg(feature = "debug")]
fn gc_debug_name(&self) -> String {
self.debug_name()
}
}
impl<T: Trace + ?Sized, O: AbstractObjectSpace> GcClone for RawCc<T, O> {
fn gc_ref_count(&self) -> usize {
self.ref_count()
}
fn gc_drop_t(&self) {
self.inner().drop_t()
}
}
impl<T: Trace> Trace for Cc<T> {
fn trace(&self, tracer: &mut Tracer) {
Cc::<T>::trace(self, tracer)
}
#[inline]
fn is_type_tracked() -> bool {
T::is_type_tracked()
}
}
impl Trace for Cc<dyn Trace> {
fn trace(&self, tracer: &mut Tracer) {
Cc::<dyn Trace>::trace(self, tracer)
}
#[inline]
fn is_type_tracked() -> bool {
// Trait objects can be anything.
true
}
}
#[cfg(feature = "nightly")]
impl<T: ?Sized + std::marker::Unsize<U>, U: ?Sized, O: AbstractObjectSpace>
std::ops::CoerceUnsized<RawCc<U, O>> for RawCc<T, O>
{
}
#[inline]
unsafe fn | cast_ref | identifier_name | |
cc.rs | it.
let fat_ptr: [*mut (); 2] = unsafe { mem::transmute(&mut dummy as &mut dyn CcDyn) };
fat_ptr[1]
}
}
impl CcDyn for CcDummy {
fn gc_ref_count(&self) -> usize {
1
}
fn gc_traverse(&self, _tracer: &mut Tracer) {}
fn gc_clone(&self) -> Box<dyn GcClone> {
panic!("bug: CcDummy::gc_clone should never be called");
}
}
impl<T: Trace> Cc<T> {
/// Constructs a new [`Cc<T>`](type.Cc.html) in a thread-local storage.
///
/// To collect cycles, use [`collect_thread_cycles`](fn.collect_thread_cycles.html).
pub fn new(value: T) -> Cc<T> |
}
impl<T: Trace, O: AbstractObjectSpace> RawCc<T, O> {
/// Constructs a new [`Cc<T>`](type.Cc.html) in the given
/// [`ObjectSpace`](struct.ObjectSpace.html).
///
/// To collect cycles, call `ObjectSpace::collect_cycles()`.
pub(crate) fn new_in_space(value: T, space: &O) -> Self {
let is_tracked = T::is_type_tracked();
let cc_box = RawCcBox {
ref_count: space.new_ref_count(is_tracked),
value: UnsafeCell::new(ManuallyDrop::new(value)),
#[cfg(test)]
name: debug::NEXT_DEBUG_NAME.with(|n| n.get().to_string()),
};
let ccbox_ptr: *mut RawCcBox<T, O> = if is_tracked {
// Create a GcHeader before the CcBox. This is similar to cpython.
let header = space.empty_header();
let cc_box_with_header = RawCcBoxWithGcHeader { header, cc_box };
let mut boxed = Box::new(cc_box_with_header);
// Fix-up fields in GcHeader. This is done after the creation of the
// Box so the memory addresses are stable.
space.insert(&mut boxed.header, &boxed.cc_box);
debug_assert_eq!(
mem::size_of::<O::Header>() + mem::size_of::<RawCcBox<T, O>>(),
mem::size_of::<RawCcBoxWithGcHeader<T, O>>()
);
let ptr: *mut RawCcBox<T, O> = &mut boxed.cc_box;
Box::leak(boxed);
ptr
} else {
Box::into_raw(Box::new(cc_box))
};
// safety: ccbox_ptr cannot be null from the above code.
let non_null = unsafe { NonNull::new_unchecked(ccbox_ptr) };
let result = Self(non_null);
if is_tracked {
debug::log(|| (result.debug_name(), "new (CcBoxWithGcHeader)"));
} else {
debug::log(|| (result.debug_name(), "new (CcBox)"));
}
debug_assert_eq!(result.ref_count(), 1);
result
}
/// Convert to `RawCc<dyn Trace>`.
pub fn into_dyn(self) -> RawCc<dyn Trace, O> {
#[cfg(feature = "nightly")]
{
// Requires CoerceUnsized, which is currently unstable.
self
}
// safety: Trait object magic. Test by test_dyn_downcast.
#[cfg(not(feature = "nightly"))]
unsafe {
// XXX: This depends on rust internals. But it works on stable.
// Replace this with CoerceUnsized once that becomes stable.
// Cc<dyn Trace> has 2 usize values: The first one is the same
// as Cc<T>. The second one is the vtable. The vtable pointer
// is the same as the second pointer of `&dyn Trace`.
let mut fat_ptr: [usize; 2] = mem::transmute(self.inner().deref() as &dyn Trace);
let self_ptr: usize = mem::transmute(self);
fat_ptr[0] = self_ptr;
mem::transmute(fat_ptr)
}
}
}
impl<T: Trace + Clone> Cc<T> {
/// Update the value `T` in a copy-on-write way.
///
/// If the ref count is 1, the value is updated in-place.
/// Otherwise a new `Cc<T>` will be created.
pub fn update_with(&mut self, mut update_func: impl FnMut(&mut T)) {
let need_clone = self.ref_count() > 1;
if need_clone {
let mut value = <Cc<T>>::deref(self).clone();
update_func(&mut value);
*self = Cc::new(value);
} else {
let value_ptr: *mut ManuallyDrop<T> = self.inner().value.get();
let value_mut: &mut T = unsafe { &mut *value_ptr }.deref_mut();
update_func(value_mut);
}
}
}
impl<T: ?Sized, O: AbstractObjectSpace> RawCcBox<T, O> {
#[inline]
fn header_ptr(&self) -> *const () {
self.header() as *const _ as _
}
#[inline]
fn header(&self) -> &O::Header {
debug_assert!(self.is_tracked());
// safety: See `Cc::new`. GcHeader is before CcBox for tracked objects.
unsafe { cast_ref(self, -(mem::size_of::<O::Header>() as isize)) }
}
#[inline]
fn is_tracked(&self) -> bool {
self.ref_count.is_tracked()
}
#[inline]
fn is_dropped(&self) -> bool {
self.ref_count.is_dropped()
}
#[inline]
fn inc_ref(&self) -> usize {
self.ref_count.inc_ref()
}
#[inline]
fn dec_ref(&self) -> usize {
self.ref_count.dec_ref()
}
#[inline]
fn ref_count(&self) -> usize {
self.ref_count.ref_count()
}
#[inline]
fn weak_count(&self) -> usize {
self.ref_count.weak_count()
}
#[inline]
fn set_dropped(&self) -> bool {
self.ref_count.set_dropped()
}
#[inline]
pub(crate) fn drop_t(&self) {
let already_dropped = self.set_dropped();
if !already_dropped {
debug::log(|| (self.debug_name(), "drop (T)"));
// safety: is_dropped() check ensures T is only dropped once. Other
// places (ex. gc collector) ensure that T is no longer accessed.
unsafe { ManuallyDrop::drop(&mut *(self.value.get())) };
}
}
pub(crate) fn trace_t(&self, tracer: &mut Tracer) {
if !self.is_tracked() {
return;
}
debug::log(|| (self.debug_name(), "trace"));
// For other non-`Cc<T>` container types, `trace` visit referents,
// is recursive, and does not call `tracer` directly. For `Cc<T>`,
// `trace` stops here, is non-recursive, and does apply `tracer`
// to the actual `GcHeader`. It's expected that the upper layer
// calls `gc_traverse` on everything (not just roots).
tracer(self.header_ptr());
}
pub(crate) fn debug_name(&self) -> String {
#[cfg(test)]
{
self.name.clone()
}
#[cfg(not(test))]
{
#[allow(unused_mut)]
let mut result = format!("{} at {:p}", std::any::type_name::<T>(), &self.value);
#[cfg(all(feature = "debug", feature = "nightly"))]
{
if !self.is_dropped() && crate::debug::GC_DROPPING.with(|t| !t.get()) {
let debug = self.deref().optional_debug();
if !debug.is_empty() {
result += &format!(" {}", debug);
}
}
}
return result;
}
}
}
#[cfg(all(feature = "debug", feature = "nightly"))]
pub(crate) trait OptionalDebug {
fn optional_debug(&self) -> String;
}
#[cfg(all(feature = "debug", feature = "nightly"))]
impl<T: ?Sized> OptionalDebug for T {
default fn optional_debug(&self) -> String {
"".to_string()
}
}
#[cfg(all(feature = "debug", feature = "nightly"))]
impl<T: std::fmt::Debug + ?Sized> OptionalDebug for T {
fn optional_debug(&self) -> String {
format!("{:?}", self)
}
}
impl<T: ?Sized, O: AbstractObjectSpace> RawCc<T, O> {
/// Obtains a "weak reference", a non-owning pointer.
pub fn downgrade(&self) -> RawWeak<T, O> {
let inner = self.inner();
inner.ref_count.inc_weak();
debug::log(|| {
(
inner.debug_name(),
format!("new-weak ({})", inner.ref_count.weak_count()),
)
});
RawWeak(self.0)
}
/// | {
collect::THREAD_OBJECT_SPACE.with(|space| Self::new_in_space(value, space))
} | identifier_body |
cc.rs | /// Force drop the value T.
fn gc_drop_t(&self);
/// Returns the reference count. This is useful for verification.
fn gc_ref_count(&self) -> usize;
}
/// A dummy implementation without drop side-effects.
pub(crate) struct CcDummy;
impl CcDummy {
pub(crate) fn ccdyn_vptr() -> *mut () {
let mut dummy = CcDummy;
// safety: To access vtable pointer. Stable API cannot do it.
let fat_ptr: [*mut (); 2] = unsafe { mem::transmute(&mut dummy as &mut dyn CcDyn) };
fat_ptr[1]
}
}
impl CcDyn for CcDummy {
fn gc_ref_count(&self) -> usize {
1
}
fn gc_traverse(&self, _tracer: &mut Tracer) {}
fn gc_clone(&self) -> Box<dyn GcClone> {
panic!("bug: CcDummy::gc_clone should never be called");
}
}
impl<T: Trace> Cc<T> {
/// Constructs a new [`Cc<T>`](type.Cc.html) in a thread-local storage.
///
/// To collect cycles, use [`collect_thread_cycles`](fn.collect_thread_cycles.html).
pub fn new(value: T) -> Cc<T> {
collect::THREAD_OBJECT_SPACE.with(|space| Self::new_in_space(value, space))
}
}
impl<T: Trace, O: AbstractObjectSpace> RawCc<T, O> {
/// Constructs a new [`Cc<T>`](type.Cc.html) in the given
/// [`ObjectSpace`](struct.ObjectSpace.html).
///
/// To collect cycles, call `ObjectSpace::collect_cycles()`.
pub(crate) fn new_in_space(value: T, space: &O) -> Self {
let is_tracked = T::is_type_tracked();
let cc_box = RawCcBox {
ref_count: space.new_ref_count(is_tracked),
value: UnsafeCell::new(ManuallyDrop::new(value)),
#[cfg(test)]
name: debug::NEXT_DEBUG_NAME.with(|n| n.get().to_string()),
};
let ccbox_ptr: *mut RawCcBox<T, O> = if is_tracked {
// Create a GcHeader before the CcBox. This is similar to cpython.
let header = space.empty_header();
let cc_box_with_header = RawCcBoxWithGcHeader { header, cc_box };
let mut boxed = Box::new(cc_box_with_header);
// Fix-up fields in GcHeader. This is done after the creation of the
// Box so the memory addresses are stable.
space.insert(&mut boxed.header, &boxed.cc_box);
debug_assert_eq!(
mem::size_of::<O::Header>() + mem::size_of::<RawCcBox<T, O>>(),
mem::size_of::<RawCcBoxWithGcHeader<T, O>>()
);
let ptr: *mut RawCcBox<T, O> = &mut boxed.cc_box;
Box::leak(boxed);
ptr
} else {
Box::into_raw(Box::new(cc_box))
};
// safety: ccbox_ptr cannot be null from the above code.
let non_null = unsafe { NonNull::new_unchecked(ccbox_ptr) };
let result = Self(non_null);
if is_tracked {
debug::log(|| (result.debug_name(), "new (CcBoxWithGcHeader)"));
} else {
debug::log(|| (result.debug_name(), "new (CcBox)"));
}
debug_assert_eq!(result.ref_count(), 1);
result
}
/// Convert to `RawCc<dyn Trace>`.
pub fn into_dyn(self) -> RawCc<dyn Trace, O> {
#[cfg(feature = "nightly")]
{
// Requires CoerceUnsized, which is currently unstable.
self
}
// safety: Trait object magic. Test by test_dyn_downcast.
#[cfg(not(feature = "nightly"))]
unsafe {
// XXX: This depends on rust internals. But it works on stable.
// Replace this with CoerceUnsized once that becomes stable.
// Cc<dyn Trace> has 2 usize values: The first one is the same
// as Cc<T>. The second one is the vtable. The vtable pointer
// is the same as the second pointer of `&dyn Trace`.
let mut fat_ptr: [usize; 2] = mem::transmute(self.inner().deref() as &dyn Trace);
let self_ptr: usize = mem::transmute(self);
fat_ptr[0] = self_ptr;
mem::transmute(fat_ptr)
}
}
}
impl<T: Trace + Clone> Cc<T> {
/// Update the value `T` in a copy-on-write way.
///
/// If the ref count is 1, the value is updated in-place.
/// Otherwise a new `Cc<T>` will be created.
pub fn update_with(&mut self, mut update_func: impl FnMut(&mut T)) {
let need_clone = self.ref_count() > 1;
if need_clone {
let mut value = <Cc<T>>::deref(self).clone();
update_func(&mut value);
*self = Cc::new(value);
} else {
let value_ptr: *mut ManuallyDrop<T> = self.inner().value.get();
let value_mut: &mut T = unsafe { &mut *value_ptr }.deref_mut();
update_func(value_mut);
}
}
}
impl<T: ?Sized, O: AbstractObjectSpace> RawCcBox<T, O> {
#[inline]
fn header_ptr(&self) -> *const () {
self.header() as *const _ as _
}
#[inline]
fn header(&self) -> &O::Header {
debug_assert!(self.is_tracked());
// safety: See `Cc::new`. GcHeader is before CcBox for tracked objects.
unsafe { cast_ref(self, -(mem::size_of::<O::Header>() as isize)) }
}
#[inline]
fn is_tracked(&self) -> bool {
self.ref_count.is_tracked()
}
#[inline]
fn is_dropped(&self) -> bool {
self.ref_count.is_dropped()
}
#[inline]
fn inc_ref(&self) -> usize {
self.ref_count.inc_ref()
}
#[inline]
fn dec_ref(&self) -> usize {
self.ref_count.dec_ref()
}
#[inline]
fn ref_count(&self) -> usize {
self.ref_count.ref_count()
}
#[inline]
fn weak_count(&self) -> usize {
self.ref_count.weak_count()
}
#[inline]
fn set_dropped(&self) -> bool {
self.ref_count.set_dropped()
}
#[inline]
pub(crate) fn drop_t(&self) {
let already_dropped = self.set_dropped();
if !already_dropped {
debug::log(|| (self.debug_name(), "drop (T)"));
// safety: is_dropped() check ensures T is only dropped once. Other
// places (ex. gc collector) ensure that T is no longer accessed.
unsafe { ManuallyDrop::drop(&mut *(self.value.get())) };
}
}
pub(crate) fn trace_t(&self, tracer: &mut Tracer) {
if !self.is_tracked() {
return;
}
debug::log(|| (self.debug_name(), "trace"));
// For other non-`Cc<T>` container types, `trace` visit referents,
// is recursive, and does not call `tracer` directly. For `Cc<T>`,
// `trace` stops here, is non-recursive, and does apply `tracer`
// to the actual `GcHeader`. It's expected that the upper layer
// calls `gc_traverse` on everything (not just roots).
tracer(self.header_ptr());
}
pub(crate) fn debug_name(&self) -> String {
#[cfg(test)]
{
self.name.clone()
}
#[cfg(not(test))]
{
#[allow(unused_mut)]
let mut result = format!("{} at {:p}", std::any::type_name::<T>(), &self.value);
#[cfg(all(feature = "debug", feature = "nightly"))]
{
if !self.is_dropped() && crate::debug::GC_DROPPING.with(|t| !t.get()) {
let debug = self.deref().optional_debug();
if !debug.is_empty() {
result += &format!(" {}", debug);
}
}
}
return result;
}
}
}
#[cfg(all(feature = "debug", feature = "nightly"))]
pub(crate) trait OptionalDebug {
fn optional_debug(&self) -> String;
}
#[cfg(all(feature = "debug", feature = "nightly"))]
impl<T: ?Sized> OptionalDebug for T {
default fn optional_debug(&self) -> String {
"".to_string()
}
}
#[cfg(all(feature = "debug", feature = "nightly"))]
impl<T: std::fmt::Debug + ?Sized> OptionalDebug for T {
fn optional_debug(&self) -> String {
format!("{:?}", self)
}
}
impl<T: ?Sized, O | random_line_split | ||
agent_test.py | agent did not explore enough nodes during the search; it " + \
"did not finish the first layer of available moves."
TIMER_MARGIN = 15 # time (in ms) to leave on the timer to avoid timeout
def curr_time_millis():
return 1000 * timeit.default_timer()
def timeout(time_limit):
"""
Function decorator for unittest test cases to specify test case timeout.
"""
class TimeoutException(Exception):
""" Subclass Exception to catch timer expiration during search """
pass
def handler(*args, **kwargs):
""" Generic handler to raise an exception when a timer expires """
raise TimeoutException("Test aborted due to timeout. Test was " +
"expected to finish in less than {} second(s).".format(time_limit))
def wrapUnitTest(testcase):
@wraps(testcase)
def testWrapper(self, *args, **kwargs):
signal.signal(signal.SIGALRM, handler)
signal.alarm(time_limit)
try:
return testcase(self, *args, **kwargs)
finally:
signal.alarm(0)
return testWrapper
return wrapUnitTest
class EvalTable():
def __init__(self, table):
self.table = table
def score(self, game, player):
row, col = game.get_player_location(player)
return self.table[row][col]
class CounterBoard(isolation.Board):
def __init__(self, *args, **kwargs):
super(CounterBoard, self).__init__(*args, **kwargs)
self.counter = Counter()
self.visited = set()
def copy(self):
new_board = CounterBoard(self.__player_1__, self.__player_2__,
width=self.width, height=self.height)
new_board.move_count = self.move_count
new_board.__active_player__ = self.__active_player__
new_board.__inactive_player__ = self.__inactive_player__
new_board.__last_player_move__ = copy(self.__last_player_move__)
new_board.__player_symbols__ = copy(self.__player_symbols__)
new_board.__board_state__ = deepcopy(self.__board_state__)
new_board.counter = self.counter
new_board.visited = self.visited
return new_board
def forecast_move(self, move):
self.counter[move] += 1
self.visited.add(move)
new_board = self.copy()
new_board.apply_move(move)
return new_board
@property
def counts(self):
""" Return counts of (total, unique) nodes visited """
return sum(self.counter.values()), len(self.visited)
class Project1Test(unittest.TestCase):
def initAUT(self, depth, eval_fn, iterative=False, method="minimax", loc1=(3, 3), loc2=(0, 0), w=7, h=7):
reload(game_agent)
agentUT = game_agent.CustomPlayer(depth, eval_fn, iterative, method)
board = CounterBoard(agentUT, 'null_agent', w, h)
board.apply_move(loc1)
board.apply_move(loc2)
return agentUT, board
@timeout(1)
# @unittest.skip("Skip minimax test.") # Uncomment this line to skip test
def test_minimax(self):
""" Test CustomPlayer.minimax """
h, w = 7, 7
method = "minimax"
value_table = [[0] * w for _ in range(h)]
value_table[1][5] = 1
value_table[4][3] = 2
value_table[6][6] = 3
eval_fn = EvalTable(value_table)
expected_moves = [set([(1, 5)]),
set([(3, 1), (3, 5)]),
set([(3, 5), (4, 2)])]
counts = [(8, 8), (92, 27), (1650, 43)]
for idx, depth in enumerate([1, 3, 5]):
|
@timeout(1)
# @unittest.skip("Skip alpha-beta test.") # Uncomment this line to skip test
def test_alphabeta(self):
""" Test CustomPlayer.alphabeta """
h, w = 7, 7
method = "alphabeta"
value_table = [[0] * w for _ in range(h)]
value_table[2][5] = 1
value_table[0][4] = 2
value_table[1][0] = 3
value_table[5][5] = 4
eval_fn = EvalTable(value_table)
expected_moves = [set([(2, 5)]),
set([(2, 5)]),
set([(1, 4)]),
set([(1, 4), (2, 5)])]
counts = [(2, 2), (26, 13), (552, 36), (10564, 47)]
for idx, depth in enumerate([1, 3, 5, 7]):
agentUT, board = self.initAUT(depth, eval_fn, False, method, loc1=(0, 6), loc2=(0, 0))
move = agentUT.get_move(board, board.get_legal_moves(), lambda: 1e4)
num_explored_valid = board.counts[0] <= counts[idx][0]
num_unique_valid = board.counts[1] <= counts[idx][1]
self.assertTrue(num_explored_valid,
WRONG_NUM_EXPLORED.format(method, depth, counts[idx][0], board.counts[0]))
self.assertTrue(num_unique_valid,
UNEXPECTED_VISIT.format(method, depth, counts[idx][1], board.counts[1]))
self.assertIn(move, expected_moves[idx],
WRONG_MOVE.format(method, depth, expected_moves[idx], move))
@timeout(1)
# @unittest.skip("Skip alpha-beta pruning test.") # Uncomment this line to skip test
def test_alphabeta_pruning(self):
""" Test pruning in CustomPlayer.alphabeta """
h, w = 15, 15
depth = 6
method = "alphabeta"
value_table = [[0] * w for _ in range(h)]
value_table[3][14] = 1
eval_fn = EvalTable(value_table)
blocked_cells = [(0, 9), (0, 13), (0, 14), (1, 8), (1, 9), (1, 14),
(2, 9), (2, 11), (3, 8), (3, 10), (3, 11), (3, 12),
(4, 9), (4, 11), (4, 13), (5, 10), (5, 12), (5, 13),
(5, 14), (6, 11), (6, 13), (9, 0), (9, 2), (10, 3),
(11, 3), (12, 0), (12, 1), (12, 3), (12, 4), (12, 5)]
agentUT, board = self.initAUT(depth, eval_fn, False, method, (0, 14), (14, 0), w, h)
for r, c in blocked_cells:
board.__board_state__[r][c] = "X"
move = agentUT.get_move(board, board.get_legal_moves(), lambda: 1e4)
expected_move = (2, 13)
max_visits = (40, 18)
num_explored_valid = board.counts[0] < max_visits[0]
num_unique_valid = board.counts[1] <= max_visits[1]
self.assertTrue(num_explored_valid,
WRONG_NUM_EXPLORED.format(method, depth, max_visits[0], board.counts[0]))
self.assertTrue(num_unique_valid,
UNEXPECTED_VISIT.format(method, depth, max_visits[1], board.counts[1]))
self.assertEqual(move, expected_move,
WRONG_MOVE.format(method, depth, expected_move, move))
@timeout(10)
# @unittest.skip("Skip iterative deepening test.") # Uncomment this line to skip test
def test_id(self):
""" Test iterative deepening for CustomPlayer.minimax """
w, h = | agentUT, board = self.initAUT(depth, eval_fn, False, method, loc1=(2, 3), loc2=(0, 0))
move = agentUT.get_move(board, board.get_legal_moves(), lambda: 1e3)
num_explored_valid = board.counts[0] == counts[idx][0]
num_unique_valid = board.counts[1] == counts[idx][1]
self.assertTrue(num_explored_valid,
WRONG_NUM_EXPLORED.format(method, depth, counts[idx][0], board.counts[0]))
self.assertTrue(num_unique_valid,
UNEXPECTED_VISIT.format(method, depth, counts[idx][1], board.counts[1]))
self.assertIn(move, expected_moves[idx],
WRONG_MOVE.format(method, depth, expected_moves[idx], move)) | conditional_block |
agent_test.py | Your agent did not explore enough nodes during the search; it " + \
"did not finish the first layer of available moves."
TIMER_MARGIN = 15 # time (in ms) to leave on the timer to avoid timeout
def curr_time_millis():
return 1000 * timeit.default_timer()
def timeout(time_limit):
"""
Function decorator for unittest test cases to specify test case timeout.
"""
class TimeoutException(Exception):
""" Subclass Exception to catch timer expiration during search """
pass
def handler(*args, **kwargs):
""" Generic handler to raise an exception when a timer expires """
raise TimeoutException("Test aborted due to timeout. Test was " +
"expected to finish in less than {} second(s).".format(time_limit))
def wrapUnitTest(testcase):
@wraps(testcase)
def testWrapper(self, *args, **kwargs):
signal.signal(signal.SIGALRM, handler)
signal.alarm(time_limit)
try:
return testcase(self, *args, **kwargs)
finally:
signal.alarm(0)
return testWrapper
return wrapUnitTest
class EvalTable():
def __init__(self, table):
self.table = table
def score(self, game, player):
row, col = game.get_player_location(player)
return self.table[row][col]
class CounterBoard(isolation.Board):
def __init__(self, *args, **kwargs):
super(CounterBoard, self).__init__(*args, **kwargs)
self.counter = Counter()
self.visited = set()
def copy(self):
new_board = CounterBoard(self.__player_1__, self.__player_2__,
width=self.width, height=self.height)
new_board.move_count = self.move_count
new_board.__active_player__ = self.__active_player__
new_board.__inactive_player__ = self.__inactive_player__
new_board.__last_player_move__ = copy(self.__last_player_move__)
new_board.__player_symbols__ = copy(self.__player_symbols__)
new_board.__board_state__ = deepcopy(self.__board_state__)
new_board.counter = self.counter
new_board.visited = self.visited
return new_board
def forecast_move(self, move):
self.counter[move] += 1
self.visited.add(move)
new_board = self.copy()
new_board.apply_move(move)
return new_board
@property
def counts(self):
""" Return counts of (total, unique) nodes visited """
return sum(self.counter.values()), len(self.visited)
class Project1Test(unittest.TestCase):
def initAUT(self, depth, eval_fn, iterative=False, method="minimax", loc1=(3, 3), loc2=(0, 0), w=7, h=7):
reload(game_agent)
agentUT = game_agent.CustomPlayer(depth, eval_fn, iterative, method)
board = CounterBoard(agentUT, 'null_agent', w, h)
board.apply_move(loc1)
board.apply_move(loc2)
return agentUT, board
@timeout(1)
# @unittest.skip("Skip minimax test.") # Uncomment this line to skip test
def test_minimax(self):
""" Test CustomPlayer.minimax """
h, w = 7, 7
method = "minimax"
value_table = [[0] * w for _ in range(h)]
value_table[1][5] = 1
value_table[4][3] = 2
value_table[6][6] = 3
eval_fn = EvalTable(value_table)
expected_moves = [set([(1, 5)]),
set([(3, 1), (3, 5)]),
set([(3, 5), (4, 2)])]
counts = [(8, 8), (92, 27), (1650, 43)]
for idx, depth in enumerate([1, 3, 5]):
agentUT, board = self.initAUT(depth, eval_fn, False, method, loc1=(2, 3), loc2=(0, 0))
move = agentUT.get_move(board, board.get_legal_moves(), lambda: 1e3)
num_explored_valid = board.counts[0] == counts[idx][0]
num_unique_valid = board.counts[1] == counts[idx][1]
self.assertTrue(num_explored_valid,
WRONG_NUM_EXPLORED.format(method, depth, counts[idx][0], board.counts[0]))
self.assertTrue(num_unique_valid,
UNEXPECTED_VISIT.format(method, depth, counts[idx][1], board.counts[1]))
self.assertIn(move, expected_moves[idx],
WRONG_MOVE.format(method, depth, expected_moves[idx], move))
@timeout(1)
# @unittest.skip("Skip alpha-beta test.") # Uncomment this line to skip test
def test_alphabeta(self):
""" Test CustomPlayer.alphabeta """
h, w = 7, 7
method = "alphabeta"
value_table = [[0] * w for _ in range(h)]
value_table[2][5] = 1
value_table[0][4] = 2
value_table[1][0] = 3
value_table[5][5] = 4
eval_fn = EvalTable(value_table)
expected_moves = [set([(2, 5)]),
set([(2, 5)]), | set([(1, 4)]),
set([(1, 4), (2, 5)])]
counts = [(2, 2), (26, 13), (552, 36), (10564, 47)]
for idx, depth in enumerate([1, 3, 5, 7]):
agentUT, board = self.initAUT(depth, eval_fn, False, method, loc1=(0, 6), loc2=(0, 0))
move = agentUT.get_move(board, board.get_legal_moves(), lambda: 1e4)
num_explored_valid = board.counts[0] <= counts[idx][0]
num_unique_valid = board.counts[1] <= counts[idx][1]
self.assertTrue(num_explored_valid,
WRONG_NUM_EXPLORED.format(method, depth, counts[idx][0], board.counts[0]))
self.assertTrue(num_unique_valid,
UNEXPECTED_VISIT.format(method, depth, counts[idx][1], board.counts[1]))
self.assertIn(move, expected_moves[idx],
WRONG_MOVE.format(method, depth, expected_moves[idx], move))
@timeout(1)
# @unittest.skip("Skip alpha-beta pruning test.") # Uncomment this line to skip test
def test_alphabeta_pruning(self):
""" Test pruning in CustomPlayer.alphabeta """
h, w = 15, 15
depth = 6
method = "alphabeta"
value_table = [[0] * w for _ in range(h)]
value_table[3][14] = 1
eval_fn = EvalTable(value_table)
blocked_cells = [(0, 9), (0, 13), (0, 14), (1, 8), (1, 9), (1, 14),
(2, 9), (2, 11), (3, 8), (3, 10), (3, 11), (3, 12),
(4, 9), (4, 11), (4, 13), (5, 10), (5, 12), (5, 13),
(5, 14), (6, 11), (6, 13), (9, 0), (9, 2), (10, 3),
(11, 3), (12, 0), (12, 1), (12, 3), (12, 4), (12, 5)]
agentUT, board = self.initAUT(depth, eval_fn, False, method, (0, 14), (14, 0), w, h)
for r, c in blocked_cells:
board.__board_state__[r][c] = "X"
move = agentUT.get_move(board, board.get_legal_moves(), lambda: 1e4)
expected_move = (2, 13)
max_visits = (40, 18)
num_explored_valid = board.counts[0] < max_visits[0]
num_unique_valid = board.counts[1] <= max_visits[1]
self.assertTrue(num_explored_valid,
WRONG_NUM_EXPLORED.format(method, depth, max_visits[0], board.counts[0]))
self.assertTrue(num_unique_valid,
UNEXPECTED_VISIT.format(method, depth, max_visits[1], board.counts[1]))
self.assertEqual(move, expected_move,
WRONG_MOVE.format(method, depth, expected_move, move))
@timeout(10)
# @unittest.skip("Skip iterative deepening test.") # Uncomment this line to skip test
def test_id(self):
""" Test iterative deepening for CustomPlayer.minimax """
w, h = | random_line_split | |
agent_test.py | Your agent did not explore enough nodes during the search; it " + \
"did not finish the first layer of available moves."
TIMER_MARGIN = 15 # time (in ms) to leave on the timer to avoid timeout
def curr_time_millis():
return 1000 * timeit.default_timer()
def timeout(time_limit):
"""
Function decorator for unittest test cases to specify test case timeout.
"""
class TimeoutException(Exception):
""" Subclass Exception to catch timer expiration during search """
pass
def handler(*args, **kwargs):
""" Generic handler to raise an exception when a timer expires """
raise TimeoutException("Test aborted due to timeout. Test was " +
"expected to finish in less than {} second(s).".format(time_limit))
def wrapUnitTest(testcase):
@wraps(testcase)
def testWrapper(self, *args, **kwargs):
signal.signal(signal.SIGALRM, handler)
signal.alarm(time_limit)
try:
return testcase(self, *args, **kwargs)
finally:
signal.alarm(0)
return testWrapper
return wrapUnitTest
class EvalTable():
def __init__(self, table):
self.table = table
def score(self, game, player):
row, col = game.get_player_location(player)
return self.table[row][col]
class CounterBoard(isolation.Board):
def __init__(self, *args, **kwargs):
super(CounterBoard, self).__init__(*args, **kwargs)
self.counter = Counter()
self.visited = set()
def copy(self):
new_board = CounterBoard(self.__player_1__, self.__player_2__,
width=self.width, height=self.height)
new_board.move_count = self.move_count
new_board.__active_player__ = self.__active_player__
new_board.__inactive_player__ = self.__inactive_player__
new_board.__last_player_move__ = copy(self.__last_player_move__)
new_board.__player_symbols__ = copy(self.__player_symbols__)
new_board.__board_state__ = deepcopy(self.__board_state__)
new_board.counter = self.counter
new_board.visited = self.visited
return new_board
def forecast_move(self, move):
self.counter[move] += 1
self.visited.add(move)
new_board = self.copy()
new_board.apply_move(move)
return new_board
@property
def counts(self):
""" Return counts of (total, unique) nodes visited """
return sum(self.counter.values()), len(self.visited)
class Project1Test(unittest.TestCase):
def initAUT(self, depth, eval_fn, iterative=False, method="minimax", loc1=(3, 3), loc2=(0, 0), w=7, h=7):
reload(game_agent)
agentUT = game_agent.CustomPlayer(depth, eval_fn, iterative, method)
board = CounterBoard(agentUT, 'null_agent', w, h)
board.apply_move(loc1)
board.apply_move(loc2)
return agentUT, board
@timeout(1)
# @unittest.skip("Skip minimax test.") # Uncomment this line to skip test
def test_minimax(self):
""" Test CustomPlayer.minimax """
h, w = 7, 7
method = "minimax"
value_table = [[0] * w for _ in range(h)]
value_table[1][5] = 1
value_table[4][3] = 2
value_table[6][6] = 3
eval_fn = EvalTable(value_table)
expected_moves = [set([(1, 5)]),
set([(3, 1), (3, 5)]),
set([(3, 5), (4, 2)])]
counts = [(8, 8), (92, 27), (1650, 43)]
for idx, depth in enumerate([1, 3, 5]):
agentUT, board = self.initAUT(depth, eval_fn, False, method, loc1=(2, 3), loc2=(0, 0))
move = agentUT.get_move(board, board.get_legal_moves(), lambda: 1e3)
num_explored_valid = board.counts[0] == counts[idx][0]
num_unique_valid = board.counts[1] == counts[idx][1]
self.assertTrue(num_explored_valid,
WRONG_NUM_EXPLORED.format(method, depth, counts[idx][0], board.counts[0]))
self.assertTrue(num_unique_valid,
UNEXPECTED_VISIT.format(method, depth, counts[idx][1], board.counts[1]))
self.assertIn(move, expected_moves[idx],
WRONG_MOVE.format(method, depth, expected_moves[idx], move))
@timeout(1)
# @unittest.skip("Skip alpha-beta test.") # Uncomment this line to skip test
def test_alphabeta(self):
| move = agentUT.get_move(board, board.get_legal_moves(), lambda: 1e4)
num_explored_valid = board.counts[0] <= counts[idx][0]
num_unique_valid = board.counts[1] <= counts[idx][1]
self.assertTrue(num_explored_valid,
WRONG_NUM_EXPLORED.format(method, depth, counts[idx][0], board.counts[0]))
self.assertTrue(num_unique_valid,
UNEXPECTED_VISIT.format(method, depth, counts[idx][1], board.counts[1]))
self.assertIn(move, expected_moves[idx],
WRONG_MOVE.format(method, depth, expected_moves[idx], move))
@timeout(1)
# @unittest.skip("Skip alpha-beta pruning test.") # Uncomment this line to skip test
def test_alphabeta_pruning(self):
""" Test pruning in CustomPlayer.alphabeta """
h, w = 15, 15
depth = 6
method = "alphabeta"
value_table = [[0] * w for _ in range(h)]
value_table[3][14] = 1
eval_fn = EvalTable(value_table)
blocked_cells = [(0, 9), (0, 13), (0, 14), (1, 8), (1, 9), (1, 14),
(2, 9), (2, 11), (3, 8), (3, 10), (3, 11), (3, 12),
(4, 9), (4, 11), (4, 13), (5, 10), (5, 12), (5, 13),
(5, 14), (6, 11), (6, 13), (9, 0), (9, 2), (10, 3),
(11, 3), (12, 0), (12, 1), (12, 3), (12, 4), (12, 5)]
agentUT, board = self.initAUT(depth, eval_fn, False, method, (0, 14), (14, 0), w, h)
for r, c in blocked_cells:
board.__board_state__[r][c] = "X"
move = agentUT.get_move(board, board.get_legal_moves(), lambda: 1e4)
expected_move = (2, 13)
max_visits = (40, 18)
num_explored_valid = board.counts[0] < max_visits[0]
num_unique_valid = board.counts[1] <= max_visits[1]
self.assertTrue(num_explored_valid,
WRONG_NUM_EXPLORED.format(method, depth, max_visits[0], board.counts[0]))
self.assertTrue(num_unique_valid,
UNEXPECTED_VISIT.format(method, depth, max_visits[1], board.counts[1]))
self.assertEqual(move, expected_move,
WRONG_MOVE.format(method, depth, expected_move, move))
@timeout(10)
# @unittest.skip("Skip iterative deepening test.") # Uncomment this line to skip test
def test_id(self):
""" Test iterative deepening for CustomPlayer.minimax """
w, h = | """ Test CustomPlayer.alphabeta """
h, w = 7, 7
method = "alphabeta"
value_table = [[0] * w for _ in range(h)]
value_table[2][5] = 1
value_table[0][4] = 2
value_table[1][0] = 3
value_table[5][5] = 4
eval_fn = EvalTable(value_table)
expected_moves = [set([(2, 5)]),
set([(2, 5)]),
set([(1, 4)]),
set([(1, 4), (2, 5)])]
counts = [(2, 2), (26, 13), (552, 36), (10564, 47)]
for idx, depth in enumerate([1, 3, 5, 7]):
agentUT, board = self.initAUT(depth, eval_fn, False, method, loc1=(0, 6), loc2=(0, 0)) | identifier_body |
agent_test.py | agent did not explore enough nodes during the search; it " + \
"did not finish the first layer of available moves."
TIMER_MARGIN = 15 # time (in ms) to leave on the timer to avoid timeout
def curr_time_millis():
return 1000 * timeit.default_timer()
def timeout(time_limit):
"""
Function decorator for unittest test cases to specify test case timeout.
"""
class TimeoutException(Exception):
""" Subclass Exception to catch timer expiration during search """
pass
def handler(*args, **kwargs):
""" Generic handler to raise an exception when a timer expires """
raise TimeoutException("Test aborted due to timeout. Test was " +
"expected to finish in less than {} second(s).".format(time_limit))
def wrapUnitTest(testcase):
@wraps(testcase)
def testWrapper(self, *args, **kwargs):
signal.signal(signal.SIGALRM, handler)
signal.alarm(time_limit)
try:
return testcase(self, *args, **kwargs)
finally:
signal.alarm(0)
return testWrapper
return wrapUnitTest
class EvalTable():
def __init__(self, table):
self.table = table
def score(self, game, player):
row, col = game.get_player_location(player)
return self.table[row][col]
class CounterBoard(isolation.Board):
def __init__(self, *args, **kwargs):
super(CounterBoard, self).__init__(*args, **kwargs)
self.counter = Counter()
self.visited = set()
def | (self):
new_board = CounterBoard(self.__player_1__, self.__player_2__,
width=self.width, height=self.height)
new_board.move_count = self.move_count
new_board.__active_player__ = self.__active_player__
new_board.__inactive_player__ = self.__inactive_player__
new_board.__last_player_move__ = copy(self.__last_player_move__)
new_board.__player_symbols__ = copy(self.__player_symbols__)
new_board.__board_state__ = deepcopy(self.__board_state__)
new_board.counter = self.counter
new_board.visited = self.visited
return new_board
def forecast_move(self, move):
self.counter[move] += 1
self.visited.add(move)
new_board = self.copy()
new_board.apply_move(move)
return new_board
@property
def counts(self):
""" Return counts of (total, unique) nodes visited """
return sum(self.counter.values()), len(self.visited)
class Project1Test(unittest.TestCase):
def initAUT(self, depth, eval_fn, iterative=False, method="minimax", loc1=(3, 3), loc2=(0, 0), w=7, h=7):
reload(game_agent)
agentUT = game_agent.CustomPlayer(depth, eval_fn, iterative, method)
board = CounterBoard(agentUT, 'null_agent', w, h)
board.apply_move(loc1)
board.apply_move(loc2)
return agentUT, board
@timeout(1)
# @unittest.skip("Skip minimax test.") # Uncomment this line to skip test
def test_minimax(self):
""" Test CustomPlayer.minimax """
h, w = 7, 7
method = "minimax"
value_table = [[0] * w for _ in range(h)]
value_table[1][5] = 1
value_table[4][3] = 2
value_table[6][6] = 3
eval_fn = EvalTable(value_table)
expected_moves = [set([(1, 5)]),
set([(3, 1), (3, 5)]),
set([(3, 5), (4, 2)])]
counts = [(8, 8), (92, 27), (1650, 43)]
for idx, depth in enumerate([1, 3, 5]):
agentUT, board = self.initAUT(depth, eval_fn, False, method, loc1=(2, 3), loc2=(0, 0))
move = agentUT.get_move(board, board.get_legal_moves(), lambda: 1e3)
num_explored_valid = board.counts[0] == counts[idx][0]
num_unique_valid = board.counts[1] == counts[idx][1]
self.assertTrue(num_explored_valid,
WRONG_NUM_EXPLORED.format(method, depth, counts[idx][0], board.counts[0]))
self.assertTrue(num_unique_valid,
UNEXPECTED_VISIT.format(method, depth, counts[idx][1], board.counts[1]))
self.assertIn(move, expected_moves[idx],
WRONG_MOVE.format(method, depth, expected_moves[idx], move))
@timeout(1)
# @unittest.skip("Skip alpha-beta test.") # Uncomment this line to skip test
def test_alphabeta(self):
""" Test CustomPlayer.alphabeta """
h, w = 7, 7
method = "alphabeta"
value_table = [[0] * w for _ in range(h)]
value_table[2][5] = 1
value_table[0][4] = 2
value_table[1][0] = 3
value_table[5][5] = 4
eval_fn = EvalTable(value_table)
expected_moves = [set([(2, 5)]),
set([(2, 5)]),
set([(1, 4)]),
set([(1, 4), (2, 5)])]
counts = [(2, 2), (26, 13), (552, 36), (10564, 47)]
for idx, depth in enumerate([1, 3, 5, 7]):
agentUT, board = self.initAUT(depth, eval_fn, False, method, loc1=(0, 6), loc2=(0, 0))
move = agentUT.get_move(board, board.get_legal_moves(), lambda: 1e4)
num_explored_valid = board.counts[0] <= counts[idx][0]
num_unique_valid = board.counts[1] <= counts[idx][1]
self.assertTrue(num_explored_valid,
WRONG_NUM_EXPLORED.format(method, depth, counts[idx][0], board.counts[0]))
self.assertTrue(num_unique_valid,
UNEXPECTED_VISIT.format(method, depth, counts[idx][1], board.counts[1]))
self.assertIn(move, expected_moves[idx],
WRONG_MOVE.format(method, depth, expected_moves[idx], move))
@timeout(1)
# @unittest.skip("Skip alpha-beta pruning test.") # Uncomment this line to skip test
def test_alphabeta_pruning(self):
""" Test pruning in CustomPlayer.alphabeta """
h, w = 15, 15
depth = 6
method = "alphabeta"
value_table = [[0] * w for _ in range(h)]
value_table[3][14] = 1
eval_fn = EvalTable(value_table)
blocked_cells = [(0, 9), (0, 13), (0, 14), (1, 8), (1, 9), (1, 14),
(2, 9), (2, 11), (3, 8), (3, 10), (3, 11), (3, 12),
(4, 9), (4, 11), (4, 13), (5, 10), (5, 12), (5, 13),
(5, 14), (6, 11), (6, 13), (9, 0), (9, 2), (10, 3),
(11, 3), (12, 0), (12, 1), (12, 3), (12, 4), (12, 5)]
agentUT, board = self.initAUT(depth, eval_fn, False, method, (0, 14), (14, 0), w, h)
for r, c in blocked_cells:
board.__board_state__[r][c] = "X"
move = agentUT.get_move(board, board.get_legal_moves(), lambda: 1e4)
expected_move = (2, 13)
max_visits = (40, 18)
num_explored_valid = board.counts[0] < max_visits[0]
num_unique_valid = board.counts[1] <= max_visits[1]
self.assertTrue(num_explored_valid,
WRONG_NUM_EXPLORED.format(method, depth, max_visits[0], board.counts[0]))
self.assertTrue(num_unique_valid,
UNEXPECTED_VISIT.format(method, depth, max_visits[1], board.counts[1]))
self.assertEqual(move, expected_move,
WRONG_MOVE.format(method, depth, expected_move, move))
@timeout(10)
# @unittest.skip("Skip iterative deepening test.") # Uncomment this line to skip test
def test_id(self):
""" Test iterative deepening for CustomPlayer.minimax """
w, h = | copy | identifier_name |
zoekmachine.py | "14": "Kunst & Cultuur",
"15": "Overig",
'16': "Biologie",
"17": "Wiskunde",
"18": "Natuur- en scheikunde",
"19": "Psychologie",
"20": "Sociale wetenschap",
"21": "Overig",
"22": "Auto's",
"23" : "Vliegtuigen",
"24" : "Boten",
"25" : "Openbaar vervoer",
"26" : "Motorfietsen",
"27" : "Fietsen",
"28": 'Overig',
"29": 'Spellen',
"30": 'Computergames',
"31": "Hobby\'s",
"32": "Sporten",
"33": "Overig",
"34": 'Caraïben',
"35": "Noord-Amerika",
"36": 'Zuid-Amerika',
"37": "Afrika",
"38": 'Antarctica',
"39": 'Azië',
'40': 'Europa',
"41": "Midden-Amerika",
'42': "Midden-Oosten",
"43": "Oceanië",
"44": "Overig",
"45": "Overig",
"46": "Mode & Accessoires",
"47": "Familie & Relatie",
"48": "Gezondheid",
"49": "Zwangerschap",
"50": 'Onderwijs',
"51": 'Milieu',
"52": "Politiek & Overheid",
"53": "Samenleving",
"54": 'Overig',
"55": "Boeken & Auteurs",
"56": "Genealogie",
"57": 'Geschiedenis',
"58": 'Filosofie',
"59": 'Poëzie',
"60": "Beeldende kunst",
"61": "Overig",
"62": "Schoonmaken & Wassen",
"63": 'Interieur',
"64": 'Doe-Het-Zelf',
"65": 'Tuin',
"66": 'Huisdieren',
"67": 'Overig',
"68": 'Dranken',
"69": 'Koken & Recepten',
"70": 'Vegetarisch & Veganistisch',
"71": "Uit eten",
"72": "Overig",
"73": 'Beroemdheden',
"74": 'Stripboeken & Tekenfilms',
"75": 'Tijdschriften',
"76": 'Horoscoop',
"77": 'Films',
"78": 'Muziek',
"79": 'Radio',
"80": 'Televisie',
"81": 'Overig',
"82": 'Videocameras',
"83": "Camera\'s",
"84": "Telefoon & Abonnementen",
"85": 'Spelcomputers',
"86": 'Audio',
"87": "Handhelds & Smartphones",
"88": "Televisies",
"89": 'Overig',
"90": 'Hardware',
"91": 'Software',
"92": 'Internet',
"93": 'Programmeren & Design',
"94": 'Veiligheid',
"95": 'Overig',
"96": 'Carrière & Werk',
"97": 'Financiën',
"98": 'Huren & Vastgoed',
"100": 'Belasting',
"101": 'Overig',
"103": 'Ondernemen',
"104": "Religie",
"106": 'Vrachtwagens & Transport',
"107": 'Treinen',
"108": "Taal",
"109": "Spiritualiteit",
"110": 'Ruimtevaart & Sterrenkunde',
"111": "Besturingssystemen",
"113": "Voetbal",
"114": 'Wielrennen',
"115": 'Tennis',
"116": "Formule 1",
"117": "Hockey",
"118": 'Schaatsen',
"119": 'Overig',
"120": 'Vragen aan mannen',
"121": 'Vragen aan vrouwen',
"122": "GoeieVraag.nl",
"123": "Ouderschap & Opvoeding",
"124": 'Wetgeving',
"125": 'Wintersport',
"126": 'Feestdagen',
"127": "Sinterklaas",
"128": 'Kerst',
"129": 'Pasen',
"130": "Andere feestdagen",
"131": "Seksualiteit",
"132": "Aardrijkskunde & Aardwetenschappen",
"133": "Energie",
"134": "Verzekeringen",
"135": "Sparen & Beleggen",
"136": "Overig",
"137": "Alternatieve geneeswijzen",
"138": "Gebit",
"139": 'Psyche',
"140": 'Voeding',
"141": "Ziekten",
"142": "Optiek",
"143": "Lichamelijke klachten",
"144": "Mannelijk lichaam",
"145": "Vrouwelijk lichaam",
"146": "Overig",
"147": 'Kinderen',
"148": "Reparaties",
"149": "Banden",
"150": "Brom- & Snorfietsen",
"151": "Weblogs",
"152": 'Webshops',
"156": "Meteorologie",
"157": "Lenen",
"158": "Sparen",
"159": "Hypotheek",
"160": "Economie",
"161": "Techniek",
"162": "Landbouw & Veeteelt",
"163": "Medicijnen",
"164": "Huid-, haarverzorging en Make-up",
"165": "Fotografie",
"166": 'Winkels',
"167": "Huishoudelijke apparaten",
"168": "Sociale Media"
}
#Search for documents containing the query and make a list of categories
def searchFAC(term):
text = ''
quest = es.get(index="zoekmachine", doc_type="question", id=5)['_source']
results = []
cat = c.Counter()
res = es.search(index="zoekmachine", doc_type="question", body={"query": {"match": {"Question": term}}})
print("%d documents found" % res['hits']['total'])
for doc in res['hits']['hits']:
url = "https://www.startpagina.nl/v/vraag/" + doc['_source']['Number'] + "/"
title = doc['_source']['Question']
text = text + title
results.append(wrapinresults(url, title))
caterogynr= doc['_source']['Category']
cat[switcherGetCat.get(caterogynr, "None")] += 1
return cat, res
#Search for documents containing the query and filter out unwanted categories
def searchFAC2(term, catNr):
text = ''
quest = es.get(index="zoekmachine", doc_type="question", id=5)['_source']
results = []
res = es.search(index="zoekmachine", doc_type="question", body={"query": {"match": {"Question": term}}})
print("%d documents found" % res['hits']['total'])
for doc in res['hits']['hits']:
url = "h | ttps://www.startpagina.nl/v/vraag/" + doc['_source']['Number'] + "/"
title = doc['_source']['Question']
text = text + title
if int(doc['_source']['Category']) == catNr:
results.append(wrapinresults(url, title))
ma | conditional_block | |
zoekmachine.py | : "Persoon & Gezondheid",
"3" : "Maatschappij",
"4" : "Financiën & Werk",
"5" : "Vervoer",
"6" : "Computers & Internet",
"7" : "Elektronica",
"8" : "Entertainment & Muziek",
"9" : "Eten & Drinken",
"10": "Sport, Spel & Recreatie",
'11': "Huis & Tuin",
"12": "Wetenschap",
"13": "Vakantie & Reizen",
"14": "Kunst & Cultuur",
"15": "Overig",
'16': "Biologie",
"17": "Wiskunde",
"18": "Natuur- en scheikunde",
"19": "Psychologie",
"20": "Sociale wetenschap",
"21": "Overig",
"22": "Auto's",
"23" : "Vliegtuigen",
"24" : "Boten",
"25" : "Openbaar vervoer",
"26" : "Motorfietsen",
"27" : "Fietsen",
"28": 'Overig',
"29": 'Spellen',
"30": 'Computergames',
"31": "Hobby\'s",
"32": "Sporten",
"33": "Overig",
"34": 'Caraïben',
"35": "Noord-Amerika",
"36": 'Zuid-Amerika',
"37": "Afrika",
"38": 'Antarctica',
"39": 'Azië',
'40': 'Europa',
"41": "Midden-Amerika",
'42': "Midden-Oosten",
"43": "Oceanië",
"44": "Overig",
"45": "Overig",
"46": "Mode & Accessoires",
"47": "Familie & Relatie",
"48": "Gezondheid",
"49": "Zwangerschap",
"50": 'Onderwijs',
"51": 'Milieu',
"52": "Politiek & Overheid",
"53": "Samenleving",
"54": 'Overig',
"55": "Boeken & Auteurs",
"56": "Genealogie",
"57": 'Geschiedenis',
"58": 'Filosofie',
"59": 'Poëzie',
"60": "Beeldende kunst",
"61": "Overig",
"62": "Schoonmaken & Wassen",
"63": 'Interieur',
"64": 'Doe-Het-Zelf',
"65": 'Tuin',
"66": 'Huisdieren',
"67": 'Overig',
"68": 'Dranken',
"69": 'Koken & Recepten',
"70": 'Vegetarisch & Veganistisch',
"71": "Uit eten",
"72": "Overig",
"73": 'Beroemdheden',
"74": 'Stripboeken & Tekenfilms',
"75": 'Tijdschriften',
"76": 'Horoscoop',
"77": 'Films',
"78": 'Muziek',
"79": 'Radio',
"80": 'Televisie',
"81": 'Overig',
"82": 'Videocameras',
"83": "Camera\'s",
"84": "Telefoon & Abonnementen",
"85": 'Spelcomputers',
"86": 'Audio',
"87": "Handhelds & Smartphones",
"88": "Televisies",
"89": 'Overig',
"90": 'Hardware',
"91": 'Software',
"92": 'Internet',
"93": 'Programmeren & Design',
"94": 'Veiligheid',
"95": 'Overig',
"96": 'Carrière & Werk',
"97": 'Financiën',
"98": 'Huren & Vastgoed',
"100": 'Belasting',
"101": 'Overig',
"103": 'Ondernemen',
"104": "Religie",
"106": 'Vrachtwagens & Transport',
"107": 'Treinen',
"108": "Taal",
"109": "Spiritualiteit",
"110": 'Ruimtevaart & Sterrenkunde',
"111": "Besturingssystemen",
"113": "Voetbal",
"114": 'Wielrennen',
"115": 'Tennis',
"116": "Formule 1",
"117": "Hockey",
"118": 'Schaatsen',
"119": 'Overig',
"120": 'Vragen aan mannen',
"121": 'Vragen aan vrouwen',
"122": "GoeieVraag.nl",
"123": "Ouderschap & Opvoeding",
"124": 'Wetgeving',
"125": 'Wintersport',
"126": 'Feestdagen',
"127": "Sinterklaas",
"128": 'Kerst',
"129": 'Pasen',
"130": "Andere feestdagen",
"131": "Seksualiteit",
"132": "Aardrijkskunde & Aardwetenschappen",
"133": "Energie",
"134": "Verzekeringen",
"135": "Sparen & Beleggen",
"136": "Overig",
"137": "Alternatieve geneeswijzen",
"138": "Gebit",
"139": 'Psyche',
"140": 'Voeding',
"141": "Ziekten",
"142": "Optiek",
"143": "Lichamelijke klachten",
"144": "Mannelijk lichaam",
"145": "Vrouwelijk lichaam",
"146": "Overig",
"147": 'Kinderen',
"148": "Reparaties",
"149": "Banden",
"150": "Brom- & Snorfietsen",
"151": "Weblogs",
"152": 'Webshops',
"156": "Meteorologie",
"157": "Lenen",
"158": "Sparen",
"159": "Hypotheek",
"160": "Economie",
"161": "Techniek",
"162": "Landbouw & Veeteelt",
"163": "Medicijnen",
"164": "Huid-, haarverzorging en Make-up",
"165": "Fotografie",
"166": 'Winkels',
"167": "Huishoudelijke apparaten",
"168": "Sociale Media"
}
#Search for documents containing the query and make a list of categories
def searchFAC(term):
text = ''
quest = es.get(index="zoekmachine", doc_type="question", id=5)['_source']
results = []
cat = c.Counter()
res = es.search(index="zoekmachine", doc_type="question", body={"query": {"match": {"Question": term}}})
print("%d documents found" % res['hits']['total'])
for doc in res['hits']['hits']:
url = "https://www.startpagina.nl/v/vraag/" + doc['_source']['Number'] + "/"
title = doc['_source']['Question']
text = text + title
results.append(wrapinresults(url, title))
caterogynr= doc['_source']['Category']
cat[switcherGetCat.get(caterogynr, "None")] += 1
return cat, res
#Search for documents containing the query and filter out unwanted categories
def searchFA | C2(term, c | identifier_name | |
zoekmachine.py | list_results[4], list_results[5], list_results[6], list_results[7], list_results[8], list_results[9])
f.write(whole)
f.close()
open_new_tab(filename)
#Wrap results into hrefs to place in result blocks
def wrapinresults(url, question):
wrapper = """<p><a href=%s>%s</a></p>"""
whole = wrapper % (url, question)
return(whole)
#Makes wordcloud, removing stopwords, showing image
def makeWordCloud(term ,text):
text = text.lower()
text = text.replace(term, '')
text = text.replace('de', '')
text = text.replace('het', '')
text = text.replace('een', '')
text = text.replace('zijn', '')
wordcloud = WordCloud().generate(text)
image = wordcloud.to_image()
image.show()
#Shows in which year the most hits have been made
def showTimeLine(res):
from datetime import datetime
import matplotlib.pyplot as plt
timeline = []
#finds date of creatio of the questions
for doc in res['hits']['hits']:
date = doc['_source']['Date']
date = date.split(" ", 1)[0]
datetime_object = datetime.strptime(date, '%Y-%m-%d').date()
timeline.append(datetime_object)
#creates figure
x = timeline | fig, ax = plt.subplots()
ax.bar(timeline, y, width = 10)
fig.autofmt_xdate()
plt.show()
#Performs actual search
def search(term, filter):
text = ''
quest = es.get(index="zoekmachine", doc_type="question", id=5)['_source']
#Connect to cloud and find results
results = []
res = es.search(index="zoekmachine", doc_type="question", body={"query": {"match": {"Question": term}}})
print("%d documents found" % res['hits']['total'])
#Create the results to print on SERP
for doc in res['hits']['hits']:
url = "https://www.startpagina.nl/v/vraag/" + doc['_source']['Number'] + "/"
title = doc['_source']['Question']
text = text + title
date = doc['_source']['Date']
date = date.split("-", 1)[0]
if int(date) >= int(filter):
results.append(wrapinresults(url, title))
#Show all work to user
makeWordCloud(term, text)
docscount = len(results)
if docscount < 10:
for x in range(0, (10 - docscount)):
results.append("")
wrapStringInHTMLWindows(term, "serp", results, "body")
showTimeLine(res)
#Ask user for query
def simple():
print("What are you looking for?")
term = sys.stdin.readline()
search(term, 0000)
#Ask user for query in which time setting
def advanced():
print("What are you looking for? (ADV)")
term = sys.stdin.readline()
print("From what year on?")
year = sys.stdin.readline()
search(term, year)
#Return that the choice was not available
def invalid():
print("Not a valid choice")
#List of all categories in data set
switcherGetCat = {
"1" : "Alle categorieën",
"2" : "Persoon & Gezondheid",
"3" : "Maatschappij",
"4" : "Financiën & Werk",
"5" : "Vervoer",
"6" : "Computers & Internet",
"7" : "Elektronica",
"8" : "Entertainment & Muziek",
"9" : "Eten & Drinken",
"10": "Sport, Spel & Recreatie",
'11': "Huis & Tuin",
"12": "Wetenschap",
"13": "Vakantie & Reizen",
"14": "Kunst & Cultuur",
"15": "Overig",
'16': "Biologie",
"17": "Wiskunde",
"18": "Natuur- en scheikunde",
"19": "Psychologie",
"20": "Sociale wetenschap",
"21": "Overig",
"22": "Auto's",
"23" : "Vliegtuigen",
"24" : "Boten",
"25" : "Openbaar vervoer",
"26" : "Motorfietsen",
"27" : "Fietsen",
"28": 'Overig',
"29": 'Spellen',
"30": 'Computergames',
"31": "Hobby\'s",
"32": "Sporten",
"33": "Overig",
"34": 'Caraïben',
"35": "Noord-Amerika",
"36": 'Zuid-Amerika',
"37": "Afrika",
"38": 'Antarctica',
"39": 'Azië',
'40': 'Europa',
"41": "Midden-Amerika",
'42': "Midden-Oosten",
"43": "Oceanië",
"44": "Overig",
"45": "Overig",
"46": "Mode & Accessoires",
"47": "Familie & Relatie",
"48": "Gezondheid",
"49": "Zwangerschap",
"50": 'Onderwijs',
"51": 'Milieu',
"52": "Politiek & Overheid",
"53": "Samenleving",
"54": 'Overig',
"55": "Boeken & Auteurs",
"56": "Genealogie",
"57": 'Geschiedenis',
"58": 'Filosofie',
"59": 'Poëzie',
"60": "Beeldende kunst",
"61": "Overig",
"62": "Schoonmaken & Wassen",
"63": 'Interieur',
"64": 'Doe-Het-Zelf',
"65": 'Tuin',
"66": 'Huisdieren',
"67": 'Overig',
"68": 'Dranken',
"69": 'Koken & Recepten',
"70": 'Vegetarisch & Veganistisch',
"71": "Uit eten",
"72": "Overig",
"73": 'Beroemdheden',
"74": 'Stripboeken & Tekenfilms',
"75": 'Tijdschriften',
"76": 'Horoscoop',
"77": 'Films',
"78": 'Muziek',
"79": 'Radio',
"80": 'Televisie',
"81": 'Overig',
"82": 'Videocameras',
"83": "Camera\'s",
"84": "Telefoon & Abonnementen",
"85": 'Spelcomputers',
"86": 'Audio',
"87": "Handhelds & Smartphones",
"88": "Televisies",
"89": 'Overig',
"90": 'Hardware',
"91": 'Software',
"92": 'Internet',
"93": 'Programmeren & Design',
"94": 'Veiligheid',
"95": 'Overig',
"96": 'Carrière & Werk',
"97": 'Financiën',
"98": 'Huren & Vastgoed',
"100": 'Belasting',
"101": 'Overig',
"103": 'Ondernemen',
"104": "Religie",
"106": 'Vrachtwagens & Transport',
"107": 'Treinen',
"108": "Taal",
"109": "Spiritualiteit",
"110": 'Ruimtevaart & Sterrenkunde',
"111": "Besturingssystemen",
"113": "Voetbal",
"114": 'Wielrennen',
"115": 'Tennis',
"116": "Formule 1",
"117": "Hockey",
"118": 'Schaatsen',
"119": 'Overig',
"120": 'Vragen aan mannen',
"121": 'Vragen aan vrouwen',
"122": "GoeieVraag.nl | y = range(len(timeline))
| random_line_split |
zoekmachine.py | _results[4], list_results[5], list_results[6], list_results[7], list_results[8], list_results[9])
f.write(whole)
f.close()
open_new_tab(filename)
#Wrap results into hrefs to place in result blocks
def wrapinresults(url, question):
wrapper = """<p><a href=%s>%s</a></p>"""
whole = wrapper % (url, question)
return(whole)
#Makes wordcloud, removing stopwords, showing image
def makeWordCloud(term ,text):
text = text.lower()
text = text.replace(term, '')
text = text.replace('de', '')
text = text.replace('het', '')
text = text.replace('een', '')
text = text.replace('zijn', '')
wordcloud = WordCloud().generate(text)
image = wordcloud.to_image()
image.show()
#Shows in which year the most hits have been made
def showTimeLine(res):
from datetime import datetime
import matplotlib.pyplot as plt
timeline = []
#finds date of creatio of the questions
for doc in res['hits']['hits']:
date = doc['_source']['Date']
date = date.split(" ", 1)[0]
datetime_object = datetime.strptime(date, '%Y-%m-%d').date()
timeline.append(datetime_object)
#creates figure
x = timeline
y = range(len(timeline))
fig, ax = plt.subplots()
ax.bar(timeline, y, width = 10)
fig.autofmt_xdate()
plt.show()
#Performs actual search
def search(term, filter):
text = ''
quest = es.get(index="zoekmachine", doc_type="question", id=5)['_source']
#Connect to cloud and find results
results = []
res = es.search(index="zoekmachine", doc_type="question", body={"query": {"match": {"Question": term}}})
print("%d documents found" % res['hits']['total'])
#Create the results to print on SERP
for doc in res['hits']['hits']:
url = "https://www.startpagina.nl/v/vraag/" + doc['_source']['Number'] + "/"
title = doc['_source']['Question']
text = text + title
date = doc['_source']['Date']
date = date.split("-", 1)[0]
if int(date) >= int(filter):
results.append(wrapinresults(url, title))
#Show all work to user
makeWordCloud(term, text)
docscount = len(results)
if docscount < 10:
for x in range(0, (10 - docscount)):
results.append("")
wrapStringInHTMLWindows(term, "serp", results, "body")
showTimeLine(res)
#Ask user for query
def simple():
|
#Ask user for query in which time setting
def advanced():
print("What are you looking for? (ADV)")
term = sys.stdin.readline()
print("From what year on?")
year = sys.stdin.readline()
search(term, year)
#Return that the choice was not available
def invalid():
print("Not a valid choice")
#List of all categories in data set
switcherGetCat = {
"1" : "Alle categorieën",
"2" : "Persoon & Gezondheid",
"3" : "Maatschappij",
"4" : "Financiën & Werk",
"5" : "Vervoer",
"6" : "Computers & Internet",
"7" : "Elektronica",
"8" : "Entertainment & Muziek",
"9" : "Eten & Drinken",
"10": "Sport, Spel & Recreatie",
'11': "Huis & Tuin",
"12": "Wetenschap",
"13": "Vakantie & Reizen",
"14": "Kunst & Cultuur",
"15": "Overig",
'16': "Biologie",
"17": "Wiskunde",
"18": "Natuur- en scheikunde",
"19": "Psychologie",
"20": "Sociale wetenschap",
"21": "Overig",
"22": "Auto's",
"23" : "Vliegtuigen",
"24" : "Boten",
"25" : "Openbaar vervoer",
"26" : "Motorfietsen",
"27" : "Fietsen",
"28": 'Overig',
"29": 'Spellen',
"30": 'Computergames',
"31": "Hobby\'s",
"32": "Sporten",
"33": "Overig",
"34": 'Caraïben',
"35": "Noord-Amerika",
"36": 'Zuid-Amerika',
"37": "Afrika",
"38": 'Antarctica',
"39": 'Azië',
'40': 'Europa',
"41": "Midden-Amerika",
'42': "Midden-Oosten",
"43": "Oceanië",
"44": "Overig",
"45": "Overig",
"46": "Mode & Accessoires",
"47": "Familie & Relatie",
"48": "Gezondheid",
"49": "Zwangerschap",
"50": 'Onderwijs',
"51": 'Milieu',
"52": "Politiek & Overheid",
"53": "Samenleving",
"54": 'Overig',
"55": "Boeken & Auteurs",
"56": "Genealogie",
"57": 'Geschiedenis',
"58": 'Filosofie',
"59": 'Poëzie',
"60": "Beeldende kunst",
"61": "Overig",
"62": "Schoonmaken & Wassen",
"63": 'Interieur',
"64": 'Doe-Het-Zelf',
"65": 'Tuin',
"66": 'Huisdieren',
"67": 'Overig',
"68": 'Dranken',
"69": 'Koken & Recepten',
"70": 'Vegetarisch & Veganistisch',
"71": "Uit eten",
"72": "Overig",
"73": 'Beroemdheden',
"74": 'Stripboeken & Tekenfilms',
"75": 'Tijdschriften',
"76": 'Horoscoop',
"77": 'Films',
"78": 'Muziek',
"79": 'Radio',
"80": 'Televisie',
"81": 'Overig',
"82": 'Videocameras',
"83": "Camera\'s",
"84": "Telefoon & Abonnementen",
"85": 'Spelcomputers',
"86": 'Audio',
"87": "Handhelds & Smartphones",
"88": "Televisies",
"89": 'Overig',
"90": 'Hardware',
"91": 'Software',
"92": 'Internet',
"93": 'Programmeren & Design',
"94": 'Veiligheid',
"95": 'Overig',
"96": 'Carrière & Werk',
"97": 'Financiën',
"98": 'Huren & Vastgoed',
"100": 'Belasting',
"101": 'Overig',
"103": 'Ondernemen',
"104": "Religie",
"106": 'Vrachtwagens & Transport',
"107": 'Treinen',
"108": "Taal",
"109": "Spiritualiteit",
"110": 'Ruimtevaart & Sterrenkunde',
"111": "Besturingssystemen",
"113": "Voetbal",
"114": 'Wielrennen',
"115": 'Tennis',
"116": "Formule 1",
"117": "Hockey",
"118": 'Schaatsen',
"119": 'Overig',
"120": 'Vragen aan mannen',
"121": 'Vragen aan vrouwen',
"122": "GoeieVraag | print("What are you looking for?")
term = sys.stdin.readline()
search(term, 0000) | identifier_body |
bayes.py | 所有词的出现数初始化为1, 并将分母初始化为2
"""
p0Num = ones(numWords); p1Num = ones(numWords) # change to ones()
p0Denom = 2.0; p1Denom = 2.0 # change to 2.0
for i in range(numTrainDocs):
if trainCategory[i] == 1:
# 2. (以下两行) 向量相加
p1Num += trainMatrix[i]
p1Denom += sum(trainMatrix[i])
else:
p0Num += trainMatrix[i]
p0Denom += sum(trainMatrix[i])
# 3. 对每个元素做除法
"""
另一个遇到的问题是下溢出, 这是由于太多很小的数相乘造成的。 当计算乘积p(w0|ci)p(w1|ci)p(w2|ci)...p(wN|ci)时, 由于大部分因子都
非常小, 所以程序会下溢出或者得到不正确的答案。 (读者可以用Python尝试相乘许多很小的数, 最后四舍五入后会得到0。 ) 一种解决
办法是对乘积取自然对数。 在代数中有ln(a*b) = ln(a)+ln(b), 于是通过求对数可以避免下溢出或者浮点数舍入导致的错误。 同时, 采用
自然对数进行处理不会有任何损失。 图4-4给出函数f(x)与ln(f(x))的曲线。 检查这两条曲线, 就会发现它们在相同区域内同时增加或者减
少, 并且在相同点上取到极值。 它们的取值虽然不同, 但不影响最终结果。 通过修改return前的两行代码, 将上述做法用到分类器中:
"""
p1Vect = log(p1Num/p1Denom) # change to log()
p0Vect = log(p0Num/p0Denom) # change to log()
return p0Vect,p1Vect,pAbusive
def test2():
listPosts, listClass = loadDataSet()
# 构建了一个包含所有词的列表mVocabList
mVocabList = createVocabList(listPosts)
setOfWords2Vec(mVocabList, listPosts[0])
trainMat = []
for postinDoc in listPosts:
temp = setOfWords2Vec(mVocabList, postinDoc)
trainMat.append(temp)
# 文档属于侮辱类的概率pAb
p0v, p1v, pAb = trainNB0(trainMat, listClass)
print pAb
"""
接下来看一看在给定文档类别条件下词汇表中单词的出现概率, 看看是否正确。 词汇表中的第一个词是cute, 其在类别0中出现1次, 而在类别1中
从未出现。 对应的条件概率分别为0.041 666 67与0.0。 该计算是正确的。 我们找找所有概率中的最大值, 该值出现在P(1)数组第26个下标位
置, 大小为0.157 894 74。 在myVocabList的第26个下标位置上可以查到该单词是stupid。 这意味着stupid是最能表征类别1(侮辱性文档类)的单词。
"""
"""
代码有4个输入: 要分类的向量vec2Classify以及使用函数trainNB0()计算得到的三个概率。 使用NumPy的数组来计算两个
向量相乘的结果❶。 这里的相乘是指对应元素相乘, 即先将两个向量中的第1个元素相乘, 然后将第2个元素相乘, 以此类推。 接下来将词汇表
中所有词的对应值相加, 然后将该值加到类别的对数概率上。 最后, 比较类别的概率返回大概率对应的类别标签。 这一切不是很难, 对吧?
"""
def classifyNB(vec2Classify, p0Vec, p1Vec, pClass1):
# 1. 元素相乘 分类计算的核心
p1 = sum(vec2Classify * p1Vec) + log(pClass1) # element-wise mult
p0 = sum(vec2Classify * p0Vec) + log(1.0 - pClass1)
if p1 > p0:
return 1
else:
return 0
"""
对文本做一些修改, 看看分类器会输出什么结果。 这个例子非常简单,但是它展示了朴素贝叶斯分类器的工作原理。
接下来,我们会对代码做些修改, 使分类器工作得更好。
函数setOfWords2Vec()稍加修改, 修改后的函数称为bagOfWords2Vec()
-----------------------------------准备数据: 文档词袋模型---------------------------------------
"""
def bagOfWords2VecMN(vocabList, inputSet):
returnVec = [0]*len(vocabList)
for word in inputSet:
if word in vocabList:
# todo 这个词的操作
returnVec[vocabList.index(word)] += 1
return returnVec
"""
函数是一个便利函数(convenience function) , 该函数封装所有操作, 以节省输入
"""
def testingNB():
listOPosts,listClasses = loadDataSet()
myVocabList = createVocabList(listOPosts)
trainMat=[]
for postinDoc in listOPosts:
trainMat.append(setOfWords2Vec(myVocabList, postinDoc))
p0V,p1V,pAb = trainNB0(array(trainMat),array(listClasses))
testEntry = ['love', 'my', 'dalmation']
thisDoc = array(setOfWords2Vec(myVocabList, testEntry))
print (testEntry,'classified as: ',classifyNB(thisDoc,p0V,p1V,pAb))
testEntry = ['stupid', 'garbage']
thisDoc = array(setOfWords2Vec(myVocabList, testEntry))
print (testEntry,'classified as: ',classifyNB(thisDoc,p0V,p1V,pAb))
# testingNB()
# -----------------------------------使用朴素贝叶斯过滤垃圾邮件----------------------------
"""
准备数据: 切分文本
可以看到, 切分的结果不错, 但是标点符号也被当成了词的一部分。 可以使用正则表示式来切分句子, 其中分隔符是除单词、 数字外的任意字符串
"""
def textParse(bigString): #input is big string, #output is word list
import re
listOfTokens = re.split(r'\W*', bigString)
return [tok.lower() for tok in listOfTokens if len(tok) > 2]
""""
函数spamTest()对贝叶斯垃圾邮件分类器进行自动化处理。 导入文件夹spam与ham下的文本文件, 并将它们解析为词列表❶。 接下来
构建一个测试集与一个训练集, 两个集合中的邮件都是随机选出的。 本例中共有50封电子邮件, 并不是很多, 其中的10封电子邮件被随机选择
为测试集。 分类器所需要的概率计算只利用训练集中的文档来完成。Python变量trainingSet是一个整数列表, 其中的值从0到49。 接下
来, 随机选择其中10个文件❷。 选择出的数字所对应的文档被添加到测试集, 同时也将其从训练集中剔除。 这种随机选择数据的一部分作为训
练集, 而剩余部分作为测试集的过程称为留存交叉验证(hold-out crossvalidation) 。 假定现在只完成了一次迭代, 那么为了更精确地估计分类
器的错误率, 就应该进行多次迭代后求出平均错误率。接下来的for循环遍历训练集的所有文档, 对每封邮件基于词汇表并使
用setOfWords2Vec()函数来构建词向量。 这些词在traindNB0()函数中用于计算分类所需的概率。 然后遍历测试集, 对其中每封电子邮件进
行分类❸。 如果邮件分类错误, 则错误数加1, 最后给出总的错误百分比
"""
def spamTest():
docList=[]; classList = []; fullText =[]
for i in range(1,26):
wordList = textParse(open('email/spam/%d.txt' % i).read())
docList.append(wordList)
fullText.extend(wordList)
classList.append(1)
wordList = textParse(open('email/ham/%d.txt' % i).read())
docList.append(wordList)
fullText.extend(wordList)
classList.append(0)
vocabList = createVocabList(docList)# create vocabulary | trainingSet = range(50); testSet=[] # create test set
# (以下四行) 随机构建训练集 | random_line_split | |
bayes.py | (侮辱性文档类)的单词。
"""
"""
代码有4个输入: 要分类的向量vec2Classify以及使用函数trainNB0()计算得到的三个概率。 使用NumPy的数组来计算两个
向量相乘的结果❶。 这里的相乘是指对应元素相乘, 即先将两个向量中的第1个元素相乘, 然后将第2个元素相乘, 以此类推。 接下来将词汇表
中所有词的对应值相加, 然后将该值加到类别的对数概率上。 最后, 比较类别的概率返回大概率对应的类别标签。 这一切不是很难, 对吧?
"""
def classifyNB(vec2Classify, p0Vec, p1Vec, pClass1):
# 1. 元素相乘 分类计算的核心
p1 = sum(vec2Classify * p1Vec) + log(pClass1) # element-wise mult
p0 = sum(vec2Classify * p0Vec) + log(1.0 - pClass1)
if p1 > p0:
return 1
else:
return 0
"""
对文本做一些修改, 看看分类器会输出什么结果。 这个例子非常简单,但是它展示了朴素贝叶斯分类器的工作原理。
接下来,我们会对代码做些修改, 使分类器工作得更好。
函数setOfWords2Vec()稍加修改, 修改后的函数称为bagOfWords2Vec()
-----------------------------------准备数据: 文档词袋模型---------------------------------------
"""
def bagOfWords2VecMN(vocabList, inputSet):
returnVec = [0]*len(vocabList)
for word in inputSet:
if word in vocabList:
# todo 这个词的操作
returnVec[vocabList.index(word)] += 1
return returnVec
"""
函数是一个便利函数(convenience function) , 该函数封装所有操作, 以节省输入
"""
def testingNB():
listOPosts,listClasses = loadDataSet()
myVocabList = createVocabList(listOPosts)
trainMat=[]
for postinDoc in listOPosts:
trainMat.append(setOfWords2Vec(myVocabList, postinDoc))
p0V,p1V,pAb = trainNB0(array(trainMat),array(listClasses))
testEntry = ['love', 'my', 'dalmation']
thisDoc = array(setOfWords2Vec(myVocabList, testEntry))
print (testEntry,'classified as: ',classifyNB(thisDoc,p0V,p1V,pAb))
testEntry = ['stupid', 'garbage']
thisDoc = array(setOfWords2Vec(myVocabList, testEntry))
print (testEntry,'classified as: ',classifyNB(thisDoc,p0V,p1V,pAb))
# testingNB()
# -----------------------------------使用朴素贝叶斯过滤垃圾邮件----------------------------
"""
准备数据: 切分文本
可以看到, 切分的结果不错, 但是标点符号也被当成了词的一部分。 可以使用正则表示式来切分句子, 其中分隔符是除单词、 数字外的任意字符串
"""
def textParse(bigString): #input is big string, #output is word list
import re
listOfTokens = re.split(r'\W*', bigString)
return [tok.lower() for tok in listOfTokens if len(tok) > 2]
""""
函数spamTest()对贝叶斯垃圾邮件分类器进行自动化处理。 导入文件夹spam与ham下的文本文件, 并将它们解析为词列表❶。 接下来
构建一个测试集与一个训练集, 两个集合中的邮件都是随机选出的。 本例中共有50封电子邮件, 并不是很多, 其中的10封电子邮件被随机选择
为测试集。 分类器所需要的概率计算只利用训练集中的文档来完成。Python变量trainingSet是一个整数列表, 其中的值从0到49。 接下
来, 随机选择其中10个文件❷。 选择出的数字所对应的文档被添加到测试集, 同时也将其从训练集中剔除。 这种随机选择数据的一部分作为训
练集, 而剩余部分作为测试集的过程称为留存交叉验证(hold-out crossvalidation) 。 假定现在只完成了一次迭代, 那么为了更精确地估计分类
器的错误率, 就应该进行多次迭代后求出平均错误率。接下来的for循环遍历训练集的所有文档, 对每封邮件基于词汇表并使
用setOfWords2Vec()函数来构建词向量。 这些词在traindNB0()函数中用于计算分类所需的概率。 然后遍历测试集, 对其中每封电子邮件进
行分类❸。 如果邮件分类错误, 则错误数加1, 最后给出总的错误百分比
"""
def spamTest():
docList=[]; classList = []; fullText =[]
for i in range(1,26):
wordList = textParse(open('email/spam/%d.txt' % i).read())
docList.append(wordList)
fullText.extend(wordList)
classList.append(1)
wordList = textParse(open('email/ham/%d.txt' % i).read())
docList.append(wordList)
fullText.extend(wordList)
classList.append(0)
vocabList = createVocabList(docList)# create vocabulary
trainingSet = range(50); testSet=[] # create test set
# (以下四行) 随机构建训练集
for i in range(10):
randIndex = int(random.uniform(0,len(trainingSet)))
testSet.append(trainingSet[randIndex])
# todo del这个操作步骤
del(trainingSet[randIndex])
trainMat=[]; trainClasses = []
# (以下四行) 对测试集分类
for docIndex in trainingSet:#train the classifier (get probs) trainNB0
trainMat.append(bagOfWords2VecMN(vocabList, docList[docIndex]))
trainClasses.append(classList[docIndex])
# todo 入参出参的计算方法
p0V,p1V,pSpam = trainNB0(array(trainMat),array(trainClasses))
errorCount = 0
for docIndex in testSet: #classify the remaining items
wordVector = bagOfWords2VecMN(vocabList, docList[docIndex])
if classifyNB(array(wordVector),p0V,p1V,pSpam) != classList[docIndex]:
errorCount += 1
print ("classification error",docList[docIndex])
print ('the error rate is: ',float(errorCount)/len(testSet))
#return vocabList,fullText
# ------------------------自动化处理----------------------------------------
spamTest()
# ----------------------------- 4.7. 示例: 使用朴素贝叶斯分类器从个人广告中获取区域倾向---------------------------
""""
RSS源分类器及高频词去除函数
函数calcMostFreq() ❶。 该函数遍历词汇表中的每个词并统计它在文本中出现的次数, 然后根据出现次数从高到低对词典进行排序,
最后返回排序最高的30个单词。 你很快就会明白这个函数的重要性
以下四行) 计算出现频率
"""
def calcMostFreq(vocabList,fullText):
import operator
freqDict = {}
for token in vocabList:
freqDict[token]=fullText.count(token)
sortedFreq = sorted(freqDict.iteritems(), key=operator.itemgetter(1), reverse=True)
return sortedFreq[:30]
""""
函数localWords()使用两个RSS源作为参数。 RSS源要在函数外
导入, 这样做的原因是RSS源会随时间而改变。 如果想通过改变代码来
比较程序执行的差异, 就应该使用相同的输入。 重新加载RSS源就会得
到新的数据, 但很难确定是代码原因还是输入原因导致输出结果的改
变。 函数localWords()与程序清单4-5中的spamTest()函数几乎相
同, 区别在于这里访问的是RSS源❷而不是文件。 然后调用函
数calcMostFreq()来获得排序最高的30个单词并随后将它们移除❸。
函数的剩余部分与spamTest()基本类似, 不同的是最后一行要返回下
面要用到的值。
"""
def localWords(feed1,feed0):
import feedparser
docList=[]; classList = []; fullText =[]
minLen = min(len(feed1['entries']),len(feed0['entries']))
for i in range(minLen):
# 2 每次访问一条RSS源
wordList = textParse(feed1['entries'][i]['summary'])
docList.append(wordList)
fullText.extend(wordList)
classList.append(1) #NY is class 1
wordList = textParse(feed0['entries'][i]['summary'])
docList.append(wordList)
fullText.extend(wordList)
classList.app | end(0)
| identifier_name | |
bayes.py | testEntry))
print (testEntry,'classified as: ',classifyNB(thisDoc,p0V,p1V,pAb))
testEntry = ['stupid', 'garbage']
thisDoc = array(setOfWords2Vec(myVocabList, testEntry))
print (testEntry,'classified as: ',classifyNB(thisDoc,p0V,p1V,pAb))
# testingNB()
# -----------------------------------使用朴素贝叶斯过滤垃圾邮件----------------------------
"""
准备数据: 切分文本
可以看到, 切分的结果不错, 但是标点符号也被当成了词的一部分。 可以使用正则表示式来切分句子, 其中分隔符是除单词、 数字外的任意字符串
"""
def textParse(bigString): #input is big string, #output is word list
import re
listOfTokens = re.split(r'\W*', bigString)
return [tok.lower() for tok in listOfTokens if len(tok) > 2]
""""
函数spamTest()对贝叶斯垃圾邮件分类器进行自动化处理。 导入文件夹spam与ham下的文本文件, 并将它们解析为词列表❶。 接下来
构建一个测试集与一个训练集, 两个集合中的邮件都是随机选出的。 本例中共有50封电子邮件, 并不是很多, 其中的10封电子邮件被随机选择
为测试集。 分类器所需要的概率计算只利用训练集中的文档来完成。Python变量trainingSet是一个整数列表, 其中的值从0到49。 接下
来, 随机选择其中10个文件❷。 选择出的数字所对应的文档被添加到测试集, 同时也将其从训练集中剔除。 这种随机选择数据的一部分作为训
练集, 而剩余部分作为测试集的过程称为留存交叉验证(hold-out crossvalidation) 。 假定现在只完成了一次迭代, 那么为了更精确地估计分类
器的错误率, 就应该进行多次迭代后求出平均错误率。接下来的for循环遍历训练集的所有文档, 对每封邮件基于词汇表并使
用setOfWords2Vec()函数来构建词向量。 这些词在traindNB0()函数中用于计算分类所需的概率。 然后遍历测试集, 对其中每封电子邮件进
行分类❸。 如果邮件分类错误, 则错误数加1, 最后给出总的错误百分比
"""
def spamTest():
docList=[]; classList = []; fullText =[]
for i in range(1,26):
wordList = textParse(open('email/spam/%d.txt' % i).read())
docList.append(wordList)
fullText.extend(wordList)
classList.append(1)
wordList = textParse(open('email/ham/%d.txt' % i).read())
docList.append(wordList)
fullText.extend(wordList)
classList.append(0)
vocabList = createVocabList(docList)# create vocabulary
trainingSet = range(50); testSet=[] # create test set
# (以下四行) 随机构建训练集
for i in range(10):
randIndex = int(random.uniform(0,len(trainingSet)))
testSet.append(trainingSet[randIndex])
# todo del这个操作步骤
del(trainingSet[randIndex])
trainMat=[]; trainClasses = []
# (以下四行) 对测试集分类
for docIndex in trainingSet:#train the classifier (get probs) trainNB0
trainMat.append(bagOfWords2VecMN(vocabList, docList[docIndex]))
trainClasses.append(classList[docIndex])
# todo 入参出参的计算方法
p0V,p1V,pSpam = trainNB0(array(trainMat),array(trainClasses))
errorCount = 0
for docIndex in testSet: #classify the remaining items
wordVector = bagOfWords2VecMN(vocabList, docList[docIndex])
if classifyNB(array(wordVector),p0V,p1V,pSpam) != classList[docIndex]:
errorCount += 1
print ("classification error",docList[docIndex])
print ('the error rate is: ',float(errorCount)/len(testSet))
#return vocabList,fullText
# ------------------------自动化处理----------------------------------------
spamTest()
# ----------------------------- 4.7. 示例: 使用朴素贝叶斯分类器从个人广告中获取区域倾向---------------------------
""""
RSS源分类器及高频词去除函数
函数calcMostFreq() ❶。 该函数遍历词汇表中的每个词并统计它在文本中出现的次数, 然后根据出现次数从高到低对词典进行排序,
最后返回排序最高的30个单词。 你很快就会明白这个函数的重要性
以下四行) 计算出现频率
"""
def calcMostFreq(vocabList,fullText):
import operator
freqDict = {}
for token in vocabList:
freqDict[token]=fullText.count(token)
sortedFreq = sorted(freqDict.iteritems(), key=operator.itemgetter(1), reverse=True)
return sortedFreq[:30]
""""
函数localWords()使用两个RSS源作为参数。 RSS源要在函数外
导入, 这样做的原因是RSS源会随时间而改变。 如果想通过改变代码来
比较程序执行的差异, 就应该使用相同的输入。 重新加载RSS源就会得
到新的数据, 但很难确定是代码原因还是输入原因导致输出结果的改
变。 函数localWords()与程序清单4-5中的spamTest()函数几乎相
同, 区别在于这里访问的是RSS源❷而不是文件。 然后调用函
数calcMostFreq()来获得排序最高的30个单词并随后将它们移除❸。
函数的剩余部分与spamTest()基本类似, 不同的是最后一行要返回下
面要用到的值。
"""
def localWords(feed1,feed0):
import feedparser
docList=[]; classList = []; fullText =[]
minLen = min(len(feed1['entries']),len(feed0['entries']))
for i in range(minLen):
# 2 每次访问一条RSS源
wordList = textParse(feed1['entries'][i]['summary'])
docList.append(wordList)
fullText.extend(wordList)
classList.append(1) #NY is class 1
wordList = textParse(feed0['entries'][i]['summary'])
docList.append(wordList)
fullText.extend(wordList)
classList.append(0)
# (以下四行) 去掉出现次数最高的那些词
vocabList = createVocabList(docList)#create vocabulary
top30Words = calcMostFreq(vocabList,fullText) #remove top 30 words
for pairW in top30Words:
if pairW[0] in vocabList: vocabList.remove(pairW[0])
trainingSet = range(2*minLen); testSet=[] #create test set
for i in range(20):
randIndex = int(random.uniform(0,len(trainingSet)))
testSet.append(trainingSet[randIndex])
del(trainingSet[randIndex])
trainMat=[]; trainClasses = []
for docIndex in trainingSet:#train the classifier (get probs) trainNB0
trainMat.append(bagOfWords2VecMN(vocabList, docList[docIndex]))
trainClasses.append(classList[docIndex])
p0V,p1V,pSpam = trainNB0(array(trainMat),array(trainClasses))
errorCount = 0
for docIndex in testSet: #classify the remaining items
wordVector = bagOfWords2VecMN(vocabList, docList[docIndex])
if classifyNB(array(wordVector),p0V,p1V,pSpam) != classList[docIndex]:
errorCount += 1
print ('the error rate is: ',float(errorCount)/len(testSet))
return vocabList,p0V,p1V
def getTopWords(ny,sf):
import operator
vocabList,p0V,p1V=localWords(ny,sf)
topNY=[]; topSF=[]
for i in range(len(p0V)):
if p0V[i] > -6.0 : topSF.append((vocabList[i],p0V[i]))
if p1V[i] > -6.0 : topNY.append((vocabList[i],p1V[i]))
sortedSF = sorted(topSF, key=lambda pair: pair[1], reverse=True)
print ("SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**")
for item in sortedSF:
print (item[0])
sortedNY = sorted(topNY, key=lambda pair: pair[1], reverse=True)
print ("NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**")
for item in sortedNY:
print (item[0])
| conditional_block | ||
bayes.py | testEntry))
print (testEntry,'classified as: ',classifyNB(thisDoc,p0V,p1V,pAb))
testEntry = ['stupid', 'garbage']
thisDoc = array(setOfWords2Vec(myVocabList, testEntry))
print (testEntry,'classified as: ',classifyNB(thisDoc,p0V,p1V,pAb))
# testingNB()
# -----------------------------------使用朴素贝叶斯过滤垃圾邮件----------------------------
"""
准备数据: 切分文本
可以看到, 切分的结果不错, 但是标点符号也被当成了词的一部分。 可以使用正则表示式来切分句子, 其中分隔符是除单词、 数字外的任意字符串
"""
def textParse(bigString): #input is big string, #output is word list
import re
listOfTokens = re.split(r'\W*', bigString)
return [tok.lower() for tok in listOfTokens if len(tok) > 2]
""""
函数spamTest()对贝叶斯垃圾邮件分类器进行自动化处理。 导入文件夹spam与ham下的文本文件, 并将它们解析为词列表❶。 接下来
构建一个测试集与一个训练集, 两个集合中的邮件都是随机选出的。 本例中共有50封电子邮件, 并不是很多, 其中的10封电子邮件被随机选择
为测试集。 分类器所需要的概率计算只利用训练集中的文档来完成。Python变量trainingSet是一个整数列表, 其中的值从0到49。 接下
来, 随机选择其中10个文件❷。 选择出的数字所对应的文档被添加到测试集, 同时也将其从训练集中剔除。 这种随机选择数据的一部分作为训
练集, 而剩余部分作为测试集的过程称为留存交叉验证(hold-out crossvalidation) 。 假定现在只完成了一次迭代, 那么为了更精确地估计分类
器的错误率, 就应该进行多次迭代后求出平均错误率。接下来的for循环遍历训练集的所有文档, 对每封邮件基于词汇表并使
用setOfWords2Vec()函数来构建词向量。 这些词在traindNB0()函数中用于计算分类所需的概率。 然后遍历测试集, 对其中每封电子邮件进
行分类❸。 如果邮件分类错误, 则错误数加1, 最后给出总的错误百分比
"""
def spamTest():
docList=[]; classList = []; fullText =[]
for i in range(1,26):
wordList = textParse(open('email/spam/%d.txt' % i).read())
docList.append(wordList)
fullText.extend(wordList)
classList.append(1)
wordList = textParse(open('email/ham/%d.txt' % i).read())
docList.append(wordList)
fullText.extend(wordList)
classList.append(0)
vocabList = createVocabList(docList)# create vocabulary
trainingSet = range(50); testSet=[] # create test set
# (以下四行) 随机构建训练集
for i in range(10):
randIndex = int(random.uniform(0,len(trainingSet)))
testSet.append(trainingSet[randIndex])
# todo del这个操作步骤
del(trainingSet[randIndex])
trainMat=[]; trainClasses = []
# (以下四行) 对测试集分类
for docIndex in trainingSet:#train the classifier (get probs) trainNB0
trainMat.append(bagOfWords2VecMN(vocabList, docList[docIndex]))
trainClasses.append(classList[docIndex])
# todo 入参出参的计算方法
p0V,p1V,pSpam = trainNB0(array(trainMat),array(trainClasses))
errorCount = 0
for docIndex in testSet: #classify the remaining items
wordVector = bagOfWords2VecMN(vocabList, docList[docIndex])
if classifyNB(array(wordVector),p0V,p1V,pSpam) != classList[docIndex]:
errorCount += 1
print ("classification error",docList[docIndex])
print ('the error rate is: ',float(errorCount)/len(testSet))
#return vocabList,fullText
# ------------------------自动化处理----------------------------------------
spamTest()
# ----------------------------- 4.7. 示例: 使用朴素贝叶斯分类器从个人广告中获取区域倾向---------------------------
""""
RSS源分类器及高频词去除函数
函数calcMostFreq() ❶。 该函数遍历词汇表中的每个词并统计它在文本中出现的次数, 然后根据出现次数从高到低对词典进行排序,
最后返回排序最高的30个单词。 你很快就会明白这个函数的重要性
以下四行) 计算出现频率
"""
def calcMostFreq(vocabList,fullText):
import operator
freqDict = {}
for token in vocabList:
freqDict[token]=fullText.count(token)
sortedFreq = sorted(freqDict.iteritems(), key=operator.itemgetter(1), reverse=True)
return sortedFreq[:30]
""""
函数localWords()使用两个RSS源作为参数。 RSS源要在函数外
导入, 这样做的原因是RSS源会随时间而改变。 如果想通过改变代码来
比较程序执行的差异, 就应该使用相同的输入。 重新加载RSS源就会得
到新的数据, 但很难确定是代码原因还是输入原因导致输出结果的改
变。 函数localWords()与程序清单4-5中的spamTest()函数几乎相
同, 区别在于这里访问的是RSS源❷而不是文件。 然后调用函
数calcMostFreq()来获得排序最高的30个单词并随后将它们移除❸。
函数的剩余部分与spamTest()基本类似, 不同的是最后一行要返回下
面要用到的值。
"""
def localWords(feed1,feed0):
import feedparser
docList=[]; classList = []; fullText =[]
minLen = min(len(feed1['entries']),len(feed0['entries']))
for i in range(minLen):
# 2 每次访问一条RSS源
wordList = textParse(feed1['entries'][i]['summary'])
docList.append(wordList)
fullText.extend(wordList)
classList.append(1) #NY is class 1
wordList = textParse(feed0['entries'][i]['summary'])
docList.append(wordList)
fullText.extend(wordList)
classList.append(0)
# (以下四行) 去掉出现次数最高的那些词
vocabList = createVocabList(docList)#create vocabulary
top30Words = calcMostFreq(vocabList,fullText) #remove top 30 words
for pairW in top30Words:
if pairW[0] in vocabList: vocabList.remove(pairW[0])
trainingSet = range(2*minLen); testSet=[] #create test set
for i in range(20):
randIndex = int(random.uniform(0,len(trainingSet)))
testSet.append(trainingSet[randIndex])
del(trainingSet[randIndex])
trainMat=[]; trainClasses = []
for docIndex in trainingSet:#train the classifier (get probs) trainNB0
trainMat.append(bagOfWords2VecMN(vocabList, docList[docIndex]))
trainClasses.append(classList[docIndex])
p0V,p1V,pSpam = trainNB0(array(trainMat),array(trainClasses))
errorCount = 0
for docIndex in testSet: #classify the remaining items
wordVector = bagOfWords2VecMN(vocabList, docList[docIndex])
if classifyNB(array(wordVector),p0V,p1V,pSpam) != classList[docIndex]:
errorCount += 1
print ('the error rate is: ',float(errorCount)/len(testSet))
return vocabList,p0V,p1V
def getTopWords(ny,sf):
import operator
vocabList,p0V,p1V=localWords(ny,sf)
topNY=[]; topSF=[]
for i in range(len(p0V)):
if p0V[i] > -6.0 : topSF.append((vocabList[i],p0V[i]))
if p1V[i] > -6.0 : topNY.append((vocabList[i],p1V[i]))
sortedSF = sorted(topSF, key=lambda pair: pair[1], reverse=True)
print ("SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**")
for item in sortedSF:
print (item[0])
sortedNY = sorted(topNY, key=lambda pair: pair[1], reverse=True)
print ("NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**")
for item in sortedNY:
print (item[0])
| identifier_body |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.