input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
0.14, 0.23))
fd(5)
color((0.09, 0.22, 0.5))
fd(1)
color((0.57, 0.5, 0.23))
fd(1)
color((0.98, 0.74, 0.0))
fd(6)
color((0.09, 0.22, 0.5))
fd(1)
color((0.47, 0.18, 0.36))
fd(1)
color((0.88, 0.14, 0.23))
fd(6)
color((0.09, 0.22, 0.5))
fd(1)
color((0.98, 0.74, 0.0))
fd(11)
color((0.12, 0.24, 0.48))
fd(1)
color((0.11, 0.22, 0.51))
fd(1)
color((0.0, 0.0, 0.0))
fd(43)
gt(-128.0,102.5)
fd(43)
color((0.09, 0.22, 0.5))
fd(1)
color((0.37, 0.38, 0.34))
fd(1)
color((0.98, 0.74, 0.0))
fd(11)
color((0.09, 0.22, 0.5))
fd(1)
color((0.85, 0.14, 0.24))
fd(1)
color((0.88, 0.14, 0.23))
fd(5)
color((0.09, 0.22, 0.5))
fd(1)
color((0.98, 0.74, 0.0))
fd(8)
color((0.62, 0.53, 0.21))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.88, 0.14, 0.23))
fd(9)
color((0.09, 0.22, 0.5))
fd(1)
color((0.98, 0.74, 0.0))
fd(3)
color((0.23, 0.3, 0.42))
fd(1)
color((0.74, 0.15, 0.28))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.87, 0.67, 0.07))
fd(1)
color((0.98, 0.74, 0.0))
fd(4)
color((0.73, 0.59, 0.14))
fd(1)
color((0.09, 0.22, 0.5))
fd(2)
color((0.1, 0.23, 0.49))
fd(1)
color((0.96, 0.72, 0.02))
fd(1)
color((0.98, 0.74, 0.0))
fd(4)
color((0.46, 0.44, 0.29))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.88, 0.14, 0.23))
fd(6)
color((0.09, 0.22, 0.5))
fd(1)
color((0.1, 0.23, 0.49))
fd(1)
color((0.97, 0.73, 0.01))
fd(1)
color((0.98, 0.74, 0.0))
fd(7)
color((0.14, 0.25, 0.47))
fd(1)
color((0.16, 0.22, 0.47))
fd(1)
color((0.81, 0.14, 0.25))
fd(8)
color((0.09, 0.22, 0.5))
fd(1)
color((0.7, 0.57, 0.16))
fd(1)
color((0.98, 0.74, 0.0))
fd(7)
color((0.82, 0.65, 0.09))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.25, 0.2, 0.44))
fd(1)
color((0.88, 0.14, 0.23))
fd(5)
color((0.8, 0.15, 0.25))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.95, 0.72, 0.02))
fd(1)
color((0.98, 0.74, 0.0))
fd(4)
color((0.95, 0.72, 0.02))
fd(1)
color((0.1, 0.23, 0.49))
fd(1)
color((0.09, 0.22, 0.5))
fd(2)
color((0.75, 0.6, 0.13))
fd(1)
color((0.98, 0.74, 0.0))
fd(4)
color((0.32, 0.35, 0.37))
fd(1)
color((0.13, 0.22, 0.49))
fd(1)
color((0.19, 0.21, 0.47))
fd(1)
color((0.87, 0.67, 0.06))
fd(1)
color((0.98, 0.74, 0.0))
fd(3)
color((0.09, 0.22, 0.5))
fd(1)
color((0.88, 0.14, 0.23))
fd(9)
color((0.09, 0.22, 0.5))
fd(1)
color((0.55, 0.49, 0.24))
fd(1)
color((0.98, 0.74, 0.0))
fd(8)
color((0.09, 0.22, 0.5))
fd(1)
color((0.88, 0.14, 0.23))
fd(5)
color((0.84, 0.14, 0.24))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.98, 0.74, 0.0))
fd(11)
color((0.37, 0.39, 0.34))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.0, 0.0, 0.0))
fd(43)
gt(-128.0,101.5)
fd(43)
color((0.09, 0.22, 0.5))
fd(1)
color((0.98, 0.73, 0.0))
fd(1)
color((0.98, 0.74, 0.0))
fd(11)
color((0.96, 0.72, 0.02))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.88, 0.14, 0.23))
fd(4)
color((0.42, 0.19, 0.39))
fd(1)
color((0.5, 0.46, 0.27))
fd(1)
color((0.98, 0.74, 0.0))
fd(9)
color((0.09, 0.22, 0.5))
fd(1)
color((0.69, 0.15, 0.29))
fd(1)
color((0.88, 0.14, 0.23))
fd(7)
color((0.85, 0.14, 0.24))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.98, 0.74, 0.0))
fd(3)
color((0.09, 0.22, 0.5))
fd(1)
color((0.88, 0.14, 0.23))
fd(2)
color((0.09, 0.22, 0.5))
fd(1)
color((0.81, 0.64, 0.1))
fd(1)
color((0.98, 0.74, 0.0))
fd(11)
color((0.14, 0.25, 0.47))
fd(1)
color((0.11, 0.22, 0.49))
fd(1)
color((0.88, 0.14, 0.23))
fd(3)
color((0.7, 0.16, 0.29))
fd(1)
color((0.35, 0.19, 0.41))
fd(1)
color((0.17, 0.21, 0.47))
fd(1)
color((0.11, 0.22, 0.49))
fd(1)
color((0.09, 0.22, 0.5))
fd(3)
color((0.15, 0.25, 0.47))
fd(1)
color((0.98, 0.74, 0.0))
fd(5)
color((0.09, 0.22, 0.5))
fd(12)
color((0.55, 0.49, 0.24))
fd(1)
color((0.98, 0.74, 0.0))
fd(5)
color((0.71, 0.58, 0.15))
fd(1)
color((0.09, 0.22, 0.5))
fd(4)
color((0.13, 0.22, 0.48))
fd(1)
color((0.2, 0.21, 0.46))
fd(1)
color((0.5, 0.18, 0.36))
fd(1)
color((0.85, 0.14, 0.24))
fd(1)
color((0.88, 0.14, 0.23))
fd(2)
color((0.85, 0.14, 0.24))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.71, 0.58, 0.15))
fd(1)
color((0.98, 0.74, 0.0))
fd(11)
color((0.29, 0.34, 0.38))
fd(1)
color((0.1, 0.22, 0.49))
fd(1)
color((0.88, 0.14, 0.23))
fd(1)
color((0.66, 0.16, 0.31))
fd(1)
color((0.27, 0.33, 0.4))
fd(1)
color((0.98, 0.74, 0.0))
fd(3)
color((0.09, 0.22, 0.5))
fd(1)
color((0.84, 0.14, 0.24))
fd(1)
color((0.88, 0.14, 0.23))
fd(7)
color((0.73, 0.15, 0.28))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.98, 0.74, 0.0))
fd(9)
color((0.57, 0.5, 0.23))
fd(1)
color((0.38, 0.19, 0.4))
fd(1)
color((0.88, 0.14, 0.23))
fd(4)
color((0.09, 0.22, 0.5))
fd(1)
color((0.96, 0.73, 0.01))
fd(1)
color((0.98, 0.74, 0.0))
fd(11)
color((0.97, 0.73, 0.0))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.0, 0.0, 0.0))
fd(43)
gt(-128.0,100.5)
fd(43)
color((0.09, 0.22, 0.5))
fd(1)
color((0.98, 0.74, 0.0))
fd(3)
color((0.36, 0.38, 0.35))
fd(1)
color((0.09, 0.22, 0.5))
fd(4)
color((0.48, 0.45, 0.28))
fd(1)
color((0.98, 0.74, 0.0))
fd(4)
color((0.73, 0.59, 0.14))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.88, 0.14, 0.23))
fd(3)
color((0.15, 0.22, 0.48))
fd(1)
color((0.98, 0.74, 0.0))
fd(10)
color((0.98, 0.73, 0.0))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.88, 0.14, 0.23))
fd(7)
color((0.09, 0.22, 0.5))
fd(1)
color((0.88, 0.68, 0.05))
fd(1)
color((0.98, 0.74, 0.0))
fd(3)
color((0.09, 0.22, 0.5))
fd(1)
color((0.88, 0.14, 0.23))
fd(3)
color((0.09, 0.22, 0.5))
fd(1)
color((0.28, 0.33, 0.39))
fd(1)
color((0.98, 0.74, 0.0))
fd(9)
color((0.09, 0.22, 0.5))
fd(5)
color((0.11, 0.24, 0.48))
fd(1)
color((0.35, 0.37, 0.36))
fd(1)
color((0.58, 0.51, 0.22))
fd(1)
color((0.82, 0.64, 0.09))
fd(1)
color((0.98, 0.74, 0.0))
fd(33)
color((0.95, 0.72, 0.02))
fd(1)
color((0.72, 0.58, 0.15))
fd(1)
color((0.48, 0.45, 0.28))
fd(1)
color((0.25, 0.31, 0.41))
fd(1)
color((0.09, 0.22, 0.5))
fd(5)
color((0.48, 0.45, 0.28))
fd(1)
color((0.98, 0.74, 0.0))
fd(8)
color((0.97, 0.73, 0.0))
fd(1)
color((0.09, 0.23, 0.49))
fd(1)
color((0.19, 0.21, 0.47))
fd(1)
color((0.88, 0.14, 0.23))
fd(3)
color((0.09, 0.22, 0.5))
fd(1)
color((0.98, 0.74, 0.0))
fd(3)
color((0.92, 0.7, 0.04))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.88, 0.14, 0.23))
fd(7)
color((0.09, 0.22, 0.5))
fd(1)
color((0.97, 0.73, 0.0))
fd(1)
color((0.98, 0.74, 0.0))
fd(10)
color((0.11, 0.22, 0.49))
fd(1)
color((0.88, 0.14, 0.23))
fd(3)
color((0.09, 0.22, 0.5))
fd(1)
color((0.92, 0.7, 0.04))
fd(1)
color((0.98, 0.74, 0.0))
fd(4)
color((0.44, 0.42, 0.31))
fd(1)
color((0.09, 0.22, 0.5))
fd(4)
color((0.38, 0.39, 0.33))
fd(1)
color((0.98, 0.74, 0.0))
fd(3)
color((0.09, 0.22, 0.5))
fd(1)
color((0.0, 0.0, 0.0))
fd(43)
gt(-128.0,99.5)
fd(43)
color((0.09, 0.22, 0.5))
fd(1)
color((0.98, 0.74, 0.0))
fd(2)
color((0.09, 0.22, 0.5))
fd(1)
color((0.38, 0.47, 0.66))
fd(1)
color((1.0, 1.0, 1.0))
fd(4)
color((0.26, 0.37, 0.59))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.98, 0.73, 0.0))
fd(1)
color((0.98, 0.74, 0.0))
fd(3)
color((0.1, 0.23, 0.49))
fd(1)
color((0.8, 0.15, 0.25))
fd(1)
color((0.88, 0.14, 0.23))
fd(2)
color((0.27, 0.2, 0.44))
fd(1)
color((0.84, 0.66, 0.08))
fd(1)
color((0.98, 0.74, 0.0))
fd(10)
color((0.35, 0.37, 0.35))
fd(1)
color((0.15, 0.22, 0.48))
fd(1)
color((0.88, 0.14, 0.23))
fd(5)
color((0.2, 0.21, 0.46))
fd(1)
color((0.18, 0.27, 0.45))
fd(1)
color((0.98, 0.74, 0.0))
fd(3)
color((0.18, 0.27, 0.45))
fd(1)
color((0.57, 0.17, 0.33))
fd(1)
color((0.88, 0.14, 0.23))
fd(4)
color((0.28, 0.2, 0.43))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.62, 0.53, 0.2))
fd(1)
color((0.98, 0.74, 0.0))
fd(66)
color((0.21, 0.29, 0.43))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.77, 0.15, 0.27))
fd(1)
color((0.88, 0.14, 0.23))
fd(4)
color((0.09, 0.22, 0.5))
fd(1)
color((0.97, 0.73, 0.01))
fd(1)
color((0.98, 0.74, 0.0))
fd(3)
color((0.21, 0.29, 0.43))
fd(1)
color((0.17, 0.21, 0.47))
fd(1)
color((0.88, 0.14, 0.23))
fd(5)
color((0.18, 0.21, 0.47))
fd(1)
color((0.3, 0.35, 0.38))
fd(1)
color((0.98, 0.74, 0.0))
fd(10)
color((0.91, 0.69, 0.04))
fd(1)
color((0.24, 0.2, 0.45))
fd(1)
color((0.88, 0.14, 0.23))
fd(2)
color((0.38, 0.19, 0.4))
fd(1)
color((0.27, 0.33, 0.4))
fd(1)
color((0.98, 0.74, 0.0))
fd(3)
color((0.98, 0.73, 0.0))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.3, 0.4, 0.62))
fd(1)
color((1.0, 1.0, 1.0))
fd(4)
color((0.35, 0.44, 0.64))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.98, 0.74, 0.0))
fd(2)
color((0.09, 0.22, 0.5))
fd(1)
color((0.0, 0.0, 0.0))
fd(43)
gt(-128.0,98.5)
fd(43)
color((0.09, 0.22, 0.5))
fd(1)
color((0.98, 0.74, 0.0))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.85, 0.87, 0.92))
fd(1)
color((1.0, 1.0, 1.0))
fd(6)
color((0.69, 0.74, 0.83))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.98, 0.74, 0.0))
fd(3)
color((0.96, 0.72, 0.02))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.88, 0.14, 0.23))
fd(2)
color((0.73, 0.15, 0.28))
fd(1)
color((0.15, 0.26, 0.46))
fd(1)
color((0.98, 0.74, 0.0))
fd(11)
color((0.22, 0.3, 0.42))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.82, 0.14, 0.25))
fd(1)
color((0.88, 0.14, 0.23))
fd(1)
color((0.54, 0.17, 0.35))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.25, 0.31, 0.41))
fd(1)
color((0.98, 0.74, 0.0))
fd(4)
color((0.09, 0.22, 0.5))
fd(1)
color((0.88, 0.14, 0.23))
fd(6)
color((0.86, 0.14, 0.24))
fd(1)
color((0.09, 0.22, 0.5))
fd(2)
color((0.2, 0.29, 0.44))
fd(1)
color((0.98, 0.74, 0.0))
fd(61)
color((0.89, 0.69, 0.05))
fd(1)
color((0.12, 0.24, 0.48))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.16, 0.21, 0.47))
fd(1)
color((0.88, 0.14, 0.23))
fd(6)
color((0.78, 0.15, 0.27))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.98, 0.74, 0.0))
fd(4)
color((0.29, 0.34, 0.39))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.51, 0.18, 0.36))
fd(1)
color((0.88, 0.14, 0.23))
fd(1)
color((0.83, 0.14, 0.25))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.19, 0.28, 0.44))
fd(1)
color((0.98, 0.74, 0.0))
fd(11)
color((0.16, 0.27, 0.45))
fd(1)
color((0.69, 0.16, 0.29))
fd(1)
color((0.88, 0.14, 0.23))
fd(2)
color((0.09, 0.22, 0.5))
fd(1)
color((0.98, 0.74, 0.0))
fd(4)
color((0.09, 0.22, 0.5))
fd(1)
color((0.74, 0.77, 0.85))
fd(1)
color((1.0, 1.0, 1.0))
fd(6)
color((0.84, 0.86, 0.91))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.98, 0.74, 0.0))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.0, 0.0, 0.0))
fd(43)
gt(-128.0,97.5)
fd(42)
color((0.09, 0.22, 0.5))
fd(1)
color((0.49, 0.45, 0.28))
fd(1)
color((0.78, 0.62, 0.11))
fd(1)
color((0.17, 0.29, 0.54))
fd(1)
color((1.0, 1.0, 1.0))
fd(8)
color((0.1, 0.23, 0.51))
fd(1)
color((0.87, 0.67, 0.06))
fd(1)
color((0.98, 0.74, 0.0))
fd(3)
color((0.09, 0.22, 0.5))
fd(1)
color((0.88, 0.14, 0.23))
fd(3)
color((0.09, 0.22, 0.5))
fd(1)
color((0.97, 0.73, 0.0))
fd(1)
color((0.98, 0.74, 0.0))
fd(11)
color((0.92, 0.7, 0.04))
fd(1)
color((0.21, 0.29, 0.43))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.36, 0.38, 0.35))
fd(1)
color((0.97, 0.73, 0.0))
fd(1)
color((0.98, 0.74, 0.0))
fd(4)
color((0.38, 0.39, 0.34))
fd(1)
color((0.32, 0.2, 0.42))
fd(1)
color((0.88, 0.14, 0.23))
fd(3)
color((0.43, 0.18, 0.38))
fd(1)
color((0.09, 0.22, 0.5))
fd(2)
color((0.15, 0.26, 0.46))
fd(1)
color((0.69, 0.57, 0.16))
fd(1)
color((0.98, 0.74, 0.0))
fd(66)
color((0.53, 0.47, 0.25))
fd(1)
color((0.09, 0.22, 0.5))
fd(2)
color((0.1, 0.22, 0.49))
fd(1)
color((0.58, 0.17, 0.33))
fd(1)
color((0.88, 0.14, 0.23))
fd(3)
color((0.09, 0.22, 0.5))
fd(1)
color((0.92, 0.7, 0.04))
fd(1)
color((0.98, 0.74, 0.0))
fd(4)
color((0.98, 0.73, 0.0))
fd(1)
color((0.37, 0.39, 0.34))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.2, 0.29, 0.44))
fd(1)
color((0.89, 0.69, 0.05))
fd(1)
color((0.98, 0.74, 0.0))
fd(11)
color((0.97, 0.73, 0.0))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.88, 0.14, 0.23))
fd(2)
color((0.87, 0.14, 0.23))
fd(1)
color((0.15, 0.25, 0.47))
fd(1)
color((0.98, 0.74, 0.0))
fd(3)
color((0.82, 0.65, 0.09))
fd(1)
color((0.1, 0.23, 0.51))
fd(1)
color((1.0, 1.0, 1.0))
fd(8)
color((0.15, 0.27, 0.53))
fd(1)
color((0.8, 0.63, 0.1))
fd(1)
color((0.46, 0.44, 0.29))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.0, 0.0, 0.0))
fd(42)
gt(-128.0,96.5)
fd(42)
color((0.09, 0.22, 0.5))
fd(1)
color((0.98, 0.74, 0.0))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.95, 0.96, 0.97))
fd(1)
color((1.0, 1.0, 1.0))
fd(8)
color((0.97, 0.97, 0.98))
fd(1)
color((0.21, 0.29, 0.43))
fd(1)
color((0.98, 0.74, 0.0))
fd(3)
color((0.09, 0.22, 0.5))
fd(1)
color((0.88, 0.14, 0.23))
fd(3)
color((0.75, 0.15, 0.27))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.98, 0.74, 0.0))
fd(20)
color((0.09, 0.22, 0.5))
fd(1)
color((0.68, 0.16, 0.3))
fd(1)
color((0.09, 0.22, 0.5))
fd(2)
color((0.1, 0.23, 0.49))
fd(1)
color((0.48, 0.45, 0.28))
fd(1)
color((0.98, 0.74, 0.0))
fd(74)
color((0.24, 0.31, 0.42))
fd(1)
color((0.09, 0.22, 0.5))
fd(2)
color((0.13, 0.22, 0.49))
fd(1)
color((0.79, 0.15, 0.26))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.98, 0.74, 0.0))
fd(20)
color((0.09, 0.22, 0.5))
fd(1)
color((0.73, 0.15, 0.28))
fd(1)
color((0.88, 0.14, 0.23))
fd(2)
color((0.5, 0.18, 0.36))
fd(1)
color((0.54, 0.48, 0.25))
fd(1)
color((0.98, 0.74, 0.0))
fd(3)
color((0.11, 0.24, 0.48))
fd(1)
color((1.0, 1.0, 1.0))
fd(9)
color((0.94, 0.95, 0.96))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.98, 0.74, 0.0))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.0, 0.0, 0.0))
fd(42)
gt(-128.0,95.5)
fd(42)
color((0.09, 0.22, 0.5))
fd(1)
color((0.98, 0.74, 0.0))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((1.0, 1.0, 1.0))
fd(10)
color((0.09, 0.22, 0.5))
fd(1)
color((0.98, 0.74, 0.0))
fd(3)
color((0.09, 0.22, 0.5))
fd(1)
color((0.88, 0.14, 0.23))
fd(4)
color((0.09, 0.22, 0.5))
fd(1)
color((0.98, 0.74, 0.0))
fd(5)
color((0.78, 0.62, 0.11))
fd(1)
color((0.12, 0.24, 0.48))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.57, 0.5, 0.23))
fd(1)
color((0.98, 0.74, 0.0))
fd(10)
color((0.11, 0.24, 0.49))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.31, 0.35, 0.37))
fd(1)
color((0.9, 0.69, 0.05))
fd(1)
color((0.98, 0.74, 0.0))
fd(80)
color((0.82, 0.65, 0.09))
fd(1)
color((0.1, 0.22, 0.49))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.58, 0.5, 0.23))
fd(1)
color((0.98, 0.74, 0.0))
fd(10)
color((0.53, 0.48, 0.25))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.13, 0.24, 0.48))
fd(1)
color((0.81, 0.64, 0.1))
fd(1)
color((0.98, 0.74, 0.0))
fd(5)
color((0.09, 0.22, 0.5))
fd(1)
color((0.88, 0.14, 0.23))
fd(3)
color((0.63, 0.16, 0.32))
fd(1)
color((0.43, 0.42, 0.31))
fd(1)
color((0.98, 0.74, 0.0))
fd(3)
color((0.09, 0.22, 0.5))
fd(1)
color((1.0, 1.0, 1.0))
fd(10)
color((0.09, 0.22, 0.5))
fd(1)
color((0.98, 0.74, 0.0))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.0, 0.0, 0.0))
fd(42)
gt(-128.0,94.5)
fd(42)
color((0.09, 0.22, 0.5))
fd(1)
color((0.98, 0.74, 0.0))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((1.0, 1.0, 1.0))
fd(10)
color((0.09, 0.22, 0.5))
fd(1)
color((0.98, 0.74, 0.0))
fd(3)
color((0.09, 0.22, 0.5))
fd(1)
color((0.88, 0.14, 0.23))
fd(3)
color((0.81, 0.15, 0.25))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.98, 0.74, 0.0))
fd(4)
color((0.09, 0.22, 0.5))
fd(1)
color((0.22, 0.21, 0.46))
fd(1)
color((0.83, 0.14, 0.25))
fd(1)
color((0.88, 0.14, 0.23))
fd(1)
color((0.19, 0.21, 0.46))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.41, 0.41, 0.32))
fd(1)
color((0.98, 0.74, 0.0))
fd(39)
color((0.91, 0.7, 0.04))
fd(1)
color((0.77, 0.62, 0.12))
fd(1)
color((0.62, 0.53, 0.2))
fd(1)
color((0.51, 0.46, 0.27))
fd(1)
color((0.38, 0.39, 0.34))
fd(1)
color((0.26, 0.32, 0.4))
fd(1)
color((0.14, 0.25, 0.47))
fd(1)
color((0.09, 0.22, 0.5))
fd(14)
color((0.2, 0.29, 0.44))
fd(1)
color((0.32, 0.35, 0.37))
fd(1)
color((0.43, 0.42, 0.31))
fd(1)
color((0.56, 0.49, 0.24))
fd(1)
color((0.7, 0.58, 0.15))
fd(1)
color((0.85, 0.66, 0.07))
fd(1)
color((0.98, 0.73, 0.0))
fd(1)
color((0.98, 0.74, 0.0))
fd(36)
color((0.98, 0.73, 0.0))
fd(1)
color((0.4, 0.4, 0.33))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.2, 0.21, 0.46))
fd(1)
color((0.88, 0.14, 0.23))
fd(1)
color((0.82, 0.14, 0.25))
fd(1)
color((0.2, 0.21, 0.46))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.98, 0.74, 0.0))
fd(4)
color((0.09, 0.22, 0.5))
fd(1)
color((0.8, 0.15, 0.26))
fd(1)
color((0.88, 0.14, 0.23))
fd(3)
color((0.09, 0.22, 0.5))
fd(1)
color((0.98, 0.74, 0.0))
fd(3)
color((0.09, 0.22, 0.5))
fd(1)
color((1.0, 1.0, 1.0))
fd(10)
color((0.09, 0.22, 0.5))
fd(1)
color((0.98, 0.74, 0.0))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.0, 0.0, 0.0))
fd(42)
gt(-128.0,93.5)
fd(42)
color((0.09, 0.22, 0.5))
fd(1)
color((0.76, 0.6, 0.13))
fd(1)
color((0.21, 0.29, 0.43))
fd(1)
color((0.72, 0.76, 0.85))
fd(1)
color((1.0, 1.0, 1.0))
fd(8)
color((0.26, 0.37, 0.6))
fd(1)
color((0.61, 0.52, 0.21))
fd(1)
color((0.98, 0.74, 0.0))
fd(2)
color((0.76, 0.61, 0.12))
fd(1)
color((0.17, 0.22, 0.47))
fd(1)
color((0.88, 0.14, 0.23))
fd(3)
color((0.09, 0.22, 0.5))
fd(1)
color((0.91, 0.7, 0.04))
fd(1)
color((0.98, 0.74, 0.0))
fd(3)
color((0.8, 0.64, 0.1))
fd(1)
color((0.2, 0.21, 0.46))
fd(1)
color((0.88, 0.14, 0.23))
fd(5)
color((0.47, 0.18, 0.37))
fd(1)
color((0.09, 0.22, 0.5))
fd(3)
color((0.76, 0.61, 0.13))
fd(1)
color((0.98, 0.74, 0.0))
fd(25)
color((0.97, 0.73, 0.0))
fd(1)
color((0.92, 0.7, 0.03))
fd(1)
color((0.69, 0.56, 0.17))
fd(1)
color((0.18, 0.28, 0.44))
fd(1)
color((0.09, 0.22, 0.5))
fd(40)
color((0.49, 0.45, 0.28))
fd(1)
color((0.89, 0.69, 0.05))
fd(1)
color((0.95, 0.72, 0.02))
fd(1)
color((0.98, 0.74, 0.0))
fd(24)
color((0.74, 0.6, 0.14))
fd(1)
color((0.09, 0.22, 0.5))
fd(3)
color((0.48, 0.18, 0.36))
fd(1)
color((0.88, 0.14, 0.23))
fd(5)
color((0.18, 0.21, 0.47))
fd(1)
color((0.88, 0.68, 0.06))
fd(1)
color((0.98, 0.74, 0.0))
fd(3)
color((0.92, 0.7, 0.04))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.88, 0.14, 0.23))
fd(3)
color((0.09, 0.22, 0.5))
fd(1)
color((0.98, 0.73, 0.0))
fd(1)
color((0.98, 0.74, 0.0))
fd(2)
color((0.52, 0.47, 0.26))
fd(1)
color((0.41, 0.5, 0.67))
fd(1)
color((1.0, 1.0, 1.0))
fd(8)
color((0.68, 0.73, 0.82))
fd(1)
color((0.26, 0.32, 0.4))
fd(1)
color((0.76, 0.61, 0.13))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.0, 0.0, 0.0))
fd(42)
gt(-128.0,92.5)
fd(42)
color((0.08, 0.21, 0.48))
fd(1)
color((0.11, 0.24, 0.49))
fd(1)
color((0.97, 0.73, 0.0))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((1.0, 1.0, 1.0))
fd(8)
color((0.09, 0.22, 0.5))
fd(1)
color((0.98, 0.74, 0.0))
fd(3)
color((0.09, 0.22, 0.5))
fd(1)
color((0.88, 0.14, 0.23))
fd(3)
color((0.67, 0.16, 0.3))
fd(1)
color((0.11, 0.23, 0.49))
fd(1)
color((0.98, 0.74, 0.0))
fd(4)
color((0.1, 0.23, 0.49))
fd(1)
color((0.84, 0.14, 0.24))
fd(1)
color((0.88, 0.14, 0.23))
fd(3)
color((0.86, 0.14, 0.24))
fd(1)
color((0.15, 0.22, 0.48))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.22, 0.29, 0.43))
fd(1)
color((0.95, 0.72, 0.02))
fd(1)
color((0.98, 0.74, 0.0))
fd(22)
color((0.71, 0.58, 0.16))
fd(1)
color((0.26, 0.32, 0.4))
fd(1)
color((0.1, 0.23, 0.49))
fd(1)
color((0.09, 0.22, 0.5))
fd(13)
color((0.26, 0.32, 0.4))
fd(1)
color((0.51, 0.47, 0.26))
fd(1)
color((0.7, 0.58, 0.16))
fd(1)
color((0.8, 0.63, 0.1))
fd(1)
color((0.85, 0.66, 0.07))
fd(1)
color((0.91, 0.69, 0.04))
fd(1)
color((0.95, 0.72, 0.02))
fd(1)
color((0.97, 0.73, 0.01))
fd(1)
color((0.98, 0.74, 0.0))
fd(10)
color((0.96, 0.73, 0.01))
fd(1)
color((0.93, 0.71, 0.03))
fd(1)
color((0.88, 0.68, 0.06))
fd(1)
color((0.83, 0.65, 0.09))
fd(1)
color((0.78, 0.62, 0.11))
fd(1)
color((0.62, 0.53, 0.2))
fd(1)
color((0.38, 0.39, 0.34))
fd(1)
color((0.13, 0.25, 0.47))
fd(1)
color((0.09, 0.22, 0.5))
fd(13)
color((0.24, 0.31, 0.42))
fd(1)
color((0.62, 0.53, 0.2))
fd(1)
color((0.98, 0.74, 0.0))
fd(21)
color((0.95, 0.72, 0.02))
fd(1)
color((0.21, 0.29, 0.43))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.15, 0.22, 0.48))
fd(1)
color((0.86, 0.14, 0.24))
fd(1)
color((0.88, 0.14, 0.23))
fd(3)
color((0.8, 0.15, 0.25))
fd(1)
color((0.11, 0.24, 0.49))
fd(1)
color((0.98, 0.74, 0.0))
fd(4)
color((0.11, 0.24, 0.49))
fd(1)
color((0.66, 0.16, 0.3))
fd(1)
color((0.88, 0.14, 0.23))
fd(2)
color((0.7, 0.15, 0.29))
fd(1)
color((0.11, 0.23, 0.49))
fd(1)
color((0.98, 0.74, 0.0))
fd(3)
color((0.09, 0.22, 0.5))
fd(1)
color((1.0, 1.0, 1.0))
fd(8)
color((0.09, 0.22, 0.5))
fd(1)
color((0.98, 0.73, 0.0))
fd(1)
color((0.12, 0.24, 0.48))
fd(1)
color((0.07, 0.2, 0.47))
fd(1)
color((0.0, 0.0, 0.0))
fd(42)
gt(-128.0,91.5)
fd(43)
color((0.09, 0.22, 0.5))
fd(1)
color((0.98, 0.74, 0.0))
fd(1)
color((0.53, 0.48, 0.25))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((1.0, 1.0, 1.0))
fd(6)
color((0.09, 0.22, 0.5))
fd(1)
color((0.82, 0.64, 0.09))
fd(1)
color((0.98, 0.74, 0.0))
fd(2)
color((0.09, 0.22, 0.5))
fd(1)
color((0.47, 0.18, 0.37))
fd(1)
color((0.88, 0.14, 0.23))
fd(2)
color((0.81, 0.14, 0.25))
fd(1)
color((0.09, 0.22, 0.5))
fd(1)
color((0.98, 0.74, 0.0))
fd(4)
color((0.53, 0.48, 0.25))
fd(1)
color((0.14, 0.22, 0.48))
fd(1)
color((0.88, 0.14, 0.23))
fd(2)
color((0.32, | |
import math
from copy import copy
# Version of the register data-structure format handled by this module.
regDataStructsVer = "1.6"
class RegBank(object):
    """A named register bank covering a contiguous address window.

    The window is described by a *specifier*: a hex ("0x...") or binary
    ("0b...") address pattern whose lowest digits may be "X" (don't
    care).  The don't-care digits span the bank's address range; all
    higher digits are fixed.
    """

    def __init__(self, name, specifier):
        self.name = name
        self.specifier = specifier
        self.registers = []
        # Count the trailing don't-care ("X") digits of the specifier.
        nX = 0
        while specifier[-(nX + 1)] == "X":
            nX = nX + 1
        if nX == 0:
            # No trailing X: either no X at all (a fixed address) or an
            # X stuck in the middle, which is not allowed.
            if "X" not in specifier:
                raise ValueError("Bank specifier "+specifier+" is a fixed address!")
            raise ValueError("Bank specifier can only have X for lowest bits. Specifier "+specifier+" violates this constraint.")
        prefix = specifier[:-nX]
        if "X" in prefix:
            raise ValueError("Bank specifier can only have X for lowest bits. Specifier "+specifier+" violates this constraint.")
        if "0x" in specifier:
            # Hexadecimal notation: each X digit spans 0..F.
            self.addrL = int(prefix + "0" * nX, 16)
            self.addrH = int(prefix + "F" * nX, 16)
        elif "0b" in specifier:
            # Binary notation: each X digit spans 0..1.
            self.addrL = int(prefix + "0" * nX, 2)
            self.addrH = int(prefix + "1" * nX, 2)
        else:
            raise ValueError("Invalid bank specifier : "+specifier)

    def __repr__(self):
        # Script-style one-liner.
        return "REGBANK "+self.getName()+" "+self.getSpecifier()+"\n"

    def __str__(self):
        # Human-readable dump: bank header plus the register-name list.
        names = ", ".join([reg.name for reg in self.registers])
        header = "Register bank : " + self.name + " " + self.specifier + " ("+hex(self.getAddrH())+","+hex(self.getAddrL())+")\n"
        return header + "Registers=["+names+"]"

    def getName(self):
        # Bank name as given at construction.
        return self.name

    def getSpecifier(self):
        # Raw specifier string as given at construction.
        return self.specifier

    def getAddrL(self):
        # Lowest address covered by this bank.
        return self.addrL

    def getAddrH(self):
        # Highest address covered by this bank.
        return self.addrH

    def isInBank(self, addr):
        # True when addr falls inside [addrL, addrH].
        return self.addrL <= addr <= self.addrH

    def addRegister(self, register):
        # The register must fall inside the bank's window and must not
        # collide with an already-registered address.
        if not self.isInBank(register.addr):
            raise ValueError("Register with address "+hex(register.addr)+" cannot be added to bank "+self.name+" ("+hex(self.getAddrH())+","+hex(self.getAddrL())+")")
        for existing in self.registers:
            if existing.addr == register.addr:
                raise ValueError("Register "+existing.name+" is assigned to the address taken by "+register.name)
        self.registers.append(register)

    def getRegister(self, regName):
        # Look a register up by name; raise if it is not in this bank.
        for reg in self.registers:
            if reg.name == regName:
                return reg
        raise ValueError("Register "+regName+" does not exist in bank "+self.name)

    def hasRegister(self, regName):
        # True when a register of that name lives in this bank.
        return any(reg.name == regName for reg in self.registers)

    def getRegs(self):
        # All registers currently in the bank.
        return self.registers

    def getSpecNBits(self):
        # Number of fixed (non-X) bits out of a 15-bit register address.
        span = self.getAddrH() - self.getAddrL() + 1
        nAddrBits = int(math.log(span)/math.log(2))
        return int(15 - nAddrBits)

    def getSpecBits(self):
        # 15-bit binary form of addrL, truncated to the fixed bits only.
        return bin(self.getAddrL())[2:].zfill(15)[0:self.getSpecNBits()]
class Register(object):
    """One 16-bit hardware register composed of named bit fields.

    A Register parses its address from a hex/binary string, keeps a list
    of BitField objects, tracks shadow relationships, and optionally
    talks to a chip object.  When the chip's immediate mode is enabled
    (see getImmediateMode), item reads refresh from the chip over SPI
    and item writes are pushed to the chip over SPI.
    """
    @staticmethod
    def intToHex(val, upperCase=True):
        # Format val as a 0x-prefixed hex string zero-padded to 4 digits.
        hexVal = hex(val)[2:]
        while len(hexVal)<4:
            hexVal = "0"+hexVal
        if upperCase:
            # Only the digits are uppercased; the 0x prefix is added after.
            hexVal = hexVal.upper()
        hexVal = "0x"+hexVal
        return hexVal
    # Instance methods
    def __init__(self, regName, regAddr, chip=None):
        # regAddr must be a string in "0x..." (hex) or "0b..." (binary) form.
        if " " in regName:
            raise ValueError("Invalid register name: "+regName+" (name contains space)")
        self.regAddr = regAddr
        self.name = regName
        if "0x" in regAddr:
            addr = int(regAddr,16)
        elif "0b" in regAddr:
            addr = int(regAddr, 2)
        else:
            raise ValueError("Invalid register address "+str(regAddr))
        self.addr = addr
        # Free-form comment lines attached via addComment().
        self.comment = []
        # BitField objects; populated through addBitField().
        self.bitFields = []
        self.shadowReg = None # Register which shadows the current register
        self.shadowedRegs = [] # Registers which are shadowed by current register
        self._chip = chip
    @property
    def chip(self):
        # Owning chip object (may be None until assigned).
        return self._chip
    @chip.setter
    def chip(self, val):
        self._chip = val
    @property
    def SPIwriteFn(self):
        # Delegated to the chip; fails with AttributeError when chip is None.
        return self.chip.SPIwriteFn
    @property
    def SPIreadFn(self):
        # Delegated to the chip; fails with AttributeError when chip is None.
        return self.chip.SPIreadFn
    @property
    def SPIImmediate(self):
        # Alias for the chip's immediate-mode flag.
        return self.getImmediateMode()
    def getImmediateMode(self):
        # True when accesses should hit the chip immediately over SPI.
        return self.chip.SPIImmediate
    def __repr__(self):
        # repr intentionally returns the human-readable form; the
        # script-style dump lives in __REPR__ (note the unusual casing).
        return self.__str__()
    def __REPR__(self):
        # Script-style dump: "REGISTER <name> <hexAddr> ... ENDREGISTER".
        register = self
        regName = self.name
        regAddr = self.regAddr
        hexAddr = self.intToHex(self.addr)
        # Print register
        retVal = "REGISTER "+regName+" "+hexAddr+"\n"
        if register.isShadowed():
            # NOTE(review): shadowReg is string-concatenated here, so it
            # appears to hold the shadowing register's *name* -- confirm.
            retVal += " SHADOW=" + self.getShadowReg() + "\n"
        for bitField in register.getBitFields():
            retVal += bitField.__repr__()
        retVal += "ENDREGISTER\n"
        return retVal
    def help(self):
        # Print the script-style representation (Python 2 print statement).
        print self.__REPR__()
    def __str__(self,maxFieldNameWidth=20):
        # Human-readable dump: address, shadow info, one line per bit
        # field with its binary/hex/decimal value, the combined register
        # value, and any attached comments.  Refreshes from the chip
        # first when immediate mode is on.
        self.refresh()
        hexAddr = self.intToHex(self.addr)
        retVal = "Register : " + self.name + " "+hexAddr+"\n"
        if self.isShadowed():
            retVal = retVal + "Shadowed by : " + self.getShadowReg() + "\n"
        if self.isShadowing():
            retVal = retVal + "Shadows registers : "
            for sreg in self.getShadowedRegs():
                retVal = retVal+sreg+" "
            retVal = retVal+"\n"
        flds = ""
        bitRepr = ""
        bitReprAll = ["0"]*16
        # Determine max bitfield width
        for field in self.bitFields:
            if maxFieldNameWidth<len(field.name):
                maxFieldNameWidth = len(field.name)+1
        for field in self.bitFields:
            # evaluateBinRepr() yields a 16-char string with spaces in
            # positions outside the field -- TODO confirm against BitField.
            bRep = field.evaluateBinRepr()
            bitRepr = bitRepr + field.name + " "*int(maxFieldNameWidth-len(field.name)) + bRep + "\t(" + self.intToHex(int("0b"+bRep.strip(),2)) + " << "+str(field.getPosL())+")\t("+str(int("0b"+bRep.strip(),2)) + " << "+str(field.getPosL())+")\n"
            for i in range(0,16):
                if bRep[i]!=" ":
                    # Merge this field's bits into the whole-register view.
                    bitReprAll[i] = bRep[i]
            if flds!="":
                flds = flds+", "+field.name
            else:
                flds = field.name
        #retVal = retVal+"Fields=["+flds+"]\n"
        retVal = retVal + bitRepr
        bRep = ""
        for i in range(0,16):
            if bitReprAll[i]=="0":
                bRep = bRep + "0"
            else:
                bRep = bRep + "1"
        retVal = retVal + "Register value " + " "*int(maxFieldNameWidth-len("Register value "))+ bRep + "\t("+self.intToHex(int("0b"+bRep,2))+")\n"
        for comment in self.getComments():
            retVal=retVal+"#! " + comment.rstrip()+"\n"
        return retVal
    def getScriptRepr(self):
        # One-line script form: "<name> FIELD=0b... FIELD=0b... ".
        self.refresh()
        retVal = self.name + " "
        for field in self.bitFields:
            retVal += field.getName() + "=0b"+field.evaluateBinRepr().strip() + " "
        # NOTE(review): str.strip() returns a new string; this call has
        # no effect and a trailing space remains -- confirm intended.
        retVal.strip()
        return retVal
    def addBitField(self, bitField):
        # Check if bitfield collides with existing ones
        for field in self.bitFields:
            if field.isInField(bitField.getPosH()) or field.isInField(bitField.getPosL()):
                raise ValueError("Bit field "+bitField.name+" position "+bitField.position+" collides with "+field.name+" position "+field.position)
        # All OK, add bitfield to register
        self.bitFields.append(bitField)
    def getBitFieldByName(self, bitFieldName):
        # Look a bit field up by name; raise if not present.
        for field in self.bitFields:
            if (bitFieldName == field.name):
                return field
        raise ValueError("Bit field "+bitFieldName+" does not exist in register "+self.name)
    def getBitFields(self):
        # All bit fields of this register.
        return self.bitFields
    def getName(self):
        # Register name.
        return self.name
    def addComment(self, commentLine):
        # Attach a free-form comment line (shown by __str__).
        self.comment.append(commentLine)
    def getComments(self):
        # All attached comment lines.
        return self.comment
    def getAddrBits(self):
        # Register address as a zero-padded 15-bit binary string.
        bits = bin(self.addr)
        bits = bits[2:] # get rid of 0b
        # Make sure that bits has 15 digits
        while len(bits)<15:
            bits = "0"+bits
        return bits
    def getAddress(self):
        # Numeric register address.
        return self.addr
    def getValue(self, noUpdate=False):
        # Combined register value: OR of all bit-field values.
        # Evaluate bitfields
        if not noUpdate:
            self.refresh()
        val = 0
        for field in self.getBitFields():
            val = val | field.evaluate()
        return val
    def setValue(self, val, noUpdate=False):
        # Distribute val into the bit fields; push to chip unless noUpdate.
        # Write value to bitfields
        for field in self.getBitFields():
            field.setValueFromReg(val)
        if not noUpdate:
            self.immediateWrite()
    def refresh(self):
        # Read the value from chip if immediate mode is enabled
        if self.getImmediateMode():
            if self.SPIreadFn==None:
                raise AttributeError("SPIreadFn must be set to use immediate mode")
            else:
                addr = self.getAddress()
                # SPIreadFn takes a list of addresses and returns a list
                # of values -- TODO confirm against the chip implementation.
                val = self.SPIreadFn([addr])[0]
                self.setValue(val, noUpdate=True)
    def immediateWrite(self):
        # Check if immediate mode is enabled
        if self.getImmediateMode():
            # Immediate mode is enabled, write the new value
            if self.SPIwriteFn==None:
                raise AttributeError("SPIwriteFn must be set to use immediate mode")
            else:
                addr = self.getAddress()
                # noUpdate avoids re-reading the chip mid-write.
                val = self.getValue(noUpdate=True)
                self.SPIwriteFn([(addr, val)])
    def getValueBin(self):
        # Register value as a zero-padded 16-bit binary string.
        val = self.getValue()
        valB = bin(val)
        valB = valB[2:]
        while len(valB) < 16:
            valB = "0"+valB
        return valB
    def getReadValue(self):
        # Combined value of the readable bit fields only.
        # Ignore write-only fields
        val = 0
        for field in self.getBitFields():
            if (field.mode == "R") or (field.mode == "RI") or (field.mode == "RW") or (field.mode == "RWI"):
                val = val | field.evaluate()
        return val
    def getReadValueBin(self):
        # Readable-fields value as a zero-padded 16-bit binary string.
        val = self.getReadValue()
        valB = bin(val)
        valB = valB[2:]
        while len(valB) < 16:
            valB = "0"+valB
        return valB
    def getShadowReg(self):
        # Return the shadow register
        return self.shadowReg
    def getShadowedRegs(self):
        # Return the list of shadowed registers
        return self.shadowedRegs
    def addShadowedReg(self, regName):
        # Record that this register shadows regName.
        self.shadowedRegs.append(regName)
    def clearShadowedRegs(self):
        # Forget all shadowed registers.
        self.shadowedRegs = []
    def isShadowed(self):
        # Determine if this register is shadowed by other register
        if self.shadowReg == None:
            return False
        return True
    def isShadowing(self):
        # Determine if this register shadows other registers
        if len(self.shadowedRegs) == 0:
            return False
        return True
    def __len__(self):
        # Return the number of bitfields
        return len(self.bitFields)
    def __getitem__(self, key):
        # reg["FIELD"] -> field value (refreshes from chip first when
        # immediate mode is on).
        self.refresh()
        # Get the bitfield value
        bitField = self.getBitFieldByName(key)
        return bitField.getValue()
    def __setitem__(self, key, value):
        # reg["FIELD"] = v sets one field; reg[""] = v sets the whole
        # register.  value may be an int or a "0b..."/"0x..." string.
        # Set the bitfield value
        if key=="":
            self.setValue(value)
        else:
            bitField = self.getBitFieldByName(key)
            if isinstance(value, int):
                val = value
            elif "0b" in value:
                val = int(value,2)
            elif "0x" in value:
                val = int(value,16)
            else:
                raise ValueError("Unknown radix in value "+str(value))
            bitField.setValue(val)
            self.immediateWrite()
class BitField(object):
def __init__(self, name, position, defValue, mode):
self.name | |
next_page_token = proto.Field(
proto.STRING,
number=2,
)
class GetSinkRequest(proto.Message):
    r"""The parameters to ``GetSink``.
    Attributes:
        sink_name (str):
            Required. The resource name of the sink:
            ::
            "projects/[PROJECT_ID]/sinks/[SINK_ID]"
            "organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]"
            "billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]"
            "folders/[FOLDER_ID]/sinks/[SINK_ID]"
            For example:
            ``"projects/my-project/sinks/my-sink"``
    """
    # Proto field tag 1 (string).
    sink_name = proto.Field(
        proto.STRING,
        number=1,
    )
class CreateSinkRequest(proto.Message):
    r"""The parameters to ``CreateSink``.
    Attributes:
        parent (str):
            Required. The resource in which to create the sink:
            ::
            "projects/[PROJECT_ID]"
            "organizations/[ORGANIZATION_ID]"
            "billingAccounts/[BILLING_ACCOUNT_ID]"
            "folders/[FOLDER_ID]"
            For examples:
            ``"projects/my-project"`` ``"organizations/123456789"``
        sink (googlecloudsdk.third_party.gapic_clients.logging_v2.types.LogSink):
            Required. The new sink, whose ``name`` parameter is a sink
            identifier that is not already in use.
        unique_writer_identity (bool):
            Optional. Determines the kind of IAM identity returned as
            ``writer_identity`` in the new sink. If this value is
            omitted or set to false, and if the sink's parent is a
            project, then the value returned as ``writer_identity`` is
            the same group or service account used by Cloud Logging
            before the addition of writer identities to this API. The
            sink's destination must be in the same project as the sink
            itself.
            If this field is set to true, or if the sink is owned by a
            non-project resource such as an organization, then the value
            of ``writer_identity`` will be a unique service account used
            only for exports from the new sink. For more information,
            see ``writer_identity`` in
            [LogSink][google.logging.v2.LogSink].
    """
    # Proto field tag 1 (string).
    parent = proto.Field(
        proto.STRING,
        number=1,
    )
    # Proto field tag 2 (message LogSink).
    sink = proto.Field(
        proto.MESSAGE,
        number=2,
        message='LogSink',
    )
    # Proto field tag 3 (bool).
    unique_writer_identity = proto.Field(
        proto.BOOL,
        number=3,
    )
class UpdateSinkRequest(proto.Message):
    r"""The parameters to ``UpdateSink``.
    Attributes:
        sink_name (str):
            Required. The full resource name of the sink to update,
            including the parent resource and the sink identifier:
            ::
                "projects/[PROJECT_ID]/sinks/[SINK_ID]"
                "organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]"
                "billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]"
                "folders/[FOLDER_ID]/sinks/[SINK_ID]"
            For example:
            ``"projects/my-project/sinks/my-sink"``
        sink (googlecloudsdk.third_party.gapic_clients.logging_v2.types.LogSink):
            Required. The updated sink, whose name is the same
            identifier that appears as part of ``sink_name``.
        unique_writer_identity (bool):
            Optional. See
            [sinks.create][google.logging.v2.ConfigServiceV2.CreateSink]
            for a description of this field. When updating a sink, the
            effect of this field on the value of ``writer_identity`` in
            the updated sink depends on both the old and new values of
            this field:
            - If the old and new values of this field are both false or
              both true, then there is no change to the sink's
              ``writer_identity``.
            - If the old value is false and the new value is true, then
              ``writer_identity`` is changed to a unique service
              account.
            - It is an error if the old value is true and the new value
              is set to false or defaulted to false.
        update_mask (google.protobuf.field_mask_pb2.FieldMask):
            Optional. Field mask that specifies the fields in ``sink``
            that need an update. A sink field will be overwritten if,
            and only if, it is in the update mask. ``name`` and output
            only fields cannot be updated.
            An empty ``updateMask`` is temporarily treated as using the
            following mask for backwards compatibility purposes:
            ``destination,filter,includeChildren``
            At some point in the future, behavior will be removed and
            specifying an empty ``updateMask`` will be an error.
            For a detailed ``FieldMask`` definition, see
            https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMask
            For example: ``updateMask=filter``
    """
    # Wire field 1 (string): full resource name of the sink being updated.
    sink_name = proto.Field(
        proto.STRING,
        number=1,
    )
    # Wire field 2 (message): the replacement LogSink values.
    sink = proto.Field(
        proto.MESSAGE,
        number=2,
        message='LogSink',
    )
    # Wire field 3 (bool): see docstring for update-time semantics.
    unique_writer_identity = proto.Field(
        proto.BOOL,
        number=3,
    )
    # Wire field 4 (message): FieldMask selecting which sink fields to touch.
    update_mask = proto.Field(
        proto.MESSAGE,
        number=4,
        message=field_mask_pb2.FieldMask,
    )
class DeleteSinkRequest(proto.Message):
    r"""The parameters to ``DeleteSink``.
    Attributes:
        sink_name (str):
            Required. The full resource name of the sink to delete,
            including the parent resource and the sink identifier:
            ::
                "projects/[PROJECT_ID]/sinks/[SINK_ID]"
                "organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]"
                "billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]"
                "folders/[FOLDER_ID]/sinks/[SINK_ID]"
            For example:
            ``"projects/my-project/sinks/my-sink"``
    """
    # Wire field 1 (string): full resource name of the sink to delete.
    sink_name = proto.Field(
        proto.STRING,
        number=1,
    )
class LogExclusion(proto.Message):
    r"""Specifies a set of log entries that are filtered out by a sink. If
    your Google Cloud resource receives a large volume of log entries,
    you can use exclusions to reduce your chargeable logs. Note that
    exclusions on organization-level and folder-level sinks don't apply
    to child resources. Note also that you cannot modify the \_Required
    sink or exclude logs from it.
    Attributes:
        name (str):
            Required. A client-assigned identifier, such as
            ``"load-balancer-exclusion"``. Identifiers are limited to
            100 characters and can include only letters, digits,
            underscores, hyphens, and periods. First character has to be
            alphanumeric.
        description (str):
            Optional. A description of this exclusion.
        filter (str):
            Required. An `advanced logs
            filter <https://cloud.google.com/logging/docs/view/advanced-queries>`__
            that matches the log entries to be excluded. By using the
            `sample
            function <https://cloud.google.com/logging/docs/view/advanced-queries#sample>`__,
            you can exclude less than 100% of the matching log entries.
            For example, the following query matches 99% of low-severity
            log entries from Google Cloud Storage buckets:
            ``resource.type=gcs_bucket severity<ERROR sample(insertId, 0.99)``
        disabled (bool):
            Optional. If set to True, then this exclusion is disabled
            and it does not exclude any log entries. You can [update an
            exclusion][google.logging.v2.ConfigServiceV2.UpdateExclusion]
            to change the value of this field.
        create_time (google.protobuf.timestamp_pb2.Timestamp):
            Output only. The creation timestamp of the
            exclusion.
            This field may not be present for older
            exclusions.
        update_time (google.protobuf.timestamp_pb2.Timestamp):
            Output only. The last update timestamp of the
            exclusion.
            This field may not be present for older
            exclusions.
    """
    # Wire field 1 (string): client-assigned exclusion identifier.
    name = proto.Field(
        proto.STRING,
        number=1,
    )
    # Wire field 2 (string): free-form description.
    description = proto.Field(
        proto.STRING,
        number=2,
    )
    # Wire field 3 (string): advanced-logs filter selecting entries to drop.
    # (Intentionally shadows the builtin `filter` — the name is fixed by the API.)
    filter = proto.Field(
        proto.STRING,
        number=3,
    )
    # Wire field 4 (bool): True disables the exclusion without deleting it.
    disabled = proto.Field(
        proto.BOOL,
        number=4,
    )
    # Wire field 5 (Timestamp): output-only creation time.
    create_time = proto.Field(
        proto.MESSAGE,
        number=5,
        message=timestamp_pb2.Timestamp,
    )
    # Wire field 6 (Timestamp): output-only last-update time.
    update_time = proto.Field(
        proto.MESSAGE,
        number=6,
        message=timestamp_pb2.Timestamp,
    )
class ListExclusionsRequest(proto.Message):
    r"""The parameters to ``ListExclusions``.
    Attributes:
        parent (str):
            Required. The parent resource whose exclusions are to be
            listed.
            ::
                "projects/[PROJECT_ID]"
                "organizations/[ORGANIZATION_ID]"
                "billingAccounts/[BILLING_ACCOUNT_ID]"
                "folders/[FOLDER_ID]".
        page_token (str):
            Optional. If present, then retrieve the next batch of
            results from the preceding call to this method.
            ``pageToken`` must be the value of ``nextPageToken`` from
            the previous response. The values of other method parameters
            should be identical to those in the previous call.
        page_size (int):
            Optional. The maximum number of results to return from this
            request. Non-positive values are ignored. The presence of
            ``nextPageToken`` in the response indicates that more
            results might be available.
    """
    # Wire field 1 (string): parent resource to list exclusions under.
    parent = proto.Field(
        proto.STRING,
        number=1,
    )
    # Wire field 2 (string): continuation token from a previous response.
    page_token = proto.Field(
        proto.STRING,
        number=2,
    )
    # Wire field 3 (int32): page-size cap; non-positive values are ignored.
    page_size = proto.Field(
        proto.INT32,
        number=3,
    )
class ListExclusionsResponse(proto.Message):
    r"""Result returned from ``ListExclusions``.
    Attributes:
        exclusions (Sequence[googlecloudsdk.third_party.gapic_clients.logging_v2.types.LogExclusion]):
            A list of exclusions.
        next_page_token (str):
            If there might be more results than appear in this response,
            then ``nextPageToken`` is included. To get the next set of
            results, call the same method again using the value of
            ``nextPageToken`` as ``pageToken``.
    """
    # Identity accessor: returns this response message itself.
    # NOTE(review): presumably consumed by the generated pager machinery to
    # expose the raw per-page response — confirm against the client's pagers.
    @property
    def raw_page(self):
        return self
    # Wire field 1 (repeated message): the LogExclusion entries for this page.
    exclusions = proto.RepeatedField(
        proto.MESSAGE,
        number=1,
        message='LogExclusion',
    )
    # Wire field 2 (string): continuation token; empty when no more pages.
    next_page_token = proto.Field(
        proto.STRING,
        number=2,
    )
class GetExclusionRequest(proto.Message):
    r"""The parameters to ``GetExclusion``.
    Attributes:
        name (str):
            Required. The resource name of an existing exclusion:
            ::
                "projects/[PROJECT_ID]/exclusions/[EXCLUSION_ID]"
                "organizations/[ORGANIZATION_ID]/exclusions/[EXCLUSION_ID]"
                "billingAccounts/[BILLING_ACCOUNT_ID]/exclusions/[EXCLUSION_ID]"
                "folders/[FOLDER_ID]/exclusions/[EXCLUSION_ID]"
            For example:
            ``"projects/my-project/exclusions/my-exclusion"``
    """
    # Wire field 1 (string): full resource name of the exclusion to fetch.
    name = proto.Field(
        proto.STRING,
        number=1,
    )
class CreateExclusionRequest(proto.Message):
    r"""The parameters to ``CreateExclusion``.
    Attributes:
        parent (str):
            Required. The parent resource in which to create the
            exclusion:
            ::
                "projects/[PROJECT_ID]"
                "organizations/[ORGANIZATION_ID]"
                "billingAccounts/[BILLING_ACCOUNT_ID]"
                "folders/[FOLDER_ID]"
            For examples:
            ``"projects/my-logging-project"``
            ``"organizations/123456789"``
        exclusion (googlecloudsdk.third_party.gapic_clients.logging_v2.types.LogExclusion):
            Required. The new exclusion, whose ``name`` parameter is an
            exclusion name that is not already used in the parent
            resource.
    """
    # Wire field 1 (string): parent resource under which to create the exclusion.
    parent = proto.Field(
        proto.STRING,
        number=1,
    )
    # Wire field 2 (message): the LogExclusion to create.
    exclusion = proto.Field(
        proto.MESSAGE,
        number=2,
        message='LogExclusion',
    )
class UpdateExclusionRequest(proto.Message):
    r"""The parameters to ``UpdateExclusion``.
    Attributes:
        name (str):
            Required. The resource name of the exclusion to update:
            ::
                "projects/[PROJECT_ID]/exclusions/[EXCLUSION_ID]"
                "organizations/[ORGANIZATION_ID]/exclusions/[EXCLUSION_ID]"
                "billingAccounts/[BILLING_ACCOUNT_ID]/exclusions/[EXCLUSION_ID]"
                "folders/[FOLDER_ID]/exclusions/[EXCLUSION_ID]"
            For example:
            ``"projects/my-project/exclusions/my-exclusion"``
        exclusion (googlecloudsdk.third_party.gapic_clients.logging_v2.types.LogExclusion):
            Required. New values for the existing exclusion. Only the
            fields specified in ``update_mask`` are relevant.
        update_mask (google.protobuf.field_mask_pb2.FieldMask):
            Required. A non-empty list of fields to change in the
            existing exclusion. New values for the fields are taken from
            the corresponding fields in the
            [LogExclusion][google.logging.v2.LogExclusion] included in
            this request. Fields not mentioned in ``update_mask`` are
            not changed and are ignored in the request.
            For example, to change the filter and description of an
            exclusion, specify an ``update_mask`` of
            ``"filter,description"``.
    """
    # Wire field 1 (string): full resource name of the exclusion being updated.
    name = proto.Field(
        proto.STRING,
        number=1,
    )
    # Wire field 2 (message): replacement values; only masked fields are read.
    exclusion = proto.Field(
        proto.MESSAGE,
        number=2,
        message='LogExclusion',
    )
    # Wire field 3 (message): FieldMask selecting which exclusion fields change.
    update_mask = proto.Field(
        proto.MESSAGE,
        number=3,
        message=field_mask_pb2.FieldMask,
    )
class DeleteExclusionRequest(proto.Message):
    r"""The parameters to ``DeleteExclusion``.
    Attributes:
        name (str):
            Required. The resource name of an existing exclusion to
            delete:
            ::
                "projects/[PROJECT_ID]/exclusions/[EXCLUSION_ID]"
                "organizations/[ORGANIZATION_ID]/exclusions/[EXCLUSION_ID]"
                "billingAccounts/[BILLING_ACCOUNT_ID]/exclusions/[EXCLUSION_ID]"
                "folders/[FOLDER_ID]/exclusions/[EXCLUSION_ID]"
            For example:
            ``"projects/my-project/exclusions/my-exclusion"``
    """
    # Wire field 1 (string): full resource name of the exclusion to delete.
    name = proto.Field(
        proto.STRING,
        number=1,
    )
class GetCmekSettingsRequest(proto.Message):
r"""The parameters to
[GetCmekSettings][google.logging.v2.ConfigServiceV2.GetCmekSettings].
See `Enabling CMEK for Log
Router <https://cloud.google.com/logging/docs/routing/managed-encryption>`__
for more information.
Attributes:
name (str):
Required. The resource for which to retrieve CMEK settings.
::
"projects/[PROJECT_ID]/cmekSettings"
"organizations/[ORGANIZATION_ID]/cmekSettings"
"billingAccounts/[BILLING_ACCOUNT_ID]/cmekSettings"
"folders/[FOLDER_ID]/cmekSettings"
For example:
``"organizations/12345/cmekSettings"``
Note: CMEK for the Log Router can be configured | |
#!/usr/bin/env python
# coding: utf-8
# # Scenario
#
# We are working on preparing a prototype machine learning model for Zyfra, a company that developes efficiency solutions for heavy industry.
#
# The ML model should predict the amount of gold (Au) recovered from gold ore using data on extraction and purification.
#
# Machine learning prediction question: find the ML model that best predicts the two target values given the predictor variables present in both the test and train dataframes.
#
# The target values are rougher.output.recovery & final.output.recovery
#
# Useful Features (predictor parameters common to both train and test dataframes)
#
# Datasets: gold_recovery_full.csv, gold_recovery_train.csv, gold_recovery_test.csv
#
# Analysis done December 2021
# In[1]:
#import libraries
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import LogisticRegression
from sklearn.svm import LinearSVC
from sklearn import svm
from sklearn import linear_model
from sklearn.model_selection import *
from sklearn.ensemble import *
from sklearn.tree import *
from sklearn.linear_model import *
from sklearn.metrics import *
from sklearn.utils import shuffle
from sklearn.dummy import DummyRegressor
from sklearn.model_selection import train_test_split , cross_val_score
from sklearn.metrics import mean_squared_error, r2_score, mean_absolute_error
from sklearn import datasets
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import cross_validate
import seaborn as sns
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
from sklearn.metrics import make_scorer
from sklearn import metrics
import pandas as pd
import numpy as np
import random
random_state=42
random.seed(random_state)
np.random.seed(random_state)
# import sys and insert code to ignore warnings
import sys
if not sys.warnoptions:
import warnings
warnings.simplefilter("ignore")
# # Download and prepare the data
# # 1.1. Open the files and look into the data.
#
# In[2]:
# load the data
try:
train = pd.read_csv('/datasets/gold_recovery_train.csv')
except:
print('ERROR: Unable to find or access file.')
try:
test = pd.read_csv('/datasets/gold_recovery_test.csv')
except:
print('ERROR: Unable to find or access file.')
try:
full = pd.read_csv('/datasets/gold_recovery_full.csv')
except:
print('ERROR: Unable to find or access file.')
# In[3]:
# create basic loop to get info on dfs
# create list of dfs
dfs = [train, test, full]
# Pair each frame with its name explicitly. The previous version recovered the
# name by scanning globals() for an identity match, which is fragile: it breaks
# if two variables reference the same frame, and it silently depends on the
# loop variable `df` itself living in globals().
for name, df in zip(['train', 'test', 'full'], dfs):
    print('\n')
    print("=" * 23)
    print("Dataframe Name: %s" % name)
    print("=" * 23)
    print('Number of duplicate rows:', df.duplicated().sum())
    print('Number rows and columns:', df.shape, '\n')
    print("Count total NaN at each column in a DataFrame :")
    print(df.isnull().sum())
# In[4]:
full.head(1)
# In[5]:
train.head(1)
# In[6]:
test.head(1)
# We note three dfs: train, test, full. There are no duplicates, but many NaN values in every df.
#
# The full df contains all the training and test sets. The test df only contains 53 columns, while the train and full dfs contain 87.
#
# We've been told some parameters are not available because they were measured and/or calculated much later. We are told some of the features that are present in the training set may be absent from the test set. "The test set also doesn't contain targets."
# # 1.2. Check that recovery is calculated correctly. Using the training set, calculate recovery for the rougher.output.recovery feature. Find the MAE between your calculations and the feature values. Provide findings.
# In[7]:
# calculate MAE
# Recovery formula: 100 * C*(F - T) / (F*(C - T)), where C, F and T are the Au
# concentrations in the rougher concentrate, rougher feed and rougher tails.
rougher_output_recovery_calc = 100 * (train['rougher.output.concentrate_au'] * (train['rougher.input.feed_au'] - train['rougher.output.tail_au'])) / (train['rougher.input.feed_au'] * (train['rougher.output.concentrate_au'] - train['rougher.output.tail_au']))
# Pair the recorded recovery with the recomputed value; dropna() keeps only the
# rows where both numbers exist, so the MAE compares like with like.
df_output_rougher = pd.DataFrame({"output_recovery":train["rougher.output.recovery"],"calc":rougher_output_recovery_calc}).dropna()
MAE = mean_absolute_error(df_output_rougher["output_recovery"],df_output_rougher["calc"])
print(f"MAE={MAE}")
# The MAE is very small, indicating the recovery is calculated correctly.
# # 1.3. Analyze the features not available in the test set. What are these parameters? What is their type?
# In[8]:
# list the features in the full set
full.info()
# In[9]:
# list the features not available in test set
not_in_test = full.columns.difference(test.columns)
full[not_in_test].head(1)
# In[10]:
# list the parameters and types of features not available in test set
full[not_in_test].info()
# So we anticipated 34 columns (87 in train/full - 53 in test) would be missing in test and we've now verified the columns that are missing.
#
# We observe all 34 are float64 types and are different different measurements that have to do with output.
#
# Our 2 target features, final.output.recovery & rougher.output.recovery, are also missing from the test df.
#
# We were told the full df has all the records for the train and test dfs. We will investigate if we can replace the values for our targets from the full df.
# # 1.4. Perform data preprocessing.
# We need to add the target columns (final.output.recovery and rougher.output.recovery) to the test df.
#
# We will use the date column, after verifying there are no duplicates, as the index so we fill in corresponding information for the appropriate rows.
# In[11]:
# check for duplicates in date columns
full["date"].is_unique
# In[12]:
test["date"].is_unique
# Each entry in the date column is unique for both the full and test dataframes. Now we can add the columns.
# In[13]:
# create a temporary df from full with target columns
df1 = pd.DataFrame(full, columns = ['final.output.recovery', 'rougher.output.recovery', 'date'])
print('Temporary df', df1.shape)
print('Test df before', test.shape)
# add target columns to test df using the reference date as index
test_w_targets = pd.merge(test, df1, on="date", how="inner")
print('Test df after', test_w_targets.shape, '\n')
print(test_w_targets.info())
# In[14]:
# select a date from a random row in test
test_row = test.iloc[227,0]
# Verify columns match using date as index in full and test dfs
cols = ['final.output.recovery', 'rougher.output.recovery', 'rougher.input.feed_ag', 'secondary_cleaner.state.floatbank2_b_level', 'date']
f_row = full.loc[full['date'] == test_row]
f_row[cols]
# In[15]:
t_row = test_w_targets.loc[test_w_targets['date'] == test_row]
t_row[cols]
# We added the target columns to the test df using the date column as an index and verified the columns match (full --> test) by displaying sample columns in rows from test_w_targets df and full df.
# In[16]:
# check missing values
print('\nRows with missing values in target values in test df:')
print(test_w_targets['final.output.recovery'].isna().sum())
print(test_w_targets['rougher.output.recovery'].isna().sum())
# We've verified the new columns, final.output.recovery and rougher.output.recovery, have been added, but we still have NaN values in those targets. There is really no way for us to fill in the target values, so we will need to delete those rows.
# In[17]:
# eliminate rows without target values
test_w_targets = test_w_targets[~test_w_targets['final.output.recovery'].isna()]
test_w_targets = test_w_targets[~test_w_targets['rougher.output.recovery'].isna()]
test_w_targets.info()
print('\nRows with missing values in target values in test df:')
print(test_w_targets['final.output.recovery'].isna().sum())
print(test_w_targets['rougher.output.recovery'].isna().sum())
# Similarly, the train df is used to train the model and we need to check for missing values in the target columns. We should drop any of those rows since we do not have a way to replace the target values.
# In[18]:
# check for NaNs in target value columns for train df
print('\nRows with missing values in target values in train df:')
print(train['final.output.recovery'].isna().sum())
print(train['rougher.output.recovery'].isna().sum())
# In[19]:
# eliminate rows without target values since we don't have
# any way to fill in values
train = train[~train['final.output.recovery'].isna()]
train = train[~train['rougher.output.recovery'].isna()]
train.info()
print('\nRows with missing values in target values in train df:')
print(train['final.output.recovery'].isna().sum())
print(train['rougher.output.recovery'].isna().sum())
# We've verified the rows with missing values in the target columns have been deleted. Next we will change datatypes and fill in values for train and test dfs.
# In[20]:
# create new dfs list
dfs = [test_w_targets, train]
# check datatypes
for df in dfs:
df.info()
# In[21]:
# change the date datatype
for df in dfs:
df["date"] = pd.to_datetime(df["date"], format='%Y-%m-%d %H:%M:%S', errors = 'coerce')
# verify change to datetime
print(df["date"])
# We change the datatype for date to datetime from object. The other columns are type float64, which is appropriate.
#
# Next we will fill in missing values for test and train. We were told "Parameters that are next to each other in terms of time are often similar," so we plan to use the forward fill strategy - ffill - since it propagates the last valid observation forward.
# In[22]:
# fillna and verify
# DataFrame.fillna(method='ffill') is deprecated since pandas 2.1 and removed
# in pandas 3.x; DataFrame.ffill() is the supported, behaviorally identical
# spelling (propagates the last valid observation forward).
test_w_targets = test_w_targets.ffill()
train = train.ffill()
# Print the remaining NaN counts (expected 0 unless a column starts with NaN —
# forward fill cannot fill leading gaps).
print(test_w_targets.isnull().sum().sum())
print(train.isnull().sum().sum())
# We've verified the missing data has been filled using ffill. Finally we can drop the date column as it will not be useful for our models.
# In[23]:
# remove date column from train and test dfs
test_w_targets.drop('date', inplace=True, axis=1)
train.drop('date', inplace=True, axis=1)
print('Verify new shapes after dropping date col')
print(test_w_targets.shape)
print(train.shape)
# # Analyze the data
# # 2.1. Take note of how the concentrations of metals (Au, Ag, Pb) change depending on the purification stage.
# In[24]:
# returns columns of dfs with selected strings
def cols_with_str(df, string):
    """Return the sub-frame of ``df`` made of columns whose name contains ``string``.

    Also prints the matching column labels (notebook-style progress output).
    """
    matching = []
    for column in df.columns:
        if string in column:
            matching.append(column)
    subset = df[matching]
    print(list(subset))
    return subset
def rougher_feed(df, metal, rougher, feed, inpu):
    """Mean of the first column of ``df`` whose name contains all four substrings.

    Bug fix: the original ignored its ``df`` argument and always filtered the
    global ``train`` frame, so calls such as ``rougher_feed(full, ...)``
    silently computed statistics over the wrong dataset. It also used the
    deprecated positional ``Series[0]`` lookup; ``.iloc[0]`` is the supported
    spelling. (The intermediate debug printing done via ``cols_with_str`` is
    dropped — it only echoed column names.)

    Args:
        df: source DataFrame.
        metal, rougher, feed, inpu: substrings that must all appear in a
            column name, e.g. ('au', 'rougher', 'feed', 'input').
    Returns:
        float: mean of the first column matching all four substrings.
    """
    tokens = (metal, rougher, feed, inpu)
    cols = [c for c in df.columns if all(tok in c for tok in tokens)]
    return df[cols].mean().iloc[0]
# In[25]:
# create features [stage].[parameter_type].[parameter_name] Example: rougher.input.feed_ag
metal = ['ag', 'au', 'pb']
stage = ['rougher', 'primary_cleaner', 'secondary_cleaner', 'final']
param = ['input', 'output', 'state', 'calculation']
# create accumulators for different stages
au_rougher_feed_input = rougher_feed(full,'au','rougher','feed', 'input')
ag_rougher_feed_input = rougher_feed(full,'ag','rougher','feed', 'input')
pb_rougher_feed_input = rougher_feed(full,'pb','rougher','feed', 'input')
au_rougher_output_tail = rougher_feed(full,'au','rougher','output', 'tail')
ag_rougher_output_tail = rougher_feed(full,'ag','rougher','output', 'tail')
pb_rougher_output_tail = rougher_feed(full,'pb','rougher','output', 'tail')
au_rougher_output_concentrate = rougher_feed(full,'au','rougher','output', 'concentrate')
ag_rougher_output_concentrate = | |
to set
internally.
It is deprecated to specify both "shift" and "start_time".
If this does happen, timeshift() will print a warning to
stderr and ignore the "shift" argument.
If "shift" is negative and sufficiently large that it would
leave some event with a negative tick-value, then the score
is shifted so that the first event occurs at time 0. This
also occurs if "start_time" is negative, and is also the
default if neither "shift" nor "start_time" are specified.
'''
#_warn('tracks='+str(tracks))
if score == None or len(score) < 2:
return [1000, [],]
new_score = [score[0],]
my_type = score_type(score)
if my_type == '':
return new_score
if my_type == 'opus':
_warn("timeshift: opus format is not supported\n")
# _clean_up_scores() 6.2; doesn't exist! what was it supposed to do?
return new_score
if not (shift == None) and not (start_time == None):
_warn("timeshift: shift and start_time specified: ignoring shift\n")
shift = None
if shift == None:
if (start_time == None) or (start_time < 0):
start_time = 0
# shift = start_time - from_time
i = 1 # ignore first element (ticks)
tracks = set(tracks) # defend against tuples and lists
earliest = 1000000000
if not (start_time == None) or shift < 0: # first find the earliest event
while i < len(score):
if len(tracks) and not ((i-1) in tracks):
i += 1
continue
for event in score[i]:
if event[1] < from_time:
continue # just inspect the to_be_shifted events
if event[1] < earliest:
earliest = event[1]
i += 1
if earliest > 999999999:
earliest = 0
if shift == None:
shift = start_time - earliest
elif (earliest + shift) < 0:
start_time = 0
shift = 0 - earliest
i = 1 # ignore first element (ticks)
while i < len(score):
if len(tracks) == 0 or not ((i-1) in tracks): # 3.8
new_score.append(score[i])
i += 1
continue
new_track = []
for event in score[i]:
new_event = list(event)
#if new_event[1] == 0 and shift > 0 and new_event[0] != 'note':
# pass
#elif new_event[1] >= from_time:
if new_event[1] >= from_time:
# 4.1 must not rightshift set_tempo
if new_event[0] != 'set_tempo' or shift<0:
new_event[1] += shift
elif (shift < 0) and (new_event[1] >= (from_time+shift)):
continue
new_track.append(new_event)
if len(new_track) > 0:
new_score.append(new_track)
i += 1
_clean_up_warnings()
return new_score
def segment(score=None, start_time=None, end_time=None, start=0, end=100000000,
 tracks={0,1,2,3,4,5,6,7,8,10,11,12,13,14,15}):
    r'''Returns a "score" which is a segment of the one supplied
as the argument, beginning at "start_time" ticks and ending
at "end_time" ticks (or at the end if "end_time" is not supplied).
If the set "tracks" is specified, only those tracks will
be returned.
'''
    # NOTE(review): `tracks` is a mutable default argument, but it is never
    # mutated — it is rebound via `tracks = set(tracks)` below — so the usual
    # shared-state hazard does not apply here.
    if score == None or len(score) < 2:
        return [1000, [],]
    if start_time == None: # as of 4.2 start_time is recommended
        start_time = start # start is legacy usage
    if end_time == None: # likewise
        end_time = end
    new_score = [score[0],]
    my_type = score_type(score)
    if my_type == '':
        return new_score
    if my_type == 'opus':
        # more difficult (disconnecting note_on's from their note_off's)...
        _warn("segment: opus format is not supported\n")
        _clean_up_warnings()
        return new_score
    i = 1 # ignore first element (ticks); we count in ticks anyway
    tracks = set(tracks) # defend against tuples and lists
    while i < len(score):
        if len(tracks) and not ((i-1) in tracks):
            i += 1
            continue
        new_track = []
        # For each track, remember the most recent control-change, patch-change
        # and tempo seen at-or-before start_time, so the segment starts with the
        # same controller/patch/tempo state the full score would have had there.
        channel2cc_num = {} # most recent controller change before start
        channel2cc_val = {}
        channel2cc_time = {}
        channel2patch_num = {} # keep most recent patch change before start
        channel2patch_time = {}
        set_tempo_num = 500000 # most recent tempo change before start 6.3
        set_tempo_time = 0
        earliest_note_time = end_time
        for event in score[i]:
            if event[0] == 'control_change': # 6.5
                cc_time = channel2cc_time.get(event[2]) or 0
                if (event[1] <= start_time) and (event[1] >= cc_time):
                    channel2cc_num[event[2]] = event[3]
                    channel2cc_val[event[2]] = event[4]
                    channel2cc_time[event[2]] = event[1]
            elif event[0] == 'patch_change':
                patch_time = channel2patch_time.get(event[2]) or 0
                if (event[1]<=start_time) and (event[1] >= patch_time): # 2.0
                    channel2patch_num[event[2]] = event[3]
                    channel2patch_time[event[2]] = event[1]
            elif event[0] == 'set_tempo':
                if (event[1]<=start_time) and (event[1]>=set_tempo_time): #6.4
                    set_tempo_num = event[2]
                    set_tempo_time = event[1]
            # Keep any event (of any type) that falls inside the window.
            if (event[1] >= start_time) and (event[1] <= end_time):
                new_track.append(event)
                if (event[0] == 'note') and (event[1] < earliest_note_time):
                    earliest_note_time = event[1]
        # Tracks with no in-window events are dropped entirely; otherwise the
        # carried-forward tempo/patch/cc state is re-asserted at start_time.
        if len(new_track) > 0:
            new_track.append(['set_tempo', start_time, set_tempo_num])
            for c in channel2patch_num:
                new_track.append(['patch_change',start_time,c,channel2patch_num[c]],)
            for c in channel2cc_num: # 6.5
                new_track.append(['control_change',start_time,c,channel2cc_num[c],channel2cc_val[c]])
            new_score.append(new_track)
        i += 1
    _clean_up_warnings()
    return new_score
def score_type(opus_or_score=None):
    r'''Returns a string, either 'opus' or 'score' or ''

Classification: the first note-type event found decides — a 'note'
event means "score" format, a 'note_on' event means "opus" format.
Anything that is not a list of at least [ticks, track, ...], or that
contains no note events at all, yields ''.
'''
    # isinstance replaces the old str(type(...)).find('list') check, and
    # `is None` replaces `== None` (idiomatic identity test).
    if opus_or_score is None or not isinstance(opus_or_score, list) \
            or len(opus_or_score) < 2:
        return ''
    for track in opus_or_score[1:]:  # element 0 is the ticks parameter
        for event in track:
            if event[0] == 'note':
                return 'score'
            elif event[0] == 'note_on':
                return 'opus'
    return ''
def concatenate_scores(scores):
    r'''Concatenates a list of scores into one score.
If the scores differ in their "ticks" parameter,
they will all get converted to millisecond-tick format.
'''
    # the deepcopys are needed if the input_score's are refs to the same obj
    # e.g. if invoked by midisox's repeat()
    input_scores = _consistentise_ticks(scores) # 3.7
    output_score = copy.deepcopy(input_scores[0])
    for input_score in input_scores[1:]:
        # Shift every appended event right by the accumulated output's length
        # (score2stats' 'nticks'), so the scores play back-to-back in time.
        output_stats = score2stats(output_score)
        delta_ticks = output_stats['nticks']
        itrack = 1
        while itrack < len(input_score):
            if itrack >= len(output_score): # new output track if doesn't exist
                output_score.append([])
            for event in input_score[itrack]:
                # Deep-copy before mutating the tick value (element [1]) so the
                # caller's input events are left untouched.
                output_score[itrack].append(copy.deepcopy(event))
                output_score[itrack][-1][1] += delta_ticks
            itrack += 1
    return output_score
def merge_scores(scores):
    r'''Merges a list of scores into one score. A merged score comprises
all of the tracks from all of the input scores; un-merging is possible
by selecting just some of the tracks. If the scores differ in their
"ticks" parameter, they will all get converted to millisecond-tick
format. merge_scores attempts to resolve channel-conflicts,
but there are of course only 15 available channels...
'''
    input_scores = _consistentise_ticks(scores) # 3.6
    output_score = [1000]
    channels_so_far = set()
    all_channels = {0,1,2,3,4,5,6,7,8,10,11,12,13,14,15}
    global Event2channelindex
    for input_score in input_scores:
        new_channels = set(score2stats(input_score).get('channels_total', []))
        new_channels.discard(9) # 2.8 cha9 must remain cha9 (in GM)
        # Each channel used by BOTH the accumulated output and this input is a
        # conflict: rewrite the input's events on that channel to a free one.
        for channel in channels_so_far & new_channels:
            # consistently choose lowest available, to ease testing
            free_channels = list(all_channels - (channels_so_far|new_channels))
            if len(free_channels) > 0:
                free_channels.sort()
                free_channel = free_channels[0]
            else:
                # No channels left: give up remapping for this input score.
                free_channel = None
                break
            itrack = 1
            while itrack < len(input_score):
                for input_event in input_score[itrack]:
                    # Event2channelindex maps event-type -> position of the
                    # channel byte within that event (falsy if channel-less).
                    channel_index=Event2channelindex.get(input_event[0],False)
                    if channel_index and input_event[channel_index]==channel:
                        input_event[channel_index] = free_channel
                itrack += 1
            channels_so_far.add(free_channel)
        channels_so_far |= new_channels
        output_score.extend(input_score[1:])
    return output_score
def _ticks(event):
    # Sort-key helper: an event's start time in ticks is always element [1].
    return event[1]
def mix_opus_tracks(input_tracks): # 5.5
    r'''Mixes an array of tracks into one track. A mixed track
cannot be un-mixed. It is assumed that the tracks share the same
ticks parameter and the same tempo.
Mixing score-tracks is trivial (just insert all events into one array).
Mixing opus-tracks is only slightly harder, but it's common enough
that a dedicated function is useful.
'''
    # Convert each opus track to absolute-tick "score" events, pool them all,
    # time-sort the pool, then convert back to a (delta-time) opus track.
    pooled_events = []
    for one_track in input_tracks: # 5.8
        as_score = opus2score([1000, one_track])
        pooled_events.extend(as_score[1])
    pooled_events.sort(key=_ticks)
    return score2opus([1000, pooled_events])[1]
def mix_scores(scores):
    r'''Mixes a list of scores into one one-track score.
A mixed score cannot be un-mixed. Hopefully the scores
have no undesirable channel-conflicts between them.
If the scores differ in their "ticks" parameter,
they will all get converted to millisecond-tick format.
'''
    # Pool every event from every track of every (tick-normalised) score
    # into a single track, preserving the original score/track order.
    pooled = []
    for one_score in _consistentise_ticks(scores): # 3.6
        for one_track in one_score[1:]:
            pooled.extend(one_track)
    return [1000, pooled]
def score2stats(opus_or_score=None):
r'''Returns a dict of some basic stats about the score, like
bank_select (list of tuples (msb,lsb)),
channels_by_track (list of lists), channels_total (set),
general_midi_mode (list),
ntracks, nticks, patch_changes_by_track (list of dicts),
num_notes_by_channel (list of numbers),
patch_changes_total (set),
percussion (dict histogram of channel 9 events),
pitches (dict histogram of pitches on channels other than 9),
pitch_range_by_track (list, by track, of two-member-tuples),
pitch_range_sum (sum over tracks of the pitch_ranges),
'''
bank_select_msb = -1
bank_select_lsb = -1
bank_select = []
channels_by_track = []
channels_total = set([])
general_midi_mode = []
num_notes_by_channel = dict([])
patches_used_by_track = []
patches_used_total = set([])
patch_changes_by_track = []
patch_changes_total = set([])
percussion = dict([]) # histogram of channel 9 "pitches"
pitches = dict([]) # histogram of pitch-occurrences channels 0-8,10-15
pitch_range_sum = 0 # u pitch-ranges of each track
pitch_range_by_track = []
is_a_score = True
if opus_or_score == None:
return {'bank_select':[], 'channels_by_track':[], 'channels_total':[],
'general_midi_mode':[], 'ntracks':0, 'nticks':0,
'num_notes_by_channel':dict([]),
'patch_changes_by_track':[], 'patch_changes_total':[],
'percussion':{}, 'pitches':{}, 'pitch_range_by_track':[],
'ticks_per_quarter':0, 'pitch_range_sum':0}
ticks_per_quarter = opus_or_score[0]
i = 1 # ignore first element, which is ticks
nticks = 0
while i < | |
# Source repository: theRealCarneiro/pulsemeeter
import os
import re
import signal
import shutil
import threading
import sys
from .app_list_widget import AppList
from .eq_popover import EqPopover
from .latency_popover import LatencyPopover
from .rnnoise_popover import RnnoisePopover
from .groups_popover import JackGroupsPopover
from .port_select_popover import PortSelectPopover
from .vumeter_widget import Vumeter
from ..settings import LAYOUT_DIR
from ..socket import Client
from gi import require_version as gi_require_version
# from pulsectl import Pulse
gi_require_version('Gtk', '3.0')
gi_require_version('AppIndicator3', '0.1')
from gi.repository import Gtk, GLib, AppIndicator3
class MainWindow(Gtk.Window):
    def __init__(self, isserver=False, trayonly=False):
        """Connect to the pulsemeeter server and build the UI.

        isserver: True when this process also hosts the server; only then
            is a tray indicator created.
        trayonly: start minimised to the tray and skip building the window.
        """
        self.isserver = isserver
        # listening client: receives server events for the callbacks below
        self.client = Client(listen=True)
        self.config = self.client.config
        self.trayonly = trayonly
        self.windowinstance = None
        self.tray = None
        if isserver:
            self.tray = self.create_indicator()
            self.client.set_callback_function('tray', self.update_tray_status)
        if trayonly:
            # no window yet: just follow the server's lifetime from the tray
            self.client.set_callback_function('exit', self.close_on_server_exit)
            return
        self.windowinstance = self.start_window(isserver)
def start_window(self, isserver):
self.trayonly = False
self.exit_flag = False
GLib.threads_init()
Gtk.Window.__init__(self)
self.builder = Gtk.Builder()
self.layout = self.config['layout']
component_list = [
'window',
'menu_popover',
'rename_popover',
'popover_entry',
'latency_popover',
'latency_adjust',
'rnnoise_popover',
'rnnoise_latency_adjust',
'rnnoise_threshold_adjust',
'jack_group_popover',
'sink_input_list',
'source_output_list',
'sink_input_scroll',
'source_output_scroll',
'source_output_viewport',
'sink_input_viewport',
'vumeter_toggle',
'vi_1_peak',
'channel_groups',
]
for i in range(1, 4):
component_list.append(f'hi_{i}_adjust')
component_list.append(f'vi_{i}_adjust')
component_list.append(f'a_{i}_adjust')
component_list.append(f'b_{i}_adjust')
try:
self.builder.add_objects_from_file(
os.path.join(LAYOUT_DIR, f'{self.layout}.glade'),
component_list
)
except Exception as ex:
print('Error building main window!\n{}'.format(ex))
sys.exit(1)
self.devices = {}
self.devices['a'] = self.client.list_hardware_devices('sinks')
self.devices['hi'] = self.client.list_hardware_devices('sources')
# self.devices['b'] = self.client.list_virtual_devices('sources')
# self.devices['vi'] = self.client.list_virtual_devices('sinks')
self.hardware_comboboxes = {}
self.primary_buttons = {}
self.volume_adjusts = {}
self.volume_sliders = {}
self.mute_buttons = {}
self.loopback_buttons = {}
self.rnnoise_buttons = {}
self.eq_buttons = {}
self.enable_vumeters = True
if shutil.which('pulse-vumeter') is False or \
self.config['enable_vumeters'] is False:
self.enable_vumeters = False
self.start_hardware_comboboxes()
self.start_inputs()
self.start_outputs()
self.start_vumeters()
self.start_app_list()
self.start_menu_items()
# self.start_layout_combobox()
self.window = self.builder.get_object('window')
# self.add_window(self.window)
# super().__init__(self.window)
self.listen_socket()
self.window.connect('delete_event', self.delete_event)
# self.window.set_type_hint(Gdk.WindowTypeHint.DIALOG)
self.builder.connect_signals(self)
self.window.show_all()
if isserver is not False:
signal.signal(signal.SIGTERM, self.delete_event)
signal.signal(signal.SIGINT, self.delete_event)
return self.window
def start_menu_items(self):
if self.layout == 'default':
self.menu_button = self.builder.get_object('menu_button')
self.menu_popover = self.builder.get_object('menu_popover')
self.menu_popover.set_relative_to(self.menu_button)
self.menu_button.connect('pressed', self.open_settings)
self.vumeter_toggle = self.builder.get_object('vumeter_toggle')
self.vumeter_toggle.set_active(self.enable_vumeters)
self.vumeter_toggle.connect('toggled', self.toggle_vumeters)
self.cleanup_toggle = self.builder.get_object('cleanup_toggle')
self.cleanup_toggle.set_active(self.config['cleanup'])
self.cleanup_toggle.connect('toggled', self.toggle_cleanup)
self.tray_toggle = self.builder.get_object('tray_toggle')
self.tray_toggle.set_active(self.config['tray'])
self.tray_toggle.connect('toggled', self.toggle_tray)
self.layout_combobox = self.builder.get_object('layout_combobox')
layout_list = os.listdir(LAYOUT_DIR)
i = 0
for layout in layout_list:
self.layout_combobox.append_text(layout[:len(layout) - 6])
if layout[:len(layout) - 6] == self.layout:
self.layout_combobox.set_active(i)
i += 1
self.layout_combobox.connect('changed', self.change_layout)
# self.jack_toggle_button = self.builder.get_object('jack_toggle')
# self.jack_toggle_button.set_active(self.pulse.config['jack']['enable'])
# self.jack_toggle_button.connect('toggled', self.toggle_jack)
# self.jack_toggle_button.set_sensitive(False)
# self.test = self.builder.get_object('test')
# self.test.connect('pressed', self.open_group_popover)
# self.jack_gp_popover = self.builder.get_object('jack_group_popover')
# self.jack_gp_popover.set_relative_to(self.test)
# self.jack_toggle_button.connect('toggled', self.toggle_jack)
def toggle_tray(self, widget):
state = widget.get_active()
self.client.set_tray(state)
if self.isserver:
if state:
if self.tray is None:
self.tray = self.create_indicator()
self.tray.set_status(1)
else:
self.tray.set_status(0)
def toggle_cleanup(self, widget):
self.client.set_cleanup(widget.get_active())
# not perfect yet but works
def change_layout(self, combobox):
self.client.set_layout(combobox.get_active_text())
self.windowinstance.destroy()
self.delete_event()
self.windowinstance = self.start_window(self.isserver)
self.trayonly = False
    def open_settings(self, widget):
        # show the settings popover anchored to the menu button
        self.menu_popover.popup()
    def toggle_jack(self, widget):
        # NOTE(review): uses self.pulse, which this class never assigns
        # (other handlers go through self.client) — looks like dead legacy
        # code for the disabled jack toggle; confirm before reviving.
        self.pulse.config['jack']['enable'] = widget.get_active()
        for i in ['vi', 'hi']:
            for j in self.pulse.config[i]:
                self.pulse.config[i][j]['jack'] = widget.get_active()
def toggle_vumeters(self, widget):
if not shutil.which('pulse-vumeter'):
return
self.enable_vumeters = widget.get_active()
self.config['enable_vumeters'] = widget.get_active()
for device_type in ['hi', 'vi', 'a', 'b']:
for device_id in self.config[device_type]:
# if self.config[device_type][device_id]['name'] != '':
if widget.get_active() is False:
self.vu_list[device_type][device_id].close()
else:
self.vu_list[device_type][device_id].reload_device()
self.vu_list[device_type][device_id].start()
def start_vumeters(self):
self.vu_list = {}
for device_type in ['hi', 'vi', 'a', 'b']:
self.vu_list[device_type] = {}
for device_id in self.config[device_type]:
device_config = self.config[device_type][device_id]
grid = self.builder.get_object(f'{device_type}_{device_id}_vumeter')
vert = True if self.layout == 'default' else False
vumeter = Vumeter(device_type, device_id,
self.config, vertical=vert)
grid.add(vumeter)
if device_config['name'] != '':
if self.enable_vumeters is True:
try:
vumeter.start()
except Exception:
print('Could not start vumeter for',
'{device_type}{device_id}')
self.vu_list[device_type][device_id] = vumeter
    def start_app_list(self):
        """Create the per-application sink-input/source-output lists and
        start the thread that listens for audio-server subscribe events.

        If the audio backend errors out, the window is torn down and the
        process exits.
        """
        # this is probably not the best solution but it handles the pactl errors fine
        sink_input_viewport = self.builder.get_object('sink_input_viewport')
        source_output_viewport = self.builder.get_object('source_output_viewport')
        try:
            self.sink_input_box = AppList('sink-input', self.client)
            self.source_output_box = AppList('source-output', self.client)
            sink_input_viewport.add(self.sink_input_box)
            source_output_viewport.add(self.source_output_box)
            # background thread feeding the app lists with live events
            self.subscribe_thread = threading.Thread(target=self.listen_subscribe, args=())
            self.subscribe_thread.start()
        except Exception as ex:
            print('App sinks returned an error, audio backend returned error')
            print(ex)
            if self.windowinstance is not None:
                self.windowinstance.destroy()
            self.delete_event()
            sys.exit(1)
def start_hardware_comboboxes(self):
for device_type in ['hi', 'a']:
self.hardware_comboboxes[device_type] = {}
name_size = 35 if device_type == 'a' else 20
if self.layout != 'default':
name_size = 100
devices = self.devices[device_type]
# for each combobox
found = False
for device_id in self.config[device_type]:
device_config = self.config[device_type][device_id]
combobox = self.builder.get_object(f'{device_type}_{device_id}_combobox')
combobox.append_text('')
for i in range(0, len(devices)):
text = devices[i]['description'][:name_size]
if len(text) == name_size:
text = text + '...'
combobox.append_text(text)
if devices[i]['name'] == device_config['name']:
found = True
combobox.set_active(i + 1)
if found is False and device_config['jack'] is False:
device_config['name'] = ''
combobox.connect('changed', self.on_combo_changed, device_type,
device_id, devices)
self.hardware_comboboxes[device_type][device_id] = combobox
    def start_inputs(self):
        """Wire up every input channel strip (hardware 'hi' and virtual 'vi'):
        rename popover, primary/rnnoise toggles, volume, mute and the
        loopback buttons that route an input to each output.
        """
        self.rename_popover = self.builder.get_object('rename_popover')
        self.Popover_Entry = self.builder.get_object('popover_entry')
        self.Popover_Entry.connect('activate', self.label_rename_entry)
        self.primary_buttons['vi'] = {}
        # for each input device
        for input_type in ['hi', 'vi']:
            self.volume_adjusts[input_type] = {}
            self.volume_sliders[input_type] = {}
            self.mute_buttons[input_type] = {}
            self.loopback_buttons[input_type] = {}
            for input_id in self.config[input_type]:
                if input_type == 'vi':
                    # virtual inputs get a renameable label and a
                    # "primary device" toggle
                    name = self.config['vi'][input_id]['name']
                    label = self.builder.get_object(f'vi_{input_id}_label')
                    label.set_text(name if name != '' else
                                   f'Virtual Input {input_id}')
                    label_evt_box = self.builder.get_object(f'vi_{input_id}_label_event_box')
                    label_evt_box.connect('button_press_event', self.label_click,
                                          label, 'vi', input_id)
                    primary = self.builder.get_object(f'vi_{input_id}_primary')
                    primary.set_active(self.config['vi'][input_id]['primary'])
                    # the current primary cannot be un-set directly
                    if self.config['vi'][input_id]['primary'] is True:
                        primary.set_sensitive(False)
                    primary.connect('clicked', self.toggle_primary, 'vi', input_id)
                    self.primary_buttons['vi'][input_id] = primary
                else:
                    # noise reduction button
                    rnnoise = self.builder.get_object(f'hi_{input_id}_rnnoise')
                    rnnoise.set_active(self.config['hi'][input_id]['use_rnnoise'])
                    rnnoise.connect('clicked', self.toggle_rnnoise, input_id)
                    # right-click opens the rnnoise settings popover
                    rnnoise.connect('button_press_event', self.open_popover,
                                    RnnoisePopover, input_type, input_id)
                    self.rnnoise_buttons[input_id] = rnnoise
                    # check for rnnoise plugin
                    # NOTE(review): the break only leaves the innermost
                    # loop, so the outer path/lib scan keeps going — the
                    # result is still correct, just slightly wasteful.
                    found = 0
                    for lib in ['lib', 'lib64']:
                        for path in [f'/usr/{lib}/ladspa',
                                     f'/usr/local/{lib}/ladspa']:
                            for plugin in ['librnnoise_ladspa.so',
                                           'rnnoise_ladspa.so']:
                                if os.path.isfile(os.path.join(path, plugin)):
                                    found = 1
                                    break
                    # hide the button entirely when the plugin is missing
                    if found == 0:
                        rnnoise.set_visible(False)
                        rnnoise.set_no_show_all(True)
                source_config = self.config[input_type][input_id]
                # connect volume sliders
                adjust = self.builder.get_object(f'{input_type}_{input_id}_adjust')
                adjust.set_value(source_config['vol'])
                vol_slider = self.builder.get_object(f'{input_type}_{input_id}_vol')
                vol_slider.connect('value-changed', self.volume_change,
                                   input_type, input_id)
                vol_slider.add_mark(100, Gtk.PositionType.TOP, '')
                self.volume_adjusts[input_type][input_id] = adjust
                self.volume_sliders[input_type][input_id] = vol_slider
                # connect mute buttons
                mute = self.builder.get_object(f'{input_type}_{input_id}_mute')
                mute.set_active(self.config[input_type][input_id]['mute'])
                mute.connect('clicked', self.toggle_mute, input_type, input_id)
                self.mute_buttons[input_type][input_id] = mute
                # connection buttons
                self.loopback_buttons[input_type][input_id] = {}
                for output_type in ['a', 'b']:
                    for output_id in self.config[output_type]:
                        # e.g. 'a1', 'b2' — the key used in the device config
                        sink = output_type + output_id
                        button = self.builder.get_object(f'{input_type}_{input_id}_{sink}')
                        button.set_active(source_config[sink])
                        self.loopback_buttons[input_type][input_id][sink] = button
                        button.connect('clicked', self.toggle_loopback, input_type,
                                       input_id, output_type, output_id)
                        # right-click: latency settings (pulse mode) or
                        # port selection (jack mode)
                        if self.config['jack']['enable'] is False:
                            button.connect('button_press_event', self.latency_popover,
                                           LatencyPopover, input_type, input_id, output_type, output_id)
                        else:
                            button.connect('button_press_event',
                                           self.open_popover, PortSelectPopover,
                                           [input_type, input_id, sink])
    # start output devices
    def start_outputs(self):
        """Wire up every output channel strip (hardware 'a' and virtual 'b'):
        primary toggle for 'b' devices, volume, mute and the equalizer
        button (hidden when the mbeq LADSPA plugin is not installed).
        """
        self.primary_buttons['b'] = {}
        for output_type in ['a', 'b']:
            self.volume_adjusts[output_type] = {}
            self.volume_sliders[output_type] = {}
            self.mute_buttons[output_type] = {}
            self.eq_buttons[output_type] = {}
            for output_id in self.config[output_type]:
                sink_config = self.config[output_type][output_id]
                if output_type == 'b':
                    # virtual outputs have a "primary device" toggle
                    primary = self.builder.get_object(f'b_{output_id}_primary')
                    primary.set_active(sink_config['primary'])
                    # the current primary cannot be un-set directly
                    if sink_config['primary'] is True:
                        primary.set_sensitive(False)
                    primary.connect('clicked', self.toggle_primary, 'b', output_id)
                    self.primary_buttons['b'][output_id] = primary
                    label = self.builder.get_object(f'b{output_id}_label')
                    if label is not None:
                        label.set_text(f'B{output_id} - {sink_config["name"]}')
                # volume slider and adjustment
                adjust = self.builder.get_object(f'{output_type}_{output_id}_adjust')
                adjust.set_value(sink_config['vol'])
                vol_slider = self.builder.get_object(f'{output_type}_{output_id}_vol')
                vol_slider.connect('value-changed', self.volume_change,
                                   output_type, output_id)
                vol_slider.add_mark(100, Gtk.PositionType.TOP, '')
                self.volume_adjusts[output_type][output_id] = adjust
                self.volume_sliders[output_type][output_id] = vol_slider
                # mute button
                mute = self.builder.get_object(f'{output_type}_{output_id}_mute')
                mute.set_active(sink_config['mute'])
                mute.connect('clicked', self.toggle_mute, output_type, output_id)
                self.mute_buttons[output_type][output_id] = mute
                # eq button (right-click opens the eq settings popover)
                eq = self.builder.get_object(f'{output_type}_{output_id}_eq')
                eq.set_active(sink_config['use_eq'])
                eq.connect('clicked', self.toggle_eq, output_type, output_id)
                eq.connect('button_press_event', self.open_popover, EqPopover,
                           output_type, output_id)
                self.eq_buttons[output_type][output_id] = eq
                # to hide eq button if plugin not found
                found = 0
                for arc in ['', '64']:
                    for path in [f'/usr/lib{arc}/ladspa',
                                 f'/usr/local/lib{arc}/ladspa']:
                        if os.path.isfile(os.path.join(path, 'mbeq_1197.so')):
                            found = 1
                if found == 0:
                    eq.set_visible(False)
                    eq.set_no_show_all(True)
def toggle_eq(self, button, output_type, output_id):
state = button.get_active()
self.client.eq(output_type, output_id, state)
def toggle_rnnoise(self, widget, input_id):
state = widget.get_active()
self.client.rnnoise(input_id, state)
def toggle_mute(self, button, device_type, device_id):
state = button.get_active()
self.client.mute(device_type, device_id, state)
def toggle_loopback(self, button, input_type, input_id, output_type, output_id):
state = button.get_active()
self.client.connect(input_type, input_id, output_type, output_id, state)
def volume_change(self, slider, device_type, device_id):
val = int(slider.get_value())
if self.config[device_type][device_id]['vol'] != val:
self.client.volume(device_type, device_id, val)
    def open_group_popover(self, widget):
        # NOTE(review): uses self.pulse, which this class never assigns —
        # legacy jack-groups feature, currently unreachable from the UI.
        JackGroupsPopover(widget, self.pulse)
def open_popover(self, button, event, popover, device_type, device_id):
if event.button == 3:
if self.config[device_type][device_id]['name'] != '':
popover(button, self.client, device_type, device_id)
def latency_popover(self, button, event, popover, input_type, input_id,
output_type, output_id):
if event.button == 3:
if self.config[input_type][input_id]['name'] != '':
popover(button, self.client, [input_type, input_id],
[output_type, output_id])
def label_rename_entry(self, widget):
name = widget.get_text()
device_type = self.rename_device_type
device_id = self.rename_device_id
old_name = self.active_label.get_text()
if re.match('^[a-zA-Z0-9"_"]*$', name) and name != old_name:
self.client.rename(device_type, device_id, name)
self.active_label.set_text(name)
# self.sink_input_box.load_application_list()
# self.source_output_box.load_application_list()
self.vu_list[device_type][device_id].restart()
else:
dialog = Gtk.MessageDialog(
transient_for=self.windowinstance,
flags=0,
message_type=Gtk.MessageType.INFO,
buttons=Gtk.ButtonsType.OK,
text='name is not allowed'
)
dialog.format_secondary_text('The name can only consist of numbers, letters and "_".')
dialog.run()
dialog.destroy()
return
self.rename_popover.popdown()
def label_click(self, widget, event, label, device_type, device_id):
self.rename_device_type = device_type
self.rename_device_id = device_id
self.active_label = label
self.rename_popover.set_relative_to(widget)
self.rename_popover.popup()
def on_combo_changed(self, widget, output_type, output_id, device):
model = widget.get_active()
name = device[model - 1]['name'] if model > 0 else ''
self.client.change_hardware_device(output_type, output_id, name)
self.vu_list[output_type][output_id].restart()
# if re.search('JACK:', device[model - 1]['description']):
# self.pulse.config[device_type][device_id]['jack'] = True
# else:
# self.pulse.config[device_type][device_id]['jack'] = False
def toggle_primary(self, widget, device_type, device_id):
if widget.get_active() is False:
return
else:
widget.set_sensitive(False)
button_list = self.primary_buttons[device_type]
for button in button_list:
if button_list[button] | |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
import elasticsearch_dsl
import pytest
import webob
from h.search import Search, index, query
# Sentinel meaning "parameter not supplied" in the parametrized tests below.
MISSING = object()
# NOTE(review): ES_VERSION appears unused in this chunk — confirm before removing.
ES_VERSION = (1, 7, 0)
OFFSET_DEFAULT = 0
# Local aliases of the query-module limits so the test tables read naturally.
LIMIT_DEFAULT = query.LIMIT_DEFAULT
LIMIT_MAX = query.LIMIT_MAX
OFFSET_MAX = query.OFFSET_MAX
class TestLimiter(object):
    """Tests for query.Limiter, which applies offset/limit to a search."""

    def test_it_limits_number_of_annotations(self, Annotation, search):
        dt = datetime.datetime
        # created newest-first so the default (descending) sort is predictable
        ann_ids = [
            Annotation(updated=dt(2017, 1, 4)).id,
            Annotation(updated=dt(2017, 1, 3)).id,
            Annotation(updated=dt(2017, 1, 2)).id,
            Annotation(updated=dt(2017, 1, 1)).id,
        ]
        params = webob.multidict.MultiDict([("offset", 1), ("limit", 2)])
        result = search.run(params)
        assert sorted(result.annotation_ids) == sorted(ann_ids[1:3])

    @pytest.mark.parametrize(
        "offset,from_",
        [
            # defaults to OFFSET_DEFAULT
            (MISSING, OFFSET_DEFAULT),
            # straightforward pass-through
            (7, 7),
            (42, 42),
            # string values should be converted
            ("23", 23),
            ("82", 82),
            # invalid values should be ignored and the default should be returned
            ("foo", OFFSET_DEFAULT),
            ("", OFFSET_DEFAULT),
            (" ", OFFSET_DEFAULT),
            ("-23", OFFSET_DEFAULT),
            ("32.7", OFFSET_DEFAULT),
            ("9801", OFFSET_MAX),
        ],
    )
    def test_offset(self, es_dsl_search, pyramid_request, offset, from_):
        """The 'offset' param becomes the ES 'from' value, clamped/validated."""
        limiter = query.Limiter()
        params = webob.multidict.MultiDict({"offset": offset})
        if offset is MISSING:
            params = webob.multidict.MultiDict({})
        q = limiter(es_dsl_search, params).to_dict()
        assert q["from"] == from_

    @pytest.mark.parametrize(
        "limit,expected",
        [
            ("MAX", LIMIT_DEFAULT),
            (LIMIT_MAX + 1, LIMIT_MAX),
            (LIMIT_MAX, LIMIT_MAX),
            ("150", 150),
            (0, 0),
            (-1, LIMIT_DEFAULT),
            (1.5, 1),
        ],
    )
    def test_limit_output_within_bounds(
        self, es_dsl_search, pyramid_request, limit, expected
    ):
        """Given any string input, output should be in the allowed range."""
        limiter = query.Limiter()
        q = limiter(
            es_dsl_search, webob.multidict.MultiDict({"limit": limit})
        ).to_dict()
        assert isinstance(q["size"], int)
        assert q["size"] == expected

    def test_limit_set_to_default_when_missing(self, es_dsl_search, pyramid_request):
        limiter = query.Limiter()
        q = limiter(es_dsl_search, webob.multidict.MultiDict({})).to_dict()
        assert q["size"] == LIMIT_DEFAULT

    @pytest.fixture
    def search(self, search):
        # augment the base search fixture with the modifier under test
        search.append_modifier(query.Limiter())
        return search
class TestKeyValueMatcher(object):
    """Tests for query.KeyValueMatcher: repeated key/value params are AND'd."""

    def test_ands_multiple_key_values(self, Annotation, search):
        root = Annotation().id
        Annotation()  # unrelated annotation that must not match
        first_reply = Annotation(references=[root]).id
        nested_reply = Annotation(references=[root, first_reply]).id
        # both 'references' values must match, so only the nested reply does
        params = webob.multidict.MultiDict(
            [("references", root), ("references", first_reply)]
        )
        result = search.run(params)
        assert result.annotation_ids == [nested_reply]

    @pytest.fixture
    def search(self, search):
        search.append_modifier(query.KeyValueMatcher())
        return search
class TestSorter(object):
    """Tests for query.Sorter: sort field/order and 'search_after' paging."""

    @pytest.mark.parametrize(
        "sort_key,order,expected_order",
        [
            # Sort supports "updated" and "created" fields.
            ("updated", "desc", [1, 0, 2]),
            ("updated", "asc", [2, 0, 1]),
            ("created", "desc", [2, 0, 1]),
            ("created", "asc", [1, 0, 2]),
            ("group", "asc", [2, 0, 1]),
            ("id", "asc", [0, 2, 1]),
            ("user", "asc", [2, 0, 1]),
            # Default sort order should be descending.
            ("updated", None, [1, 0, 2]),
            # Default sort field should be "updated".
            (None, "asc", [2, 0, 1]),
        ],
    )
    def test_it_sorts_annotations(
        self, Annotation, search, sort_key, order, expected_order
    ):
        dt = datetime.datetime
        # nb. Test annotations have a different ordering for updated vs created
        # and creation order is different than updated/created asc/desc.
        ann_ids = [
            Annotation(
                updated=dt(2017, 1, 1),
                groupid="12345",
                userid="acct:foo@auth1",
                id="1",
                created=dt(2017, 1, 1),
            ).id,
            Annotation(
                updated=dt(2018, 1, 1),
                groupid="12347",
                userid="acct:foo@auth2",
                id="9",
                created=dt(2016, 1, 1),
            ).id,
            Annotation(
                updated=dt(2016, 1, 1),
                groupid="12342",
                userid="acct:boo@auth1",
                id="2",
                created=dt(2018, 1, 1),
            ).id,
        ]
        params = webob.multidict.MultiDict({})
        if sort_key:
            params["sort"] = sort_key
        if order:
            params["order"] = order
        result = search.run(params)
        # map returned ids back to creation indices to compare orderings
        actual_order = [ann_ids.index(id_) for id_ in result.annotation_ids]
        assert actual_order == expected_order

    def test_incomplete_date_defaults_to_min_datetime_values(
        self, es_dsl_search, pyramid_request
    ):
        """
        The default date should be:
        1970, 1st month, 1st day, 0 hrs, 0 min, 0 sec, 0 ms
        """
        sorter = query.Sorter()
        params = {"search_after": "2018"}
        q = sorter(es_dsl_search, params).to_dict()
        # 2018-01-01T00:00:00 UTC expressed in epoch milliseconds
        assert q["search_after"] == [1514764800000.0]

    def test_it_ignores_unknown_sort_fields(self, search):
        # must not raise
        search.run(webob.multidict.MultiDict({"sort": "no_such_field"}))

    @pytest.mark.parametrize(
        "date,expected",
        [
            ("1514773561300", [2]),
            ("2018-01-01T02:26:01.03", [2]),
            ("2018-01-01T02:26:01.03+00:00", [2]),
            ("2018-01-01T02:26:01.037224+00:00", [2]),
            ("2017-01", [1, 2]),
            ("2017", [1, 2]),
            ("2018-01-01", [1, 2]),
        ],
    )
    def test_it_finds_all_annotations_after_date(
        self, search, Annotation, date, expected
    ):
        dt = datetime.datetime
        ann_ids = [
            Annotation(updated=dt(2017, 1, 1), created=dt(2017, 1, 1)).id,
            Annotation(updated=dt(2018, 1, 1, 2, 26, 1), created=dt(2016, 1, 1)).id,
            Annotation(
                updated=dt(2018, 1, 1, 2, 26, 1, 500000), created=dt(2016, 1, 1)
            ).id,
            Annotation(updated=dt(2016, 1, 1), created=dt(2018, 1, 1)).id,
        ]
        result = search.run(
            webob.multidict.MultiDict({"search_after": date, "order": "asc"})
        )
        assert sorted(result.annotation_ids) == sorted(
            [ann_ids[idx] for idx in expected]
        )

    def test_it_finds_all_annotations_after_id(self, search, Annotation):
        ann_ids = sorted(
            [
                str(Annotation(id="09").id),
                str(Annotation(id="11").id),
                str(Annotation(id="02").id),
            ]
        )
        result = search.run(
            webob.multidict.MultiDict(
                {"search_after": ann_ids[1], "sort": "id", "order": "asc"}
            )
        )
        assert result.annotation_ids == [ann_ids[2]]

    def test_it_ignores_search_after_if_invalid_date_format(self, search, Annotation):
        dt = datetime.datetime
        ann_ids = [
            Annotation(updated=dt(2016, 1, 1), created=dt(2018, 1, 1)).id,
            Annotation(updated=dt(2017, 1, 1), created=dt(2017, 1, 1)).id,
            Annotation(updated=dt(2018, 1, 1, 2, 26, 1), created=dt(2016, 1, 1)).id,
        ]
        result = search.run(
            webob.multidict.MultiDict({"search_after": "invalid_date", "order": "asc"})
        )
        assert result.annotation_ids == ann_ids
class TestTopLevelAnnotationsFilter(object):
    """Tests for query.TopLevelAnnotationsFilter: replies are excluded."""

    def test_it_filters_out_replies_but_leaves_annotations_in(self, Annotation, search):
        top_level = Annotation()
        Annotation(references=[top_level.id])  # a reply; must be filtered out
        result = search.run(webob.multidict.MultiDict({}))
        assert result.annotation_ids == [top_level.id]

    @pytest.fixture
    def search(self, search):
        search.append_modifier(query.TopLevelAnnotationsFilter())
        return search
class TestAuthorityFilter(object):
    """Tests for query.AuthorityFilter pinned to the 'auth1' authority."""

    def test_it_filters_out_non_matching_authorities(self, Annotation, search):
        expected = [
            Annotation(userid="acct:foo@auth1").id,
            Annotation(userid="acct:bar@auth1").id,
        ]
        # annotations on other authorities must not show up
        Annotation(userid="acct:bat@auth2")
        Annotation(userid="acct:bar@auth3")
        result = search.run(webob.multidict.MultiDict({}))
        assert sorted(result.annotation_ids) == sorted(expected)

    @pytest.fixture
    def search(self, search):
        search.append_modifier(query.AuthorityFilter("auth1"))
        return search
class TestAuthFilter(object):
    """Tests for query.AuthFilter: visibility of private vs shared
    annotations for logged-out and logged-in users."""

    def test_logged_out_user_can_not_see_private_annotations(self, search, Annotation):
        # private by default
        Annotation()
        Annotation()
        result = search.run(webob.multidict.MultiDict({}))
        assert not result.annotation_ids

    def test_logged_out_user_can_see_shared_annotations(self, search, Annotation):
        shared_ids = [Annotation(shared=True).id, Annotation(shared=True).id]
        result = search.run(webob.multidict.MultiDict({}))
        assert sorted(result.annotation_ids) == sorted(shared_ids)

    def test_logged_in_user_can_only_see_their_private_annotations(
        self, search, pyramid_config, Annotation
    ):
        userid = "acct:bar@auth2"
        pyramid_config.testing_securitypolicy(userid)
        # Make a private annotation from a different user.
        Annotation(userid="acct:foo@auth2").id
        users_private_ids = [Annotation(userid=userid).id, Annotation(userid=userid).id]
        result = search.run(webob.multidict.MultiDict({}))
        assert sorted(result.annotation_ids) == sorted(users_private_ids)

    def test_logged_in_user_can_see_shared_annotations(
        self, search, pyramid_config, Annotation
    ):
        userid = "acct:bar@auth2"
        pyramid_config.testing_securitypolicy(userid)
        shared_ids = [
            Annotation(userid="acct:foo@auth2", shared=True).id,
            Annotation(userid=userid, shared=True).id,
        ]
        result = search.run(webob.multidict.MultiDict({}))
        assert sorted(result.annotation_ids) == sorted(shared_ids)

    @pytest.fixture
    def search(self, search, pyramid_request):
        search.append_modifier(query.AuthFilter(pyramid_request))
        return search
class TestGroupFilter(object):
    """Tests for query.GroupFilter: the 'group' param restricts by pubid."""

    def test_matches_only_annotations_from_specified_group(
        self, search, Annotation, group
    ):
        # annotations in other groups must not match
        Annotation(groupid="group2")
        Annotation(groupid="group3")
        expected = [
            Annotation(groupid=group.pubid).id,
            Annotation(groupid=group.pubid).id,
        ]
        result = search.run(webob.multidict.MultiDict({"group": group.pubid}))
        assert sorted(result.annotation_ids) == sorted(expected)

    @pytest.fixture
    def search(self, search):
        search.append_modifier(query.GroupFilter())
        return search

    @pytest.fixture
    def group(self, factories):
        return factories.OpenGroup(name="group1", pubid="group1id")
class TestGroupAuthFilter(object):
    """Tests for query.GroupAuthFilter: results are limited to groups the
    group service says the user can read."""

    def test_does_not_return_annotations_if_group_not_readable_by_user(
        self, search, Annotation, group_service
    ):
        group_service.groupids_readable_by.return_value = []
        Annotation(groupid="group2").id
        Annotation(groupid="group1").id
        Annotation(groupid="group1").id
        result = search.run(webob.multidict.MultiDict({}))
        assert not result.annotation_ids

    def test_returns_annotations_if_group_readable_by_user(
        self, search, Annotation, group_service
    ):
        group_service.groupids_readable_by.return_value = ["group1"]
        # readable-group membership wins; 'shared' alone is not enough here
        Annotation(groupid="group2", shared=True).id
        expected_ids = [
            Annotation(groupid="group1").id,
            Annotation(groupid="group1").id,
        ]
        result = search.run(webob.multidict.MultiDict({}))
        assert sorted(result.annotation_ids) == sorted(expected_ids)

    @pytest.fixture
    def search(self, search, pyramid_request):
        search.append_modifier(query.GroupAuthFilter(pyramid_request))
        return search
class TestUserFilter(object):
    """Tests for query.UserFilter: the 'user' param matches by username
    (case-insensitive), optionally with an '@authority' suffix, and
    multiple values are OR'd."""

    def test_filters_annotations_by_user(self, search, Annotation):
        Annotation(userid="acct:foo@auth2", shared=True)
        expected_ids = [Annotation(userid="acct:bar@auth2", shared=True).id]
        result = search.run(webob.multidict.MultiDict({"user": "bar"}))
        assert sorted(result.annotation_ids) == sorted(expected_ids)

    def test_filter_is_case_insensitive(self, search, Annotation):
        ann_id = Annotation(userid="acct:bob@example", shared=True).id
        result = search.run(webob.multidict.MultiDict({"user": "BOB"}))
        assert result.annotation_ids == [ann_id]

    def test_filters_annotations_by_multiple_users(self, search, Annotation):
        Annotation(userid="acct:foo@auth2", shared=True)
        expected_ids = [
            Annotation(userid="acct:bar@auth2", shared=True).id,
            Annotation(userid="acct:baz@auth2", shared=True).id,
        ]
        params = webob.multidict.MultiDict()
        params.add("user", "bar")
        params.add("user", "baz")
        result = search.run(params)
        assert sorted(result.annotation_ids) == sorted(expected_ids)

    def test_filters_annotations_by_user_and_authority(self, search, Annotation):
        Annotation(userid="acct:foo@auth2", shared=True)
        expected_ids = [Annotation(userid="acct:foo@auth3", shared=True).id]
        result = search.run(webob.multidict.MultiDict({"user": "foo@auth3"}))
        assert sorted(result.annotation_ids) == sorted(expected_ids)

    @pytest.fixture
    def search(self, search):
        search.append_modifier(query.UserFilter())
        return search
class TestUriCombinedWildcardFilter(object):
# TODO - Explicit test of URL normalization (ie. that search normalizes input
# URL using `h.util.uri.normalize` and queries with that).
@pytest.mark.parametrize("field", ("uri", "url"))
def test_filters_by_field(self, Annotation, get_search, field):
search = get_search()
Annotation(target_uri="https://foo.com")
expected_ids = [Annotation(target_uri="https://bar.com").id]
result = search.run(webob.multidict.MultiDict({field: "https://bar.com"}))
assert sorted(result.annotation_ids) == sorted(expected_ids)
def test_filters_on_whole_url(self, Annotation, get_search):
search = get_search()
Annotation(target_uri="http://bar.com/foo")
expected_ids = [
Annotation(target_uri="http://bar.com").id,
Annotation(target_uri="http://bar.com/").id,
]
result = search.run(webob.multidict.MultiDict({"url": "http://bar.com"}))
assert sorted(result.annotation_ids) == sorted(expected_ids)
def test_filters_aliases_http_and_https(self, Annotation, get_search):
search = get_search()
expected_ids = [
Annotation(target_uri="http://bar.com").id,
Annotation(target_uri="https://bar.com").id,
]
result = search.run(webob.multidict.MultiDict({"url": "http://bar.com"}))
assert sorted(result.annotation_ids) == sorted(expected_ids)
def test_returns_all_annotations_with_equivalent_uris(
self, Annotation, get_search, storage
):
search = get_search()
# Mark all these uri's as equivalent uri's.
storage.expand_uri.side_effect = lambda _, x: [
"urn:x-pdf:1234",
"file:///Users/june/article.pdf",
"doi:10.1.1/1234",
"http://reading.com/x-pdf",
]
Annotation(target_uri="urn:x-pdf:1235")
Annotation(target_uri="file:///Users/jane/article.pdf").id
expected_ids = [
Annotation(target_uri="urn:x-pdf:1234").id,
Annotation(target_uri="doi:10.1.1/1234").id,
Annotation(target_uri="http://reading.com/x-pdf").id,
Annotation(target_uri="file:///Users/june/article.pdf").id,
]
params = webob.multidict.MultiDict()
params.add("url", "urn:x-pdf:1234")
result = search.run(params)
assert sorted(result.annotation_ids) == sorted(expected_ids)
def test_ors_multiple_url_uris(self, Annotation, get_search):
search = get_search()
Annotation(target_uri="http://baz.com")
Annotation(target_uri="https://www.foo.com")
expected_ids = [
Annotation(target_uri="https://bar.com").id,
Annotation(target_uri="http://bat.com").id,
Annotation(target_uri="http://foo.com").id,
Annotation(target_uri="https://foo.com/bar").id,
]
params = webob.multidict.MultiDict()
params.add("uri", "http://bat.com")
params.add("uri", "https://bar.com")
params.add("url", "http://foo.com")
params.add("url", "https://foo.com/bar")
result = search.run(params)
assert sorted(result.annotation_ids) == sorted(expected_ids)
@pytest.mark.parametrize(
"params,expected_ann_indexes,separate_keys",
[
# Test with separate_keys = True (aka uri/url are exact match & wildcard_uri is wildcard match.)
(
webob.multidict.MultiDict([("wildcard_uri", "http://bar.com/baz_45")]),
[2, 3],
True,
),
(
webob.multidict.MultiDict(
[
("uri", "urn:x-pdf:a34480f5dbed8c4482a3a921e0196d2a"),
("wildcard_uri", "http://bar.com/baz*45"),
]
),
[2, 3, 4, 5],
True,
),
(
webob.multidict.MultiDict(
[
("uri", "urn:x-pdf:a34480f5dbed8c4482a3a921e0196d2a"),
("url", "http://bar.com/baz*45"),
]
),
[3, 5],
True,
),
# Test with separate_keys = False (aka uri/url contain both exact & wildcard matches.)
(
webob.multidict.MultiDict([("uri", "http://bar.com/baz-45_")]),
[1],
False,
),
(
webob.multidict.MultiDict([("uri", "http://bar.com/*")]),
[0, 1, 2, 3, 4],
False,
),
(
webob.multidict.MultiDict(
[
("uri", "urn:x-pdf:a34480f5dbed8c4482a3a921e0196d2a"),
("uri", "http://bar.com/baz*45"),
]
),
[2, 3, 4, 5],
False,
),
],
)
def test_matches(
self, get_search, Annotation, params, expected_ann_indexes, separate_keys
):
"""
All uri matches (wildcard and exact) are OR'd.
"""
search = get_search(separate_keys)
ann_ids = [
Annotation(target_uri="http://bar.com?foo").id,
Annotation(target_uri="http://bar.com/baz-457").id,
Annotation(target_uri="http://bar.com/baz-45").id,
Annotation(target_uri="http://bar.com/baz*45").id,
Annotation(target_uri="http://bar.com/baz/*/45").id,
| |
<gh_stars>0
import os
import datetime
import pyodbc
import pymongo
import pandas as pd
import altair as alt
import streamlit as st
from collections import Counter
## streamlit page config
st.set_page_config(page_title='LS Dashboard monitoring', page_icon='😎', layout='wide', initial_sidebar_state='expanded', menu_items={'Get Help': None, 'Report a bug': None, 'About': None})
## connection string
# db LSDB
# MongoDB holds the SQL Server connection documents; credentials come from streamlit secrets.
client = pymongo.MongoClient(**st.secrets["mongo"])
# get SQL server auth
# @st.cache(ttl=600)
def get_data():
    """Return all connection documents from the logshipping.lsdb collection.

    The cursor is materialized into a list so the result is hashable for
    st.cache.
    """
    collection = client.logshipping.lsdb
    return list(collection.find())
# Unpack the third connection document; relies on the document's key order —
# NOTE(review): fragile, confirm the schema of logshipping.lsdb documents.
_id, driver, server, instance_name, ls_database, trusted_connection = get_data()[2].values()
# sqlcmd argument fragment: "\<instance> -d <db> -E" (Windows/trusted login)
windows_login = f'''\{instance_name} -d {ls_database} -E '''
conn_str = (f'Driver={driver};Server={server}\{instance_name};Database={ls_database};Trusted_Connection={trusted_connection};')
# db rahasia
conn_str2 = (f'Driver=SQL Server;Server={server}\{instance_name};Database=rahasia;Trusted_Connection=yes;')
# df 2 csv
@st.cache
def convert_df(df):
    """Serialize *df* to CSV and return it as UTF-8 encoded bytes."""
    # IMPORTANT: Cache the conversion to prevent computation on every rerun
    return df.to_csv().encode('utf-8')
# return dataframe
def read2(conn_str, query):
    """Run *query* over a new pyodbc connection and return a DataFrame.

    Builds the frame from raw cursor rows (column names taken from
    ``cursor.description``) instead of ``pandas.read_sql_query``.

    :param conn_str: pyodbc connection string
    :param query: SQL text to execute
    :return: pandas.DataFrame with one row per result row
    """
    cnxn = pyodbc.connect(conn_str)
    try:
        cursor = cnxn.cursor()
        cursor.execute(query)
        columns = [column[0] for column in cursor.description]
        results = [list(row[:len(columns)]) for row in cursor.fetchall()]
    finally:
        # The original leaked the connection when execute()/fetchall() raised.
        cnxn.close()
    return pd.DataFrame(results, columns=columns)
# return dataframe
def read(conn_str, query):
    """Run *query* via pandas and return the result as a DataFrame.

    :param conn_str: pyodbc connection string
    :param query: SQL text to execute
    """
    cnxn = pyodbc.connect(conn_str)
    try:
        return pd.read_sql_query(query, cnxn)
    finally:
        # The original never closed the connection (resource leak on every call).
        cnxn.close()
# void function
def write(conn_str, query):
    """Execute *query* (DML/DDL) and commit; returns nothing."""
    cnxn = pyodbc.connect(conn_str)
    with cnxn:  # commits on success, rolls back on error
        cnxn.cursor().execute(query)
    cnxn.close()
# format date time column
def format_datetime(x):
    """Render a datetime as ``MM/DD/YYYY HH:MM:SS``."""
    return f'{x:%m/%d/%Y %H:%M:%S}'
# return folder size in MB
@st.cache(ttl=600)
def foldersizeMB(path):
    """Return the combined size, in megabytes, of the entries directly in *path*.

    Non-recursive: files inside subdirectories are not included.

    :param path: directory to scan
    :return: size in MB (float)
    """
    # DirEntry.stat() reuses scandir's cached data, avoiding an extra syscall
    # per entry compared to os.stat(entry).
    size = sum(entry.stat().st_size for entry in os.scandir(path))
    # bytes -> MB. The previous /1000 actually produced kilobytes despite the
    # function's name and the "Backup Folder Size (MB)" column it feeds.
    return size / 1e6
## Pengambilan data (fetch the monitoring views from the log-shipping database)
backupRestoreReport = read(conn_str=conn_str, query='SELECT * FROM PMAG_BackupRestoreReport') # success backup view
# NOTE(review): rows with duration >= 2s are discarded here — presumably to drop
# outliers from the charts; confirm this is intentional.
backupRestoreReport = backupRestoreReport[backupRestoreReport['Duration (millisecond)'] < 2000]
failBackupRestoreReport = read(conn_str=conn_str, query='SELECT * FROM PMAG_FailBackupRestoreReport') # fail backup view
restoreReport = read(conn_str=conn_str, query='SELECT * FROM PMAG_LogRestoreHistory ORDER BY RestoreTime') # for calculate last restore time and first restore time
activeSecondary = read(conn_str=conn_str, query="SELECT * FROM PMAG_ActiveSecondaries") # view recent active sec. per db
secondariesFolder = read(conn_str=conn_str, query="SELECT DISTINCT ServerInstance, CommonFolder FROM PMAG_Secondaries") # list of secondary instances
database = read(conn_str=conn_str, query="SELECT DatabaseName FROM PMAG_Databases") # lis of db
db_instance = pd.merge(activeSecondary, secondariesFolder, on=['ServerInstance']) # list db join list of instance folder
# most recent backup time per database (active secondaries joined with max(Backup Time))
recent_backup_per_db = pd.merge(activeSecondary, backupRestoreReport.groupby(["Database"])["Backup Time"].max(), right_index=True, left_on='DatabaseName')
refresh = datetime.datetime.now().strftime("%m/%d/%Y %H:%M:%S")
## sorting
backupRestoreReport = backupRestoreReport.sort_values(by='Backup Time').reset_index(drop=True)
failBackupRestoreReport = failBackupRestoreReport.sort_values(by='ID').reset_index(drop=True)
## Side Panel: filters plus manual backup/cleanup/insert tools.
with st.sidebar:
    # filter data
    with st.expander("⚙️ Apply filter to data", expanded=True):
        # form input
        col1, col2, col3 = st.columns([4, 4, 4])
        database_name = st.multiselect("Database", database['DatabaseName'].unique(), default=database['DatabaseName'].unique())
        # instance names without the leading '.\' prefix, for display
        serverInstanceList = [w.replace('.\\', '') for w in secondariesFolder['ServerInstance'].unique()]
        # backup_instances = st.multiselect("Backup Instances", secondariesFolder['ServerInstance'].unique(), default=secondariesFolder['ServerInstance'].unique())
        backup_instances = st.multiselect("Backup Instances", serverInstanceList, default=serverInstanceList)
        # restore the '.\' prefix so values match the ServerInstance column again
        backup_instances = ['.\\' + w for w in backup_instances]
        date = st.date_input("Backup Date", value=[datetime.datetime.now() - datetime.timedelta(days=7), datetime.datetime.now()], max_value=datetime.datetime.now(), help="YYYY/MM/DD")
        # filter rules: "a filter is applied" = any widget differs from its default
        FilterApplied = (Counter(database_name) != Counter(database['DatabaseName'].unique())) \
            or (Counter(backup_instances) != Counter(secondariesFolder['ServerInstance'].unique())) \
            or (date != (datetime.date.today() - datetime.timedelta(days=7), datetime.date.today()))
        # restore filter button
        if FilterApplied:
            if st.button('restore filter'):
                database_name = database['DatabaseName'].unique()
                backup_instances = secondariesFolder['ServerInstance'].unique()
                date = (datetime.date.today() - datetime.timedelta(days=7), datetime.date.today())
        # filter df backupRestoreReport
        backupRestoreReport = backupRestoreReport[(backupRestoreReport['Database'].isin(database_name))] # filter db name
        backupRestoreReport = backupRestoreReport[(backupRestoreReport['Backup Server'].isin(backup_instances))] # filter backup instances
        # filter date
        backupRestoreReport['Backup Date'] = pd.to_datetime(backupRestoreReport['Backup Date']).dt.date
        backupRestoreReport = backupRestoreReport[(backupRestoreReport['Backup Date'] >= date[0]) & (backupRestoreReport['Backup Date'] <= date[1])]
        # filter df failBackupRestoreReport
        failBackupRestoreReport = failBackupRestoreReport[(failBackupRestoreReport['Database'].isin(database_name))] # filter db name
        failBackupRestoreReport = failBackupRestoreReport[(failBackupRestoreReport['Backup Server'].isin(backup_instances))] # filter backup instances
        # filter date
        failBackupRestoreReport['Backup Date'] = pd.to_datetime(failBackupRestoreReport['Backup Date']).dt.date
        failBackupRestoreReport = failBackupRestoreReport[(failBackupRestoreReport['Backup Date'] >= date[0]) & (failBackupRestoreReport['Backup Date'] <= date[1])]
        # filter list of instance
        secondariesFolder = secondariesFolder[(secondariesFolder['ServerInstance'].isin(backup_instances))]
        # filter list of db
        activeSecondary = activeSecondary[(activeSecondary['DatabaseName'].isin(database_name))]
    # initiate new backup
    with st.expander("Initiate new backup"):
        with st.form(key='newbackup', clear_on_submit=True):
            db = st.text_input("Database", placeholder="exist database name", help=f'DB must exist in {server}/{instance_name} instance')
            backup_instance = st.text_input("Backup instance", value="SQLSEC", help="makesure that instance connceted as server object")
            instance_commonfolder = st.text_input("Instance Folder", value="D:\SQLBackupsSEC\\", help="for storing backup & standby files")
            instance_datafolder = st.text_input("Data Folder", value=f'C:\Program Files\Microsoft SQL Server\MSSQL15.{backup_instance}\MSSQL\DATA\\')
            instance_LogFolder = st.text_input("Log Folder", value=f'C:\Program Files\Microsoft SQL Server\MSSQL15.{backup_instance}\MSSQL\DATA\\')
            submit = st.form_submit_button('Initialize backup')
            if submit:
                # cnxn2 = pyodbc.connect(conn_str_master)
                # cursor2 = cnxn2.cursor()
                # with cnxn2:
                #     cursor2.execute(f'''
                #     DECLARE
                #         @s NVARCHAR(128) = N'.\{backup_instance}',
                #         @t NVARCHAR(128) = N'true';
                #     EXEC [master].dbo.sp_addlinkedserver @server = @s, @srvproduct = N'SQL Server';
                #     EXEC [master].dbo.sp_addlinkedsrvlogin @rmtsrvname = @s, @useself = @t;
                #     EXEC [master].dbo.sp_serveroption @server = @s, @optname = N'collation compatible', @optvalue = @t;
                #     EXEC [master].dbo.sp_serveroption @server = @s, @optname = N'data access', @optvalue = @t;
                #     EXEC [master].dbo.sp_serveroption @server = @s, @optname = N'rpc', @optvalue = @t;
                #     EXEC [master].dbo.sp_serveroption @server = @s, @optname = N'rpc out', @optvalue = @t;
                #     ''')
                # SECURITY: free-text form values are interpolated directly into
                # SQL below — vulnerable to SQL injection; should be parameterized.
                cnxn = pyodbc.connect(conn_str)
                cursor = cnxn.cursor()
                with cnxn:
                    # set recovery full
                    cursor.execute(f'''
                        ALTER DATABASE {db} SET RECOVERY FULL;
                    ''')
                    # insert to PMAG_Databases
                    cursor.execute(f'''
                        INSERT dbo.PMAG_Databases(DatabaseName) SELECT N'{db}';
                    ''')
                    # insert to PMAG_Secondaries
                    cursor.execute(f'''
                        INSERT dbo.PMAG_Secondaries( DatabaseName, ServerInstance, CommonFolder, DataFolder, LogFolder, StandByLocation)
                        SELECT DatabaseName = N'{db}', ServerInstance = name, CommonFolder = '{instance_commonfolder}', DataFolder = '{instance_datafolder}', LogFolder = '{instance_LogFolder}', StandByLocation = '{instance_commonfolder}'
                        FROM sys.servers
                        WHERE name LIKE N'.\{backup_instance}';
                    ''')
                # SECURITY: shell command built from user input (injection risk)
                os.system(f'''
sqlcmd -S {server}{windows_login}-Q "EXEC dbo.PMAG_Backup @dbname = N'{db}', @type = 'bak', @init = 1;"
''')
                # success
                st.success('backup has been started')
    # manage backup manualy
    with st.expander("Trigger Backup and Delete", expanded=True):
        # go backup restore now
        st.subheader("run log shipping now")
        with st.form(key='lsnow', clear_on_submit=True):
            col1, col2 = st.columns([3, 2])
            db = col1.selectbox("Database", database['DatabaseName'].unique())
            tipe = col2.selectbox("backup type", options=['trn'])
            submit = st.form_submit_button('📬 start')
            if submit:
                os.system(f'''
sqlcmd -S {server}{windows_login}-Q "EXEC dbo.PMAG_Backup @dbname = N'{db}', @type = '{tipe}';"
''')
                # st.write(f'''sqlcmd -S {server}{windows_login}-Q "EXEC dbo.PMAG_Backup @dbname = N'{db}', @type = '{tipe}';"''')
                st.success('backup has been started')
        # go clear history
        st.subheader("Clear backup history")
        with st.form(key='clear', clear_on_submit=True):
            col1, col2 = st.columns([1.3, 1])
            db = col1.selectbox("Database", database['DatabaseName'].unique())
            dayold = col2.selectbox("days older than", [0, 1, 5, 7, 14, 30, 180, 360], index=2)
            # st.dataframe(db_instance['CommonFolder'][db_instance['DatabaseName'] == db])
            path = db_instance['CommonFolder'][db_instance['DatabaseName'] == db].item()
            delete_trn = st.checkbox("delete .trn files?", value=False, help='option for delete .trn files')
            submit = st.form_submit_button('🗑️ clear history')
            if submit:
                # os.system(f'cmd /c "clear-history.bat {dayold} {db} {path}"')
                os.system(f'''
sqlcmd -S {server}{windows_login} -Q "EXEC dbo.PMAG_CleanupHistory @dbname = N'{db}', @DaysOld = {dayold};"
''')
                if delete_trn:
                    # path is usually a scalar (.item() above); the try handles a
                    # list-like fallback — NOTE(review): confirm when that occurs.
                    try:
                        for i in path.to_list():
                            os.system(f'''
forfiles /P {i} /S /M {db}*.trn* /D -{dayold} /C "cmd /c del @path"
''')
                    except:
                        os.system(f'''
forfiles /P {path} /S /M {db}*.trn* /D -{dayold} /C "cmd /c del @path"
''')
                st.success('deleted')
    # Insert tools (just for test)
    with st.expander("Add new record to DB rahasia"):
        ## add LastUpdate record
        st.subheader("add one record to table LastUpdate")
        with st.form(key='LastUpdate', clear_on_submit=True):
            submit = st.form_submit_button('⏱️ add current time')
            if submit:
                write(conn_str=conn_str2, query=f"INSERT LastUpdate(EventTime) SELECT SYSDATETIME()")
                st.success('new record has been added')
        ## add mahasiswa record
        st.subheader("add one record to table mahasiswa")
        with st.form(key='mahasiswa', clear_on_submit=True):
            nrp = st.number_input("nrp", min_value=0, step=1)
            nama = st.text_input("nama", placeholder='nama')
            submit = st.form_submit_button('+1 submit')
            if submit:
                # SECURITY: user input interpolated into SQL — injection risk.
                write(conn_str=conn_str2, query=f"INSERT mahasiswa(nrp, nama) SELECT '{nrp}', '{nama}'")
                st.success('new record has been added')
## read from standby server
## proses data
# Newest/oldest restore timestamps feed the dashboard metrics below.  When no
# restore history exists yet, show an error plus a form to kick off the first
# log-shipping run, then stop the script.
try:
    last = restoreReport['RestoreTime'].tail(1).item()
    first = restoreReport['RestoreTime'].head(1).item()
except Exception:
    # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt (and
    # streamlit's own control-flow exceptions) are no longer swallowed.
    st.exception(RuntimeError('Buat minimal satu log shipping backup'))
    st.subheader("run log shipping now")
    with st.form(key='lsnow2', clear_on_submit=True):
        col1, col2 = st.columns([3, 2])
        db = col1.selectbox("Database", database['DatabaseName'].unique())
        tipe = col2.selectbox("backup type", options=['trn'])
        submit = st.form_submit_button('📬 start')
        if submit:
            # db/tipe come from fixed selectboxes, limiting shell-injection surface.
            os.system(f'''
sqlcmd -S {server}{windows_login} -Q "EXEC dbo.PMAG_Backup @dbname = N'{db}', @type = '{tipe}';"
''')
            st.success('backup has been started')
    # Nothing below can be rendered without restore history.
    st.stop()
# Aggregate metrics and presentation columns for the main page.
n_backup = backupRestoreReport.shape[0]
n_fail = failBackupRestoreReport.shape[0]
avg = backupRestoreReport['Duration (millisecond)'].mean() / 1000  # seconds
max = backupRestoreReport['Duration (millisecond)'].max() / 1000  # seconds; NOTE(review): shadows builtin max()
secondariesFolder['Backup Folder Size (MB)'] = secondariesFolder['CommonFolder'].apply(foldersizeMB)
secondariesFolder.rename(columns={
    'ServerInstance': 'Backup Instance',
    'CommonFolder': 'Instance Folder'
}, inplace=True)
# minutes elapsed since the most recent backup, rendered as an int
recent_backup_per_db['Last restore (minutes ago)'] = (datetime.datetime.now() - recent_backup_per_db['Backup Time']) / pd.Timedelta(minutes=1)
recent_backup_per_db['Last restore (minutes ago)'] = recent_backup_per_db['Last restore (minutes ago)'].map('{:.0f}'.format).map(int)
# NOTE(review): the next line is a no-op self-assignment
recent_backup_per_db['Last restore (minutes ago)'] = recent_backup_per_db['Last restore (minutes ago)']
recent_backup_per_db.rename(columns={
    'DatabaseName': 'Database',
    'ServerInstance': 'Standby Instance'
}, inplace=True)
backupRestoreReport['Backup Time'] = backupRestoreReport['Backup Time'].apply(lambda x: x.strftime("%m/%d/%Y %H:%M:%S"))
## Title / page header
with st.container():
    st.write("")
    st.title("📦 Log Shipping Backup Restore Report")
    col1, col2, col3, col4 = st.columns(4)
    # col1.caption(f'⏰ Data From Last **{-1 * ((first - datetime.datetime.now()).days)}** Days')
    col1.caption("🔄️ Last refresh __" + refresh + "__")
    col2.caption(f'☁️instance: __{server}\{instance_name}__')
    col3.caption(f'🛢️ Database: __{ls_database}__')
    col4.caption(f'🕹️ press __ R __ for manual refresh')
st.markdown('---')
## filter data
if FilterApplied:
    st.info('filter applied')
## metric
with st.container():
col1, col2, col3, col4, col5, col6, col7 = st.columns(7)
# banyaknya backup
col1.metric("# Success Backup", n_backup, 'backups')
# jml backup yng tdk restore
col2.metric("# Pending or Fail Restore", n_fail, 'backups')
# kpn backup terakhir
col3.metric("Last Backup", f'{((datetime.datetime.now() - last).total_seconds() / 60):.1f} min', 'ago')
# | |
<filename>tests/test_apply_process.py<gh_stars>10-100
import datetime
import math
from typing import List, Sequence
from unittest import TestCase

import geopyspark as gps
import numpy as np
import pytest
import pytz
from geopyspark.geotrellis import (SpaceTimeKey, Tile, _convert_to_unix_time)
from geopyspark.geotrellis.constants import LayerType
from geopyspark.geotrellis.layer import TiledRasterLayer
from openeo_driver.errors import OpenEOApiException
from openeo_driver.utils import EvalEnv
from pyspark import SparkContext
from shapely.geometry import Point

from openeogeotrellis.geopysparkdatacube import GeopysparkDataCube, GeopysparkCubeMetadata
from openeogeotrellis.geotrellis_tile_processgraph_visitor import GeotrellisTileProcessGraphVisitor
from openeogeotrellis.service_registry import InMemoryServiceRegistry
def _build_metadata(bands: Sequence[str] = ("B01", "B02")) -> GeopysparkCubeMetadata:
    """Helper to build a metadata instance for the given band names.

    The default is a tuple rather than a list to avoid the shared
    mutable-default-argument pitfall.
    """
    return GeopysparkCubeMetadata({
        "cube:dimensions": {
            # list() so the metadata value is a list regardless of input type
            "bands": {"type": "bands", "values": list(bands)}
        },
        "summaries": {
            "eo:bands": [{"name": b, "common_name": "common" + b} for b in bands]
        }
    })
class TestApplyProcess(TestCase):
first = np.zeros((1, 4, 4))
first.fill(10)
second = np.zeros((1, 4, 4))
second.fill(5)
extent = {'xmin': 0.0, 'ymin': 0.0, 'xmax': 4.0, 'ymax': 4.0}
layout = {'layoutCols': 1, 'layoutRows': 1, 'tileCols': 4, 'tileRows': 4}
now = datetime.datetime.strptime("2017-09-25T11:37:00Z", '%Y-%m-%dT%H:%M:%SZ').replace(tzinfo=pytz.UTC)
points = [
Point(1.0, -3.0),
Point(2.0, 4.0),
Point(3.0, 3.0),
Point(1.0, -2.0),
Point(-10.0, 15.0)
]
labeled_points = {
'A': points[0],
'B': points[1],
'C': points[2],
'D': points[3],
'E': points[4]
}
expected_spatial_points_list = [
(Point(1.0, -3.0), [1, 2]),
(Point(2.0, 4.0), [1, 2]),
(Point(3.0, 3.0), [1, 2]),
(Point(1.0, -2.0), [1, 2]),
(Point(-10.0, 15.0), None)
]
expected_spacetime_points_list = [
(Point(1.0, -3.0), now, [3]),
(Point(2.0, 4.0), now, [3]),
(Point(3.0, 3.0), now, [3]),
(Point(1.0, -2.0), now, [3]),
(Point(-10.0, 15.0), None, None)
]
openeo_metadata = {
"bands": [
{
"band_id": "red",
"name": "red",
"offset": 0,
"res_m": 10,
"scale": 0.0001,
"type": "int16",
"unit": "1",
"wavelength_nm": 664.5
},
{
"band_id": "nir",
"name": "nir",
"offset": 0,
"res_m": 10,
"scale": 0.0001,
"type": "int16",
"unit": "1",
"wavelength_nm": 835.1
}
],
"_vito": {"accumulo_data_id": "CGS_SENTINEL2_RADIOMETRY_V101"},
"description": "Sentinel 2 Level-2: Bottom-of-atmosphere reflectances in cartographic geometry",
"extent": {
"bottom": 39,
"crs": "EPSG:4326",
"left": -34,
"right": 35,
"top": 71
},
"product_id": "CGS_SENTINEL2_RADIOMETRY_V101",
"time": {
"from": "2016-01-01",
"to": "2019-10-01"
}
}
    def _create_spacetime_layer(self, cells: np.ndarray = None) -> TiledRasterLayer:
        """Build a SPACETIME TiledRasterLayer holding *cells* at each of a 2x2 key grid.

        :param cells: 4-D numpy array (the assert only checks rank; the first
            axis is presumably bands — TODO confirm); nodata value is -1.
        """
        # TODO all these "create_spacetime_layer" functions are duplicated across all tests
        # and better should be moved to some kind of general factory or test fixture
        assert len(cells.shape) == 4
        tile = Tile.from_numpy_array(cells, -1)
        # The same tile at all four spacetime keys (2x2 grid, single instant).
        layer = [(SpaceTimeKey(0, 0, self.now), tile),
                 (SpaceTimeKey(1, 0, self.now), tile),
                 (SpaceTimeKey(0, 1, self.now), tile),
                 (SpaceTimeKey(1, 1, self.now), tile)]
        rdd = SparkContext.getOrCreate().parallelize(layer)
        # GeoTrellis layer metadata: int32 cells with nodata -1, lon/lat CRS.
        metadata = {'cellType': 'int32ud-1',
                    'extent': self.extent,
                    'crs': '+proj=longlat +datum=WGS84 +no_defs ',
                    'bounds': {
                        'minKey': {'col': 0, 'row': 0, 'instant': _convert_to_unix_time(self.now)},
                        'maxKey': {'col': 1, 'row': 1, 'instant': _convert_to_unix_time(self.now)}
                    },
                    'layoutDefinition': {
                        'extent': self.extent,
                        'tileLayout': self.layout
                    }
                    }
        return TiledRasterLayer.from_numpy_rdd(LayerType.SPACETIME, rdd, metadata)
def create_spacetime_layer(self) -> TiledRasterLayer:
cells = np.array([self.first, self.second], dtype='int')
return self._create_spacetime_layer(cells)
def create_spacetime_layer_singleband(self) -> TiledRasterLayer:
cells = np.array([self.first], dtype='int')
return self._create_spacetime_layer(cells)
def test_point_series(self):
input = self.create_spacetime_layer()
imagecollection = GeopysparkDataCube(pyramid=gps.Pyramid({0: input}))
transformed_collection = imagecollection.apply("cos")
for p in self.points[0:3]:
result = transformed_collection.timeseries(p.x, p.y)
print(result)
value = result.popitem()
self.assertEqual(math.cos(10),value[1][0])
self.assertEqual(math.cos(5), value[1][1])
def test_apply_cos(self):
input = self.create_spacetime_layer()
cube = GeopysparkDataCube(pyramid=gps.Pyramid({0: input}))
res = cube.apply("cos")
data = res.pyramid.levels[0].to_spatial_layer().stitch().cells
np.testing.assert_array_almost_equal(data[0, 2:6, 2:6], np.cos(self.first[0]))
np.testing.assert_array_almost_equal(data[1, 2:6, 2:6], np.cos(self.second[0]))
    def test_apply_complex_graph(self):
        """apply() with a multi-node process graph: 5 * sin(x) over every cell."""
        graph = {
            "sin": {
                "arguments": {
                    "x": {
                        "from_argument": "data"
                    }
                },
                "process_id": "sin",
                "result": False
            },
            "multiply": {
                "arguments": {
                    "x": {
                        "from_node": "sin"
                    },
                    "y": 5.0
                },
                "process_id": "multiply",
                "result": True
            }
        }
        input = self.create_spacetime_layer()
        cube = GeopysparkDataCube(gps.Pyramid({0: input}), InMemoryServiceRegistry())
        res = cube.apply(graph)
        data = res.pyramid.levels[0].to_spatial_layer().stitch().cells
        np.testing.assert_array_almost_equal(data[0, 2:6, 2:6], 5.0 * np.sin(self.first[0]))
        np.testing.assert_array_almost_equal(data[1, 2:6, 2:6], 5.0 * np.sin(self.second[0]))
    def test_reduce_bands(self):
        """Reduce over a custom band dimension: (b0 + b1) / (b0 - b1) = 15 / 5 = 3."""
        input = self.create_spacetime_layer()
        input = gps.Pyramid({0: input})
        # Band dimension deliberately named "my_bands" to check non-default names.
        collection_metadata = GeopysparkCubeMetadata({
            "cube:dimensions": {
                "my_bands": {"type": "bands", "values": ["B04", "B08"]},
            }
        })
        imagecollection = GeopysparkDataCube(pyramid=input, metadata=collection_metadata)
        visitor = GeotrellisTileProcessGraphVisitor()
        graph = {
            "sum": {
                "arguments": {
                    "data": {
                        "from_argument": "dimension_data"
                    },
                    "ignore_nodata": True
                },
                "process_id": "sum"
            },
            "subtract": {
                "arguments": {
                    "data": {
                        "from_argument": "dimension_data"
                    }
                },
                "process_id": "subtract"
            },
            "divide": {
                "arguments": {
                    "data": [
                        {
                            "from_node": "sum"
                        },
                        {
                            "from_node": "subtract"
                        }
                    ]
                },
                "process_id": "divide",
                "result": True
            }
        }
        visitor.accept_process_graph(graph)
        stitched = imagecollection.reduce_dimension(dimension='my_bands', reducer=visitor, env=EvalEnv()).pyramid.levels[0].to_spatial_layer().stitch()
        print(stitched)
        self.assertEqual(3.0, stitched.cells[0][0][0])
    def test_reduce_bands_logical_ops(self):
        """not(eq(x, 10)) over the all-10s band reduces to 0 (false)."""
        input = self.create_spacetime_layer_singleband()
        input = gps.Pyramid({0: input})
        imagecollection = GeopysparkDataCube(pyramid=input)
        visitor = GeotrellisTileProcessGraphVisitor()
        graph = {
            "eq": {
                "arguments": {
                    "x": {
                        "from_argument": "data"
                    },
                    "y": 10
                },
                "process_id": "eq",
            },
            "not": {
                "arguments": {
                    "expression": {
                        "from_node": "eq"
                    }
                },
                "process_id": "not",
                "result": True
            }
        }
        visitor.accept_process_graph(graph)
        stitched = imagecollection.reduce_bands(visitor).pyramid.levels[0].to_spatial_layer().stitch()
        print(stitched)
        self.assertEqual(0, stitched.cells[0][0][0])
    def test_apply_if(self):
        """if(gt(x, 7), accept=2.0, reject=x): cells are 10, so gt is true and 2.0 wins."""
        input = self.create_spacetime_layer_singleband()
        input = gps.Pyramid({0: input})
        imagecollection = GeopysparkDataCube(pyramid=input)
        # Node ids are numeric strings on purpose (regression for id handling).
        graph = {
            "6": {
                "arguments": {
                    "reject": {"from_parameter": "x"},
                    "value": {
                        "from_node": "10"
                    },
                    "accept": 2.0
                },
                "process_id": "if",
                "result": True
            },
            "10": {
                "process_id": "gt",
                "arguments": {
                    "x": {
                        "from_parameter": "x"
                    },
                    "y": 7.0
                }
            }
        }
        stitched = imagecollection.apply(graph).pyramid.levels[0].to_spatial_layer().stitch()
        print(stitched)
        self.assertEqual(2.0, stitched.cells[0][0][0])
def test_reduce_bands_comparison_ops(self):
input = self.create_spacetime_layer_singleband()
input = gps.Pyramid({0: input})
imagecollection = GeopysparkDataCube(pyramid=input)
visitor = GeotrellisTileProcessGraphVisitor()
graph = {
"gt": {
"arguments": {
"x": {
"from_argument": "data"
},
"y": 6.0
},
"process_id": "gt",
"result": True
}
}
visitor.accept_process_graph(graph)
stitched = imagecollection.reduce_bands(visitor).pyramid.levels[0].to_spatial_layer().stitch()
print(stitched)
self.assertEqual(1, stitched.cells[0][0][0])
    def test_reduce_bands_arrayelement(self):
        """Band reduce via array_element: (b0 + b1) / (b0 - b1) = 15 / 5 = 3."""
        input = self.create_spacetime_layer()
        input = gps.Pyramid({0: input})
        imagecollection = GeopysparkDataCube(pyramid=input)
        visitor = GeotrellisTileProcessGraphVisitor()
        # Nodes are intentionally listed out of evaluation order; the visitor
        # must resolve from_node references regardless.
        graph = {
            "arrayelement3": {
                "process_id": "array_element",
                "result": False,
                "arguments": {
                    "data": {
                        "from_argument": "data"
                    },
                    "index": 0
                }
            },
            "subtract1": {
                "process_id": "subtract",
                "result": False,
                "arguments": {
                    "data": [
                        {
                            "from_node": "arrayelement1"
                        },
                        {
                            "from_node": "arrayelement2"
                        }
                    ]
                }
            },
            "arrayelement4": {
                "process_id": "array_element",
                "result": False,
                "arguments": {
                    "data": {
                        "from_argument": "data"
                    },
                    "index": 1
                }
            },
            "arrayelement1": {
                "process_id": "array_element",
                "result": False,
                "arguments": {
                    "data": {
                        "from_argument": "data"
                    },
                    "index": 0
                }
            },
            "divide1": {
                "process_id": "divide",
                "result": True,
                "arguments": {
                    "data": [
                        {
                            "from_node": "sum1"
                        },
                        {
                            "from_node": "subtract1"
                        }
                    ]
                }
            },
            "sum1": {
                "process_id": "sum",
                "result": False,
                "arguments": {
                    "data": [
                        {
                            "from_node": "arrayelement3"
                        },
                        {
                            "from_node": "arrayelement4"
                        }
                    ]
                }
            },
            "arrayelement2": {
                "process_id": "array_element",
                "result": False,
                "arguments": {
                    "data": {
                        "from_argument": "data"
                    },
                    "index": 1
                }
            }
        }
        visitor.accept_process_graph(graph)
        stitched = imagecollection.reduce_bands(visitor).pyramid.levels[0].to_spatial_layer().stitch()
        print(stitched)
        self.assertEqual(3.0, stitched.cells[0][0][0])
def test_ndvi(self):
imagecollection = self.create_red_nir_layer()
stitched = imagecollection.ndvi().pyramid.levels[0].to_spatial_layer().stitch()
cells = stitched.cells[0, 0:4, 0:4]
expected = np.array([
[np.nan, 1 / 1, 2 / 2, 3 / 3],
[-1 / 1, 0 / 2, 1 / 3, 2 / 4],
[-2 / 2, -1 / 3, 0 / 4, 1 / 5],
[-3 / 3, -2 / 4, -1 / 5, 0 / 6]
])
np.testing.assert_array_almost_equal(cells, expected)
    def create_red_nir_layer(self):
        """Two-band cube with red[i][j] = i and nir[i][j] = j, plus band metadata.

        The red/nir common_names make the cube usable with ndvi().
        """
        red_ramp, nir_ramp = np.mgrid[0:4, 0:4]
        layer = self._create_spacetime_layer(cells=np.array([[red_ramp], [nir_ramp]]))
        pyramid = gps.Pyramid({0: layer})
        metadata = GeopysparkCubeMetadata({
            "cube:dimensions": {
                # TODO: also specify other dimensions?
                "bands": {"type": "bands", "values": ["B04", "B08"]}
            },
            "summaries": {
                "eo:bands": [
                    {"name": "B04", "common_name": "red"},
                    {"name": "B08", "common_name": "nir"},
                ]
            }
        })
        imagecollection = GeopysparkDataCube(pyramid=pyramid, metadata=metadata)
        return imagecollection
    def test_linear_scale_range(self):
        """linear_scale_range maps ndvi [-1, 1] onto [0, 100] as uint8."""
        imagecollection = self.create_red_nir_layer()
        stitched = imagecollection.ndvi().linear_scale_range(-1, 1, 0, 100).pyramid.levels[0].to_spatial_layer().stitch()
        cells = stitched.cells[0, 0:4, 0:4]
        # scaled value = 50 * (1 + ndvi)
        expected = 50.0 * (1.0 + np.array([
            [np.nan, 1 / 1, 2 / 2, 3 / 3],
            [-1 / 1, 0 / 2, 1 / 3, 2 / 4],
            [-2 / 2, -1 / 3, 0 / 4, 1 / 5],
            [-3 / 3, -2 / 4, -1 / 5, 0 / 6]
        ]))
        # the 0/0 NDVI cell becomes the uint8 nodata value 255
        expected[0][0] = 255.0
        np.testing.assert_array_almost_equal(cells, expected.astype(np.uint8))
    def test_linear_scale_range_reduce(self):
        """linear_scale_range as a band reducer: same scaling, uint8ud255 cell type."""
        imagecollection = self.create_red_nir_layer()
        visitor = GeotrellisTileProcessGraphVisitor()
        graph = {
            "scale": {
                "process_id": "linear_scale_range",
                "result": True,
                "arguments": {
                    "x": {
                        "from_argument": "data"
                    },
                    "inputMin": -1,
                    "inputMax": 1,
                    "outputMin": 0,
                    "outputMax": 100,
                }
            }
        }
        visitor.accept_process_graph(graph)
        scaled_layer = imagecollection.ndvi().reduce_bands(visitor).pyramid.levels[0].to_spatial_layer()
        # the output range [0, 100] must have been narrowed to uint8 with nodata 255
        assert scaled_layer.layer_metadata.cell_type == 'uint8ud255'
        stitched = scaled_layer.stitch()
        cells = stitched.cells[0, 0:4, 0:4]
        # scaled value = 50 * (1 + ndvi)
        expected = 50.0 * (1.0 + np.array([
            [np.nan, 1 / 1, 2 / 2, 3 / 3],
            [-1 / 1, 0 / 2, 1 / 3, 2 / 4],
            [-2 / 2, -1 / 3, 0 / 4, 1 / 5],
            [-3 / 3, -2 / 4, -1 / 5, 0 / 6]
        ]))
        # the 0/0 NDVI cell becomes the uint8 nodata value 255
        expected[0][0] = 255.0
        np.testing.assert_array_almost_equal(cells, expected.astype(np.uint8))
def _test_merge_cubes_subtract_spatial(self, left_spatial=False, right_spatial=False):
# TODO: this would be cleaner with @pytest.mark.parameterize but that's not supported on TestCase methods
red_ramp, nir_ramp = np.mgrid[0:4, 0:4]
layer1 = self._create_spacetime_layer(cells=np.array([[red_ramp]]))
if left_spatial:
layer1 = layer1.to_spatial_layer()
layer2 = self._create_spacetime_layer(cells=np.array([[nir_ramp]]))
if right_spatial:
layer2 = layer2.to_spatial_layer()
metadata = _build_metadata()
cube1 = GeopysparkDataCube(pyramid=gps.Pyramid({0: layer1}), metadata=metadata)
cube2 = GeopysparkDataCube(pyramid=gps.Pyramid({0: layer2}), metadata=metadata)
res = cube1.merge_cubes(cube2, 'subtract')
layer = res.pyramid.levels[0]
if layer.layer_type != LayerType.SPATIAL:
layer = layer.to_spatial_layer()
actual = layer.stitch().cells[0, 0:4, 0:4]
expected = red_ramp - | |
rows5:{}',rows[5])
pkdc('after header changed rows6:{}',rows[6])
pkdc('after header changed rows7:{}',rows[7])
pkdc('after header changed rows8:{}',rows[8])
pkdc('after header changed rows9:{}',rows[9])
pkio.write_text(filename, ''.join(rows))
def _flux_label(model):
    """Plot label for *model*: 'Flux' for fluxType 1, otherwise 'Intensity'.

    Returns '' when the model has no fluxType field.
    """
    if 'fluxType' in model:
        return 'Flux' if int(model.fluxType) == 1 else 'Intensity'
    return ''
def _flux_units(model):
    """Units string matching _flux_label(): flux in ph/s/.1%bw, intensity per mm^2.

    Returns '' when the model has no fluxType field.
    """
    if 'fluxType' in model:
        return 'ph/s/.1%bw' if int(model.fluxType) == 1 else 'ph/s/.1%bw/mm^2'
    return ''
def _generate_beamline_optics(report, data):
    """Render the SRW beamline optics and propagation python source.

    :param report: name of the report being generated
    :param data: full simulation data (models.beamline, models.propagation, ...)
    :return: (optics_source, propagation_source, res) where res is a PKDict
        with the element ``names``, the ``last_id`` of the final element to
        simulate and a name -> id map of ``watches`` (watchpoints).
    """
    res = PKDict(
        names=[],
        last_id=None,
        watches=PKDict()
    )
    models = data.models
    # Nothing to generate for an empty beamline or a non-beamline report.
    if len(models.beamline) == 0 \
        or not (_SIM_DATA.srw_is_beamline_report(report) or report == 'beamlineAnimation'):
        return '', '', res
    if _SIM_DATA.is_watchpoint(report):
        res.last_id = _SIM_DATA.watchpoint_id(report)
    if report == 'multiElectronAnimation':
        res.last_id = models.multiElectronAnimation.watchpointId
    has_beamline_elements = len(models.beamline) > 0
    if has_beamline_elements and not res.last_id:
        # default: simulate through the final beamline element
        res.last_id = models.beamline[-1].id
    items = []
    prev = None
    propagation = models.propagation
    max_name_size = 0
    for item in models.beamline:
        is_disabled = 'isDisabled' in item and item.isDisabled
        name = _safe_beamline_item_name(item.title, res.names)
        max_name_size = max(max_name_size, len(name))
        if prev:
            size = item.position - prev.position
            if size != 0:
                # add a drift
                drift_name = _safe_beamline_item_name('{}_{}'.format(prev.name, name), res.names)
                max_name_size = max(max_name_size, len(drift_name))
                res.names.append(drift_name)
                items.append(PKDict(
                    name=drift_name,
                    type='drift',
                    position=prev.position,
                    propagation=prev.drift_propagation,
                    length=size,
                ))
        # propagation settings are stored per element id as [element, drift]
        pp = propagation[str(item.id)]
        item.propagation = pp[0]
        item.drift_propagation = pp[1]
        item.name = name
        if not is_disabled:
            if item.type == 'watch' and not items:
                # first item is a watch, insert a 0 length drift in front
                items.append(PKDict(
                    name='zero_drift',
                    type='drift',
                    position=item.position,
                    propagation=item.propagation,
                    length=0,
                ))
                res.names.append(items[-1].name)
            if 'heightProfileFile' in item:
                item.heightProfileDimension = _height_profile_dimension(item, data)
            items.append(item)
            res.names.append(name)
            if item.type == 'watch':
                res.watches[name] = item.id
        # stop once the requested last element has been emitted
        if int(res.last_id) == int(item.id):
            break
        prev = item
    args = PKDict(
        report=report,
        items=items,
        names=res.names,
        postPropagation=models.postPropagation,
        maxNameSize=max_name_size,
        # schema model field name -> srwlib parameter name
        nameMap=PKDict(
            apertureShape='ap_shape',
            asymmetryAngle='ang_as',
            attenuationLength='atten_len',
            complementaryAttenuationLength='atLen2',
            complementaryRefractiveIndex='delta2',
            coreAttenuationLength='atten_len_core',
            coreDiameter='diam_core',
            coreRefractiveIndex='delta_core',
            crystalThickness='tc',
            dSpacing='d_sp',
            diffractionOrder='m',
            externalAttenuationLength='atten_len_ext',
            externalRefractiveIndex='delta_ext',
            energyAvg='e_avg',
            firstFocusLength='p',
            focalLength='q',
            focalPlane='foc_plane',
            grazingAngle='ang',
            gridShape='grid_sh',
            grooveDensity0='grDen',
            grooveDensity1='grDen1',
            grooveDensity2='grDen2',
            grooveDensity3='grDen3',
            grooveDensity4='grDen4',
            heightAmplification='amp_coef',
            heightProfileFile='hfn',
            horizontalApertureSize='apert_h',
            horizontalCenterCoordinate='xc',
            horizontalCenterPosition='xc',
            horizontalFocalLength='Fx',
            horizontalGridDimension='grid_dx',
            horizontalGridPitch='pitch_x',
            horizontalGridsNumber='grid_nx',
            horizontalMaskCoordinate='mask_x0',
            horizontalOffset='x',
            horizontalPixelsNumber='mask_Nx',
            horizontalSamplingInterval='hx',
            horizontalSize='Dx',
            horizontalTransverseSize='size_x',
            imageFile='file_path',
            length='L',
            mainAttenuationLength='atLen1',
            mainRefractiveIndex='delta1',
            maskThickness='thick',
            normalVectorX='nvx',
            normalVectorY='nvy',
            normalVectorZ='nvz',
            numberOfLenses='n',
            numberOfZones='nZones',
            orientation='dim',
            outerRadius='rn',
            radius='r',
            refractiveIndex='delta',
            sagittalRadius='rs',
            sagittalSize='size_sag',
            tangentialRadius='rt',
            tangentialSize='size_tang',
            tangentialVectorX='tvx',
            tangentialVectorY='tvy',
            thickness='thick',
            tipRadius='r_min',
            tipWallThickness='wall_thick',
            transmissionImage='extTransm',
            useCase='uc',
            verticalApertureSize='apert_v',
            verticalCenterCoordinate='yc',
            verticalCenterPosition='yc',
            verticalFocalLength='Fy',
            verticalGridDimension='grid_dy',
            verticalGridPitch='pitch_y',
            verticalGridsNumber='grid_ny',
            verticalMaskCoordinate='mask_y0',
            verticalOffset='y',
            verticalPixelsNumber='mask_Ny',
            verticalSamplingInterval='hy',
            verticalSize='Dy',
            verticalTransverseSize='size_y',
        ),
    )
    optics = template_common.render_jinja(SIM_TYPE, args, 'beamline_optics.py')
    prop = template_common.render_jinja(SIM_TYPE, args, 'beamline_parameters.py')
    return optics, prop, res
def _generate_parameters_file(data, plot_reports=False, run_dir=None):
    """Generate the python source for the requested report.

    Mirror, brilliance and background-import reports render a dedicated
    template and return early; all other reports get the common SRW
    parameter source plus report-specific parameters.

    Args:
        data (PKDict): simulation description (``models``, ``report``)
        plot_reports (bool): enable SRW's built-in plotting in the output
        run_dir (py.path.local): run directory; used by backgroundImport
            and passed through to ``_set_parameters``

    Returns:
        str: generated python source
    """
    report = data.report
    dm = data.models
    # do this before validation or arrays get turned into strings
    if report == 'rsoptExport':
        rsopt_ctx = _rsopt_jinja_context(dm.exportRsOpt)
    _validate_data(data, _SCHEMA)
    _update_model_fields(dm)
    _update_models_for_report(report, dm)
    res, v = template_common.generate_parameters_file(data)
    v.rs_type = dm.simulation.sourceType
    # 'u_i' tabulated undulators are treated as plain undulators
    # (presumably "idealized" mode — confirm against schema)
    if v.rs_type == 't' and dm.tabulatedUndulator.undulatorType == 'u_i':
        v.rs_type = 'u'
    if report == 'rsoptExport':
        v.update(rsopt_ctx)
        # rsopt uses this as a lookup param so want it in one place
        v.ws_fni_desc = 'file name for saving propagated single-e intensity distribution vs horizontal and vertical position'
    # early-return reports, each with its own jinja template
    if report == 'mirrorReport':
        v.mirrorOutputFilename = _OUTPUT_FOR_MODEL[report].filename
        return template_common.render_jinja(SIM_TYPE, v, 'mirror.py')
    if report == 'brillianceReport':
        v.brillianceOutputFilename = _OUTPUT_FOR_MODEL[report].filename
        return template_common.render_jinja(SIM_TYPE, v, 'brilliance.py')
    if report == 'backgroundImport':
        v.tmp_dir = str(run_dir)
        v.python_file = run_dir.join('user_python.py')
        pkio.write_text(v.python_file, dm.backgroundImport.python)
        return template_common.render_jinja(SIM_TYPE, v, 'import.py')
    _set_parameters(v, data, plot_reports, run_dir)
    return _trim(res + template_common.render_jinja(SIM_TYPE, v))
def _generate_srw_main(data, plot_reports, beamline_info):
    """Generate the python statements executed by the simulation's main().

    Selects which SRW calculations to enable (single-e spectrum/intensity,
    multi-e flux, power density, trajectory, beamline propagation) based on
    the active report, then invokes ``calc_all``. For ``beamlineAnimation``
    the beamline is propagated in stages, one per watchpoint, saving a
    wavefront pickle at each stage.

    Args:
        data (PKDict): simulation description (``models``, ``report``)
        plot_reports (bool): enable SRW's built-in plotting options
        beamline_info (PKDict): ``names``, ``watches`` and ``last_id``

    Returns:
        str: indented python statements; non-rsopt output appends ``main()``
    """
    report = data.report
    for_rsopt = report == 'rsoptExport'
    source_type = data.models.simulation.sourceType
    run_all = report == _SIM_DATA.SRW_RUN_ALL_MODEL or for_rsopt
    # rsopt templates use the abbreviated variable name
    vp_var = 'vp' if for_rsopt else 'varParam'
    content = [
        f'v = srwl_bl.srwl_uti_parse_options(srwl_bl.srwl_uti_ext_options({vp_var}), use_sys_argv={plot_reports})',
    ]
    if plot_reports and _SIM_DATA.srw_uses_tabulated_zipfile(data):
        content.append('setup_magnetic_measurement_files("{}", v)'.format(data.models.tabulatedUndulator.magneticFile))
    if report == 'beamlineAnimation':
        content.append("v.si_fn = ''")
        content.append("v.ws_fni = ''")
        if len(beamline_info.watches):
            content.append('v.ws = True')
        else:
            content.append('v.si = True')
        content.append('op = None')
        content.append("v.ws_fne = '{}'".format(_wavefront_pickle_filename(0)))
        prev_wavefront = None
        names = []
        # propagate stage-by-stage: each watchpoint ends a stage which
        # resumes from the previous stage's saved wavefront
        for n in beamline_info.names:
            names.append(n)
            if n in beamline_info.watches:
                is_last_watch = n == beamline_info.names[-1]
                content.append("names = ['" + "','".join(names) + "']")
                names = []
                if prev_wavefront:
                    content.append("v.ws_fnei = '{}'".format(prev_wavefront))
                prev_wavefront = _wavefront_pickle_filename(beamline_info.watches[n])
                content.append("v.ws_fnep = '{}'".format(prev_wavefront))
                content.append('op = set_optics(v, names, {})'.format(is_last_watch))
                if not is_last_watch:
                    content.append('srwl_bl.SRWLBeamline(_name=v.name).calc_all(v, op)')
    elif run_all or (_SIM_DATA.srw_is_beamline_report(report) and len(data.models.beamline)):
        content.append('names = [{}]'.format(
            ','.join(["'{}'".format(name) for name in beamline_info.names]),
        ))
        content.append('op = set_optics(v, names, {})'.format(
            beamline_info.last_id and int(beamline_info.last_id) == int(data.models.beamline[-1].id)))
        content.append('v.ws = True')
        if plot_reports:
            content.append("v.ws_pl = 'xy'")
    else:
        content.append('op = None')
    if (run_all and source_type != 'g') or report == 'intensityReport':
        content.append('v.ss = True')
        if plot_reports:
            content.append("v.ss_pl = 'e'")
    # bug fix: was `report in 'fluxReport'`, a substring test which also
    # matched e.g. 'flux' or '' — every sibling branch compares by equality
    if (run_all and source_type not in ('g', 'm')) or report == 'fluxReport':
        content.append('v.sm = True')
        if plot_reports:
            content.append("v.sm_pl = 'e'")
    if (run_all and source_type != 'g') or report == 'powerDensityReport':
        content.append('v.pw = True')
        if plot_reports:
            content.append("v.pw_pl = 'xy'")
    if run_all or report in ('initialIntensityReport', 'sourceIntensityReport'):
        content.append('v.si = True')
        if plot_reports:
            content.append("v.si_pl = 'xy'")
    if (run_all and source_type != 'g') or report == 'trajectoryReport':
        content.append('v.tr = True')
        if plot_reports:
            content.append("v.tr_pl = 'xz'")
    content.append('srwl_bl.SRWLBeamline(_name=v.name).calc_all(v, op)')
    return '\n'.join(
        [f' {x}' for x in content]
        + ['']
        + ([] if for_rsopt else ['main()', ''])
    )
def _get_first_element_position(report, data):
dm = data.models
if report in dm and 'distanceFromSource' in dm[report]:
return dm[report].distanceFromSource
if dm.beamline:
return dm.beamline[0].position
if 'distanceFromSource' in dm.simulation:
return dm.simulation.distanceFromSource
return template_common.DEFAULT_INTENSITY_DISTANCE
def _height_profile_dimension(item, data):
"""Find the dimension of the provided height profile .dat file.
1D files have 2 columns, 2D - 8 columns.
"""
dimension = 0
if item.heightProfileFile and item.heightProfileFile != 'None':
with _SIM_DATA.lib_file_abspath(item.heightProfileFile, data=data).open('r') as f:
header = f.readline().strip().split()
dimension = 1 if len(header) == 2 else 2
return dimension
def _intensity_units(sim_in):
if 'models' in sim_in and _SIM_DATA.srw_is_gaussian_source(sim_in.models.simulation):
if 'report' in sim_in \
and sim_in.report in ('intensityReport', 'sourceIntensityReport'):
i = sim_in.models[sim_in.report].fieldUnits
else:
i = sim_in.models.simulation.fieldUnits
return _SCHEMA.enum.FieldUnits[int(i)][1]
return 'ph/s/.1%bw/mm^2'
def _load_user_model_list(model_name):
    """Load the persisted user model list for *model_name*.

    A missing or unreadable list file is replaced with an empty list and
    then re-read, so callers always receive valid JSON content.
    """
    list_file = _SIM_DATA.lib_file_write_path(_USER_MODEL_LIST_FILENAME[model_name])
    try:
        if list_file.exists():
            return simulation_db.read_json(list_file)
    except Exception:
        pkdlog('user list read failed, resetting contents: {}', list_file)
    # no file, or corrupt contents: reset to empty and load that
    _save_user_model_list(model_name, [])
    return _load_user_model_list(model_name)
def _parse_srw_log(run_dir):
    """Extract SRW error messages from the run log.

    Returns the concatenated ``Error:`` lines found in the log, an empty
    string when no log file exists, or a generic message when the log
    exists but contains no recognizable errors.
    """
    log_path = run_dir.join(template_common.RUN_LOG)
    if not log_path.exists():
        return ''
    errors = []
    with pkio.open_text(log_path) as f:
        for line in f:
            m = re.search(r'Error: (.*)', line)
            if m:
                errors.append(m.group(1) + '\n')
    if errors:
        return ''.join(errors)
    return 'An unknown error occurred'
def _process_image(data, tmp_dir):
    """Process a sample image for SRW and return the result file.

    Either transforms a user-supplied image file (crop/rotate/tile/etc.) or
    generates a random 2d object sample, depending on
    ``data.model.sampleSource``.

    Args:
        data (dict): description of simulation; ``baseImage`` is the library
            image filename and ``model`` is the sample model
        tmp_dir: directory the processed image is written into
    Returns:
        py.path.local: file to return
    """
    # This should just be a basename, but this ensures it.
    import srwl_uti_smp
    path = str(_SIM_DATA.lib_file_abspath(sirepo.util.secure_filename(data.baseImage)))
    m = data.model
    # SRW writes its output relative to the cwd, so work inside tmp_dir
    with pkio.save_chdir(tmp_dir):
        if m.sampleSource == 'file':
            s = srwl_uti_smp.SRWLUtiSmp(
                file_path=path,
                # crop only when requested; tuple order is (x0, x1, y0, y1)
                area=None if not int(m.cropArea) else (m.areaXStart, m.areaXEnd, m.areaYStart, m.areaYEnd),
                rotate_angle=float(m.rotateAngle),
                rotate_reshape=int(m.rotateReshape),
                cutoff_background_noise=float(m.cutoffBackgroundNoise),
                background_color=int(m.backgroundColor),
                invert=int(m.invert),
                # tile only when requested; tuple order is (rows, columns)
                tile=None if not int(m.tileImage) else (m.tileRows, m.tileColumns),
                shift_x=m.shiftX,
                shift_y=m.shiftY,
                is_save_images=True,
                prefix=str(tmp_dir),
                output_image_format=m.outputImageFormat,
            )
            return pkio.py_path(s.processed_image_name)
        # only two sample sources are supported
        assert m.sampleSource == 'randomDisk'
        # optical constants are zeroed since only the image is wanted
        # (_ret='img') — TODO confirm against srwl_uti_smp docs
        s = srwl_uti_smp.srwl_opt_setup_smp_rnd_obj2d(
            _thickness=0,
            _delta=0,
            _atten_len=0,
            _dens=m.dens,
            _rx=m.rx,
            _ry=m.ry,
            _obj_type=int(m.obj_type),
            _r_min_bw_obj=m.r_min_bw_obj,
            _obj_size_min=m.obj_size_min,
            _obj_size_max=m.obj_size_max,
            _size_dist=int(m.size_dist),
            _ang_min=m.ang_min,
            _ang_max=m.ang_max,
            _ang_dist=int(m.ang_dist),
            _rand_alg=int(m.rand_alg),
            # the extra object parameters depend on the object type:
            # '1'-'3' use size ratio, '4' (polygon) uses side count,
            # anything else uses the random-shapes setting
            _obj_par1=m.obj_size_ratio if m.obj_type in ('1', '2', '3') \
                else m.poly_sides if m.obj_type == '4' \
                else m.rand_shapes,
            _obj_par2=m.rand_obj_size == '1' if m.obj_type in ('1', '2', '3') \
                else m.rand_poly_side == '1' if m.obj_type == '4' \
                else None,
            _ret='img',
        )
        filename = 'sample_processed.{}'.format(m.outputImageFormat)
        s.save(filename)
        return pkio.py_path(filename)
def _process_rsopt_elements(els):
x = [e for e in els if e.enabled and e.enabled != '0']
for e in x:
for p in _RSOPT_PARAMS:
if p in e:
e[p].offsets = sirepo.util.split_comma_delimited_string(e[f'{p}Offsets'], float)
return x
def _remap_3d(info, allrange, out, report):
    """Reshape flat SRW intensity points into a 2d heatmap plot response.

    Args:
        info (PKDict): flat ``points`` plus axis labels, title, subtitle
            and ``summaryData``
        allrange (list): SRW range array; indices 3-5 are the horizontal
            (start, end, count) and 6-8 the vertical — TODO confirm
        out (PKDict): output ``labels`` and ``units``
        report (PKDict): report model; may request plot-range clipping,
            intensity limits, resizing and rotation
    Returns:
        PKDict: heatmap data with x/y/z ranges, labels and ``z_matrix``
    """
    x_range = [allrange[3], allrange[4], allrange[5]]
    y_range = [allrange[6], allrange[7], allrange[8]]
    ar2d = info.points
    # guard: never read more points than are actually available, even if
    # the declared grid size is larger
    totLen = int(x_range[2] * y_range[2])
    n = len(ar2d) if totLen > len(ar2d) else totLen
    ar2d = np.reshape(ar2d[0:n], (int(y_range[2]), int(x_range[2])))
    if report.get('usePlotRange', '0') == '1':
        ar2d, x_range, y_range = _update_report_range(report, ar2d, x_range, y_range)
    if report.get('useIntensityLimits', '0') == '1':
        # clamp (not mask) values outside the configured limits
        ar2d[ar2d < report.minIntensityLimit] = report.minIntensityLimit
        ar2d[ar2d > report.maxIntensityLimit] = report.maxIntensityLimit
    ar2d, x_range, y_range = _resize_report(report, ar2d, x_range, y_range)
    if report.get('rotateAngle', 0):
        ar2d, x_range, y_range = _rotate_report(report, ar2d, x_range, y_range, info)
    if out.units[2]:
        out.labels[2] = u'{} [{}]'.format(out.labels[2], out.units[2])
    # with explicit limits the color scale is pinned to them; otherwise it
    # spans the data
    if report.get('useIntensityLimits', '0') == '1':
        z_range = [report.minIntensityLimit, report.maxIntensityLimit]
    else:
        z_range = [np.min(ar2d), np.max(ar2d)]
    return PKDict(
        x_range=x_range,
        y_range=y_range,
        x_label=info.x_label,
        y_label=info.y_label,
        z_label=_superscript(out.labels[2]),
        title=info.title,
        subtitle=_superscript_2(info.subtitle),
        z_matrix=ar2d.tolist(),
        z_range=z_range,
        summaryData=info.summaryData,
    )
def _resize_report(report, ar2d, x_range, y_range):
width_pixels = int(report.intensityPlotsWidth)
if not width_pixels:
# upper limit is browser's max html canvas size
width_pixels = _CANVAS_MAX_SIZE
job.init()
# roughly 20x size increase for json
if ar2d.size * _JSON_MESSAGE_EXPANSION > job.cfg.max_message_bytes:
max_width = int(math.sqrt(job.cfg.max_message_bytes / _JSON_MESSAGE_EXPANSION))
if max_width < width_pixels:
pkdc(
'auto scaling dimensions to fit message size. size: {}, max_width: {}',
ar2d.size,
max_width,
)
width_pixels = max_width
# rescale width and height to maximum of width_pixels
if width_pixels and (width_pixels < x_range[2] or width_pixels < y_range[2]):
from scipy | |
the
error-tag 'invalid-value' in this case.
The IETF model in RFC 7223 provides YANG features for the
following (i.e., pre-provisioning and arbitrary-names),
however they are omitted here:
If the device supports pre-provisioning of interface
configuration, the 'pre-provisioning' feature is
advertised.
If the device allows arbitrarily named user-controlled
interfaces, the 'arbitrary-names' feature is advertised.
When a configured user-controlled interface is created by
the system, it is instantiated with the same name in the
/interfaces/interface[name]/state list.
''',
'name',
'openconfig-interfaces', False),
_MetaInfoClassMember('oper-status', REFERENCE_ENUM_CLASS, 'OperStatusEnum' , 'ydk.models.openconfig.openconfig_interfaces', 'Interfaces.Interface.State.OperStatusEnum',
[], [],
''' [adapted from IETF interfaces model (RFC 7223)]
The current operational state of the interface.
This leaf has the same semantics as ifOperStatus.
''',
'oper_status',
'openconfig-interfaces', False),
_MetaInfoClassMember('type', REFERENCE_IDENTITY_CLASS, 'InterfaceTypeIdentity' , 'ydk.models.ietf.ietf_interfaces', 'InterfaceTypeIdentity',
[], [],
''' [adapted from IETF interfaces model (RFC 7223)]
The type of the interface.
When an interface entry is created, a server MAY
initialize the type leaf with a valid value, e.g., if it
is possible to derive the type from the name of the
interface.
If a client tries to set the type of an interface to a
value that can never be used by the system, e.g., if the
type is not supported or if the type does not match the
name of the interface, the server MUST reject the request.
A NETCONF server MUST reply with an rpc-error with the
error-tag 'invalid-value' in this case.
''',
'type',
'openconfig-interfaces', False),
],
'openconfig-interfaces',
'state',
_yang_ns._namespaces['openconfig-interfaces'],
'ydk.models.openconfig.openconfig_interfaces'
),
},
'Interfaces.Interface.HoldTime.Config' : {
'meta_info' : _MetaInfoClass('Interfaces.Interface.HoldTime.Config',
False,
[
_MetaInfoClassMember('down', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Dampens advertisement when the interface transitions from
up to down. A zero value means dampening is turned off,
i.e., immediate notification.
''',
'down',
'openconfig-interfaces', False),
_MetaInfoClassMember('up', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Dampens advertisement when the interface
transitions from down to up. A zero value means dampening
is turned off, i.e., immediate notification.
''',
'up',
'openconfig-interfaces', False),
],
'openconfig-interfaces',
'config',
_yang_ns._namespaces['openconfig-interfaces'],
'ydk.models.openconfig.openconfig_interfaces'
),
},
'Interfaces.Interface.HoldTime.State' : {
'meta_info' : _MetaInfoClass('Interfaces.Interface.HoldTime.State',
False,
[
_MetaInfoClassMember('down', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Dampens advertisement when the interface transitions from
up to down. A zero value means dampening is turned off,
i.e., immediate notification.
''',
'down',
'openconfig-interfaces', False),
_MetaInfoClassMember('up', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Dampens advertisement when the interface
transitions from down to up. A zero value means dampening
is turned off, i.e., immediate notification.
''',
'up',
'openconfig-interfaces', False),
],
'openconfig-interfaces',
'state',
_yang_ns._namespaces['openconfig-interfaces'],
'ydk.models.openconfig.openconfig_interfaces'
),
},
'Interfaces.Interface.HoldTime' : {
'meta_info' : _MetaInfoClass('Interfaces.Interface.HoldTime',
False,
[
_MetaInfoClassMember('config', REFERENCE_CLASS, 'Config' , 'ydk.models.openconfig.openconfig_interfaces', 'Interfaces.Interface.HoldTime.Config',
[], [],
''' Configuration data for interface hold-time settings.
''',
'config',
'openconfig-interfaces', False),
_MetaInfoClassMember('state', REFERENCE_CLASS, 'State' , 'ydk.models.openconfig.openconfig_interfaces', 'Interfaces.Interface.HoldTime.State',
[], [],
''' Operational state data for interface hold-time.
''',
'state',
'openconfig-interfaces', False),
],
'openconfig-interfaces',
'hold-time',
_yang_ns._namespaces['openconfig-interfaces'],
'ydk.models.openconfig.openconfig_interfaces'
),
},
'Interfaces.Interface.Subinterfaces.Subinterface.Config' : {
'meta_info' : _MetaInfoClass('Interfaces.Interface.Subinterfaces.Subinterface.Config',
False,
[
_MetaInfoClassMember('description', ATTRIBUTE, 'str' , None, None,
[], [],
''' [adapted from IETF interfaces model (RFC 7223)]
A textual description of the interface.
A server implementation MAY map this leaf to the ifAlias
MIB object. Such an implementation needs to use some
mechanism to handle the differences in size and characters
allowed between this leaf and ifAlias. The definition of
such a mechanism is outside the scope of this document.
Since ifAlias is defined to be stored in non-volatile
storage, the MIB implementation MUST map ifAlias to the
value of 'description' in the persistently stored
datastore.
Specifically, if the device supports ':startup', when
ifAlias is read the device MUST return the value of
'description' in the 'startup' datastore, and when it is
written, it MUST be written to the 'running' and 'startup'
datastores. Note that it is up to the implementation to
decide whether to modify this single leaf in 'startup' or
perform an implicit copy-config from 'running' to
'startup'.
If the device does not support ':startup', ifAlias MUST
be mapped to the 'description' leaf in the 'running'
datastore.
''',
'description',
'openconfig-interfaces', False),
_MetaInfoClassMember('enabled', ATTRIBUTE, 'bool' , None, None,
[], [],
''' [adapted from IETF interfaces model (RFC 7223)]
This leaf contains the configured, desired state of the
interface.
Systems that implement the IF-MIB use the value of this
leaf in the 'running' datastore to set
IF-MIB.ifAdminStatus to 'up' or 'down' after an ifEntry
has been initialized, as described in RFC 2863.
Changes in this leaf in the 'running' datastore are
reflected in ifAdminStatus, but if ifAdminStatus is
changed over SNMP, this leaf is not affected.
''',
'enabled',
'openconfig-interfaces', False),
_MetaInfoClassMember('index', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The index of the subinterface, or logical interface number.
On systems with no support for subinterfaces, or not using
subinterfaces, this value should default to 0, i.e., the
default subinterface.
''',
'index',
'openconfig-interfaces', False),
_MetaInfoClassMember('name', ATTRIBUTE, 'str' , None, None,
[], [],
''' [adapted from IETF interfaces model (RFC 7223)]
The name of the interface.
A device MAY restrict the allowed values for this leaf,
possibly depending on the type of the interface.
For system-controlled interfaces, this leaf is the
device-specific name of the interface. The 'config false'
list interfaces/interface[name]/state contains the currently
existing interfaces on the device.
If a client tries to create configuration for a
system-controlled interface that is not present in the
corresponding state list, the server MAY reject
the request if the implementation does not support
pre-provisioning of interfaces or if the name refers to
an interface that can never exist in the system. A
NETCONF server MUST reply with an rpc-error with the
error-tag 'invalid-value' in this case.
The IETF model in RFC 7223 provides YANG features for the
following (i.e., pre-provisioning and arbitrary-names),
however they are omitted here:
If the device supports pre-provisioning of interface
configuration, the 'pre-provisioning' feature is
advertised.
If the device allows arbitrarily named user-controlled
interfaces, the 'arbitrary-names' feature is advertised.
When a configured user-controlled interface is created by
the system, it is instantiated with the same name in the
/interfaces/interface[name]/state list.
''',
'name',
'openconfig-interfaces', False),
_MetaInfoClassMember('unnumbered', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Indicates that the subinterface is unnumbered, and provides
a reference to the subinterface that provides the IP
address information (v4, v6 or both) for the current
subinterface.
''',
'unnumbered',
'openconfig-interfaces', False),
],
'openconfig-interfaces',
'config',
_yang_ns._namespaces['openconfig-interfaces'],
'ydk.models.openconfig.openconfig_interfaces'
),
},
'Interfaces.Interface.Subinterfaces.Subinterface.State.Counters' : {
'meta_info' : _MetaInfoClass('Interfaces.Interface.Subinterfaces.Subinterface.State.Counters',
False,
[
_MetaInfoClassMember('in-broadcast-pkts', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' [adapted from IETF interfaces model (RFC 7223)]
The number of packets, delivered by this sub-layer to a
higher (sub-)layer, that were addressed to a broadcast
address at this sub-layer.
Discontinuities in the value of this counter can occur
at re-initialization of the management system, and at
other times as indicated by the value of
'discontinuity-time'.
''',
'in_broadcast_pkts',
'openconfig-interfaces', False),
_MetaInfoClassMember('in-discards', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' [adapted from IETF interfaces model (RFC 7223)]
Changed the counter type to counter64.
The number of inbound packets that were chosen to be
discarded even though no errors had been detected to
prevent their being deliverable to a higher-layer
protocol. One possible reason for discarding such a
packet could be to free up buffer space.
Discontinuities in the value of this counter can occur
at re-initialization of the management system, and at
other times as indicated by the value of
'discontinuity-time'.
''',
'in_discards',
'openconfig-interfaces', False),
_MetaInfoClassMember('in-errors', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' | |
E501
if 'city' in params:
query_params.append(('city', params['city'])) # noqa: E501
if 'state_region' in params:
query_params.append(('state_region', params['state_region'])) # noqa: E501
if 'postal_code' in params:
query_params.append(('postal_code', params['postal_code'])) # noqa: E501
if 'country_code' in params:
query_params.append(('country_code', params['country_code'])) # noqa: E501
if 'phone' in params:
query_params.append(('phone', params['phone'])) # noqa: E501
if 'email' in params:
query_params.append(('email', params['email'])) # noqa: E501
if 'cc_email' in params:
query_params.append(('cc_email', params['cc_email'])) # noqa: E501
if 'total' in params:
query_params.append(('total', params['total'])) # noqa: E501
if 'screen_branding_theme_code' in params:
query_params.append(('screen_branding_theme_code', params['screen_branding_theme_code'])) # noqa: E501
if 'storefront_host_name' in params:
query_params.append(('storefront_host_name', params['storefront_host_name'])) # noqa: E501
if 'creation_date_begin' in params:
query_params.append(('creation_date_begin', params['creation_date_begin'])) # noqa: E501
if 'creation_date_end' in params:
query_params.append(('creation_date_end', params['creation_date_end'])) # noqa: E501
if 'payment_date_begin' in params:
query_params.append(('payment_date_begin', params['payment_date_begin'])) # noqa: E501
if 'payment_date_end' in params:
query_params.append(('payment_date_end', params['payment_date_end'])) # noqa: E501
if 'shipment_date_begin' in params:
query_params.append(('shipment_date_begin', params['shipment_date_begin'])) # noqa: E501
if 'shipment_date_end' in params:
query_params.append(('shipment_date_end', params['shipment_date_end'])) # noqa: E501
if 'rma' in params:
query_params.append(('rma', params['rma'])) # noqa: E501
if 'purchase_order_number' in params:
query_params.append(('purchase_order_number', params['purchase_order_number'])) # noqa: E501
if 'item_id' in params:
query_params.append(('item_id', params['item_id'])) # noqa: E501
if 'current_stage' in params:
query_params.append(('current_stage', params['current_stage'])) # noqa: E501
if 'channel_partner_code' in params:
query_params.append(('channel_partner_code', params['channel_partner_code'])) # noqa: E501
if 'channel_partner_order_id' in params:
query_params.append(('channel_partner_order_id', params['channel_partner_order_id'])) # noqa: E501
if 'customer_profile_oid' in params:
query_params.append(('customer_profile_oid', params['customer_profile_oid'])) # noqa: E501
if 'refund_date_begin' in params:
query_params.append(('Refund Date Begin', params['refund_date_begin'])) # noqa: E501
if 'refund_date_end' in params:
query_params.append(('Refund Date End', params['refund_date_end'])) # noqa: E501
if 'custom_field_1' in params:
query_params.append(('Custom Field 1', params['custom_field_1'])) # noqa: E501
if 'custom_field_2' in params:
query_params.append(('Custom Field 2', params['custom_field_2'])) # noqa: E501
if 'custom_field_3' in params:
query_params.append(('Custom Field 3', params['custom_field_3'])) # noqa: E501
if 'custom_field_4' in params:
query_params.append(('Custom Field 4', params['custom_field_4'])) # noqa: E501
if 'custom_field_5' in params:
query_params.append(('Custom Field 5', params['custom_field_5'])) # noqa: E501
if 'custom_field_6' in params:
query_params.append(('Custom Field 6', params['custom_field_6'])) # noqa: E501
if 'custom_field_7' in params:
query_params.append(('Custom Field 7', params['custom_field_7'])) # noqa: E501
if 'ship_on_date_begin' in params:
query_params.append(('ship_on_date_begin', params['ship_on_date_begin'])) # noqa: E501
if 'ship_on_date_end' in params:
query_params.append(('ship_on_date_end', params['ship_on_date_end'])) # noqa: E501
if 'limit' in params:
query_params.append(('_limit', params['limit'])) # noqa: E501
if 'offset' in params:
query_params.append(('_offset', params['offset'])) # noqa: E501
if 'sort' in params:
query_params.append(('_sort', params['sort'])) # noqa: E501
if 'expand' in params:
query_params.append(('_expand', params['expand'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['ultraCartOauth', 'ultraCartSimpleApiKey'] # noqa: E501
return self.api_client.call_api(
'/order/orders', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='OrdersResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_orders_batch(self, order_batch, **kwargs): # noqa: E501
"""Retrieve order batch # noqa: E501
Retrieves a group of orders from the account based on an array of order ids. If more than 500 order ids are specified, the API call will fail with a bad request error. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_orders_batch(order_batch, async_req=True)
>>> result = thread.get()
:param async_req bool
:param OrderQueryBatch order_batch: Order batch (required)
:param str expand: The object expansion to perform on the result.
:return: OrdersResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_orders_batch_with_http_info(order_batch, **kwargs) # noqa: E501
else:
(data) = self.get_orders_batch_with_http_info(order_batch, **kwargs) # noqa: E501
return data
    def get_orders_batch_with_http_info(self, order_batch, **kwargs): # noqa: E501
        """Retrieve order batch # noqa: E501

        Retrieves a group of orders from the account based on an array of order ids. If more than 500 order ids are specified, the API call will fail with a bad request error. # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_orders_batch_with_http_info(order_batch, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param OrderQueryBatch order_batch: Order batch (required)
        :param str expand: The object expansion to perform on the result.
        :return: OrdersResponse
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = ['order_batch', 'expand'] # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # NOTE: locals() captures 'order_batch' and 'kwargs' by name here;
        # validated kwargs entries are merged into the same dict below, so
        # the local variable names in this scope are significant.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_orders_batch" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'order_batch' is set
        if ('order_batch' not in params or
                params['order_batch'] is None):
            raise ValueError("Missing the required parameter `order_batch` when calling `get_orders_batch`") # noqa: E501
        collection_formats = {}
        path_params = {}
        query_params = []
        if 'expand' in params:
            query_params.append(('_expand', params['expand'])) # noqa: E501
        header_params = {}
        form_params = []
        local_var_files = {}
        # the OrderQueryBatch object is sent as the JSON request body
        body_params = None
        if 'order_batch' in params:
            body_params = params['order_batch']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json']) # noqa: E501
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
            ['application/json']) # noqa: E501
        # Authentication setting
        auth_settings = ['ultraCartOauth', 'ultraCartSimpleApiKey'] # noqa: E501
        return self.api_client.call_api(
            '/order/orders/batch', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='OrdersResponse', # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def get_orders_by_query(self, order_query, **kwargs): # noqa: E501
"""Retrieve orders by query # noqa: E501
Retrieves a group of orders from the account based on a query object. If no parameters are specified, the API call will fail with a bad request error. Always specify some parameters to limit the scope of the orders returned to ones you are truly interested in. You will need to make multiple API calls in order to retrieve the entire result set since this API performs result set pagination. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_orders_by_query(order_query, async_req=True)
>>> result = thread.get()
:param async_req bool
:param OrderQuery order_query: Order query (required)
:param int limit: The maximum number of records to return on this one API call. (Maximum 200)
:param int offset: Pagination of the record set. Offset is a zero based index.
:param str sort: The sort order of the orders. See Sorting documentation for examples of using multiple values and sorting by ascending and descending.
:param str expand: The object expansion to perform on the result.
:return: OrdersResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_orders_by_query_with_http_info(order_query, **kwargs) # noqa: E501
else:
(data) = self.get_orders_by_query_with_http_info(order_query, **kwargs) # noqa: E501
return data
def get_orders_by_query_with_http_info(self, order_query, **kwargs): # noqa: E501
"""Retrieve orders by query # noqa: E501
Retrieves a group of orders from the account based on a query object. If no parameters are specified, the API call will fail with a bad request error. Always specify some parameters to limit the scope of the orders returned to ones you are truly interested in. You will need to make multiple API calls in order to retrieve the entire result set since this API performs result set pagination. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_orders_by_query_with_http_info(order_query, async_req=True)
>>> result = thread.get()
:param async_req bool
:param OrderQuery order_query: Order query (required)
:param int limit: The maximum number of records to return on this one API call. (Maximum 200)
:param int offset: Pagination of the record set. Offset is a zero based index.
:param str sort: The sort order of the orders. See Sorting documentation for examples of using multiple values and sorting by ascending and descending.
:param str expand: The object expansion to perform on the result.
:return: OrdersResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['order_query', 'limit', 'offset', 'sort', 'expand'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in | |
# Recall: 0.97185 F1: 0.93388 F2: 0.95630
# Total predictions: 26000 True positives: 12634 False positives: 1423 False negatives: 366 True negatives: 11577
#clf = svm.LinearSVC(C=10000, class_weight='balanced', dual=True, fit_intercept=True,
# intercept_scaling=1, loss='squared_hinge', max_iter=1000,
# multi_class='ovr', penalty='l2', random_state=42, tol=1e-05,
# verbose=False)
# =============================================================================
# oversampling with smote at beginning of feature engineering workflow
# =============================================================================
# Accuracy: 0.94785 Precision: 0.96060 Recall: 0.93400 F1: 0.94711 F2: 0.93920
# Total predictions: 26000 True positives: 12142 False positives: 498 False negatives: 858 True negatives: 12502
#clf = ensemble.RandomForestClassifier(n_jobs = 8, random_state=42) # untuned
#RFC with a little tuning
# Accuracy: 0.95115 Precision: 0.96614 Recall: 0.93508 F1: 0.95036 F2: 0.94113
# Total predictions: 26000 True positives: 12156 False positives: 426 False negatives: 844 True negatives: 12574
#clf = ensemble.RandomForestClassifier(bootstrap=True, class_weight=None, criterion='gini',
# max_depth=9, max_features=3, max_leaf_nodes=None,
# min_impurity_decrease=0.0, min_impurity_split=None,
# min_samples_leaf=1, min_samples_split=2,
# min_weight_fraction_leaf=0.0, n_estimators=30, n_jobs=8,
# oob_score=True, random_state=42, verbose=0, warm_start=False)
# skb 40
# Accuracy: 0.95550 Precision: 0.97285 Recall: 0.93715 F1: 0.95467 F2: 0.94408
# Total predictions: 26000 True positives: 12183 False positives: 340 False negatives: 817 True negatives: 12660
#clf = svm.SVC(C=3, cache_size=200, class_weight='balanced', coef0=0.0,
# decision_function_shape='ovr', degree=2, gamma=0.1, kernel='rbf',
# max_iter=-1, probability=False, random_state=42, shrinking=True,
# tol=0.001, verbose=100)
# =============================================================================
# prepare feature lists for export and scoring
# =============================================================================
# Final dataset for evaluation; the commented alternatives below were other
# candidates tried during experimentation.
data = selected_data
#data = oversampling_blsmote
#data = oversampling_data
#data = scaled_data
# Every column except the label is a predictor.
#predictors = xgb_preds
predictors = data.drop(target, axis=1).columns
features = list(predictors.values)
# The tester expects the label ("poi") as the first entry of the feature list;
# `features` itself stays label-free.
features_list = ["poi"] + features
# The tester consumes the dataset as a dict keyed by row index.
my_dataset = data.to_dict(orient="index")
# Score the final classifier. Neither the native xgboost API nor sklearn's
# XGBClassifier is compatible with test_classifier, hence the plain estimator.
test_classifier(clf, my_dataset, features_list)
#%%
### Task 6: Dump your classifier, dataset, and features_list so anyone can
### check your results. You do not need to change anything below, but make sure
### that the version of poi_id.py that you submit can be run on its own and
### generates the necessary .pkl files for validating your results.
#%%
dump_classifier_and_data(clf, my_dataset, features_list)
#%%
# NOTE: the triple-quoted block below is deliberately disabled code -- an
# alternative GridSearchCV workflow kept for reference as an inert string
# expression (it is never executed).
"""
Other Gridsearch Approach
# set up dataframe for results
MLA_columns = ["MLA Name", "train f1", "test f1", "test f1 3*STD", "train prec","test prec","train rec", "test rec", "time"]
MLA_compare = pd.DataFrame(columns=MLA_columns)
row_index = 0
#set timer for total runtime
start_total = time.perf_counter()
# put the whole thing in cv loop over train set....
best_estimators = []
best_parameters = []
for clf, param in zip(MLA_pipe, params):
#set timer for cv runtime
start = time.perf_counter()
#MLA is a list of tuples, index 0 is the name and index 1 is the algorithm
#grid_param is a list of param_grids for the gridsearch for each estimator
# do param search
print("started with ", clf[-1][1].__class__.__name__)
train_f1_score=[]
train_precision_score=[]
train_recall_score=[]
test_auc_score=[]
test_f1_score=[]
test_precision_score=[]
test_recall_score=[]
min_target = -1
best_params = None
#get the pipeline object
pipeline = Pipeline(clf, memory = cachedir)
for n, (train_i, test_i) in enumerate(cv_split.split(data[predictors], data[target])): # use kfold and "average" over the whole dataset, use early stopping in xgboost.train for every eval_set
print("\nfitting CV folds k = {}...".format(n+1))
X_train, X_val = data[predictors].iloc[train_i], data[predictors].iloc[test_i]
y_train, y_val= data[target].iloc[train_i], data[target].iloc[test_i]
#create gridsearch clf
model = model_selection.GridSearchCV(pipeline, param_grid=param, cv = cv_split, iid=False, scoring = "f1", verbose = True, return_train_score = True)
# Now fit model on the data
model.fit(X_train, y_train)
# Evaluating generalization to unseen part of train set
# use best estimator - gridsearch is autofitted
#calculate the training accuracy
trainpreds = model.predict(X_train)
train_f1 = f1_score(y_train, trainpreds)
train_precision = precision_score(y_train,trainpreds)
train_recall = recall_score(y_train,trainpreds)
#calculate the validation accuracy
valpreds = model.predict(X_val)
test_f1 = f1_score(y_val, valpreds)
test_precision = precision_score(y_val,valpreds)
test_recall = recall_score(y_val,valpreds)
# store the scores in their respective lists
train_f1_score.append(train_f1)
train_precision_score.append(train_precision)
train_recall_score.append(train_recall)
test_f1_score.append(test_f1)
test_precision_score.append(test_precision)
test_recall_score.append(test_recall)
#if fold is better on test set than another we use that as our best model and parameters
mean_target = test_f1
if mean_target > min_target:
min_target = mean_target
best_algorithm = model.best_estimator_
best_params = model.best_params_
#store best parameters and estimators
best_estimators.append(best_algorithm)
best_parameters.append(best_params)
#store in CV lists
train_f1_std = (np.std(train_f1_score))
train_f1_CV = (np.mean(train_f1_score))
train_precision_CV = (np.mean(train_precision_score))
train_recall_CV = (np.mean(train_recall_score))
test_f1_std = (np.std(test_f1_score))
test_f1_CV = (np.mean(test_f1_score))
test_precision_CV = (np.mean(test_precision_score))
test_recall_CV = (np.mean(test_recall_score))
# store results in DF
MLA_compare.loc[row_index, "train f1"] = train_f1_CV
MLA_compare.loc[row_index, "test f1"] = test_f1_CV
MLA_compare.loc[row_index, "test f1 3*STD"] = test_f1_std*3
MLA_compare.loc[row_index, "train prec"] = train_precision_CV
MLA_compare.loc[row_index, "test prec"] = test_precision_CV
MLA_compare.loc[row_index, "train rec"] = train_recall_CV
MLA_compare.loc[row_index, "test rec"] = test_recall_CV
MLA_compare.loc[row_index, "MLA Name"] = clf[-1][1].__class__.__name__
duration = time.perf_counter() - start
MLA_compare.loc[row_index, "time"] = "{:.0f}:{:.0f}:{:.1f}".format(\
duration // 3600, (duration % 3600 // 60), duration % 60)
row_index+=1
# print and sort table:
MLA_compare.sort_values(by= ["test f1"], ascending = False, inplace=True)
rmtree(cachedir)
# print total search runtime and best params
endtotal = time.perf_counter() - start_total
print("\nBest params for best algorithm {}: {}, f1-score: {}".format(best_algorithm,
best_params, min_target))
print('Total runtime is {:.0f}:{:.0f}:{:.0f}'.format(endtotal // 3600,
(endtotal % 3600 // 60), endtotal % 60))
print(MLA_compare)
print("\n",best_estimators)
print("\n",best_parameters)
"""
#%%
# NOTE: disabled VIF-based feature-elimination experiment, kept for reference
# as an inert string expression (it is never executed).
"""
# =============================================================================
# # remove variables with a high VIF
# #adding a constant is very important to calculate the correct VIF - why!?
# =============================================================================
from statsmodels.stats.outliers_influence import variance_inflation_factor
from statsmodels.tools.tools import add_constant
# calculate_vif_ shows the features which are over the threshold and returns a new dataframe with the features removed.
def calculate_vif_(df, thresh=5):
'''
Calculates VIF each feature in a pandas dataframe
A constant must be added to variance_inflation_factor or the results will be incorrect
:param df: the pandas dataframe
:param thresh: the max VIF value before the feature is removed from the dataframe
:return: dataframe with features removed
'''
const = add_constant(df)
cols = const.columns
variables = np.arange(const.shape[1])
vif_df = pd.Series([variance_inflation_factor(const.values, i)
for i in variables],
index=cols).to_frame()
vif_df = vif_df.sort_values(by=0, ascending=False).rename(columns={0: 'VIF'})
vif_df = vif_df.drop('const')
vif_df = vif_df[vif_df['VIF'] > thresh]
print('Features above VIF threshold:\n')
print(vif_df[vif_df['VIF'] > thresh])
col_to_drop = list(vif_df.index)
for i in col_to_drop:
print('Dropping: {}'.format(i))
df.drop(columns=i, inplace = True)
return df
#%%
scaled_data_vif = scaled_data.copy(deep=True)
#%%
predictors = scaled_data_vif.drop(target, axis=1).columns.values
scaled_data_vif = calculate_vif_(scaled_data_vif[predictors], thresh=10)
# only remove all above VIF threshold of 10 instead of 5
# we remove features later according to XGB feature importances and CV RMSE
#%%
scaled_data_vif.info()
#dropped columns down to 19 for thresh of 5
#dropped columns down to XXXXX for thresh of 10
"""
#%%
"""
# =============================================================================
# # CV with feature importances of XGB
# =============================================================================
params = {
# Parameters that we are going to tune.
'max_depth':3,
'min_child_weight': 1,
'eta':.05,
'subsample': 1,
'colsample_bytree': 1,
'colsample_bylevel': 1,
'lambda': 1,
'gamma' : 0,
'nthread' : 8,
# Other parameters
'objective': 'binary:logistic',
#'booster':'gblinear', # instead of gbtree for testing?
'seed' : 42,
}
seed = 42
metrics = {'auc'} #maybe logloss?
verbose_eval = False
nfold = 10
folds = cv_split
num_boost_round = 1000
early_stopping_rounds=10
labels = target
#data = data1_cl
#data = train
data = scaled_data
predictors = data.drop(target, axis=1).columns.values
# reference the feature list for later use in the feature importance section
features_list = predictors
# create lists to store train and validation CV scores after each full kfold step with all iterations
train_score_CV = []
val_score_CV = []
#create lists to store std scores for every iteration (all folds)
train_acc_std = []
val_acc_std = []
X_train, X_test, y_train, y_test = train_test_split(data[predictors], data[target], test_size=0.3, shuffle = True, random_state=42)
#DMatrix for every train and val set in folds
dtrain = xgboost.DMatrix(X_train, label=y_train.values, feature_names = predictors, nthread = 8)
dtest = xgboost.DMatrix(X_test, label=y_test.values, feature_names = predictors, nthread = 8)
# fit the model ####
clf = xgboost.train(
params,
dtrain,
num_boost_round=num_boost_round,
evals=[(dtest, "Test")],
early_stopping_rounds=early_stopping_rounds,
verbose_eval = False
)
#print and store boost rounds
print("Best AUC score: {:.2f} in {} rounds".format(clf.best_score, clf.best_iteration+1))
#calculate the training accuracy
trainpreds = clf.predict(dtrain, ntree_limit=clf.best_ntree_limit)
trainpreds = np.where(trainpreds > 0.5, 1, 0) #assign binary labels
train_auc = roc_auc_score(y_train, trainpreds)
train_f1 = f1_score(y_train, trainpreds)
train_precision = precision_score(y_train,trainpreds)
train_recall = recall_score(y_train,trainpreds)
#calculate the validation accuracy
valpreds = clf.predict(dtest, ntree_limit=clf.best_ntree_limit)
valpreds = np.where(valpreds > 0.5, 1, 0)
test_auc = roc_auc_score(y_test, valpreds)
test_f1 = f1_score(y_test, valpreds)
test_precision = precision_score(y_test,valpreds)
test_recall = recall_score(y_test,valpreds)
feature_importance = pd.Series(clf.get_score(importance_type='weight')).sort_values(ascending=False)
# make importances relative to max importance
feature_importance = 100.0 * (feature_importance / feature_importance.max())
print("roc_auc score is: {:.2f}".format(roc_auc_score(y_test, valpreds)))
print("f1_score is: {:.2f}".format(f1_score(y_test, valpreds)))
print("precision is: {:.2f}".format(precision_score(y_test,valpreds)))
print("recall is: {:.2f}".format(recall_score(y_test,valpreds)))
print(confusion_matrix(y_test,valpreds))
#%%
#feature importances (0 importance features are not included)
print(feature_importance)
#%%
#k: A threshold below which to drop features from the final data set.
# the percentage of the most important feature's importance value
# Can cycle through threshold with CV
CVCompare_columns = ["threshold k", "train auc", "test auc", "CV train auc", "CV test auc", "CV test auc 3*STD", "CV boost_rounds", "time"]
CVCompare = pd.DataFrame(columns=CVCompare_columns)
row_index = 0
for k in [30]:#[0,2,5,10,15,20,25,30,50,70]:
start = time.perf_counter()
fi_threshold = k # | |
semsim: <http://bime.uw.edu/semsim/> .
@prefix bqbiol: <http://biomodels.net/biology-qualifiers/> .
@prefix OMEXlib: <http://omex-library.org/> .
@prefix myOMEX: <http://omex-library.org/NewOmex.omex/> .
@prefix local: <http://omex-library.org/NewOmex.omex/NewModel.rdf#> .
local:MediatorParticipant0000
semsim:hasPhysicalEntityReference <http://omex-library.org/NewOmex.omex/NewModel.xml#species0002> .
local:ProcessProperty0000
bqbiol:isPropertyOf <http://omex-library.org/NewOmex.omex/NewModel.xml#reaction0000> ;
bqbiol:isVersionOf <https://identifiers.org/opb:OPB_00592> .
local:SinkParticipant0000
semsim:hasMultiplier "1"^^rdf:double ;
semsim:hasPhysicalEntityReference <http://omex-library.org/NewOmex.omex/NewModel.xml#species0001> .
local:SourceParticipant0000
semsim:hasMultiplier "1"^^rdf:double ;
semsim:hasPhysicalEntityReference <http://omex-library.org/NewOmex.omex/NewModel.xml#species0000> .
<http://omex-library.org/NewOmex.omex/NewModel.xml#reaction0000>
semsim:hasMediatorParticipant local:MediatorParticipant0000 ;
semsim:hasSinkParticipant local:SinkParticipant0000 ;
semsim:hasSourceParticipant local:SourceParticipant0000 .
"""
self.assertTrue(RDF.equals_rdf_vs_string(self.rdf, expected))
    def test_physical_process_cellml1(self):
        """Physical process on a CellML model with an explicit local `about` id.

        Builds one source, one sink and one mediator participant on the local
        resource "Process" and binds an opb:OPB_00592 property to the model
        variable main.ReactionRate, then compares the serialized graph against
        the expected Turtle.  Positional args to to_editor are
        (generate_new_metaids=True, sbml_semantic_extraction=False) --
        mirrors the keyword form used elsewhere in this file.
        """
        editor = self.rdf.to_editor(TestStrings.cellml, True, False)
        with editor.new_physical_process() as physical_process:
            # Explicit about() => the id "Process" is used verbatim (no 0000 suffix).
            physical_process.about("Process", eUriType.LOCAL_URI) \
                .add_source("entity1", eUriType.LOCAL_URI, 1) \
                .add_sink("entity2", eUriType.LOCAL_URI, 1) \
                .add_mediator("entity3", eUriType.LOCAL_URI) \
                .has_property("main.ReactionRate", eUriType.MODEL_URI, "opb:OPB_00592")
        expected = """@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix semsim: <http://bime.uw.edu/semsim/> .
@prefix bqbiol: <http://biomodels.net/biology-qualifiers/> .
@prefix OMEXlib: <http://omex-library.org/> .
@prefix myOMEX: <http://omex-library.org/NewOmex.omex/> .
@prefix local: <http://omex-library.org/NewOmex.omex/NewModel.rdf#> .
local:MediatorParticipant0000
semsim:hasPhysicalEntityReference local:entity3 .
local:Process
semsim:hasMediatorParticipant local:MediatorParticipant0000 ;
semsim:hasSinkParticipant local:SinkParticipant0000 ;
semsim:hasSourceParticipant local:SourceParticipant0000 .
local:SinkParticipant0000
semsim:hasMultiplier "1"^^rdf:double ;
semsim:hasPhysicalEntityReference local:entity2 .
local:SourceParticipant0000
semsim:hasMultiplier "1"^^rdf:double ;
semsim:hasPhysicalEntityReference local:entity1 .
<http://omex-library.org/NewOmex.omex/NewModel.xml#main.ReactionRate>
bqbiol:isPropertyOf local:Process ;
bqbiol:isVersionOf <https://identifiers.org/opb:OPB_00592> .
"""
        self.assertTrue(RDF.equals_rdf_vs_string(self.rdf, expected))
    def test_physical_process_cellml2(self):
        """Physical process on a CellML model without an explicit `about` id.

        Same participants as test_physical_process_cellml1, but no about() call,
        so the process id is auto-generated as local:Process0000 (visible in the
        expected Turtle).  has_property is exercised in its keyword form.
        """
        editor = self.rdf.to_editor(TestStrings.cellml, True, False)
        with editor.new_physical_process() as physical_process:
            physical_process \
                .add_source("entity1", eUriType.LOCAL_URI, 1) \
                .add_sink("entity2", eUriType.LOCAL_URI, 1) \
                .add_mediator("entity3", eUriType.LOCAL_URI) \
                .has_property(property_about="main.ReactionRate", about_uri_type=eUriType.MODEL_URI,
                              is_version_of="opb:OPB_00592")
        expected = """@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix semsim: <http://bime.uw.edu/semsim/> .
@prefix bqbiol: <http://biomodels.net/biology-qualifiers/> .
@prefix OMEXlib: <http://omex-library.org/> .
@prefix myOMEX: <http://omex-library.org/NewOmex.omex/> .
@prefix local: <http://omex-library.org/NewOmex.omex/NewModel.rdf#> .
local:MediatorParticipant0000
semsim:hasPhysicalEntityReference local:entity3 .
local:Process0000
semsim:hasMediatorParticipant local:MediatorParticipant0000 ;
semsim:hasSinkParticipant local:SinkParticipant0000 ;
semsim:hasSourceParticipant local:SourceParticipant0000 .
local:SinkParticipant0000
semsim:hasMultiplier "1"^^rdf:double ;
semsim:hasPhysicalEntityReference local:entity2 .
local:SourceParticipant0000
semsim:hasMultiplier "1"^^rdf:double ;
semsim:hasPhysicalEntityReference local:entity1 .
<http://omex-library.org/NewOmex.omex/NewModel.xml#main.ReactionRate>
bqbiol:isPropertyOf local:Process0000 ;
bqbiol:isVersionOf <https://identifiers.org/opb:OPB_00592> .
"""
        self.assertTrue(RDF.equals_rdf_vs_string(self.rdf, expected))
    def test_energy_diff_sbml1(self):
        """Energy differential on an SBML reaction, property on a local id.

        reaction0000 gets a source (species0000) and a sink (species0001); the
        opb:OPB_01058 property is attached via the explicitly named local
        resource localParameter0000.
        """
        editor = self.rdf.to_editor(TestStrings.sbml, True, False)
        with editor.new_energy_diff() as energy_diff:
            energy_diff.about("reaction0000", eUriType.MODEL_URI) \
                .add_source("species0000", eUriType.MODEL_URI) \
                .add_sink("species0001", eUriType.MODEL_URI) \
                .has_property("localParameter0000", eUriType.LOCAL_URI, "opb:OPB_01058")
        expected = """@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix semsim: <http://bime.uw.edu/semsim/> .
@prefix bqbiol: <http://biomodels.net/biology-qualifiers/> .
@prefix OMEXlib: <http://omex-library.org/> .
@prefix myOMEX: <http://omex-library.org/NewOmex.omex/> .
@prefix local: <http://omex-library.org/NewOmex.omex/NewModel.rdf#> .
local:SinkParticipant0000
semsim:hasPhysicalEntityReference <http://omex-library.org/NewOmex.omex/NewModel.xml#species0001> .
local:SourceParticipant0000
semsim:hasPhysicalEntityReference <http://omex-library.org/NewOmex.omex/NewModel.xml#species0000> .
local:localParameter0000
bqbiol:isPropertyOf <http://omex-library.org/NewOmex.omex/NewModel.xml#reaction0000> ;
bqbiol:isVersionOf <https://identifiers.org/opb:OPB_01058> .
<http://omex-library.org/NewOmex.omex/NewModel.xml#reaction0000>
semsim:hasSinkParticipant local:SinkParticipant0000 ;
semsim:hasSourceParticipant local:SourceParticipant0000 .
"""
        self.assertTrue(RDF.equals_rdf_vs_string(self.rdf, expected))
    def test_energy_diff_sbml2(self):
        """Energy differential with an auto-generated property id.

        has_property is called with only is_version_of, so the property
        resource id is generated as local:EnergyDiffProperty0000 (visible in
        the expected Turtle).
        """
        editor = self.rdf.to_editor(TestStrings.sbml, True, False)
        with editor.new_energy_diff() as energy_diff:
            energy_diff.about("reaction0001", eUriType.MODEL_URI) \
                .add_source("species0001", eUriType.MODEL_URI) \
                .add_sink("species0000", eUriType.MODEL_URI) \
                .has_property(is_version_of="opb:OPB_01058")
        expected = """@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix semsim: <http://bime.uw.edu/semsim/> .
@prefix bqbiol: <http://biomodels.net/biology-qualifiers/> .
@prefix OMEXlib: <http://omex-library.org/> .
@prefix myOMEX: <http://omex-library.org/NewOmex.omex/> .
@prefix local: <http://omex-library.org/NewOmex.omex/NewModel.rdf#> .
local:EnergyDiffProperty0000
bqbiol:isPropertyOf <http://omex-library.org/NewOmex.omex/NewModel.xml#reaction0001> ;
bqbiol:isVersionOf <https://identifiers.org/opb:OPB_01058> .
local:SinkParticipant0000
semsim:hasPhysicalEntityReference <http://omex-library.org/NewOmex.omex/NewModel.xml#species0000> .
local:SourceParticipant0000
semsim:hasPhysicalEntityReference <http://omex-library.org/NewOmex.omex/NewModel.xml#species0001> .
<http://omex-library.org/NewOmex.omex/NewModel.xml#reaction0001>
semsim:hasSinkParticipant local:SinkParticipant0000 ;
semsim:hasSourceParticipant local:SourceParticipant0000 .
"""
        self.assertTrue(RDF.equals_rdf_vs_string(self.rdf, expected))
def test_energy_diff_sbml3(self):
sbml = """<sbml xmlns="http://www.sbml.org/sbml/level3/version1/core" level="3" version="1">
<model metaid="NernstExample" id="NernstExample">
<listOfCompartments>
<compartment id="cytoplasm" metaid="cytoplasm" spatialDimensions="3" size="1" constant="true"/>
<compartment id="extracellular" metaid="extracellular" spatialDimensions="3" size="1" constant="true"/>
</listOfCompartments>
<listOfSpecies>
<species id="Ca_ex" metaid="Ca_ex" compartment="extracellular" initialConcentration="2" hasOnlySubstanceUnits="false" boundaryCondition="false" constant="false"/>
<species id="Ca_cyt" metaid="Ca_cyt" compartment="cytoplasm" initialConcentration="0.07" hasOnlySubstanceUnits="false" boundaryCondition="false" constant="false"/>
</listOfSpecies>
<listOfParameters>
<parameter id="NP" metaid="NernstPotential" value="137.04" constant="true"/>
</listOfParameters>
</model>
</sbml>"""
rdf_graph = RDF()
rdf_graph.set_archive_uri("Example.omex")
rdf_graph.set_model_uri("Example.sbml")
editor = rdf_graph.to_editor(sbml, generate_new_metaids=False, sbml_semantic_extraction=False)
# Ca_cyt: Calcium Ions cytosol
# Ca_ex: Calcium Ions extracellular space
# NernstReversalPotential_in: The metaID of the SBML reaction
# OPB/OPB_01581: Nernst reversal potential
with editor.new_energy_diff() as energy_in:
energy_in \
.about("EnergyDiff000", eUriType.LOCAL_URI) \
.add_source(physical_entity_reference="Ca_ex", uri_type=eUriType.MODEL_URI) \
.add_sink(physical_entity_reference="Ca_cyt", uri_type=eUriType.MODEL_URI) \
.has_property(property_about="NernstPotential", about_uri_type=eUriType.MODEL_URI,
is_version_of="OPB:OPB_01581")
print(rdf_graph)
    def test_energy_diff_cellml1(self):
        """Energy differential on a CellML variable (positional has_property).

        main.MembraneVoltage receives a source/sink pair of local entities and
        an opb:OPB_00592 property bound to the model-level EnergyDiffProperty.
        """
        editor = self.rdf.to_editor(TestStrings.cellml, True, False)
        with editor.new_energy_diff() as energy_diff:
            energy_diff.about("main.MembraneVoltage", eUriType.MODEL_URI) \
                .add_source("entity1", eUriType.LOCAL_URI) \
                .add_sink("entity2", eUriType.LOCAL_URI) \
                .has_property("EnergyDiffProperty", eUriType.MODEL_URI, "opb:OPB_00592")
        expected = """@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix semsim: <http://bime.uw.edu/semsim/> .
@prefix bqbiol: <http://biomodels.net/biology-qualifiers/> .
@prefix OMEXlib: <http://omex-library.org/> .
@prefix myOMEX: <http://omex-library.org/NewOmex.omex/> .
@prefix local: <http://omex-library.org/NewOmex.omex/NewModel.rdf#> .
local:SinkParticipant0000
semsim:hasPhysicalEntityReference local:entity2 .
local:SourceParticipant0000
semsim:hasPhysicalEntityReference local:entity1 .
<http://omex-library.org/NewOmex.omex/NewModel.xml#EnergyDiffProperty>
bqbiol:isPropertyOf <http://omex-library.org/NewOmex.omex/NewModel.xml#main.MembraneVoltage> ;
bqbiol:isVersionOf <https://identifiers.org/opb:OPB_00592> .
<http://omex-library.org/NewOmex.omex/NewModel.xml#main.MembraneVoltage>
semsim:hasSinkParticipant local:SinkParticipant0000 ;
semsim:hasSourceParticipant local:SourceParticipant0000 .
"""
        self.assertTrue(RDF.equals_rdf_vs_string(self.rdf, expected))
    def test_energy_diff_cellml2(self):
        """Energy differential on a CellML variable.

        NOTE(review): this test is byte-for-byte identical to
        test_energy_diff_cellml1; presumably it was meant to exercise a
        variant (e.g. the keyword form of has_property, as the sbml2/cellml2
        siblings do) -- confirm intent with the author.
        """
        editor = self.rdf.to_editor(TestStrings.cellml, True, False)
        with editor.new_energy_diff() as energy_diff:
            energy_diff.about("main.MembraneVoltage", eUriType.MODEL_URI) \
                .add_source("entity1", eUriType.LOCAL_URI) \
                .add_sink("entity2", eUriType.LOCAL_URI) \
                .has_property("EnergyDiffProperty", eUriType.MODEL_URI, "opb:OPB_00592")
        expected = """@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix semsim: <http://bime.uw.edu/semsim/> .
@prefix bqbiol: <http://biomodels.net/biology-qualifiers/> .
@prefix OMEXlib: <http://omex-library.org/> .
@prefix myOMEX: <http://omex-library.org/NewOmex.omex/> .
@prefix local: <http://omex-library.org/NewOmex.omex/NewModel.rdf#> .
local:SinkParticipant0000
semsim:hasPhysicalEntityReference local:entity2 .
local:SourceParticipant0000
semsim:hasPhysicalEntityReference local:entity1 .
<http://omex-library.org/NewOmex.omex/NewModel.xml#EnergyDiffProperty>
bqbiol:isPropertyOf <http://omex-library.org/NewOmex.omex/NewModel.xml#main.MembraneVoltage> ;
bqbiol:isVersionOf <https://identifiers.org/opb:OPB_00592> .
<http://omex-library.org/NewOmex.omex/NewModel.xml#main.MembraneVoltage>
semsim:hasSinkParticipant local:SinkParticipant0000 ;
semsim:hasSourceParticipant local:SourceParticipant0000 .
"""
        self.assertTrue(RDF.equals_rdf_vs_string(self.rdf, expected))
class AnnotateAModelTest(unittest.TestCase):
maxDiff = None
    def setUp(self) -> None:
        """Build the SBML shared by this class's tests from an Antimony model.

        The model is a two-compartment Smad3 nuclear-shuttling system:
        Smad3Cyt (cytosol) and Smad3Nuc (nucleus) interconvert via two
        first-order reactions r1/r2 with rate constants k1/k2.
        """
        ant = """
model SmadNuclearTransport
compartment cytosol;
compartment nucleus;
Smad3Cyt in cytosol;
Smad3Nuc in nucleus;
k1 = 0.1;
k2 = 1;
Smad3Nuc = 10;
Smad3Cyt = 10;
r1: Smad3Nuc => Smad3Cyt; k1*Smad3Nuc;
r2: Smad3Cyt => Smad3Nuc; k2*Smad3Cyt;
end
"""
        self.sbml = te.antimonyToSBML(ant)
def test_get_metaids(self):
rdf = RDF()
editor = rdf.to_editor(self.sbml, generate_new_metaids=True)
metaids = editor.get_metaids()
expected = ['SmadNuclearTransport',
'compartment0000',
'compartment0001',
'species0000',
'species0001',
'parameter0000',
'parameter0001',
'reaction0000',
'kineticLaw0000',
'reaction0001',
'kineticLaw0001']
actual = metaids
self.assertEqual(expected, actual)
def test_get_xml(self):
rdf = RDF()
editor = rdf.to_editor(self.sbml, generate_new_metaids=True)
xml_with_metaids = editor.get_xml()
expected = """<?xml version="1.1" encoding="UTF-8"?>
<!-- Created by libAntimony version v2.12.0.3 with libSBML version 5.18.1. -->
<sbml xmlns="http://www.sbml.org/sbml/level3/version1/core" level="3" version="1">
<model metaid="SmadNuclearTransport" id="SmadNuclearTransport">
<listOfCompartments>
<compartment id="cytosol" spatialDimensions="3" constant="true" metaid="#species0000"/>
<compartment id="nucleus" spatialDimensions="3" constant="true" metaid="#OmexMetaId0001"/>
</listOfCompartments>
<listOfSpecies>
<species id="Smad3Cyt" compartment="cytosol" initialConcentration="10" hasOnlySubstanceUnits="false" boundaryCondition="false" constant="false" metaid="#OmexMetaId0002"/>
<species id="Smad3Nuc" compartment="nucleus" initialConcentration="10" hasOnlySubstanceUnits="false" boundaryCondition="false" constant="false" metaid="#OmexMetaId0003"/>
</listOfSpecies>
<listOfParameters>
<parameter id="k1" value="0.1" constant="true"/>
<parameter id="k2" value="1" constant="true"/>
</listOfParameters>
<listOfReactions>
<reaction id="r1" reversible="false" fast="false" metaid="#OmexMetaId0004">
<listOfReactants>
<speciesReference species="Smad3Nuc" stoichiometry="1" constant="true"/>
</listOfReactants>
<listOfProducts>
<speciesReference species="Smad3Cyt" stoichiometry="1" constant="true"/>
</listOfProducts>
<kineticLaw metaid="#OmexMetaId0005">
<math xmlns="http://www.w3.org/1998/Math/MathML">
<apply>
<times/>
<ci> k1 </ci>
<ci> Smad3Nuc </ci>
</apply>
</math>
</kineticLaw>
</reaction>
<reaction id="r2" reversible="false" fast="false" metaid="#OmexMetaId0006">
<listOfReactants>
<speciesReference species="Smad3Cyt" stoichiometry="1" constant="true"/>
</listOfReactants>
<listOfProducts>
<speciesReference species="Smad3Nuc" stoichiometry="1" constant="true"/>
</listOfProducts>
<kineticLaw metaid="#OmexMetaId0007">
<math xmlns="http://www.w3.org/1998/Math/MathML">
<apply>
<times/>
<ci> k2 </ci>
<ci> Smad3Cyt </ci>
</apply>
</math>
</kineticLaw>
</reaction>
</listOfReactions>
</model>
</sbml>
"""
actual = xml_with_metaids
print(actual)
self.assertTrue(expected, actual)
    def test_annotate_model(self):
        """Annotate the Smad model end-to-end and compare the full RDF graph.

        Adds a model-level author annotation, physical-entity annotations for
        both species, and physical-process annotations for both reactions,
        then checks the combined graph -- including triples auto-extracted
        from the SBML (the opb:OPB_00592 ProcessProperty0000/0001 entries and
        the isPartOf compartment links in the expected Turtle) -- against the
        expected string.

        Note (original author): participant IDs could be autogenerated;
        currently users are asked to give the id, but this isn't really
        necessary.
        """
        rdf = RDF()
        editor = rdf.to_editor(self.sbml, generate_new_metaids=True)
        # model level annotations
        with editor.new_singular_annotation() as author:
            author.about("SmadNuclearTransport") \
                .predicate_from_uri("https://unknownpredicate.com/changeme#author") \
                .resource_literal("<NAME>")
        # annotate species0000 (cytosolic Smad3 -- it is linked isPartOf cytosol
        # in the expected graph below)
        with editor.new_physical_entity() as smad3nuc:
            smad3nuc \
                .about("species0000", eUriType.MODEL_URI) \
                .has_property(is_version_of="OPB:OPB_00340") \
                .identity("uniprot:P84022") \
                .is_part_of("obo/FMA_7163") \
                .is_part_of("obo/FMA_264020")
        # annotate species0001 (nuclear Smad3 -- linked isPartOf nucleus below)
        with editor.new_physical_entity() as smad3nuc:
            smad3nuc \
                .about("species0001", eUriType.MODEL_URI) \
                .has_property(is_version_of="OPB:OPB_00340") \
                .identity("uniprot:P84022") \
                .is_part_of("obo/FMA_7163") \
                .is_part_of("obo/FMA_63877") \
                .is_part_of("obo/FMA_63840")
        # annotate r1 (Smad3Nuc -> Smad3Cyt)
        with editor.new_physical_process() as export_reaction:
            export_reaction \
                .about("reaction0000", eUriType.MODEL_URI) \
                .has_property(is_version_of="OPB:OPB_00237") \
                .add_source("species0000", eUriType.MODEL_URI, 1) \
                .add_sink("species0001", eUriType.MODEL_URI, 1)
        # annotate r2 (Smad3Cyt -> Smad3Nuc)
        with editor.new_physical_process() as export_reaction:
            export_reaction \
                .about("reaction0001", eUriType.MODEL_URI) \
                .has_property(is_version_of="OPB:OPB_00237") \
                .add_source("species0001", eUriType.MODEL_URI, 1) \
                .add_sink("species0000", eUriType.MODEL_URI, 1)
        expected = """@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix bqbiol: <http://biomodels.net/biology-qualifiers/> .
@prefix semsim: <http://bime.uw.edu/semsim/> .
@prefix OMEXlib: <http://omex-library.org/> .
@prefix local: <http://omex-library.org/NewOmex.omex/NewModel.rdf#> .
local:EntityProperty0000
bqbiol:isPropertyOf <http://omex-library.org/NewOmex.omex/NewModel.xml#species0000> ;
bqbiol:isVersionOf <https://identifiers.org/OPB:OPB_00340> .
local:EntityProperty0001
bqbiol:isPropertyOf <http://omex-library.org/NewOmex.omex/NewModel.xml#species0001> ;
bqbiol:isVersionOf <https://identifiers.org/OPB:OPB_00340> .
local:ProcessProperty0000
bqbiol:isPropertyOf <http://omex-library.org/NewOmex.omex/NewModel.xml#reaction0000> ;
bqbiol:isVersionOf <https://identifiers.org/opb:OPB_00592> .
local:ProcessProperty0001
bqbiol:isPropertyOf <http://omex-library.org/NewOmex.omex/NewModel.xml#reaction0001> ;
bqbiol:isVersionOf <https://identifiers.org/opb:OPB_00592> .
local:ProcessProperty0002
bqbiol:isPropertyOf <http://omex-library.org/NewOmex.omex/NewModel.xml#reaction0000> ;
bqbiol:isVersionOf <https://identifiers.org/OPB:OPB_00237> .
local:ProcessProperty0003
bqbiol:isPropertyOf <http://omex-library.org/NewOmex.omex/NewModel.xml#reaction0001> ;
bqbiol:isVersionOf <https://identifiers.org/OPB:OPB_00237> .
local:SinkParticipant0000
semsim:hasMultiplier "1"^^rdf:double ;
semsim:hasPhysicalEntityReference <http://omex-library.org/NewOmex.omex/NewModel.xml#species0000> .
local:SinkParticipant0001
semsim:hasMultiplier "1"^^rdf:double ;
semsim:hasPhysicalEntityReference <http://omex-library.org/NewOmex.omex/NewModel.xml#species0001> .
local:SinkParticipant0002
semsim:hasMultiplier "1"^^rdf:double ;
semsim:hasPhysicalEntityReference <http://omex-library.org/NewOmex.omex/NewModel.xml#species0001> .
local:SinkParticipant0003
semsim:hasMultiplier "1"^^rdf:double ;
semsim:hasPhysicalEntityReference <http://omex-library.org/NewOmex.omex/NewModel.xml#species0000> .
local:SourceParticipant0000
semsim:hasMultiplier "1"^^rdf:double ;
semsim:hasPhysicalEntityReference <http://omex-library.org/NewOmex.omex/NewModel.xml#species0001> .
local:SourceParticipant0001
semsim:hasMultiplier "1"^^rdf:double ;
semsim:hasPhysicalEntityReference <http://omex-library.org/NewOmex.omex/NewModel.xml#species0000> .
local:SourceParticipant0002
semsim:hasMultiplier "1"^^rdf:double ;
semsim:hasPhysicalEntityReference <http://omex-library.org/NewOmex.omex/NewModel.xml#species0000> .
local:SourceParticipant0003
semsim:hasMultiplier "1"^^rdf:double ;
semsim:hasPhysicalEntityReference <http://omex-library.org/NewOmex.omex/NewModel.xml#species0001> .
<http://omex-library.org/NewOmex.omex/NewModel.xml#SmadNuclearTransport>
<https://unknownpredicate.com/changeme#author> "<NAME>" .
<http://omex-library.org/NewOmex.omex/NewModel.xml#reaction0000>
semsim:hasSinkParticipant local:SinkParticipant0000, local:SinkParticipant0002 ;
semsim:hasSourceParticipant local:SourceParticipant0000, local:SourceParticipant0002 .
<http://omex-library.org/NewOmex.omex/NewModel.xml#reaction0001>
semsim:hasSinkParticipant local:SinkParticipant0001, local:SinkParticipant0003 ;
semsim:hasSourceParticipant local:SourceParticipant0001, local:SourceParticipant0003 .
<http://omex-library.org/NewOmex.omex/NewModel.xml#species0000>
bqbiol:is <https://identifiers.org/uniprot:P84022> ;
bqbiol:isPartOf <http://omex-library.org/NewOmex.omex/NewModel.xml#cytosol>, <https://identifiers.org/obo/FMA_264020>, <https://identifiers.org/obo/FMA_7163> .
<http://omex-library.org/NewOmex.omex/NewModel.xml#species0001>
bqbiol:is <https://identifiers.org/uniprot:P84022> ;
bqbiol:isPartOf <http://omex-library.org/NewOmex.omex/NewModel.xml#nucleus>, <https://identifiers.org/obo/FMA_63840>, <https://identifiers.org/obo/FMA_63877>, <https://identifiers.org/obo/FMA_7163> .
"""
        self.assertTrue(RDF.equals_rdf_vs_string(rdf, expected))
def test_to_editor_with_sbml_extraction(self):
    """With sbml_semantic_extraction=True, to_editor() should auto-extract
    semsim participant/property triples from the SBML reactions in addition
    to the one annotation added manually below.
    """
    rdf = RDF()
    editor = rdf.to_editor(self.sbml, generate_new_metaids=True, sbml_semantic_extraction=True)
    # model level annotations
    with editor.new_singular_annotation() as author:
        author.about("SmadNuclearTransport") \
            .predicate_from_uri("https://unknownpredicate.com/changeme#author") \
            .resource_literal("<NAME>")
    # Expected graph: the manual author annotation plus the triples the
    # SBML semantic extraction derives from reactions/species.
    expected = """@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix bqbiol: <http://biomodels.net/biology-qualifiers/> .
@prefix semsim: <http://bime.uw.edu/semsim/> .
@prefix OMEXlib: <http://omex-library.org/> .
@prefix local: <http://omex-library.org/NewOmex.omex/NewModel.rdf#> .
local:ProcessProperty0000
bqbiol:isPropertyOf <http://omex-library.org/NewOmex.omex/NewModel.xml#reaction0000> ;
bqbiol:isVersionOf <https://identifiers.org/opb:OPB_00592> .
local:ProcessProperty0001
bqbiol:isPropertyOf <http://omex-library.org/NewOmex.omex/NewModel.xml#reaction0001> ;
bqbiol:isVersionOf <https://identifiers.org/opb:OPB_00592> .
local:SinkParticipant0000
semsim:hasMultiplier "1"^^rdf:double ;
semsim:hasPhysicalEntityReference <http://omex-library.org/NewOmex.omex/NewModel.xml#species0000> .
local:SinkParticipant0001
semsim:hasMultiplier "1"^^rdf:double ;
semsim:hasPhysicalEntityReference <http://omex-library.org/NewOmex.omex/NewModel.xml#species0001> .
local:SourceParticipant0000
semsim:hasMultiplier "1"^^rdf:double ;
semsim:hasPhysicalEntityReference <http://omex-library.org/NewOmex.omex/NewModel.xml#species0001> .
local:SourceParticipant0001
semsim:hasMultiplier "1"^^rdf:double ;
semsim:hasPhysicalEntityReference <http://omex-library.org/NewOmex.omex/NewModel.xml#species0000> .
<http://omex-library.org/NewOmex.omex/NewModel.xml#SmadNuclearTransport>
<https://unknownpredicate.com/changeme#author> "<NAME>" .
<http://omex-library.org/NewOmex.omex/NewModel.xml#reaction0000>
semsim:hasSinkParticipant local:SinkParticipant0000 ;
semsim:hasSourceParticipant local:SourceParticipant0000 .
<http://omex-library.org/NewOmex.omex/NewModel.xml#reaction0001>
semsim:hasSinkParticipant local:SinkParticipant0001 ;
semsim:hasSourceParticipant local:SourceParticipant0001 .
<http://omex-library.org/NewOmex.omex/NewModel.xml#species0000>
bqbiol:isPartOf <http://omex-library.org/NewOmex.omex/NewModel.xml#cytosol> .
<http://omex-library.org/NewOmex.omex/NewModel.xml#species0001>
bqbiol:isPartOf <http://omex-library.org/NewOmex.omex/NewModel.xml#nucleus> ."""
    # Graph-level comparison (whitespace/ordering-insensitive).
    self.assertTrue(RDF.equals_rdf_vs_string(rdf, expected))
def test_to_editor_without_sbml_extraction(self):
    """With sbml_semantic_extraction=False no SBML-derived triples appear;
    the graph must contain only the annotation added manually below.
    """
    rdf = RDF()
    editor = rdf.to_editor(self.sbml, generate_new_metaids=True, sbml_semantic_extraction=False)
    # model level annotations
    with editor.new_singular_annotation() as author:
        author.about("SmadNuclearTransport") \
            .predicate_from_uri("https://unknownpredicate.com/changeme#author") \
            .resource_literal("<NAME>")
    # BUGFIX: the expected literal previously read "<NAME>sh" (stray "sh"
    # left over), which can never match the "<NAME>" literal written above.
    expected = """@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix OMEXlib: <http://omex-library.org/> .
@prefix myOMEX: <http://omex-library.org/NewOmex.omex/> .
@prefix local: <http://omex-library.org/NewOmex.omex/NewModel.rdf#> .
<http://omex-library.org/NewOmex.omex/NewModel.xml#SmadNuclearTransport>
<https://unknownpredicate.com/changeme#author> "<NAME>" .
"""
    self.assertTrue(RDF.equals_rdf_vs_string(rdf, expected))
def test_personal_information(self):
rdf = RDF()
editor = rdf.to_editor(self.sbml, generate_new_metaids=True, sbml_semantic_extraction=False)
with editor.new_personal_information() as personal_information:
personal_information.add_creator("1234-1234-1234-1234") \
.add_name("Ciaran") \
.add_mbox("<EMAIL>") \
.add_account_name("1234-1234-1234-1234") \
.add_account_service_homepage("https://github.com/sys-bio/libomexmeta")
expected = """@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix dc: <https://dublincore.org/specifications/dublin-core/dcmi-terms/> .
@prefix foaf: <http://xmlns.com/foaf/0.1/> .
@prefix OMEXlib: <http://omex-library.org/> .
@prefix myOMEX: <http://omex-library.org/NewOmex.omex/> .
@prefix local: <http://omex-library.org/NewOmex.omex/NewModel.rdf#> .
<http://omex-library.org/NewOmex.omex/NewModel.xml>
dc:creator <http://omex-library.org/NewOmex.omex/NewModel.xml#PersonalInfo0000> .
<http://omex-library.org/NewOmex.omex/NewModel.xml#PersonalInfo0000>
foaf:accountName <https://orcid.org/1234-1234-1234-1234> ;
foaf:accountServiceHomepage <https://github.com/sys-bio/libomexmeta> ;
foaf:mbox "cwelsh<EMAIL>" ;
foaf:name "Ciaran" ;
dc:creator <https://identifiers.org/orcid/1234-1234-1234-1234> .
"""
| |
# Copyright 2020 Inspur
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import getpass
import inspect
import os
import sys
import textwrap
import decorator
from oslo_utils import encodeutils
from oslo_utils import strutils
import prettytable
from venusclient.common.apiclient import exceptions
from venusclient.i18n import _
# Template for flag-deprecation notices; filled with
# (warning prefix, old option name, replacement positional name, suffix).
DEPRECATION_BASE = ('%sThe --%s parameter is deprecated and '
                    'will be removed in a future release. Use the '
                    '<%s> positional parameter %s.')

# Help text / runtime warning for the deprecated --name option.
NAME_DEPRECATION_HELP = DEPRECATION_BASE % ('', 'name', 'name', 'instead')
NAME_DEPRECATION_WARNING = DEPRECATION_BASE % (
    'WARNING: ', 'name', 'name', 'to avoid seeing this message')

# Help text / runtime warning for the deprecated --cluster option.
CLUSTER_DEPRECATION_HELP = DEPRECATION_BASE % ('', 'cluster', 'cluster',
                                               'instead')
CLUSTER_DEPRECATION_WARNING = DEPRECATION_BASE % (
    'WARNING: ', 'cluster', 'cluster', 'to avoid seeing this message')

# Blanket deprecation notice for the whole venus CLI client.
VENUS_CLIENT_DEPRECATION_WARNING = (
    'WARNING: The venus client is deprecated and will be removed in a future '
    'release.\nUse the OpenStack client to avoid seeing this message.')
def deprecation_message(preamble, new_name):
    """Build a deprecation notice pointing callers at ``--<new_name>``.

    :param preamble: sentence prepended to the boilerplate notice
    :param new_name: name of the replacement option (without ``--``)
    :returns: the formatted message string
    """
    return ('%s This parameter is deprecated and will be removed in a future '
            'release. Use --%s instead.' % (preamble, new_name))
class MissingArgs(Exception):
    """Supplied arguments are not sufficient for calling a function."""

    def __init__(self, missing):
        # Keep the raw list so callers can inspect which names were absent.
        self.missing = missing
        super(MissingArgs, self).__init__(
            _("Missing arguments: %s") % ", ".join(missing))
class DuplicateArgs(Exception):
    """More than one of the same argument type was passed."""

    def __init__(self, param, dupes):
        details = {'param': param, 'dupes': ", ".join(dupes)}
        super(DuplicateArgs, self).__init__(
            _('Duplicate "%(param)s" arguments: %(dupes)s') % details)
def validate_args(fn, *args, **kwargs):
    """Check that the supplied args are sufficient for calling a function.

    >>> validate_args(lambda a: None)
    Traceback (most recent call last):
        ...
    MissingArgs: Missing arguments: a
    >>> validate_args(lambda a, b, c, d: None, 0, c=1)
    Traceback (most recent call last):
        ...
    MissingArgs: Missing arguments: b, d

    :param fn: the function to check
    :param args: the positional arguments supplied
    :param kwargs: the keyword arguments supplied
    :raises MissingArgs: if any required argument is absent
    """
    # BUGFIX: inspect.getargspec() was removed in Python 3.11;
    # getfullargspec() is the drop-in replacement exposing the same
    # .args/.defaults fields used below.
    argspec = inspect.getfullargspec(fn)

    num_defaults = len(argspec.defaults or [])
    # Parameters without defaults are the ones the caller must supply.
    required_args = argspec.args[:len(argspec.args) - num_defaults]

    def isbound(method):
        return getattr(method, '__self__', None) is not None

    if isbound(fn):
        required_args.pop(0)  # drop the implicit 'self'/'cls' slot

    # Remove names satisfied via keywords, then assume positionals fill
    # the remaining required slots from the left.
    missing = [arg for arg in required_args if arg not in kwargs]
    missing = missing[len(args):]
    if missing:
        raise MissingArgs(missing)
def validate_name_args(positional_name, optional_name):
    """Warn about the deprecated --name flag and reject duplicate names.

    :raises DuplicateArgs: if the name was given both positionally and
        via the deprecated option
    """
    if optional_name:
        print(NAME_DEPRECATION_WARNING)
        if positional_name:
            raise DuplicateArgs("<name>", (positional_name, optional_name))
def validate_cluster_args(positional_cluster, optional_cluster):
    """Warn about the deprecated --cluster flag and reject duplicates.

    :raises DuplicateArgs: if the cluster was given both positionally and
        via the deprecated option
    """
    if optional_cluster:
        print(CLUSTER_DEPRECATION_WARNING)
        if positional_cluster:
            raise DuplicateArgs("<cluster>",
                                (positional_cluster, optional_cluster))
def deprecated(message):
    """Decorator factory that prints *message* on every call of the wrapped
    function, marking it as deprecated.

    Example:

    >>> @deprecated("Bay functions are deprecated and should be replaced by "
    ...             "calls to cluster")
    ... def bay_create(args):
    ...     pass
    """
    @decorator.decorator
    def _warn_then_call(func, *args, **kwargs):
        print(message)
        return func(*args, **kwargs)
    return _warn_then_call
def deprecation_map(dep_map):
    """Decorator for applying a map of deprecating arguments to a function.

    The map connects deprecating arguments and their replacements. The
    shell.py script uses this map to create mutually exclusive argument groups
    in argparse and also prints a deprecation warning telling the user to
    switch to the updated argument.

    NOTE: This decorator MUST be the outermost in the chain of argument
    decorators to work correctly.

    Example usage:

    >>> @deprecation_map({ "old-argument": "new-argument" })
    ... @args("old-argument", required=True)
    ... @args("new-argument", required=True)
    ... def do_command_line_stuff():
    ...     pass
    """
    def _decorator(func):
        # Only functions decorated with @arg/@args carry an ``arguments``
        # list; anything else passes through unchanged.
        if not hasattr(func, 'arguments'):
            return func
        func.deprecated_groups = []
        for old_param, new_param in dep_map.items():
            old_info, new_info = None, None
            required = False
            # Locate both the deprecated and the replacement argument specs.
            for (args, kwargs) in func.arguments:
                if old_param in args:
                    old_info = (args, kwargs)
                    # Old arguments shouldn't be required if they were not
                    # previously, so prioritize old requirement
                    if 'required' in kwargs:
                        required = kwargs['required']
                    # Set to false so argparse doesn't get angry
                    kwargs['required'] = False
                elif new_param in args:
                    new_info = (args, kwargs)
                    kwargs['required'] = False
                if old_info and new_info:
                    break
            # Add a tuple of (old, new, required), which in turn is:
            # ((old_args, old_kwargs), (new_args, new_kwargs), required)
            func.deprecated_groups.append((old_info, new_info, required))
            # Remove arguments that would be duplicated by the groups we made
            # NOTE(review): if either parameter is absent from
            # func.arguments, old_info/new_info stays None and remove()
            # raises ValueError — presumably dep_map entries always match
            # declared arguments; verify at call sites.
            func.arguments.remove(old_info)
            func.arguments.remove(new_info)
        return func
    return _decorator
def arg(*args, **kwargs):
    """Decorator for CLI args.

    Example:

    >>> @arg("name", help="Name of the new entity")
    ... def entity_create(args):
    ...     pass
    """
    def _bind(func):
        add_arg(func, *args, **kwargs)
        return func
    return _bind
def env(*args, **kwargs):
    """Return the first non-empty environment variable among *args*.

    Falls back to ``kwargs['default']`` (or ``''``) when none is set.
    """
    candidates = (os.environ.get(name) for name in args)
    for value in candidates:
        if value:
            return value
    return kwargs.get('default', '')
def add_arg(func, *args, **kwargs):
    """Bind CLI arguments to a shell.py `do_foo` function."""
    if not hasattr(func, 'arguments'):
        func.arguments = []
    entry = (args, kwargs)
    # NOTE(sirp): avoid dups that can occur when the module is shared
    # across tests.
    if entry in func.arguments:
        return
    # Decorators apply bottom-up, so prepend to keep positional options
    # in their natural (source) order.
    func.arguments.insert(0, entry)
def unauthenticated(func):
    """Mark the decorated function as callable without authentication.

    Usage:

    >>> @unauthenticated
    ... def mymethod(f):
    ...     pass
    """
    setattr(func, 'unauthenticated', True)
    return func
def isunauthenticated(func):
    """Check whether *func* does not require authentication.

    Such functions are marked with the `@unauthenticated` decorator.

    :returns: bool
    """
    try:
        return func.unauthenticated
    except AttributeError:
        return False
def print_list(objs, fields, formatters=None, sortby_index=0,
               mixed_case_fields=None, field_labels=None):
    """Print a list or objects as a table, one row per object.

    :param objs: iterable of :class:`Resource`
    :param fields: attributes that correspond to columns, in order
    :param formatters: `dict` of callables for field formatting
    :param sortby_index: index of the field for sorting table rows
    :param mixed_case_fields: fields corresponding to object attributes that
        have mixed case names (e.g., 'serverId')
    :param field_labels: Labels to use in the heading of the table, default to
        fields.
    :raises ValueError: if *field_labels* and *fields* differ in length
    """
    formatters = formatters or {}
    mixed_case_fields = mixed_case_fields or []
    field_labels = field_labels or fields
    if len(field_labels) != len(fields):
        # BUGFIX: interpolate before raising — previously the mapping was
        # passed as a second ValueError argument, so the placeholders were
        # never filled in the rendered message.
        raise ValueError(_("Field labels list %(labels)s has different number "
                           "of elements than fields list %(fields)s")
                         % {'labels': field_labels, 'fields': fields})
    if sortby_index is None:
        kwargs = {}
    else:
        kwargs = {'sortby': field_labels[sortby_index]}
    pt = prettytable.PrettyTable(field_labels)
    pt.align = 'l'
    for o in objs:
        row = []
        for field in fields:
            if field in formatters:
                data = formatters[field](o)
            else:
                # Mixed-case attributes (e.g. 'serverId') must not be
                # lowercased before lookup.
                if field in mixed_case_fields:
                    field_name = field.replace(' ', '_')
                else:
                    field_name = field.lower().replace(' ', '_')
                data = getattr(o, field_name, '')
            if data is None:
                data = '-'
            row.append(data)
        pt.add_row(row)
    print(encodeutils.safe_encode(pt.get_string(**kwargs)).decode())
def keys_and_vals_to_strs(dictionary):
    """Recursively convert a dictionary's keys and values to strings.

    Note: only nested dicts are recursed and only `str` values are passed
    through ``str()``; other types (ints, etc.) are returned unchanged.

    :param dictionary: dictionary whose keys/vals are to be converted to strs
    """
    def _convert(item):
        if isinstance(item, dict):
            return keys_and_vals_to_strs(item)
        if isinstance(item, str):
            return str(item)
        return item

    return {_convert(key): _convert(val) for key, val in dictionary.items()}
def print_dict(dct, dict_property="Property", wrap=0):
    """Print a `dict` as a table of two columns.

    :param dct: `dict` to print
    :param dict_property: name of the first column
    :param wrap: wrapping for the second column
    """
    table = prettytable.PrettyTable([dict_property, 'Value'])
    table.align = 'l'
    for key, value in dct.items():
        # Stringify nested dicts so their length can be measured/wrapped.
        if isinstance(value, dict):
            value = str(keys_and_vals_to_strs(value))
        if wrap > 0:
            value = textwrap.fill(str(value), wrap)
        # A literal backslash-n sequence (e.g. a fault with a stacktrace)
        # is rendered as multiple rows under a single key.
        if value and isinstance(value, str) and r'\n' in value:
            label = key
            for segment in value.strip().split(r'\n'):
                table.add_row([label, segment])
                label = ''
        elif isinstance(value, list):
            rendered = str([str(item) for item in value])
            table.add_row([key, rendered if rendered is not None else '-'])
        else:
            table.add_row([key, '-' if value is None else value])
    print(encodeutils.safe_encode(table.get_string()).decode())
def get_password(max_password_prompts=3):
    """Read a password from the TTY.

    Honors OS_VERIFY_PASSWORD: when truthy, the password must be entered
    twice and both entries must match. Returns ``None`` when stdin is not
    a TTY, on Ctrl-D (EOF), or when all prompts are exhausted.
    """
    verify = strutils.bool_from_string(env("OS_VERIFY_PASSWORD"))
    if not (hasattr(sys.stdin, "isatty") and sys.stdin.isatty()):
        return None
    try:
        for _attempt in range(max_password_prompts):
            first = getpass.getpass("OS Password: ")
            second = getpass.getpass("Please verify: ") if verify else first
            if first and first == second:
                return first
    except EOFError:
        # Ctrl-D pressed: give up silently.
        pass
    return None
def service_type(stype):
"""Adds 'service_type' attribute to decorated function.
Usage:
.. code-block:: python
@service_type('volume')
def mymethod(f):
...
"""
def | |
delete-orphan')
# 微猜想评审状态,0:待评审;1:已通过;-1 已否决
status = db.Column(db.Integer, default=0)
# 用户在孵化器中引用的微知识,一对多,一方
microknos_cites = db.relationship('MicroknosCites', backref='microcon',
lazy='dynamic', cascade='all, delete-orphan')
def __repr__(self):
    # BUGFIX: this is the Microcon model (see the 'microcon' backref and
    # api.get_microcon links); the repr previously said '<Micropub ...>',
    # apparently copy-pasted from the Micropub model.
    return '<Microcon {}>'.format(self.title)
def to_dict(self):
    """Serialize this microcon (micro-conjecture) for the JSON API."""
    data = {
        'id': self.id,
        'title': self.title,
        'summary': self.summary,
        'micropubs': [micropub.id for micropub in self.micropubs],
        'author_id': self.author_id,
        'tags': [tag.content for tag in self.tags],  # TODO: is a count needed too?
        'timestamp': self.timestamp,
        'status': self.status,
        # Approval/rejection votes with per-vote reasons.
        'pros_num': self.pros.count(),
        'pros': [self.pros_to_dict(item) for item in self.pros],
        'cons_num': self.cons.count(),
        'cons': [self.cons_to_dict(item) for item in self.cons],
        'views': self.views,
        'likes': self.likers.count(),
        'likers_id': [user.id for user in self.likers],
        'collects': self.collecters.count(),
        'collecters_id': [user.id for user in self.collecters],
        'comments': [comment.to_dict() for comment in self.comments],
        '_links': {
            'self': url_for('api.get_microcon', id=self.id),  # TODO: is this used by clients?
            'author_url': url_for('api.get_user', id=self.author_id),
            'tags_urls': [url_for('api.get_tag', id=tag.id) for tag in self.tags],
            'micropubs_urls': [url_for('api.get_micropub', id=micropub.id)
                               for micropub in self.micropubs]
        }
    }
    return data
def pros_to_dict(self, pro):
    """Serialize one approval vote (user *pro*) from the association table."""
    rows = db.engine.execute(
        "select * from microcons_pors where microcon_id=? and user_id=?",
        [self.id, pro.id])
    # Exactly one association row exists per (microcon, user) pair.
    row = list(rows)[0]
    return {
        'user_id': row[1],
        'timestamp': row[2],
        'reason': row[3],
    }
def cons_to_dict(self, con):
    """Serialize one rejection vote (user *con*) from the association table."""
    rows = db.engine.execute(
        "select * from microcons_cons where microcon_id=? and user_id=?",
        [self.id, con.id])
    # Exactly one association row exists per (microcon, user) pair.
    row = list(rows)[0]
    return {
        'user_id': row[1],
        'timestamp': row[2],
        'reason': row[3],
    }
def add_tags(self, tags):
    """Create Tag rows for this microcon; *tags* is a list of content strings."""
    for content in tags:
        tag_row = Tag()
        tag_row.from_dict({'content': content})
        # Linking via the relationship wires up the foreign key (important).
        tag_row.microcon = self
        db.session.add(tag_row)
    db.session.commit()
def updata_tags(self, tags):
    """Replace all tags: delete the existing ones, then recreate from *tags*.

    (Name kept as-is — callers such as from_dict use this spelling.)
    """
    for existing in self.tags:
        db.session.delete(existing)
    db.session.commit()
    self.add_tags(tags)
def add_micropubs(self, micropubs):
    """Append each given micropub to this microcon's citation collection."""
    for micropub in micropubs:
        self.micropubs.append(micropub)
    db.session.commit()
def update_micropubs(self, micropubs):
    """Replace the cited micropubs with *micropubs*.

    BUGFIX: iterate over a snapshot (``list(...)``) — removing elements
    from the collection while iterating it directly skips every other
    element, leaving stale citations behind.
    """
    for m in list(self.micropubs):
        self.micropubs.remove(m)
    self.add_micropubs(micropubs)
def from_dict(self, data, add_new=False):
    """Populate this microcon from *data*.

    With ``add_new=True`` tags/micropubs are appended; otherwise the
    existing ones are replaced.
    """
    for attr in ('title', 'summary', 'timestamp'):
        if attr in data:
            setattr(self, attr, data[attr])
    if 'tags' in data:
        (self.add_tags if add_new else self.updata_tags)(data['tags'])
    # Update which micropubs this microcon cites.
    if 'micropubs' in data:
        (self.add_micropubs if add_new else self.update_micropubs)(data['micropubs'])
def is_liked_by(self, user):
    """Return True if *user* has already liked this microcon."""
    return user in self.likers
def liked_by(self, user):
    """Record a like from *user*; returns False if already liked."""
    if self.is_liked_by(user):
        return False
    self.likers.append(user)
    # Commit right away: the likes association table must contain the new
    # row before any follow-up queries run against it.
    db.session.commit()
    return True
def unliked_by(self, user):
    """Remove *user*'s like; returns False if there was none."""
    if not self.is_liked_by(user):
        return False
    self.likers.remove(user)
    db.session.commit()
    return True
def is_collected_by(self, user):
    """Return True if *user* has already bookmarked this microcon."""
    return user in self.collecters
def collected_by(self, user):
    """Bookmark this microcon for *user*; returns False if already done."""
    if self.is_collected_by(user):
        return False
    self.collecters.append(user)
    db.session.commit()
    return True
def uncollected_by(self, user):
    """Remove *user*'s bookmark; returns False if there was none."""
    if not self.is_collected_by(user):
        return False
    self.collecters.remove(user)
    db.session.commit()
    return True
def viewed(self):
    """Bump the view counter (persisted on the caller's next commit)."""
    self.views = self.views + 1
def is_judged_by(self, user):
    """Return True if *user* has already voted (either pro or con)."""
    if user in self.pros:
        return True
    return user in self.cons
def proed_by(self, user, reason):
    """Record an approval vote with *reason*; no-op (False) if already voted."""
    if self.is_judged_by(user):
        return False
    self.pros.append(user)
    # Commit first so the association row exists, then attach the reason.
    db.session.commit()
    db.engine.execute("update microcons_pors set reason=? "
                      "where microcon_id=? and user_id=?", [reason, self.id, user.id])
    return True
def coned_by(self, user, reason):
    """Record a rejection vote with *reason*; no-op (False) if already voted."""
    if self.is_judged_by(user):
        return False
    self.cons.append(user)
    # Commit first so the association row exists, then attach the reason.
    db.session.commit()
    db.engine.execute("update microcons_cons set reason=? "
                      "where microcon_id=? and user_id=?", [reason, self.id, user.id])
    return True
def remove_all_judge(self):
    """Drop every pro and con vote on this microcon.

    BUGFIX: iterate over snapshots (``list(...)``) — removing from a
    collection while iterating it directly skips every other element,
    so votes were left behind.
    """
    for item in list(self.pros):
        self.pros.remove(item)
    for item in list(self.cons):
        self.cons.remove(item)
#
class Comment(PaginatedAPIMixin, db.Model):
    """Threaded comment attached to a micropub, microcon or cradle."""
    __tablename__ = 'comments'
    # __table_args__ = {"extend_existing": True}  # only needed to extend an already-created table
    id = db.Column(db.Integer, primary_key=True)
    body = db.Column(db.Text)
    timestamp = db.Column(db.DateTime, index=True, default=datetime.now)
    # The commented item's author gets a notification; they may mark it read.
    mark_read = db.Column(db.Boolean, default=False)
    # Hide this comment from display.
    disabled = db.Column(db.Boolean, default=False)
    # Many-to-many: users who liked this comment.
    likers = db.relationship('User', secondary=comments_likes, backref=db.backref('liked_comments', lazy='dynamic'),
                             lazy='dynamic')
    # FK: the comment author's id.
    author_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    # FK: the micropub / microcon this comment belongs to (at most one set).
    micropub_id = db.Column(db.Integer, db.ForeignKey('micropubs.id'))
    microcon_id = db.Column(db.Integer, db.ForeignKey('microcons.id'))
    # Self-referential FK enabling multi-level (threaded) comments.
    parent_id = db.Column(db.Integer, db.ForeignKey('comments.id', ondelete='CASCADE'))
    # The delete cascade must live on the "many" side, so it cannot be written as:
    # parent = db.relationship('Comment', backref='children', remote_side=[id], cascade='all, delete-orphan')
    parent = db.relationship('Comment', backref=db.backref('children', cascade='all, delete-orphan'), remote_side=[id])
    cradle_id = db.Column(db.Integer, db.ForeignKey('cradles.id'))

    def __repr__(self):
        return '<Comment {}>'.format(self.id)

    def get_descendants(self):
        '''Collect every descendant of this comment (depth-first).'''
        data = set()

        def descendants(comment):
            if comment.children:
                data.update(comment.children)
                for child in comment.children:
                    descendants(child)
        descendants(self)
        return data

    def get_ancestors(self):
        '''Collect every ancestor of this comment, nearest first.'''
        data = []

        def ancestors(comment):
            if comment.parent:
                data.append(comment.parent)
                ancestors(comment.parent)
        ancestors(self)
        return data

    def to_dict(self):
        """Serialize the comment, embedding its author and owning object."""
        data = {
            'id': self.id,
            'body': self.body,
            'timestamp': self.timestamp,
            'mark_read': self.mark_read,
            'disabled': self.disabled,
            'likers_id': [user.id for user in self.likers],
            'author': {
                'id': self.author.id,
                'username': self.author.username,
                'name': self.author.name,
                'avatar': self.author.avatar(128)
            },
            'micropub': {
                'id': self.micropub.id,
                'title': self.micropub.title,
                'author_id': self.micropub.author.id
            } if self.micropub else None,
            'microcon': {
                'id': self.microcon.id,
                'title': self.microcon.title,
                'author_id': self.microcon.author.id
            } if self.microcon else None,
            'cradle': {
                'id': self.cradle.id,
                'title': self.cradle.title,
                'body': self.cradle.body,
                'sponsor': {
                    'id': self.cradle.sponsor.id,
                    'username': self.cradle.sponsor.username,
                    'name': self.cradle.sponsor.name,
                    'avatar': self.cradle.sponsor.avatar(128)
                }
            } if self.cradle else None,
            'parent_id': self.parent.id if self.parent else None,
            # 'children': [child.to_dict() for child in self.children] if self.children else None,
            '_links': {
                'self': url_for('api.get_comment', id=self.id),
                'author_url': url_for('api.get_user', id=self.author_id),
                'cradle_url': url_for('api.get_cradle', id=self.cradle_id) if self.cradle else None,
                'micropub_url': url_for('api.get_micropub', id=self.micropub_id) if self.micropub else None,
                'microcon_url': url_for('api.get_microcon', id=self.microcon_id) if self.microcon else None,
                'parent_url': url_for('api.get_comment', id=self.parent.id) if self.parent else None,
                'children_url': [url_for('api.get_comment', id=child.id) for child in
                                 self.children] if self.children else None
            }
        }
        return data

    def from_dict(self, data):
        """Populate writable fields from a client-supplied dict."""
        for field in ['body', 'timestamp', 'mark_read', 'disabled', 'author_id', 'parent_id',
                      'micropub_id', 'microcon_id']:
            if field in data:
                setattr(self, field, data[field])

    def is_liked_by(self, user):
        '''Return True if *user* has already liked this comment.'''
        return user in self.likers

    def liked_by(self, user):
        '''Like the comment; returns False if already liked.'''
        if not self.is_liked_by(user):
            self.likers.append(user)
            db.session.commit()
            return True
        return False

    def unliked_by(self, user):
        '''Remove the like; returns False if there was none.'''
        if self.is_liked_by(user):
            self.likers.remove(user)
            db.session.commit()
            return True
        return False
class Notification(db.Model):  # no pagination needed
    """Per-user notification with a JSON payload."""
    __tablename__ = 'notifications'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(128), index=True)
    user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    # Float UNIX time, so notifications can be compared/ordered cheaply.
    timestamp = db.Column(db.Float, index=True, default=time)
    payload_json = db.Column(db.Text)

    def __repr__(self):
        return '<Notification {}>'.format(self.id)

    def get_data(self):
        """Decode the JSON payload stored in ``payload_json``."""
        return json.loads(str(self.payload_json))

    def to_dict(self):
        """Serialize the notification, embedding the recipient user."""
        data = {
            'id': self.id,
            'name': self.name,
            'user': {
                'id': self.user.id,
                'username': self.user.username,
                'name': self.user.name,
                'avatar': self.user.avatar(128)
            },
            'timestamp': self.timestamp,
            'payload': self.get_data(),
            '_links': {
                'self': url_for('api.get_notification', id=self.id),
                'user_url': url_for('api.get_user', id=self.user_id)
            }
        }
        return data

    def from_dict(self, data):
        # NOTE(review): this model has no 'body' column; the field list
        # looks copied from Message.from_dict — confirm intended fields.
        for field in ['body', 'timestamp']:
            if field in data:
                setattr(self, field, data[field])
class Message(PaginatedAPIMixin, db.Model):
    """Private message between two users."""
    __tablename__ = 'messages'
    id = db.Column(db.Integer, primary_key=True)
    body = db.Column(db.Text)
    timestamp = db.Column(db.DateTime, index=True, default=datetime.now)
    # Both ends reference users; disambiguated by the relationship config elsewhere.
    sender_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    recipient_id = db.Column(db.Integer, db.ForeignKey('users.id'))

    def __repr__(self):
        return '<Message {}>'.format(self.id)

    def to_dict(self):
        """Serialize the message, embedding full sender/recipient dicts."""
        data = {
            'id': self.id,
            'body': self.body,
            'timestamp': self.timestamp,
            'sender': self.sender.to_dict(),
            'recipient': self.recipient.to_dict(),
            '_links': {
                'self': url_for('api.get_message', id=self.id),
                'sender_url': url_for('api.get_user', id=self.sender_id),
                'recipient_url': url_for('api.get_user', id=self.recipient_id)
            }
        }
        return data

    def from_dict(self, data):
        """Populate writable fields from a client-supplied dict."""
        for field in ['body', 'timestamp']:
            if field in data:
                setattr(self, field, data[field])
class Task(PaginatedAPIMixin, db.Model):
    """Background job handle mirroring an RQ job."""
    __tablename__ = 'tasks'
    # Instead of the default integer PK, use the string ID RQ generates per job.
    id = db.Column(db.String(36), primary_key=True)
    # Task name.
    name = db.Column(db.String(128), index=True)
    # Task description.
    description = db.Column(db.String(128))
    # Owner of the task.
    user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    # Whether execution has completed.
    complete = db.Column(db.Boolean, default=False)

    def get_progress(self):
        '''Return the task's real-time progress (0-100).'''
        try:
            # Fetch the live RQ job instance by Task.id.
            rq_job = current_app.task_queue.fetch_job(self.id)
        except Exception:
            # Deliberately broad: any queue/connection failure is treated
            # as "job gone".
            rq_job = None
        # A vanished job is reported as 100% (assumed finished).
        return rq_job.meta.get('progress', 0) if rq_job is not None else 100

    def to_dict(self):
        """Serialize the task including its live progress."""
        data = {
            'id': self.id,
            'name': self.name,
            'description': self.description,
            'progress': self.get_progress(),
            'complete': self.complete,
            '_links': {
                'user_url': url_for('api.get_user', id=self.user.id)
            }
        }
        return data

    def __repr__(self):
        return '<Task {}>'.format(self.id)
class DDL(PaginatedAPIMixin, db.Model):
    """Deadline attached to a cradle (incubator)."""
    __tablename__ = 'ddls'
    id = db.Column(db.Integer, primary_key=True)
    body = db.Column(db.Text)
    # Creation or last-modification time.
    timestamp = db.Column(db.DateTime, index=True, default=datetime.now)
    # Due time.
    deadline = db.Column(db.DateTime, index=True)
    # One-to-many (many side): owning cradle.
    cradle_id = db.Column(db.Integer, db.ForeignKey('cradles.id'))
    # Whether the deadline has passed.
    passed = db.Column(db.Boolean, default=False)

    def __repr__(self):
        return '<DDL {}>'.format(self.id)

    def to_dict(self):
        """Serialize the deadline, refreshing the ``passed`` flag."""
        # Refreshed here; persisted only on the caller's next commit.  TODO
        self.passed = self.deadline < datetime.now()
        data = {
            'id': self.id,
            'timestamp': self.timestamp,
            # BUGFIX: 'deanline' is a historical typo retained so existing
            # clients keep working; new clients should read 'deadline'.
            'deanline': self.deadline,
            'deadline': self.deadline,
            'body': self.body,
            'passed': self.passed,
            'cradle': {
                'id': self.cradle.id,
                'sponsor': self.cradle.sponsor_id,
                'title': self.cradle.title
            },
            '_links': {
                'self': url_for('api.get_ddl', id=self.id),
                'cradle_url': url_for('api.get_cradle', id=self.cradle_id)
            }
        }
        return data

    def from_dict(self, data):
        """Populate writable fields from a client-supplied dict."""
        for field in ['body', 'timestamp', 'deadline']:
            if field in data:
                setattr(self, field, data[field])
# Citation of a micro-knowledge item (micropub/microcon) inside a cradle.
class MicroknosCites(PaginatedAPIMixin, db.Model):
    """Records which user cited which micropub/microcon in which cradle, and why."""
    __tablename__ = 'microknos_cites'
    id = db.Column(db.Integer, primary_key=True)
    # One-to-many (many side): at most one of micropub/microcon is set.
    micropub_id = db.Column(db.Integer, db.ForeignKey('micropubs.id'))
    microcon_id = db.Column(db.Integer, db.ForeignKey('microcons.id'))
    # One-to-many (many side): the citing user.
    user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    timestamp = db.Column(db.DateTime, index=True, default=datetime.now)
    # One-to-many (many side): the cradle the citation appears in.
    cradle_id = db.Column(db.Integer, db.ForeignKey('cradles.id'))
    reason = db.Column(db.TEXT)

    def __repr__(self):
        return '<MicroknosCites {}>'.format(self.id)

    def to_dict(self):
        """Serialize the citation with its cited item, user and cradle."""
        data = {
            '_links': {
                'self': url_for('api.get_microkno_cite', id=self.id),
                'micropub_url': url_for('api.get_micropub', id=self.micropub_id) if self.micropub else None,
                'microcon_url': url_for('api.get_microcon', id=self.microcon_id) if self.microcon else None,
                'user_url': url_for('api.get_user', id=self.user_id),
                'cradle_url': url_for('api.get_cradle', id=self.cradle_id),
            },
            'micropub': self.micropub.to_dict() if self.micropub else None,
            'microcon': self.microcon.to_dict() if self.microcon else None,
            'user': {
                'id': self.user.id,
                'username': self.user.username,
                'name': self.user.name,
                'avatar': self.user.avatar(128)
            },
            'cradle': {
                'id': self.cradle.id,
                'title': self.cradle.title,
                'body': self.cradle.body,
                'sponsor': {
                    'id': self.cradle.sponsor.id,
                    'username': self.cradle.sponsor.username,
                    'name': self.cradle.sponsor.name,
                    'avatar': self.cradle.sponsor.avatar(128)
                },
            },
            'timestamp': self.timestamp,
            'reason': self.reason,
        }
        return data

    def from_dict(self, data):
        """Populate writable fields from a client-supplied dict."""
        for field in ['reason', 'timestamp']:
            if field in data:
                setattr(self, field, data[field])
class Cradle(PaginatedAPIMixin, db.Model):
__tablename__ = 'cradles'
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.TEXT)
body = | |
data:: Pin.OUT_PP")
shed.vars(old=".. data:: Pin.PULL_DOWN")
shed.vars(old=".. data:: Pin.PULL_NONE")
shed.vars(
old=".. data:: Pin.PULL_UP", end="class PinAF -- Pin Alternate Functions",
)
nxt = "pyb.RTC.rst"
_pin_af(end=nxt, shed=shed)
return nxt
def _led(this: str, shed: RST2PyI) -> str:
    """Translate `pyb.LED.rst` into a typed stub; return the next .rst name."""
    shed.class_from_file(old=this)
    shed.def_(
        old=".. class:: pyb.LED(id)", new="def __init__(self, id: int, /)",
    )
    # intensity() is overloaded: getter with no args, setter with one.
    shed.def_(
        old=".. method:: LED.intensity([value])",
        new=[
            "def intensity(self) -> int",
            "def intensity(self, value: int, /) -> None",
        ],
    )
    shed.def_(
        old=".. method:: LED.off()", new="def off(self) -> None",
    )
    shed.def_(
        old=".. method:: LED.on()", new="def on(self) -> None",
    )
    nxt = "pyb.Pin.rst"
    # `end=` marks where parsing for this class stops (next file's section).
    shed.def_(
        old=".. method:: LED.toggle()", new="def toggle(self) -> None", end=nxt,
    )
    return nxt
def _lcd(this: str, shed: RST2PyI) -> str:
    """Translate `pyb.LCD.rst` into a typed stub; return the next .rst name."""
    shed.class_from_file(old=this)
    shed.def_(
        old=".. class:: pyb.LCD(skin_position)",
        new="def __init__(self, skin_position: str, /)",
    )
    shed.def_(
        old=".. method:: LCD.command(instr_data, buf)",
        new="def command(self, inst_data: int, buf: bytes, /) -> None",
    )
    shed.def_(
        old=".. method:: LCD.contrast(value)",
        new="def contrast(self, value: int, /) -> None",
    )
    shed.def_(
        old=".. method:: LCD.fill(colour)",
        new="def fill(self, colour: int, /) -> None",
    )
    shed.def_(
        old=".. method:: LCD.get(x, y)", new="def get(self, x: int, y: int, /) -> int",
    )
    shed.def_(
        old=".. method:: LCD.light(value)",
        new="def light(self, value: bool | int, /) -> None",
    )
    shed.def_(
        old=".. method:: LCD.pixel(x, y, colour)",
        new="def pixel(self, x: int, y: int, colour: int, /) -> None",
    )
    shed.def_(
        old=".. method:: LCD.show()", new="def show(self) -> None",
    )
    shed.def_(
        old=".. method:: LCD.text(str, x, y, colour)",
        new="def text(self, str: str, x: int, y: int, colour: int, /) -> None",
    )
    nxt = "pyb.LED.rst"
    shed.def_(
        old=".. method:: LCD.write(str)",
        new="def write(self, str: str, /) -> None",
        end=nxt,
    )
    return nxt
def _i2c(this: str, shed: RST2PyI) -> str:
    """Translate `pyb.I2C.rst` into a typed stub; return the next .rst name."""
    shed.class_from_file(old=this,)
    shed.def_(
        old=r".. class:: pyb.I2C(bus, ...)",
        new="""
def __init__(
    self,
    bus: int | str,
    mode: str,
    /,
    *,
    addr: int = 0x12,
    baudrate: int = 400_000,
    gencall: bool = False,
    dma: bool = False
)
""",
    )
    shed.def_(
        old=r".. method:: I2C.deinit()", new="def deinit(self) -> None",
    )
    # init() accepts the same keyword options as the constructor.
    shed.def_(
        old=r".. method:: I2C.init(mode, *, addr=0x12, baudrate=400000, gencall=False, dma=False)",
        new="""
def init(
    self,
    bus: int | str,
    mode: str,
    /,
    *,
    addr: int = 0x12,
    baudrate: int = 400_000,
    gencall: bool = False,
    dma: bool = False
) -> None
""",
    )
    shed.def_(
        old=r".. method:: I2C.is_ready(addr)",
        new="def is_ready(self, addr: int, /) -> bool",
    )
    shed.def_(
        old=r".. method:: I2C.mem_read(data, addr, memaddr, *, timeout=5000, addr_size=8)",
        new="""
def mem_read(
    self,
    data: int | AnyWritableBuf,
    addr: int,
    memaddr: int,
    /,
    *,
    timeout: int = 5000,
    addr_size: int = 8,
) -> bytes
""",
    )
    return "pyb.LCD.rst"
def _flash(this: str, shed: RST2PyI) -> str:
    """Translate `pyb.Flash.rst` into a typed stub; return the next .rst name."""
    shed.class_from_file(old=this, super_class="AbstractBlockDev")
    # The two .rst constructor signatures become typed @overloads.
    shed.def_(
        old=".. class:: pyb.Flash()",
        new="""
@overload
def __init__(self)
""",
    )
    shed.def_(
        old=r".. class:: pyb.Flash(*, start=-1, len=-1)",
        new="""
@overload
def __init__(self, *, start: int = -1, len: int = -1)
""",
    )
    # The block-device methods share one description in the docs; the
    # 3-arg variants map to the same stub via default offset=0.
    shed.defs_with_common_description(
        cmd=".. method:: Flash.",  # Needs `.` at end!
        old2new={
            "readblocks(block_num, buf)": "def readblocks(self, blocknum: int, buf: bytes, offset: int = 0, /) -> None",
            "readblocks(block_num, buf, offset)": "",
            "writeblocks(block_num, buf)": "def writeblocks(self, blocknum: int, buf: bytes, offset: int = 0, /) -> None",
            "writeblocks(block_num, buf, offset)": "",
            "ioctl(cmd, arg)": "def ioctl(self, op: int, arg: int) -> int | None",
        },
        end="Hardware Note",
    )
    nxt = "pyb.I2C.rst"
    # Trailing hardware notes are appended to the module docstring.
    shed.pyi.doc.extend(shed.extra_notes(end=nxt))
    return nxt
def _ext_int(this: str, shed: RST2PyI) -> str:
    """Convert the `pyb.ExtInt` RST documentation into a typed stub class.

    `this` names the RST file being consumed; the return value names the
    next RST file (``pyb.Flash.rst``).  The `shed` calls must stay in the
    same order as the declarations in the RST input, which is consumed
    sequentially.
    """
    shed.class_from_file(old=this,)
    shed.def_(
        old=".. class:: pyb.ExtInt(pin, mode, pull, callback)",
        new="def __init__(self, pin: int | str | Pin, mode: int, pull: int, callback: Callable[[int], None])",
    )
    # NOTE(review): the RST declares `regs` as a classmethod, but the stub
    # emits a staticmethod — presumably intentional; confirm upstream.
    shed.def_(
        old=".. classmethod:: ExtInt.regs()",
        new="""
@staticmethod
def regs() -> None
""",
    )
    shed.def_(
        old=".. method:: ExtInt.disable()", new="def disable(self) -> None",
    )
    shed.def_(
        old=".. method:: ExtInt.enable()", new="def enable(self) -> None",
    )
    shed.def_(
        old=".. method:: ExtInt.line()", new="def line(self) -> int",
    )
    shed.def_(
        old=".. method:: ExtInt.swint()", new="def swint(self) -> None",
    )
    # Class constants; the last `vars` call also marks the end of this file.
    shed.vars(old=".. data:: ExtInt.IRQ_FALLING")
    shed.vars(old=".. data:: ExtInt.IRQ_RISING")
    nxt = "pyb.Flash.rst"
    shed.vars(old=".. data:: ExtInt.IRQ_RISING_FALLING", end=nxt)
    return nxt
def _dac(this: str, shed: RST2PyI) -> str:
    """Convert the `pyb.DAC` RST documentation into a typed stub class.

    `this` names the RST file being consumed; the return value names the
    next RST file (``pyb.ExtInt.rst``).  Call order must match the order of
    declarations in the RST input, which is consumed sequentially.
    """
    shed.class_from_file(
        # Suppress IDE warnings about NORMAL/CIRCULAR shadowing in the stub.
        pre_str="# noinspection PyShadowingNames",
        old=this,
        # Class constants injected after the class docstring.
        post_doc='''
NORMAL: ClassVar[int] = ...
"""
Normal mode (output buffer once) for `mode` argument of `write_timed`.
"""
CIRCULAR: ClassVar[int] = ...
"""
Circular mode (output buffer continuously) for `mode` argument of `write_timed`.
"""
''',
    )
    shed.def_(
        old=r".. class:: pyb.DAC(port, bits=8, *, buffering=None)",
        new="def __init__(self, port: int | Pin, /, bits: int = 8, *, buffering: bool | None = None)",
    )
    shed.def_(
        old=r".. method:: DAC.init(bits=8, *, buffering=None)",
        new="def init(self, bits: int = 8, *, buffering: bool | None = None) -> None",
    )
    shed.def_(
        old=".. method:: DAC.deinit()", new="def deinit(self) -> None",
    )
    shed.def_(
        old=".. method:: DAC.noise(freq)", new="def noise(self, freq: int, /) -> None",
    )
    shed.def_(
        old=".. method:: DAC.triangle(freq)",
        new="def triangle(self, freq: int, /) -> None",
    )
    shed.def_(
        old=".. method:: DAC.write(value)",
        new="def write(self, value: int, /) -> None",
    )
    nxt = "pyb.ExtInt.rst"
    shed.def_(
        old=r".. method:: DAC.write_timed(data, freq, *, mode=DAC.NORMAL)",
        new="def write_timed(self, data: AnyWritableBuf, freq: int | Timer, /, *, mode: int = NORMAL) -> None",
        end=nxt,
    )
    return nxt
def _can(this: str, shed: RST2PyI) -> str:
    """Convert the `pyb.CAN` RST documentation into a typed stub class.

    `this` names the RST file being consumed; the return value names the
    next RST file (``pyb.DAC.rst``).  Call order must match the order of
    declarations in the RST input, which is consumed sequentially.
    """
    shed.class_from_file(old=this,)
    shed.def_(
        old=".. class:: pyb.CAN(bus, ...)",
        new="""
def __init__(
self,
bus: int | str,
mode: int,
/,
extframe: bool = False,
prescaler: int = 100,
*,
sjw: int = 1,
bs1: int = 6,
bs2: int = 8,
auto_restart: bool = False
)
""",
    )
    # NOTE(review): the RST declares a classmethod; the stub emits a
    # staticmethod — presumably intentional; confirm upstream.
    shed.def_(
        old=".. classmethod:: CAN.initfilterbanks(nr)",
        new="""
@staticmethod
def initfilterbanks(nr: int, /) -> None
""",
    )
    shed.def_(
        old=(
            r".. method:: CAN.init(mode, extframe=False, prescaler=100, *, sjw=1, bs1=6, "
            r"bs2=8, auto_restart=False, baudrate=0, sample_point=75)"
        ),
        new="""
def init(
self,
mode: int,
/,
extframe: bool = False ,
prescaler: int = 100,
*,
sjw: int = 1,
bs1: int = 6,
bs2: int = 8,
auto_restart: bool = False,
baudrate: int = 0,
sample_point: int = 75
) -> None
""",
    )
    shed.def_(
        old=".. method:: CAN.deinit()", new="def deinit(self) -> None",
    )
    shed.def_(
        old=".. method:: CAN.restart()", new="def restart(self) -> None",
    )
    shed.def_(
        old=".. method:: CAN.state()", new="def state(self) -> int",
    )
    # Overloaded signatures: with and without the optional `list` argument.
    shed.def_(
        old=".. method:: CAN.info([list])",
        new=[
            "def info(self) -> list[int]",
            "def info(self, list: list[int], /) -> list[int]",
        ],
    )
    shed.def_(
        old=r".. method:: CAN.setfilter(bank, mode, fifo, params, *, rtr)",
        new=[
            """
def setfilter(self, bank: int, mode: int, fifo: int, params: Sequence[int], /) -> None
""",
            """
def setfilter(
self,
bank: int,
mode: int,
fifo: int,
params: Sequence[int],
/,
*,
rtr: Sequence[bool]
) -> None
""",
        ],
    )
    shed.def_(
        old=".. method:: CAN.clearfilter(bank)",
        new="def clearfilter(self, bank: int, /) -> None",
    )
    shed.def_(
        old=".. method:: CAN.any(fifo)", new="def any(self, fifo: int, /) -> bool",
    )
    shed.def_(
        old=r".. method:: CAN.recv(fifo, list=None, *, timeout=5000)",
        new=[
            "def recv(self, fifo: int, /, *, timeout: int = 5000) -> tuple[int, bool, int, memoryview]",
            "def recv(self, fifo: int, list: None, /, *, timeout: int = 5000) -> tuple[int, bool, int, memoryview]",
            "def recv(self, fifo: int, list: list[int | bool | memoryview], /, *, timeout: int = 5000) -> None",
        ],
    )
    shed.def_(
        old=r".. method:: CAN.send(data, id, *, timeout=0, rtr=False)",
        new="""
def send(self, data: int | AnyWritableBuf, id: int, /, *, timeout: int = 0, rtr: bool = False) -> None
""",
    )
    shed.def_(
        old=".. method:: CAN.rxcallback(fifo, fun)",
        new="def rxcallback(self, fifo: int, fun: Callable[[CAN], None], /) -> None",
    )
    # Bus-mode constants.
    shed.vars(
        old=[
            ".. data:: CAN.NORMAL",
            "CAN.LOOPBACK",
            "CAN.SILENT",
            "CAN.SILENT_LOOPBACK",
        ],
    )
    # Bus-state constants.
    shed.vars(
        old=[
            ".. data:: CAN.STOPPED",
            "CAN.ERROR_ACTIVE",
            "CAN.ERROR_WARNING",
            "CAN.ERROR_PASSIVE",
            "CAN.BUS_OFF",
        ],
    )
    nxt = "pyb.DAC.rst"
    # Filter-bank constants; this call also marks the end of the file.
    shed.vars(
        old=[".. data:: CAN.LIST16", "CAN.MASK16", "CAN.LIST32", "CAN.MASK32"], end=nxt,
    )
    return nxt
def _adc_all(*, this: str, end: str, shed: RST2PyI) -> None:
shed.consume_containing_line(this)
shed.consume_minuses_underline_line()
shed.consume_blank_line()
doc = []
for doc_line in shed.rst:
if doc_line.lstrip().startswith(end):
shed.rst.push_line(doc_line)
break
doc.append(f" {doc_line}\n")
else:
assert False, f"Did not find: {end}"
new_class = Class()
shed.pyi.classes.append(new_class)
new_class.class_def = "class ADCAll:"
new_class.doc = doc
new_class.defs.append(
f'''
def __init__(self, resolution: int, mask: int = 0xffffffff, /):
"""
Create a multi-channel ADC instance.
``resolution`` is the number of bits for all the ADCs (even those not enabled); one of:
14, 12, 10, or 8 bits.
To avoid unwanted activation of analog inputs (channel 0..15) a second parameter, ``mask``,
can be specified.
This parameter is a binary pattern where each requested analog input has the corresponding bit set.
The default value is 0xffffffff which means | |
hold answer
if type(logical_zero_strings) != list:
raise Exception('logical_zero_strings should be a list')
if type(logical_one_strings) != list:
raise Exception('logical_one_strings should be a list')
validate_integer(data1_location)
validate_integer(data2_location)
if simple:
if len(logical_zero_strings) != 1:
raise Exception('with simple decoding logical zero should be a list with one entry')
if len(logical_zero_strings) != 1:
raise Exception('with simple decoding logical one should be a list with one entry')
simple_parity_bits = calculate_simple_parity_bits()
new_counts = {str(i) + str(j):0 for i in range(3) for j in range(3)}
for key, value in counts.items():
#split out the data parts of key
data1 = key.split()[data1_location]
data2 = key.split()[data2_location]
#need to reverse the string from qiskit format
reverse1 = string_reverse(data1)
reverse2 = string_reverse(data2)
if simple:
#string is calculated from parity
bit_string1 = ['']
bit_string2 = ['']
for bit_location in simple_parity_bits:
bit_string1.append(reverse1[bit_location])
bit_string2.append(reverse2[bit_location])
new_data1 = str(calculate_parity(bit_string1))
new_data2 = str(calculate_parity(bit_string2))
else:
new_data1 = look_up_data(reverse1, logical_zero_strings, logical_one_strings)
new_data2 = look_up_data(reverse2, logical_zero_strings, logical_one_strings)
new_key = new_data1 + new_data2
if new_counts.get(new_key) == None:
new_counts.update({new_key: value})
else:
new_counts[new_key] = new_counts[new_key] + value
return(new_counts)
def look_up_data(input_string, logical_zero, logical_one):
    """Classify a measured bit string against the codeword tables.

    Parameters
    ----------
    input_string : str
        data for analysis
    logical_zero : list
        strings representing a logical zero
    logical_one : list
        strings representing a logical one

    Returns
    -------
    str
        '0' for a logical zero, '1' for a logical one, or 'E' when the
        string lies outside the code base."""
    if input_string in logical_zero:
        return '0'
    if input_string in logical_one:
        return '1'
    # Neither table matched: the string is outside the code space.
    return 'E'
def print_time():
    """Print the current wall-clock time as ``HH:MM:SS``."""
    stamp = datetime.now().strftime("%H:%M:%S")
    print("Current Time =", stamp)
def validate_integer(number):
    """Raise ``ValueError`` unless *number* is exactly an ``int``.

    Parameters
    ----------
    number : int
        number to be validated

    Raises
    ------
    ValueError
        if ``number`` is not an ``int``.  The exact-type test (rather than
        ``isinstance``) deliberately rejects ``bool`` and other ``int``
        subclasses, preserving the original strict behaviour.
    """
    if type(number) is not int:
        raise ValueError(f'The number {number} entered is not an integer')
def process_FT_results(counts, codewords, data_meas_strings = ['0'],
                       anc_zero = '0', anc_one = '1',
                       verbose = False, data_qubits = 7,
                       ancilla_start = 0, data_meas_start = 0, data_start = 0,
                       ancilla_types = 2, ancilla_qubits = 0, ancilla_meas_repeats = 1,
                       data_meas_qubits = 0, data_meas_repeats = 0,
                       post_selection = False, simple = False,
                       ):
    """Process results from fault tolerant processing.

    Parameters
    ----------
    counts : dict
        results for analysis, keyed by space-separated measurement strings
    codewords : list
        list of valid data codewords
    data_meas_strings : list
        allowed strings for the data measurement bits
        (read-only, so the shared mutable default is safe)
    anc_zero : str
        allowed string for the ancilla zero
    anc_one : str
        allowed string for the ancilla one
    verbose : bool
        if true enables printing
    data_qubits : int
        Length of data bit string. Usually seven
    ancilla_start : int
        starting place for ancilla (if any)
    data_meas_start : int
        starting place for data measurement qubits (if any)
    data_start : int
        starting place for data string
    ancilla_types : int
        number of different ancilla types. Normally 2 (X and Z) or 0
    ancilla_qubits : int
        number of strings for each ancilla qubits. Normally 0, 1 or 3
    ancilla_meas_repeats : int
        number of times ancilla measurements are repeated. Normally 3 or 1
    data_meas_qubits : int
        number of distinct data measurement qubits. Normally 7, 1 or 0
    data_meas_repeats : int
        number of times data measurements are repeated. Normally 3 or 1.
    post_selection : bool
        if true then only strings in logical zero are invalid
    simple : bool
        if true then simple decoding based on three bits shall be used.

    Returns
    -------
    error_rate : float
        error rate calculated
    rejected : int
        strings rejected for validation
    accepted : int
        strings accepted for validation
    valid : int
        strings validated and found to be in the code space
    invalid : int
        strings validated and found to not be in the code space

    Notes
    -----
    This function takes the output string, splits it, and determines if it passes
    data and ancilla checks.  If so the data keyword is validated.
    """
    anc_meas_strings = [anc_zero, anc_one]
    validate_integer(ancilla_start)
    validate_integer(data_meas_start)
    validate_integer(data_start)
    validate_integer(ancilla_types)
    validate_integer(ancilla_qubits)
    validate_integer(ancilla_meas_repeats)
    validate_integer(data_meas_qubits)
    validate_integer(data_meas_repeats)
    # Total number of space-separated fields expected in each counts key.
    total_keys = ancilla_types * ancilla_qubits * ancilla_meas_repeats
    total_keys = total_keys + (data_meas_qubits * data_meas_repeats) + 1
    count_valid = 0
    count_invalid = 0
    count_outside_codeword = 0
    ancilla_rejected = 0
    ancilla_accepted = 0
    data_rejected = 0
    data_accepted = 0
    rejected = 0
    accepted = 0
    for string, value in counts.items():
        # Hoisted: split once per key instead of once per field.
        fields = string.split()
        qubit_strings = [fields[i] for i in range(total_keys)]
        data_syndrome_strings = []
        data_OK = False
        data_string = qubit_strings[data_start]
        for i in range(data_meas_start, data_meas_start + data_meas_repeats):
            #need to reverse strings because Qiskit reverses them
            data_syndrome_strings.append(string_reverse(qubit_strings[i]))
        if data_meas_repeats == 3:
            # All three repeated data measurements must be allowed strings.
            if data_syndrome_strings[2] in data_meas_strings:
                if data_syndrome_strings[1] in data_meas_strings:
                    if data_syndrome_strings[0] in data_meas_strings:
                        data_OK = True
        elif data_meas_repeats == 0:
            data_OK = True
        else:
            raise Exception('At present only 3 or zero data measurements are coded for')
        if data_OK:
            data_accepted = data_accepted + value
            if ancilla_qubits == 0:
                #no ancilla
                ancilla_accepted = data_accepted
                ancilla_rejected = 0
                ancilla_OK = True
                corrected_data_string = data_string
            elif ancilla_qubits == 1:
                #simple case without fault tolerance. No check on ancilla possible
                ancilla_OK = True
                ancilla_accepted = data_accepted
                ancilla_rejected = 0
                if ancilla_meas_repeats != 1:
                    raise Exception('can not handle multiple measurements on one ancilla qubit')
                ancilla = qubit_strings[ancilla_start]
                corrected_data_string = correct_qubit(data_string, ancilla, data_qubits)
            elif ancilla_qubits == 3:
                #complex case with fault tolerance
                count_ancilla_OK = 0
                X = ['' for i in range(ancilla_qubits)]
                for i in range(ancilla_types):
                    for j in range(ancilla_meas_repeats):
                        # NOTE(review): field indices are computed from i and j
                        # only; ancilla_start is not added — assumes the
                        # ancilla fields occupy the first positions. Confirm.
                        first = i * (ancilla_qubits * ancilla_meas_repeats) + j * ancilla_meas_repeats
                        second = first + 1
                        third = second + 1
                        # All three repeats must agree and be an allowed value.
                        if qubit_strings[third] == qubit_strings[second]:
                            if qubit_strings[second] == qubit_strings[first]:
                                if qubit_strings[first] in anc_meas_strings:
                                    count_ancilla_OK = count_ancilla_OK + 1
                                    if i == 0:
                                        #only interested in X values
                                        if qubit_strings[first] in anc_zero:
                                            X[j] = '0'
                                        elif qubit_strings[first] in anc_one:
                                            X[j] = '1'
                                        else:
                                            # Fixed: original message lacked the f-prefix, so the
                                            # {i}/{j}/{k} placeholders were printed literally, and
                                            # it referenced an undefined name `k`.
                                            raise Exception(f'Error in processing strings for i, j = {i}, {j}')
                if count_ancilla_OK == ancilla_qubits * ancilla_types:
                    ancilla_OK = True
                    ancilla_accepted = ancilla_accepted + value
                    #always first three ancilla with Steane code
                    ancilla = X[0] + X[1] + X[2]
                    corrected_data_string = correct_qubit(data_string, ancilla, data_qubits)
                else:
                    ancilla_OK = False
                    ancilla_rejected = ancilla_rejected + value
            else:
                raise Exception('Can only process ancilla strings of 0, 1 or 3 qubits')
            if ancilla_OK:
                #need to reverse string because of Qiskit convention
                reversed_data_string = string_reverse(corrected_data_string)
                valid, invalid, outside_codeword = compute_string_validity(value,
                                                                           codewords,
                                                                           reversed_data_string,
                                                                           post_selection = post_selection,
                                                                           simple = simple,
                                                                           )
                count_valid = count_valid + valid
                count_invalid = count_invalid + invalid
                count_outside_codeword = count_outside_codeword + outside_codeword
        else:
            data_rejected = data_rejected + value
    if ancilla_accepted != 0:
        # calculate on ancilla_accepted because this always holds the amounts to be validated
        error_rate = count_invalid / ancilla_accepted
    else:
        error_rate = 0
        print('Error rate not defined as no strings accepted')
    rejected = data_rejected + ancilla_rejected
    accepted = ancilla_accepted
    if verbose:
        print('At the data validation stage')
        print(f'There are {data_rejected} strings rejected and {data_accepted} strings submitted for processing')
        print(f'Making {data_rejected + data_accepted} in total submitted for data processing')
        print()
        print('At the ancilla validation stage')
        print(f'There are {ancilla_rejected} strings rejected and {ancilla_accepted} strings submitted for validation')
        print(f'Making {ancilla_rejected + ancilla_accepted} in total submitted to check against ancilla')
        print()
        print(f'Of these {ancilla_accepted} strings validated there are {count_valid} valid strings and {count_invalid} invalid_strings')
        if post_selection:
            print(f'There were {count_outside_codeword} strings that were neither logical one or logical zero')
        print(f'The error rate is {error_rate:.4f}')
    return(error_rate, rejected, accepted, count_valid, count_invalid)
def get_parity_check_matrix():
    """Return the parity check matrix, defined in exactly one place.

    Each entry is one row of the matrix as a string of bits."""
    rows = [
        '0001111',
        '0110011',
        '1010101',
    ]
    return rows
def get_codewords():
    """Store the codewords for the logical zero in one place.

    Returns
    -------
    list
        The valid codewords for the logical zero, as bit strings."""
    return [
        '0000000',
        '1010101',
        '0110011',
        '1100110',
        '0001111',
        '1011010',
        '0111100',
        '1101001',
    ]
def calculate_parity_matrix_totals():
"""Calculates the number of items in each row of the parity matrix
Returns
-------
parity_matrix_totals : list
List holding parity matrix totals for each row in the parity matrix.
"""
parity_check_matrix = get_parity_check_matrix()
n = len(parity_check_matrix[0])
parity_matrix_totals = [ 0 for x in range(n)] # define an empty list
#ready to work out parity_matrix_totals
#calculate the number of | |
<filename>artikcloud/apis/rules_api.py<gh_stars>0
# coding: utf-8
"""
ARTIK Cloud API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class RulesApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
def create_rule(self, rule_info, user_id, **kwargs):
"""
Create Rule
Create a new Rule
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_rule(rule_info, user_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param RuleCreationInfo rule_info: Rule object that needs to be added (required)
:param str user_id: User ID (required)
:return: RuleEnvelope
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.create_rule_with_http_info(rule_info, user_id, **kwargs)
else:
(data) = self.create_rule_with_http_info(rule_info, user_id, **kwargs)
return data
def create_rule_with_http_info(self, rule_info, user_id, **kwargs):
"""
Create Rule
Create a new Rule
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_rule_with_http_info(rule_info, user_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param RuleCreationInfo rule_info: Rule object that needs to be added (required)
:param str user_id: User ID (required)
:return: RuleEnvelope
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['rule_info', 'user_id']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_rule" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'rule_info' is set
if ('rule_info' not in params) or (params['rule_info'] is None):
raise ValueError("Missing the required parameter `rule_info` when calling `create_rule`")
# verify the required parameter 'user_id' is set
if ('user_id' not in params) or (params['user_id'] is None):
raise ValueError("Missing the required parameter `user_id` when calling `create_rule`")
resource_path = '/rules'.replace('{format}', 'json')
path_params = {}
query_params = {}
if 'user_id' in params:
query_params['userId'] = params['user_id']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'rule_info' in params:
body_params = params['rule_info']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type([])
# Authentication setting
auth_settings = ['artikcloud_oauth']
return self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='RuleEnvelope',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
def delete_rule(self, rule_id, **kwargs):
"""
Delete Rule
Delete a Rule
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_rule(rule_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str rule_id: Rule ID. (required)
:return: RuleEnvelope
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.delete_rule_with_http_info(rule_id, **kwargs)
else:
(data) = self.delete_rule_with_http_info(rule_id, **kwargs)
return data
def delete_rule_with_http_info(self, rule_id, **kwargs):
"""
Delete Rule
Delete a Rule
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_rule_with_http_info(rule_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str rule_id: Rule ID. (required)
:return: RuleEnvelope
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['rule_id']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_rule" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'rule_id' is set
if ('rule_id' not in params) or (params['rule_id'] is None):
raise ValueError("Missing the required parameter `rule_id` when calling `delete_rule`")
resource_path = '/rules/{ruleId}'.replace('{format}', 'json')
path_params = {}
if 'rule_id' in params:
path_params['ruleId'] = params['rule_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type([])
# Authentication setting
auth_settings = ['art<EMAIL>_oauth']
return self.api_client.call_api(resource_path, 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='RuleEnvelope',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
def get_rule(self, rule_id, **kwargs):
"""
Get Rule
Get a rule using the Rule ID
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_rule(rule_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str rule_id: Rule ID. (required)
:return: RuleEnvelope
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_rule_with_http_info(rule_id, **kwargs)
else:
(data) = self.get_rule_with_http_info(rule_id, **kwargs)
return data
def get_rule_with_http_info(self, rule_id, **kwargs):
"""
Get Rule
Get a rule using the Rule ID
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_rule_with_http_info(rule_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str rule_id: Rule ID. (required)
:return: RuleEnvelope
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['rule_id']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_rule" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'rule_id' is set
if ('rule_id' not in params) or (params['rule_id'] is None):
raise ValueError("Missing the required parameter `rule_id` when calling `get_rule`")
resource_path = '/rules/{ruleId}'.replace('{format}', 'json')
path_params = {}
if 'rule_id' in params:
path_params['ruleId'] = params['rule_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type([])
# Authentication setting
auth_settings = ['artikcloud_oauth']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='RuleEnvelope',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
def update_rule(self, rule_id, rule_info, **kwargs):
"""
Update Rule
Update an existing Rule
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.update_rule(rule_id, rule_info, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str rule_id: Rule ID. (required)
:param RuleUpdateInfo rule_info: Rule object that needs to be updated (required)
:return: RuleEnvelope
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.update_rule_with_http_info(rule_id, rule_info, **kwargs)
else:
(data) = self.update_rule_with_http_info(rule_id, rule_info, **kwargs)
return data
def update_rule_with_http_info(self, rule_id, rule_info, **kwargs):
"""
Update Rule
Update an existing Rule
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please | |
# Originally generated by list_pcode: procedure_hierarchy
# Copyright (c) 2017 <NAME>
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
hierarchy = {(b'MAINLIB ', 78): (b'MAINLIB ', 27),
(b'MAINLIB ', 80): (b'MAINLIB ', 36),
(b'MAINLIB ', 81): (b'MAINLIB ', 15),
(b'MAINLIB ', 82): (b'MAINLIB ', 53),
(b'MAINLIB ', 84): (b'MAINLIB ', 73),
(b'SHIPLIB ', 27): (b'SHIPLIB ', 16),
(b'SHIPLIB ', 28): (b'SHIPLIB ', 27),
(b'SHIPLIB ', 36): (b'SHIPLIB ', 35),
(b'SHIPLIB ', 37): (b'SHIPLIB ', 35),
(b'WINDOWLI', 23): (b'WINDOWLI', 8),
(b'WINDOWLI', 24): (b'WINDOWLI', 23),
(b'WINDOWLI', 28): (b'WINDOWLI', 27),
(b'WINDOWLI', 29): (b'WINDOWLI', 21),
(b'XDOCOMBA', 2): (b'XDOCOMBA', 1),
(b'XDOCOMBA', 3): (b'XDOCOMBA', 1),
(b'XDOCOMBA', 4): (b'XDOCOMBA', 1),
(b'XDOCOMBA', 5): (b'XDOCOMBA', 1),
(b'XDOCOMBA', 6): (b'XDOCOMBA', 1),
(b'XDOCOMBA', 7): (b'XDOCOMBA', 1),
(b'XDOCOMBA', 8): (b'XDOCOMBA', 1),
(b'XDOCOMBA', 9): (b'XDOCOMBA', 1),
(b'XDOCOMBA', 10): (b'XDOCOMBA', 1),
(b'XDOCOMBA', 11): (b'XDOCOMBA', 1),
(b'XDOCOMBA', 12): (b'XDOCOMBA', 1),
(b'XDOCOMBA', 13): (b'XDOCOMBA', 1),
(b'XDOCOMBA', 14): (b'XDOCOMBA', 1),
(b'XDOCOMBA', 15): (b'XDOCOMBA', 1),
(b'XDOCOMBA', 16): (b'XDOCOMBA', 1),
(b'XDOCOMBA', 17): (b'XDOCOMBA', 1),
(b'XDOCOMBA', 18): (b'XDOCOMBA', 17),
(b'XDOFIGHT', 2): (b'XDOFIGHT', 1),
(b'XDOFIGHT', 3): (b'XDOFIGHT', 1),
(b'XDOFIGHT', 4): (b'XDOFIGHT', 1),
(b'XDOFIGHT', 5): (b'XDOFIGHT', 1),
(b'XDOFIGHT', 6): (b'XDOFIGHT', 1),
(b'XDOFIGHT', 7): (b'XDOFIGHT', 1),
(b'XDOFIGHT', 8): (b'XDOFIGHT', 1),
(b'XDOFIGHT', 9): (b'XDOFIGHT', 1),
(b'XDOFIGHT', 10): (b'XDOFIGHT', 1),
(b'XDOFIGHT', 11): (b'XDOFIGHT', 1),
(b'XDOFIGHT', 12): (b'XDOFIGHT', 1),
(b'XDOFIGHT', 13): (b'XDOFIGHT', 1),
(b'XDOFIGHT', 14): (b'XDOFIGHT', 1),
(b'XDOFIGHT', 15): (b'XDOFIGHT', 1),
(b'XDOFIGHT', 16): (b'XDOFIGHT', 1),
(b'XDOFIGHT', 17): (b'XDOFIGHT', 16),
(b'XDOFIGHT', 18): (b'XDOFIGHT', 1),
(b'XDOFIGHT', 19): (b'XDOFIGHT', 1),
(b'XDOFIGHT', 20): (b'XDOFIGHT', 19),
(b'XDOFIGHT', 21): (b'XDOFIGHT', 1),
(b'XDOFIGHT', 22): (b'XDOFIGHT', 1),
(b'XDOFIGHT', 23): (b'XDOFIGHT', 1),
(b'XDOFIGHT', 24): (b'XDOFIGHT', 1),
(b'XDOINTER', 2): (b'XDOINTER', 1),
(b'XDOINTER', 3): (b'XDOINTER', 1),
(b'XDOINTER', 4): (b'XDOINTER', 1),
(b'XDOINTER', 5): (b'XDOINTER', 4),
(b'XDOINTER', 6): (b'XDOINTER', 1),
(b'XDOINTER', 7): (b'XDOINTER', 6),
(b'XDOINTER', 8): (b'XDOINTER', 6),
(b'XDOINTER', 9): (b'XDOINTER', 1),
(b'XDOINTER', 10): (b'XDOINTER', 1),
(b'XDOINTER', 11): (b'XDOINTER', 1),
(b'XDOINTER', 12): (b'XDOINTER', 11),
(b'XDOINTER', 13): (b'XDOINTER', 11),
(b'XDOINTER', 14): (b'XDOINTER', 1),
(b'XDOINTER', 15): (b'XDOINTER', 1),
(b'XDOINTER', 16): (b'XDOINTER', 1),
(b'XDOINTER', 17): (b'XDOINTER', 16),
(b'XDOINTER', 18): (b'XDOINTER', 16),
(b'XDOINTER', 19): (b'XDOINTER', 16),
(b'XDOINTER', 20): (b'XDOINTER', 16),
(b'XDOINTER', 21): (b'XDOINTER', 17),
(b'XDOINTER', 22): (b'XDOINTER', 1),
(b'XDOINTER', 23): (b'XDOINTER', 22),
(b'XDOINTER', 24): (b'XDOINTER', 23),
(b'XDOINTER', 25): (b'XDOINTER', 22),
(b'XDOINTER', 26): (b'XDOINTER', 22),
(b'XDOINTER', 27): (b'XDOINTER', 22),
(b'XDOINTER', 28): (b'XDOINTER', 22),
(b'XDOINTER', 29): (b'XDOINTER', 1),
(b'XDOINTER', 30): (b'XDOINTER', 1),
(b'XDOINTER', 31): (b'XDOINTER', 1),
(b'XDOINTER', 32): (b'XDOINTER', 31),
(b'XDOINTER', 33): (b'XDOINTER', 31),
(b'XDOINTER', 34): (b'XDOINTER', 31),
(b'XDOINTER', 35): (b'XDOINTER', 1),
(b'XDOINTER', 36): (b'XDOINTER', 1),
(b'XDOINTER', 37): (b'XDOINTER', 36),
(b'XDOINTER', 38): (b'XDOINTER', 36),
(b'XDOINTER', 39): (b'XDOINTER', 1),
(b'XDOINTER', 40): (b'XDOINTER', 39),
(b'XDOINTER', 41): (b'XDOINTER', 39),
(b'XDOINTER', 42): (b'XDOINTER', 39),
(b'XDOINTER', 43): (b'XDOINTER', 1),
(b'XDOINTER', 44): (b'XDOINTER', 1),
(b'XDOINTER', 45): (b'XDOINTER', 1),
(b'XDOREPAI', 2): (b'XDOREPAI', 1),
(b'XDOREPAI', 3): (b'XDOREPAI', 1),
(b'XDOREPAI', 4): (b'XDOREPAI', 1),
(b'XDOREPAI', 5): (b'XDOREPAI', 1),
(b'XDOREPAI', 6): (b'XDOREPAI', 1),
(b'XDOREPAI', 7): (b'XDOREPAI', 1),
(b'XDOREPAI', 8): (b'XDOREPAI', 1),
(b'XDOREPAI', 9): (b'XDOREPAI', 8),
(b'XDOREPAI', 10): (b'XDOREPAI', 1),
(b'XDOREPAI', 11): (b'XDOREPAI', 1),
(b'XDOTRADI', 2): (b'XDOTRADI', 1),
(b'XDOTRADI', 3): (b'XDOTRADI', 1),
(b'XDOTRADI', 4): (b'XDOTRADI', 1),
(b'XDOTRADI', 5): (b'XDOTRADI', 1),
(b'XDOTRADI', 6): (b'XDOTRADI', 1),
(b'XDOTRADI', 7): (b'XDOTRADI', 1),
(b'XDOTRADI', 8): (b'XDOTRADI', 7),
(b'XDOTRADI', 9): (b'XDOTRADI', 7),
(b'XDOTRADI', 10): (b'XDOTRADI', 1),
(b'XDOTRADI', 11): (b'XDOTRADI', 1),
(b'XDOTRADI', 12): (b'XDOTRADI', 1),
(b'XDOTRADI', 13): (b'XDOTRADI', 1),
(b'XDOTRADI', 14): (b'XDOTRADI', 1),
(b'XDOTRADI', 15): (b'XDOTRADI', 1),
(b'XDOTRADI', 16): (b'XDOTRADI', 1),
(b'XDOTRADI', 17): (b'XDOTRADI', 16),
(b'XDOTRADI', 18): (b'XDOTRADI', 1),
(b'XDOTRADI', 19): (b'XDOTRADI', 18),
(b'XDOTRADI', 20): (b'XDOTRADI', 19),
(b'XDOTRADI', 21): (b'XDOTRADI', 18),
(b'XDOTRADI', 22): (b'XDOTRADI', 1),
(b'XDOTRADI', 23): (b'XDOTRADI', 1),
(b'XDOTRADI', 24): (b'XDOTRADI', 23),
(b'XDOTRADI', 25): (b'XDOTRADI', 1),
(b'XDOTRADI', 26): (b'XDOTRADI', 1),
(b'XDOTRADI', 27): (b'XDOTRADI', 1),
(b'XDOTRADI', 28): (b'XDOTRADI', 1),
(b'XDOTRADI', 29): (b'XDOTRADI', 1),
(b'XDOTRADI', 30): (b'XDOTRADI', 1),
(b'XDOTRADI', 31): (b'XDOTRADI', 30),
(b'XDOTRADI', 32): (b'XDOTRADI', 30),
(b'XDOTRADI', 33): (b'XDOTRADI', 30),
(b'XDOTRADI', 34): (b'XDOTRADI', 1),
(b'XDOUNITE', 2): (b'XDOUNITE', 1),
(b'XDOUNITE', 3): (b'XDOUNITE', 1),
(b'XDOUNITE', 4): (b'XDOUNITE', 1),
(b'XDOUNITE', 5): (b'XDOUNITE', 1),
(b'XDOUNITE', 6): (b'XDOUNITE', 1),
(b'XDOUNITE', 7): (b'XDOUNITE', 1),
(b'XDOUNITE', 8): (b'XDOUNITE', 1),
(b'XDOUNITE', 9): (b'XDOUNITE', 8),
(b'XDOUNITE', 10): (b'XDOUNITE', 9),
(b'XDOUNITE', 11): (b'XDOUNITE', 1),
(b'XDOUNITE', 12): (b'XDOUNITE', 11),
(b'XDOUNITE', 13): (b'XDOUNITE', 11),
(b'XDOUNITE', 14): (b'XDOUNITE', 11),
(b'XDOUNITE', 15): (b'XDOUNITE', 1),
(b'XDOUNITE', 16): (b'XDOUNITE', 15),
(b'XDOUNITE', 17): (b'XDOUNITE', 16),
(b'XDOUNITE', 18): (b'XDOUNITE', 15),
(b'XDOUNITE', 19): (b'XDOUNITE', 15),
(b'XDOUNITE', 20): (b'XDOUNITE', 19),
(b'XDOUNITE', 21): (b'XDOUNITE', 20),
(b'XDOUNITE', 22): (b'XDOUNITE', 15),
(b'XDOUNITE', 23): (b'XDOUNITE', 15),
(b'XDOUSERM', 2): (b'XDOUSERM', 1),
(b'XDOUSERM', 3): (b'XDOUSERM', 1),
(b'XDOUSERM', 4): (b'XDOUSERM', 1),
(b'XDOUSERM', 5): (b'XDOUSERM', 4),
(b'XDOUSERM', 6): (b'XDOUSERM', 1),
(b'XDOUSERM', 7): (b'XDOUSERM', 1),
(b'XDOUSERM', 8): (b'XDOUSERM', 1),
(b'XDOUSERM', 9): (b'XDOUSERM', 1),
(b'XDOUSERM', 10): (b'XDOUSERM', 1),
(b'XDOUSERM', 11): (b'XDOUSERM', 1),
(b'XDOUSERM', 12): (b'XDOUSERM', 1),
(b'XDOUSERM', 13): (b'XDOUSERM', 1),
(b'XDOUSERM', 14): (b'XDOUSERM', 1),
(b'XDOUSERM', 15): (b'XDOUSERM', 1),
(b'XDOUSERM', 16): (b'XDOUSERM', 1),
(b'XDOUSERM', 17): (b'XDOUSERM', 1),
(b'XLANDFX ', 2): (b'XLANDFX ', 1),
(b'XLANDFX ', 3): (b'XLANDFX ', 1),
(b'XLANDFX ', 4): (b'XLANDFX ', 1),
(b'XLANDFX ', 5): (b'XLANDFX ', 1),
(b'XLANDFX ', 6): (b'XLANDFX ', 1),
(b'XLANDFX ', 7): (b'XLANDFX ', 1),
(b'XLANDFX ', 8): (b'XLANDFX ', 1),
(b'XLANDFX ', 9): (b'XLANDFX ', 1),
(b'XLANDFX ', 10): (b'XLANDFX ', 1),
(b'XLANDFX ', 11): (b'XLANDFX ', 1),
(b'XLANDFX ', 12): (b'XLANDFX ', 1),
(b'XLANDFX ', 13): (b'XLANDFX ', 1),
(b'XLANDFX ', 14): (b'XLANDFX ', 1),
(b'XMOVEINB', 2): (b'XMOVEINB', 1),
(b'XMOVEINB', 3): (b'XMOVEINB', 1),
(b'XMOVEINB', 4): (b'XMOVEINB', 1),
(b'XMOVEINB', 5): (b'XMOVEINB', 1),
(b'XMOVEINB', 6): (b'XMOVEINB', 1),
(b'XMOVEINB', 7): (b'XMOVEINB', 1),
(b'XMOVEINB', 8): (b'XMOVEINB', 7),
(b'XMOVEINB', 9): (b'XMOVEINB', 1),
(b'XMOVEINB', 10): (b'XMOVEINB', 1),
(b'XMOVEINB', 11): (b'XMOVEINB', 1),
(b'XMOVEINB', 12): (b'XMOVEINB', 1),
(b'XMOVEINB', 13): (b'XMOVEINB', 1),
(b'XMOVEINB', 14): (b'XMOVEINB', 1),
(b'XMOVEINB', 15): (b'XMOVEINB', 1),
(b'XMOVEINB', 16): (b'XMOVEINB', 1),
(b'XMOVEINB', 17): (b'XMOVEINB', 1),
(b'XMOVEINB', 18): (b'XMOVEINB', 1),
(b'XMOVEINB', 19): (b'XMOVEINB', 1),
(b'XMOVEINB', 20): (b'XMOVEINB', 1),
(b'XMOVEINB', 21): (b'XMOVEINB', 1),
(b'XMOVEINB', 22): (b'XMOVEINB', 1),
(b'XMOVEINB', 23): (b'XMOVEINB', 1),
(b'XMOVEINB', 24): (b'XMOVEINB', 1),
(b'XMOVEINB', 25): (b'XMOVEINB', 1),
(b'XMOVEINB', 26): (b'XMOVEINB', 1),
(b'XMOVEINB', 27): (b'XMOVEINB', 1),
(b'XMOVEINB', 28): (b'XMOVEINB', 1),
(b'XMOVEONG', 2): (b'XMOVEONG', 1),
(b'XMOVEONG', 3): (b'XMOVEONG', 1),
(b'XMOVEONG', 4): (b'XMOVEONG', 3),
(b'XMOVEONG', 5): (b'XMOVEONG', 3),
(b'XMOVEONG', 6): (b'XMOVEONG', 1),
(b'XMOVEONG', 7): (b'XMOVEONG', 1),
(b'XMOVEONG', 8): (b'XMOVEONG', 1),
(b'XMOVEONG', 9): (b'XMOVEONG', 1),
(b'XMOVEONG', 10): (b'XMOVEONG', 9),
(b'XMOVEONG', 11): (b'XMOVEONG', 1),
(b'XMOVEONG', 12): (b'XMOVEONG', 11),
(b'XMOVEONG', 13): (b'XMOVEONG', 11),
(b'XMOVEONG', 14): (b'XMOVEONG', 11),
(b'XMOVEONG', 15): (b'XMOVEONG', 11),
(b'XMOVEONG', 16): (b'XMOVEONG', 11),
(b'XMOVEONG', 17): (b'XMOVEONG', 1),
(b'XMOVEONG', 18): (b'XMOVEONG', 1),
(b'XMOVEONG', 19): (b'XMOVEONG', 1),
(b'XMOVEONG', 20): (b'XMOVEONG', 1),
(b'XMOVEONG', 21): (b'XMOVEONG', 1),
(b'XMOVEONG', 22): (b'XMOVEONG', 1),
(b'XMOVEONG', 23): (b'XMOVEONG', 1),
(b'XMOVEONG', 24): (b'XMOVEONG', 1),
(b'XMOVEONG', 25): (b'XMOVEONG', 1),
(b'XMOVEONG', 26): (b'XMOVEONG', 25),
(b'XMOVEONG', 27): (b'XMOVEONG', 25),
(b'XMOVEONG', 28): (b'XMOVEONG', 1),
(b'XMOVEONG', 29): (b'XMOVEONG', 1),
(b'XMOVEONG', 30): (b'XMOVEONG', 1),
(b'XMOVEONG', 31): (b'XMOVEONG', 30),
(b'XMOVEONG', 32): (b'XMOVEONG', 1),
(b'XMOVEONG', 33): (b'XMOVEONG', 1),
(b'XMOVEONG', 34): (b'XMOVEONG', 1),
(b'XMOVEONG', 35): (b'XMOVEONG', 1),
(b'XMOVEONG', 36): (b'XMOVEONG', 1),
(b'XMOVEONG', 37): (b'XMOVEONG', 36),
(b'XMOVEONG', 38): (b'XMOVEONG', 37),
(b'XMOVEONG', 39): (b'XMOVEONG', 1),
(b'XMOVEONG', 40): (b'XMOVEONG', 1),
(b'XMOVEONG', 41): (b'XMOVEONG', 40),
(b'XMOVEONG', 42): (b'XMOVEONG', 40),
(b'XMOVEONG', 43): (b'XMOVEONG', 1),
(b'XMOVEONG', 44): (b'XMOVEONG', 43),
(b'XMOVEONG', 45): (b'XMOVEONG', 44),
(b'XMOVEONG', 46): (b'XMOVEONG', 43),
(b'XMOVEONG', 47): (b'XMOVEONG', 46),
(b'XMOVEONG', 48): (b'XMOVEONG', 43),
(b'XMOVEONG', 49): (b'XMOVEONG', 43),
(b'XMOVEONG', 50): (b'XMOVEONG', 43),
(b'XMOVEONG', 51): (b'XMOVEONG', 1),
(b'XMOVEONG', 52): (b'XMOVEONG', 51),
(b'XMOVEONG', 53): (b'XMOVEONG', 52),
(b'XMOVEONG', 54): (b'XMOVEONG', 52),
(b'XMOVEONG', 55): (b'XMOVEONG', 1),
(b'XMOVEONS', 2): (b'XMOVEONS', 1),
(b'XMOVEONS', 3): (b'XMOVEONS', 1),
(b'XMOVEONS', 4): (b'XMOVEONS', 1),
(b'XMOVEONS', 5): (b'XMOVEONS', 1),
(b'XMOVEONS', 6): (b'XMOVEONS', 1),
(b'XMOVEONS', 7): (b'XMOVEONS', 1),
(b'XMOVEONS', 8): (b'XMOVEONS', 1),
(b'XMOVEONS', 9): (b'XMOVEONS', 1),
(b'XMOVEONS', 10): (b'XMOVEONS', 1),
(b'XMOVEONS', 11): (b'XMOVEONS', 1),
(b'XMOVEONS', 12): (b'XMOVEONS', 1),
(b'XPILOTAG', 2): (b'XPILOTAG', 1),
(b'XPILOTAG', 3): (b'XPILOTAG', 1),
(b'XPILOTAG', 4): (b'XPILOTAG', 1),
(b'XPILOTAG', 5): (b'XPILOTAG', 1),
(b'XPILOTAG', 6): (b'XPILOTAG', 1),
(b'XPILOTAG', 7): (b'XPILOTAG', 6),
(b'XPILOTAG', 8): (b'XPILOTAG', 6),
(b'XPILOTAG', 9): (b'XPILOTAG', 1),
(b'XPILOTAG', 10): (b'XPILOTAG', 1),
(b'XPILOTAG', 11): (b'XPILOTAG', 1),
(b'XREADMAP', 2): (b'XREADMAP', 1),
(b'XREADMAP', 3): (b'XREADMAP', 2),
(b'XREADMAP', 4): (b'XREADMAP', 3),
(b'XREADMAP', 5): (b'XREADMAP', 2),
(b'XREADMAP', 6): (b'XREADMAP', 2),
(b'XREADMAP', 7): (b'XREADMAP', 6),
(b'XREADMAP', 8): (b'XREADMAP', 1),
(b'XREADMAP', 9): (b'XREADMAP', 8),
(b'XREADMAP', 10): (b'XREADMAP', 9),
(b'XREADMAP', 11): (b'XREADMAP', 9),
| |
# file: peleenet/components/train/src/peleenet.py
import argparse
import datetime
import json
import math
import os
import pickle
import shutil
from collections import OrderedDict
from random import randrange
from typing import List, Tuple
import numpy as np # type: ignore
import tensorflow as tf # type: ignore
from PIL import Image # type: ignore
from tensorflow.keras import Sequential, regularizers # type: ignore
from tensorflow.keras.callbacks import (ModelCheckpoint, # type: ignore
ReduceLROnPlateau)
from tensorflow.keras.layers import (Activation, # type: ignore
AveragePooling2D, BatchNormalization,
Concatenate, Conv2D, Dense, Dropout,
Flatten, GlobalAveragePooling2D, Input,
MaxPool2D)
from tensorflow.keras.models import Model # type: ignore
import tensorflow_datasets as tfds # type: ignore
class _DenseLayer(Model):
    """Two-branch PeleeNet dense layer: each branch starts with a 1x1
    bottleneck conv, followed by one (branch 1) or two (branch 2) 3x3
    convs; both branch outputs are concatenated with the input along
    the channel axis."""

    def __init__(self, num_input_features, growth_rate, bottleneck_width, drop_rate):
        super(_DenseLayer, self).__init__()
        # Each branch contributes half of the requested growth rate.
        half_growth = int(growth_rate / 2)
        # Bottleneck channel count, rounded down to a multiple of 4.
        inter_channel = int(half_growth * bottleneck_width / 4) * 4
        if inter_channel > num_input_features / 2:
            # Cap the bottleneck when it would exceed half the input width.
            inter_channel = int(num_input_features / 8) * 4
            print(f'adjusting inter_channel to {inter_channel}')
        # NOTE(review): drop_rate is accepted but never applied in this layer.
        self.branch1a = BasicConv2D(inter_channel, kernel_size=1, padding="same")
        self.branch1b = BasicConv2D(half_growth, kernel_size=3, padding="same")
        self.branch2a = BasicConv2D(inter_channel, kernel_size=1, padding="same")
        self.branch2b = BasicConv2D(half_growth, kernel_size=3, padding="same")
        self.branch2c = BasicConv2D(half_growth, kernel_size=3, padding="same")

    def call(self, x):
        left = self.branch1b(self.branch1a(x))
        right = self.branch2c(self.branch2b(self.branch2a(x)))
        return Concatenate()([x, left, right])
class _DenseBlock(Sequential):
    """Sequential stack of `num_layers` _DenseLayer modules; the input
    channel count seen by each successive layer grows by `growth_rate`."""

    def __init__(self, num_layers, num_input_features, bn_size, growth_rate, drop_rate):
        super(_DenseBlock, self).__init__()
        for layer_idx in range(num_layers):
            self.add(_DenseLayer(num_input_features + layer_idx * growth_rate,
                                 growth_rate, bn_size, drop_rate))
class _StemBlock(Model):
    """PeleeNet stem block: a strided 3x3 conv, then two parallel
    downsampling branches — max-pool vs. 1x1 conv + strided 3x3 conv —
    whose outputs are concatenated and fused by a final 1x1 conv.

    Args:
        num_init_features: channel count produced by the stem convs.
    """

    def __init__(self, num_init_features):
        super(_StemBlock, self).__init__()
        num_stem_features = int(num_init_features / 2)
        self.stem1 = BasicConv2D(out_channels=num_init_features, kernel_size=3, strides=2)
        self.stem2a = BasicConv2D(out_channels=num_stem_features, kernel_size=1, strides=1)
        self.stem2b = BasicConv2D(out_channels=num_init_features, kernel_size=3, strides=2)
        self.stem3 = BasicConv2D(out_channels=num_init_features, kernel_size=1, strides=1)
        self.pool = MaxPool2D(2)

    def call(self, x):
        out = self.stem1(x)
        # Conv branch: 1x1 bottleneck followed by a strided 3x3 conv.
        branch2 = self.stem2a(out)
        branch2 = self.stem2b(branch2)
        # Pooling branch.
        branch1 = self.pool(out)
        # BUG FIX: previously Concatenate()([branch1, branch1]) — the conv
        # branch (branch2) was computed and then discarded because the
        # pooled branch was concatenated with itself. Per the PeleeNet
        # stem design, both branches must be concatenated.
        out = Concatenate()([branch1, branch2])
        out = self.stem3(out)
        return out
class BasicConv2D(Model):
    """Conv2D -> BatchNorm -> (optional) ReLU building block, with L2
    weight decay and no conv bias (the batch norm provides the shift).

    Extra keyword arguments (kernel_size, strides, padding, ...) are
    forwarded to the underlying Conv2D layer."""

    def __init__(self, out_channels, activation=True, **kwargs):
        super(BasicConv2D, self).__init__()
        self.conv = Conv2D(filters=out_channels,
                           use_bias=False,
                           kernel_initializer='glorot_uniform',
                           kernel_regularizer=tf.keras.regularizers.l2(5e-4),
                           **kwargs)
        self.norm = BatchNormalization()
        self.activation = activation

    def call(self, x):
        out = self.norm(self.conv(x))
        if not self.activation:
            return out
        return Activation('relu')(out)
class PeleeNet(Model):
    """PeleeNet image classifier (Wang et al., 2018): a stem block followed
    by four dense blocks with 1x1 transition convolutions, global average
    pooling, dropout, and a final dense classification layer.

    Args:
        growth_rate: int, or 4-element sequence of per-block growth rates.
        block_config: number of dense layers in each of the four blocks.
        num_init_features: channel count produced by the stem block.
        bottleneck_width: int, or 4-element sequence of bottleneck widths.
        drop_rate: dropout rate applied before the classifier head.
        num_classes: size of the output layer.
    """

    # Defaults changed from mutable lists to tuples (never mutated, and
    # mutable defaults are a footgun); the isinstance checks below accept
    # both lists and tuples, so existing callers are unaffected.
    def __init__(self, growth_rate=32, block_config=(3, 4, 8, 6), num_init_features=32,
                 bottleneck_width=(1, 2, 4, 4), drop_rate=0.5, num_classes=1000):
        super(PeleeNet, self).__init__()
        self.features = Sequential(
            _StemBlock(num_init_features))
        # Allow a single scalar or one value per dense block.
        if isinstance(growth_rate, (list, tuple)):
            growth_rates = list(growth_rate)
            assert len(growth_rates) == 4
        else:
            growth_rates = [growth_rate] * 4
        if isinstance(bottleneck_width, (list, tuple)):
            bottleneck_widths = list(bottleneck_width)
            assert len(bottleneck_widths) == 4
        else:
            bottleneck_widths = [bottleneck_width] * 4
        num_features = num_init_features
        for i, num_layers in enumerate(block_config):
            block = _DenseBlock(num_layers=num_layers, num_input_features=num_features,
                                bn_size=bottleneck_widths[i], growth_rate=growth_rates[i],
                                drop_rate=drop_rate)
            self.features.add(block)
            num_features = num_features + num_layers * growth_rates[i]
            # 1x1 transition convolution after every dense block.
            self.features.add(BasicConv2D(num_features, kernel_size=1, strides=1))
            if i != len(block_config) - 1:
                # Downsample between blocks, but not after the last one.
                self.features.add(AveragePooling2D(2))
            # (removed a no-op `num_features = num_features` statement here)
        # Classification head.
        self.classifier = Dense(num_classes, kernel_initializer='glorot_uniform')
        self.drop_rate = drop_rate

    def call(self, x):
        features = self.features(x)
        out = GlobalAveragePooling2D()(features)
        if self.drop_rate > 0:
            out = Dropout(self.drop_rate)(out)
        out = self.classifier(out)
        return out
class ImageAugmentation:
    """Resize every image to (224, 224) with nearest-neighbour sampling,
    so datasets containing variable-sized images become uniform."""

    def __init__(self):
        pass

    def __call__(self, image, label):
        resized = tf.image.resize(image, [224, 224],
                                  method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
        return resized, label
class TrainingImageAugmentation:
    """Training-time preprocessing: normalise pixels to [0, 1], upscale to
    (input_size * scale_img + resize) square, random horizontal flip,
    resize to (224, 224), and log a sample of the augmented batch to
    TensorBoard."""

    def __init__(self, log_dir: str, max_images: int, name: str,
                 input_size: int, scale_img: int, resize: int,
                 batch_size: int):
        self.file_writer = tf.summary.create_file_writer(log_dir)
        self.max_images: int = max_images
        self.name: str = name
        self.input_size: int = input_size
        self.resize: int = resize
        self.batch_size: int = batch_size
        self.scale_img: int = scale_img
        self._counter: int = 0  # TensorBoard step for logged image batches

    def __call__(self, image, label):
        normalised = tf.cast(image, tf.float32) / 255.0
        side = (self.input_size * self.scale_img) + self.resize
        aug = tf.image.resize(normalised, (side, side),
                              method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
        aug = tf.image.random_flip_left_right(aug)
        aug = tf.image.resize(aug, [224, 224],
                              method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
        with self.file_writer.as_default():
            tf.summary.image(self.name, aug, step=self._counter,
                             max_outputs=self.max_images)
        self._counter += 1
        return aug, label
class TestingImageAugmentation:
    """Evaluation-time preprocessing: normalise pixels to [0, 1], upscale
    to (input_size * scale_img + resize) square, resize to (224, 224) —
    no random augmentation — and log samples to TensorBoard."""

    def __init__(self, log_dir: str, max_images: int, name: str,
                 input_size: int, scale_img: int, resize: int,
                 batch_size: int) -> None:
        self.file_writer = tf.summary.create_file_writer(log_dir)
        self.max_images: int = max_images
        self.name: str = name
        self.input_size: int = input_size
        self.resize: int = resize
        self.batch_size: int = batch_size
        self.scale_img: int = scale_img
        self._counter: int = 0  # TensorBoard step for logged image batches

    def __call__(self, image, label):
        normalised = tf.cast(image, tf.float32) / 255.0
        side = (self.input_size * self.scale_img) + self.resize
        out = tf.image.resize(normalised, (side, side),
                              method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
        out = tf.image.resize(out, [224, 224],
                              method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
        with self.file_writer.as_default():
            tf.summary.image(self.name, out, step=self._counter,
                             max_outputs=self.max_images)
        self._counter += 1
        return out, label
def main():
parser = argparse.ArgumentParser(description='PeleeNet Trainer')
parser.add_argument('--input_dir', help="Directory containing training data (eg. /workspace/data)")
parser.add_argument('--output_dir', help="Directory to save model to disk (eg. /tmp/model_dir)")
parser.add_argument('--epochs', help="Number of training epochs")
parser.add_argument('--model_name', help="Name of the model being trained")
parser.add_argument('--model_version', help="Version of the model (eg. 1.0.0 (versioning scheme independent))")
parser.add_argument('--data_augment', help="Enable or disable data augmentation")
parser.add_argument('--resize', help="Resize training data (eg. 32 (where original image size is (224,224) this would resize the image to (256, 256)))")
parser.add_argument('--scale_img', help="Factor by which to scale the input image (eg. 7 (if the input image is 32x32x3 (HWC) the output would be (224,224,3)))")
parser.add_argument('--crop_pct', help="Percentage to center crop training image (eg. 0.5 will center crop to the middle 50% of pixels in the image)")
parser.add_argument('--subtract_pixel_mean', help="Enable or disable subtracting the pixel mean from input image batches")
parser.add_argument('--batch_size', help="Batch size for batching training data (eg. 128)")
parser.add_argument('--learning_rate', help="Learning rate to use with the optimizer we choose on our model (eg. 1e-3 or 0.003)")
parser.add_argument('--momentum', help="Momentum to use for the SGD Optimizer")
parser.add_argument('--lr_patience', help='Number of epochs with no improvement after which learning rate will be reduced. (eg. 5)')
parser.add_argument('--dropout', help="Percentage of dropout to add to the network (eg .5 == 50% dropout rate")
#parser.add_argument('--dataset_split', nargs='+', type=float, help="What splits to use for partitioning data between training, validation, and test (eg. 0.7 0.15 0.15) (repsectively))")
parser.add_argument('--growth_rate', help="Growth Rate as defined in the PeleeNet paper (eg. 32)")
parser.add_argument('--bottle_neck_width', nargs="+", type=str, help="Bottle Neck Width as defined in the PeleeNet paper (eg. 1 2 4 4)")
parser.add_argument('--num_classes', help="Number of classes contained within a dataset. (eg. 1000 for ImageNet)")
parser.add_argument('--input_size', help="Input size of the dataset (eg. 224 for images with (224,224,3) dimensions)")
parser.add_argument('--prefetch_size', help="Number of batches to prefetch for model training (eg. 5)")
parser.add_argument('--shuffle_buffer', help="Number of data points to add to shuffle buffer (eg. 10000)")
args = parser.parse_args()
EPOCHS = int(args.epochs)
LEARNING_RATE = float(args.learning_rate)
MOMENTUM = float(args.momentum)
#TODO(ehenry): Make dataset augmentation optional as an argument for hyperparameter sweeps
DATA_AUGMENTATION = args.data_augment
RESIZE = int(args.resize)
SCALE_IMG = int(args.scale_img)
DROPOUT = float(args.dropout)
PATIENCE = int(args.lr_patience)
INPUT_DIR = str(args.input_dir)
NUM_CLASSES = int(args.num_classes)
INPUT_SIZE = int(args.input_size)
PREFETCH_SIZE = int(args.prefetch_size)
SHUFFLE_BUFFER = int(args.shuffle_buffer)
OUTPUT_DIR = str(args.output_dir)
MODEL_NAME = str(args.model_name)
#TODO(ehenry): This can likely be combined with the DATA_AUGMENTATION flag above.
# It should be made optional for via command line argument for hyperparameter sweeps
if args.crop_pct:
CROP_PERCENT = float(args.crop_pct)
else:
pass
if args.growth_rate:
GROWTH_RATE = int(args.growth_rate)
else:
GROWTH_RATE = 32
if args.bottle_neck_width:
BOTTLENECK_WIDTH = list(args.bottle_neck_width)
else:
BOTTLENECK_WIDTH = [1,2,4,4]
# TODO(ehenry) For data management, is there a way we can automate this process
# for users whom use our platform(s)? Something to investigate when
# looking into what data management means? API calls to FS served by
# Dell EMC storage array?
MODEL_VERSION = args.model_version
MODEL_DIRECTORY = os.path.join(args.output_dir, MODEL_NAME, MODEL_VERSION)
checkpoint_dir = os.path.join(MODEL_DIRECTORY, 'ckpt')
tensorboard_dir = os.path.join(MODEL_DIRECTORY, 'logs')
current_time: str = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
train_log_dir: str = tensorboard_dir + '/gradient_tape/' + current_time + '/train'
train_img_dir: str = tensorboard_dir + '/gradient_tape/' + current_time + '/train/images'
test_img_dir: str = tensorboard_dir + '/gradient_tape/' + current_time + '/test/images'
test_log_dir: str = tensorboard_dir + '/gradient_tape/' + current_time + '/test'
if args.batch_size:
BATCH_SIZE = int(args.batch_size)
else:
BATCH_SIZE = 128
#TODO(ehenry) clean up this logic for directory creation
print(MODEL_DIRECTORY)
if os.path.isdir(MODEL_DIRECTORY) == False:
os.makedirs(os.path.join(MODEL_DIRECTORY, MODEL_VERSION))
os.mkdir(checkpoint_dir)
os.mkdir(tensorboard_dir)
print(f"Training Log Directory : {train_log_dir}")
print(f"Testing Log Directory : {test_log_dir}")
validation_log_dir: str = tensorboard_dir + '/gradient_tape/' + current_time + '/validation'
os.makedirs(train_log_dir)
os.makedirs(test_log_dir)
os.makedirs(validation_log_dir)
else:
print(f"Model {MODEL_NAME} Version {MODEL_VERSION} already exists!")
#TODO(ehenry): Implement logic to write metadata files for use in Kubeflow pipelines
# This specific example will allow for spawning a TensorBoard instance within Kubernetes
# from the Kubeflow Pipelines UI
metadata = {
"outputs": [{
"type": "tensorboard",
"source": train_log_dir,
}]
}
#TODO(ehenry): Define logic for saving model metadata to the metadata module included with Kubeflow
with open('/mlpipeline-ui-metadata.json', 'w') as f:
json.dump(metadata, f)
# dataset_splits = args.dataset_split
# | |
# gh_stars: 0
# preprocess_CPNEFluoxetine_data.py: a file defining CPNE-Fluoxetine dataset-specific preprocessing classes and functions
# SEE LICENSE STATEMENT AT THE END OF THE FILE
# dependency import statements
import os
import pickle as pkl
import argparse
from random import shuffle, randint
import scipy.io as scio
import numpy as np
import mne
# from utils.caching_utils import create_directory
from data_preprocessing.data_preprocessing_utils import (
PreprocessedDataset,
randomly_sample_window_from_signal,
sample_window_from_signal_for_RP_task,
sample_window_from_signal_for_TS_task,
)
class CPNEFluoxetineDataset(PreprocessedDataset):
"""
CPNEFluoxetineDataset Description:
- General Purpose: defines a class for preprocessing and caching the CPNE Fluoxetine dataset
- Usage:
* preprocess_CPNEFluoxetine_data.__main__: uses this class to perform shuffling, splitting, and caching operations on a given CPNE Fluoxetine repository
"""
def __init__(
    self,
    root_save_directory,
    data_split_type,
    task_id,
    data_source_id,
    date_of_data_split,
    task_variable_parameter_args=None,
    data_source_specific_args=None,
    num_splits=1,
    holdout_set=False,
    split_ratios=(0.7, 0.2, 0.1),
):
    """
    CPNEFluoxetineDataset.__init__: defines directory, filenames into which preprocessed data will be saved in addition to member variables
    - Inputs:
    * root_save_directory (str): the (root) directory to which the preprocessed data will be saved - see templates.file_directory_templates for details
    * data_split_type (str): one of ['standardSplit', 'crossValSplit'] denoting the type of dataset being curated
    * task_id (str): one of the id's present in self.get_hyperparams_for_task cases denoting the types of tasks being modeled in the current dataset
    * data_source_id (str): an identifier of the original dataset (e.g., 'TUAB' or 'CPNEFluoxetine')
    * date_of_data_split (str): a YYYYMMDD-formated string denoting the date of dataset curation/preprocessing
    * task_variable_parameter_args (argparse obj): arguments for task-specific parameters (e.g., tau-positive for RP task) which we may wish to tune in experiments
    * data_source_specific_args (argparse obj): arguments for preprocessing the CPNE Fluoxetine dataset, default=None
    * num_splits (int): the number of train-val-test splits to make (esp. for cross-validation), default=1
    * holdout_set (boolean): whether a test split needs to be stored as a holdout set, default=False
    * split_ratios (tuple): a tuple containing the ratios (float) of each split data subset to be assigned to the train, validation, and test sets, respectively, default=(0.7, 0.2, 0.1)
    - Outputs:
    * preprocessed-dataset (CPNEFluoxetineDataset): a fully preprocessed CPNE Fluoxetine dataset ready for consummption by the cross-domain-learning repository
    - Usage:
    * CPNEFluoxetineDataset class: uses CPNEFluoxetineDataset.__init__ to initialize member variables
    """
    # All initialization is delegated to the PreprocessedDataset base class;
    # this subclass adds no extra state of its own.
    # (A redundant trailing `pass` statement was removed.)
    super(CPNEFluoxetineDataset, self).__init__(
        root_save_directory,
        data_split_type,
        task_id,
        data_source_id,
        date_of_data_split,
        task_variable_parameter_args,
        data_source_specific_args,
        num_splits,
        holdout_set,
        split_ratios,
    )
def get_class_info_for_data_file(self, data_file_path):
    """
    CPNEFluoxetineDataset.get_class_info_for_data_file: determines the behavioral class label (saline vs. fluoxetine) for a data file
    - Inputs:
    * data_file_path (str): the path to the source data file for which a class label is to be determined
    - Outputs:
    * class_label (int): an int representing the behavioral class label corresponding to the data in data_file_path
    (2 == saline, 3 == fluoxetine)
    - Raises:
    * ValueError: if the matched class-info file marks the recording as neither saline nor fluoxetine
    - Usage:
    * CPNEFluoxetineDataset.draw_sample: uses CPNEFluoxetineDataset.get_class_info_for_data_file to determine if
    data_file_path corresponds to saline or fluoxetine behavioral classes
    """
    SALINE_LABEL = 2
    FLUOXETINE_LABEL = 3
    # The first two '_'-separated tokens of the path uniquely identify the
    # recording; they are used to locate the matching class-info .mat file.
    split_file_path = data_file_path.split("_")
    unique_identifier_for_curr_data = "_".join(split_file_path[:2])
    # Class-info files live in a sibling "ClassData" directory.
    # NOTE(review): assumes the path contains at least two '_'-separated
    # directory components before the file name — confirm against callers.
    data_file_class_info_dir = os.sep.join(split_file_path[:-2] + ["ClassData"])
    curr_data_class_info_file_names = [
        x
        for x in os.listdir(data_file_class_info_dir)
        if unique_identifier_for_curr_data in x
    ]
    # Exactly one class-info file must match the recording identifier.
    assert len(curr_data_class_info_file_names) == 1
    curr_data_class_info_file_name = curr_data_class_info_file_names[0]
    curr_data_class_info = scio.loadmat(
        data_file_class_info_dir + os.sep + curr_data_class_info_file_name
    )
    # The .mat file carries mutually exclusive 'Fluoxetine'/'Saline' flag
    # vectors: exactly one of the two must contain nonzero entries.
    if sum(curr_data_class_info["Fluoxetine"][0]) == 0:
        assert sum(curr_data_class_info["Saline"][0]) > 0
        return SALINE_LABEL
    elif sum(curr_data_class_info["Fluoxetine"][0]) > 0:
        assert sum(curr_data_class_info["Saline"][0]) == 0
        return FLUOXETINE_LABEL
    else:
        raise ValueError(
            "CPNEFluoxetineDataset.get_class_info_for_data_file: unexpected sum of class info"
        )
def draw_sample(self, file_path):
    """
    CPNEFluoxetineDataset.draw_sample: draws a random sample from a data file corresponding the self.task_id
    - Inputs:
    * file_path (str): the path to the source data file from which a sample is to be drawn
    - Outputs:
    * sample (tuple): a tuple formatted as (x1, x2, ..., xn, label_y), representing a single data point for training on self.task_id
    - Raises:
    * NotImplementedError: if self.num_labels_per_sample != 1, or if window sampling is not implemented for self.task_id
    * ValueError: if the file path or task id does not match a supported labeling scheme
    - Usage:
    * CPNEFluoxetineDataset.cache_samples_for_current_split_fold: uses CPNEFluoxetineDataset.draw_sample prior to draw a single sample
    Note: this function draws inspiration from various elements of https://github.com/zacharycbrown/ssl_baselines_for_biosignal_feature_extraction,
    particularly the data_utils.py and data_loaders.py files
    """
    BINARY_NEG_LABEL = 0
    BINARY_POS_LABEL = 1
    full_signal = scio.loadmat(file_path)
    # Stack only the channels configured as reliably active into a single
    # (num_channels, num_timesteps) array.
    reliably_active_portion_of_full_signal = np.vstack(
        [full_signal[channel_key] for channel_key in self.channel_ids_to_keep]
    )
    # --- label assignment -------------------------------------------------
    labels = []
    if self.num_labels_per_sample != 1:
        raise NotImplementedError(
            "CPNEFluoxetineDataset.draw_sample: currently only supports one label assignment per data sample"
        )
    else:
        if self.task_id in ["RP", "TS"]:
            # Self-supervised tasks: draw the binary label first; the RP
            # window sampler below then enforces it via tpos/tneg offsets.
            # NOTE(review): "TS" receives a label here but has no window
            # sampling branch below, so it would raise NotImplementedError.
            labels = [randint(BINARY_NEG_LABEL, BINARY_POS_LABEL)]
        elif self.task_id == "BehavioralFluoxetine":
            # Supervised task: the label is encoded in the file path.
            if "HomeCage" in file_path:
                labels = [0] # homecage label == 0
            elif "OFT" in file_path:
                labels = [1] # OFT label == 1
            elif "DrugRecording" in file_path:
                # DrugRecording label in [2,3]
                labels = [self.get_class_info_for_data_file(file_path)]
            else:
                raise ValueError(
                    "CPNEFluoxetineDataset.draw_sample: the provided file_path=="
                    + file_path
                    + " does not have the required structure"
                )
        else:
            raise ValueError(
                "CPNEFluoxetineDataset.draw_sample: the provided self.task_id=="
                + self.task_id
                + " is not supported"
            )
    # --- window sampling --------------------------------------------------
    inputs = []
    input_starts = []  # start indices of drawn windows (used by the RP task)
    for window_num in range(self.num_inputs_per_sample):
        sampled_window = None
        if self.task_id == "BehavioralFluoxetine":
            sampled_window, _ = randomly_sample_window_from_signal(
                reliably_active_portion_of_full_signal,
                self.task_variable_parameter_args.window_len,
            )
        elif self.task_id == "RP":
            # Relative Positioning: window 0 is the anchor; window 1 is
            # drawn near (positive) or far from (negative) the anchor,
            # according to the label chosen above.
            curr_window_type = None
            curr_anchor_start = None
            curr_tpos = None
            curr_tneg = None
            if window_num == 0:
                curr_window_type = "anchor"
            elif window_num == 1:
                curr_window_type = "other"
                curr_anchor_start = input_starts[0]
                if labels[0] == BINARY_NEG_LABEL:
                    curr_tneg = self.task_variable_parameter_args.tneg
                elif (
                    labels[0] == BINARY_POS_LABEL
                ): # elif labels[1] == BINARY_POS_LABEL:
                    curr_tpos = self.task_variable_parameter_args.tpos
                else:
                    raise ValueError(
                        "CPNEFluoxetineDataset.draw_sample: unsupported label for RP task requested"
                    )
            else:
                raise ValueError(
                    "CPNEFluoxetineDataset.draw_sample: too many windows requested for RP task"
                )
            sampled_window, start_ind = sample_window_from_signal_for_RP_task(
                reliably_active_portion_of_full_signal,
                self.task_variable_parameter_args.window_len,
                window_type=curr_window_type,
                anchor_start=curr_anchor_start,
                tpos=curr_tpos,
                tneg=curr_tneg,
            )
            input_starts.append(start_ind)
        else:
            raise NotImplementedError(
                "CPNEFluoxetineDataset.draw_sample: window sampling not implemented for self.task_id == "
                + self.task_id
            )
        # Band-pass filter each drawn window before storing it.
        filtered_window = (
            mne.filter.filter_data( # apply a 4th order butterworth filter
                data=sampled_window,
                sfreq=self.task_variable_parameter_args.fs,
                l_freq=self.task_variable_parameter_args.l_freq,
                h_freq=self.task_variable_parameter_args.h_freq,
                method=self.task_variable_parameter_args.filter_method,
                fir_window=self.task_variable_parameter_args.fir_window,
            )
        )
        # NOTE(review): [:, 0::1] is a full-copy slice — the downsampling
        # step mentioned below was intentionally disabled.
        inputs.append(
            filtered_window[:, 0::1]
        ) # remove the downsample (fs=1000 -> 250Hz downsample) and record as input
    # Sample format: (input_window_1, ..., input_window_n, label).
    sample = tuple(inputs + labels)
    return sample
def cache_samples_for_current_split_fold(
    self,
    fold_individs,
    all_data_file_paths,
    num_samps_across_files,
    fold_save_dir,
    max_num_samps_per_subset_file,
):
    """
    CPNEFluoxetineDataset.cache_samples_for_current_split_fold: draws samples from each source file and caches them to subset pickle files
    - Inputs:
        * fold_individs (list): a list of individual ids that can be used to filter source data files for inclusion into the current split fold
        * all_data_file_paths (list): a list corresponding to all available data files in the source dataset
        * num_samps_across_files (int): the total number of samples to be drawn from all files combined
        * fold_save_dir (str): the directory to which all split fold subset files should be saved
        * max_num_samps_per_subset_file (int): the maximum number of samples to be included in each split fold subset file
    - Outputs:
        * source_file_paths (list): a list of file paths used to populate the current split fold
        * num_samps_per_source_file (list): a list (of int values) representing how many samples were drawn from each file, with each int corresponding to a path in source_file_paths
    - Usage:
        * CPNEFluoxetineDataset.preprocess_and_cache_data: uses CPNEFluoxetineDataset.cache_samples_for_current_split_fold prior to drawing samples
    """
    # keep only the files that belong to individuals assigned to this fold
    source_file_paths = [
        x for x in all_data_file_paths for y in fold_individs if y in x
    ]
    if not source_file_paths:
        raise ValueError(
            "CPNEFluoxetineDataset.cache_samples_for_current_split_fold: no source files matched fold_individs"
        )
    shuffle(source_file_paths)
    # distribute num_samps_across_files as evenly as possible across files
    # (bug fix: quotient was previously len(source_file_paths) // num_samps_across_files,
    #  which under-counts — e.g. 2 files / 5 samples yielded 0 per file)
    num_samps_per_source_file = [
        num_samps_across_files // len(source_file_paths)
        for _ in range(len(source_file_paths))
    ]
    # spread the remainder, one extra sample per file, from the front
    for i in range(num_samps_across_files % len(source_file_paths)):
        num_samps_per_source_file[i] += 1
    curr_subset_id_counter = 0
    curr_subset = []
    # bug fix: the int counter must be str()-converted before concatenation
    curr_subset_save_path = os.sep.join(
        [fold_save_dir, "subset" + str(curr_subset_id_counter) + ".pkl"]
    )
    for file_path, num_samps_needed in zip(
        source_file_paths, num_samps_per_source_file
    ):
        for _ in range(num_samps_needed):
            # draw sample
            curr_subset.append(self.draw_sample(file_path))
            # check if current subset needs to be cached
            if len(curr_subset) == max_num_samps_per_subset_file:
                with open(curr_subset_save_path, "wb") as outfile:
                    pkl.dump(curr_subset, outfile)
                # initialize new subset
                curr_subset_id_counter += 1
                curr_subset = []
                curr_subset_save_path = os.sep.join(
                    [fold_save_dir, "subset" + str(curr_subset_id_counter) + ".pkl"]
                )
    # bug fix: flush any trailing partial subset so no drawn samples are lost
    if curr_subset:
        with open(curr_subset_save_path, "wb") as outfile:
            pkl.dump(curr_subset, outfile)
    return source_file_paths, num_samps_per_source_file
def preprocess_and_cache_data(self, data_source_specific_args):
"""
CPNEFluoxetineDataset.load_cached_preprocessed_dataset: shuffles, splits, and caches CPNE Fluoxetine data from original source directory
- Inputs:
* data_source_specific_args (argparse obj): arguments for preprocessing the original CPNE Fluoxetine dataset, including
- args.original_data_source_dir (str): the directory containing the original CPNE Fluoxetine dataset
- Outputs:
* */cached_samples*.pkl (cached list): pickle files containing lists of cached samples, with each sample formatted as (x1, x2, ..., xn, label_y)
* cached_data_stats_and_params.pkl (cached dict): pickle file containing dict of info related to how the cached data was formatted/built
- Usage:
* PreprocessedDataset.__init__: uses CPNEFluoxetineDataset.preprocess_and_cache_data when self.data_save_directory is empty
"""
# access source data set and identify individuals
individual_ids = set()
all_available_chans = set()
potentially_inactive_chans = set()
max_num_chans_in_single_recording = None
min_num_chans_in_single_recording = None
all_data_file_paths = []
for environ_dir in os.listdir(
data_source_specific_args.original_data_source_dir
):
curr_environ_path = os.sep.join(
[data_source_specific_args.original_data_source_dir, environ_dir]
)
# track individual ids
for | |
to the local directory
remotedir - the remote directory at Baidu Yun (after app's directory) to sync from. \
if not specified, it defaults to the root directory
localdir - the local directory to sync to if not specified, it defaults to the current directory.
deletelocal - delete local files that are not inside Baidu Yun directory, default is False
'''
result = const.ENoError
rpath = get_pcs_path(remotedir)
same, diff, local, remote = self.__compare(rpath, localdir)
# clear the way
for d in diff:
t = d[0]
p = d[1]
#lcpath = os.path.join(localdir, p) # local complete path
lcpath = joinpath(localdir, p) # local complete path
rcpath = rpath + '/' + p # remote complete path
if t == 'DF':
result = removedir(lcpath, self.verbose)
subresult = self.__downfile(rcpath, lcpath)
if subresult != const.ENoError:
result = subresult
elif t == 'FD':
result = removefile(lcpath, self.verbose)
subresult = makedir(lcpath, verbose = self.verbose)
if subresult != const.ENoError:
result = subresult
else: # " t == 'F' " must be true
result = self.__downfile(rcpath, lcpath)
for r in remote:
t = r[0]
p = r[1]
#lcpath = os.path.join(localdir, p) # local complete path
lcpath = joinpath(localdir, p) # local complete path
rcpath = rpath + '/' + p # remote complete path
if t == 'F':
subresult = self.__downfile(rcpath, lcpath)
if subresult != const.ENoError:
result = subresult
else: # " t == 'D' " must be true
subresult = makedir(lcpath, verbose = self.verbose)
if subresult != const.ENoError:
result = subresult
if str2bool(deletelocal):
for l in local:
# use os.path.isfile()/isdir() instead of l[0], because we need to check file/dir existence.
# as we may have removed the parent dir previously during the iteration
#p = os.path.join(localdir, l[1])
p = joinpath(localdir, l[1])
if os.path.isfile(p):
subresult = removefile(p, self.verbose)
if subresult != const.ENoError:
result = subresult
elif os.path.isdir(p):
subresult = removedir(p, self.verbose)
if subresult != const.ENoError:
result = subresult
return result
def syncup(self, localdir = '', remotedir = '', deleteremote = False):
    ''' Usage: syncup [localdir] [remotedir] [deleteremote] - \
sync up from the local directory to the remote directory
  localdir - the local directory to sync from if not specified, it defaults to the current directory.
  remotedir - the remote directory at Baidu Yun (after app's directory) to sync to. \
if not specified, it defaults to the root directory
  deleteremote - delete remote files that are not inside the local directory, default is False
    '''
    result = const.ENoError
    rpath = get_pcs_path(remotedir)
    #rpartialdir = remotedir.rstrip('/ ')
    # Compare the two trees: 'same' match on both sides, 'diff' conflict in
    # type/content, 'local'/'remote' exist on only one side (True = upload direction).
    same, diff, local, remote = self.__compare(rpath, localdir, True)
    # clear the way: resolve conflicting entries first
    for d in diff:
        t = d[0] # type
        p = d[1] # path
        #lcpath = os.path.join(localdir, p) # local complete path
        lcpath = joinpath(localdir, p) # local complete path
        rcpath = rpath + '/' + p # remote complete path
        if self.shalloverwrite("Do you want to overwrite '{}' at Baidu Yun? [y/N]".format(p)):
            # this path is before get_pcs_path() since delete() expects so.
            #result = self.delete(rpartialdir + '/' + p)
            # Delete the conflicting remote entry before re-creating it from local.
            result = self.__delete(rcpath)
            # self.pd("diff type: {}".format(t))
            # self.__isrev = True
            # if t != 'F':
            #     result = self.move(remotedir + '/' + p, remotedir + '/' + p + '.moved_by_bypy.' + time.strftime("%Y%m%d%H%M%S"))
            # self.__isrev = False
            if t == 'F' or t == 'FD':
                # Local side is a file ('FD': file locally, dir remotely): upload it.
                subresult = self.__upload_file(lcpath, rcpath)
                if subresult != const.ENoError:
                    result = subresult
            else: # " t == 'DF' " must be true
                # Local side is a directory: re-create it remotely.
                subresult = self.__mkdir(rcpath)
                if subresult != const.ENoError:
                    result = subresult
        else:
            pinfo("Uploading '{}' skipped".format(lcpath))
    # Entries that exist only locally: create them on the remote side.
    for l in local:
        t = l[0]
        p = l[1]
        #lcpath = os.path.join(localdir, p) # local complete path
        lcpath = joinpath(localdir, p) # local complete path
        rcpath = rpath + '/' + p # remote complete path
        self.pd("local type: {}".format(t))
        self.__isrev = False
        if t == 'F':
            subresult = self.__upload_file(lcpath, rcpath)
            if subresult != const.ENoError:
                result = subresult
        else: # " t == 'D' " must be true
            subresult = self.__mkdir(rcpath)
            if subresult != const.ENoError:
                result = subresult
    if str2bool(deleteremote):
        # i think the list is built top-down, so directories appearing later are either
        # children or another set of directories
        pp = '\\' # previous path, setting to '\\' make sure it won't be found in the first step
        for r in remote:
            #p = rpartialdir + '/' + r[1]
            p = rpath + '/' + r[1]
            if 0 != p.find(pp): # another path
                # Only delete paths not nested under the previously deleted one.
                #subresult = self.delete(p)
                subresult = self.__delete(p)
                if subresult != const.ENoError:
                    result = subresult
                pp = p
    return result
def dumpcache(self):
    ''' Usage: dumpcache - display file hash cache'''
    # Bail out early when the hash cache was never loaded from disk.
    if not cached.cacheloaded:
        perr("Cache not loaded.")
        return const.ECacheNotLoaded
    # Pretty-print the in-memory hash cache.
    #pprint.pprint(cached.cache)
    MyPrettyPrinter().pprint(cached.cache)
    return const.ENoError
def cleancache(self):
    ''' Usage: cleancache - remove invalid entries from hash cache file'''
    # Nothing to clean if no cache file exists on disk.
    if not os.path.exists(self.__hashcachepath):
        return const.EFileNotFound
    try:
        # back up the cache file first so a failed clean can be undone
        backup = self.__hashcachepath + '.lastclean'
        shutil.copy(self.__hashcachepath, backup)
        # bug fix: message was missing the closing quote around the backup path
        self.pd("Hash Cache file '{}' backed up as '{}'".format(
            self.__hashcachepath, backup))
        cached.cleancache()
        return const.ENoError
    except Exception as ex:
        perr(formatex(ex))
        return const.EException
def __cdl_act(self, r, args):
    """Shared response handler for cloud-download requests: dump the JSON reply.

    Returns ENoError when the reply parses as JSON, IETaskNotFound otherwise
    (the raw response text is shown in that case).
    """
    try:
        pr(pprint.pformat(r.json()))
        return const.ENoError
    # bug fix: was a bare 'except:', which also swallowed SystemExit and
    # KeyboardInterrupt; catch Exception so interpreter exits still propagate.
    except Exception:
        pr(pprint.pformat({ 'text': rb(r.text) }))
        return const.IETaskNotFound
def __prepare_cdl_add(self, source_url, rpath, timeout):
    """Build the request parameters for adding an offline (cloud) download task.

    bug fix: the 'timeout' argument was previously ignored — the parameter
    dict hard-coded 3600 regardless of what the caller passed.
    """
    pr("Adding cloud download task:")
    pr("{} =cdl=> {}".format(source_url, rpath))
    pars = {
        'method': 'add_task',
        'source_url': source_url,
        'save_path': rpath,
        'timeout': timeout }
    return pars
def __cdl_add(self, source_url, rpath, timeout):
    # Compose the add-task parameters, then POST them to the cloud_dl service.
    request_pars = self.__prepare_cdl_add(source_url, rpath, timeout)
    return self.__post(pcsurl + 'services/cloud_dl', request_pars, self.__cdl_act)
def __get_cdl_dest(self, source_url, save_path):
    """Resolve the final remote path for a cloud download task.

    When save_path is the app root or an existing remote directory, the file
    name taken from the source URL is appended so the task saves into it.
    """
    rpath = get_pcs_path(save_path)
    is_app_root = (rpath == const.AppPcsPath)
    # NOTE: short-circuit order preserved — __get_file_info() is only queried
    # (and __remote_json populated) when rpath is not the app root.
    if is_app_root or (
            const.ENoError == self.__get_file_info(rpath)
            and self.__remote_json['isdir']):
        rpath += '/' + source_url.split('/')[-1]
    return rpath
def cdl_add(self, source_url, save_path = '/', timeout = 3600):
    ''' Usage: cdl_add <source_url> [save_path] [timeout] - add an offline (cloud) download task
  source_url - the URL to download file from.
  save_path - path on PCS to save file to. default is to save to root directory '/'.
  timeout - timeout in seconds. default is 3600 seconds.
    '''
    # Resolve the concrete remote destination, then queue the task.
    destination = self.__get_cdl_dest(source_url, save_path)
    return self.__cdl_add(source_url, destination, timeout)
def __get_cdl_query_pars(self, task_ids, op_type):
    # Assemble the query parameters for polling cloud download task status.
    return {
        'method': 'query_task',
        'task_ids': task_ids,
        'op_type': op_type}
def __cdl_query(self, task_ids, op_type):
    # Build the query parameters and POST them to the cloud_dl endpoint.
    query_pars = self.__get_cdl_query_pars(task_ids, op_type)
    return self.__post(pcsurl + 'services/cloud_dl', query_pars, self.__cdl_act)
def cdl_query(self, task_ids, op_type = 1):
    ''' Usage: cdl_query <task_ids> - query existing offline (cloud) download tasks
  task_ids - task ids seperated by comma (,).
  op_type - 0 for task info; 1 for progress info. default is 1
    '''
    # Public CLI entry point; delegates to the private implementation.
    return self.__cdl_query(task_ids, op_type)
def __cdl_mon_act(self, r, args):
    """Response handler for one polling round of a cloud download task.

    'args' is a (task_id, start_time, done) triple; 'done' is a one-element
    list used as an out-parameter so the caller's polling loop can observe
    completion. Returns ENoError on a parseable reply, EInvalidJson otherwise.
    """
    try:
        task_id, start_time, done = args
        j = r.json()
        ti = j['task_info'][str(task_id)]
        # A reply without the size fields means the task is no longer in a
        # normal downloading state; stop polling and show the raw reply.
        if ('file_size' not in ti) or ('finished_size' not in ti):
            done[0] = True
            pr(j)
        else:
            total = int(ti['file_size'])
            finish = int(ti['finished_size'])
            # total != 0 guards against treating an unknown (0) size as done.
            done[0] = (total != 0 and (total == finish))
            pprgr(finish, total, start_time)
            if done[0]:
                pr(pprint.pformat(j))
        return const.ENoError
    except Exception as ex:
        perr("Exception while monitoring offline (cloud) download task:\n{}".format(formatex(ex)))
        perr("Baidu returned:\n{}".format(rb(r.text)))
        return const.EInvalidJson
def __cdl_addmon_act(self, r, args):
    """Store the add-task JSON reply into args[0] (out-parameter) and echo it."""
    try:
        reply = r.json()
        args[0] = reply
        pr(pprint.pformat(reply))
        return const.ENoError
    except Exception as ex:
        perr("Exception while adding offline (cloud) download task:\n{}".format(formatex(ex)))
        perr("Baidu returned:\n{}".format(rb(r.text)))
        return const.EInvalidJson
def __cdl_sighandler(self, signum, frame):
    """Signal handler (SIGINT/SIGHUP): cancel the in-flight cloud download task and exit."""
    pr("Cancelling offline (cloud) download task: {}".format(self.__cdl_task_id))
    result = self.__cdl_cancel(self.__cdl_task_id)
    pr("Result: {}".format(result))
    # abort the whole program; the remote task has been cancelled
    quit(const.EAbort)
def __cdl_addmon(self, source_url, rpath, timeout = 3600):
    """Add an offline (cloud) download task and poll its progress until done.

    Returns ENoError when the task finishes, EInvalidJson when the add reply
    lacks a task id, EAbort on Ctrl-C (the remote task is cancelled first),
    or the first polling error encountered.
    """
    pars = self.__prepare_cdl_add(source_url, rpath, timeout)
    jc = [{}] # out param: filled by __cdl_addmon_act with the add-task reply
    result = self.__post(pcsurl + 'services/cloud_dl',
        pars, self.__cdl_addmon_act, jc)
    # guard clause: bail out if adding the task failed
    if result != const.ENoError:
        return result
    if 'task_id' not in jc[0]:
        return const.EInvalidJson
    task_id = jc[0]['task_id']
    pars = self.__get_cdl_query_pars(task_id, 1)
    start_time = time.time()
    done = [ False ] # out param updated by __cdl_mon_act each polling round
    # cancel task on Ctrl-C
    pr("Press Ctrl-C to cancel the download task")
    self.__cdl_task_id = task_id
    setsighandler(signal.SIGINT, self.__cdl_sighandler)
    setsighandler(signal.SIGHUP, self.__cdl_sighandler)
    try:
        while True:
            result = self.__post(
                pcsurl + 'services/cloud_dl', pars, self.__cdl_mon_act,
                (task_id, start_time, done))
            if result != const.ENoError:
                return result
            if done[0]:
                return const.ENoError
            # poll every 5 seconds
            time.sleep(5)
    except KeyboardInterrupt:
        # bug fix: user-facing message had a typo ("downloa task") and the
        # spelling now matches __cdl_sighandler ("Cancelling")
        pr("Cancelling offline (cloud) download task: {}".format(task_id))
        self.__cdl_cancel(task_id)
        return const.EAbort
def cdl_addmon(self, source_url, save_path = '/', timeout = 3600):
    ''' Usage: cdl_addmon <source_url> [save_path] [timeout] - add an offline (cloud) download task and monitor the download progress
  source_url - the URL to download file from.
  save_path - path on PCS to save file to. default is to save to root directory '/'.
  timeout - timeout in seconds. default is 3600 seconds.
    '''
    # Resolve the concrete remote destination, then add-and-monitor the task.
    destination = self.__get_cdl_dest(source_url, save_path)
    return self.__cdl_addmon(source_url, destination, timeout)
def __cdl_list(self):
    # The list_task method takes no extra parameters.
    return self.__post(pcsurl + 'services/cloud_dl',
        { 'method': 'list_task' }, self.__cdl_act)
def cdl_list(self):
    ''' Usage: cdl_list - list offline (cloud) download tasks
    '''
    # Public CLI entry point; delegates to the private implementation.
    return self.__cdl_list()
def __cdl_cancel(self, task_id):
    # POST a cancel_task request for the given task id.
    return self.__post(
        pcsurl + 'services/cloud_dl',
        { 'method': 'cancel_task', 'task_id': task_id },
        self.__cdl_act)
def cdl_cancel(self, task_id):
    ''' Usage: cdl_cancel <task_id> - cancel an offline (cloud) download task
  task_id - id of the task to be canceled.
    '''
    # Public CLI entry point; delegates to the private implementation.
    return self.__cdl_cancel(task_id)
def __get_accept_cmd(self, rpath):
    """Build the 'bypy accept' command a recipient can run to rapid-upload this shared file."""
    # strip the app's PCS prefix so the recipient sees an app-relative path;
    # an empty result is replaced by a placeholder (empty string is falsy)
    remotepath = rpath[const.AppPcsPathLen:] or 'PATH_NAME_MISSING'
    return "bypy accept {} {} {} {} {}".format(
        remotepath,
        self.__current_file_size,
        self.__current_file_md5,
        self.__current_file_slice_md5,
        hex(self.__current_file_crc32))
def __share_local_file(self, lpath, rpath, fast):
    """Share a local file by making it rapid-uploadable for a recipient.

    With fast=True only the local hashes are computed and the accept command
    printed; otherwise the file is uploaded to a remote temp dir and, if not
    already rapid-uploaded, rapid-upload is retried so the recipient's
    'bypy accept' will succeed. Returns a const.E*/IE* result code.
    """
    filesize = getfilesize(lpath)
    # Rapid upload (and hence sharing) only works above a minimum size.
    if filesize < const.MinRapidUploadFileSize:
        perr("File size ({}) of '{}' is too small (must be greater or equal than {}) to be shared".format(
            human_size(filesize), lpath, human_size(const.MinRapidUploadFileSize)))
        return const.EParameter
    if fast:
        # Fast path: hash locally and emit the accept command without uploading.
        self.__get_hashes_for_rapidupload(lpath, setlocalfile = True)
        pr(self.__get_accept_cmd(rpath))
        return const.ENoError
    # Upload to a temp location so Baidu's servers know the file content.
    ulrpath = const.RemoteTempDir + '/' + posixpath.basename(lpath)
    result = self.__upload_file(lpath, ulrpath)
    if result != const.ENoError:
        perr("Unable to share as uploading failed")
        return result
    if not self.__rapiduploaded:
        # The upload went through the normal path; retry rapid upload until the
        # server has registered the hashes (or retries are exhausted).
        i = 0
        while i < const.ShareRapidUploadRetries:
            i += 1
            result = self.__rapidupload_file(lpath, ulrpath, setlocalfile = True)
            if result == const.ENoError: # or result == IEMD5NotFound: # retrying if MD5 not found _may_ make the file available?
                break;
            else:
                self.pd("Retrying #{} for sharing '{}'".format(i, lpath))
                time.sleep(1)
    if result == const.ENoError:
        pr(self.__get_accept_cmd(rpath))
        return const.ENoError
    elif result == const.IEMD5NotFound:
        pr("# Sharing (RapidUpload) not possible for '{}', error: {}".format(lpath, result))
        return result
    else:
        pr("# Error sharing '{}', error: {}".format(lpath, result))
        return result
def __share_local_dir(self, lpath, rpath, fast):
result = const.ENoError
for walk in self.__walk_normal_file(lpath):
(dirpath, dirnames, filenames) = walk
for filename in filenames:
rpart = os.path.relpath(dirpath, lpath)
if rpart == '.':
rpart | |
<filename>pyabc/inference_util.py
"""Inference utilities."""
# Note: Due to cyclic imports, these need to be separated from other modules
import logging
import uuid
from datetime import datetime, timedelta
from typing import Callable, List
import numpy as np
import pandas as pd
from pyabc.acceptor import Acceptor
from pyabc.distance import Distance
from pyabc.epsilon import Epsilon
from pyabc.model import Model
from pyabc.parameters import Parameter
from pyabc.population import Particle
from pyabc.random_choice import fast_random_choice
from pyabc.random_variables import RV, Distribution
from pyabc.transition import ModelPerturbationKernel, Transition
logger = logging.getLogger("ABC")
class AnalysisVars:
    """Contract object class for passing analysis variables.

    Used e.g. to create new sampling tasks or check early stopping.

    The attributes mirror the constructor arguments one-to-one: priors and
    kernels (``model_prior``, ``parameter_priors``,
    ``model_perturbation_kernel``, ``transitions``), models and data
    (``models``, ``summary_statistics``, ``x_0``), acceptance machinery
    (``distance_function``, ``eps``, ``acceptor``), and stopping criteria
    (``min_acceptance_rate``, ``min_eps``, ``stop_if_single_model_alive``,
    ``max_t``, ``max_total_nr_simulations``, ``prev_total_nr_simulations``,
    ``max_walltime``, ``init_walltime``).
    """

    def __init__(
        self,
        model_prior: RV,
        parameter_priors: List[Distribution],
        model_perturbation_kernel: ModelPerturbationKernel,
        transitions: List[Transition],
        models: List[Model],
        summary_statistics: Callable,
        x_0: dict,
        distance_function: Distance,
        eps: Epsilon,
        acceptor: Acceptor,
        min_acceptance_rate: float,
        min_eps: float,
        stop_if_single_model_alive: bool,
        max_t: int,
        max_total_nr_simulations: int,
        prev_total_nr_simulations: int,
        max_walltime: timedelta,
        init_walltime: datetime,
    ):
        # plain value container: store everything verbatim, no validation
        self.model_prior = model_prior
        self.parameter_priors = parameter_priors
        self.model_perturbation_kernel = model_perturbation_kernel
        self.transitions = transitions
        self.models = models
        self.summary_statistics = summary_statistics
        self.x_0 = x_0
        self.distance_function = distance_function
        self.eps = eps
        self.acceptor = acceptor
        self.min_acceptance_rate = min_acceptance_rate
        self.min_eps = min_eps
        self.stop_if_single_model_alive = stop_if_single_model_alive
        self.max_t = max_t
        self.max_total_nr_simulations = max_total_nr_simulations
        self.prev_total_nr_simulations = prev_total_nr_simulations
        self.max_walltime = max_walltime
        self.init_walltime = init_walltime
def create_simulate_from_prior_function(
    model_prior: RV,
    parameter_priors: List[Distribution],
    models: List[Model],
    summary_statistics: Callable,
) -> Callable:
    """Create a function that simulates from the prior.

    Similar to _create_simulate_function, except that here the (model,
    parameter) pair is drawn from the prior and every sample is accepted.

    Parameters
    ----------
    model_prior: The model prior.
    parameter_priors: The parameter priors.
    models: List of all models.
    summary_statistics: Computes summary statistics from model output.

    Returns
    -------
    simulate_one:
        A function that returns a sampled particle.
    """
    def simulate_one():
        # draw a model index and a parameter directly from the priors
        model_index = int(model_prior.rvs())
        parameter = parameter_priors[model_index].rvs()
        # run the model and reduce its output to summary statistics
        model_result = models[model_index].summary_statistics(
            0, parameter, summary_statistics
        )
        # prior samples are accepted unconditionally with uniform weight;
        # the distance is filled in later, once the distance function has
        # been initialized
        return Particle(
            m=model_index,
            parameter=parameter,
            weight=1.0,
            sum_stat=model_result.sum_stat,
            distance=np.inf,
            accepted=True,
            proposal_id=0,
            preliminary=False,
        )

    return simulate_one
def generate_valid_proposal(
    t: int,
    m: np.ndarray,
    p: np.ndarray,
    model_prior: RV,
    parameter_priors: List[Distribution],
    model_perturbation_kernel: ModelPerturbationKernel,
    transitions: List[Transition],
):
    """Sample a parameter for a model.

    Parameters
    ----------
    t: Population index to generate for.
    m: Indices of alive models.
    p: Probabilities of alive models.
    model_prior: The model prior.
    parameter_priors: The parameter priors.
    model_perturbation_kernel: The model perturbation kernel.
    transitions: The transitions, one per model.

    Returns
    -------
    (m_ss, theta_ss): Model, parameter.
    """
    # first generation
    if t == 0:
        # sample from prior
        m_ss = int(model_prior.rvs())
        theta_ss = parameter_priors[m_ss].rvs()
        return m_ss, theta_ss

    # later generation
    # counter for detecting pathological sampling (see warning below)
    n_sample, n_sample_soft_limit = 0, 1000
    # sample until the prior density is positive
    while True:
        if len(m) > 1:
            # pick a previous-generation model by its probability, then
            # perturb it via the model perturbation kernel
            index = fast_random_choice(p)
            m_s = m[index]
            m_ss = model_perturbation_kernel.rvs(m_s)
            # theta_s is None if the population m_ss has died out.
            # This can happen since the model_perturbation_kernel
            # can return a model nr which has died out.
            if m_ss not in m:
                continue
        else:
            # only one model
            m_ss = m[0]
        theta_ss = transitions[m_ss].rvs()

        # check if positive under prior
        if model_prior.pmf(m_ss) * parameter_priors[m_ss].pdf(theta_ss) > 0:
            return m_ss, theta_ss

        # unhealthy sampling detection (warn once, then keep trying)
        n_sample += 1
        if n_sample == n_sample_soft_limit:
            logger.warning(
                "Unusually many (model, parameter) samples have prior "
                "density zero. The transition might be inappropriate."
            )
def evaluate_proposal(
    m_ss: int,
    theta_ss: Parameter,
    t: int,
    models: List[Model],
    summary_statistics: Callable,
    distance_function: Distance,
    eps: Epsilon,
    acceptor: Acceptor,
    x_0: dict,
    weight_function: Callable,
    proposal_id: int,
) -> Particle:
    """Evaluate a proposed parameter.

    Data are simulated for the proposed (model, parameter) pair, summary
    statistics computed, the distance evaluated, and acceptance checked.

    Parameters
    ----------
    m_ss, theta_ss: The proposed (model, parameter) sample.
    t: The current time.
    models: List of all models.
    summary_statistics:
        Function to compute summary statistics from model output.
    distance_function: The distance function.
    eps: The epsilon threshold.
    acceptor: The acceptor.
    x_0: The observed summary statistics.
    weight_function: Function by which to reweight the sample.
    proposal_id: Id of the transition kernel.

    Returns
    -------
    particle: A particle containing all information.
    """
    # simulate, compute distance, and check acceptance in one call
    model_result = models[m_ss].accept(
        t, theta_ss, summary_statistics, distance_function, eps, acceptor, x_0
    )
    # rejected samples carry zero weight
    weight = (
        weight_function(m_ss, theta_ss, model_result.weight)
        if model_result.accepted
        else 0
    )
    return Particle(
        m=m_ss,
        parameter=theta_ss,
        weight=weight,
        sum_stat=model_result.sum_stat,
        distance=model_result.distance,
        accepted=model_result.accepted,
        preliminary=False,
        proposal_id=proposal_id,
    )
def create_prior_pdf(
    model_prior: RV, parameter_priors: List[Distribution]
) -> Callable:
    """Create a function that calculates a sample's prior density.

    Parameters
    ----------
    model_prior: The model prior.
    parameter_priors: The parameter priors, one for each model.

    Returns
    -------
    prior_pdf: The prior density function.
    """
    def prior_pdf(m_ss, theta_ss):
        # joint prior = discrete model prior mass * parameter prior density
        return model_prior.pmf(m_ss) * parameter_priors[m_ss].pdf(theta_ss)

    return prior_pdf
def create_transition_pdf(
    transitions: List[Transition],
    model_probabilities: pd.DataFrame,
    model_perturbation_kernel: ModelPerturbationKernel,
) -> Callable:
    """Create the transition probability density function for time `t`.

    The proposal density of (m_ss, theta_ss) is the model-switch probability,
    marginalized over the previous generation's model probabilities, times
    the parameter transition density under model m_ss.

    Parameters
    ----------
    transitions: The list of parameter transition functions.
    model_probabilities: The last generation's model probabilities.
    model_perturbation_kernel: The kernel perturbing the models.

    Returns
    -------
    transition_pdf: The transition density function.
    """
    def transition_pdf(m_ss, theta_ss):
        # marginal probability of proposing model m_ss
        model_factor = sum(
            row.p * model_perturbation_kernel.pmf(m_ss, prev_m)
            for prev_m, row in model_probabilities.iterrows()
        )
        # density of proposing theta_ss under model m_ss's kernel
        particle_factor = transitions[m_ss].pdf(theta_ss)
        transition_pd = model_factor * particle_factor
        if transition_pd == 0:
            logger.debug("Transition density is zero!")
        return transition_pd

    return transition_pdf
def create_weight_function(
    prior_pdf: Callable,
    transition_pdf: Callable,
) -> Callable:
    """Create a function that calculates a sample's importance weight.

    The total weight is the acceptance-step weight times the ratio of prior
    density to transition (proposal) density.

    Parameters
    ----------
    prior_pdf: The prior density.
    transition_pdf: The transition density.

    Returns
    -------
    weight_function: The importance sample weight function.
    """
    def weight_function(m_ss, theta_ss, acceptance_weight: float):
        """Return the total importance weight for one sample.

        Parameters
        ----------
        m_ss: The model sample.
        theta_ss: The parameter sample.
        acceptance_weight: The acceptance weight sample. In most cases 1.
        """
        # densities can coincide in generation 0, where the proposal IS the prior
        p_density = prior_pdf(m_ss, theta_ss)
        t_density = transition_pdf(m_ss, theta_ss)
        # same evaluation order as a * p / t to keep float results identical
        return acceptance_weight * p_density / t_density

    return weight_function
def create_simulate_function(
    t: int,
    model_probabilities: pd.DataFrame,
    model_perturbation_kernel: ModelPerturbationKernel,
    transitions: List[Transition],
    model_prior: RV,
    parameter_priors: List[Distribution],
    models: List[Model],
    summary_statistics: Callable,
    x_0: dict,
    distance_function: Distance,
    eps: Epsilon,
    acceptor: Acceptor,
    evaluate: bool = True,
    proposal_id: int = 0,
) -> Callable:
    """
    Create a simulation function which performs the sampling of parameters,
    simulation of data and acceptance checking, and which is then passed
    to the sampler.

    Parameters
    ----------
    t: The time index to simulate for.
    model_probabilities: The last generation's model probabilities.
    model_perturbation_kernel: The model perturbation kernel.
    transitions: The parameter transition kernels.
    model_prior: The model prior.
    parameter_priors: The parameter priors.
    models: List of all models.
    summary_statistics:
        Function to compute summary statistics from model output.
    x_0: The observed summary statistics.
    distance_function: The distance function.
    eps: The epsilon threshold.
    acceptor: The acceptor.
    evaluate:
        Whether to actually evaluate the sample. Should be True except for
        certain preliminary settings.
    proposal_id:
        Identifier for the proposal distribution.

    Returns
    -------
    simulate_one: callable
        Function that samples parameters, simulates data, and checks
        acceptance.

    .. note::
        For some of the samplers, the sampling function needs to be
        serialized in order to be transported to where the sampling
        happens. Therefore, the returned function should be light, and
        in particular not contain references to the ABCSMC class.
    """
    # snapshot model indices/probabilities as plain arrays so the sampling
    # loop does not query the DataFrame repeatedly
    alive_models = np.array(model_probabilities.index)
    alive_probs = np.array(model_probabilities.p)

    # prior density, reused by the weight function
    prior_pdf = create_prior_pdf(
        model_prior=model_prior, parameter_priors=parameter_priors
    )
    # in generation 0 proposals come straight from the prior, so the
    # transition density equals the prior density
    transition_pdf = (
        prior_pdf
        if t == 0
        else create_transition_pdf(
            transitions=transitions,
            model_probabilities=model_probabilities,
            model_perturbation_kernel=model_perturbation_kernel,
        )
    )
    weight_function = create_weight_function(
        prior_pdf=prior_pdf, transition_pdf=transition_pdf
    )

    def simulate_one():
        # propose a (model, parameter) pair with positive prior density
        proposal = generate_valid_proposal(
            t=t,
            m=alive_models,
            p=alive_probs,
            model_prior=model_prior,
            parameter_priors=parameter_priors,
            model_perturbation_kernel=model_perturbation_kernel,
            transitions=transitions,
        )
        if evaluate:
            # full pipeline: simulate, compute distance, check acceptance
            return evaluate_proposal(
                *proposal,
                t=t,
                models=models,
                summary_statistics=summary_statistics,
                distance_function=distance_function,
                eps=eps,
                acceptor=acceptor,
                x_0=x_0,
                weight_function=weight_function,
                proposal_id=proposal_id,
            )
        # preliminary mode: simulate data only, defer evaluation
        return only_simulate_data_for_proposal(
            *proposal,
            t=t,
            models=models,
            summary_statistics=summary_statistics,
            weight_function=weight_function,
            proposal_id=proposal_id,
        )

    return simulate_one
def only_simulate_data_for_proposal(
m_ss: int,
theta_ss: Parameter,
t: int,
models: List[Model],
summary_statistics: Callable,
weight_function: Callable,
proposal_id: int,
) -> Particle:
"""Simulate data for parameters.
Similar to `evaluate_proposal`, however here for the passed parameters
only data are simulated, but no distances calculated or acceptance
checked. That needs to be | |
construct rd_digests.'
if rd_digests_dict is not None:
cache.delete('rd_digests_dict')
cache.set('rd_digests_dict', rd_digests_dict, timeout=get_cache_timeout())
else:
print 'Failed to construct rd_digests_dict.'
if debug: print '\n\t-- Reference designators (%d) have (%d) rd_digests' % (len(rds), len(rd_digests))
end = dt.datetime.now()
if time:
print '\t-- End time: ', end
print '\t-- Time to complete: %s' % (str(end - digests_start))
print ' -- Completed compiling reference designator digests... '
if time:
print '\n\t-- End time: ', end
print '\t-- Time (total) to complete: %s' % (str(end - start))
print '-- Completed building reference designator digests...\n'
return rd_digests, rd_digests_dict # 2017-02-01
except Exception as err:
message = str(err)
current_app.logger.info(message)
return None, None # 2017-02-01
def build_rd_digest_cache(rds):
    """
    Create a cache for reference designator to current [operational] asset uid deployment digest.

    Returns (return_list, return_dict) where return_list holds one formatted
    digest per qualifying asset and return_dict keys the same digests by
    reference designator; returns (None, None) on any failure.
    """
    debug = False
    # NOTE(review): 'time' here is a local bool flag that shadows any
    # module-level time import; rename would be safer — confirm before changing.
    time = True
    return_list = []
    return_dict = {}
    try:
        if debug: print '\n debug -- Number of rds: ', len(rds)
        if not rds or rds is None:
            message = 'No reference designator to process, unable to build rd_digest cache.'
            raise Exception(message)
        start = dt.datetime.now()
        if time:
            print '\n\t-- Preparing information before processing... '
            print '\t-- Start time: ', start
        #=======================================
        # Get assets_dict
        assets_dict = get_assets_dict()
        if assets_dict is None:
            message = 'Failed to retrieve assets_dict.'
            raise Exception(message)
        # Get uid_digests
        uid_digests = get_uid_digests()
        if uid_digests is None or not uid_digests:
            message = 'Failed to retrieve uid_digests.'
            raise Exception(message)
        # disabled code path: operational-only digests
        """
        # Get uid_digests operational
        uid_digests_operational = get_uid_digests_operational()
        if uid_digests_operational is None or not uid_digests_operational:
            message = 'Failed to retrieve uid_digests_operational.'
            raise Exception(message)
        """
        #=======================================
        end = dt.datetime.now()
        if time:
            print '\t-- End time: ', end
            print '\t-- Time to get information: %s' % (str(end - start))
            print '\t-- Completed preparing information before processing... '
        #count = 0
        if debug:
            print '\n debug -- len(assets_dict): ', len(assets_dict)
            print '\n debug -- len(uid_digests): ', len(uid_digests)
        # Get all asset reference designators and current digest information available.
        # NOTE(review): loop variable 'id' shadows the builtin of the same name.
        for id, asset in assets_dict.iteritems():
            asset_type = None
            if 'assetType' in asset:
                asset_type = asset['assetType']
            if asset_type is None or not asset_type or len(asset_type) == 0:
                continue
            # only asset types that can have deployments are processed
            if asset_type not in ['Platform', 'Mooring', 'Node', 'Instrument', 'Sensor']:
                continue
            if asset_type is None:
                continue
            # Use uid_digests to process all assets of types which may have deployments.
            asset_uid = None
            if 'uid' in asset:
                asset_uid = asset['uid']
            if asset_uid is None or not asset_uid or len(asset_uid) == 0:
                continue
            current_digest = None
            # disabled code path: prefer operational digest when available
            """
            if asset_uid in uid_digests_operational:
                current_digest = uid_digests_operational[asset_uid]
            """
            if asset_uid in uid_digests:
                current_digest = uid_digests[asset_uid]
            if current_digest is None or not current_digest or len(current_digest) == 0:
                continue
            asset_rd = get_rd_from_uid_digest(asset_type, current_digest)
            if asset_rd is None or not asset_rd or len(asset_rd) == 0:
                continue
            # Build digest for reference designator.
            work = format_rd_digest(asset)
            # Add deployment data from uid_digest.
            if work is not None:
                work['latitude'] = current_digest['latitude']
                work['longitude'] = current_digest['longitude']
                work['depth'] = current_digest['depth']
                work['waterDepth'] = current_digest['waterDepth']
                return_list.append(work)
                return_dict[work['reference_designator']] = work
        if debug:
            print '\n debug -- return_list: ', len(return_list)
            dump_dict(return_list[0], debug)
        return return_list, return_dict
    except Exception as err:
        message = str(err)
        current_app.logger.info(message)
        return None, None
# Single uid, get fresh rd digest. (Uses uframe to get asset.)
def get_fresh_rd_digest(uid):
from ooiservices.app.uframe.asset_tools import format_asset_for_ui
debug = False
try:
if uid is None or not uid or len(uid) == 0:
message = 'No asset uid provided, empty or null; unable to provide rd digest.'
raise Exception(message)
# Get uid_digests
uid_digests = get_uid_digests()
if uid_digests is None or not uid_digests:
message = 'Failed to retrieve uid_digests.'
raise Exception(message)
current_digest = None
if uid in uid_digests:
if debug: print '\n debug: uid %s in uid_digests...' % uid
current_digest = uid_digests[uid]
if debug:
print '\n debug -- current_digest: '
dump_dict(current_digest, debug)
if current_digest is None or not current_digest or len(current_digest) == 0:
message = 'Asset uid \'%s\' returned null or empty uid_digest.' % uid
raise Exception(message)
asset = uframe_get_asset_by_uid(uid)
if debug:
print '\n debug -- asset: '
dump_dict(asset, debug)
if not asset or asset is None:
message = 'Failed to get asset with uid: %s' % uid
current_app.logger.info(message)
raise Exception(message)
asset_type = None
if 'assetType' in asset:
asset_type = asset['assetType']
if not asset_type or asset_type is None:
message = 'Failed to get asset_type for asset with uid: %s' % uid
raise Exception(message)
if debug: print '\n debug -- asset_type: ', asset_type
asset_rd = get_rd_from_uid_digest(asset_type, current_digest)
if asset_rd is None or not asset_rd or len(asset_rd) == 0:
message = 'Asset uid returned null or empty reference designator.'
raise Exception(message)
# Build digest for reference designator.
ui_asset = format_asset_for_ui(asset)
work = format_rd_digest(asset)
# Add deployment data.
if work is not None:
work['latitude'] = current_digest['latitude']
work['longitude'] = current_digest['longitude']
work['depth'] = current_digest['depth']
work['waterDepth'] = current_digest['waterDepth']
return asset_rd, work
except Exception as err:
message = str(err)
current_app.logger.info(message)
return None, None
def get_rd_from_uid_digest(asset_type, digest):
    """
    Derive the reference designator from a single uid digest for one asset type.

    Returns the assembled reference designator string, or None when the asset
    type or the digest fails validation (the reason is logged).
    """
    valid_types = ['Platform', 'Mooring', 'Node', 'Instrument', 'Sensor']
    required_keys = ['subsite', 'node', 'sensor']
    try:
        # Only asset types that can carry deployments are accepted.
        if asset_type not in valid_types:
            message = 'Invalid asset type (\'%s\') provided to get_rd_from_uid_digest.' % asset_type
            current_app.logger.info(message)
            return None

        # Digest must be present and non-empty.
        if not digest:
            message = 'Empty or null digest provided to get_rd_from_uid_digest for %s.' % asset_type
            current_app.logger.info(message)
            return None

        # All required attributes must exist...
        if any(key not in digest for key in required_keys):
            message = 'Digest provided does not contain one or more required attribute(s) (subsite, node, sensor).'
            current_app.logger.info(message)
            return None

        # ...and carry non-empty values.
        for key in required_keys:
            if not digest[key]:
                message = 'Digest contains null or empty value for \'%s\'.' % key
                current_app.logger.info(message)
                return None

        # Assemble the rd from as many parts as the asset type requires.
        if asset_type == 'Node':
            return '-'.join([digest['subsite'], digest['node']])
        if asset_type in ['Instrument', 'Sensor']:
            return '-'.join([digest['subsite'], digest['node'], digest['sensor']])
        # Platform/Mooring: subsite alone identifies the asset.
        return digest['subsite']
    except Exception as err:
        message = 'Error getting reference designator for %s: %s' % (asset_type, str(err))
        current_app.logger.info(message)
        raise Exception(message)
def get_rd_digests_dict():
debug = False
try:
rd_digests_dict_cached = cache.get('rd_digests_dict')
if rd_digests_dict_cached and rd_digests_dict_cached is not None and 'error' not in rd_digests_dict_cached:
rd_digests_dict = rd_digests_dict_cached
else:
if debug: print '\n building rd_digest_cache...'
rd_digests, rd_digests_dict = build_rds_cache(refresh=True)
if rd_digests and rd_digests is not None:
cache.set('rd_digests', rd_digests, timeout=get_cache_timeout())
else:
message = 'rd_digests failed to provide data on load.'
raise Exception(message)
if rd_digests_dict and rd_digests_dict is not None:
cache.set('rd_digests_dict', rd_digests_dict, timeout=get_cache_timeout())
else:
message = 'rd_digests_dict failed to provide data on load.'
raise Exception(message)
return rd_digests_dict
except Exception as err:
message = str(err)
current_app.logger.info(message)
return None
def get_rds_digests():
    """
    Return the list of rd digests, serving from cache when available.

    On a cache miss both the list and the dict are rebuilt and cached.
    Returns None on failure (the error is logged).
    """
    try:
        cached = cache.get('rd_digests')
        if cached:
            return cached

        # Cache miss: rebuild both products, validate, then cache each.
        rd_digests, rd_digests_dict = build_rds_cache(refresh=True)
        if rd_digests is None or not rd_digests:
            raise Exception('rd_digests failed to provide data on load.')
        if rd_digests_dict is None or not rd_digests_dict:
            raise Exception('rd_digests_dict failed to provide data on load.')
        cache.set('rd_digests', rd_digests, timeout=get_cache_timeout())
        cache.set('rd_digests_dict', rd_digests_dict, timeout=get_cache_timeout())
        return rd_digests
    except Exception as err:
        current_app.logger.info(str(err))
        return None
def get_uid_digests(refresh=False):
    """ Get uid_digests, if cached then return 'uid_digests' cache, otherwise build cache.

    When refresh is True the cache is rebuilt unconditionally.
    Returns the uid_digests collection, or None on failure (error is logged).
    """
    # NOTE(review): this local name shadows any module-level 'time' import.
    time = True  # when True, print timing information to stdout
    uid_digests = None
    try:
        # Serve from cache unless a refresh was explicitly requested.
        if not refresh:
            uid_digests_cached = cache.get('uid_digests')
            if uid_digests_cached:
                uid_digests = uid_digests_cached
        # Build (or rebuild) the cache on refresh or cache miss.
        if refresh or not uid_digests or uid_digests is None:
            start = dt.datetime.now()
            if time:
                print '\n-- Processing uframe uid_digests for reference designators... '
                print '\t-- Start time: ', start
            #uid_digests, uid_digests_operational = build_uid_digests_cache()
            uid_digests = build_uid_digests_cache()
            if not uid_digests or uid_digests is None:
                message = 'Failed to compile uid_digests cache.'
                raise Exception(message)
            # Replace the cache entry wholesale (delete then set).
            cache.delete('uid_digests')
            cache.set('uid_digests', uid_digests, timeout=get_cache_timeout())
            """
            if not uid_digests_operational or uid_digests_operational is None:
                message = 'Failed to compile uid_digests_operational cache.'
                raise Exception(message)
            cache.delete('uid_digests_operational')
            cache.set('uid_digests_operational', uid_digests_operational, timeout=get_cache_timeout())
            """
            end = dt.datetime.now()
            if time:
                print '\t-- End time: ', end
                print '\t-- Time to complete: %s' % (str(end - start))
        return uid_digests
    except Exception as err:
        message = str(err)
        current_app.logger.info(message)
        return None
def | |
"""Test the custom authorization class."""
import os
import uuid
import time
import secrets
import pytest
import jwt
import datetime
from rest_framework import status
from django.core.exceptions import SuspiciousOperation
from rest_framework.test import APIRequestFactory
from ..api.login import TokenAuthorizationOIDC
from ..api.logout_redirect_oidc import LogoutRedirectOIDC
from ..api.utils import (
generate_client_assertion,
generate_jwt_from_jwks,
generate_token_endpoint_parameters,
response_internal,
validate_nonce_and_state,
)
from ..authentication import CustomAuthentication
from ..models import User
# Private test key used to sign mock JWTs; must be present in the environment.
test_private_key = os.environ["JWT_CERT_TEST"]
class MockRequest:
    """Minimal stand-in for a requests.Response-like object."""

    def __init__(self, status_code=status.HTTP_200_OK, data=None):
        # Mirror the two attributes the login flow reads off a response.
        self.status_code = status_code
        self.data = data

    def json(self):
        """Return the canned payload, mimicking Response.json()."""
        return self.data
@pytest.mark.django_db
def test_authentication(user):
    """CustomAuthentication.authenticate resolves a user by username."""
    authenticated = CustomAuthentication().authenticate(username=user.username)
    assert authenticated.username == user.username
@pytest.mark.django_db
def test_get_user(user):
    """CustomAuthentication.get_user looks a user up by primary key."""
    fetched = CustomAuthentication().get_user(user.pk)
    assert fetched.username == user.username
@pytest.mark.django_db
def test_get_non_user(user):
    """get_user returns None for a primary key that matches no user."""
    unknown_pk = uuid.uuid1()
    assert CustomAuthentication().get_user(unknown_pk) is None
def test_oidc_auth(api_client):
    """The OIDC login endpoint redirects to the identity provider."""
    resp = api_client.get("/v1/login/oidc")
    assert resp.status_code == status.HTTP_302_FOUND
def test_oidc_logout_without_token(api_client):
    """Logout redirect works even when no token is in the session."""
    resp = api_client.get("/v1/logout/oidc")
    assert resp.status_code == status.HTTP_302_FOUND
def test_oidc_logout_with_token(api_client):
    """Logout redirect also works when a token is present in the session."""
    request = APIRequestFactory().get("/v1/logout/oidc")
    request.session = api_client.session
    request.session["token"] = "testtoken"
    response = LogoutRedirectOIDC.as_view()(request)
    assert response.status_code == status.HTTP_302_FOUND
@pytest.mark.django_db
def test_auth_update(api_client, user):
    """Each auth_check call should push the id_token cookie expiry forward."""
    def id_token_expiry():
        # Parse the 'expires' timestamp off the id_token cookie.
        # (Was duplicated inline twice; extracted for clarity.)
        cookie = api_client.cookies.get("id_token")
        return datetime.datetime.strptime(cookie["expires"], "%a, %d %b %Y %H:%M:%S %Z")

    api_client.login(username=user.username, password="<PASSWORD>")
    api_client.get("/v1/auth_check")
    first_expiry = id_token_expiry()
    # The cookie expiry has whole-second resolution, so wait one second
    # before the second check to guarantee a strictly later expiry.
    time.sleep(1)
    api_client.get("/v1/auth_check")
    assert first_expiry < id_token_expiry()
@pytest.mark.django_db
def test_logout(api_client, user):
    """Logging out a signed-in user issues a redirect."""
    api_client.login(username=user.username, password="<PASSWORD>")
    assert api_client.get("/v1/logout").status_code == status.HTTP_302_FOUND
@pytest.mark.django_db
def test_login_without_code(api_client):
    """Login with only a state parameter redirects back."""
    resp = api_client.get("/v1/login", {"state": "dummy"})
    assert resp.status_code == status.HTTP_302_FOUND
@pytest.mark.django_db
def test_login_fails_without_state(api_client):
    """Login with only a code parameter redirects back."""
    resp = api_client.get("/v1/login", {"code": "dummy"})
    assert resp.status_code == status.HTTP_302_FOUND
@pytest.mark.django_db
def test_login_with_valid_state_and_code(mocker, api_client):
    """A matching state/nonce pair with a valid code redirects the user."""
    os.environ["JWT_KEY"] = test_private_key
    nonce, state = "testnonce", "teststate"
    code = secrets.token_hex(32)

    # Stub the token endpoint and the JWT decode so no network/crypto runs.
    mocker.patch(
        "tdpservice.users.api.login.requests.post",
        return_value=MockRequest(data={
            "access_token": "<KEY>",
            "token_type": "Bearer",
            "expires_in": 3600,
            "id_token": os.environ["MOCK_TOKEN"],
        }),
    )
    mocker.patch(
        "tdpservice.users.api.login.jwt.decode",
        return_value={
            "email": "<EMAIL>",
            "email_verified": True,
            "nonce": nonce,
            "iss": "https://idp.int.identitysandbox.gov",
            "sub": "b2d2d115-1d7e-4579-b9d6-f8e84f4f56ca",
            "verified_at": 1577854800,
        },
    )

    request = APIRequestFactory().get("/v1/login", {"state": state, "code": code})
    request.session = api_client.session
    request.session["state_nonce_tracker"] = {
        "nonce": nonce,
        "state": state,
        "added_on": time.time(),
    }
    response = TokenAuthorizationOIDC.as_view()(request)
    assert response.status_code == status.HTTP_302_FOUND
@pytest.mark.django_db
def test_login_with_existing_token(mocker, api_client):
    """Login should proceed when token already exists."""
    os.environ["JWT_KEY"] = test_private_key
    nonce = "testnonce"
    state = "teststate"
    code = secrets.token_hex(32)
    # Stub the token endpoint so no network call is made.
    mock_post = mocker.patch("tdpservice.users.api.login.requests.post")
    token = {
        "access_token": "<KEY>",
        "token_type": "Bearer",
        "expires_in": 3600,
        "id_token": os.environ["MOCK_TOKEN"],
    }
    # Stub JWT decoding to yield a verified payload with a matching nonce.
    mock_decode = mocker.patch("tdpservice.users.api.login.jwt.decode")
    decoded_token = {
        "email": "<EMAIL>",
        "email_verified": True,
        "nonce": nonce,
        "iss": "https://idp.int.identitysandbox.gov",
        "sub": "b2d2d115-1d7e-4579-b9d6-f8e84f4f56ca",
        "verified_at": 1577854800,
    }
    mock_post.return_value = MockRequest(data=token)
    mock_decode.return_value = decoded_token
    factory = APIRequestFactory()
    view = TokenAuthorizationOIDC.as_view()
    request = factory.get("/v1/login", {"state": state, "code": code})
    request.session = api_client.session
    # Session already holds a token; login must still redirect successfully.
    request.session["token"] = "testtoken"
    request.session["state_nonce_tracker"] = {
        "nonce": nonce,
        "state": state,
        "added_on": time.time(),
    }
    response = view(request)
    assert response.status_code == status.HTTP_302_FOUND
@pytest.mark.django_db
def test_login_with_general_exception(mocker):
    """Test login with state and code."""
    os.environ["JWT_KEY"] = test_private_key
    nonce = "testnonce"
    state = "teststate"
    code = secrets.token_hex(32)
    # Stub the token endpoint so no network call is made.
    mock_post = mocker.patch("tdpservice.users.api.login.requests.post")
    token = {
        "access_token": "<KEY>",
        "token_type": "Bearer",
        "expires_in": 3600,
        "id_token": os.environ["MOCK_TOKEN"],
    }
    # Stub JWT decoding to yield a verified payload with a matching nonce.
    mock_decode = mocker.patch("tdpservice.users.api.login.jwt.decode")
    decoded_token = {
        "email": "<EMAIL>",
        "email_verified": True,
        "nonce": nonce,
        "iss": "https://idp.int.identitysandbox.gov",
        "sub": "b2d2d115-1d7e-4579-b9d6-f8e84f4f56ca",
        "verified_at": 1577854800,
    }
    mock_post.return_value = MockRequest(data=token)
    mock_decode.return_value = decoded_token
    factory = APIRequestFactory()
    view = TokenAuthorizationOIDC.as_view()
    request = factory.get("/v1/login", {"state": state, "code": code})
    # A custom session will throw a general exception
    # (a plain dict lacks the Django session API the view relies on).
    request.session = {}
    request.session["state_nonce_tracker"] = {
        "nonce": nonce,
        "state": state,
        "added_on": time.time(),
    }
    response = view(request)
    # The view should degrade to a 400 with its generic failure message.
    assert response.status_code == status.HTTP_400_BAD_REQUEST
    assert response.data == {
        "error": (
            "Email verified, but experienced internal issue " "with login/registration."
        )
    }
@pytest.mark.django_db
def test_login_with_inactive_user(mocker, api_client, inactive_user):
    """Login with inactive user should error and return message."""
    os.environ["JWT_KEY"] = test_private_key
    inactive_user.username = "<EMAIL>"
    inactive_user.save()
    nonce = "testnonce"
    state = "teststate"
    code = secrets.token_hex(32)
    # Stub the token endpoint so no network call is made.
    mock_post = mocker.patch("tdpservice.users.api.login.requests.post")
    token = {
        "access_token": "<PASSWORD>I55jzjBvZpNQ",
        "token_type": "Bearer",
        "expires_in": 3600,
        "id_token": os.environ["MOCK_TOKEN"],
    }
    # Decoded token 'sub' matches the inactive user's login.gov uuid,
    # so the view resolves the user but must reject the login.
    mock_decode = mocker.patch("tdpservice.users.api.login.jwt.decode")
    decoded_token = {
        "email": "<EMAIL>",
        "email_verified": True,
        "nonce": nonce,
        "iss": "https://idp.int.identitysandbox.gov",
        "sub": inactive_user.login_gov_uuid,
        "verified_at": 1577854800,
    }
    mock_post.return_value = MockRequest(data=token)
    mock_decode.return_value = decoded_token
    factory = APIRequestFactory()
    view = TokenAuthorizationOIDC.as_view()
    request = factory.get("/v1/login", {"state": state, "code": code})
    request.session = api_client.session
    request.session["state_nonce_tracker"] = {
        "nonce": nonce,
        "state": state,
        "added_on": time.time(),
    }
    response = view(request)
    assert response.status_code == status.HTTP_401_UNAUTHORIZED
    assert response.data == {
        "error": f'Login failed, user account is inactive: {inactive_user.username}'
    }
@pytest.mark.django_db
def test_login_with_existing_user(mocker, api_client, user):
    """An already-registered user logs in and gets redirected."""
    os.environ["JWT_KEY"] = test_private_key
    user.username = "<EMAIL>"
    user.save()
    nonce, state = "testnonce", "teststate"
    code = secrets.token_hex(32)

    # Stub the token endpoint and JWT decode; 'sub' matches the fixture user.
    mocker.patch(
        "tdpservice.users.api.login.requests.post",
        return_value=MockRequest(data={
            "access_token": "<KEY>",
            "token_type": "Bearer",
            "expires_in": 3600,
            "id_token": os.environ["MOCK_TOKEN"],
        }),
    )
    mocker.patch(
        "tdpservice.users.api.login.jwt.decode",
        return_value={
            "email": "<EMAIL>",
            "email_verified": True,
            "nonce": nonce,
            "iss": "https://idp.int.identitysandbox.gov",
            "sub": user.login_gov_uuid,
            "verified_at": 1577854800,
        },
    )

    request = APIRequestFactory().get("/v1/login", {"state": state, "code": code})
    request.session = api_client.session
    request.session["state_nonce_tracker"] = {
        "nonce": nonce,
        "state": state,
        "added_on": time.time(),
    }
    response = TokenAuthorizationOIDC.as_view()(request)
    assert response.status_code == status.HTTP_302_FOUND
@pytest.mark.django_db
def test_login_with_old_email(mocker, api_client, user):
    """Login should work with existing user."""
    os.environ["JWT_KEY"] = test_private_key
    # The stored username predates the email in the decoded token; the view
    # is expected to update the username to the new email on login.
    user.username = "<EMAIL>"
    user.save()
    nonce = "testnonce"
    state = "teststate"
    code = secrets.token_hex(32)
    # Stub the token endpoint so no network call is made.
    mock_post = mocker.patch("tdpservice.users.api.login.requests.post")
    token = {
        "access_token": "<KEY>",
        "token_type": "Bearer",
        "expires_in": 3600,
        "id_token": os.environ["MOCK_TOKEN"],
    }
    # Stub JWT decoding; 'sub' matches the existing user's login.gov uuid.
    mock_decode = mocker.patch("tdpservice.users.api.login.jwt.decode")
    decoded_token = {
        "email": "<EMAIL>",
        "email_verified": True,
        "nonce": nonce,
        "iss": "https://idp.int.identitysandbox.gov",
        "sub": user.login_gov_uuid,
        "verified_at": 1577854800,
    }
    mock_post.return_value = MockRequest(data=token)
    mock_decode.return_value = decoded_token
    factory = APIRequestFactory()
    view = TokenAuthorizationOIDC.as_view()
    request = factory.get("/v1/login", {"state": state, "code": code})
    request.session = api_client.session
    request.session["state_nonce_tracker"] = {
        "nonce": nonce,
        "state": state,
        "added_on": time.time(),
    }
    response = view(request)
    # Ensure the user's username was updated with new email.
    assert User.objects.filter(username="<EMAIL>").exists()
    assert response.status_code == status.HTTP_302_FOUND
@pytest.mark.django_db
def test_login_with_initial_superuser(mocker, api_client, user):
    """Login should work with existing user."""
    # How to set os vars for sudo su??
    os.environ["JWT_KEY"] = test_private_key
    # DJANGO_SU_NAME marks this username as the initial superuser account.
    os.environ["DJANGO_SU_NAME"] = "<EMAIL>"
    user.username = "<EMAIL>"
    # No login.gov uuid yet: the view should attach it on first login.
    user.login_gov_uuid = None
    user.save()
    nonce = "testnonce"
    state = "teststate"
    code = secrets.token_hex(32)
    # Stub the token endpoint so no network call is made.
    mock_post = mocker.patch("tdpservice.users.api.login.requests.post")
    token = {
        "access_token": "<KEY>",
        "token_type": "Bearer",
        "expires_in": 3600,
        "id_token": os.environ["MOCK_TOKEN"],
    }
    # Stub JWT decoding to yield a verified payload with a matching nonce.
    mock_decode = mocker.patch("tdpservice.users.api.login.jwt.decode")
    decoded_token = {
        "email": "<EMAIL>",
        "email_verified": True,
        "nonce": nonce,
        "iss": "https://idp.int.identitysandbox.gov",
        "sub": "b2d2d115-1d7e-4579-b9d6-f8e84f4f66ca",
        "verified_at": 1577854800,
    }
    mock_post.return_value = MockRequest(data=token)
    mock_decode.return_value = decoded_token
    factory = APIRequestFactory()
    view = TokenAuthorizationOIDC.as_view()
    request = factory.get("/v1/login", {"state": state, "code": code})
    request.session = api_client.session
    request.session["state_nonce_tracker"] = {
        "nonce": nonce,
        "state": state,
        "added_on": time.time(),
    }
    response = view(request)
    # The login.gov uuid from the token should now be stored on the user.
    user = User.objects.get(username="<EMAIL>")
    assert str(user.login_gov_uuid) == decoded_token["sub"]
    assert response.status_code == status.HTTP_302_FOUND
@pytest.mark.django_db
def test_login_with_expired_token(mocker, api_client):
    """An expired id_token yields a 401 with the expiry error message."""
    os.environ["JWT_KEY"] = test_private_key
    nonce, state = "testnonce", "teststate"
    code = secrets.token_hex(32)

    # Token endpoint succeeds, but decoding raises ExpiredSignatureError.
    mocker.patch(
        "tdpservice.users.api.login.requests.post",
        return_value=MockRequest(data={
            "access_token": "<KEY>",
            "token_type": "Bearer",
            "expires_in": 3600,
            "id_token": os.environ["MOCK_TOKEN"],
        }),
    )
    mocker.patch(
        "tdpservice.users.api.login.jwt.decode",
        side_effect=jwt.ExpiredSignatureError(),
    )

    request = APIRequestFactory().get("/v1/login", {"state": state, "code": code})
    request.session = api_client.session
    request.session["state_nonce_tracker"] = {
        "nonce": nonce,
        "state": state,
        "added_on": time.time(),
    }
    response = TokenAuthorizationOIDC.as_view()(request)
    assert response.status_code == status.HTTP_401_UNAUTHORIZED
    assert response.data == {"error": "The token is expired."}
@pytest.mark.django_db
def test_login_with_bad_validation_code(mocker, api_client):
    """Login should error with a bad validation code."""
    os.environ["JWT_KEY"] = test_private_key
    nonce = "testnonce"
    state = "teststate"
    code = secrets.token_hex(32)
    # The token endpoint rejects the code with a 400; no token is issued.
    mock_post = mocker.patch("tdpservice.users.api.login.requests.post")
    mock_post.return_value = MockRequest(
        data={}, status_code=status.HTTP_400_BAD_REQUEST
    )
    factory = APIRequestFactory()
    view = TokenAuthorizationOIDC.as_view()
    request = factory.get("/v1/login", {"state": state, "code": code})
    request.session = api_client.session
    request.session["state_nonce_tracker"] = {
        "nonce": nonce,
        "state": state,
        "added_on": time.time(),
    }
    response = view(request)
    assert response.status_code == status.HTTP_400_BAD_REQUEST
    assert response.data == {
        "error": "Invalid Validation Code Or OpenID Connect Authenticator Down!"
    }
@pytest.mark.django_db
def test_login_with_bad_nonce_and_state(mocker, api_client):
    """Login should error with a bad nonce and state."""
    os.environ["JWT_KEY"] = test_private_key
    nonce = "testnonce"
    state = "teststate"
    code = secrets.token_hex(32)
    # Stub the token endpoint so no network call is made.
    mock_post = mocker.patch("tdpservice.users.api.login.requests.post")
    token = {
        "access_token": "<KEY>",
        "token_type": "Bearer",
        "expires_in": 3600,
        "id_token": os.environ["MOCK_TOKEN"],
    }
    # Stub JWT decoding to yield a verified payload with a matching nonce.
    mock_decode = mocker.patch("tdpservice.users.api.login.jwt.decode")
    decoded_token = {
        "email": "<EMAIL>",
        "email_verified": True,
        "nonce": nonce,
        "iss": "https://idp.int.identitysandbox.gov",
        "sub": "b2d2d115-1d7e-4579-b9d6-f8e84f4f56ca",
        "verified_at": 1577854800,
    }
    mock_post.return_value = MockRequest(data=token)
    mock_decode.return_value = decoded_token
    factory = APIRequestFactory()
    view = TokenAuthorizationOIDC.as_view()
    request = factory.get("/v1/login", {"state": state, "code": code})
    request.session = api_client.session
    # Session tracker deliberately disagrees with the request's state/nonce,
    # which the view treats as a possible CSRF attempt.
    request.session["state_nonce_tracker"] = {
        "nonce": "badnonce",
        "state": "badstate",
        "added_on": time.time(),
    }
    with pytest.raises(SuspiciousOperation):
        view(request)
@pytest.mark.django_db
def test_login_with_email_unverified(mocker, api_client):
"""Login should faild with unverified email."""
os.environ["JWT_KEY"] = test_private_key
nonce = "testnonce"
state = "teststate"
code = secrets.token_hex(32)
mock_post = mocker.patch("tdpservice.users.api.login.requests.post")
| |
# Created byMartin.cz
# Copyright (c) <NAME>. All rights reserved.
from .. properties import *
class Frame(object):
"""
Represents a rectangular frame defined by its top-left coordinates, width
and height.
"""
def __init__(self, x, y, width=0, height=0):
    """
    Initializes a new instance of Frame.

    Args:
        x: int or float
            X-coordinate of the top left corner.
        y: int or float
            Y-coordinate of the top left corner.
        width: int or float
            Full width.
        height: int or float
            Full height.
    """

    # set values
    # right/bottom are derived so they stay consistent with width/height
    self._left = x
    self._top = y
    self._right = x + width
    self._bottom = y + height
    self._width = width
    self._height = height
    # presumably set True by _check_values for negative width/height — body
    # of _check_values is not visible here; TODO confirm
    self._reversed = False

    # check values
    self._check_values()
def __str__(self):
    """Gets standard string representation (left, top, width, height)."""
    # self.rect is exactly the (left, top, width, height) tuple.
    return "%f, %f, %f, %f" % self.rect
# --- position and size ------------------------------------------------

@property
def x(self):
    """Gets x-coordinate of the left corner."""
    return self._left

@property
def y(self):
    """Gets y-coordinate of the top corner."""
    return self._top

@property
def width(self):
    """Gets full width."""
    return self._width

@property
def height(self):
    """Gets full height."""
    return self._height

# --- edges ------------------------------------------------------------

@property
def left(self):
    """Gets x-coordinate of the left."""
    return self._left

@property
def right(self):
    """Gets x-coordinate of the right."""
    return self._right

@property
def top(self):
    """Gets y-coordinate of the top."""
    return self._top

@property
def bottom(self):
    """Gets y-coordinate of the bottom."""
    return self._bottom

@property
def center(self):
    """Gets coordinates of the center as an (x, y) tuple."""
    return (
        0.5 * (self._left + self._right),
        0.5 * (self._top + self._bottom))

# --- corner coordinates (shorthand aliases) ---------------------------

@property
def x1(self):
    """Gets x-coordinate of the top left corner."""
    return self._left

@property
def y1(self):
    """Gets y-coordinate of the top left corner."""
    return self._top

@property
def x2(self):
    """Gets x-coordinate of the top right corner."""
    return self._right

@property
def y2(self):
    """Gets y-coordinate of the bottom right corner."""
    return self._bottom

@property
def cx(self):
    """Gets x-coordinate of the center."""
    return 0.5 * (self._left + self._right)

@property
def cy(self):
    """Gets y-coordinate of the center."""
    return 0.5 * (self._top + self._bottom)

@property
def tl(self):
    """Gets coordinates of the top-left corner."""
    return self._left, self._top

@property
def tr(self):
    """Gets coordinates of the top-right corner."""
    return self._right, self._top

@property
def bl(self):
    """Gets coordinates of the bottom-left corner."""
    return self._left, self._bottom

@property
def br(self):
    """Gets coordinates of the bottom-right corner."""
    return self._right, self._bottom

@property
def c(self):
    """Gets coordinates of the center."""
    return self.center

@property
def w(self):
    """Gets full width."""
    return self._width

@property
def h(self):
    """Gets full height."""
    return self._height

@property
def wh(self):
    """Gets width and height."""
    return self._width, self._height

# --- composite views --------------------------------------------------

@property
def rect(self):
    """Gets rectangle as x, y, width, height."""
    return self._left, self._top, self._width, self._height

@property
def points(self):
    """Gets rectangle as p1, p2, p3, p3 points starting from top left."""
    # clockwise from top-left
    p1 = (self._left, self._top)
    p2 = (self._right, self._top)
    p3 = (self._right, self._bottom)
    p4 = (self._left, self._bottom)
    return p1, p2, p3, p4

@property
def reversed(self):
    """Returns True if the frame had originally negative width or height."""
    return self._reversed
def clone(self):
    """
    Creates exact clone of current frame.

    Returns:
        pero.Frame
            Cloned frame.
    """
    # self.rect unpacks to (x, y, width, height) — the constructor signature.
    copy = Frame(*self.rect)
    copy._reversed = self._reversed
    return copy
def offset(self, x=0, y=0):
    """
    Shifts current frame by specified value in x and y directions.

    Args:
        x: int, float or None
            X-coordinate offset.
        y: int, float or None
            Y-coordinate offset.
    """
    # normalize None to zero shift
    dx = x or 0
    dy = y or 0

    self._left += dx
    self._right += dx
    self._top += dy
    self._bottom += dy

    # check values
    self._check_values()
def shrink(self, top=0, right=0, bottom=0, left=0):
    """
    Shrinks current frame on each specified side.

    Args:
        top: int, float or None
            Top padding.
        right: int, float or None
            Right padding.
        bottom: int, float or None
            Bottom padding.
        left: int, float or None
            Left padding.
    """
    # Coerce None to 0 so the documented 'or None' contract actually holds
    # (the previous code crashed on an explicit None argument).
    self._left += left or 0
    self._right -= right or 0
    self._top += top or 0
    self._bottom -= bottom or 0
    self._width = self._right - self._left
    self._height = self._bottom - self._top

    # check values
    self._check_values()
def expand(self, top=0, right=0, bottom=0, left=0):
    """
    Expands current frame on each specified side.

    Args:
        top: int, float or None
            Top padding.
        right: int, float or None
            Right padding.
        bottom: int, float or None
            Bottom padding.
        left: int, float or None
            Left padding.
    """
    # Coerce None to 0 so the documented 'or None' contract actually holds
    # (the previous code crashed on an explicit None argument).
    self._left -= left or 0
    self._right += right or 0
    self._top -= top or 0
    self._bottom += bottom or 0
    self._width = self._right - self._left
    self._height = self._bottom - self._top

    # check values
    self._check_values()
def extend(self, x=None, y=None, width=0, height=0):
    """
    Extends current frame to include given coordinate, single point or
    additional frame.

    Args:
        x: int, float, pero.Frame or None
            X-coordinate or frame to include.
        y: int, float or None
            Y-coordinate to include.
        width: int or float
            Width of the frame to include.
        height: int or float
            Height of the frame to include.
    """

    # get values from frame
    if isinstance(x, Frame):
        x, y, width, height = x.rect

    # extend width
    # min/max over both own edges and the new span also renormalizes a
    # left/right pair that may have been stored reversed
    if x is not None:
        left = min(self._left, self._right, x, x+width)
        right = max(self._left, self._right, x, x+width)
        self._left = left
        self._right = right
        self._width = right - left

    # extend height
    if y is not None:
        top = min(self._top, self._bottom, y, y+height)
        bottom = max(self._top, self._bottom, y, y+height)
        self._top = top
        self._bottom = bottom
        self._height = bottom - top

    # check values
    self._check_values()
def union(self, other):
    """
    Creates a new frame containing union area of current frame
    and given frame.

    Args:
        other: pero.Frame
            Frame to union with.

    Returns:
        pero.Frame
            Union frame (always produced; never None).
    """

    # get x and width
    left = min(self._left, other._left)
    right = max(self._right, other._right)
    width = right - left

    # get y and height
    top = min(self._top, other._top)
    bottom = max(self._bottom, other._bottom)
    height = bottom - top

    return Frame(left, top, width, height)
def intersection(self, other):
    """
    Creates a new frame containing intersection area between current frame
    and given frame or None if no such area.

    Args:
        other: pero.Frame
            Frame to intersect with.

    Returns:
        pero.Frame or None
            Intersection frame or None of no overlap.
    """
    # overlapping span along x (empty or touching edges count as no overlap)
    x1 = max(self._left, other._left)
    x2 = min(self._right, other._right)
    if x2 - x1 <= 0:
        return None

    # overlapping span along y
    y1 = max(self._top, other._top)
    y2 = min(self._bottom, other._bottom)
    if y2 - y1 <= 0:
        return None

    return Frame(x1, y1, x2 - x1, y2 - y1)
def contains(self, x, y):
    """
    Checks if given point is inside current frame (borders included).

    Args:
        x: int, float
            X-coordinate to check.
        y: int, float
            Y-coordinate to check.

    Returns:
        bool
            Returns True if given point is inside, False otherwise.
    """
    inside_x = self._left <= x <= self._right
    inside_y = self._top <= y <= self._bottom
    return inside_x and inside_y
def overlaps(self, other):
    """
    Checks if there is any overlap between current frame and given frame.

    Args:
        other: pero.Frame
            Frame to check.

    Returns:
        bool
            Returns True if any overlap exists, False otherwise.
    """

    # x-intervals must intersect: other's left or right edge falls inside
    # this frame, or other spans this frame entirely
    if not ((self._left <= other._left <= self._right)
        or (self._left <= other._right <= self._right)
        or (other._left <= self._left and other._right >= self._right)):
        return False

    # same test for the y-intervals
    if not ((self._top <= other._top <= self._bottom)
        or (self._top <= other._bottom <= self._bottom)
        or (other._top <= self._top and other._bottom >= self._bottom)):
        return False

    return True
def _check_values(self):
| |
#!/usr/bin/python
#use to parse ms-sql-info nmap xml
#https://nmap.org/nsedoc/scripts/ms-sql-info.html
#nmap -Pn -n -p135,445,1433 --script ms-sql-info <host> -oX results-ms-sql-info.xml
#nmap -Pn -n -p135,445,1433 --script ms-sql-info -iL <hosts_file> -oX results-ms-sql-info.xml
# python3 mssql-info-parser.py results-ms-sql-info.xml
#
#
#
# #ip,port - use for pw guessing
# python3 mssql-info-parser.py results-ms-sql-info.xml | cut -d, -f1,2
#
# ip,port,winhostname,instancename,namedpipe
# python3 mssql-info-parser.py results-ms-sql-info.xml | cut -d, -f1,2,3,4,10
#
#
# python3 mssql-info-parser.py results-ms-sql-info.xml | cut -d, -f1,5
import xml.etree.ElementTree as ET
import sys
usage = "Usage: " + sys.argv[0] + " results-ms-sql-info.xml"

# Show usage and exit when no file was given or help was requested
# (previously three duplicated if-blocks).
if len(sys.argv) == 1 or "-h" in sys.argv or "--help" in sys.argv:
    print(usage)
    sys.exit()

masssql_file = sys.argv[1]
tree = ET.parse(masssql_file)
root = tree.getroot()

# Collected results, one entry per matching host / element:
ipSERV = []               # host IP addresses
dnsSERV = []              # DNS hostnames ('' when none reported)
winSERV = []              # Windows server names from ms-sql-info elems
scriSERV = []             # raw ms-sql-info script output per host
ipSERCO = []              # never populated; kept for compatibility
comboGetwinhostname = []  # "ip,winhostname" rows
soccETTT = []             # "ip,tcpport" rows

for host in root.findall('host'):
    # Only process hosts where the ms-sql-info hostscript actually ran.
    hostscripts = host.findall('hostscript')
    try:
        script_name = hostscripts[0].findall('script')[0].attrib['id']
    except IndexError:
        script_name = ''
    if script_name != 'ms-sql-info':
        continue

    ip_address = host.findall('address')[0].attrib['addr']
    ipSERV.append(ip_address)

    # DNS hostname is optional in nmap output.
    try:
        host_name = host.findall('hostnames')[0].findall('hostname')[0].attrib['name']
    except IndexError:
        host_name = ''
    dnsSERV.append(host_name)

    # Walk every <elem> of every <script> under this host for the details.
    for script in host.iter('script'):
        for elem in script.iter('elem'):
            # BUGFIX: a missing 'key' attribute raises KeyError, which the
            # old 'except IndexError' guards could never catch — use .get().
            key = elem.attrib.get('key')
            if key == 'Windows server name':
                comboGetwinhostname.append(ip_address + "," + elem.text)
                winSERV.append(elem.text)
            elif key == 'TCP port':
                soccETTT.append(ip_address + "," + elem.text)

    # Keep the full ms-sql-info output for this host as well.
    scriSERV.append(hostscripts[0].findall('script')[0].attrib['output'])

# NOTE(review): the usage header promises CSV on stdout for piping into cut,
# but no final print of the collected rows exists (it was commented out in
# the original) — confirm the intended output format before adding one.
#inhere is IPADDRESS
#print(comboGetwinhostname[0].split(',')[0])
#WINHOSTNAME
#print(comboGetwinhostname[0].split(',')[0] )
#
#
#thaarray IPADDRESS,winhostname
##try:
# x = len(comboGetwinhostname)
#print(x)
# #tempppIP = comboGetwinhostname[0].split(',')[0]
#print(comboGetwinhostname[x].split(',')[0])
#print(tempppIP)
#except:
# print("a111 ")
#debug
#print(ipSERV)
#print(dnsSERV)
#print(winSERV)
#print(scriSERV)
#print(ipSERCO)
#print tha mappings of IP,windowsServerr
#print(comboGetwinhostname)
#print(comboGetwinhostname[4])
#print(comboGetwinhostname[0].split(','))
# from mappings, dis tha IP address ONLY from the first column
#print(comboGetwinhostname[0].split(',')[0])
#print(comboGetwinhostname[1])
#print(comboGetwinhostname[1].split(',') )
#print(comboGetwinhostname[1].split(',') )
#try:
#x = len(comboGetwinhostname)
#print(x) #length starting at 1
#bombsssss maybe try -1 cause thats correct array size for last item
#print(comboGetwinhostname[x].split(',') )
#print(comboGetwinhostname[x-1].split(',') ) #shows last item
#print(comboGetwinhostname[x-1] )
#tempppIP = comboGetwinhostname[0].split(',')[0]
#print(comboGetwinhostname[x].split(',')[0])
#print(tempppIP)
#except:
# print("a111 000 :) ")
#print(soccETTT)
#print(ipSERV)
#####good luck..this takes array num from scriSERV,outputs parsed dict
def parsZZ(parMEplz):
    """Parse one ms-sql-info script output blob into a single CSV line.

    Returns "instance,name,number,product,servicepack,patches,namedpipe".
    The instance field falls back to "," when no "Instance" line was seen.
    Values are taken from the LAST matching line of each kind.
    """
    from collections import defaultdict

    per_instance = defaultdict(list)   # instance name -> [tcp ports]; internal only
    instance_names = []                # "Instance" values, in order of appearance
    version_name = ""
    version_number = ""
    product = ""
    service_pack = ""
    post_sp_patches = ""
    named_pipe = ""
    current_instance = ""
    ports_seen = 0                     # index pairing TCP ports with instances

    for raw_line in parMEplz.splitlines():
        # NOTE: sequential ifs (not elif) on purpose -- e.g. an
        # "Instance name: X" line matches both the "Instance" and the
        # "name" checks below.
        if "Instance" in raw_line:
            value = raw_line.split(": ")[1]
            instance_names.append(value)
            per_instance[value]
            current_instance = value
        if "TCP port" in raw_line:
            value = raw_line.split(": ")[1]
            try:
                per_instance[instance_names[ports_seen]].append(value)
                ports_seen += 1
            except (IndexError, KeyError):
                # More ports than instances seen: skip the rest of this line.
                continue
        if "Named pipe" in raw_line:
            named_pipe = raw_line.split(": ")[1]
        if "name" in raw_line:
            version_name = raw_line.split(": ")[1]
        if "number" in raw_line:
            version_number = raw_line.split(": ")[1]
        if "Product" in raw_line:
            product = raw_line.split(": ")[1]
        if "Service pack" in raw_line:
            service_pack = raw_line.split(": ")[1]
        if "Post-SP patches" in raw_line:
            post_sp_patches = raw_line.split(": ")[1]

    if current_instance == "":
        current_instance = ","
    return ",".join([current_instance, version_name, version_number,
                     product, service_pack, post_sp_patches, named_pipe])
#give item1,item2
#get item2
def shoArraKEE(striin):
    """Return the first comma-separated field of *striin* (the key/IP part)."""
    first_field, _sep, _rest = striin.partition(",")
    return first_field
def shoArTWOOO(striin):
    """Return the second comma-separated field of *striin*.

    Raises IndexError when the input contains no comma (same as the
    original direct indexing).
    """
    return striin.split(",", 2)[1]
def shosocc(striin):
    """Return "field0,field1" of *striin*, dropping any further fields."""
    fields = striin.split(",")
    return f"{fields[0]},{fields[1]}"
def printteeALL():
    """Join each "ip,port" socket with its Windows hostname (when known).

    For every entry in the module-level soccETTT list, scan
    comboGetwinhostname for entries with the same IP; the last match
    wins. Entries without a match are kept unchanged.

    Returns the list of "ip,port[,winhostname]" strings.
    """
    merged = []
    for socket_entry in soccETTT:
        ip_key = shoArraKEE(socket_entry)
        enriched = socket_entry
        for mapping in comboGetwinhostname:
            if shoArraKEE(mapping) == ip_key:
                enriched = socket_entry + "," + shoArTWOOO(mapping)
        merged.append(enriched)
    return merged
#print(dnsSERV)
#heh this the last item in our arrya
#peep send IP, get the 2nd column.
#peep = comboGetwinhostname[18]
#shoArraKEE(peep)
#this just prints it out straight up
#shoArraKEE(comboGetwinhostname[0])
#shoArraKEE(comboGetwinhostname[3])
#printeeIPSSrit
#printteeALL()
# Final output: one CSV line per discovered SQL Server socket, joining the
# socket/hostname info with the parsed ms-sql-info details.
latestyah = printteeALL()
for each in latestyah:
    # each == "ip,port[,winhostname]"
    ippp = shoArraKEE(each)
    ipppo = shosocc(each)  # NOTE(review): computed but not used below
    # Find this host's raw script output and append its parsed details.
    for each1 in scriSERV:
        # NOTE(review): substring match -- "10.0.0.1" also matches output
        # containing "10.0.0.10"; confirm whether exact matching is needed.
        if ippp in each1:
            print(each + "," + parsZZ(each1) )
#sprint()
#shosocc(
#if any(s in each1 for s in each):
# print("yooo this is tha " + ippp)
#script in array with ip indexed??
#print(scriSERV[1])
#print(scriSERV[0])
#print(scriSERV[2])
#ozz = parsZZ(scriSERV[1])
#ozz = parsZZ(scriSERV[99])
#print(ozz)
#print(ozz.items())
#print(parsZZ(scriSERV[0]))
#oa = parsZZ(scriSERV[0])
#sprint(oa)
#print(parsZZ(scriSERV[15]))
#arr = [2,4,5,7,9]
#arr_2d = [[1,2],[3,4]]
#print("The Array is: ", arr) #printing the array
#print("The 2D-Array is: ", arr_2d) #printing the 2D-Array
#printing the array
#print("The Array is : ")
#for i in arr:
# print(i, end = ' ')
#printing the 2D-Array
#print("\nThe 2D-Array is:")
#for i in arr_2d:
# for j in i:
# print(j, end=" ")
# print()
#for i in comboGetwinhostname:
# for j in i:
# print(j, end=" ")
# print()
#
#+====++++++++++++++++++++++
###test change here which to parse
#parMEplz = scriSERV[7]
#print(parMEplz)
#function here, send scriSERV[x], get a response of dict file back.
#~~~~~~WIN~~~~~~~~
#print(parsZZ(scriSERV[15]))
#print(parsZZ(scriSERV[15]))
#ozz = parsZZ(scriSERV[15])
#print(ozz.items())
########legacyyyyyyyyy
#print("IP,DNS,Server,Instance,TCP,Named Pipe")
#o=0
#for index,element in enumerate(ipSERV):
#print(index,element)
#print(element +","+ dnsSERV[index] + "," + winSERV[index])
#print(element) ##prints IP only..
#multipe ip per instance below, for each -- 5example
#udpate-- this should be for every key in tha dict
#oi = parsZZ(scriSERV[index])
#print(oi)
#print(oi[1])
#for each in oi:
#for key, value in oi.items() :
#print(key, value)
#sometimes when no namedpipe, then only one val
#print(element)
#print(dnsSERV[index])
#print(index)
#try:
# print(winSERV[index])
#except:
# print('errrrrrr')
# winSERV[index] == ''
#--almost done, only missing here is instace. is that key?
#print(key)
#print(ipSERCO)
#print(element)
#if element ==
#for iz in ipSERCO:
# print(ipSERCO[iz])
#if animal == 'Bird':
# print('Chirp!')
#try:
#out of range error her....
#if element == " ":
# print("YOOOOOOOOOOOOOOOOOOOOOOOOOO")
#print(element)
#if element in comboGetwinhostname:
# print("idonoooooo")
#
#print(element +","+ dnsSERV[index] + "," + | |
# Authors: <NAME> <<EMAIL>>
# <NAME>
#
# License: BSD (3-clause)
import logging
import warnings
import numpy as np
from scipy import linalg
from numpy.linalg import pinv
from .asr_utils import (geometric_median, fit_eeg_distribution, yulewalk,
yulewalk_filter, ma_filter, block_covariance)
class ASR():
"""Artifact Subspace Reconstruction.
Artifact subspace reconstruction (ASR) is an automated, online,
component-based artifact removal method for removing transient or
large-amplitude artifacts in multi-channel EEG recordings [1]_.
Parameters
----------
sfreq : float
Sampling rate of the data, in Hz.
cutoff: float
Standard deviation cutoff for rejection. X portions whose variance
is larger than this threshold relative to the calibration data are
considered missing data and will be removed. The most aggressive value
that can be used without losing too much EEG is 2.5. Recommended to
use with more conservative values ranging from 20 - 30.
Defaults to 20.
blocksize : int
Block size for calculating the robust data covariance and thresholds,
in samples; allows to reduce the memory and time requirements of the
robust estimators by this factor (down to Channels x Channels x Samples
x 16 / Blocksize bytes) (default=100).
win_len : float
Window length (s) that is used to check the data for artifact content.
This is ideally as long as the expected time scale of the artifacts but
not shorter than half a cycle of the high-pass filter that was used
(default=0.5).
win_overlap : float
Window overlap fraction. The fraction of two successive windows that
overlaps. Higher overlap ensures that fewer artifact portions are going
to be missed, but is slower (default=0.66).
max_dropout_fraction : float
Maximum fraction of windows that can be subject to signal dropouts
(e.g., sensor unplugged), used for threshold estimation (default=0.1).
min_clean_fraction : float
Minimum fraction of windows that need to be clean, used for threshold
estimation (default=0.25).
ab : 2-tuple | None
Coefficients (A, B) of an IIR filter that is used to shape the
spectrum of the signal when calculating artifact statistics. The
output signal does not go through this filter. This is an optional way
to tune the sensitivity of the algorithm to each frequency component
of the signal. The default filter is less sensitive at alpha and beta
frequencies and more sensitive at delta (blinks) and gamma (muscle)
frequencies. Defaults to None.
max_bad_chans : float
The maximum number or fraction of bad channels that a retained window
may still contain (more than this and it is removed). Reasonable range
is 0.05 (very clean output) to 0.3 (very lax cleaning of only coarse
artifacts) (default=0.2).
method : {'riemann', 'euclid'}
Method to use. If riemann, use the riemannian-modified version of
ASR [2]_. Currently, only euclidean ASR is supported. Defaults to
"euclid".
Attributes
----------
sfreq: array, shape=(n_channels, filter_order)
Filter initial conditions.
cutoff: float
Standard deviation cutoff for rejection.
blocksize : int
Block size for calculating the robust data covariance and thresholds.
win_len : float
Window length (s) that is used to check the data for artifact content.
win_overlap : float
Window overlap fraction.
max_dropout_fraction : float
Maximum fraction of windows that can be subject to signal dropouts.
min_clean_fraction : float
Minimum fraction of windows.
max_bad_chans : float
The maximum fraction of bad channels.
method : {'riemann', 'euclid'}
Method to use.
A, B: arrays
Coefficients of an IIR filter that is used to shape the spectrum of the
signal when calculating artifact statistics. The output signal does not
go through this filter. This is an optional way to tune the sensitivity
of the algorithm to each frequency component of the signal. The default
filter is less sensitive at alpha and beta frequencies and more
sensitive at delta (blinks) and gamma (muscle) frequencies.
M : array, shape=(channels, channels)
The mixing matrix to fit ASR data.
T : array, shape=(channels, channels)
The mixing matrix to fit ASR data.
References
----------
.. [1] <NAME>., & <NAME>. (2016). U.S. Patent Application No.
14/895,440. https://patents.google.com/patent/US20160113587A1/en
.. [2] <NAME>., <NAME>., <NAME>., & <NAME>.
(2019). A Riemannian Modification of Artifact Subspace Reconstruction
for EEG Artifact Handling. Frontiers in Human Neuroscience, 13.
https://doi.org/10.3389/fnhum.2019.00141
"""
def __init__(self, sfreq, cutoff=20, blocksize=100, win_len=0.5,
win_overlap=0.66, max_dropout_fraction=0.1,
min_clean_fraction=0.25, ab=None, max_bad_chans=0.1,
method="euclid"):
# set attributes
self.sfreq = sfreq
self.cutoff = cutoff
self.blocksize = blocksize
self.win_len = win_len
self.win_overlap = win_overlap
self.max_dropout_fraction = max_dropout_fraction
self.min_clean_fraction = min_clean_fraction
self.max_bad_chans = max_bad_chans
self.method = "euclid" # NOTE: riemann is not yet available
self._fitted = False
# set default yule-walker filter
if ab is None:
yw_f = np.array([0, 2, 3, 13, 16, 40,
np.minimum(80.0, (self.sfreq / 2.0) - 1.0),
self.sfreq / 2.0]) * 2.0 / self.sfreq
yw_m = np.array([3, 0.75, 0.33, 0.33, 1, 1, 3, 3])
self.B, self.A = yulewalk(8, yw_f, yw_m)
else:
self.A, self.B = ab
self._reset()
def _reset(self):
"""Reset state variables."""
self.M = None
self.T = None
# TODO: The following parameters are effectively not used. Still,
# they can be set manually via asr.transform(return_states=True)
self.R = None
self.carry = None
self.Zi = None
self.cov = None
self._fitted = False
def fit(self, raw, picks="eeg", start=0, stop=None,
return_clean_window=False):
"""Calibration for the Artifact Subspace Reconstruction method.
The input to this data is a multi-channel time series of calibration
data. In typical uses the calibration data is clean resting EEG data
of data if the fraction of artifact content is below the breakdown
point of the robust statistics used for estimation (50% theoretical,
~30% practical). If the data has a proportion of more than 30-50%
artifacts then bad time windows should be removed beforehand. This
data is used to estimate the thresholds that are used by the ASR
processing function to identify and remove artifact components.
The calibration data must have been recorded for the same cap design
from which data for cleanup will be recorded, and ideally should be
from the same session and same subject, but it is possible to reuse
the calibration data from a previous session and montage to the
extent that the cap is placed in the same location (where loss in
accuracy is more or less proportional to the mismatch in cap
placement).
Parameters
----------
raw : instance of mne.io.Raw
Instance of mne.io.Raw to be used for fitting the ASR.
The calibration data should have been high-pass filtered (for
example at 0.5Hz or 1Hz using a Butterworth IIR filter), and be
reasonably clean not less than 30 seconds (this method is
typically used with 1 minute or more).
picks : str | list | slice | None
Channels used to fit the ASR. All channels should be of the same
type (e.g. "eeg", "grads"). Slices and lists of integers will
be interpreted as channel indices. In lists, channel
name strings (e.g., ['MEG0111', 'MEG2623'] will pick the given
channels. Note that channels in info['bads'] will be included if
their names or indices are explicitly provided. Defaults to "eeg".
start : int
The first sample to use for fitting the data. Defaults to 0.
stop : int | None
The last sample to use for fitting the data. If `None`, all
samples after `start` will be used for fitting. Defaults to None.
return_clean_window : Bool
If True, the method will return the variables `clean` (the cropped
dataset which was used to fit the ASR) and `sample_mask` (a
logical mask of which samples were included/excluded from fitting
the ASR). Defaults to False.
Returns
-------
clean : array, shape=(n_channels, n_samples)
The cropped version of the dataset which was used to calibrate
the ASR. This array is a result of the `clean_windows` function
and no ASR was applied to it.
sample_mask : boolean array, shape=(1, n_samples)
Logical mask of the samples which were used to train the ASR.
"""
# extract the data
X = raw.get_data(picks=picks, start=start, stop=stop)
# Find artifact-free windows first
clean, sample_mask = clean_windows(
X,
sfreq=self.sfreq,
win_len=self.win_len,
win_overlap=self.win_overlap,
max_bad_chans=self.max_bad_chans,
min_clean_fraction=self.min_clean_fraction,
max_dropout_fraction=self.max_dropout_fraction)
# Perform calibration
self.M, self.T = asr_calibrate(
clean,
sfreq=self.sfreq,
cutoff=self.cutoff,
blocksize=self.blocksize,
win_len=self.win_len,
| |
import io
import tensorflow as tf
import h5py
from tensorflow.python.keras.saving import hdf5_format
import basics.base_utils as _
from mlpug.trainers.training import *
from mlpug.mlpug_exceptions import TrainerInvalidException, \
TrainerStateInvalidException, \
BatchNotChunkableException, \
MLPugException, \
LossNotAvailableException
from mlpug.utils import get_value_at
class TFTrainerMixin:
    """Mixin providing TF/Keras (de)serialization of model and optimizer state."""

    def _activate_inference_mode(self, inference_mode):
        """No-op: TF models receive the inference flag per call instead."""
        pass

    def _get_model_state(self, model, model_name=None):
        """Snapshot the model weights into an in-memory HDF5 blob."""
        state_buffer = io.BytesIO()
        with h5py.File(state_buffer, 'w') as h5_file:
            hdf5_format.save_weights_to_hdf5_group(h5_file, model.layers)
        return state_buffer

    def _get_optimizer_state(self, optimizer, optimizer_name=None):
        """Snapshot the optimizer weights into an in-memory HDF5 blob."""
        state_buffer = io.BytesIO()
        with h5py.File(state_buffer, 'w') as h5_file:
            hdf5_format.save_optimizer_weights_to_hdf5_group(h5_file, optimizer)
        return state_buffer

    def _set_model_state(self, model, state, model_name=None):
        """Restore model weights from a blob created by `_get_model_state`."""
        with h5py.File(state, 'r') as h5_file:
            hdf5_format.load_weights_from_hdf5_group(h5_file, model.layers)

    def _set_optimizer_state(self, optimizer, state, optimizer_name):
        """Restore optimizer weights from a blob created by `_get_optimizer_state`."""
        with h5py.File(state, 'r') as h5_file:
            restored_weights = hdf5_format.load_optimizer_weights_from_hdf5_group(h5_file)
            optimizer.set_weights(restored_weights)
class Trainer(TFTrainerMixin, TrainerBase):
    """Basic Tensorflow trainer: TrainerBase with TF state (de)serialization mixed in."""
    pass
class DefaultTrainer(TFTrainerMixin, DefaultTrainerBase):
    def __init__(self, *args,
                 eager_mode=False,
                 batch_data_signature=None,
                 training_settings_signature=None,
                 distribution_strategy=None,
                 trainable_variables=None,
                 name="DefaultTrainer",
                 **kwargs):
        """
        Tensorflow default trainer.

        :param args: passed through to DefaultTrainerBase
        :param eager_mode: If true, the training step is not wrapped in a @tf.function
        :param batch_data_signature: Is only required when eager_mode=False.
               Example, when batch data is a tuple of an input and target tensor:
               (tf.TensorSpec(shape=(None, None), dtype=tf.int64),
                tf.TensorSpec(shape=(None, None), dtype=tf.int64),)
        :param training_settings_signature: Use when you use training_settings.
               Is only used when eager_mode=False.
               Default is {}.
               Note: training_settings are the same as evaluation_settings.
        :param distribution_strategy: Optional distributed training strategy
        :param trainable_variables: Only required when using multiple optimizers;
               dict mapping optimizer name -> trainable variables
        :param name: trainer name.
               NOTE(review): accepted but not forwarded in the visible code --
               confirm the base class handles naming elsewhere.
        :param kwargs: passed through to DefaultTrainerBase
        """
        super(DefaultTrainer, self).__init__(*args, **kwargs)
        self.eager_mode = eager_mode
        self.batch_data_signature = batch_data_signature
        self.training_settings_signature = training_settings_signature
        self.distribution_strategy = distribution_strategy
        self.trainable_variables = trainable_variables
        if not eager_mode:
            # Graph mode: tracing requires explicit input signatures.
            if self.batch_data_signature is None:
                raise TrainerInvalidException(f"Missing batch_data_signature such that the "
                                              f"training step computation graph can be traced")
            if self.training_settings_signature is None:
                self._log.info("training_settings_signature not given, setting to empty dict, "
                               "implying that training settings won't be used.")
                self.training_settings_signature = {}
            self._train_step_tf_func = self._create_training_step_tf_func() if self.distribution_strategy is None \
                else self._create_distributed_training_step_tf_func()
            self._call_model_tf_func = self._create_call_model_tf_func() if self.distribution_strategy is None \
                else self._create_distributed_call_model_tf_func()
        else:
            self._log.warn("Training in eager mode.")
            self._train_step_tf_func = self._train_on if self.distribution_strategy is None \
                else self._create_distributed_training_step_eager()
            self._call_model_tf_func = self._call_model if self.distribution_strategy is None \
                else self._create_distributed_call_model_eager()
        if self.trainable_variables is None:
            # Without explicit trainable variables they are retrieved from the
            # model later on; that only works for a single optimizer.
            if len(self.optimizers) > 1:
                raise TrainerInvalidException(f"No trainable variables provided per optimizer")
        else:
            self.trainable_variables = convert_to_dict("optimizer", trainable_variables)
            # Every registered optimizer must have its own trainable variables.
            missing_optimizer_vars = []
            for optimizer_name in self.optimizers.keys():
                if optimizer_name not in self.trainable_variables or self.trainable_variables[optimizer_name] is None:
                    missing_optimizer_vars += [optimizer_name]
            if len(missing_optimizer_vars) > 0:
                raise TrainerInvalidException(f"Missing trainable variables for optimizer(s) : "
                                              f"{', '.join(missing_optimizer_vars)}")
        # Checkpoint states are applied lazily at the first training batch.
        self._deferred_model_components_state = None
        self._deferred_optimizers_state = None
        self._first_batch = True
def set_model_components_state(self, state):
"""
:param state:
:return: success (True or False)
"""
if not _.is_callable(getattr(state, 'items', None)):
self._log.error("State is invalid, unable to set model components state")
return False
self._deferred_model_components_state = state
self._log.debug("Model components checkpoint state received; "
"deferred setting the state until training has started")
return True
def set_optimizers_state(self, state):
"""
:param state:
:return: success (True, False)
"""
if not _.is_callable(getattr(state, 'items', None)):
self._log.error("State is invalid, unable to set optimizers state")
return False
self._deferred_optimizers_state = state
self._log.debug("Optimizers checkpoint state received; "
"deferred setting the state until training has started")
return True
def set_learning_rate_for(self, optimizer_name, lr):
"""
Set learning rate for specific optimizer `optimizer_name` to `lr`
:param optimizer_name:
:param lr:
:return: True on success, else False
"""
optimizer = self.get_optimizer(optimizer_name)
if not hasattr(optimizer, 'learning_rate'):
self._log.error(f"No valid optimizer available with name {optimizer_name}, unable to set learning rate")
return False
try:
optimizer.learning_rate = lr
except Exception as e:
_.log_exception(self._log, f"Unable to set learning rate for optimizer {optimizer_name}", e)
return False
self._log.debug(f"Learning rate of optimizer {optimizer_name} set to : {lr}")
return True
    def train_on(self, batch_data, training_settings=None):
        """
        Use batch_data to perform a training iteration.

        Optionally uses `batch_chunk_size` to evaluate the loss in chunks.
        If a `batch_chunk_size` was given during construction of the trainer, the gradients are updated by evaluating
        the batch in chunks.

        *Note*
        When using chunked batch processing, the default implementation assumes that the
        loss, calculated over a chunk, is the average of the sample losses.

        :param batch_data: batch_data object to train on (e.g. dict, list, tuple)
                           When `batch_chunk_size` is given, `batch_data` must be an object that implements the
                           `__len__` and `__getitem__` methods. Here the `__getitem__` method must be able to deal
                           with slices.
        :param training_settings: optional training_settings object (usually dict)

        :return: loss, auxiliary_results
                 loss : number (e.g. float)
                 auxiliary_results : can be anything, e.g dict or list with values or data items

        :raises TrainerInvalidException: when the trainer instance is not valid
        """
        if not self.instance_valid():
            raise TrainerInvalidException()

        if self._first_batch:
            # Check if we first need to restore a checkpoint
            deferred_model_components_state_set = \
                self._set_deferred_model_components_state(batch_data, training_settings)
            if deferred_model_components_state_set:
                # Setting the deferred model components state evaluated the model,
                # so we can now get the trainable variables from the model and
                # subsequently set the deferred optimizer state, which needs the
                # trainable variables.
                self._retrieve_trainable_variables()
                self._set_deferred_optimizers_state()
            self._first_batch = False

        loss, auxiliary_results = self._train_step_tf_func(batch_data, training_settings)

        return loss, auxiliary_results
def _create_train_step_signature(self):
return [
self.batch_data_signature,
self.training_settings_signature
]
    def _create_distributed_training_step_tf_func(self):
        """Create a @tf.function-traced training step that runs via the distribution strategy."""
        @tf.function(input_signature=self._create_train_step_signature())
        def training_step_func(batch_data, training_settings):
            return self.distribution_strategy.run(self._train_on, args=(batch_data, training_settings,))

        return training_step_func
    def _create_training_step_tf_func(self):
        """Create a @tf.function-traced training step (no distribution strategy)."""
        @tf.function(input_signature=self._create_train_step_signature())
        def training_step_func(batch_data, training_settings):
            return self._train_on(batch_data, training_settings)

        return training_step_func
def _create_distributed_training_step_eager(self):
def training_step_func(batch_data, training_settings):
return self.distribution_strategy.run(self._train_on, args=(batch_data, training_settings,))
return training_step_func
    def _train_on(self, batch_data, training_settings):
        """Single training step: compute gradients, then apply parameter updates.

        :return: (loss, auxiliary_results) from the loss evaluation
        """
        loss, auxiliary_results, gradients = self._calc_gradients(batch_data, training_settings=training_settings)
        # Hook chain: subclasses may transform gradients before they are applied.
        self._update_model_parameters(self._prepare_update_model_parameters(gradients))
        self._after_update_model_parameters(gradients)
        return loss, auxiliary_results
def _create_call_model_signature(self):
return [
self.batch_data_signature, # batch_data
self.training_settings_signature, # evaluate_settings
tf.TensorSpec(shape=(), dtype=tf.bool) # inference_mode
]
    def _create_distributed_call_model_tf_func(self):
        """Create a @tf.function-traced model call that runs via the distribution strategy."""
        @tf.function(input_signature=self._create_call_model_signature())
        def call_model_func(batch_data, evaluate_settings, inference_mode):
            return self.distribution_strategy.run(self._call_model,
                                                  args=(batch_data, evaluate_settings, inference_mode))

        return call_model_func
    def _create_call_model_tf_func(self):
        """Create a @tf.function-traced model call (no distribution strategy)."""
        @tf.function(input_signature=self._create_call_model_signature())
        def call_model_func(batch_data, evaluate_settings, inference_mode):
            return self._call_model(batch_data, evaluate_settings, inference_mode)

        return call_model_func
def _create_distributed_call_model_eager(self):
def call_model_func(batch_data, evaluate_settings, inference_mode):
return self.distribution_strategy.run(self._call_model,
args=(batch_data, evaluate_settings, inference_mode))
return call_model_func
def _retrieve_trainable_variables(self):
if len(self.optimizers) > 1:
return
# This only needs to be done once
# Further, this situation only occurs when there is only one optimizer
optimizer_name = next(iter(self.optimizers))
trainable_variables = get_value_at(optimizer_name, self.trainable_variables, warn_on_failure=False)
if trainable_variables is None:
trainable_variables = self.training_model.trainable_variables
self.trainable_variables = {
optimizer_name: trainable_variables
}
    def _set_deferred_model_components_state(self, batch_data, training_settings):
        """
        Apply a previously received model components checkpoint, if any.

        Model component state can only be set after evaluating the model on
        input data (crazy but true) -- hence the dry evaluation below before
        restoring the state.

        :param batch_data: batch used for the dry model evaluation
        :param training_settings: settings used for the dry model evaluation
        :return: True if set, else False
        """
        if self._deferred_model_components_state is None:
            return False

        def dry_eval_model():
            # Build the model variables by evaluating once; result is discarded.
            self.evaluate_loss(batch_data,
                               inference_mode=False,
                               evaluate_settings=training_settings)

        if self.distribution_strategy is not None:
            with self.distribution_strategy.scope():
                dry_eval_model()
        else:
            dry_eval_model()

        success = super().set_model_components_state(self._deferred_model_components_state)
        if not success:
            self._log.error("Unable to set deferred model components state, weights are not loaded")

        self._deferred_model_components_state = None

        return success
    def _set_deferred_optimizers_state(self):
        """Apply a previously received optimizers checkpoint, if any.

        Optimizer weights can only be restored after the optimizer slot
        variables exist, which requires the trainable variables to be known.
        """
        if self._deferred_optimizers_state is None:
            return

        def create_optimizer_weights():
            for optimizer_name, optimizer in self.optimizers.items():
                trainable_variables = get_value_at(optimizer_name, self.trainable_variables, warn_on_failure=False)
                # Force-create the optimizer slot variables so they can be restored.
                optimizer._create_all_weights(trainable_variables)

        if self.distribution_strategy is not None:
            with self.distribution_strategy.scope():
                create_optimizer_weights()
        else:
            create_optimizer_weights()

        success = super().set_optimizers_state(self._deferred_optimizers_state)
        if not success:
            self._log.error("Unable to set deferred optimizers state, weights are not loaded")

        self._deferred_optimizers_state = None
def _evaluate_loss(self, batch_data, evaluate_settings=None, inference_mode=None):
"""
Evaluates the given training model on the given batch_data, using the optional training_settings
Depending on the Deep learning backend you might need to use inference mode here
:param batch_data: batch_data object to evaluate loss on (e.g. dict, list, tuple)
:param evaluate_settings: optional evaluate_settings object (usually dict)
:param inference_mode: optional bool, important when inference mode not set in `_activate_inference_mode`
Pytorch: inference_mode not required here
Tensorflow: inference_mode required here
:return: dict or tuple
{
"loss": <Tensor>,
"auxiliary_results": <can be anything, e.g dict or list with values or data items>
}
(loss, ... auxiliary results ...)
"""
if not (type(inference_mode) is bool):
raise TrainerStateInvalidException("Inference mode is not set")
if not inference_mode:
# @tf.function on the train step level
return self._call_model(batch_data, evaluate_settings, inference_mode)
else:
return self._call_model_tf_func(batch_data, evaluate_settings, tf.constant(inference_mode, dtype=tf.bool))
    def _call_model(self, batch_data, evaluate_settings, inference_mode):
        """Delegate to the training model; it must accept (batch_data, settings, inference_mode)."""
        return self.training_model(batch_data, evaluate_settings, inference_mode)
    def _calc_gradients(self, batch_data, training_settings=None):
        """
        Evaluate the loss on `batch_data` and compute gradients per optimizer.

        :param batch_data: batch_data object to train on (e.g. dict, list, tuple)
        :param training_settings: optional training_settings object (usually dict)

        :return: (loss, auxiliary_results, gradients); gradients is a dict
                 mapping optimizer name -> gradients of its trainable variables

        :raises LossNotAvailableException: when the model results contain no 'loss'
        :raises NotImplementedError: when batch_chunk_size is set (chunked
                gradient accumulation is not implemented for TF)
        """
        if not self.batch_chunk_size:
            # Record the forward pass on the tape so gradients can be taken.
            with tf.GradientTape() as tape:
                results = self.evaluate_loss(batch_data,
                                             inference_mode=False,
                                             evaluate_settings=training_settings)
                if 'loss' not in results:
                    raise LossNotAvailableException()
                if self.trainable_variables is None:
                    # We now have evaluated the model and the trainable variables should be available
                    self._retrieve_trainable_variables()
                loss = results['loss']
                auxiliary_results = get_value_at('auxiliary_results', results, warn_on_failure=False)
            gradients = self._back_propagate_from(loss, tape)
        else:
            raise NotImplementedError("Gradient accumulation over batch chunks is not implemented")
        return loss, auxiliary_results, gradients
def _back_propagate_from(self, loss, tape, last_chunk=False):
gradients = {}
for optimizer_name in self.optimizers.keys():
trainable_variables = get_value_at(optimizer_name, self.trainable_variables, warn_on_failure=False)
gradients[optimizer_name] = tape.gradient(loss, trainable_variables)
return gradients
def _prepare_update_model_parameters(self, gradients):
"""
:param gradients: dict with gradients per provided optimizer
The simple situation, when only one optimizer is | |
'verbose_name_plural': '24 Penghapusan ATL KESBANGPOL',
},
bases=('atl.penghapusanatl',),
),
migrations.CreateModel(
name='PenghapusanATLKominfo',
fields=[
],
options={
'verbose_name': '43 Penghapusan ATL Kominfo',
'proxy': True,
'verbose_name_plural': '43 Penghapusan ATL Kominfo',
},
bases=('atl.penghapusanatl',),
),
migrations.CreateModel(
name='PenghapusanATLLampihong',
fields=[
],
options={
'verbose_name': '31 Penghapusan ATL Lampihong',
'proxy': True,
'verbose_name_plural': '31 Penghapusan ATL Lampihong',
},
bases=('atl.penghapusanatl',),
),
migrations.CreateModel(
name='PenghapusanATLParingin',
fields=[
],
options={
'verbose_name': '28 Penghapusan ATL Paringin',
'proxy': True,
'verbose_name_plural': '28 Penghapusan ATL Paringin',
},
bases=('atl.penghapusanatl',),
),
migrations.CreateModel(
name='PenghapusanATLParinginKota',
fields=[
],
options={
'verbose_name': '29 Penghapusan ATL Paringin Kota',
'proxy': True,
'verbose_name_plural': '29 Penghapusan ATL Paringin Kota',
},
bases=('atl.penghapusanatl',),
),
migrations.CreateModel(
name='PenghapusanATLParinginSelatan',
fields=[
],
options={
'verbose_name': '36 Penghapusan ATL Paringin Selatan',
'proxy': True,
'verbose_name_plural': '36 Penghapusan ATL Paringin Selatan',
},
bases=('atl.penghapusanatl',),
),
migrations.CreateModel(
name='PenghapusanATLParinginTimur',
fields=[
],
options={
'verbose_name': '30 Penghapusan ATL Paringin Timur',
'proxy': True,
'verbose_name_plural': '30 Penghapusan ATL Paringin Timur',
},
bases=('atl.penghapusanatl',),
),
migrations.CreateModel(
name='PenghapusanATLPariwisata',
fields=[
],
options={
'verbose_name': '46 Penghapusan ATL Pariwisata',
'proxy': True,
'verbose_name_plural': '46 Penghapusan ATL Pariwisata',
},
bases=('atl.penghapusanatl',),
),
migrations.CreateModel(
name='PenghapusanATLPerdagangan',
fields=[
],
options={
'verbose_name': '47 Penghapusan ATL Perdagangan',
'proxy': True,
'verbose_name_plural': '47 Penghapusan ATL Perdagangan',
},
bases=('atl.penghapusanatl',),
),
migrations.CreateModel(
name='PenghapusanATLPerikanan',
fields=[
],
options={
'verbose_name': '45 Penghapusan ATL Perikanan',
'proxy': True,
'verbose_name_plural': '45 Penghapusan ATL Perikanan',
},
bases=('atl.penghapusanatl',),
),
migrations.CreateModel(
name='PenghapusanATLPerpustakaan',
fields=[
],
options={
'verbose_name': '08 Penghapusan ATL Perpustakaan',
'proxy': True,
'verbose_name_plural': '08 Penghapusan ATL Perpustakaan',
},
bases=('atl.penghapusanatl',),
),
migrations.CreateModel(
name='PenghapusanATLPertanian',
fields=[
],
options={
'verbose_name': '13 Penghapusan ATL Pertanian',
'proxy': True,
'verbose_name_plural': '13 Penghapusan ATL Pertanian',
},
bases=('atl.penghapusanatl',),
),
migrations.CreateModel(
name='PenghapusanATLRSUD',
fields=[
],
options={
'verbose_name': '06 Penghapusan ATL RSUD',
'proxy': True,
'verbose_name_plural': '06 Penghapusan ATL RSUD',
},
bases=('atl.penghapusanatl',),
),
migrations.CreateModel(
name='PenghapusanATLSATPOLPP',
fields=[
],
options={
'verbose_name': '25 Penghapusan ATL SATPOLPP',
'proxy': True,
'verbose_name_plural': '25 Penghapusan ATL SATPOLPP',
},
bases=('atl.penghapusanatl',),
),
migrations.CreateModel(
name='PenghapusanATLSekretariatKorpri',
fields=[
],
options={
'verbose_name': '27 Penghapusan ATL Sekretariat Korpri',
'proxy': True,
'verbose_name_plural': '27 Penghapusan ATL Sekretariat Korpri',
},
bases=('atl.penghapusanatl',),
),
migrations.CreateModel(
name='PenghapusanATLSetda',
fields=[
],
options={
'verbose_name': '02 Penghapusan ATL Setda',
'proxy': True,
'verbose_name_plural': '02 Penghapusan ATL Setda',
},
bases=('atl.penghapusanatl',),
),
migrations.CreateModel(
name='PenghapusanATLSetwan',
fields=[
],
options={
'verbose_name': '01 Penghapusan ATL Setwan',
'proxy': True,
'verbose_name_plural': '01 Penghapusan ATL Setwan',
},
bases=('atl.penghapusanatl',),
),
migrations.CreateModel(
name='PenghapusanATLSosial',
fields=[
],
options={
'verbose_name': '09 Penghapusan ATL Sosial',
'proxy': True,
'verbose_name_plural': '09 Penghapusan ATL Sosial',
},
bases=('atl.penghapusanatl',),
),
migrations.CreateModel(
name='PenghapusanATLTebingTinggi',
fields=[
],
options={
'verbose_name': '38 Penghapusan ATL Tebing Tinggi',
'proxy': True,
'verbose_name_plural': '38 Penghapusan ATL Tebing Tinggi',
},
bases=('atl.penghapusanatl',),
),
migrations.CreateModel(
name='SKPDAsalATLAwayan',
fields=[
],
options={
'verbose_name': '34 SKPD Asal ATL Awayan',
'proxy': True,
'verbose_name_plural': '34 SKPD Asal ATL Awayan',
},
bases=('atl.skpdasalatl',),
),
migrations.CreateModel(
name='SKPDAsalATLBAPPEDA',
fields=[
],
options={
'verbose_name': '21 SKPD Asal ATL BAPPEDA',
'proxy': True,
'verbose_name_plural': '21 SKPD Asal ATL BAPPEDA',
},
bases=('atl.skpdasalatl',),
),
migrations.CreateModel(
name='SKPDAsalATLBatumandi',
fields=[
],
options={
'verbose_name': '32 SKPD Asal ATL Batumandi',
'proxy': True,
'verbose_name_plural': '32 SKPD Asal ATL Batumandi',
},
bases=('atl.skpdasalatl',),
),
migrations.CreateModel(
name='SKPDAsalATLBatuPiring',
fields=[
],
options={
'verbose_name': '37 SKPD Asal ATL Batu Piring',
'proxy': True,
'verbose_name_plural': '37 SKPD Asal ATL Batu Piring',
},
bases=('atl.skpdasalatl',),
),
migrations.CreateModel(
name='SKPDAsalATLBKD',
fields=[
],
options={
'verbose_name': '19 SKPD Asal ATL BKD',
'proxy': True,
'verbose_name_plural': '19 SKPD Asal ATL BKD',
},
bases=('atl.skpdasalatl',),
),
migrations.CreateModel(
name='SKPDAsalATLBKPPD',
fields=[
],
options={
'verbose_name': '26 SKPD Asal ATL BKPPD',
'proxy': True,
'verbose_name_plural': '26 SKPD Asal ATL BKPPD',
},
bases=('atl.skpdasalatl',),
),
migrations.CreateModel(
name='SKPDAsalATLBPBD',
fields=[
],
options={
'verbose_name': '39 SKPD Asal ATL BPBD',
'proxy': True,
'verbose_name_plural': '39 SKPD Asal ATL BPBD',
},
bases=('atl.skpdasalatl',),
),
migrations.CreateModel(
name='SKPDAsalATLBPPD',
fields=[
],
options={
'verbose_name': '48 SKPD Asal ATL BPPD',
'proxy': True,
'verbose_name_plural': '48 SKPD Asal ATL BPPD',
},
bases=('atl.skpdasalatl',),
),
migrations.CreateModel(
name='SKPDAsalATLDinkes',
fields=[
],
options={
'verbose_name': '05 SKPD Asal ATL Dinkes',
'proxy': True,
'verbose_name_plural': '05 SKPD Asal ATL Dinkes',
},
bases=('atl.skpdasalatl',),
),
migrations.CreateModel(
name='SKPDAsalATLDinkesAwayan',
fields=[
],
options={
'verbose_name': '05 SKPD Asal ATL Dinkes Awayan',
'proxy': True,
'verbose_name_plural': '05 SKPD Asal ATL Dinkes Awayan',
},
bases=('atl.skpdasalatl',),
),
migrations.CreateModel(
name='SKPDAsalATLDinkesBatumandi',
fields=[
],
options={
'verbose_name': '05 SKPD Asal ATL Dinkes Batumandi',
'proxy': True,
'verbose_name_plural': '05 SKPD Asal ATL Dinkes Batumandi',
},
bases=('atl.skpdasalatl',),
),
migrations.CreateModel(
name='SKPDAsalATLDinkesHalong',
fields=[
],
options={
'verbose_name': '05 SKPD Asal ATL Dinkes Halong',
'proxy': True,
'verbose_name_plural': '05 SKPD Asal ATL Dinkes Halong',
},
bases=('atl.skpdasalatl',),
),
migrations.CreateModel(
name='SKPDAsalATLDinkesJuai',
fields=[
],
options={
'verbose_name': '05 SKPD Asal ATL Dinkes Juai',
'proxy': True,
'verbose_name_plural': '05 SKPD Asal ATL Dinkes Juai',
},
bases=('atl.skpdasalatl',),
),
migrations.CreateModel(
name='SKPDAsalATLDinkesKantor',
fields=[
],
options={
'verbose_name': '05 SKPD Asal ATL Dinkes Kantor',
'proxy': True,
'verbose_name_plural': '05 SKPD Asal ATL Dinkes Kantor',
},
bases=('atl.skpdasalatl',),
),
migrations.CreateModel(
name='SKPDAsalATLDinkesLampihong',
fields=[
],
options={
'verbose_name': '05 SKPD Asal ATL Dinkes Lampihong',
'proxy': True,
'verbose_name_plural': '05 SKPD Asal ATL Dinkes Lampihong',
},
bases=('atl.skpdasalatl',),
),
migrations.CreateModel(
name='SKPDAsalATLDinkesLokbatu',
fields=[
],
options={
'verbose_name': '05 SKPD Asal ATL Dinkes Lokbatu',
'proxy': True,
'verbose_name_plural': '05 SKPD Asal ATL Dinkes Lokbatu',
},
bases=('atl.skpdasalatl',),
),
migrations.CreateModel(
name='SKPDAsalATLDinkesParingin',
fields=[
],
options={
'verbose_name': '05 SKPD Asal ATL Dinkes Paringin',
'proxy': True,
'verbose_name_plural': '05 SKPD Asal ATL Dinkes Paringin',
},
bases=('atl.skpdasalatl',),
),
migrations.CreateModel(
name='SKPDAsalATLDinkesParinginSelatan',
fields=[
],
options={
'verbose_name': '05 SKPD Asal ATL Dinkes Paringin Selatan',
'proxy': True,
'verbose_name_plural': '05 SKPD Asal ATL Dinkes Paringin Selatan',
},
bases=('atl.skpdasalatl',),
),
migrations.CreateModel(
name='SKPDAsalATLDinkesPirsus',
fields=[
],
options={
'verbose_name': '05 SKPD Asal ATL Dinkes Pirsus',
'proxy': True,
'verbose_name_plural': '05 SKPD Asal ATL Dinkes Pirsus',
},
bases=('atl.skpdasalatl',),
),
migrations.CreateModel(
name='SKPDAsalATLDinkesRSUD',
fields=[
],
options={
'verbose_name': '05 SKPD Asal ATL Dinkes RSUD',
'proxy': True,
'verbose_name_plural': '05 SKPD Asal ATL Dinkes RSUD',
},
bases=('atl.skpdasalatl',),
),
migrations.CreateModel(
name='SKPDAsalATLDinkesTanahHabang',
fields=[
],
options={
'verbose_name': '05 SKPD Asal ATL Dinkes Tanah Habang',
'proxy': True,
'verbose_name_plural': '05 SKPD Asal ATL Dinkes Tanah Habang',
},
bases=('atl.skpdasalatl',),
),
migrations.CreateModel(
name='SKPDAsalATLDinkesTebingTinggi',
fields=[
],
options={
'verbose_name': '05 SKPD Asal ATL Dinkes Tebing Tinggi',
'proxy': True,
'verbose_name_plural': '05 SKPD Asal ATL Dinkes Tebing Tinggi',
},
bases=('atl.skpdasalatl',),
),
migrations.CreateModel(
name='SKPDAsalATLDinkesUren',
fields=[
],
options={
'verbose_name': '05 SKPD Asal ATL Dinkes Uren',
'proxy': True,
'verbose_name_plural': '05 SKPD Asal ATL Dinkes Uren',
},
bases=('atl.skpdasalatl',),
),
migrations.CreateModel(
name='SKPDAsalATLDisdik',
fields=[
],
options={
'verbose_name': '07 SKPD Asal ATL Disdik',
'proxy': True,
'verbose_name_plural': '07 SKPD Asal ATL Disdik',
},
bases=('atl.skpdasalatl',),
),
migrations.CreateModel(
name='SKPDAsalATLDisdikAwayan',
fields=[
],
options={
'verbose_name': '07 SKPD Asal ATL Disdik Awayan',
'proxy': True,
'verbose_name_plural': '07 SKPD Asal ATL Disdik Awayan',
},
bases=('atl.skpdasalatl',),
),
migrations.CreateModel(
name='SKPDAsalATLDisdikBatumandi',
fields=[
],
options={
'verbose_name': '07 SKPD Asal ATL Disdik Batumandi',
'proxy': True,
'verbose_name_plural': '07 SKPD Asal ATL Disdik Batumandi',
},
bases=('atl.skpdasalatl',),
),
migrations.CreateModel(
name='SKPDAsalATLDisdikHalong',
fields=[
],
options={
'verbose_name': '07 SKPD Asal ATL Disdik Halong',
'proxy': True,
'verbose_name_plural': '07 SKPD Asal ATL Disdik Halong',
},
bases=('atl.skpdasalatl',),
),
migrations.CreateModel(
name='SKPDAsalATLDisdikJuai',
fields=[
],
options={
'verbose_name': '07 SKPD Asal ATL Disdik Juai',
'proxy': True,
'verbose_name_plural': '07 SKPD Asal ATL Disdik Juai',
},
bases=('atl.skpdasalatl',),
),
migrations.CreateModel(
name='SKPDAsalATLDisdikKantor',
fields=[
],
options={
'verbose_name': '07 SKPD Asal ATL Disdik Kantor',
'proxy': True,
'verbose_name_plural': '07 SKPD Asal ATL Disdik Kantor',
},
bases=('atl.skpdasalatl',),
),
migrations.CreateModel(
name='SKPDAsalATLDisdikLampihong',
fields=[
],
options={
'verbose_name': '07 SKPD Asal ATL Disdik Lampihong',
'proxy': True,
'verbose_name_plural': '07 SKPD Asal ATL Disdik Lampihong',
},
bases=('atl.skpdasalatl',),
),
migrations.CreateModel(
name='SKPDAsalATLDisdikParingin',
fields=[
],
options={
'verbose_name': '07 SKPD Asal ATL Disdik Paringin',
'proxy': True,
'verbose_name_plural': '07 SKPD Asal ATL Disdik Paringin',
},
bases=('atl.skpdasalatl',),
),
migrations.CreateModel(
name='SKPDAsalATLDisdikParinginSelatan',
fields=[
],
options={
'verbose_name': '07 SKPD Asal ATL Disdik Paringin Selatan',
'proxy': True,
'verbose_name_plural': '07 SKPD Asal ATL Disdik Paringin Selatan',
},
bases=('atl.skpdasalatl',),
),
migrations.CreateModel(
name='SKPDAsalATLDisdikSMPN1Awayan',
fields=[
],
options={
'verbose_name': '07 SKPD Asal ATL Disdik SMPN 1 Awayan',
'proxy': True,
'verbose_name_plural': '07 SKPD Asal ATL Disdik SMPN 1 Awayan',
},
bases=('atl.skpdasalatl',),
),
migrations.CreateModel(
name='SKPDAsalATLDisdikSMPN1Batumandi',
fields=[
],
options={
'verbose_name': '07 SKPD Asal ATL Disdik SMPN 1 Batumandi',
'proxy': True,
'verbose_name_plural': '07 SKPD Asal ATL Disdik SMPN 1 Batumandi',
},
bases=('atl.skpdasalatl',),
),
migrations.CreateModel(
name='SKPDAsalATLDisdikSMPN1Halong',
fields=[
],
options={
'verbose_name': '07 SKPD Asal ATL Disdik SMPN 1 Halong',
'proxy': True,
'verbose_name_plural': '07 SKPD Asal ATL Disdik SMPN 1 Halong',
},
bases=('atl.skpdasalatl',),
),
migrations.CreateModel(
name='SKPDAsalATLDisdikSMPN1Juai',
fields=[
],
options={
'verbose_name': '07 SKPD Asal ATL Disdik SMPN 1 Juai',
'proxy': True,
'verbose_name_plural': '07 SKPD Asal ATL Disdik SMPN 1 Juai',
},
bases=('atl.skpdasalatl',),
),
migrations.CreateModel(
name='SKPDAsalATLDisdikSMPN1Lampihong',
fields=[
],
options={
'verbose_name': '07 SKPD Asal ATL Disdik SMPN 1 Lampihong',
'proxy': True,
'verbose_name_plural': '07 SKPD Asal ATL Disdik SMPN 1 Lampihong',
},
bases=('atl.skpdasalatl',),
),
migrations.CreateModel(
name='SKPDAsalATLDisdikSMPN1Paringin',
fields=[
],
| |
# Copyright (c) 2020 <NAME>
"""mech tests"""
import os
import re
from unittest.mock import patch, mock_open, MagicMock
from click.testing import CliRunner
import mech.mech
import mech.vmrun
from mech.mech_cli import cli
import mech.mech_instance
@patch('mech.utils.locate', return_value=None)
def test_mech_list_with_one(mock_locate, mechfile_one_entry):
    """'mech list' with a single, not-yet-created instance shows it as notcreated."""
    with patch('mech.utils.instances', return_value=['first']) as mock_instances, \
            patch('mech.utils.load_mechfile',
                  return_value=mechfile_one_entry) as mock_load_mechfile:
        result = CliRunner().invoke(cli, ['list'])
    print("result:{}".format(result))
    print("result.output:{}".format(result.output))
    mock_instances.assert_called()
    mock_locate.assert_called()
    mock_load_mechfile.assert_called()
    assert re.search(r'first\s+notcreated', result.output, re.MULTILINE)
def test_mech_list_with_cloud():
    """'mech list' with --cloud is handed off to cloud_run."""
    with patch('mech.utils.cloud_run') as mock_cloud_run:
        CliRunner().invoke(cli, ['--cloud', 'foo', '--debug', 'list'])
    mock_cloud_run.assert_called()


def test_mech_global_status_with_cloud():
    """'mech global-status' with --cloud is handed off to cloud_run."""
    with patch('mech.utils.cloud_run') as mock_cloud_run:
        CliRunner().invoke(cli, ['--cloud', 'foo', 'global-status', '--purge'])
    mock_cloud_run.assert_called()


def test_mech_ps_with_cloud():
    """'mech ps' with --cloud is handed off to cloud_run."""
    with patch('mech.utils.cloud_run') as mock_cloud_run:
        CliRunner().invoke(cli, ['--cloud', 'foo', 'ps', 'first'])
    mock_cloud_run.assert_called()


def test_mech_pause_with_cloud():
    """'mech pause' with --cloud is handed off to cloud_run."""
    with patch('mech.utils.cloud_run') as mock_cloud_run:
        CliRunner().invoke(cli, ['--cloud', 'foo', 'pause', 'first'])
    mock_cloud_run.assert_called()


def test_mech_upgrade_with_cloud():
    """'mech upgrade' with --cloud is handed off to cloud_run."""
    with patch('mech.utils.cloud_run') as mock_cloud_run:
        CliRunner().invoke(cli, ['--cloud', 'foo', 'upgrade', 'first'])
    mock_cloud_run.assert_called()


def test_mech_suspend_with_cloud():
    """'mech suspend' with --cloud is handed off to cloud_run."""
    with patch('mech.utils.cloud_run') as mock_cloud_run:
        CliRunner().invoke(cli, ['--cloud', 'foo', 'suspend', 'first'])
    mock_cloud_run.assert_called()


def test_mech_ip_with_cloud():
    """'mech ip' with --cloud is handed off to cloud_run."""
    with patch('mech.utils.cloud_run') as mock_cloud_run:
        CliRunner().invoke(cli, ['--cloud', 'foo', 'ip', 'first'])
    mock_cloud_run.assert_called()


def test_mech_ssh_with_cloud():
    """'mech ssh' with --cloud is rejected instead of delegated."""
    result = CliRunner().invoke(cli, ['--cloud', 'foo', 'ssh', '--command', 'uptime', 'first'])
    print('result:{}'.format(result))
    print('result.output:{}'.format(result.output))
    assert re.search('is not supported', '{}'.format(result.output))


def test_mech_ssh_config_with_cloud():
    """'mech ssh-config' with --cloud is handed off to cloud_run."""
    with patch('mech.utils.cloud_run') as mock_cloud_run:
        CliRunner().invoke(cli, ['--cloud', 'foo', 'ssh-config', 'first'])
    mock_cloud_run.assert_called()


def test_mech_destroy_with_cloud():
    """'mech destroy' with --cloud is handed off to cloud_run."""
    with patch('mech.utils.cloud_run') as mock_cloud_run:
        CliRunner().invoke(cli, ['--cloud', 'foo', 'destroy', 'first'])
    mock_cloud_run.assert_called()


def test_mech_resume_with_cloud():
    """'mech resume' with --cloud is handed off to cloud_run."""
    with patch('mech.utils.cloud_run') as mock_cloud_run:
        CliRunner().invoke(cli, ['--cloud', 'foo', 'resume', 'first'])
    mock_cloud_run.assert_called()


def test_mech_down_with_cloud():
    """'mech down' with --cloud is handed off to cloud_run."""
    with patch('mech.utils.cloud_run') as mock_cloud_run:
        CliRunner().invoke(cli, ['--cloud', 'foo', 'down', 'first'])
    mock_cloud_run.assert_called()


def test_mech_provision_with_cloud():
    """'mech provision' with --cloud is handed off to cloud_run."""
    with patch('mech.utils.cloud_run') as mock_cloud_run:
        CliRunner().invoke(cli, ['--cloud', 'foo', 'provision', 'first'])
    mock_cloud_run.assert_called()


def test_mech_port_with_cloud():
    """'mech port' with --cloud is handed off to cloud_run."""
    with patch('mech.utils.cloud_run') as mock_cloud_run:
        CliRunner().invoke(cli, ['--cloud', 'foo', 'port', 'first'])
    mock_cloud_run.assert_called()


def test_mech_add_with_cloud():
    """'mech add' with --cloud is handed off to cloud_run."""
    with patch('mech.utils.cloud_run') as mock_cloud_run:
        CliRunner().invoke(cli, ['--cloud', 'foo', 'add', 'second', 'bento/ubuntu-18.04'])
    mock_cloud_run.assert_called()


def test_mech_up_with_cloud():
    """'mech up' with --cloud is handed off to cloud_run."""
    with patch('mech.utils.cloud_run') as mock_cloud_run:
        CliRunner().invoke(cli, ['--cloud', 'foo', 'up'])
    mock_cloud_run.assert_called()


def test_mech_start_with_cloud():
    """'mech start' (alias of up) with --cloud is handed off to cloud_run."""
    with patch('mech.utils.cloud_run') as mock_cloud_run:
        CliRunner().invoke(cli, ['--cloud', 'foo', 'start'])
    mock_cloud_run.assert_called()


def test_mech_remove_with_cloud():
    """'mech remove' with --cloud is handed off to cloud_run."""
    with patch('mech.utils.cloud_run') as mock_cloud_run:
        CliRunner().invoke(cli, ['--cloud', 'foo', 'remove', 'third'])
    mock_cloud_run.assert_called()
@patch('mech.utils.load_mechfile')
@patch('mech.utils.locate', return_value=None)
def test_mech_list_with_one_without_box_version(mock_locate, mock_load_mechfile,
                                                mechfile_one_entry_without_box_version):
    """'mech list' copes with an entry that has no box_version."""
    mock_load_mechfile.return_value = mechfile_one_entry_without_box_version
    result = CliRunner().invoke(cli, ['list'])
    mock_load_mechfile.assert_called()
    mock_locate.assert_called()
    assert re.search(r'first\s+notcreated', result.output, re.MULTILINE)
@patch('mech.utils.load_mechfile')
@patch('mech.utils.locate', return_value=None)
def test_mech_list_with_one_and_debug(mock_locate, mock_load_mechfile,
                                      mechfile_one_entry):
    """'mech list --detail' under --debug reports created:False for a new VM."""
    mock_load_mechfile.return_value = mechfile_one_entry
    result = CliRunner().invoke(cli, ['--debug', 'list', '--detail'])
    mock_load_mechfile.assert_called()
    mock_locate.assert_called()
    assert re.search(r'created:False', result.output, re.MULTILINE)
@patch('mech.utils.load_mechfile')
@patch('mech.utils.locate', return_value=None)
def test_mech_list_with_two_not_created(mock_locate, mock_load_mechfile,
                                        mechfile_two_entries):
    """'mech list' shows both not-yet-created instances as notcreated."""
    mock_load_mechfile.return_value = mechfile_two_entries
    result = CliRunner().invoke(cli, ['list'])
    mock_load_mechfile.assert_called()
    mock_locate.assert_called()
    for instance in ('first', 'second'):
        assert re.search(instance + r'\s+notcreated', result.output, re.MULTILINE)
@patch('mech.utils.load_mechfile')
@patch('mech.utils.locate', return_value='/tmp/first/some.vmx')
def test_mech_list_powered_on(mock_locate, mock_load_mechfile,
                              mechfile_two_entries):
    """'mech list' shows the IP address of a powered-on instance."""
    mock_load_mechfile.return_value = mechfile_two_entries
    with patch.object(mech.mech_instance.MechInstance,
                      'get_ip', return_value="192.168.1.145") as mock_get_ip:
        result = CliRunner().invoke(cli, ['list', 'first'])
    mock_locate.assert_called()
    mock_load_mechfile.assert_called()
    mock_get_ip.assert_called()
    assert re.search(r'192.168.', result.output, re.MULTILINE)
@patch('mech.vbm.VBoxManage.ip', return_value='192.168.1.100')
@patch('mech.utils.get_fallback_executable', return_value='/tmp/VBoxManage')
@patch('mech.utils.load_mechfile')
@patch('mech.utils.locate', return_value='/tmp/first/some.vbox')
def test_mech_list_virtualbox(mock_locate, mock_load_mechfile,
                              mock_get_fallback, mock_get_ip,
                              mechfile_one_entry_virtualbox):
    """'mech list -d' for a virtualbox instance pulls state and info via VBoxManage."""
    mock_load_mechfile.return_value = mechfile_one_entry_virtualbox
    with patch.object(mech.mech_instance.MechInstance,
                      'get_vm_info', return_value="some data") as mock_get_vm_info, \
            patch.object(mech.mech_instance.MechInstance,
                         'get_vm_state', return_value="some data") as mock_get_vm_state:
        CliRunner().invoke(cli, ['list', 'first', '-d'])
    mock_locate.assert_called()
    mock_load_mechfile.assert_called()
    mock_get_ip.assert_called()
    mock_get_vm_state.assert_called()
    mock_get_vm_info.assert_called()
@patch('mech.utils.load_mechfile')
@patch('mech.utils.locate', return_value='/tmp/first/some.vmx')
def test_mech_list_powered_on_cannot_get_ip(mock_locate, mock_load_mechfile,
                                            mechfile_two_entries):
    """A running VM whose IP lookup fails is still listed as running."""
    mock_load_mechfile.return_value = mechfile_two_entries
    with patch.object(mech.mech_instance.MechInstance,
                      'get_ip', return_value=False) as mock_get_ip:
        result = CliRunner().invoke(cli, ['list', 'first'])
    mock_locate.assert_called()
    mock_load_mechfile.assert_called()
    mock_get_ip.assert_called()
    assert re.search(r'running', result.output, re.MULTILINE)
@patch('mech.utils.load_mechfile')
@patch('mech.utils.locate', return_value='/tmp/first/some.vmx')
def test_mech_list_powered_on_cannot_get_state(mock_locate, mock_load_mechfile,
                                               mechfile_two_entries):
    """A VM with neither IP nor state available still shows as running."""
    mock_load_mechfile.return_value = mechfile_two_entries
    with patch.object(mech.mech_instance.MechInstance,
                      'get_ip', return_value=False) as mock_get_ip, \
            patch.object(mech.mech_instance.MechInstance,
                         'get_vm_state', return_value=None) as mock_get_state:
        result = CliRunner().invoke(cli, ['list', 'first'])
    mock_locate.assert_called()
    mock_load_mechfile.assert_called()
    mock_get_ip.assert_called()
    mock_get_state.assert_called()
    assert re.search(r'running', result.output, re.MULTILINE)
@patch('mech.utils.load_mechfile')
@patch('mech.utils.locate', return_value='/tmp/first/some.vmx')
def test_mech_list_powered_off(mock_locate, mock_load_mechfile,
                               mechfile_two_entries):
    """A created VM with no IP address (None) is listed as poweroff."""
    mock_load_mechfile.return_value = mechfile_two_entries
    with patch.object(mech.mech_instance.MechInstance,
                      'get_ip', return_value=None) as mock_get_ip:
        result = CliRunner().invoke(cli, ['list', 'first'])
    mock_locate.assert_called()
    mock_load_mechfile.assert_called()
    mock_get_ip.assert_called()
    assert re.search(r'poweroff', result.output, re.MULTILINE)
@patch('mech.utils.get_provider', return_value=None)
@patch('os.path.exists', return_value=True)
@patch('shutil.rmtree')
@patch('mech.vmrun.VMrun.delete_vm')
@patch('mech.vmrun.VMrun.stop', return_value=True)
@patch('mech.utils.load_mechfile')
@patch('mech.utils.locate', return_value='/tmp/first/some.vmx')
def test_mech_destroy(mock_locate, mock_load_mechfile,
                      mock_vmrun_stop, mock_vmrun_delete_vm,
                      mock_rmtree, mock_path_exists, mock_get_provider,
                      mechfile_two_entries):
    """'mech destroy --force' stops the VM, deletes it and removes its directory."""
    mock_load_mechfile.return_value = mechfile_two_entries
    mock_rmtree.return_value = True
    result = CliRunner().invoke(cli, ['destroy', '--force', 'first'])
    for mocked in (mock_locate, mock_load_mechfile, mock_vmrun_stop,
                   mock_get_provider, mock_vmrun_delete_vm, mock_rmtree,
                   mock_path_exists):
        mocked.assert_called()
    assert re.search(r'Deleting', result.output, re.MULTILINE)
    assert re.search(r'Deleted', result.output, re.MULTILINE)
@patch('os.path.exists', return_value=True)
@patch('shutil.rmtree')
@patch('mech.vbm.VBoxManage.unregister')
@patch('mech.vbm.VBoxManage.stop', return_value=True)
@patch('mech.utils.load_mechfile')
@patch('mech.utils.locate', return_value='/tmp/first/some.vbox')
def test_mech_destroy_virtualbox(mock_locate, mock_load_mechfile,
                                 mock_stop, mock_unregister,
                                 mock_rmtree, mock_path_exists,
                                 mechfile_one_entry_virtualbox):
    """'mech destroy --force' on virtualbox stops, unregisters and removes the dir."""
    mock_load_mechfile.return_value = mechfile_one_entry_virtualbox
    mock_rmtree.return_value = True
    result = CliRunner().invoke(cli, ['destroy', '--force', 'first'])
    for mocked in (mock_locate, mock_load_mechfile, mock_stop,
                   mock_unregister, mock_rmtree, mock_path_exists):
        mocked.assert_called()
    assert re.search(r'Deleting', result.output, re.MULTILINE)
    assert re.search(r'Deleted', result.output, re.MULTILINE)
@patch('os.path.exists', return_value=True)
@patch('mech.utils.load_mechfile')
@patch('mech.utils.locate', return_value='/tmp/first/some.vmx')
def test_mech_destroy_prompted_and_answered_no(mock_locate, mock_load_mechfile,
                                               mock_path_exists,
                                               mechfile_two_entries):
    """Answering 'N' at the destroy confirmation prompt aborts the delete."""
    mock_load_mechfile.return_value = mechfile_two_entries
    answer_no = MagicMock(return_value='N')
    with patch('mech.utils.input', answer_no):
        result = CliRunner().invoke(cli, ['destroy', 'first'])
    mock_locate.assert_called()
    mock_load_mechfile.assert_called()
    mock_path_exists.assert_called()
    assert re.search(r'Delete aborted', result.output, re.MULTILINE)
@patch('mech.utils.load_mechfile')
@patch('mech.utils.locate', return_value=None)
def test_mech_destroy_not_created(mock_locate, mock_load_mechfile,
                                  mechfile_two_entries):
    """'mech destroy --force' on never-created instances just reports it."""
    mock_load_mechfile.return_value = mechfile_two_entries
    result = CliRunner().invoke(cli, ['destroy', '--force'])
    mock_load_mechfile.assert_called()
    mock_locate.assert_called()
    assert re.search(r'not created', result.output, re.MULTILINE)
@patch('mech.vmrun.VMrun.installed_tools', return_value='running')
@patch('mech.vmrun.VMrun.stop', return_value=True)
@patch('mech.utils.load_mechfile')
@patch('mech.utils.locate', return_value='/tmp/first/some.vmx')
def test_mech_down(mock_locate, mock_load_mechfile,
                   mock_vmrun_stop, mock_installed_tools,
                   mechfile_two_entries):
    """'mech down' with VMware tools running stops the instances cleanly."""
    mock_load_mechfile.return_value = mechfile_two_entries
    result = CliRunner().invoke(cli, ['down'])
    for mocked in (mock_locate, mock_load_mechfile,
                   mock_vmrun_stop, mock_installed_tools):
        mocked.assert_called()
    assert re.search(r'Stopped', result.output, re.MULTILINE)
@patch('mech.vbm.VBoxManage.stop', return_value=True)
@patch('mech.utils.load_mechfile')
@patch('mech.utils.locate', return_value='/tmp/first/some.vbox')
def test_mech_down_virtualbox(mock_locate, mock_load_mechfile,
                              mock_stop,
                              mechfile_one_entry_virtualbox):
    """'mech down' stops a virtualbox instance via VBoxManage."""
    mock_load_mechfile.return_value = mechfile_one_entry_virtualbox
    result = CliRunner().invoke(cli, ['down'])
    mock_locate.assert_called()
    mock_load_mechfile.assert_called()
    mock_stop.assert_called()
    assert re.search(r'Stopped', result.output, re.MULTILINE)
@patch('mech.vbm.VBoxManage.stop', return_value=None)
@patch('mech.utils.load_mechfile')
@patch('mech.utils.locate', return_value='/tmp/first/some.vbox')
def test_mech_down_fails_virtualbox(mock_locate, mock_load_mechfile,
                                    mock_stop,
                                    mechfile_one_entry_virtualbox):
    """A failing VBoxManage stop makes 'mech down' report 'Not stopped'."""
    mock_load_mechfile.return_value = mechfile_one_entry_virtualbox
    result = CliRunner().invoke(cli, ['down'])
    mock_locate.assert_called()
    mock_load_mechfile.assert_called()
    mock_stop.assert_called()
    assert re.search(r'Not stopped', result.output, re.MULTILINE)
@patch('mech.vmrun.VMrun.installed_tools', return_value=False)
@patch('mech.vmrun.VMrun.stop', return_value=None)
@patch('mech.utils.load_mechfile')
@patch('mech.utils.locate', return_value='/tmp/first/some.vmx')
def test_mech_down_no_vmware_tools_and_stopped_fails(mock_locate, mock_load_mechfile,
                                                     mock_vmrun_stop, mock_installed_tools,
                                                     mechfile_two_entries):
    """'mech down' without VMware tools and a failing stop reports 'Not stopped'."""
    mock_load_mechfile.return_value = mechfile_two_entries
    result = CliRunner().invoke(cli, ['down', 'first'])
    for mocked in (mock_locate, mock_load_mechfile,
                   mock_vmrun_stop, mock_installed_tools):
        mocked.assert_called()
    assert re.search(r'Not stopped', result.output, re.MULTILINE)
@patch('mech.utils.load_mechfile')
@patch('mech.utils.locate', return_value=None)
def test_mech_down_not_created(mock_locate, mock_load_mechfile,
                               mechfile_two_entries):
    """'mech down' on never-created instances just reports 'not created'."""
    mock_load_mechfile.return_value = mechfile_two_entries
    result = CliRunner().invoke(cli, ['down'])
    mock_load_mechfile.assert_called()
    mock_locate.assert_called()
    assert re.search(r' not created', result.output, re.MULTILINE)
@patch('mech.utils.load_mechfile')
@patch('mech.utils.locate', return_value='/tmp/first/some.vmx')
def test_mech_ip(mock_locate, mock_load_mechfile,
                 mechfile_two_entries):
    """'mech ip' prints the instance's IP address when one is available."""
    mock_load_mechfile.return_value = mechfile_two_entries
    with patch.object(mech.mech_instance.MechInstance,
                      'get_ip', return_value="192.168.1.145") as mock_get_ip:
        result = CliRunner().invoke(cli, ['ip', 'first'])
    mock_locate.assert_called()
    mock_load_mechfile.assert_called()
    mock_get_ip.assert_called()
    assert re.search(r'192.168', result.output, re.MULTILINE)
@patch('mech.utils.load_mechfile')
@patch('mech.utils.locate', return_value='/tmp/first/some.vmx')
def test_mech_ip_unknown(mock_locate, mock_load_mechfile,
                         mechfile_two_entries):
    """'mech ip' prints 'Unknown' when the IP address cannot be determined."""
    mock_load_mechfile.return_value = mechfile_two_entries
    with patch.object(mech.mech_instance.MechInstance,
                      'get_ip', return_value=None) as mock_get_ip:
        result = CliRunner().invoke(cli, ['ip', 'first'])
    mock_locate.assert_called()
    mock_load_mechfile.assert_called()
    mock_get_ip.assert_called()
    assert re.search(r'Unknown', result.output, re.MULTILINE)
@patch('mech.utils.load_mechfile')
@patch('mech.utils.locate', return_value=None)
def test_mech_ip_not_created(mock_locate, mock_load_mechfile,
                             mechfile_two_entries):
    """'mech ip' on a never-created instance reports 'VM not created'."""
    mock_load_mechfile.return_value = mechfile_two_entries
    result = CliRunner().invoke(cli, ['ip', 'first'])
    mock_load_mechfile.assert_called()
    mock_locate.assert_called()
    assert re.search(r'VM not created', result.output, re.MULTILINE)
# Mechfile-style fixture covering every provisioning flavour the CLI supports:
# 'file' entries (first), 'shell' entries with/without args and inline (second),
# an empty provisioning list (third), and a 'pyinfra' entry (fourth).
MECHFILE_WITH_PROVISIONING = {
    "first": {
        "box": "mrlesmithjr/alpine311",
        "box_version": "1578437753",
        "name": "first",
        "url": ("https://vagrantcloud.com/mrlesmithjr/boxes/alpine311/"
                "versions/1578437753/providers/vmware_desktop.box"),
        "provision": [
            {"type": "file", "source": "file1.txt", "destination": "/tmp/file1.txt"},
            {"type": "file", "source": "file2.txt", "destination": "/tmp/file2.txt"},
        ],
    },
    "second": {
        "box": "mrlesmithjr/alpine311",
        "box_version": "1578437753",
        "name": "second",
        "url": ("https://vagrantcloud.com/mrlesmithjr/boxes/alpine311/"
                "versions/1578437753/providers/vmware_desktop.box"),
        "provision": [
            {"type": "shell", "path": "file1.sh", "args": ["a=1", "b=true"]},
            {"type": "shell", "path": "file2.sh", "args": []},
            {"type": "shell", "inline": "echo hello from inline"},
        ],
    },
    "third": {
        "box": "mrlesmithjr/alpine311",
        "box_version": "1578437753",
        "name": "third",
        "url": ("https://vagrantcloud.com/mrlesmithjr/boxes/alpine311/"
                "versions/1578437753/providers/vmware_desktop.box"),
        "provision": [],
    },
    "fourth": {
        "box": "mrlesmithjr/alpine311",
        "box_version": "1578437753",
        # NOTE(review): key is 'fourth' but "name" is 'second' — looks like a
        # copy/paste slip in the fixture; confirm before relying on it.
        "name": "second",
        "url": ("https://vagrantcloud.com/mrlesmithjr/boxes/alpine311/"
                "versions/1578437753/providers/vmware_desktop.box"),
        "provision": [
            {"type": "pyinfra", "path": "file1.py", "args": ["a=1", "b=true"]},
        ],
    },
}
@patch('mech.utils.provision_file', return_value=True)
@patch('mech.utils.load_mechfile', return_value=MECHFILE_WITH_PROVISIONING)
@patch('mech.utils.locate', return_value='/tmp/first/some.vmx')
def test_mech_provision_file(mock_locate, mock_load_mechfile,
                             mock_provision_file):
    """'mech provision' runs file-type provisioning entries."""
    outcome = CliRunner().invoke(cli, ['provision', 'first'])
    mock_load_mechfile.assert_called()
    mock_locate.assert_called()
    mock_provision_file.assert_called()
    assert re.search(r' Provision ', outcome.output, re.MULTILINE)
@patch('mech.utils.load_mechfile', return_value=MECHFILE_WITH_PROVISIONING)
@patch('mech.utils.locate', return_value='/tmp/first/some.vmx')
def test_mech_provision_with_pyinfra_show(mock_locate, mock_load_mechfile):
    """'mech provision --show-only' lists pyinfra steps without running them."""
    outcome = CliRunner().invoke(cli, ['provision', '--show-only', 'fourth'])
    mock_load_mechfile.assert_called()
    mock_locate.assert_called()
    assert re.search(r' Provision ', outcome.output, re.MULTILINE)
    assert re.search(r'file1.py', outcome.output, re.MULTILINE)
@patch('mech.utils.provision_pyinfra', return_value=(None, None, None))
@patch('mech.utils.load_mechfile', return_value=MECHFILE_WITH_PROVISIONING)
@patch('mech.utils.locate', return_value='/tmp/first/some.vmx')
def test_mech_provision_with_pyinfra_fails(mock_locate, mock_load_mechfile,
                                           mock_provision_pyinfra):
    """'mech provision' reports failure when pyinfra provisioning yields nothing."""
    outcome = CliRunner().invoke(cli, ['provision', 'fourth'])
    mock_load_mechfile.assert_called()
    mock_locate.assert_called()
    mock_provision_pyinfra.assert_called()
    assert re.search(r'Not Provisioned', outcome.output, re.MULTILINE)
@patch('mech.utils.load_mechfile', return_value=MECHFILE_WITH_PROVISIONING)
@patch('mech.utils.locate', return_value=None)
def test_mech_provision_not_created(mock_locate, mock_load_mechfile):
"""Test | |
they exist in
historical data.
"""
    # Human-readable tag name.
    name = CharTextField(unique=True)
    # URL-safe identifier; nullable, so not every tag has one.
    slug = models.SlugField(null=True)
    # Coarse yes/no/skip/other grouping; used as the primary ordering key below.
    group = models.CharField(
        max_length=10,
        choices=(("yes", "yes"), ("no", "no"), ("skip", "skip"), ("other", "other")),
        null=True,
    )
    notes = CharTextField(null=True, blank=True)
    # NOTE(review): per the class docstring, disabled tags appear to be kept
    # (not deleted) because they exist in historical data — confirm.
    disabled = models.BooleanField(default=False)
    previous_names = models.JSONField(
        default=list,
        help_text="Any previous names used for this tag, used for keeping import scripts working",
        blank=True,
    )

    def __str__(self):
        return self.name

    class Meta:
        db_table = "availability_tag"
        # NOTE(review): "-group" is descending *alphabetical* on the group
        # string ("yes" > "skip" > "other" > "no"), then by name — confirm
        # this ordering is intended.
        ordering = ["-group", "name"]
class AppointmentTag(models.Model):
    """
    A tag indicating whether an appointment is needed and, if so, how it should be scheduled (e.g., by phone, online, other).
    This is modelled as a separate table so that metadata can be easily added to the tags.
    For example, has_details indicates whether the appointment_details on the report should contain more information,
    such as a URL.
    """

    # URL-safe machine identifier for the tag.
    slug = models.SlugField(unique=True)
    # Human-readable display name.
    name = models.CharField(max_length=30, unique=True)
    has_details = models.BooleanField(
        default=False,
        help_text="should the report refer to the appointment details. Unfortunately we can't enforce constraints across joins.",
    )

    def __str__(self):
        return self.name

    class Meta:
        db_table = "appointment_tag"
class Report(models.Model):
    """
    A report on the availability of the vaccine. Could be from a phone call, or a site visit, or reading a website.
    """

    class ReportSource(models.TextChoices):
        # Two-letter codes stored in report_source.
        CALLER_APP = "ca", "Caller app"
        DATA_CORRECTIONS = "dc", "Data corrections"
        WEB_BANK = "wb", "Web banking"

    location = models.ForeignKey(
        Location,
        related_name="reports",
        on_delete=models.PROTECT,
        help_text="a report must have a location",
    )
    is_pending_review = models.BooleanField(
        default=False, help_text="Reports that are pending review by our QA team"
    )
    originally_pending_review = models.BooleanField(
        null=True,
        help_text="Reports that were originally flagged as pending review",
    )
    pending_review_because = CharTextField(
        null=True, blank=True, help_text="Reason this was originally flagged for review"
    )
    claimed_by = models.ForeignKey(
        "auth.User",
        related_name="claimed_reports",
        on_delete=models.PROTECT,
        blank=True,
        null=True,
        help_text="QA reviewer who has claimed this report",
    )
    claimed_at = models.DateTimeField(
        help_text="When the QA reviewer claimed this report",
        blank=True,
        null=True,
    )
    soft_deleted = models.BooleanField(
        default=False,
        help_text="we never delete rows from this table; all deletes are soft",
    )
    soft_deleted_because = CharTextField(null=True, blank=True)
    report_source = models.CharField(
        max_length=2,
        choices=ReportSource.choices,
        default=ReportSource.CALLER_APP,
    )
    appointment_tag = models.ForeignKey(
        AppointmentTag,
        related_name="reports",
        on_delete=models.PROTECT,
        help_text="a single appointment tag, indicating how appointments are made",
    )
    appointment_details = CharTextField(
        null=True,
        blank=True,
        help_text="appointment details (e.g., a URL). Should not be used if the appointment_tag's has_details is false.",
    )
    public_notes = models.TextField(null=True, blank=True)
    internal_notes = models.TextField(
        null=True, blank=True, verbose_name="Private notes"
    )
    restriction_notes = models.TextField(null=True, blank=True)
    vaccines_offered = models.JSONField(
        null=True,
        blank=True,
        help_text="JSON array of strings representing vaccines on offer here",
    )
    website = CharTextField(
        null=True, blank=True, help_text="Update for website information"
    )
    full_address = models.TextField(
        null=True,
        blank=True,
        help_text="Update for the entire address, including city and zip code",
    )
    hours = models.TextField(
        blank=True,
        null=True,
        help_text="Update for hours information",
    )
    planned_closure = models.DateField(
        blank=True,
        null=True,
        help_text='Date this site a site plans to stop operating, "planned_closure" in our API',
        verbose_name="Last known event date",
    )
    reported_by = models.ForeignKey(
        Reporter, related_name="reports", on_delete=models.PROTECT
    )
    created_at = models.DateTimeField(
        default=timezone.now,
        help_text="the time when the report was submitted. We will interpret this as a validity time",
    )
    call_request = models.ForeignKey(
        "CallRequest",
        null=True,
        blank=True,
        related_name="reports",
        on_delete=models.SET_NULL,
        help_text="the call request that this report was based on, if any.",
    )
    availability_tags = models.ManyToManyField(
        AvailabilityTag,
        related_name="reports",
        db_table="call_report_availability_tag",
    )
    airtable_id = models.CharField(
        max_length=20,
        null=True,
        unique=True,
        help_text="Airtable record ID, if this has one",
    )
    airtable_json = models.JSONField(null=True, blank=True)
    public_id = models.SlugField(
        unique=True, help_text="ID that we expose outside of the application"
    )

    def created_at_utc(self):
        """Return created_at formatted in UTC, e.g. "1st Mar 2021 5PM UTC"."""
        tz = pytz.UTC
        created_at_utc = timezone.localtime(self.created_at, tz)
        return dateformat.format(created_at_utc, "jS M Y fA e")

    def availability(self):
        """Comma-separated availability tag names."""
        # Used by the admin list view
        return ", ".join(t.name for t in self.availability_tags.all())

    def based_on_call_request(self):
        """True if this report was filed against a queued call request."""
        return self.call_request is not None

    def full_appointment_details(self, location: Optional[Location] = None):
        """Best-effort appointment URL/details for this report.

        Falls back, in order: explicit appointment_details, the county's
        reservations URL (for county_website reports), the My Turn URL,
        the location's website, then the provider's appointments URL.
        Returns None if nothing applies.
        """
        # We often call this from contexts where the report was
        # prefetched off of a location, and fetching self.location
        # would be another DB query within a tight loop; support
        # passing it in as an extra arg.
        if location is not None:
            assert location.id == self.location_id
        else:
            location = self.location
        # Do not access self.location below; use location instead.
        if self.appointment_details:
            return self.appointment_details
        elif location.county and self.appointment_tag.slug == "county_website":
            return location.county.vaccine_reservations_url
        elif self.appointment_tag.slug == "myturn_ca_gov":
            return "https://myturn.ca.gov/"
        elif location.website:
            return location.website
        elif location.provider and location.provider.appointments_url:
            return location.provider.appointments_url
        return None

    class Meta:
        db_table = "report"

    def __str__(self):
        return "Call to {} by {} at {}".format(
            self.location, self.reported_by, self.created_at
        )

    @property
    def pid(self):
        """Public ID derived from the primary key (prefixed with "r")."""
        return "r" + pid.from_int(self.pk)

    def save(self, *args, **kwargs):
        """Save the report, assigning public_id if not already set.

        public_id defaults to the Airtable record ID when present; otherwise
        a temporary UUID-based value is saved first and then replaced with
        the pk-derived pid (which needs self.pk to exist).
        """
        set_public_id_later = False
        if (not self.public_id) and self.airtable_id:
            self.public_id = self.airtable_id
        elif not self.public_id:
            set_public_id_later = True
            self.public_id = "tmp:{}".format(uuid.uuid4())
        super().save(*args, **kwargs)
        if set_public_id_later:
            self.public_id = self.pid
            # queryset .update() avoids recursing into save() a second time
            Report.objects.filter(pk=self.pk).update(public_id=self.pid)
        location = self.location
        location.update_denormalizations()
        # location.derive_availability_and_inventory(save=True)
        # will not work here because the availability tags have not yet been saved

    def delete(self, *args, **kwargs):
        """Hard-delete the row and refresh the location's denormalized data."""
        location = self.location
        super().delete(*args, **kwargs)
        location.update_denormalizations()
        location.derive_availability_and_inventory(save=True)
class ReportReviewTag(models.Model):
    """A reusable tag attached to review notes (see ReportReviewNote.tags)."""

    tag = models.CharField(unique=True, max_length=64)
    description = models.TextField(blank=True)

    def __str__(self):
        return self.tag
class ReportReviewNote(models.Model):
    """A note recorded by a reviewer (auth.User) against a specific Report."""

    report = models.ForeignKey(
        Report, related_name="review_notes", on_delete=models.PROTECT
    )
    author = models.ForeignKey(
        "auth.User", related_name="review_notes", on_delete=models.PROTECT
    )
    created_at = models.DateTimeField(default=timezone.now)
    note = models.TextField(blank=True)
    tags = models.ManyToManyField(
        ReportReviewTag,
        related_name="review_notes",
        blank=True,
    )

    def __str__(self):
        return "{} review note on {}".format(self.author, self.report)
class EvaReport(models.Model):
    """
    A report obtained by our robotic assistant Eva. Eva only gathers a subset of the data that we would normally gather.
    """

    location = models.ForeignKey(
        Location, related_name="eva_reports", on_delete=models.PROTECT
    )
    # Name/phone as they appeared in the imported data (may differ from the
    # canonical Location record).
    name_from_import = CharTextField(null=True, blank=True)
    phone_number_from_import = CharTextField(null=True, blank=True)
    has_vaccines = models.BooleanField()
    # Whether the callee hung up during Eva's call.
    hung_up = models.BooleanField()
    valid_at = models.DateTimeField(
        help_text="the time when Eva's report was made (or our best estimate"
    )
    uploaded_at = models.DateTimeField(
        help_text="this is the time when we uploaded Eva's report. It might not even be on the same day that the report was filed"
    )
    airtable_id = models.CharField(
        max_length=20,
        null=True,
        unique=True,
        help_text="Airtable record ID, if this has one",
    )

    def __str__(self):
        return "Eva call to {} at {}".format(self.location, self.valid_at)

    class Meta:
        db_table = "eva_report"
class CallRequestReason(models.Model):
    """Lookup table of reasons a call was added to the queue (see CallRequest)."""

    short_reason = CharTextField(unique=True)
    long_reason = models.TextField(null=True, blank=True)

    def __str__(self):
        return self.short_reason

    class Meta:
        db_table = "call_request_reason"
class CallRequest(models.Model):
"""
A request to make a phone call (i.e., an entry in the call queue).
This reifies the notion of "requesting a call" so that all of the call attempts can be tracked with full history.
For example, if a bug in an app has us call a location repeatedly, we have the full record of why those calls were made.
"""
class PriorityGroup(models.IntegerChoices):
CRITICAL_1 = 1, "1-critical"
IMPORTANT_2 = 2, "2-important"
NORMAL_3 = 3, "3-normal"
LOW_4 = 4, "4-low"
NOT_PRIORITIZED_99 = 99, "99-not_prioritized"
class TipType(models.TextChoices):
EVA = "eva_report", "Eva report"
SCOOBY = "scooby_report", "Scooby report"
DATA_CORRECTIONS = "data_corrections_report", "Data corrections report"
location = models.ForeignKey(
Location, related_name="call_requests", on_delete=models.PROTECT
)
created_at = models.DateTimeField(
help_text="the time the call request entered the queue.",
null=True,
blank=True,
default=timezone.now,
)
vesting_at = models.DateTimeField(
help_text="the time at which this call request is considered 'active'. For example, a call request made by a skip will have a future vesting time."
)
claimed_by = models.ForeignKey(
Reporter,
blank=True,
null=True,
related_name="call_requests_claimed",
on_delete=models.PROTECT,
help_text="if non-null, the reporter who has currently 'claimed' this request",
)
claimed_until = models.DateTimeField(
blank=True,
null=True,
help_text="if non-null, the time until which the report is considered claimed",
)
call_request_reason = models.ForeignKey(
CallRequestReason,
related_name="call_requests",
on_delete=models.PROTECT,
help_text="a tag indicating why the call was added to the queue",
)
completed = models.BooleanField(
default=False, help_text="Has this call been completed"
)
completed_at = models.DateTimeField(
blank=True, null=True, help_text="When this call was marked as completed"
)
priority_group = models.IntegerField(
choices=PriorityGroup.choices,
default=PriorityGroup.NOT_PRIORITIZED_99,
)
priority = models.IntegerField(
default=0,
db_index=True,
help_text="Priority within this priority group - higher number means higher priority",
)
tip_type = CharTextField(
choices=TipType.choices,
blank=True,
null=True,
help_text=" the type of tip that prompted this call request, if any",
)
tip_report = models.ForeignKey(
Report,
blank=True,
null=True,
related_name="prompted_call_requests",
on_delete=models.PROTECT,
help_text="the id of the report, if any that prompted this call request",
)
def __str__(self):
return "Call request to {} vesting at {}".format(self.location, self.vesting_at)
class Meta:
db_table = "call_request"
# Group 1 comes before group 2 comes before group 3
# Within those groups, lower priority scores come before higher
# Finally we tie-break on ID, optimizing for most recently created first
ordering = ("priority_group", "-priority", "-id")
constraints = | |
to the
LCOS center. The Xm and Ym shape (array's rows x cols) should
be the same as the spot grid shape (pattern's rows x cols).
labels (2D array): array of spot labels (ints) starting from 0.
Defines rectangular regions for each spot on the LCOS image.
phase_max (float): constant phase added to the pattern (in pi units).
See :func:`single_spot_pattern` for details.
f (float): focal length of the lens created on the phase pattern
and used to focus a plane wave into a spot.
wavelen (float): wavelength of the input laser.
phase_factor (uint8): the 8-bit value [0..255] corresponding to pi
phase_wrap_neg (bool): if True wraps all the negative-phase values into
[0..phase_wrap_max]. phase_wrap_max is 2 when `phase_max` <= 2,
otherwise is the smallest multiple of 2 contained in `phase_max`.
When False, the negative phase values are set to 0.
phase_wrap_pos (bool): if True, wrap the positive phase values into
[0..phase_wrap_max]. phase_wrap_max is 2 when `phase_max` <= 2,
otherwise is the smallest multiple of 2 contained in `phase_max`.
dtype (numpy.dtype): data type to use in the returned array.
Default uint8.
debug (bool): if True prints debugging info into the log file.
Returns:
A 2D array containing phase pattern image for the defined spots.
"""
X = Xm + LCOS_X_SIZE // 2
Y = Ym + LCOS_Y_SIZE // 2
a = black_pattern(float)
for ispot, (xm, ym) in enumerate(zip(X.ravel(), Y.ravel())):
mask = labels == ispot
single_spot_pattern(xm, ym, mask=mask, a=a, phase_max=phase_max,
f=f, wavelen=wavelen)
if phase_wrap_neg or phase_wrap_pos:
# smallest multiple of 2 contained in phase_max
phase_wrap_max = 2 if phase_max <= 2 else (phase_max // 2) * 2
if phase_wrap_pos:
pos_phase = a > 0
# wrap phase between 0 and phase_wrap_max (in pi units)
a[pos_phase] = a[pos_phase] % phase_wrap_max
neg_phase = a < 0
if phase_wrap_neg:
# wrap phase between 0 and phase_wrap_max (in pi units)
a[neg_phase] = a[neg_phase] % phase_wrap_max
else:
a[neg_phase] = 0
a *= phase_factor
return a.round().astype(dtype)
def multispot_patternC(X, Y, C, phase_max, f=30e-3, wavelen=532e-9,
                       phase_factor=1, phase_wrap_pos=False, phase_wrap_neg=True,
                       dtype=np.uint8, debug=False):
    """Pattern for spots centered in X,Y and rectangular limits defined in C.

    Arguments:
        X, Y (2d arrays): center positions of the spots.
        C (3d array): for each spot, 4 values (xmin, xmax, ymin, ymax)
            defining the spot's rectangular boundaries.
        phase_max (float): constant phase added to the pattern (in pi units).
            See :func:`single_spot_pattern` for details.
        f (float): focal length of the lens created on the phase pattern
            and used to focus a plane wave into a spot.
        wavelen (float): wavelength of the input laser.
        phase_factor (uint8): the 8-bit value [0..255] corresponding to pi.
        phase_wrap_neg (bool): if True, wrap the negative phase values into
            [0..phase_wrap_max]; when False, set them to 0. phase_wrap_max
            is 2 when `phase_max` <= 2, otherwise the largest multiple of 2
            contained in `phase_max`.
        phase_wrap_pos (bool): if True, wrap the positive phase values into
            [0..phase_wrap_max] (same definition as above).
        dtype (numpy.dtype): data type of the returned array. Default uint8.
        debug (bool): debugging flag (not used inside this function).

    Returns:
        A 2D array containing the phase pattern image for the defined spots.
    """
    pattern = black_pattern(float)
    nrows, ncols = X.shape
    for row in range(nrows):
        for col in range(ncols):
            center_x, center_y = X[row, col], Y[row, col]
            xmin, xmax, ymin, ymax = C[row, col]
            # boolean mask of the LCOS pixels belonging to this spot
            region = ((XL >= xmin) * (XL <= xmax) *
                      (YL >= ymin) * (YL <= ymax))
            single_spot_pattern(center_x, center_y, mask=region, a=pattern,
                                phase_max=phase_max, f=f, wavelen=wavelen)
    if phase_wrap_neg or phase_wrap_pos:
        # smallest multiple of 2 contained in phase_max (minimum 2), pi units
        wrap_limit = 2 if phase_max <= 2 else (phase_max // 2) * 2
        if phase_wrap_pos:
            positive = pattern > 0
            pattern[positive] = pattern[positive] % wrap_limit
        negative = pattern < 0
        if phase_wrap_neg:
            pattern[negative] = pattern[negative] % wrap_limit
        else:
            pattern[negative] = 0
    pattern *= phase_factor
    return pattern.round().astype(dtype)
def get_outer_mask(C, pad=0):
    """Return a boolean mask selecting the area outside the spot pattern.

    Arguments:
        C (3d array): per-spot rectangular limits (xmin, xmax, ymin, ymax).
        pad (int): additional padding, in LCOS pixels, added around each spot.

    Returns:
        2D boolean array that is True outside every (padded) spot region.
    """
    outside = np.ones(XL.shape, dtype=bool)
    for spot_row in C:
        for xmin, xmax, ymin, ymax in spot_row:
            rows = slice(ymin - pad, ymax + 1 + pad)
            cols = slice(xmin - pad, xmax + 1 + pad)
            outside[rows, cols] = False
    return outside
def phase_patternC(Xm, Ym, lens_params, steer_params, pad=2, ref_spot=4,
                   ref_spot_dark=False, dark_all=False, nospot=False,
                   debug=False):
    """Return the pattern with the multi-spot lenses and the beam steering.

    Arguments:
        Xm, Ym (2D arrays): coordinates of spot centers with respect to the
            LCOS center. The Xm and Ym shape (array's rows x cols) should
            be the same as the spot grid shape (pattern's rows x cols).
        lens_params (dict): keyword arguments for the multispot pattern.
        steer_params (dict): keyword arguments for
            :func:`get_steer_pattern`; must contain at least 'vmax'.
        pad (uint): # pixels of zero-padding around the lens pattern before
            the steering pattern starts.
        ref_spot (int): index of the spot considered as reference (e.g. center).
        ref_spot_dark (bool): if True darken the reference spot.
        dark_all (bool): if True return an array of zeros.
        nospot (bool): if True return only the steering pattern with no spots.
        debug (bool): if True prints debugging info into the log file.

    Returns:
        A 2D array containing the complete phase pattern image with both spots
        and beam steering pattern.
    """
    steer_params.update(debug=debug)
    lens_params.update(debug=debug)
    if dark_all:
        return black_pattern()
    if nospot:
        return get_steer_pattern(**steer_params)
    # Shift from LCOS-center-relative coordinates to array coordinates.
    Xm = Xm.copy() + LCOS_X_SIZE / 2.
    Ym = Ym.copy() + LCOS_Y_SIZE / 2.
    XM, YM = np.atleast_2d(Xm), np.atleast_2d(Ym)
    if debug:
        fprint_kw(XM_YM_shape_assert=(len(XM.shape) == len(YM.shape) == 2))
    assert len(XM.shape) == len(YM.shape) == 2
    C = get_spot_limits(XM, YM, debug=debug)
    # NOTE(review): multispot_patternC is the sibling that accepts C-style
    # rectangular limits, while multispot_pattern takes a labels array —
    # confirm multispot_pattern is really the intended callee here.
    a = multispot_pattern(XM, YM, C, dtype=np.uint8, **lens_params)
    if ref_spot_dark:
        if 0 <= ref_spot < XM.size:
            # convert the flat spot index into (row, col) grid coordinates
            nrows, ncols = XM.shape
            rspot_y = ref_spot // ncols
            rspot_x = ref_spot % ncols
            xmin, xmax, ymin, ymax = C[rspot_y, rspot_x]
            a[ymin:ymax + 1, xmin:xmax + 1] = 0
        else:
            print('WARNING: ref_spot out of range: %d' % ref_spot)
    if steer_params['vmax'] > 0:
        # apply steering only outside the (padded) spot regions
        steer_img = get_steer_pattern(**steer_params)
        mask = get_outer_mask(C, pad=pad)
        a[mask] = steer_img[mask]
    return a
def phase_pattern(Xm, Ym, lens_params, steer_params, sparams=None, pad=2,
                  ref_spot=4,
                  ref_spot_dark=False, dark_all=False, nospot=False,
                  debug=False):
    """Return the pattern with the multi-spot lenses and the beam steering.

    Arguments:
        Xm, Ym (2D arrays): coordinates of spot centers with respect to the
            LCOS center. The Xm and Ym shape (array's rows x cols) should
            be the same as the spot grid shape (pattern's rows x cols).
        lens_params (dict): parameters for the multispot pattern.
        steer_params (dict): parameters for the beam steering pattern;
            must contain at least 'vmax'.
        sparams (dict or None): spot-grid geometry passed to
            :func:`get_spot_regions` (nspots_x, nspots_y, pitch_x, pitch_y,
            center_x, center_y). If None, it is derived from Xm, Ym.
        pad (uint): # pixels of zero-padding around the lens pattern before
            the steering pattern starts. NOTE: currently ignored in this
            function (see comment in the body).
        ref_spot (int): index of the spot considered as reference (e.g. center).
        ref_spot_dark (bool): if True darken the reference spot.
        dark_all (bool): if True return an array of zeros.
        nospot (bool): if True return only the steering pattern with no spots.
        debug (bool): if True prints debugging info into the log file.

    Returns:
        A 2D array containing the complete phase pattern image with both spots
        and beam steering pattern.
    """
    steer_params.update(debug=debug)
    lens_params.update(debug=debug)
    if dark_all:
        return black_pattern()
    if nospot:
        return get_steer_pattern(**steer_params)
    XM, YM = np.atleast_2d(Xm), np.atleast_2d(Ym)
    assert len(XM.shape) == len(YM.shape) == 2
    if sparams is None:
        # Derive the grid geometry from the spot centers.
        # NOTE(review): XM.shape is (rows, cols) but is unpacked as
        # (nspots_x, nspots_y) — confirm this axis mapping is intended.
        nspots_x, nspots_y = XM.shape
        pitch_x, pitch_y = pitch_from_centers(XM, YM)
        sparams = dict(nspots_x=nspots_x, nspots_y=nspots_y,
                       pitch_x=pitch_x, pitch_y=pitch_y,
                       center_x=XM.ravel().mean(), center_y=YM.ravel().mean())
    spot_regions = get_spot_regions(**sparams)
    if ref_spot_dark:
        if 0 <= ref_spot < XM.size:
            # NaN labels match no spot, so the reference spot stays dark;
            # assumes spot_regions holds floats — confirm in get_spot_regions.
            spot_regions[spot_regions == ref_spot] = np.nan
        else:
            print('WARNING: ref_spot out of range: %d' % ref_spot)
    a = multispot_pattern(XM, YM, spot_regions, dtype=np.uint8, **lens_params)
    if steer_params['vmax'] > 0:
        # NOTE: pad is ignored here
        steer_img = get_steer_pattern(**steer_params)
        mask = np.isnan(spot_regions)
        a[mask] = steer_img[mask]
    return a
def spot_coord_grid(nspots_x, nspots_y, pitch_x=25, pitch_y=25,
center_x=0, center_y=0, rotation=0):
"""Returns the coordinates of spots arranged on a rectangular grid.
Arguments:
nspots_x, nspots_y (ints): number of spots in the X and Y direction.
pitch_x, pitch_y (floats): spot pitch in X and Y direction.
center_x, center_y (floats): coordinate of the pattern center.
rotation (float): pattern rotation angle in degree.
Returns:
A tuple (X, Y) of two 2D arrays containing the grid of spot centers
coordinates with respect to the LCOS center and | |
<filename>audioSegmentation.py
import numpy
import sklearn.cluster
import time
import scipy
import os
import audioFeatureExtraction as aF
import audioTrainTest as aT
import audioBasicIO
import matplotlib.pyplot as plt
from scipy.spatial import distance
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import sklearn.discriminant_analysis
import csv
import os.path
import sklearn
import sklearn.cluster
import hmmlearn.hmm
import cPickle
import glob
""" General utility functions """
def smoothMovingAvg(inputSignal, windowLen=11):
    '''
    Smooth a 1-D signal with a moving-average (boxcar) filter.

    The signal is extended at both ends by point-reflected copies of itself
    before convolving, so the output has the same length as the input and
    edge artifacts are reduced.

    ARGUMENTS:
        - inputSignal: 1-D numpy array
        - windowLen:   averaging window length in samples; values < 3
                       disable smoothing (the input is returned unchanged)
    RETURNS:
        - the smoothed signal (same length as inputSignal)
    '''
    windowLen = int(windowLen)
    if inputSignal.ndim != 1:
        raise ValueError("smoothMovingAvg only accepts 1-dimension arrays.")
    if inputSignal.size < windowLen:
        raise ValueError("Input vector needs to be bigger than window size.")
    if windowLen < 3:
        return inputSignal
    # Point-reflect the signal around its first and last samples so the
    # averaging window is always fully populated near the edges.
    s = numpy.r_[2 * inputSignal[0] - inputSignal[windowLen - 1::-1],
                 inputSignal,
                 2 * inputSignal[-1] - inputSignal[-1:-windowLen:-1]]
    w = numpy.ones(windowLen, 'd')
    y = numpy.convolve(w / w.sum(), s, mode='same')
    # Drop the mirrored padding; the kept samples all had full window overlap.
    return y[windowLen:-windowLen + 1]
def selfSimilarityMatrix(featureVectors):
    '''
    This function computes the self-similarity matrix for a sequence of feature vectors.
    ARGUMENTS:
        - featureVectors: a numpy matrix (nDims x nVectors) whose i-th column corresponds to the i-th feature vector
    RETURNS:
        - S: the self-similarity matrix (nVectors x nVectors); S[i, j] is
             1 - cosine_distance between the normalized vectors i and j
    '''
    # normalize features before computing pairwise cosine distances
    # (removed the unused nDims/nVectors shape unpacking)
    [featureVectors2, MEAN, STD] = aT.normalizeFeatures([featureVectors.T])
    featureVectors2 = featureVectors2[0].T
    S = 1.0 - distance.squareform(distance.pdist(featureVectors2.T, 'cosine'))
    return S
def flags2segs(Flags, window):
    '''
    Convert a sequence of per-window class flags into contiguous segments.
    ARGUMENTS:
        - Flags:  a sequence of class flags (one per time window)
        - window: window duration (in seconds)
    RETURNS:
        - segs:    numpy array (numOfSegments x 2); segs[i, 0] and segs[i, 1]
                   are the start and end time of segment i (in seconds)
        - classes: a sequence of class flags; classes[i] is the class ID of
                   the i-th segment
    '''
    curFlag = 0
    curVal = Flags[curFlag]
    segsList = []
    classes = []
    # (removed unused locals preFlag, numOfSegments, curSegment and the
    # stop-flag in favor of break)
    while curFlag < len(Flags) - 1:
        preVal = curVal
        # advance until the class changes (or the sequence ends)
        while True:
            curFlag = curFlag + 1
            tempVal = Flags[curFlag]
            if tempVal != curVal or curFlag == len(Flags) - 1:
                curVal = Flags[curFlag]
                segsList.append(curFlag * window)
                classes.append(preVal)
                break
    # segment i spans from the previous boundary (0 for the first segment)
    # to its own boundary
    segs = numpy.zeros((len(segsList), 2))
    for i in range(len(segsList)):
        if i > 0:
            segs[i, 0] = segsList[i - 1]
        segs[i, 1] = segsList[i]
    return (segs, classes)
def segs2flags(segStart, segEnd, segLabel, winSize):
    '''
    This function converts segment endpoints and respective segment labels to fix-sized class labels.
    ARGUMENTS:
        - segStart: segment start points (in seconds)
        - segEnd:   segment endpoints (in seconds)
        - segLabel: segment labels
        - winSize:  fix-sized window (in seconds)
    RETURNS:
        - flags:      numpy array of class indices (one per window)
        - classNames: sorted list of unique class names (strings); sorting
                      makes the flag <-> name mapping deterministic across
                      interpreter runs (bare set iteration order is not)
    '''
    flags = []
    classNames = sorted(set(segLabel))
    curPos = winSize / 2.0
    while curPos < segEnd[-1]:
        for i in range(len(segStart)):
            if curPos > segStart[i] and curPos <= segEnd[i]:
                break
        # if curPos falls in a gap between segments, the last checked
        # segment's label is used (pre-existing behavior of the loop leak)
        flags.append(classNames.index(segLabel[i]))
        curPos += winSize
    return numpy.array(flags), classNames
def computePreRec(CM, classNames):
    '''
    This function computes the Precision, Recall and F1 measures, given a confusion matrix
    NOTE: despite the function name, the return order is (Recall, Precision, F1).
    Returns None (after printing an error) if len(classNames) != CM.shape[0].
    '''
    numOfClasses = CM.shape[0]
    if len(classNames) != numOfClasses:
        print "Error in computePreRec! Confusion matrix and classNames list must be of the same size!"
        return
    Precision = []
    Recall = []
    F1 = []
    for i, c in enumerate(classNames):
        # column sum = samples predicted as class i; row sum = true class-i samples
        # NOTE(review): if a class is never predicted (zero column/row sum)
        # these divisions produce nan/inf — confirm CM is float and nonzero.
        Precision.append(CM[i,i] / numpy.sum(CM[:,i]))
        Recall.append(CM[i,i] / numpy.sum(CM[i,:]))
        F1.append( 2 * Precision[-1] * Recall[-1] / (Precision[-1] + Recall[-1]))
    return Recall, Precision, F1
def readSegmentGT(gtFile):
    '''
    This function reads a segmentation ground truth file, following a simple CSV format with the following columns:
    <segment start>,<segment end>,<class label>
    Rows that do not have exactly 3 columns are skipped.
    ARGUMENTS:
        - gtFile: the path of the CSV segment file
    RETURNS:
        - segStart: a numpy array of segments' start positions
        - segEnd: a numpy array of segments' ending positions
        - segLabel: a list of respective class labels (strings)
    '''
    segStart = []
    segEnd = []
    segLabel = []
    # context manager guarantees the file handle is closed (the previous
    # implementation leaked it)
    with open(gtFile, "rb") as f:
        reader = csv.reader(f, delimiter=',')
        for row in reader:
            if len(row) == 3:
                segStart.append(float(row[0]))
                segEnd.append(float(row[1]))
                segLabel.append((row[2]))
    return numpy.array(segStart), numpy.array(segEnd), segLabel
def plotSegmentationResults(flagsInd, flagsIndGT, classNames, mtStep, ONLY_EVALUATE=False):
    '''
    This function plots statistics on the classification-segmentation results produced either by the fix-sized supervised method or the HMM method.
    It also computes the overall accuracy achieved by the respective method if ground-truth is available.
    ARGUMENTS:
        - flagsInd:      estimated class indices (one per mid-term window)
        - flagsIndGT:    ground-truth class indices (may be empty)
        - classNames:    list of class names (index -> name)
        - mtStep:        mid-term window step (seconds)
        - ONLY_EVALUATE: if True, skip printing per-class statistics
    RETURNS:
        - accuracy in [0, 1], or -1 when no ground truth is available
    '''
    flags = [classNames[int(f)] for f in flagsInd]
    (segs, classes) = flags2segs(flags, mtStep)
    # accuracy is computed over the overlapping prefix of the two sequences
    minLength = min(flagsInd.shape[0], flagsIndGT.shape[0])
    if minLength > 0:
        accuracy = numpy.sum(flagsInd[0:minLength] == flagsIndGT[0:minLength]) / float(minLength)
    else:
        accuracy = -1
    if not ONLY_EVALUATE:
        Duration = segs[-1, 1]
        # per-class total duration, percentage of total, and average segment duration
        SPercentages = numpy.zeros((len(classNames), 1))
        Percentages = numpy.zeros((len(classNames), 1))
        AvDurations = numpy.zeros((len(classNames), 1))
        for iSeg in range(segs.shape[0]):
            SPercentages[classNames.index(classes[iSeg])] += (segs[iSeg, 1]-segs[iSeg, 0])
        for i in range(SPercentages.shape[0]):
            Percentages[i] = 100.0 * SPercentages[i] / Duration
            S = sum(1 for c in classes if c == classNames[i])
            if S > 0:
                AvDurations[i] = SPercentages[i] / S
            else:
                AvDurations[i] = 0.0
        for i in range(Percentages.shape[0]):
            print classNames[i], Percentages[i], AvDurations[i]
        # font = {'size': 10}
        # plt.rc('font', **font)
        #
        # fig = plt.figure()
        # ax1 = fig.add_subplot(211)
        # ax1.set_yticks(numpy.array(range(len(classNames))))
        # ax1.axis((0, Duration, -1, len(classNames)))
        # ax1.set_yticklabels(classNames)
        # ax1.plot(numpy.array(range(len(flagsInd))) * mtStep + mtStep / 2.0, flagsInd)
        # if flagsIndGT.shape[0] > 0:
        #     ax1.plot(numpy.array(range(len(flagsIndGT))) * mtStep + mtStep / 2.0, flagsIndGT + 0.05, '--r')
        # plt.xlabel("time (seconds)")
        # if accuracy >= 0:
        #     plt.title('Accuracy = {0:.1f}%'.format(100.0 * accuracy))
        #
        # ax2 = fig.add_subplot(223)
        # plt.title("Classes percentage durations")
        # ax2.axis((0, len(classNames) + 1, 0, 100))
        # ax2.set_xticks(numpy.array(range(len(classNames) + 1)))
        # ax2.set_xticklabels([" "] + classNames)
        # ax2.bar(numpy.array(range(len(classNames))) + 0.5, Percentages)
        #
        # ax3 = fig.add_subplot(224)
        # plt.title("Segment average duration per class")
        # ax3.axis((0, len(classNames)+1, 0, AvDurations.max()))
        # ax3.set_xticks(numpy.array(range(len(classNames) + 1)))
        # ax3.set_xticklabels([" "] + classNames)
        # ax3.bar(numpy.array(range(len(classNames))) + 0.5, AvDurations)
        # fig.tight_layout()
        # plt.show()
    return accuracy
def evaluateSpeakerDiarization(flags, flagsGT):
    '''
    Compute cluster purity and speaker purity for a diarization result.
    Both inputs are per-window label sequences; they are truncated to their
    common length before the contingency table is built.
    RETURNS:
        - (purityClusterMean, puritySpeakerMean)
    '''
    nWindows = min(flags.shape[0], flagsGT.shape[0])
    flags = flags[0:nWindows]
    flagsGT = flagsGT[0:nWindows]

    clusterIds = numpy.unique(flags)
    speakerIds = numpy.unique(flagsGT)

    # contingency table: rows = predicted clusters, cols = ground-truth speakers
    contingency = numpy.zeros((clusterIds.shape[0], speakerIds.shape[0]))
    for t in range(nWindows):
        row = int(numpy.nonzero(clusterIds == flags[t])[0])
        col = int(numpy.nonzero(speakerIds == flagsGT[t])[0])
        contingency[row, col] += 1.0

    nClusters, nSpeakers = contingency.shape
    perSpeaker = numpy.sum(contingency, axis=0)
    perCluster = numpy.sum(contingency, axis=1)
    total = numpy.sum(contingency)

    purityCluster = numpy.zeros((nClusters, ))
    puritySpeaker = numpy.zeros((nSpeakers, ))
    for i in range(nClusters):
        purityCluster[i] = numpy.max((contingency[i, :])) / (perCluster[i])
    for j in range(nSpeakers):
        puritySpeaker[j] = numpy.max((contingency[:, j])) / (perSpeaker[j])

    # weighted means over windows
    purityClusterMean = numpy.sum(purityCluster * perCluster) / total
    puritySpeakerMean = numpy.sum(puritySpeaker * perSpeaker) / total
    return purityClusterMean, puritySpeakerMean
def trainHMM_computeStatistics(features, labels):
    '''
    This function computes the statistics used to train an HMM joint segmentation-classification model
    using a sequence of sequential features and respective labels
    ARGUMENTS:
        - features:  a numpy matrix of feature vectors (numOfDimensions x numOfWindows)
        - labels:    a numpy array of class indices (numOfWindows x 1)
    RETURNS:
        - startprob: vector of prior class probabilities (numOfClasses x 1)
        - transmat:  transition matrix (numOfClasses x numOfClasses)
        - means:     per-class mean vectors (numOfClasses x numOfDimensions)
        - cov:       per-class std-deviation vectors (numOfClasses x numOfDimensions);
                     i.e. a diagonal (not full) covariance parameterization
    '''
    uLabels = numpy.unique(labels)
    nComps = len(uLabels)
    nFeatures = features.shape[0]
    # if there are more labels than feature windows, truncate the labels
    if features.shape[1] < labels.shape[0]:
        print "trainHMM warning: number of short-term feature vectors must be greater or equal to the labels length!"
        labels = labels[0:features.shape[1]]
    # compute prior probabilities:
    startprob = numpy.zeros((nComps,))
    for i, u in enumerate(uLabels):
        startprob[i] = numpy.count_nonzero(labels == u)
    startprob = startprob / startprob.sum()        # normalize prior probabilities
    # compute transition matrix:
    transmat = numpy.zeros((nComps, nComps))
    for i in range(labels.shape[0]-1):
        transmat[int(labels[i]), int(labels[i + 1])] += 1
    for i in range(nComps):                 # normalize rows of transition matrix:
        transmat[i, :] /= transmat[i, :].sum()
    # per-class means and (diagonal) deviations:
    means = numpy.zeros((nComps, nFeatures))
    for i in range(nComps):
        means[i, :] = numpy.matrix(features[:, numpy.nonzero(labels == uLabels[i])[0]].mean(axis=1))
    cov = numpy.zeros((nComps, nFeatures))
    for i in range(nComps):
        #cov[i,:,:] = numpy.cov(features[:,numpy.nonzero(labels==uLabels[i])[0]])  # use this lines if HMM using full gaussian distributions are to be used!
        cov[i, :] = numpy.std(features[:, numpy.nonzero(labels == uLabels[i])[0]], axis=1)
    return startprob, transmat, means, cov
def trainHMM_fromFile(wavFile, gtFile, hmmModelName, mtWin, mtStep):
'''
This function trains a HMM model for segmentation-classification using a single annotated audio file
ARGUMENTS:
- wavFile: the path of the audio filename
- gtFile: the path of the ground truth filename
(a csv file of the form <segment start in seconds>,<segment end in seconds>,<segment label> in each row
- hmmModelName: the name of the HMM model to be stored
- mtWin: mid-term window size
- mtStep: mid-term window step
RETURNS:
- hmm: an object to the resulting HMM
- classNames: a list of classNames
After training, hmm, classNames, along with the mtWin and mtStep values | |
<reponame>juliawestermayr/schnetpack
import argparse
from argparse import ArgumentParser
from schnetpack.datasets import (
QM9,
ANI1,
MD17,
OrganicMaterialsDatabase,
MaterialsProject,
)
class StoreDictKeyPair(argparse.Action):
    """
    Argparse action that collects ``KEY=VAL`` tokens into a dict stored on
    the namespace, casting each value with ``val_type``.

    From https://stackoverflow.com/a/42355279
    """

    def __init__(self, option_strings, dest, nargs=None, val_type=str, **kwargs):
        self._nargs = nargs
        self.val_type = val_type
        super(StoreDictKeyPair, self).__init__(
            option_strings, dest, nargs=nargs, **kwargs
        )

    def __call__(self, parser, namespace, values, option_string=None):
        my_dict = {}
        for kv in values:
            # split on the first '=' only, so values may themselves contain '='
            k, v = kv.split("=", 1)
            # typecast
            if self.val_type is int:
                # int(float(v)) also accepts scientific notation like "1e3"
                v = int(float(v))
            else:
                v = self.val_type(v)
            my_dict[k] = v
        setattr(namespace, self.dest, my_dict)
def get_mode_parsers():
    """
    Build the argument parsers for the three script modes.

    Returns:
        tuple: (json_parser, train_parser, eval_parser). All are created with
        ``add_help=False`` so they can be used as ``parents`` of subparsers.
    """
    # json parser
    json_parser = ArgumentParser(add_help=False)
    # NOTE(review): json_path is positional, so default=None is unreachable
    # unless nargs="?" is added; kept as-is for backward compatibility.
    json_parser.add_argument(
        "json_path",
        type=str,
        help="Path to argument file. (default: %(default)s)",
        default=None,
    )
    # train parser
    train_parser = ArgumentParser(add_help=False)
    train_parser.add_argument("datapath", help="Path to dataset")
    train_parser.add_argument("modelpath", help="Path of stored model")
    train_parser.add_argument(
        "--cuda", help="Set flag to use GPU(s) for training", action="store_true"
    )
    train_parser.add_argument(
        "--Huber", help="Set flag to use the Huber loss instead of the L2 loss for better handling of outliers.", action="store_true"
    )
    train_parser.add_argument(
        "--parallel",
        help="Run data-parallel on all available GPUs (specify with environment"
        " variable CUDA_VISIBLE_DEVICES)",
        action="store_true",
    )
    train_parser.add_argument(
        "--seed", type=int, default=None, help="Set random seed for torch and numpy."
    )
    # was: `type =str, default = None` -- PEP 8 spacing fixed
    train_parser.add_argument(
        "--mlmm", type=str, default=None, help="Enables training of only the QM region for the Delta-Learning approach in QMMM. Requires a file name as argument. "
    )
    train_parser.add_argument(
        "--overwrite", help="Remove previous model directory.", action="store_true"
    )
    # data split
    train_parser.add_argument(
        "--split_path", help="Path / destination of npz with data splits", default=None
    )
    train_parser.add_argument(
        "--split",
        help="Split into [train] [validation] and use remaining for testing",
        type=int,
        nargs=2,
        default=[None, None],
    )
    train_parser.add_argument(
        "--max_epochs",
        type=int,
        help="Maximum number of training epochs (default: %(default)s)",
        default=5000,
    )
    train_parser.add_argument(
        "--max_steps",
        type=int,
        help="Maximum number of training steps (default: %(default)s)",
        default=None,
    )
    train_parser.add_argument(
        "--lr",
        type=float,
        help="Initial learning rate (default: %(default)s)",
        default=1e-4,
    )
    train_parser.add_argument(
        "--lr_patience",
        type=int,
        help="Epochs without improvement before reducing the learning rate "
        "(default: %(default)s)",
        default=25,
    )
    train_parser.add_argument(
        "--lr_decay",
        type=float,
        help="Learning rate decay (default: %(default)s)",
        default=0.8,
    )
    train_parser.add_argument(
        "--lr_min",
        type=float,
        help="Minimal learning rate (default: %(default)s)",
        default=1e-6,
    )
    train_parser.add_argument(
        "--logger",
        help="Choose logger for training process (default: %(default)s)",
        choices=["csv", "tensorboard"],
        default="csv",
    )
    train_parser.add_argument(
        "--log_every_n_epochs",
        type=int,
        help="Log metrics every given number of epochs (default: %(default)s)",
        default=1,
    )
    # NOTE(review): --n_epochs duplicates --max_epochs (identical help text,
    # different default); both are kept since callers may rely on either dest.
    train_parser.add_argument(
        "--n_epochs",
        type=int,
        help="Maximum number of training epochs (default: %(default)s)",
        default=1000,
    )
    train_parser.add_argument(
        "--checkpoint_interval",
        type=int,
        help="Store checkpoint every n epochs (default: %(default)s)",
        default=1,
    )
    train_parser.add_argument(
        "--keep_n_checkpoints",
        type=int,
        help="Number of checkpoints that will be stored (default: %(default)s)",
        default=3,
    )
    # evaluation parser
    eval_parser = ArgumentParser(add_help=False)
    eval_parser.add_argument("datapath", help="Path to dataset")
    eval_parser.add_argument("modelpath", help="Path of stored model")
    eval_parser.add_argument(
        "--cuda", help="Set flag to use GPU(s) for evaluation", action="store_true"
    )
    eval_parser.add_argument(
        "--parallel",
        help="Run data-parallel on all available GPUs (specify with environment"
        " variable CUDA_VISIBLE_DEVICES)",
        action="store_true",
    )
    # was: `type =str, default = None` -- PEP 8 spacing fixed
    eval_parser.add_argument(
        "--mlmm", type=str, default=None, help="Enables training of only the QM region for the Delta-Learning approach in QMMM. Requires a file name as argument. "
    )
    eval_parser.add_argument(
        "--batch_size",
        type=int,
        help="Mini-batch size for evaluation (default: %(default)s)",
        default=100,
    )
    eval_parser.add_argument(
        "--split",
        help="Evaluate trained model on given split",
        choices=["train", "validation", "test"],
        default=["test"],
        nargs="+",
    )
    eval_parser.add_argument(
        "--overwrite", help="Remove previous evaluation files", action="store_true"
    )
    return json_parser, train_parser, eval_parser
def get_model_parsers():
    """
    Build the argument parsers for the two model representations.

    Returns:
        tuple: (schnet_parser, wacsf_parser), both with ``add_help=False`` so
        they can serve as ``parents`` of model subparsers.
    """
    # --- SchNet representation ---
    schnet_parser = ArgumentParser(add_help=False)
    schnet_parser.add_argument(
        "--features", type=int, default=128, help="Size of atom-wise representation"
    )
    schnet_parser.add_argument(
        "--interactions", type=int, default=6, help="Number of interaction blocks"
    )
    schnet_parser.add_argument(
        "--cutoff_function",
        choices=["hard", "cosine", "mollifier"],
        default="cosine",
        help="Functional form of the cutoff",
    )
    schnet_parser.add_argument(
        "--num_gaussians",
        type=int,
        help="Number of Gaussians to expand distances (default: %(default)s)",
        default=50,
    )
    schnet_parser.add_argument(
        "--normalize_filter",
        help="Normalize convolution filters by number of neighbors",
        action="store_true",
    )

    # --- wACSF representation ---
    wacsf_parser = ArgumentParser(add_help=False)
    wacsf_parser.add_argument(
        "--radial",
        type=int,
        help="Number of radial symmetry functions (default: %(default)s)",
        default=22,
    )
    wacsf_parser.add_argument(
        "--angular",
        type=int,
        help="Number of angular symmetry functions (default: %(default)s)",
        default=5,
    )
    wacsf_parser.add_argument(
        "--zetas",
        type=int,
        help="List of zeta exponents used for angle resolution (default: %(default)s)",
        nargs="+",
        default=[1],
    )
    wacsf_parser.add_argument(
        "--standardize",
        help="Standardize wACSF before atomistic network.",
        action="store_true",
    )
    # atomistic network settings
    wacsf_parser.add_argument(
        "--n_nodes",
        type=int,
        help="Number of nodes in atomic networks (default: %(default)s)",
        default=100,
    )
    wacsf_parser.add_argument(
        "--n_layers",
        type=int,
        help="Number of layers in atomic networks (default: %(default)s)",
        default=2,
    )
    # advanced wACSF settings
    wacsf_parser.add_argument(
        "--centered",
        help="Use centered Gaussians for radial functions",
        action="store_true",
    )
    wacsf_parser.add_argument(
        "--crossterms", help="Use crossterms in angular functions", action="store_true"
    )
    wacsf_parser.add_argument(
        "--behler", help="Switch to conventional ACSF", action="store_true"
    )
    wacsf_parser.add_argument(
        "--elements",
        nargs="+",
        default=["H", "C", "N", "O", "F"],
        help="List of elements to be used for symmetry functions "
        "(default: %(default)s).",
    )
    return schnet_parser, wacsf_parser
def get_data_parsers():
    """
    Build one argument parser per supported dataset.

    Returns:
        tuple: (qm9_parser, ani1_parser, matproj_parser, md17_parser,
        omdb_parser, custom_data_parser), all created with ``add_help=False``
        for use as ``parents`` of dataset subparsers.
    """
    # qm9
    qm9_parser = ArgumentParser(add_help=False)
    qm9_parser.add_argument(
        "--property",
        type=str,
        help="Database property to be predicted (default: %(default)s)",
        default=QM9.U0,
        choices=[
            QM9.A,
            QM9.B,
            QM9.C,
            QM9.mu,
            QM9.alpha,
            QM9.homo,
            QM9.lumo,
            QM9.gap,
            QM9.r2,
            QM9.zpve,
            QM9.U0,
            QM9.U,
            QM9.H,
            QM9.G,
            QM9.Cv,
        ],
    )
    qm9_parser.add_argument(
        "--cutoff",
        type=float,
        default=10.0,
        help="Cutoff radius of local environment (default: %(default)s)",
    )
    qm9_parser.add_argument(
        "--batch_size",
        type=int,
        help="Mini-batch size for training (default: %(default)s)",
        default=100,
    )
    qm9_parser.add_argument(
        "--environment_provider",
        type=str,
        default="simple",
        choices=["simple", "ase", "torch"],
        help="Environment provider for dataset. (default: %(default)s)",
    )
    qm9_parser.add_argument(
        "--remove_uncharacterized",
        help="Remove uncharacterized molecules from QM9 (default: %(default)s)",
        action="store_true",
    )
    # ani1
    ani1_parser = ArgumentParser(add_help=False)
    ani1_parser.add_argument(
        "--property",
        type=str,
        help="Database property to be predicted (default: %(default)s)",
        default=ANI1.energy,
        choices=[ANI1.energy],
    )
    ani1_parser.add_argument(
        "--cutoff",
        type=float,
        default=10.0,
        help="Cutoff radius of local environment (default: %(default)s)",
    )
    ani1_parser.add_argument(
        "--batch_size",
        type=int,
        help="Mini-batch size for training (default: %(default)s)",
        default=100,
    )
    ani1_parser.add_argument(
        "--environment_provider",
        type=str,
        default="simple",
        choices=["simple", "ase", "torch"],
        help="Environment provider for dataset. (default: %(default)s)",
    )
    ani1_parser.add_argument(
        "--num_heavy_atoms",
        type=int,
        help="Number of heavy atoms that will be loaded into the database."
        " (default: %(default)s)",
        default=8,
    )
    # materials project
    matproj_parser = ArgumentParser(add_help=False)
    matproj_parser.add_argument(
        "--property",
        type=str,
        help="Database property to be predicted" " (default: %(default)s)",
        default=MaterialsProject.EformationPerAtom,
        choices=[
            MaterialsProject.EformationPerAtom,
            MaterialsProject.EPerAtom,
            MaterialsProject.BandGap,
            MaterialsProject.TotalMagnetization,
        ],
    )
    matproj_parser.add_argument(
        "--cutoff",
        type=float,
        default=5.0,
        help="Cutoff radius of local environment (default: %(default)s)",
    )
    matproj_parser.add_argument(
        "--batch_size",
        type=int,
        help="Mini-batch size for training (default: %(default)s)",
        default=32,
    )
    matproj_parser.add_argument(
        "--environment_provider",
        type=str,
        default="torch",
        choices=["simple", "ase", "torch"],
        help="Environment provider for dataset. (default: %(default)s)",
    )
    matproj_parser.add_argument(
        "--apikey",
        help="API key for Materials Project (see https://materialsproject.org/open)",
        default=None,
    )
    matproj_parser.add_argument(
        "--timestamp",
        help="Timestamp at which to reconstruct the dataset",
        default="2017-12-04 14:20",
    )
    # md17
    md17_parser = ArgumentParser(add_help=False)
    md17_parser.add_argument(
        "--property",
        type=str,
        help="Database property to be predicted" " (default: %(default)s)",
        default=MD17.energy,
        choices=[MD17.energy],
    )
    md17_parser.add_argument(
        "--cutoff",
        type=float,
        default=5.0,
        help="Cutoff radius of local environment (default: %(default)s)",
    )
    md17_parser.add_argument(
        "--batch_size",
        type=int,
        help="Mini-batch size for training (default: %(default)s)",
        default=100,
    )
    md17_parser.add_argument(
        "--environment_provider",
        type=str,
        default="simple",
        choices=["simple", "ase", "torch"],
        help="Environment provider for dataset. (default: %(default)s)",
    )
    md17_parser.add_argument(
        "--ignore_forces", action="store_true", help="Ignore forces during training."
    )
    md17_parser.add_argument(
        "--molecule",
        type=str,
        help="Choose molecule inside the MD17 dataset. (default: %(default)s)",
        default="ethanol",
        choices=MD17.datasets_dict.keys(),
    )
    md17_parser.add_argument(
        "--rho",
        type=float,
        help="Energy-force trade-off. For rho=0, use forces only. "
        "(default: %(default)s)",
        default=0.1,
    )
    # omdb
    omdb_parser = ArgumentParser(add_help=False)
    omdb_parser.add_argument(
        "--property",
        type=str,
        help="Database property to be predicted (default: %(default)s)",
        default=OrganicMaterialsDatabase.BandGap,
        choices=[OrganicMaterialsDatabase.BandGap],
    )
    omdb_parser.add_argument(
        "--cutoff",
        type=float,
        default=5.0,
        help="Cutoff radius of local environment (default: %(default)s)",
    )
    omdb_parser.add_argument(
        "--batch_size",
        type=int,
        help="Mini-batch size for training (default: %(default)s)",
        default=32,
    )
    omdb_parser.add_argument(
        "--environment_provider",
        type=str,
        default="torch",
        choices=["simple", "ase", "torch"],
        help="Environment provider for dataset. (default: %(default)s)",
    )
    # custom dataset
    custom_data_parser = ArgumentParser(add_help=False)
    custom_data_parser.add_argument(
        "--property",
        type=str,
        help="Database property to be predicted (default: %(default)s)",
        default="energy",
    )
    custom_data_parser.add_argument(
        "--cutoff",
        type=float,
        default=10.0,
        help="Cutoff radius of local environment (default: %(default)s)",
    )
    custom_data_parser.add_argument(
        "--batch_size",
        type=int,
        help="Mini-batch size for training (default: %(default)s)",
        default=100,
    )
    custom_data_parser.add_argument(
        "--environment_provider",
        type=str,
        default="simple",
        choices=["simple", "ase", "torch"],
        help="Environment provider for dataset. (default: %(default)s)",
    )
    custom_data_parser.add_argument(
        "--derivative",
        type=str,
        help="Derivative of dataset property to be predicted (default: %(default)s)",
        default=None,
    )
    custom_data_parser.add_argument(
        "--negative_dr",
        action="store_true",
        help="Multiply derivatives with -1 for training. (default: %(default)s)",
    )
    # help string fixed: contained a mis-encoded character (U+201A) after "for"
    custom_data_parser.add_argument(
        "--force",
        type=str,
        help="Name of force property in database. Alias for derivative + setting "
        "negative_dr. (default: %(default)s)",
        default=None,
    )
    custom_data_parser.add_argument(
        "--contributions",
        type=str,
        help="Contributions of dataset property to be predicted (default: %(default)s)",
        default=None,
    )
    custom_data_parser.add_argument(
        "--stress",
        type=str,
        help="Train on stress tensor if not None (default: %(default)s)",
        default=None,
    )
    custom_data_parser.add_argument(
        "--aggregation_mode",
        type=str,
        help="Select mode for aggregating atomic properties. (default: %(default)s)",
        default="sum",
    )
    custom_data_parser.add_argument(
        "--output_module",
        type=str,
        help="Select matching output module for selected property. (default: %("
        "default)s)",
        default="atomwise",
        choices=[
            "atomwise",
            "elemental_atomwise",
            "dipole_moment",
            "elemental_dipole_moment",
            "polarizability",
            "isotropic_polarizability",
            "electronic_spatial_extent",
            "charges",
        ],
    )
    custom_data_parser.add_argument(
        "--rho",
        action=StoreDictKeyPair,
        nargs="+",
        metavar="KEY=VAL",
        help="Define loss tradeoff weights with prop=weight. (default: %(default)s)",
        default=dict(),
        val_type=float,
    )
    return (
        qm9_parser,
        ani1_parser,
        matproj_parser,
        md17_parser,
        omdb_parser,
        custom_data_parser,
    )
def build_parser():
main_parser = ArgumentParser()
# get parsers
json_parser, train_parser, eval_parser = get_mode_parsers()
schnet_parser, wacsf_parser = get_model_parsers()
(
qm9_parser,
ani1_parser,
matproj_parser,
md17_parser,
omdb_parser,
custom_data_parser,
) = get_data_parsers()
# subparser structure
# mode
mode_subparsers = main_parser.add_subparsers(dest="mode", help="main arguments")
| |
<reponame>nimachm81/SpheroidalFuncs
__all__ = ["SpheroidScattering"]
from scipy.integrate import quadrature, quad
import numpy as np
from scipy import constants
import matplotlib.pyplot as plt
import matplotlib
from scipy.special import pro_rad1, pro_rad2, pro_ang1
import specfun
class SpheroidScattering:
def __init__(self, tipRadius, length):
self.tipRadius = tipRadius
self.length = length
self.a, self.b, self.ksi, self.d = self.Get_ProlateSpheroidParameters(tipRadius, length)
self.N_max = 8
def SetFrequency(self, freq):
self.freq = freq
self.lambda0 = constants.c / freq
self.k = 2.0*np.pi/self.lambda0
self.c = self.k*self.d/2.0
    def SetIncidentAngle(self, theta):
        """Set the incidence angle theta (radians); used as theta_0 in the incident-field expansion."""
        self.theta = theta
    def SetFieldAmp(self, E0):
        """Set the incident field amplitude E0."""
        self.E0 = E0
    def SetNumberOfHarmonics(self, N_max):
        """Set N_max, the number of spheroidal harmonics used in the expansions."""
        self.N_max = N_max
def Map2DIndexTo1D(self, m_0, m_1, ind_start = 0):
## m = m_0 .. m_1-1 n = m ... m_1-1
map2DTo1D, map1DTo2D = {}, {}
ind = ind_start
for m in range(m_0, m_1):
for n in range(m, m_1):
map2DTo1D[(m,n)] = ind
map1DTo2D[ind] = (m, n)
ind += 1
return map2DTo1D, map1DTo2D
    def GetIncExpansionCoeffs_Amn(self, m, n):
        """
        Coefficient of the (m, n) spheroidal harmonic in the expansion of the
        incident plane wave (amplitude E0, incidence angle self.theta).
        """
        E0, k, theta_0, c = self.E0, self.k, self.theta, self.c
        # Neumann factor: 1 for m == 0, otherwise 2
        eps_m = 2.0
        if m==0:
            eps_m = 1.0
        # normalization integral of the angular function S_mn
        # (external helper defined elsewhere in this module)
        N_mn = GetInt_Sm_mpn_Sm_mpN(c, m, n-m, n-m)
        # pro_ang1 returns (value, derivative); [0] is S_mn(c, cos(theta_0))
        A_mn = 2.0* eps_m * pro_ang1(m, n, c, np.cos(theta_0))[0] / N_mn
        # 1j**((n-1) % 4) equals i**(n-1) while avoiding large exponents
        j_nm1 = 1j**((n-1)%4)
        return E0/k * j_nm1 * A_mn
    def ConstructMatrix(self):
        """
        Assemble the boundary-condition linear system A x = b for the scattered
        field coefficients (alpha, beta, gamma blocks, indexed via
        Map2DIndexTo1D).  Radial/angular integrals come from external helpers
        (GetRadialFunc, GetDerivativeRadialFunc, GetInt_*) defined elsewhere
        in this module.  Returns (A, b).
        """
        k, theta_0, ksi_0, c_0 = self.k, self.theta, self.ksi, self.c
        E0 = self.E0
        N_t = self.N_max
        # index maps: alpha over (m, n) with 0 <= m, beta over (m, n) with 1 <= m,
        # gamma as a flat list appended after both blocks
        alphaInd_2DTo1D, alphaInd_1DTo2D = self.Map2DIndexTo1D(0, N_t)
        n_total = len(alphaInd_2DTo1D)
        betaInd_2DTo1D, betaInd_1DTo2D = self.Map2DIndexTo1D(1, N_t+1, n_total)
        n_total += len(betaInd_2DTo1D)
        gammaInd = [n_total + i for i in range(N_t - 1)]
        n_total += len(gammaInd)
        ## construct coefficient matrix
        A = np.zeros((n_total, n_total), dtype=complex)
        b = np.zeros(n_total, dtype=complex)
        ## eta: cos(m*phi) m=1..Nt
        for m in range(0, N_t):
            for N in range(m, N_t):
                ind_row = alphaInd_2DTo1D[(m, N)]
                for n in range(m, N_t):
                    ind_col = alphaInd_2DTo1D[(m, n)]
                    # radial-function kind 4 = outgoing (scattered) wave,
                    # kind 1 = regular (incident) wave in the rhs terms below
                    elem = ((ksi_0**2 - 1)*GetDerivativeRadialFunc(4, m, n, c_0, ksi_0) \
                            - ksi_0*m*GetRadialFunc(4, m, n, c_0, ksi_0)) \
                            * GetInt_Sm_mpn_Sm_mpN(c_0, m, n-m, N-m)
                    A[ind_row, ind_col] += elem
                    ind_col = betaInd_2DTo1D[(m+1, n+1)]
                    elem = -2.0*np.sqrt(ksi_0**2 - 1)*(m+1)*GetRadialFunc(4, m+1, n+1, c_0, ksi_0) \
                            * GetInt_Smp1_mpnp1_Sm_mpN_x_div_sqrt_1mx2(c_0, m, n-m, N-m)
                    A[ind_row, ind_col] += elem
                    ##---- rhs
                    A_mn = self.GetIncExpansionCoeffs_Amn(m, n)
                    b[ind_row] -= A_mn * \
                            ( \
                            -ksi_0*m*GetRadialFunc(1, m, n, c_0, ksi_0) \
                            + (ksi_0**2 - 1)*GetDerivativeRadialFunc(1, m, n, c_0, ksi_0) \
                            ) * GetInt_Sm_mpn_Sm_mpN(c_0, m, n-m, N-m)
                    A_mp2np2 = self.GetIncExpansionCoeffs_Amn(m+2, n+2)
                    b[ind_row] -= A_mp2np2 * \
                            ( \
                            ksi_0*(m+2)*GetRadialFunc(1, m+2, n+2, c_0, ksi_0) \
                            + (ksi_0**2 - 1)*GetDerivativeRadialFunc(1, m+2, n+2, c_0, ksi_0) \
                            ) * GetInt_Smp2_mpnp2_Sm_mpN(c_0, m, n-m, N-m)
                    if m==0:
                        # the m == 0 azimuthal term contributes an extra rhs piece
                        A_0n = self.GetIncExpansionCoeffs_Amn(0, n)
                        b[ind_row] -= A_0n * (ksi_0**2 - 1)*GetDerivativeRadialFunc(1, m, n, c_0, ksi_0) \
                                * GetInt_Sm_mpn_Sm_mpN(c_0, m, n-m, N-m)
        ## eta: cos(0*phi)
        for N in range(N_t - 1):
            ind_row = gammaInd[N]
            for n in range(N_t - 1):
                ind_col = gammaInd[n]
                elem = (-(ksi_0**2 - 1)*GetDerivativeRadialFunc(4, 1, n+1, c_0, ksi_0) \
                        - ksi_0*1*GetRadialFunc(4, 1, n+1, c_0, ksi_0)) \
                        * GetInt_Sm_mpn_Sm_mpN(c_0, 1, n, N)
                A[ind_row, ind_col] += elem
                ## rhs
                A_1np1 = self.GetIncExpansionCoeffs_Amn(1, n+1)
                b[ind_row] -= A_1np1 * \
                        ( \
                        ksi_0*GetRadialFunc(1, 1, n+1, c_0, ksi_0) \
                        + (ksi_0**2 - 1)*GetDerivativeRadialFunc(1, 1, n+1, c_0, ksi_0) \
                        ) * GetInt_Sm_mpn_Sm_mpN(c_0, 1, n, N)
        ## phi: sin(m*phi), m=1...Nt-2
        for m in range(0, N_t):
            for N in range(m, N_t):
                ind_row = betaInd_2DTo1D[(m+1, N+1)]
                for n in range(m, N_t):
                    ind_col = alphaInd_2DTo1D[(m, n)]
                    elem = (ksi_0**2 - 1)*GetDerivativeRadialFunc(4, m, n, c_0, ksi_0) \
                            * GetInt_Sm_mpn_Sm_mpN_x(c_0, m, n-m, N-m) \
                            + \
                            ksi_0 * GetRadialFunc(4, m, n, c_0, ksi_0) \
                            * GetInt_dxSm_mpn_Sm_mpN_1mx2(c_0, m, n-m, N-m)
                    A[ind_row, ind_col] += elem
                    ind_col = betaInd_2DTo1D[(m+1, n+1)]
                    elem = 2.0*np.sqrt(ksi_0**2 - 1) * \
                            ( \
                            GetRadialFunc(4, m+1, n+1, c_0, ksi_0) \
                            * GetInt_dxSmp1_mpnp1_Sm_mpN_x_sqrt_1mx2(c_0, m, n-m, N-m) \
                            - \
                            ksi_0 * GetDerivativeRadialFunc(4, m+1, n+1, c_0, ksi_0) \
                            * GetInt_Smp1_mpnp1_Sm_mpN_sqrt_1mx2(c_0, m, n-m, N-m) \
                            )
                    A[ind_row, ind_col] += elem
                    ##---- rhs
                    A_mn = self.GetIncExpansionCoeffs_Amn(m, n)
                    b[ind_row] -= A_mn * \
                            ( \
                            ksi_0*GetRadialFunc(1, m, n, c_0, ksi_0)*GetInt_dxSm_mpn_Sm_mpN_1mx2(c_0, m, n-m, N-m) \
                            + (ksi_0**2 - 1)*GetDerivativeRadialFunc(1, m, n, c_0, ksi_0) \
                            *GetInt_Sm_mpn_Sm_mpN_x(c_0, m, n-m, N-m) \
                            )
                    A_mp2np2 = self.GetIncExpansionCoeffs_Amn(m+2, n+2)
                    b[ind_row] += A_mp2np2 * \
                            ( \
                            ksi_0*GetRadialFunc(1, m+2, n+2, c_0, ksi_0)*GetInt_dxSmp2_mpnp2_Sm_mpN_1mx2(c_0, m, n-m, N-m) \
                            + (ksi_0**2 - 1)*GetDerivativeRadialFunc(1, m+2, n+2, c_0, ksi_0) \
                            *GetInt_Smp2_mpnp2_Sm_mpN_x(c_0, m, n-m, N-m) \
                            )
                    if m==0:
                        A_0n = self.GetIncExpansionCoeffs_Amn(0, n)
                        b[ind_row] -= A_0n * \
                                ( \
                                ksi_0*GetRadialFunc(1, m, n, c_0, ksi_0)*GetInt_dxSm_mpn_Sm_mpN_1mx2(c_0, m, n-m, N-m) \
                                + (ksi_0**2 - 1)*GetDerivativeRadialFunc(1, m, n, c_0, ksi_0) \
                                *GetInt_Sm_mpn_Sm_mpN_x(c_0, m, n-m, N-m) \
                                )
        return A, b
def GetAlphaBetaGamma_from_X(self, x):
N_t = self.N_max
alphaInd_2DTo1D, alphaInd_1DTo2D = self.Map2DIndexTo1D(0, N_t)
n_total = len(alphaInd_2DTo1D)
n_end_alpha = n_total
betaInd_2DTo1D, betaInd_1DTo2D = self.Map2DIndexTo1D(1, N_t+1, n_total)
n_total += len(betaInd_2DTo1D)
n_end_beta = n_total
gammaInd = [n_total + i for i in range(N_t - 1)]
n_total += len(gammaInd)
alpha = np.zeros((N_t, N_t), dtype=complex)
beta = np.zeros((N_t+1, N_t+1), dtype=complex)
gamma = np.zeros(N_t, dtype=complex)
for i in range(n_end_alpha):
alpha[alphaInd_1DTo2D[i]] = x[i]
print(betaInd_1DTo2D)
for i in range(n_end_alpha, n_end_beta):
beta[betaInd_1DTo2D[i]] = x[i]
for i in range(n_end_beta, len(x)):
gamma[i - n_end_beta + 1] = x[i]
return alpha, beta, gamma
    def Get_ProlateSpheroidParameters(self, tipRadius, length):
        """
        Derive prolate-spheroid parameters from tip radius and total length.

        tipRadius is interpreted as the tip radius of curvature b**2/a, and
        length as the full major axis (so a = length/2).  Returns (a, b, ksi, d):
        semi-axes a >= b, surface radial coordinate ksi, focal parameter d.
        """
        b2_div_a = tipRadius          # tip radius of curvature b**2/a
        a = length/2.0                # major semi-axis
        b = np.sqrt(b2_div_a * a)     # minor semi-axis
        # d*ksi = a d*sqrt(ksi**2 - 1) = b
        # ksi**2 * (1 - (b/a)**2) = 1
        # NOTE(review): the derivation above gives ksi**2 = 1/(1 - (b/a)**2),
        # but the next line assigns that value to ksi itself with no sqrt --
        # confirm whether this is intended (d = a/ksi inherits the same question).
        ksi = 1.0/(1.0 - (b/a)**2)
        d = a / ksi
        return a, b, ksi, d
def GetETMonSurface_direct(self, etas, ksi_0, phi_0):
E_0, k, d = self.E0, self.k, self.d
assert phi_0 == 0
n = len(etas)
E_eta = np.zeros(n, dtype=complex)
E_ksi = np.zeros(n, dtype=complex)
for i in range(n):
eta = etas[i]
z_hat_eta = ksi_0*np.sqrt((1 - eta**2)/(ksi_0**2 - eta**2))
z_hat_ksi = eta * np.sqrt((ksi_0**2 - 1)/(ksi_0**2 - eta**2))
x = d/2*np.sqrt(1 - eta**2)*np.sqrt(ksi_0**2 - 1)*np.cos(phi_0)
E_eta[i] = E_0*np.exp(1j*k*x)*z_hat_eta
E_ksi[i] = E_0*np.exp(1j*k*x)*z_hat_ksi
return E_eta, E_ksi
    def GetETMonSurface_expansion(self, etas, ksi_0, phi_0):
        """
        Tangential incident field on the surface ksi = ksi_0 computed from the
        spheroidal-harmonic expansion (cross-check for GetETMonSurface_direct).
        phi_0 must be 0.  Returns (E_eta, E_ksi) as complex arrays.
        """
        E_0, k, d, c_0 = self.E0, self.k, self.d, self.c
        assert phi_0 == 0
        # NOTE(review): incidence angle is fixed to pi/2 here, independent of
        # self.theta used elsewhere -- confirm this is intended.
        theta_0 = np.pi/2
        n_eta = len(etas)
        E_eta = np.zeros(n_eta, dtype=complex)
        E_ksi = np.zeros(n_eta, dtype=complex)
        N = self.N_max
        for i in range(n_eta):
            eta = etas[i]
            for m in range(N):
                for n in range(m, N):
                    A_mn = self.GetIncExpansionCoeffs_Amn(m, n)
                    # pro_ang1 returns (value, derivative); radial helpers are
                    # external (defined elsewhere in this module)
                    E_eta[i] += A_mn * 2*(ksi_0**2 - 1)*GetDerivativeRadialFunc(1, m, n, c_0, ksi_0) \
                            *pro_ang1(m, n, c_0, eta)[0] \
                            /(d*np.sqrt(ksi_0**2 - eta**2)*np.sqrt(ksi_0**2 - 1))
                    E_ksi[i] += A_mn * (-2)*(1 - eta**2)*pro_ang1(m, n, c_0, eta)[1] \
                            *GetRadialFunc(1, m, n, c_0, ksi_0) \
                            /(d*np.sqrt(ksi_0**2 - eta**2)*np.sqrt(1 - eta**2))
        return E_eta, E_ksi
    def GetFieldOnSurface_(self, alpha, beta, gamma, etas, ksi, phi):
        """
        Scattered field on the surface ksi = const at the given eta samples,
        summed from the alpha/beta/gamma coefficient expansions over the
        external spheroidal vector wave functions GetM_* (defined elsewhere
        in this module).  Returns (E_eta, E_ksi, E_phi) as complex arrays.
        """
        c, d = self.c, self.d
        k = self.k
        E0 = self.E0
        n_pts = len(etas)
        E_eta = np.zeros(n_pts, dtype=complex)
        E_ksi = np.zeros(n_pts, dtype=complex)
        E_phi = np.zeros(n_pts, dtype=complex)
        for i in range(n_pts):
            eta = etas[i]
            # alpha block: M^{(+)}_{m+1,n} contributions
            M, N = alpha.shape
            for m in range(M):
                for n in range(m, N):
                    E_eta[i] += alpha[m, n]*GetM_mplus1n_o_plus_eta(eta, ksi, phi, m, n, c, d)
                    E_ksi[i] += alpha[m, n]*GetM_mplus1n_o_plus_ksi(eta, ksi, phi, m, n, c, d)
                    E_phi[i] += alpha[m, n]*GetM_mplus1n_o_plus_phi(eta, ksi, phi, m, n, c, d)
            # beta block: M^{(z)}_{m,n} contributions
            M, N = beta.shape
            for m in range(M):
                for n in range(m, N):
                    E_eta[i] += beta[m, n]*GetM_mn_o_z_eta(eta, ksi, phi, m, n, c, d)
                    E_ksi[i] += beta[m, n]*GetM_mn_o_z_ksi(eta, ksi, phi, m, n, c, d)
                    E_phi[i] += beta[m, n]*GetM_mn_o_z_phi(eta, ksi, phi, m, n, c, d)
            # gamma block: M^{(-)}_{m-1,n} contributions (index 0 unused)
            N = len(gamma)
            for n in range(1,N):
                E_eta[i] += gamma[n]*GetM_mminus1n_o_minus_eta(eta, ksi, phi, 1, n, c, d)
                E_ksi[i] += gamma[n]*GetM_mminus1n_o_minus_ksi(eta, ksi, phi, 1, n, c, d)
                E_phi[i] += gamma[n]*GetM_mminus1n_o_minus_phi(eta, ksi, phi, 1, n, c, d)
        return E_eta, E_ksi, E_phi
def GetFieldOnSurface(self, alpha, beta, gamma, etas, ksi, phi, totalField=True):
c, d = self.c, self.d
k = self.k
E0 = self.E0
n_pts = len(etas)
E_eta = np.zeros(n_pts, dtype=complex)
E_ksi = np.zeros(n_pts, dtype=complex)
E_phi = np.zeros(n_pts, dtype=complex)
for i in range(n_pts):
eta = etas[i]
M, N = alpha.shape
for m in range(M):
for n in range(m, N):
E_eta[i] += alpha[m, n]*GetM_mplus1n_o_plus_eta(eta, ksi, phi, m, n, c, d)
E_ksi[i] += alpha[m, n]*GetM_mplus1n_o_plus_ksi(eta, ksi, phi, m, n, | |
required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `replace_autoscaling_v1_namespaced_horizontal_pod_autoscaler`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `replace_autoscaling_v1_namespaced_horizontal_pod_autoscaler`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `replace_autoscaling_v1_namespaced_horizontal_pod_autoscaler`")
collection_formats = {}
resource_path = '/apis/autoscaling/v1/namespaces/{namespace}/horizontalpodautoscalers/{name}'.replace('{format}', 'json')
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = {}
if 'pretty' in params:
query_params['pretty'] = params['pretty']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api(resource_path, 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1HorizontalPodAutoscaler',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
collection_formats=collection_formats)
def replace_autoscaling_v1_namespaced_horizontal_pod_autoscaler_status(self, name, namespace, body, **kwargs):
"""
replace status of the specified HorizontalPodAutoscaler
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.replace_autoscaling_v1_namespaced_horizontal_pod_autoscaler_status(name, namespace, body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str name: name of the HorizontalPodAutoscaler (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1HorizontalPodAutoscaler body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1HorizontalPodAutoscaler
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.replace_autoscaling_v1_namespaced_horizontal_pod_autoscaler_status_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.replace_autoscaling_v1_namespaced_horizontal_pod_autoscaler_status_with_http_info(name, namespace, body, **kwargs)
return data
    def replace_autoscaling_v1_namespaced_horizontal_pod_autoscaler_status_with_http_info(self, name, namespace, body, **kwargs):
        """
        replace status of the specified HorizontalPodAutoscaler
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.replace_autoscaling_v1_namespaced_horizontal_pod_autoscaler_status_with_http_info(name, namespace, body, callback=callback_function)

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param str name: name of the HorizontalPodAutoscaler (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param V1HorizontalPodAutoscaler body: (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :return: V1HorizontalPodAutoscaler
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # reject unknown keyword arguments before building the request
        all_params = ['name', 'namespace', 'body', 'pretty']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        params = locals()
        # iteritems: py2/3 compat helper imported elsewhere in this module
        # (presumably from six) -- TODO confirm
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method replace_autoscaling_v1_namespaced_horizontal_pod_autoscaler_status" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'name' is set
        if ('name' not in params) or (params['name'] is None):
            raise ValueError("Missing the required parameter `name` when calling `replace_autoscaling_v1_namespaced_horizontal_pod_autoscaler_status`")
        # verify the required parameter 'namespace' is set
        if ('namespace' not in params) or (params['namespace'] is None):
            raise ValueError("Missing the required parameter `namespace` when calling `replace_autoscaling_v1_namespaced_horizontal_pod_autoscaler_status`")
        # verify the required parameter 'body' is set
        if ('body' not in params) or (params['body'] is None):
            raise ValueError("Missing the required parameter `body` when calling `replace_autoscaling_v1_namespaced_horizontal_pod_autoscaler_status`")

        collection_formats = {}

        # build path, query, header and body parameters for the PUT request
        resource_path = '/apis/autoscaling/v1/namespaces/{namespace}/horizontalpodautoscalers/{name}/status'.replace('{format}', 'json')
        path_params = {}
        if 'name' in params:
            path_params['name'] = params['name']
        if 'namespace' in params:
            path_params['namespace'] = params['namespace']

        query_params = {}
        if 'pretty' in params:
            query_params['pretty'] = params['pretty']

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        if 'body' in params:
            body_params = params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
        if not header_params['Accept']:
            del header_params['Accept']

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['*/*'])

        # Authentication setting
        auth_settings = ['BearerToken']

        return self.api_client.call_api(resource_path, 'PUT',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='V1HorizontalPodAutoscaler',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        collection_formats=collection_formats)
def watch_autoscaling_v1_horizontal_pod_autoscaler_list_for_all_namespaces(self, **kwargs):
"""
watch individual changes to a list of HorizontalPodAutoscaler
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.watch_autoscaling_v1_horizontal_pod_autoscaler_list_for_all_namespaces(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history.
:param int timeout_seconds: Timeout for the list/watch call.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: VersionedEvent
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.watch_autoscaling_v1_horizontal_pod_autoscaler_list_for_all_namespaces_with_http_info(**kwargs)
else:
(data) = self.watch_autoscaling_v1_horizontal_pod_autoscaler_list_for_all_namespaces_with_http_info(**kwargs)
return data
def watch_autoscaling_v1_horizontal_pod_autoscaler_list_for_all_namespaces_with_http_info(self, **kwargs):
    """
    watch individual changes to a list of HorizontalPodAutoscaler

    Synchronous by default; pass a `callback` function to make the
    underlying HTTP request asynchronous instead.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.watch_autoscaling_v1_horizontal_pod_autoscaler_list_for_all_namespaces_with_http_info(callback=callback_function)

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
    :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
    :param str pretty: If 'true', then the output is pretty printed.
    :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history.
    :param int timeout_seconds: Timeout for the list/watch call.
    :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
    :return: VersionedEvent
        If the method is called asynchronously, returns the request thread.
    """
    all_params = ['field_selector', 'label_selector', 'pretty',
                  'resource_version', 'timeout_seconds', 'watch']
    all_params.append('callback')
    all_params.append('_return_http_data_only')

    # Validate keyword arguments and fold them into a flat params dict.
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method watch_autoscaling_v1_horizontal_pod_autoscaler_list_for_all_namespaces" % key
            )
        params[key] = val
    del params['kwargs']

    collection_formats = {}
    resource_path = '/apis/autoscaling/v1/watch/horizontalpodautoscalers'.replace('{format}', 'json')
    path_params = {}

    # Map the snake_case Python argument names onto their camelCase
    # wire-format query keys; only forward what the caller supplied.
    query_params = {}
    for py_name, wire_name in (('field_selector', 'fieldSelector'),
                               ('label_selector', 'labelSelector'),
                               ('pretty', 'pretty'),
                               ('resource_version', 'resourceVersion'),
                               ('timeout_seconds', 'timeoutSeconds'),
                               ('watch', 'watch')):
        if py_name in params:
            query_params[wire_name] = params[py_name]

    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None

    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])
    if not header_params['Accept']:
        del header_params['Accept']

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['*/*'])

    # Authentication setting
    auth_settings = ['BearerToken']

    return self.api_client.call_api(resource_path, 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='VersionedEvent',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    collection_formats=collection_formats)
def watch_autoscaling_v1_namespaced_horizontal_pod_autoscaler(self, name, namespace, **kwargs):
"""
watch changes to an object of kind HorizontalPodAutoscaler
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.watch_autoscaling_v1_namespaced_horizontal_pod_autoscaler(name, namespace, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str name: name of the HorizontalPodAutoscaler (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects | |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests for model evaluation.
Compare the results of some models with other programs.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import types
try:
import cPickle as pickle
except ImportError:
import pickle
import numpy as np
from numpy.testing import utils
from .example_models import models_1D, models_2D
from .. import (fitting, models, LabeledInput, SerialCompositeModel,
SummedCompositeModel)
from ..core import FittableModel
from ..polynomial import PolynomialBase
from ...tests.helper import pytest
from ...extern import six
try:
from scipy import optimize # pylint: disable=W0611
HAS_SCIPY = True
except ImportError:
HAS_SCIPY = False
class TestSerialComposite(object):
    """
    Tests for evaluating composite models chained in series.
    """

    def setup_class(self):
        self.y, self.x = np.mgrid[:5, :5]
        self.p1 = models.Polynomial1D(3)
        self.p11 = models.Polynomial1D(3)
        self.p2 = models.Polynomial2D(3)

    def test_single_array_input(self):
        composite = SerialCompositeModel([self.p1, self.p11])
        # Chaining by hand must agree with the composite evaluation.
        expected = self.p11(self.p1(self.x))
        utils.assert_almost_equal(expected, composite(self.x))

    def test_labeledinput_1(self):
        linp = LabeledInput([self.x, self.y], ['x', 'y'])
        composite = SerialCompositeModel([self.p2, self.p1],
                                         [['x', 'y'], ['z']],
                                         [['z'], ['z']])
        result = composite(linp)
        # 2D polynomial feeds its output 'z' into the 1D polynomial.
        expected = self.p1(self.p2(self.x, self.y))
        utils.assert_almost_equal(expected, result.z)

    def test_labeledinput_2(self):
        linp = LabeledInput([self.x, self.y], ['x', 'y'])
        rotation = models.Rotation2D(angle=23.4)
        shift_x = models.Shift(-2)
        shift_y = models.Shift(1.2)
        composite = SerialCompositeModel([rotation, shift_x, shift_y],
                                         [['x', 'y'], ['x'], ['y']],
                                         [['x', 'y'], ['x'], ['y']])
        result = composite(linp)
        xr, yr = rotation(self.x, self.y)
        utils.assert_almost_equal(shift_x(xr), result.x)
        utils.assert_almost_equal(shift_y(yr), result.y)

    def test_labeledinput_3(self):
        # Same pipeline as above, driven with scalar inputs.
        linp = LabeledInput([2, 4.5], ['x', 'y'])
        rotation = models.Rotation2D(angle=23.4)
        shift_x = models.Shift(-2)
        shift_y = models.Shift(1.2)
        composite = SerialCompositeModel([rotation, shift_x, shift_y],
                                         [['x', 'y'], ['x'], ['y']],
                                         [['x', 'y'], ['x'], ['y']])
        result = composite(linp)
        xr, yr = rotation(2, 4.5)
        utils.assert_almost_equal(shift_x(xr), result.x)
        utils.assert_almost_equal(shift_y(yr), result.y)

    def test_multiple_input(self):
        rotation = models.Rotation2D(angle=-60)
        composite = SerialCompositeModel([rotation, rotation])
        xx, yy = composite(self.x, self.y)
        # A forward pass followed by the inverse must round-trip.
        xb, yb = composite.inverse(xx, yy)
        utils.assert_almost_equal(xb, self.x)
        utils.assert_almost_equal(yb, self.y)
class TestSummedComposite(object):
    """Test legacy composite models evaluation."""

    def setup_class(self):
        self.x = np.linspace(1, 10, 100)
        self.y = np.linspace(1, 10, 100)
        self.p1 = models.Polynomial1D(3)
        self.p11 = models.Polynomial1D(3)
        self.p2 = models.Polynomial2D(3)
        self.p1.parameters = [1.4, 2.2, 3.1, 4]
        self.p2.c0_0 = 100

    def test_single_array_input(self):
        summed = SummedCompositeModel([self.p1, self.p11])
        # The summed model must equal the element-wise sum of its parts.
        expected = self.p1(self.x) + self.p11(self.x)
        utils.assert_almost_equal(expected, summed(self.x))

    def test_labeledinput(self):
        linp = LabeledInput([self.x, self.y], ['x', 'y'])
        summed = SummedCompositeModel([self.p1, self.p11], inmap=['x'],
                                      outmap=['x'])
        result = summed(linp)
        expected = self.p1(self.x) + self.p11(self.x)
        utils.assert_almost_equal(expected, result.x)

    def test_inputs_outputs_mismatch(self):
        poly2d = models.Polynomial2D(1)
        cheb2d = models.Chebyshev2D(1, 1)
        # Mixing models with incompatible I/O arity must be rejected.
        with pytest.raises(ValueError):
            SummedCompositeModel([poly2d, cheb2d])
def test_pickle():
    """Composite models must survive a pickle round-trip unchanged."""
    poly3 = models.Polynomial1D(3)
    poly4 = models.Polynomial1D(4)
    gauss = models.Gaussian1D(10.3, 5.4, 1.2)
    chained = SerialCompositeModel([poly3, gauss])
    summed = SummedCompositeModel([chained, poly4])
    restored = pickle.loads(pickle.dumps(summed))
    assert restored(3) == summed(3)
@pytest.mark.skipif('not HAS_SCIPY')
def test_custom_model(amplitude=4, frequency=1):
    """Fit a user-defined sine model (with analytic derivative) to noisy data."""

    def _sine(x, amplitude=4, frequency=1):
        """Model evaluation function."""
        return amplitude * np.sin(2 * np.pi * frequency * x)

    def _sine_deriv(x, amplitude=4, frequency=1):
        """Jacobian: derivatives of the model w.r.t. its *parameters*."""
        d_amplitude = np.sin(2 * np.pi * frequency * x)
        d_frequency = 2 * np.pi * x * amplitude * np.cos(2 * np.pi * frequency * x)
        return np.vstack((d_amplitude, d_frequency))

    SineModel = models.custom_model_1d(_sine, func_fit_deriv=_sine_deriv)

    x = np.linspace(0, 4, 50)
    sin_model = SineModel()

    # Exercise the generated evaluate/fit_deriv entry points.
    sin_model.evaluate(x, 5., 2.)
    sin_model.fit_deriv(x, 5., 2.)

    np.random.seed(0)
    data = sin_model(x) + np.random.rand(len(x)) - 0.5
    fitter = fitting.LevMarLSQFitter()
    fitted = fitter(sin_model, x, data)
    assert np.all((np.array([fitted.amplitude.value, fitted.frequency.value]) -
                   np.array([amplitude, frequency])) < 0.001)
def test_custom_model_init():
    """Keyword arguments to a custom model constructor set its parameters."""

    @models.custom_model_1d
    def SineModel(x, amplitude=4, frequency=1):
        """Evaluate a sine wave."""
        return amplitude * np.sin(2 * np.pi * frequency * x)

    model = SineModel(amplitude=2., frequency=0.5)
    assert model.amplitude == 2.
    assert model.frequency == 0.5
def test_custom_model_defaults():
    """Function-argument defaults become parameter defaults of the model."""

    @models.custom_model_1d
    def SineModel(x, amplitude=4, frequency=1):
        """Evaluate a sine wave."""
        return amplitude * np.sin(2 * np.pi * frequency * x)

    model = SineModel()
    # Defaults are exposed both on the class and on a fresh instance.
    assert SineModel.amplitude.default == 4
    assert SineModel.frequency.default == 1
    assert model.amplitude == 4
    assert model.frequency == 1
def test_custom_model_bounding_box():
    """Test bounding box evaluation for a 3D model"""

    def ellipsoid(x, y, z, x0=13, y0=10, z0=8, a=4, b=3, c=2, amp=1):
        # Constant amplitude inside the ellipsoid, zero outside.
        r2 = ((x - x0) / a) ** 2 + ((y - y0) / b) ** 2 + ((z - z0) / c) ** 2
        return (r2 < 1) * amp

    class Ellipsoid3D(models.custom_model(ellipsoid)):
        @property
        def bounding_box(self):
            # (z, y, x) ordering, each axis spanning the semi-axis extent.
            return ((self.z0 - self.c, self.z0 + self.c),
                    (self.y0 - self.b, self.y0 + self.b),
                    (self.x0 - self.a, self.x0 + self.a))

    model = Ellipsoid3D()
    bbox = model.bounding_box

    zlim, ylim, xlim = bbox
    dz, dy, dx = np.diff(bbox) / 2
    # Grid confined to the bounding box ...
    z1, y1, x1 = np.mgrid[slice(zlim[0], zlim[1] + 1),
                          slice(ylim[0], ylim[1] + 1),
                          slice(xlim[0], xlim[1] + 1)]
    # ... and a grid extended by half the box size on every side.
    z2, y2, x2 = np.mgrid[slice(zlim[0] - dz, zlim[1] + dz + 1),
                          slice(ylim[0] - dy, ylim[1] + dy + 1),
                          slice(xlim[0] - dx, xlim[1] + dx + 1)]

    full = model(x2, y2, z2)
    boxed = model(x1, y1, z1)

    # check for flux agreement: essentially all flux lies inside the box
    assert abs(full.sum() - boxed.sum()) < full.sum() * 1e-7
class Fittable2DModelTester(object):
"""
Test class for all two dimensional parametric models.
Test values have to be defined in example_models.py. It currently test the
model with different input types, evaluates the model at different
positions and assures that it gives the correct values. And tests if the
model works with non-linear fitters.
This can be used as a base class for user defined model testing.
"""
def setup_class(self):
    """Define grid sizes, tolerances and sample evaluation inputs."""
    # Fit-grid dimensions.
    self.N = 100
    self.M = 100
    # Tolerances for evaluation and fitting comparisons.
    self.eval_error = 0.0001
    self.fit_error = 0.1
    # Scalar, 1D and 2D sample inputs used by the per-model tests.
    self.x = 5.3
    self.y = 6.7
    self.x1 = np.arange(1, 10, .1)
    self.y1 = np.arange(1, 10, .1)
    self.y2, self.x2 = np.mgrid[:10, :8]
def test_input2D(self, model_class, test_parameters):
    """Model must accept scalar, 1D-array and 2D-array input pairs."""
    model = create_model(model_class, test_parameters)
    for xx, yy in ((self.x, self.y), (self.x1, self.y1), (self.x2, self.y2)):
        model(xx, yy)
def test_eval2D(self, model_class, test_parameters):
    """Model values at the reference points must match within eval_error."""
    model = create_model(model_class, test_parameters)
    xs = test_parameters['x_values']
    ys = test_parameters['y_values']
    zs = test_parameters['z_values']
    assert np.all(np.abs(model(xs, ys) - zs) < self.eval_error)
def test_bounding_box2D(self, model_class, test_parameters):
    """Exercise the bounding_box setter/deleter and check flux containment."""
    model = create_model(model_class, test_parameters)

    # Setter round-trip.
    model.bounding_box = ((-5, 5), (-5, 5))
    assert model.bounding_box == ((-5, 5), (-5, 5))

    # Clearing the box makes the getter raise.
    model.bounding_box = None
    with pytest.raises(NotImplementedError):
        model.bounding_box

    # A 1D limit on a 2D model must be rejected.
    with pytest.raises(ValueError):
        model.bounding_box = (-5, 5)

    # Restore the model's own default box, if it defines one.
    del model.bounding_box
    try:
        bbox = model.bounding_box
    except NotImplementedError:
        pytest.skip("Bounding_box is not defined for model.")

    ylim, xlim = bbox
    dy, dx = np.diff(bbox) / 2
    # Grid confined to the bounding box ...
    yin, xin = np.mgrid[slice(ylim[0], ylim[1] + 1),
                        slice(xlim[0], xlim[1] + 1)]
    # ... and a grid extended by half the box size on every side.
    yout, xout = np.mgrid[slice(ylim[0] - dy, ylim[1] + dy + 1),
                          slice(xlim[0] - dx, xlim[1] + dx + 1)]

    full = model(xout, yout)
    boxed = model(xin, yin)
    # check for flux agreement: essentially all flux lies inside the box
    assert abs(full.sum() - boxed.sum()) < full.sum() * 1e-7
@pytest.mark.skipif('not HAS_SCIPY')
def test_fitter2D(self, model_class, test_parameters):
    """Fit noisy model data and recover the generating parameters."""
    x_lim = test_parameters['x_lim']
    y_lim = test_parameters['y_lim']

    parameters = test_parameters['parameters']
    model = create_model(model_class, test_parameters)
    if isinstance(parameters, dict):
        parameters = [parameters[name] for name in model.param_names]

    # Log-spaced grid when the example declares log_fit, linear otherwise.
    if "log_fit" in test_parameters:
        if test_parameters['log_fit']:
            x = np.logspace(x_lim[0], x_lim[1], self.N)
            y = np.logspace(y_lim[0], y_lim[1], self.N)
    else:
        x = np.linspace(x_lim[0], x_lim[1], self.N)
        y = np.linspace(y_lim[0], y_lim[1], self.N)
    xv, yv = np.meshgrid(x, y)

    np.random.seed(0)
    # add 10% noise to the amplitude
    jitter = np.random.rand(self.N, self.N) - 0.5
    data = model(xv, yv) + 0.1 * parameters[0] * jitter
    fitter = fitting.LevMarLSQFitter()
    fitted_model = fitter(model, xv, yv, data)

    # Compare only the free (non-fixed) parameters against the inputs.
    fit_params = [getattr(fitted_model, name) for name in fitted_model.param_names]
    fixed_flags = [p.fixed for p in fit_params]
    expected = np.array([val for val, is_fixed in zip(parameters, fixed_flags)
                         if not is_fixed])
    fitted = np.array([p.value for p in fit_params if not p.fixed])
    utils.assert_allclose(fitted, expected, atol=self.fit_error)
@pytest.mark.skipif('not HAS_SCIPY')
def test_deriv_2D(self, model_class, test_parameters):
"""
Test the derivative of a model by fitting with an estimated and
analytical derivative.
"""
x_lim = test_parameters['x_lim']
y_lim = test_parameters['y_lim']
if model_class.fit_deriv is None:
pytest.skip("Derivative function is not defined for model.")
if issubclass(model_class, PolynomialBase):
pytest.skip("Skip testing derivative of polynomials.")
if "log_fit" in test_parameters:
if test_parameters['log_fit']:
x = np.logspace(x_lim[0], x_lim[1], self.N)
y = np.logspace(y_lim[0], y_lim[1], self.M)
else:
x = np.linspace(x_lim[0], x_lim[1], self.N)
y = np.linspace(y_lim[0], y_lim[1], self.M)
xv, yv = np.meshgrid(x, y)
try:
model_with_deriv = create_model(model_class, test_parameters,
use_constraints=False,
parameter_key='deriv_initial')
model_no_deriv = create_model(model_class, test_parameters,
use_constraints=False,
parameter_key='deriv_initial')
model = create_model(model_class, test_parameters,
use_constraints=False,
parameter_key='deriv_initial')
except KeyError:
model_with_deriv = create_model(model_class, test_parameters,
use_constraints=False)
model_no_deriv = create_model(model_class, test_parameters,
use_constraints=False)
model = create_model(model_class, test_parameters,
use_constraints=False)
# add 10% noise to the amplitude
rsn | |
[]
list_chrom = []
for start,end in breaks:
if len(timespend) == 0:
stimespend = []
else:
stimespend = timespend[start:end]
list_chrom.append(Chrom(start,end,timespend=stimespend))
def MRT():
if len(list_chrom) == 1:
return list_chrom[0].mrt
else:
return np.concatenate([c.mrt for c in list_chrom])
def RFD():
if len(list_chrom) == 1:
return list_chrom[0].rfd
else:
return np.concatenate([c.rfd for c in list_chrom])
#pos, proba, newp, finished = generate_newp(pos, proba, 1,actual_pos=[],cascade=cascade)
#print("Before",avail)
pos, proba, newp, finished,previous = generate_newp_no_corre(pos, proba, avail,
actual_pos=actual_pos,cascade=cascade,
previous =[])
tot_introduced=avail
nfork = [[avail,0]]
avail = 0
position_time_activated_ori = []
terminations = []
def in_ch_i(rpos):
for i,(start,end) in enumerate(breaks):
#print(start,end,pos)
if start<=rpos< end:
return i
for p in newp:
list_chrom[in_ch_i(p)].append_forks(p)
position_time_activated_ori.append([p, 0, len(d3p)])
finished = False
if debug:
print(actual_pos)
count = 0
smess = np.random.randint(0, single_mol_exp_sub_sample)
smess_size = 0
start_exp = False
def next_event_breaks():
return min( [chrom.min_time() for chrom in list_chrom])
last_intro = 0
continuous_time = 0
Avails=[]
Noris = []
introduced=False
#print("There")
while (sum( [len(chrom.actual_pos) for chrom in list_chrom]) != 0) or np.any(proba!=0) :
if avail > diff:
print(avail,diff,time)
raise
if correct_activation and not introduced:
avail = diff-1
introduced=True
#print(avail,time)
Noris.append([np.sum(proba!=0),time])
#print(time,avail,np.sum(proba!=0))
#print(introduction_time,"INTSNST")
count += 1
smess += 1
next_e = next_event_breaks()
# evolve until next event :
# either a fork collision
# or an attachement
if debug:
print("Actual pos", actual_pos)
print("next", next_e)
find_target = False
#print("Tmio",time,avail)
#Gillespi algorithm
fast=True
if fast:
def add_attach_nothing(attached=0,add=0,tot_introduced=0):
nori = np.sum(proba != 0)
if nori != 0 and avail - attached + add != 0:
k_attach = nori * (avail-attached+add) * kon # minutes
else:
# print("1001",nori,avail)
k_attach = 0
if (introduction_time != None) and (tot_introduced < diff):
kaddN = 1.*diff / introduction_time * np.exp(-time / fork_speed(time) / introduction_time)
else:
kaddN = 0
#print("Inside",k_attach,kaddN)
if k_attach !=0 or kaddN != 0:
next_attach_or_insert = - np.log(np.random.rand()) / (kaddN + k_attach)
if np.random.rand() < kaddN / (kaddN + k_attach):
return 0,1,next_attach_or_insert
else:
return 1,0,next_attach_or_insert
else:
return 0,0,1000000000000000000
attached = 0
add = 0
next_collide_time = next_e / fork_speed(time)
passed_time = 0
while True:
status = add_attach_nothing(attached=attached,add=add,
tot_introduced=tot_introduced)
#print(status)
if continuous_time + passed_time + status[2] * fork_speed(time) > time+ next_e:
time_evolve = next_e
continuous_time = time + time_evolve
#print("Event")
break
attached += status[0]
add += status[1]
passed_time += status[2] * fork_speed(time)
tot_introduced += status[1]
if continuous_time + passed_time > time + 1:
#print("Plusone" )
time_evolve = int(continuous_time - time)
continuous_time += passed_time
break
if tot_introduced == diff:
introduction_time = None
#print("Avail %i, add %i , attached %i, tot_introduced %i"%(avail,add,attached,tot_introduced),continuous_time,time,time_evolve)
if (introduction_time != None) and (time > (introduction_time * 3)) and tot_introduced != diff:
avail += diff-tot_introduced
tot_introduced = diff
avail += add # the one attached are remove afterward
Avails.append([avail,time])
nfork.append([nfork[-1][0],time])
else:
nori = np.sum(proba != 0)
if nori != 0 and avail != 0:
next_attach_time = 1 / (nori * avail * kon) # minutes
else:
# print("1001",nori,avail)
next_attach_time = 10000000
kattach = 1/next_attach_time
if introduction_time != None and tot_introduced <= diff:
kaddN = diff/introduction_time * np.exp(-time / fork_speed(time)/introduction_time) #Ok for time unit
else:
kaddN = 0
next_attach_or_insert = - np.log(np.random.rand()) / ( kaddN + kattach )
release = False
attached=0
next_collide_time = next_e / fork_speed(time) # minutes
if next_attach_or_insert > next_collide_time:
time_evolve = next_e
continuous_time = time + time_evolve
#print("Collision",time_evolve)
else:
if np.random.rand() < kaddN / (kaddN + kattach):
avail += 1
tot_introduced += 1
else:
attached = 1
continuous_time += (next_attach_or_insert * fork_speed(time))
if continuous_time > time+1:
time_evolve = int(continuous_time-time)
else:
time_evolve = 0
#print("Reac",time_evolve,next_e)
# print(next_attach_time)
#print("time %.2f , %i avail %i nori, %.3f conti time , %i n intro , %i find target %i release "%(time,avail,np.sum(proba!=0),continuous_time,tot_introduced,find_target,release))
"""
if nori == 0 or next_attach_time > next_collide_time:
time_evolve = next_e # kb
else:
time_evolve = int(next_attach_time * fork_speed(time)) # kb
find_target = True
"""
# print(collide,find_target)
# print(next_attach_time,next_collide_time,avail,nori)
# print(next_e[0][0])
if time_evolve > next_e:
print(time_evolve,next_e,continuous_time,time)
raise
# print(collide,find_target,avail)
# print(actual_pos)
# propagate
#print(avail,update,add,continuous_time)
if (time_evolve > 0 or next_collide_time == 0) : # Second condition not sure why but needed
##############################################################
# To record single mol
if single_mol_exp and (smess % single_mol_exp_sub_sample == 0):
if not start_exp:
#print(time, "start")
single_mol_exp_v.append([time+time_evolve/2, time_evolve, np.sum(RFD() != 0)/len(d3p),
copy.deepcopy(RFD().copy())])
start_exp = True
else:
smess_size += time_evolve
# To record single mol
if single_mol_exp and (smess % single_mol_exp_sub_sample == 0):
if smess_size > pulse_size:
single_mol_exp_v[-1][1] = smess_size
single_mol_exp_v[-1][-1] -= RFD().copy()
start_exp = False
smess_size = 0
#print(time, "end")
else:
# print(ramp)
smess -= 1
##############################################################
time += time_evolve
actual_pos = []
olds = np.sum(proba != 0)
newavail =0
for chrom in list_chrom:
termination, iavail = chrom.evolve(time_evolve,proba,filter_termination=filter_termination)
chrom.check()
avail += iavail
newavail += iavail
actual_pos.extend(chrom.actual_pos)
terminations.extend(termination)
if newavail != 0:
nfork.append([nfork[-1][0] - newavail, time])
Avails.append([avail, time])
if attached != 0:
newp = []
if not finished and np.sum(proba) != 0:
#print("Reac",attached)
# print("multiple",avail,toadd)
# print(add)
# pos, proba, newp, finished = generate_newp(pos, proba, 1,actual_pos=[],cascade=cascade)
pos, proba, newp, finished, previous = generate_newp_no_corre(pos, proba, attached,
actual_pos=actual_pos, cascade=cascade,
previous=previous)
# avail -= 1
else:
newp = []
# Add the new one:
MRTl = MRT()
for p in newp:
list_chrom[in_ch_i(p)].append_forks(p)
proba[p] = 0
position_time_activated_ori.append([p, time, len(d3p) - np.sum(~np.isnan(MRTl))])
avail -= 1
nfork.append([nfork[-1][0] + len(newp), time])
Avails.append([avail,time])
if tot_introduced ==60 and (nfork[-1][0]+Avails[-1][0]) < tot_introduced:
print(nfork[-1][0],Avails[-1][0] , tot_introduced,len(breaks))
print(newp)
raise
#print(avail,len(list_chrom[0].actual_pos)/2)
if debug:
print("AFter", actual_pos)
for p1, p2 in zip(actual_pos[:-1], actual_pos[1:]):
try:
assert(p1[0] <= p2[0])
except:
print(p1, p2)
raise
if list_chrom_ret :
return MRT(), RFD(), time, single_mol_exp_v, position_time_activated_ori,terminations , list_chrom
else:
return MRT(), RFD(), time, single_mol_exp_v, \
position_time_activated_ori,terminations , \
tot_introduced, Avails, Noris, nfork
def get_fast_MRT_RFDs(nsim, distrib, ndiff, dori=20, kon=0.001,
fork_speed=0.3,
single_mol_exp=False, pulse_size=5, it=True,
binsize=5,continuous=False,wholeRFD=False,cascade={},breaks=None,
n_jobs=6,timespend=[],nMRT=6,filter_termination=None,
introduction_time=None,
wholeMRT=False,return_dict=False,correct_activation=False,dario=False,mask=[],early_over_late=False):
if not cascade:
cascade = {}
print("EXperimental")
#np.random.seed(1)
if breaks is None:
breaks = [[0,len(distrib)]]
MRTs = []
RFDs = []
Rep_Time = []
single_mol_exp_vs = []
position_time_activated_oris = []
terminations = []
tot_introduced = []
Avails=[]
Forks=[]
Noris =[]
#print("Nori", int(len(distrib)*binsize/dori))
lao = []
fork_speed_ct = False
if type(fork_speed) in [float,int]:
fs = 0 + fork_speed
fork_speed = lambda x:fs
fork_speed_ct = True
from joblib import Parallel, delayed
if n_jobs != 1:
res = Parallel(n_jobs=n_jobs)(delayed(fast_rep)(distrib, ndiff, kon=kon, debug=False,
fork_speed=fork_speed, single_mol_exp=single_mol_exp,
pulse_size=pulse_size,cascade=cascade,breaks=breaks,
continuous=continuous,binsize=binsize,dori=dori,timespend=timespend,
filter_termination=filter_termination,
introduction_time=introduction_time,
correct_activation=correct_activation,dario=dario) for _ in range(nsim))
else:
res = [ fast_rep(distrib, ndiff, kon=kon, debug=False,
fork_speed=fork_speed, single_mol_exp=single_mol_exp,
pulse_size=pulse_size,cascade=cascade,breaks=breaks,
continuous=continuous,binsize=binsize,dori=dori,timespend=timespend,
filter_termination=filter_termination,introduction_time=introduction_time,
correct_activation=correct_activation,dario=dario) for _ in range(nsim)]
for MRT, RFD, time, single_mol_exp_v, position_time_activated_ori,termination,totn,sAvails,sNoris,sForks in res:
MRTs.append(MRT)
RFDs.append(RFD)
#Rep_Time.append(time)
single_mol_exp_vs.append(single_mol_exp_v)
# Rescale time in single mol to 0 1
for i in range(len(single_mol_exp_vs[-1])):
single_mol_exp_vs[-1][i][0] /= time
position_time_activated_oris.append(position_time_activated_ori)
terminations.append(termination)
tot_introduced.append(totn)
Avails.append(sAvails)
Forks.append(sForks)
Noris.append(sNoris)
#print(sAvails[-4:],sForks[-4:],tot_introduced[-1])
if it:
for position_time_activated_ori in position_time_activated_oris:
for p, t, unrep in position_time_activated_ori:
lao.append([t, unrep])
lao.sort()
#print(len(lao))
#print(lao)
if fork_speed_ct:
dt = 1 / fork_speed(0) # in minute
print("Fs cte , dt %.1f (min)"%dt)
else:
print("Dt 1")
dt = 1
maxi = int(round(lao[-1][0]))+1
print("Average introduced" , np.mean(tot_introduced))
print("Maxiiiiiiiiiiiiiii",maxi)
Itun = np.zeros(maxi)
Unrep = np.zeros(maxi) + np.nan
It = np.zeros(maxi) + np.nan
npts = np.zeros_like(It)
for position_time_activated_ori in position_time_activated_oris:
#print(position_time_activated_ori)
for p, t, unrep in position_time_activated_ori:
#Itun[int(t*dt)] += 1
#print(p,t,int(t*dt),t*dt,unrep)
assign = int(t)
#left = t*dt-assign
#if np.random.rand() < left:
# assign += 1
#Unrep0[int(t*dt)] = unrep
if np.isnan(It[assign]):
It[assign] = 1 / unrep
else:
It[assign] += 1 / unrep
if np.isnan(Unrep[assign]):
Unrep[assign] = unrep
else:
Unrep[assign] += unrep
npts[assign] += 1
def compute_ft(data):
#Not perfect are average is not done over all simus
Flat_avail = np.zeros(maxi)
Flat_N = np.zeros(maxi)
for savails in data:
#print(savails)
for avail,ti in savails:
if int(ti)<len(Flat_avail):
#if np.isnan()
Flat_avail[int(ti)] += avail
Flat_N[int(ti)] += 1
for tip in range(ti,len(Flat_avail)):
Flat_avail[int(tip)] += avail
Flat_N[int(tip)] += 1
#if int(ti) + 1 < len(Flat_avail):
# print(int(ti)+1)
# Flat_avail[int(ti) + 1] += avail
# Flat_N[int(ti)+1] += 1
Flat_avail /= Flat_N
return Flat_avail
#print(It[:10])
Flat_avail = compute_ft(Avails)
Flat_ori = compute_ft(Noris)
Flat_fork = compute_ft(Forks)
# Probability of activation
Pa = np.zeros_like(MRTs[0])
for position_time_activated_ori in position_time_activated_oris:
for p, _, _ in position_time_activated_ori:
Pa[p] += 1
Pa /= len(position_time_activated_oris)
# Probability of terminations
Pt = np.zeros_like(MRTs[0])
for termination in terminations:
for p in termination:
#print(p)
Pt[p] += 1
Pt | |
workflow WHERE market = '{}' AND exchange = '{}' AND userid = {} AND core_strategy = '{}' ".format(
robot.market, robot.exchange_abbr, robot.user_id, robot.core_strategy)
rows = sql.query(sql_string)
# Check workflow if it is not saved in robot
if robot.wf_id is None:
try:
robot.wf_id = rows[0][0] # first result if existing
robot.wf_run_mode = rows[0][1]
robot.logger.lprint(["Workflow:", robot.wf_id, robot.wf_run_mode])
except:
robot.logger.lprint(["Not a part of workflow"])
robot.wf_id = None
robot.wf_run_mode = None
else:
robot.wf_run_mode = robot.simulation_param
### Checking exchanges availability and balance
def init_pre_check(robot, coinigy, b_test):
    """Verify that the exchange serves a price for robot.market before starting.

    Polls every 5 minutes while the ticker is unavailable (automatic
    maintenance), and terminates the robot on an invalid market name or an
    unreachable exchange URL.

    :param robot: robot instance (market, exchange_abbr, logger, user_id, terminate)
    :param coinigy: price provider exposing price(exchange, market, logger, b_test)
    :param b_test: backtesting shim providing sleep()
    """
    try:
        ticker_upd = coinigy.price(robot.exchange_abbr, robot.market, robot.logger, b_test)
        # Ticker could be failing if there is automatic maintenance - then sleep for a while
        if ticker_upd is None:
            send_chat_message(robot.user_id,
                              '{} seems to be on an automatic maintenance. Will try every 5 minutes.'.format(
                                  robot.market))
            while ticker_upd is None:
                b_test.sleep(300)  # sleeping for 5 minutes and checking again
                ticker_upd = coinigy.price(robot.exchange_abbr, robot.market, robot.logger, b_test)
        if ticker_upd == 'INVALID_MARKET':
            robot.logger.lprint(['Error: Invalid market'])
            send_chat_message(robot.user_id, 'Error: Invalid market to buy')
            robot.terminate()
    except urllib.error.URLError:
        robot.logger.lprint(['Exchange url unavailable to buy'])
        send_chat_message(robot.user_id, 'Error: Exchange url unavailable')
        robot.terminate()
    except Exception:
        # Bug fix: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt (e.g. raised by robot.terminate() above); narrowed
        # to Exception so process-control exceptions propagate.
        robot.logger.lprint(['Cannot get the price. Please check that you are using a correct market name.'])
        send_chat_message(robot.user_id,
                          'Error: Cannot get the price. Please check that you are using a correct market name.')
        robot.terminate()
### Checking modes
def init_mode_check(robot):
    """Normalise '-s' (simulation) mode suffixes and set the simulation flag."""
    # Map the simulation variants onto their regular modes.
    for sim_mode, plain_mode in (('now-s', 'now'), ('fullta-s', 'fullta')):
        if robot.mode == sim_mode:
            robot.wf_run_mode = 's'  # simulating
            robot.mode = plain_mode  # setting up a regular mode
    # Simulation switch
    robot.simulation = (robot.wf_run_mode == 's')
### Processing fulfilled orders
def init_orders_process(robot, buy_uuid, e_api, b_test, sum_paid, sum_quantity, source_filled, usd_x_rate):
    """Cancel an open order and fold its filled part into the running totals.

    :param robot: robot instance (exchange, market, logger, forex_pair)
    :param buy_uuid: uuid of the open order to cancel, or None for a no-op
    :param e_api: exchange API wrapper providing cancel() and getorder()
    :param b_test: backtesting shim providing sleep()
    :param sum_paid: running total paid
    :param sum_quantity: running total quantity filled
    :param source_filled: filled amount expressed in the source currency
    :param usd_x_rate: USD cross-rate used for oanda conversions
    :returns: tuple (sum_paid, sum_quantity, source_filled) with updated totals
    """
    if buy_uuid is not None:
        ### 1. Get information on the existing order and cancel it
        robot.logger.lprint(['>>> Cancelling:', buy_uuid, robot.exchange, robot.market])
        e_api.cancel(robot.exchange, robot.market, buy_uuid)
        b_test.sleep(5)
        order_info = e_api.getorder(robot.exchange, robot.market, buy_uuid)
        ### 2. Filled / remaining
        buy_uuid = None
        # Bug fix: order_info can legitimately be None (the old code guarded
        # quantity_filled but still read order_info['PricePerUnit'] and the
        # DEBUG prints unconditionally, raising TypeError). All field reads
        # now happen inside the guard; with no order data the totals are
        # returned unchanged.
        if order_info is not None:
            quantity_filled = order_info['Quantity'] - order_info['QuantityRemaining']
            print(">>>>> Quantity filled {}, order_info['Quantity'] {} , order_info['QuantityRemaining'] {}".format(
                quantity_filled, order_info['Quantity'], order_info['QuantityRemaining']))  # DEBUG
            print(">>>>> PricePerUnit {}, Price {}".format(order_info['PricePerUnit'], order_info['Price']))  # DEBUG
            price_unit = order_info['PricePerUnit']
            price_order = order_info['Price']
            if price_unit is None:
                price_unit = 0
            if robot.exchange == 'bitmex':
                if robot.market in config.primary_calc_markets:
                    # Inverse contracts: filled source amount = quantity / price.
                    source_filled = Decimal(str(Decimal(quantity_filled) / Decimal(price_unit)))
                    sum_paid += Decimal(str(source_filled))
                    sum_quantity += quantity_filled
                else:
                    source_filled = Decimal(str(Decimal(quantity_filled) * Decimal(price_unit)))
                    sum_paid += Decimal(str(source_filled))  # for price averaging
                    sum_quantity += quantity_filled
            elif robot.exchange == 'oanda':
                if robot.forex_pair:  # different approach to units for forex pairs
                    source_filled = Decimal(str(abs(quantity_filled))) / Decimal(usd_x_rate)
                else:
                    source_filled = Decimal(str(abs(price_order))) / Decimal(usd_x_rate)
                sum_paid += Decimal(str(price_order))
                sum_quantity += quantity_filled
            print('Filled: {}, sum_quantity {}'.format(source_filled, sum_quantity))
    # Returning results
    return sum_paid, sum_quantity, source_filled
### Updating price info in db
def init_db_upd(robot, sql, b_test, type = 'update'):
    """Keep the buys / workflow tables in sync with the robot state.

    type 'update'    - refresh the stored price (skipped for fixed-price jobs
                       and when backtesting)
    type 'delete'    - remove the buys row for this job
    type 'wf_delete' - remove the workflow row and forget robot.wf_id
    """
    if type == 'update':
        if not robot.fixed_price_flag and not b_test.backtesting:
            update_sql = (
                "UPDATE buys SET price = {}, last_update={} WHERE job_id = {} "
                "AND userid = {} AND core_strategy = '{}' "
            ).format(robot.price, b_test.time(), robot.job_id, robot.user_id, robot.core_strategy)
            sql.query(update_sql)
    elif type == 'delete':
        delete_sql = (
            "DELETE FROM buys WHERE job_id = {} "
            "AND userid = {} AND core_strategy = '{}' "
        ).format(robot.job_id, robot.user_id, robot.core_strategy)
        sql.query(delete_sql)
    elif type == 'wf_delete':
        if robot.wf_id is not None:
            wf_sql = (
                "DELETE FROM workflow WHERE wf_id = {} "
                "AND userid = {} AND core_strategy = '{}' "
            ).format(robot.wf_id, robot.user_id, robot.core_strategy)
            sql.query(wf_sql)
            robot.wf_id = None
### Checking if actually should start
def init_launch_position_opening(initiate_position_launch, robot):
    """Decide whether the position opening should be triggered now.

    Combines the robot mode ('now' / 'fullta'), the ML prediction result,
    and the global test-mode switch; returns the updated launch flag.
    """
    launch = initiate_position_launch
    if not launch:
        # Mode 'now' requests an immediate open; the prediction check below
        # still has the final say (it covers both 'fullta' and 'now').
        if robot.mode == 'now':
            launch = True
        td_result, td_direction, over_threshold = robot.predicted_action_direction()
        robot.logger.lprint(['--- validating (init): td_result {}, td_direction {}, over_threshold {}'.format(
            td_result, td_direction, over_threshold)])
        prediction_ok = (
            robot.mode in ['fullta', 'now']
            and over_threshold
            and td_direction in ['green', 'red']
        )
        if prediction_ok:
            launch = True
        else:
            robot.logger.lprint(
                [robot.user_name, '|', robot.exchange, robot.market, "| no prediction / prediction is below the threshold"])
            launch = False
    # Test runs always launch immediately, defaulting to a long position
    if config.run_testmode:
        launch = True
        if robot.short_flag is None:
            robot.short_flag = False
        robot.prediction = 1
        print("Testmode: launching immediately")
    return launch
### Updating prices to open position for
def init_price_update(robot, e_api, initiate_position_launch):
    """Set robot.fixed_price from the live orderbook (or the ticker feed) just before opening a position."""
    if initiate_position_launch:
        if not config.backtesting_enabled:
            if not robot.fixed_price_flag:  # otherwise price is in the input
                # When we are long, on the enter we buy -> get the price from asks (the lowest ask (sell price) which is the first in the array)
                # When we are short, on the enter we sell -> get the price from bids (the highest bid (buy price), which is the first in the array)
                if not robot.short_flag:    # LONG
                    robot.fixed_price = float(e_api.getorderbook(robot.exchange, robot.market, 'asks')[0]['Rate'])
                else:   # SHORT
                    robot.fixed_price = float(e_api.getorderbook(robot.exchange, robot.market, 'bids')[0]['Rate'])
            # for other cases (not fullta) like now or breakout - just get the averaged ticker price
            else:
                # NOTE(review): b_test is not a parameter of this function — it is
                # resolved as a module-level global here; confirm it is defined
                # at call time (otherwise this branch raises NameError).
                robot.fixed_price = get_price_feed(robot, b_test)
### pre 4.8 double checking if there are enough funds to buy. If not - waiting.
def init_pre_launch_check(initiate_position_launch, pre_order_open_state, flag_buyer_check_positions, approved_flag, balance_issue_notify, robot, sum_quantity, b_test, buy_rate, buy_flag):
    """Pre-open safety checks: confirm the balance is sufficient and that no conflicting positions exist.

    Returns the (possibly updated) control flags as a tuple:
    (flag_buyer_check_positions, initiate_position_launch, buy_flag,
     approved_flag, pre_order_open_state, sum_quantity, balance_issue_notify)
    """
    if initiate_position_launch and pre_order_open_state:
        pre_order_open_state = False  # run the balance check only once per launch attempt
        robot.logger.lprint(['Confirming the balance...'])
        # If there is no minimum balance available, then cancel buying flag and wait for 5 minutes
        if not ensure_balance(robot, buy_rate):
            initiate_position_launch = False
            robot.logger.lprint(["The balance is not enough to buy. Cancelling buy flag and sleeping for 5 minutes."])
            if balance_issue_notify:
                send_chat_message(robot.user_id,
                    'Please add to the balance or cancel the buy task. Bot will be sleeping in 5-min cycles.')
                balance_issue_notify = False  # notify the user only once
            b_test.sleep(300)
    # Also confirming that no positions are open
    if flag_buyer_check_positions:
        # NOTE(review): e_api is not a parameter of this function — it is resolved
        # as a module-level global here; confirm it is defined at call time.
        proceed_decision = buyer_check_positions(robot, e_api)
        flag_buyer_check_positions = False
        if not proceed_decision:
            buy_flag, approved_flag = False, False
            robot.sleep_buy_timer = 0
            sum_quantity = 0
    return flag_buyer_check_positions, initiate_position_launch, buy_flag, approved_flag, pre_order_open_state, sum_quantity, balance_issue_notify
### Init - quantity modifications
def init_quantity(robot, e_api, buy_rate, contracts):
    """Convert the robot's source position into an order quantity, with exchange-specific rounding.

    :param robot: robot/job state (reads source_position, exchange, market, forex_pair; updates contracts_total)
    :param e_api: exchange API facade, used for the USD conversion rate on OANDA
    :param buy_rate: rate the position will be opened at
    :param contracts: running contracts value (recalculated for bitmex, passed through otherwise)
    :return: (quantity, buy_rate, contracts)
    """
    str_status = 'Used rate: {}'.format(buy_rate)
    robot.logger.lprint([str_status])
    # Source position to rate, including currency conversion if needed
    if robot.exchange != 'oanda':
        quantity = round(Decimal(str(robot.source_position)) / Decimal(str(buy_rate)), 6)
    else:
        # Need to account for the usd price; hardcoded AUD but could be changed to anything
        usd_x_rate = usd_rate_value(robot, e_api)
        source_in_usd = robot.source_position * Decimal(str(usd_x_rate))
        # For forex pairs like USD_JPY, unit = USD so no need to divide
        if robot.forex_pair:
            quantity = round(Decimal(str(source_in_usd)), 6)
        else:
            quantity = round(Decimal(str(source_in_usd)) / Decimal(str(buy_rate)), 6)
        robot.logger.lprint(["Changing {} AUD to {} USD".format(robot.source_position, source_in_usd)])  # DEBUG
    # Calculate quantity / contracts
    if robot.exchange == 'bitmex':  # need to do this in contracts because the api returns contracts and not xbt filled
        if robot.market in config.primary_calc_markets:
            quantity = round(Decimal(str(robot.source_position)), 6)
            buy_rate = round(buy_rate, 0)
            contracts = round(quantity * Decimal(buy_rate))  # margin is already accounted for in the main code
        else:  # All alts are traded vs btc
            quantity = round(Decimal(str(robot.source_position)), 6)
            buy_rate = round(buy_rate, 20)
            contracts = round(quantity / buy_rate)  # margin is already accounted for in the main code
        robot.contracts_total += contracts
        robot.logger.lprint(["Quantity (xbt) {}, buy_rate {}, contracts {}, source_position {}".format(
            quantity, buy_rate, contracts, robot.source_position)])  # DEBUG
    # on OANDA, the number of units should be whole
    elif robot.exchange == 'oanda':
        # NOTE(review): b_test is not a parameter here — it is resolved as a
        # module-level global; confirm it is defined at call time.
        if not b_test.backtesting:
            # Because we could have something like 0.99 units when there is enough margin left.
            # Subtracting 0.4 to e.g. not round 1.2 up to 2.
            # Fix: construct Decimal from the string '0.4' — Decimal(0.4) would
            # carry the binary-float representation error (0.4000000000000000222...).
            quantity = int(math.ceil(quantity - Decimal('0.4')))
            robot.logger.lprint(['Changing the quantity to whole:', quantity])
    str_status = 'Quantity to open position for: {}'.format(quantity)
    robot.logger.lprint([str_status])
    return quantity, buy_rate, contracts
### Postonly attempts handling
def postonly_attempts_confirm(robot, timer_start):
    """Return True while post-only (market-making) orders should still be attempted.

    Updates robot.timer_diff (minutes elapsed since timer_start) and switches
    to market orders once config.postonly_minutes has passed.
    NOTE(review): b_test is resolved as a module-level global here.
    """
    elapsed_minutes = (b_test.time() - timer_start) / 60
    robot.timer_diff = elapsed_minutes
    still_postonly = elapsed_minutes <= config.postonly_minutes
    if still_postonly:
        robot.logger.lprint(['-- trying market making/orderbook'])
    else:
        robot.logger.lprint(['-- switching to market taking/market orders'])
    return still_postonly
### Opening position when everything is ready
def init_position_open(robot, e_api, buy_flag, buy_rate, contracts, sum_quantity, quantity, avg_price):
buy_uuid = None
# Account for attempts to try market making or orderbook (for traditional)
current_postonly_flag = postonly_attempts_confirm(robot, robot.timer_init_start)
# Proceeding with the position
if robot.simulation:
buy_flag, robot.sleep_buy_timer = False, 0
robot.logger.lprint(['Bought | |
<reponame>faycalki/tainted-paths
from header_common import *
from header_presentations import *
from header_mission_templates import *
from ID_meshes import *
from header_operations import *
from header_triggers import *
from module_constants import *
from header_items import *
import string
####################################################################################################################
# Each presentation record contains the following fields:
# 1) Presentation id: used for referencing presentations in other files. The prefix prsnt_ is automatically added before each presentation id.
# 2) Presentation flags. See header_presentations.py for a list of available flags
# 3) Presentation background mesh: See module_meshes.py for a list of available background meshes
# 4) Triggers: Simple triggers that are associated with the presentation
####################################################################################################################
coop_presentations = [
("coop_admin_panel", prsntf_manual_end_only, 0, [
(ti_on_presentation_load,
[(set_fixed_point_multiplier, 1000),
(assign, "$g_presentation_obj_coop_admin_panel_1", -1),
(assign, "$g_presentation_obj_coop_admin_panel_2", -1),
(assign, "$g_presentation_obj_coop_admin_panel_3", -1),
(assign, "$g_presentation_obj_coop_admin_panel_4", -1),
(assign, "$g_presentation_obj_coop_admin_panel_5", -1),
(assign, "$g_presentation_obj_coop_admin_panel_6", -1),
(assign, "$g_presentation_obj_coop_admin_panel_7", -1),
(assign, "$g_presentation_obj_coop_admin_panel_8", -1),
(assign, "$g_presentation_obj_coop_admin_panel_9", -1),
(assign, "$g_presentation_obj_coop_admin_panel_10", -1),
(assign, "$g_presentation_obj_coop_admin_panel_11", -1),
(assign, "$g_presentation_obj_coop_admin_panel_12", -1),
(assign, "$g_presentation_obj_coop_admin_panel_13", -1),
(assign, "$g_presentation_obj_coop_admin_panel_14", -1),
(assign, "$g_presentation_obj_coop_admin_panel_15", -1),
(assign, "$g_presentation_obj_coop_admin_panel_16", -1),
(assign, "$g_presentation_obj_coop_admin_panel_17", -1),
(assign, "$g_presentation_obj_coop_admin_panel_18", -1),
(assign, "$g_presentation_obj_coop_admin_panel_19", -1),
(assign, "$g_presentation_obj_coop_admin_panel_20", -1),
(assign, "$g_presentation_obj_coop_admin_panel_21", -1),
# (assign, "$g_presentation_obj_coop_admin_panel_22", -1),
(assign, "$g_presentation_obj_coop_admin_panel_23", -1),
(assign, "$g_presentation_obj_coop_admin_panel_24", -1),
(assign, "$g_presentation_obj_coop_admin_panel_25", -1),
(assign, "$g_presentation_obj_coop_admin_panel_26", -1),
(assign, "$g_presentation_obj_coop_admin_panel_27", -1),
#Begin terrain generation
# (assign, "$g_presentation_obj_coop_admin_panel_28", -1),
# (assign, "$g_presentation_obj_coop_admin_panel_29", -1), #STEP 2
# (assign, "$g_presentation_obj_coop_admin_panel_30", -1), #STEP 2
# (assign, "$g_presentation_obj_coop_admin_panel_31", -1), #STEP 2
# (assign, "$g_presentation_obj_coop_admin_panel_32", -1), #STEP 2
# (assign, "$g_presentation_obj_coop_admin_panel_33", -1), #STEP 2
# (assign, "$g_presentation_obj_coop_admin_panel_34", -1), #STEP 2
# (assign, "$g_presentation_obj_coop_admin_panel_35", -1), #STEP 2
# #(assign, "$g_presentation_obj_coop_admin_panel_36", -1), #STEP 2 #Historical Banners (No option for it because we want it enabled all the time)
#(assign, "$g_presentation_obj_coop_admin_panel_37", -1), #STEP 2 # Randomize Shield (No option for it because we want it enabled all the time)
(assign, "$g_presentation_obj_coop_admin_panel_38", -1), #STEP 2 #Shield bash player
(assign, "$g_presentation_obj_coop_admin_panel_39", -1), #STEP 2 # Shield bash AI
#STEP 2 Numerical Settings template BEGIN: add another one with a higher number; make sure it doesn't conflict with other numbers
(assign, "$g_presentation_obj_coop_admin_panel_40", -1), #STEP 2 # Storm chance
(assign, "$g_presentation_obj_coop_admin_panel_41", -1), #STEP 2 # Storm chance
#STEP 2 Numerical Settings template END
(assign, "$g_presentation_obj_coop_admin_panel_42", -1), #STEP 2 # Storm chance
(assign, "$g_presentation_obj_coop_admin_panel_43", -1),
(assign, "$g_presentation_obj_coop_admin_panel_44", -1),
#End terrain generation
(create_mesh_overlay, reg0, "mesh_mp_ui_host_maps_randomp"),
(position_set_x, pos1, -1),
(position_set_y, pos1, 550),
(overlay_set_position, reg0, pos1),
(position_set_x, pos1, 1002),
(position_set_y, pos1, 1002),
(overlay_set_size, reg0, pos1),
(create_mesh_overlay, reg0, "mesh_mp_ui_host_main"),
(position_set_x, pos1, -1),
(position_set_y, pos1, -1),
(overlay_set_position, reg0, pos1),
(position_set_x, pos1, 1002),
(position_set_y, pos1, 1002),
(overlay_set_size, reg0, pos1),
#identify coop admin panel
(create_text_overlay, reg0, "@Co-op Mode",tf_center_justify),
(position_set_x, pos1, 850),
(position_set_y, pos1, 500),
(overlay_set_position, reg0, pos1),
(position_set_x, pos1, 2000),
(position_set_y, pos1, 2000),
(overlay_set_size, reg0, pos1),
(assign, ":cur_y", 1450), #Increase coop admin panel options by increasing this, if you can't see an option increase this number to increase the height.
(assign, ":cur_y_adder", 40),
(str_clear, s0),
(create_text_overlay, "$g_presentation_obj_admin_panel_container", s0, tf_scrollable),
(position_set_x, pos1, 59),
(position_set_y, pos1, 50),
(overlay_set_position, "$g_presentation_obj_admin_panel_container", pos1),
(position_set_x, pos1, 640),
(position_set_y, pos1, 520),
(overlay_set_area_size, "$g_presentation_obj_admin_panel_container", pos1),
(set_container_overlay, "$g_presentation_obj_admin_panel_container"),
(create_text_overlay, reg0, "str_add_to_official_game_servers_list", 0),
(position_set_x, pos1, 30),
(position_set_y, pos1, ":cur_y"),
(overlay_set_position, reg0, pos1),
(create_check_box_overlay, "$g_presentation_obj_coop_admin_panel_6", "mesh_checkbox_off", "mesh_checkbox_on"),
(position_set_x, pos1, 7),
(store_add, ":special_cur_y", ":cur_y", 0),
(position_set_y, pos1, ":special_cur_y"),
(overlay_set_position, "$g_presentation_obj_coop_admin_panel_6", pos1),
(server_get_add_to_game_servers_list, ":add_to_servers_list"),
(overlay_set_val, "$g_presentation_obj_coop_admin_panel_6", ":add_to_servers_list"),
####BEGIN ADDITIONAL FEATURES
#Constants are:
# coop_generate_swamp = 60
# coop_generate_desert = 61
# coop_generate_desertv2 = 62
# coop_generate_desertv3 = 63
# coop_generate_iberian = 64
# coop_generate_iberian2 = 65
# coop_generate_snow = 66
# coop_generate_euro_hillside = 67
# g_presentation_obj_coop_admin_panel_28 is only in this file
# coop_generate_swamp & $coop_generate_swamp this file and module_coop_scripts.
#If building additional menus (One button checkbox) search for:
# STEP 1 Copy the swamp text below and paste it right below it with val_sub cutting between them.
# STEP 2 Add g_presentation_obj_coop_admin_panel_29 above
# STEP 3 Go below and add the new number, as well as the correct coop_ code (We are done working with this file now, onto module_coop_scripts we go!)
# STEP 4 Search for coop_generate_swamp in module_coop_scripts & Copy the lines while putting the correct information in them.
#Files affected = header common, module_coop_scripts, module_constants, and this file.
# ####Swamp Text STEP 1
# (val_sub, ":cur_y", ":cur_y_adder"),
#
# (create_text_overlay, reg0, "@Generate Swamp Note: Check map name below and generate correct terrain.", 0),
# (position_set_x, pos1, 30),
# (position_set_y, pos1, ":cur_y"),
# (overlay_set_position, reg0, pos1),
# ####Swamp button
# (create_check_box_overlay, "$g_presentation_obj_coop_admin_panel_28", "mesh_checkbox_off", "mesh_checkbox_on"),
# (position_set_x, pos1, 7),
# (store_add, ":special_cur_y", ":cur_y", 0),
# (position_set_y, pos1, ":special_cur_y"),
# (overlay_set_position, "$g_presentation_obj_coop_admin_panel_28", pos1),
# (overlay_set_val, "$g_presentation_obj_coop_admin_panel_28", "$coop_generate_swamp"),
# ####End Swamp
#
#
# (val_sub, ":cur_y", ":cur_y_adder"),
#
# (create_text_overlay, reg0, "@Generate Desert (Best for: Rocky Desert, Rocks only)", 0),
# (position_set_x, pos1, 30),
# (position_set_y, pos1, ":cur_y"),
# (overlay_set_position, reg0, pos1),
#
# (create_check_box_overlay, "$g_presentation_obj_coop_admin_panel_29", "mesh_checkbox_off", "mesh_checkbox_on"),
# (position_set_x, pos1, 7),
# (store_add, ":special_cur_y", ":cur_y", 0),
# (position_set_y, pos1, ":special_cur_y"),
# (overlay_set_position, "$g_presentation_obj_coop_admin_panel_29", pos1),
# (overlay_set_val, "$g_presentation_obj_coop_admin_panel_29", "$coop_generate_desert"),
#
# (val_sub, ":cur_y", ":cur_y_adder"),
#
# (create_text_overlay, reg0, "@Generate DesertV2 (Best for: Rocky Desert, different Rocks & Trees)", 0),
# (position_set_x, pos1, 30),
# (position_set_y, pos1, ":cur_y"),
# (overlay_set_position, reg0, pos1),
#
# (create_check_box_overlay, "$g_presentation_obj_coop_admin_panel_30", "mesh_checkbox_off", "mesh_checkbox_on"),
# (position_set_x, pos1, 7),
# (store_add, ":special_cur_y", ":cur_y", 0), #7 = default for 1429 font, 0= for 1257AD Font.
#
# (position_set_y, pos1, ":special_cur_y"),
# (overlay_set_position, "$g_presentation_obj_coop_admin_panel_30", pos1),
# (overlay_set_val, "$g_presentation_obj_coop_admin_panel_30", "$coop_generate_desertv2"),
#
# (val_sub, ":cur_y", ":cur_y_adder"),
#
# (create_text_overlay, reg0, "@Generate DesertV3 (Best for: The Nile in the Desert, Desert Trees)", 0),
# (position_set_x, pos1, 30),
# (position_set_y, pos1, ":cur_y"),
# (overlay_set_position, reg0, pos1),
#
# (create_check_box_overlay, "$g_presentation_obj_coop_admin_panel_31", "mesh_checkbox_off", "mesh_checkbox_on"),
# (position_set_x, pos1, 7),
# (store_add, ":special_cur_y", ":cur_y", 0),
# (position_set_y, pos1, ":special_cur_y"),
# (overlay_set_position, "$g_presentation_obj_coop_admin_panel_31", pos1),
# (overlay_set_val, "$g_presentation_obj_coop_admin_panel_31", "$coop_generate_desertv3"),
#
# (val_sub, ":cur_y", ":cur_y_adder"),
#
# (create_text_overlay, reg0, "@Generate Iberian (Best for: Iberian Hillsides, Iberian, Steppe & Steppe Forest)", 0),
# (position_set_x, pos1, 30),
# (position_set_y, pos1, ":cur_y"),
# (overlay_set_position, reg0, pos1),
#
# (create_check_box_overlay, "$g_presentation_obj_coop_admin_panel_32", "mesh_checkbox_off", "mesh_checkbox_on"),
# (position_set_x, pos1, 7),
# (store_add, ":special_cur_y", ":cur_y", 0),
# (position_set_y, pos1, ":special_cur_y"),
# (overlay_set_position, "$g_presentation_obj_coop_admin_panel_32", pos1),
# (overlay_set_val, "$g_presentation_obj_coop_admin_panel_32", "$coop_generate_iberian"),
#
# (val_sub, ":cur_y", ":cur_y_adder"),
#
# (create_text_overlay, reg0, "@Generate Iberian2 (Best for: Iberian Hillsides, Iberian, Steppe & Steppe Forest)", 0),
# (position_set_x, pos1, 30),
# (position_set_y, pos1, ":cur_y"),
# (overlay_set_position, reg0, pos1),
#
# (create_check_box_overlay, "$g_presentation_obj_coop_admin_panel_33", "mesh_checkbox_off", "mesh_checkbox_on"),
# (position_set_x, pos1, 7),
# (store_add, ":special_cur_y", ":cur_y", 0),
# (position_set_y, pos1, ":special_cur_y"),
# (overlay_set_position, "$g_presentation_obj_coop_admin_panel_33", pos1),
# (overlay_set_val, "$g_presentation_obj_coop_admin_panel_33", "$coop_generate_iberian2"),
#
# (val_sub, ":cur_y", ":cur_y_adder"),
#
# (create_text_overlay, reg0, "@Generate Snow (NOTE: Scroll down for more options below!)", 0),
# (position_set_x, pos1, 30),
# (position_set_y, pos1, ":cur_y"),
# (overlay_set_position, reg0, pos1),
#
# (create_check_box_overlay, "$g_presentation_obj_coop_admin_panel_34", "mesh_checkbox_off", "mesh_checkbox_on"),
# (position_set_x, pos1, 7),
# (store_add, ":special_cur_y", ":cur_y", 0),
# (position_set_y, pos1, ":special_cur_y"),
# (overlay_set_position, "$g_presentation_obj_coop_admin_panel_34", pos1),
# (overlay_set_val, "$g_presentation_obj_coop_admin_panel_34", "$coop_generate_snow"),
#
# (val_sub, ":cur_y", ":cur_y_adder"),
#
# (create_text_overlay, reg0, "@Generate Euro Hillside (Best for: Euro Hillsides)", 0),
# (position_set_x, pos1, 30),
# (position_set_y, pos1, ":cur_y"),
# (overlay_set_position, reg0, pos1),
#
# (create_check_box_overlay, "$g_presentation_obj_coop_admin_panel_35", "mesh_checkbox_off", "mesh_checkbox_on"),
# (position_set_x, pos1, 7),
# (store_add, ":special_cur_y", ":cur_y", 0),
# (position_set_y, pos1, ":special_cur_y"),
# (overlay_set_position, "$g_presentation_obj_coop_admin_panel_35", pos1),
# (overlay_set_val, "$g_presentation_obj_coop_admin_panel_35", "$coop_generate_euro_hillside"),
#
#
#
#
#
# ####END ADDITIONAL FEATURES
# (val_sub, ":cur_y", ":cur_y_adder"),
# (create_text_overlay, reg0, "str_enable_valve_anti_cheat", 0),
# (position_set_x, pos1, 30),
# (position_set_y, pos1, ":cur_y"),
# (overlay_set_position, reg0, pos1),
# (create_check_box_overlay, "$g_presentation_obj_coop_admin_panel_22", "mesh_checkbox_off", "mesh_checkbox_on"),
# (position_set_x, pos1, 7),
# (store_add, ":special_cur_y", ":cur_y", 7),
# (position_set_y, pos1, ":special_cur_y"),
# (overlay_set_position, "$g_presentation_obj_coop_admin_panel_22", pos1),
# (server_get_anti_cheat, ":server_anti_cheat"),
# (overlay_set_val, "$g_presentation_obj_coop_admin_panel_22", ":server_anti_cheat"),
(val_sub, ":cur_y", ":cur_y_adder"),
(create_text_overlay, reg0, "str_server_name", 0),
(position_set_x, pos1, 0),
(position_set_y, pos1, ":cur_y"),
(overlay_set_position, reg0, pos1),
(str_store_server_name, s0),
(try_begin),
(eq, "$g_multiplayer_renaming_server_allowed", 1),
(create_simple_text_box_overlay, "$g_presentation_obj_coop_admin_panel_9"),
(position_set_x, pos1, 390),
(position_set_y, pos1, ":cur_y"),
(overlay_set_position, "$g_presentation_obj_coop_admin_panel_9", pos1),
(overlay_set_text, "$g_presentation_obj_coop_admin_panel_9", s0),
(else_try),
(assign, "$g_presentation_obj_coop_admin_panel_9", -1),
(create_text_overlay, reg0, s0, 0),
(position_set_x, pos1, 385),
(position_set_y, pos1, ":cur_y"),
(overlay_set_position, reg0, pos1),
(try_end),
(val_sub, ":cur_y", ":cur_y_adder"),
(create_text_overlay, reg0, "str_game_password", 0),
(position_set_x, pos1, 0),
(position_set_y, pos1, ":cur_y"),
(overlay_set_position, reg0, pos1),
(create_simple_text_box_overlay, "$g_presentation_obj_coop_admin_panel_4"),
(position_set_x, pos1, 390),
(position_set_y, pos1, ":cur_y"),
(overlay_set_position, "$g_presentation_obj_coop_admin_panel_4", pos1),
(str_store_server_password, s0),
(overlay_set_text, "$g_presentation_obj_coop_admin_panel_4", s0),
(val_sub, ":cur_y", ":cur_y_adder"),
(create_text_overlay, reg0, "str_welcome_message", 0),
(position_set_x, pos1, 0),
(position_set_y, pos1, ":cur_y"),
(overlay_set_position, reg0, pos1),
(create_simple_text_box_overlay, "$g_presentation_obj_coop_admin_panel_18"),
(position_set_x, pos1, 390),
(position_set_y, pos1, ":cur_y"),
(overlay_set_position, "$g_presentation_obj_coop_admin_panel_18", pos1),
(str_store_welcome_message, s0),
(overlay_set_text, "$g_presentation_obj_coop_admin_panel_18", s0),
(val_sub, ":cur_y", ":cur_y_adder"),
(create_text_overlay, reg0, "str_map", 0),
(position_set_x, pos1, 0),
(position_set_y, pos1, ":cur_y"),
(overlay_set_position, reg0, pos1),
(try_begin),
(is_between, "$coop_battle_scene", multiplayer_scenes_begin, multiplayer_scenes_end),
(store_sub, ":string_id", "$coop_battle_scene", multiplayer_scenes_begin),
(val_add, ":string_id", multiplayer_scene_names_begin),
(str_store_string, s0, ":string_id"),
(else_try),
(call_script, "script_coop_get_scene_name", "$coop_battle_scene"),#if not random map use party name
(try_end),
(create_text_overlay, reg0, s0, 0),
(position_set_x, pos1, 385),
(position_set_y, pos1, ":cur_y"),
(overlay_set_position, reg0, pos1),
(val_sub, ":cur_y", ":cur_y_adder"),
(create_text_overlay, reg0, "str_game_type", 0),
(position_set_x, pos1, 0),
(position_set_y, pos1, ":cur_y"),
(overlay_set_position, reg0, pos1),
(store_add, ":string_index", "$g_multiplayer_game_type", multiplayer_game_type_names_begin),
(str_store_string, s0, ":string_index"),
(create_text_overlay, reg0, s0, 0),
(position_set_x, pos1, 385),
(position_set_y, pos1, ":cur_y"),
(overlay_set_position, reg0, pos1),
(val_sub, ":cur_y", ":cur_y_adder"),
(assign, reg1, 1),
| |
<reponame>john-doe-3141592653/XXX
'''
Copyright or © or Copr.
This software is a computer program whose purpose is to generate random
test case from a template file describing the data model.
This software is governed by the CeCILL-B license under French law and
abiding by the rules of distribution of free software. You can use,
modify and/ or redistribute the software under the terms of the CeCILL-B
license as circulated by CEA, CNRS and INRIA at the following URL
"http://www.cecill.info".
As a counterpart to the access to the source code and rights to copy,
modify and redistribute granted by the license, users are provided only
with a limited warranty and the software's author, the holder of the
economic rights, and the successive licensors have only limited
liability.
In this respect, the user's attention is drawn to the risks associated
with loading, using, modifying and/or developing or reproducing the
software by the user in light of its specific status of free software,
that may mean that it is complicated to manipulate, and that also
therefore means that it is reserved for developers and experienced
professionals having in-depth computer knowledge. Users are therefore
encouraged to load and test the software's suitability as regards their
requirements in conditions enabling the security of their systems and/or
data to be ensured and, more generally, to use and operate it in the
same conditions as regards security.
The fact that you are presently reading this means that you have had
knowledge of the CeCILL-B license and that you accept its terms.
'''
import numpy as np
import Miscellaneous as misc
from Xxx import SETTINGS
from Element import Element
###############################################################################
# --- Parameter ------------------------------------------------------------- #
###############################################################################
class Parameter(Element):
    """
    A Parameter object hold an array of variables and everything required to generate them
    """
    # Class-wide counter used to build a unique "var_<n>" identifier per instance
    counter = 0
    def __init__(self, n, d, nb):
        """
        :param n : name
        :param d : depth
        :param nb : nb_instances (number of variable slots held by this parameter)
        """
        Element.__init__(self, n, d)
        self.__check_nb_instances(nb)
        self._nb_instances = nb
        self._identifier = "var_" + str(Parameter.counter)
        # One slot per instance; None means "not generated yet"
        self._values = [None]*self._nb_instances
        # Per-instance lock: a locked slot ignores set/reset requests
        self._locks = [False]*self._nb_instances
        # When True, change_nb_instances() becomes a no-op
        self._nb_instances_lock = False
        Parameter.counter += 1
    def __check_nb_instances(self, nb):
        # Reject instance counts outside [0 ; parameter_max_nb_instances]
        if not 0 <= nb <= SETTINGS.get("parameter_max_nb_instances"):
            misc.error("Parameter::__check_nb_instances() -> " + self._name + ": nb_instances parameter is out of range [0 ; " + str(SETTINGS.get("parameter_max_nb_instances")) + "]")
            raise ValueError
    def change_nb_instances(self, nb):
        """Grow or shrink the values/locks arrays to nb entries (no-op when the count is locked)."""
        if not self._nb_instances_lock:
            self.__check_nb_instances(nb)
            while self._nb_instances > nb:
                self._values.pop()
                self._locks.pop()
                self._nb_instances -= 1
            while self._nb_instances < nb:
                self._values.append(None)
                self._locks.append(False)
                self._nb_instances += 1
    def lock_nb_instances(self):
        self._nb_instances_lock = True
    def lock_i(self, i):
        """Lock instance i so its value can no longer be set or reset."""
        self._locks[i] = True
    def lock_all(self):
        for i in range(self._nb_instances):
            self.lock_i(i)
    def unlock_nb_instances(self):
        self._nb_instances_lock = False
    def unlock_i(self, i):
        self._locks[i] = False
    def unlock_all(self):
        for i in range(self._nb_instances):
            self.unlock_i(i)
    def reset_i(self, i):
        """Clear the value of instance i back to None (unless that slot is locked)."""
        if not self._locks[i]:
            self._values[i] = None
    def reset_all(self):
        for i in range(self._nb_instances):
            self.reset_i(i)
    def _random_gen(self):
        """
        Generate a parameter content according to the selected method
        :return: the parameter content
        """
        raise NotImplementedError
    def set_value_i(self, i, val):
        """
        Set the parameter i content according to val.
        val can be "r" for random or a specific value.
        The function will do nothing if the parameter is locked (locks[i] == True)
        :param i : the parameter index
        :param val : "r" or a specific value
        :return : None
        """
        raise NotImplementedError
    def set_all_values(self, val):
        for i in range(self._nb_instances):
            self.set_value_i(i, val)
    def duplicate(self):
        """
        Create a new instance of the parameter with the same initial settings
        :return: A parameter object
        """
        raise NotImplementedError
    def __repr__(self):
        return "name: " + self._name +\
               "\nidentifier: " +str(self._identifier) +\
               "\ndepth: " + str(self._depth) +\
               "\nnb_instances: " + str(self._nb_instances) +\
               "\nvalues: " + str(self._values) +\
               "\nlocks: " + str(self._locks)
    def get_type(self):
        raise NotImplementedError
    # --- Read-only accessors, exposed as properties below each getter ---
    def get_values(self):
        return self._values
    values = property(get_values)
    def get_identifier(self):
        return self._identifier
    identifier = property(get_identifier)
    def get_nb_instances_lock(self):
        return self._nb_instances_lock
    nb_instances_lock = property(get_nb_instances_lock)
    def get_locks(self):
        return self._locks
    locks = property(get_locks)
    def get_nb_instances(self):
        return self._nb_instances
    nb_instances = property(get_nb_instances)
###############################################################################
# --- Categorical-Parameter ------------------------------------------------- #
###############################################################################
class Categorical_Parameter(Parameter):
    """Parameter drawn from a finite set of values, optionally weighted."""
    def __init__(self, n, d, v, w, nb):
        """
        :param n : name
        :param d : depth
        :param v : values (array of possible values)
        :param w : weights (optional array with one integer weight per value)
        :param nb : nb_instances
        """
        Parameter.__init__(self, n, d, nb)
        self._check_values(v)
        self._values_array = v
        self.__check_weights(w)
        self._weights_array = w
    def _check_values(self, v):
        # Subclass hook: validate the values array (raise on failure)
        raise NotImplementedError
    def __check_weights(self, w):
        # An empty/None weights array is allowed (uniform draw); otherwise it
        # must line up one-to-one with the values array.
        if w:
            if len(self._values_array) != len(w):
                misc.error("Categorical_Parameter::__check_weights() -> " + self._name + ": values array size and weights array size must be equal")
                raise ValueError
    def _random_gen(self):
        """Draw one value: weighted when weights are set, uniform otherwise."""
        if self._weights_array:
            return self._discrete_distribution_selection()
        else:
            return self._values_array[np.random.randint(0, len(self._values_array))]
    def _discrete_distribution_selection(self):
        """Weighted draw: pick r in [0, sum(weights)) and walk the cumulative weight ranges."""
        # NOTE(review): round() on randint's integer result looks redundant — confirm.
        r = round(np.random.randint(sum(self._weights_array)))
        counter = 0
        for i in range(len(self._weights_array)):
            if counter <= r < (counter + self._weights_array[i]):
                return self._values_array[i]
            counter += self._weights_array[i]
    def get_values_array(self):
        return self._values_array
    values_array = property(get_values_array)
    def get_weights_array(self):
        return self._weights_array
    weights_array = property(get_weights_array)
    def __repr__(self):
        return Parameter.__repr__(self) +\
               "\nvalues: " + str(self._values_array) +\
               "\nweights: " + str(self._weights_array)
###############################################################################
# --- Boolean_Parameter ----------------------------------------------------- #
###############################################################################
class Boolean_Parameter(Categorical_Parameter):
    """Categorical parameter restricted to boolean values."""
    def __init__(self, n, d, v, w, nb):
        """
        :param n : name
        :param d : depth
        :param v : values
        :param w : weights
        :param nb : nb_instances
        """
        Categorical_Parameter.__init__(self, n, d, v, w, nb)
    def _check_values(self, v):
        # Boolean parameters accept any values array: nothing to validate
        pass
    def set_value_i(self, i, val):
        """Set instance i to True/False, or draw it randomly when val == 'r'.

        Accepted truthy spellings: True, "True", 1, "1"; falsy: False, "False",
        0, "0". Does nothing when the slot is locked; raises ValueError on any
        other input.
        """
        if not self._locks[i]:
            if val == 'r':
                self._values[i] = self._random_gen()
            elif val in [True, "True", 1, "1"]:
                self._values[i] = True
            # Fix: the falsy list previously contained the letter "O" instead
            # of the digit "0", so the string "0" was rejected.
            elif val in [False, "False", 0, "0"]:
                self._values[i] = False
            else:
                # str(val) so non-string inputs still yield a readable error
                # instead of a TypeError during concatenation.
                misc.error("Boolean_Parameter::set_value_i() -> " + self._name + ": unknown value parameter \"" + str(val) + "\"")
                raise ValueError
    def duplicate(self):
        """Return a fresh Boolean_Parameter with the same initial settings."""
        return Boolean_Parameter(self._name, self._depth, self._values_array, self._weights_array, self._nb_instances)
    def get_type(self):
        return "boolean"
    def __repr__(self):
        return misc.color("--- Boolean_Parameter ---", "yellow") + "\n" + Categorical_Parameter.__repr__(self)
###############################################################################
# --- String_Parameter ------------------------------------------------------ #
###############################################################################
class String_Parameter(Categorical_Parameter):
    """Categorical parameter over an array of candidate strings."""
    def __init__(self, n, d, v, w, nb):
        """
        :param n : name
        :param d : depth
        :param v : an array that contains all possible values as a string
        :param w : an array (int) that contains a weight corresponding to the associated value
        :param nb : nb_instances
        """
        Categorical_Parameter.__init__(self, n, d, v, w, nb)
    def _check_values(self, v):
        # Reject values arrays outside [1 ; string_parameter_max_size]
        if not 1 <= len(v) <= SETTINGS.get("string_parameter_max_size"):
            misc.error("Categorical_Parameter::__check_values() -> " + self._name + ": values array size is out of range [1 ;" + str(SETTINGS.get("string_parameter_max_size")) + "]")
            raise ValueError
    def set_value_i(self, i, val):
        """Set instance i from val.

        val may be "r" (random draw), "first"/"last" (ends of the values array),
        "wmin"/"wmax" (value with the smallest/largest weight), or any literal
        member of the values array. Does nothing when the slot is locked;
        raises NameError for anything else.
        """
        if not self._locks[i]:
            if val == "r":
                self._values[i] = self._random_gen()
            elif val == "first":
                self._values[i] = self._values_array[0]
            elif val == "last":
                self._values[i] = self._values_array[-1]
            elif val == "wmin":
                self._values[i] = self.__get_wmin()
            elif val == "wmax":
                self._values[i] = self.__get_wmax()
            elif val in self._values_array:
                self._values[i] = val
            else:
                misc.error("String_Parameter::set_value_i() -> " + self._name + ": invalid parameter: " + str(self._values_array))
                raise NameError
    def __get_wmin(self):
        """Return the value with the smallest weight (first one on ties).

        Fix: the previous loop iterated `for w, i in enumerate(...)`, which
        swapped index and weight, and its 999 sentinel ignored weights >= 999.
        """
        if not self._weights_array:
            return self._values_array[0]
        wmin_index = min(range(len(self._weights_array)), key=self._weights_array.__getitem__)
        return self._values_array[wmin_index]
    def __get_wmax(self):
        """Return the value with the largest weight (first one on ties).

        Fix: same index/weight swap as __get_wmin.
        """
        if not self._weights_array:
            return self._values_array[0]
        wmax_index = max(range(len(self._weights_array)), key=self._weights_array.__getitem__)
        return self._values_array[wmax_index]
    def duplicate(self):
        """Return a fresh String_Parameter with the same initial settings."""
        return String_Parameter(self._name, self._depth, self._values_array, self._weights_array, self._nb_instances)
    def get_type(self):
        return "string"
    def __repr__(self):
        return misc.color("--- String_Parameter ---", "yellow") + "\n" + Categorical_Parameter.__repr__(self)
###############################################################################
# --- Numerical_Parameter --------------------------------------------------- #
###############################################################################
class Numerical_Parameter(Parameter):
    """Base class for bounded numeric parameters.

    Values are drawn from one of three generators: "u" (uniform over
    [min, max]), "n" (normal, rejection-sampled into [min, max]) or "i"
    (uniform within one of several weighted sub-ranges).
    """
    def __init__(self, n, d, m, M, dis, mea, var, r, w, nb):
        """
        :param n : name
        :param d : depth
        :param m : min value
        :param M : max value
        :param dis : distribution -> "u" for a uniform | "n" for a normal | i for an interval
        :param mea : mean
        :param var : variance
        :param r : ranges
        :param w : weights
        :param nb : nb_instances
        """
        Parameter.__init__(self, n, d, nb)
        self._min = m
        self._max = M
        self._check_min_max_order()
        # Pre-initialise so attributes exist even if a check below raises.
        self._mean = None
        self._variance = None
        self._ranges = None
        self._intervals = None
        self.__check_distribution(dis)
        self._distribution = dis
        self.__set_mean_and_variance(mea, var)
        self.__check_ranges(r)
        self._ranges = r
        self.__set_intervals(r, w)
    def __check_distribution(self, dis):
        # Only the three supported generator codes are accepted.
        if dis not in ["u", "n", "i"]:
            misc.error("Numerical_Parameter::__check_distribution() -> " + self._name + ": invalid distribution [\"u\", \"n\" ,\"i\"]")
            raise NameError
    def _check_min_max_order(self):
        # Bounds must be ordered; equality (a degenerate range) is allowed.
        if self._min > self._max:
            misc.error("Numerical_Parameter::__check_min_max_order() -> " + self._name + ": max value should be greater than min value")
            raise ValueError
    def _check_value(self, val):
        # Reject any candidate value outside the inclusive [min, max] bounds.
        if not self._min <= val <= self._max:
            misc.error("Numerical_Parameter::_check_value() -> " + self._name + ": value parameter out of range[" + str(self._min) + ";" + str(self._max) + "]")
            raise ValueError
    def __check_ranges(self, ranges):
        # Each range is a (low, high) pair; both ends must lie inside
        # [min, max] and the pair must be ordered low <= high.
        if ranges:
            for r in ranges:
                for i in range(2):
                    if not self._min <= r[i] <= self._max:
                        misc.error("Numerical_Parameter::_check_ranges() -> " + self._name + ": invalid range value [" + str(self._min) + ";" + str(self._max) + "]")
                        raise ValueError
                if r[1] < r[0]:
                    misc.error("Numerical_Parameter::_check_ranges() -> " + self._name + ": invalid range value [" + str(self._min) + ";" + str(self._max) + "]")
                    raise ValueError
    def __set_mean_and_variance(self, mea, var):
        # Defaults: mean at the midpoint, variance at a quarter of the span.
        # NOTE(review): if exactly one of mea/var is None this falls into the
        # else branch and compares None -- TypeError on Python 3; confirm
        # callers always pass both or neither.
        if mea is None and var is None:
            self._mean = round((self._max + self._min)/2.0, 5)
            self._variance = round((self._max - self._min)/4.0, 5)
        else:
            if not self._min <= mea <= self._max:
                misc.error("Numerical_Parameter::__set_mean_and_variance() -> " + self._name + ": mean value must be between min and max")
                raise ValueError
            self._mean = round(mea, 5)
            if var < 0:
                misc.error("Numerical_Parameter::__set_mean_and_variance() -> " + self._name + ": variance value must be positive or null")
                raise ValueError
            self._variance = round(var, 5)
    def __set_intervals(self, r, w):
        # Model the choice of sub-range as a nested String_Parameter whose
        # values are the stringified range indices "0", "1", ... weighted by w.
        if r:
            v = []
            for i in range(len(r)):
                v.append(str(i))
            self._intervals = String_Parameter("interval", -1, v, w, 1)
    def _random_gen(self):
        """Draw one value according to the configured distribution."""
        if self._distribution == "u":
            val = (self._max - self._min)*np.random.rand() + self._min
        elif self._distribution == "n":
            if self._variance == 0:
                val = self._mean
            else:
                # Rejection sampling: redraw until the sample lies in bounds.
                # NOTE(review): np.random.normal's second argument is the
                # standard deviation, but self._variance is passed -- confirm
                # this is intended.
                val = np.random.normal(self._mean, self._variance, 1)[0]
                while not self._min <= val <= self._max:
                    val = np.random.normal(self._mean, self._variance, 1)[0]
        else:
            # "i": pick a weighted sub-range, then sample uniformly inside it.
            # NOTE(review): assumes ranges were provided, otherwise
            # self._intervals is None -- confirm upstream validation.
            self._intervals.set_value_i(0, "r")
            index = int(self._intervals.values[0])
            val = (self._ranges[index][1] - self._ranges[index][0])*np.random.rand() + self._ranges[index][0]
        return val
    def set_value_i(self, i, val):
        """Handle the shared keywords; return False if ``val`` is unhandled.

        Subclasses are expected to deal with a False return (e.g. parse a
        literal numeric value).
        """
        if val == "r":
            self._values[i] = self._random_gen()
            return True
        elif val == "min":
            self._values[i] = self._min
            return True
        elif val == "max":
            self._values[i] = self._max
            return True
        elif val == "mean":
            self._values[i] = self._mean
            return True
        else:
            return False
    def __repr__(self):
        return Parameter.__repr__(self) +\
            "\nmin: " + str(self._min) +\
            "\nmax: " + str(self._max) +\
            "\ngenerator: " + str(self._distribution) +\
            "\nmean: " + str(self._mean) +\
            "\nvariance: " + str(self._variance) +\
            "\nranges: " + str(self._ranges) + \
            "\nweights: " + str(self.get_weights())
    # Legacy-style read-only accessors kept for backward compatibility.
    def get_m(self):
        return self._min
    m = property(get_m)
    def get_M(self):
        return self._max
    M = property(get_M)
    def get_distribution(self):
        return self._distribution
    distribution = property(get_distribution)
    def get_mean(self):
        return self._mean
    mean = property(get_mean)
    def get_variance(self):
        return self._variance
    variance = property(get_variance)
    def get_ranges(self):
        return self._ranges
    ranges = property(get_ranges)
    def get_weights(self):
        # Weights live on the nested interval parameter; empty list when the
        # distribution has no ranges.
        w = []
        if self._intervals:
            w = self._intervals.weights_array
        return w
    weights = property(get_weights)
###############################################################################
# --- Integer_Parameter ----------------------------------------------------- #
###############################################################################
class Integer_Parameter(Numerical_Parameter):
def __init__(self, n, d, m, M, dis, mea, var, r, w, nb):
"""
:param n : name
:param d : depth
:param m : min value
:param M : max value
:param dis : | |
<reponame>Mooonside/SEGS
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Provides DeepLab model definition and helper functions.
DeepLab is a deep learning system for semantic image segmentation with
the following features:
(1) Atrous convolution to explicitly control the resolution at which
feature responses are computed within Deep Convolutional Neural Networks.
(2) Atrous spatial pyramid pooling (ASPP) to robustly segment objects at
multiple scales with filters at multiple sampling rates and effective
fields-of-views.
(3) ASPP module augmented with image-level feature and batch normalization.
(4) A simple yet effective decoder module to recover the object boundaries.
See the following papers for more details:
"Encoder-Decoder with Atrous Separable Convolution for Semantic Image
Segmentation"
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>.
(https://arxiv.org/abs/1802.02611)
"Rethinking Atrous Convolution for Semantic Image Segmentation,"
<NAME>, <NAME>, <NAME>, <NAME>
(https://arxiv.org/abs/1706.05587)
"DeepLab: Semantic Image Segmentation with Deep Convolutional Nets,
Atrous Convolution, and Fully Connected CRFs",
<NAME>*, <NAME>*, <NAME>, <NAME>,
<NAME> (* equal contribution)
(https://arxiv.org/abs/1606.00915)
"Semantic Image Segmentation with Deep Convolutional Nets and Fully Connected
CRFs"
<NAME>*, <NAME>*, <NAME>, <NAME>,
<NAME> (* equal contribution)
(https://arxiv.org/abs/1412.7062)
"""
import tensorflow as tf
from backbones import feature_extractor
from segs.common_configure import DeepLabFlags
from tf_ops.wrap_ops import conv2d, sep_conv2d, drop_out, arg_scope, regularizer, \
batch_norm2d, avg_pool2d, ms_softmax_with_logits
# Variable-scope names used when building the DeepLab graph; most are also
# exposed through get_extra_layer_scopes() so trainers can treat these
# layers specially (e.g. larger learning rate for non-backbone layers).
_LOGITS_SCOPE_NAME = 'logits'
_MERGED_LOGITS_SCOPE = 'merged_logits'
_IMAGE_POOLING_SCOPE = 'image_pooling'
_ASPP_SCOPE = 'aspp'
_CONCAT_PROJECTION_SCOPE = 'concat_projection'
_DECODER_SCOPE = 'decoder'
_DEBUG_SCOPE = 'debug'
class DEBUG:
    """Empty namespace object; attributes are attached ad hoc for debugging."""
    def __init__(self):
        # Nothing to initialise -- instances exist purely to carry attributes.
        pass
# Module-level scratch pad used throughout this file to stash intermediates.
DEBUG_VARS = DEBUG()
def get_extra_layer_scopes():
    """Gets the scopes for extra layers.

    Returns:
      A list of scopes for extra layers.
    """
    extra_layer_scopes = [
        _LOGITS_SCOPE_NAME,
        _IMAGE_POOLING_SCOPE,
        _ASPP_SCOPE,
        _CONCAT_PROJECTION_SCOPE,
        _DECODER_SCOPE,
    ]
    return extra_layer_scopes
def predict_labels(images, model_options, outputs_to_num_classes, image_pyramid=None):
    """Predicts segmentation labels.

    Args:
      images: A tensor of size [batch, height, width, channels].
      model_options: A ModelOptions instance to configure models.
      outputs_to_num_classes: A map from output type to number of classes,
        forwarded unchanged to multi_scale_logits.
      image_pyramid: Input image scales for multi-scale feature extraction.

    Returns:
      A dictionary with keys specifying the output_type (e.g., semantic
      prediction) and values storing Tensors representing predictions (argmax
      over channels). Each prediction has size [batch, height, width].
    """
    # Run inference-mode multi-scale logits (no training, frozen batch norm).
    outputs_to_scales_to_logits = multi_scale_logits(
        images,
        model_options=model_options,
        image_pyramid=image_pyramid,
        is_training=False,
        outputs_to_num_classes=outputs_to_num_classes,
        fine_tune_batch_norm=False)
    predictions = {}
    for output in sorted(outputs_to_scales_to_logits):
        scales_to_logits = outputs_to_scales_to_logits[output]
        # Upsample the merged logits back to the input resolution, then take
        # the per-pixel argmax over the class channel.
        logits = tf.image.resize_bilinear(
            scales_to_logits[_MERGED_LOGITS_SCOPE],
            tf.shape(images)[1:3],
            align_corners=True)
        predictions[output] = tf.argmax(logits, 3)
    return predictions
def scale_dimension(dim, scale):
    """Scales a spatial dimension as DeepLab does: (dim - 1) * scale + 1.

    Args:
      dim: Input dimension (a scalar or a scalar Tensor).
      scale: The amount of scaling applied to the input.

    Returns:
      Scaled dimension: an int for plain scalars, an int32 Tensor for Tensors.

    TODO: cast_int = floor(), floor((y - 1) / x + 1) = ceil(y / x)
    """
    if not isinstance(dim, tf.Tensor):
        return int((float(dim) - 1.0) * scale + 1.0)
    return tf.cast((tf.to_float(dim) - 1.0) * scale + 1.0, dtype=tf.int32)
def multi_scale_logits(images,
                       model_options,
                       image_pyramid,
                       outputs_to_num_classes,
                       weight_decay=0.0001,
                       is_training=False,
                       fine_tune_batch_norm=False):
    """Gets the logits for multi-scale inputs.
    The returned logits are all downsampled (due to max-pooling layers)
    for both training and evaluation.
    Args:
      images: A tensor of size [batch, height, width, channels].
      model_options: A ModelOptions instance to configure models.
      image_pyramid: Input image scales for multi-scale feature extraction.
      weight_decay: The weight decay for model variables.
      is_training: Is training or not.
      fine_tune_batch_norm: Fine-tune the batch norm parameters or not.
    Returns:
      {
          'TASK_NAME':{
              Image_Scale : feature
              ......
              _MERGED_LOGITS_SCOPE : merged feature
          }
          ...(IF MORE TASKS)
      }
      outputs_to_scales_to_logits: A map of maps from output_type (e.g.,
        semantic prediction) to a dictionary of multi-scale logits names to
        logits. For each output_type, the dictionary has keys which
        correspond to the scales and values which correspond to the logits.
        For example, if `scales` equals [1.0, 1.5], then the keys would
        include 'merged_logits', 'logits_1.00' and 'logits_1.50'.
    Raises:
      ValueError: If model_options doesn't specify crop_size and its
        add_image_level_feature = True, since add_image_level_feature requires
        crop_size information.
    """
    # Setup default values.
    if not image_pyramid:
        image_pyramid = [1.0]
    if model_options.crop_size is None and model_options.add_image_level_feature:
        raise ValueError(
            'Crop size must be specified for using image-level feature.')
    if model_options.model_variant == 'mobilenet_v2':
        if (model_options.atrous_rates is not None or
                model_options.decoder_output_stride is not None):
            # Output a warning and users should make sure if the setting is desired.
            tf.logging.warning('Our provided mobilenet_v2 checkpoint does not '
                               'include ASPP and decoder modules.')
    crop_height = (
        # 514
        model_options.crop_size[0]
        if model_options.crop_size else tf.shape(images)[1])
    crop_width = (
        model_options.crop_size[1]
        if model_options.crop_size else tf.shape(images)[2])
    # Compute the height, width for the output logits.
    # default to 16 , i.e. final predictions is [H/16, W/16]
    logits_output_stride = (
        model_options.decoder_output_stride or model_options.output_stride)
    # Logits are sized for the LARGEST pyramid scale so every scale's output
    # can be resized to a common shape before merging.
    logits_height = scale_dimension(
        crop_height,
        max(1.0, max(image_pyramid)) / logits_output_stride)
    logits_width = scale_dimension(
        crop_width,
        max(1.0, max(image_pyramid)) / logits_output_stride)
    # Compute the logits for each scale in the image pyramid.
    outputs_to_scales_to_logits = {
        k: {}
        for k in outputs_to_num_classes
    }
    for count, image_scale in enumerate(image_pyramid):
        # print('scale is {}'.format(image_scale))
        if image_scale != 1.0:
            scaled_height = scale_dimension(crop_height, image_scale)
            scaled_width = scale_dimension(crop_width, image_scale)
            scaled_crop_size = [scaled_height, scaled_width]
            scaled_images = tf.image.resize_bilinear(
                images, scaled_crop_size, align_corners=True)
            if model_options.crop_size:
                scaled_images.set_shape([None, scaled_height, scaled_width, 3])
        else:
            scaled_crop_size = model_options.crop_size
            scaled_images = images
        # NOTE(review): this mutates the caller's model_options in place so
        # _get_logits sees the per-scale crop size; confirm no caller relies
        # on crop_size being unchanged after this function returns.
        model_options.crop_size = scaled_crop_size
        # Variables are shared across pyramid scales via AUTO_REUSE.
        outputs_to_logits = _get_logits(
            scaled_images,
            model_options,
            weight_decay=weight_decay,
            reuse=tf.AUTO_REUSE,
            is_training=is_training,
            outputs_to_num_classes=outputs_to_num_classes,
            fine_tune_batch_norm=fine_tune_batch_norm)
        # Resize the logits to have the same dimension before merging.
        for output in sorted(outputs_to_logits):
            # resize_bilinear requires channel to be one or three
            outputs_to_logits[output] = tf.image.resize_bilinear(
                outputs_to_logits[output], [logits_height, logits_width],
                align_corners=True)
        # Return when only one input scale.
        if len(image_pyramid) == 1:
            for output in sorted(outputs_to_num_classes):
                outputs_to_scales_to_logits[output][
                    _MERGED_LOGITS_SCOPE] = outputs_to_logits[output]
            return outputs_to_scales_to_logits
        # Save logits to the output map.
        for output in sorted(outputs_to_num_classes):
            outputs_to_scales_to_logits[output][
                'logits_%.2f' % image_scale] = outputs_to_logits[output]
    # Merge the logits from all the multi-scale inputs.
    for output in sorted(outputs_to_num_classes):
        # Concatenate the multi-scale logits for each output type.
        all_logits = [
            tf.expand_dims(logits, axis=4)
            for logits in outputs_to_scales_to_logits[output].values()
        ]
        all_logits = tf.concat(all_logits, 4)
        # Merge over the new scale axis by max or mean, per model options.
        merge_fn = (
            tf.reduce_max
            if model_options.merge_method == 'max' else tf.reduce_mean)
        outputs_to_scales_to_logits[output][_MERGED_LOGITS_SCOPE] = merge_fn(
            all_logits, axis=4)
    return outputs_to_scales_to_logits
def _extract_features(images,
model_options,
weight_decay=0.0001,
reuse=tf.AUTO_REUSE,
is_training=False,
fine_tune_batch_norm=False):
"""Extracts features by the particular model_variant.
Args:
images: A tensor of size [batch, height, width, channels].
model_options: A ModelOptions instance to configure models.
weight_decay: The weight decay for model variables.
reuse: Reuse the model variables or not.
is_training: Is training or not.
fine_tune_batch_norm: Fine-tune the batch norm parameters or not.
Returns:
concat_logits: A tensor of size [batch, feature_height, feature_width,
feature_channels], where feature_height/feature_width are determined by
the images height/width and output_stride.
end_points: A dictionary from components of the network to the corresponding
activation.
"""
# feature extractor is a backbone factory
DEBUG_VARS.raw_image = images
features, end_points = feature_extractor.extract_features(
images,
output_stride=model_options.output_stride,
multi_grid=model_options.multi_grid,
model_variant=model_options.model_variant,
weight_decay=weight_decay,
reuse=reuse,
is_training=is_training,
fine_tune_batch_norm=fine_tune_batch_norm)
# TODO:check
# DEBUG_VARS.xception_feature = end_points['xception_65/entry_flow/conv1_1/Relu:0']
DEBUG_VARS.xception_feature = features
if not model_options.aspp_with_batch_norm:
return features, end_points
else:
batch_norm_params = {
'is_training': is_training and fine_tune_batch_norm,
'decay': 0.9997,
'eps': 1e-5,
'affine': True,
}
regularize_func = regularizer('l2', weight_decay)
with tf.variable_scope(tf.get_variable_scope(), reuse=reuse):
with arg_scope([sep_conv2d], activate=tf.nn.relu, activate_middle=tf.nn.relu, batch_norm=True,
depthwise_weight_reg=None, pointwise_weight_reg=regularize_func,
padding='SAME', strides=[1, 1]):
with arg_scope([conv2d], activate=tf.nn.relu, weight_reg=regularize_func,
batch_norm=True, padding='SAME', strides=[1, 1]):
# TODO: ASPP IS IMPLEMENTED HERE! Check Out!
with arg_scope([batch_norm2d], **batch_norm_params):
depth = 256
branch_logits = []
# TODO: ADD IMAGE POOLING HERE
if model_options.add_image_level_feature:
# this crop size has been updated to the new scaled one outside, which is the exact size
# of this model's inputs
pool_height = scale_dimension(model_options.crop_size[0],
1. / model_options.output_stride)
pool_width = scale_dimension(model_options.crop_size[1],
1. / model_options.output_stride)
# global average pooling, check whether the shape here is 1?
image_feature = avg_pool2d(
features, [pool_height, pool_width], [pool_height, pool_width],
padding='VALID')
# collapse channels to depth after GAP
image_feature = conv2d(
inputs=image_feature, outc=depth, ksize=[1, 1], name=_IMAGE_POOLING_SCOPE)
# TODO:check
DEBUG_VARS.image_feature = image_feature
# reshape it to final feature map shape
image_feature = tf.image.resize_bilinear(
image_feature, [pool_height, pool_width], align_corners=True)
image_feature.set_shape([None, pool_height, pool_width, depth])
# add image level feature to branch_logits
branch_logits.append(image_feature)
# Employ a 1x1 convolution.
branch_logits.append(conv2d(features, outc=depth, ksize=[1, 1], name=_ASPP_SCOPE + str(0)))
if model_options.atrous_rates:
# Employ 3x3 convolutions with different atrous rates.
DEBUG_VARS.aspp_features = []
for i, rate in enumerate(model_options.atrous_rates, 1):
scope = _ASPP_SCOPE + str(i)
if model_options.aspp_with_separable_conv:
aspp_features = sep_conv2d(
features, outc=depth, ksize=[3, 3], ratios=[rate, rate], name=scope)
DEBUG_VARS.aspp_features.append(aspp_features)
else:
aspp_features = conv2d(
features, outc=depth, ksize=[3, 3], ratios=[rate, | |
# -----------------------------------------------------------------------------
# Copyright 2020 <NAME>
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
# BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------------------------
import warnings
from mpi4py import MPI
import numpy as np
from scipy.interpolate import RegularGridInterpolator
from scipy import ndimage
import h5py
import matplotlib.pyplot as plt
import segyio
from ..geometry import signed_distance_functions as sdf
from .cpp import limgrad
class MeshSizeFunction:
"""The :class:`MeshSizeFunction` is used to build a rectangular or cubic isotropic mesh size function :math:`f(h)`.
"""
    def __init__(
        self,
        bbox,
        hmin,
        model,
        units="m-s",
        wl=0.0,
        freq=5.0,
        grad=0.0,
        space_order=1,
        hmax=np.inf,
        dt=0.0,
        cr_max=1.0,
        grade=0.0,
        nx=None,
        ny=None,
        nz=None,
        domain_ext=0.0,
        padstyle="edge",
        endianness="little",
    ):
        """Class constructor for :class:`MeshSizeFunction`
        :param bbox: bounding box containing domain extents.
        :type bbox: tuple with size (2*dim). For example, in 2D `(zmin, zmax, xmin, xmax)`
        :param hmin: minimum triangular edgelength populating the domain in meters.
        :type hmin: float64
        :param model: in 2D, a SEG-Y file containing the velocity model. In 3D, a binary file containing the velocity model.
        :type model: name of file (assumes velocity in m-s). Note 3D binary file must be little endian and `nx`, `ny`, `nz` are required.
        :param endianness: binary layout.
        :type endianness: optional, (little or big)
        :param nz: number of grid points in z-direction for velocity model.
        :type nz: int, optional in 2D, required in 3D
        :param nx: number of grid points in x-direction for velocity model.
        :type nx: int, optional in 2D, required in 3D
        :param ny: number of grid points in y-direction for velocity model.
        :type ny: int, optional in 2D, required in 3D
        :param units: units of the velocity model (either `m-s` or `km-s`)
        :type units: str, optional, default=`m-s`
        :param wl: number of vertices per wavelength for a given :math:`f_{max}`
        :type wl: int, optional
        :param grad: the resolution in m nearby sharp gradients in velociy.
        :type grad: float64, optional
        :param freq: :math:`f_{max}` in hertz for which to estimate `wl`
        :type freq: float64, optional
        :param space_order: the polynomial order of the basis functions.
        :type space_order: int, optional
        :param hmax: maximum mesh size in meters allowed in the domain
        :type hmax: float64, optional
        :param dt: theoretical maximum stable timestep in seconds given Courant number `Cr`
        :type dt: float64, optional
        :param cr_max: `dt` is stable with this Courant number.
        :type cr_max: float64, optional
        :param grade: maximum allowable variation in mesh size in decimal percent.
        :type grade: float64, optional
        :param domain_ext: width of domain extension in `-z`, `+x`, `-x`, `+y`, `-y` directions
        :type domain_ext: float64, optional
        :param padstyle: method to pad velocity in the domain extension
        :type padstyle: str, optional, `edge`, `linear`, `constant`
        :return: object populated with meta-data, :math:`f(h)`, and a :math:`f(d)`.
        :rtype: :class:`MeshSizeFunction` object
        """
        # NOTE: most assignments below go through validating property setters
        # (see the accessors further down), so assignment order matters --
        # e.g. bbox must be set before dim, which asserts on its length.
        self.bbox = bbox
        self.dim = int(len(self.bbox) / 2)
        # bbox layout is (zmin, zmax, xmin, xmax[, ymin, ymax]).
        self.width = bbox[3] - bbox[2]
        self.depth = bbox[1] - bbox[0]
        if self.dim == 3:
            self.length = bbox[5] - bbox[4]
        self.spacingZ = None
        self.spacingX = None
        self.model = model
        self.units = units
        self.hmin = hmin
        self.hmax = hmax
        self.wl = wl
        self.freq = freq
        self.grad = grad
        self.space_order = space_order
        self.dt = dt
        self.cr_max = cr_max
        self.grade = grade
        # fh/fd are populated by build(); None signals "not built yet".
        self.fh = None
        self.fd = None
        self.nx = nx
        self.ny = ny
        self.nz = nz
        self.domain_ext = domain_ext
        self.endianness = endianness
        self.padstyle = padstyle
        self.interpolant = None
        # Placeholder velocity until __ReadVelocityModel() runs in build().
        self.vp = 1500.0
### SETTERS AND GETTERS ###
    # Simple pass-through properties; the setters that matter validate their
    # argument with assert (NOTE: assert-based validation is stripped when
    # Python runs with -O).
    @property
    def interpolant(self):
        # Size-function interpolant; None until build() assigns it.
        return self.__interpolant
    @interpolant.setter
    def interpolant(self, value):
        self.__interpolant = value
    @property
    def fh(self):
        # Mesh size function f(h); None until build() is called.
        return self.__fh
    @fh.setter
    def fh(self, value):
        self.__fh = value
    @property
    def fd(self):
        # Signed-distance function f(d); None until build() is called.
        return self.__fd
    @fd.setter
    def fd(self, value):
        self.__fd = value
    @property
    def bbox(self):
        return self.__bbox
    @bbox.setter
    def bbox(self, value):
        # 4 values describe a 2D box, 6 values a 3D box.
        assert (
            len(value) >= 4 and len(value) <= 6
        ), "bbox has wrong number of values. either 4 or 6."
        self.__bbox = value
    @property
    def hmin(self):
        return self.__hmin
    @hmin.setter
    def hmin(self, value):
        assert value > 0.0, "hmin must be non-zero"
        self.__hmin = value
    @property
    def dim(self):
        return self.__dim
    @dim.setter
    def dim(self, value):
        assert value == 2 or value == 3, "dim must be either 2 or 3"
        self.__dim = value
    @property
    def vp(self):
        return self.__vp
    @vp.setter
    def vp(self, value):
        # Heuristic unit check: m-s velocities are normally within
        # [1000, 10000]; warn (don't fail) outside that range.
        if np.amin(value) < 1000:
            warnings.warn("Min. velocity < 1000 m-s. Units may be incorrect.")
        if np.amax(value) > 10000:
            warnings.warn("Max. velocity > 10,000 m-s. Units may be incorrect.")
        self.__vp = value
    @property
    def nz(self):
        # Grid dimensions are mandatory only for 3D binary models; reading
        # an unset value is treated as a usage error.
        assert self.__nz is not None, "binary file specified but nz was not."
        return self.__nz
    @nz.setter
    def nz(self, value):
        assert value is None or value > 0, " nz is not > 0"
        self.__nz = value
    @property
    def nx(self):
        assert self.__nx is not None, "binary file specified but nx was not."
        return self.__nx
    @nx.setter
    def nx(self, value):
        assert value is None or value > 0, " nx is not > 0"
        self.__nx = value
    @property
    def ny(self):
        assert self.__ny is not None, "binary file specified but ny was not."
        return self.__ny
    @ny.setter
    def ny(self, value):
        assert value is None or value > 0, " ny is not > 0"
        self.__ny = value
    @property
    def model(self):
        return self.__model
    @model.setter
    def model(self, value):
        # Only the filename is stored; the file is read later in build().
        assert isinstance(value, str) is True, "model must be a filename"
        self.__model = value
    # Remaining validated accessors for sizing/stability options.
    @property
    def units(self):
        return self.__units
    @units.setter
    def units(self, value):
        assert value == "m-s" or value == "km-s", "units are not compatible"
        self.__units = value
    @property
    def wl(self):
        # Vertices per wavelength; 0.0 disables the wavelength criterion.
        return self.__wl
    @wl.setter
    def wl(self, value):
        self.__wl = value
    @property
    def grad(self):
        # Resolution (m) near sharp velocity gradients; 0.0 disables it.
        return self.__grad
    @grad.setter
    def grad(self, value):
        self.__grad = value
    @property
    def freq(self):
        return self.__freq
    @freq.setter
    def freq(self, value):
        self.__freq = value
    @property
    def space_order(self):
        return self.__space_order
    @space_order.setter
    def space_order(self, value):
        self.__space_order = value
    @property
    def hmax(self):
        return self.__hmax
    @hmax.setter
    def hmax(self, value):
        self.__hmax = value
    @property
    def dt(self):
        # 0.0 means "no timestep-stability constraint on mesh size".
        return self.__dt
    @dt.setter
    def dt(self, value):
        assert value >= 0, "dt must be > 0"
        self.__dt = value
    @property
    def cr_max(self):
        return self.__cr_max
    @cr_max.setter
    def cr_max(self, value):
        assert value >= 0, "Cr_max must be > 0"
        self.__cr_max = value
    @property
    def grade(self):
        # 0.0 means "no gradation limiting".
        return self.__grade
    @grade.setter
    def grade(self, value):
        assert value >= 0, "grade must be > 0"
        self.__grade = value
    @property
    def domain_ext(self):
        return self.__domain_ext
    @domain_ext.setter
    def domain_ext(self, value):
        assert value >= 0, "domain extent must be > 0"
        self.__domain_ext = value
    @property
    def endianness(self):
        return self.__endianness
    @endianness.setter
    def endianness(self, value):
        assert value == "big" or value == "little", "endianness must be little or big"
        self.__endianness = value
    @property
    def padstyle(self):
        return self.__padstyle
    @padstyle.setter
    def padstyle(self, value):
        # NOTE(review): constructor docstring advertises `linear`/`constant`
        # but this accepts `linear_ramp` and rejects plain `linear` -- confirm
        # which spelling callers use.
        assert value == "edge" or value == "constant" or value == "linear_ramp"
        self.__padstyle = value
# ---PUBLIC METHODS---#
def build(self, comm=None): # noqa: C901
"""Builds the isotropic mesh size function according
to the user arguments that were passed.
"""
comm = comm or MPI.COMM_WORLD
if comm is not None:
rank = comm.Get_rank()
size = comm.Get_size()
else:
rank = 0
size = 1
if rank == 0:
self.__ReadVelocityModel()
_vp = self.vp
_bbox = self.bbox
_dim = self.dim
_width = self.width
_nz = self.nz
_nx = self.nx
if _dim == 3:
_ny = self.ny
_domain_ext = self.domain_ext
_hmax = self.hmax
_hmin = | |
('pad_xx', c_char * 656) )
plist.append( ('rhi_dfov', c_float) )
plist.append( ('pad_xx', c_char * 80) )
plist.append( ('rhi_scanspacing', c_float) )
plist.append( ('pad_xx', c_char * 8) )
plist.append( ('rhi_loc', c_float) )
plist.append( ('rhi_ctr_R', c_float) )
plist.append( ('rhi_ctr_A', c_float) )
plist.append( ('rhi_ctr_S', c_float) )
plist.append( ('pad_xx', c_char * 12) )
plist.append( ('rhi_tlhc_R', c_float) )
plist.append( ('rhi_tlhc_A', c_float) )
plist.append( ('rhi_tlhc_S', c_float) )
plist.append( ('rhi_trhc_R', c_float) )
plist.append( ('rhi_trhc_A', c_float) )
plist.append( ('rhi_trhc_S', c_float) )
plist.append( ('rhi_brhc_R', c_float) )
plist.append( ('rhi_brhc_A', c_float) )
plist.append( ('rhi_brhc_S', c_float) )
plist.append( ('pad_xx', c_char * 4) )
plist.append( ('rhi_tr', c_int) )
plist.append( ('rhi_ti', c_int) )
plist.append( ('rhi_te', c_int) )
plist.append( ('pad_xx', c_char * 4) )
plist.append( ('rhi_numecho', c_short) )
plist.append( ('pad_xx', c_char * 6) )
plist.append( ('rhi_nex', c_float) )
plist.append( ('pad_xx', c_char * 32) )
plist.append( ('rhi_mr_flip', c_short) )
plist.append( ('pad_xx', c_char * 58) )
plist.append( ('rhi_psdname', c_char * 33) )
plist.append( ('pad_xx', c_char * 21) )
plist.append( ('rhi_ctyp', c_short) )
plist.append( ('rhi_cname', c_char * 17) )
plist.append( ('pad_xx', c_char * 31) )
plist.append( ('rhi_user0', c_float) )
plist.append( ('rhi_user1', c_float) )
plist.append( ('rhi_user2', c_float) )
plist.append( ('rhi_user3', c_float) )
plist.append( ('rhi_user4', c_float) )
plist.append( ('rhi_user5', c_float) )
plist.append( ('rhi_user6', c_float) )
plist.append( ('rhi_user7', c_float) )
plist.append( ('rhi_user8', c_float) )
plist.append( ('rhi_user9', c_float) )
plist.append( ('rhi_user10', c_float) )
plist.append( ('rhi_user11', c_float) )
plist.append( ('rhi_user12', c_float) )
plist.append( ('rhi_user13', c_float) )
plist.append( ('rhi_user14', c_float) )
plist.append( ('rhi_user15', c_float) )
plist.append( ('rhi_user16', c_float) )
plist.append( ('rhi_user17', c_float) )
plist.append( ('rhi_user18', c_float) )
plist.append( ('rhi_user19', c_float) )
plist.append( ('rhi_user20', c_float) )
plist.append( ('rhi_user21', c_float) )
plist.append( ('rhi_user22', c_float) )
plist.append( ('rhi_user23', c_float) )
plist.append( ('rhi_user24', c_float) )
plist.append( ('pad_xx', c_char * 240) )
plist.append( ('rhi_freq_dir', c_short) )
plist.append( ('pad_xx', c_char * 2) )
plist.append( ('rhi_image_uid', c_char * 32) )
plist.append( ('pad_xx', c_char * 100) )
plist.append( ('rhi_user25', c_float) )
plist.append( ('rhi_user26', c_float) )
plist.append( ('rhi_user27', c_float) )
plist.append( ('rhi_user28', c_float) )
plist.append( ('rhi_user29', c_float) )
plist.append( ('rhi_user30', c_float) )
plist.append( ('rhi_user31', c_float) )
plist.append( ('rhi_user32', c_float) )
plist.append( ('rhi_user33', c_float) )
plist.append( ('rhi_user34', c_float) )
plist.append( ('rhi_user35', c_float) )
plist.append( ('rhi_user36', c_float) )
plist.append( ('rhi_user37', c_float) )
plist.append( ('rhi_user38', c_float) )
plist.append( ('rhi_user39', c_float) )
plist.append( ('rhi_user40', c_float) )
plist.append( ('rhi_user41', c_float) )
plist.append( ('rhi_user42', c_float) )
plist.append( ('rhi_user43', c_float) )
plist.append( ('rhi_user44', c_float) )
plist.append( ('rhi_user45', c_float) )
plist.append( ('rhi_user46', c_float) )
plist.append( ('rhi_user47', c_float) )
plist.append( ('rhi_user48', c_float) )
elif version == 11:
plist.append( ('rhr_rh_rdbm_rev', c_float) )
plist.append( ('pad_xx', c_char * 12) )
plist.append( ('rhr_rh_scan_date', c_char * 10) )
plist.append( ('rhr_rh_scan_time', c_char * 8) )
plist.append( ('rhr_rh_logo', c_char * 10) )
plist.append( ('rhr_rh_file_contents', c_short) )
plist.append( ('pad_xx', c_char * 10) )
plist.append( ('rhr_rh_data_collect_type', c_short) )
plist.append( ('pad_xx', c_char * 6) )
plist.append( ('rhr_rh_npasses', c_short) )
plist.append( ('pad_xx', c_char * 2) )
plist.append( ('rhr_rh_nslices', c_short) )
plist.append( ('pad_xx', c_char * 10) )
plist.append( ('rhr_rh_frame_size', c_ushort) )
plist.append( ('rhr_rh_point_size', c_short) )
plist.append( ('pad_xx', c_char * 32) )
plist.append( ('rhr_rh_raw_pass_size', c_int) )
plist.append( ('pad_xx', c_char * 80) )
plist.append( ('rhr_rh_dab[0]_start_rcv', c_short) )
plist.append( ('rhr_rh_dab[0]_stop_rcv', c_short) )
plist.append( ('rhr_rh_dab[1]_start_rcv', c_short) )
plist.append( ('rhr_rh_dab[1]_stop_rcv', c_short) )
plist.append( ('rhr_rh_dab[2]_start_rcv', c_short) )
plist.append( ('rhr_rh_dab[2]_stop_rcv', c_short) )
plist.append( ('rhr_rh_dab[3]_start_rcv', c_short) )
plist.append( ('rhr_rh_dab[3]_stop_rcv', c_short) )
plist.append( ('rhr_rh_user0', c_float) )
plist.append( ('rhr_rh_user1', c_float) )
plist.append( ('rhr_rh_user2', c_float) )
plist.append( ('rhr_rh_user3', c_float) )
plist.append( ('rhr_rh_user4', c_float) )
plist.append( ('rhr_rh_user5', c_float) )
plist.append( ('rhr_rh_user6', c_float) )
plist.append( ('rhr_rh_user7', c_float) )
plist.append( ('rhr_rh_user8', c_float) )
plist.append( ('rhr_rh_user9', c_float) )
plist.append( ('rhr_rh_user10', c_float) )
plist.append( ('rhr_rh_user11', c_float) )
plist.append( ('rhr_rh_user12', c_float) )
plist.append( ('rhr_rh_user13', c_float) )
plist.append( ('rhr_rh_user14', c_float) )
plist.append( ('rhr_rh_user15', c_float) )
plist.append( ('rhr_rh_user16', c_float) )
plist.append( ('rhr_rh_user17', c_float) )
plist.append( ('rhr_rh_user18', c_float) )
plist.append( ('rhr_rh_user19', c_float) )
plist.append( ('pad_xx', c_char * 72) )
plist.append( ('rhr_spectral_width', c_float) )
plist.append( ('rhr_csi_dims', c_short) )
plist.append( ('rhr_xcsi', c_short) )
plist.append( ('rhr_ycsi', c_short) )
plist.append( ('rhr_zcsi', c_short) )
plist.append( ('rhr_roilenx', c_float) )
plist.append( ('rhr_roileny', c_float) )
plist.append( ('rhr_roilenz', c_float) )
plist.append( ('pad_xx', c_char * 32) )
plist.append( ('rhr_rh_ps_mps_freq', c_int) )
plist.append( ('pad_xx', c_char * 560) )
plist.append( ('rhr_rh_user_usage_tag', c_uint) )
plist.append( ('pad_xx', c_char * 8) )
plist.append( ('rhr_rh_user20', c_float) )
plist.append( ('rhr_rh_user21', c_float) )
plist.append( ('rhr_rh_user22', c_float) )
plist.append( ('rhr_rh_user23', c_float) )
plist.append( ('rhr_rh_user24', c_float) )
plist.append( ('rhr_rh_user25', c_float) )
plist.append( ('rhr_rh_user26', c_float) )
plist.append( ('rhr_rh_user27', c_float) )
plist.append( ('rhr_rh_user28', c_float) )
plist.append( ('rhr_rh_user29', c_float) )
plist.append( ('rhr_rh_user30', c_float) )
plist.append( ('rhr_rh_user31', c_float) )
plist.append( ('rhr_rh_user32', c_float) )
plist.append( ('rhr_rh_user33', c_float) )
plist.append( ('rhr_rh_user34', c_float) )
plist.append( ('rhr_rh_user35', c_float) )
plist.append( ('rhr_rh_user36', c_float) )
plist.append( ('rhr_rh_user37', c_float) )
plist.append( ('rhr_rh_user38', c_float) )
plist.append( ('rhr_rh_user39', c_float) )
plist.append( ('rhr_rh_user40', c_float) )
plist.append( ('rhr_rh_user41', c_float) )
plist.append( ('rhr_rh_user42', c_float) )
plist.append( ('rhr_rh_user43', c_float) )
plist.append( ('rhr_rh_user44', c_float) )
plist.append( ('rhr_rh_user45', c_float) )
plist.append( ('rhr_rh_user46', c_float) )
plist.append( ('rhr_rh_user47', c_float) )
plist.append( ('rhr_rh_user48', c_float) )
plist.append( ('pad_xx', c_char * 56244) )
plist.append( ('rhe_ex_no', c_ushort) )
plist.append( ('rhe_hospname', c_char * 33) )
plist.append( ('pad_xx', c_char * 41) )
plist.append( ('rhe_magstrength', c_int) )
plist.append( ('rhe_patid', c_char * 13) )
plist.append( ('rhe_patname', c_char * 25) )
plist.append( ('pad_xx', c_char * 4) )
plist.append( ('rhe_patsex', c_short) )
plist.append( ('pad_xx', c_char * 67) )
plist.append( ('rhe_reqnum', c_char * 13) )
plist.append( ('rhe_ex_datetime', c_int) )
plist.append( ('rhe_refphy', c_char * 33) )
plist.append( ('pad_xx', c_char * 79) )
plist.append( ('rhe_ex_sysid', c_char * 9) )
plist.append( ('pad_xx', c_char * 27) )
plist.append( ('rhe_ex_verscre', c_char * 2) )
plist.append( ('pad_xx', c_char * 84) )
plist.append( ('rhe_uniq_sys_id', c_char * 16) )
plist.append( ('pad_xx', c_char * 20) )
plist.append( ('rhe_study_uid', c_char * 32) )
plist.append( ('pad_xx', c_char * 66) )
plist.append( ('rhe_patnameff', c_char * 65) )
plist.append( ('rhe_patidff', c_char * 65) )
plist.append( ('rhe_reqnumff', c_char * 17) )
plist.append( ('rhe_dateofbirth', c_char * 9) )
plist.append( ('pad_xx', c_char * 310) )
plist.append( ('rhs_se_no', c_short) )
plist.append( ('pad_xx', c_char * 8) )
plist.append( ('rhs_se_desc', c_char * 30) )
plist.append( ('pad_xx', c_char * 26) )
plist.append( ('rhs_position', c_int) )
plist.append( ('rhs_entry', c_int) )
plist.append( ('rhs_anref', c_char * 3) )
plist.append( ('pad_xx', c_char * 257) )
plist.append( ('rhs_series_uid', c_char * 32) )
plist.append( ('rhs_landmark_uid', c_char * 32) )
plist.append( ('pad_xx', c_char * 1164) )
plist.append( ('rhi_dfov', c_float) )
plist.append( ('pad_xx', c_char * 80) )
plist.append( ('rhi_scanspacing', c_float) )
plist.append( ('pad_xx', c_char * 8) )
plist.append( ('rhi_loc', c_float) )
plist.append( ('rhi_ctr_R', c_float) )
plist.append( ('rhi_ctr_A', c_float) )
plist.append( ('rhi_ctr_S', c_float) )
plist.append( ('pad_xx', c_char * 12) )
plist.append( ('rhi_tlhc_R', c_float) )
plist.append( ('rhi_tlhc_A', c_float) )
plist.append( ('rhi_tlhc_S', c_float) )
plist.append( ('rhi_trhc_R', c_float) )
plist.append( ('rhi_trhc_A', c_float) )
plist.append( ('rhi_trhc_S', c_float) )
plist.append( ('rhi_brhc_R', c_float) )
plist.append( ('rhi_brhc_A', c_float) )
plist.append( ('rhi_brhc_S', c_float) )
plist.append( ('pad_xx', c_char * 4) )
plist.append( ('rhi_tr', c_int) )
plist.append( ('rhi_ti', c_int) )
plist.append( ('rhi_te', c_int) )
plist.append( ('pad_xx', c_char * 4) )
plist.append( ('rhi_numecho', c_short) )
plist.append( ('pad_xx', c_char * 6) )
plist.append( ('rhi_nex', c_float) )
plist.append( ('pad_xx', c_char * 32) )
plist.append( ('rhi_mr_flip', c_short) )
plist.append( ('pad_xx', c_char * 58) )
plist.append( ('rhi_psdname', c_char * 33) )
plist.append( ('pad_xx', c_char * 21) )
plist.append( ('rhi_ctyp', c_short) )
plist.append( ('rhi_cname', c_char * 17) )
plist.append( ('pad_xx', c_char * 31) )
plist.append( ('rhi_user0', c_float) )
plist.append( ('rhi_user1', c_float) )
plist.append( ('rhi_user2', c_float) )
plist.append( ('rhi_user3', c_float) )
plist.append( ('rhi_user4', c_float) )
plist.append( ('rhi_user5', c_float) )
plist.append( ('rhi_user6', c_float) )
plist.append( ('rhi_user7', c_float) )
plist.append( ('rhi_user8', c_float) )
plist.append( ('rhi_user9', c_float) )
plist.append( ('rhi_user10', c_float) )
plist.append( ('rhi_user11', c_float) )
plist.append( ('rhi_user12', c_float) )
plist.append( ('rhi_user13', c_float) )
plist.append( ('rhi_user14', c_float) )
plist.append( ('rhi_user15', c_float) )
plist.append( ('rhi_user16', c_float) )
plist.append( ('rhi_user17', c_float) )
plist.append( ('rhi_user18', c_float) )
plist.append( ('rhi_user19', c_float) )
plist.append( ('rhi_user20', c_float) )
plist.append( ('rhi_user21', c_float) )
plist.append( ('rhi_user22', c_float) )
plist.append( ('pad_xx', c_char * 92) )
plist.append( ('rhi_user23', c_float) )
plist.append( ('rhi_user24', c_float) )
plist.append( ('pad_xx', c_char * 148) )
plist.append( ('rhi_freq_dir', c_short) )
plist.append( ('pad_xx', c_char * 2) )
plist.append( ('rhi_image_uid', c_char * 32) )
plist.append( | |
<gh_stars>10-100
#
# This file is part of Magnum.
#
# Copyright © 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019,
# 2020, 2021 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
import array
import sys
import unittest
from corrade import containers
import test_stridedarrayview
import test_optional
class ArrayView(unittest.TestCase):
    """Tests for containers.ArrayView / MutableArrayView: construction from
    buffer-protocol objects, owner refcounting, slicing and memoryview
    conversion.

    FIX: two assertions used self.assertTrue(x, y), which treats y as the
    failure *message* and thus can never fail for truthy x; they were
    clearly meant to be equality checks and are now assertEqual.
    """

    def test_init(self):
        """Default-constructed views are empty and own nothing."""
        a = containers.ArrayView()
        b = containers.MutableArrayView()
        self.assertIs(a.owner, None)
        self.assertIs(b.owner, None)
        self.assertEqual(len(a), 0)
        self.assertEqual(len(b), 0)
        self.assertEqual(bytes(a), b'')
        self.assertEqual(bytes(b), b'')

    def test_init_buffer(self):
        """A view over bytes keeps the bytes alive via its owner."""
        a = b'hello'
        a_refcount = sys.getrefcount(a)
        b = containers.ArrayView(a)
        self.assertIs(b.owner, a)
        self.assertEqual(len(b), 5)
        self.assertEqual(bytes(b), b'hello')
        self.assertEqual(b[2], 'l')
        self.assertEqual(sys.getrefcount(a), a_refcount + 1)
        # Not mutable
        with self.assertRaisesRegex(TypeError, "object does not support item assignment"):
            b[4] = '!'
        # b should keep a reference to a, so deleting the local reference
        # shouldn't affect it
        del a
        # FIX: was assertTrue(value, msg) — always passed; equality intended
        self.assertEqual(sys.getrefcount(b.owner), a_refcount)
        self.assertEqual(b[2], 'l')
        # Now, if we delete b, a should not be referenced by anything anymore
        a = b.owner
        del b
        # FIX: was assertTrue(value, msg) — always passed; equality intended
        self.assertEqual(sys.getrefcount(a), a_refcount)

    def test_init_buffer_empty(self):
        """An empty buffer yields an unowned view and takes no reference."""
        a = b''
        a_refcount = sys.getrefcount(a)
        b = containers.ArrayView(a)
        self.assertIs(b.owner, None)
        self.assertEqual(len(b), 0)
        self.assertEqual(sys.getrefcount(a), a_refcount)

    def test_init_buffer_memoryview_obj(self):
        """A memoryview input becomes the owner itself, not its backing bytes."""
        a = b'hello'
        v = memoryview(a)
        b = containers.ArrayView(v)
        # memoryview's buffer protocol returns itself, not the underlying
        # bytes, as it manages the Py_buffer instance. So this is expected.
        self.assertIs(b.owner, v)

    def test_init_buffer_mutable(self):
        """A mutable view over a bytearray supports item assignment."""
        a = bytearray(b'hello')
        b = containers.MutableArrayView(a)
        b[4] = '!'
        self.assertEqual(b[4], '!')
        self.assertEqual(bytes(b), b'hell!')

    def test_init_array(self):
        """A typed array is viewed as raw bytes (3 floats = 12 bytes)."""
        a = array.array('f', [1.0, 4.5, 7.9])
        b = containers.ArrayView(a)
        self.assertIs(b.owner, a)
        self.assertEqual(len(b), 3*4)

    def test_init_buffer_unexpected_stride(self):
        """A non-contiguous buffer is rejected by memoryview itself."""
        a = memoryview(b'hello')[::2]
        self.assertEqual(bytes(a), b'hlo')
        # Error emitted by memoryview, not us
        with self.assertRaisesRegex(BufferError, "memoryview: underlying buffer is not C-contiguous"):
            b = containers.ArrayView(a)

    def test_init_buffer_mutable_from_immutable(self):
        """Mutable views refuse read-only buffers."""
        a = b'hello'
        with self.assertRaisesRegex(BufferError, "Object is not writable."):
            b = containers.MutableArrayView(a)

    def test_slice(self):
        """Slicing references the original buffer, not the sliced view."""
        a = b'World is hell!'
        a_refcount = sys.getrefcount(a)
        b = containers.ArrayView(a)
        b_refcount = sys.getrefcount(b)
        self.assertEqual(sys.getrefcount(a), a_refcount + 1)
        # When slicing, b's refcount should not change but a's refcount should
        # increase
        c = b[4:-4]
        self.assertIsInstance(c, containers.ArrayView)
        self.assertEqual(bytes(c), b'd is h')
        self.assertEqual(sys.getrefcount(b), b_refcount)
        self.assertEqual(sys.getrefcount(a), a_refcount + 2)
        # Deleting a slice should reduce a's refcount again, keep b's unchanged
        del c
        self.assertEqual(sys.getrefcount(b), b_refcount)
        self.assertEqual(sys.getrefcount(a), a_refcount + 1)

    def test_slice_empty(self):
        """An out-of-range slice yields an empty, unowned view."""
        data = b'hello'
        data_refcount = sys.getrefcount(data)
        # slice.start = slice.stop
        a = containers.ArrayView(data)[7:8]
        self.assertEqual(len(a), 0)
        # Empty view, original data not referenced at all
        self.assertIs(a.owner, None)
        self.assertEqual(sys.getrefcount(data), data_refcount)

    def test_slice_invalid(self):
        """A zero slice step is rejected."""
        with self.assertRaisesRegex(ValueError, "slice step cannot be zero"):
            containers.ArrayView()[::0]

    def test_slice_stride(self):
        """A stepped slice produces a StridedArrayView1D over the same buffer."""
        a = b'World_ _i_s_ _hell!'
        a_refcount = sys.getrefcount(a)
        b = containers.ArrayView(a)
        b_refcount = sys.getrefcount(b)
        self.assertEqual(sys.getrefcount(a), a_refcount + 1)
        # When slicing to a strided array view, b's refcount should not change
        # but a's refcount should increase. Check consistency with slices on
        # bytes, slicing bytes will make a copy so it doesn't affect the
        # refcount.
        c1 = a[4:-4:2]
        c2 = b[4:-4:2]
        self.assertIsInstance(c2, containers.StridedArrayView1D)
        self.assertEqual(len(c1), 6)
        self.assertEqual(len(c2), 6)
        self.assertEqual(bytes(c1), b'd is h')
        self.assertEqual(bytes(c2), b'd is h')
        self.assertEqual(c2.size, (6,))
        self.assertEqual(c2.stride, (2,))
        self.assertEqual(sys.getrefcount(b), b_refcount)
        self.assertEqual(sys.getrefcount(a), a_refcount + 2)
        # Deleting a slice should reduce a's refcount again, keep b's unchanged
        del c2
        self.assertEqual(sys.getrefcount(b), b_refcount)
        self.assertEqual(sys.getrefcount(a), a_refcount + 1)

    def test_slice_stride_empty(self):
        """An out-of-range stepped slice yields an empty, unowned view."""
        data = b'hello'
        data_refcount = sys.getrefcount(data)
        # slice.start = slice.stop
        a = containers.ArrayView(data)[7:8:2]
        self.assertEqual(len(a), 0)
        # Empty view, original data not referenced at all
        self.assertIs(a.owner, None)
        self.assertEqual(sys.getrefcount(data), data_refcount)

    def test_slice_stride_negative(self):
        """Negative-step slicing matches bytes slicing semantics."""
        a = b'World_ _i_s_ _hell!'
        b = containers.ArrayView(a)
        # Check consistency with slices on bytes
        c1 = a[-5:3:-2]  # like [4:-4:2] above, but reverted
        c2 = b[-5:3:-2]
        self.assertEqual(len(c1), 6)
        self.assertEqual(len(c2), 6)
        self.assertEqual(bytes(c1), b'h si d')  # like b'd is h' but reverted
        self.assertEqual(bytes(c2), b'h si d')
        self.assertEqual(c2.size, (6,))
        self.assertEqual(c2.stride, (-2,))

    def test_slice_stride_reverse(self):
        """[::-1] reverses the full view."""
        # slice.stop = -1
        a = containers.ArrayView(b'hello')[::-1]
        self.assertEqual(len(a), 5)
        self.assertEqual(bytes(a), b'olleh')

    def test_convert_memoryview(self):
        """memoryview(view) references the view itself, read-only."""
        a = b'World is hell!'
        a_refcount = sys.getrefcount(a)
        b = containers.ArrayView(a)
        b_refcount = sys.getrefcount(b)
        self.assertEqual(sys.getrefcount(a), a_refcount + 1)
        c = memoryview(b)
        # Unlike slicing, ArrayView's buffer protocol returns a reference to
        # itself -- it needs to be kept around because the Py_buffer refers to
        # its internals for size. Also returning a reference to the underlying
        # buffer would mean the underlying buffer's releasebuffer function gets
        # called instead of ours which is *not* wanted.
        self.assertIs(c.obj, b)
        self.assertEqual(sys.getrefcount(b), b_refcount + 1)
        self.assertEqual(sys.getrefcount(a), a_refcount + 1)
        with self.assertRaisesRegex(TypeError, "cannot modify read-only memory"):
            c[-1] = ord('?')

    def test_convert_mutable_memoryview(self):
        """A memoryview over a mutable view writes through to the source."""
        a = bytearray(b'World is hell!')
        b = memoryview(containers.MutableArrayView(a))
        b[-1] = ord('?')
        self.assertEqual(a, b'World is hell?')
class StridedArrayView1D(unittest.TestCase):
def test_init(self):
    """Default-constructed 1D strided views are empty, unowned, zero-stride."""
    for view in (containers.StridedArrayView1D(),
                 containers.MutableStridedArrayView1D()):
        self.assertIs(view.owner, None)
        self.assertEqual(len(view), 0)
        self.assertEqual(bytes(view), b'')
        self.assertEqual(view.size, (0, ))
        self.assertEqual(view.stride, (0, ))
        self.assertEqual(view.dimensions, 1)
def test_init_buffer(self):
    """A strided view over bytes keeps the bytes alive via its owner.

    FIX: the last two assertions used self.assertTrue(x, y), which treats
    y as the failure *message* and can never fail for truthy x; they were
    clearly meant to be equality checks and are now assertEqual.
    """
    a = b'hello'
    a_refcount = sys.getrefcount(a)
    b = containers.StridedArrayView1D(a)
    self.assertIs(b.owner, a)
    self.assertEqual(len(b), 5)
    self.assertEqual(bytes(b), b'hello')
    self.assertEqual(b.size, (5, ))
    self.assertEqual(b.stride, (1, ))
    self.assertEqual(b[2], 'l')
    self.assertEqual(sys.getrefcount(a), a_refcount + 1)
    # Not mutable
    with self.assertRaisesRegex(TypeError, "object does not support item assignment"):
        b[4] = '!'
    # b should keep a reference to a, so deleting the local reference
    # shouldn't affect it
    del a
    self.assertEqual(sys.getrefcount(b.owner), a_refcount)
    self.assertEqual(b[2], 'l')
    # Now, if we delete b, a should not be referenced by anything anymore
    a = b.owner
    del b
    self.assertEqual(sys.getrefcount(a), a_refcount)
def test_init_buffer_empty(self):
    """An empty buffer yields an unowned view and takes no extra reference."""
    data = b''
    data_refcount = sys.getrefcount(data)
    view = containers.StridedArrayView1D(data)
    self.assertIs(view.owner, None)
    self.assertEqual(len(view), 0)
    self.assertEqual(sys.getrefcount(data), data_refcount)
def test_init_buffer_memoryview_obj(self):
    """A memoryview input becomes the owner itself, not its backing bytes."""
    data = b'hello'
    view_of_data = memoryview(data)
    strided = containers.StridedArrayView1D(view_of_data)
    # memoryview's buffer protocol returns itself, not the underlying
    # bytes, as it manages the Py_buffer instance. So this is expected.
    self.assertIs(strided.owner, view_of_data)
def test_init_buffer_mutable(self):
    """A mutable strided view over a bytearray supports item assignment."""
    data = bytearray(b'hello')
    view = containers.MutableStridedArrayView1D(data)
    view[4] = '!'
    self.assertEqual(view[4], '!')
    self.assertEqual(bytes(view), b'hell!')
def test_init_buffer_unexpected_dimensions(self):
    """A 2D buffer cannot back a 1D strided view."""
    mv = memoryview(b'123456').cast('b', shape=[2, 3])
    self.assertEqual(bytes(mv), b'123456')
    with self.assertRaisesRegex(BufferError, "expected 1 dimensions but got 2"):
        containers.StridedArrayView1D(mv)
def test_init_buffer_stride(self):
    """A strided memoryview is accepted with its stride preserved."""
    source = memoryview(b'hello')[::2]
    self.assertEqual(bytes(source), b'hlo')
    view = containers.StridedArrayView1D(source)
    self.assertEqual(len(view), 3)
    self.assertEqual(bytes(view), b'hlo')
    self.assertEqual(view.size, (3, ))
    self.assertEqual(view.stride, (2, ))
    self.assertEqual(view[2], 'o')
def test_init_buffer_mutable_from_immutable(self):
    """Mutable strided views refuse read-only buffers."""
    data = b'hello'
    with self.assertRaisesRegex(BufferError, "Object is not writable."):
        containers.MutableStridedArrayView1D(data)
def test_slice(self):
    """Slicing references the original buffer, not the sliced view."""
    buf = b'World is hell!'
    buf_refcount = sys.getrefcount(buf)
    view = containers.StridedArrayView1D(buf)
    view_refcount = sys.getrefcount(view)
    self.assertIs(view.owner, buf)
    self.assertEqual(sys.getrefcount(buf), buf_refcount + 1)
    # When slicing, the view's refcount should not change but the buffer's
    # should increase
    sub = view[4:-4]
    self.assertEqual(sub.size, (6,))
    self.assertEqual(sub.stride, (1,))
    self.assertIs(sub.owner, buf)
    self.assertIsInstance(sub, containers.StridedArrayView1D)
    self.assertEqual(bytes(sub), b'd is h')
    self.assertEqual(sys.getrefcount(view), view_refcount)
    self.assertEqual(sys.getrefcount(buf), buf_refcount + 2)
    # Deleting a slice should reduce the buffer's refcount again, keeping
    # the view's unchanged
    del sub
    self.assertEqual(sys.getrefcount(view), view_refcount)
    self.assertEqual(sys.getrefcount(buf), buf_refcount + 1)
def test_slice_empty(self):
    """An out-of-range slice yields an empty, unowned view."""
    data = b'hello'
    data_refcount = sys.getrefcount(data)
    # slice.start = slice.stop
    empty = containers.StridedArrayView1D(data)[7:8]
    self.assertEqual(empty.size, (0, ))
    # Empty view, original data not referenced at all
    self.assertIs(empty.owner, None)
    self.assertEqual(sys.getrefcount(data), data_refcount)
def test_slice_invalid(self):
    """A non-integer slice step is rejected."""
    view = containers.StridedArrayView1D()
    with self.assertRaisesRegex(TypeError, "indices must be integers"):
        view[-5:3:"boo"]
def test_slice_stride(self):
    """Strided slicing matches the semantics of slicing bytes."""
    raw = b'World_ _i_s_ _hell!'
    view = containers.StridedArrayView1D(raw)
    # Check consistency with slices on bytes
    expected = raw[4:-4:2]
    strided = view[4:-4:2]
    self.assertIsInstance(strided, containers.StridedArrayView1D)
    self.assertEqual(len(expected), 6)
    self.assertEqual(len(strided), 6)
    self.assertEqual(bytes(expected), b'd is h')
    self.assertEqual(bytes(strided), b'd is h')
    self.assertEqual(strided.size, (6,))
    self.assertEqual(strided.stride, (2,))
def test_slice_stride_negative(self):
a = b'World_ _i_s_ _hell!'
b = containers.StridedArrayView1D(a)
# Check consistency with slices on bytes
c1 = a[-5:3:-2] # like [4:-4:2] above, but reverted
c2 = b[-5:3:-2]
self.assertEqual(len(c1), 6)
self.assertEqual(len(c2), 6)
self.assertEqual(bytes(c1), | |
to make DNS changes.'])
assert_failed_change_in_error_response(errors[2], input_name=f"update.{ok_zone_name}", record_data="192.168.127.12",
error_messages=[f'User \"dummy\" is not authorized. Contact zone owner group: {ok_group_name} at <EMAIL> to make DNS changes.'])
assert_failed_change_in_error_response(errors[3], input_name=f"update.{ok_zone_name}", change_type="DeleteRecordSet",
record_data=None,
error_messages=[f'User \"dummy\" is not authorized. Contact zone owner group: {ok_group_name} at <EMAIL> to make DNS changes.'])
finally:
clear_ok_acl_rules(shared_zone_test_context)
clear_recordset_list(to_delete, ok_client)
def test_a_recordtype_add_checks(shared_zone_test_context):
    """
    Test all add validations performed on A records submitted in batch changes.

    FIX: several assertions checked record_data="192.168.127.12" although every
    submitted change in batch_change_input uses address="1.2.3.4" (apparently a
    mangled search/replace); the expected record data now matches what is sent.
    NOTE(review): the "<EMAIL>" text inside error messages looks like a
    redaction placeholder -- verify against the real zone-owner contact string.
    """
    client = shared_zone_test_context.ok_vinyldns_client
    dummy_zone_name = shared_zone_test_context.dummy_zone["name"]
    dummy_group_name = shared_zone_test_context.dummy_group["name"]
    parent_zone_name = shared_zone_test_context.parent_zone["name"]
    existing_a_name = generate_record_name()
    existing_a_fqdn = "{0}.{1}".format(existing_a_name, shared_zone_test_context.parent_zone["name"])
    existing_a = create_recordset(shared_zone_test_context.parent_zone, existing_a_name, "A", [{"address": "10.1.1.1"}],
                                  100)
    existing_cname_name = generate_record_name()
    existing_cname_fqdn = "{0}.{1}".format(existing_cname_name, shared_zone_test_context.parent_zone["name"])
    existing_cname = create_recordset(shared_zone_test_context.parent_zone, existing_cname_name, "CNAME",
                                      [{"cname": "cname.data."}], 100)
    good_record_name = generate_record_name()
    good_record_fqdn = "{0}.{1}".format(good_record_name, shared_zone_test_context.parent_zone["name"])
    batch_change_input = {
        "changes": [
            # valid changes
            get_change_A_AAAA_json(good_record_fqdn, address="1.2.3.4"),
            # input validation failures
            get_change_A_AAAA_json(f"bad-ttl-and-invalid-name$.{parent_zone_name}", ttl=29, address="1.2.3.4"),
            get_change_A_AAAA_json("reverse-zone.10.10.in-addr.arpa.", address="1.2.3.4"),
            # zone discovery failures
            get_change_A_AAAA_json(f"no.subzone.{parent_zone_name}", address="1.2.3.4"),
            get_change_A_AAAA_json("no.zone.at.all.", address="1.2.3.4"),
            # context validation failures
            get_change_CNAME_json(f"cname-duplicate.{parent_zone_name}"),
            get_change_A_AAAA_json(f"cname-duplicate.{parent_zone_name}", address="1.2.3.4"),
            get_change_A_AAAA_json(existing_a_fqdn, address="1.2.3.4"),
            get_change_A_AAAA_json(existing_cname_fqdn, address="1.2.3.4"),
            get_change_A_AAAA_json(f"user-add-unauthorized.{dummy_zone_name}", address="1.2.3.4")
        ]
    }
    to_create = [existing_a, existing_cname]
    to_delete = []
    try:
        for create_json in to_create:
            create_result = client.create_recordset(create_json, status=202)
            to_delete.append(client.wait_until_recordset_change_status(create_result, "Complete"))
        response = client.create_batch_change(batch_change_input, status=400)
        # successful changes
        assert_successful_change_in_error_response(response[0], input_name=good_record_fqdn, record_data="1.2.3.4")
        # ttl, domain name, reverse zone input validations
        assert_failed_change_in_error_response(response[1], input_name=f"bad-ttl-and-invalid-name$.{parent_zone_name}", ttl=29,
                                               record_data="1.2.3.4",
                                               error_messages=['Invalid TTL: "29", must be a number between 30 and 2147483647.',
                                                               f'Invalid domain name: "bad-ttl-and-invalid-name$.{parent_zone_name}", '
                                                               "valid domain names must be letters, numbers, underscores, and hyphens, joined by dots, and terminated with a dot."])
        assert_failed_change_in_error_response(response[2], input_name="reverse-zone.10.10.in-addr.arpa.",
                                               record_data="1.2.3.4",
                                               error_messages=["Invalid Record Type In Reverse Zone: record with name \"reverse-zone.10.10.in-addr.arpa.\" and "
                                                               "type \"A\" is not allowed in a reverse zone."])
        # zone discovery failure
        assert_failed_change_in_error_response(response[3], input_name=f"no.subzone.{parent_zone_name}", record_data="1.2.3.4",
                                               error_messages=[f'Zone Discovery Failed: zone for "no.subzone.{parent_zone_name}" does not exist in VinylDNS. '
                                                               f'If zone exists, then it must be connected to in VinylDNS.'])
        assert_failed_change_in_error_response(response[4], input_name="no.zone.at.all.", record_data="1.2.3.4",
                                               error_messages=['Zone Discovery Failed: zone for "no.zone.at.all." does not exist in VinylDNS. '
                                                               'If zone exists, then it must be connected to in VinylDNS.'])
        # context validations: duplicate name failure is always on the cname
        assert_failed_change_in_error_response(response[5], input_name=f"cname-duplicate.{parent_zone_name}",
                                               record_type="CNAME", record_data="test.com.",
                                               error_messages=[f"Record Name \"cname-duplicate.{parent_zone_name}\" Not Unique In Batch Change: "
                                                               f"cannot have multiple \"CNAME\" records with the same name."])
        assert_successful_change_in_error_response(response[6], input_name=f"cname-duplicate.{parent_zone_name}",
                                                   record_data="1.2.3.4")
        # context validations: conflicting recordsets, unauthorized error
        assert_failed_change_in_error_response(response[7], input_name=existing_a_fqdn, record_data="1.2.3.4",
                                               error_messages=[f"Record \"{existing_a_fqdn}\" Already Exists: "
                                                               f"cannot add an existing record; to update it, issue a DeleteRecordSet then an Add."])
        assert_failed_change_in_error_response(response[8], input_name=existing_cname_fqdn,
                                               record_data="1.2.3.4",
                                               error_messages=[f'CNAME Conflict: CNAME record names must be unique. '
                                                               f'Existing record with name "{existing_cname_fqdn}" and type \"CNAME\" conflicts with this record.'])
        assert_failed_change_in_error_response(response[9], input_name=f"user-add-unauthorized.{dummy_zone_name}",
                                               record_data="1.2.3.4",
                                               error_messages=[f"User \"ok\" is not authorized. Contact zone owner group: {dummy_group_name} at <EMAIL> to make DNS changes."])
    finally:
        clear_recordset_list(to_delete, client)
def test_a_recordtype_update_delete_checks(shared_zone_test_context):
    """
    Test all update and delete validations performed on A records submitted in batch changes

    Submits one batch that mixes valid delete/update changes with input-validation
    failures, a zone-discovery failure, and context-validation failures (record
    does not exist, user not authorized), then asserts on every change in the
    resulting 400 response.
    """
    # Two clients: "ok" submits the batch; "dummy" owns the zone used for the
    # authorization-failure cases.
    ok_client = shared_zone_test_context.ok_vinyldns_client
    dummy_client = shared_zone_test_context.dummy_vinyldns_client
    ok_zone = shared_zone_test_context.ok_zone
    dummy_zone = shared_zone_test_context.dummy_zone
    ok_zone_name = ok_zone["name"]
    dummy_zone_name = dummy_zone["name"]
    dummy_group_name = shared_zone_test_context.dummy_group["name"]
    group_to_delete = {}
    # Temporary record-owner group; created in the try block and deleted in the
    # finally block.
    temp_group = {
        "name": "test-group-for-record-in-private-zone",
        "email": "<EMAIL>",
        "description": "for testing that a get batch change still works when record owner group is deleted",
        "members": [{"id": "ok"}, {"id": "dummy"}],
        "admins": [{"id": "ok"}, {"id": "dummy"}]
    }
    # Fixture recordsets in the ok zone (the submitting user is authorized here).
    rs_delete_name = generate_record_name()
    rs_delete_fqdn = rs_delete_name + f".{ok_zone_name}"
    rs_delete_ok = create_recordset(ok_zone, rs_delete_name, "A", [{"address": "1.1.1.1"}])
    rs_update_name = generate_record_name()
    rs_update_fqdn = rs_update_name + f".{ok_zone_name}"
    rs_update_ok = create_recordset(ok_zone, rs_update_name, "A", [{"address": "1.1.1.1"}])
    # Fixture recordsets in the dummy zone, where the "ok" user is NOT authorized.
    rs_delete_dummy_name = generate_record_name()
    rs_delete_dummy_fqdn = rs_delete_dummy_name + f".{dummy_zone_name}"
    rs_delete_dummy = create_recordset(dummy_zone, rs_delete_dummy_name, "A", [{"address": "1.1.1.1"}])
    rs_update_dummy_name = generate_record_name()
    rs_update_dummy_fqdn = rs_update_dummy_name + f".{dummy_zone_name}"
    rs_update_dummy = create_recordset(dummy_zone, rs_update_dummy_name, "A", [{"address": "1.1.1.1"}])
    # Delete and update FQDNs are intentionally identical: an "update" in a batch
    # change is expressed as a DeleteRecordSet followed by an Add for the same name.
    rs_dummy_with_owner_name = generate_record_name()
    rs_delete_dummy_with_owner_fqdn = rs_dummy_with_owner_name + f".{dummy_zone_name}"
    rs_update_dummy_with_owner_fqdn = rs_dummy_with_owner_name + f".{dummy_zone_name}"
    batch_change_input = {
        "comments": "this is optional",
        "changes": [
            # valid changes
            get_change_A_AAAA_json(rs_delete_fqdn, change_type="DeleteRecordSet"),
            get_change_A_AAAA_json(rs_update_fqdn, change_type="DeleteRecordSet"),
            get_change_A_AAAA_json(rs_update_fqdn, ttl=300),
            # input validations failures
            get_change_A_AAAA_json("$invalid.host.name.", change_type="DeleteRecordSet"),
            get_change_A_AAAA_json("reverse.zone.in-addr.arpa.", change_type="DeleteRecordSet"),
            get_change_A_AAAA_json("$another.invalid.host.name.", ttl=300),
            get_change_A_AAAA_json("$another.invalid.host.name.", change_type="DeleteRecordSet"),
            get_change_A_AAAA_json("another.reverse.zone.in-addr.arpa.", ttl=10),
            get_change_A_AAAA_json("another.reverse.zone.in-addr.arpa.", change_type="DeleteRecordSet"),
            # zone discovery failures
            get_change_A_AAAA_json("zone.discovery.error.", change_type="DeleteRecordSet"),
            # context validation failures: record does not exist, not authorized
            get_change_A_AAAA_json(f"non-existent.{ok_zone_name}", change_type="DeleteRecordSet"),
            get_change_A_AAAA_json(rs_delete_dummy_fqdn, change_type="DeleteRecordSet"),
            get_change_A_AAAA_json(rs_update_dummy_fqdn, change_type="DeleteRecordSet"),
            get_change_A_AAAA_json(rs_update_dummy_fqdn, ttl=300),
            get_change_A_AAAA_json(rs_delete_dummy_with_owner_fqdn, change_type="DeleteRecordSet"),
            get_change_A_AAAA_json(rs_update_dummy_with_owner_fqdn, ttl=300)
        ]
    }
    to_create = [rs_delete_ok, rs_update_ok, rs_delete_dummy, rs_update_dummy]
    to_delete = []
    try:
        group_to_delete = dummy_client.create_group(temp_group, status=200)
        # Recordset owned by the temporary group (100 = TTL).
        rs_update_dummy_with_owner = create_recordset(dummy_zone, rs_dummy_with_owner_name, "A", [{"address": "1.1.1.1"}], 100, group_to_delete["id"])
        create_rs_update_dummy_with_owner = dummy_client.create_recordset(rs_update_dummy_with_owner, status=202)
        to_delete.append(dummy_client.wait_until_recordset_change_status(create_rs_update_dummy_with_owner, "Complete"))
        # Create each remaining fixture with the client that administers its zone.
        for rs in to_create:
            if rs["zoneId"] == dummy_zone["id"]:
                create_client = dummy_client
            else:
                create_client = ok_client
            create_rs = create_client.create_recordset(rs, status=202)
            to_delete.append(create_client.wait_until_recordset_change_status(create_rs, "Complete"))
        # Confirm that record set doesn't already exist
        ok_client.get_recordset(ok_zone["id"], "non-existent", status=404)
        # The batch is expected to be rejected as a whole (HTTP 400); the body
        # reports a per-change verdict that the assertions below inspect.
        response = ok_client.create_batch_change(batch_change_input, status=400)
        # valid changes
        assert_successful_change_in_error_response(response[0], input_name=rs_delete_fqdn, change_type="DeleteRecordSet")
        assert_successful_change_in_error_response(response[1], input_name=rs_update_fqdn, change_type="DeleteRecordSet")
        assert_successful_change_in_error_response(response[2], input_name=rs_update_fqdn, ttl=300)
        # input validations failures
        assert_failed_change_in_error_response(response[3], input_name="$invalid.host.name.",
                                               change_type="DeleteRecordSet",
                                               error_messages=['Invalid domain name: "$invalid.host.name.", valid domain names must be letters, '
                                                               'numbers, underscores, and hyphens, joined by dots, and terminated with a dot.'])
        assert_failed_change_in_error_response(response[4], input_name="reverse.zone.in-addr.arpa.",
                                               change_type="DeleteRecordSet",
                                               error_messages=['Invalid Record Type In Reverse Zone: record with name "reverse.zone.in-addr.arpa." and type "A" '
                                                               'is not allowed in a reverse zone.'])
        assert_failed_change_in_error_response(response[5], input_name="$another.invalid.host.name.", ttl=300,
                                               error_messages=['Invalid domain name: "$another.invalid.host.name.", valid domain names must be letters, '
                                                               'numbers, underscores, and hyphens, joined by dots, and terminated with a dot.'])
        assert_failed_change_in_error_response(response[6], input_name="$another.invalid.host.name.",
                                               change_type="DeleteRecordSet",
                                               error_messages=['Invalid domain name: "$another.invalid.host.name.", valid domain names must be letters, '
                                                               'numbers, underscores, and hyphens, joined by dots, and terminated with a dot.'])
        # This change accumulates two errors: bad record type for the zone AND bad TTL.
        assert_failed_change_in_error_response(response[7], input_name="another.reverse.zone.in-addr.arpa.", ttl=10,
                                               error_messages=['Invalid Record Type In Reverse Zone: record with name "another.reverse.zone.in-addr.arpa." '
                                                               'and type "A" is not allowed in a reverse zone.',
                                                               'Invalid TTL: "10", must be a number between 30 and 2147483647.'])
        assert_failed_change_in_error_response(response[8], input_name="another.reverse.zone.in-addr.arpa.",
                                               change_type="DeleteRecordSet",
                                               error_messages=['Invalid Record Type In Reverse Zone: record with name "another.reverse.zone.in-addr.arpa." '
                                                               'and type "A" is not allowed in a reverse zone.'])
        # zone discovery failure
        assert_failed_change_in_error_response(response[9], input_name="zone.discovery.error.",
                                               change_type="DeleteRecordSet",
                                               error_messages=['Zone Discovery Failed: zone for "zone.discovery.error." does not exist in VinylDNS. '
                                                               'If zone exists, then it must be connected to in VinylDNS.'])
        # context validation failures: record does not exist, not authorized
        assert_failed_change_in_error_response(response[10], input_name=f"non-existent.{ok_zone_name}",
                                               change_type="DeleteRecordSet",
                                               error_messages=[
                                                   f'Record "non-existent.{ok_zone_name}" Does Not Exist: cannot delete a record that does not exist.'])
        assert_failed_change_in_error_response(response[11], input_name=rs_delete_dummy_fqdn,
                                               change_type="DeleteRecordSet",
                                               error_messages=[f'User \"ok\" is not authorized. Contact zone owner group: {dummy_group_name} at <EMAIL> to make DNS changes.'])
        assert_failed_change_in_error_response(response[12], input_name=rs_update_dummy_fqdn,
                                               change_type="DeleteRecordSet",
                                               error_messages=[f'User \"ok\" is not authorized. Contact zone owner group: {dummy_group_name} at <EMAIL> to make DNS changes.'])
        assert_failed_change_in_error_response(response[13], input_name=rs_update_dummy_fqdn, ttl=300,
                                               error_messages=[f'User \"ok\" is not authorized. Contact zone owner group: {dummy_group_name} at <EMAIL> to make DNS changes.'])
        assert_failed_change_in_error_response(response[14], input_name=rs_update_dummy_with_owner_fqdn, change_type="DeleteRecordSet",
                                               error_messages=[f'User \"ok\" is not authorized. Contact zone owner group: {dummy_group_name} at <EMAIL> to make DNS changes.'])
        assert_failed_change_in_error_response(response[15], input_name=rs_update_dummy_with_owner_fqdn, ttl=300,
                                               error_messages=[f'User \"ok\" is not authorized. Contact zone owner group: {dummy_group_name} at <EMAIL> to make DNS changes.'])
    finally:
        # Clean up updates
        # Each recordset must be deleted by the client that administers its zone.
        dummy_deletes = [rs for rs in to_delete if rs["zone"]["id"] == dummy_zone["id"]]
        ok_deletes = [rs for rs in to_delete if rs["zone"]["id"] != dummy_zone["id"]]
        clear_recordset_list(dummy_deletes, dummy_client)
        clear_recordset_list(ok_deletes, ok_client)
        dummy_client.delete_group(group_to_delete["id"], status=200)
def test_aaaa_recordtype_add_checks(shared_zone_test_context):
"""
Test all add validations performed on AAAA records submitted in batch changes
"""
client = shared_zone_test_context.ok_vinyldns_client
dummy_zone_name = shared_zone_test_context.dummy_zone["name"]
parent_zone_name = shared_zone_test_context.parent_zone["name"]
dummy_group_name = shared_zone_test_context.dummy_group["name"]
existing_aaaa_name = generate_record_name()
existing_aaaa_fqdn = existing_aaaa_name + "." + shared_zone_test_context.parent_zone["name"]
existing_aaaa = create_recordset(shared_zone_test_context.parent_zone, existing_aaaa_name, "AAAA", [{"address": "fc00:db20:35b:7399::5"}], 100)
existing_cname_name = generate_record_name()
existing_cname_fqdn = existing_cname_name + "." + shared_zone_test_context.parent_zone["name"]
existing_cname = create_recordset(shared_zone_test_context.parent_zone, existing_cname_name, "CNAME", [{"cname": "cname.data."}], 100)
good_record_name = generate_record_name()
good_record_fqdn = good_record_name + "." + shared_zone_test_context.parent_zone["name"]
batch_change_input = {
"changes": [
# valid changes
get_change_A_AAAA_json(good_record_fqdn, record_type="AAAA", address="fc00:db20:35b:7399::5"),
# input validation failures
get_change_A_AAAA_json(f"bad-ttl-and-invalid-name$.{parent_zone_name}", ttl=29, record_type="AAAA", address="fc00:db20:35b:7399::5"),
get_change_A_AAAA_json("reverse-zone.1.2.3.ip6.arpa.", record_type="AAAA", address="fc00:db20:35b:7399::5"),
# zone discovery failures
get_change_A_AAAA_json(f"no.subzone.{parent_zone_name}", record_type="AAAA", address="fc00:db20:35b:7399::5"),
get_change_A_AAAA_json("no.zone.at.all.", record_type="AAAA", address="fc00:db20:35b:7399::5"),
# context validation failures
get_change_CNAME_json(f"cname-duplicate.{parent_zone_name}"),
get_change_A_AAAA_json(f"cname-duplicate.{parent_zone_name}", record_type="AAAA", address="fc00:db20:35b:7399::5"),
get_change_A_AAAA_json(existing_aaaa_fqdn, record_type="AAAA", address="fc00:db20:35b:7399::5"),
get_change_A_AAAA_json(existing_cname_fqdn, record_type="AAAA", address="fc00:db20:35b:7399::5"),
get_change_A_AAAA_json(f"user-add-unauthorized.{dummy_zone_name}", record_type="AAAA", address="fc00:db20:35b:7399::5")
]
}
to_create = [existing_aaaa, existing_cname]
to_delete = []
try:
for create_json in to_create:
create_result = client.create_recordset(create_json, status=202)
to_delete.append(client.wait_until_recordset_change_status(create_result, "Complete"))
response = client.create_batch_change(batch_change_input, status=400)
# successful changes
assert_successful_change_in_error_response(response[0], input_name=good_record_fqdn, record_type="AAAA", record_data="fc00:db20:35b:7399::5")
# ttl, domain name, reverse zone input validations
assert_failed_change_in_error_response(response[1], input_name=f"bad-ttl-and-invalid-name$.{parent_zone_name}", ttl=29,
record_type="AAAA", record_data="fc00:db20:35b:7399::5",
error_messages=['Invalid TTL: "29", must be a number between 30 and 2147483647.',
f'Invalid domain name: "bad-ttl-and-invalid-name$.{parent_zone_name}", '
"valid domain names must be letters, numbers, underscores, and hyphens, joined by dots, and terminated with a dot."])
assert_failed_change_in_error_response(response[2], input_name="reverse-zone.1.2.3.ip6.arpa.",
record_type="AAAA", record_data="fc00:db20:35b:7399::5",
error_messages=["Invalid Record Type In Reverse Zone: record with name \"reverse-zone.1.2.3.ip6.arpa.\" "
"and type \"AAAA\" is not allowed in a | |
(1. + bervmax / c) / (1. + RV_table[0] / c)
llmax = ll[imax - 1 - 1] / (1. + berv / c) * (1. - bervmax / c) / (1. + RV_table[nx_ccf - 1] / c)
imin = 0; imax = n_mask - 1
#? turns out cpl_table_get indexes stating at 0...
while (imin < n_mask and mask['lambda'][imin] < (llmin + 0.5 * mask_width / c * llmin)): imin += 1
while (imax >= 0 and mask['lambda'][imax] > (llmax - 0.5 * mask_width / c * llmax)): imax -= 1
# print(imin, imax)
# for (i = imin; i <= imax; i++)
for i in range(imin, imax + 1):
#? cpl_array_get also indexes starting at 0
llcenter = mask['lambda'][i] * (1. + RV_table[nx_ccf // 2] / c)
# index_center = 1
# while(ll[index_center-1] < llcenter): index_center += 1
# my attempt to speed it up
index_center = np.where(ll < llcenter)[0][-1] +1
contrast = mask['contrast'][i]
w = contrast * contrast
# print(i, w)
for j in range(0, nx_ccf):
llcenter = mask['lambda'][i] * (1. + RV_table[j] / c)
llstart = llcenter - 0.5 * mask_width / c * llcenter
llstop = llcenter + 0.5 * mask_width / c * llcenter
# print(llstart, llcenter, llstop)
# index1 = 1
# while(ll2[index1-1] < llstart): index1 += 1
index1 = np.where(ll2 < llstart)[0][-1] +1
# index2 = index1
# while (ll2[index2-1] < llcenter): index2 += 1
index2 = np.where(ll2 < llcenter)[0][-1] +1
# index3 = index2
# while (ll2[index3-1] < llstop): index3 += 1;
index3 = np.where(ll2 < llstop)[0][-1] +1
# print(index1, index2, index3)
# sys.exit(0)
k = j
for index in range(index1, index3):
ccf_flux[k] += w * flux[index-1] / blaze[index-1] * blaze[index_center-1]
ccf_flux[k] += w * flux[index1 - 1 - 1] * (ll2[index1-1] - llstart) / dll[index1 - 1 - 1] / blaze[index1 - 1 - 1] * blaze[index_center - 1]
ccf_flux[k] -= w * flux[index3 - 1 - 1] * (ll2[index3-1] - llstop) / dll[index3 - 1 - 1] / blaze[index3 - 1 - 1] * blaze[index_center - 1]
ccf_error[k] += w * w * error[index2 - 1 - 1] * error[index2 - 1 - 1]
ccf_quality[k] += quality[index2 - 1 - 1]
# my_error = cpl_image_power(*CCF_error_RE,0.5);
ccf_error = np.sqrt(ccf_error)
return ccf_flux, ccf_error, ccf_quality
def find_dll(s2dfile):
    """Locate the DLL (pixel wavelength-step) calibration file for an S2D file.

    The file name is read from the 'HIERARCH ESO PRO REC1 CAL7 NAME' keyword of
    the S2D primary header and searched for in the current directory.

    Parameters
    ----------
    s2dfile : str
        Name of the S2D FITS file.

    Returns
    -------
    str or None
        Path to the calibration file, or None if no local copy was found.
    """
    hdu = fits.open(s2dfile)
    dllfile = hdu[0].header['HIERARCH ESO PRO REC1 CAL7 NAME']
    if os.path.exists(dllfile):
        return dllfile
    # BUG FIX: the original tested `len(glob(...)) > 1`, so when exactly ONE
    # file matched it fell through to the date branch and the function
    # implicitly returned None.  Any match is usable; glob once and reuse.
    matches = glob(dllfile + '*')
    if matches:
        return matches[0]
    # No local copy found.  The observation date could drive a remote lookup,
    # but that was never implemented -- return None explicitly so callers can
    # detect the failure.  TODO: implement the date-based lookup or raise
    # FileNotFoundError(dllfile) here.
    date = hdu[0].header['DATE-OBS']
    return None
def calculate_s2d_ccf(s2dfile, rvarray, order='all',
                      mask_file='ESPRESSO_G2.fits', mask=None, mask_width=0.5,
                      debug=False):
    """Calculate the CCF between an S2D spectrum and a line mask.

    Parameters
    ----------
    s2dfile : str
        Name of the S2D FITS file.
    rvarray : array
        RV array where to calculate the CCF.
    order : str or int
        Either 'all' to process every spectral order, or a single order index.
    mask_file : str
        FITS file containing the CCF mask (used only when `mask` is None).
    mask : dict-like, optional
        Pre-loaded mask; must contain the 'lambda' and 'contrast' keys.
    mask_width : float
        Width of the mask "lines" in km/s.
    debug : bool
        If True (single order only), return the inputs of the CCF computation
        instead of running it.

    Returns
    -------
    ccf, ccfe : arrays
        Per-order CCFs and errors; for order='all' the sum over orders is
        appended as the last row (with zero errors).
    """
    hdu = fits.open(s2dfile)
    if order == 'all':
        if debug:
            print('can only debug one order at a time...')
            return
        orders = range(hdu[1].data.shape[0])
        return_sum = True
    else:
        assert isinstance(order, int), 'order should be integer'
        orders = (order, )
        return_sum = False
    BERV = hdu[0].header['HIERARCH ESO QC BERV']
    BERVMAX = hdu[0].header['HIERARCH ESO QC BERVMAX']
    dllfile = hdu[0].header['HIERARCH ESO PRO REC1 CAL7 NAME']
    blazefile = hdu[0].header['HIERARCH ESO PRO REC1 CAL13 NAME']
    print('need', dllfile)
    print('need', blazefile)
    dllfile = glob(dllfile + '*')[0]
    # CCF mask
    if mask is None:
        mask = fits.open(mask_file)[1].data
    else:
        assert 'lambda' in mask, 'mask must contain the "lambda" key'
        assert 'contrast' in mask, 'mask must contain the "contrast" key'
    # get the flux correction stored in the S2D file
    # NOTE(review): the number of orders is hard-coded to 170 (ESPRESSO S2D);
    # confirm this matches hdu[1].data.shape[0] before reusing elsewhere.
    keyword = 'HIERARCH ESO QC ORDER%d FLUX CORR'
    flux_corr = [hdu[0].header[keyword % (o + 1)] for o in range(170)]
    # PERF: none of the following depends on the order, so compute/open once
    # instead of inside the loop (the original re-opened the dll and blaze
    # files and re-fit the polynomial for every order).
    # mean wavelength of each order (WAVEDATA_AIR_BARY)
    llc = np.mean(hdu[5].data, axis=1)
    # fit a degree-7 polynomial to the flux correction
    # (the original comment said "8th degree"; the degree actually used is 7)
    corr_model = np.polyval(np.polyfit(llc, flux_corr, 7), llc)
    dll_data = fits.open(dllfile)[1].data
    blaze_data = fits.open(blazefile)[1].data
    ccfs, ccfes = [], []
    for order in orders:
        # WAVEDATA_AIR_BARY
        ll = hdu[5].data[order, :]
        dll = dll_data[order, :]
        # dll = doppler_shift_wave(dll, -BERV, f=1.+1.55e-8)
        flux = hdu[1].data[order, :]
        error = hdu[2].data[order, :]
        quality = hdu[3].data[order, :]
        blaze = blaze_data[order, :]
        # undo the pipeline flux correction for this order
        y = flux * blaze / corr_model[order]
        ye = error * blaze / corr_model[order]
        if debug:
            return ll, dll, y, ye, blaze, quality, rvarray, mask, BERV, BERVMAX
        print('calculating ccf (order %d)...' % order)
        ccf, ccfe, _ = espdr_compute_CCF_fast(ll, dll, y, ye, blaze, quality,
                                              rvarray, mask, BERV, BERVMAX,
                                              mask_width=mask_width)
        ccfs.append(ccf)
        ccfes.append(ccfe)
    if return_sum:
        ccf = np.concatenate([ccfs, np.array(ccfs).sum(axis=0, keepdims=True)])
        ccfe = np.concatenate([ccfes, np.zeros(len(rvarray)).reshape(1, -1)])
        # what to do with the errors?
        return ccf, ccfe
    else:
        return np.array(ccfs), np.array(ccfes)
def find_file(file, ssh=None):
    """Find a (FITS) file locally or, failing that, copy it from an SSH host.

    Search order: exact name (or name + '.fits') in the current directory,
    then any 'file*.fits' match, then the local `locate` database, and
    finally `locate` + `scp` on the given SSH host.

    Parameters
    ----------
    file : str
        File name (possibly without extension) to look for.
    ssh : str, optional
        SSH target in the form "user@host".

    Returns
    -------
    str
        A path/name of the found (or copied) file.

    Raises
    ------
    FileNotFoundError
        If the file cannot be located anywhere.
    ValueError
        If `ssh` is not of the form "user@host".
    RuntimeError
        If the remote copy with scp fails.
    """
    print('Looking for file:', file)
    # first try here:
    if os.path.exists(file) or os.path.exists(file + '.fits'):
        print('\tfound it in current directory')
        # NOTE(review): glob order is arbitrary; if several names share this
        # prefix, the file returned may not be the exact one that exists.
        return glob(file + '*')[0]
    similar = glob(file + '*.fits')
    if len(similar) > 0:
        file = similar[0]
        print(f'\tfound a similar file in current directory ({file})')
        return file
    # try on the local machine
    try:
        found = subprocess.check_output(f'locate {file}'.split())
        found = found.decode().split()
        print('\tfound file:', found[-1])
        return found[-1]
    except subprocess.CalledProcessError:
        # `locate` exits non-zero when there is no match; only fatal if we
        # have no SSH fallback.
        if ssh is None:
            raise FileNotFoundError(file) from None
    # try on a server with SSH
    if ssh is not None:
        if '@' not in ssh:
            raise ValueError('ssh should be in the form "user@host"')
        # user, host = ssh.split('@')
        locate_cmd = f'ssh {ssh} locate {file}'
        try:
            found = subprocess.check_output(locate_cmd.split())
            found = found.decode().split()
            print('\tfound file:', ssh + ':' + found[-1])
        except subprocess.CalledProcessError:
            raise FileNotFoundError(file) from None
        full_path = found[-1]
        # copy the remote file into the current directory
        scp_cmd = f'scp {ssh}:{full_path} .'
        try:
            subprocess.check_call(scp_cmd.split())
            return os.path.split(full_path)[-1]
        except subprocess.CalledProcessError:
            raise RuntimeError(f'Could not scp {file} from {ssh}') from None
def _dowork(args, debug=False):
    """Worker for the parallel CCF calculation: process one spectral order.

    `args` is a tuple ``(order, kwargs)`` where `kwargs` bundles the shared
    inputs (S2D data, per-order dll and blaze, flux-correction model, RV
    array, mask and BERV information).  Returns ``(ccf, ccf_error,
    ccf_quality)`` for that order.
    """
    order, kw = args
    data = kw['data']
    mask = kw['mask']
    corr_model = kw['corr_model']
    # per-order calibration rows
    dll = kw['dll'][order]
    blaze = kw['blaze'][order]
    # mask columns as plain float64 arrays for the numba kernel
    mask_wave = mask['lambda'].astype(np.float64)
    mask_contrast = mask['contrast'].astype(np.float64)
    # this order's spectrum: wavelength (WAVEDATA_AIR_BARY), flux, error,
    # quality flags -- HDU indices 5, 1, 2 and 3 respectively
    ll = data[5][order, :]
    flux = data[1][order, :]
    error = data[2][order, :]
    quality = data[3][order, :]
    # undo the pipeline flux correction on the flux only
    y = flux * blaze / corr_model[order]
    ye = error * blaze  # / corr_model[order]
    return espdr_compute_CCF_numba_fast(
        ll, dll, y, ye, blaze, quality, kw['rvarray'], mask_wave,
        mask_contrast, kw['BERV'], kw['BERVMAX'],
        mask_width=kw['mask_width']
    )
def calculate_s2d_ccf_parallel(s2dfile, rvarray, order='all',
mask_file='ESPRESSO_G2.fits', mask_width=0.5,
ncores=None, verbose=True, full_output=False,
ignore_blaze=True, ssh=None):
"""
Calculate the CCF between a 2D spectra and a mask. This function can lookup
necessary files (locally or over SSH) and can perform the calculation in
parallel, depending on the value of `ncores`
Arguments
---------
s2dfile : str
The name of the S2D file
rvarray : array
RV array where to calculate the CCF
order : str or int
Either 'all' to calculate the CCF for all orders, or the order
mask_file : str
The fits file containing the CCF mask (may be in the current directory)
mask_width : float
The width of the mask "lines" in km/s
ncores : int
Number of CPU cores to use for the calculation (default: all available)
verbose : bool, default True
Print messages and a progress bar during the calcualtion
full_output : bool, default False
Return all the quantities that went into the CCF calculation (some
extracted from the S2D file)
ignore_blaze : bool, default False
If True, the function completely ignores any blaze correction and takes
the flux values as is from the S2D file
ssh : str
SSH information in the form "user@host" to look for required
calibration files in a server. If the files are not found locally, the
function tries the `locate` and `scp` commands to find and copy the
file from the SSH host
"""
hdu = fits.open(s2dfile)
norders, order_len = hdu[1].data.shape
if ncores is None:
ncores = get_ncores()
print(f'Using {ncores} CPU cores for the calculation')
if order == 'all':
orders = range(hdu[1].data.shape[0])
return_sum = True
else:
assert isinstance(order, int), 'order should be integer'
orders = (order, )
return_sum = False
BERV = hdu[0].header['HIERARCH ESO QC BERV']
BERVMAX = hdu[0].header['HIERARCH ESO QC BERVMAX']
## find and read the blaze file
if ignore_blaze:
blaze = np.ones_like(hdu[1].data)
else:
blazefile = hdu[0].header['HIERARCH ESO PRO REC1 CAL12 NAME']
blazefile = find_file(blazefile, ssh)
blaze = fits.open(blazefile)[1].data
## dll used to be stored in | |
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Copyright (c) 2021. <NAME> +
# All rights reserved. +
# This file is part of the edoC discord bot project , +
# and is released under the "MIT License Agreement". Please see the LICENSE +
# file that should have been included as part of this package. +
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
import asyncio
import json
from asyncio import sleep
from collections import Counter
from io import BytesIO
from random import *
from secrets import token_urlsafe
from typing import Optional
import discord
import discord.ext.commands
from aiotrivia import TriviaClient, AiotriviaException
from bs4 import BeautifulSoup
from discord import Embed, HTTPException
from discord.ext import commands
from discord.ext.commands import BucketType, command, max_concurrency, cooldown
from discord.ext.menus import MenuPages
from faker import Faker
from nekos import InvalidArgument, why, owoify, img
from phone_gen import PhoneNumber
from pyfiglet import figlet_format
from pyjokes import pyjokes
from cogs.Discordinfo import plural
from utils.apis.Somerandomapi import SRA
from utils.checks import MemberConverterr
from utils.default import config, CustomTimetext
from utils.http import get
from utils.pagination import UrbanSource
from utils.vars import *
class Fun(commands.Cog, description='Fun and entertaining commands can be found below'):
    def __init__(self, bot):
        # The running bot instance; also provides the shared aiohttp session
        # used by the SRA wrapper below.
        self.bot = bot
        # Bot configuration loaded from disk; holds third-party API tokens.
        self.config = config()
        # Token for the alexflipnote image API.
        self.alex_api_token = self.config["alexflipnote_api"]
        # NOTE(review): this attribute shadows the `trivia` command method on
        # instances.  The command still registers (discord.py binds commands
        # from the class body), but the name clash is fragile -- consider
        # renaming the attribute.
        self.trivia = TriviaClient()
        self.sra = SRA(session=self.bot.session)
    @command()
    async def trivia(self, ctx, difficulty: str):
        """Run a single trivia question of the given difficulty (easy/medium/hard)."""
        difficulty = difficulty.lower()
        try:
            question = await self.trivia.get_random_question(difficulty)
        except AiotriviaException as error:
            # aiotrivia signals a bad difficulty with a dedicated subclass;
            # compared by name here rather than with isinstance.
            if error.__class__.__name__ == 'InvalidDifficulty':
                return await ctx.error('Invalid Difficulty Please use either easy, medium or hard')
            return await ctx.error(f"{error.__class__.__name__}: {error}")
        answers = question.responses
        shuffle(answers)
        # Present the shuffled answers as a numbered list (1-based).
        final_answers = '\n'.join([f"{index}. {value}" for index, value in enumerate(answers, 1)])
        message = await ctx.invis(
            f"**{question.question}**\n{final_answers}\n{question.type.capitalize()} Question about {question.category}")
        # 1-based index of the correct answer within the shuffled list.
        answer = answers.index(question.answer) + 1
        # NOTE(review): closing the client here presumably breaks subsequent
        # invocations of this command (the client is created once in
        # __init__) -- verify whether aiotrivia reopens its session.
        await self.trivia.close()  # cleaning up
        try:
            while True:
                # NOTE(review): this check only excludes the bot's own prompt
                # message -- ANY user's message in ANY channel can answer.
                msg = await self.bot.wait_for('message', timeout=15, check=lambda m: m.id != message.id)
                if str(answer) in msg.content:
                    return await ctx.success(f"{answer} was correct! ({question.answer})")
        except asyncio.TimeoutError:
            await ctx.invis(f"The correct answer was {question.answer}")
@commands.command(aliases=["sayagain", 'repeat'])
async def echo(self, ctx, *, what_to_say: commands.clean_content):
""" repeats text """
await ctx.reply(f'🦜 {what_to_say}')
@commands.command(aliases=["8ball"])
async def eightball(self, ctx, *, question: commands.clean_content):
""" Consult 8ball to receive an answer """
answer = choice(ballresponse)
tosend = f"🎱 **Question:** {question}\n**Answer:** {answer}"
emb = discord.Embed(description=tosend, color=choice(ColorsList))
await ctx.reply(embed=emb)
@command(aliases=['ouija'], brief="Asks the mystical Ouija Board a question...")
async def askouija(self, ctx, *, question: str):
ouija_choice = choice(ouija_responses)
ouija_says = f"You asked me... '_{question}_'... I respond... {ouija_choice}"
await ctx.success(ouija_says)
@commands.command(aliases=['asciiart'])
async def ascii(self, ctx, *, value):
""" sends ascii style art """
art = figlet_format(f"{value}")
try:
await ctx.send(f"```\n{art}```")
except HTTPException:
await ctx.send('Thats a bit too long please try somthing shorter')
else:
return await ctx.error(
'please join the support server and ping the developer about this (i think there will be an error here sometime)')
@commands.command(aliases=["roll", "dice"])
async def rolldice(self, ctx, guess):
answer = randint(1, 6)
await ctx.reply(embed=discord.Embed(color=green if guess == answer else red,
description=f"{'True' if guess == answer else 'False'} your guess was {guess} and the answer was {answer}"))
@commands.command()
async def rip(self, ctx, name: str = None, *, text: str = None):
""" Sends a tombstone with a name with x text under
E.g. ~rip (dev)Jason **FREE** *at last..*"""
if name is None:
name = ctx.message.author.name
if len(ctx.message.mentions) >= 1:
name = ctx.message.mentions[0].name
if text is not None:
if len(text) > 22:
one = text[:22]
two = text[22:]
url = "http://www.tombstonebuilder.com/generate.php?top1=R.I.P&top3={0}&top4={1}&top5={2}".format(name,
one,
two).replace(
" ", "%20")
else:
url = "http://www.tombstonebuilder.com/generate.php?top1=R.I.P&top3={0}&top4={1}".format(name,
text).replace(
" ", "%20")
else:
if name[-1].lower() != 's':
name += "'s"
url = "http://www.tombstonebuilder.com/generate.php?top1=R.I.P&top3={0}&top4=Hopes and Dreams".format(
name).replace(" ", "%20")
await ctx.send(url)
@commands.command(aliases=['achievement', 'ach'])
async def mc(self, ctx, *, txt: str):
"""Generate a Minecraft Achievement"""
author = ctx.author.display_name if len(ctx.author.display_name) < 22 else "Achievement Unlocked!"
t = txt.replace(' ', '+')
a = author.replace(' ', '+')
if len(txt) > 25:
return await ErrorEmbed(ctx, err="Please keep your message under 25 chars")
api = f'https://mcgen.herokuapp.com/a.php?i=2&h={a}&t={t}'
emb = discord.Embed(color=random_color())
emb.set_image(url=api)
await ctx.reply(embed=emb)
@commands.command(aliases=["rfact", "rf"])
@commands.cooldown(rate=1, per=2, type=commands.BucketType.user)
async def RandomFact(self, ctx):
""" Legit just posts a random fact"""
fact = choice(random_facts)
emb = discord.Embed(description=fact, color=choice(ColorsList))
await ctx.reply(embed=emb, mention_author=False)
async def api_img_creator(self, ctx, url: str, filename: str, content: str = None):
async with ctx.channel.typing():
req = await get(url, res_method="read")
if not req:
return await ctx.send("I couldn't create the image ;-;")
bio = BytesIO(req)
bio.seek(0)
await ctx.send(content=content, file=discord.File(bio, filename=filename))
# @commands.command(aliases=["doggo"])
# @commands.cooldown(rate=1, per=1.5, type=commands.BucketType.user)
# async def dog(self, ctx):
# """ Posts a random dog """
# await self.randomimageapi(ctx, "https://dog/woof.json", "url")
    @commands.command(aliases=["flip", "coin"])
    async def coinflip(self, ctx):
        """ Coinflip! """
        # NOTE(review): this definition is DEAD CODE -- a second `coinflip`
        # with the same name and aliases appears later in the class body and
        # overwrites this one before discord.py registers any commands.
        # Delete one of the two definitions.
        coinsides = ["Heads", "Tails"]
        await ctx.send(f"**{ctx.author.name}** flipped a coin and got **{choice(coinsides)}**!")
@commands.command(aliases=["flip", "coin"])
async def coinflip(self, ctx, *, toss='Heads'):
""" Coinflip! """
responses = ['Heads', 'Tails']
if len(toss) > 100:
return await ErrorEmbed(ctx=ctx, err='Please keep the length of your toss down')
value = randint(0, 0xffffff)
embed = discord.Embed(
colour=value,
)
embed.add_field(name=f'**User Side:** {toss}\n**Result:** {choice(responses)}',
value="Someone is gonna go cry to mommy.", inline=False)
await ctx.send(embed=embed)
@commands.command(aliases=['Programmingjoke', 'pj'])
async def ProgrammerHumor(self, ctx):
""" Just run the command """
joke = pyjokes.get_joke()
await ctx.reply(joke)
    @commands.command(aliases=['renamedchuckJokes', 'gudjokesherenoscam', 'CJ'])
    async def ChuckJoke(self, ctx, person: MemberConverterr = None):
        """ChuckNorris is the only man to ever defeat a brick wall in a game of tennis."""
        joke = choice(chuckjoke)
        if person is not None:
            try:
                # NOTE(review): `person` is whatever MemberConverterr yields --
                # presumably a Member object, in which case str.replace raises
                # the TypeError caught below and the AUTHOR's name is used
                # instead of the target's.  Verify whether
                # `str(person)`/`person.display_name` was intended here.
                nj = joke.replace('<NAME>', person)
            except TypeError:
                nj = joke.replace('<NAME>', ctx.author.display_name)
        else:
            # No target: send the joke verbatim.
            nj = joke
        await ctx.reply(embed=discord.Embed(color=green, description=nj))
@commands.command(aliases=['quote'])
async def inspire(self, ctx):
async with ctx.session.get("https://zenquotes.io/api/random") as api:
data = await api.read()
data2 = json.loads(data)
await ctx.send(embed=discord.Embed(description=data2[0]['q'], color=invis).set_author(name=data2[0]["a"]))
@commands.command()
@commands.cooldown(rate=1, per=1.5, type=commands.BucketType.user)
async def topic(self, ctx):
""" Generates a random topic to start a conversation up"""
url = "https://www.conversationstarters.com/generator.php"
async with self.bot.session.get(url) as r:
output = await r.read()
soup = BeautifulSoup(output, 'html5lib')
topics = soup.find("div", {"id": "random"})
topic = topics.contents[1]
await ctx.send(f"**{topic}**")
@commands.command(aliases=["ie"])
async def iseven(self, ctx, num: int):
""" checks if a number is even or not"""
async with ctx.session.get(f'https://api.isevenapi.xyz/api/iseven/{num}/') as api:
data = await api.json()
if data["iseven"]:
color = green
answer = "Yes"
answer2 = " "
else:
color = red
answer = "No"
answer2 = " not"
embed = discord.Embed(
title="**IsEven finder**",
description=f"{answer} {num} is{answer2} even",
color=color,
timestamp=ctx.message.created_at
)
embed.set_footer(text=data["ad"])
await ctx.send(embed=embed)
@commands.command(aliases=['randint', 'rn'])
async def RandomNumber(self, ctx, minimum=0, maximum=100):
"""Displays a random number within an optional range.
The minimum must be smaller than the maximum and the maximum number
accepted is 1000.
"""
maximum = min(maximum, 1000)
if minimum >= maximum:
return await ctx.send('Maximum is smaller than minimum.')
await ctx.send(randint(minimum, maximum))
@commands.command(aliases=['random-lenny', 'rl'])
async def rlenny(self, ctx):
"""Displays a random lenny face."""
lenny = choice([
"( ͡° ͜ʖ ͡°)", "( ͠° ͟ʖ ͡°)", "ᕦ( ͡° ͜ʖ ͡°)ᕤ", "( ͡~ ͜ʖ ͡°)",
"( ͡o ͜ʖ ͡o)", "͡(° ͜ʖ ͡ -)", "( ͡͡ ° ͜ ʖ ͡ °)", "(ง ͠° ͟ل͜ ͡°)ง",
"ヽ༼ຈل͜ຈ༽ノ"
])
await ctx.send(lenny)
@commands.command(aliases=['pick'])
async def choose(self, ctx, *choices: commands.clean_content):
"""Chooses between multiple choices.
To denote multiple choices, you should use double quotes.
"""
if len(choices) < 2:
return await ctx.send('Not enough choices to pick from.')
await ctx.send(choice(choices))
@commands.command(aliases=['CBO'])
async def choosebestof(self, ctx, times: Optional[int], *choices: commands.clean_content):
"""Chooses between multiple choices N times.
To denote multiple choices, you should use double quotes.
You can only choose up to 10001 times and only the top 15 results are shown.
"""
if len(choices) < 2:
return await ctx.send('Not enough choices to pick from.')
if times is None:
times = (len(choices) ** 2) + 1
times = min(10001, max(1, times))
results = Counter(choice(choices) for i in range(times))
builder = []
if len(results) > 15:
builder.append('Only showing top 15 results...')
for index, (elem, count) in enumerate(results.most_common(15), start=1):
builder.append(f'{index}. {elem} ({plural(count):time}, {count / times:.2%})')
data = '\n'.join(builder)
data = BytesIO(data.encode("utf-8"))
await ctx.send(file=discord.File(data, filename=f"{CustomTimetext('prolog', 'output')}"))
@commands.command(name="guessthenumber", aliases=["gtn"], brief="Guess the number game!")
@commands.max_concurrency(1, commands.BucketType.user)
async def gtn(self, ctx):
"""Play a guess the number game! You have three chances to guess the number 1-10"""
no = randint(1, 10) # randrange to randint
await ctx.success(
"A number between **1 and 10** has been chosen, You have 3 attempts to guess the right number! Type your guess in the chat as a valid number!"
# no f
)
for i in range(3):
try:
response = await self.bot.wait_for(
"message",
timeout=10,
check=lambda m: m.author == ctx.author and m.channel == ctx.channel,
)
except asyncio.TimeoutError:
await ctx.error(
| |
1, None, 1, None ) } )
if on_local_files:
# same deal, just smaller file domain
test_ac( 'mc bad*', my_service_key, CC.LOCAL_FILE_SERVICE_KEY, { 'mc bad' : ( 2, None, 0, None ), 'mc good' : ( 2, None, 0, None ) }, { 'mc good' : ( 3, None, 0, None ) } )
test_ac( 'pc bad*', public_service_key, CC.LOCAL_FILE_SERVICE_KEY, { 'pc bad' : ( 2, None, 0, None ), 'pc good' : ( 2, None, 0, None ) }, { 'pc good' : ( 3, None, 0, None ) } )
test_ac( 'pp bad*', public_service_key, CC.LOCAL_FILE_SERVICE_KEY, { 'pp bad' : ( 0, None, 2, None ), 'pp good' : ( 0, None, 2, None ) }, { 'pp good' : ( 0, None, 3, None ) } )
test_ac( 'sameus aran*', my_service_key, CC.LOCAL_FILE_SERVICE_KEY, { 'sameus aran' : ( 1, None, 0, None ) }, { 'samus metroid' : ( 1, None, 0, None ) } )
test_ac( 'samus metroid*', public_service_key, CC.LOCAL_FILE_SERVICE_KEY, { 'samus metroid' : ( 1, None, 0, None ), 'character:samus aran' : ( 0, None, 1, None ) }, { 'character:samus aran' : ( 1, None, 1, None ) } )
test_ac( 'samus aran*', public_service_key, CC.LOCAL_FILE_SERVICE_KEY, { 'samus metroid' : ( 1, None, 0, None ), 'character:samus aran' : ( 0, None, 1, None ) }, { 'character:samus aran' : ( 1, None, 1, None ) } )
test_ac( 'mc bad*', CC.COMBINED_TAG_SERVICE_KEY, CC.LOCAL_FILE_SERVICE_KEY, { 'mc bad' : ( 2, None, 0, None ), 'mc good' : ( 2, None, 0, None ) }, { 'mc good' : ( 3, None, 0, None ) } )
test_ac( 'pc bad*', CC.COMBINED_TAG_SERVICE_KEY, CC.LOCAL_FILE_SERVICE_KEY, { 'pc bad' : ( 2, None, 0, None ), 'pc good' : ( 2, None, 0, None ) }, { 'pc good' : ( 3, None, 0, None ) } )
test_ac( 'pp bad*', CC.COMBINED_TAG_SERVICE_KEY, CC.LOCAL_FILE_SERVICE_KEY, { 'pp bad' : ( 0, None, 2, None ), 'pp good' : ( 0, None, 2, None ) }, { 'pp good' : ( 0, None, 3, None ) } )
# here the write a/c gets funky because of all known tags. finding counts for disjoint yet now merged sibling suggestions even though not on same tag domain
# slightly odd situation, but we'll want to clear it up
# this is cleared up UI side when it does sibling_tag_id filtering based on the tag service we are pending to, but it shows that a/c fetch needs an optional sibling_tag_service_key
# this is a job for tag search context
# read a/c counts are fine
test_ac( 'sameus aran*', CC.COMBINED_TAG_SERVICE_KEY, CC.LOCAL_FILE_SERVICE_KEY, { 'sameus aran' : ( 1, None, 0, None ), 'samus metroid' : ( 1, None, 0, None ) }, { 'samus metroid' : ( 1, None, 0, None ) } )
test_ac( 'samus metroid*', CC.COMBINED_TAG_SERVICE_KEY, CC.LOCAL_FILE_SERVICE_KEY, { 'sameus aran' : ( 1, None, 0, None ), 'samus metroid' : ( 1, None, 0, None ), 'character:samus aran' : ( 0, None, 1, None ) }, { 'samus metroid' : ( 1, None, 0, None ), 'character:samus aran' : ( 1, None, 1, None ) } )
test_ac( 'samus aran*', CC.COMBINED_TAG_SERVICE_KEY, CC.LOCAL_FILE_SERVICE_KEY, { 'samus metroid' : ( 1, None, 0, None ), 'character:samus aran' : ( 0, None, 1, None ) }, { 'samus metroid' : ( 1, None, 0, None ), 'character:samus aran' : ( 1, None, 1, None ) } )
else:
test_ac( 'mc bad*', my_service_key, CC.LOCAL_FILE_SERVICE_KEY, {}, {} )
test_ac( 'pc bad*', public_service_key, CC.LOCAL_FILE_SERVICE_KEY, {}, {} )
test_ac( 'pp bad*', public_service_key, CC.LOCAL_FILE_SERVICE_KEY, {}, {} )
test_ac( 'sameus aran*', my_service_key, CC.LOCAL_FILE_SERVICE_KEY, {}, {} )
test_ac( 'samus metroid*', public_service_key, CC.LOCAL_FILE_SERVICE_KEY, {}, {} )
test_ac( 'samus aran*', public_service_key, CC.LOCAL_FILE_SERVICE_KEY, {}, {} )
test_ac( 'mc bad*', CC.COMBINED_TAG_SERVICE_KEY, CC.LOCAL_FILE_SERVICE_KEY, {}, {} )
test_ac( 'pc bad*', CC.COMBINED_TAG_SERVICE_KEY, CC.LOCAL_FILE_SERVICE_KEY, {}, {} )
test_ac( 'pp bad*', CC.COMBINED_TAG_SERVICE_KEY, CC.LOCAL_FILE_SERVICE_KEY, {}, {} )
test_ac( 'sameus aran*', CC.COMBINED_TAG_SERVICE_KEY, CC.LOCAL_FILE_SERVICE_KEY, {}, {} )
test_ac( 'samus metroid*', CC.COMBINED_TAG_SERVICE_KEY, CC.LOCAL_FILE_SERVICE_KEY, {}, {} )
test_ac( 'samus aran*', CC.COMBINED_TAG_SERVICE_KEY, CC.LOCAL_FILE_SERVICE_KEY, {}, {} )
# remove the application
master_service_keys_to_applicable_service_keys = { my_service_key : [], processing_service_key : [], public_service_key : [] }
self._write( 'tag_sibling_application', master_service_keys_to_applicable_service_keys )
self.assertEqual( self._read( 'tag_siblings_all_ideals', my_service_key ), {} )
self.assertEqual( self._read( 'tag_siblings_all_ideals', processing_service_key ), {} )
self.assertEqual( self._read( 'tag_siblings_all_ideals', public_service_key ), {} )
test_no_sibs()
# apply across to both, which should do A->B->C chain
master_service_keys_to_applicable_service_keys = { my_service_key : [ my_service_key, public_service_key ], processing_service_key : [], public_service_key : [ my_service_key, public_service_key ] }
self._write( 'tag_sibling_application', master_service_keys_to_applicable_service_keys )
self.assertEqual( self._read( 'tag_siblings_all_ideals', my_service_key ), { 'mc bad' : 'mc good', 'sameus aran' : 'character:samus aran', 'pc bad' : 'pc good', 'pp bad' : 'pp good', 'samus metroid' : 'character:samus aran' } )
self.assertEqual( self._read( 'tag_siblings_all_ideals', processing_service_key ), {} )
self.assertEqual( self._read( 'tag_siblings_all_ideals', public_service_key ), { 'mc bad' : 'mc good', 'sameus aran' : 'character:samus aran', 'pc bad' : 'pc good', 'pp bad' : 'pp good', 'samus metroid' : 'character:samus aran' } )
for do_regen_sibs in ( False, True ):
if do_regen_sibs:
self._write( 'regenerate_tag_siblings_cache' )
for do_regen_display in ( False, True ):
if do_regen_display in ( False, True ):
self._write( 'regenerate_tag_display_mappings_cache' )
hash_ids_to_tags_managers = self._read( 'force_refresh_tags_managers', hash_ids )
self.assertEqual( hash_ids_to_tags_managers[ samus_bad_hash_id ].GetCurrent( my_service_key, ClientTags.TAG_DISPLAY_SIBLINGS_AND_PARENTS ), { 'mc good', 'character:<NAME>' } )
self.assertEqual( hash_ids_to_tags_managers[ samus_bad_hash_id ].GetCurrent( processing_service_key, ClientTags.TAG_DISPLAY_SIBLINGS_AND_PARENTS ), { 'process these' } )
self.assertEqual( hash_ids_to_tags_managers[ samus_bad_hash_id ].GetCurrent( public_service_key, ClientTags.TAG_DISPLAY_SIBLINGS_AND_PARENTS ), { 'pc good' } )
self.assertEqual( hash_ids_to_tags_managers[ samus_bad_hash_id ].GetPending( public_service_key, ClientTags.TAG_DISPLAY_SIBLINGS_AND_PARENTS ), { 'pp good' } )
self.assertEqual( hash_ids_to_tags_managers[ samus_bad_hash_id ].GetCurrentAndPending( CC.COMBINED_TAG_SERVICE_KEY, ClientTags.TAG_DISPLAY_SIBLINGS_AND_PARENTS ), { 'mc good', 'character:sam<NAME>', 'process these', 'pc good', 'pp good' } )
self.assertEqual( hash_ids_to_tags_managers[ samus_both_hash_id ].GetCurrent( my_service_key, ClientTags.TAG_DISPLAY_SIBLINGS_AND_PARENTS ), { 'mc good', 'mc good' } )
self.assertEqual( hash_ids_to_tags_managers[ samus_both_hash_id ].GetCurrent( processing_service_key, ClientTags.TAG_DISPLAY_SIBLINGS_AND_PARENTS ), { 'process these' } )
self.assertEqual( hash_ids_to_tags_managers[ samus_both_hash_id ].GetCurrent( public_service_key, ClientTags.TAG_DISPLAY_SIBLINGS_AND_PARENTS ), { 'pc good', 'pc good', 'character:sam<NAME>' } )
self.assertEqual( hash_ids_to_tags_managers[ samus_both_hash_id ].GetPending( public_service_key, ClientTags.TAG_DISPLAY_SIBLINGS_AND_PARENTS ), { 'pp good', 'pp good' } )
self.assertEqual( hash_ids_to_tags_managers[ samus_both_hash_id ].GetCurrentAndPending( CC.COMBINED_TAG_SERVICE_KEY, ClientTags.TAG_DISPLAY_SIBLINGS_AND_PARENTS ), { 'mc good', 'mc good', 'process these', 'pc good', 'pc good', 'character:sam<NAME>', 'pp good', 'pp good' } )
self.assertEqual( hash_ids_to_tags_managers[ samus_good_hash_id ].GetCurrent( my_service_key, ClientTags.TAG_DISPLAY_SIBLINGS_AND_PARENTS ), { 'mc good' } )
self.assertEqual( hash_ids_to_tags_managers[ samus_good_hash_id ].GetCurrent( processing_service_key, ClientTags.TAG_DISPLAY_SIBLINGS_AND_PARENTS ), { 'process these' } )
self.assertEqual( hash_ids_to_tags_managers[ samus_good_hash_id ].GetCurrent( public_service_key, ClientTags.TAG_DISPLAY_SIBLINGS_AND_PARENTS ), { 'pc good' } )
self.assertEqual( hash_ids_to_tags_managers[ samus_good_hash_id ].GetPending( public_service_key, ClientTags.TAG_DISPLAY_SIBLINGS_AND_PARENTS ), { 'pp good', 'character:<NAME>' } )
self.assertEqual( hash_ids_to_tags_managers[ samus_good_hash_id ].GetCurrentAndPending( CC.COMBINED_TAG_SERVICE_KEY, ClientTags.TAG_DISPLAY_SIBLINGS_AND_PARENTS ), { 'mc good', 'process these', 'pc good', 'pp good', 'character:<NAME>' } )
# now we get more write a/c suggestions, and accurated merged read a/c values
test_ac( 'mc bad*', my_service_key, CC.COMBINED_FILE_SERVICE_KEY, { 'mc bad' : ( 2, None, 0, None ), 'mc good' : ( 2, None, 0, None ) }, { 'mc good' : ( 3, None, 0, None ) } )
test_ac( 'pc bad*', public_service_key, CC.COMBINED_FILE_SERVICE_KEY, { 'pc bad' : ( 2, None, 0, None ), 'pc good' : ( 2, None, 0, None ) }, { 'pc good' : ( 3, None, 0, None ) } )
test_ac( 'pp bad*', public_service_key, CC.COMBINED_FILE_SERVICE_KEY, { 'pp bad' : ( 0, None, 2, None ), 'pp good' : ( 0, None, 2, None ) }, { 'pp good' : ( 0, None, 3, None ) } )
test_ac( 'sameus aran*', my_service_key, CC.COMBINED_FILE_SERVICE_KEY, { 'sameus aran' : ( 1, None, 0, None ) }, { 'character:samus aran' : ( 1, None, 0, None ) } )
test_ac( 'samus metroid*', public_service_key, CC.COMBINED_FILE_SERVICE_KEY, { 'samus metroid' : ( 1, None, 0, None ), 'character:samus aran' : ( 0, None, 1, None ) }, { 'character:samus aran' : ( 1, None, 1, | |
import occ.spawn as mspawn
import time
import datetime
import occ.yeshup as yeshup
import os
import traceback
import functools
bind = functools.partial
from occ.inbox import *
SHORT_WAIT = 0.01
#############################################################################
# utility functions
# this runs f in another process, passes it a inbox
# it returns the inbox address to send to that server
def spawn(f):
# used only to get the address of the spawned process's
# listening socket
(loc,rem) = sck.socketpair()
def wrap_f(sck,f):
yeshup.yeshup_me()
with make_with_server() as ib:
sck.send_value(ib.addr)
f(ib)
p = mspawn.spawn_basic(bind(wrap_f, rem,f))
rem.detach_close()
addr = loc.receive_value()
return (addr, p)
def delayed_send_process(addr, msg, tm, ib):
time.sleep(tm)
ib.send(addr, msg)
def send_after_delay(addr, msg, tm):
(_, p) = spawn(bind(delayed_send_process, addr, msg, tm))
return p
# read everything already in the inbox
def read_all_inbox(ib):
ret = []
while True:
x = ib.receive(timeout=0)
if x == ReceiveTimeout():
break
ret.append(x)
return ret
# remove all messages from inbox and throw away
def flush_buffer(ib):
while True:
x = ib.receive(timeout=0)
if x == ReceiveTimeout():
break
def assert_is_instance(trp, nm, exp,got):
if isinstance(got,exp):
trp.tpass(nm)
else:
trp.fail(f"{nm} expected instance of {exp}, got {got} :: {type(got)}")
def assert_inbox_empty(trp, ib):
x = read_all_inbox(ib)
trp.assert_equal("inbox empty", [], x)
#############################################################################
# test cases
# create a inbox, send a message to it, read the message out of the inbox
def test_self_send(trp):
with make_with_server() as ib:
ib.send(ib.addr, "hello")
msg = ib.receive()
trp.assert_equal("send and receive message in same process", "hello", msg)
# create a inbox in another process, send a message to it,
# get a reply
def test_send_other_process(trp):
def srv(trp, ib):
x = ib.q.get()
match x:
case (ret, v):
ib.send(ret, ("got", v))
case _:
#print(f"expected (ret,v), got {x}")
trp.fail(f"expected (ret,v), got {x}")
# how to exit the test reliably when this happens?
# the other side will deadlock
# extend test framework to figure out how can pass trp to the
# other process? update: it magically works, no idea how ...
# use it for now and come back to it
# usually it gives an error when you try to pickle a socket
# if the socket is being passed, this is not reliable
# since now there are two processes writing to the same socket
# so there's a chance of messages being interleaved and therefore
# corrupted
(addr,p) = spawn(bind(srv,trp))
with make_with_server() as ib:
ib.send(addr, (ib.addr, "stuff"))
msg = ib.receive()
trp.assert_equal("exchange messages with another process", ("got", "stuff"), msg)
p.join()
# create a inbox in another process
# create several client processes which all send messages
# to the server process and get replies
# make sure the test fails if the client messages aren't
# received interleaved in the server -> this checks the
# test is good enough quality
def test_many_clients(trp):
def client_process(trp, addr, nm, n, ib):
failed = False
for i in range(0,n):
ib.send(addr, (ib.addr, i))
x = ib.receive()
match x:
case ("got", m) if m == i:
# print(f"client {nm} {i}")
pass
case _:
trp.fail(f"expected {('got', n)}, got {x}")
failed = True
if not failed:
trp.tpass(f"client {nm}")
def server_process(trp, ib):
while True:
x = ib.receive()
match x:
case "exit":
# todo: check requests were interleaved
break
case (addr, y):
#print(f"client {addr} {y}")
ib.send(addr, ("got", y))
case _:
trp.fail(f"expected exit or (addr,x), got {x}")
(saddr,sp) = spawn(bind(server_process,trp))
num_messages = 50
num_clients = 10
clis = []
for i in range(0,num_clients):
(_,cp) = spawn(bind(client_process, trp, saddr, f"client {i}", num_messages))
clis.append(cp)
for i in clis:
i.join()
with make_with_server() as ib:
ib.send(saddr, "exit")
sp.join()
# a client sends all messages then reads the responses
# todo: do a test with a extra process per client to read the responses
def test_xmany_clients_pipelined(trp):
def client_process(trp, addr, nm, n, ib):
failed = False
for i in range(0,n):
ib.send(addr, (ib.addr, i))
expect_in_order = False
if expect_in_order:
for i in range(0,n):
x = ib.receive()
match x:
case ("got", m) if m == i:
# print(f"client {nm} {i}")
pass
case _:
trp.fail(f"expected {('got', n)}, got {x}")
failed = True
else:
l = []
for i in range(0,n):
l.append(ib.receive())
for i in range(0,n):
l.remove(("got", i))
if len(l) > 0:
trp.fail(f"wrong messages received: {l}")
failed = True
if not failed:
trp.tpass(f"client {nm}")
def server_process(trp, ib):
while True:
x = ib.receive()
match x:
case "exit":
# todo: check requests were interleaved
break
case (addr, y):
#print(f"client {addr} {y}")
ib.send(addr, ("got", y))
case _:
trp.fail(f"expected exit or (addr,x), got {x}")
(saddr,sp) = spawn(bind(server_process,trp))
n = 50
clis = []
for i in range(0,10):
(_,cp) = spawn(bind(client_process, trp, saddr, f"rpcs {i}", n))
clis.append(cp)
for i in clis:
i.join()
with make_with_server() as ib:
ib.send(saddr, "exit")
sp.join()
######################################
# timeout tests
def test_timeout0_empty(trp):
with make_with_server() as ib:
msg = ib.receive(timeout=0)
trp.assert_equal("receive timeout 0 empty inbox", ReceiveTimeout(), msg)
def test_timeout0_nonempty(trp):
with make_with_server() as ib:
ib.send(ib.addr, "xx")
time.sleep(SHORT_WAIT)
msg = ib.receive(timeout=0)
trp.assert_equal("receive timeout 0 non empty inbox", "xx", msg)
# timeout with posting a message too late to check it times out
# then it reads the message without a timeout to make sure it comes through
def test_timeout_timesout(trp):
with make_with_server() as ib:
send_after_delay(ib.addr, "xxx", SHORT_WAIT * 2)
st = datetime.datetime.now()
msg = ib.receive(timeout=SHORT_WAIT)
trp.assert_equal("receive timeout times out", ReceiveTimeout(), msg)
elapsed = (datetime.datetime.now() - st).total_seconds()
trp.assert_true("timeout time", (elapsed - SHORT_WAIT) < 0.01)
msg = ib.receive()
trp.assert_equal("receive timeout get after timeout", "xxx", msg)
def test_timeout_explicit_infinity(trp):
with make_with_server() as ib:
send_after_delay(ib.addr, "xxx", SHORT_WAIT * 2)
msg = ib.receive(timeout=Infinity())
trp.assert_equal("timeout explicit infinity", "xxx", msg)
def test_read_all_inbox(trp):
with make_with_server() as ib:
msgs = ['a', 'b', 'c']
for i in msgs:
ib.send(ib.addr, i)
time.sleep(SHORT_WAIT)
res = read_all_inbox(ib)
trp.assert_equal("read all buffer", sorted(msgs), sorted(res))
time.sleep(SHORT_WAIT)
res2 = read_all_inbox(ib)
trp.assert_equal("read all buffer empty", [], res2)
def test_flush_buffer(trp):
with make_with_server() as ib:
msgs = ['a', 0, True]
for i in msgs:
ib.send(ib.addr, i)
time.sleep(SHORT_WAIT)
flush_buffer(ib)
res2 = read_all_inbox(ib)
trp.assert_equal("read all buffer empty", [], res2)
######################################
# selective receive
# test some selective receive stuff
# test get everything matching predicate in buffer
def test_selective_receive1(trp):
with make_with_server() as ib:
ib.send(ib.addr, ("message1",))
ib.send(ib.addr, ("message1.5",))
ib.send(ib.addr, ("message2",))
def match1(x):
#print(f"match1 {x}")
match x:
case ("message2",):
#print(f"2 {x}")
return (1,x)
case ("message1.5",):
#print(f"1.5 {x}")
return (2,x)
x = ib.receive( match=match1)
trp.assert_equal("test_selective_receive1 1", x, (2,("message1.5",)))
x = ib.receive( match=match1)
trp.assert_equal("test_selective_receive1 2", x, (1,("message2",)))
x = ib.receive()
trp.assert_equal("test_selective_receive1 3", x, ("message1",))
# timeout style one: without a case for this
x = ib.receive( match=match1, timeout=0)
assert_is_instance(trp, "test_selective_receive1 4", ReceiveTimeout, x)
assert_inbox_empty(trp, ib)
def test_selective_receive2(trp):
with make_with_server() as ib:
# timeout style two: using a case in the match function
def match2(x):
#print(x)
#print(f"{x}")
match x:
case ("message2",):
#print('{("message2",)}')
#print(f"2 {x}")
return (1,x)
case ("message1.5",):
#print(f"1.5 {x}")
return (2,x)
case ReceiveTimeout():
#print(f"timeout")
return "timeout"
x = ib.receive( match=match2, timeout=0)
trp.assert_equal("test_selective_receive2", "timeout", x)
assert_inbox_empty(trp, ib)
def test_selective_receive3(trp):
with make_with_server() as ib:
# post a couple of messages that don't match
ib.send(ib.addr, ("message1",))
ib.send(ib.addr, ("message1.5",))
# post another message that does match with delay
send_after_delay(ib.addr, ("message2",), SHORT_WAIT)
# post another message that does match (done in a spawned process)
# then get matching the second
# then get matching the first
def match3(x):
match x:
case ("message2",):
return x
# check with 0 timeout it times out
x = ib.receive( match=match3, timeout=0)
assert_is_instance(trp, "test_selective_receive3 1", ReceiveTimeout, x)
# check waiting for the matching message to be posted
x = ib.receive( match=match3)
trp.assert_equal("test_selective_receive3 2", ("message2",), x)
# get the other two messages in reverse order
def match4(x):
match x:
case ("message1.5",):
return x
x = ib.receive( match=match4)
trp.assert_equal("test_selective_receive3 3", ("message1.5",), x)
x = ib.receive()
trp.assert_equal("test_selective_receive3 4", ("message1",), x)
assert_inbox_empty(trp, ib)
def test_timeout_with_unmatching_message(trp):
"""
receive with timeout
theres a message which doesn't match, which gets
added to the buffer
let it timeout
then do a regular receive
"""
with make_with_server() as ib:
ib.send(ib.addr, 1)
def m(x):
match x:
case 2:
return 2
x = ib.receive(timeout=SHORT_WAIT,match=m)
assert_is_instance(trp, "test_timeout_with_unmatching_message 1", ReceiveTimeout, x)
x = ib.receive()
trp.assert_equal("xx", 1, x)
send_after_delay(ib.addr, 1, SHORT_WAIT)
def m(x):
match x:
case 2:
return 2
x = ib.receive(timeout=SHORT_WAIT * 2,match=m)
assert_is_instance(trp, "test_timeout_with_unmatching_message 2", ReceiveTimeout, x)
x = ib.receive()
trp.assert_equal("xx", 1, x)
def test_timeout_with_unmatching_message2(trp):
"""
do a match which matches the second message
then get the first message
"""
with make_with_server() as ib:
def m(x):
match x:
case 2:
return 2
| |
# -*- coding: utf-8 -*-
"""
Created on Mon Sept 12 16:35:13 2016
@author: <NAME>
"""
# # fill_array_scan_2b
def maxj_readline_func(expand_data_to_fit_line, nb_px_fast, offset, nb_el_line_list, ct_i, ishg_EOM_AC_flag, ovrsmp_ph, oversampling00, oversampling, method_fast, numpy):
# # only if read_duration_lines
if (method_fast and expand_data_to_fit_line):
max_j = int(round(nb_px_fast - offset)) # # max_j is the whole line
oversampling = int(round((nb_el_line_list[ct_i]/nb_px_fast))) # round
if ishg_EOM_AC_flag: # iSHG fast, special case
ovrsmp_ph = round(ovrsmp_ph/oversampling00*oversampling)
else: # method_slow or not expand_data_to_fit_line
max_j = int(min(round(round(nb_el_line_list[ct_i]/oversampling - offset)), nb_px_fast)) # # added int 2019.06.12
# # print('offsetl11', offset, max_j, nb_px_fast)
return max_j, oversampling, ovrsmp_ph
def extract_onePXorline_from_buffer(numpy, method_is_fast, use_volt_not_raw, use_median, data, num_pmt, pack_param, min_val_volt_list, max_val_volt_list, max_value_pixel, ct):
if not method_is_fast: # slow (usually line by line)
[ind_data_down, ind_data_up, axis_sum, avg_px] = pack_param
# # print('walla 26', axis_sum)
if ind_data_down < numpy.size(data, 1):
if use_volt_not_raw:
avg_val = numpy.round((numpy.sum(data[num_pmt, ind_data_down: ind_data_up ]- min_val_volt_list[ct], axis=axis_sum))) #/len(data[0, ind_data_down: ind_data_up ]) - min_val_volt_list[ct])/(max_val_volt_list[ct] - min_val_volt_list[ct])*(max_value_pixel)) # it's faster to use sum rather than mean
else: # min_val_volt_list is in int16
if use_median:
avg_val = numpy.median(data[num_pmt, ind_data_down: ind_data_up ] - min_val_volt_list[ct], axis=axis_sum)/(max_val_volt_list[ct]-min_val_volt_list[ct])*max_value_pixel
else: # avg
avg_val = numpy.sum(data[num_pmt, ind_data_down: ind_data_up ]- min_val_volt_list[ct], axis=axis_sum) #numpy.round( /len(data[0, ind_data_down: ind_data_up ]) /(max_val_volt_list[ct]-min_val_volt_list[ct])*max_value_pixel # is in uint
if (avg_px and not use_median):
avg_val = round(avg_val/len(data[0, ind_data_down: ind_data_up ])/(max_val_volt_list[ct]-min_val_volt_list[ct])*max_value_pixel)
else:
avg_val = 0
return avg_val
else: # fast : many lines
[slice_data, oversampling, avg_px, slice_dead_samps, reshpr_samps_perps_ishg, reshpr_arr3D ] = pack_param
# # print('walou', data[num_pmt, slice_data].reshape(-1, round(oversampling))[:, slice_dead_samps].shape, oversampling, slice_dead_samps, reshpr_samps_perps_ishg, reshpr_arr3D, slice_data, data.shape )
# # prod = (len(data[0, :oversampling][ slice_dead_samps])*len(data[0, slice_data])/oversampling)
# # print(prod)
# # err
# # slice_dead_samps is used for ISHG, otherwise the whole vect
if use_volt_not_raw:
arr_xfast = numpy.round(numpy.sum((data[num_pmt, slice_data].reshape(-1, int(round(oversampling))))[:, slice_dead_samps].reshape(reshpr_samps_perps_ishg) - min_val_volt_list[ct], axis=1)).reshape(reshpr_arr3D) # slice_data = : # /(max_val_volt_list[ct]-min_val_volt_list[ct])*(max_value_pixel-1)
else:
# # print('!!', min_val_volt_list[ct], numpy.amin(data), slice_data, slice_dead_samps, reshpr_samps_perps_ishg, reshpr_arr3D)
# # reshpr_samps_perps_ishg is NONE if no ISHG !!!!
if use_median:
arr_xfast = (numpy.median((data[num_pmt, slice_data].reshape(-1, int(round(oversampling))))[:, slice_dead_samps].reshape(reshpr_samps_perps_ishg), axis=1)-min_val_volt_list[ct])/(max_val_volt_list[ct]-min_val_volt_list[ct])*max_value_pixel.reshape(reshpr_arr3D)
else: # avg or sum
# # print('ct!!!l59',num_pmt,slice_data, slice_dead_samps, reshpr_samps_perps_ishg, reshpr_arr3D) #"data.shape, ct, num_pmt, slice_data, slice_dead_samps, reshpr_samps_perps_ishg, reshpr_arr3D)
arr_xfast = numpy.sum((data[num_pmt, slice_data].reshape(-1, int(round(oversampling))))[:, slice_dead_samps].reshape(reshpr_samps_perps_ishg) - min_val_volt_list[ct], axis=1).reshape(reshpr_arr3D) # numpy.round((numpy.mean(data[num_pmt, slice_data].reshape(-1, round(oversampling)), axis=1)-min_val_volt_list[ct])/(max_val_volt_list[ct]-min_val_volt_list[ct])*((max_value_pixel-1)/2)).reshape(reshpr_arr3D) # is in uint
if (avg_px and not use_median):
fact = max_value_pixel/oversampling/(max_val_volt_list[ct]-min_val_volt_list[ct])
arr_xfast = numpy.round(arr_xfast*fact)
return arr_xfast
def fast_array_onebuffer(numpy, range_forloop_pmt, y_fast, array_Nd, missing_samps_atendnotright_bidirek, buffer_manylines_samesize, scan_mode, unidirectional, i, use_volt_not_raw, use_median, data, min_val_volt_list, max_val_volt_list, max_value_pixel, pack_param, st_i, st_i_b, end_i_b, nb_px_fast, nb_ph, frst_j, max_j, reshpr_arr3D, ind_pmt):
if array_Nd.ndim >= 4: # # ishg fast
a = list(reshpr_arr3D); a.append(nb_ph)
reshpr_arr3D = tuple(a) # # append to a tuple ...
pack_param[-1] = reshpr_arr3D
norm_dir = 1; st_ip = 0
if not unidirectional: # k bidirek only
if buffer_manylines_samesize: # static acq. or acq. with all buffer's line are the same size
if ((st_i+1) % 2): # st_i is even
st_ip = 0 #st_i
else: # st_i is odd
st_ip = 1 #st_i+1
else:
st_ip = 0 #st_i
if (missing_samps_atendnotright_bidirek and ((i % 2) or buffer_manylines_samesize)): # i is odd ( 1st line ...) DECREASING (bidirek)
norm_dir = 0
if norm_dir: # direct dir.
range_j = numpy.s_[frst_j:max_j] # # frst_j = 0 for standard
else: # reverse
range_j = numpy.s_[nb_px_fast-max_j: nb_px_fast-frst_j]
range_ii = numpy.s_[st_i_b:end_i_b] # range_j is range_j
if y_fast: # yfast
range_ii00 = range_ii; range_ii = range_j
range_j = range_ii00 #
ct = 0
for num_pmt in range_forloop_pmt:
# # print('num pmt', num_pmt)
if (len(range_forloop_pmt)==1 and len(num_pmt)==1): # only one PMT
num_pmt = 0
# # range_forloop_pmt = [0,1] for 2pmts with different offsets
arr_xfast = extract_onePXorline_from_buffer(numpy, True, use_volt_not_raw, use_median, data, num_pmt, pack_param, min_val_volt_list, max_val_volt_list, max_value_pixel, ct) # # true for fast
if (not unidirectional and ((i % 2) or buffer_manylines_samesize)): # i is odd ( 1st line ...) DECREASING (bidirek)
print('pmtsss', ind_pmt,arr_xfast.shape)
if arr_xfast.ndim >= 3: # # many PMTS
arr_xfast[ind_pmt, st_ip::2, :] = arr_xfast[ind_pmt, st_ip::2, ::-1] # step is the last el.
else: arr_xfast[ind_pmt, :] = arr_xfast[ind_pmt, ::-1] # step is the last el.
# # ind_pmt is here for indexing if 2 PMTs are used at same time, otherwise None
# # if len(reshpr_arr3D) >= 3: # # many PMTS
# # # # print(arr_xfast.shape, reshpr_arr3D)
# # arr_xfast = numpy.roll(arr_xfast, round(len(arr_xfast[0])/2), axis=2) #numpy.squeeze(arr_xfast) # takes time ??
#'''
# # print(range_j.shape, arr_xfast.shape, array_3d[num_pmt, st_i_b:end_i_b, range_j].shape, array_3d[num_pmt, st_i_b:end_i_b, :max_j].shape)
#'''
# # print('num pmt', num_pmt, arr_xfast.shape)
if y_fast:
arr_xfast = arr_xfast.T
# if indices overpass data's limits, it just return 0 (sum([]) = 0), no error raised
if array_Nd.ndim >= 4: # # ishg with one or many PMT
array_Nd[num_pmt, range_ii, range_j, :] = arr_xfast # # axis1 is phshft
else: # # standard
array_Nd[num_pmt, range_ii, range_j] = arr_xfast # array is passed by reference
ct+=1
# # a=numpy.amin(arr_xfast)
# # if a<0:
# # print('arr2!!', a)
def ishg_EOM_fill_func(nb_ph, nb_samps_perphsft, data, array_ishg_4d, ramp_offset_nbsamps_list, pack_ind , oversampling, ovrsmp_ph, num_pmt, meth_fast_fill, use_volt_not_raw, use_median, avg_px, min_val_volt_list, max_val_volt_list, max_value_pixel, ct, range_forloop_pmt, slice_dead_samps, reshpr_samps_perps_ishg, reshpr_arr3D, ind_pmt, numpy):
'''
contact <EMAIL>, or <EMAIL> to obtain this fast I-SHG fill function
'''
print('\n \n ERROR : contact code owner to unlock !')
raise(ValueError)
def func_resize_arr(kk, max_j_list, data, ovrsamp_temp, nblines, str_add, slice_data, tup_pb, num):
old_m = max_j_list[kk]
max_j_list[kk] = int(len(data[0, slice_data])/ovrsamp_temp/nblines) # by ref
print('warning: (%s, kk=%d) I had to decrease the number of columns to %d (was %d) because exposure_time*rate %s was over nb_samps in buffer!' % (num, kk, max_j_list[kk], old_m, str_add), tup_pb)
# # print(len(data[0, slice_data]), ovrsamp_temp,nblines)
return kk-1 # # redo this
def fill_array_scan_good2(avg_px, nb_px_fast, nb_lines_treat, oversampling, data, array_3d, array_ishg_4d, numpy, max_val_volt_list, st_i, end_i, max_j_list00, verbose, nb_pmt_channel, max_value_pixel, y_fast, unidirectional, method_fast, read_buffer_offset_direct, read_buffer_offset_reverse, min_val_volt_list, use_volt_not_raw, use_median, range_forloop_pmt, nb_el_line_list, scan_mode, expand_data_to_fit_line, missing_samps_atendnotright_bidirek, skip_behavior, ishg_EOM_AC_insamps):
'''
Used in static acq. (several full lines per packet)
Used in stage scan (line by line)
Used with galvos & line time measure (line by line)
'''
# # print('arr1!!',method_fast)
# # print('arr1!!', unidirectional, method_fast, read_buffer_offset_direct, read_buffer_offset_reverse)#numpy.amin(array_ishg_4d), max_j_list00)
max_j_list = max_j_list00 # list( # not to change max_j_list00 ?
ind_data_packet = 0
off_b = max_data_beg = 0
read_buffer_offset_reverse = read_buffer_offset_reverse*(-1) # -1 very important !!!
# # print('unidirectional', unidirectional, read_buffer_offset_direct, read_buffer_offset_reverse)
# # print('range_forloop_pmt', range_forloop_pmt, data.shape, numpy.mean(data[0]), numpy.mean(data[1]))
# # print('oversampling in fillloop', oversampling)
oversampling00 = oversampling # to keep
slice_dead_dflt = slice(None)
ishg_EOM_AC_flag = (ishg_EOM_AC_insamps[0] and array_ishg_4d is not None) # ishg_EOM_AC_insamps[0] ==2 --> array_ishg_4d None !!
# # print('ishg_EOM_AC_flag', ishg_EOM_AC_flag, array_3d, array_ishg_4d)
if ishg_EOM_AC_flag:
# # ishg_EOM_AC_insamps is [flag, nb_samps_ramp00, nb phsft, Vpi, VMax, nb_samps_perphsft, offset_samps, flag_impose_ramptime_as_exptime] with the times in nb smps !!
ramp_offset_nbsamps_list = ishg_EOM_AC_insamps[-2] # # list of offset, in samps
nb_ph = ishg_EOM_AC_insamps[2]; nb_samps_perphsft = ishg_EOM_AC_insamps[5]
ovrsmp_ph = ishg_EOM_AC_insamps[-1][1] # # oversampling for the phase-shifts, ramp_time00 + dead_time_begin + dead_time_end
# # only for FAST
# # see EOMph_nb_samps_phpixel_meth
end_slice = -ramp_offset_nbsamps_list[2] if ramp_offset_nbsamps_list[2] != 0 else None # no other way to say "end" as in Matlab
slice_dead_samps = numpy.s_[ramp_offset_nbsamps_list[1]+ramp_offset_nbsamps_list[0]:end_slice]
off_begline_eom = ramp_offset_nbsamps_list[3]
if off_begline_eom > 0: data = data[:, :, off_begline_eom:] if data.ndim == 3 else data[:, off_begline_eom:] # erase samples at beginning of lines (stabilization of eom)
else:
ramp_offset_nbsamps_list = [0, 0, 0, 0] # dflt
ovrsmp_ph = float('Inf') # very high value not to be considered
if (skip_behavior is not None and skip_behavior[2] is not None):
# # skip_behavior is [nb_skip, pause_trigger_diggalvo, callback_notmeasline, unirek_skip_half_of_lines]
nb_skip = skip_behavior[0]
read_duration_lines = not skip_behavior[2]
unirek_skip_half_of_lines = skip_behavior[-1]
else: read_duration_lines = unirek_skip_half_of_lines = nb_skip = False # # static, stage, or mode without sync
buffer_manylines_samesize = (scan_mode == -1 or (scan_mode == -2 and not read_duration_lines) or (scan_mode == 1 and skip_behavior[1] and not read_duration_lines)) # # static or anlg galvos with callback each lines (no measure)
dim1 = 2 if data.ndim == 3 else 1
# print('dim1', max_j_list[0])
if numpy.size(data, dim1) < round(oversampling)*max_j_list[0]: # not enough samples
if data.ndim == | |
<filename>PhotochemPy/PhotochemPy.py
import numpy as np
from ._photochem import photochem, photochem_data, photochem_vars, photochem_wrk
import sys
import os
# Absolute path of the installed package directory (with trailing slash).
rootdir = os.path.dirname(os.path.realpath(__file__))+'/'
# Pad to a fixed width of 500 characters — presumably to match a fixed-length
# character variable on the Fortran side of _photochem; TODO confirm.
photochem_vars.rootdir = "{:500}".format(rootdir)
class PhotochemError(Exception):
    """Raised when the compiled Fortran routines report an error, or when
    results are requested before the model has been set up / integrated."""
    pass
class PhotochemPy:
'''
The PhotochemPy class.
:ivar photo: (object) Compiled Fortran module "photochem". It has many methods and attributes.
:ivar nq: Number of long lived species
:ivar np: Number of particles
:ivar isl: Number of short lived species
    :ivar ispec: List of species names
:ivar nsp: Total number of species
:ivar nr: Number of reactions
:ivar ks: Number of photolysis species
:ivar kj: Number of photolysis reactions
:ivar species_dat: Name of the input species file.
:ivar reactions_rx: Name of the input reactions file.
:ivar set_file: Name of the input settings file.
:ivar atmosphere_txt: Name of the input atmosphere file.
:ivar flux_txt: Name of the input solar flux file.
    :ivar code_run: If True/False then code has converged/has not converged to equilibrium.
To import and initialize the PhotochemPy class do the following
.. highlight:: python
.. code-block:: python
from PhotochemPy import PhotochemPy
pc = PhotochemPy(species_dat, reactions_rx, set_file, atmosphere_txt, flux_txt)
Parameters
----------
species_dat : string
Path to input file describing the species in the photochemical model,
and their boundary conditions.
reactions_rx : string
Path to input file describing the reactions in the atmosphere and
their rates.
set_file : string
Path to input file describing settings.
atmosphere_txt : string
Path to input file describing the initial atmospheric composition,
        temperature structure, eddy diffusion profile, and aerosol parameters.
flux_txt : string
Path to input file describing the stellar flux
'''
def __init__(self,species_dat,reactions_rx, set_file,\
atmosphere_txt, flux_txt):
self.photo = photochem
self.data = photochem_data
self.vars = photochem_vars
self.wrk = photochem_wrk
self.warnings = True
if all(fil==None for fil in [species_dat,reactions_rx, \
set_file, atmosphere_txt, flux_txt]):
pass
else:
self.species_dat = species_dat
self.reactions_rx = reactions_rx
self.set_file = set_file
self.atmosphere_txt = atmosphere_txt
self.flux_txt = flux_txt
# get species names
fil = open(species_dat,'r')
lines = fil.readlines()
fil.close()
self.ispec = []
for line in lines:
if line[0]=='*':
pass
else:
if line.split()[1] == 'LL':
self.ispec.append(line.split()[0])
if line.split()[1] == 'SL':
self.ispec.append(line.split()[0])
if line.split()[1] == 'IN':
self.background_spec = line.split()[0]
self.ispec.append(line.split()[0])
self.ispec.append('HV')
self.ispec.append('M')
err = self.photo.setup(species_dat, \
reactions_rx, \
set_file, \
atmosphere_txt, \
flux_txt)
if len(err.strip()) > 0:
raise PhotochemError(err.decode("utf-8").strip())
self.code_run = False
self.test_for_reproducibility()
def setup(self,species_dat,reactions_rx,set_file,\
atmosphere_txt, flux_txt):
'''
In you initialize PhotochemPy with all `None` arguments, then you can run
This to set up the atmospheres afterwords. This is necessary for some parallel
applications (pickling errors).
Parameters
----------
species_dat : string
Path to input file describing the species in the photochemical model,
and their boundary conditions.
reactions_rx : string
Path to input file describing the reactions in the atmosphere and
their rates.
set_file : string
Path to input file describing the settings.
atmosphere_txt : string
Path to input file describing the initial atmospheric composition,
temperature structure, eddy diffusion profile, and aersol parameters.
flux_txt : string
Path to input file describing the stellar flux
'''
self.species_dat = species_dat
self.reactions_rx = reactions_rx
self.set_file = set_file
self.atmosphere_txt = atmosphere_txt
self.flux_txt = flux_txt
# get species names
fil = open(species_dat,'r')
lines = fil.readlines()
fil.close()
self.ispec = []
for line in lines:
if line[0]=='*':
pass
else:
if line.split()[1] == 'LL':
self.ispec.append(line.split()[0])
if line.split()[1] == 'SL':
self.ispec.append(line.split()[0])
if line.split()[1] == 'IN':
self.background_spec = line.split()[0]
self.ispec.append(line.split()[0])
self.ispec.append('HV')
self.ispec.append('M')
err = self.photo.setup(species_dat, \
reactions_rx, \
set_file, \
atmosphere_txt, \
flux_txt)
if len(err.strip()) > 0:
raise PhotochemError(err.decode("utf-8").strip())
self.code_run = False
self.test_for_reproducibility()
def test_for_reproducibility(self):
u0 = self.vars.usol_init.flatten(order='F').copy()
u1 = self.vars.usol_init.flatten(order='F').copy()*2.0
self.right_hand_side(0,u0)
rhs1 = self.right_hand_side(0,u1)
rhs2 = self.right_hand_side(0,u1)
should_be_true = np.all(np.isclose(rhs1,rhs2,rtol=1.0e-8,atol=1.0e-30))
if not should_be_true:
raise PhotochemError("There is a problem with the right-hand-side. "+\
"Two calls with the same inputs gave different results.")
def integrate(self,nsteps=1000,method='Backward_Euler',rtol = 1e-3, atol = 1e-27, fast_and_loose = True):
'''
Integrates atomsphere to photochemical equilibrium using the backward
Euler method.
Parameters
----------
nsteps : integer, optional
The number of steps the integrator takes to find photochemical
equilibrium. The default value is 1000.
Returns
-------
converged : bool
If True, then the code converged to equilibrium. If False,
the code did not converge.
'''
if method == "CVODE_BDF":
self.vars.max_cvode_steps = nsteps
converged, err = self.photo.cvode_equilibrium(rtol,atol,fast_and_loose)
if len(err.strip()) > 0:
raise PhotochemError(err.decode("utf-8").strip())
elif method == "Backward_Euler":
converged, err = self.photo.integrate(nsteps)
if len(err.strip()) > 0:
raise PhotochemError(err.decode("utf-8").strip())
if not converged:
self.code_run = False
else:
self.code_run = True
# check redox conservation
if np.abs(self.vars.redox_factor) > 1e-3 and self.warnings:
print('Warning, redox conservation is not very good.')
print('redox factor =','%.2e'%self.vars.redox_factor)
# check for mixing ratios greater than 1
if np.max(self.vars.usol_out) > 1 and self.warnings:
print('Warning, some mixing ratios are greater than 1.')
return self.code_run
def evolve(self,t0,usol_start,t_eval,rtol = 1.0e-3, atol= 1e-27, nsteps = 1000000, \
fast_and_loose = True, outfile = None, overwrite = False, amount2save = 1):
"""Evolves the atmosphere with the CVODE BDF integrator from Sundials.
Parameters
----------
t0 : float
Starting time (s)
usol_start : Array{float,2}
Initial conditions. nq by nz array of atmospheric mixing ratios.
t_eval : Vector{float}
Times to evaluate the solution (s)
rtol : float
Relative tolerance. Probably don't go higher than 1e-3.
atol : float
Absolute tolerance. About 1e-25 works well for rtol=1e-3.
For low rtol (~1e-5) then use rtol=~1e-30.
fast_and_loose : bool
If 1, then will use a fast approximation to the jacobian.
If 0, then CVODE will compute a more accurate jacobian (slowly).
outfile : string
If a file path is given, the the solution will be appended to the file "outfile"
throughout the simulation. If this is used, then None is returned
Returns
-------
solution : Array{float,3}
Array of dimension [len(t_eval),nq,nz] containing mixing ratios of the atmosphere at
each time.
"""
if usol_start.shape != (self.data.nq, self.data.nz):
raise PhotochemError('usol_start is the wrong shape')
self.vars.max_cvode_steps = nsteps
# in this case num_sol = len(t_eval)
if outfile == None:
num_sol = len(t_eval)
solution, success, err = self.photo.cvode(t0,usol_start,t_eval,rtol,atol,fast_and_loose)
if len(err.strip()) > 0:
raise PhotochemError(err.decode("utf-8").strip())
return solution
else:
if os.path.isfile(outfile) and not overwrite:
raise PhotochemError(outfile,' is already a file.')
success, err = self.photo.cvode_save(t0,usol_start,t_eval,rtol,atol,fast_and_loose,outfile,amount2save)
if len(err.strip()) > 0:
raise PhotochemError(err.decode("utf-8").strip())
if not success:
raise PhotochemError('CVODE returned an error.')
return None
def out_dict(self):
'''
Makes a dictionary of the atmosphere after integration to photochemical
equilibrium
Returns
-------
out : dict
Dictionary containing the mixing ratio of all species in the atmosphere,
temperature structure, total pressure, and total number density.
Raises
------
SystemExit
When photochemical model has not been integrated to equilibrium.
'''
if not self.code_run:
raise PhotochemError('Need to integrate before outputting a solution!')
elif self.code_run:
out = {}
out['alt'] = self.data.z/1e5
out['den'] = self.vars.den
out['press'] = self.vars.p
out['T'] = self.vars.t
for i in range(self.data.nq):
out[self.ispec[i]] = self.vars.usol_out[i,:]
for i in range(self.data.nq,self.data.nq+self.data.isl):
out[self.ispec[i]] = self.wrk.d[i]/self.vars.den
out[self.ispec[-3]] = self.wrk.d[-3]/self.vars.den
out[self.ispec[-2]] = self.wrk.d[-2]/self.vars.den
out[self.ispec[-1]] = self.wrk.d[-1]/self.vars.den
return out
def in_dict(self):
'''
Makes a dictionary of the atmosphere before integration to photochemical
equilibrium. This is typically the atmosphere described in the input file
atmosphere_txt, unless the input atmosphere has been changed with the out2in
method.
Returns
-------
out : dict
Dictionary containing the mixing ratio of all species in the input atmosphere,
temperature structure, total pressure, and total number density.
'''
out = {}
out['alt'] = self.data.z/1e5
out['den'] = self.vars.den
out['press'] = self.vars.p
out['T'] = self.vars.t
for i in range(self.data.nq):
out[self.ispec[i]] = self.vars.usol_init[i,:]
for i in range(self.data.nq,self.data.nq+self.data.isl):
out[self.ispec[i]] = self.wrk.d[i]/self.vars.den
out[self.ispec[-1]] = self.wrk.d[-3]/self.vars.den
return out
def surf_flux(self):
'''
Makes dictionary of the surface fluxes of each species at photochemical
equilibrium.
Returns
-------
out : dict
Surface flux of each species in the model in molecules/cm2/s. Positive
flux means a flux into the atmosphere.
Raises
------
SystemExit
When photochemical model has not been integrated to equilibrium.
'''
if not self.code_run:
raise PhotochemError('Need to integrate before outputing surface flux!')
elif self.code_run:
out = {}
for i in range(self.data.nq):
out[self.ispec[i]] = self.vars.flow[i]
return out
def reset(self):
'''
Resets the problem by reading in the original input files (e.g. species_dat, ...)
'''
err = self.photo.setup(self.species_dat, \
self.reactions_rx, \
self.set_file, | |
def test_get_cmd_in_config(self):
fake_conf = utils.InstanceConfig(
service='',
cluster='',
instance='',
config_dict={'cmd': 'FAKECMD'},
branch_dict=None,
)
assert fake_conf.get_cmd() == 'FAKECMD'
def test_get_env_default(self):
fake_conf = utils.InstanceConfig(
service='fake_service',
cluster='fake_cluster',
instance='fake_instance',
config_dict={},
branch_dict=None,
)
assert fake_conf.get_env() == {
'PAASTA_SERVICE': 'fake_service',
'PAASTA_INSTANCE': 'fake_instance',
'PAASTA_CLUSTER': 'fake_cluster',
'PAASTA_DEPLOY_GROUP': 'fake_cluster.fake_instance',
'PAASTA_DOCKER_IMAGE': '',
}
def test_get_env_with_config(self):
fake_conf = utils.InstanceConfig(
service='',
cluster='',
instance='',
config_dict={
'env': {'SPECIAL_ENV': 'TRUE'},
'deploy_group': 'fake_deploy_group',
'monitoring': {'team': 'generic_team'},
},
branch_dict={'docker_image': 'something'},
)
assert fake_conf.get_env() == {
'SPECIAL_ENV': 'TRUE',
'PAASTA_SERVICE': '',
'PAASTA_INSTANCE': '',
'PAASTA_CLUSTER': '',
'PAASTA_DEPLOY_GROUP': 'fake_deploy_group',
'PAASTA_DOCKER_IMAGE': 'something',
'PAASTA_MONITORING_TEAM': 'generic_team',
}
def test_get_args_default_no_cmd(self):
fake_conf = utils.InstanceConfig(
service='',
cluster='',
instance='',
config_dict={},
branch_dict=None,
)
assert fake_conf.get_args() == []
def test_get_args_default_with_cmd(self):
fake_conf = utils.InstanceConfig(
service='',
cluster='',
instance='',
config_dict={'cmd': 'FAKECMD'},
branch_dict=None,
)
assert fake_conf.get_args() is None
def test_get_args_in_config(self):
fake_conf = utils.InstanceConfig(
service='',
cluster='',
instance='',
config_dict={'args': ['arg1', 'arg2']},
branch_dict=None,
)
assert fake_conf.get_args() == ['arg1', 'arg2']
def test_get_args_in_config_with_cmd(self):
fake_conf = utils.InstanceConfig(
service='',
cluster='',
instance='',
config_dict={'args': ['A'], 'cmd': 'C'},
branch_dict=None,
)
fake_conf.get_cmd()
with raises(utils.InvalidInstanceConfig):
fake_conf.get_args()
def test_get_force_bounce(self):
fake_conf = utils.InstanceConfig(
service='',
cluster='',
instance='',
config_dict={},
branch_dict={'force_bounce': 'blurp'},
)
assert fake_conf.get_force_bounce() == 'blurp'
def test_get_desired_state(self):
fake_conf = utils.InstanceConfig(
service='',
cluster='',
instance='',
config_dict={},
branch_dict={'desired_state': 'stop'},
)
assert fake_conf.get_desired_state() == 'stop'
def test_monitoring_blacklist_default(self):
fake_conf = utils.InstanceConfig(
service='',
cluster='',
instance='',
config_dict={},
branch_dict=None,
)
assert fake_conf.get_monitoring_blacklist(system_deploy_blacklist=[]) == []
def test_monitoring_blacklist_defaults_to_deploy_blacklist(self):
fake_deploy_blacklist = [("region", "fake_region")]
fake_conf = utils.InstanceConfig(
service='',
cluster='',
instance='',
config_dict={'deploy_blacklist': fake_deploy_blacklist},
branch_dict=None,
)
assert fake_conf.get_monitoring_blacklist(system_deploy_blacklist=[]) == fake_deploy_blacklist
def test_deploy_blacklist_default(self):
fake_conf = utils.InstanceConfig(
service='',
cluster='',
instance='',
config_dict={},
branch_dict=None,
)
assert fake_conf.get_deploy_blacklist() == []
def test_deploy_blacklist_reads_blacklist(self):
fake_deploy_blacklist = [("region", "fake_region")]
fake_conf = utils.InstanceConfig(
service='',
cluster='',
instance='',
config_dict={'deploy_blacklist': fake_deploy_blacklist},
branch_dict=None,
)
assert fake_conf.get_deploy_blacklist() == fake_deploy_blacklist
def test_extra_volumes_default(self):
fake_conf = utils.InstanceConfig(
service='',
cluster='',
instance='',
config_dict={},
branch_dict=None,
)
assert fake_conf.get_extra_volumes() == []
def test_extra_volumes_normal(self):
fake_extra_volumes: List[utils.DockerVolume] = [
{
"containerPath": "/etc/a",
"hostPath": "/var/data/a",
"mode": "RO",
},
]
fake_conf = utils.InstanceConfig(
service='',
cluster='',
instance='',
config_dict={'extra_volumes': fake_extra_volumes},
branch_dict=None,
)
assert fake_conf.get_extra_volumes() == fake_extra_volumes
def test_get_pool(self):
pool = "poolname"
fake_conf = utils.InstanceConfig(
service='',
cluster='',
instance='',
config_dict={'pool': pool},
branch_dict=None,
)
assert fake_conf.get_pool() == pool
def test_get_pool_default(self):
fake_conf = utils.InstanceConfig(
service='',
cluster='',
instance='',
config_dict={},
branch_dict=None,
)
assert fake_conf.get_pool() == 'default'
def test_get_volumes_dedupes_correctly_when_mode_differs_last_wins(self):
fake_conf = utils.InstanceConfig(
service='',
cluster='',
instance='',
config_dict={
'extra_volumes': [
{"containerPath": "/a", "hostPath": "/a", "mode": "RW"},
{"containerPath": "/a", "hostPath": "/a", "mode": "RO"},
],
},
branch_dict=None,
)
system_volumes: List[utils.DockerVolume] = []
assert fake_conf.get_volumes(system_volumes) == [
{"containerPath": "/a", "hostPath": "/a", "mode": "RO"},
]
def test_get_volumes_dedupes_respects_hostpath(self):
fake_conf = utils.InstanceConfig(
service='',
cluster='',
instance='',
config_dict={
'extra_volumes': [
{"containerPath": "/a", "hostPath": "/a", "mode": "RO"},
{"containerPath": "/a", "hostPath": "/other_a", "mode": "RO"},
],
},
branch_dict=None,
)
system_volumes: List[utils.DockerVolume] = [{"containerPath": "/a", "hostPath": "/a", "mode": "RO"}]
assert fake_conf.get_volumes(system_volumes) == [
{"containerPath": "/a", "hostPath": "/a", "mode": "RO"},
{"containerPath": "/a", "hostPath": "/other_a", "mode": "RO"},
]
def test_get_volumes_handles_dupes_everywhere(self):
fake_conf = utils.InstanceConfig(
service='',
cluster='',
instance='',
config_dict={
'extra_volumes': [
{"containerPath": "/a", "hostPath": "/a", "mode": "RO"},
{"containerPath": "/b", "hostPath": "/b", "mode": "RO"},
{"containerPath": "/c", "hostPath": "/c", "mode": "RO"},
],
},
branch_dict=None,
)
system_volumes: List[utils.DockerVolume] = [
{"containerPath": "/a", "hostPath": "/a", "mode": "RO"},
{"containerPath": "/b", "hostPath": "/b", "mode": "RO"},
{"containerPath": "/d", "hostPath": "/d", "mode": "RO"},
]
assert fake_conf.get_volumes(system_volumes) == [
{"containerPath": "/a", "hostPath": "/a", "mode": "RO"},
{"containerPath": "/b", "hostPath": "/b", "mode": "RO"},
{"containerPath": "/c", "hostPath": "/c", "mode": "RO"},
{"containerPath": "/d", "hostPath": "/d", "mode": "RO"},
]
def test_get_volumes_prefers_extra_volumes_over_system(self):
fake_conf = utils.InstanceConfig(
service='',
cluster='',
instance='',
config_dict={
'extra_volumes': [
{"containerPath": "/a", "hostPath": "/a", "mode": "RW"},
],
},
branch_dict=None,
)
system_volumes: List[utils.DockerVolume] = [
{"containerPath": "/a", "hostPath": "/a", "mode": "RO"},
]
assert fake_conf.get_volumes(system_volumes) == [
{"containerPath": "/a", "hostPath": "/a", "mode": "RW"},
]
def test_get_volumes_handles_dupes_with_trailing_slashes(self):
fake_conf = utils.InstanceConfig(
service='',
cluster='',
instance='',
config_dict={
'extra_volumes': [
{"containerPath": "/a", "hostPath": "/a", "mode": "RO"},
{"containerPath": "/b", "hostPath": "/b", "mode": "RO"},
],
},
branch_dict=None,
)
system_volumes: List[utils.DockerVolume] = [
{"containerPath": "/a", "hostPath": "/a", "mode": "RO"},
{"containerPath": "/b/", "hostPath": "/b/", "mode": "RO"},
]
# note: prefers extra_volumes over system_volumes
assert fake_conf.get_volumes(system_volumes) == [
{"containerPath": "/a", "hostPath": "/a", "mode": "RO"},
{"containerPath": "/b", "hostPath": "/b", "mode": "RO"},
]
def test_get_volumes_preserves_trailing_slash(self):
fake_conf = utils.InstanceConfig(
service='',
cluster='',
instance='',
config_dict={
'extra_volumes': [
{"containerPath": "/a/", "hostPath": "/a/", "mode": "RW"},
],
},
branch_dict=None,
)
system_volumes: List[utils.DockerVolume] = [
{"containerPath": "/b/", "hostPath": "/b/", "mode": "RW"},
]
assert fake_conf.get_volumes(system_volumes) == [
{"containerPath": "/a/", "hostPath": "/a/", "mode": "RW"},
{"containerPath": "/b/", "hostPath": "/b/", "mode": "RW"},
]
def test_get_docker_url_no_error(self):
fake_registry = "im.a-real.vm"
fake_image = "and-i-can-run:1.0"
fake_conf = utils.InstanceConfig(
service='',
cluster='',
instance='',
config_dict={},
branch_dict=None,
)
with mock.patch(
'paasta_tools.utils.InstanceConfig.get_docker_registry', autospec=True,
return_value=fake_registry,
), mock.patch(
'paasta_tools.utils.InstanceConfig.get_docker_image', autospec=True,
return_value=fake_image,
):
expected_url = f"{fake_registry}/{fake_image}"
assert fake_conf.get_docker_url() == expected_url
@pytest.mark.parametrize(
('dependencies_reference', 'dependencies', 'expected'), [
(None, None, None),
('aaa', None, None),
('aaa', {}, None),
('aaa', {"aaa": [{"foo": "bar"}]}, {"foo": "bar"}),
('aaa', {"bbb": [{"foo": "bar"}]}, None),
],
)
def test_get_dependencies(self, dependencies_reference, dependencies, expected):
fake_conf = utils.InstanceConfig(
service='',
cluster='',
instance='',
config_dict={
'dependencies_reference': dependencies_reference,
'dependencies': dependencies,
},
branch_dict=None,
)
fake_conf.get_dependencies() == expected
@pytest.mark.parametrize(
('security', 'expected'), [
({}, None),
(None, None),
({"outbound_firewall": "monitor"}, 'monitor'),
({"outbound_firewall": "foo"}, 'foo'),
],
)
def test_get_outbound_firewall(self, security, expected):
fake_conf = utils.InstanceConfig(
service='',
cluster='',
instance='',
config_dict={'security': security},
branch_dict=None,
)
fake_conf.get_outbound_firewall() == expected
@pytest.mark.parametrize(
('security', 'expected'), [
({}, (True, '')),
({"outbound_firewall": "monitor"}, (True, '')),
({"outbound_firewall": "block"}, (True, '')),
({"outbound_firewall": "foo"}, (False, 'Unrecognized outbound_firewall value "foo"')),
(
{"outbound_firewall": "monitor", "foo": 1},
(False, 'Unrecognized items in security dict of service config: "foo"'),
),
],
)
def test_check_security(self, security, expected):
fake_conf = utils.InstanceConfig(
service='',
cluster='',
instance='',
config_dict={'security': security},
branch_dict=None,
)
assert fake_conf.check_security() == expected
@pytest.mark.parametrize(
('dependencies_reference', 'dependencies', 'expected'), [
(None, None, (True, '')),
('aaa', {"aaa": []}, (True, '')),
('aaa', None, (False, 'dependencies_reference "aaa" declared but no dependencies found')),
('aaa', {"bbb": []}, (False, 'dependencies_reference "aaa" not found in dependencies dictionary')),
],
)
def test_check_dependencies_reference(self, dependencies_reference, dependencies, expected):
fake_conf = utils.InstanceConfig(
service='',
cluster='',
instance='',
config_dict={
'dependencies_reference': dependencies_reference,
'dependencies': dependencies,
},
branch_dict=None,
)
assert fake_conf.check_dependencies_reference() == expected
def test_is_under_replicated_ok():
    """A fully replicated service is healthy (100% availability)."""
    assert utils.is_under_replicated(1, 1, 50) == (False, 100.0)
def test_is_under_replicated_zero():
    """Zero expected instances never counts as under-replicated."""
    assert utils.is_under_replicated(1, 0, 50) == (False, 100.0)
def test_is_under_replicated_critical():
    """Zero of one available is below a 50% threshold (0% availability)."""
    assert utils.is_under_replicated(0, 1, 50) == (True, 0.0)
def test_deploy_blacklist_to_constraints():
    """Each blacklist pair becomes an UNLIKE constraint triple."""
    blacklist = [("region", "useast1-prod"), ("habitat", "fake_habitat")]
    assert utils.deploy_blacklist_to_constraints(blacklist) == [
        ["region", "UNLIKE", "useast1-prod"],
        ["habitat", "UNLIKE", "fake_habitat"],
    ]
def test_validate_service_instance_valid_marathon():
    """A (service, instance) pair found among marathon jobs validates as 'marathon'."""
    marathon_jobs = [('service1', 'main'), ('service2', 'main')]
    chronos_jobs = [('service1', 'worker'), ('service2', 'tailer')]
    with mock.patch(
        'paasta_tools.utils.get_services_for_cluster',
        autospec=True,
        side_effect=[marathon_jobs, chronos_jobs],
    ) as mock_get_services:
        result = utils.validate_service_instance(
            'service1', 'main', 'fake_cluster', 'fake_soa_dir',
        )
        assert result == 'marathon'
        assert mock.call(
            cluster='fake_cluster',
            instance_type='marathon',
            soa_dir='fake_soa_dir',
        ) in mock_get_services.call_args_list
def test_validate_service_instance_valid_chronos():
    """A (service, instance) pair found among chronos jobs validates as 'chronos'."""
    marathon_jobs = [('service1', 'main'), ('service2', 'main')]
    chronos_jobs = [('service1', 'worker'), ('service2', 'tailer')]
    with mock.patch(
        'paasta_tools.utils.get_services_for_cluster',
        autospec=True,
        side_effect=[marathon_jobs, chronos_jobs],
    ) as mock_get_services:
        result = utils.validate_service_instance(
            'service1', 'worker', 'fake_cluster', 'fake_soa_dir',
        )
        assert result == 'chronos'
        assert mock.call(
            cluster='fake_cluster',
            instance_type='chronos',
            soa_dir='fake_soa_dir',
        ) in mock_get_services.call_args_list
def test_validate_service_instance_invalid():
    """An unknown service/instance raises NoConfigurationForServiceError."""
    known_jobs = [
        [('service1', 'main'), ('service2', 'main')],                 # marathon
        [('service1', 'worker'), ('service2', 'tailer')],             # chronos
        [('service1', 'main2'), ('service2', 'main2')],               # paasta-native
        [('service1', 'interactive'), ('service2', 'interactive')],   # adhoc
    ]
    with mock.patch(
        'paasta_tools.utils.get_services_for_cluster',
        autospec=True,
        side_effect=known_jobs,
    ):
        with raises(utils.NoConfigurationForServiceError):
            utils.validate_service_instance(
                'bad_service', 'main', 'fake_cluster', 'fake_soa_dir',
            )
def test_terminal_len():
    """ANSI color escape codes do not count toward terminal length."""
    colored = utils.PaastaColors.red('some text')
    assert utils.terminal_len(colored) == len('some text')
def test_format_table():
    """Columns are padded to the widest cell; min_spacing widens the gap."""
    rows = [
        ['looooong', 'y', 'z'],
        ['a', 'looooong', 'c'],
        ['j', 'k', 'looooong'],
    ]
    assert utils.format_table(rows) == [
        'looooong y z',
        'a looooong c',
        'j k looooong',
    ]
    assert utils.format_table([['a', 'b', 'c']], min_spacing=5) == ["a b c"]
def test_format_table_with_interjected_lines():
    """Plain-string rows are passed through between formatted rows."""
    rows = [
        ['looooong', 'y', 'z'],
        'interjection',
        ['a', 'looooong', 'c'],
        'unicode interjection',
        ['j', 'k', 'looooong'],
    ]
    assert utils.format_table(rows) == [
        'looooong y z',
        'interjection',
        'a looooong c',
        'unicode interjection',
        'j k looooong',
    ]
def test_format_table_all_strings():
    """Rows that are plain strings pass through unchanged."""
    assert utils.format_table(['foo', 'bar', 'baz']) == ['foo', 'bar', 'baz']
def test_parse_timestamp():
    """Compact timestamps (YYYYMMDDTHHMMSS) parse to naive datetimes."""
    assert utils.parse_timestamp('19700101T000000') == datetime.datetime(1970, 1, 1, 0, 0, 0)
def test_null_log_writer():
    """Basic smoke test: NullLogWriter.log accepts anything and does nothing."""
    writer = utils.NullLogWriter(driver='null')
    writer.log('fake_service', 'fake_line', 'build', 'BOGUS_LEVEL')
class TestFileLogWriter:
def test_smoke(self):
"""Smoke test | |
<reponame>pablobesada/tw
#encoding: utf-8
import re
import pymongo
class ProductMatch(object):
    # Holds one brand/product mention detected in a piece of text, together
    # with where it matched and which campaign/rule produced it.
    def __init__(self):
        self.brand = ""
        self.product = ""
        self.sentiment = None
        self.brand_matched_word = ""
        self.brand_matched_pos = (-1, -1)  # (start, end) character offsets in source
        self.product_matched_word = ""
        self.product_matched_pos = (-1, -1)
        self.source = ""  # the full text the match was found in
        # NOTE(review): spelled "patten" here and in getDictionary(), but
        # BrandClassifier.extract() assigns pm.pattern (a different
        # attribute) — confirm which spelling is intended.
        self.patten = ""
        self.rule = ""
        self.campaign_id = ""
        self.campaign_name = ""
        self.account_id = ""
        self.account_name = ""
        self.confidence = 0
    def __unicode__(self):
        # Python 2 unicode representation.
        return u"<Brand: %s, Product: %s>" % (self.brand, self.product)
    def getDetail(self):
        # Human-readable summary with up to `ctx` characters of surrounding
        # context for each matched word.
        ctx = 10
        res = u"Brand: %s, match: %s, context: %s" % (self.brand, self.brand_matched_word, self.source[(self.brand_matched_pos[0]-ctx):(self.brand_matched_pos[1]+ctx)])
        res = res + "\nProduct: %s, match: %s, context: %s" % (self.product, self.product_matched_word, self.source[(self.product_matched_pos[0]-ctx):(self.product_matched_pos[1]+ctx)])
        return res
    def getDictionary(self):
        # Serialize the match to a plain dict of its public fields.
        res = {}
        for k in ("brand","product","sentiment","brand_matched_word","brand_matched_pos","product_matched_word","product_matched_pos","source","patten","rule","campaign_id","campaign_name","account_id","account_name", "confidence"):
            res[k] = self.__getattribute__(k)
        return res
# Confidence clue lists: each entry is a tuple whose first element is the
# score added per occurrence and whose remaining elements are (Spanish)
# trigger words. Consumed by BrandClassifier.calculateConfidence.
JUGO_CONFIDENCE_CLUES = [(5, "juguito", "juguitos", "jugo", "jugos", "tomas", u"tomás", u"tomá", "toma", "tomando", "tomar", "tome", u"tomé", "tomen", "beber", "vaso", "jarra")]
# BUG FIX: a missing comma between "campeonato" and "campeonatos" caused
# implicit string concatenation, producing the never-matching clue
# "campeonatocampeonatos" and dropping both intended words.
FUTBOL_CONFIDENCE_CLUES = [(100, "club", "plantilla", "torneo", "torneos", "campeonato", "campeonatos", "campeon", u"campeón", "campeones", "local", "visitante", "locales", "visitantes", "contra", "vs", "entrada", "entradas", "ganarle", "ganamos", "dirigente", "dirigentes", "ganaron", "perdieron", "empataron", "empate", "empaten", "empatamos", "hincha", "hinchas", "jugar", "futbol", u"fútbol", "jugador", "jugadores", "ganando", "perdiendo", "perder","titular", "titulares", "suplente", "suplentes", "tecnico", u"técnico", "dt", "plantel", "enfrentamiento", "enfrentamientos", "equipo", "equipos", "partido", "cancha", "estadio", "derrota", "derrotas", "victoria", "victorias","ganar", "previa")]
AVION_CONFIDENCE_CLUES = [(5, "air", u"avión", "aviones","aerolinea", u"aerolínea", "aerolineas", u"aerolíneas", "vuelo", "vuelos", "vuelen", "vuela", "volar", "volara", "volare", u"volaré", u"volará", "volaran", u"volarán", "aeropuerto", "pasaje", "pasajes", "ticket", "tickets", "aereo", "@iberia")]
class BrandClassifier(object):
#name = "Brand"
#brands= ["Brand"]
#products = ["Product 1", "Product 2", {u"Product 3": ["prod3", u"producto 3"]}]
def __str__(self):
return ""
    def __init__(self):
        # Campaign/account metadata copied onto every ProductMatch produced.
        self.campaign_id = ""
        self.campaign_name = ""
        self.brand_regexps = [] # list of (regexp, rule) tuples
        self.product_regexps = {} # dict: product -> list of (regexp, rule) tuples
        self.name = ""  # brand name; may be a str or a dict of aliases (see getBrandLookupWords)
        self.products = {}
        self.product_list = []
        self.brand_confidence_clues = []
        self.product_confidence_clues = {}
        self.pld_counter = 1  # running counter used to name PLD_<n> regex groups
        self.bld_counter = 1  # running counter used to name BLD_<n> regex groups
        self.brandLookupDict = {}  # regex group name -> canonical brand name
        self.rule = ""
    def getProductLookupWords(self):
        """Build named-group regex fragments for every product word.

        Each product (or alias) becomes a fragment "(?P<PLD_n>word+)" and
        PLD_n is recorded in self.productLookupDict so a match group can be
        mapped back to its canonical product name. Products may be plain
        strings or dicts mapping a canonical name to a string/list of
        aliases. NOTE: uses `basestring`, so this is Python 2 code.
        """
        self.productLookupDict = {}
        res = []
        for p in self.products:
            if isinstance(p, basestring):
                res.append("(?P<PLD_%s>%s+)" % (self.pld_counter,p))
                self.productLookupDict["PLD_%s"%self.pld_counter] = p
                self.pld_counter += 1
            elif isinstance(p, dict):
                for k,v in p.items():
                    # Canonical name itself is matchable...
                    res.append("(?P<PLD_%s>%s+)" % (self.pld_counter, k))
                    self.productLookupDict["PLD_%s"%self.pld_counter] = k
                    self.pld_counter += 1
                    # ...and every alias maps back to the canonical name k.
                    if isinstance(v, basestring):
                        res.append("(?P<PLD_%s>%s+)" % (self.pld_counter,v))
                        self.productLookupDict["PLD_%s"%self.pld_counter] = k
                        self.pld_counter += 1
                    elif isinstance(v, list):
                        for w in v:
                            res.append("(?P<PLD_%s>%s+)" % (self.pld_counter,w))
                            self.productLookupDict["PLD_%s"%self.pld_counter] = k
                            self.pld_counter += 1
        return res
    def getBrandLookupWords(self):
        """Build named-group regex fragments for the brand name/aliases.

        Mirrors getProductLookupWords but for self.name, recording group
        names in self.brandLookupDict. Unlike the product variant, the
        lookup dict is NOT reset here (it is initialized in __init__).
        NOTE: uses `basestring`, so this is Python 2 code.
        """
        res = []
        p = self.name
        if isinstance(p, basestring):
            res.append("(?P<BLD_%s>%s+)" % (self.bld_counter,p))
            self.brandLookupDict["BLD_%s"%self.bld_counter] = p
            self.bld_counter += 1
        elif isinstance(p, dict):
            for k,v in p.items():
                # Canonical brand name itself is matchable...
                res.append("(?P<BLD_%s>%s+)" % (self.bld_counter, k))
                self.brandLookupDict["BLD_%s"%self.bld_counter] = k
                self.bld_counter += 1
                # ...and every alias maps back to the canonical name k.
                if isinstance(v, basestring):
                    res.append("(?P<BLD_%s>%s+)" % (self.bld_counter,v))
                    self.brandLookupDict["BLD_%s"%self.bld_counter] = k
                    self.bld_counter += 1
                elif isinstance(v, list):
                    for w in v:
                        res.append("(?P<BLD_%s>%s+)" % (self.bld_counter,w))
                        self.brandLookupDict["BLD_%s"%self.bld_counter] = k
                        self.bld_counter += 1
        return res
    @classmethod
    def getProductNormalizationDict(cls):
        """Map lowercase/alias product spellings to canonical product names.

        Relies on a `products` class attribute defined by subclasses (see the
        commented example at the top of the class) — confirm. Note the
        asymmetry: canonical names are lowered as keys, aliases are not.
        NOTE: uses `basestring`, so this is Python 2 code.
        """
        res = {}
        for p in cls.products:
            if isinstance(p, basestring):
                res[p.lower()] = p
            elif isinstance(p, dict):
                for k,v in p.items():
                    res[k.lower()] = k
                    if isinstance(v, basestring):
                        res[v] = k
                    elif isinstance(v, list):
                        for vv in v:
                            res[vv] = k
        return res
    @classmethod
    def getBrandNormalizationDict(cls):
        """Map lowercase/alias brand spellings to canonical brand names.

        Relies on a `brands` class attribute defined by subclasses (see the
        commented example at the top of the class) — confirm. Same
        canonical-lowered / alias-not-lowered asymmetry as the product
        variant. NOTE: uses `basestring`, so this is Python 2 code.
        """
        res = {}
        for p in cls.brands:
            if isinstance(p, basestring):
                res[p.lower()] = p
            elif isinstance(p, dict):
                for k,v in p.items():
                    res[k.lower()] = k
                    if isinstance(v, basestring):
                        res[v] = k
                    elif isinstance(v, list):
                        for vv in v:
                            res[vv] = k
        return res
@classmethod
def normalizeBrand(cls, b):
if not b: return ""
return cls.getBrandNormalizationDict().get(b.lower(), "")
@classmethod
def normalizeProduct(cls, p):
if not p: return ""
return cls.getProductNormalizationDict().get(p.lower(), "")
    def getPatterns(self):
        """Compile the brand regex templates into one alternation pattern.

        Each entry of self.brand_regexps is treated here as a template
        string with %(BRANDS)s / %(PRODUCTS)s placeholders. NOTE(review):
        extract() instead iterates brand_regexps as (pattern, rule) tuples —
        this method appears to belong to the legacy extract_old() path;
        confirm which representation is current.
        """
        regexps = ["(" + r % {"BRANDS": '|'.join(self.getBrandLookupWords()), "PRODUCTS": '|'.join(self.getProductLookupWords())} + ")" for r in self.brand_regexps]
        pattern = "(" + '|'.join(regexps) + ")"
        #print pattern
        patterns = [re.compile(pattern, re.I|re.U)]
        return patterns
    def calculateConfidence(self, pm, text):
        """Score how confident we are that *pm* is a genuine mention.

        Base score: +5 if a brand word matched, +5 if a product word matched.
        Then clue words found in *text* add their per-clue scores (brand-level
        clues always; product-level clues only for the matched product).
        """
        def processClues(cluelist):
            # Sum the scores of every clue word occurring in `text`.
            # Each clue is (score, word1, word2, ...).
            res = 0
            wdict = {}
            for clue in cluelist:
                if isinstance(clue, tuple):
                    for w in clue[1:]:
                        wdict[w.lower()] = clue[0]
                else:
                    raise Exception("invalid clue: %s" % clue)
            if wdict:
                regexps = []
                kc = len(wdict.keys())
                kp = 0
                # Batch the words 25 at a time into one alternation regex,
                # each word in its own CONFIDENCE_<i> named group bounded by
                # non-word characters (a \b-like boundary).
                # NOTE: wdict.keys()[kp:kp+25] slices a list — Python 2 only.
                while kp < kc:
                    keys = wdict.keys()[kp:kp+25]
                    kp += 25
                    regexp = "(" + "|".join(["(?:(?<=\W)|^)(?P<CONFIDENCE_%s>%s)(?=\W|$)" % (c,k) for k,c in zip(keys, range(len(keys)))]) + ")"
                    #"\\b(?P<CONFIDENCE_%s>%s)\\b" % (c,k) for k,c in zip(keys, range(len(keys)))]) + ")"
                    #print regexp
                    pattern = re.compile(regexp, re.I|re.U)
                    for mo in pattern.finditer(text):
                        for k in mo.groupdict():
                            if mo.group(k) and k.startswith("CONFIDENCE"):
                                #print mo.group(k), wdict[mo.group(k).lower()]
                                res += wdict[mo.group(k).lower()]
            return res
        confidence = 0
        if pm.brand_matched_word: confidence += 5
        if pm.product_matched_word:
            confidence += 5
            if pm.product in self.product_confidence_clues: confidence += processClues(self.product_confidence_clues[pm.product])
        confidence += processClues(self.brand_confidence_clues)
        return confidence
def extract_old(self, text):
    """Legacy extraction: run the combined pattern from getPatterns() over
    *text* and build one ProductMatch per regex match.

    Named groups drive the annotation: BLD_* groups mark brand hits (resolved
    through self.brandLookupDict), PLD_* groups mark product hits.

    :param text: text to scan
    :return: list of ProductMatch objects
    """
    res = []
    for pattern in self.getPatterns():
        matches = pattern.finditer(text)
        for m in matches:
            pm = ProductMatch()
            for k in m.groupdict():
                if k.startswith("BLD_") and m.group(k):
                    pm.brand = self.brandLookupDict[k]
                    pm.brand_matched_word = m.group(k)
                    pm.brand_matched_pos = (m.start(k), m.end(k))
                    pm.source = text
                elif k.startswith("PLD_") and m.group(k):
                    # BUGFIX: the original subscripted the bound method
                    # itself (self.getProductLookupDict[k]) which raises
                    # TypeError.  Resolve through the product lookup dict,
                    # mirroring brandLookupDict above.
                    # NOTE(review): assumes productLookupDict is populated
                    # analogously to brandLookupDict — confirm against the
                    # (not visible here) getProductLookupWords().
                    pm.product = self.productLookupDict[k]
                    pm.product_matched_word = m.group(k)
                    pm.product_matched_pos = (m.start(k), m.end(k))
                    pm.source = text
            pm.confidence = self.calculateConfidence(pm, text)
            res.append(pm)
    return res
def extract(self, text):
    """Extract ProductMatch objects from *text* using the precompiled rules.

    Two passes:
    1. brand-level rules — self.brand_regexps, here a list of
       (compiled_pattern, rule) pairs (presumably compiled elsewhere from
       the template strings the subclasses assign — TODO confirm);
    2. per-product rules — self.product_regexps[product_name] for every
       key of self.products.

    Named groups drive the annotation: BLD_* groups mark a brand hit,
    PLD_<i> groups mark a hit for self.product_list[i].

    NOTE: self.name.keys()[0] relies on Python 2 semantics (keys() returning
    a list); under Python 3 this would need list(self.name)[0].

    :param text: text to scan
    :return: list of ProductMatch objects, one per regex match
    """
    res = []
    for pattern, rule in self.brand_regexps:
        matches = pattern.finditer(text)
        for m in matches:
            pm = ProductMatch()
            pm.pattern = pattern.pattern
            for k in m.groupdict():
                if k.startswith("BLD_") and m.group(k):
                    # The canonical brand name is the single key of self.name.
                    pm.brand = self.name.keys()[0]
                    pm.brand_matched_word = m.group(k)
                    pm.brand_matched_pos = (m.start(k), m.end(k))
                    pm.source = text
                elif k.startswith("PLD_") and m.group(k):
                    # PLD_<i> encodes the index into self.product_list.
                    pm.product = self.product_list[int(k.split("_")[1])]
                    pm.product_matched_word = m.group(k)
                    pm.product_matched_pos = (m.start(k), m.end(k))
                    pm.brand = self.name.keys()[0]
                    pm.source = text
            pm.confidence = self.calculateConfidence(pm, text)
            # Attach provenance so downstream consumers can trace the match.
            pm.rule = rule
            pm.campaign_id = self.campaign_id
            pm.campaign_name = self.campaign_name
            pm.account_id = self.account_id
            pm.account_name = self.account_name
            res.append(pm)
    for prod_name in self.products.keys():
        for pattern,rule in self.product_regexps[prod_name]:
            matches = pattern.finditer(text)
            for m in matches:
                pm = ProductMatch()
                pm.pattern = pattern.pattern
                for k in m.groupdict():
                    if k.startswith("BLD_") and m.group(k):
                        pm.brand = self.name.keys()[0]
                        pm.brand_matched_word = m.group(k)
                        pm.brand_matched_pos = (m.start(k), m.end(k))
                        pm.source = text
                    elif k.startswith("PLD_") and m.group(k):
                        pm.product = self.product_list[int(k.split("_")[1])]
                        pm.product_matched_word = m.group(k)
                        pm.product_matched_pos = (m.start(k), m.end(k))
                        pm.brand = self.name.keys()[0]
                        pm.source = text
                pm.confidence = self.calculateConfidence(pm, text)
                pm.rule = rule
                pm.campaign_id = self.campaign_id
                pm.campaign_name = self.campaign_name
                pm.account_id = self.account_id
                pm.account_name = self.account_name
                res.append(pm)
    return res
class AdesClassifier(BrandClassifier):
    """Classifier configuration for the Ades juice brand (config only)."""
    def __init__(self):
        BrandClassifier.__init__(self)
        # Template: brand, optionally followed by "en/de/con sabor a ..."
        # plus a product.  %(BRANDS)s / %(PRODUCTS)s are filled in
        # getPatterns().
        self.brand_regexps = [u'(?:\\A|\\Z|\\W)(?P<brand1>%(BRANDS)s)(\\A|\\Z|\\W+)(?:(?:(?:en|de|(?:con )?(?:sabor|gusto)(?: a)?)\\W+)?(?P<product1>%(PRODUCTS)s)(?:\\A|\\Z|\\W))?']
        self.name = {"Ades": []}
        # Plain strings are canonical names; dict entries map a canonical
        # name to its synonyms.
        self.products = ["Manzana", "Durazno", "Naranja", {u"Ananá": ["anana", u"piña"]}, "Natural", {"Frutas Tropicales": "frutos tropicales"}, "Kids", "Free", "multifruta"]
        # NOTE(review): calculateConfidence() reads brand_confidence_clues /
        # product_confidence_clues; confirm confidence_increasing_clues is
        # actually consumed by the (not visible) base class.
        self.confidence_increasing_clues = ["juguito", "juguitos", "jugo", "jugos", "tomas", u"tomás", u"tomá", "toma", "tomando", "tomar", "tome", u"tomé", "tomen", "beber", "vaso", "jarra"]
class KnorrClassifier(BrandClassifier):
    """Classifier configuration for the Knorr brand (config only).

    Two templates: brand-then-product ("knorr ... en <product>") and
    product-then-brand ("<product> de knorr").
    """
    def __init__(self):
        BrandClassifier.__init__(self)
        # %(BRANDS)s / %(PRODUCTS)s placeholders are filled in getPatterns().
        self.brand_regexps = [u'(?:\\A|\\Z|\\W)(?P<brand1>%(BRANDS)s)(\\A|\\Z|\\W+)(?:(?:(?:en)\\W+)?(?P<product1>%(PRODUCTS)s)(?:\\A|\\Z|\\W))?', \
                              u'(?:(?:\\A|\\Z|\\W)(?P<product2>%(PRODUCTS)s)(\\W+?:de)?)?(?:\\A|\\Z|\\W+)(?P<brand2>%(BRANDS)s)(?:\\A|\\Z|\\W)']
        self.name = {"Knorr": [u"knorr®", "knorr suiza"]}
        self.products = ["Salsa", "Arroz", {"Tomate Cubos": ["tomate en cubos"]}, "Tomate", {"Sopa": ["sopas", "sopita", "sopitas"]}, {"Caldo": ["caldos", "caldito", "calditos", "cubito", "cubos", "cubitos"]}]
        # NOTE(review): calculateConfidence() reads
        # self.product_confidence_clues, not *_incr_clues — verify this
        # attribute name is consumed anywhere, otherwise these clues are dead.
        self.product_confidence_incr_clues = {}
        self.product_confidence_incr_clues['Caldo'] = ["carne", "gallina", "verdura"]
class AXEClassifier(BrandClassifier):
    """Classifier configuration for the AXE brand (config only)."""
    def __init__(self):
        BrandClassifier.__init__(self)
        # Brand, optionally followed by "de <product>"; %(BRANDS)s /
        # %(PRODUCTS)s are filled in getPatterns().
        self.brand_regexps = [u'(?:\\A|\\Z|\\W)(?P<brand>%(BRANDS)s)(?:\\A|\\Z|\\W+)((?:de\\W+)?(?P<product>%(PRODUCTS)s)(?:\\A|\\Z|\\W))?']
        self.name = {"AXE": []}
        self.products = ["Marine", {u"Dark Temptation": ["chocolate"]}]
        # One literal list of (weight, word...) clue tuples: positive clues
        # for deodorant context, strongly negative ones for homonyms
        # (Golden Axe, Axe Bahia) and music/dance contexts.
        self.brand_confidence_clues = [
            (5, "rociar", "rociarse", "rociado", "rociandose", u"rociándose", "rociados", u"loción", "locion", "desodorante", "desodorantes", "olor", "huele", "sobaco", "baranda", "perfume", "fragancia", "aroma", "feromonas"),
            (-100, "@iberia", "golden axe", "axe bahia", u"axe bahía"),
            (-5, "danza", "danzar"),
            (-7, "cancion", u"canción", "canciones", "baile", "bailando", "bailaba", "bailabas", "bailar", "bailen", "musica", u"música"),
        ]
class JumexClassifier(BrandClassifier):
    """Umbrella classifier for the Jumex brand family.

    Delegates extraction to the per-product-line sub-classifiers and merges
    their results.  BrandClassifier.extract is called *unbound* on fresh
    sub-classifier instances (instead of instance.extract) because the
    sub-classes inherit this overridden extract() — calling it normally
    would recurse forever.
    """
    def __init__(self):
        BrandClassifier.__init__(self)
        # Negative clues: the Jumex museum / art collection is a common
        # false-positive source.
        self.brand_confidence_clues = [(-100, "museo"), (-10, u"colección", "coleccion")]
    def extract(self, text):
        """Run the rule-based extract of every Jumex sub-brand on *text*."""
        res = []
        res.extend(BrandClassifier.extract(JumexAmiClassifier(), text))
        res.extend(BrandClassifier.extract(JumexPauPauClassifier(), text))
        res.extend(BrandClassifier.extract(JumexBidaClassifier(), text))
        res.extend(BrandClassifier.extract(JumexVigorClassifier(), text))
        return res
    def extract_old(self, text):
        """Legacy counterpart of extract(), using the pattern-based path."""
        res = []
        res.extend(BrandClassifier.extract_old(JumexAmiClassifier(), text))
        res.extend(BrandClassifier.extract_old(JumexPauPauClassifier(), text))
        res.extend(BrandClassifier.extract_old(JumexBidaClassifier(), text))
        res.extend(BrandClassifier.extract_old(JumexVigorClassifier(), text))
        return res
class JumexAmiClassifier(JumexClassifier):
    """Classifier configuration for the Jumex Amí product line (config only).

    NOTE(review): the canonical name below is the literal placeholder
    u"<NAME>" — this looks like an artifact of source anonymization and
    should probably read u"Jumex Amí"; left untouched because it is a
    runtime value.
    """
    def __init__(self):
        JumexClassifier.__init__(self)
        self.brand_regexps = [u'(?:\\A|\\Z|\\W)(?P<brand1>%(BRANDS)s)(\\A|\\Z|\\W+)(?:(?:(?:en|de|(?:con )?(?:sabor|gusto)(?: a)?)\\W+)?(?P<product1>%(PRODUCTS)s)(?:\\A|\\Z|\\W))?']
        self.name = {u"<NAME>": [u"Jumex Amí", "jumex", "ami", u"amí"]}
        self.products = [{u"Citrus punch":["citrus", "punch"]} , "Manzana", {"Naranjada": ["naranja"]}, "Mango", "Uva", {u"Piña": ["anana", u"ananá"]}]
        # Shared juice-context clue list defined at module level.
        self.brand_confidence_clues = JUGO_CONFIDENCE_CLUES
class JumexPauPauClassifier(JumexClassifier):
    """Classifier configuration for the Jumex Pau Pau! product line
    (config only)."""
    def __init__(self):
        JumexClassifier.__init__(self)
        self.brand_regexps = [u'(?:\\A|\\Z|\\W)(?P<brand1>%(BRANDS)s)(\\A|\\Z|\\W+)(?:(?:(?:en|de|(?:con )?(?:sabor|gusto)(?: a)?)\\W+)?(?P<product1>%(PRODUCTS)s)(?:\\A|\\Z|\\W))?']
        self.name = {"Jumex Pau Pau!": [u"Jumex Pau Pau", "jumex", "pau", "pau!"]}
        self.products = ["Cereza", "Guayaba", "Mango", {u"Limón": "limon"}, "Manzana", "Naranja", "Tamarindo", "Uva"]
        # Shared juice-context clue list defined at module level.
        self.brand_confidence_clues = JUGO_CONFIDENCE_CLUES
class JumexBidaClassifier(JumexClassifier):
def __init__(self):
JumexClassifier.__init__(self)
self.brand_regexps = [u'(?:\\A|\\Z|\\W)(?P<brand1>%(BRANDS)s)(\\A|\\Z|\\W+)(?:(?:(?:en|de|(?:con )?(?:sabor|gusto)(?: a)?)\\W+)?(?P<product1>%(PRODUCTS)s)(?:\\A|\\Z|\\W))?']
self.name = {"Jumex Bida": ["jumex", | |
value being a tuple with the numpy arrays
containing the aggregates for each range and in second position the genomic ranges
Note that ranges with the same startpoint but different endpoint will
be considered as two separate ranges
:param group_key: The group key under which the grouping has been stored
:param read_group_map: A dictionary containing read groups (in case they have not been stored in the file)
:param aggregation_fun: Function that takes a numpy array of llrs and returns the aggregate
:return: {readgroup_key: (aggregated llrs, ranges for each aggregation)
"""
all_llrs = self.get_llrs()
all_ranges = self.get_ranges()
all_groups = self.get_read_groups(group_key = group_key, read_group_map = read_group_map)
return {
group: self.__compute_llr_site_aggregate(
all_ranges[all_groups == group], all_llrs[all_groups == group], aggregation_fun
)
for group in set(all_groups)
}
def get_llr_site_readgroup_rate(
    self, group_key: Optional[str] = None, read_group_map: Optional[Dict[str, int]] = None, llr_threshold: float = 2
) -> Dict[str, Tuple[np.ndarray, np.ndarray]]:
    """Per-read-group methylation rate (beta-score) aggregated per site.

    Thin wrapper around get_llr_site_readgroup_aggregate that plugs in a
    beta-score aggregation at the given LLR threshold.

    :param group_key: group key under which the grouping has been stored
    :param read_group_map: read groups not stored in the file, if any
    :param llr_threshold: absolute LLR above which a call counts as confident
    :return: {readgroup: (aggregated llrs, ranges for each aggregation)}
    """
    def betascore_of(llrs):
        # Methylation beta-score of one site's LLRs.
        return compute_betascore(llrs, llr_threshold)

    return self.get_llr_site_readgroup_aggregate(
        group_key=group_key,
        read_group_map=read_group_map,
        aggregation_fun=betascore_of,
    )
def to_sparse_methylation_matrix(
    self, read_read_names: bool = True, read_groups_key: Optional[str] = None
) -> SparseMethylationMatrixContainer:
    """Creates a SparseMethylationMatrixContainer from the values in
    this container. If a read_groups_key is provided, then Meth5
    file will be checked for a matching read group annotation, which
    will then serve to define the samples in the
    SparseMethylationMatrixContainer.
    The resulting sparse matrix is stored as a csc_matrix and is created
    directly to keep memory requirement low
    :param read_read_names: Set to True if you care about reading the read_names (takes some extra disk IO),
    or False if you are ok with reads being identified using their numeric id in the meth5 file.
    NOTE(review): this parameter is never referenced in the body — read
    names are always loaded; confirm whether the numeric-id path was lost.
    :param read_groups_key: The key in the Meth5 file under which the read groups
    (samples) can be found
    :return: SparseMethylationMatrixContainer or None
    """
    # Define canonical order of read names (rows of the matrix).
    read_names = [r for r in self.get_read_names_unique()]
    genomic_ranges = self.get_ranges_unique()
    # Assigns y coordinate in the matrix to a genomic (start) position.
    coord_to_index_dict = {genomic_ranges[i, 0]: i for i in range(len(genomic_ranges))}
    # Assigns x coordinate in the matrix to a read name.
    read_dict = {read_names[i]: i for i in range(len(read_names))}
    # Per-call (non-unique) read names and LLRs, in file order.
    read_name_list = self.get_read_names()
    sparse_data = self.get_llrs()[:]
    sparse_x = [read_dict[r] for r in read_name_list]
    sparse_y = [coord_to_index_dict[p] for p in self.get_ranges()[:, 0]]
    if read_groups_key is not None:
        # Map each read to its sample via the stored read-group annotation.
        read_groups_ds = self.get_read_groups(group_key = read_groups_key)
        read_samples_dict = {rn: rg for (rn, rg) in zip(read_name_list[:], read_groups_ds[:])}
        read_samples = np.array([read_samples_dict[r] for r in read_names])
    else:
        read_samples = None
    """Note: It's important to provide "shape" in the constructor, in case
    the matrix is empty. Otherwise the csc_matrix constructor will raise
    an error for not being able to infer the dimensions of the matrix"""
    met_matrix = sp.csc_matrix((sparse_data, (sparse_x, sparse_y)), shape=(len(read_names), len(genomic_ranges)))
    return SparseMethylationMatrixContainer(
        met_matrix,
        read_names,
        genomic_ranges[:, 0],
        genomic_ranges[:, 1],
        read_samples=read_samples,
    )
class ChromosomeContainer:
"""Manages access to the data of a single chromosome and provides
functions for efficient subsetting (e.g. by chunk or by genomic
region)"""
def __init__(self, parent_meth5: MetH5File, chromosome_group: h5py.Group, chunk_size: int):
    """
    :param parent_meth5: parent meth5 file object
    :param chromosome_group: h5py.Group object inside the Meth5 file containing
    values for this chromosome
    :param chunk_size: chunk size to use for hdf5 dataframes
    """
    self.parent_meth5 = parent_meth5
    # HDF5 group holding this chromosome's datasets (e.g. "range").
    self.h5group = chromosome_group
    # Number of methylation calls per chunk; also used by the chunk index.
    self.chunk_size = chunk_size
def __len__(self) -> int:
    """Number of methylation calls stored for this entire chromosome."""
    ranges_ds = self.h5group["range"]
    return len(ranges_ds)
def get_number_of_chunks(self) -> int:
    """
    :return: given length and chunk size, the number of chunks
    """
    # Ceiling division: a partially filled trailing chunk counts as one.
    return (len(self) + self.chunk_size - 1) // self.chunk_size
def get_chunk_ids(self) -> List[int]:
    """
    :return: List of integer ids, one for each chunk.
    In the current implementation it's just a running counter
    """
    # Idiomatic replacement for [i for i in range(...)].
    return list(range(self.get_number_of_chunks()))
def _seek_overlap_ranges_backwards(self, chunk_id: int, start_value: int = -1) -> int:
    """This helper function recursively looks backwards starting
    from a specified chunk, and returns the index of the first
    position in the dataframes that contains a methylation call for
    the same genomic site as the start of the provided chunk. Used
    to make sure all methylation calls (from all reads) are
    included.
    :param chunk_id: starting chunk id
    :param start_value: used in recursion only - don't overwrite it
    :return: first index for included sites
    """
    # Index of the last row belonging to this chunk (clamped at file end).
    last = min(len(self), self.chunk_size * (chunk_id + 1)) - 1
    if start_value == -1:
        # First (outermost) call: remember the genomic start coordinate of
        # the chunk we are extending backwards from.
        start_value = self.h5group["range"][self.chunk_size * chunk_id, 0]
    starts = self.h5group["range"][(self.chunk_size * chunk_id) : last, 0]
    matches = np.arange(len(starts))[starts == start_value]
    if len(matches) == 0:
        # Nothing in this chunk, return beginning of the chunk we came from
        return self.chunk_size * (chunk_id + 1)
    if matches[0] == 0 and chunk_id > 0:
        # All of this chunk is the same range, we need to go deeper
        return self._seek_overlap_ranges_backwards(chunk_id - 1, start_value=start_value)
    # Part of this chunk has entries for this start position
    return self.chunk_size * chunk_id + matches[0]
def _seek_overlap_ranges_forwards(self, chunk_id: int, end_value: int = -1) -> int:
    """This helper function recursively looks forwards starting from
    the end of a specified chunk, and returns the index of the last
    position in the dataframes that contains a methylation call for
    the same genomic site as the end of the provided chunk. Used to
    make sure all methylation calls (from all reads) are included.
    :param chunk_id: starting chunk id
    :param end_value: used in recursion only - don't overwrite it
    :return: last index for included sites
    """
    # Index of the last row belonging to this chunk (clamped at file end).
    last = min(len(self), self.chunk_size * (chunk_id + 1)) - 1
    if end_value == -1:
        # First (outermost) call: remember the genomic start coordinate of
        # the chunk's final row, which we extend forwards from.
        end_value = self.h5group["range"][last, 0]
    ends = self.h5group["range"][(self.chunk_size * chunk_id) : (last + 1), 0]
    matches = np.arange(len(ends))[ends == end_value]
    if len(matches) == 0:
        # Nothing in this chunk, return end of the chunk we came from
        return self.chunk_size * chunk_id - 1
    if matches[-1] == self.chunk_size - 1 and chunk_id < self.get_number_of_chunks() - 1:
        # All of this chunk is the same range, we need to go deeper
        return self._seek_overlap_ranges_forwards(chunk_id + 1, end_value=end_value)
    # Part of this chunk has entries for this end position
    return self.chunk_size * chunk_id + matches[-1]
def get_chunk(self, chunk_id: int, overlap: bool = True) -> MethlyationValuesContainer:
    """Returns a MethlyationValuesContainer providing access to the
    values of said chunk, and, if overlap=True, includes values of
    neighboring chunks if they are in the same genomic ranges, such
    as to avoid having a subset of reads of one location in one
    chunk and the rest in the other.
    :param chunk_id: The chunk id (see get_chunk_ids)
    :param overlap: Whether to look for same-region locations in
    neighboring chunks
    :return: MethlyationValuesContainer
    """
    if overlap:
        # Extend the chunk in both directions so no genomic site is split
        # across chunk boundaries.
        earliest_pos = self._seek_overlap_ranges_backwards(chunk_id)
        latest_pos = self._seek_overlap_ranges_forwards(chunk_id) + 1
    else:
        # Plain slice: [chunk_size * id, chunk_size * (id + 1)) clamped to
        # the end of the chromosome.
        earliest_pos = self.chunk_size * chunk_id
        latest_pos = min(self.chunk_size * (chunk_id + 1), len(self))
    return MethlyationValuesContainer(self, earliest_pos, latest_pos)
def create_chunk_index(self, force_update: bool = False):
    """Needs Meth5 file to be open in write or append mode. Creates
    an additional datastructure in the HDF5 file that stores an
    index that stores genomic start and end site of a chunk, for
    fast searching.
    :param force_update: Whether an existing index should be overwritten
    """
    if "chunk_ranges" in self.h5group.keys() and not force_update:
        return
    # One (genomic start, genomic end) row per chunk.
    index = np.zeros((self.get_number_of_chunks(), 2))
    num_ranges = self.h5group["range"].shape[0]
    for chunk_i, start_i in enumerate(range(0, num_ranges, self.chunk_size)):
        # NOTE(review): start_i + chunk_size is the first row of the *next*
        # chunk (this chunk's last row is start_i + chunk_size - 1); only
        # the final chunk is clamped by the min().  Possibly a deliberate
        # over-approximation for overlap-aware searching — confirm before
        # changing.
        end_i = min(num_ranges - 1, start_i + self.chunk_size)
        index[chunk_i, 0] = self.h5group["range"][start_i, 0]
        index[chunk_i, 1] = self.h5group["range"][end_i, 1]
    if "chunk_ranges" in self.h5group.keys():
        # Index exists but force_update was requested: resize in place.
        self.h5group["chunk_ranges"].resize(index.shape)
        self.h5group["chunk_ranges"][:] = index
    else:
        # maxshape=(None, 2) keeps the dataset resizable for future calls.
        self.h5group.create_dataset(name="chunk_ranges", data=index, dtype=int, maxshape=(None, 2))
    self.h5group.attrs["chunk_size"] = self.chunk_size
def get_all_values(self) -> MethlyationValuesContainer:
    """Returns a MethlyationValuesContainer providing access to all sites on the chromosome
    Very inefficient and therefore not recommended. Chunk-based operations are recommended.
    :return: MethlyationValuesContainer
    """
    total_calls = self.h5group["range"].shape[0]
    return MethlyationValuesContainer(self, 0, total_calls)
def get_values_in_range(self, genomic_start: int, genomic_end: int) -> MethlyationValuesContainer:
"""Returns a MethlyationValuesContainer providing access to the
specified genomic region.
Needs an index created by create_chunk_index, since the chunk
index is used for fast searching.
:param genomic_start: Genomic start location on the chromosome
:param genomic_end: Genomic end location on the chromosome
:return: | |
(' + debug_info + '): ' + str(e))
# def _load_phytowin_datasets(self):
# """ """
# try:
# toolbox_utils.Logging().log('') # Empty line.
# toolbox_utils.Logging().log('Importing datasets...')
# toolbox_utils.Logging().start_accumulated_logging()
# self._write_to_status_bar('Importing datasets...')
#
# # Show select file dialog box. Multiple files can be selected.
# namefilter = 'Phytowin files (*.csv);;All files (*.*)'
# filenames, _filters = QtWidgets.QFileDialog.getOpenFileNames(
# self,
# 'Load PhytoWin file(s). ',
# self._lastusedphytowinfilename,
# namefilter)
# # From QString to str.
# filenames = map(str, filenames)
# # Check if user pressed ok or cancel.
# # phytowin = plankton_core.ImportPhytowin()
# # self._tabledataset = plankton_core.DatasetTable()
# if filenames:
# for filename in filenames:
# self._lastusedphytowinfilename = filename
#
#
# datasetnode = plankton_core.DataImportManager().import_dataset_file(filename,
# import_format = 'PhytoWin')
# # Use datasets-wrapper to emit change notification when dataset list is updated.
# app_framework.ToolboxDatasets().emit_change_notification()
#
#
# # phytowin.clear()
# # phytowin.read_file(filename)
# # # # Used for report 'combined datasets'.
# # # phytowin.add_to_table_dataset(self._tabledataset)
# # # Add as tree dataset for calculated reports.
# # datasetnode = plankton_core.DatasetNode()
# # phytowin.add_to_dataset_node(datasetnode)
# # # Add to dataset list. (Note:ToolboxDatasets is a wrapper containing the 'datasetListChanged'-signal).
# # app_framework.ToolboxDatasets().add_dataset(datasetnode)
# # Add metadata related to imported file.
# datasetnode.add_metadata('parser', '-')
# datasetnode.add_metadata('file_name', os.path.basename(filename))
# datasetnode.add_metadata('file_path', filename)
# datasetnode.add_metadata('import_column', '-')
# datasetnode.add_metadata('export_column', '-')
# #
# except Exception as e:
# toolbox_utils.Logging().error('PhytoWin file import failed on exception: ' + str(e))
# QtWidgets.QMessageBox.warning(self, 'Text file loading.\n',
# 'PhytoWin file import failed on exception.\n' + str(e))
# raise
# finally:
# datasetcount = len(plankton_core.Datasets().get_datasets())
# self._write_to_status_bar('Imported datasets: ' + str(datasetcount))
# toolbox_utils.Logging().log_all_accumulated_rows()
# toolbox_utils.Logging().log('Importing datasets done. Number of imported datasets: ' + str(datasetcount))
# ===== TEXT FILES ======
def _content_textfile(self):
    """Build the 'Text files' tab: parser / import-column / export-column
    selectors, a character-encoding selector, and the import button.

    :return: the assembled QWidget
    """
    widget = QtWidgets.QWidget()
    # Active widgets and connections.
    # - Select dataset parsers:
    self._textfile_parser_list = QtWidgets.QComboBox()
    self._textfile_parser_list.setSizeAdjustPolicy(QtWidgets.QComboBox.AdjustToContents)
    self._textfile_parser_list.addItems(["<select>"])
    self._textfile_parser_list.currentIndexChanged.connect(self._textfile_parser_selected)
    # - Add available dataset parsers.
    self._textfile_parser_list.addItems(self._parser_list)
    # - Select import column:
    self._textfile_importcolumn_list = QtWidgets.QComboBox()
    self._textfile_importcolumn_list.setSizeAdjustPolicy(QtWidgets.QComboBox.AdjustToContents)
    self._textfile_importcolumn_list.addItems(["<no parser selected>"])
    self._textfile_importcolumn_list.currentIndexChanged.connect(self._textfile_import_column_selected)
    # - Select export column:
    self._textfile_exportcolumn_list = QtWidgets.QComboBox()
    self._textfile_exportcolumn_list.setSizeAdjustPolicy(QtWidgets.QComboBox.AdjustToContents)
    self._textfile_exportcolumn_list.addItems(["<no parser selected>"])
    # - Select text coding.  The chosen parser may override the default via
    #   its 'info/character_encoding' row (see
    #   _textfile_import_column_selected).
    self._textfile_encoding_list = QtWidgets.QComboBox()
    self._encodings_list = ['<platform default>',
                            'windows-1252',
                            'utf-8',
                            'utf-16',
                            'ascii',
                            'latin1',
                            'macroman']
    self._textfile_encoding_list.addItems(self._encodings_list)
    # Load dataset.
    self._textfile_getdataset_button = QtWidgets.QPushButton('Import dataset(s)...')
    self._textfile_getdataset_button.clicked.connect(self._load_text_files)
    self._textfile_trophic_list_checkbox = QtWidgets.QCheckBox('Update trophic types')
    self._textfile_trophic_list_checkbox.setChecked(True)
    # Layout widgets.
    form1 = QtWidgets.QGridLayout()
    gridrow = 0
    label1 = QtWidgets.QLabel('Select parser:')
    stretchlabel = QtWidgets.QLabel('')
    form1.addWidget(label1, gridrow, 0, 1, 1)
    form1.addWidget(self._textfile_parser_list, gridrow, 1, 1, 1)
    form1.addWidget(stretchlabel, gridrow,2, 1, 9)
    gridrow += 1
    label1 = QtWidgets.QLabel('Select import column:')
    form1.addWidget(label1, gridrow, 0, 1, 1)
    form1.addWidget(self._textfile_importcolumn_list, gridrow, 1, 1, 1)
    gridrow += 1
    label1 = QtWidgets.QLabel('Select export column:')
    form1.addWidget(label1, gridrow, 0, 1, 1)
    form1.addWidget(self._textfile_exportcolumn_list, gridrow, 1, 1, 1)
    #
    hbox1 = QtWidgets.QHBoxLayout()
    label1 = QtWidgets.QLabel('Text file character encoding (affects å, è, µ, etc.):')
    hbox1.addWidget(label1)
    hbox1.addWidget(self._textfile_encoding_list)
    hbox1.addWidget(self._textfile_getdataset_button)
    hbox1.addWidget(self._textfile_trophic_list_checkbox)
    hbox1.addStretch(10)
    #
    layout = QtWidgets.QVBoxLayout()
    layout.addLayout(form1)
    layout.addStretch(1)
    layout.addLayout(hbox1)
    widget.setLayout(layout)
    #
    return widget
def _textfile_parser_selected(self, selected_row):
    """Populate the import/export column combo boxes for the chosen parser.

    Row 0 is the "<select>" placeholder, so parser indices are offset by 1.
    The parser Excel file's 'info/column_type' row marks which header
    columns are import and/or export columns.

    :param selected_row: index emitted by the parser QComboBox
    """
    try:
        if (selected_row > 0) and (selected_row <= len(self._parser_list)):
            toolbox_utils.Logging().log('Selected parser: ' + str(self._parser_list[selected_row - 1]))
            tablereader = toolbox_utils.TableFileReader(file_path = self._parser_path,
                                                        excel_file_name = self._parser_list[selected_row - 1])
            self._textfile_importcolumn_list.clear()
            self._textfile_exportcolumn_list.clear()
            header = tablereader.header()
            for row in tablereader.rows():
                if (row[0] == 'info') and (row[1] == 'column_type'):
                    for index, item in enumerate(row):
                        if item == 'import':
                            self._textfile_importcolumn_list.addItems([header[index]])
                        if item == 'export':
                            self._textfile_exportcolumn_list.addItems([header[index]])
        else:
            # Placeholder row selected: reset both combo boxes.
            self._textfile_importcolumn_list.clear()
            self._textfile_importcolumn_list.addItems(['<no parser selected>'])
            self._textfile_exportcolumn_list.clear()
            self._textfile_exportcolumn_list.addItems(['<no parser selected>'])
    except Exception as e:
        debug_info = self.__class__.__name__ + ', row ' + str(sys._getframe().f_lineno)
        toolbox_utils.Logging().error('Exception: (' + debug_info + '): ' + str(e))
def _textfile_import_column_selected(self, selected_row):
    """When an import column is picked, preselect the character encoding
    declared for that column in the parser file, if any.

    The parser's 'info/character_encoding' row holds, per column, the
    encoding to use; it is applied only when it appears in
    self._encodings_list.

    :param selected_row: index emitted by the import-column QComboBox (unused;
    the current text of the combo box is read instead)
    """
    try:
        # Reset to '<platform default>' before applying any override.
        self._textfile_encoding_list.setCurrentIndex(0)
        #
        selectedimportcolumn = str(self._textfile_importcolumn_list.currentText())
        # Read parser file.
        tablereader = toolbox_utils.TableFileReader(file_path = self._parser_path,
                                                    excel_file_name = self._parser_list[self._textfile_parser_list.currentIndex() - 1])
        header = tablereader.header()
        for index, headeritem in enumerate(header):
            if headeritem == selectedimportcolumn:
                for row in tablereader.rows():
                    if (row[0] == 'info') and (row[1] == 'character_encoding'):
                        if row[index] and (row[index] in self._encodings_list):
                            self._textfile_encoding_list.setCurrentIndex(self._encodings_list.index(row[index]))
        #
    except Exception as e:
        debug_info = self.__class__.__name__ + ', row ' + str(sys._getframe().f_lineno)
        toolbox_utils.Logging().error('Exception: (' + debug_info + '): ' + str(e))
def _load_text_files(self):
    """Ask the user for text file(s) and import each as a dataset.

    Uses the parser / import column / export column currently selected in
    the 'Text files' tab, optionally refreshing trophic types from the
    species list, and registers each imported dataset with
    ToolboxDatasets.  Errors are logged, shown in a message box, and
    re-raised to the outer handler.
    """
    try:
        try:
            toolbox_utils.Logging().log('') # Empty line.
            toolbox_utils.Logging().log('Importing datasets...')
            toolbox_utils.Logging().start_accumulated_logging()
            self._write_to_status_bar('Importing datasets...')
            # Show select file dialog box. Multiple files can be selected.
            namefilter = 'Text files (*.txt);;All files (*.*)'
            filenames, _filters = QtWidgets.QFileDialog.getOpenFileNames(
                self,
                'Import dataset(s)',
                self._last_used_textfile_name,
                namefilter)
            # Check if user pressed ok or cancel.
            self._tabledataset = plankton_core.DatasetTable()
            if filenames:
                for filename in filenames:
                    # Store selected path. Will be used as default next time.
                    self._last_used_textfile_name = filename
                    # Text files may have strange encodings.
                    if str(self._textfile_encoding_list.currentText()) == '<platform default>':
                        textfileencoding = locale.getpreferredencoding()
                    else:
                        textfileencoding = str(self._textfile_encoding_list.currentText())
                    # Set up for import file parsing.
                    importmanager = plankton_core.ImportManager(str(pathlib.Path(self._parser_path, str(self._textfile_parser_list.currentText()))),
                                                                str(self._textfile_importcolumn_list.currentText()),
                                                                str(self._textfile_exportcolumn_list.currentText()))
                    # Import and parse file.
                    dataset = importmanager.import_text_file(filename, textfileencoding)
                    # Update trophic_type.
                    # (BUGFIX: removed leftover debug print, which was also
                    # mislabeled 'excel_trophic_list' in this *text* loader.)
                    update_trophic_type = self._textfile_trophic_list_checkbox.isChecked()
                    for visit in dataset.get_children():
                        for sample in visit.get_children():
                            for variable in sample.get_children():
                                trophic_type = variable.get_data('trophic_type', '')
                                # Update all trophic_types.
                                if update_trophic_type:
                                    scientific_name = variable.get_data('scientific_name', '')
                                    size_class = variable.get_data('size_class', '')
                                    trophic_type = plankton_core.Species().get_bvol_value(scientific_name, size_class, 'trophic_type')
                                    if trophic_type:
                                        variable.add_data('trophic_type', trophic_type) # Use existing if not in local list.
                                # Replace empty with NS=Not specified.
                                if not trophic_type:
                                    variable.add_data('trophic_type', 'NS')
                    # Add metadata related to imported file.
                    dataset.add_metadata('parser', str(pathlib.Path(self._parser_path, str(self._textfile_parser_list.currentText()))))
                    dataset.add_metadata('file_name', os.path.basename(filename))
                    dataset.add_metadata('file_path', filename)
                    dataset.add_metadata('import_column', str(self._textfile_importcolumn_list.currentText()))
                    dataset.add_metadata('export_column', str(self._textfile_exportcolumn_list.currentText()))
                    # Add to dataset list. (Note:ToolboxDatasets is a wrapper containing the 'datasetListChanged'-signal).
                    app_framework.ToolboxDatasets().add_dataset(dataset)
            #
        except Exception as e:
            toolbox_utils.Logging().error('Text file import failed on exception: ' + str(e))
            QtWidgets.QMessageBox.warning(self, 'Text file loading.\n',
                                          'Text file import failed on exception.\n' + str(e))
            raise
        finally:
            datasetcount = len(plankton_core.Datasets().get_datasets())
            self._write_to_status_bar('Imported datasets: ' + str(datasetcount))
            toolbox_utils.Logging().log_all_accumulated_rows()
            toolbox_utils.Logging().log('Importing datasets done. Number of imported datasets: ' + str(datasetcount))
    #
    except Exception as e:
        debug_info = self.__class__.__name__ + ', row ' + str(sys._getframe().f_lineno)
        toolbox_utils.Logging().error('Exception: (' + debug_info + '): ' + str(e))
# ===== EXCEL FILES ======
def _content_xlsx(self):
    """Build the 'Excel files' tab: parser / import-column / export-column
    selectors and the import button (no encoding selector — Excel files
    carry their own encoding).

    :return: the assembled QWidget
    """
    widget = QtWidgets.QWidget()
    # Active widgets and connections.
    # - Select dataset parser:
    self._excel_parser_list = QtWidgets.QComboBox()
    self._excel_parser_list.setSizeAdjustPolicy(QtWidgets.QComboBox.AdjustToContents)
    self._excel_parser_list.addItems(["<select>"])
    self._excel_parser_list.currentIndexChanged.connect(self._excel_parser_selected)
    # - Add available dataset parsers.
    self._excel_parser_list.addItems(self._parser_list)
    # - Select import column:
    self._excel_importcolumn_list = QtWidgets.QComboBox()
    self._excel_importcolumn_list.setSizeAdjustPolicy(QtWidgets.QComboBox.AdjustToContents)
    self._excel_importcolumn_list.addItems(["<no parser selected>"])
    # - Select export column:
    self._excel_exportcolumn_list = QtWidgets.QComboBox()
    self._excel_exportcolumn_list.setSizeAdjustPolicy(QtWidgets.QComboBox.AdjustToContents)
    self._excel_exportcolumn_list.addItems(["<no parser selected>"])
    # Load dataset.
    self._excel_getdataset_button = QtWidgets.QPushButton('Import dataset(s)...')
    self._excel_getdataset_button.clicked.connect(self._load_excel_file)
    self._excel_trophic_list_checkbox = QtWidgets.QCheckBox('Update trophic types')
    self._excel_trophic_list_checkbox.setChecked(True)
    # Layout widgets.
    form1 = QtWidgets.QGridLayout()
    gridrow = 0
    label1 = QtWidgets.QLabel('Select parser:')
    stretchlabel = QtWidgets.QLabel('')
    form1.addWidget(label1, gridrow, 0, 1, 1)
    form1.addWidget(self._excel_parser_list, gridrow, 1, 1, 1)
    form1.addWidget(stretchlabel, gridrow,2, 1, 9)
    gridrow += 1
    label1 = QtWidgets.QLabel('Select import column:')
    form1.addWidget(label1, gridrow, 0, 1, 1)
    form1.addWidget(self._excel_importcolumn_list, gridrow, 1, 1, 1)
    gridrow += 1
    label1 = QtWidgets.QLabel('Select export column:')
    form1.addWidget(label1, gridrow, 0, 1, 1)
    form1.addWidget(self._excel_exportcolumn_list, gridrow, 1, 1, 1)
    #
    hbox1 = QtWidgets.QHBoxLayout()
    hbox1.addWidget(self._excel_getdataset_button)
    hbox1.addWidget(self._excel_trophic_list_checkbox)
    hbox1.addStretch(10)
    #
    layout = QtWidgets.QVBoxLayout()
    layout.addLayout(form1)
    layout.addStretch(1)
    layout.addLayout(hbox1)
    widget.setLayout(layout)
    #
    return widget
def _excel_parser_selected(self, selected_row):
    """Populate the import/export column combo boxes for the chosen parser.

    Row 0 is the "<select>" placeholder, so parser indices are offset by 1.
    The parser Excel file's 'info/column_type' row marks which header
    columns are import and/or export columns.

    :param selected_row: index emitted by the parser QComboBox
    """
    try:
        if (selected_row > 0) and (selected_row <= len(self._parser_list)):
            toolbox_utils.Logging().log('Selected parser: ' + str(self._parser_list[selected_row - 1]))
            tablereader = toolbox_utils.TableFileReader(file_path = self._parser_path,
                                                        excel_file_name = self._parser_list[selected_row - 1])
            self._excel_importcolumn_list.clear()
            self._excel_exportcolumn_list.clear()
            header = tablereader.header()
            for row in tablereader.rows():
                if (row[0] == 'info') and (row[1] == 'column_type'):
                    for index, item in enumerate(row):
                        if item == 'import':
                            self._excel_importcolumn_list.addItems([header[index]])
                        if item == 'export':
                            self._excel_exportcolumn_list.addItems([header[index]])
        else:
            # Placeholder row selected: reset both combo boxes.
            # BUGFIX: use the same '<no parser selected>' placeholder as the
            # initial state set in _content_xlsx() and as the text-file tab;
            # the original dropped the angle brackets here only.
            self._excel_importcolumn_list.clear()
            self._excel_importcolumn_list.addItems(['<no parser selected>'])
            self._excel_exportcolumn_list.clear()
            self._excel_exportcolumn_list.addItems(['<no parser selected>'])
    except Exception as e:
        debug_info = self.__class__.__name__ + ', row ' + str(sys._getframe().f_lineno)
        toolbox_utils.Logging().error('Exception: (' + debug_info + '): ' + str(e))
def _load_excel_file(self):
""" """
try:
try:
toolbox_utils.Logging().log('') # Empty line.
toolbox_utils.Logging().log('Importing datasets...')
toolbox_utils.Logging().start_accumulated_logging()
self._write_to_status_bar('Importing datasets...')
# Show select file dialog box. Multiple files can be selected.
namefilter = 'Excel files (*.xlsx);;All files (*.*)'
filenames, _filters = QtWidgets.QFileDialog.getOpenFileNames(
self,
'Import dataset(s)',
self._last_used_excelfile_name,
namefilter)
# Check if user pressed ok or cancel.
self._tabledataset = plankton_core.DatasetTable()
if filenames:
for filename in filenames:
# Store selected path. Will be used as default next time.
self._last_used_excelfile_name | |
# Source repository: Daulbaev/adversarial-library
# Adapted from https://github.com/fra31/auto-attack
import math
from functools import partial
from typing import Tuple, Optional, Union
import torch
from torch import nn, Tensor
from torch.nn import functional as F
from adv_lib.utils.losses import difference_of_logits_ratio
def apgd(model: nn.Module,
         inputs: Tensor,
         labels: Tensor,
         eps: Union[float, Tensor],
         norm: float,
         targeted: bool = False,
         n_iter: int = 100,
         n_restarts: int = 1,
         loss_function: str = 'dlr',
         eot_iter: int = 1,
         rho: float = 0.75,
         use_large_reps: bool = False,
         use_rs: bool = True,
         best_loss: bool = False) -> Tensor:
    """
    Auto-PGD (APGD) attack from https://arxiv.org/abs/2003.01690 with L1 variant from https://arxiv.org/abs/2103.01208.

    Parameters
    ----------
    model : nn.Module
        Model to attack.
    inputs : Tensor
        Inputs to attack. Should be in [0, 1].
    labels : Tensor
        Labels corresponding to the inputs if untargeted, else target labels.
    eps : float or Tensor
        Maximum norm for the adversarial perturbation. Can be a float used for all samples or a Tensor containing the
        distance for each corresponding sample.
    norm : float
        Norm corresponding to eps in {1, 2, float('inf')}.
    targeted : bool
        Whether to perform a targeted attack or not.
    n_iter : int
        Number of optimization steps.
    n_restarts : int
        Number of random restarts for the attack.
    loss_function : str
        Loss to optimize in ['ce', 'dlr'].
    eot_iter : int
        Number of iterations for expectation over transformation.
    rho : float
        Parameters for decreasing the step size.
    use_large_reps : bool
        Split iterations in three phases starting with larger eps (see section 3.2 of https://arxiv.org/abs/2103.01208).
    use_rs : bool
        Use a random start when using large reps.
    best_loss : bool
        If True, search for the strongest adversarial perturbation within the distance budget instead of stopping as
        soon as it finds one.

    Returns
    -------
    adv_inputs : Tensor
        Modified inputs to be adversarial to the model.
    """
    assert norm in [1, 2, float('inf')]
    device = inputs.device
    batch_size = len(inputs)
    adv_inputs = inputs.clone()
    # Per-sample flag: True once an adversarial example has been found for that sample.
    adv_found = torch.zeros(batch_size, device=device, dtype=torch.bool)
    # Promote a scalar budget to a per-sample float tensor so it can be indexed below.
    if isinstance(eps, (int, float)):
        eps = torch.full_like(adv_found, eps, dtype=torch.float)

    if use_large_reps:
        # L1 schedule from section 3.2 of https://arxiv.org/abs/2103.01208:
        # three phases with shrinking budgets 3*eps -> 2*eps -> eps.
        epss = [3 * eps, 2 * eps, eps]
        iters = [0.3 * n_iter, 0.3 * n_iter, 0.4 * n_iter]
        iters = [math.ceil(i) for i in iters]
        # Adjust the last phase so the phase lengths sum exactly to n_iter.
        iters[-1] = n_iter - sum(iters[:-1])

    # _apgd is defined elsewhere in this module; from its usage here it returns
    # (final iterate, found-mask, loss, best adversarial inputs) — TODO confirm.
    apgd_attack = partial(_apgd, model=model, norm=norm, targeted=targeted, loss_function=loss_function,
                          eot_iter=eot_iter, rho=rho)

    if best_loss:
        # Keep the perturbation with the highest loss over all restarts instead of
        # stopping at the first adversarial example found.
        loss = torch.full_like(adv_found, -float('inf'), dtype=torch.float)
        for _ in range(n_restarts):
            adv_inputs_run, adv_found_run, loss_run, _ = apgd_attack(inputs=inputs, labels=labels, eps=eps)
            better_loss = loss_run > loss
            adv_inputs[better_loss] = adv_inputs_run[better_loss]
            loss[better_loss] = loss_run[better_loss]
    else:
        for _ in range(n_restarts):
            if adv_found.all():
                break
            # Only re-attack samples that are still not adversarial.
            to_attack = ~adv_found
            inputs_to_attack = inputs[to_attack]
            labels_to_attack = labels[to_attack]

            if use_large_reps:
                # The large-eps phase schedule is only defined for the L1 attack.
                assert norm == 1
                if use_rs:
                    # Random start: Gaussian noise projected back onto the L1 ball
                    # of radius epss[0] around the inputs.
                    x_init = inputs_to_attack + torch.randn_like(inputs_to_attack)
                    x_init += l1_projection(inputs_to_attack, x_init - inputs_to_attack, epss[0][to_attack])
                else:
                    x_init = None

                # Run the phases in order, warm-starting each phase from the
                # previous phase's final iterate.
                # NOTE(review): ``iter`` shadows the builtin; consider renaming.
                for eps_, iter in zip(epss, iters):
                    eps_to_attack = eps_[to_attack]
                    if x_init is not None:
                        # Re-project the warm start onto the (smaller) current budget.
                        x_init += l1_projection(inputs_to_attack, x_init - inputs_to_attack, eps_to_attack)
                    x_init, adv_found_run, _, adv_inputs_run = apgd_attack(
                        inputs=inputs_to_attack, labels=labels_to_attack, eps=eps_to_attack, x_init=x_init,
                        n_iter=iter)
            else:
                _, adv_found_run, _, adv_inputs_run = apgd_attack(inputs=inputs_to_attack, labels=labels_to_attack,
                                                                  eps=eps[to_attack], n_iter=n_iter)

            # Scatter this restart's results back into the full batch.
            adv_inputs[to_attack] = adv_inputs_run
            adv_found[to_attack] = adv_found_run

    return adv_inputs
def apgd_targeted(model: nn.Module,
                  inputs: Tensor,
                  labels: Tensor,
                  eps: Union[float, Tensor],
                  norm: float,
                  targeted: bool = False,
                  n_iter: int = 100,
                  n_restarts: int = 1,
                  loss_function: str = 'dlr',
                  eot_iter: int = 1,
                  rho: float = 0.75,
                  use_large_reps: bool = False,
                  use_rs: bool = True,
                  num_targets: Optional[int] = None) -> Tensor:
    """
    Targeted variant of the Auto-PGD (APGD) attack from https://arxiv.org/abs/2003.01690 with L1 variant from
    https://arxiv.org/abs/2103.01208. This attack is not a targeted one: it tries to find an adversarial perturbation by
    attacking each class, starting with the most likely one (different from the original class).

    Parameters
    ----------
    model : nn.Module
        Model to attack.
    inputs : Tensor
        Inputs to attack. Should be in [0, 1].
    labels : Tensor
        Labels corresponding to the inputs if untargeted, else target labels.
    eps : float or Tensor
        Maximum norm for the adversarial perturbation. Can be a float used for all samples or a Tensor containing the
        distance for each corresponding sample.
    norm : float
        Norm corresponding to eps in {1, 2, float('inf')}.
    targeted : bool
        Required argument for the library. Will raise an assertion error if True (will be ignored if the -O flag is
        used).
    n_iter : int
        Number of optimization steps.
    n_restarts : int
        Number of random restarts for the attack for each class attacked.
    loss_function : str
        Loss to optimize in ['ce', 'dlr'].
    eot_iter : int
        Number of iterations for expectation over transformation.
    rho : float
        Parameters for decreasing the step size.
    use_large_reps : bool
        Split iterations in three phases starting with larger eps (see section 3.2 of https://arxiv.org/abs/2103.01208).
    use_rs : bool
        Use a random start when using large reps.
    num_targets : int or None
        Number of classes to attack. If None, it will attack every class (except the original class).

    Returns
    -------
    adv_inputs : Tensor
        Modified inputs to be adversarial to the model.
    """
    # This function runs the *internally* targeted attack against candidate
    # classes; callers must not pass targeted=True themselves (see docstring).
    assert targeted == False
    device = inputs.device
    batch_size = len(inputs)
    adv_inputs = inputs.clone()
    # Per-sample flag: True once an adversarial example has been found.
    adv_found = torch.zeros(batch_size, device=device, dtype=torch.bool)
    # Promote a scalar budget to a per-sample float tensor.
    if isinstance(eps, (int, float)):
        eps = torch.full_like(adv_found, eps, dtype=torch.float)

    if use_large_reps:
        # Three-phase L1 schedule with shrinking budgets (3*eps -> 2*eps -> eps).
        epss = [3 * eps, 2 * eps, eps]
        iters = [0.3 * n_iter, 0.3 * n_iter, 0.4 * n_iter]
        iters = [math.ceil(i) for i in iters]
        # Make the phase lengths sum exactly to n_iter.
        iters[-1] = n_iter - sum(iters[:-1])

    apgd_attack = partial(_apgd, model=model, norm=norm, targeted=True, loss_function=loss_function,
                          eot_iter=eot_iter, rho=rho)

    # determine the number of classes based on the size of the model's output
    # [:, 1:] drops each sample's top-1 class — presumably the original class;
    # TODO confirm behavior for inputs the model already misclassifies.
    most_likely_classes = model(inputs).argsort(dim=1, descending=True)[:, 1:]
    num_classes_to_attack = num_targets or most_likely_classes.size(1)
    for i in range(num_classes_to_attack):
        # Attack the i-th most likely (non-top-1) class for every sample.
        targets = most_likely_classes[:, i]
        for counter in range(n_restarts):
            if adv_found.all():
                break
            # Only re-attack samples that are still not adversarial.
            to_attack = ~adv_found
            inputs_to_attack = inputs[to_attack]
            targets_to_attack = targets[to_attack]

            if use_large_reps:
                assert norm == 1
                if use_rs:
                    # Random start projected onto the first (largest) L1 budget.
                    x_init = inputs_to_attack + torch.randn_like(inputs_to_attack)
                    x_init += l1_projection(inputs_to_attack, x_init - inputs_to_attack, epss[0][to_attack])
                else:
                    x_init = None

                # NOTE(review): ``iter`` shadows the builtin; consider renaming.
                for eps_, iter in zip(epss, iters):
                    eps_to_attack = eps_[to_attack]
                    if x_init is not None:
                        # Re-project the warm start onto the current (smaller) budget.
                        x_init += l1_projection(inputs_to_attack, x_init - inputs_to_attack, eps_to_attack)
                    x_init, adv_found_run, _, adv_inputs_run = apgd_attack(
                        inputs=inputs_to_attack, labels=targets_to_attack, eps=eps_to_attack, x_init=x_init,
                        n_iter=iter)
            else:
                _, adv_found_run, _, adv_inputs_run = apgd_attack(inputs=inputs_to_attack, labels=targets_to_attack,
                                                                  eps=eps[to_attack], n_iter=n_iter)

            # Scatter this run's results back into the full batch.
            adv_inputs[to_attack] = adv_inputs_run
            adv_found[to_attack] = adv_found_run

    return adv_inputs
def minimal_apgd(model: nn.Module,
                 inputs: Tensor,
                 labels: Tensor,
                 norm: float,
                 max_eps: float,
                 targeted: bool = False,
                 binary_search_steps: int = 20,
                 targeted_version: bool = False,
                 n_iter: int = 100,
                 n_restarts: int = 1,
                 loss_function: str = 'dlr',
                 eot_iter: int = 1,
                 rho: float = 0.75,
                 use_large_reps: bool = False,
                 use_rs: bool = True,
                 num_targets: Optional[int] = None) -> Tensor:
    """Binary-search the smallest eps at which APGD finds adversarial inputs.

    Runs `apgd` (or `apgd_targeted` when ``targeted_version`` is True) at the
    midpoint of a per-sample [low, high] budget bracket for
    ``binary_search_steps`` iterations, keeping the adversarial inputs found
    at the smallest successful budget. The high bound starts at 2 * max_eps.
    """
    device = inputs.device
    num_samples = len(inputs)
    adv_inputs = inputs.clone()
    # Per-sample bracket: smallest successful eps so far (upper bound) and
    # largest known-failing eps (lower bound).
    best_eps = torch.full((num_samples,), 2 * max_eps, dtype=torch.float, device=device)
    eps_low = torch.zeros_like(best_eps)

    shared_kwargs = dict(model=model, norm=norm, n_iter=n_iter, n_restarts=n_restarts,
                         loss_function=loss_function, eot_iter=eot_iter, rho=rho,
                         use_large_reps=use_large_reps, use_rs=use_rs)
    if targeted_version:
        attack = partial(apgd_targeted, num_targets=num_targets, **shared_kwargs)
    else:
        attack = partial(apgd, targeted=targeted, **shared_kwargs)

    for _ in range(binary_search_steps):
        mid_eps = (eps_low + best_eps) / 2
        candidates = attack(inputs=inputs, labels=labels, eps=mid_eps)
        fooled = model(candidates).argmax(1) != labels
        improved = fooled & (mid_eps < best_eps)
        adv_inputs[improved] = candidates[improved]
        # Tighten the bracket: successes lower the upper bound, failures raise
        # the lower bound.
        eps_low = torch.where(improved, eps_low, mid_eps)
        best_eps = torch.where(improved, mid_eps, best_eps)

    return adv_inputs
def l1_projection(x: Tensor, y: Tensor, eps: Tensor) -> Tensor:
    """Return a correction ``d`` so that ``x + y + d`` satisfies the L1 budget ``eps``.

    Port of the L1 projection from the auto-attack reference implementation
    (https://github.com/fra31/auto-attack). Operates batch-wise on flattened
    tensors; the use of ``1 - x - y`` and ``x + y`` suggests it also keeps the
    result inside the [0, 1] box — TODO confirm against the caller's domain.
    """
    device = x.device
    shape = x.shape
    # Work on flattened (batch, features) views; reshape back at the end.
    x, y = x.flatten(1), y.flatten(1)
    # Sign pattern of the perturbation; the search below works on magnitudes.
    sigma = y.sign()
    # Per-coordinate slack before the box constraint is hit (clamped to <= 0).
    u = torch.min(1 - x - y, x + y).clamp_max(0)
    l = -y.abs()
    d = u.clone()
    # Merge and sort the breakpoints of the piecewise-linear budget function.
    bs, indbs = torch.sort(-torch.cat((u, l), dim=1), dim=1)
    bs2 = F.pad(bs[:, 1:], (0, 1))
    # +1 for breakpoints originating from u, -1 for those from l.
    inu = 2 * (indbs < u.shape[1]).float() - 1
    size1 = inu.cumsum(dim=1)
    s1 = -u.sum(dim=1)
    c = eps + l.sum(dim=1)
    # c5 marks samples whose perturbation exceeds the budget and must shrink.
    c5 = s1 + c < 0
    # Cumulative budget consumption evaluated at each breakpoint.
    s = s1.unsqueeze(-1) + torch.cumsum((bs2 - bs) * size1, dim=1)
    if c5.any():
        # Binary search over breakpoints for the threshold meeting the budget.
        lb = torch.zeros(c5.sum(), device=device)
        ub = torch.full_like(lb, bs.shape[1] - 1)
        nitermax = math.ceil(math.log2(bs.shape[1]))
        counter = 0
        while counter < nitermax:
            counter4 = torch.floor((lb + ub) / 2)
            counter2 = counter4.long()
            c8 = s[c5, counter2] + c[c5] < 0
            lb[c8] = counter4[c8]
            ub[~c8] = counter4[~c8]
            counter += 1
        lb2 = lb.long()
        # Linear interpolation between breakpoints gives the threshold level alpha.
        alpha = (-s[c5, lb2] - c[c5]) / size1[c5, lb2 + 1] + bs2[c5, lb2]
        # Clamp each coordinate's correction between its box and magnitude limits.
        d[c5] = -torch.min(torch.max(-u[c5], alpha.unsqueeze(-1)), -l[c5])
    # Restore the sign pattern and the original tensor shape.
    return (sigma * d).view(shape)
def check_oscillation(loss_steps: Tensor, j: | |
# -*- coding: utf-8 -*-
"""Command line interface for diluvian."""
from __future__ import print_function
import argparse
import logging
import os
import random
import re
import six
from .config import CONFIG
def _make_main_parser():
"""Construct the argparse parser for the main CLI.
This exists as a separate function so the parser can be used to
auto-generate CLI documentation in Sphinx.
Returns
-------
argparse.ArgumentParser
Parser for the main CLI and all subcommands.
"""
common_parser = argparse.ArgumentParser(add_help=False)
common_parser.add_argument(
'-c', '--config-file', action='append', dest='config_files', default=[],
help='Configuration files to use. For defaults, see `diluvian/conf/default.toml`. '
'Values are overwritten in the order provided.')
common_parser.add_argument(
'-cd', action='append_const', dest='config_files',
const=os.path.join(os.path.dirname(__file__), 'conf', 'default.toml'),
help='Add default configuration file to chain of configuration files.')
common_parser.add_argument(
'-m', '--model-file', dest='model_file', default=None,
help='Existing network model file to use for prediction or continued training.')
common_parser.add_argument(
'-v', '--volume-file', action='append', dest='volume_files', default=[],
help='Volume configuration files. For example, see `diluvian/conf/cremi_datasets.toml`.'
'Values are overwritten in the order provided.')
common_parser.add_argument(
'--no-in-memory', action='store_false', dest='in_memory', default=True,
help='Do not preload entire volumes into memory.')
common_parser.add_argument(
'-rs', '--random-seed', action='store', dest='random_seed', type=int,
help='Seed for initializing the Python and NumPy random generators. '
'Overrides any seed specified in configuration files.')
common_parser.add_argument(
'-l', '--log', dest='log_level',
choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
help='Set the logging level.')
parser = argparse.ArgumentParser(description='Train or run flood-filling networks on EM data.')
commandparsers = parser.add_subparsers(help='Commands', dest='command')
train_parser = commandparsers.add_parser(
'train', parents=[common_parser],
help='Train a network from labeled volumes.')
train_parser.add_argument(
'-mo', '--model-output-filebase', dest='model_output_filebase', default=None,
help='Base filename for the best trained model and other output artifacts, '
'such as metric plots and configuration state.')
train_parser.add_argument(
'-mc', '--model-checkpoint-file', dest='model_checkpoint_file', default=None,
help='Filename for model checkpoints at every epoch. '
'This is different than the model output file; if provided, this HDF5 model '
'file is saved every epoch regardless of validation performance.'
'Can use Keras format arguments: https://keras.io/callbacks/#modelcheckpoint')
train_parser.add_argument(
'--early-restart', action='store_true', dest='early_restart', default=False,
help='If training is aborted early because an early abort metric '
'criteria, restart training with a new random seed.')
train_parser.add_argument(
'--tensorboard', action='store_true', dest='tensorboard', default=False,
help='Output tensorboard log files while training (limited to network graph).')
train_parser.add_argument(
'--viewer', action='store_true', dest='viewer', default=False,
help='Create a neuroglancer viewer for a training sample at the end of training.')
train_parser.add_argument(
'--metric-plot', action='store_true', dest='metric_plot', default=False,
help='Plot metric history at the end of training. '
'Will be saved as a PNG with the model output base filename.')
fill_common_parser = argparse.ArgumentParser(add_help=False)
fill_common_parser.add_argument(
'--partition-volumes', action='store_true', dest='partition_volumes', default=False,
help='Partition volumes and only fill the validation partition.')
fill_common_parser.add_argument(
'--no-bias', action='store_false', dest='bias', default=True,
help='Overwrite prediction mask at the end of each field of view inference '
'rather than using the anti-merge bias update.')
fill_common_parser.add_argument(
'--move-batch-size', dest='move_batch_size', default=1, type=int,
help='Maximum number of fill moves to process in each prediction batch.')
fill_common_parser.add_argument(
'--max-moves', dest='max_moves', default=None, type=int,
help='Cancel filling after this many moves.')
fill_common_parser.add_argument(
'--remask-interval', dest='remask_interval', default=None, type=int,
help='Interval in moves to reset filling region mask based on '
'the seeded connected component.')
fill_parser = commandparsers.add_parser(
'fill', parents=[common_parser, fill_common_parser],
help='Use a trained network to densely segment a volume.')
fill_parser.add_argument(
'--seed-generator', dest='seed_generator', default='sobel', nargs='?',
# Would be nice to pull these from .preprocessing.SEED_GENERATORS,
# but want to avoid importing so that CLI is responsive.
choices=['grid', 'sobel'],
help='Method to generate seed locations for flood filling.')
fill_parser.add_argument(
'--ordered-seeds', action='store_false', dest='shuffle_seeds', default=True,
help='Do not shuffle order in which seeds are processed.')
fill_parser.add_argument(
'--ignore-mask', dest='ignore_mask', default=False,
help='Ignore the mask channel when generating seeds.')
fill_parser.add_argument(
'--background-label-id', dest='background_label_id', default=0, type=int,
help='Label ID to output for voxels not belonging to any filled body.')
fill_parser.add_argument(
'--viewer', action='store_true', dest='viewer', default=False,
help='Create a neuroglancer viewer for a each volume after filling.')
fill_parser.add_argument(
'--max-bodies', dest='max_bodies', default=None, type=int,
help='Cancel filling after this many bodies (only useful for '
'diagnostics).')
fill_parser.add_argument(
'--reject-early-termination', action='store_true',
dest='reject_early_termination', default=False,
help='Reject seeds that terminate early, e.g., due to maximum '
'move limits.')
fill_parser.add_argument(
'--resume-file', dest='resume_filename', default=None,
help='Filename for the TOML configuration file of a segmented '
'label volume from which to resume filling. The configuration '
'should only contain one dataset.')
fill_parser.add_argument(
'segmentation_output_file', default=None,
help='Filename for the HDF5 segmentation output, without '
'extension. Should contain "{volume}", which will be '
'substituted with the volume name for each respective '
'volume\'s bounds.')
bounds_common_parser = argparse.ArgumentParser(add_help=False)
bounds_common_parser.add_argument(
'--bounds-num-moves', dest='bounds_num_moves', default=None, nargs=3, type=int,
help='Number of moves in direction to size the subvolume bounds.')
sparse_fill_parser = commandparsers.add_parser(
'sparse-fill', parents=[common_parser, fill_common_parser, bounds_common_parser],
help='Use a trained network to fill random regions in a volume.')
sparse_fill_parser.add_argument(
'--augment', action='store_true', dest='augment', default=False,
help='Apply training augmentations to subvolumes before filling.')
sparse_fill_parser.add_argument(
'-bi', '--bounds-input-file', dest='bounds_input_file', default=None,
help='Filename for bounds CSV input. Should contain "{volume}", which will be '
'substituted with the volume name for each respective volume\'s bounds.')
validate_parser = commandparsers.add_parser( # noqa
'validate', parents=[common_parser],
help='Run a model on validation data.')
evaluate_parser = commandparsers.add_parser(
'evaluate', parents=[common_parser],
help='Evaluate a filling result versus a ground truth.')
evaluate_parser.add_argument(
'--border-threshold', dest='border_threshold', default=25, type=float,
help='Region border threshold (in nm) to ignore. Official CREMI '
'default is 25nm.')
evaluate_parser.add_argument(
'--partition-volumes', action='store_true', dest='partition_volumes', default=False,
help='Partition volumes and only evaluate the validation partitions.')
evaluate_parser.add_argument(
'ground_truth_name', default=None,
help='Name of the ground truth volume.')
evaluate_parser.add_argument(
'prediction_name', default=None,
help='Name of the prediction volume.')
view_parser = commandparsers.add_parser(
'view', parents=[common_parser],
help='View a set of co-registered volumes in neuroglancer.')
view_parser.add_argument(
'--partition-volumes', action='store_true', dest='partition_volumes', default=False,
help='Partition volumes and view centered at the validation '
'partitions.')
view_parser.add_argument(
'volume_name_regex', default='.', nargs='?',
help='Regex to filter which volumes of those defined in the '
'volume configuration should be loaded.')
check_config_parser = commandparsers.add_parser(
'check-config', parents=[common_parser],
help='Check a configuration value.')
check_config_parser.add_argument(
'config_property', default=None, nargs='?',
help='Name of the property to show, e.g., `training.batch_size`.')
gen_subv_bounds_parser = commandparsers.add_parser(
'gen-subv-bounds', parents=[common_parser, bounds_common_parser],
help='Generate subvolume bounds.')
gen_subv_bounds_parser.add_argument(
'bounds_output_file', default=None,
help='Filename for the CSV output. Should contain "{volume}", which will be '
'substituted with the volume name for each respective volume\'s bounds.')
gen_subv_bounds_parser.add_argument(
'num_bounds', default=None, type=int,
help='Number of bounds to generate.')
return parser
def main():
    """Entry point for the diluvian command line interface."""
    parser = _make_main_parser()
    args = parser.parse_args()

    if args.log_level:
        logging.basicConfig(level=logging.getLevelName(args.log_level))
    if args.config_files:
        CONFIG.from_toml(*args.config_files)
    if args.random_seed:
        # A CLI-provided seed overrides any seed from the configuration files.
        CONFIG.random_seed = args.random_seed

    def init_seeds():
        # Seed the Python, NumPy and TensorFlow RNGs from the configured seed.
        # numpy/tensorflow are imported lazily so short commands stay fast.
        random.seed(CONFIG.random_seed)
        import numpy as np
        np.random.seed(CONFIG.random_seed)
        import tensorflow as tf
        tf.set_random_seed(CONFIG.random_seed)

    if args.command == 'train':
        # Late import to prevent loading large modules for short CLI commands.
        init_seeds()
        from .training import EarlyAbortException, train_network
        volumes = load_volumes(args.volume_files, args.in_memory)
        # Retry loop: on early abort (with --early-restart), reseed and retrain;
        # otherwise a single training run.
        while True:
            try:
                train_network(model_file=args.model_file,
                              volumes=volumes,
                              model_output_filebase=args.model_output_filebase,
                              model_checkpoint_file=args.model_checkpoint_file,
                              tensorboard=args.tensorboard,
                              viewer=args.viewer,
                              metric_plot=args.metric_plot)
            except EarlyAbortException as inst:
                if args.early_restart:
                    import numpy as np
                    # Draw a fresh seed distinct from the current one.
                    new_seed = CONFIG.random_seed
                    while new_seed == CONFIG.random_seed:
                        new_seed = np.random.randint(int(1e8))
                    CONFIG.random_seed = new_seed
                    logging.warning(str(inst))
                    logging.warning('Training aborted, restarting with random seed %s', new_seed)
                    init_seeds()
                    continue
                else:
                    logging.critical(str(inst))
                    break
            break

    elif args.command == 'fill':
        # Late import to prevent loading large modules for short CLI commands.
        init_seeds()
        from .diluvian import fill_volumes_with_model
        volumes = load_volumes(args.volume_files, args.in_memory)
        fill_volumes_with_model(args.model_file,
                                volumes,
                                args.segmentation_output_file,
                                resume_filename=args.resume_filename,
                                partition=args.partition_volumes,
                                viewer=args.viewer,
                                seed_generator=args.seed_generator,
                                background_label_id=args.background_label_id,
                                bias=args.bias,
                                move_batch_size=args.move_batch_size,
                                max_moves=args.max_moves,
                                max_bodies=args.max_bodies,
                                filter_seeds_by_mask=not args.ignore_mask,
                                reject_early_termination=args.reject_early_termination,
                                remask_interval=args.remask_interval,
                                shuffle_seeds=args.shuffle_seeds)

    elif args.command == 'sparse-fill':
        # Late import to prevent loading large modules for short CLI commands.
        init_seeds()
        from .diluvian import fill_region_with_model
        volumes = load_volumes(args.volume_files, args.in_memory)
        fill_region_with_model(args.model_file,
                               volumes=volumes,
                               partition=args.partition_volumes,
                               augment=args.augment,
                               bounds_input_file=args.bounds_input_file,
                               bias=args.bias,
                               move_batch_size=args.move_batch_size,
                               max_moves=args.max_moves,
                               remask_interval=args.remask_interval,
                               moves=args.bounds_num_moves)

    elif args.command == 'validate':
        # Late import to prevent loading large modules for short CLI commands.
        init_seeds()
        from .training import validate_model
        volumes = load_volumes(args.volume_files, args.in_memory)
        validate_model(args.model_file, volumes)

    elif args.command == 'evaluate':
        from .diluvian import evaluate_volume
        volumes = load_volumes(args.volume_files, args.in_memory)
        evaluate_volume(volumes,
                        args.ground_truth_name,
                        args.prediction_name,
                        partition=args.partition_volumes,
                        border_threshold=args.border_threshold)

    elif args.command == 'view':
        # Late import to prevent loading large modules for short CLI commands.
        from .diluvian import view_volumes
        volumes = load_volumes(args.volume_files, args.in_memory, name_regex=args.volume_name_regex)
        view_volumes(volumes, partition=args.partition_volumes)

    elif args.command == 'check-config':
        # Walk dotted property paths (e.g. `training.batch_size`) from CONFIG.
        prop = CONFIG
        if args.config_property is not None:
            properties = args.config_property.split('.')
            for p in properties:
                prop = getattr(prop, p)
        print(prop)

    elif args.command == 'gen-subv-bounds':
        # Late import to prevent loading large modules for short CLI commands.
        init_seeds()
        from .diluvian import generate_subvolume_bounds
        volumes = load_volumes(args.volume_files, args.in_memory)
        generate_subvolume_bounds(args.bounds_output_file,
                                  volumes,
                                  args.num_bounds,
                                  moves=args.bounds_num_moves)
def load_volumes(volume_files, in_memory, name_regex=None):
"""Load HDF5 volumes specified in a TOML description file.
Parameters
----------
volume_file : list of str
Filenames of the TOML volume descriptions to load.
in_memory : bool
If true, the entire dataset is read into an in-memory volume.
Returns
-------
diluvian.volumes.Volume
"""
# Late import to prevent loading large modules for short CLI | |
import numpy as np
from numpy import dot as dot
from numpy.linalg import inv as inverse
import sys, cv2
from .utils import *
import time
from .processor import Processor
from .line_detection_processor import LineDetectionProcessor
from .dial_processor import DialProcessor
class TrackerProcessor(Processor):
@property
def objects(self):
    '''Tracked objects; each should be a dictionary with center, brect, name, and id.

    When a dial reader is attached, its objects are prepended to the
    tracked list.
    '''
    if self.dialReader is None:
        return self._tracking
    return self.dialReader._objects + self._tracking
def __init__(self, camera, detector_stride, background, delete_threshold_period=1.0, stride=2, detectLines = True, readDials = True, do_tracking = True, alpha=0.8):
    """Set up object tracking over *camera* frames.

    Parameters
    ----------
    camera : frame source passed through to the Processor base class.
    detector_stride : int
        Stride of the external detector; used (with delete_threshold_period)
        to size the observation-count deletion threshold. If <= 0,
        ``ticks_per_obs`` is never set — presumably no detector is attached;
        TODO confirm callers never read it in that case.
    background : image (numpy-like) whose shape[0] (height, assumed relative
        to a 720px reference) scales all pixel-distance thresholds.
    delete_threshold_period : float
        Seconds-like period controlling how long an unobserved object survives.
    stride : int
        Frame stride for this processor.
    detectLines : bool
        Attach a LineDetectionProcessor for connection detection.
    readDials : bool
        Attach a DialProcessor whose objects are merged into `objects`.
    do_tracking : bool
        Enable dense optical-flow tracking.
    alpha : float
        Spring constant (see inline comment below).
    """
    super().__init__(camera, ['track','line-segmentation'], stride)
    self._tracking = []  # list of tracked-object dicts (center, brect, name, id, ...)
    self.do_tracking = do_tracking #this should only be False if we're using darkflow
    self.alpha = alpha #this is the spring constant
    self.labels = {}
    self.stride = stride
    self.ticks = 0  # frame counter; incremented in process_frame
    if(do_tracking):
        self.optflow = cv2.DualTVL1OpticalFlow_create()#use dense optical flow to track
        self.detect_interval = 3  # re-detect good features every N frames
        self.prev_gray = None  # previous grayscale frame for optical flow
        self.tracks = []  # feature points from goodFeaturesToTrack
        self.min_pts_near = 4#the minimum number of points we need to say an object's center is here
        # Squared pixel-distance threshold; NOTE(review): process_frame compares
        # this against distance_pts(...), which by name is unsquared — confirm units.
        self.pts_dist_squared_th = int(75.0 / 2 / 720.0 * background.shape[0])**2
        self.feature_params = dict( maxCorners = 500,
                                    qualityLevel = 0.3,
                                    minDistance = 7,
                                    blockSize = 7 )
    print('initializing trackerprocessor. background.shape is {} by {}'.format(background.shape[0], background.shape[1]))
    self.dist_th_upper = int(150.0 / 720.0 * background.shape[0])# distance upper threshold, in pixels
    self.dist_th_lower = int(75.0 / 720.0 * background.shape[0]) # to account for the size of the reactor
    print('dist_th_upper is {} and dist_th_lower is {}'.format(self.dist_th_upper, self.dist_th_lower))
    # Cap on the per-object observation counter (see process_frame).
    self.max_obs_possible = 24

    # set up line detector
    if detectLines:
        self.lineDetector = LineDetectionProcessor(camera,stride,background)
    else:
        self.lineDetector = None

    # need to keep our own ticks because
    # we don't know frame index when track() is called
    if detector_stride > 0:
        self.ticks_per_obs = detector_stride * delete_threshold_period /self.stride

    if readDials:
        self.dialReader = DialProcessor(camera, stride=1)
    else:
        self.dialReader = None
def close(self):
    '''Shut down this processor along with any attached dial reader.'''
    super().close()
    dial_reader = self.dialReader
    if dial_reader is not None:
        dial_reader.close()
async def process_frame(self, frame, frame_ind):
    """Advance tracking by one frame.

    Computes dense optical flow against the previous frame, moves each
    tracked object's center by the flow at its location (when the move is
    plausible), decays/boosts per-object observation counters, deletes
    stale objects, and refreshes object connection info.

    NOTE(review): the original file's indentation was lost; block nesting
    below was reconstructed from context — confirm against upstream.
    """
    self.ticks += 1
    delete = []  # indices into self._tracking scheduled for removal
    if(self.do_tracking):
        smaller_frame = frame
        smaller_frame = smaller_frame#4x downsampling
        smaller_frame = cv2.cvtColor(smaller_frame, cv2.COLOR_BGR2GRAY)
        gray = smaller_frame#cv2.UMat(smaller_frame)
        if(self.prev_gray is None):
            # First frame: nothing to diff against yet.
            self.prev_gray = gray#gray
            return
        img0, img1 = self.prev_gray, gray#gray
        #p0 = np.float32(self.tracks).reshape(-1, 1, 2)\
        # Dense optical flow field; p1[y][x] is the (dx, dy) flow at that pixel.
        p1 = self.optflow.calc(img0, img1, None)#cv2.calcOpticalFlowFarneback(img0, img1, None, 0.5, 2, 15, 2, 5, 1.1, 0)#, p0)#, None, **self.lk_params) p1, _st, _err
        if(frame_ind % self.detect_interval == 0 or len(self.tracks)==0):
            # Periodically refresh corner features over the whole frame.
            mask = np.zeros((smaller_frame.shape), dtype=np.uint8)#np.zeros_like(gray)
            mask[:] = 255
            self.tracks = np.float32(cv2.goodFeaturesToTrack(smaller_frame, mask=mask, **self.feature_params)).reshape(-1,2)

    for i,t in enumerate(self._tracking):
        old_center = t['center_scaled']  # NOTE(review): unused — consider removing
        # Reset per-frame connection state; _connect_objects repopulates it.
        t['connectedToPrimary'] = [] # list of tracked objects it is connected to as the primary/source node
        t['connectedToSecondary'] = []
        t['connectedToSource'] = False
        #status,brect = t['tracker'].update(umat_frame)
        # Decay the observation counter; it is boosted again on a good update.
        t['observed'] -= 1
        if(self.do_tracking):
            # we know our objects should stay the same size all of the time.
            # check if the size dramatically changed. if so, the object most likely was removed
            # if not, rescale the tracked brect to the correct size
            #print("t['center_scaled'] is {}".format(t['center_scaled']))
            center_unscaled = (t['center_scaled'][0]*smaller_frame.shape[1] , t['center_scaled'][1]*smaller_frame.shape[0])
            #print('center_unscaled is {} and smaller_frame.shape is {}'.format(center_unscaled, smaller_frame.shape))
            #print('the dimensions of p1 are {}'.format(p1.shape))
            a = int(center_unscaled[1])
            b = int(center_unscaled[0])
            flow_at_center = [p1[a][b][0], p1[a][b][1]]#get the flow computed at previous center of object
            #flow_at_center = flow_at_center[::-1]#this is reversed for some reason..?
            flow_at_center = scale_point(flow_at_center, smaller_frame)
            print('flow_at_center is {}'.format(flow_at_center))
            dist = distance_pts([[0,0], flow_at_center ])#this is the magnitude of the vector
            # check if its new location is a reflection, or drastically far away
            near_pts = 0
            for pt in self.tracks:
                # NOTE(review): distance_pts (unsquared, by name) is compared with a
                # squared threshold here — possible unit mismatch; confirm in .utils.
                if(distance_pts([center_unscaled, pt]) <= self.pts_dist_squared_th):
                    near_pts += 1
            if (dist < .05 * max(smaller_frame.shape) and near_pts >= 5):#don't move more than 5% of the biggest dimension
                #print('Updated distance is {}'.format(dist))
                # rescale the brect to match the original area?
                t['center_scaled'][0] += flow_at_center[0]
                t['center_scaled'][1] += flow_at_center[1]
                # Reward a plausible update, capped at max_obs_possible.
                t['observed'] = min(t['observed'] +2, self.max_obs_possible)
                #put note about it
        # check obs counts
        if t['observed'] < 0:
            delete.append(i)

    # Remove stale objects; offset compensates for earlier deletions shifting indices.
    offset = 0
    delete.sort()
    for i in delete:
        for j,t in enumerate(self._tracking):
            # remove any references of this node from connectedToPrimary
            t2 = self._tracking[i-offset]
            if (t2['id'],t2['label']) in t['connectedToPrimary']:
                index = t['connectedToPrimary'].index((t2['id'],t2['label']))
                del t['connectedToPrimary'][index]
        del self._tracking[i - offset]
        offset += 1

    #update _tracking with the connections each object has
    await self._connect_objects(frame.shape)
    #f frame_ind % 4 * self.stride == 0:
    #    for t in self._tracking:
    #        print('{} is connected to ({})'.format(t['label'], t['connectedToPrimary']))
    #print('Is {} connected to the feed source? {}'.format(t['label'], t['connectedToSource']))
    if(self.do_tracking):
        self.prev_gray = gray
    return
    async def _connect_objects(self, frameSize):
        """Infer which tracked objects are wired together (and to the feed
        source) from the detected lines, updating self._tracking in place.

        frameSize: frame shape as (height, width, ...) -- indexed [0]=h, [1]=w.
        NOTE(review): the triple-quoted description below sits *after* the
        early-return guard, so it is a plain expression, not a docstring;
        left untouched to keep the code identical.
        """
        if (self.lineDetector is None) or len(self.lineDetector.lines) == 0:
            return
        ''' Iterates through tracked objects and the detected lines, finding objects are connected. Updates self._tracking to have directional knowledge of connections'''
        source_position_scaled = (1.0,0.5)#first coord is X from L to R, second coord is Y from TOP to BOTTOM
        # source sits at the right edge, vertically centered (unscaled pixels)
        source_position_unscaled = (frameSize[1],round(frameSize[0]*.5))
        #source_position_unscaled = self._unscale_point(source_position_scaled, frameSize)
        # distance thresholds are tuned for 720p and scaled to the actual frame height
        source_dist_thresh_upper = int(200.0 / 720.0 * frameSize[0])
        source_dist_thresh_lower = int(10.0 / 720.0 * frameSize[0])
        #print('source_dist_thresh_upper is {} and framesize[1] is {}'.format(source_dist_thresh_upper, frameSize[1]))
        used_lines = []  # indices of lines already consumed by a connection
        for i,t1 in enumerate(self._tracking):
            center = self._unscale_point(t1['center_scaled'], frameSize)
            # find all lines that have an endpoint near the center of this object
            for k,line in enumerate(self.lineDetector.lines):
                if k in used_lines:
                    continue # dont attempt to use this line if it is already associated with something
                dist_ep1 = distance_pts((center, line['endpoints'][0]))
                dist_ep2 = distance_pts((center, line['endpoints'][1]))
                # NOTE(review): nearbyEndpointFound is assigned but never read
                nearbyEndpointFound = False
                #print('Distances for {} {} (position {}) to the endpoints are {} (position {}) and {} (position {})'.format(t1['label'], t1['id'], center, min(dist_ep1,dist_ep2), line['endpoints'][0], max(dist_ep1,dist_ep2), line['endpoints'][1]))
                if (val_in_range(dist_ep1,self.dist_th_lower,self.dist_th_upper) or val_in_range(dist_ep2,self.dist_th_lower,self.dist_th_upper)):
                    # we have a connection! use the endpoint that is further away to find another object thats close to it
                    if (dist_ep1 <= dist_ep2):
                        # use endpoint 2
                        endpoint = line['endpoints'][1]
                        #print('{} at {} is close to {} with a distance of {}, using {} to detect a connection'.format(t1['name'], center, dist_ep1, line['endpoints'][0], line['endpoints'][1]))
                    else:
                        endpoint = line['endpoints'][0]
                        #print('{} at {} is close to {} with a distance of {}, using {} to detect a connection'.format(t1['name'], center, dist_ep2, line['endpoints'][1], line['endpoints'][0]))
                    # first check if the opposite endpoint is closest to the source
                    dist_source = distance_pts((source_position_unscaled, endpoint))
                    if (val_in_range(dist_source, source_dist_thresh_lower, source_dist_thresh_upper)):
                        # connected to the source
                        t1['connectedToSource'] = True
                        #print('Item {} is connected to the source'.format(t1['label']))
                        used_lines.append(k)
                        break
                    #else:
                        #print('Distance from source to endpoint was {}'.format(dist_source))
                    # iterate over all tracked objects again to see if the end of this line is close enough to any other object
                    for j,t2 in enumerate(self._tracking):
                        #print('made it this FAR!!! {} {}'.format(t1['id'], t2['id']))#now this DOES print for whatever reason...
                        if (t1['id'] == t2['id']):
                            # don't attempt to find connections to yourself
                            continue
                        # also don't attempt a connection if these two are already connected
                        if (((t2['id'], t2['label']) in t1['connectedToPrimary']) or ((t2['id'], t2['label']) in t1['connectedToSecondary']) ):
                            continue
                        # check if the slope between the two rxrs and that of the line are similar
                        center2 = self._unscale_point(t2['center_scaled'], frameSize)
                        #print('the distance between objects {} {} and {} {} is {}'.format(t1['label'],t1['id'], t2['label'],t2['id'], distance_pts((center, center2))))#now this line is printing again...
                        lineSlope = line['slope']
                        lineAngle = np.pi/2.0 + np.arctan(lineSlope)#compare angles instead of slopes; bounded space from 0 to pi
                        # order the endpoints consistently (larger y first) before fitting
                        rxrSlope, intercept = line_from_endpoints((center, center2)) if center[1] > center2[1] else line_from_endpoints((center2, center))
                        rxrAngle = np.pi/2.0 + np.arctan(rxrSlope)
                        angleDiff = abs(lineAngle - rxrAngle)#at most pi
                        #print('line angle is {} deg, rxr angle is {} deg, and angle % difference is {}'.format(lineAngle * 180./np.pi, rxrAngle * 180./np.pi, angleDiff/np.pi))#print in degrees for legibility
                        #sys.stdout.flush()
                        # reject candidates whose direction differs more than 30 degrees
                        angleThresh = np.pi/6.0
                        if angleDiff > angleThresh:
                            continue
                        dist2 = distance_pts((center2, endpoint))
                        if (val_in_range(dist2, self.dist_th_lower,self.dist_th_upper)):
                            # its a connection! list this one as a connection, then break out of this loop
                            # we can create directionality by having two lists
                            # figure out which one is further to the left by checking the which x coordinate is greater (counter-intuitive, but the camera view is flipped)
                            # if equal, use the y coordinate
                            if (center[0] > center2[0]) or (center[0] == center2[0] and center[1] < center2[1]):
                                # first point is the primary
                                t1['connectedToPrimary'].append((t2['id'], t2['label']))
                                t2['connectedToSecondary'].append((t1['id'], t1['label']))
                            else:
                                t2['connectedToPrimary'].append((t1['id'],t1['label']))
                                t1['connectedToSecondary'].append((t2['id'], t2['label']))
                            #print('{} is connected to {}'.format(t1['name'], t2['name'])) #debug message
                            #print('Item {} is connected to {}'.format(t1['label'], t2['label']))
                            # make sure that the line used to discern this connection is not used again
                            used_lines.append(k)
                            break
                        else:
                            pass
                            #print('dist from object {} was {}'.format(t2['name'], dist2))
def | |
#!/usr/bin/env python3
"""pybuild -- build python3 from source (currently for macos)
usage: pybuild.py [-h] [-v] {all,framework,shared,static} ...
pybuild: builds python from src
optional arguments:
-h, --help show this help message and exit
-v, --version show program's version number and exit
subcommands:
valid subcommands
{framework,shared,static}
additional help
all build all python variations
framework build framework python
shared build shared python
static build static python
"""
import argparse
import logging
import os
import platform
import re
import shutil
import subprocess
import sys
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
# import glob
# Global build configuration.
IGNORE_ERRORS = False  # forwarded to shutil.rmtree(ignore_errors=...) in Builder.remove
DEBUG = True
LOG_LEVEL = logging.DEBUG if DEBUG else logging.INFO
LOG_FORMAT = '%(relativeCreated)-4d %(levelname)-5s: %(name)-10s %(message)s'
logging.basicConfig(level=LOG_LEVEL, format=LOG_FORMAT, stream=sys.stdout)
# Build the same python version as the interpreter running this script.
PYTHON_VERSION_STRING = platform.python_version() # e.g '3.9.1'
class Project:
    """Filesystem layout of the build, rooted at the current directory."""
    root = Path.cwd()
    patch = root / 'patch'          # patch files / Setup.local, grouped by python version
    build = root / 'build'          # everything generated lives under here
    downloads = build / 'downloads' # fetched source archives
    src = build / 'src'             # unpacked source trees
    lib = build / 'lib'             # per-dependency install prefixes
class DependencyManager:
    """Aggregates and copies dylib dependencies and fixes install-name refs.

    target: dylib to be made relocatable
    frameworks_dir: where target dylib will be copied to with copied dependents
    staticlibs_dir: where the static (.a) twins of the dylib deps are copied
    exec_ref: back ref for executable or plugin
    """

    def __init__(self,
                 target,
                 frameworks_dir='build',
                 staticlibs_dir=None,
                 exec_ref='@loader_path/../Frameworks'):
        self.target = target
        self.frameworks_dir = frameworks_dir
        self.staticlibs_dir = staticlibs_dir
        self.exec_ref = exec_ref
        self.install_names = {}  # basename -> [(dep path, '@rpath/<file>'), ...]
        self.deps = []           # unique dependency paths, in discovery order
        self.dep_list = []       # [dep path, '@rpath/<file>'] pairs to copy

    def is_valid_path(self, dep_path):
        """Return True for dependency locations that should be relocated.

        NOTE(review): '/User/' looks like a typo for '/Users/' -- confirm
        intent before changing, since it widens what gets copied.
        """
        return (dep_path == '' or dep_path.startswith('/opt/local/')
                or dep_path.startswith('/usr/local/')
                or dep_path.startswith('/User/'))

    def get_deps(self, target=None):
        """Recursively collect dylib dependencies of `target` via `otool -L`.

        Fills self.install_names[basename] with (path, '@rpath/<file>') pairs
        and appends each newly seen path to self.deps.
        """
        if not target:
            target = self.target
        key = os.path.basename(target)
        self.install_names[key] = []
        result = subprocess.check_output(['otool', '-L', target])
        entries = [
            line.decode('utf-8').strip() for line in result.splitlines()
        ]
        for entry in entries:
            # otool lines look like: "<path> (compatibility version X, current version Y)"
            match = re.match(r'\s*(\S+)\s*\(compatibility version .+\)$',
                             entry)
            if match:
                path = match.group(1)
                (dep_path, dep_filename) = os.path.split(path)
                if self.is_valid_path(dep_path):
                    if dep_path == '':
                        # bare install names are assumed to live in /usr/local/lib
                        path = os.path.join('/usr/local/lib', dep_filename)
                        dep_path, dep_filename = os.path.split(path)
                    item = (path, '@rpath/' + dep_filename)
                    self.install_names[key].append(item)
                    if path not in self.deps:
                        self.deps.append(path)
                        self.get_deps(path)  # recurse into the new dependency

    def process_deps(self):
        """Translate collected deps into (orig path, '@rpath/...') copy items."""
        # FIX: dropped the unused `dest` local the original computed here.
        for dep in self.deps:
            dep_filename = os.path.basename(dep)
            self.dep_list.append([dep, '@rpath/' + dep_filename])

    def copy_dylibs(self):
        """Copy target + dependencies into frameworks_dir; re-id the target."""
        if not os.path.exists(self.frameworks_dir):
            os.mkdir(self.frameworks_dir)
        # cp target to frameworks_dir
        if os.path.dirname(self.target) != self.frameworks_dir:
            dest = os.path.join(self.frameworks_dir,
                                os.path.basename(self.target))
            shutil.copyfile(self.target, dest)
            os.chmod(dest, 0o644)
            cmdline = ['install_name_tool', '-id', self.exec_ref, dest]
            err = subprocess.call(cmdline)
            if err != 0:
                raise RuntimeError("Failed to change '{0}' '{1}'".format(
                    dest, self.exec_ref))
        # copy the rest
        for item in self.dep_list:
            orig_path, transformed = item
            dirname, dylib = os.path.split(orig_path)
            dest = os.path.join(self.frameworks_dir, dylib)
            if not os.path.exists(dest):
                shutil.copyfile(orig_path, dest)
                os.chmod(dest, 0o644)

    def change_install_names(self):
        """Rewrite the recorded install names inside each copied dylib."""
        for key in sorted(self.install_names.keys()):
            target = os.path.join(self.frameworks_dir, key)
            deps = self.install_names[key]
            for dep in deps:
                old, new = dep
                (old_name_path, old_name_filename) = os.path.split(old)
                if key == old_name_filename:
                    # entry is the dylib's own id, not a reference to another lib
                    cmdline = ['install_name_tool', '-id', new, target]
                else:
                    cmdline = [
                        'install_name_tool', '-change', old, new, target
                    ]
                err = subprocess.call(cmdline)
                if err != 0:
                    raise RuntimeError(
                        "Failed to change '{0}' to '{1}' in '{2}".format(
                            old, new, target))

    def transform_exec(self, target):
        """Point an executable's dependency references at exec_ref locations."""
        result = subprocess.check_output(['otool', '-L', target])
        entries = [
            line.decode('utf-8').strip() for line in result.splitlines()
        ]
        for entry in entries:
            match = re.match(r'\s*(\S+)\s*\(compatibility version .+\)$',
                             entry)
            if match:
                path = match.group(1)
                (dep_path, dep_filename) = os.path.split(path)
                if self.is_valid_path(dep_path):
                    if dep_path == '':
                        path = os.path.join('/usr/local/lib', dep_filename)
                        dep_path, dep_filename = os.path.split(path)
                    dest = os.path.join(self.exec_ref, dep_filename)
                    cmdline = [
                        'install_name_tool', '-change', path, dest, target
                    ]
                    subprocess.call(cmdline)

    def copy_staticlibs(self):
        """Copy the static (.a) twin of every collected dylib dependency."""
        if not self.staticlibs_dir:
            raise Exception("must set 'staticlibs_dir parameter")
        for dep in self.deps:
            head, tail = os.path.split(dep)
            # BUG FIX: the original used tail.rstrip('.dylib'), which strips
            # any trailing run of the characters {., d, y, l, i, b} -- e.g.
            # 'libcurl.dylib' -> 'libcur' and 'libssl.dylib' -> 'libss' -- so
            # the matching .a was never found. Strip the suffix exactly, and
            # always append '.a'.
            base = tail[:-len('.dylib')] if tail.endswith('.dylib') else tail
            if '.' in base:
                # versioned names: 'libssl.1.1' -> 'libssl.1.a' (prior behavior)
                name = os.path.splitext(base)[0] + '.a'
            else:
                name = base + '.a'
            static = os.path.join(head, name)
            if os.path.exists(static):
                shutil.copyfile(static, os.path.join(self.staticlibs_dir,
                                                     name))
            else:
                print("revise: not exists", static)

    def process(self):
        """Full relocation pipeline.

        NOTE(review): the executable path './eg' is hard-coded.
        """
        self.get_deps()
        self.process_deps()
        self.copy_staticlibs()
        self.copy_dylibs()
        self.change_install_names()
        self.transform_exec('./eg')
class Builder(ABC):
    """Abstract base for building a dependency from source.

    Subclasses supply the class attributes declared below and override
    `download`/`build`.
    """

    # FIX: `depends_on` was annotated with the literal `[]` (a value, not a
    # type). Declare proper types; values are provided by subclasses.
    name: str          # e.g. 'openssl'
    version: str       # e.g. '1.1.1g'
    url_template: str  # format string accepting {name} and {version}
    depends_on: list   # Builder subclasses to build first
    libs_static: list  # static libs expected under prefix_lib (see libs_static_exist)

    def __init__(self, project, version=None, depends_on=None):
        """version/depends_on default to the class-level values."""
        self.project = project or Project()
        self.version = version or self.version
        # instantiate each dependency builder class against the same project
        self.depends_on = ([B(project) for B in depends_on] if depends_on else
                           [B(project) for B in self.depends_on])
        self.log = logging.getLogger(self.__class__.__name__)

    def __repr__(self):
        return f"<{self.__class__.__name__} '{self.name}-{self.version}'>"

    def __iter__(self):
        """Depth-first traversal of the dependency tree."""
        for dependency in self.depends_on:
            yield dependency
            yield from iter(dependency)

    @property
    def ver(self):
        """Short version: '3.9.1' -> '3.9'."""
        return ".".join(self.version.split('.')[:2])

    @property
    def ver_nodot(self):
        """Short version without the dot: '3.9' -> '39'."""
        return self.ver.replace('.', '')

    @property
    def name_version(self):
        """'<Name>-<version>', e.g. 'Python-3.9.1'."""
        return f'{self.name}-{self.version}'

    @property
    def name_ver(self):
        """'<name><X.Y>', e.g. 'python3.9'."""
        return f'{self.name.lower()}{self.ver}'

    @property
    def url(self):
        """Download URL built from url_template (as a Path)."""
        return Path(
            self.url_template.format(name=self.name, version=self.version))

    @property
    def name_archive(self):
        """Archive filename, e.g. 'Python-3.9.1.tgz'."""
        return f'{self.name_version}.tgz'

    @property
    def download_path(self):
        """Where the source archive is stored."""
        return self.project.downloads / self.name_archive

    @property
    def src_path(self):
        """Where the archive is unpacked."""
        return self.project.src / self.name_version

    @property
    def lib_path(self):
        """Alias for the install prefix."""
        return self.prefix

    @property
    def prefix(self):
        """Per-dependency install prefix, e.g. build/lib/openssl."""
        return self.project.lib / self.name.lower()

    @property
    def prefix_lib(self):
        return self.prefix / 'lib'

    @property
    def prefix_include(self):
        return self.prefix / 'include'

    @property
    def prefix_bin(self):
        return self.prefix / 'bin'

    def libs_static_exist(self):
        """True when every expected static lib is already installed."""
        return all((self.prefix_lib / lib).exists() for lib in self.libs_static)

    def cmd(self, shellcmd, *args, **kwargs):
        """Run a shell command; extra args are str.format'ed into it."""
        os.system(shellcmd.format(*args, **kwargs))

    def chdir(self, path):
        os.chdir(path)

    def move(self, src, dst):
        shutil.move(src, dst)

    def copytree(self, src, dst):
        shutil.copytree(src, dst)

    def copyfile(self, src, dst):
        shutil.copyfile(src, dst)

    def remove(self, path):
        """Remove a file or a whole directory tree (Path argument)."""
        if path.is_dir():
            shutil.rmtree(path, ignore_errors=IGNORE_ERRORS)
        else:
            path.unlink(missing_ok=True)

    def reset(self):
        """Drop the unpacked sources and the install prefix."""
        self.remove(self.src_path)
        self.remove(self.prefix)

    def download(self):
        "download target src"

    def build(self):
        "build target from src"
class OSXBuilder(Builder):
    """Builder with macOS defaults and a curl/tar download-and-unpack step."""
    mac_dep_target = '10.14'

    @property
    def dylib(self):
        """Dylib filename, e.g. 'libpython3.9.dylib'."""
        return f'lib{self.name.lower()}{self.ver}.dylib'

    def download(self):
        """Fetch and unpack this target's sources (and its dependencies')."""
        self.project.downloads.mkdir(parents=True, exist_ok=True)
        for dep in self.depends_on:
            dep.download()
        archive = self.download_path
        # fetch the archive only once
        if not archive.exists():
            self.log.info(f"downloading {archive}")
            self.cmd(f'curl -L --fail {self.url} -o {archive}')
        # unpack only once
        if not self.src_path.exists():
            self.project.src.mkdir(parents=True, exist_ok=True)
            self.log.info(f"unpacking {self.src_path}")
            self.cmd(f'tar -C {self.project.src} -xvf {archive}')
class OpensslBuilder(OSXBuilder):
    """Static openssl build producing libssl.a and libcrypto.a."""
    name = 'openssl'
    version = '1.1.1g'
    url_template = 'https://www.openssl.org/source/{name}-{version}.tar.gz'
    depends_on = []
    libs_static = ['libssl.a', 'libcrypto.a']

    def build(self):
        """Configure and install a static, test-free build into self.prefix."""
        if self.libs_static_exist():
            return  # already built
        self.chdir(self.src_path)
        self.cmd(f'./config no-shared no-tests --prefix={self.prefix}')
        self.cmd('make install_sw')
        self.chdir(self.project.root)
class Bzip2Builder(OSXBuilder):
    """Static bzip2 build producing libbz2.a."""
    name = 'bzip2'
    version = '1.0.8'
    url_template = 'https://sourceware.org/pub/bzip2/{name}-{version}.tar.gz'
    depends_on = []
    libs_static = ['libbz2.a']

    def build(self):
        """Build and install into self.prefix (bzip2 has no configure step)."""
        if self.libs_static_exist():
            return  # already built
        self.chdir(self.src_path)
        self.cmd(f'make install PREFIX={self.prefix}')
        self.chdir(self.project.root)
class XzBuilder(OSXBuilder):
    """Static xz-utils build producing liblzma.a."""
    name = 'xz'
    version = '5.2.5'
    url_template = 'http://tukaani.org/xz/{name}-{version}.tar.gz'
    depends_on = []
    # BUG FIX: xz installs its library as liblzma.a, not libxz.a; the old
    # value made libs_static_exist() permanently False, forcing a rebuild
    # on every run.
    libs_static = ['liblzma.a']

    def build(self):
        """Configure and install a static-only build into self.prefix."""
        if self.libs_static_exist():
            return  # already built
        self.chdir(self.src_path)
        self.cmd(f"""MACOSX_DEPLOYMENT_TARGET={self.mac_dep_target} \
        ./configure --disable-shared --enable-static --prefix={self.prefix}"""
                 )
        self.cmd('make && make install')
        self.chdir(self.project.root)
class PythonBuilder(OSXBuilder):
name = 'Python'
version = PYTHON_VERSION_STRING
url_template = 'https://www.python.org/ftp/python/{version}/{name}-{version}.tgz'
depends_on = [OpensslBuilder, Bzip2Builder, XzBuilder]
suffix = ""
setup_local = None
patch = None
    def __init__(self, project=None, version=None, depends_on=None):
        """Like Builder.__init__, but every argument is optional."""
        super().__init__(project, version, depends_on)
        # dependency manager attributes (revise)
        # NOTE(review): duplicates DependencyManager's state; consider
        # delegating to that class instead of copying its fields here.
        self.install_names = {}  # dylib basename -> [(dep path, '@rpath/<file>')]
        self.deps = []           # discovered dependency paths
        self.dep_list = []       # mirrors DependencyManager.dep_list
# ------------------------------------------------------------------------
# python properties
    @property
    def static_lib(self):
        """Static library filename, e.g. 'libpython3.9.a'."""
        return f'lib{self.name.lower()}{self.ver}.a'
    @property
    def python_lib(self):
        """Stdlib directory: <prefix>/lib/python<X.Y>."""
        return self.prefix_lib / self.name_ver
    @property
    def site_packages(self):
        """Third-party package directory inside the stdlib tree."""
        return self.python_lib / 'site-packages'
    @property
    def lib_dynload(self):
        """Directory of compiled extension modules (.so) in the stdlib tree."""
        return self.python_lib / 'lib-dynload'
# ------------------------------------------------------------------------
# src-level operations
    def pre_process(self):
        """Hook for pre-build operations; no-op by default."""
    def post_process(self):
        """Hook for post-build operations; no-op by default."""
    def write_setup_local(self, setup_local=None):
        """Copy patch/<ver>/<setup_local> into the source tree as Modules/Setup.local.

        Falls back to self.setup_local; no-op when both are None.
        """
        if not any([setup_local, self.setup_local]):
            return
        if not setup_local:
            setup_local = self.setup_local
        self.copyfile(self.project.patch / self.ver / setup_local,
                      self.src_path / 'Modules' / 'Setup.local')
    def apply_patch(self, patch=None):
        """Apply patch/<ver>/<patch> with `patch -p1` (cwd must be the source tree).

        Falls back to self.patch; no-op when both are None.
        """
        if not any([patch, self.patch]):
            return
        if not patch:
            patch = self.patch
        self.cmd(f'patch -p1 < {self.project.patch}/{self.ver}/{patch}')
    def install(self):
        """Full clean build: reset, download, pre-process, build, post-process."""
        self.reset()
        self.download()
        self.pre_process()
        self.build()
        self.post_process()
    def install_python_pkg(self):
        """Install python then fix dylib references for a pkg layout.

        NOTE(review): both helpers are defined outside this chunk.
        """
        self.install_python()
        self.fix_python_dylib_for_pkg()
    def install_python_ext(self):
        """Install python then fix dylib references for an external layout.

        NOTE(review): both helpers are defined outside this chunk.
        """
        self.install_python()
        self.fix_python_dylib_for_ext()
# ------------------------------------------------------------------------
# post-processing operations
    def is_valid_path(self, dep_path):
        """True for dependency locations that should be relocated.

        NOTE(review): duplicate of DependencyManager.is_valid_path, and
        '/User/' may be a typo for '/Users/' -- confirm.
        """
        return (dep_path == '' or dep_path.startswith('/opt/local/')
                or dep_path.startswith('/usr/local/')
                or dep_path.startswith('/User/'))
    def get_deps(self, target=None):
        """Recursively collect dylib dependencies of `target` via `otool -L`.

        Fills self.install_names[basename] with (path, '@rpath/<file>') pairs
        and appends new paths to self.deps.
        NOTE(review): defaults to self.target, which this class's __init__
        never sets -- confirm a caller assigns it before calling with no arg.
        """
        if not target:
            target = self.target
        key = os.path.basename(target)
        self.install_names[key] = []
        result = subprocess.check_output(['otool', '-L', target])
        entries = [
            line.decode('utf-8').strip() for line in result.splitlines()
        ]
        for entry in entries:
            # otool lines: "<path> (compatibility version X, current version Y)"
            match = re.match(r'\s*(\S+)\s*\(compatibility version .+\)$',
                             entry)
            if match:
                path = match.group(1)
                (dep_path, dep_filename) = os.path.split(path)
                if self.is_valid_path(dep_path):
                    if dep_path == '':
                        # bare install names are assumed to live in /usr/local/lib
                        path = os.path.join('/usr/local/lib', dep_filename)
                        dep_path, dep_filename = os.path.split(path)
                    item = (path, '@rpath/' + dep_filename)
                    self.install_names[key].append(item)
                    if path not in self.deps:
                        self.deps.append(path)
                        self.get_deps(path)  # recurse into the new dependency
    def recursive_clean(self, name, pattern):
        """rm -rf every path under `name` whose listing matches `pattern` (grep -E)."""
        self.cmd(f'find {name} | grep -E "({pattern})" | xargs rm -rf')
    def clean_python_pyc(self, name):
        """Remove __pycache__ dirs and .pyc/.pyo files under `name`."""
        self.recursive_clean(name, r"__pycache__|\.pyc|\.pyo$")
    def clean_python_tests(self, name):
        """Remove paths matching 'test'/'tests' under `name` (broad substring match)."""
        self.recursive_clean(name, "tests|test")
    def rm_libs(self, names):
        """Remove the named entries from the stdlib directory."""
        for name in names:
            self.remove(self.python_lib / name)
    def rm_exts(self, names):
        """Remove the named compiled extension modules from lib-dynload."""
        for name in names:
            self.remove(self.python_lib / 'lib-dynload' /
                        f'{name}.cpython-{self.ver_nodot}-darwin.so')
    def rm_bins(self, names):
        """Remove the named scripts from the prefix bin directory."""
        for name in names:
            self.remove(self.prefix_bin / name)
    def clean_python_site_packages(self):
        """Delete the whole site-packages directory."""
        self.remove(self.python_lib / 'site-packages')
    def remove_packages(self):
        """Strip stdlib packages not needed in the trimmed build."""
        self.rm_libs([
            f'config-{self.ver}{self.suffix}-darwin',
            'idlelib',
            'lib2to3',
            'tkinter',
            'turtledemo',
            'turtle.py',
            'ctypes',
            'curses',
            'ensurepip',
            'venv',
        ])
    def remove_extensions(self):
        """Hook for stripping compiled extensions; no-op by default."""
        pass
    def remove_binaries(self):
        """Strip developer-facing scripts from bin/ (keeps the interpreter)."""
        self.rm_bins([
            f'2to3-{self.ver}',
            f'idle{self.ver}',
            f'easy_install-{self.ver}',
            f'pip{self.ver}',
            f'pyvenv-{self.ver}',
            f'pydoc{self.ver}',
            # f'python{self.ver}{self.suffix}',
            # f'python{self.ver}-config',
        ])
    def clean(self):
        """Shrink the installed python: caches, tests, site-packages, extras."""
        self.clean_python_pyc(self.prefix)
        self.clean_python_tests(self.python_lib)
        self.clean_python_site_packages()
        # distutils ships Windows .exe stubs that are useless on macOS
        for i in (self.python_lib / 'distutils' / 'command').glob('*.exe'):
            self.remove(i)
        self.remove(self.prefix_lib / 'pkgconfig')
        self.remove(self.prefix / 'share')
        self.remove_packages()
        self.remove_extensions()
        self.remove_binaries()
def ziplib(self):
temp_lib_dynload = self.prefix_lib / 'lib-dynload'
temp_os_py = self.prefix_lib / 'os.py'
self.remove(self.site_packages)
self.lib_dynload.rename(temp_lib_dynload)
self.copyfile(self.python_lib / 'os.py', temp_os_py)
zip_path | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pygtk
pygtk.require('2.0')
import gobject
import random
import os
import sys
import gtk
import math
import platform
import webbrowser
#EndImports
# Pyperclip v1.3 (Extract, only copy functions have been implemented to use with dualPrint.)
# A cross-platform clipboard module for Python.
# By <NAME> <EMAIL>
# On Mac, this module makes use of the pbcopy and pbpaste commands, which should come with the os.
# On Linux, this module makes use of the xclip command, which should come with the os. Otherwise run "sudo apt-get install xclip"
# Copyright (c) 2010, <NAME>
# All rights reserved.
#
# BSD-style license:
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the pyperclip nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY <NAME> "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <NAME> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Change Log:
# 1.2 Use the platform module to help determine OS.
# 1.3 Changed ctypes.windll.user32.OpenClipboard(None) to ctypes.windll.user32.OpenClipboard(0), after some people ran into some TypeError
def winSetClipboard(text):
    """Copy *text* to the Windows clipboard via ctypes/user32.

    ctypes is imported at module level only when running on Windows.
    """
    GMEM_DDESHARE = 0x2000
    ctypes.windll.user32.OpenClipboard(0)
    ctypes.windll.user32.EmptyClipboard()
    try:
        # works on Python 2 (bytes() only takes one argument)
        hCd = ctypes.windll.kernel32.GlobalAlloc(GMEM_DDESHARE, len(bytes(text))+1)
    except TypeError:
        # works on Python 3 (bytes() requires an encoding)
        hCd = ctypes.windll.kernel32.GlobalAlloc(GMEM_DDESHARE, len(bytes(text, 'ascii'))+1)
    pchData = ctypes.windll.kernel32.GlobalLock(hCd)
    try:
        # works on Python 2 (bytes() only takes one argument)
        ctypes.cdll.msvcrt.strcpy(ctypes.c_char_p(pchData), bytes(text))
    except TypeError:
        # works on Python 3 (bytes() requires an encoding)
        ctypes.cdll.msvcrt.strcpy(ctypes.c_char_p(pchData), bytes(text, 'ascii'))
    ctypes.windll.kernel32.GlobalUnlock(hCd)
    # 1 == CF_TEXT
    ctypes.windll.user32.SetClipboardData(1,hCd)
    ctypes.windll.user32.CloseClipboard()
def macSetClipboard(text):
    """Copy *text* to the macOS clipboard by piping it to `pbcopy`."""
    pipe = os.popen('pbcopy', 'w')
    pipe.write(text)
    pipe.close()
def gtkSetClipboard(text):
    """Copy *text* via gtk, persisting it after the process exits."""
    clipboard = gtk.Clipboard()
    clipboard.set_text(text)
    clipboard.store()
def qtSetClipboard(text):
    """Copy *text* via Qt; relies on the module-global `cb` set during
    backend selection (the QApplication clipboard)."""
    cb.setText(text)
def xclipSetClipboard(text):
    """Copy *text* by piping it to `xclip -selection c`."""
    pipe = os.popen('xclip -selection c', 'w')
    pipe.write(text)
    pipe.close()
def xselSetClipboard(text):
    """Copy *text* by piping it to `xsel -i`."""
    pipe = os.popen('xsel -i', 'w')
    pipe.write(text)
    pipe.close()
# Pick the platform-appropriate clipboard setter and expose it as copy().
if os.name == 'nt' or platform.system() == 'Windows':
    import ctypes
    setcb = winSetClipboard
elif os.name == 'mac' or platform.system() == 'Darwin':
    setcb = macSetClipboard
elif os.name == 'posix' or platform.system() == 'Linux':
    # prefer xclip, then xsel ('which' exits 0 when the tool exists)
    xclipExists = os.system('which xclip') == 0
    if xclipExists:
        setcb = xclipSetClipboard
    else:
        xselExists = os.system('which xsel') == 0
        if xselExists:
            setcb = xselSetClipboard
    # NOTE(review): this try unconditionally overrides the xclip/xsel choice
    # (a plain assignment cannot raise); upstream pyperclip guards it in an
    # else: branch -- confirm the intended fallback order.
    try:
        setcb = gtkSetClipboard
    except:
        try:
            import PyQt4.QtCore
            import PyQt4.QtGui
            # NOTE(review): QApplication is not imported by name here, so this
            # line would raise NameError (swallowed by the except below).
            app = QApplication([])
            cb = PyQt4.QtGui.QApplication.clipboard()
            setcb = qtSetClipboard
        except:
            raise Exception('Pyperclip requires the gtk or PyQt4 module installed, or the xclip command.')
# public entry point used by the rest of dualPrint
copy = setcb
#Continue dualPrint...
class iscApp1:
    """Application-wide state generated by Illumination Software Creator.

    iscV* attributes are the app's variables (page counts, URLs, message
    strings); iscWindow* attributes are the pygtk widgets of the three
    windows (main, mobile browser, about).
    """
    # numeric constants and working variables
    iscVcapApple = 5000
    iscVn8 = 8
    iscVn6 = 6
    iscVn4 = 4
    iscVn0 = 0
    iscVn2m = 2
    iscVnm1 = 1
    iscVparImpTest = ""
    iscVcapAndro = 78
    # links and notification shell commands
    iscVlink_pHelp_Loc = "redirect.html"
    iscVwAbout = 0
    iscVlink_license = "http://www.opensource.org/licenses/MIT"
    iscVlink_web = "http://www.dualPrint.org/"
    iscVNotifyOSD_Par = "notify-send \'dualPrint: Even Copy\' \'The seccond print set has been copied to the clipboard. You may paste it in the print dialog.\'"
    iscVNotifyOSD_Imp = "notify-send \'dualPrint: Odd Copy\' \'The first print set has been copied to the clipboard. You may paste it in the print dialog.\'"
    iscVn2 = 2
    iscVcoma = ","
    iscVguion = "-"
    # print-range state: last page, slides per side, first page, counter
    iscVn = 12
    iscVsl = 4
    iscVstart = 1
    iscVcount = 0
    iscVnText = "12"
    iscVslText = "4"
    iscVstartText = "1"
    iscVwPar = ""
    iscVwrite = "write"
    iscVn1 = 1
    iscVcountText = "countText"
    iscVqTest = 0
    iscVqDifference = 0
    iscVwImpar = ""
    iscVlink = ""
    iscVnullText = ""
    iscVlink_pHelp_M = "http://www.dualprint.org/"
    iscVtotal = "Total: "
    iscVtotalPages = ""
    # window 2: mobile browser
    iscWindow2browse1 = gtk.Window(gtk.WINDOW_TOPLEVEL)
    iscWindow2browse1Fixed = gtk.Fixed()
    iscWindow2return0 = gtk.Button("Return to dualPrint")
    # window 3: about dialog
    iscWindow3about1 = gtk.Window(gtk.WINDOW_TOPLEVEL)
    iscWindow3about1Fixed = gtk.Fixed()
    iscWindow3icon0 = gtk.Image()
    iscWindow3info0 =gtk.Label("dualPrint is a multi-platform application to")
    iscWindow3rights0 =gtk.Label("Copyright © 2012-2013 <NAME>")
    iscWindow3close0 = gtk.Button("Close")
    iscWindow3web0 = gtk.Button("Website")
    iscWindow3MIT0 =gtk.Label("This software is under the MIT License")
    iscWindow3version0 =gtk.Label("1.3")
    iscWindow3dualprint0 =gtk.Label("dualPrint")
    iscWindow3license0 = gtk.Button("License")
    iscWindow3illumination0 =gtk.Label("Built using Illumination Software Creator")
    iscWindow3info10 =gtk.Label("save sheets of paper by helping you do")
    iscWindow3info20 =gtk.Label("milti-sided printing.")
    iscWindow3MIT10 =gtk.Label("For more information press License.")
    # window 1: main window
    iscWindow1main1 = gtk.Window(gtk.WINDOW_TOPLEVEL)
    iscWindow1main1Fixed = gtk.Fixed()
    iscWindow1nQ0 =gtk.Label("Which would be the last page to print?")
    iscWindow1slidesQ0 =gtk.Label("How many slides or pages per side?")
    iscWindow1n0 = gtk.Entry()
    iscWindow1sl0 = gtk.Entry()
    iscWindow1bStart0 = gtk.Button("Generate Print Sets")
    iscWindow1inicioQ0 =gtk.Label("Which would be the first page to print?")
    iscWindow1start0 = gtk.Entry()
    iscWindow1infoImpar0 =gtk.Label("Odd, set of pages to print first.")
    iscWindow1wImpar0 = gtk.Entry()
    iscWindow1parInfo0 =gtk.Label("Even, set of pages to print on the back.")
    iscWindow1wPar0 = gtk.Entry()
    iscWindow1CI0 = gtk.Image()
    iscWindow1CP0 = gtk.Image()
    iscWindow1about0 = gtk.Button("About dualPrint")
    iscWindow1paper0 = gtk.Button("Printing help")
    iscWindow1header0 = gtk.Image()
    #EndOfGlobalVariables
    def main(self):
        """Enter the gtk main loop."""
        gtk.main()
    def destroy(self, widget, data=None):
        """Quit the gtk main loop."""
        gtk.main_quit()
    #EndOfClass
def iscWindow2():
    """Create and show the fixed-layout 'Mobile Browser' window (320x460)."""
    thisiscApp1.iscWindow2return0 = gtk.Button("Return to dualPrint")
    thisiscApp1.iscWindow2browse1 = gtk.Window(gtk.WINDOW_TOPLEVEL)
    thisiscApp1.iscWindow2browse1Fixed = gtk.Fixed()
    thisiscApp1.iscWindow2browse1.set_title("Mobile Browser")
    thisiscApp1.iscWindow2browse1.set_default_size(320, 460)
    thisiscApp1.iscWindow2browse1.add(thisiscApp1.iscWindow2browse1Fixed)
    thisiscApp1.iscWindow2browse1Fixed.width = 320
    thisiscApp1.iscWindow2browse1Fixed.height = 460
    thisiscApp1.iscWindow2browse1.connect("delete_event", iscWindow2Closed)
    thisiscApp1.iscWindow2browse1.set_resizable(False)
    thisiscApp1.iscWindow2browse1Fixed.show()
    thisiscApp1.iscWindow2browse1Fixed.put(thisiscApp1.iscWindow2return0, 0, 420)
    thisiscApp1.iscWindow2return0.set_size_request(320, 40)
    thisiscApp1.iscWindow2return0.connect("clicked", iscWindow2returnClicked)
    thisiscApp1.iscWindow2return0.show()
    thisiscApp1.iscWindow2browse1.show()
    iscSetWebBrowser42()  # defined elsewhere
    #iscWindow2Opened
    #iscWindow2Done
def iscWindow2Closed(self, other):
    """delete_event handler for the browser window; no action."""
    pass
    #iscWindow2Closed
def iscWindow2returnClicked(self):
    """Click handler for 'Return to dualPrint' (iscIfThen41 defined elsewhere)."""
    pass
    iscIfThen41()
    #iscWindow2returnClicked
def iscWindow3():
    """Create and show the fixed-layout 'About dualPrint' window (320x380)."""
    thisiscApp1.iscWindow3icon0 = gtk.Image()
    thisiscApp1.iscWindow3info0 =gtk.Label("dualPrint is a multi-platform application to")
    thisiscApp1.iscWindow3rights0 =gtk.Label("Copyright © 2012-2013 <NAME>")
    thisiscApp1.iscWindow3close0 = gtk.Button("Close")
    thisiscApp1.iscWindow3web0 = gtk.Button("Website")
    thisiscApp1.iscWindow3MIT0 =gtk.Label("This software is under the MIT License")
    thisiscApp1.iscWindow3version0 =gtk.Label("1.3")
    thisiscApp1.iscWindow3dualprint0 =gtk.Label("dualPrint")
    thisiscApp1.iscWindow3license0 = gtk.Button("License")
    thisiscApp1.iscWindow3illumination0 =gtk.Label("Built using Illumination Software Creator")
    thisiscApp1.iscWindow3info10 =gtk.Label("save sheets of paper by helping you do")
    thisiscApp1.iscWindow3info20 =gtk.Label("milti-sided printing.")
    thisiscApp1.iscWindow3MIT10 =gtk.Label("For more information press License.")
    thisiscApp1.iscWindow3about1 = gtk.Window(gtk.WINDOW_TOPLEVEL)
    thisiscApp1.iscWindow3about1Fixed = gtk.Fixed()
    thisiscApp1.iscWindow3about1.set_title("About dualPrint")
    thisiscApp1.iscWindow3about1.set_default_size(320, 380)
    thisiscApp1.iscWindow3about1.add(thisiscApp1.iscWindow3about1Fixed)
    thisiscApp1.iscWindow3about1Fixed.width = 320
    thisiscApp1.iscWindow3about1Fixed.height = 380
    thisiscApp1.iscWindow3about1.connect("delete_event", iscWindow3Closed)
    thisiscApp1.iscWindow3about1.set_resizable(False)
    thisiscApp1.iscWindow3about1Fixed.show()
    # the icon is wrapped in an EventBox so it can receive click events
    iscWindow3icon0EventBox = gtk.EventBox()
    iscWindow3icon0EventBox.set_size_request(90, 90)
    iscWindow3icon0EventBox.connect("button_press_event", iscWindow3iconClicked)
    thisiscApp1.iscWindow3icon0.set_size_request(90, 90)
    iscWindow3icon0EventBox.add(thisiscApp1.iscWindow3icon0)
    thisiscApp1.iscWindow3about1Fixed.put(iscWindow3icon0EventBox, 115, 12)
    thisiscApp1.iscWindow3icon0.show()
    iscWindow3icon0EventBox.show()
    thisiscApp1.iscWindow3about1Fixed.put(thisiscApp1.iscWindow3info0, 10, 154)
    thisiscApp1.iscWindow3info0.set_size_request(300, 20)
    thisiscApp1.iscWindow3info0.show()
    thisiscApp1.iscWindow3about1Fixed.put(thisiscApp1.iscWindow3rights0, 10, 225)
    thisiscApp1.iscWindow3rights0.set_size_request(300, 20)
    thisiscApp1.iscWindow3rights0.show()
    thisiscApp1.iscWindow3about1Fixed.put(thisiscApp1.iscWindow3close0, 234, 330)
    thisiscApp1.iscWindow3close0.set_size_request(80, 45)
    thisiscApp1.iscWindow3close0.connect("clicked", iscWindow3closeClicked)
    thisiscApp1.iscWindow3close0.show()
    thisiscApp1.iscWindow3about1Fixed.put(thisiscApp1.iscWindow3web0, 8, 330)
    thisiscApp1.iscWindow3web0.set_size_request(90, 45)
    thisiscApp1.iscWindow3web0.connect("clicked", iscWindow3webClicked)
    thisiscApp1.iscWindow3web0.show()
    thisiscApp1.iscWindow3about1Fixed.put(thisiscApp1.iscWindow3MIT0, 10, 284)
    thisiscApp1.iscWindow3MIT0.set_size_request(300, 20)
    thisiscApp1.iscWindow3MIT0.show()
    thisiscApp1.iscWindow3about1Fixed.put(thisiscApp1.iscWindow3version0, 120, 130)
    thisiscApp1.iscWindow3version0.set_size_request(80, 20)
    thisiscApp1.iscWindow3version0.show()
    thisiscApp1.iscWindow3about1Fixed.put(thisiscApp1.iscWindow3dualprint0, 120, 108)
    thisiscApp1.iscWindow3dualprint0.set_size_request(80, 20)
    thisiscApp1.iscWindow3dualprint0.show()
    thisiscApp1.iscWindow3about1Fixed.put(thisiscApp1.iscWindow3license0, 103, 330)
    thisiscApp1.iscWindow3license0.set_size_request(90, 45)
    thisiscApp1.iscWindow3license0.connect("clicked", iscWindow3licenseClicked)
    thisiscApp1.iscWindow3license0.show()
    thisiscApp1.iscWindow3about1Fixed.put(thisiscApp1.iscWindow3illumination0, 10, 255)
    thisiscApp1.iscWindow3illumination0.set_size_request(300, 20)
    thisiscApp1.iscWindow3illumination0.show()
    thisiscApp1.iscWindow3about1Fixed.put(thisiscApp1.iscWindow3info10, 10, 175)
    thisiscApp1.iscWindow3info10.set_size_request(300, 20)
    thisiscApp1.iscWindow3info10.show()
    thisiscApp1.iscWindow3about1Fixed.put(thisiscApp1.iscWindow3info20, 10, 196)
    thisiscApp1.iscWindow3info20.set_size_request(300, 20)
    thisiscApp1.iscWindow3info20.show()
    thisiscApp1.iscWindow3about1Fixed.put(thisiscApp1.iscWindow3MIT10, 10, 305)
    thisiscApp1.iscWindow3MIT10.set_size_request(300, 20)
    thisiscApp1.iscWindow3MIT10.show()
    thisiscApp1.iscWindow3about1.show()
    iscSetCanvasPicture169()  # defined elsewhere
    #iscWindow3Opened
    #iscWindow3Done
def iscWindow3Closed(self, other):
    """delete_event handler for the about window (iscSetNumber173 elsewhere)."""
    pass
    iscSetNumber173()
    #iscWindow3Closed
def iscWindow3iconClicked(widget, event):
    """button_press handler for the about-window icon; no action."""
    pass
    #iscWindow3iconClicked
def iscWindow3closeClicked(self):
    """Click handler for Close (iscTargetIs134 defined elsewhere)."""
    pass
    iscTargetIs134()
    #iscWindow3closeClicked
def iscWindow3webClicked(self):
    """Click handler for Website (iscSetText170 defined elsewhere)."""
    pass
    iscSetText170()
    #iscWindow3webClicked
def iscWindow3licenseClicked(self):
    """Click handler for License (iscSetText171 defined elsewhere)."""
    pass
    iscSetText171()
    #iscWindow3licenseClicked
def iscFloat_to_integer5():
    """Truncate iscVqDifference to a whole number with math.floor."""
    #Using a function to integer
    thisiscApp1.iscVqDifference = math.floor(thisiscApp1.iscVqDifference)
    #iscFloat_to_integer5Done
def iscSetText6():
    """Point iscVlink at the online printing-help URL, then hand off."""
    thisiscApp1.iscVlink = thisiscApp1.iscVlink_pHelp_M
    iscTargetIs166()  # defined elsewhere
    #iscSetText6Done
def iscSetText7():
    """Point iscVlink at the local help page (redirect.html), then hand off."""
    thisiscApp1.iscVlink = thisiscApp1.iscVlink_pHelp_Loc
    iscTargetIs166()  # defined elsewhere
    #iscSetText7Done
def iscConvertNumberToText8():
    """Render iscVqDifference as text into iscVtotalPages, then combine."""
    thisiscApp1.iscVtotalPages = str(thisiscApp1.iscVqDifference)
    iscCombineText9()
    #iscConvertNumberToText8Done
def iscCombineText9():
    """Prefix the page count with 'Total: ' (iscSetButton158 elsewhere)."""
    thisiscApp1.iscVtotalPages = thisiscApp1.iscVtotal + thisiscApp1.iscVtotalPages
    iscSetButton158()
    #iscCombineText9Done
def iscPortalDestination10():
    """Compute the sheet total: ((n - start) / sl / 2) + 1, then display it.

    Order matters: subtract, divide by slides-per-side, halve, add one.
    """
    iscSubtract14()
    iscDivide12()
    iscDivide11()
    iscAdd13()
    iscTargetIs157()  # defined elsewhere
    iscConvertNumberToText8()
    #iscPortalDestination10Arrived
def iscDivide11():
    """Halve iscVqDifference (iscVn2 is the constant 2)."""
    thisiscApp1.iscVqDifference /= thisiscApp1.iscVn2
    #iscDivide11Done
def iscDivide12():
    """Divide iscVqDifference by the slides-per-side setting (iscVsl)."""
    thisiscApp1.iscVqDifference /= thisiscApp1.iscVsl
    #iscDivide12Done
def iscAdd13():
    """Add one (iscVn1) to iscVqDifference."""
    thisiscApp1.iscVqDifference += thisiscApp1.iscVn1
    #iscAdd13Done
def iscSubtract14():
    """iscVqDifference = last page (iscVn) - first page (iscVstart)."""
    thisiscApp1.iscVqDifference = thisiscApp1.iscVn - thisiscApp1.iscVstart
    #iscSubtract14Done
def iscClipboard_Copy16():
    """Copy the even page set (iscVwPar) to the system clipboard."""
    copy(thisiscApp1.iscVwPar)
    #iscClipboard_Copy16Done
def iscRunShellScript18():
    """Show the 'Even Copy' bubble via notify-send (Linux only caller)."""
    os.system(thisiscApp1.iscVNotifyOSD_Par)
    #iscRunShellScript18Done
def iscIf_Linux20():
    """Run the notify shell script only on Linux/POSIX systems.

    The generated original evaluated the identical platform test twice
    (once to run the script, once to set the unused local ``OS``); this
    folds both checks into a single if/else with identical behavior.
    """
    if os.name == 'posix' or platform.system() == 'Linux':
        OS = "Linux"
        iscRunShellScript18()
        # iscIf_Linux20Linux
    else:
        OS = "Other"
        # iscIf_Linux20Else
def iscPortalDeparture21():
    """Jump to portal destination 147."""
    iscPortalDestination147()
    # iscPortalDeparture21Done
def iscIfThen22():
    """Branch on qTest > n: portal 21 when true, portal 29 otherwise."""
    if thisiscApp1.iscVqTest > thisiscApp1.iscVn:
        iscPortalDeparture21()  # iscIfThen22True
    else:
        iscPortalDeparture29()  # iscIfThen22False
def iscIfThen26():
    """qTest == n: depart directly; otherwise derive qDifference and re-test."""
    if thisiscApp1.iscVqTest == thisiscApp1.iscVn:
        # iscIfThen26True
        iscPortalDeparture27()
        iscPortalDeparture155()
    else:
        # iscIfThen26False
        iscDivide28()
        iscIfThen31()
        iscPortalDeparture155()
def iscPortalDeparture27():
    """Jump to portal destination 97."""
    iscPortalDestination97()
    # iscPortalDeparture27Done
def iscDivide28():
    """Set iscVqDifference to iscVn / iscVn2."""
    app = thisiscApp1
    app.iscVqDifference = app.iscVn / app.iscVn2
    # iscDivide28Done
def iscPortalDeparture29():
    """Jump to portal destination 131."""
    iscPortalDestination131()
    # iscPortalDeparture29Done
def iscIfThen31():
    """Branch on qTest < qDifference: portal 29 when true, iscIfThen60 otherwise."""
    if thisiscApp1.iscVqTest < thisiscApp1.iscVqDifference:
        iscPortalDeparture29()  # iscIfThen31True
    else:
        iscIfThen60()  # iscIfThen31False
def iscDoWhile33():
    # Generator marker comments (#...Loop / #...Finished) delimit the loop
    # body from the follow-up calls; reconstructed indentation follows them.
    # Loop until the counter reaches iscVn, accumulating text each pass.
    while thisiscApp1.iscVcount < thisiscApp1.iscVn:
        iscCombineText135()
        iscConvertNumberToText51()
        iscCombineText128()
        iscAdd139()
        iscAdd140()
        # iscDoWhile33Loop
    # After the loop: flush the text, convert it, and hand control back.
    iscSetText44()
    iscConvertTextToNumber150()
    iscPortalDeparture155()
    # iscDoWhile33Finished
def iscDoWhile34():
    # Same accumulation loop as iscDoWhile33; on completion it resets via
    # iscSetText49, advances the counters once more and chains into
    # iscDoWhile33. Loop extent reconstructed from the generator markers.
    while thisiscApp1.iscVcount < thisiscApp1.iscVn:
        iscCombineText135()
        iscConvertNumberToText51()
        iscCombineText128()
        iscAdd139()
        iscAdd140()
        # iscDoWhile34Loop
    iscSetText49()
    iscAdd139()
    iscAdd140()
    iscDoWhile33()
    # iscDoWhile34Finished
def iscMessageBox35():
message = "The starting page | |
to_datetime : Convert argument to datetime.
Notes
-----
If the precision is higher than nanoseconds, the precision of the duration is
truncated to nanoseconds for string inputs.
Examples
--------
Parsing a single string to a Timedelta:
>>> ps.to_timedelta('1 days 06:05:01.00003')
Timedelta('1 days 06:05:01.000030')
>>> ps.to_timedelta('15.5us')
Timedelta('0 days 00:00:00.000015500')
Parsing a list or array of strings:
>>> ps.to_timedelta(['1 days 06:05:01.00003', '15.5us', 'nan']) # doctest: +NORMALIZE_WHITESPACE
TimedeltaIndex(['1 days 06:05:01.000030', '0 days 00:00:00.000015500', NaT],
dtype='timedelta64[ns]', freq=None)
Converting numbers by specifying the `unit` keyword argument:
>>> ps.to_timedelta(np.arange(5), unit='s') # doctest: +NORMALIZE_WHITESPACE
TimedeltaIndex(['0 days 00:00:00', '0 days 00:00:01', '0 days 00:00:02',
'0 days 00:00:03', '0 days 00:00:04'],
dtype='timedelta64[ns]', freq=None)
>>> ps.to_timedelta(np.arange(5), unit='d') # doctest: +NORMALIZE_WHITESPACE
TimedeltaIndex(['0 days', '1 days', '2 days', '3 days', '4 days'],
dtype='timedelta64[ns]', freq=None)
"""
def pandas_to_timedelta(pser: pd.Series) -> np.timedelta64:
return pd.to_timedelta(
arg=pser,
unit=unit,
errors=errors,
)
if isinstance(arg, Series):
return arg.transform(pandas_to_timedelta)
else:
return pd.to_timedelta(
arg=arg,
unit=unit,
errors=errors,
)
def timedelta_range(
    start: Union[str, Any] = None,
    end: Union[str, Any] = None,
    periods: Optional[int] = None,
    freq: Optional[Union[str, DateOffset]] = None,
    name: Optional[str] = None,
    closed: Optional[str] = None,
) -> TimedeltaIndex:
    """
    Return a fixed frequency TimedeltaIndex, with day as the default frequency.

    Parameters
    ----------
    start : str or timedelta-like, optional
        Left bound for generating timedeltas.
    end : str or timedelta-like, optional
        Right bound for generating timedeltas.
    periods : int, optional
        Number of periods to generate.
    freq : str or DateOffset, default 'D'
        Frequency strings can have multiples, e.g. '5H'.
    name : str, default None
        Name of the resulting TimedeltaIndex.
    closed : {None, 'left', 'right'}, optional
        Make the interval closed with respect to the given frequency to
        the 'left', 'right', or both sides (None, the default).

    Returns
    -------
    TimedeltaIndex

    Notes
    -----
    Of the four parameters ``start``, ``end``, ``periods``, and ``freq``,
    exactly three must be specified. If ``freq`` is omitted, the resulting
    ``TimedeltaIndex`` will have ``periods`` linearly spaced elements between
    ``start`` and ``end`` (closed on both sides).

    To learn more about the frequency strings, please see `this link
    <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.

    Examples
    --------
    >>> ps.timedelta_range(start='1 day', periods=4)  # doctest: +NORMALIZE_WHITESPACE
    TimedeltaIndex(['1 days', '2 days', '3 days', '4 days'], dtype='timedelta64[ns]', freq=None)

    The closed parameter specifies which endpoint is included.
    The default behavior is to include both endpoints.

    >>> ps.timedelta_range(start='1 day', periods=4, closed='right')  # doctest: +NORMALIZE_WHITESPACE
    TimedeltaIndex(['2 days', '3 days', '4 days'], dtype='timedelta64[ns]', freq=None)

    The freq parameter specifies the frequency of the TimedeltaIndex.
    Only fixed frequencies can be passed, non-fixed frequencies such as ‘M’ (month end) will raise.

    >>> ps.timedelta_range(start='1 day', end='2 days', freq='6H')  # doctest: +NORMALIZE_WHITESPACE
    TimedeltaIndex(['1 days 00:00:00', '1 days 06:00:00', '1 days 12:00:00',
                    '1 days 18:00:00', '2 days 00:00:00'],
                   dtype='timedelta64[ns]', freq=None)

    Specify start, end, and periods; the frequency is generated automatically (linearly spaced).

    >>> ps.timedelta_range(start='1 day', end='5 days', periods=4)  # doctest: +NORMALIZE_WHITESPACE
    TimedeltaIndex(['1 days 00:00:00', '2 days 08:00:00', '3 days 16:00:00',
                    '5 days 00:00:00'],
                   dtype='timedelta64[ns]', freq=None)
    """
    # Nanosecond frequencies are explicitly unsupported (see the message).
    # NOTE(review): `assert` disappears under `python -O`; confirm whether a
    # ValueError would match the project's error-handling conventions.
    assert freq not in ["N", "ns"], "nanoseconds is not supported"
    # Build the index eagerly with pandas, then lift it into pandas-on-Spark.
    return cast(
        TimedeltaIndex,
        ps.from_pandas(
            pd.timedelta_range(
                start=start,
                end=end,
                periods=periods,
                freq=freq,
                name=name,
                closed=closed,
            )
        ),
    )
def get_dummies(
    data: Union[DataFrame, Series],
    prefix: Optional[Union[str, List[str], Dict[str, str]]] = None,
    prefix_sep: str = "_",
    dummy_na: bool = False,
    columns: Optional[Union[Name, List[Name]]] = None,
    sparse: bool = False,
    drop_first: bool = False,
    dtype: Optional[Union[str, Dtype]] = None,
) -> DataFrame:
    """
    Convert categorical variable into dummy/indicator variables, also
    known as one hot encoding.

    Parameters
    ----------
    data : array-like, Series, or DataFrame
    prefix : string, list of strings, or dict of strings, default None
        String to append DataFrame column names.
        Pass a list with length equal to the number of columns
        when calling get_dummies on a DataFrame. Alternatively, `prefix`
        can be a dictionary mapping column names to prefixes.
    prefix_sep : string, default '_'
        If appending prefix, separator/delimiter to use. Or pass a
        list or dictionary as with `prefix.`
    dummy_na : bool, default False
        Add a column to indicate NaNs, if False NaNs are ignored.
    columns : list-like, default None
        Column names in the DataFrame to be encoded.
        If `columns` is None then all the columns with
        `object` or `category` dtype will be converted.
    sparse : bool, default False
        Whether the dummy-encoded columns should be backed by
        a :class:`SparseArray` (True) or a regular NumPy array (False).
        In pandas-on-Spark, this value must be "False".
    drop_first : bool, default False
        Whether to get k-1 dummies out of k categorical levels by removing the
        first level.
    dtype : dtype, default np.uint8
        Data type for new columns. Only a single dtype is allowed.

    Returns
    -------
    dummies : DataFrame

    See Also
    --------
    Series.str.get_dummies

    Examples
    --------
    >>> s = ps.Series(list('abca'))
    >>> ps.get_dummies(s)
       a  b  c
    0  1  0  0
    1  0  1  0
    2  0  0  1
    3  1  0  0

    >>> df = ps.DataFrame({'A': ['a', 'b', 'a'], 'B': ['b', 'a', 'c'],
    ...                    'C': [1, 2, 3]},
    ...                   columns=['A', 'B', 'C'])
    >>> ps.get_dummies(df, prefix=['col1', 'col2'])
       C  col1_a  col1_b  col2_a  col2_b  col2_c
    0  1       1       0       0       1       0
    1  2       0       1       1       0       0
    2  3       1       0       0       0       1

    >>> ps.get_dummies(ps.Series(list('abcaa')))
       a  b  c
    0  1  0  0
    1  0  1  0
    2  0  0  1
    3  1  0  0
    4  1  0  0

    >>> ps.get_dummies(ps.Series(list('abcaa')), drop_first=True)
       b  c
    0  0  0
    1  1  0
    2  0  1
    3  0  0
    4  0  0

    >>> ps.get_dummies(ps.Series(list('abc')), dtype=float)
         a    b    c
    0  1.0  0.0  0.0
    1  0.0  1.0  0.0
    2  0.0  0.0  1.0
    """
    if sparse is not False:
        raise NotImplementedError("get_dummies currently does not support sparse")
    if columns is not None:
        if not is_list_like(columns):
            raise TypeError("Input must be a list-like for parameter `columns`")
    if dtype is None:
        # "byte" is the Spark-side stand-in for pandas' default np.uint8.
        dtype = "byte"
    if isinstance(data, Series):
        # A Series has exactly one column, so a scalar prefix is acceptable.
        if prefix is not None:
            prefix = [str(prefix)]
        psdf = data.to_frame()
        column_labels = psdf._internal.column_labels
        remaining_columns = []
    else:
        if isinstance(prefix, str):
            raise NotImplementedError(
                "get_dummies currently does not support prefix as string types"
            )
        psdf = data.copy()
        if columns is None:
            # Default: encode every column whose Spark type is category-like.
            column_labels = [
                label
                for label in psdf._internal.column_labels
                if isinstance(
                    psdf._internal.spark_type_for(label), _get_dummies_default_accept_types
                )
            ]
        else:
            if is_name_like_tuple(columns):
                # `columns` is one (possibly partial) multi-level label:
                # select every column label it prefixes.
                column_labels = [
                    label
                    for label in psdf._internal.column_labels
                    if label[: len(columns)] == columns
                ]
                if len(column_labels) == 0:
                    raise KeyError(name_like_string(columns))
                if prefix is None:
                    # Derive prefixes from the label levels below `columns`.
                    prefix = [
                        str(label[len(columns) :])
                        if len(label) > len(columns) + 1
                        else label[len(columns)]
                        if len(label) == len(columns) + 1
                        else ""
                        for label in column_labels
                    ]
            elif any(isinstance(col, tuple) for col in columns) and any(
                not is_name_like_tuple(col) for col in columns
            ):
                # Mixing tuple and non-tuple column names is ambiguous.
                raise ValueError(
                    "Expected tuple, got {}".format(
                        type(set(col for col in columns if not is_name_like_tuple(col)).pop())
                    )
                )
            else:
                column_labels = [
                    label
                    for key in columns
                    for label in psdf._internal.column_labels
                    if label == key or label[0] == key
                ]
        if len(column_labels) == 0:
            if columns is None:
                # Nothing category-like to encode: return the frame unchanged.
                return psdf
            raise KeyError("{} not in index".format(columns))
        if prefix is None:
            prefix = [str(label) if len(label) > 1 else label[0] for label in column_labels]
        column_labels_set = set(column_labels)
        # Columns that are not being encoded pass through unchanged.
        remaining_columns = [
            (
                psdf[label]
                if psdf._internal.column_labels_level == 1
                else psdf[label].rename(name_like_string(label))
            )
            for label in psdf._internal.column_labels
            if label not in column_labels_set
        ]
    if any(
        not isinstance(psdf._internal.spark_type_for(label), _get_dummies_acceptable_types)
        for label in column_labels
    ):
        raise NotImplementedError(
            "get_dummies currently only accept {} values".format(
                ", ".join(
                    [cast(Type[DataType], t).typeName() for t in _get_dummies_acceptable_types]
                )
            )
        )
    if prefix is not None and len(column_labels) != len(prefix):
        raise ValueError(
            "Length of 'prefix' ({}) did not match the length of "
            "the columns being encoded ({}).".format(len(prefix), len(column_labels))
        )
    elif isinstance(prefix, dict):
        # Map dict prefixes onto the selected columns, in column order.
        prefix = [prefix[column_label[0]] for column_label in column_labels]
    # One distributed pass collects the distinct values of every column.
    all_values = _reduce_spark_multi(
        psdf._internal.spark_frame,
        [F.collect_set(psdf._internal.spark_column_for(label)) for label in column_labels],
    )
    for i, label in enumerate(column_labels):
        values = all_values[i]
        if isinstance(values, np.ndarray):
            values = values.tolist()
        # Sort so dummy columns appear in a deterministic order.
        values = sorted(values)
        if drop_first:
            values = values[1:]

        def column_name(v: Any) -> Name:
            # Dummy-column name: "<prefix><sep><value>", or the bare value
            # when no prefix applies to this column.
            if prefix is None or cast(List[str], prefix)[i] == "":
                return v
            else:
                return "{}{}{}".format(cast(List[str], prefix)[i], prefix_sep, v)

        for value in values:
            # notnull() guard keeps NaN rows out of the equality indicator.
            remaining_columns.append(
                (psdf[label].notnull() & (psdf[label] == value))
                .astype(dtype)
                .rename(column_name(value))
            )
        if dummy_na:
            # Extra indicator column for missing values.
            remaining_columns.append(psdf[label].isnull().astype(dtype).rename(column_name(np.nan)))
    return psdf[remaining_columns]
# TODO: there are many parameters to implement and support. See pandas's | |
*args, **kwargs):
for k, v in self.widgets.items():
v.pack(side='left', fill='both', expand=True)
super().pack(*args, **kwargs)
    def grid(self, *args, **kwargs):
        """Layout child widgets within the only child frame."""
        self.grid_rowconfigure(0, weight=1)
        # Main widgets
        col = 0
        for k, v in self.widgets.items():  # Layout each column.
            self.grid_columnconfigure(col, weight=self.gridWeights[k])
            v.grid(row=0, column=col, sticky=FULL_EXPAND)
            col += 1
        super().grid(*args, **kwargs)
        # Remember grid kwargs so the row can be re-gridded after UI filtering.
        self.gridConfig = kwargs
class InfoStrip(WidgetBase):
    """Show read-only descriptions. Add scrollbar only if multiline."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Widgets
        self.widgets['Title'] = ttk.Label(self, text='Title')
        # tk.Message wraps long text; a large width keeps short content on
        # one visual line while still allowing wrapping.
        self.widgets['Content'] = tk.Message(self, text='', justify=tk.LEFT,
                                             anchor='w',
                                             width=1000,
                                             bg=COLOR_LAYERS['Common'])
        self.gridWeights['Title'] = 1
        self.gridWeights['Content'] = 100
    def __repr__(self):
        return '{}: not interactive.'.format(type(self).__name__)
    def configure_internal(self, config):
        """
        Extend to configure title text properties.
        :param config: {'property': value} about appearance, data range, etc.
        :return:
        """
        self.widgets['Title']['text'] = config['Title']
        self.widgets['Content']['text'] = config['Value']
    def get_title(self):
        # Title text doubles as the strip's identity for UI filtering.
        return self.widgets['Title']['text']
    def pack(self, *args, **kwargs):
        # Pack children left-to-right, then the frame itself.
        for k, v in self.widgets.items():
            v.pack(side='left', fill='both', expand=True)
        super().pack(*args, **kwargs)
    def grid(self, *args, **kwargs):
        """Layout child widgets within the only child frame."""
        self.grid_rowconfigure(0, weight=1)
        # Main widgets
        col = 0
        for k, v in self.widgets.items():  # Layout each column.
            self.grid_columnconfigure(col, weight=self.gridWeights[k])
            v.grid(row=0, column=col, sticky=FULL_EXPAND)
            col += 1
        super().grid(*args, **kwargs)
        # Preserve kwargs so the layout can be restored after UI filtering.
        self.gridConfig = kwargs
class RowStrip(WidgetBase):
    """
    A compound widget as a parameter config UI.

    Features
    - Preserves Tkinter's init-configure-layout-callback paradigm;
    - Adds UI to reset data to its default value, with user hooks to
      retrieve default values.
    - Provides UI to open help about the data, provided in config file.

    Usecases
    - Programmer defines control parameters for CLI script in a
      JSON config file, including default values and help docs of the parameter.
    - Programmer then generates UI for the parameter using generic factory
      method. The factory method stacks up these row-strips.
    - Programmer ships package containing the CLI script including UI
      generator code, and JSON config files as a standalone app.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.input = None  # To implement in children
        # Insertion order of self.widgets defines the left-to-right layout.
        self.widgets = collections.OrderedDict()
        self.gridWeights = {}  # Proportions of children along the widgets.
        self.gridConfig = {}
        # Common widgets
        self.reset = ttk.Button(self, text='Reset', command=self._on_reset)
        self.help = ttk.Button(self, text='?', command=self._on_help)
        # Handler slots filled later by bind_internal().
        self.handlers = {k: None for k in ['OnReset',
                                           'OnHelp',
                                           'OnChange']}
    def __repr__(self):
        return 'class: {}, name: {}: data: {}'.format(
            self.__class__.__name__, self._name, self.get_data())
    def pack(self, *args, **kwargs):
        """Pack child widgets in a row."""
        for k, v in self.widgets.items():
            v.pack(side='left', fill='both', expand=True)
        self.help.pack(side='right', fill='both', expand=False)
        self.reset.pack(side='right', fill='both', expand=False)
        super().pack(*args, **kwargs)
    def grid(self, *args, **kwargs):
        """
        Pack child widgets by fixed proportions,
        and preserve grid config for restoring upon UI filtering.
        """
        self.grid_rowconfigure(0, weight=1)  # Layout single widgets.
        # Main widgets
        col = 0
        for k, v in self.widgets.items():  # Layout each column.
            self.grid_columnconfigure(col, weight=self.gridWeights[k])
            v.grid(row=0, column=col, sticky=FULL_EXPAND)
            col += 1
        # Common widgets
        self.grid_columnconfigure(col, weight=1)
        self.reset.grid(row=0, column=col, sticky='nsw')  # right-aligned.
        # Disable button if no callback is assigned.
        if not callable(self.handlers['OnReset']):
            self.reset.configure(state=tk.DISABLED)
        col += 1
        self.grid_columnconfigure(col, weight=1)
        self.help.grid(row=0, column=col, sticky='nsw')  # right-aligned.
        if not callable(self.handlers['OnHelp']):
            self.help.configure(state=tk.DISABLED)
        col += 1
        super().grid(*args, **kwargs)
        # Preserve kwargs so the row can be re-gridded after UI filtering.
        self.gridConfig = kwargs
    def get_data(self):
        """Return user input data to assign to a named parameter."""
        return self.input.get() if self.input is not None else None
    def set_data(self, kvp):
        """
        Update widgets based on new kvp data field.
        :param kvp: field retrieved using self._name from config.
        """
        self.input.set(kvp['Value'])
    def get_help(self):
        if callable(self.handlers['OnHelp']):
            return self.handlers['OnHelp'](self)  # Get help text from app.
        return None
    def get_title(self):
        # Not every subclass defines a 'Title' widget; return None then.
        return self.widgets['Title']['text'] \
            if 'Title' in self.widgets.keys() else None
    def bind_internal(self, eventmaps):
        """
        Register events and handlers. Support cookies.
        :param eventmaps: {'on_xx': handler_func} for client logic to use.
        :return:
        """
        for k, v in eventmaps.items():
            self.handlers[k] = v
        # Bind var observer: lambda to pass:
        # - widget for accessing data
        # - vars specified by trace.
        # NOTE(review): Variable.trace is the legacy API (trace_add is the
        # modern replacement) — keeping it to match the file's Tk usage.
        if callable(self.handlers['OnChange']):
            self.input.trace(
                mode='w',
                callback=lambda *args: self.handlers['OnChange'](self, *args)
            )
    def _on_reset(self):
        # Reset widget data to default based on configuration.
        if callable(self.handlers['OnReset']):
            # if self.reset['state'] != tk.DISABLED:
            default = self.handlers['OnReset'](self._name)  # Get default from
            # app.
            self.set_data(default)
    def _on_help(self):
        """Show user docs in a separate window."""
        if callable(self.handlers['OnHelp']):
            help_text = self.handlers['OnHelp'](self._name)  # Get help text
            # from app.
            window = tk.Toplevel(self)
            window.title('Help: {}'.format(self._name))
            text = tk.Text(window)
            text.insert(tk.INSERT, help_text)
            text.pack()
class EntryStrip(RowStrip):
    """Get info from app on pressing action button, and show it."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.input = tk.StringVar(name=self._name, value='...')
        self.widgets['Title'] = ttk.Label(self, text='Title: ')
        self.widgets['Entry'] = ttk.Entry(self, textvariable=self.input)
        self.widgets['Action'] = ttk.Button(self, text='Action')
        self.widgets['Action'].configure(command=self._on_action)
        self.gridWeights['Title'] = 1
        self.gridWeights['Entry'] = 1000
        self.gridWeights['Action'] = 1
        self.handlers['OnAction'] = None
    def configure_internal(self, config):
        """Apply a config dict; binds the action button when 'Action' is set."""
        self.input.set(config['Value'])
        self.widgets['Title']['text'] = config['Title']
        if 'Action' in config:
            self.widgets['Action']['text'] = config['Action']
            # Rule: bind action based on action type field in config.
            # BUG FIX: the action-binding loop below previously ran
            # unconditionally and read config['Action'], raising KeyError for
            # configs without an Action field; it is now guarded.
            action_maps = {
                'Copy': self._copy_to_clipboard
            }
            for k, v in action_maps.items():
                if config['Action'].startswith(k):
                    self.handlers['OnAction'] = v
                    break
        super().configure_internal({})
    def _on_action(self):
        # Dispatch to whichever action the config bound (no-op otherwise).
        if callable(self.handlers['OnAction']):
            self.handlers['OnAction']()
    def _copy_to_clipboard(self):
        # Copy current entry content to the application clipboard.
        root = self.winfo_toplevel()
        root.clipboard_clear()
        root.clipboard_append(self.get_data())
class PreciseScale(ttk.Scale):
    """A ttk.Scale that rounds reported values to a fixed precision."""
    def __init__(self, *args, **kwargs):
        # Strip the non-standard kwargs before handing the rest to ttk.Scale.
        self.precision = kwargs.pop('precision')
        self.onChange = kwargs.pop('command', lambda *a: None)  # User callback.
        super().__init__(*args, command=self._value_changed, **kwargs)
    def _value_changed(self, new_value):
        rounded = round(float(new_value), self.precision)
        # Push the rounded value back into the linked Tk variable, then
        # forward it to the user-supplied callback.
        self.winfo_toplevel().globalsetvar(self.cget('variable'), rounded)
        self.onChange(rounded)
class NumberStrip(RowStrip):
    """Get number from user input, and show it."""
    def __init__(self, *args, datatype='float', precision=3, **kwargs):
        super().__init__(*args, **kwargs)
        # Int vs float strips share the same widgets; only the Tk var differs.
        self.input = tk.IntVar(name=self._name, value=0) \
            if datatype == 'int' else tk.DoubleVar(name=self._name, value=0.)
        self.widgets['Title'] = ttk.Label(self, text='Number: ')
        self.widgets['Spin'] = tk.Spinbox(self,
                                          textvariable=self.input,
                                          wrap=True)
        self.widgets['Slider'] = PreciseScale(self,
                                              variable=self.input,
                                              orient=tk.HORIZONTAL,
                                              precision=precision)
        self.gridWeights['Title'] = 1
        self.gridWeights['Spin'] = 1
        self.gridWeights['Slider'] = 100
    def configure_internal(self, config):
        self.input.set(config['Value'])
        self.widgets['Title']['text'] = config['Title']
        # CAUTION: Must assign 'to' before 'from',
        # because 'from' might risk getting set bigger than 'to' any other way.
        self.widgets['Spin']['to'] = config['Range'][1]
        self.widgets['Spin']['from'] = config['Range'][0]
        self.widgets['Spin']['increment'] = config['Steps'][0]
        self.widgets['Slider']['to'] = config['Range'][1]
        self.widgets['Slider']['from'] = config['Range'][0]
        # Fit spinbox size to value range
        char_count = len(str(self.widgets['Spin']['to']))
        self.widgets['Spin'].configure(width=max(char_count+1, 6))
        # CAUTION: ttk.Scale has no increment or resolution
        # Set slider increment to int to avoid float increment when data is int.
        if isinstance(self.input, tk.IntVar):
            self.widgets['Slider']['command'] = self._int_increment
        elif isinstance(self.input, tk.DoubleVar):
            self.widgets['Slider'].precision = config['Precision'] \
                if 'Precision' in config.keys() else 3
        else:
            raise NotImplementedError("""
            Unsupported var type: {}, expected tk.IntVar or tk.DoubleVar.
            """.format(type(self.input)))
    def _int_increment(self, evt=None):
        """If slider generates floating point, lets round it up."""
        data = self.widgets['Slider'].get()
        if int(data) != data:
            self.widgets['Slider'].set(round(data))
    def _float_increment(self, evt=None):
        # NOTE(review): identical to _int_increment — it snaps the slider to
        # whole numbers, which looks wrong for a float strip. It also appears
        # unused (never bound in configure_internal); confirm before relying
        # on it.
        data = self.widgets['Slider'].get()
        if int(data) != data:
            self.widgets['Slider'].set(round(data))
class CheckStrip(RowStrip):
    """Checkbox with description."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.input = tk.BooleanVar(name=self._name, value=True)
        # The checkbox itself carries no text; the label to its right does.
        self.widgets['Check'] = ttk.Checkbutton(self, text='',
                                                variable=self.input)
        self.widgets['Title'] = ttk.Label(self, text='To enable that Thingy!')
        self.gridWeights['Check'] = 1
        self.gridWeights['Title'] = 1000
    def configure_internal(self, config):
        self.input.set(config['Value'])  # Both True/False and 1/0 work.
        self.widgets['Title']['text'] = config['Title']
class OptionStrip(RowStrip):
    """Single selection through dropdown list."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.input = tk.StringVar(master=self, name=self._name, value='')
        # OptionMenu widget does not save the list. So we must.
        self.options = []
        self.widgets['Title'] = ttk.Label(self, text='Select: ')
        self.widgets['Option'] = ttk.OptionMenu(self,
                                                self.input,
                                                '',
                                                *self.options)
        self.gridWeights['Title'] = 1
        self.gridWeights['Option'] = 1000
    def configure_internal(self, config):
        self.options = copy.deepcopy(config['Options'])
        self.input.set(config['Options'][config['Value']])
        self.widgets['Title']['text'] = config['Title']
        # CAUTION: must set default (Arg #3 ) before giving the option list.
        # NOTE(review): passing self.options[0] as the default overwrites the
        # selection set from config['Value'] just above, and the freshly
        # created widget is never re-gridded (the previously laid-out one
        # stays on screen). Confirm whether this is intended.
        self.widgets['Option'] = ttk.OptionMenu(self,
                                                self.input,
                                                self.options[0],
                                                *self.options)
    def get_data(self):
        """
        Return the list index of the option string to save to config.
        """
        return self.options.index(self.input.get())
    def set_data(self, kvp):
        # kvp['Value'] is an index into kvp['Options'].
        text = kvp['Options'][kvp['Value']]
        self.input.set(text)
class PathStrip(EntryStrip):
    """Open or save to a path, with filetype filters."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.filetypes = []
        self.defaultextension = '*.*'
    def configure_internal(self, config):
        """Apply config, then bind the Browse/Copy action button."""
        super().configure_internal(config)
        self.filetypes = config['FileTypes']
        # Take head of file types; Remove * from *.jpg
        self.defaultextension = config['FileTypes'][0][1][1:]
        # Rule: bind action based on action type field in config.
        action_maps = {
            'Browse': self._browse,
            'Copy': super()._copy_to_clipboard
        }
        for k, v in action_maps.items():
            if config['Action'].startswith(k):
                self.handlers['OnAction'] = v
                break
    def _browse(self):
        """Ask the user for a file and store the chosen path in self.input."""
        # BUG FIX: platform.platform() returns a verbose string such as
        # 'Linux-5.4.0-...-x86_64' and never equals 'Windows' or 'Linux', so
        # the filetype-filtered dialog branch was unreachable.
        # platform.system() returns exactly those names.
        if platform.system() in ('Windows', 'Linux'):
            data = tkfiledialog.askopenfilename(
                title='Select {}'.format(self.widgets['Title']['text']),
                filetypes=self.filetypes,
                defaultextension=self.defaultextension
            )
        else:
            data = tkfiledialog.askopenfilename(
                title='Select {}'.format(self.widgets['Title']['text'])
            )
        # CAUTION: tkFileDialog returns empty str on cancelling
        if data != '':
            self.input.set(data)
class ProgressStrip(tk.Frame):
"""
Show progress with text and bar. Support determined and | |
"""
Author: Anonymous
Description:
Contains several features for analyzing and comparing the
performance across multiple experiments:
- perfloss : Performance w.r.t. test/train loss ratio and the
used AE architecture
- perfratio : Showing performance w.r.t. test, train loss and the
used AE architecture
- bd : Plots the behaviour coverage graph
- fit : Plots the fitness graph
"""
import os
import sys
import ast
import csv
import logging
import pickle
import glob
import argparse
import time
import numpy as np
import pandas as pd
import multiprocessing as mpi
import matplotlib as mpl
mpl.rcParams['pdf.fonttype'] = 42
mpl.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.markers as mmarkers
import matplotlib.ticker as ticker
from itertools import combinations
from functools import partial
from behaviour_representations.analysis import load_metadata, load_dataset
from behaviour_representations.utils.utils import timing
# Module-level logger; handlers/levels are configured by the entry point.
logger = logging.getLogger(__name__)

# CLI: input/output directories, which plot types to generate, and an
# optional '+'-separated substring filter on experiment directory names.
parser = argparse.ArgumentParser()
parser.add_argument('-load', '--load_path',
                    default=None, required=True,
                    help="Path to directory to plot.")
parser.add_argument('-save', '--save_path',
                    default=None, required=False,
                    help="Path to directory where to save.")
parser.add_argument('-t', '--plot_type', nargs='+',
                    default=['bd'],  # , 'fit', 'perfloss', 'perfratio', 'perfl2'
                    help="Select plot type(s):\n"
                         "'perfloss'\n"
                         "'perfratio'\n"
                         "'perfl2'\n"
                         "'bd'\n"
                         "'fit'\n")
parser.add_argument('-f', '--filter_string',
                    default='',
                    help="Take into account experiments that contain this.")
def mscatter(x, y, ax=None, m=None, **kw):
    """Scatter plot supporting one marker per point.

    Wraps ``Axes.scatter`` and, when ``m`` supplies exactly one marker per
    point, swaps the collection's paths so every point gets its own marker.
    Returns the PathCollection.
    """
    if not ax:
        ax = plt.gca()
    sc = ax.scatter(x, y, **kw)
    if (m is not None) and (len(m) == len(x)):
        paths = []
        for marker in m:
            style = (marker if isinstance(marker, mmarkers.MarkerStyle)
                     else mmarkers.MarkerStyle(marker))
            paths.append(style.get_path().transformed(style.get_transform()))
        sc.set_paths(paths)
    return sc
def _smooth(data, w_len=10000):
window = np.ones(w_len)/w_len
pad = np.ones(w_len//2)
data_pad = np.concatenate([pad*data[0], data, pad[:-1]*data[-1]])
data_smooth = np.convolve(data_pad, window, mode='valid')
assert len(data_smooth) == len(data), \
"data_smooth: {}; data: {}; smooth {}".format(
len(data_smooth), len(data), len(smooth))
return data_smooth
def load_bddata(filename):
    """Load a behaviour-descriptor CSV into a {column: values} dict."""
    frame = pd.read_csv(filename)
    # Runs written in the older column layout are handled by the legacy loader.
    if frame.shape[1] != 8:
        return load_bddata_old(frame.values.T, filename)
    data_dict = {col: vals for col, vals in zip(frame.columns, frame.values.T)}
    # Run name: strip the 'ref_data_' prefix and the '.csv' suffix.
    data_dict['name'] = filename.split('/')[-1][9:-4]
    if '_mix_' not in data_dict['name'] and '_dde' not in data_dict['name']:
        data_dict['ratios'] = None
    elif len(data_dict['ratios']) > 1:
        # Align the ratio series with loop 0 by duplicating the first entry.
        data_dict['ratios'][0] = data_dict['ratios'][1]
    return data_dict
def load_bddata_old(data, filename):
    """Parse the legacy (column-transposed) behaviour-descriptor layout.

    :param data: column-major array, i.e. ``pd.read_csv(...).values.T``.
    :param filename: original csv path; only used to derive the run name.
    """
    # data = pd.read_csv(filename, header=None)
    data_dict = {}
    # Get experiment name
    data_dict['name'] = filename.split('/')[-1][9:-4]
    # Get loop number
    data_dict['nloop'] = data[0]
    # Get iteration number
    data_dict['niter'] = data[1]
    # Get number of samples per iteration
    data_dict['nsmp'] = data[2]
    # Get behaviour descriptor lists
    # Columns store python-literal strings; parse them in parallel.
    with mpi.Pool(processes=7) as pool:
        data_nbd = list(pool.map(ast.literal_eval, data[3]))
        # data_dict['labs'] = list(pool.map(ast.literal_eval, data[4]))
        try:
            data_fit = list(pool.map(ast.literal_eval, data[4]))
        except:  # best-effort: fitness column may be absent or malformed
            data_fit = None
        try:
            # data_ratios = list(pool.map(ast.literal_eval, data[5][1:]))
            data_ratios = list(pool.map(ast.literal_eval, data[6][1:]))
        except:  # best-effort: ratios column may be absent or malformed
            data_ratios = None
    # [len(bds) for bds in data_nbd])
    # Coverage per loop = number of behaviour descriptors recorded.
    data_dict['coverage'] = np.array(list(map(len, data_nbd)))
    data_dict['fitness'] = np.array(list(map(max, data_fit))) \
        if data_fit is not None else None
    # Get mixing ratios if available
    if data_ratios is None or len(data_ratios)==0 \
            or '_mix_' not in data_dict['name']:
        data_dict['ratios'] = None
    else:
        tmp = np.array(data_ratios)
        # Denominator column must be strictly positive before dividing.
        assert np.min(tmp, axis=0)[1] > 0
        ratios = tmp[:,0]/tmp[:,1]
        # Duplicate the first ratio so the series aligns with loop 0.
        data_dict['ratios'] = np.concatenate([[ratios[0]], ratios])
    return data_dict
def load_lossdata(filepath):
    """Extract the final AE test/train losses stored next to an experiment file.

    Looks for ``saved_models/training_losses_param_ae.csv`` in the directory
    containing ``filepath``. Each loss cell is a python-literal list; the
    final row's lists are summed.

    :returns: (final_test, final_train); (None, None) when unavailable.
    """
    filepath = '/'.join(filepath.split('/')[:-1])
    filename = os.path.join(filepath,
                            'saved_models/training_losses_param_ae.csv')
    try:
        data = pd.read_csv(filename,
                           header=None, usecols=[1, 2], names=['test', 'train'])
    except Exception:
        # BUG FIX: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit. Missing/corrupt files are expected,
        # so keep the best-effort (None, None) result for Exception only.
        return None, None
    if data.empty:
        return None, None
    final_test = ast.literal_eval(data['test'].values[-1])
    # A leading None marks "no test split" in the stored losses.
    final_test = None if final_test[0] is None else sum(final_test)
    final_train = sum(ast.literal_eval(data['train'].values[-1]))
    return final_test, final_train
def load_metadata_info(filepath):
    """Extract the representation-learning approach and latent size.

    Returns a (latent-dim tag, representation type, search algorithm) triple
    read from the experiment metadata next to ``filepath``.
    """
    exp_dir = '/'.join(filepath.split('/')[:-1])
    metadata = load_metadata(exp_dir)
    search_algo = metadata['exploration']['normal']
    # Parameter-space search baselines carry no learned representation.
    if 'ps_' in search_algo:
        return 'PS', 'PS', search_algo
    ae_param = metadata['training']['ae_param']
    if ae_param is None:
        representation_type = 'PCA'
    else:
        layer_sizes = [str(aa[1]) for aa in ae_param['architecture']]
        representation_type = 'AE-' + '-'.join(layer_sizes)
    dim_latent = metadata['training']['dim_latent']
    return 'LD-{}'.format(dim_latent), representation_type, search_algo
def mpi_get_dist(ij, param_original):
    """Euclidean distance between parameter vectors ``ij[0]`` and ``ij[1]``.

    Shaped for ``multiprocessing.Pool.map`` with ``functools.partial``.
    """
    i, j = ij
    return np.linalg.norm(param_original[i] - param_original[j])
def load_get_l2dist(filepath):
    """ Extract the mean of the pairwise l2-dist of parameters in archive """
    filepath = '/'.join(filepath.split('/')[:-1])
    dataset = load_dataset(filepath)
    param_original = dataset['param_original']
    # Release the (potentially large) dataset as soon as possible.
    del dataset
    # Mask built from the first row: keep the finite parameter dims only.
    non_inf = param_original[0]<np.inf
    comb_idx = list(combinations(range(len(param_original)), 2))
    param_flat = param_original[:, non_inf]
    # ### indexing - cannot fit in memory
    # try:
    #     mm = np.linalg.norm(param_flat[None,...]-param_flat[:,None,:], axis=2)
    #     mean_dist = np.triu(mm).sum() / (np.triu(mm)>0).sum()
    #     return mean_dist
    # except Exception as e:
    #     print("\nNUMPY VERSION FAILED:", e)
    ### parallelizing with multiprocessing
    # One worker per pair-index; param_flat is bound once via partial.
    with mpi.Pool(mpi.cpu_count()-1) as pool:
        dist_list = pool.map(partial(mpi_get_dist,
                                     param_original=param_flat), comb_idx)
    mean_dist = np.mean(dist_list)
    # ### bruteforce
    # dist_list = []
    # for i, pp in enumerate(param_original):
    #     focus_param = pp[non_inf]
    #     for j, qq in enumerate(param_original):
    #         if i != j:
    #             compare_param = qq[non_inf]
    #             dist_list.append(np.linalg.norm(focus_param-compare_param))
    # mean_dist = np.mean(dist_list)
    return mean_dist
################################################################################
def plot_performance_v_l2dist(refpath, graph_name, metric_name, metric_dim,
filter_string,
savepath=None, show_plots=False, spec_title=None,
img_format='jpg', dpi=300, **kwargs):
""" Get all .csv data files of same experiment type """
if len(filter_string): graph_name = graph_name+'__'+filter_string
fig, ax = plt.subplots(1, 1, figsize=(5, 5))
mlist = ['o', 'v', '^', '*','P', 's', 'X', '<', '>', 'p', 'D', 'd']
colors = np.array(plt.rcParams['axes.prop_cycle'].by_key()['color'])
fname = os.path.join(refpath, 'saved_performance_v_l2dist.pkl')
if os.path.exists(fname):
print("\nLOADING:", fname)
with open(fname, "rb") as f:
experiments_dict = pickle.load(f)
else:
# Organise experiments to consider
experiments_dict = {}
if '.csv' in refpath:
# If plotting only one experiment
show_xloop = True
exp_name = '_'.join(refpath.split('/')[-2].split('__')[:2])
experiments_dict[exp_name] = refpath
else:
# Organise plotting multiple experiments
filter_include = []
filter_exclude = []
for fterm in filter_string.split('+'):
if len(fterm) and fterm[0]=='^':
filter_exclude += glob.glob('{}/ENV_*{}*'.format(refpath,
fterm[1:]))
else:
filter_include += glob.glob('{}/ENV_*{}*'.format(refpath,
fterm))
filter_exp = np.setdiff1d(filter_include, filter_exclude)
# Extract only AE-based experiments
for d in filter_exp:
# Define the filters
exp_name = d.split('/')[-1].split('__')[2]
# Group csv files accordimg to filters
if '__S' not in d.split('/')[-1]:
csv_file = glob.glob(d+'/S*/ref_data_*.csv')
else:
csv_file = glob.glob(d+'/ref_data_*.csv')
if exp_name in experiments_dict.keys():
experiments_dict[exp_name]['csv'] += csv_file
else:
experiments_dict[exp_name] = dict(csv=csv_file)
# Load and plot points for each experiment
experiments_to_plot = sorted(experiments_dict.keys(), reverse=True)
n_exp = len(experiments_to_plot)
print("\n\n=== starting: L2-DIST GRAPH ===\n- {}".format(
'\n- '.join(experiments_to_plot)))
all_points = []
for i, klab in enumerate(experiments_to_plot):
print("\n> Extracting performance ({}/{}): '{}'".format(
i+1, n_exp, klab))
# Get all seeds of this experiment and average values
l2dist, num_bd = [], []
for cv in experiments_dict[klab]['csv']:
print(" > Loading:", cv)
latent_dim, latent_type, search_algo = load_metadata_info(cv)
data_dict = load_bddata(cv)
if len(data_dict['coverage']):
final_num_bd = data_dict['coverage'][-1]
num_bd.append(final_num_bd)
seedl2dist = load_get_l2dist(cv)
l2dist.append(seedl2dist)
else:
print(" > EMPTY!")
del data_dict
if len(num_bd) == 0:
print("> ALL EMPTY!")
continue
else:
experiments_dict[klab]['num_bd'] = np.median(num_bd)
experiments_dict[klab]['l2dist'] = np.mean(l2dist)
experiments_dict[klab]['latent_type'] = latent_type
experiments_dict[klab]['latent_dim'] = latent_dim
experiments_dict[klab]['search_algo'] = search_algo
# save experiments_dict
with open(fname, "wb") as f:
print("\nSAVING:", fname)
pickle.dump(experiments_dict, f)
# Plot graph
total_l2 = [ed['l2dist'] for ed in experiments_dict.values()]
total_nbd = [ed['num_bd'] for ed in experiments_dict.values()]
l2min, l2max = min(total_l2), max(total_l2)
bdmin, bdmax = min(total_nbd), max(total_nbd)
total_ltype = [ed['latent_type'] for ed in experiments_dict.values()]
total_ldim = [ed['latent_dim'] for ed in experiments_dict.values()]
total_search = [ed['search_algo'] for ed in experiments_dict.values()]
uniq_ltype = sorted(np.unique(total_ltype), reverse=True)
uniq_ldim = sorted(np.unique(total_ldim), reverse=True)
uniq_search = sorted(np.unique(total_search))
szdict = dict(zip(uniq_ltype, (5*np.arange(1,len(uniq_ltype)+1))**2))
mkdict = dict(zip(uniq_ldim, mlist[:len(uniq_ldim)]))
expdict = dict(zip(uniq_search, colors[:len(uniq_search)]))
plot_ltype = [szdict[rtl] for rtl in total_ltype]
plot_ldim = [mkdict[rtd] for rtd in total_ldim]
plot_search = [expdict[rts] for rts in total_search]
plot_hatch = ['....' if 'PCA' in rtl else '' for rtl in total_ltype]
# Plot experimant points
for i in range(len(experiments_dict)):
ax.scatter(total_l2[i], total_nbd[i],
s=plot_ltype[i], marker=plot_ldim[i], c=plot_search[i],
hatch=plot_hatch[i],
label=total_search[i],
edgecolor='k', lw=.4, alpha=0.5)
# Plot lines to PS versions
ax.set_xlim(0.1, 10*l2max)
ax.set_ylim(0.8*bdmin, 1.05*bdmax)
ps_search = [ek for ek in experiments_dict.keys() if 'ps_' in ek]
for psexp in ps_search:
sa = experiments_dict[psexp]['search_algo']
xcoord = experiments_dict[psexp]['l2dist']
ycoord = experiments_dict[psexp]['num_bd']
# Add linear line
ax.vlines(xcoord, ax.get_ylim()[0], ycoord, alpha=0.6, linestyles='--',
lw=1, colors=expdict[sa], zorder=0)
# Add ps_mape line
ax.hlines(ycoord, ax.get_xlim()[0], xcoord, alpha=0.6, linestyles='--',
lw=1, colors=expdict[sa], zorder=0)
# Labels
max_bd = np.prod(metric_dim)
ylabel = 'discovered behaviours (max {})'.format(max_bd)
xlabel = 'mean L2-distance'
# Add labels
num_exp = len(experiments_dict)
plt.minorticks_on()
ax.set_title('{} (total: {} experiments)'.format(graph_name, num_exp))
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.set_xscale("log", nonposx='clip')
ax.grid(b=True, which='minor', alpha=0.2)
ax.grid(b=True, which='major', alpha=0.5)
# Add legends
ldim_sorted = np.vstack([(plt.scatter([], [], c='k', marker=mkdict[ld]),
ld) for ld in sorted(mkdict.keys(), reverse=True)])
ltype_sorted = np.vstack([(plt.scatter([], [], c='w', edgecolor='k',
s=szdict[lt], hatch='....' if 'PCA' in lt else ''), lt) \
for lt in sorted(szdict.keys(), reverse=True)])
search_sorted = np.vstack([(plt.scatter([], [], c=expdict[lt]), lt) \
for lt in sorted(expdict.keys())])
lgd_search = ax.legend(search_sorted[:,0], search_sorted[:,1],
loc='upper left', bbox_to_anchor=(1., 1.01,), ncol=1)
nsalg = 1 - len(search_sorted)*0.065
lgd_ldim = ax.legend(ldim_sorted[:,0], ldim_sorted[:,1],
loc='upper left', bbox_to_anchor=(1., nsalg,), ncol=1) #len(uniq_ldim))
lgd_ltype = ax.legend(ltype_sorted[:,0], ltype_sorted[:,1],
loc='upper left', bbox_to_anchor=(1.27, nsalg,), ncol=1) #len(uniq_ltype))
ax.add_artist(lgd_ldim)
ax.add_artist(lgd_ltype)
ax.add_artist(lgd_search)
# Save/show figure
savepath = refpath if savepath is | |
# <gh_stars>0
# Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import (Dict, ItemsView, Iterable, Iterator, KeysView, Mapping,
Tuple, TypeVar, Union, ValuesView, overload, Optional, cast)
import cmath
import math
import numpy as np
from cirq import value
from cirq.ops import (
raw_types,
gate_operation,
common_gates,
op_tree,
pauli_gates,
clifford_gate,
pauli_interaction_gate,
)
# Type variable used to type the `default` argument of `PauliString.get`.
TDefault = TypeVar('TDefault')
@value.value_equality(approximate=True, manual_cls=True)
class PauliString(raw_types.Operation):
    """An operation applying a product of Paulis on distinct qubits,
    scaled by a complex coefficient.

    The string is stored as a mapping from qubit to the Pauli acting on
    that qubit, plus a single complex `coefficient`.  A PauliString over
    exactly one qubit with coefficient 1 is made value-equal to the
    corresponding single-qubit `GateOperation` (see
    `_value_equality_values_` below).
    """
    def __init__(self,
                 qubit_pauli_map: Optional[
                     Mapping[raw_types.Qid, pauli_gates.Pauli]] = None,
                 coefficient: Union[int, float, complex] = 1) -> None:
        """Initializes a PauliString.

        Args:
            qubit_pauli_map: Which Pauli acts on which qubit.  Defaults to
                the empty (identity) string.
            coefficient: Scalar multiplier; normalized to `complex`.
        """
        if qubit_pauli_map is None:
            qubit_pauli_map = {}
        # Defensive copy so later mutation of the caller's mapping cannot
        # change this value-equal (and hashable) operation.
        self._qubit_pauli_map = dict(qubit_pauli_map)
        self._coefficient = complex(coefficient)
    @staticmethod
    def from_single(qubit: raw_types.Qid,
                    pauli: pauli_gates.Pauli) -> 'PauliString':
        """Creates a PauliString with a single qubit."""
        return PauliString({qubit: pauli})
    @property
    def coefficient(self) -> complex:
        # Scalar multiplier of the Pauli product.
        return self._coefficient
    def _value_equality_values_(self):
        # Special case: a one-qubit string with unit coefficient compares
        # equal to the plain GateOperation of that Pauli on that qubit.
        if len(self._qubit_pauli_map) == 1 and self.coefficient == 1:
            q, p = list(self._qubit_pauli_map.items())[0]
            return gate_operation.GateOperation(p,
                                                [q])._value_equality_values_()
        return (frozenset(self._qubit_pauli_map.items()),
                self._coefficient)
    def _value_equality_values_cls_(self):
        # Must mirror the special case above so the equality "class" used by
        # cirq.value matches as well.
        if len(self._qubit_pauli_map) == 1 and self.coefficient == 1:
            return gate_operation.GateOperation
        return PauliString
    def equal_up_to_coefficient(self, other: 'PauliString') -> bool:
        """Returns True if `other` has the same qubit-to-Pauli mapping."""
        return self._qubit_pauli_map == other._qubit_pauli_map
    def __getitem__(self, key: raw_types.Qid) -> pauli_gates.Pauli:
        return self._qubit_pauli_map[key]
    # pylint: disable=function-redefined
    @overload
    def get(self, key: raw_types.Qid) -> pauli_gates.Pauli:
        pass
    @overload
    def get(self, key: raw_types.Qid,
            default: TDefault) -> Union[pauli_gates.Pauli, TDefault]:
        pass
    def get(self, key: raw_types.Qid, default=None):
        """Returns the Pauli acting on `key`, or `default` when absent."""
        return self._qubit_pauli_map.get(key, default)
    # pylint: enable=function-redefined
    def __mul__(self, other):
        # Scalar multiplication only scales the coefficient.
        if isinstance(other, (int, float, complex)):
            return PauliString(self._qubit_pauli_map, self._coefficient * other)
        if isinstance(other, PauliString):
            s1 = set(self.keys())
            s2 = set(other.keys())
            extra_phase = 1
            terms = {}
            # Qubits touched by only one of the two strings keep their Pauli.
            for c in s1 - s2:
                terms[c] = self[c]
            for c in s2 - s1:
                terms[c] = other[c]
            # On shared qubits the Paulis multiply pairwise; the product may
            # pick up a phase factor, and identity results are dropped.
            for c in s1 & s2:
                f, p = self[c].phased_pauli_product(other[c])
                extra_phase *= f
                if p != common_gates.I:
                    terms[c] = p
            return PauliString(
                terms, self.coefficient * other.coefficient * extra_phase)
        return NotImplemented
    def __rmul__(self, other):
        # scalar * PauliString; scalars commute with the string.
        if isinstance(other, (int, float, complex)):
            return PauliString(self._qubit_pauli_map, self._coefficient * other)
        return NotImplemented
    def __contains__(self, key: raw_types.Qid) -> bool:
        return key in self._qubit_pauli_map
    def _decompose_(self):
        # HACK: Avoid circular dependency.
        from cirq.ops import pauli_string_phasor
        return pauli_string_phasor.PauliStringPhasor(self)._decompose_()
    def keys(self) -> KeysView[raw_types.Qid]:
        return self._qubit_pauli_map.keys()
    @property
    def qubits(self) -> Tuple[raw_types.Qid, ...]:
        # Sorted so the qubit order is deterministic.
        return tuple(sorted(self.keys()))
    def with_qubits(self, *new_qubits: raw_types.Qid) -> 'PauliString':
        """Returns the same string re-targeted onto `new_qubits`, matched
        positionally against the current sorted qubits."""
        return PauliString(dict(zip(new_qubits,
                                    (self[q] for q in self.qubits))),
                           self._coefficient)
    def values(self) -> ValuesView[pauli_gates.Pauli]:
        return self._qubit_pauli_map.values()
    def items(self) -> ItemsView:
        return self._qubit_pauli_map.items()
    def __iter__(self) -> Iterator[raw_types.Qid]:
        return iter(self._qubit_pauli_map.keys())
    def __len__(self) -> int:
        return len(self._qubit_pauli_map)
    def __repr__(self):
        ordered_qubits = sorted(self.qubits)
        prefix = ''
        factors = []
        # A coefficient of -1 is rendered as a leading minus sign; any other
        # non-unit coefficient becomes an explicit leading factor.
        if self._coefficient == -1:
            prefix = '-'
        elif self._coefficient != 1:
            factors.append(repr(self._coefficient))
        if not ordered_qubits:
            factors.append('cirq.PauliString()')
        for q in ordered_qubits:
            factors.append(repr(cast(raw_types.Gate, self[q]).on(q)))
        fused = prefix + '*'.join(factors)
        if len(factors) > 1:
            return '({})'.format(fused)
        return fused
    def __str__(self):
        ordered_qubits = sorted(self.qubits)
        prefix = ''
        factors = []
        if self._coefficient == -1:
            prefix = '-'
        elif self._coefficient != 1:
            factors.append(repr(self._coefficient))
        if not ordered_qubits:
            factors.append('I')
        for q in ordered_qubits:
            factors.append(str(cast(raw_types.Gate, self[q]).on(q)))
        return prefix + '*'.join(factors)
    def zip_items(self, other: 'PauliString') -> Iterator[
            Tuple[raw_types.Qid, Tuple[pauli_gates.Pauli, pauli_gates.Pauli]]]:
        """Yields (qubit, (self_pauli, other_pauli)) for shared qubits."""
        for qubit, pauli0 in self.items():
            if qubit in other:
                yield qubit, (pauli0, other[qubit])
    def zip_paulis(self, other: 'PauliString'
                   ) -> Iterator[Tuple[pauli_gates.Pauli, pauli_gates.Pauli]]:
        """Yields the Pauli pairs on qubits shared with `other`."""
        return (paulis for qubit, paulis in self.zip_items(other))
    def commutes_with(self, other: 'PauliString') -> bool:
        # Two Pauli strings commute iff an even number of their
        # single-qubit factors fail to commute.
        return sum(not p0.commutes_with(p1)
                   for p0, p1 in self.zip_paulis(other)
                   ) % 2 == 0
    def __neg__(self) -> 'PauliString':
        return PauliString(self._qubit_pauli_map, -self._coefficient)
    def __pos__(self) -> 'PauliString':
        return self
    def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
        """Override behavior of numpy's exp method."""
        # np.exp(pauli_string) is delegated to __rpow__ via math.e**self.
        if ufunc == np.exp and len(inputs) == 1 and inputs[0] is self:
            return math.e**self
        return NotImplemented
    def __pow__(self, power):
        if power == 1:
            return self
        if power == -1:
            # Paulis are self-inverse, so only the coefficient inverts.
            return PauliString(self._qubit_pauli_map, self.coefficient**-1)
        if isinstance(power, (int, float)):
            # r is the magnitude and i the phase angle of the coefficient.
            r, i = cmath.polar(self.coefficient)
            if abs(r - 1) > 0.0001:
                raise NotImplementedError(
                    "Raised a non-unitary PauliString to a power <{!r}**{!r}>. "
                    "Coefficient must be unit-length.".format(self, power))
            if len(self) == 1:
                # Single-qubit case maps directly onto an X/Y/ZPowGate.
                q, p = next(iter(self.items()))
                gates = {
                    pauli_gates.X: common_gates.XPowGate,
                    pauli_gates.Y: common_gates.YPowGate,
                    pauli_gates.Z: common_gates.ZPowGate,
                }
                return gates[p](exponent=power).on(q)
            # Fold the coefficient's phase into the phasor exponents,
            # expressed in half turns.
            global_half_turns = power * (i / math.pi)
            # HACK: Avoid circular dependency.
            from cirq.ops import pauli_string_phasor
            return pauli_string_phasor.PauliStringPhasor(
                PauliString(self._qubit_pauli_map),
                exponent_neg=global_half_turns + power,
                exponent_pos=global_half_turns)
        return NotImplemented
    def __rpow__(self, base):
        """Handles `base**self` for positive scalar `base` (exponentials of
        Pauli strings with imaginary coefficients)."""
        if isinstance(base, (int, float)) and base > 0:
            if abs(self.coefficient.real) > 0.0001:
                raise NotImplementedError(
                    "Exponentiated to a non-hermitian PauliString <{}**{}>. "
                    "Coefficient must be imaginary.".format(base, self))
            half_turns = math.log(base) * (-self.coefficient.imag / math.pi)
            if len(self) == 1:
                q, p = next(iter(self.items()))
                gates = {
                    pauli_gates.X: common_gates.XPowGate,
                    pauli_gates.Y: common_gates.YPowGate,
                    pauli_gates.Z: common_gates.ZPowGate,
                }
                # global_shift=-0.5 removes the gate's global phase so the
                # result equals the exponential of the Pauli.
                return gates[p](exponent=half_turns, global_shift=-0.5).on(q)
            # HACK: Avoid circular dependency.
            from cirq.ops import pauli_string_phasor
            return pauli_string_phasor.PauliStringPhasor(
                PauliString(self._qubit_pauli_map),
                exponent_neg=+half_turns / 2,
                exponent_pos=-half_turns / 2)
        return NotImplemented
    def map_qubits(self, qubit_map: Dict[raw_types.Qid, raw_types.Qid]
                   ) -> 'PauliString':
        """Returns an equivalent string with qubits relabeled by `qubit_map`."""
        new_qubit_pauli_map = {qubit_map[qubit]: pauli
                               for qubit, pauli in self.items()}
        return PauliString(new_qubit_pauli_map, self._coefficient)
    def to_z_basis_ops(self) -> op_tree.OP_TREE:
        """Returns operations to convert the qubits to the computational basis.
        """
        for qubit, pauli in self.items():
            # Single-qubit Clifford mapping this qubit's Pauli onto +Z.
            yield clifford_gate.SingleQubitCliffordGate.from_single_map(
                {pauli: (pauli_gates.Z, False)})(qubit)
    def pass_operations_over(self,
                             ops: Iterable[raw_types.Operation],
                             after_to_before: bool = False) -> 'PauliString':
        """Determines how the Pauli string changes when conjugated by Cliffords.
        The output and input pauli strings are related by a circuit equivalence.
        In particular, this circuit:
            ───ops───INPUT_PAULI_STRING───
        will be equivalent to this circuit:
            ───OUTPUT_PAULI_STRING───ops───
        up to global phase (assuming `after_to_before` is not set).
        If ops together have matrix C, the Pauli string has matrix P, and the
        output Pauli string has matrix P', then P' == C^-1 P C up to
        global phase.
        Setting `after_to_before` inverts the relationship, so that the output
        is the input and the input is the output. Equivalently, it inverts C.
        Args:
            ops: The operations to move over the string.
            after_to_before: Determines whether the operations start after the
                pauli string, instead of before (and so are moving in the
                opposite direction).
        """
        pauli_map = dict(self._qubit_pauli_map)
        should_negate = False
        for op in ops:
            if not set(op.qubits) & set(pauli_map.keys()):
                # op operates on an independent set of qubits from the Pauli
                # string. The order can be switched with no change no matter
                # what op is.
                continue
            # _pass_operation_over mutates pauli_map in place and reports
            # whether the conjugation flipped the overall sign.
            should_negate ^= PauliString._pass_operation_over(pauli_map,
                                                              op,
                                                              after_to_before)
        coef = -self._coefficient if should_negate else self.coefficient
        return PauliString(pauli_map, coef)
    @staticmethod
    def _pass_operation_over(pauli_map: Dict[raw_types.Qid, pauli_gates.Pauli],
                             op: raw_types.Operation,
                             after_to_before: bool = False) -> bool:
        """Conjugates `pauli_map` in place by `op`; True means a sign flip.

        Raises:
            TypeError: `op` is not a supported Clifford operation.
        """
        if isinstance(op, gate_operation.GateOperation):
            gate = op.gate
            if isinstance(gate, clifford_gate.SingleQubitCliffordGate):
                return PauliString._pass_single_clifford_gate_over(
                    pauli_map, gate, op.qubits[0],
                    after_to_before=after_to_before)
            if isinstance(gate, common_gates.CZPowGate):
                # CZ is treated as the equivalent Pauli interaction gate.
                gate = pauli_interaction_gate.PauliInteractionGate.CZ
            if isinstance(gate, pauli_interaction_gate.PauliInteractionGate):
                return PauliString._pass_pauli_interaction_gate_over(
                    pauli_map, gate, op.qubits[0], op.qubits[1],
                    after_to_before=after_to_before)
        raise TypeError('Unsupported operation: {!r}'.format(op))
    @staticmethod
    def _pass_single_clifford_gate_over(
            pauli_map: Dict[raw_types.Qid, pauli_gates.Pauli],
            gate: clifford_gate.SingleQubitCliffordGate,
            qubit: raw_types.Qid,
            after_to_before: bool = False) -> bool:
        """Conjugates the Pauli on `qubit` (if any) by a one-qubit Clifford,
        in place; returns whether the sign flipped."""
        if qubit not in pauli_map:
            return False
        if not after_to_before:
            # Moving the string in the default direction conjugates by the
            # inverse of the gate.
            gate **= -1
        pauli, inv = gate.transform(pauli_map[qubit])
        pauli_map[qubit] = pauli
        return inv
    @staticmethod
    def _pass_pauli_interaction_gate_over(
            pauli_map: Dict[raw_types.Qid, pauli_gates.Pauli],
            gate: pauli_interaction_gate.PauliInteractionGate,
            qubit0: raw_types.Qid,
            qubit1: raw_types.Qid,
            after_to_before: bool = False) -> bool:
        """Conjugates the string by a two-qubit Pauli interaction gate,
        mutating `pauli_map` in place; returns whether the sign flipped."""
        def merge_and_kickback(qubit: raw_types.Qid,
                               pauli_left: Optional[pauli_gates.Pauli],
                               pauli_right: Optional[pauli_gates.Pauli],
                               inv: bool) -> int:
            # Merges the gate's Pauli into the string's Pauli on `qubit`,
            # returning the accumulated phase kickback in quarter turns.
            assert pauli_left is not None or pauli_right is not None
            if pauli_left is None or pauli_right is None:
                # Only one side acts on this qubit: just record its Pauli.
                pauli_map[qubit] = cast(pauli_gates.Pauli,
                                        pauli_left or pauli_right)
                return 0
            elif pauli_left == pauli_right:
                # Equal Paulis multiply to the identity on this qubit.
                del pauli_map[qubit]
                return 0
            else:
                # Distinct Paulis multiply to the third Pauli, with a
                # quarter-turn phase whose sign depends on the order.
                pauli_map[qubit] = pauli_left.third(pauli_right)
                if (pauli_left < pauli_right) ^ after_to_before:
                    return int(inv) * 2 + 1
                else:
                    return int(inv) * 2 - 1
        quarter_kickback = 0
        if (qubit0 in pauli_map and
                not pauli_map[qubit0].commutes_with(gate.pauli0)):
            quarter_kickback += merge_and_kickback(qubit1,
                                                   gate.pauli1,
                                                   pauli_map.get(qubit1),
                                                   gate.invert1)
        if (qubit1 in pauli_map and
                not pauli_map[qubit1].commutes_with(gate.pauli1)):
            quarter_kickback += merge_and_kickback(qubit0,
                                                   pauli_map.get(qubit0),
                                                   gate.pauli0,
                                                   gate.invert0)
        assert quarter_kickback % 2 == 0, (
            'Impossible condition.  '
            'quarter_kickback is either incremented twice or never.')
        return quarter_kickback % 4 == 2
# Ignoring type because mypy believes `with_qubits` methods are incompatible.
class SingleQubitPauliStringGateOperation( # type: ignore
gate_operation.GateOperation, PauliString):
"""A Pauli operation applied to a qubit.
Satisfies the contract of both GateOperation and PauliString. Relies
implicitly on the fact that PauliString({q: X}) compares as equal to
| |
thismask
# apply the masks
for col in cols:
if '.' in col:
key, subkey = col.split('.')
lcdict[key][subkey] = lcdict[key][subkey][exclind]
else:
lcdict[col] = lcdict[col][exclind]
nafter = lcdict['time'].size
LOGINFO('removed timestoignore, ndet before = %s, ndet after = %s'
% (nbefore, nafter))
###################
## KEPLER LC EPD ##
###################
def _epd_function(coeffs, fluxes, xcc, ycc, bgv, bge):
'''
This is the EPD function to fit.
'''
epdf = (
coeffs[0] +
coeffs[1]*npsin(2*MPI*xcc) + coeffs[2]*npcos(2*MPI*xcc) +
coeffs[3]*npsin(2*MPI*ycc) + coeffs[4]*npcos(2*MPI*ycc) +
coeffs[5]*npsin(4*MPI*xcc) + coeffs[6]*npcos(4*MPI*xcc) +
coeffs[7]*npsin(4*MPI*ycc) + coeffs[8]*npcos(4*MPI*ycc) +
coeffs[9]*bgv +
coeffs[10]*bge
)
return epdf
def _epd_residual(coeffs, fluxes, xcc, ycc, bgv, bge):
    '''
    Residual (data minus EPD model) to minimize with scipy.optimize.leastsq.
    '''
    return fluxes - _epd_function(coeffs, fluxes, xcc, ycc, bgv, bge)
def epd_kepler_lightcurve(lcdict,
                          xccol='mom_centr1',
                          yccol='mom_centr2',
                          timestoignore=None,
                          filterflags=True,
                          writetodict=True,
                          epdsmooth=5):
    '''This runs EPD on the Kepler light curve.

    Following Huang et al. 2015, we fit and subtract the following EPD
    function:

    f = c0 +
        c1*sin(2*pi*x) + c2*cos(2*pi*x) + c3*sin(2*pi*y) + c4*cos(2*pi*y) +
        c5*sin(4*pi*x) + c6*cos(4*pi*x) + c7*sin(4*pi*y) + c8*cos(4*pi*y) +
        c9*bgv + c10*bge

    timestoignore is a list of tuples containing start and end times to mask
    out (exclude) before fitting the EPD function:

    [(time1_start, time1_end), (time2_start, time2_end), ...]

    NOTES:
    - this function returns times and mags by default
    - by default, this function removes points in the Kepler LC that have ANY
      quality flags set

    if writetodict is set, adds the following keys to lcdict['epd']:

    time = time array
    sapflux = uncorrected flux before EPD
    epdsapflux = corrected flux after EPD
    epdsapcorr = EPD flux corrections
    bkg = background array
    bkg_err = background errors array
    xcc = xcoord array
    ycc = ycoord array
    quality = quality flag array

    and updates the 'columns' list in the lcdict as well.

    Returns (times, epdfluxes, fitcoeffs, epdfit), or four Nones if the
    least-squares fit fails.
    '''
    times, fluxes, background, background_err = (lcdict['time'],
                                                 lcdict['sap']['sap_flux'],
                                                 lcdict['sap']['sap_bkg'],
                                                 lcdict['sap']['sap_bkg_err'])
    xcc = lcdict[xccol]
    ycc = lcdict[yccol]
    flags = lcdict['sap_quality']
    # filter all bad LC points as noted by quality flags
    if filterflags:
        nbefore = times.size
        filterind = flags == 0
        times = times[filterind]
        fluxes = fluxes[filterind]
        background = background[filterind]
        background_err = background_err[filterind]
        xcc = xcc[filterind]
        ycc = ycc[filterind]
        flags = flags[filterind]
        nafter = times.size
        LOGINFO('applied quality flag filter, ndet before = %s, ndet after = %s'
                % (nbefore, nafter))
    # remove nans from every column used below
    find = (npisfinite(xcc) & npisfinite(ycc) &
            npisfinite(times) & npisfinite(fluxes) &
            npisfinite(background) & npisfinite(background_err))
    nbefore = times.size
    times = times[find]
    fluxes = fluxes[find]
    background = background[find]
    background_err = background_err[find]
    xcc = xcc[find]
    ycc = ycc[find]
    flags = flags[find]
    nafter = times.size
    LOGINFO('removed nans, ndet before = %s, ndet after = %s'
            % (nbefore, nafter))
    # exclude all times in timestoignore
    if (timestoignore and
        isinstance(timestoignore, list) and
        len(timestoignore) > 0):
        # FIX: force a boolean mask; npfull_like(times, True) inherits the
        # float dtype of `times`, and float & bool raises TypeError below.
        exclind = npfull_like(times, True, dtype=bool)
        # FIX: was `nefore` (typo), which left the log line at the end of
        # this branch reporting the stale count from the nan-removal step.
        nbefore = times.size
        # apply all the masks: drop points INSIDE each ignore window
        # (FIX: the mask was previously not inverted, which KEPT only the
        # ignored points, contradicting the documented behavior).
        for ignoretime in timestoignore:
            time0, time1 = ignoretime[0], ignoretime[1]
            thismask = ~((times > time0) & (times < time1))
            exclind = exclind & thismask
        # quantities after masks have been applied
        times = times[exclind]
        fluxes = fluxes[exclind]
        background = background[exclind]
        background_err = background_err[exclind]
        xcc = xcc[exclind]
        ycc = ycc[exclind]
        flags = flags[exclind]
        nafter = times.size
        LOGINFO('removed timestoignore, ndet before = %s, ndet after = %s'
                % (nbefore, nafter))
    # now that we're all done, we can do EPD
    # first, smooth the light curve so the fit tracks systematics, not noise
    smoothedfluxes = medfilt(fluxes, epdsmooth)
    # initial fit coeffs
    initcoeffs = npones(11)
    # fit the smoothed fluxes and find better coeffs
    leastsqfit = leastsq(_epd_residual,
                         initcoeffs,
                         args=(smoothedfluxes,
                               xcc, ycc,
                               background, background_err))
    # leastsq ier values 1-4 indicate a solution was found
    if leastsqfit[-1] in (1,2,3,4):
        fitcoeffs = leastsqfit[0]
        epdfit = _epd_function(fitcoeffs,
                               fluxes,
                               xcc,
                               ycc,
                               background,
                               background_err)
        # subtract the systematics model and re-center on the median flux
        epdfluxes = npmedian(fluxes) + fluxes - epdfit
        # write these to the dictionary if requested
        if writetodict:
            lcdict['epd'] = {}
            lcdict['epd']['time'] = times
            lcdict['epd']['sapflux'] = fluxes
            lcdict['epd']['epdsapflux'] = epdfluxes
            lcdict['epd']['epdsapcorr'] = epdfit
            lcdict['epd']['bkg'] = background
            lcdict['epd']['bkg_err'] = background_err
            lcdict['epd']['xcc'] = xcc
            lcdict['epd']['ycc'] = ycc
            lcdict['epd']['quality'] = flags
            for newcol in ['epd.time','epd.sapflux',
                           'epd.epdsapflux','epd.epdsapcorr',
                           # FIX: was 'epd.bkg.err' -- '.' is the key/subkey
                           # separator in column names, and the actual
                           # subkey written above is 'bkg_err'.
                           'epd.bkg','epd.bkg_err',
                           'epd.xcc','epd.ycc',
                           'epd.quality']:
                if newcol not in lcdict['columns']:
                    lcdict['columns'].append(newcol)
        return times, epdfluxes, fitcoeffs, epdfit
    else:
        LOGERROR('could not fit EPD function to light curve')
        return None, None, None, None
# FIXME: this is only available if sklearn is available. not sure if we should
# add yet another dependency
if SKLEARN:
    def rfepd_kepler_lightcurve(lcdict,
                                xccol='mom_centr1',
                                yccol='mom_centr2',
                                timestoignore=None,
                                filterflags=True,
                                writetodict=True,
                                epdsmooth=23,
                                decorr='xcc,ycc',
                                nrftrees=200):
        '''
        This uses a RandomForestRegressor to fit and correct K2 light curves.

        Fits the X and Y positions, and the background and background error,
        depending on `decorr` ('xcc,ycc', 'bgv,bge', or 'xcc,ycc,bgv,bge').

        timestoignore is a list of tuples containing start and end times to
        mask out (exclude) before fitting:

        [(time1_start, time1_end), (time2_start, time2_end), ...]

        By default, this function removes points in the Kepler LC that have
        ANY quality flags set.

        if writetodict is set, adds the following keys to lcdict['rfepd']:

        time = time array
        sapflux = uncorrected flux before EPD
        epdsapflux = corrected flux after EPD
        epdsapcorr = EPD flux corrections
        bkg = background array
        bkg_err = background errors array
        xcc = xcoord array
        ycc = ycoord array
        quality = quality flag array

        and updates the 'columns' list in the lcdict as well.

        Returns (times, corrected_fluxes, flux_corrections), or None if
        `decorr` is not understood.
        '''
        times, fluxes, background, background_err = (
            lcdict['time'],
            lcdict['sap']['sap_flux'],
            lcdict['sap']['sap_bkg'],
            lcdict['sap']['sap_bkg_err']
        )
        xcc = lcdict[xccol]
        ycc = lcdict[yccol]
        flags = lcdict['sap_quality']
        # filter all bad LC points as noted by quality flags
        if filterflags:
            nbefore = times.size
            filterind = flags == 0
            times = times[filterind]
            fluxes = fluxes[filterind]
            background = background[filterind]
            background_err = background_err[filterind]
            xcc = xcc[filterind]
            ycc = ycc[filterind]
            flags = flags[filterind]
            nafter = times.size
            LOGINFO('applied quality flag filter, ndet before = %s, '
                    'ndet after = %s'
                    % (nbefore, nafter))
        # remove nans from every column used below
        find = (npisfinite(xcc) & npisfinite(ycc) &
                npisfinite(times) & npisfinite(fluxes) &
                npisfinite(background) & npisfinite(background_err))
        nbefore = times.size
        times = times[find]
        fluxes = fluxes[find]
        background = background[find]
        background_err = background_err[find]
        xcc = xcc[find]
        ycc = ycc[find]
        flags = flags[find]
        nafter = times.size
        LOGINFO('removed nans, ndet before = %s, ndet after = %s'
                % (nbefore, nafter))
        # exclude all times in timestoignore
        if (timestoignore and
            isinstance(timestoignore, list) and
            len(timestoignore) > 0):
            # FIX: force a boolean mask; npfull_like(times, True) inherits
            # the float dtype of `times`, and float & bool raises TypeError.
            exclind = npfull_like(times, True, dtype=bool)
            # FIX: was `nefore` (typo), which left the log line below
            # reporting the stale count from the nan-removal step.
            nbefore = times.size
            # apply all the masks: drop points INSIDE each ignore window
            # (FIX: the mask was previously not inverted, which KEPT only
            # the ignored points, contradicting the documented behavior).
            for ignoretime in timestoignore:
                time0, time1 = ignoretime[0], ignoretime[1]
                thismask = ~((times > time0) & (times < time1))
                exclind = exclind & thismask
            # quantities after masks have been applied
            times = times[exclind]
            fluxes = fluxes[exclind]
            background = background[exclind]
            background_err = background_err[exclind]
            xcc = xcc[exclind]
            ycc = ycc[exclind]
            flags = flags[exclind]
            nafter = times.size
            LOGINFO('removed timestoignore, ndet before = %s, ndet after = %s'
                    % (nbefore, nafter))
        # now that we're all done, we can do EPD
        # set up the regressor
        RFR = RandomForestRegressor(n_estimators=nrftrees)
        if decorr == 'xcc,ycc,bgv,bge':
            # collect the features and target variable
            features = npcolumn_stack((xcc,ycc,background,background_err))
        elif decorr == 'xcc,ycc':
            # collect the features and target variable
            features = npcolumn_stack((xcc,ycc))
        elif decorr == 'bgv,bge':
            # collect the features and target variable
            features = npcolumn_stack((background,background_err))
        else:
            LOGERROR("couldn't understand decorr, not decorrelating...")
            return None
        # smooth the light curve so the regressor tracks systematics
        if epdsmooth:
            smoothedfluxes = medfilt(fluxes, epdsmooth)
        else:
            smoothedfluxes = fluxes
        # fit, then generate the predicted values, then get corrected values
        RFR.fit(features, smoothedfluxes)
        flux_corrections = RFR.predict(features)
        corrected_fluxes = npmedian(fluxes) + fluxes - flux_corrections
        # remove the random forest to save RAM
        del RFR
        # write these to the dictionary if requested
        if writetodict:
            lcdict['rfepd'] = {}
            lcdict['rfepd']['time'] = times
            lcdict['rfepd']['sapflux'] = fluxes
            lcdict['rfepd']['epdsapflux'] = corrected_fluxes
            lcdict['rfepd']['epdsapcorr'] = flux_corrections
            lcdict['rfepd']['bkg'] = background
            lcdict['rfepd']['bkg_err'] = background_err
            lcdict['rfepd']['xcc'] = xcc
            lcdict['rfepd']['ycc'] = ycc
            lcdict['rfepd']['quality'] = flags
            for newcol in ['rfepd.time','rfepd.sapflux',
                           'rfepd.epdsapflux','rfepd.epdsapcorr',
                           # FIX: was 'rfepd.bkg.err' -- '.' is the
                           # key/subkey separator in column names, and the
                           # actual subkey written above is 'bkg_err'.
                           'rfepd.bkg','rfepd.bkg_err',
                           'rfepd.xcc','rfepd.ycc',
                           'rfepd.quality']:
                if newcol not in lcdict['columns']:
                    lcdict['columns'].append(newcol)
        return times, corrected_fluxes, flux_corrections
# if SKLEARN = False
else:
    LOGWARNING('scikit-learn package not found, '
               'function rfepd_kepler_lightcurve '
               'will not be available')
#######################
## CENTROID ANALYSIS ##
#######################
def detrend_centroid(lcd, detrend='legendre', sigclip=None, mingap=0.5):
'''
You are given a dictionary, for a single quarter of Kepler data, returned
by `astrokep.read_kepler_fitslc`. This module returns this same dictionary,
appending detrended centroid_x and centroid_y values.
Here "detrended" means "finite, SAP quality | |
# <reponame>primkey7607/deeplens-cv
"""This file is part of DeepLens which is released under MIT License and
is copyrighted by the University of Chicago. This project is developed by
the database group (chidata).
tiered_videoio.py uses opencv (cv2) to read and write files to disk. It contains
primitives to encode and decode archived and regular video formats for a tiered
storage system.
"""
from deeplens.header import *
from deeplens.simple_manager.file import *
from deeplens.utils.frame_xform import *
import cv2
import os
from os import path
import time
import shutil
import logging
import json
import threading
def _update_headers_batch(conn, crops, background_id, name, video_refs,
                            full_width, full_height, start_time, end_time, update = False):
    """
    Update or create new headers all headers for one batch. In terms of updates, we assume certain
    constraints on the system, and only update possible changes.

    Clip ids are laid out as: `background_id` is the background clip, and
    `background_id + i` (for i in 1..len(crops)) are the cropped clips.

    Args:
        conn: database connection passed through to the header helpers.
        crops: list of crop dicts with 'bb' (bounding box), 'all' (extra
            metadata) and 'label' entries.
        background_id: clip id of the background clip for this batch.
        name: video/table name the headers belong to.
        video_refs: file references, indexed in the same order as the clips.
        full_width, full_height: dimensions of the background frame.
        start_time, end_time: time span covered by this batch.
        update: if True, merge into existing headers instead of inserting.
    """
    if update:
        # Updates
        for i in range(0, len(crops) + 1):
            # NOTE(review): clip_info layout is inferred from usage below:
            # [2]=start_time, [3]=end_time, [4]=origin_x, [5]=origin_y,
            # [10]=translation JSON, [11]='other' JSON — confirm against
            # the clip header schema.
            clip_info = query_clip(conn, i + background_id, name)[0]
            #print(i + background_id)
            updates = {}
            # Widen the stored time span to cover this batch.
            updates['start_time'] = min(start_time, clip_info[2])
            updates['end_time'] = max(end_time, clip_info[3])
            #print(updates['end_time'])
            if i != 0:
                # Crop i-1 corresponds to clip background_id + i.
                origin_x = crops[i - 1]['bb'].x0
                origin_y = crops[i - 1]['bb'].y0
                translation = clip_info[10]
                if translation == 'NULL':
                    # First recorded movement of this crop's origin.
                    if origin_x != clip_info[4] or origin_y != clip_info[5]:
                        updates['translation'] = json.dumps([(start_time, origin_x, origin_y)])
                else:
                    translation = json.loads(clip_info[10])
                    if type(translation) is list:
                        # Append only when the origin actually moved since
                        # the last recorded position.
                        if translation[-1][1] != origin_x or translation[-1][2] != origin_y:
                            translation.append((start_time, origin_x, origin_y))
                            updates['translation'] = json.dumps(translation)
                    else:
                        raise ValueError('Translation object is wrongly formatted')
                other = clip_info[11]
                if other == 'NULL':
                    updates['other'] = json.dumps(crops[i - 1]['all'], cls=Serializer)
                else:
                    other = json.loads(clip_info[11])
                    if type(other) is dict:
                        logging.debug(crops[i - 1])
                        # Merge the new metadata over the stored metadata.
                        other.update(crops[i - 1]['all'])
                        updates['other'] = json.dumps(other, cls=Serializer)
                    else:
                        raise ValueError('All object is wrongly formatted')
            update_clip_header(conn, background_id + i, name, updates)
    else:
        # Fresh batch: link every crop clip to its background clip...
        for i in range(1, len(crops) + 1):
            insert_background_header(conn, background_id, i + background_id, name)
        # ...then create the clip headers (clip 0 is the background).
        for i in range(0, len(crops) + 1):
            if i == 0:
                insert_clip_header(conn, i + background_id, name, start_time, end_time, 0, 0,
                        full_width, full_height, video_refs[i], is_background = True)
            else:
                origin_x = crops[i - 1]['bb'].x0
                origin_y = crops[i - 1]['bb'].y0
                width = crops[i - 1]['bb'].x1 - crops[i - 1]['bb'].x0
                height = crops[i - 1]['bb'].y1 - crops[i - 1]['bb'].y0
                insert_clip_header(conn, i + background_id, name, start_time, end_time, origin_x,
                        origin_y, width, height, video_refs[i], other = json.dumps(crops[i - 1]['all'], cls=Serializer))
        # Register the label(s) of each crop against its clip id.
        for i in range(0, len(crops)):
            if type(crops[i]['label']) is list: # TODO: deal with crop all later
                for j in range(len(crops[i]['label'])):
                    insert_label_header(conn, crops[i]['label'][j], background_id + i + 1, name)
            else:
                insert_label_header(conn, crops[i]['label'], background_id + i + 1, name)
def _write_video_batch(vstream,
                       crops,
                       encoding,
                       batch_size,
                       limit,
                       start_time,
                       dir = DEFAULT_TEMP,
                       frame_rate = 1,
                       release = True,
                       writers = None):
    '''
    Private function which processes and stores a batch of video frames.

    Arguments:
    - vstream: VideoStream (or indexable sequence of frames) which is processed
    - crops: physical crops of frames; each entry carries a 'bb' bounding box
    - encoding: fourcc codec string handed to cv2.VideoWriter_fourcc
    - batch_size: maximum number of frames written in this batch
    - limit: overall frame limit for the whole video (-1 means no limit)
    - start_time: index of this batch's first frame within the full stream
    - dir: directory for the temporary output files
      (NOTE: shadows builtin dir(); name kept for backward compatibility)
    - frame_rate: frame rate recorded in the output files
    - release: whether we release or return the videos after finishing
    - writers: list of optional pre-existing writers that we can write frames into
        - Note: each writer must match a crop

    Returns:
        (writers or None, file names or None, number of frames written)
    '''
    file_names = []
    out_vids = []
    if writers is None:  # fixed: identity comparison with None (was '==')
        r_name = get_rnd_strng()
        # Writer 0 holds the full/background frame; writers 1..n match crops.
        for i in range(len(crops) + 1):
            seg_name = os.path.join(dir, r_name)
            file_name = add_ext(seg_name, AVI, i)
            file_names.append(file_name)
            fourcc = cv2.VideoWriter_fourcc(*encoding)
            if i == 0:
                try:
                    width = vstream.width
                    height = vstream.height
                except AttributeError:
                    # Plain frame list: infer the size from the first frame.
                    width = vstream[0].shape[1]
                    height = vstream[0].shape[0]
            else:
                width = abs(crops[i - 1]['bb'].x1 - crops[i - 1]['bb'].x0)
                height = abs(crops[i - 1]['bb'].y1 - crops[i - 1]['bb'].y0)
            out_vid = cv2.VideoWriter(file_name,
                                      fourcc,
                                      frame_rate,
                                      (width, height),
                                      True)
            out_vids.append(out_vid)
    else:
        out_vids = writers
    index = 0
    for frame in vstream:
        if type(frame) == dict:
            frame = frame['data']
        if len(crops) == 0:
            out_vids[0].write(frame)
        else:
            # Background stream gets the frame with every crop blanked out.
            out_vids[0].write(reverse_crop(frame, crops))
        for i, cr in enumerate(crops, start=1):  # idiom: enumerate over manual counter
            out_vids[i].write(crop_box(frame, cr['bb']))
        index += 1
        # Stop at the batch boundary, or at the global frame limit (if any).
        # Parentheses added for readability; precedence is unchanged.
        if index >= batch_size or (limit != -1 and index >= limit - start_time):
            break
    if not release:
        return (out_vids, file_names if file_names else None, index)
    for vid in out_vids:
        vid.release()
    return (None, file_names if file_names else None, index)
def _split_video_batch(vstream,
                       splitter,
                       batch_size,
                       limit,
                       start_time,
                       process_vid = False,
                       scratch = None,
                       vstream_behind = None,
                       v_cache = None):
    '''
    Private function which labels and crops a batch of video frames.

    Arguments:
    - vstream: VideoStream which is labeled
    - splitter: Splitter object which crops based on labels
    - batch_size: size of batch (docstring previously named this 'size')
    - limit: overall frame limit for the whole video (-1 means no limit)
    - start_time: index of this batch's first frame within the full stream
    - process_vid: whether we also process the video batch after applying a map to it
        - Note: if this is True, we also need scratch and vstream_behind
    - scratch: where to store the video batch after processing it
    - vstream_behind: a copy of the previous video stream so that we can apply map onto it
    - v_cache: cache a buffer of the vstream (necessary for streaming)

    Returns:
        None when the stream is exhausted; (crops, videos) when process_vid
        is set; otherwise the crops produced by the splitter.
    '''
    labels = []
    count = 0
    for frame in vstream:
        labels.append(frame['objects'])
        count += 1
        if v_cache is not None:  # fixed: identity comparison with None (was '!=')
            v_cache.append(frame['frame'])
        # Stop at the batch boundary, or at the global frame limit (if any).
        # Parentheses added for readability; precedence is unchanged.
        if count >= batch_size or (limit != -1 and count >= limit - start_time):
            break
    if count == 0:
        # Stream exhausted: tell the caller to stop iterating.
        return None
    crops = splitter.map(labels)
    if process_vid:
        if not splitter.map_to_video:
            raise ManagerIOError('Splitter does not support map to video')
        videos = _write_video_batch(vstream_behind, crops, limit)  # TODO: parameters wrong
        return (crops, videos)
    return crops
# TODO: parallelize
def write_video_single(conn, \
                       video_file, \
                       target,
                       dir, \
                       splitter, \
                       map, \
                       stream = False,
                       args={}):
    '''
    Split a single video into cropped clip segments and record their
    headers in the database.

    Arguments:
    - conn: database connection used for the clip/label/background headers
    - video_file: path (or source) of the input video
    - target: video name under which the clips are registered
    - dir: directory for the output segment files (shadows builtin dir())
    - splitter: Splitter object that turns frame labels into crops
    - map: transformation applied to the VideoStream before labeling
    - stream: True when the input is a live stream; frames are then cached
      in memory instead of being re-read through a second VideoStream
    - args: settings dict; keys 'batch_size', 'limit' and 'encoding' are read
      (NOTE(review): mutable default argument -- safe only while no caller
      mutates it)

    Returns:
        list of file names of every video segment written
    '''
    batch_size = args['batch_size']
    v = VideoStream(video_file, args['limit'])
    v = iter(v[map])
    if stream:
        v.set_stream(True)
    full_width = v.width
    full_height = v.height
    curr_back = 0 # current clip background id
    start_time = 0 #current batch start time (NOTE: Not current clip start time)
    i = 0
    if stream:
        v_behind = [] # if it's a stream, we cache the buffered video instead of having a slow pointer
    else:
        # Second pass over the same file, one batch behind the labeling pass.
        v_behind = VideoStream(video_file, args['limit'])
        v_behind = iter(v_behind)
    labels = []
    vid_files = []
    # First batch: collect labels until the batch is full or the limit is hit.
    for frame in v:
        labels.append(frame['objects'])
        logging.debug(labels)
        i += 1
        if stream:
            v_behind.append(frame['frame'])
        if args['limit'] != -1 and i >= args['limit'] or i >= batch_size:
            break
    # Seed the splitter with the first batch, then write it out.
    crops, batch_prev, _ = splitter.initialize(labels)
    (writers, file_names, time_block) = _write_video_batch(v_behind, crops, args['encoding'], batch_size, args['limit'], start_time, dir, release = False)
    _update_headers_batch(conn, crops, curr_back, target, file_names,
                          full_width, full_height, start_time, start_time + time_block, update = False)
    start_time = start_time + time_block
    next_back = curr_back + len(crops) + 1
    vid_files.extend(file_names)
    # Remaining batches: when the new crops continue the previous ones
    # (do_join), append to the open writers; otherwise close them and
    # start a fresh set of clips.
    while True:
        if stream:
            v_behind = []
            v_cache = v_behind
        else:
            v_cache = None
        batch_crops = _split_video_batch(v, splitter, batch_size, args['limit'], start_time, v_cache = v_cache)
        if batch_crops == None:
            break
        crops, batch_prev, do_join = splitter.join(batch_prev, batch_crops)
        if do_join:
            writers, _ , time_block = _write_video_batch(v_behind, crops, args['encoding'], batch_size, args['limit'], start_time, dir, release = False, writers = writers)
            _update_headers_batch(conn, crops, curr_back, target, file_names,
                                  full_width, full_height, start_time, start_time + time_block, update = True)
            start_time = start_time + time_block
        else:
            for writer in writers:
                writer.release()
            writers, file_names, time_block = _write_video_batch(v_behind, crops, args['encoding'], batch_size, args['limit'], start_time, dir, release = False)
            curr_back = next_back
            _update_headers_batch(conn, crops, curr_back, target, file_names,
                                  full_width, full_height, start_time, start_time + time_block, update = False)
            start_time = start_time + time_block
            next_back = curr_back + len(crops) + 1
            vid_files.extend(file_names)
    return vid_files
def write_video_parrallel_1(conn, \
                            video_file, \
                            threading, \
                            target,
                            dir, \
                            splitter, \
                            map, \
                            stream = False,
                            args={}):
    '''
    parallelized the put function for preprocessing only

    NOTE(review): unimplemented stub. Interface mirrors write_video_single
    plus a `threading` argument; the "parrallel" typo in the name is kept
    because external callers may reference it.
    '''
    pass
def write_video_parrallel_2(conn, \
                            video_file, \
                            target,
                            dir, \
                            splitter, \
                            map, \
                            stream = False,
                            args={}):
    '''
    parallelized the put function for preprocessing and crops

    NOTE(review): unimplemented stub. Interface mirrors write_video_single;
    the "parrallel" typo in the name is kept because external callers may
    reference it.
    '''
    pass
def delete_video_if_exists(conn, video_name):
    '''
    Remove every stored clip file belonging to video_name and purge its
    header rows (clip, label, background) from the database.

    Arguments:
    - conn: open DB-API connection using '?' placeholders (sqlite3-style)
    - video_name: name of the video whose data is removed

    Returns silently when the video is unknown; a clip file already missing
    on disk is logged as a warning instead of raising.
    '''
    c = conn.cursor()
    # fixed: parameterized queries instead of %-interpolation, which was an
    # SQL-injection vector and broke on names containing quotes.
    # NOTE(review): this SELECT reads table 'clips' while every statement
    # below uses 'clip' -- confirm against the schema that both exist.
    c.execute("SELECT clip_id FROM clips WHERE video_name = ?", (video_name,))
    clips = c.fetchall()
    if not clips:
        # not exist in header file, nothing to do
        return
    # Flatten the single-column rows into a set of distinct clip ids.
    clip_ids = set().union(*map(set, clips))
    for clip in clip_ids:
        c.execute("SELECT video_ref FROM clip WHERE clip_id = ? AND video_name = ?",
                  (clip, video_name))
        video_ref = c.fetchone()[0]
        try:
            os.remove(video_ref)
        except FileNotFoundError:
            logging.warning("File %s not found" % video_ref)
    c.execute("DELETE FROM clip WHERE video_name = ?", (video_name,))
    c.execute("DELETE FROM label WHERE video_name = ?", (video_name,))
    c.execute("DELETE FROM background WHERE video_name = ?", (video_name,))
    conn.commit()
def move_one_file(conn, clip_id, video_name, dest_ref):
c = conn.cursor()
c.execute("SELECT video_ref FROM clip WHERE clip_id = | |
os.path.exists(os.path.dirname(filename)):
try:
os.makedirs(os.path.dirname(filename))
except OSError as exc: # Guard against race condition
if exc.errno != errno.EEXIST:
raise
with open("files_refactored/" + mainfile, mode='w', newline='') as f:
f.write(my_listener.token_stream_rewriter.getDefaultText())
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def main_MakeMethodFinal(self, Root_path_udb_project, source_class, method_name):
roorpath = ""
a_string = Root_path_udb_project
new_string = a_string.replace(".udb", "")
roorpath = new_string + "//"
print(roorpath)
# initialize with undrestand [[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[
mainfile = ""
db = und.open(Root_path_udb_project)
for cls in db.ents("class"):
if (cls.longname() == source_class):
print(cls.parent().relname())
mainfile = cls.parent().relname()
# ]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]
file_main = roorpath + mainfile
argparser = argparse.ArgumentParser()
argparser.add_argument('-n', '--file', help='Input source', default=file_main)
args = argparser.parse_args()
stream = FileStream(args.file, encoding='utf8')
# Step 2: Create an instance of AssignmentStLexer
lexer = JavaLexer(stream)
# Step 3: Convert the input source into a list of tokens
token_stream = CommonTokenStream(lexer)
# Step 4: Create an instance of the AssignmentStParser
parser = JavaParser(token_stream)
parser.getTokenStream()
parse_tree = parser.compilationUnit()
my_listener = MakeMethodFinalRefactoringListener(common_token_stream=token_stream,
source_class=source_class,
method_name=method_name)
walker = ParseTreeWalker()
walker.walk(t=parse_tree, listener=my_listener)
filename = "files_refactored/" + mainfile
if not os.path.exists(os.path.dirname(filename)):
try:
os.makedirs(os.path.dirname(filename))
except OSError as exc: # Guard against race condition
if exc.errno != errno.EEXIST:
raise
with open("files_refactored/" + mainfile, mode='w', newline='') as f:
f.write(my_listener.token_stream_rewriter.getDefaultText())
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def main_IncreaseMethodVisibility(self, Root_path_udb_project, source_class, method_name):
roorpath = ""
a_string = Root_path_udb_project
new_string = a_string.replace(".udb", "")
roorpath = new_string + "//"
print(roorpath)
# initialize with undrestand [[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[
mainfile = ""
db = und.open(Root_path_udb_project)
for cls in db.ents("class"):
if (cls.longname() == source_class):
print(cls.parent().relname())
mainfile = cls.parent().relname()
# ]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]
file_main = roorpath + mainfile
argparser = argparse.ArgumentParser()
argparser.add_argument('-n', '--file', help='Input source', default=file_main)
args = argparser.parse_args()
stream = FileStream(args.file, encoding='utf8')
# Step 2: Create an instance of AssignmentStLexer
lexer = JavaLexer(stream)
# Step 3: Convert the input source into a list of tokens
token_stream = CommonTokenStream(lexer)
# Step 4: Create an instance of the AssignmentStParser
parser = JavaParser(token_stream)
parser.getTokenStream()
parse_tree = parser.compilationUnit()
my_listener = IncreaseMethodVisibilityRefactoringListener(common_token_stream=token_stream,
source_class=source_class,
method_name=method_name)
walker = ParseTreeWalker()
walker.walk(t=parse_tree, listener=my_listener)
filename = "files_refactored/" + mainfile
if not os.path.exists(os.path.dirname(filename)):
try:
os.makedirs(os.path.dirname(filename))
except OSError as exc: # Guard against race condition
if exc.errno != errno.EEXIST:
raise
with open("files_refactored/" + mainfile, mode='w', newline='') as f:
f.write(my_listener.token_stream_rewriter.getDefaultText())
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def main_DecreaseMethodVisibility(self, Root_path_udb_project, source_class, method_name):
roorpath = ""
a_string = Root_path_udb_project
new_string = a_string.replace(".udb", "")
roorpath = new_string + "//"
print(roorpath)
# initialize with undrestand [[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[
mainfile = ""
db = und.open(Root_path_udb_project)
for cls in db.ents("class"):
if (cls.longname() == source_class):
print(cls.parent().relname())
mainfile = cls.parent().relname()
# ]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]
file_main = roorpath + mainfile
argparser = argparse.ArgumentParser()
argparser.add_argument('-n', '--file', help='Input source', default=file_main)
args = argparser.parse_args()
stream = FileStream(args.file, encoding='utf8')
# Step 2: Create an instance of AssignmentStLexer
lexer = JavaLexer(stream)
# Step 3: Convert the input source into a list of tokens
token_stream = CommonTokenStream(lexer)
# Step 4: Create an instance of the AssignmentStParser
parser = JavaParser(token_stream)
parser.getTokenStream()
parse_tree = parser.compilationUnit()
my_listener = DecreaseMethodVisibilityRefactoringListener(common_token_stream=token_stream,
source_class=source_class,
method_name=method_name)
walker = ParseTreeWalker()
walker.walk(t=parse_tree, listener=my_listener)
filename = "files_refactored/" + mainfile
if not os.path.exists(os.path.dirname(filename)):
try:
os.makedirs(os.path.dirname(filename))
except OSError as exc: # Guard against race condition
if exc.errno != errno.EEXIST:
raise
with open("files_refactored/" + mainfile, mode='w', newline='') as f:
f.write(my_listener.token_stream_rewriter.getDefaultText())
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def main_DecreaseFieldVisibility(self, Root_path_udb_project, source_class, field_name):
roorpath = ""
a_string = Root_path_udb_project
new_string = a_string.replace(".udb", "")
roorpath = new_string + "//"
print(roorpath)
# initialize with undrestand [[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[
mainfile = ""
db = und.open(Root_path_udb_project)
for cls in db.ents("class"):
if (cls.longname() == source_class):
print(cls.parent().relname())
mainfile = cls.parent().relname()
# ]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]
file_main = roorpath + mainfile
argparser = argparse.ArgumentParser()
argparser.add_argument('-n', '--file', help='Input source', default=file_main)
args = argparser.parse_args()
stream = FileStream(args.file, encoding='utf8')
# Step 2: Create an instance of AssignmentStLexer
lexer = JavaLexer(stream)
# Step 3: Convert the input source into a list of tokens
token_stream = CommonTokenStream(lexer)
# Step 4: Create an instance of the AssignmentStParser
parser = JavaParser(token_stream)
parser.getTokenStream()
parse_tree = parser.compilationUnit()
my_listener = DecreaseFieldVisibilityRefactoringListener(common_token_stream=token_stream,
source_class=source_class,
field_name=field_name)
walker = ParseTreeWalker()
walker.walk(t=parse_tree, listener=my_listener)
filename = "files_refactored/" + mainfile
if not os.path.exists(os.path.dirname(filename)):
try:
os.makedirs(os.path.dirname(filename))
except OSError as exc: # Guard against race condition
if exc.errno != errno.EEXIST:
raise
with open("files_refactored/" + mainfile, mode='w', newline='') as f:
f.write(my_listener.token_stream_rewriter.getDefaultText())
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def main_IncreaseFieldVisibility(self, Root_path_udb_project, source_class, field_name):
roorpath = ""
a_string = Root_path_udb_project
new_string = a_string.replace(".udb", "")
roorpath = new_string + "//"
print(roorpath)
# initialize with undrestand [[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[
mainfile = ""
db = und.open(Root_path_udb_project)
for cls in db.ents("class"):
if (cls.longname() == source_class):
print(cls.parent().relname())
mainfile = cls.parent().relname()
# ]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]
file_main = roorpath + mainfile
argparser = argparse.ArgumentParser()
argparser.add_argument('-n', '--file', help='Input source', default=file_main)
args = argparser.parse_args()
stream = FileStream(args.file, encoding='utf8')
# Step 2: Create an instance of AssignmentStLexer
lexer = JavaLexer(stream)
# Step 3: Convert the input source into a list of tokens
token_stream = CommonTokenStream(lexer)
# Step 4: Create an instance of the AssignmentStParser
parser = JavaParser(token_stream)
parser.getTokenStream()
parse_tree = parser.compilationUnit()
my_listener = IncreaseFieldVisibilityRefactoringListener(common_token_stream=token_stream,
source_class=source_class,
field_name=field_name)
walker = ParseTreeWalker()
walker.walk(t=parse_tree, listener=my_listener)
filename = "files_refactored/" + mainfile
if not os.path.exists(os.path.dirname(filename)):
try:
os.makedirs(os.path.dirname(filename))
except OSError as exc: # Guard against race condition
if exc.errno != errno.EEXIST:
raise
with open("files_refactored/" + mainfile, mode='w', newline='') as f:
f.write(my_listener.token_stream_rewriter.getDefaultText())
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def main_Remove_Field(self, Root_path_udb_project, source_class, field_name):
roorpath = ""
a_string = Root_path_udb_project
new_string = a_string.replace(".udb", "")
roorpath = new_string + "//"
print(roorpath)
# initialize with undrestand [[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[
mainfile = ""
db = und.open(Root_path_udb_project)
for cls in db.ents("class"):
if (cls.longname() == source_class):
print(cls.parent().relname())
mainfile = cls.parent().relname()
# ]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]
file_main = roorpath + mainfile
argparser = argparse.ArgumentParser()
argparser.add_argument('-n', '--file', help='Input source', default=file_main)
args = argparser.parse_args()
stream = FileStream(args.file, encoding='utf8')
# Step 2: Create an instance of AssignmentStLexer
lexer = JavaLexer(stream)
# Step 3: Convert the input source into a list of tokens
token_stream = CommonTokenStream(lexer)
# Step 4: Create an instance of the AssignmentStParser
parser = JavaParser(token_stream)
parser.getTokenStream()
parse_tree = parser.compilationUnit()
my_listener = RemoveFieldRefactoringListener(common_token_stream=token_stream, source_class=source_class,
field_name=field_name)
walker = ParseTreeWalker()
walker.walk(t=parse_tree, listener=my_listener)
filename = "files_refactored/" + mainfile
if not os.path.exists(os.path.dirname(filename)):
try:
os.makedirs(os.path.dirname(filename))
except OSError as exc: # Guard against race condition
if exc.errno != errno.EEXIST:
raise
with open("files_refactored/" + mainfile, mode='w', newline='') as f:
f.write(my_listener.token_stream_rewriter.getDefaultText())
# &&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
# &&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
def main_Make_Field_Static(self, Root_path_udb_project, source_class, field_name):
roorpath = ""
a_string = Root_path_udb_project
new_string = a_string.replace(".udb", "")
roorpath = new_string + "//"
print(roorpath)
# initialize with undrestand [[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[
mainfile = ""
db = und.open(Root_path_udb_project)
for cls in db.ents("class"):
if (cls.longname() == source_class):
print(cls.parent().relname())
mainfile = cls.parent().relname()
# ]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]
file_main = roorpath + mainfile
argparser = argparse.ArgumentParser()
argparser.add_argument('-n', '--file', help='Input source', default=file_main)
args = argparser.parse_args()
stream = FileStream(args.file, encoding='utf8')
# Step 2: Create an instance of AssignmentStLexer
lexer = JavaLexer(stream)
# Step 3: Convert the input source into a list of tokens
token_stream = CommonTokenStream(lexer)
# Step 4: Create an instance of the AssignmentStParser
parser = JavaParser(token_stream)
parser.getTokenStream()
parse_tree = parser.compilationUnit()
my_listener = MakeFieldStaticRefactoringListener(common_token_stream=token_stream, source_class=source_class,
field_name=field_name)
walker = ParseTreeWalker()
walker.walk(t=parse_tree, listener=my_listener)
filename = "files_refactored/" + mainfile
if not os.path.exists(os.path.dirname(filename)):
try:
os.makedirs(os.path.dirname(filename))
except OSError as exc: # Guard against race condition
if exc.errno != errno.EEXIST:
raise
with open("files_refactored/" + mainfile, mode='w', newline='') as f:
f.write(my_listener.token_stream_rewriter.getDefaultText())
# &&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
# &&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
def main_Make_Field_Non_Static(self, Root_path_udb_project, source_class, field_name):
roorpath = ""
a_string = Root_path_udb_project
new_string = a_string.replace(".udb", "")
roorpath = new_string + "//"
print(roorpath)
# initialize with undrestand [[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[
mainfile = ""
db = und.open(Root_path_udb_project)
for cls in db.ents("class"):
if (cls.longname() == source_class):
print(cls.parent().relname())
mainfile = cls.parent().relname()
# ]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]
file_main = roorpath + mainfile
argparser = argparse.ArgumentParser()
argparser.add_argument('-n', '--file', help='Input source', default=file_main)
args = argparser.parse_args()
stream = FileStream(args.file, encoding='utf8')
# Step 2: Create an instance of AssignmentStLexer
lexer = JavaLexer(stream)
# Step 3: Convert the input source into a list of tokens
token_stream = CommonTokenStream(lexer)
# Step 4: Create an instance of the AssignmentStParser
parser = JavaParser(token_stream)
parser.getTokenStream()
parse_tree = parser.compilationUnit()
my_listener = MakeFieldNonStaticRefactoringListener(common_token_stream=token_stream, source_class=source_class,
field_name=field_name)
walker = ParseTreeWalker()
walker.walk(t=parse_tree, listener=my_listener)
filename = "files_refactored/" + mainfile
if not os.path.exists(os.path.dirname(filename)):
try:
os.makedirs(os.path.dirname(filename))
except OSError as exc: # Guard against race condition
if exc.errno != errno.EEXIST:
raise
with open("files_refactored/" + mainfile, mode='w', newline='') as f:
f.write(my_listener.token_stream_rewriter.getDefaultText())
# &&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
# &&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
def main_Make_Field_Non_Final(self, Root_path_udb_project, source_class, field_name):
roorpath = ""
a_string = Root_path_udb_project
new_string = a_string.replace(".udb", "")
roorpath = new_string + "//"
print(roorpath)
# | |
#!/usr/bin/env python3
# *******************************************************
# Copyright (c) VMware, Inc. 2020-2021. All Rights Reserved.
# SPDX-License-Identifier: MIT
# *******************************************************
# *
# * DISCLAIMER. THIS PROGRAM IS PROVIDED TO YOU "AS IS" WITHOUT
# * WARRANTIES OR CONDITIONS OF ANY KIND, WHETHER ORAL OR WRITTEN,
# * EXPRESS OR IMPLIED. THE AUTHOR SPECIFICALLY DISCLAIMS ANY IMPLIED
# * WARRANTIES OR CONDITIONS OF MERCHANTABILITY, SATISFACTORY QUALITY,
# * NON-INFRINGEMENT AND FITNESS FOR A PARTICULAR PURPOSE.
"""Model Classes for Enterprise Endpoint Detection and Response"""
from __future__ import absolute_import
import uuid
from cbc_sdk.errors import ApiError, InvalidObjectError, NonQueryableModel
from cbc_sdk.base import CreatableModelMixin, MutableBaseModel, UnrefreshableModel, SimpleQuery
import logging
import time
import validators
from schema import And, Optional, Schema, SchemaError
log = logging.getLogger(__name__)
"""Models"""
class FeedModel(UnrefreshableModel, CreatableModelMixin, MutableBaseModel):
    """A common base class for models used by the Feed and Watchlist APIs."""
    # Validation schema for a single IOC (v2) dict: non-empty string 'id',
    # 'match_type' limited to query/equality/regex, a non-empty list of
    # string 'values'; 'field' and 'link' are optional strings.
    SCHEMA_IOCV2 = Schema(
        {
            "id": And(And(str, error="IOC field 'id' is not a string"), len),
            "match_type": And(And(str, error="IOC field 'match_type' is not a string"),
                              And(lambda type: type in ["query", "equality", "regex"],
                                  error="error in IOC 'match_type' value: Invalid match type")),
            "values": And(And(list, error="IOC field 'values' is not a list"),
                          [And(str, error="IOC value is not a string")], len),
            Optional("field"): And(str, error="IOC field 'field' is not a string"),
            Optional("link"): And(str, error="IOC field 'link' is not a string")
        }
    )
    # Validation schema for a report dict: positive integer 'timestamp',
    # severity strictly between 0 and 11, and at least one IOC which is
    # itself validated against SCHEMA_IOCV2.
    SCHEMA_REPORT = Schema(
        {
            "id": And(And(str, error="Report field 'id' is not a string"), len),
            "timestamp": And(And(int, error="Report field 'timestamp' is not an integer"),
                             And(lambda n: n > 0, error="Timestamp cannot be negative")),
            "title": And(And(str, error="Report field 'title' is not a string"), len),
            "description": And(And(str, error="Report field 'description' is not a string"), len),
            "severity": And(And(int, error="Report field 'severity' is not an integer"),
                            And(lambda n: 0 < n < 11, error="Severity value out of range")),
            Optional("link"): And(str, error="Report field 'link' is not a string"),
            Optional("tags"): And(And(list, error="Report field 'tags' is not a list"),
                                  [And(str, error="Report tag is not a string")]),
            "iocs_v2": And(And(list, error="Report field 'iocs_v2' is not a list"), [SCHEMA_IOCV2],
                           And(len, error="Report should have at least one IOC")),
            Optional("visibility"): And(str, error="Report field 'visibility' is not a string")
        }
    )
class Watchlist(FeedModel):
"""Represents an Enterprise EDR watchlist."""
# NOTE(ww): Not documented.
urlobject = "/threathunter/watchlistmgr/v2/watchlist"
urlobject_single = "/threathunter/watchlistmgr/v2/watchlist/{}"
swagger_meta_file = "enterprise_edr/models/watchlist.yaml"
def __init__(self, cb, model_unique_id=None, initial_data=None):
"""
Initialize the Watchlist object.
Args:
cb (CBCloudAPI): A reference to the CBCloudAPI object.
model_unique_id (str): The unique ID of the watch list.
initial_data (dict): The initial data for the object.
"""
item = {}
if initial_data:
item = initial_data
elif model_unique_id:
item = cb.get_object(self.urlobject_single.format(model_unique_id))
feed_id = item.get("id")
super(Watchlist, self).__init__(cb, model_unique_id=feed_id, initial_data=item,
force_init=False, full_doc=True)
class WatchlistBuilder:
"""Helper class allowing Watchlists to be assembled."""
def __init__(self, cb, name):
"""
Creates a new WatchlistBuilder object.
Args:
cb (CBCloudAPI): A reference to the CBCloudAPI object.
name (str): Name for the new watchlist.
"""
self._cb = cb
self._new_info = {"name": name, "tags_enabled": True, "alerts_enabled": False, "report_ids": []}
def set_name(self, name):
"""
Sets the name for the new watchlist.
Args:
name (str): New name for the watchlist.
Returns:
WatchlistBuilder: This object.
"""
self._new_info['name'] = name
return self
def set_description(self, description):
"""
Sets the description for the new watchlist.
Args:
description (str): New description for the watchlist.
Returns:
WatchlistBuilder: This object.
"""
self._new_info['description'] = description
return self
def set_tags_enabled(self, flag):
"""
Sets whether tags will be enabled on the new watchlist.
Args:
flag (bool): True to enable tags, False to disable them. Default is True.
Returns:
WatchlistBuilder: This object.
"""
self._new_info['tags_enabled'] = bool(flag)
return self
def set_alerts_enabled(self, flag):
"""
Sets whether alerts will be enabled on the new watchlist.
Args:
flag (bool): True to enable alerts, False to disable them. Default is False.
Returns:
WatchlistBuilder: This object.
"""
self._new_info['alerts_enabled'] = bool(flag)
return self
def add_report_ids(self, report_ids):
"""
Adds report IDs to the watchlist.
Args:
report_ids (list[str]): List of report IDs to add to the watchlist.
Returns:
WatchlistBuilder: This object.
"""
self._new_info['report_ids'] += report_ids
return self
def add_reports(self, reports):
"""
Adds reports to the watchlist.
Args:
reports (list[Report]): List of reports to be added to the watchlist.
Returns:
WatchlistBuilder: This object.
"""
id_values = []
for report in reports:
if report._from_watchlist and 'id' in report._info:
report.validate()
id_values.append(report._info['id'])
return self.add_report_ids(id_values)
def build(self):
"""
Builds the new Watchlist using information in the builder. The new watchlist must still be saved.
Returns:
Watchlist: The new Watchlist.
"""
return Watchlist(self._cb, initial_data=self._new_info)
@classmethod
def create(cls, cb, name):
"""
Starts creating a new Watchlist by returning a WatchlistBuilder that can be used to set attributes.
Args:
cb (CBCloudAPI): A reference to the CBCloudAPI object.
name (str): Name for the new watchlist.
Returns:
WatchlistBuilder: The builder for the new watchlist. Call build() to create the actual Watchlist.
"""
return Watchlist.WatchlistBuilder(cb, name)
    @classmethod
    def create_from_feed(cls, feed, name=None, description=None, enable_alerts=False, enable_tags=True):
        """
        Creates a new Watchlist that encapsulates a Feed.

        Args:
            feed (Feed): The feed to be encapsulated by this Watchlist.
            name (str): Name for the new watchlist. The default is to use the Feed name.
            description (str): Description for the new watchlist. The default is to use the Feed summary.
            enable_alerts (bool): True to enable alerts, False to disable them. The default is False.
            enable_tags (bool): True to enable tags, False to disable them. The default is True.

        Returns:
            Watchlist: A new Watchlist object, which must be saved to the server.
        """
        # The classifier links the watchlist back to its source feed.
        return Watchlist(feed._cb, initial_data={
            "name": f"Feed {feed.name}" if not name else name,
            "description": feed.summary if not description else description,
            "tags_enabled": enable_tags,
            "alerts_enabled": enable_alerts,
            "classifier": {
                "key": "feed_id",
                "value": feed.id
            }
        })
@classmethod
def _query_implementation(self, cb, **kwargs):
"""
Returns the appropriate query object for Watchlists.
Args:
cb (BaseAPI): Reference to API object used to communicate with the server.
**kwargs (dict): Not used, retained for compatibility.
Returns:
WatchlistQuery: The query object for Watchlists.
"""
return WatchlistQuery(self, cb)
def save(self):
"""Saves this watchlist on the Enterprise EDR server.
Returns:
Watchlist (Watchlist): The saved Watchlist.
Raises:
InvalidObjectError: If Watchlist.validate() fails.
"""
self.validate()
url = "/threathunter/watchlistmgr/v3/orgs/{}/watchlists".format(
self._cb.credentials.org_key
)
new_info = self._cb.post_object(url, self._info).json()
self._info.update(new_info)
return self
def validate(self):
"""
Checks to ensure this watchlist contains valid data.
Raises:
InvalidObjectError: If the watchlist contains invalid data.
"""
super(Watchlist, self).validate()
    def update(self, **kwargs):
        """Updates this watchlist with the given arguments.

        Only keys already present in the watchlist's data are applied; the
        merged state is validated and then PUT to the server.

        Arguments:
            **kwargs (dict(str, str)): The fields to update.

        Raises:
            InvalidObjectError: If `id` is missing or Watchlist.validate() fails.
            ApiError: If `report_ids` is given and is empty.

        Example:
            >>> watchlist.update(name="<NAME>")
        """
        if not self.id:
            raise InvalidObjectError("missing Watchlist ID")
        # NOTE(ww): Special case, according to the docs.
        if "report_ids" in kwargs and not kwargs["report_ids"]:
            raise ApiError("can't update a watchlist to have an empty report list")
        # NOTE(review): self._info is mutated before validation runs; if
        # validate() raises, the local object keeps the partially-applied
        # values even though nothing was sent to the server.
        for key, value in kwargs.items():
            if key in self._info:
                self._info[key] = value
        self.validate()
        url = "/threathunter/watchlistmgr/v3/orgs/{}/watchlists/{}".format(
            self._cb.credentials.org_key,
            self.id
        )
        new_info = self._cb.put_object(url, self._info).json()
        self._info.update(new_info)
@property
def classifier_(self):
"""Returns the classifier key and value, if any, for this watchlist.
Returns:
tuple(str, str): Watchlist's classifier key and value.
None: If there is no classifier key and value.
"""
classifier_dict = self._info.get("classifier")
if not classifier_dict:
return None
return (classifier_dict["key"], classifier_dict["value"])
def delete(self):
"""Deletes this watchlist from the Enterprise EDR server.
Raises:
InvalidObjectError: If `id` is missing.
"""
if not self.id:
raise InvalidObjectError("missing Watchlist ID")
url = "/threathunter/watchlistmgr/v3/orgs/{}/watchlists/{}".format(
self._cb.credentials.org_key,
self.id
)
self._cb.delete_object(url)
def enable_alerts(self):
"""Enable alerts for this watchlist. Alerts are not retroactive.
Raises:
InvalidObjectError: If `id` is missing.
"""
if not self.id:
raise InvalidObjectError("missing Watchlist ID")
url = "/threathunter/watchlistmgr/v3/orgs/{}/watchlists/{}/alert".format(
self._cb.credentials.org_key,
self.id
)
self._cb.put_object(url, None)
def disable_alerts(self):
"""Disable alerts for this watchlist.
Raises:
InvalidObjectError: If `id` is missing.
"""
if not self.id:
raise InvalidObjectError("missing Watchlist ID")
url = "/threathunter/watchlistmgr/v3/orgs/{}/watchlists/{}/alert".format(
self._cb.credentials.org_key,
self.id
)
self._cb.delete_object(url)
def enable_tags(self):
"""Enable tagging for this watchlist.
Raises:
InvalidObjectError: If `id` is missing.
"""
if not self.id:
raise InvalidObjectError("missing Watchlist ID")
url = "/threathunter/watchlistmgr/v3/orgs/{}/watchlists/{}/tag".format(
self._cb.credentials.org_key,
self.id
)
self._cb.put_object(url, None)
def disable_tags(self):
    """Turn off tagging for this watchlist.

    Raises:
        InvalidObjectError: If `id` is missing.
    """
    if not self.id:
        raise InvalidObjectError("missing Watchlist ID")
    request_url = "/threathunter/watchlistmgr/v3/orgs/{}/watchlists/{}/tag".format(
        self._cb.credentials.org_key, self.id
    )
    self._cb.delete_object(request_url)
@property
def feed(self):
"""Returns the Feed linked to this Watchlist, if there is one."""
if not self.classifier:
return None
if self.classifier["key"] != "feed_id":
log.warning("Unexpected | |
# userVerificationRequired but not done
continue
d = key_data[:32]
REM_GETASSERTION_PARAMETERS.append([d, user_description, pkc_descriptor['id'], credRandom])
else:
# search for applicable residential keys
for storage in ks_ctap2.load_rk(data[1]):
rk_data = storage['rk_data']
if keepalive(channel) == CTAP2_ERR_KEEPALIVE_CANCEL:
return CTAP2_ERR_KEEPALIVE_CANCEL
if rk_data[0x0a] > 1 and FLAGS & 0x04 == 0:
# userVerificationRequired but not done
continue
else:
REM_NUM_RESIDENTIAL_KEYS += 1
if REM_NUM_RESIDENTIAL_KEYS > 0:
REM_ITERATOR = ks_ctap2.load_rk(data[1])
if (not REM_GETASSERTION_PARAMETERS) and (REM_NUM_RESIDENTIAL_KEYS == 0):
# no potential valid keys found at all
return CTAP2_ERR_NO_CREDENTIALS
# make assertion
extension_hmac_secret = {}
if REM_GETASSERTION_PARAMETERS:
d, user_description, credentialID, credRandom = REM_GETASSERTION_PARAMETERS.pop()
else:
useRK = True
while True:
storage = REM_ITERATOR.__next__()
rk_data = storage['rk_data']
if rk_data[0x0a] > 1 and FLAGS & 0x04 == 0:
# userVerificationRequired but not done
continue
else:
credentialID = rk_data[7]['id']
if keepalive(channel) == CTAP2_ERR_KEEPALIVE_CANCEL:
return CTAP2_ERR_KEEPALIVE_CANCEL
key_data = dec_key_handle(credentialID)
if keepalive(channel) == CTAP2_ERR_KEEPALIVE_CANCEL:
return CTAP2_ERR_KEEPALIVE_CANCEL
if key_data is None:
return CTAP2_ERR_NOT_ALLOWED
key_dict = decode(key_data[32:])
user_description = key_dict['user']
credRandom = key_dict['credRandom']
d = key_data[:32]
REM_NUM_RESIDENTIAL_KEYS -= 1
break
if hmac_secret is True and credRandom != b'':
credRandom = credRandom[:32] if FLAGS & 0x04 > 0 else credRandom[32:]
ret = genSharedSecret(channel, data[4]['hmac-secret'],
credRandom, extension_hmac_secret)
if ret != CTAP2_OK:
return ret
FLAGS |= 0x80 # ED
if FLAGS & 0x04 == 0:
# uv=PIN not done: remove all optional user informations
user_description = {'id': user_description['id']}
NUMBEROFCREDENTIALS = 1 + len(REM_GETASSERTION_PARAMETERS) + REM_NUM_RESIDENTIAL_KEYS
# rpIdHash
if keepalive(channel) == CTAP2_ERR_KEEPALIVE_CANCEL:
return CTAP2_ERR_KEEPALIVE_CANCEL
rp_id_hash = sha256(bytes(data[1], 'utf8'))
if keepalive(channel) == CTAP2_ERR_KEEPALIVE_CANCEL:
return CTAP2_ERR_KEEPALIVE_CANCEL
# increase signature counter
counter_fido2.inc() # and store counter
cb = counter_fido2.to_bytes()
# authenticator data: https://www.w3.org/TR/webauthn/#table-authData
auth_data = rp_id_hash + FLAGS.to_bytes(1, 'big') + cb
if extension_hmac_secret:
# add hmac-secret extension
auth_data += encode(extension_hmac_secret)
# compute signature
if keepalive(channel) == CTAP2_ERR_KEEPALIVE_CANCEL:
return CTAP2_ERR_KEEPALIVE_CANCEL
ret = ec_sign(d, auth_data + data[2]) # auth_data + client_data_hash
if keepalive(channel) == CTAP2_ERR_KEEPALIVE_CANCEL:
return CTAP2_ERR_KEEPALIVE_CANCEL
if ret is None:
return CTAP1_ERR_OTHER # error of nrf cryptocell
signature = der_encode_signature(*ret) # ret = r,s
if NUMBEROFCREDENTIALS > 1:
REM_GETASSERTION_PARAMETERS_COMMON = [rp_id_hash, data[2], FLAGS, {}]
if hmac_secret is True and 'hmac-secret' in storage:
REM_GETASSERTION_PARAMETERS_COMMON[3] = data[4]['hmac-secret']
CREDENTIALCOUNTER = 1
NEXT_CREDENTIAL_TIMER = monotonic() # start clock
# https://www.w3.org/TR/webauthn/#sctn-attestation
ret = {1: {'id': credentialID, 'type': 'public-key'},
2: auth_data,
3: signature}
if useRK is True:
ret[4] = user_description
if NUMBEROFCREDENTIALS > 1:
ret[5] = NUMBEROFCREDENTIALS
return CTAP2_OK + encode(ret)
def getNextAssertion(channel):
    # Implements authenticatorGetNextAssertion (CTAP2 command 0x08): emits the
    # next assertion from the credential list assembled by the preceding
    # authenticatorGetAssertion call.
    # https://fidoalliance.org/specs/fido-v2.0-ps-20190130/fido-client-to-authenticator-protocol-v2.0-ps-20190130.html#authenticatorGetNextAssertion
    global ks_ctap2, counter_fido2
    global NEXT_CREDENTIAL_TIMER, REM_GETASSERTION_PARAMETERS
    global CREDENTIALCOUNTER, NUMBEROFCREDENTIALS
    global REM_GETASSERTION_PARAMETERS_COMMON, REM_LAST_CMD
    global REM_ITERATOR, REM_NUM_RESIDENTIAL_KEYS
    if keepalive(channel) == CTAP2_ERR_KEEPALIVE_CANCEL:
        return CTAP2_ERR_KEEPALIVE_CANCEL
    # Nothing queued: no pending allow-list parameters and no resident keys
    # left over from the previous getAssertion.
    if not REM_GETASSERTION_PARAMETERS and REM_NUM_RESIDENTIAL_KEYS == 0:
        return CTAP2_ERR_NOT_ALLOWED
    # Only valid immediately after getAssertion / getNextAssertion.
    if REM_LAST_CMD not in (authenticatorGetAssertion, authenticatorGetNextAssertion):
        return CTAP2_ERR_NOT_ALLOWED
    if CREDENTIALCOUNTER >= NUMBEROFCREDENTIALS:
        return CTAP2_ERR_NOT_ALLOWED
    # Spec mandates a 30-second window between successive assertions.
    if monotonic() - NEXT_CREDENTIAL_TIMER > 30.0:
        return CTAP2_ERR_NOT_ALLOWED  # time out
    try:
        # Shared request state saved by getAssertion:
        # [rpIdHash, clientDataHash, flags, hmac-secret extension input].
        rp_id_hash, clientDataHash, FLAGS, hmac_secret = REM_GETASSERTION_PARAMETERS_COMMON
    except ValueError:
        return CTAP2_ERR_NOT_ALLOWED
    extension_hmac_secret = {}
    if REM_GETASSERTION_PARAMETERS:
        # Pending non-resident (allow-list) credential queued by getAssertion.
        useRK = False
        d, user_description, credentialID, credRandom = REM_GETASSERTION_PARAMETERS.pop()
    else:
        # load next residential key
        useRK = True
        try:
            while True:
                storage = REM_ITERATOR.__next__()
                rk_data = storage['rk_data']
                if rk_data[0x0a] > 1 and FLAGS & 0x04 == 0:
                    # userVerificationRequired but not done
                    continue
                break
            credentialID = rk_data[7]['id']
            key_data = dec_key_handle(credentialID)
            if key_data is None:
                return CTAP2_ERR_NOT_ALLOWED
            key_dict = decode(key_data[32:])
            user_description = key_dict['user']
            credRandom = key_dict['credRandom']
            # Private key is the first 32 bytes of the decrypted handle.
            d = key_data[:32]
            REM_NUM_RESIDENTIAL_KEYS -= 1
        except (StopIteration, ValueError, AttributeError):
            # Iterator exhausted or handle corrupt: drop remaining RK state.
            REM_NUM_RESIDENTIAL_KEYS = 0
            return CTAP2_ERR_NOT_ALLOWED
    if hmac_secret and credRandom != b'':
        # hmac-secret extension: select the UV or the non-UV half of
        # credRandom depending on whether user verification was performed.
        credRandom = credRandom[:32] if FLAGS & 0x04 > 0 else credRandom[32:]
        ret = genSharedSecret(
            channel, hmac_secret, credRandom, extension_hmac_secret)
        if ret != CTAP2_OK:
            return ret
        FLAGS |= 0x80  # ED
    if FLAGS & 0x04 == 0:
        # uv=PIN not done: remove all optional user informations
        user_description = {'id': user_description['id']}
    # increase signature counter
    counter_fido2.inc()  # and store counter
    # authenticator data: https://www.w3.org/TR/webauthn/#table-authData
    auth_data = rp_id_hash + FLAGS.to_bytes(1, 'big') + counter_fido2.to_bytes()
    if extension_hmac_secret:
        # add hmac-secret extension
        auth_data += encode(extension_hmac_secret)
    # compute signature
    if keepalive(channel) == CTAP2_ERR_KEEPALIVE_CANCEL:
        return CTAP2_ERR_KEEPALIVE_CANCEL
    ret = ec_sign(d, auth_data + clientDataHash)  # auth_data + client_data_hash
    if keepalive(channel) == CTAP2_ERR_KEEPALIVE_CANCEL:
        return CTAP2_ERR_KEEPALIVE_CANCEL
    if ret is None:
        return CTAP1_ERR_OTHER  # error of nrf cryptocell
    signature = der_encode_signature(*ret)  # ret = r,s
    CREDENTIALCOUNTER += 1
    NEXT_CREDENTIAL_TIMER = monotonic()  # start clock
    # Response map per the CTAP2 spec: 1=credential, 2=authData, 3=signature.
    ret = {1: {'id': credentialID, 'type': 'public-key'},
           2: auth_data,
           3: signature}
    if useRK is True:
        ret[4] = user_description
    return CTAP2_OK + encode(ret)
def enc_key_handle(data):
    """Encrypt *data* into a key handle: AES-CBC ciphertext followed by a
    32-byte HMAC-SHA-256 tag over the ciphertext."""
    # ISO/IEC 7816-4 style padding: a 0x80 marker, then zeros to the block size.
    pad_len = -(1 + len(data)) % 16
    padded = data + b'\x80' + bytes(pad_len)
    cipher = aes_cbc(padded, ks_ctap2.AES_KEY, ks_ctap2.AES_IV, True)
    return cipher + hmac_sha256(ks_ctap2.KEY_5C, ks_ctap2.KEY_36, cipher)
def dec_key_handle(data):
    """Verify and decrypt a key handle produced by enc_key_handle.

    Returns the plaintext with padding stripped, or None when the handle is
    too short / misaligned, fails MAC verification, fails decryption, or
    carries invalid padding.
    """
    if len(data) < 64 or len(data) % 16 > 0:
        return None
    cipher, tag = data[:-32], data[-32:]
    if tag != hmac_sha256(ks_ctap2.KEY_5C, ks_ctap2.KEY_36, cipher):
        return None
    plain = aes_cbc(cipher, ks_ctap2.AES_KEY, ks_ctap2.AES_IV, False)
    if plain is None:
        return None
    # Strip the trailing 80 00 00 ... padding, never eating into the first
    # 32 bytes (the embedded private key).
    i = len(plain) - 1
    while i > 31:
        if plain[i] == 0x80:
            return plain[:i]
        if plain[i] != 0x00:
            return None  # wrong padding
        i -= 1
    return None
def genSharedSecret(channel, hmac_secret, credRandom, extension_hmac_secret):
    # hmac-secret extension processing:
    # https://fidoalliance.org/specs/fido2/fido-client-to-authenticator-protocol-v2.1-rd-20191217.html#sctn-hmac-secret-extension
    # hmac_secret is the decoded extension input map: 1 = platform COSE key,
    # 2 = saltEnc, 3 = saltAuth. On success the encrypted HMAC output(s) are
    # stored under 'hmac-secret' in the caller-supplied extension_hmac_secret
    # dict, and CTAP2_OK is returned.
    global DH_a, DH_aG
    x = hmac_secret[1][-2]  # COSE EC2 x-coordinate
    y = hmac_secret[1][-3]  # COSE EC2 y-coordinate
    # Rebuild the uncompressed SEC1 point, left-padding each coordinate
    # to 32 bytes.
    Q = b'\x04' + bytes(-len(x) % 32) + x \
        + bytes(-len(y) % 32) + y
    # compute shared secret as SHA-256(Q.x)
    if DH_a is None or DH_aG is None:
        # Lazily create the authenticator's ephemeral ECDH key pair.
        if keepalive(channel) == CTAP2_ERR_KEEPALIVE_CANCEL:
            return CTAP2_ERR_KEEPALIVE_CANCEL
        DH_a, DH_aG = ec_genkeypair()
    if keepalive(channel) == CTAP2_ERR_KEEPALIVE_CANCEL:
        return CTAP2_ERR_KEEPALIVE_CANCEL
    X = ec_dh(DH_a, Q)
    if X is None:
        return CTAP1_ERR_OTHER
    if keepalive(channel) == CTAP2_ERR_KEEPALIVE_CANCEL:
        return CTAP2_ERR_KEEPALIVE_CANCEL
    shared_secret = sha256(X)
    if shared_secret is None:
        return CTAP1_ERR_OTHER
    # Pre-XORed inner/outer HMAC pads for HMAC-SHA-256 keyed with the
    # shared secret.
    k5c = bytes((c ^ 0x5c for c in shared_secret)) + b'\x5c' * 32
    k36 = bytes((c ^ 0x36 for c in shared_secret)) + b'\x36' * 32
    # The authenticator verifies saltEnc by generating
    # LEFT(HMAC-SHA-256(sharedSecret, saltEnc), 16) and matching against the
    # input saltAuth parameter.
    if keepalive(channel) == CTAP2_ERR_KEEPALIVE_CANCEL:
        return CTAP2_ERR_KEEPALIVE_CANCEL
    if hmac_sha256(k5c, k36, hmac_secret[2])[:16] != hmac_secret[3]:
        return CTAP2_ERR_EXTENSION_FIRST
    # decrypt saltEnc
    salt = aes256_cbc(hmac_secret[2], shared_secret, bytes(16), False)
    # The authenticator generates one or two HMAC-SHA-256 values
    # (keyed with credRandom, one per 32-byte salt).
    k5c = bytes((c ^ 0x5c for c in credRandom)) + b'\x5c' * 32
    k36 = bytes((c ^ 0x36 for c in credRandom)) + b'\x36' * 32
    if keepalive(channel) == CTAP2_ERR_KEEPALIVE_CANCEL:
        return CTAP2_ERR_KEEPALIVE_CANCEL
    output1 = hmac_sha256(k5c, k36, salt[:32])
    if len(salt) == 64:
        # Two salts: concatenate both HMAC outputs before encrypting.
        output2 = hmac_sha256(k5c, k36, salt[32:])
        ext = aes256_cbc(output1 + output2, shared_secret, bytes(16), True)
    else:
        ext = aes256_cbc(output1, shared_secret, bytes(16), True)
    if keepalive(channel) == CTAP2_ERR_KEEPALIVE_CANCEL:
        return CTAP2_ERR_KEEPALIVE_CANCEL
    extension_hmac_secret['hmac-secret'] = ext
    return CTAP2_OK
def reset(channel):
    """Factory-reset the authenticator after a user-presence check.

    Regenerates both keystores, clears the PIN retry tally and resets the
    FIDO2 signature counter. Returns CTAP2_OK, or the error from the
    user-presence check (cancel / timeout).
    """
    global PIN_CONSECUTIVE_RETRIES, ks_ctap2, ks_pin
    # user presence required
    # (a 10 s post-power-up window check was considered here and left
    # disabled in the original: `if monotonic() - POWER_UP > 10.0: ...`)
    up_result = up_check(channel, LED2)
    if up_result in (CTAP2_ERR_KEEPALIVE_CANCEL, CTAP2_ERR_USER_ACTION_TIMEOUT):
        return up_result
    PIN_CONSECUTIVE_RETRIES = 0
    for keystore in (ks_ctap2, ks_pin):
        keystore.gen_new_keys()
        keystore.save_keystore()
    counter_fido2.reset()
    return CTAP2_OK
def pin_check_steps_1_2(data, key_pin_auth, key_pin_prot, channel=None):
    """Run the shared pinUvAuthParam / pinUvAuthProtocol validation steps.

    Args:
        data: Decoded CBOR request map.
        key_pin_auth: Map key under which pinUvAuthParam is stored.
        key_pin_prot: Map key under which pinUvAuthProtocol is stored.
        channel: Transport channel used for the user-presence check when the
            platform sends a zero-length pinUvAuthParam. Defaults to None for
            backward compatibility with existing callers.

    Returns:
        CTAP2_OK when both checks pass, otherwise a CTAP2 error code.
    """
    if key_pin_auth in data:
        if len(data[key_pin_auth]) == 0:
            # If authenticator supports clientPin and platform sends a zero
            # length pinUvAuthParam, wait for user touch and then return either
            # CTAP2_ERR_PIN_NOT_SET if pin is not set or CTAP2_ERR_PIN_INVALID
            # if pin has been set.
            # FIX: 'channel' was previously an undefined name here, so this
            # branch raised NameError instead of performing the touch check;
            # it is now an explicit (optional) parameter.
            ret = up_check(channel)
            if ret == CTAP2_OK:
                if isPINset() is False:
                    return CTAP2_ERR_PIN_NOT_SET
                else:
                    return CTAP2_ERR_PIN_INVALID
            else:
                return ret
    if key_pin_prot in data:
        if data[key_pin_prot] != 1:
            # If authenticator supports clientPin and pinUvAuthParam parameter
            # is present and the pinUvAuthProtocol is not supported,
            # return CTAP2_ERR_PIN_AUTH_INVALID error.
            return CTAP2_ERR_PIN_AUTH_INVALID
    return CTAP2_OK
def clientPIN(data):
# https://fidoalliance.org/specs/fido-v2.0-ps-20190130/fido-client-to-authenticator-protocol-v2.0-ps-20190130.html#authenticatorClientPIN
global ks_pin, PIN_CONSECUTIVE_RETRIES, DH_a, DH_aG
if DH_a is None or DH_aG is None:
DH_a, DH_aG = ec_genkeypair()
if DH_a is None or DH_aG is None:
return CTAP1_ERR_OTHER
try:
data = decode(data)
except ValueError:
return CTAP2_ERR_INVALID_CBOR
ret = ccp.authenticatorClientPIN.verify(data)
if ret != CTAP2_OK:
return ret
if data[2] == 0x01: # getRetries
return CTAP2_OK + encode({3: ks_pin.PIN_RETRIES})
elif data[2] == 0x02: # getKeyAgreement
return CTAP2_OK + encode({1: {1: 2, # kty: EC2 key type
3: -25, # alg: ECDH-ES+HKDF-256
-1: 1, # crv: P-256 curve
# x-coordinate
-2: DH_aG[1: 1 + 32],
# y-coordinate
-3: DH_aG[32 + 1:]
}
})
elif data[2] in (0x03, 0x04, 0x05):
# verify parameters for setPIN, changePIN, getPINToken
if 3 not in | |
if inPlace:
if agent.currentSpd < 100:
localTarget = toLocal(enemy_goal, agent.me)
angle = math.degrees(math.atan2(localTarget[1], localTarget[0]))
if abs(angle) > 35:
agent.setGuidance(enemy_goal)
#return SimpleControllerState()
return arrest_movement(agent)
return driveController(agent, center, agent.time + 0.6, expedite=False)
def replacementAvailable(agent):
    """Return True when some teammate is within 500 uu of our own depth on
    the field's length axis, i.e. close enough to take over as back man."""
    my_depth = agent.me.location[1] * sign(agent.team)
    return any(
        abs(teammate.location[1] * sign(agent.team) - my_depth) < 500
        for teammate in agent.allies
    )
def all_allies_back(agent):
    """Return True when no teammate is in front of the y = 3120 line
    (both sides of the comparison are scaled by the team sign)."""
    threshold = 3120 * sign(agent.team)
    for teammate in agent.allies:
        if teammate.location[1] * sign(agent.team) < threshold:
            return False
    return True
def interceptGuidance(agent, e_goaldist, distLimit=900):
    # Gatekeeper for abandoning the current hit in favor of a retreat.
    # Returns (True, controller_state) to order a retreat, else (False, None).
    center = Vector([0, 5200 * sign(agent.team), 200])
    defensiveDistance = distance2D(agent.currentHit.pred_vector, center)
    # NOTE(review): the entire decision ladder below is disabled by the
    # hard-coded `if False:` kill switch (the original condition is kept
    # commented out just beneath it), so this function currently always
    # returns (False, None).
    if False:
    #if len(agent.allies) > 1:
        if agent.lastMan == agent.me.location:
            if defensiveDistance > 2000:
                if not replacementAvailable(agent):
                    if not agent.goalPred:
                        if (
                            agent.enemyBallInterceptDelay + agent.contestedTimeLimit
                            < agent.currentHit.time_difference()
                        ):
                            if e_goaldist > distLimit:
                                if not all_allies_back(agent):
                                    if (
                                        distance2D(
                                            agent.me.location, agent.ball.location
                                        )
                                        > 250
                                    ):
                                        if not goalie_shot(agent, agent.currentHit):
                                            return True, smart_retreat(agent)
    return False, None
def arrest_movement(agent):
    """Brake toward a standstill: while speed exceeds 20 uu/s, throttle
    against the car's current facing direction."""
    controls = SimpleControllerState()
    if agent.currentSpd > 20:
        controls.throttle = -1 if agent.forward else 1
    return controls
def buyTime(agent, attackTarget, defendTarget):
    # Stalling maneuver: when our hit lands only barely before the opponent's,
    # in midfield, and far from both goals, shepherd the ball toward a side
    # wall to buy time instead of committing to a shot.
    # Returns (True, controls) when the stall is engaged, else (False, None).
    if agent.currentHit.time_difference() < agent.enemyBallInterceptDelay + 0.25:
        if abs(agent.currentHit.pred_vector[0]) < 2600:
            if distance2D(agent.currentHit.pred_vector, attackTarget) > 2000:
                if distance2D(agent.currentHit.pred_vector, defendTarget) > 2000:
                    predVec = agent.currentHit.pred_vector
                    proceed = False
                    # Only proceed when the car sits outside the ball on the
                    # same flank, so pushing it further outward is possible.
                    if (
                        agent.me.location[0] > 500
                        and predVec[0] > 500
                        and agent.me.location[0] > predVec[0]
                    ):
                        proceed = True
                    elif (
                        agent.me.location[0] < -500
                        and predVec[0] < -500
                        and agent.me.location[0] < predVec[0]
                    ):
                        proceed = True
                    if proceed:
                        agent.log.append(f"proceeding {agent.time}")
                        myGoal = Vector([0, 5250 * sign(agent.team), 200])
                        targDist = distance2D(agent.me.location, predVec)
                        # Re-aim at the nearer side wall.
                        if agent.me.location[0] > predVec[0]:
                            attackTarget = Vector([5000, predVec[1], predVec[2]])
                        else:
                            attackTarget = Vector([-5000, predVec[1], predVec[2]])
                        localPos = toLocal(predVec, agent.me)
                        angleDegrees = correctAngle(
                            math.degrees(math.atan2(localPos[1], localPos[0]))
                        )
                        # Contact offset depends on which face of the car
                        # will meet the ball (front / rear / side).
                        if abs(angleDegrees) <= 40:
                            carOffset = agent.carLength * 0.6
                        elif abs(angleDegrees) >= 140:
                            carOffset = agent.carLength * 0.25
                        else:
                            carOffset = agent.carWidth * 0.4
                        totalOffset = (90 + carOffset) * 0.8
                        _direction = direction(attackTarget, predVec)
                        destination = predVec + _direction.scale(totalOffset)
                        # Mirror position on the own-goal side; used below to
                        # ensure we approach from the safe side.
                        badDirection = direction(myGoal, predVec)
                        badPosition = predVec + badDirection.scale(totalOffset)
                        shotViable = False
                        futurePos = agent.me.location + (
                            agent.me.velocity.scale(agent.currentHit.time_difference())
                        )
                        fpos_pred_distance = distance2D(futurePos, predVec)
                        if fpos_pred_distance <= totalOffset:
                            shotViable = True
                        shotlimit = 1
                        if agent.contested:
                            shotlimit = 0.7
                        # Late, approaching from the good side, fast enough,
                        # grounded and in reach: convert the stall into an
                        # immediate powershot on the predicted ball position.
                        if agent.currentHit.time_difference() < shotlimit:
                            if distance2D(futurePos, destination) * 1.5 < distance2D(
                                futurePos, badPosition
                            ):
                                if agent.currentSpd * agent.ballDelay >= clamp(
                                    99999, 0, targDist - totalOffset
                                ):
                                    if not agent.onWall and agent.onSurface:
                                        if shotViable:
                                            destination = predVec
                                            agent.setPowershot(
                                                agent.currentHit.time_difference(),
                                                predVec,
                                            )
                                            agent.log.append("stall tactics")
                        # print(f"buying time {agent.time}")
                        return (
                            True,
                            driveController(
                                agent,
                                destination,
                                agent.time + agent.currentHit.time_difference(),
                                expedite=True,
                            ),
                        )
    return False, None
def findFirstAllyOnTeamSideOfBall(agent):
    """Return the friendly car (teammate or self) closest to the ball among
    those positioned on the team's side of the ball, or None if none are."""
    team_sign = sign(agent.team)
    ball_depth = agent.ball.location[1] * team_sign
    closest = None
    closest_dist = math.inf
    for car in list(agent.allies) + [agent.me]:
        if car.location[1] * team_sign > ball_depth:
            gap = distance2D(car.location, agent.ball.location)
            if gap < closest_dist:
                closest = car
                closest_dist = gap
    return closest
def get_ball_offset(agent, hit):
    """Return the car-to-ball-center contact distance to aim for on *hit*.

    For ground-reachable hits the offset shrinks with the ball's height via
    the chord of the 93 uu ball radius; otherwise the full radius is used.
    """
    # Parentheses make the original operator precedence explicit:
    # hit_type 0 always qualifies; hit_type 1 only below the ground cutoff.
    if hit.hit_type == 0 or (hit.hit_type == 1 and hit.pred_vector[2] < agent.groundCutOff):
        height_offset = clamp(1000, 93, hit.pred_vector[2]) - 93
        if height_offset < agent.functional_car_height:
            reach = agent.functional_car_height - height_offset
            return math.sqrt((93 * 93) - (reach * reach))
        # Ball sits above what the car can functionally reach.
        agent.log.append("Had to fudge numbers!!!")
        return 45
    return 93
def mirrorshot_decider(agent):
    """Decide whether a mirror shot is appropriate for the current hit.

    Returns True only when the predicted ball is on the defensive half,
    laterally outside the car-to-goal corridor, the approach is more
    sideways than lengthwise, and the ball is not in the butter zone.
    """
    enemy_goal = Vector([0, 5200 * sign(agent.team), 0])
    target = agent.currentHit.pred_vector
    if target[1] * sign(agent.team) < 0:
        return False
    # Target laterally between the car and the goal mouth -> straight shot.
    if agent.me.location[0] >= target[0] >= enemy_goal[0]:
        return False
    if agent.me.location[0] <= target[0] <= enemy_goal[0]:
        return False
    delta = agent.me.location - target
    return abs(delta[0]) > abs(delta[1]) and not butterZone(target)
def ShellTime(agent, retreat_enabled = True):
defendTarget = Vector([0, 5500 * sign(agent.team), 200])
attackTarget = Vector([0, 5200 * -sign(agent.team), 200])
# rush = False
#print("in shell")
targetVec = agent.currentHit.pred_vector
defensiveRange = 200
maxRange = 1200
if agent.contested:
maxRange = 400
goalDistance = distance2D(targetVec, defendTarget)
carDistance = distance2D(agent.me.location, defendTarget)
ballGoalDistance = distance2D(agent.ball.location, defendTarget)
targDistance = distance2D(agent.me.location, targetVec)
dist3D = findDistance(agent.me.location, targetVec)
carToGoalDistance = distance2D(agent.me.location, attackTarget)
expedite = True
flippant = False
offensive = agent.ball.location[1] * sign(agent.team) < 0
if agent.currentHit.hit_type == 5:
#print("why is there an aerial hit in shelltime?")
agent.activeState = agent.currentHit.aerialState
return agent.activeState.update()
if ballGoalDistance + defensiveRange < carDistance:
cornerShot = cornerDetection(targetVec) != -1
#if (retreat_enabled and agent.me.location != agent.lastMan) or (not agent.contested and retreat_enabled) or (retreat_enabled and not enough_momentum):
# if not cornerShot:
# if (retreat_enabled and agent.me.location != agent.lastMan) or (not agent.contested and retreat_enabled) or (
# retreat_enabled and not agent.ballDelay > agent.enemyBallInterceptDelay):
# #if retreat_enabled:
# # delay = buyTime(agent,attackTarget,defendTarget)
# # if delay[0]:
# # return delay[1]
# rightPost = Vector([900, 5000 * sign(agent.team), 200])
# leftPost = Vector([-900, 5000 * sign(agent.team), 200])
# if distance2D(agent.me.location, rightPost) < distance2D(
# agent.me.location, leftPost
# ):
# post = rightPost
# else:
# post = leftPost
#
# if distance2D(targetVec, post) + defensiveRange < distance2D(
# agent.me.location, post
# ):
# return driveController(agent, post, agent.time, expedite=True)
# #return bringToCorner(agent)
# else:
# if offensive:
# return smart_retreat(agent)
# else:
# return handleBounceShot(agent, waitForShot=True, forceDefense=True)
# else:
# return smart_retreat(agent)
if retreat_enabled or cornerShot:
rightPost = Vector([900, 5000 * sign(agent.team), 200])
leftPost = Vector([-900, 5000 * sign(agent.team), 200])
if distance2D(agent.me.location, rightPost) < distance2D(
agent.me.location, leftPost
):
post = rightPost
else:
post = leftPost
if distance2D(targetVec, post) + defensiveRange < distance2D(
agent.me.location, post
):
return driveController(agent, post, agent.time, expedite=True)
else:
if offensive:
return handleBounceShot(agent, waitForShot=True, forceDefense=True)
else:
return smart_retreat(agent)
goalSpot, ballGoalAngle = goal_selector_revised(agent, mode=0)
if len(agent.allies) < 2:
if abs(ballGoalAngle) >= agent.angleLimit:
expedite = False
if retreat_enabled:
if (
agent.contested
or agent.enemyBallInterceptDelay < agent.currentHit.time_difference()
or agent.me.boostLevel < agent.boostThreshold
):
return playBack(agent)
#return thirdManPositioning(agent)
corner = cornerDetection(targetVec)
if len(agent.allies) < 1:
if agent.team == 0:
if corner == 0 or corner == 1:
expedite = False
else:
if corner == 2 or corner == 3:
expedite = False
if agent.goalPred == None and len(agent.allies) < 1: # and agent.team == 1:
if agent.currentHit.time_difference() - agent.enemyBallInterceptDelay >= 1:
expedite = False
# if len(agent.allies) == 0:
# if goalDistance > 2000:
if retreat_enabled:
challenge = interceptGuidance(agent, ballGoalDistance)
if challenge[0]:
return challenge[1]
localPos = toLocal(targetVec, agent.me)
angleDegrees = correctAngle(math.degrees(math.atan2(localPos[1], localPos[0])))
moddedOffset = False
if abs(angleDegrees) <= 40:
carOffset = agent.carLength * 0.5
elif abs(angleDegrees) >= 140:
carOffset = agent.carLength * 0.5
else:
carOffset = agent.carWidth * 0.5
ballOffset = get_ball_offset(agent,agent.currentHit)
#totalOffset = carOffset + ballOffset
totalOffset = (carOffset + ballOffset) * 0.85
adjustedOffset = totalOffset * 1
offset_min = totalOffset * .85
positioningOffset = offset_min
destination = None
moddedOffset = False
if agent.currentHit.hit_type == 1 or agent.currentHit.hit_type == 4:
return handleBounceShot(agent, waitForShot=False)
if agent.currentHit.hit_type == 2:
agent.wallShot = True
agent.ballGrounded = False
return handleWallShot(agent)
if len(agent.enemies) < 3:
if carDistance < goalDistance:
#if agent.goalward:
if targetVec[2] > 93 + (agent.carHeight * .5):
if not agent.contested:
#if agent.team == 0:
return catch_ball(agent)
if targetVec[2] >= agent.groundCutOff*.9 and agent.ballDelay < 0.5:
return handleBounceShot(agent, waitForShot=False)
if offensive and relativeSpeed(agent.currentHit.pred_vel,agent.me.velocity) > distance2D(agent.me.location,attackTarget)*0.8 and agent.ballDelay < 0.5:
return handleBounceShot(agent, waitForShot=False)
is_mirror_shot = False #mirrorshot_decider(agent)
_direction = direction(targetVec, goalSpot)
if agent.team == 3:
test_direction = optimal_intercept_vector(
targetVec.flatten(),
agent.currentHit.pred_vel.flatten(),
attackTarget.flatten(),
)
if abs(angleBetweenVectors(agent.me.velocity, test_direction)) < 90:
_direction = test_direction
if not destination and abs(targetVec[0]) < 3500:
#if not agent.contested:
if (
targDistance > totalOffset
and targDistance > (agent.currentSpd * agent.currentHit.time_difference())
and abs(targetVec[1]) <= 4000
):
# print(f"in here {agent.time}")
offset = clamp(1800, offset_min, targDistance * 0.25)
# _direction = direction(attackTarget, targetVec)
positioningOffset = offset
destination = targetVec + _direction.scale(positioningOffset)
if agent.team !=3:
if agent.team == 4:
target_position = get_aim_vector(agent, goalSpot.flatten(), targetVec.flatten(),
agent.currentHit.pred_vel, positioningOffset)
if abs(target_position[1]) <= 90 or butterZone(targetVec) or targDistance >= 2000:
destination = target_position[0]
else:
destination = aim_wallshot_naive(agent, agent.currentHit, positioningOffset)
else:
if not is_mirror_shot:
destination = get_aim_vector(agent, goalSpot.flatten(), targetVec.flatten(),
agent.currentHit.pred_vel, positioningOffset)[0]
else:
destination = aim_wallshot_naive(agent, agent.currentHit, positioningOffset)
moddedOffset = True
#print(f"defensive altered shot {agent.time}")
if not destination:
# _direction = direction(targetVec, attackTarget)
positioningOffset = offset_min
destination = targetVec + _direction.scale(positioningOffset)
| |
_invoke_api('ntdtest-iternoread-get', *args)
return api_call
# Thin pass-through wrappers for the hyphenated ZAPI call names: each
# function simply forwards its positional arguments to _invoke_api.

def ntdtest_iternoread_get_alt(*args):
    return _invoke_api('ntdtest-iternoread-get-alt', *args)

def ntdtest_iternoread_get_iter(*args):
    return _invoke_api('ntdtest-iternoread-get-iter', *args)

def ntdtest_iternoread_get_iter_alt(*args):
    return _invoke_api('ntdtest-iternoread-get-iter-alt', *args)

def ntdtest_iternoread_list_info(*args):
    return _invoke_api('ntdtest-iternoread-list-info', *args)

def ntdtest_iternoread_modify(*args):
    return _invoke_api('ntdtest-iternoread-modify', *args)

def ntdtest_iternoread_modify_iter(*args):
    return _invoke_api('ntdtest-iternoread-modify-iter', *args)

def ntdtest_iterwants_get(*args):
    return _invoke_api('ntdtest-iterwants-get', *args)

def ntdtest_iterwants_get_iter(*args):
    return _invoke_api('ntdtest-iterwants-get-iter', *args)

def ntdtest_list_non_test_action_default(*args):
    return _invoke_api('ntdtest-list-non-test-action-default', *args)

def ntdtest_list_non_test_method_default(*args):
    return _invoke_api('ntdtest-list-non-test-method-default', *args)

def ntdtest_method_only_default(*args):
    return _invoke_api('ntdtest-method-only-default', *args)

def ntdtest_method_only_method2(*args):
    return _invoke_api('ntdtest-method-only-method2', *args)

def ntdtest_method_only_method3(*args):
    return _invoke_api('ntdtest-method-only-method3', *args)

def ntdtest_method_only_method3_a(*args):
    return _invoke_api('ntdtest-method-only-method3-a', *args)

def ntdtest_method_only_method3_async(*args):
    return _invoke_api('ntdtest-method-only-method3-async', *args)

def ntdtest_method_only_method3_async_a(*args):
    return _invoke_api('ntdtest-method-only-method3-async-a', *args)

def ntdtest_method_only_method3_async_iter(*args):
    return _invoke_api('ntdtest-method-only-method3-async-iter', *args)

def ntdtest_method_only_method3_iter(*args):
    return _invoke_api('ntdtest-method-only-method3-iter', *args)

def ntdtest_multiple_array_get_deep_element(*args):
    return _invoke_api('ntdtest-multiple-array-get-deep-element', *args)

def ntdtest_multiple_array_get_shallow_element(*args):
    return _invoke_api('ntdtest-multiple-array-get-shallow-element', *args)

def ntdtest_multiple_arrays_get_iter(*args):
    return _invoke_api('ntdtest-multiple-arrays-get-iter', *args)

def ntdtest_multiple_default_method1_alternate(*args):
    return _invoke_api('ntdtest-multiple-default-method1-alternate', *args)

def ntdtest_multiple_default_method1_default(*args):
    return _invoke_api('ntdtest-multiple-default-method1-default', *args)

def ntdtest_multiple_inout_method1_alternate(*args):
    return _invoke_api('ntdtest-multiple-inout-method1-alternate', *args)

def ntdtest_multiple_inout_method1_default(*args):
    return _invoke_api('ntdtest-multiple-inout-method1-default', *args)

def ntdtest_multiple_with_default_create(*args):
    return _invoke_api('ntdtest-multiple-with-default-create', *args)

def ntdtest_multiple_with_inout_create(*args):
    return _invoke_api('ntdtest-multiple-with-inout-create', *args)

def ntdtest_nonlist_get(*args):
    return _invoke_api('ntdtest-nonlist-get', *args)

def ntdtest_nonlist_get_iter(*args):
    return _invoke_api('ntdtest-nonlist-get-iter', *args)

def ntdtest_shownoread_default_get(*args):
    return _invoke_api('ntdtest-shownoread-default-get', *args)

def ntdtest_shownoread_get(*args):
    return _invoke_api('ntdtest-shownoread-get', *args)

def ntdtest_top_level_alt_create(*args):
    return _invoke_api('ntdtest-top-level-alt-create', *args)

def ntdtest_top_level_alt_get(*args):
    return _invoke_api('ntdtest-top-level-alt-get', *args)

def ntdtest_top_level_default_create(*args):
    return _invoke_api('ntdtest-top-level-default-create', *args)

def ntdtest_top_level_default_destroy(*args):
    return _invoke_api('ntdtest-top-level-default-destroy', *args)

def ntdtest_top_level_default_get(*args):
    return _invoke_api('ntdtest-top-level-default-get', *args)

def ntdtest_top_level_default_modify(*args):
    return _invoke_api('ntdtest-top-level-default-modify', *args)

def ntdtest_top_level_no_inputs_create(*args):
    return _invoke_api('ntdtest-top-level-no-inputs-create', *args)

def ntdtest_view_alternate_create_1(*args):
    return _invoke_api('ntdtest-view-alternate-create-1', *args)

def ntdtest_view_alternate_create_2(*args):
    return _invoke_api('ntdtest-view-alternate-create-2', *args)

def ntdtest_view_alternate_destroy_1(*args):
    return _invoke_api('ntdtest-view-alternate-destroy-1', *args)

def ntdtest_view_alternate_get_1(*args):
    return _invoke_api('ntdtest-view-alternate-get-1', *args)

def ntdtest_view_alternate_get_2(*args):
    return _invoke_api('ntdtest-view-alternate-get-2', *args)

def ntdtest_view_alternate_modify_1(*args):
    return _invoke_api('ntdtest-view-alternate-modify-1', *args)

def ntdtest_view_default_create(*args):
    return _invoke_api('ntdtest-view-default-create', *args)

def ntdtest_view_default_destroy(*args):
    return _invoke_api('ntdtest-view-default-destroy', *args)

def ntdtest_view_default_get(*args):
    return _invoke_api('ntdtest-view-default-get', *args)

def ntdtest_view_default_modify(*args):
    return _invoke_api('ntdtest-view-default-modify', *args)

def ntdtest_view_destroy_iter(*args):
    return _invoke_api('ntdtest-view-destroy-iter', *args)

def ntdtest_view_get_iter(*args):
    return _invoke_api('ntdtest-view-get-iter', *args)

def ntdtest_view_modify_iter(*args):
    return _invoke_api('ntdtest-view-modify-iter', *args)
# Thin pass-through wrappers for the ntp-server-* and options-* ZAPI calls:
# each function simply forwards its positional arguments to _invoke_api.

def ntp_server_create(*args):
    return _invoke_api('ntp-server-create', *args)

def ntp_server_delete(*args):
    return _invoke_api('ntp-server-delete', *args)

def ntp_server_get(*args):
    return _invoke_api('ntp-server-get', *args)

def ntp_server_get_iter(*args):
    return _invoke_api('ntp-server-get-iter', *args)

def ntp_server_modify(*args):
    return _invoke_api('ntp-server-modify', *args)

def ntp_server_reset(*args):
    return _invoke_api('ntp-server-reset', *args)

def ntp_server_validate(*args):
    return _invoke_api('ntp-server-validate', *args)

def options_get_iter(*args):
    return _invoke_api('options-get-iter', *args)

def options_modify_iter(*args):
    return _invoke_api('options-modify-iter', *args)
def perf_archive_config_get(*args):
api_call = _invoke_api('perf-archive-config-get', *args)
return api_call
def perf_archive_config_modify(*args):
api_call = _invoke_api('perf-archive-config-modify', *args)
return api_call
def perf_archive_create(*args):
api_call = _invoke_api('perf-archive-create', *args)
return api_call
def perf_archive_datastore_get_iter(*args):
api_call = _invoke_api('perf-archive-datastore-get-iter', *args)
return api_call
def perf_archive_destroy(*args):
api_call = _invoke_api('perf-archive-destroy', *args)
return api_call
def perf_archive_get_iter(*args):
api_call = _invoke_api('perf-archive-get-iter', *args)
return api_call
def perf_archive_modify(*args):
api_call = _invoke_api('perf-archive-modify', *args)
return api_call
def perf_object_counter_list_info(*args):
api_call = _invoke_api('perf-object-counter-list-info', *args)
return api_call
def perf_object_get_instances(*args):
api_call = _invoke_api('perf-object-get-instances', *args)
return api_call
def perf_object_instance_list_info_iter(*args):
api_call = _invoke_api('perf-object-instance-list-info-iter', *args)
return api_call
def perf_object_list_info(*args):
api_call = _invoke_api('perf-object-list-info', *args)
return api_call
def perf_preset_create(*args):
api_call = _invoke_api('perf-preset-create', *args)
return api_call
def perf_preset_delete(*args):
api_call = _invoke_api('perf-preset-delete', *args)
return api_call
def perf_preset_detail_get(*args):
api_call = _invoke_api('perf-preset-detail-get', *args)
return api_call
def perf_preset_get_iter(*args):
api_call = _invoke_api('perf-preset-get-iter', *args)
return api_call
def perf_preset_import(*args):
api_call = _invoke_api('perf-preset-import', *args)
return api_call
def perf_preset_modify(*args):
api_call = _invoke_api('perf-preset-modify', *args)
return api_call
def portset_get_iter(*args):
api_call = _invoke_api('portset-get-iter', *args)
return api_call
def qos_policy_group_create(*args):
api_call = _invoke_api('qos-policy-group-create', *args)
return api_call
def qos_policy_group_delete(*args):
api_call = _invoke_api('qos-policy-group-delete', *args)
return api_call
def qos_policy_group_delete_iter(*args):
api_call = _invoke_api('qos-policy-group-delete-iter', *args)
return api_call
def qos_policy_group_get(*args):
api_call = _invoke_api('qos-policy-group-get', *args)
return api_call
def qos_policy_group_get_iter(*args):
    """Invoke the 'qos-policy-group-get-iter' ZAPI call, forwarding `args`, and return the response."""
    return _invoke_api('qos-policy-group-get-iter', *args)
def qos_policy_group_modify(*args):
api_call = _invoke_api('qos-policy-group-modify', *args)
return api_call
def qos_policy_group_modify_iter(*args):
api_call = _invoke_api('qos-policy-group-modify-iter', *args)
return api_call
def qos_policy_group_rename(*args):
api_call = _invoke_api('qos-policy-group-rename', *args)
return api_call
def qos_settings_control_get(*args):
api_call = _invoke_api('qos-settings-control-get', *args)
return api_call
def qos_settings_control_modify(*args):
api_call = _invoke_api('qos-settings-control-modify', *args)
return api_call
def qos_settings_read_ahead_create(*args):
api_call = _invoke_api('qos-settings-read-ahead-create', *args)
return api_call
def qos_settings_read_ahead_destroy(*args):
api_call = _invoke_api('qos-settings-read-ahead-destroy', *args)
return api_call
def qos_settings_read_ahead_destroy_iter(*args):
api_call = _invoke_api('qos-settings-read-ahead-destroy-iter', *args)
return api_call
def qos_settings_read_ahead_get(*args):
api_call = _invoke_api('qos-settings-read-ahead-get', *args)
return api_call
def qos_settings_read_ahead_get_iter(*args):
api_call = _invoke_api('qos-settings-read-ahead-get-iter', *args)
return api_call
def qos_settings_read_ahead_modify(*args):
api_call = _invoke_api('qos-settings-read-ahead-modify', *args)
return api_call
def qos_settings_read_ahead_modify_iter(*args):
api_call = _invoke_api('qos-settings-read-ahead-modify-iter', *args)
return api_call
def qos_test_smf_zapi_error(*args):
api_call = _invoke_api('qos-test-smf-zapi-error', *args)
return api_call
def qos_workload_delete(*args):
api_call = _invoke_api('qos-workload-delete', *args)
return api_call
def qos_workload_delete_iter(*args):
api_call = _invoke_api('qos-workload-delete-iter', *args)
return api_call
def qos_workload_get(*args):
api_call = _invoke_api('qos-workload-get', *args)
return api_call
def qos_workload_get_iter(*args):
api_call = _invoke_api('qos-workload-get-iter', *args)
return api_call
def qos_workload_modify(*args):
api_call = _invoke_api('qos-workload-modify', *args)
return api_call
def qos_workload_modify_iter(*args):
api_call = _invoke_api('qos-workload-modify-iter', *args)
return api_call
def qtree_list_iter(*args):
api_call = _invoke_api('qtree-list-iter', *args)
return api_call
def quota_list_entries_iter(*args):
api_call = _invoke_api('quota-list-entries-iter', *args)
return api_call
def quota_policy_copy(*args):
api_call = _invoke_api('quota-policy-copy', *args)
return api_call
def quota_policy_create(*args):
api_call = _invoke_api('quota-policy-create', *args)
return api_call
def quota_policy_delete_iter(*args):
api_call = _invoke_api('quota-policy-delete-iter', *args)
return api_call
def quota_policy_get_iter(*args):
api_call = _invoke_api('quota-policy-get-iter', *args)
return api_call
def quota_policy_rename(*args):
api_call = _invoke_api('quota-policy-rename', *args)
return api_call
def quota_policy_rule_count_get_iter(*args):
api_call = _invoke_api('quota-policy-rule-count-get-iter', *args)
return api_call
def quota_report_iter(*args):
api_call = _invoke_api('quota-report-iter', *args)
return api_call
def quota_status_iter(*args):
api_call = _invoke_api('quota-status-iter', *args)
return api_call
def raidgroup_get_iter(*args):
api_call = _invoke_api('raidgroup-get-iter', *args)
return api_call
def security_certificate_ca_issued_get_iter(*args):
api_call = _invoke_api('security-certificate-ca-issued-get-iter', *args)
return api_call
def security_certificate_create(*args):
api_call = _invoke_api('security-certificate-create', *args)
return api_call
def security_certificate_delete(*args):
api_call = _invoke_api('security-certificate-delete', *args)
return api_call
def security_certificate_delete_iter(*args):
api_call = _invoke_api('security-certificate-delete-iter', *args)
return api_call
def security_certificate_file_get_iter(*args):
api_call = _invoke_api('security-certificate-file-get-iter', *args)
return api_call
def security_certificate_generate_csr(*args):
api_call = _invoke_api('security-certificate-generate-csr', *args)
return api_call
def security_certificate_get_iter(*args):
api_call = _invoke_api('security-certificate-get-iter', *args)
return api_call
def security_certificate_install(*args):
api_call = _invoke_api('security-certificate-install', *args)
return api_call
def security_certificate_revoke(*args):
api_call = _invoke_api('security-certificate-revoke', *args)
return api_call
def security_certificate_sign(*args):
api_call = _invoke_api('security-certificate-sign', *args)
return api_call
def security_key_manager_add_iter(*args):
api_call = _invoke_api('security-key-manager-add-iter', *args)
return api_call
def security_key_manager_create_key(*args):
api_call = _invoke_api('security-key-manager-create-key', *args)
return api_call
def security_key_manager_delete_iter(*args):
api_call = _invoke_api('security-key-manager-delete-iter', *args)
return api_call
def security_key_manager_get(*args):
api_call = _invoke_api('security-key-manager-get', *args)
return api_call
def security_key_manager_get_iter(*args):
api_call = _invoke_api('security-key-manager-get-iter', *args)
return api_call
def security_key_manager_query_get(*args):
api_call = _invoke_api('security-key-manager-query-get', *args)
return api_call
def security_key_manager_query_get_iter(*args):
api_call = _invoke_api('security-key-manager-query-get-iter', *args)
return api_call
def security_key_manager_restore_get(*args):
api_call = _invoke_api('security-key-manager-restore-get', *args)
return api_call
def security_key_manager_restore_get_iter(*args):
api_call = _invoke_api('security-key-manager-restore-get-iter', *args)
return api_call
def security_key_manager_setup(*args):
api_call = _invoke_api('security-key-manager-setup', *args)
return api_call
def security_login_create(*args):
api_call = _invoke_api('security-login-create', *args)
return api_call
def security_login_delete(*args):
api_call = _invoke_api('security-login-delete', *args)
return api_call
def security_login_delete_iter(*args):
api_call = _invoke_api('security-login-delete-iter', *args)
return api_call
def security_login_get(*args):
api_call = _invoke_api('security-login-get', *args)
return api_call
def security_login_get_iter(*args):
api_call = _invoke_api('security-login-get-iter', *args)
return api_call
def security_login_lock(*args):
api_call = _invoke_api('security-login-lock', *args)
return api_call
def security_login_modify(*args):
api_call = _invoke_api('security-login-modify', *args)
return api_call
def security_login_modify_iter(*args):
api_call = _invoke_api('security-login-modify-iter', *args)
return api_call
def security_login_modify_password(*args):
    """Invoke the 'security-login-modify-password' ZAPI call, forwarding `args`, and return the response."""
    return _invoke_api('security-login-modify-password', *args)
def security_login_role_config_get(*args):
api_call = _invoke_api('security-login-role-config-get', *args)
return api_call
def security_login_role_config_get_iter(*args):
api_call = _invoke_api('security-login-role-config-get-iter', *args)
return api_call
def security_login_role_config_modify(*args):
api_call = _invoke_api('security-login-role-config-modify', *args)
return api_call
def security_login_role_config_modify_iter(*args):
api_call = _invoke_api('security-login-role-config-modify-iter', *args)
return api_call
def security_login_role_create(*args):
api_call = _invoke_api('security-login-role-create', *args)
return api_call
def security_login_role_delete(*args):
api_call = _invoke_api('security-login-role-delete', *args)
return api_call
def security_login_role_delete_iter(*args):
api_call = _invoke_api('security-login-role-delete-iter', *args)
return api_call
def security_login_role_get(*args):
api_call = _invoke_api('security-login-role-get', *args)
return api_call
def security_login_role_get_iter(*args):
api_call = _invoke_api('security-login-role-get-iter', *args)
return api_call
def security_login_role_modify(*args):
api_call = _invoke_api('security-login-role-modify', *args)
return api_call
def security_login_role_modify_iter(*args):
api_call = _invoke_api('security-login-role-modify-iter', *args)
return api_call
def security_login_unlock(*args):
api_call = _invoke_api('security-login-unlock', *args)
return api_call
def security_reset(*args):
api_call = _invoke_api('security-reset', *args)
return api_call
def security_ssh_add(*args):
api_call = _invoke_api('security-ssh-add', *args)
return api_call
def security_ssh_get_iter(*args):
api_call = _invoke_api('security-ssh-get-iter', *args)
return api_call
def security_ssh_remove(*args):
api_call = _invoke_api('security-ssh-remove', *args)
return api_call
def security_ssl_get_iter(*args):
api_call = _invoke_api('security-ssl-get-iter', *args)
return api_call
def security_ssl_modify(*args):
api_call = _invoke_api('security-ssl-modify', *args)
return api_call
def security_trace_filter_get_iter(*args):
api_call = _invoke_api('security-trace-filter-get-iter', *args)
return api_call
def security_trace_result_show(*args):
api_call = _invoke_api('security-trace-result-show', *args)
return api_call
def service_processor_api_service_get(*args):
api_call = _invoke_api('service-processor-api-service-get', *args)
return api_call
def service_processor_api_service_modify(*args):
api_call = _invoke_api('service-processor-api-service-modify', *args)
return api_call
def service_processor_api_service_renew_certificates(*args):
api_call = _invoke_api('service-processor-api-service-renew-certificates', *args)
return api_call
def service_processor_asup_config_get(*args):
api_call = _invoke_api('service-processor-asup-config-get', *args)
return api_call
def service_processor_asup_config_set(*args):
api_call = _invoke_api('service-processor-asup-config-set', *args)
return api_call
def service_processor_asup_invoke(*args):
api_call = _invoke_api('service-processor-asup-invoke', *args)
return api_call
def service_processor_auto_configuration_disable(*args):
api_call = _invoke_api('service-processor-auto-configuration-disable', *args)
return api_call
def service_processor_auto_configuration_enable(*args):
api_call = _invoke_api('service-processor-auto-configuration-enable', *args)
return api_call
def service_processor_auto_configuration_get(*args):
api_call = _invoke_api('service-processor-auto-configuration-get', *args)
return api_call
def service_processor_get(*args):
api_call = _invoke_api('service-processor-get', *args)
return api_call
def service_processor_get_iter(*args):
api_call = _invoke_api('service-processor-get-iter', *args)
return api_call
def service_processor_image_get(*args):
api_call = _invoke_api('service-processor-image-get', *args)
return api_call
def service_processor_image_modify(*args):
api_call = _invoke_api('service-processor-image-modify', *args)
return api_call
def service_processor_image_update(*args):
api_call = _invoke_api('service-processor-image-update', *args)
return api_call
def service_processor_image_update_progress_get(*args):
api_call = _invoke_api('service-processor-image-update-progress-get', *args)
return api_call
def service_processor_log_allocation_get(*args):
api_call = _invoke_api('service-processor-log-allocation-get', *args)
return api_call
def service_processor_log_allocation_get_iter(*args):
api_call = _invoke_api('service-processor-log-allocation-get-iter', *args)
return api_call
def service_processor_network_get(*args):
api_call = _invoke_api('service-processor-network-get', *args)
return api_call
def service_processor_network_get_iter(*args):
api_call = _invoke_api('service-processor-network-get-iter', *args)
return api_call
def service_processor_network_modify(*args):
api_call = _invoke_api('service-processor-network-modify', *args)
return api_call
def service_processor_network_modify_iter(*args):
api_call | |
# reponame: huangyingw/fastai_fastai
# ---
# jupyter:
# jupytext:
# formats: ipynb,py
# split_at_heading: true
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.6.0
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# hide
# skip
from nbdev.export import notebook2script
from fastprogress.fastprogress import format_time
from torch.utils.data import TensorDataset
from nbdev.showdoc import *
from fastai.callback.core import *
from fastai.optimizer import *
from fastai.data.all import *
! [ -e /content ] && pip install -Uqq fastai  # upgrade fastai on colab
# +
# default_exp learner
# -
# export
# hide
# export
_all_ = ['CancelFitException', 'CancelEpochException', 'CancelTrainException', 'CancelValidException', 'CancelBatchException']
# # Learner
#
# > Basic class for handling the training loop
# You probably want to jump directly to the definition of `Learner`.
# ## Utils function
# hide
# For tests
# +
# hide
def synth_dbunch(a=2, b=3, bs=16, n_train=10, n_valid=2, cuda=False):
    "A simple dataset where `x` is random and `y = a*x + b` plus some noise."
    def _make_ds(n_batches):
        # `n_batches` batches of `bs` random samples each.
        xs = torch.randn(int(bs * n_batches))
        ys = a * xs + b + 0.1 * torch.randn(int(bs * n_batches))
        return TensorDataset(xs, ys)
    dev = default_device() if cuda else None
    dl_train = TfmdDL(_make_ds(n_train), bs=bs, shuffle=True, num_workers=0)
    dl_valid = TfmdDL(_make_ds(n_valid), bs=bs, num_workers=0)
    return DataLoaders(dl_train, dl_valid, device=dev)
class RegModel(Module):
    "A trainable linear model `y = a*x + b` with scalar parameters `a` and `b`."
    def __init__(self):
        self.a = nn.Parameter(torch.randn(1))
        self.b = nn.Parameter(torch.randn(1))
    def forward(self, x):
        return x * self.a + self.b
# -
# export
defaults.lr = 1e-3
# export
def replacing_yield(o, attr, val):
    "Context manager to temporarily replace an attribute"
    saved = getattr(o, attr)
    setattr(o, attr, val)
    try:
        # Yields `None`; the caller only cares about the swap/restore side effect.
        yield
    finally:
        setattr(o, attr, saved)
# +
class _A:
    "Toy class used below to demonstrate `replacing_yield`."
    def __init__(self, a):
        self.a = a

    @contextmanager
    def a_changed(self, v):
        # Temporarily sets `self.a` to `v` for the duration of the `with` block.
        return replacing_yield(self, 'a', v)
a = _A(42)
with a.a_changed(32):
test_eq(a.a, 32)
test_eq(a.a, 42)
# -
# export
def mk_metric(m):
    "Convert `m` to an `AvgMetric`, unless it's already a `Metric`"
    if isinstance(m, Metric):
        return m
    return AvgMetric(m)
# See the class `Metric` below for more information.
# export
def save_model(file, model, opt, with_opt=True, pickle_protocol=2):
    "Save `model` to `file` along with `opt` (if available, and if `with_opt`)"
    # In distributed training only the rank-0 process writes the checkpoint.
    if rank_distrib():
        return
    state = get_model(model).state_dict()
    save_opt = with_opt and opt is not None
    payload = {'model': state, 'opt': opt.state_dict()} if save_opt else state
    torch.save(payload, file, pickle_protocol=pickle_protocol)
# `file` can be a `Path` object, a string or an opened file object. `pickle_protocol` is passed along to `torch.save`
# export
def load_model(file, model, opt, with_opt=None, device=None, strict=True):
    "Load `model` from `file` along with `opt` (if available, and if `with_opt`)"
    distrib_barrier()
    if isinstance(device, int):
        device = torch.device('cuda', device)
    elif device is None:
        device = 'cpu'
    state = torch.load(file, map_location=device)
    # An optimizer-aware checkpoint is exactly {'model': ..., 'opt': ...}.
    hasopt = set(state) == {'model', 'opt'}
    model_state = state['model'] if hasopt else state
    get_model(model).load_state_dict(model_state, strict=strict)
    if hasopt and ifnone(with_opt, True):
        try:
            opt.load_state_dict(state['opt'])
        except Exception:
            # BUG FIX: was a bare `except:`, which also swallowed SystemExit/
            # KeyboardInterrupt. Only warn if the caller explicitly asked for the opt.
            if with_opt:
                warn("Could not load the optimizer state.")
    elif with_opt:
        # BUG FIX: typo "Saved filed" in the user-facing warning message.
        warn("Saved file doesn't contain an optimizer state.")
# `file` can be a `Path` object, a string or an opened file object. If a `device` is passed, the model is loaded on it, otherwise it's loaded on the CPU.
#
# If `strict` is `True`, the file must exactly contain weights for every parameter key in `model`, if `strict` is `False`, only the keys that are in the saved model are loaded in `model`.
# export
def _try_concat(o):
try:
return torch.cat(o)
except:
return sum([L(o_[i, :] for i in range_of(o_)) for o_ in o], L())
# export
_before_epoch = [event.before_fit, event.before_epoch]
_after_epoch = [event.after_epoch, event.after_fit]
# export
class _ConstantFunc():
"Returns a function that returns `o`"
def __init__(self, o): self.o = o
def __call__(self, *args, **kwargs): return self.o
# ## Learner -
# export
_loop = ['Start Fit', 'before_fit', 'Start Epoch Loop', 'before_epoch', 'Start Train', 'before_train',
'Start Batch Loop', 'before_batch', 'after_pred', 'after_loss', 'before_backward', 'after_backward',
'after_step', 'after_cancel_batch', 'after_batch', 'End Batch Loop', 'End Train',
'after_cancel_train', 'after_train', 'Start Valid', 'before_validate', 'Start Batch Loop',
'**CBs same as train batch**', 'End Batch Loop', 'End Valid', 'after_cancel_validate',
'after_validate', 'End Epoch Loop', 'after_cancel_epoch', 'after_epoch', 'End Fit',
'after_cancel_fit', 'after_fit']
# +
# export
@log_args(but='dls,model,opt_func,cbs')
class Learner():
def __init__(self, dls, model, loss_func=None, opt_func=Adam, lr=defaults.lr, splitter=trainable_params, cbs=None,
metrics=None, path=None, model_dir='models', wd=None, wd_bn_bias=False, train_bn=True,
moms=(0.95, 0.85, 0.95)):
path = Path(path) if path is not None else getattr(dls, 'path', Path('.'))
if loss_func is None:
loss_func = getattr(dls.train_ds, 'loss_func', None)
assert loss_func is not None, "Could not infer loss function from the data, please pass a loss function."
self.dls, self.model = dls, model
store_attr(but='dls,model,cbs')
self.training, self.create_mbar, self.logger, self.opt, self.cbs = False, True, print, None, L()
self.add_cbs([(cb() if isinstance(cb, type) else cb) for cb in L(defaults.callbacks) + L(cbs)])
self("after_create")
@property
def metrics(self): return self._metrics
@metrics.setter
def metrics(self, v): self._metrics = L(v).map(mk_metric)
def _grab_cbs(self, cb_cls): return L(cb for cb in self.cbs if isinstance(cb, cb_cls))
def add_cbs(self, cbs): L(cbs).map(self.add_cb)
def remove_cbs(self, cbs): L(cbs).map(self.remove_cb)
def add_cb(self, cb):
old = getattr(self, cb.name, None)
assert not old or isinstance(old, type(cb)), f"self.{cb.name} already registered"
cb.learn = self
setattr(self, cb.name, cb)
self.cbs.append(cb)
return self
def remove_cb(self, cb):
if isinstance(cb, type):
self.remove_cbs(self._grab_cbs(cb))
else:
cb.learn = None
if hasattr(self, cb.name):
delattr(self, cb.name)
if cb in self.cbs:
self.cbs.remove(cb)
@contextmanager
def added_cbs(self, cbs):
self.add_cbs(cbs)
try:
yield
finally:
self.remove_cbs(cbs)
@contextmanager
def removed_cbs(self, cbs):
self.remove_cbs(cbs)
try:
yield self
finally:
self.add_cbs(cbs)
def ordered_cbs(self, event): return [cb for cb in sort_by_run(self.cbs) if hasattr(cb, event)]
def __call__(self, event_name): L(event_name).map(self._call_one)
def _call_one(self, event_name):
assert hasattr(event, event_name), event_name
[cb(event_name) for cb in sort_by_run(self.cbs)]
def _bn_bias_state(self, with_bias): return norm_bias_params(self.model, with_bias).map(self.opt.state)
def create_opt(self):
self.opt = self.opt_func(self.splitter(self.model), lr=self.lr)
if not self.wd_bn_bias:
for p in self._bn_bias_state(True):
p['do_wd'] = False
if self.train_bn:
for p in self._bn_bias_state(False):
p['force_train'] = True
def _split(self, b):
i = getattr(self.dls, 'n_inp', 1 if len(b) == 1 else len(b) - 1)
self.xb, self.yb = b[:i], b[i:]
def _step(self): self.opt.step()
def _backward(self): self.loss.backward()
    def _with_events(self, f, event_type, ex, final=noop):
        "Run `f` between `before_{event_type}` and `after_{event_type}` events, mapping `ex` to `after_cancel_{event_type}`."
        try:
            self(f'before_{event_type}')
            f()
        except ex:
            # `ex` is one of the Cancel*Exception classes: abort the rest of `f`
            # but still fall through to the `after_*` event below.
            self(f'after_cancel_{event_type}')
        finally:
            # The `after_*` event and `final` cleanup run whether `f` completed,
            # was cancelled, or raised something else entirely.
            self(f'after_{event_type}')
            final()
def all_batches(self):
self.n_iter = len(self.dl)
for o in enumerate(self.dl):
self.one_batch(*o)
def _do_one_batch(self):
self.pred = self.model(*self.xb)
self('after_pred')
if len(self.yb):
self.loss = self.loss_func(self.pred, *self.yb)
self('after_loss')
if not self.training or not len(self.yb):
return
self('before_backward')
self._backward()
self('after_backward')
self._step()
self('after_step')
self.opt.zero_grad()
def one_batch(self, i, b):
self.iter = i
self._split(b)
self._with_events(self._do_one_batch, 'batch', CancelBatchException)
def _do_epoch_train(self):
self.dl = self.dls.train
self._with_events(self.all_batches, 'train', CancelTrainException)
def _do_epoch_validate(self, ds_idx=1, dl=None):
if dl is None:
dl = self.dls[ds_idx]
self.dl = dl
with torch.no_grad():
self._with_events(self.all_batches, 'validate', CancelValidException)
def _do_epoch(self):
self._do_epoch_train()
self._do_epoch_validate()
def _do_fit(self):
for epoch in range(self.n_epoch):
self.epoch = epoch
self._with_events(self._do_epoch, 'epoch', CancelEpochException)
@log_args(but='cbs')
def fit(self, n_epoch, lr=None, wd=None, cbs=None, reset_opt=False):
with self.added_cbs(cbs):
if reset_opt or not self.opt:
self.create_opt()
if wd is None:
wd = self.wd
if wd is not None:
self.opt.set_hypers(wd=wd)
self.opt.set_hypers(lr=self.lr if lr is None else lr)
self.n_epoch = n_epoch
self._with_events(self._do_fit, 'fit', CancelFitException, self._end_cleanup)
def _end_cleanup(self): self.dl, self.xb, self.yb, self.pred, self.loss = None, (None,), (None,), None, None
def __enter__(self): self(_before_epoch)
return self
def __exit__(self, exc_type, exc_value, tb): self(_after_epoch)
def validation_context(self, cbs=None, inner=False):
cms = [self.no_logging(), self.no_mbar()]
if cbs:
cms.append(self.added_cbs(cbs))
if not inner:
cms.append(self)
return ContextManagers(cms)
def validate(self, ds_idx=1, dl=None, cbs=None):
if dl is None:
dl = self.dls[ds_idx]
with self.validation_context(cbs=cbs):
self._do_epoch_validate(ds_idx, dl)
return getattr(self, 'final_record', None)
@delegates(GatherPredsCallback.__init__)
def get_preds(self, ds_idx=1, dl=None, with_input=False, with_decoded=False, with_loss=False, act=None,
inner=False, reorder=True, cbs=None, **kwargs):
if dl is None:
dl = self.dls[ds_idx].new(shuffled=False, drop_last=False)
if reorder and hasattr(dl, 'get_idxs'):
idxs = dl.get_idxs()
dl = dl.new(get_idxs=_ConstantFunc(idxs))
cb = GatherPredsCallback(with_input=with_input, with_loss=with_loss, **kwargs)
ctx_mgrs = self.validation_context(cbs=L(cbs) + [cb], inner=inner)
if with_loss:
ctx_mgrs.append(self.loss_not_reduced())
with ContextManagers(ctx_mgrs):
self._do_epoch_validate(dl=dl)
if act is None:
act = getattr(self.loss_func, 'activation', noop)
res = cb.all_tensors()
pred_i = 1 if with_input else 0
if res[pred_i] is not None:
res[pred_i] = act(res[pred_i])
if with_decoded:
res.insert(pred_i + 2, getattr(self.loss_func, 'decodes', noop)(res[pred_i]))
if reorder and hasattr(dl, 'get_idxs'):
res = nested_reorder(res, tensor(idxs).argsort())
return tuple(res)
self._end_cleanup()
def predict(self, item, rm_type_tfms=None, with_input=False):
dl = self.dls.test_dl([item], rm_type_tfms=rm_type_tfms, num_workers=0)
inp, preds, _, dec_preds = self.get_preds(dl=dl, with_input=True, with_decoded=True)
i = getattr(self.dls, 'n_inp', -1)
inp = (inp,) if i == 1 else tuplify(inp)
dec = self.dls.decode_batch(inp + tuplify(dec_preds))[0]
dec_inp, dec_targ = map(detuplify, [dec[:i], dec[i:]])
res = dec_targ, dec_preds[0], preds[0]
if with_input:
res = (dec_inp,) + res
return res
def show_results(self, ds_idx=1, dl=None, max_n=9, shuffle=True, **kwargs):
if dl is None:
dl = self.dls[ds_idx].new(shuffle=shuffle)
b = dl.one_batch()
_, _, preds = self.get_preds(dl=[b], with_decoded=True)
self.dls.show_results(b, preds, max_n=max_n, **kwargs)
def show_training_loop(self):
indent = 0
for s in _loop:
if s.startswith('Start'):
print(f'{" "*indent}{s}')
indent += 2
elif s.startswith('End'):
indent -= 2
print(f'{" "*indent}{s}')
else:
print(f'{" "*indent} - {s:15}:', self.ordered_cbs(s))
@contextmanager
def | |
)
# Big-endian output
decoded = decode_frame(header + data, 2 * 3, 32, '>')
arr = np.frombuffer(decoded, np.dtype('>u4'))
assert [0, 16777216, 65536, 256, 1, 4294967295] == arr.tolist()
# Little-endian output
decoded = decode_frame(header + data, 2 * 3, 32, '<')
arr = np.frombuffer(decoded, np.dtype('<u4'))
assert [0, 16777216, 65536, 256, 1, 4294967295] == arr.tolist()
    def test_u32_3s(self):
        """Test decoding 32-bit, 3 sample/pixel."""
        # RLE frame header: little-endian u32 segment count, then the segment
        # offsets. 32 bits/sample * 3 samples -> 12 segments (4 byte-planes each).
        header = (
            b'\x0C\x00\x00\x00' # 12 segments
            b'\x40\x00\x00\x00' # 64
            b'\x47\x00\x00\x00' # 71
            b'\x4E\x00\x00\x00' # 78
            b'\x55\x00\x00\x00' # 85
            b'\x5C\x00\x00\x00' # 92
            b'\x63\x00\x00\x00' # 99
            b'\x6A\x00\x00\x00' # 106
            b'\x71\x00\x00\x00' # 113
            b'\x78\x00\x00\x00' # 120
            b'\x7F\x00\x00\x00' # 127
            b'\x86\x00\x00\x00' # 134
            b'\x8D\x00\x00\x00' # 141
        )
        # Pad the header to the fixed 64-byte RLE header length.
        header += (64 - len(header)) * b'\x00'
        # 2 x 3 data; each 7-byte segment is a PackBits literal run
        # (control byte 0x05 -> copy the next 6 bytes verbatim).
        data = (
            # 0, 16777216, 65536, 256, 1, 4294967295
            b'\x05\x00\x01\x00\x00\x00\xFF' # MSB
            b'\x05\x00\x00\x01\x00\x00\xFF'
            b'\x05\x00\x00\x00\x01\x00\xFF'
            b'\x05\x00\x00\x00\x00\x01\xFF' # LSB
            b'\x05\xFF\x01\x00\x00\x00\x00' # MSB
            b'\x05\xFF\x00\x01\x00\x00\x00'
            b'\x05\xFF\x00\x00\x01\x00\x00'
            b'\x05\xFF\x00\x00\x00\x01\x00' # LSB
            b'\x05\x00\x01\x00\x00\x00\xFF' # MSB
            b'\x05\x00\x00\x01\x00\x00\xFF'
            b'\x05\x00\x00\x00\x01\x00\xFF'
            b'\x05\x01\x00\x00\x00\x01\xFE' # LSB
        )
        # Big-endian output
        decoded = decode_frame(header + data, 2 * 3, 32, '>')
        arr = np.frombuffer(decoded, np.dtype('>u4'))
        assert [0, 16777216, 65536, 256, 1, 4294967295] == arr[:6].tolist()
        assert [4294967295, 16777216, 65536, 256, 1, 0] == arr[6:12].tolist()
        assert [1, 16777216, 65536, 256, 1, 4294967294] == arr[12:].tolist()
        # Little-endian output
        decoded = decode_frame(header + data, 2 * 3, 32, '<')
        arr = np.frombuffer(decoded, np.dtype('<u4'))
        assert [0, 16777216, 65536, 256, 1, 4294967295] == arr[:6].tolist()
        assert [4294967295, 16777216, 65536, 256, 1, 0] == arr[6:12].tolist()
        assert [1, 16777216, 65536, 256, 1, 4294967294] == arr[12:].tolist()
@pytest.mark.skipif(not HAVE_PYDICOM, reason="No pydicom")
class TestDecodeFrame_Datasets:
"""Test DICOM dataset decoding."""
    def test_u8_1s_1f(self):
        """Test unsigned 8-bit, 1 sample/px, 1 frame."""
        ds = INDEX["OBXXXX1A_rle.dcm"]['ds']
        # Fixture sanity checks: RLE Lossless, 8-bit unsigned, monochrome, 1 frame.
        assert ds.file_meta.TransferSyntaxUID == RLELossless
        assert 8 == ds.BitsAllocated
        assert 1 == ds.SamplesPerPixel
        assert 0 == ds.PixelRepresentation
        assert 1 == getattr(ds, 'NumberOfFrames', 1)
        # pydicom's own decoder supplies the reference output.
        ref = ds.pixel_array
        arr = pixel_array(ds)
        assert arr.flags.writeable
        assert np.array_equal(arr, ref)
        assert (600, 800) == arr.shape
        # '>u1' equals 'u1': byte order is meaningless for a 1-byte dtype.
        assert '>u1' == arr.dtype
        # Spot-check known pixel values from the fixture.
        assert 244 == arr[0].min() == arr[0].max()
        assert (1, 246, 1) == tuple(arr[300, 491:494])
        assert 0 == arr[-1].min() == arr[-1].max()
def test_u8_1s_2f(self):
"""Test unsigned 8-bit, 1 sample/px, 2 frame."""
ds = INDEX["OBXXXX1A_rle_2frame.dcm"]['ds']
assert ds.file_meta.TransferSyntaxUID == RLELossless
assert 8 == ds.BitsAllocated
assert 1 == ds.SamplesPerPixel
assert 0 == ds.PixelRepresentation
assert 2 == getattr(ds, 'NumberOfFrames', 1)
ref = ds.pixel_array
arr = pixel_array(ds)
assert arr.flags.writeable
assert np.array_equal(arr, ref)
assert (2, 600, 800) == arr.shape
assert '>u1' == arr.dtype
assert 244 == arr[0, 0].min() == arr[0, 0].max()
assert (1, 246, 1) == tuple(arr[0, 300, 491:494])
assert 0 == arr[0, -1].min() == arr[0, -1].max()
# Frame 2 is frame 1 inverted
assert np.array_equal((2**ds.BitsAllocated - 1) - arr[1], arr[0])
def test_u8_3s_1f(self):
"""Test unsigned 8-bit, 3 sample/px, 1 frame."""
ds = INDEX["SC_rgb_rle.dcm"]['ds']
assert ds.file_meta.TransferSyntaxUID == RLELossless
assert 8 == ds.BitsAllocated
assert 3 == ds.SamplesPerPixel
assert 0 == ds.PixelRepresentation
assert 1 == getattr(ds, 'NumberOfFrames', 1)
ref = ds.pixel_array
arr = pixel_array(ds)
assert arr.flags.writeable
assert np.array_equal(arr, ref)
assert (100, 100, 3) == arr.shape
assert '>u1' == arr.dtype
assert (255, 0, 0) == tuple(arr[5, 50, :])
assert (255, 128, 128) == tuple(arr[15, 50, :])
assert (0, 255, 0) == tuple(arr[25, 50, :])
assert (128, 255, 128) == tuple(arr[35, 50, :])
assert (0, 0, 255) == tuple(arr[45, 50, :])
assert (128, 128, 255) == tuple(arr[55, 50, :])
assert (0, 0, 0) == tuple(arr[65, 50, :])
assert (64, 64, 64) == tuple(arr[75, 50, :])
assert (192, 192, 192) == tuple(arr[85, 50, :])
assert (255, 255, 255) == tuple(arr[95, 50, :])
def test_u8_3s_2f(self):
"""Test unsigned 8-bit, 3 sample/px, 2 frame."""
ds = INDEX["SC_rgb_rle_2frame.dcm"]['ds']
assert ds.file_meta.TransferSyntaxUID == RLELossless
assert 8 == ds.BitsAllocated
assert 3 == ds.SamplesPerPixel
assert 0 == ds.PixelRepresentation
assert 2 == getattr(ds, 'NumberOfFrames', 1)
ref = ds.pixel_array
arr = pixel_array(ds)
assert arr.flags.writeable
assert np.array_equal(arr, ref)
assert (2, 100, 100, 3) == arr.shape
assert '>u1' == arr.dtype
# Frame 1
frame = arr[0]
assert (255, 0, 0) == tuple(frame[5, 50, :])
assert (255, 128, 128) == tuple(frame[15, 50, :])
assert (0, 255, 0) == tuple(frame[25, 50, :])
assert (128, 255, 128) == tuple(frame[35, 50, :])
assert (0, 0, 255) == tuple(frame[45, 50, :])
assert (128, 128, 255) == tuple(frame[55, 50, :])
assert (0, 0, 0) == tuple(frame[65, 50, :])
assert (64, 64, 64) == tuple(frame[75, 50, :])
assert (192, 192, 192) == tuple(frame[85, 50, :])
assert (255, 255, 255) == tuple(frame[95, 50, :])
# Frame 2 is frame 1 inverted
assert np.array_equal((2**ds.BitsAllocated - 1) - arr[1], arr[0])
def test_i16_1s_1f(self):
"""Test signed 16-bit, 1 sample/px, 1 frame."""
ds = INDEX["MR_small_RLE.dcm"]['ds']
assert ds.file_meta.TransferSyntaxUID == RLELossless
assert 16 == ds.BitsAllocated
assert 1 == ds.SamplesPerPixel
assert 1 == ds.PixelRepresentation
assert 1 == getattr(ds, 'NumberOfFrames', 1)
ref = ds.pixel_array
arr = pixel_array(ds)
assert arr.flags.writeable
assert np.array_equal(arr, ref)
assert (64, 64) == arr.shape
assert '<i2' == arr.dtype
assert (422, 319, 361) == tuple(arr[0, 31:34])
assert (366, 363, 322) == tuple(arr[31, :3])
assert (1369, 1129, 862) == tuple(arr[-1, -3:])
def test_u16_1s_10f(self):
"""Test unsigned 16-bit, 1 sample/px, 10 frame."""
ds = INDEX["emri_small_RLE.dcm"]['ds']
assert ds.file_meta.TransferSyntaxUID == RLELossless
assert 16 == ds.BitsAllocated
assert 1 == ds.SamplesPerPixel
assert 0 == ds.PixelRepresentation
assert 10 == getattr(ds, 'NumberOfFrames', 1)
ref = ds.pixel_array
arr = pixel_array(ds)
assert arr.flags.writeable
assert np.array_equal(arr, ref)
assert (10, 64, 64) == arr.shape
assert '<u2' == arr.dtype
# Frame 1
assert (206, 197, 159) == tuple(arr[0, 0, 31:34])
assert (49, 78, 128) == tuple(arr[0, 31, :3])
assert (362, 219, 135) == tuple(arr[0, -1, -3:])
# Frame 5
assert (67, 82, 44) == tuple(arr[4, 0, 31:34])
assert (37, 41, 17) == tuple(arr[4, 31, :3])
assert (225, 380, 355) == tuple(arr[4, -1, -3:])
# Frame 10
assert (72, 86, 69) == tuple(arr[-1, 0, 31:34])
assert (25, 4, 9) == tuple(arr[-1, 31, :3])
assert (227, 300, 147) == tuple(arr[-1, -1, -3:])
def test_u16_3s_1f(self):
"""Test unsigned 16-bit, 3 sample/px, 1 frame."""
ds = INDEX["SC_rgb_rle_16bit.dcm"]['ds']
assert ds.file_meta.TransferSyntaxUID == RLELossless
assert 16 == ds.BitsAllocated
assert 3 == ds.SamplesPerPixel
assert 0 == ds.PixelRepresentation
assert 1 == getattr(ds, 'NumberOfFrames', 1)
ref = ds.pixel_array
arr = pixel_array(ds)
assert arr.flags.writeable
assert np.array_equal(ds.pixel_array, ref)
assert (100, 100, 3) == arr.shape
assert '<u2' == arr.dtype
assert (65535, 0, 0) == tuple(arr[5, 50, :])
assert (65535, 32896, 32896) == tuple(arr[15, 50, :])
assert (0, 65535, 0) == tuple(arr[25, 50, :])
assert (32896, 65535, 32896) == tuple(arr[35, 50, :])
assert (0, 0, 65535) == tuple(arr[45, 50, :])
assert (32896, 32896, 65535) == tuple(arr[55, 50, :])
assert (0, 0, 0) == tuple(arr[65, 50, :])
assert (16448, 16448, 16448) == tuple(arr[75, 50, :])
assert (49344, 49344, 49344) == tuple(arr[85, 50, :])
assert (65535, 65535, 65535) == tuple(arr[95, 50, :])
def test_u16_3s_2f(self):
"""Test unsigned 16-bit, 3 sample/px, 2 frame."""
ds = INDEX["SC_rgb_rle_16bit_2frame.dcm"]['ds']
assert ds.file_meta.TransferSyntaxUID == RLELossless
assert 16 == ds.BitsAllocated
assert 3 == ds.SamplesPerPixel
assert 0 == ds.PixelRepresentation
assert 2 == getattr(ds, 'NumberOfFrames', 1)
ref = ds.pixel_array
arr = pixel_array(ds)
assert arr.flags.writeable
assert np.array_equal(ds.pixel_array, ref)
assert (2, 100, 100, 3) == arr.shape
assert '<u2' == arr.dtype
# Frame 1
frame = arr[0]
assert (65535, 0, 0) == tuple(frame[5, 50, :])
assert (65535, 32896, 32896) == tuple(frame[15, 50, :])
assert (0, 65535, 0) == tuple(frame[25, 50, :])
assert (32896, 65535, 32896) == tuple(frame[35, 50, :])
assert (0, 0, 65535) == tuple(frame[45, 50, :])
assert (32896, 32896, 65535) == tuple(frame[55, 50, :])
assert (0, 0, 0) == tuple(frame[65, 50, :])
assert (16448, 16448, 16448) == tuple(frame[75, 50, :])
assert (49344, 49344, 49344) == tuple(frame[85, 50, :])
assert (65535, 65535, 65535) == tuple(frame[95, 50, :])
# Frame 2 is frame 1 inverted
assert np.array_equal((2**ds.BitsAllocated - 1) - arr[1], arr[0])
def test_u32_1s_1f(self):
"""Test unsigned 32-bit, 1 sample/px, 1 frame."""
ds = INDEX["rtdose_rle_1frame.dcm"]['ds']
assert ds.file_meta.TransferSyntaxUID == RLELossless
assert 32 == ds.BitsAllocated
assert 1 == ds.SamplesPerPixel
assert 0 == ds.PixelRepresentation
assert 1 == getattr(ds, 'NumberOfFrames', 1)
ref = ds.pixel_array
arr = pixel_array(ds)
assert (10, 10) == arr.shape
assert '<u4' == arr.dtype
assert arr.flags.writeable
assert np.array_equal(arr, ref)
assert (1249000, 1249000, 1250000) == tuple(arr[0, :3])
assert (1031000, 1029000, 1027000) == tuple(arr[4, 3:6])
assert (803000, 801000, 798000) == tuple(arr[-1, -3:])
# | |
(InstanceTaskConfig, InstanceTaskConfig.thrift_spec), None, ), # 2
(3, TType.STRUCT, 'settings', (JobUpdateSettings, JobUpdateSettings.thrift_spec), None, ), # 3
)
  def __init__(self, initialState=None, desiredState=None, settings=None,):
    # initialState: set<InstanceTaskConfig> (thrift field 1)
    # desiredState: InstanceTaskConfig (thrift field 2)
    # settings: JobUpdateSettings (thrift field 3)
    self.initialState = initialState
    self.desiredState = desiredState
    self.settings = settings
  def read(self, iprot):
    """Deserialize this struct from *iprot* (a Thrift protocol object)."""
    # Fast path: delegate to the C-accelerated binary decoder when the
    # accelerated protocol, a C-readable transport and fastbinary are all
    # available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: walk the encoded fields one at a time until STOP.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.SET:
          # Field 1: set<InstanceTaskConfig> initialState
          self.initialState = set()
          (_etype168, _size165) = iprot.readSetBegin()
          for _i169 in range(_size165):
            _elem170 = InstanceTaskConfig()
            _elem170.read(iprot)
            self.initialState.add(_elem170)
          iprot.readSetEnd()
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRUCT:
          # Field 2: InstanceTaskConfig desiredState
          self.desiredState = InstanceTaskConfig()
          self.desiredState.read(iprot)
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.STRUCT:
          # Field 3: JobUpdateSettings settings
          self.settings = JobUpdateSettings()
          self.settings.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        # Unknown field id: skip its payload to stay in sync with the stream.
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to *oprot* (a Thrift protocol object)."""
    # Fast path: C-accelerated binary encoder when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    # Slow path: emit each non-None field in declaration order.
    oprot.writeStructBegin('JobUpdateInstructions')
    if self.initialState is not None:
      oprot.writeFieldBegin('initialState', TType.SET, 1)
      oprot.writeSetBegin(TType.STRUCT, len(self.initialState))
      for iter171 in self.initialState:
        iter171.write(oprot)
      oprot.writeSetEnd()
      oprot.writeFieldEnd()
    if self.desiredState is not None:
      oprot.writeFieldBegin('desiredState', TType.STRUCT, 2)
      self.desiredState.write(oprot)
      oprot.writeFieldEnd()
    if self.settings is not None:
      oprot.writeFieldBegin('settings', TType.STRUCT, 3)
      self.settings.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # Generated struct has no required fields, so there is nothing to check.
    return
  def __hash__(self):
    # 31-based hash accumulation over the three fields.
    # NOTE(review): read() populates initialState with a mutable set, which is
    # unhashable — hashing such an instance would raise TypeError.  Presumably
    # callers only hash instances whose fields are hashable; confirm.
    value = 17
    value = (value * 31) ^ hash(self.initialState)
    value = (value * 31) ^ hash(self.desiredState)
    value = (value * 31) ^ hash(self.settings)
    return value
  def __repr__(self):
    # Render as ClassName(field=value, ...) using the live attribute dict.
    L = ['%s=%r' % (key, value)
         for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    # Structural equality: same class and identical attribute dicts.
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    # Defined explicitly for Python 2 compatibility of generated code.
    return not (self == other)
class JobUpdate:
  """
  Full definition of the job update.
  Attributes:
   - summary: Update summary.
   - instructions: Update configuration.
  """

  # (field id, type, name, type args, default) tuples; index 0 is unused.
  thrift_spec = (
    None, # 0
    (1, TType.STRUCT, 'summary', (JobUpdateSummary, JobUpdateSummary.thrift_spec), None, ), # 1
    (2, TType.STRUCT, 'instructions', (JobUpdateInstructions, JobUpdateInstructions.thrift_spec), None, ), # 2
  )

  def __init__(self, summary=None, instructions=None,):
    self.summary = summary
    self.instructions = instructions

  def read(self, iprot):
    """Deserialize from *iprot*; uses the C-accelerated decoder if possible."""
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRUCT:
          # Field 1: JobUpdateSummary summary
          self.summary = JobUpdateSummary()
          self.summary.read(iprot)
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRUCT:
          # Field 2: JobUpdateInstructions instructions
          self.instructions = JobUpdateInstructions()
          self.instructions.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        # Unknown field id: skip payload to stay in sync.
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize to *oprot*; uses the C-accelerated encoder if possible."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('JobUpdate')
    if self.summary is not None:
      oprot.writeFieldBegin('summary', TType.STRUCT, 1)
      self.summary.write(oprot)
      oprot.writeFieldEnd()
    if self.instructions is not None:
      oprot.writeFieldBegin('instructions', TType.STRUCT, 2)
      self.instructions.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # No required fields to check.
    return

  def __hash__(self):
    # 31-based hash accumulation over the fields.
    value = 17
    value = (value * 31) ^ hash(self.summary)
    value = (value * 31) ^ hash(self.instructions)
    return value

  def __repr__(self):
    # Render as ClassName(field=value, ...).
    L = ['%s=%r' % (key, value)
         for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    # Structural equality: same class, identical attribute dicts.
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class JobUpdateDetails:
  """
  Attributes:
   - update: Update definition.
   - updateEvents: History for this update.
   - instanceEvents: History for the individual instances updated.
  """

  # (field id, type, name, type args, default) tuples; index 0 is unused.
  thrift_spec = (
    None, # 0
    (1, TType.STRUCT, 'update', (JobUpdate, JobUpdate.thrift_spec), None, ), # 1
    (2, TType.LIST, 'updateEvents', (TType.STRUCT,(JobUpdateEvent, JobUpdateEvent.thrift_spec)), None, ), # 2
    (3, TType.LIST, 'instanceEvents', (TType.STRUCT,(JobInstanceUpdateEvent, JobInstanceUpdateEvent.thrift_spec)), None, ), # 3
  )

  def __init__(self, update=None, updateEvents=None, instanceEvents=None,):
    self.update = update
    self.updateEvents = updateEvents
    self.instanceEvents = instanceEvents

  def read(self, iprot):
    """Deserialize from *iprot*; uses the C-accelerated decoder if possible."""
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRUCT:
          # Field 1: JobUpdate update
          self.update = JobUpdate()
          self.update.read(iprot)
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.LIST:
          # Field 2: list<JobUpdateEvent> updateEvents
          self.updateEvents = []
          (_etype175, _size172) = iprot.readListBegin()
          for _i176 in range(_size172):
            _elem177 = JobUpdateEvent()
            _elem177.read(iprot)
            self.updateEvents.append(_elem177)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.LIST:
          # Field 3: list<JobInstanceUpdateEvent> instanceEvents
          self.instanceEvents = []
          (_etype181, _size178) = iprot.readListBegin()
          for _i182 in range(_size178):
            _elem183 = JobInstanceUpdateEvent()
            _elem183.read(iprot)
            self.instanceEvents.append(_elem183)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
      else:
        # Unknown field id: skip payload to stay in sync.
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize to *oprot*; uses the C-accelerated encoder if possible."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('JobUpdateDetails')
    if self.update is not None:
      oprot.writeFieldBegin('update', TType.STRUCT, 1)
      self.update.write(oprot)
      oprot.writeFieldEnd()
    if self.updateEvents is not None:
      oprot.writeFieldBegin('updateEvents', TType.LIST, 2)
      oprot.writeListBegin(TType.STRUCT, len(self.updateEvents))
      for iter184 in self.updateEvents:
        iter184.write(oprot)
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    if self.instanceEvents is not None:
      oprot.writeFieldBegin('instanceEvents', TType.LIST, 3)
      oprot.writeListBegin(TType.STRUCT, len(self.instanceEvents))
      for iter185 in self.instanceEvents:
        iter185.write(oprot)
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # No required fields to check.
    return

  def __hash__(self):
    # 31-based hash accumulation over the fields.
    # NOTE(review): updateEvents/instanceEvents are lists (unhashable) once
    # populated by read(); hashing such instances would raise TypeError.
    value = 17
    value = (value * 31) ^ hash(self.update)
    value = (value * 31) ^ hash(self.updateEvents)
    value = (value * 31) ^ hash(self.instanceEvents)
    return value

  def __repr__(self):
    # Render as ClassName(field=value, ...).
    L = ['%s=%r' % (key, value)
         for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    # Structural equality: same class, identical attribute dicts.
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class JobUpdateRequest:
  """
  A request to update the following instances of an existing job. Used by startUpdate.
  Attributes:
   - taskConfig: Desired TaskConfig to apply.
   - instanceCount: Desired number of instances of the task config.
   - settings: Update settings and limits.
  """

  # (field id, type, name, type args, default) tuples; index 0 is unused.
  thrift_spec = (
    None, # 0
    (1, TType.STRUCT, 'taskConfig', (TaskConfig, TaskConfig.thrift_spec), None, ), # 1
    (2, TType.I32, 'instanceCount', None, None, ), # 2
    (3, TType.STRUCT, 'settings', (JobUpdateSettings, JobUpdateSettings.thrift_spec), None, ), # 3
  )

  def __init__(self, taskConfig=None, instanceCount=None, settings=None,):
    self.taskConfig = taskConfig
    self.instanceCount = instanceCount
    self.settings = settings

  def read(self, iprot):
    """Deserialize from *iprot*; uses the C-accelerated decoder if possible."""
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRUCT:
          # Field 1: TaskConfig taskConfig
          self.taskConfig = TaskConfig()
          self.taskConfig.read(iprot)
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.I32:
          # Field 2: i32 instanceCount
          self.instanceCount = iprot.readI32()
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.STRUCT:
          # Field 3: JobUpdateSettings settings
          self.settings = JobUpdateSettings()
          self.settings.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        # Unknown field id: skip payload to stay in sync.
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize to *oprot*; uses the C-accelerated encoder if possible."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('JobUpdateRequest')
    if self.taskConfig is not None:
      oprot.writeFieldBegin('taskConfig', TType.STRUCT, 1)
      self.taskConfig.write(oprot)
      oprot.writeFieldEnd()
    if self.instanceCount is not None:
      oprot.writeFieldBegin('instanceCount', TType.I32, 2)
      oprot.writeI32(self.instanceCount)
      oprot.writeFieldEnd()
    if self.settings is not None:
      oprot.writeFieldBegin('settings', TType.STRUCT, 3)
      self.settings.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # No required fields to check.
    return

  def __hash__(self):
    # 31-based hash accumulation over the fields.
    value = 17
    value = (value * 31) ^ hash(self.taskConfig)
    value = (value * 31) ^ hash(self.instanceCount)
    value = (value * 31) ^ hash(self.settings)
    return value

  def __repr__(self):
    # Render as ClassName(field=value, ...).
    L = ['%s=%r' % (key, value)
         for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    # Structural equality: same class, identical attribute dicts.
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class JobUpdateQuery:
"""
Contains a set of restrictions on matching job updates where all restrictions must be met
(terms are AND'ed together).
Attributes:
- role: Job role.
- key: Unique identifier for a job update.
- jobKey: Job key.
- user: User who created the update.
- updateStatuses: Set of update statuses.
- offset: Offset to serve data from. Used by pagination.
- limit: Number or records to serve. Used by pagination.
"""
thrift_spec = (
None, # 0
None, # 1
(2, TType.STRING, 'role', None, None, ), # 2
(3, TType.STRUCT, 'jobKey', (JobKey, JobKey.thrift_spec), None, ), # 3
(4, TType.STRING, 'user', None, None, ), # 4
(5, TType.SET, 'updateStatuses', (TType.I32,None), None, ), # 5
(6, TType.I32, 'offset', None, None, ), # 6
(7, TType.I32, 'limit', None, None, ), # 7
(8, TType.STRUCT, 'key', (JobUpdateKey, JobUpdateKey.thrift_spec), None, ), # 8
)
def __init__(self, role=None, key=None, jobKey=None, user=None, updateStatuses=None, offset=None, limit=None,):
self.role = role
self.key = key
self.jobKey = jobKey
self.user = user
self.updateStatuses = updateStatuses
self.offset = offset
self.limit = limit
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is | |
from __future__ import unicode_literals
import logging
import os
import sys
import getpass
from six import reraise
from io import StringIO
from functools import wraps
from collections import defaultdict
import sprinter.lib as lib
from sprinter.core import PHASE, load_global_config, Directory, Injections, Manifest, load_manifest, FeatureDict
from sprinter.core.templates import shell_utils_template, source_template, warning_template
from sprinter.core.messages import REMOVE_WARNING, INVALID_MANIFEST
from sprinter.lib import system
from sprinter.exceptions import SprinterException, FormulaException
from sprinter.external import brew
def warmup(f):
    """Decorator: make sure the environment is warmed up before *f* runs.

    Calls ``self.warmup()`` once, lazily, the first time any decorated
    command executes on an instance.
    """
    @wraps(f)
    def inner(self, *args, **kwargs):
        if not self.warmed_up:
            self.warmup()
        return f(self, *args, **kwargs)
    return inner
def install_required(f):
    """Decorator: fail fast when the namespace has not been installed yet.

    Raises SprinterException if the environment directory is still new
    (i.e. nothing has been installed); otherwise runs *f* unchanged.
    """
    @wraps(f)
    def inner(self, *args, **kwargs):
        if self.directory.new:
            raise SprinterException("Namespace %s is not yet installed!" % self.namespace)
        return f(self, *args, **kwargs)
    return inner
# http://www.gnu.org/software/bash/manual/bashref.html#Bash-Startup-Files
# http://zsh.sourceforge.net/Guide/zshguide02.html
# Per-shell dotfiles sprinter injects into: 'rc' files run for interactive
# shells, 'env' files for login shells.  The 'gui' entry covers desktop
# sessions: a dotfile list on debian, a helper callable on osx.
SHELL_CONFIG = {
    'bash': {
        'rc': ['.bashrc'],
        'env': ['.bash_profile', '.bash_login', '.profile']
    },
    'zsh': {
        'rc': ['.zshrc'],
        'env': ['.zprofile', '.zlogin']
    },
    'gui': {
        'debian': ['.profile'],
        'osx': lib.insert_environment_osx
    }
}
# for now, they are all still dealt with en masse
RC_FILES = []
ENV_FILES = []
for shell, shell_config in SHELL_CONFIG.items():
    if shell != 'gui':
        RC_FILES += shell_config['rc']
        ENV_FILES += shell_config['env']
# Every dotfile sprinter may have touched (used when clearing injections).
CONFIG_FILES = RC_FILES + ENV_FILES
class Environment(object):
    # Orchestrates the lifecycle of a sprinter environment: install, update,
    # activate/deactivate, remove, validate.

    source = None  # the path to the source handle, the handle itself, or a manifest instance
    target = None  # the path to the target handle, the handle itself, or a manifest instance
    namespace = None  # the namespace of the environment
    custom_directory_root = None  # the root to install directories too
    do_inject_environment_config = True  # inject configuration into shells
    sprinter_namespace = None  # the namespace to make installs with. this affects:
    phase = None  # the phase currently running
    # the prefix added to injections
    # the libraries that environment utilizes
    directory = None  # handles interactions with the environment directory
    injections = None  # handles injections
    global_injections = None  # handles injections for the global sprinter configuration
    # variables typically populated programatically
    warmed_up = False  # returns true if the environment is ready for environments
    shell_util_path = None  # the path to the shell utils file
    error_occured = False  # set when any feature action fails
    _errors = []  # list to keep all the errors
    sandboxes = []  # a list of package managers to sandbox (brew)
    # specifies where to get the global sprinter root
    global_config = None  # configuration file, which defaults to loading from SPRINTER_ROOT/.global/config.cfg
    ignore_errors = False  # ignore errors in features
    def __init__(self,
                 logger=None,
                 logging_level=logging.INFO,
                 root=None,
                 sprinter_namespace=None,
                 global_config=None,
                 ignore_errors=False):
        """Set up logging, root paths, and the global configuration.

        logger: pre-built logger; one is created at *logging_level* if omitted.
        root: sandbox root directory (defaults to ~/.<sprinter_namespace>).
        sprinter_namespace: install namespace prefix (defaults to 'sprinter').
        global_config: pre-loaded global config; loaded from disk when None.
        ignore_errors: when True, feature errors do not abort installs.
        """
        # base logging object to log instances
        self.logger = logger or self._build_logger(level=logging_level)
        if logging_level == logging.DEBUG:
            self.logger.info("Starting in debug mode...")
        # the sprinter namespace
        self.sprinter_namespace = sprinter_namespace or 'sprinter'
        # the root directory which sprinter installs sandboxable files too
        self.root = root or os.path.expanduser(os.path.join("~", ".%s" % self.sprinter_namespace))
        self.ignore_errors = ignore_errors
        # path to the directory to install global files
        self.global_path = os.path.join(self.root, ".global")
        self.global_config_path = os.path.join(self.global_path, "config.cfg")
        self.global_config = global_config or load_global_config(self.global_config_path)
        self.shell_util_path = os.path.join(self.global_path, "utils.sh")
        self.main_manifest = None
        # a dictionary of the errors associated with features.
        # The key is a tuple of feature name and formula, while the value is an instance.
        self._error_dict = defaultdict(list)
    @warmup
    def install(self):
        """ Install the environment """
        self.phase = PHASE.INSTALL
        # An existing directory means this namespace was installed before;
        # reload its manifest and delegate to update() instead.
        if not self.directory.new:
            self.logger.info("Namespace %s directory already exists!" % self.namespace)
            self.source = load_manifest(self.directory.manifest_path)
            return self.update()
        try:
            self.logger.info("Installing environment %s..." % self.namespace)
            self.directory.initialize()
            self.install_sandboxes()
            self.instantiate_features()
            self.grab_inputs()
            self._specialize()
            for feature in self.features.run_order:
                self.run_action(feature, 'sync')
            self.inject_environment_config()
            self._finalize()
        except Exception:
            self.logger.debug("", exc_info=sys.exc_info())
            self.logger.info("An error occured during installation!")
            if not self.ignore_errors:
                # Roll back the partial install, then re-raise the original
                # exception with its traceback preserved.
                self.clear_all()
                self.logger.info("Removing installation %s..." % self.namespace)
                self.directory.remove()
                et, ei, tb = sys.exc_info()
                reraise(et, ei, tb)
    @warmup
    @install_required
    def update(self, reconfigure=False):
        """ update the environment

        reconfigure: when True, re-prompt for all inputs instead of copying
        the previously-saved values from source to target.
        """
        try:
            self.phase = PHASE.UPDATE
            self.logger.info("Updating environment %s..." % self.namespace)
            self.install_sandboxes()
            self.instantiate_features()
            # We don't grab inputs, only on install
            # updates inputs are grabbed on demand
            # self.grab_inputs(reconfigure=reconfigure)
            if reconfigure:
                self.grab_inputs(reconfigure=True)
            else:
                self._copy_source_to_target()
            self._specialize(reconfigure=reconfigure)
            for feature in self.features.run_order:
                self.run_action(feature, 'sync')
            self.inject_environment_config()
            self._finalize()
        except Exception:
            # Log the full traceback at debug level, then re-raise.
            self.logger.debug("", exc_info=sys.exc_info())
            et, ei, tb = sys.exc_info()
            reraise(et, ei, tb)
    @warmup
    @install_required
    def remove(self):
        """ remove the environment """
        try:
            self.phase = PHASE.REMOVE
            self.logger.info("Removing environment %s..." % self.namespace)
            self.instantiate_features()
            self._specialize()
            for feature in self.features.run_order:
                try:
                    self.run_action(feature, 'sync')
                except FormulaException:
                    # continue trying to remove any remaining features.
                    pass
            self.clear_all()
            self.directory.remove()
            self.injections.commit()
            # Surface a prominent warning if any feature failed to remove
            # cleanly: leftover artifacts may remain on the system.
            if self.error_occured:
                self.logger.error(warning_template)
                self.logger.error(REMOVE_WARNING)
        except Exception:
            self.logger.debug("", exc_info=sys.exc_info())
            et, ei, tb = sys.exc_info()
            reraise(et, ei, tb)
    @warmup
    @install_required
    def deactivate(self):
        """ deactivate the environment """
        try:
            self.phase = PHASE.DEACTIVATE
            self.logger.info("Deactivating environment %s..." % self.namespace)
            # Do not rewrite the stored config during (de)activation.
            self.directory.rewrite_config = False
            self.instantiate_features()
            self._specialize()
            for feature in self.features.run_order:
                self.logger.info("Deactivating %s..." % feature[0])
                self.run_action(feature, 'deactivate')
            self.clear_all()
            self._finalize()
        except Exception:
            self.logger.debug("", exc_info=sys.exc_info())
            et, ei, tb = sys.exc_info()
            reraise(et, ei, tb)
    @warmup
    @install_required
    def activate(self):
        """ activate the environment """
        try:
            self.phase = PHASE.ACTIVATE
            self.logger.info("Activating environment %s..." % self.namespace)
            # Do not rewrite the stored config during (de)activation.
            self.directory.rewrite_config = False
            self.instantiate_features()
            self._specialize()
            for feature in self.features.run_order:
                self.logger.info("Activating %s..." % feature[0])
                self.run_action(feature, 'activate')
            self.inject_environment_config()
            self._finalize()
        except Exception:
            self.logger.debug("", exc_info=sys.exc_info())
            et, ei, tb = sys.exc_info()
            reraise(et, ei, tb)
    @warmup
    def validate(self):
        """ Validate the target environment """
        self.phase = PHASE.VALIDATE
        self.logger.info("Validating %s..." % self.namespace)
        self.instantiate_features()
        # Build the substitution context (install dirs, root dir, node name)
        # the target manifest needs before running each feature's validation.
        context_dict = {}
        if self.target:
            for s in self.target.formula_sections():
                context_dict["%s:root_dir" % s] = self.directory.install_directory(s)
            context_dict['config:root_dir'] = self.directory.root_dir
            context_dict['config:node'] = system.NODE
            self.target.add_additional_context(context_dict)
        for feature in self.features.run_order:
            # run_if_error=True: report every feature's problems, not just the first.
            self.run_action(feature, 'validate', run_if_error=True)
    @warmup
    def inject_environment_config(self):
        """Inject sprinter's setup lines into the user's shell config files.

        Honors per-shell opt-in from the global config; on debian GUIs a
        .profile injection is used instead.
        """
        if not self.do_inject_environment_config:
            return
        for shell in SHELL_CONFIG:
            if shell == 'gui':
                if system.is_debian():
                    self._inject_config_source(".gui", SHELL_CONFIG['gui']['debian'])
            else:
                if (self.global_config.has_option('shell', shell)
                        and lib.is_affirmative(self.global_config.get('shell', shell))):
                    rc_file, rc_path = self._inject_config_source(".rc", SHELL_CONFIG[shell]['rc'])
                    env_file, env_path = self._inject_config_source(".env", SHELL_CONFIG[shell]['env'])
                    # If an rc file is sourced by an env file, we should alert the user.
                    if (self.phase is PHASE.INSTALL
                            and self.injections.in_noninjected_file(env_path, rc_file)
                            and self.global_injections.in_noninjected_file(env_path, rc_file)):
                        self.logger.info("You appear to be sourcing %s from inside %s." % (rc_file, env_file))
                        self.logger.info("Please ensure it is wrapped in a #SPRINTER_OVERRIDES block " +
                                         "to avoid repetitious operations!")
                    full_rc_path = os.path.expanduser(os.path.join("~", rc_file))
                    full_env_path = os.path.expanduser(os.path.join("~", env_file))
                    # Either have the env file source the rc file, or clear
                    # any previously-injected sourcing line.
                    if lib.is_affirmative(self.global_config.get('global', 'env_source_rc')):
                        self.global_injections.inject(
                            full_env_path,
                            source_template % (full_rc_path, full_rc_path))
                    else:
                        self.global_injections.inject(full_env_path, '')
                    if system.is_osx() and not self.injections.in_noninjected_file(env_path, rc_file):
                        if self.phase is PHASE.INSTALL:
                            self.logger.info("On OSX, login shell are the default, which only source config files")
    @warmup
    def clear_all(self):
        """ clear all files that were to be injected """
        self.injections.clear_all()
        # Also scrub every shell config file sprinter knows about.
        for config_file in CONFIG_FILES:
            self.injections.clear(os.path.join("~", config_file))
    def install_sandboxes(self):
        """Ensure required package managers exist (currently brew on OSX).

        Installs a sandboxed brew unless the target opts into global package
        managers, in which case a system-wide brew is offered interactively.
        """
        if self.target:
            if system.is_osx():
                if not self.target.is_affirmative('config', 'use_global_packagemanagers'):
                    self._install_sandbox('brew', brew.install_brew)
                elif lib.which('brew') is None:
                    install_brew = lib.prompt(
                        "Looks like you don't have brew, " +
                        "which is sprinter's package manager of choice for OSX.\n"
                        "Would you like sprinter to install brew for you?",
                        default="yes", boolean=True)
                    if install_brew:
                        # /usr/local must exist and be user-writable for brew.
                        lib.call("sudo mkdir -p /usr/local/", stdout=None,
                                 output_log_level=logging.DEBUG)
                        lib.call("sudo chown -R %s /usr/local/" % getpass.getuser(),
                                 output_log_level=logging.DEBUG, stdout=None)
                        brew.install_brew('/usr/local')
    def instantiate_features(self):
        """Build the FeatureDict from source/target manifests (idempotent)."""
        # Already instantiated on this object: nothing to do.
        if hasattr(self, 'features') and self.features:
            return
        self.features = FeatureDict(self,
                                    self.source, self.target,
                                    self.global_path)
    def run_feature(self, feature, action):
        """Run *action* on every feature whose key contains *feature*."""
        for k in self.features.run_order:
            # k is a composite key; substring match selects the feature(s).
            if feature in k:
                self.run_action(k, action, run_if_error=True)
    def write_debug_log(self, file_path):
        """ Write the debug log to a file

        The file receives, in order: system info, the captured debug stream,
        the flat error list, then per-feature error details.
        """
        with open(file_path, "wb+") as fh:
            fh.write(system.get_system_info().encode('utf-8'))
            # writing to debug stream
            self._debug_stream.seek(0)
            fh.write(self._debug_stream.read().encode('utf-8'))
            fh.write("The following errors occured:\n".encode('utf-8'))
            for error in self._errors:
                fh.write((error + "\n").encode('utf-8'))
            # _error_dict: (feature name, formula) -> list of error strings
            for k, v in self._error_dict.items():
                if len(v) > 0:
                    fh.write(("Error(s) in %s with formula %s:\n" % k).encode('utf-8'))
                    for error in v:
                        fh.write((error + "\n").encode('utf-8'))
def write_manifest(self):
""" Write the manifest to the file """
if os.path.exists(self.directory.manifest_path):
self.main_manifest.write(open(self.directory.manifest_path, "w+"))
    def message_failure(self):
        """ return a failure message, if one exists """
        # main_manifest may still be None (e.g. warmup never ran); guard
        # before calling .get on it.
        if not isinstance(self.main_manifest, Manifest):
            return None
        return self.main_manifest.get('config', 'message_failure', default=None)
def message_success(self):
""" return a success message, if one exists """
return self.main_manifest.get('config', 'message_success', default=None)
def warmup(self):
""" initialize variables necessary to perform a sprinter action """
self.logger.debug("Warming up...")
try:
if not isinstance(self.source, Manifest) and self.source:
self.source = load_manifest(self.source)
if not isinstance(self.target, Manifest) and self.target:
self.target = load_manifest(self.target)
self.main_manifest = self.target or self.source
except lib.BadCredentialsException:
e = sys.exc_info()[1]
self.logger.error(str(e))
raise SprinterException("Fatal error! Bad credentials to grab manifest!")
if not getattr(self, 'namespace', None):
if self.target:
self.namespace = self.target.namespace
elif not self.namespace and self.source:
self.namespace = self.source.namespace
else:
raise SprinterException("No environment name has been specified!")
self.directory_root = self.custom_directory_root
if not self.directory:
if not self.directory_root:
self.directory_root = os.path.join(self.root, self.namespace)
self.directory = Directory(self.directory_root,
shell_util_path=self.shell_util_path)
if not self.injections:
self.injections = Injections(wrapper="%s_%s" | |
# Source: JustinTW/pulumi-eks — python/pulumi_eks/_inputs.py
# coding=utf-8
# *** WARNING: this file was generated by pulumi-gen-eks. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
from .vpc_cni import VpcCni
import pulumi_aws
import pulumi_kubernetes
__all__ = [
'ClusterNodeGroupOptionsArgs',
'CoreDataArgs',
'CreationRoleProviderArgs',
'FargateProfileArgs',
'KubeconfigOptionsArgs',
'RoleMappingArgs',
'StorageClassArgs',
'TaintArgs',
'UserMappingArgs',
'VpcCniOptionsArgs',
]
@pulumi.input_type
class ClusterNodeGroupOptionsArgs:
def __init__(__self__, *,
             ami_id: Optional[pulumi.Input[str]] = None,
             auto_scaling_group_tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
             bootstrap_extra_args: Optional[pulumi.Input[str]] = None,
             cloud_formation_tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
             cluster_ingress_rule: Optional[pulumi.Input['pulumi_aws.ec2.SecurityGroupRule']] = None,
             desired_capacity: Optional[pulumi.Input[int]] = None,
             encrypt_root_block_device: Optional[pulumi.Input[bool]] = None,
             extra_node_security_groups: Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_aws.ec2.SecurityGroup']]]] = None,
             gpu: Optional[pulumi.Input[bool]] = None,
             instance_profile: Optional[pulumi.Input['pulumi_aws.iam.InstanceProfile']] = None,
             instance_type: Optional[pulumi.Input[str]] = None,
             key_name: Optional[pulumi.Input[str]] = None,
             kubelet_extra_args: Optional[pulumi.Input[str]] = None,
             labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
             max_size: Optional[pulumi.Input[int]] = None,
             min_size: Optional[pulumi.Input[int]] = None,
             node_associate_public_ip_address: Optional[pulumi.Input[bool]] = None,
             node_public_key: Optional[pulumi.Input[str]] = None,
             node_root_volume_size: Optional[pulumi.Input[int]] = None,
             node_security_group: Optional[pulumi.Input['pulumi_aws.ec2.SecurityGroup']] = None,
             node_subnet_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
             node_user_data: Optional[pulumi.Input[str]] = None,
             node_user_data_override: Optional[pulumi.Input[str]] = None,
             spot_price: Optional[pulumi.Input[str]] = None,
             taints: Optional[pulumi.Input[Mapping[str, pulumi.Input['TaintArgs']]]] = None,
             version: Optional[pulumi.Input[str]] = None):
    """
    Describes the configuration options accepted by a cluster to create its own node groups.

    Every argument is optional; only arguments the caller explicitly supplies
    are stored on the resource. Detailed per-field documentation lives on the
    corresponding property getters of this class.

    Notable constraints: `amiId` and `gpu` are mutually exclusive; supply the
    tags in `autoScalingGroupTags` or `cloudFormationTags` but not both; the
    `nodeSecurityGroup` option and the cluster option `nodeSecurityGroupTags`
    are mutually exclusive.
    """
    # (attribute name, value) pairs in declaration order; pulumi.set is
    # invoked only for arguments that were actually provided.
    provided = (
        ("ami_id", ami_id),
        ("auto_scaling_group_tags", auto_scaling_group_tags),
        ("bootstrap_extra_args", bootstrap_extra_args),
        ("cloud_formation_tags", cloud_formation_tags),
        ("cluster_ingress_rule", cluster_ingress_rule),
        ("desired_capacity", desired_capacity),
        ("encrypt_root_block_device", encrypt_root_block_device),
        ("extra_node_security_groups", extra_node_security_groups),
        ("gpu", gpu),
        ("instance_profile", instance_profile),
        ("instance_type", instance_type),
        ("key_name", key_name),
        ("kubelet_extra_args", kubelet_extra_args),
        ("labels", labels),
        ("max_size", max_size),
        ("min_size", min_size),
        ("node_associate_public_ip_address", node_associate_public_ip_address),
        ("node_public_key", node_public_key),
        ("node_root_volume_size", node_root_volume_size),
        ("node_security_group", node_security_group),
        ("node_subnet_ids", node_subnet_ids),
        ("node_user_data", node_user_data),
        ("node_user_data_override", node_user_data_override),
        ("spot_price", spot_price),
        ("taints", taints),
        ("version", version),
    )
    for attr_name, attr_value in provided:
        if attr_value is not None:
            pulumi.set(__self__, attr_name, attr_value)
@property
@pulumi.getter(name="amiId")
def ami_id(self) -> Optional[pulumi.Input[str]]:
    """
    The AMI ID to use for the worker nodes.
    Defaults to the latest recommended EKS Optimized Linux AMI from the AWS Systems Manager Parameter Store.
    Note: `amiId` and `gpu` are mutually exclusive.
    See for more details:
    - https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-ami.html.
    """
    return pulumi.get(self, "ami_id")
@ami_id.setter
def ami_id(self, value: Optional[pulumi.Input[str]]):
    """Set the AMI ID to use for the worker nodes."""
    pulumi.set(self, "ami_id", value)
@property
@pulumi.getter(name="autoScalingGroupTags")
def auto_scaling_group_tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
The tags to apply to the NodeGroup's AutoScalingGroup in the CloudFormation Stack.
Per AWS, all stack-level tags, including automatically created tags, | |
<filename>scaaml/capture/scope/ps6424e.py<gh_stars>0
"""This code is modified from the ChipWhisperer project:
http://www.github.com/newaetech/chipwhisperer"""
from __future__ import absolute_import
import ctypes
from decimal import Decimal, ROUND_HALF_DOWN
import traceback
from typing import OrderedDict
from chipwhisperer.capture.api.cwcommon import ChipWhispererCommonInterface
from chipwhisperer.common.utils import util
import numpy as np
from picosdk.ps6000a import ps6000a as ps
from picosdk.PicoDeviceEnums import picoEnum
from picosdk.functions import adc2mV, assert_pico_ok
from picosdk.errors import PicoSDKCtypesError
def assert_ok(status):
    """Check assert_pico_ok and if it raises change PicoSDKCtypesError to
    IOError.

    Args:
      status: Status code returned by a PicoScope API call.

    Raises:
      IOError: If `status` is not PICO_OK.
    """
    try:
        assert_pico_ok(status)
    except PicoSDKCtypesError as e:
        # Forward the PicoSDK error message instead of raising a bare
        # IOError, so the failure cause is visible to the caller.
        raise IOError(*e.args) from e
class CaptureSettings(object):
    """Capture (trace) channel settings for a PicoScope 6000E-series scope.

    Stores the selected channel, voltage range, probe attenuation and
    coupling, and converts between the human-readable values and the
    constants expected by the PicoScope 6000A API.
    """
    _name = "Capture Setting"
    # Human-readable coupling name -> PicoScope API coupling constant.
    CHANNEL_COUPLINGS = {
        "DC50": picoEnum.PICO_COUPLING["PICO_DC_50OHM"],
        "DC": picoEnum.PICO_COUPLING["PICO_DC"],
        "AC": picoEnum.PICO_COUPLING["PICO_AC"],
    }
    # Channel name -> channel id. "MaxChannels" is the number of analog
    # channels, not a selectable channel.
    CHANNELS = {
        "A": 0,
        "B": 1,
        "C": 2,
        "D": 3,
        "External": 4,
        "MaxChannels": 4,
        "TriggerAux": 5
    }
    # Supported voltage ranges with their API constants and labels.
    CHANNEL_RANGE = [
        {
            "rangeV": 20E-3,
            "apivalue": 1,
            "rangeStr": "20 mV"
        },
        {
            "rangeV": 50E-3,
            "apivalue": 2,
            "rangeStr": "50 mV"
        },
        {
            "rangeV": 100E-3,
            "apivalue": 3,
            "rangeStr": "100 mV"
        },
        {
            "rangeV": 200E-3,
            "apivalue": 4,
            "rangeStr": "200 mV"
        },
        {
            "rangeV": 500E-3,
            "apivalue": 5,
            "rangeStr": "500 mV"
        },
        {
            "rangeV": 1.0,
            "apivalue": 6,
            "rangeStr": "1 V"
        },
        {
            "rangeV": 2.0,
            "apivalue": 7,
            "rangeStr": "2 V"
        },
        {
            "rangeV": 5.0,
            "apivalue": 8,
            "rangeStr": "5 V"
        },
        {
            "rangeV": 10.0,
            "apivalue": 9,
            "rangeStr": "10 V"
        },
        {
            "rangeV": 20.0,
            "apivalue": 10,
            "rangeStr": "20 V"
        },
    ]
    ATTENUATION = {
        "1:1": 1,
        "1:10": 10,
    }
    REV_ATTENUATION = {1: "1:1", 10: "1:10"}
    def __init__(self):
        # Forward and reverse lookup tables for couplings.
        self._couplings = {}
        self._rev_couplings = {}
        for name, val in self.CHANNEL_COUPLINGS.items():
            self._couplings[name] = val
            self._rev_couplings[val] = name
        # channels: only real analog channels (id < MaxChannels) are
        # selectable through the `channel` property.
        self._ch_list = {}
        self._rev_ch_list = {}
        for channel_name, channel_id in self.CHANNELS.items():
            if channel_id < self.CHANNELS["MaxChannels"]:
                self._ch_list[channel_name] = channel_id
                self._rev_ch_list[channel_id] = channel_name
        # ranges: volts -> label / API value. _ch_range_list is kept sorted
        # so the `range` setter can pick the smallest sufficient range.
        self._ch_range = {}
        self._ch_range_list = []
        self._ch_range_api_value = {}
        for key in self.CHANNEL_RANGE:
            self._ch_range[key["rangeV"]] = key["rangeStr"]
            self._ch_range_list.append(key["rangeV"])
            self._ch_range_api_value[key["rangeV"]] = key["apivalue"]
        self._ch_range_list.sort()
        # Defaults: channel A, 1:1 probe, AC coupling, 5 V range.
        self._channel = 0
        self._probe_attenuation = 1
        self._coupling = self._couplings["AC"]
        self._range: float = 5.0
    @property
    def ps_api_channel(self):
        """Channel for PicoScope API."""
        channel_enums = {
            0: picoEnum.PICO_CHANNEL["PICO_CHANNEL_A"],
            1: picoEnum.PICO_CHANNEL["PICO_CHANNEL_B"],
            2: picoEnum.PICO_CHANNEL["PICO_CHANNEL_C"],
            3: picoEnum.PICO_CHANNEL["PICO_CHANNEL_D"],
        }
        # Ensure that we did not forget any channel
        assert len(channel_enums) == self.CHANNELS["MaxChannels"]
        return channel_enums[self._channel]
    @property
    def channel(self):
        """Human-readable channel name ("A".."D")."""
        return self._rev_ch_list[self._channel]
    @channel.setter
    def channel(self, val):
        if val not in self._ch_list:
            raise ValueError("Unknown channel")
        self._channel = self._ch_list[val]
    @property
    def probe_attenuation(self):
        """Probe attenuation as a string ("1:1" or "1:10")."""
        return self.REV_ATTENUATION[self._probe_attenuation]
    @probe_attenuation.setter
    def probe_attenuation(self, val):
        if val not in self.ATTENUATION:
            raise ValueError("Unsupported value")
        self._probe_attenuation = self.ATTENUATION[val]
    @property
    def coupling_picoapi(self):
        """Coupling constant for the PicoScope API."""
        return self._coupling
    @property
    def coupling(self):
        """Human-readable coupling name ("AC", "DC" or "DC50")."""
        return self._rev_couplings[self._coupling]
    @coupling.setter
    def coupling(self, val):
        if val not in self._couplings:
            raise ValueError("Unsupported value")
        self._coupling = self._couplings[val]
    @property
    def ps_api_range(self):
        """Range value for PicoScope API."""
        return self._ch_range_api_value[self._range]
    @property
    def range(self):
        """Human readable range voltage string."""
        return self._ch_range[self._range]
    @range.setter
    def range(self, val):
        # Bug fix: also accept integer volt values (e.g. `scope.range = 2`);
        # the previous float-only check rejected them for no benefit.
        # bool is excluded because it is a subclass of int.
        if isinstance(val, int) and not isinstance(val, bool):
            val = float(val)
        if not isinstance(val, float):
            raise ValueError("Unsupported value (should be float)")
        # Find the smallest supported range that is higher than val
        for r in self._ch_range_list:
            if val <= r:
                self._range = r
                return
        raise ValueError(f"Unsupported value (too large), got {val}, maximum "
                         f"is {self._ch_range_list[-1]}")
    def _dict_repr(self):
        # Ordered mapping shared by repr(), str() and dict_repr().
        ret = OrderedDict()
        ret["channel"] = self.channel
        ret["range"] = self.range
        ret["probe_attenuation"] = self.probe_attenuation
        ret["coupling"] = self.coupling
        return ret
    def dict_repr(self):
        """Public dictionary representation."""
        return self._dict_repr()
    def __repr__(self):
        return util.dict_to_str(self._dict_repr())
    def __str__(self):
        return self.__repr__()
class TriggerSettings(CaptureSettings):
    """Trigger channel settings.

    Extends CaptureSettings with a trigger direction and level. Defaults:
    channel B, DC coupling, 5 V range, rising edge at 2 V.
    """
    _name = "Trigger Setting"
    # Human-readable direction name -> PicoScope API threshold direction.
    THRESHOLD_DIRECTION = {
        "Above":
            picoEnum.PICO_THRESHOLD_DIRECTION["PICO_ABOVE"],
        "Below":
            picoEnum.PICO_THRESHOLD_DIRECTION["PICO_BELOW"],
        "Rising":
            picoEnum.PICO_THRESHOLD_DIRECTION["PICO_RISING"],
        "Falling":
            picoEnum.PICO_THRESHOLD_DIRECTION["PICO_FALLING"],
        "RiseOrFall":
            picoEnum.PICO_THRESHOLD_DIRECTION["PICO_RISING_OR_FALLING"],
    }
    def __init__(self):
        CaptureSettings.__init__(self)
        # Forward and reverse lookup tables for trigger directions.
        self._trig_dir = {}
        self._rev_trig_dir = {}
        for name, val in self.THRESHOLD_DIRECTION.items():
            self._trig_dir[name] = val
            self._rev_trig_dir[val] = name
        # Trigger defaults (override the capture defaults set above).
        self._channel = 1
        self._range = 5.0
        self._coupling = self._couplings["DC"]
        self._trigger_direction = self._trig_dir["Rising"]
        self._trigger_level = 2  # V
    @property
    def ps_api_trigger_direction(self):
        """Trigger direction compatible with PicoScope API."""
        return self._trigger_direction
    @property
    def ps_api_trigger_level(self) -> int:
        """Trigger level compatible with PicoScope simple trigger API. Returns
        trigger level in mV.
        """
        # From V to mV and convert to integer
        return int(1_000 * self._trigger_level)
    @property
    def trigger_level(self) -> float:
        """Return trigger level value in V."""
        return self._trigger_level
    @trigger_level.setter
    def trigger_level(self, val: float) -> None:
        """Set trigger level in V.
        Args:
            val (float): The level in V at which to trigger.
        """
        self._trigger_level = val
    @property
    def trigger_direction(self):
        """Human-readable trigger direction (a THRESHOLD_DIRECTION key)."""
        return self._rev_trig_dir[self._trigger_direction]
    @trigger_direction.setter
    def trigger_direction(self, val):
        if val not in self._trig_dir:
            # Typo fix: message previously read "Unupported value"; now
            # consistent with the other setters.
            raise ValueError("Unsupported value")
        self._trigger_direction = self._trig_dir[val]
    def _dict_repr(self):
        # Capture-settings representation extended with trigger info.
        ret = OrderedDict()
        ret["channel"] = self.channel
        ret["range"] = self.range
        ret["probe_attenuation"] = self.probe_attenuation
        ret["coupling"] = self.coupling
        ret["trigger_level"] = self.trigger_level
        ret["trigger_direction"] = self.trigger_direction
        return ret
class Pico6424E(ChipWhispererCommonInterface):
"""Class that interacts with the Picoscope 6424E oscilloscope."""
_name = "Picoscope 6424E series 6000a (picosdk)"
_NUM_CHANNELS = 4 # Number of analog channels
# Resolutions 8bit and 10bit work, but 12bit does not seem to be working
# (PICO_CHANNEL_COMBINATION_NOT_VALID_IN_THIS_RESOLUTION)
_RESOLUTION = picoEnum.PICO_DEVICE_RESOLUTION["PICO_DR_10BIT"]
DOWNSAMPLING_RATIO = 1
def __init__(self, *args, **kwargs):
    """Create an unconnected scope wrapper with default capture and trigger
    settings (500 samples, no pre-trigger offset, 1 MS/s)."""
    del args  # unused
    del kwargs  # unused
    super().__init__()
    self.ps_handle = ctypes.c_int16()
    self.trace = CaptureSettings()
    self.trigger = TriggerSettings()
    # Capture geometry: number of samples and pre-trigger offset.
    self._sample_length = 500
    self._sample_offset = 0
    # Sampling speed and the derived PicoScope timebase (seconds / sample).
    self._sample_rate = 1E6
    self._timebase = Pico6424E._get_timebase(self._sample_rate)
    # _buffers[0] holds the trace samples, _buffers[1] the trigger samples.
    self._buffers = [[], []]
    # ChipWhisperer API compatibility: connection flag and ADC limit used
    # to convert raw readings to mV.
    self.connectStatus = False  # pylint: disable=C0103
    self._max_adc = ctypes.c_int16()
@staticmethod
def _get_timebase(sample_rate: float):
    """Convert a sample rate in Hz to a PicoScope 6000A API timebase.

    Args:
      sample_rate (float): Samples per second (in Hz), at most 5 GHz.

    Returns: Timebase (seconds per sample) as a ctypes.c_uint32 suitable
    for ps6000aRunBlock.

    Per the PicoScope 6000 Series (A API) Programmer's Guide the mapping
    is: timebase n < 5 -> 2**n / 5_000_000_000 s, n > 4 ->
    (n - 4) / 156_250_000 s (so timebase 0 is 200 ps, 5 is 6.4 ns, and
    2**32 - 1 is ~6.87 s).
    """
    if sample_rate > 5e9:
        raise ValueError("This scope support at most 5GHz sample_rate.")
    # Work in Decimal to avoid floating point rounding artifacts.
    seconds_per_sample = 1 / Decimal(sample_rate)
    if seconds_per_sample >= Decimal("6.4e-9"):
        # Large-timebase branch: invert (n - 4) / 156_250_000.
        timebase = 156_250_000 * seconds_per_sample + 4
        timebase = timebase.to_integral_exact(rounding=ROUND_HALF_DOWN)
        return ctypes.c_uint32(int(timebase))
    # Small-timebase branch (n <= 4): pick the largest n whose duration
    # still fits within the requested sampling period.
    shortest = Decimal("0.2e-9")  # 200 ps, timebase 0
    for exponent in (4, 3, 2, 1, 0):
        if seconds_per_sample >= (2 ** exponent) * shortest:
            return ctypes.c_uint32(exponent)
def con(self, sn=None):
    """Open the first attached PicoScope, read its ADC limits and configure
    channels and trigger.

    Args:
        sn: Serial number; unused, the first scope found is opened.

    Returns:
        True on success, False on any failure (the scope is disconnected
        again in that case).
    """
    del sn # unused
    try:
        # Open the scope and get the corresponding handle self.ps_handle.
        # resolution 8, 10, 12 bit
        assert_ok(
            ps.ps6000aOpenUnit(
                ctypes.byref(self.ps_handle), # handle
                None, # serial, open the first scope found
                self._RESOLUTION, # resolution
            ))
        # ps6000aOpenUnit could return an indication of a needed firmware
        # update, but picosdk.constants.PICO_STATUS raises KeyError on
        # PICO_FIRMWARE_UPDATE_REQUIRED_TO_USE_DEVICE_WITH_THIS_DRIVER.
        # Get analog to digital converter limits.
        assert_ok(
            ps.ps6000aGetAdcLimits(
                self.ps_handle, # handle
                self._RESOLUTION, # resolution
                ctypes.byref(ctypes.c_int16()), # minADC (not kept)
                ctypes.byref(self._max_adc), # maxADC
            ))
        # Set channels and trigger.
        self._set_channels()
        self.connectStatus = True
        return True
    except Exception: # pylint: disable=W0703
        # Whatever happened call disconnect.
        # Print stack traceback.
        traceback.print_exc()
        # Disconnect the scope if the exception was raised during setting
        # channels.
        self.dis()
        return False
def dis(self):
    """Stop any capture in progress and close the scope connection."""
    if self.ps_handle.value > 0:
        # A positive handle implies a previously successful connect.
        assert self.connectStatus
        # Abort data capture, then release the device.
        assert_ok(ps.ps6000aStop(self.ps_handle))
        assert_ok(ps.ps6000aCloseUnit(self.ps_handle))
        # Invalidate the handle and mark the scope as disconnected.
        self.ps_handle.value = 0
        self.connectStatus = False
    # ScopeTemplate expects True to be returned.
    return True
def arm(self):
    """Prepare the scope for capturing.

    Starts a block capture using the configured pre-/post-trigger sample
    counts (scaled by DOWNSAMPLING_RATIO) and timebase.

    Raises:
        Exception: If the scope is not connected.
        IOError: If the PicoScope API call fails.
    """
    # Check if this scope is connected.
    if self.connectStatus is False:
        raise Exception(
            f"Scope {self._name} is not connected. Connect it first.")
    # Run the capture block
    assert_ok(
        ps.ps6000aRunBlock(
            self.ps_handle, # handle
            self.DOWNSAMPLING_RATIO *
            self._sample_offset, # Pre-trigger samples
            self.DOWNSAMPLING_RATIO *
            self._sample_length, # Post-trigger samples
            self._timebase, # timebase
            ctypes.byref(ctypes.c_double(0)), # timeIndisposedMs
            0, # segmentIndex
            None, # lpReady callback (polled via ps6000aIsReady instead)
            None, # pParameter
        ))
def capture(self, poll_done: bool = False) -> bool:
"""Capture one trace and return True if timeout has happened
(possible capture failure).
Args:
poll_done: Not supported in PicoScope, but a part of API.
Raises: IOError if unknown failure.
Returns: True if timeout happened, False otherwise.
"""
del poll_done # unused
# Wait until the result is ready
ready = ctypes.c_int16(0)
check = ctypes.c_int16(0)
while ready.value == check.value:
ps.ps6000aIsReady(self.ps_handle, ctypes.byref(ready))
# Retrieve the values
overflow = ctypes.c_int16()
max_samples = ctypes.c_int32(self._total_samples)
| |
# Mar21, 2022
##
#---------------------------------------------------------------------
# SERVER only input all files (.bam and .fa) output MeH matrix in .csv
# August 3, 2021 clean
# FINAL github
#---------------------------------------------------------------------
import random
import math
import pysam
import csv
import sys
import pandas as pd
import numpy as np
import datetime
import time as t
from collections import Counter, defaultdict, OrderedDict
#---------------------------------------
# Functions definition
#---------------------------------------
def open_log(fname):
    """Open `fname` for writing (line-buffered) and stash the handle on the
    function object for logm()/close_log() to use."""
    open_log.logfile = open(fname, mode='w', buffering=1)
def logm(message):
    """Write a timestamped `message` to stdout and to the open log file."""
    log_message = "[%s] %s\n" % (datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), message)
    # Bug fix: the original `print(log_message),` was a Python-2 idiom; in
    # Python 3 the trailing comma no longer suppresses the newline, so every
    # log line was followed by a blank line. log_message already ends with
    # "\n", so print nothing extra.
    print(log_message, end="")
    open_log.logfile.write(log_message)
def close_log():
    """Close the log file previously opened via open_log()."""
    open_log.logfile.close()
# Count # of windows with enough reads for complete/impute
def coverage(methbin, complete, w):
    """Percentage of width-`w` windows with enough reads to analyse.

    Args:
        methbin: DataFrame of methylation states, one read per row; the
            'Qname' column is ignored.
        complete: Passed through to enough_reads (True = heterogeneity
            estimation, False = imputation criterion).
        w: Window width in CpG sites.

    Returns:
        Percentage (0-100) of sliding windows satisfying enough_reads,
        or 0 when fewer than `w` sites are available.
    """
    meth = methbin.iloc[:, methbin.columns != 'Qname']
    if len(meth.columns) < w:
        return 0
    n_windows = len(meth.columns) - w + 1
    usable = 0
    for start in range(n_windows):
        win = meth.iloc[:, start:start + w].copy()
        if enough_reads(window=win, complete=complete, w=w):
            usable += 1
    return usable / n_windows * 100
# Check whether a window has enough reads for complete/impute
def enough_reads(window, w, complete):
    """Return True when a window has enough reads for the requested task.

    Args:
        window: 2-D numpy array of methylation states (np.nan = missing).
        w: Window width (number of CpG columns).
        complete: True for heterogeneity estimation (needs 2**w fully
            observed reads), False for imputation (needs 2**(w-2) fully
            observed reads plus at least one read missing exactly one site).
    """
    fully_observed = np.isnan(window).sum(axis=1) == 0
    if complete:
        return fully_observed.sum() >= 2 ** w
    one_missing = np.isnan(window).sum(axis=1) == 1
    return fully_observed.sum() >= 2 ** (w - 2) and one_missing.sum() > 0
def impute(window,w):
    """Impute single missing CpG sites in a read window (in place).

    Args:
        window: 2-D numpy array of methylation states (np.nan = missing).
            Modified in place.
        w: Window width (number of CpG columns).

    Returns:
        The same array, with reads missing exactly one site filled in.

    Per partially-observed read: if all observed values in the missing
    column agree, copy that value; otherwise copy from a random fully
    observed read agreeing at the other w-1 sites, falling back to a random
    observed value from that column.
    NOTE(review): uses `random` without a fixed seed, so imputation is
    non-deterministic unless the caller seeds the RNG.
    """
    # Row indices of fully observed reads / reads missing exactly one site.
    full_ind=np.where(np.isnan(window).sum(axis=1)==0)[0]
    part_ind=np.where(np.isnan(window).sum(axis=1)==1)[0]
    for i in range(len(part_ind)):
        sam = []
        # which column is nan
        pos=np.where(np.isnan(window[part_ind[i],:]))[0]
        # All observed values in this column agree -> deterministic fill.
        if np.unique(window[np.where(np.invert(np.isnan(window[:,pos])))[0],pos]).shape[0]==1:
            window[part_ind[i],pos]=window[np.where(np.invert(np.isnan(window[:,pos])))[0],pos][0]
        else:
            # Collect fully observed reads that agree at the other w-1 sites.
            for j in range(len(full_ind)):
                if (window[part_ind[i],:]==window[full_ind[j],:]).sum()==w-1:
                    sam.append(j)
            if len(sam)>0:
                # Copy the missing value from one matching read at random.
                s1=random.sample(sam, 1)
                s=window[full_ind[s1],pos]
            else:
                # No matching read: sample any observed value in this column.
                s=random.sample(window[np.where(np.invert(np.isnan(window[:,pos])))[0],pos].tolist(), k=1)[0]
            window[part_ind[i],pos]=np.float64(s)
    return window
def getcomplete(window, w):
    """Return only the reads (rows) of `window` without any missing site.

    `w` is unused but kept for signature compatibility with callers.
    """
    complete_rows = np.where(np.isnan(window).sum(axis=1) == 0)[0]
    return window[complete_rows, :]
def PattoDis(mat, dist=1):
    """Pairwise distance matrix between methylation patterns.

    Args:
        mat: DataFrame with one pattern per row.
        dist: 1 for Hamming distance, anything else for the weighted
            degree kernel distance.

    Returns:
        Symmetric numpy array of shape (n_patterns, n_patterns) with a
        zero diagonal.
    """
    n = mat.shape[0]
    dis = np.zeros((n, n))
    # Only compute the lower triangle and mirror it.
    for i in range(n):
        for j in range(i):
            if dist == 1:
                d = Ham_d(mat.iloc[i, ], mat.iloc[j, ])
            else:
                d = WDK_d(mat.iloc[i, ], mat.iloc[j, ])
            dis[i, j] = dis[j, i] = d
    return dis
def Ham_d(pat1, pat2):
    """Hamming distance: number of positions where the patterns differ."""
    mismatches = pat1 != pat2
    return mismatches.sum()
def WDK_d(pat1, pat2):
    """Weighted degree kernel distance between two equal-length patterns.

    For every substring length k+1 and starting position, adds weight
    (w - k - 1) when the corresponding substrings of pat1 and pat2 differ.
    """
    w = pat1.shape[0]
    total = 0
    for k in range(w):  # substring length is k + 1
        weight = w - k - 1
        for start in range(w - k):
            stop = start + k + 1
            if not np.all(pat1[start:stop] == pat2[start:stop]):
                total += weight
    return total
# input a window of w CGs and output a list of proportions with starting genomic location and genomic distance across
def window_summ(pat, start, dis, chrom):
    """Summarise a window of w CpGs as per-pattern read counts.

    Args:
        pat: DataFrame of methylation patterns, one read per row, one CpG
            per column (d columns).
        start: Starting genomic location of the window.
        dis: Genomic distance spanned by the window.
        chrom: Chromosome name.

    Returns:
        One-row DataFrame with columns 'chrom', 'pos', 'p01'..'p{2**d}'
        (count of reads matching each of the 2**d binary patterns) and
        'dis'.

    The original implementation hand-wrote the output for d == 3, 4, 5, 6
    and crashed with UnboundLocalError for any other width; this version
    builds the columns programmatically (identical output for d = 3..6).
    """
    m = np.shape(pat)[0]
    d = np.shape(pat)[1]
    # Enumerate all 2**d binary patterns, one per row; column i holds bit i
    # of the row index.
    all_pos = np.zeros((2 ** d, d))
    for i in range(d):
        all_pos[:, i] = np.linspace(0, 2 ** d - 1, 2 ** d) % (2 ** (i + 1)) // (2 ** i)
    # Count how many reads match each pattern exactly.
    prob = np.zeros((2 ** d, 1))
    for i in range(2 ** d):
        count = 0
        for j in range(m):
            if (all_pos[i, :] == pat.iloc[j, :]).sum() == d:
                count += 1
        prob[i] = count
    # Assemble the output row; column order matches the original
    # hand-written version: chrom, pos, p01..p{2**d}, dis.
    data = {'chrom': chrom, 'pos': start}
    for i in range(2 ** d):
        data['p%02d' % (i + 1)] = prob[i]
    data['dis'] = dis
    return pd.DataFrame(data)
def MeHperwindow(pat,start,dis,chrom,D,w,optional,MeH=2,dist=1,strand='f'):
count=np.zeros((2**w,1))
m=np.shape(pat)[0]
pat=np.array(pat)
if w==2:
pat = Counter([str(i[0])+str(i[1]) for i in pat.astype(int).tolist()])
count=np.array([float(pat[i]) for i in ['00','10','01','11']])
if w==3:
pat = Counter([str(i[0])+str(i[1])+str(i[2]) for i in pat.astype(int).tolist()])
count=np.array([float(pat[i]) for i in ['000','100','010','110','001','101','011','111']])
if w==4:
pat = Counter([str(i[0])+str(i[1])+str(i[2])+str(i[3]) for i in pat.astype(int).tolist()])
count=np.array([float(pat[i]) for i in ['0000','1000','0100','1100','0010','1010','0110','1110','0001',\
'1001','0101','1101','0011','1011','0111','1111']])
if w==5:
pat = Counter([str(i[0])+str(i[1])+str(i[2])+str(i[3])+str(i[4]) for i in pat.astype(int).tolist()])
count=np.array([float(pat[i]) for i in ['00000','10000','01000','11000','00100','10100','01100','11100','00010',\
'10010','01010','11010','00110','10110','01110','11110','00001','10001','01001','11001','00101',\
'10101','01101','11101','00011','10011','01011','11011','00111','10111','01111','11111']])
if w==6:
pat = Counter([str(i[0])+str(i[1])+str(i[2])+str(i[3])+str(i[4])+str(i[5]) for i in pat.astype(int).tolist()])
count=np.array([float(pat[i]) for i in ['000000','100000','010000','110000','001000','101000','011000','111000','000100',\
'100100','010100','110100','001100','101100','011100','111100','000010','100010','010010','110010','001010',\
'101010','011010','111010','000110', '100110','010110','110110','001110','101110','011110','111110',\
'000001','100001','010001','110001','001001','101001','011001','111001','000101',\
'100101','010101','110101','001101','101101','011101','111101','000011','100011','010011','110011','001011',\
'101011','011011','111011','000111', '100111','010111','110111','001111','101111','011111','111111']])
if MeH==1: # Abundance based
score=(((count/m)**2).sum(axis=0))**(-1)
elif MeH==2: # PWS based
interaction=np.multiply.outer(count/m,count/m).reshape((2**w,2**w))
Q=sum(sum(D*interaction))
#print("Q =",Q)
if Q==0:
score=0
else:
score=(sum(sum(D*(interaction**2)))/(Q**2))**(-0.5)
elif MeH==3: #Phylogeny based
count=count.reshape(2**w)
count=np.concatenate((count[[0]],count))
if dist==1 and w==4:
phylotree=np.append(np.append(np.append(np.append([0],np.repeat(0.5,16)),np.repeat(0.25,6)),[0.5]),np.repeat(0.25,6))
#phylotree=np.repeat(0,1).append(np.repeat(0.5,16)).append(np.repeat(0.25,6)).append(0.5).append(np.repeat(0.25,6))
countn=np.zeros(30)
#count<-rep(0,29)
countn[1:17]=count[[1,9,5,3,2,13,11,10,7,6,4,15,14,12,8,16]]
countn[17]=countn[4]+countn[7]
countn[18]=countn[9]+countn[12]
countn[19]=countn[1]+countn[2]
countn[20]=countn[3]+countn[6]
countn[21]=countn[17]+countn[18]
countn[22]=countn[19]+countn[20]
countn[23]=countn[21]+countn[22]
countn[24]=countn[5]+countn[8]
countn[25]=countn[10]+countn[13]
countn[26]=countn[24]+countn[25]
countn[27]=countn[23]+countn[26]
countn[28]=countn[11]+countn[14]
countn[29]=countn[27]+countn[28]
#Q=sum(sum(phylotree*count))
if dist==2 and w==4:
phylotree=np.append(np.append(np.append(np.append([0],np.repeat(3,16)),np.repeat(1.5,6)),[3.2,0.8]),np.repeat(2,3),np.repeat(1.5,2))
#phylotree=c(rep(3,16),rep(1.5,6),3.2,0.8,rep(2,3),1.5,1.5)
countn=np.zeros(30)
#print(count)
countn[1:17]=count[[1,9,5,3,2,13,11,10,7,6,4,15,14,12,8,16]]
countn[17]=countn[1]+countn[2]
countn[18]=countn[5]+countn[8]
countn[19]=countn[3]+countn[6]
countn[20]=countn[10]+countn[13]
countn[21]=countn[4]+countn[7]
countn[22]=countn[11]+countn[14]
countn[23]=countn[17]+countn[18]
countn[24]=countn[21]+countn[22]
countn[25]=countn[19]+countn[20]
countn[26]=countn[23]+countn[24]
countn[27]=countn[25]+countn[26]
countn[28]=countn[9]+countn[12]
countn[29]=countn[27]+countn[28]
#Q=sum(phylotree*count)
if dist==2 and w==3:
phylotree=np.append(np.append(np.append([0],np.repeat(1.5,8)),np.repeat(0.75,3)),np.repeat(1.5,0.75))
#phylotree=np.array(0).append(np.repeat(1.5,8)).append(np.repeat(0.75,3)).append(1.5,0.75)
#phylotree=c(rep(1.5,8),rep(0.75,3),1.5,0.75)
countn=np.zeros(14)
countn[1:9]=count[1:9]
countn[9]=countn[1]+countn[2]
countn[10]=countn[5]+countn[6]
countn[11]=countn[3]+countn[4]
countn[12]=countn[9]+countn[10]
countn[13]=countn[11]+countn[12]
#Q=sum(phylotree*count)
if dist==1 and w==3:
phylotree=np.append(np.append(np.append([0],np.repeat(0.5,8)),np.repeat(0.25,3)),[0.5,0.25])
#phylotree=np.array(0).append(np.repeat(0.5,8)).append(np.repeat(0.25,3)).append(0.5,0.25)
countn=np.zeros(14)
countn[1:9]=count[1:9]
countn[9]=countn[1]+countn[2]
countn[10]=countn[5]+countn[6]
countn[11]=countn[3]+countn[4]
countn[12]=countn[9]+countn[10]
countn[13]=countn[11]+countn[12]
#print("count = ",count)
#print("phylotree = ",phylotree)
Q=sum(phylotree*countn)
score=sum(phylotree*((countn/Q)**2))**(-1)
elif MeH==4: #Entropy
score=0
for i in count:
if i>0:
score-=(i/m)*np.log2(i/m)/w
elif MeH==5: #Epipoly
score=1-((count/m)**2).sum(axis=0)
if optional:
if MeH!=3:
count=count.reshape(2**w)
count=np.concatenate((count[[0]],count))
if w==3:
opt=pd.DataFrame({'chrom':chrom,'pos':start,'p01':count[1],'p02':count[2],'p03':count[3],'p04':count[4],\
'p05':count[5],'p06':count[6],'p07':count[7],'p08':count[8],'MeH':round(score,5),'dis':dis,'strand':strand}, index=[0])
if w==4:
opt=pd.DataFrame({'chrom':chrom,'pos':start,'p01':count[1],'p02':count[2],'p03':count[3],'p04':count[4],\
'p05':count[5],'p06':count[6],'p07':count[7],'p08':count[8],'p09':count[9],'p10':count[10],\
'p11':count[11],'p12':count[12],'p13':count[13],'p14':count[14],'p15':count[15],\
'p16':count[16],'MeH':round(score,5),'dis':dis,'strand':strand}, index=[0])
if w==5:
opt=pd.DataFrame({'chrom':chrom,'pos':start,'p01':count[1],'p02':count[2],'p03':count[3],'p04':count[4],\
'p05':count[5],'p06':count[6],'p07':count[7],'p08':count[8],'p09':count[9],'p10':count[10],\
'p11':count[11],'p12':count[12],'p13':count[13],'p14':count[14],'p15':count[15],\
'p16':count[16],'p17':count[17],'p18':count[18],'p19':count[19],'p20':count[20],\
'p21':count[21],'p22':count[22],'p23':count[23],'p24':count[24],'p25':count[25],\
'p26':count[26],'p27':count[27],'p28':count[28],'p29':count[29],'p30':count[30],\
'p31':count[31],'p32':count[32],'MeH':round(score,5),'dis':dis,'strand':strand}, index=[0])
if w==6:
opt=pd.DataFrame({'chrom':chrom,'pos':start,'p01':count[1],'p02':count[2],'p03':count[3],'p04':count[4],\
'p05':count[5],'p06':count[6],'p07':count[7],'p08':count[8],'p09':count[9],'p10':count[10],\
'p11':count[11],'p12':count[12],'p13':count[13],'p14':count[14],'p15':count[15],\
'p16':count[16],'p17':count[17],'p18':count[18],'p19':count[19],'p20':count[20],\
'p21':count[21],'p22':count[22],'p23':count[23],'p24':count[24],'p25':count[25],\
'p26':count[26],'p27':count[27],'p28':count[28],'p29':count[29],'p30':count[30],\
'p31':count[31],'p32':count[32],'p33':count[33],'p34':count[34],'p35':count[35],\
'p36':count[36],'p37':count[37],'p38':count[38],'p39':count[39],'p40':count[40],\
'p41':count[41],'p42':count[42],'p43':count[43],'p44':count[44],'p45':count[45],\
'p46':count[46],'p47':count[47],'p48':count[48],'p49':count[49],'p50':count[50],\
'p51':count[51],'p52':count[52],'p53':count[53],'p54':count[54],'p55':count[55],\
'p56':count[56],'p57':count[57],'p58':count[58],'p59':count[59],'p60':count[60],\
'p61':count[61],'p62':count[62],'p63':count[63],'p64':count[64],'MeH':round(score,5),'dis':dis,'strand':strand}, index=[0])
return out, opt
else:
out=pd.DataFrame({'chrom':chrom,'pos':start,'MeH':round(score,5),'dis':dis,'strand':strand}, index=[0])
return out
def impute(window,w):
full_ind=np.where(np.isnan(window).sum(axis=1)==0)[0]
part_ind=np.where(np.isnan(window).sum(axis=1)==1)[0]
for i in range(len(part_ind)):
sam = []
# which column is nan
pos=np.where(np.isnan(window[part_ind[i],:]))[0]
if np.unique(window[np.where(np.invert(np.isnan(window[:,pos])))[0],pos]).shape[0]==1:
window[part_ind[i],pos]=window[np.where(np.invert(np.isnan(window[:,pos])))[0],pos][0]
else:
#print("win_part i pos =",window[part_ind[i],pos])
for j in range(len(full_ind)):
if (window[part_ind[i],:]==window[full_ind[j],:]).sum()==w-1:
sam.append(j)
if len(sam)>0:
s1=random.sample(sam, 1)
s=window[full_ind[s1],pos]
else:
s=random.sample(window[np.where(np.invert(np.isnan(window[:,pos])))[0],pos].tolist(), k=1)[0]
window[part_ind[i],pos]=np.float64(s)
return window
def CGgenome_scr(bamfile,w,fa,optional,melv,silence=False,dist=1,MeH=2,imp=True):
filename, file_extension = os.path.splitext(bamfile)
sample = str.split(filename,'_')[0]
coverage = cov_context = 0
# load bamfile
samfile = pysam.AlignmentFile("MeHdata/%s.bam" % (filename), "rb")
# load reference genome
fastafile = pysam.FastaFile('MeHdata/%s.fa' % fa)
# initialise data frame for genome screening (load C from bam file)
aggreR = aggreC = pd.DataFrame(columns=['Qname'])
# initialise data frame for output
ResultPW = pd.DataFrame(columns=['chrom','pos','MeH','dis','strand'])
if melv:
ResML = pd.DataFrame(columns=['chrom','pos','ML','strand','depth'])
# if user wants to output compositions of methylation patterns at every eligible window, initialise data frame
if optional:
if w==3:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08',\
'MeH','dis','strand'])
if w==4:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08','p09','p10','p11',\
'p12','p13','p14','p15','p16','MeH','dis','strand'])
if w==5:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08','p09','p10','p11','p12','p13','p14','p15','p16'\
,'p17','p18','p19','p20','p21','p22','p23','p24','p25','p26','p27','p28',\
'p29','p30','p31','p32','MeH','dis','strand'])
if w==5:
Resultopt = pd.DataFrame(columns=\
['chrom','pos','p01','p02','p03','p04','p05','p06','p07','p08','p09','p10','p11','p12','p13','p14','p15','p16'\
,'p17','p18','p19','p20','p21','p22','p23','p24','p25','p26','p27','p28',\
'p29','p30','p31','p32','p33','p34','p35','p36','p37','p38','p39','p40','p41','p42','p43','p44','p45','p46'\
,'p47','p48','p49','p50','p51','p52','p53','p54','p55','p56','p57','p58','p59','p60','p61','p62','p63','p64'\
,'MeH','dis','strand'])
neverr = never = True
# all methylation patterns for Methylation heterogeneity evaluation
all_pos=np.zeros((2**w,w))
for i in range(w):
all_pos[:,i]=np.linspace(0,2**w-1,2**w)%(2**(i+1))//(2**i)
# distance matrix, also for Methylation heterogeneity evaluation
D=PattoDis(pd.DataFrame(all_pos),dist=dist) # 1:Hamming distance, 2: WDK
start=datetime.datetime.now()
# vector for saving methylation statuses before imputation
MU=np.zeros((2,w))
# screen bamfile by column
for pileupcolumn in samfile.pileup():
coverage += 1
chrom = pileupcolumn.reference_name
if not silence:
if (pileupcolumn.pos % 2000000 == 1):
print("CG %s s %s w %s %s pos %s Result %s" % (datetime.datetime.now(),filename,w,chrom,pileupcolumn.pos,ResultPW.shape[0]))
# Forward strand, check if 'CG' in reference genome
if (fastafile.fetch(chrom,pileupcolumn.pos,pileupcolumn.pos+2)=='CG'):
cov_context += 1
temp = pd.DataFrame(columns=['Qname',pileupcolumn.pos+1])
pileupcolumn.set_min_base_quality(0)
# append reads in the column
for pileupread in pileupcolumn.pileups:
if not pileupread.is_del and not pileupread.is_refskip and not pileupread.alignment.is_reverse: # C
d = {'Qname': [pileupread.alignment.query_name], pileupcolumn.pos+1: [pileupread.alignment.query_sequence[pileupread.query_position]]}
df2 = pd.DataFrame(data=d)
temp=temp.append(df2, ignore_index=True)
if melv:
temp2 = temp.replace(['C'],1)
temp2 = temp2.replace(['G'],0)
temp2 = temp2.replace(['A','T','N'],np.nan)
temp2 = temp2.drop('Qname',axis=1)
MC=(temp2==1).sum(axis=0).to_numpy()
UC=(temp2==0).sum(axis=0).to_numpy()
depth=MC+UC
if depth>3:
toappend=pd.DataFrame({'chrom':chrom,'pos':temp2.columns[0], \
'strand':'f','depth':depth,'ML':float(MC)/depth}, index=[0])
ResML=ResML.append(toappend)
# merge with other columns
if (not temp.empty):
aggreC = pd.merge(aggreC,temp,how='outer',on=['Qname'])
aggreC = aggreC.drop_duplicates()
# Reverse strand, check if 'CG' in reference genome
if pileupcolumn.pos>1:
if (fastafile.fetch(chrom,pileupcolumn.pos-1,pileupcolumn.pos+1)=='CG'):
cov_context += 1
tempr = pd.DataFrame(columns=['Qname',pileupcolumn.pos+1])
pileupcolumn.set_min_base_quality(0)
for pileupread in pileupcolumn.pileups:
if not pileupread.is_del and not pileupread.is_refskip and pileupread.alignment.is_reverse: # C
dr = {'Qname': [pileupread.alignment.query_name], pileupcolumn.pos+1: [pileupread.alignment.query_sequence[pileupread.query_position]]}
dfr2 = pd.DataFrame(data=dr)
tempr=tempr.append(dfr2, ignore_index=True)
if melv:
temp2 = tempr.replace(['G'],1)
temp2 = temp2.replace(['C'],0)
temp2 = temp2.replace(['A','T','N'],np.nan)
temp2 = temp2.drop('Qname',axis=1)
MC=(temp2==1).sum(axis=0).to_numpy()
UC=(temp2==0).sum(axis=0).to_numpy()
depth=MC+UC
if depth>3:
toappend=pd.DataFrame({'chrom':chrom,'pos':temp2.columns[0], \
'strand':'r','depth':depth,'ML':float(MC)/depth}, index=[0])
ResML=ResML.append(toappend)
if (not tempr.empty):
aggreR = pd.merge(aggreR,tempr,how='outer',on=['Qname'])
aggreR = aggreR.drop_duplicates()
# Impute and estimate, if there are 2w-1 columns
if never and aggreC.shape[1] == (2*w):
# C/G to 1, rest to 0, N to NA
never = False
aggreC = aggreC.replace(['C'],1)
aggreC = aggreC.replace(['T'],0)
aggreC = aggreC.replace(['A','N','G'],np.nan)
methbin = aggreC
meth = methbin.copy()
# remove read ID
meth = meth.drop('Qname',axis=1)
# back up for imputation
if imp:
methtemp = meth.copy()
# imputation by sliding window of 1 C
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
# save methylation statuses before imputation
# check if eligible for imputation, impute
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
ind=np.where(window.notnull().sum(axis=1)==w)[0]
methtemp.loc[methtemp.iloc[ind,:].index,meth.iloc[:,range(i,i+w)].columns]=window.loc[ind,:].values
# overwrite imputed window
meth = methtemp.copy()
# Evaluate methylation level and methylation heterogeneity and append to result
for i in range(0,w,1): # w windows
window = meth.iloc[:,range(i,i+w)].values
# check if enough complete patterns for evaluating MeH
if enough_reads(window,w,complete=True):
matforMH=getcomplete(window,w)
# if need to output methylation patterns
if optional:
toappend,opt=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='f',optional=optional)
Resultopt=Resultopt.append(opt)
# evaluate and output MeH
else:
toappend=MeHperwindow(pd.DataFrame(matforMH),start=meth.iloc[:,range(i,i+w)].columns[0],\
dis=meth.iloc[:,range(i,i+w)].columns[w-1]-meth.iloc[:,range(i,i+w)].columns[0],\
chrom=chrom,D=D,w=w,dist=dist,MeH=MeH,strand='f',optional=optional)
ResultPW=ResultPW.append(toappend)
# remove 1 column
aggreC = aggreC.drop(meth.columns[0:1],axis=1)
# drop rows with no values
aggreC.dropna(axis = 0, thresh=2, inplace = True)
#total += w
# Reverse
if neverr and aggreR.shape[1] == (2*w):
neverr = False
aggreR = aggreR.replace(['G'],1)
aggreR = aggreR.replace(['A'],0)
aggreR = aggreR.replace(['C','N','T'],np.nan)
methbin = aggreR # backup
#meth = methbin.iloc[:,methbin.columns!='Qname'] # pd to np
meth = methbin.copy()
meth = meth.drop('Qname',axis=1)
if imp:
methtemp = meth.copy()
# impute once if valid
for i in range(0,meth.shape[1]-w+1,1):
window = meth.iloc[:,range(i,i+w)].values
# if eligible for imputation
if enough_reads(window,w,complete=False):
window=pd.DataFrame(data=impute(window,w))
| |
else:
self.form.deiconify()
self.ifd.entryByName['mol_list']['widget'].setlist(self.mol_list)
self.ifd.entryByName['+Silder']['widget'].canvas.config(bg="Blue")
self.ifd.entryByName['-Silder']['widget'].canvas.config(bg="Red")
self.ifd.entryByName['-visible']['wcfg']['variable'].set(True)
self.ifd.entryByName['+visible']['wcfg']['variable'].set(True)
self.ifd.entryByName['mol_list']['widget'].setentry(self.mol_list[0])
self.ifd.entryByName['mol_list']['widget']._entryWidget.\
config(state='readonly')
self.APBS_Iso_Net.run()
self.Left_Visible()
self.Right_Visible()
self.vf.GUI.ROOT.config(cursor='')
self.vf.GUI.VIEWER.master.config(cursor='')
self.vf.GUI.MESSAGE_BOX.tx.component('text').config(cursor='xterm')
def run(self):
"""Animates isocontours"""
inv_d = 1./(self.maxi - self.mini)
data = Numeric.arange(inv_d,inv_d*500,inv_d*15).tolist()
data += Numeric.arange(inv_d*500,inv_d*5000,inv_d*150).tolist()
for values in data:
if self.cancel:
return
self.ifd.entryByName['+Silder']['widget'].set(values)
#self.Isocontour_L.getInputPortByName('isovalue').widget.set(values)
self.ifd.entryByName['-Silder']['widget'].set(-values)
#self.Isocontour_R.getInputPortByName('isovalue').widget.set(-values)
self.vf.GUI.VIEWER.update()
def __call__(self, **kw):
"""Displays APBS Potential Isocontours using\n
VisionInterface/APBSIsoContour_net.py\n
Required Arguments:\n
potential = location of the potential.dx file\n"""
if kw.has_key('potential'):
self.doitWrapper(potential = kw['potential'])
else:
print >>sys.stderr, "potential is missing"
return
def buildForm(self):
"""Builds 'default' GUI form'"""
VolumeStats = self.APBS_Iso_Net.getNodeByName('VolumeStats')[0]
self.maxi = VolumeStats.getOutputPortByName('maxi').data
self.mini = VolumeStats.getOutputPortByName('mini').data
self.Update(1);self.Update(-1)
self.ifd = ifd = InputFormDescr(title="Isocontours Control Panel")
ifd.append({'name':'mol_list',
'widgetType':Pmw.ComboBox,
'tooltip':
"""Click on the fliparrow to view
the list of available molecules""" ,
'defaultValue': self.combo_default,
'wcfg':{'labelpos':'e','label_text':'Select molecule',
'scrolledlist_items':self.mol_list, 'history':0,
'selectioncommand':self.Combo_Selection,
'entry_width':5,
'fliparrow':1, 'dropdown':1, 'listheight':80},
'gridcfg':{'sticky':'we', 'row':0, 'column':0,'columnspan':2}
})
ifd.append({'name':'+Silder',
'widgetType':ThumbWheel,
'tooltip':
"""Right click on the widget to type the isovalue manually""",
'wcfg':{'value':1.0,'oneTurn':10,
'type':'float',
'increment':0.1,
'min':0,
'precision':2,
'wheelPad':2,'width':120,'height':19,
'callback':self.Update,
}
})
ifd.append({'name':'-Silder',
'widgetType':ThumbWheel,
'tooltip':
"""Right click on the widget to type the isovalue manually""",
'wcfg':{'value':-1.0,'oneTurn':10,
'type':'float',
'increment':0.1,
'precision':2,
'max':-0.000000001,
'wheelPad':2,'width':120,'height':19,
'callback':self.Update,
},
})
ifd.append({'widgetType':Tkinter.Checkbutton,
'tooltip':"""(De)select this checkbutton to
(un)display blue isocontour""",
'name':'+visible',
'defaultValue':1,
'wcfg':{'text':'Blue isocontour',
'command':self.Left_Visible,
'bg':'Blue','fg':'White',
'variable':Tkinter.BooleanVar()},
'gridcfg':{'sticky':'e','row':1, 'column':1}
})
ifd.append({'widgetType':Tkinter.Checkbutton,
'tooltip':"""(De)select this checkbutton to
(un)display red isocontour""",
'name':'-visible',
'defaultValue':1,
'wcfg':{'text':'Red isocontour',
'command':self.Right_Visible,
'bg':'Red','fg':'White',
'variable':Tkinter.BooleanVar()},
'gridcfg':{'sticky':'e','row':2, 'column':1}
})
ifd.append({'name':'dismiss',
'widgetType':Tkinter.Button,
'wcfg':{'text':'Cancel',
'command':self.dismiss},
'gridcfg':{'sticky':'wens','row':3, 'column':0}
})
ifd.append({'name':'run',
'widgetType':Tkinter.Button,
'wcfg':{'text':'Animate',
'command':self.run},
'gridcfg':{'sticky':'wens','row':3, 'column':1}
})
self.form = self.vf.getUserInput(self.ifd, modal=0, blocking=0)
return ifd
def Combo_Selection(self, mol_name):
"""
This command is triggered as selectioncommand for ComboBox mol_list
"""
potential_dx = os.path.join(os.getcwd(), "apbs-" + mol_name)
potential_dx = os.path.join(potential_dx, mol_name + '.potential.dx')
self.doitWrapper(potential = potential_dx)
def Left_Visible(self):
"""Sets "+polygons" and "left_label" objects visible state"""
left_object = self.vf.GUI.VIEWER.GUI.objectByName('+polygons')
left_label = self.vf.GUI.VIEWER.GUI.objectByName('LeftLabel')
visible = self.ifd.entryByName['+visible']['wcfg']['variable'].get()
left_object.Set(visible = visible)
left_label.Set(visible = visible)
self.vf.GUI.VIEWER.Redraw()
def Right_Visible(self):
"""Sets "-polygons" and "right_label" objects visible states"""
right_object = self.vf.GUI.VIEWER.GUI.objectByName('-polygons')
right_label = self.vf.GUI.VIEWER.GUI.objectByName('RightLabel')
visible = self.ifd.entryByName['-visible']['wcfg']['variable'].get()
right_object.Set(visible = visible)
right_label.Set(visible = visible)
self.vf.GUI.VIEWER.Redraw()
def Update(self,val):
"""Updates Isocontour_L or Isocontour_R"""
if val > 0:
self.Isocontour_R.getInputPortByName('isovalue').widget.set(val)
else:
self.Isocontour_L.getInputPortByName('isovalue').widget.set(val)
APBSDisplay_Isocontours_GUI = CommandGUI()
APBSDisplay_Isocontours_GUI.addMenuCommand('menuRoot', 'Compute', \
'Isocontour Potential', cascadeName=cascadeName)
from DejaVu.colorTool import RedWhiteBlueARamp
class APBSDisplayOrthoSlice(MVCommand):
"""APBSDisplayOrthoslice displays APBS Potential Orthoslice\n
\nPackage : Pmv
\nModule : APBSCommands
\nClass : APBSDisplayOrthoslice
\nCommand name : APBSDisplayOrthoslice
\nSynopsis:\n
None <--- APBSDisplayOrthoslice()
"""
def onAddCmdToViewer(self):
"""Called when added to viewer"""
if self.vf.hasGui:
change_Menu_state(self, 'disabled')
# def onAddObjectToViewer(self, object):
# """Called when object is added to viewer"""
# change_Menu_state(self, 'normal')
def onRemoveObjectFromViewer(self, object):
"""Called when object is removed from viewer"""
if self.vf.hasGui:
if len(self.vf.Mols) == 0:
change_Menu_state(self, 'disabled')
potential = object.name +'.potential.dx'
try:
self.vf.Grid3DCommands.select(potential)
self.vf.Grid3DAddRemove.remove()
except:
pass #can't remove from 3D Grid Rendering widget
def doit(self):
"""doit function"""
self.vf.Grid3DCommands.show()
self.vf.Grid3DCommands.select(self.vf.APBSSetup.potential)
self.vf.Grid3DCommands.Checkbuttons['OrthoSlice'].invoke()
grid = self.vf.grids3D[self.vf.APBSSetup.potential]
self.vf.Grid3DOrthoSlice.select()
self.vf.Grid3DOrthoSlice.X_vis.set(True)
self.vf.Grid3DOrthoSlice.Y_vis.set(True)
self.vf.Grid3DOrthoSlice.Z_vis.set(True)
self.vf.Grid3DOrthoSlice.createX()
self.vf.Grid3DOrthoSlice.createY()
self.vf.Grid3DOrthoSlice.createZ()
self.vf.Grid3DOrthoSlice.ifd.entryByName['X_Slice']['widget'].set(grid.dimensions[0]/2)
self.vf.Grid3DOrthoSlice.ifd.entryByName['Y_Slice']['widget'].set(grid.dimensions[1]/2)
self.vf.Grid3DOrthoSlice.ifd.entryByName['Z_Slice']['widget'].set(grid.dimensions[2]/2)
mini = - grid.std/10.
maxi = grid.std/10.
grid.geomContainer['OrthoSlice']['X'].colormap.configure(ramp=RedWhiteBlueARamp(), mini=mini, maxi=maxi)
grid.geomContainer['OrthoSlice']['Y'].colormap.configure(ramp=RedWhiteBlueARamp(), mini=mini, maxi=maxi)
grid.geomContainer['OrthoSlice']['Z'].colormap.configure(ramp=RedWhiteBlueARamp(), mini=mini, maxi=maxi)
def guiCallback(self):
"""GUI callback"""
self.doitWrapper()
def __call__(self, **kw):
"""Displays APBS Potential Isocontours using\n
VisionInterface/APBSIsoContour_net.py\n
Required Arguments:\n
potential = location of the potential.dx file\n"""
self.doitWrapper()
APBSDisplayOrthoSlice_GUI = CommandGUI()
APBSDisplayOrthoSlice_GUI.addMenuCommand('menuRoot', 'Compute', \
'Display OrthoSlice', cascadeName=cascadeName)
class APBSVolumeRender(MVCommand):
"""APBSVolumeRender \n
\nPackage : Pmv
\nModule : APBSCommands
\nClass : APBSVolumeRender
\nCommand name : APBSVolumeRender
\nSynopsis:\n
None <--- APBSAPBSVolumeRender()
"""
def checkDependencies(self, vf):
if not vf.hasGui:
return 'ERROR'
from Volume.Renderers.UTVolumeLibrary import UTVolumeLibrary
test = UTVolumeLibrary.VolumeRenderer()
flagVolume = test.initRenderer()
if not flagVolume:
return 'ERROR'
def onAddCmdToViewer(self):
"""Called when added to viewer"""
if self.vf.hasGui:
change_Menu_state(self, 'disabled')
# def onAddObjectToViewer(self, object):
# """Called when object is added to viewer"""
# change_Menu_state(self, 'normal')
def onRemoveObjectFromViewer(self, object):
"""Called when object is removed from viewer"""
if self.vf.hasGui:
if len(self.vf.Mols) == 0:
change_Menu_state(self, 'disabled')
def doit(self):
"""doit function"""
grid = self.vf.grids3D[self.vf.APBSSetup.potential]
mini = - grid.std/10.
maxi = grid.std/10.
tmpMax = grid.maxi
tmpMin = grid.mini
grid.mini = mini
grid.maxi = maxi
self.vf.Grid3DCommands.show()
self.vf.Grid3DCommands.select(self.vf.APBSSetup.potential)
self.vf.Grid3DCommands.Checkbuttons['VolRen'].invoke()
self.vf.Grid3DVolRen.select()
widget = self.vf.Grid3DVolRen.ifd.entryByName['VolRen']['widget']
widget.colorGUI()
ramp = RedWhiteBlueARamp()
ramp[:,3] = Numeric.arange(0,0.25,1./(4*256.),'f')
grid = self.vf.grids3D[self.vf.APBSSetup.potential]
widget.ColorMapGUI.configure(ramp=ramp, mini=mini, maxi=maxi)
widget.ColorMapGUI.apply_cb()
grid.mini = tmpMin
grid.maxi = tmpMax
def guiCallback(self):
"""GUI callback"""
self.doitWrapper()
def __call__(self, **kw):
"""Displays APBS Potential Isocontours using\n
VisionInterface/APBSIsoContour_net.py\n
Required Arguments:\n
potential = location of the potential.dx file\n"""
self.doitWrapper()
APBSVolumeRender_GUI = CommandGUI()
APBSVolumeRender_GUI.addMenuCommand('menuRoot', 'Compute', \
'Volume Renderer', cascadeName=cascadeName)
from tkFileDialog import *
class APBSLoad_Profile(MVCommand):
"""APBSLoadProfile loads APBS parameters\n
\nPackage : Pmv
\nModule : APBSCommands
\nClass : APBSLoad_Profile
\nCommand name : APBSLoadProfile
\nSynopsis:\n
None <--- APBSLoadProfile(filename = None)
\nOptional Arguments:\n
filename = name of the file containing APBS parameters\n
"""
def doit(self, filename = None):
"""doit function"""
self.vf.APBSSetup.loadProfile(filename=filename)
def guiCallback(self):
"""GUI callback"""
filename=askopenfilename(filetypes=[('APBS Profile','*.apbs.pf')],\
title="Load APBS Profile")
if filename:
self.doitWrapper(filename=filename)
def __call__(self, **kw):
"""None <--- APBSSave_Profile()\n
Calls APBSSetup.loadProfile\n"""
if kw.has_key('filename'):
self.doitWrapper(filename=kw['filename'])
else:
if self.vf.APBSSetup.cmdForms.has_key('default') and \
self.vf.APBSSetup.cmdForms['default'].f.winfo_toplevel().\
wm_state() == 'normal':
filename=askopenfilename(filetypes=\
[('APBS Profile','*.apbs.pf')],
title="Load APBS Profile",
parent=self.vf.APBSSetup.cmdForms['default'].root)
else:
filename = askopenfilename(filetypes =
[('APBS Profile','*.apbs.pf')], title = "Load APBS Profile")
if filename:
self.doitWrapper(filename=filename)
APBSLoad_Profile_GUI = CommandGUI()
APBSLoad_Profile_GUI.addMenuCommand('menuRoot', 'Compute', 'Load Profile',
cascadeName=cascadeName, separatorAbove=1)
class APBSSave_Profile(MVCommand):
"""APBSSaveProfile saves APBS parameters\n
\nPackage : Pmv
\nModule : APBSCommands
\nClass : APBSSave_Profile
\nCommand name : APBSSaveProfile
\nSynopsis:\n
None <--- APBSSaveProfile(filename = None)
\nOptional Arguments:\n
filename = name of the file where APBS parameters are to be saved\n
"""
def onAddCmdToViewer(self):
"""Called when added to viewer"""
if self.vf.hasGui:
change_Menu_state(self, 'disabled')
def onRemoveObjectFromViewer(self, object):
"""Called when object is removed from viewer"""
if self.vf.hasGui:
if len(self.vf.Mols) == 0:
change_Menu_state(self, 'disabled')
def doit(self, Profilename=None):
"""doit function"""
self.vf.APBSSetup.saveProfile(Profilename=Profilename, fileFlag=True, flagCommand=True)
def guiCallback(self):
"""GUI callback"""
filename=asksaveasfilename(filetypes=[('APBS Profile','*.apbs.pf')],
title="Save APBS Profile As")
if filename:
self.doitWrapper(Profilename=filename)
def __call__(self, **kw):
"""None <--- APBSSave_Profile(filename = None)\n
Calls APBSSetup.saveProfile\n"""
if kw.has_key('Profilename'):
self.doitWrapper(Profilename=kw['Profilename'])
else:
if self.vf.APBSSetup.cmdForms.has_key('default') and \
self.vf.APBSSetup.cmdForms['default'].f.winfo_toplevel().\
wm_state() == 'normal':
filename = asksaveasfilename(filetypes=[('APBS Profile',
'*.apbs.pf')],title="Save APBS Profile As",
parent = self.vf.APBSSetup.cmdForms['default'].root)
else:
filename = asksaveasfilename(filetypes =
[('APBS Profile','*.apbs.pf')],title = "Save APBS Profile As")
if filename:
self.doitWrapper(Profilename=filename)
APBSSave_Profile_GUI = CommandGUI()
APBSSave_Profile_GUI.addMenuCommand('menuRoot', 'Compute', 'Save Profile',
cascadeName=cascadeName)
class APBSWrite_APBS_Parameter_File(MVCommand):
"""APBSOutputWrite writes APBS input file\n
\nPackage : Pmv
\nModule : APBSCommands
\nClass : APBSWrite_APBS_Parameter_File
\nCommand name : APBSOutputWrite
\nSynopsis:\n
None <--- APBSOutputWrite(filename)
\nRequired Arguments:\n
filename = name of the apbs input file \n
"""
def doit(self, filename = None):
"""doit function for APBSWrite_APBS_Parameter_File"""
if filename:
self.vf.APBSSetup.params.SaveAPBSInput(filename)
def guiCallback(self, **kw):
"""
GUI Callback for APBSWrite_APBS_Parameter_File
Asks for the file name to save current parameters
"""
filename=asksaveasfilename(filetypes=[('APBS Paramter File','*.apbs')],
title="Save APBS Parameters As ")
apply ( self.doitWrapper, (filename,), kw)
APBSWrite_Parameter_File_GUI = CommandGUI()
APBSWrite_Parameter_File_GUI.addMenuCommand('menuRoot', 'Compute', \
'Write APBS Parameter File', cascadeName=cascadeName)
class APBSPreferences(MVCommand):
"""APBSPreferences allows to change APBS Preferences\n
\nPackage : Pmv
\nModule : APBSCommands
\nClass : APBSPreferences
\nCommand name : APBSPreferences
\nSynopsis:\n
None <--- APBSPreferences(APBS_Path = None, pdb2pqr_Path = None, ff = None,
debump = None, hopt = None, hdebump = None, watopt = None)
\nOptional Arguments:\n
APBS_Path -- path to apbs executable
pdb2pqr_Path -- path to pdb2pqr.py script
ff -- Force Field for pdb2pqr ('amber', 'charmm' or 'parse')
nodebump : Do not perform the debumping operation
nohopt : Do not perform hydrogen optimization
nohdebump : Do not perform hydrogen debumping
nowatopt : Do not perform water optimization
"""
def doit(self, APBS_Path = None, pdb2pqr_Path = None, ff = None,
nodebump = False, nohopt = False):
"""
doit function for APBSPreferences class
\nOptional Arguments:\n
APBS_Path -- path to apbs executable
pdb2pqr_Path -- path to pdb2pqr.py script
ff -- Force Field for pdb2pqr ('amber', 'charmm' or 'parse')
nodebump : Do not perform the debumping operation
nohopt : Do not perform hydrogen optimization
nohdebump : Do not perform hydrogen debumping
nowatopt : Do not perform water optimization
"""
self.overwrite_pqr = False
if APBS_Path:
self.vf.APBSSetup.params.APBS_Path = APBS_Path
if pdb2pqr_Path:
self.vf.APBSSetup.params.pdb2pqr_Path = pdb2pqr_Path
if ff:
self.vf.APBSSetup.params.pdb2pqr_ForceField = ff
if nodebump != self.nodebump_past:
self.nodebump_past = nodebump
self.nodebump.set(nodebump)
self.overwrite_pqr = True
if nohopt != self.nohopt_past:
self.nohopt_past = nohopt
self.nohopt.set(nohopt)
self.overwrite_pqr = True
def __init__(self):
MVCommand.__init__(self)
try:
self.nodebump = Tkinter.BooleanVar()
self.nodebump.set(False)
self.nohopt = Tkinter.BooleanVar()
self.nohopt.set(False)
except:
self.nodebump = False
self.nohopt = False
self.nodebump_past = False
self.nohopt_past = False
self.overwrite_pqr = False
def guiCallback(self):
"""GUI Callback for APBSPreferences"""
self.APBS_Path | |
E501
A list of valid options for the property. This field is required for enumerated properties, but will be empty for other property types. # noqa: E501
:return: The options of this ModelProperty. # noqa: E501
:rtype: list[Option]
"""
return self._options
@options.setter
def options(self, options):
"""Sets the options of this ModelProperty.
A list of valid options for the property. This field is required for enumerated properties, but will be empty for other property types. # noqa: E501
:param options: The options of this ModelProperty. # noqa: E501
:type: list[Option]
"""
if self.local_vars_configuration.client_side_validation and options is None: # noqa: E501
raise ValueError("Invalid value for `options`, must not be `None`") # noqa: E501
self._options = options
@property
def created_user_id(self):
"""Gets the created_user_id of this ModelProperty. # noqa: E501
The internal ID of the user who created the property in HubSpot. This field may not exist if the property was created outside of HubSpot. # noqa: E501
:return: The created_user_id of this ModelProperty. # noqa: E501
:rtype: str
"""
return self._created_user_id
@created_user_id.setter
def created_user_id(self, created_user_id):
"""Sets the created_user_id of this ModelProperty.
The internal ID of the user who created the property in HubSpot. This field may not exist if the property was created outside of HubSpot. # noqa: E501
:param created_user_id: The created_user_id of this ModelProperty. # noqa: E501
:type: str
"""
self._created_user_id = created_user_id
@property
def updated_user_id(self):
"""Gets the updated_user_id of this ModelProperty. # noqa: E501
The internal user ID of the user who updated the property in HubSpot. This field may not exist if the property was updated outside of HubSpot. # noqa: E501
:return: The updated_user_id of this ModelProperty. # noqa: E501
:rtype: str
"""
return self._updated_user_id
@updated_user_id.setter
def updated_user_id(self, updated_user_id):
"""Sets the updated_user_id of this ModelProperty.
The internal user ID of the user who updated the property in HubSpot. This field may not exist if the property was updated outside of HubSpot. # noqa: E501
:param updated_user_id: The updated_user_id of this ModelProperty. # noqa: E501
:type: str
"""
self._updated_user_id = updated_user_id
@property
def referenced_object_type(self):
"""Gets the referenced_object_type of this ModelProperty. # noqa: E501
If this property is related to other object(s), they'll be listed here. # noqa: E501
:return: The referenced_object_type of this ModelProperty. # noqa: E501
:rtype: str
"""
return self._referenced_object_type
@referenced_object_type.setter
def referenced_object_type(self, referenced_object_type):
"""Sets the referenced_object_type of this ModelProperty.
If this property is related to other object(s), they'll be listed here. # noqa: E501
:param referenced_object_type: The referenced_object_type of this ModelProperty. # noqa: E501
:type: str
"""
self._referenced_object_type = referenced_object_type
@property
def display_order(self):
    """Gets the display_order of this ModelProperty.

    The order that this property should be displayed in the HubSpot UI
    relative to other properties for this object type. Properties are
    displayed in order starting with the lowest positive integer value.
    A value of -1 will cause the property to be displayed **after** any
    positive values.

    :return: The display_order of this ModelProperty.
    :rtype: int
    """
    return self._display_order

@display_order.setter
def display_order(self, display_order):
    """Sets the display_order of this ModelProperty.

    :param display_order: UI ordering value; -1 sorts after positives
    :type: int
    """
    self._display_order = display_order
@property
def calculated(self):
    """Gets the calculated of this ModelProperty.

    For default properties, true indicates that the property is
    calculated by a HubSpot process. It has no effect for custom
    properties.

    :return: The calculated of this ModelProperty.
    :rtype: bool
    """
    return self._calculated

@calculated.setter
def calculated(self, calculated):
    """Sets the calculated of this ModelProperty.

    :param calculated: whether a HubSpot process computes this property
    :type: bool
    """
    self._calculated = calculated
@property
def external_options(self):
    """Gets the external_options of this ModelProperty.

    For default properties, true indicates that the options are stored
    externally to the property settings.

    :return: The external_options of this ModelProperty.
    :rtype: bool
    """
    return self._external_options

@external_options.setter
def external_options(self, external_options):
    """Sets the external_options of this ModelProperty.

    :param external_options: whether options live outside the settings
    :type: bool
    """
    self._external_options = external_options
@property
def archived(self):
    """Gets the archived of this ModelProperty.

    Whether or not the property is archived.

    :return: The archived of this ModelProperty.
    :rtype: bool
    """
    return self._archived

@archived.setter
def archived(self, archived):
    """Sets the archived of this ModelProperty.

    :param archived: whether or not the property is archived
    :type: bool
    """
    self._archived = archived
@property
def has_unique_value(self):
    """Gets the has_unique_value of this ModelProperty.

    Whether or not the property's value must be unique. Once set, this
    can't be changed.

    :return: The has_unique_value of this ModelProperty.
    :rtype: bool
    """
    return self._has_unique_value

@has_unique_value.setter
def has_unique_value(self, has_unique_value):
    """Sets the has_unique_value of this ModelProperty.

    :param has_unique_value: whether values must be unique (immutable
        once set on the HubSpot side)
    :type: bool
    """
    self._has_unique_value = has_unique_value
@property
def hidden(self):
    """Gets the hidden of this ModelProperty.

    :return: The hidden of this ModelProperty.
    :rtype: bool
    """
    return self._hidden

@hidden.setter
def hidden(self, hidden):
    """Sets the hidden of this ModelProperty.

    :param hidden: the hidden flag
    :type: bool
    """
    self._hidden = hidden
@property
def hubspot_defined(self):
    """Gets the hubspot_defined of this ModelProperty.

    This will be true for default object properties built into HubSpot.

    :return: The hubspot_defined of this ModelProperty.
    :rtype: bool
    """
    return self._hubspot_defined

@hubspot_defined.setter
def hubspot_defined(self, hubspot_defined):
    """Sets the hubspot_defined of this ModelProperty.

    :param hubspot_defined: True for HubSpot-built default properties
    :type: bool
    """
    self._hubspot_defined = hubspot_defined
@property
def show_currency_symbol(self):
    """Gets the show_currency_symbol of this ModelProperty.

    Whether the property will display the currency symbol set in the
    account settings.

    :return: The show_currency_symbol of this ModelProperty.
    :rtype: bool
    """
    return self._show_currency_symbol

@show_currency_symbol.setter
def show_currency_symbol(self, show_currency_symbol):
    """Sets the show_currency_symbol of this ModelProperty.

    :param show_currency_symbol: whether to display the account's
        currency symbol
    :type: bool
    """
    self._show_currency_symbol = show_currency_symbol
@property
def modification_metadata(self):
    """Gets the modification_metadata of this ModelProperty.

    :return: The modification_metadata of this ModelProperty.
    :rtype: PropertyModificationMetadata
    """
    return self._modification_metadata

@modification_metadata.setter
def modification_metadata(self, modification_metadata):
    """Sets the modification_metadata of this ModelProperty.

    :param modification_metadata: the modification metadata object
    :type: PropertyModificationMetadata
    """
    self._modification_metadata = modification_metadata
@property
def form_field(self):
    """Gets the form_field of this ModelProperty.

    Whether or not the property can be used in a HubSpot form.

    :return: The form_field of this ModelProperty.
    :rtype: bool
    """
    return self._form_field

@form_field.setter
def form_field(self, form_field):
    """Sets the form_field of this ModelProperty.

    :param form_field: whether the property may appear in a HubSpot form
    :type: bool
    """
    self._form_field = form_field
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in | |
snapshotsessionname):
volumeGroupUri = self.query_by_name(name)
snapshotsessionUri = self.query_snapshotsession_uri_by_name(name, snapshotsessionname)
(s, h) = common.service_json_request(
self.__ipAddr, self.__port,
"GET",
VolumeGroup.URI_VOLUME_GROUP_SNAPSHOT_SESSION_SHOW.format(volumeGroupUri, snapshotsessionUri), None)
o = common.json_decode(s)
return o
def volume_group_snapshotsession_get_sets(self, name):
    """Return the snapshot-session copy sets of the named volume group.

    :param name: name of the volume group
    :return: decoded JSON response from the ViPR API
    """
    group_uri = self.query_by_name(name)
    target = VolumeGroup.URI_VOLUME_GROUP_SNAPSHOT_SESSION_GET_COPY_SETS.format(group_uri)
    (body, _headers) = common.service_json_request(
        self.__ipAddr, self.__port, "GET", target, None)
    return common.json_decode(body)
def volume_group_snapshotsession_get(self, name, setname):
    """Fetch the snapshot sessions of one copy set of a volume group.

    :param name: name of the volume group
    :param setname: copy set name to query
    :return: the 'snapshot_session' list from the response, or [] when
        the response carries none
    """
    group_uri = self.query_by_name(name)
    payload = {"copy_set_name": setname}
    (body, _headers) = common.service_json_request(
        self.__ipAddr, self.__port, "POST",
        VolumeGroup.URI_VOLUME_GROUP_SNAPSHOT_SESSION_GET_COPY_SETS.format(group_uri),
        json.dumps(payload))
    decoded = common.json_decode(body)
    return decoded.get('snapshot_session', [])
def volume_group_snapshotsession_operation(self, name, copysetname, subGroups, snapshotsessionnames, partial, uri):
    '''
    Makes REST API call to deactivate/restore volume group snapshot sessions.

    Parameters:
        name: name of the volume group
        copysetname: copy set to act on (optional)
        subGroups: comma-separated Array Replication Groups (optional)
        snapshotsessionnames: snapshot sessions specifying which set to
            act on; for a partial operation, give one snapshot from each
            Array Replication Group
        partial: operate on snapshots of only a subset of the VolumeGroup
        uri: format string for the REST endpoint to call
    Returns:
        response of the operation
    '''
    group_uri = self.query_by_name(name)
    payload = dict()
    if snapshotsessionnames:
        payload["snapshot_sessions"] = self.query_snapshotsession_uris_by_names(
            name, snapshotsessionnames)
    if copysetname:
        payload["copy_set_name"] = copysetname
    if subGroups:
        payload["subgroups"] = subGroups.split(',')
    # Flag a partial (per-replication-group) request
    if partial:
        payload["partial"] = partial
    (body, _headers) = common.service_json_request(
        self.__ipAddr, self.__port, "POST",
        uri.format(group_uri), json.dumps(payload))
    return common.json_decode(body)
# link target
def volume_group_snapshotsession_link(self, name, copysetname, subGroups, snapshotsessionnames, count, target_name, copymode, partial):
    """Link new targets to volume group snapshot sessions.

    :param count: number of targets to link
    :param target_name: base name for the new linked targets
    :param copymode: copy mode for the new targets
    (remaining parameters as in volume_group_snapshotsession_operation)
    """
    group_uri = self.query_by_name(name)
    payload = dict()
    if snapshotsessionnames:
        payload["snapshot_sessions"] = self.query_snapshotsession_uris_by_names(
            name, snapshotsessionnames)
    if copysetname:
        payload["copy_set_name"] = copysetname
    if subGroups:
        payload["subgroups"] = subGroups.split(',')
    payload["new_linked_targets"] = {
        'count': count,
        'target_name': target_name,
        'copy_mode': copymode,
    }
    # Flag a partial (per-replication-group) request
    if partial:
        payload["partial"] = partial
    # REST api call
    (body, _headers) = common.service_json_request(
        self.__ipAddr, self.__port, "POST",
        VolumeGroup.URI_VOLUME_GROUP_SNAPSHOT_SESSION_LINK.format(group_uri),
        json.dumps(payload))
    return common.json_decode(body)
def get_unlink_target_entries(self, name, resources):
    """Build the linked-target entry list for an unlink request.

    Each resource is '<target_name>' or '<target_name>:delete'; any
    other suffix is a command-line error.

    :param name: name of the volume group
    :param resources: iterable of target specifications
    :return: list of dicts with 'id' and 'delete_target' keys
    :raises SOSError: when a suffix other than 'delete' is given
    """
    target_entries = []
    for target in resources:
        # Removed a dead 'targetParam = []' that was immediately
        # overwritten in the original.
        target_param = target.split(":")
        entry = {'id': self.query_snapshot_uri_by_name(name, target_param[0])}
        if len(target_param) > 1:
            if target_param[1] != "delete":
                raise SOSError(
                    SOSError.CMD_LINE_ERR,
                    "Please specify :delete if the target volume need to be deleted")
            entry['delete_target'] = True
        else:
            entry['delete_target'] = False
        target_entries.append(entry)
    return target_entries
# relink target
def volume_group_snapshotsession_relink_operation(self, name, copysetName, targetName, snapshotsessionnames, target_names, partial):
    """Relink targets to volume group snapshot sessions.

    :param target_names: comma-separated linked target names to relink
    (remaining parameters as in volume_group_snapshotsession_operation)
    """
    group_uri = self.query_by_name(name)
    payload = dict()
    if snapshotsessionnames:
        payload["snapshot_sessions"] = self.query_snapshotsession_uris_by_names(
            name, set(snapshotsessionnames.split(',')))
    if copysetName:
        payload["copy_set_name"] = copysetName
    if targetName:
        payload["target_name"] = targetName
    if target_names:
        payload["ids"] = self.query_snapshot_uris_by_names(
            name, set(target_names.split(',')))
    # Flag a partial (per-replication-group) request
    if partial:
        payload["partial"] = partial
    # REST api call
    (body, _headers) = common.service_json_request(
        self.__ipAddr, self.__port, "POST",
        VolumeGroup.URI_VOLUME_GROUP_SNAPSHOT_SESSION_RELINK.format(group_uri),
        json.dumps(payload))
    return common.json_decode(body)
# unlink target
def volume_group_snapshotsession_unlink_operation(self, name, copysetName, targetName, delete, snapshotsessionnames, target_names, partial):
    """Unlink targets from volume group snapshot sessions.

    :param delete: when truthy, request deletion of the unlinked target
    :param target_names: comma-separated '<name>[:delete]' target specs
    (remaining parameters as in volume_group_snapshotsession_operation)
    """
    group_uri = self.query_by_name(name)
    payload = dict()
    if snapshotsessionnames:
        payload["snapshot_sessions"] = self.query_snapshotsession_uris_by_names(
            name, set(snapshotsessionnames.split(',')))
    if copysetName:
        payload["copy_set_name"] = copysetName
    if targetName:
        payload["target_name"] = targetName
    if delete:
        payload["delete_target"] = delete
    if target_names:
        payload["linked_targets"] = self.get_unlink_target_entries(
            name, set(target_names.split(',')))
    # Flag a partial (per-replication-group) request
    if partial:
        payload["partial"] = partial
    # REST api call
    (body, _headers) = common.service_json_request(
        self.__ipAddr, self.__port, "POST",
        VolumeGroup.URI_VOLUME_GROUP_SNAPSHOT_SESSION_UNLINK.format(group_uri),
        json.dumps(payload))
    return common.json_decode(body)
# Blocks the operation until the task is complete/error out/timeout
def check_for_sync(self, result, sync):
    """Optionally block until the task in *result* finishes.

    :param result: task response dict from a ViPR operation
    :param sync: when falsy, return *result* untouched; otherwise poll
        until the task completes, errors out, or times out
    :raises SOSError: when *sync* is set but the task list is empty
    """
    if not sync:
        return result
    if len(result["resource"]) == 0:
        raise SOSError(
            SOSError.SOS_FAILURE_ERR,
            "error: task list is empty, no task response found")
    resource = result["resource"]
    return common.block_until_complete("volume", resource["id"],
                                       result["id"], self.__ipAddr,
                                       self.__port)
# SHOW resource parser
def show_volume_group_volume_parser(subcommand_parsers, common_parser):
    """Register the 'show-volumes' subcommand."""
    parser = subcommand_parsers.add_parser(
        'show-volumes',
        description='ViPR VolumeGroup Show Volumes CLI usage.',
        parents=[common_parser],
        conflict_handler='resolve',
        help='Show volume group volumes')
    parser.add_argument('-xml',
                        dest='xml',
                        action='store_true',
                        help='XML response')
    required = parser.add_argument_group('mandatory arguments')
    required.add_argument('-n', '-name',
                          metavar='<name>',
                          dest='name',
                          help='Name of volume group',
                          required=True)
    parser.set_defaults(func=volume_group_volume_show)
def volume_group_volume_show(args):
    """CLI glue for 'show-volumes': print the volumes of a volume group."""
    client = VolumeGroup(args.ip, args.port)
    try:
        result = client.volume_show(args.name, args.xml)
        if result:
            if args.xml:
                return common.format_xml(result)
            return common.format_json_object(result)
    except SOSError as e:
        raise e
def show_volume_group_children_parser(subcommand_parsers, common_parser):
    """Register the 'show-children' subcommand."""
    parser = subcommand_parsers.add_parser(
        'show-children',
        description='ViPR VolumeGroup Show Children CLI usage.',
        parents=[common_parser],
        conflict_handler='resolve',
        help='Show volume group child volume groups')
    parser.add_argument('-xml',
                        dest='xml',
                        action='store_true',
                        help='XML response')
    required = parser.add_argument_group('mandatory arguments')
    required.add_argument('-n', '-name',
                          metavar='<name>',
                          dest='name',
                          help='Name of volume group',
                          required=True)
    parser.set_defaults(func=volume_group_children_show)
def volume_group_children_show(args):
    """CLI glue for 'show-children': print child volume groups."""
    client = VolumeGroup(args.ip, args.port)
    try:
        result = client.volume_group_children_show(args.name, args.xml)
        if result:
            if args.xml:
                return common.format_xml(result)
            return common.format_json_object(result)
    except SOSError as e:
        raise e
def create_parser(subcommand_parsers, common_parser):
    """Register the 'create' subcommand with its arguments."""
    parser = subcommand_parsers.add_parser(
        'create',
        description='ViPR VolumeGroup Create CLI usage.',
        parents=[common_parser],
        conflict_handler='resolve',
        help='Create a volume group')
    required = parser.add_argument_group('mandatory arguments')
    required.add_argument('-n', '-name',
                          metavar='<name>',
                          dest='name',
                          help='Name of VolumeGroup',
                          required=True)
    required.add_argument('-r', '-roles',
                          metavar='<roles>',
                          dest='roles',
                          help='[COPY | DR]',
                          required=True)
    parser.add_argument('-d', '-description',
                        metavar='<description>',
                        dest='description',
                        help='description for volume group')
    parser.add_argument('-pa', '-parent',
                        metavar='<parent>',
                        dest='parent',
                        help='parent volume group for volume group')
    parser.add_argument('-mt', '-migrationType',
                        metavar='<migrationType>',
                        dest='migrationType',
                        help='migration type for mobility volume group')
    parser.add_argument('-mg', '-migrationGroupBy',
                        metavar='<migrationGroupBy>',
                        dest='migrationGroupBy',
                        help='migration group by for mobility volume group')
    parser.set_defaults(func=create)
def create(args):
    """CLI glue for 'create': create a volume group."""
    client = VolumeGroup(args.ip, args.port)
    try:
        client.create(args.name, args.description, args.roles, args.parent,
                      args.migrationType, args.migrationGroupBy)
    except SOSError as e:
        # Re-wrap the common failures with a CLI-oriented message
        if e.err_code in (SOSError.NOT_FOUND_ERR,
                          SOSError.ENTRY_ALREADY_EXISTS_ERR):
            raise SOSError(e.err_code,
                           "VolumeGroup create failed: " + e.err_text)
        else:
            raise e
def delete_parser(subcommand_parsers, common_parser):
    """Register the 'delete' subcommand."""
    parser = subcommand_parsers.add_parser(
        'delete',
        description='ViPR VolumeGroup Delete CLI usage.',
        parents=[common_parser],
        conflict_handler='resolve',
        help='Delete a volume group')
    required = parser.add_argument_group('mandatory arguments')
    required.add_argument('-n', '-name',
                          metavar='<name>',
                          dest='name',
                          help='Name of VolumeGroup',
                          required=True)
    parser.set_defaults(func=delete_by_name)
def delete_by_name(args):
    """CLI glue for 'delete': delete a volume group by name."""
    client = VolumeGroup(args.ip, args.port)
    try:
        client.delete_by_name(args.name)
    except SOSError as e:
        if e.err_code == SOSError.NOT_FOUND_ERR:
            raise SOSError(SOSError.NOT_FOUND_ERR,
                           "VolumeGroup delete failed: " + e.err_text)
        else:
            raise e
# show command parser
def show_parser(subcommand_parsers, common_parser):
    """Register the 'show' subcommand."""
    parser = subcommand_parsers.add_parser(
        'show',
        description='ViPR VolumeGroup Show CLI usage.',
        parents=[common_parser],
        conflict_handler='resolve',
        help='Show volume group details')
    parser.add_argument('-xml',
                        dest='xml',
                        action='store_true',
                        help='XML response')
    required = parser.add_argument_group('mandatory arguments')
    required.add_argument('-n', '-name',
                          metavar='<name>',
                          dest='name',
                          help='Name of volume group',
                          required=True)
    parser.set_defaults(func=show)
def show(args):
    """CLI glue for 'show': print volume group details."""
    client = VolumeGroup(args.ip, args.port)
    try:
        result = client.show(args.name, args.xml)
        if result:
            if args.xml:
                return common.format_xml(result)
            return common.format_json_object(result)
    except SOSError as e:
        raise e
# list command parser
def list_parser(subcommand_parsers, common_parser):
    """Register the 'list' subcommand."""
    parser = subcommand_parsers.add_parser(
        'list',
        description='ViPR VolumeGroup List CLI usage.',
        parents=[common_parser],
        conflict_handler='resolve',
        help='Lists volume groups')
    parser.add_argument('-v', '-verbose',
                        dest='verbose',
                        help='List volume groups with details',
                        action='store_true')
    parser.add_argument('-l', '-long',
                        dest='largetable',
                        help='List volume groups in table format',
                        action='store_true')
    parser.set_defaults(func=list)
def list(args):
    """CLI glue for 'list': print volume groups in the requested format.

    NOTE: the name shadows the builtin ``list`` but is kept because
    list_parser registers it as the subcommand callback.
    """
    client = VolumeGroup(args.ip, args.port)
    try:
        from common import TableGenerator
        records = []
        for volume_group in client.list():
            detail = client.show_by_uri(volume_group['id'])
            if detail:
                records.append(detail)
        if not records:
            return
        if args.verbose:
            return common.format_json_object(records)
        if args.largetable:
            TableGenerator(records, ['name', 'description',
                                     'roles', 'tags']).printTable()
        else:
            TableGenerator(records, ['name']).printTable()
    except SOSError as e:
        raise e
# update volume group command parser
def update_parser(subcommand_parsers, common_parser):
    """Register the 'update' subcommand with all of its options."""
    parser = subcommand_parsers.add_parser(
        'update',
        description='ViPR update volume group CLI usage',
        parents=[common_parser],
        conflict_handler='resolve',
        help='Update volume group properties')
    required = parser.add_argument_group('mandatory arguments')
    required.add_argument('-n', '-name',
                          metavar='<name>',
                          dest='name',
                          help='Name of existing volume group',
                          required=True)
    # (short flag, long flag, metavar, dest, help) for every optional
    # argument, in the order they should appear in --help output.
    optionals = [
        ('-nn', '-newname', '<newname>', 'newname',
         'New name of volume group'),
        ('-d', '-description', '<description>', 'description',
         'New description of volume group'),
        ('-r', '-remove_volumes',
         '<tenant/project/volume_label | volume_uid,...>', 'remove_volumes',
         'A list of volumes to remove from the volume group'),
        ('-a', '-add_volumes',
         '<tenant/project/volume_label | volume_uid,...>', 'add_volumes',
         'A list of volumes to add to the volume group'),
        ('-cg', '-consistency_group', '<consistency_group>', 'consistency_group',
         'A consistency group for adding volumes to the volume group'),
        ('-rg', '-replication_group', '<replication_group>', 'replication_group',
         'Application sub group. Maps to the storage group on the array where volumes will be added to'),
        ('-sg', '-sub-group', '<sub_group>', 'sub_group',
         'Application sub group. Maps to the storage group on the array where volumes will be added to'),
        ('-pa', '-parent', '<parent>', 'parent',
         'A parent volume group for the volume group'),
        ('-rh', '-remove_hosts', '<remove_hosts>', 'remove_hosts',
         'A list of hosts to remove from the volume group'),
        ('-ah', '-add_hosts', '<add_hosts>', 'add_hosts',
         'A list of hosts to add to the volume group'),
        ('-rc', '-remove_clusters', '<remove_clusters>', 'remove_clusters',
         'A list of clusters to remove from the volume group'),
        ('-ac', '-add_clusters', '<add_clusters>', 'add_clusters',
         'A list of clusters to add to the volume group'),
    ]
    for short_flag, long_flag, metavar, dest, help_text in optionals:
        parser.add_argument(short_flag, long_flag,
                            metavar=metavar,
                            dest=dest,
                            help=help_text)
    parser.set_defaults(func=update)
def update(args):
if(args.newname is None and args.description is None and args.add_volumes is None and args.remove_volumes is None and args.parent is None and args.remove_hosts is None and args.add_hosts is None and args.add_clusters is None and args.remove_clusters is None):
raise SOSError(SOSError.CMD_LINE_ERR,
"viprcli volume group update: error: at least one of " +
"the arguments -np/-newname -d/-description -a/-add_volumes " +
" -r/-remove_volumes -rh/-remove_hosts -ah/-add_hosts " +
" -rc/-remove_clusters -ac/-add_clusters required")
add_vols = []
if(args.add_volumes and | |
#!/usr/bin/env python
""" Locate single cells
Annotate the files with points in a nested layout:
.. code-block:: bash
$ ./cell_locator.py \\
--sel-mode point \\
--layout nested \\
-r /path/to/data
Annotate tiles within a certain range:
.. code-block:: bash
$ ./cell_locator.py \\
--sel-mode point \\
--layout nested \\
-r /path/to/data \\
--min-timepoint 5 \\
--max-timepoint 15
Ignore already annotated tiles:
.. code-block:: bash
$ ./cell_locator.py \\
--sel-mode point \\
--layout nested \\
-r /path/to/data \\
--skip-tagged
Putting it all together, here's what I use to annotate 20x inverted images:
.. code-block:: bash
$ ./cell_locator.py \\
--sel-mode point \\
--layout nested \\
-r /data/Experiment/2017-03-03 \\
--max-timepoint 12 \\
--skip-tagged
API Documentation
-----------------
"""
# Imports
# Standard lib
import sys
import time
import pathlib
import tkinter
import platform
# 3rd party
import numpy as np
from PIL import Image
import matplotlib as mpl
mpl.use('TkAgg')
mpl.rcParams['toolbar'] = 'None'
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
from matplotlib.lines import Line2D
# Our own imports
from model import load_selection_db
import gui
# Constants
# Lower-cased OS name (e.g. 'linux', 'darwin', 'windows') for
# platform-specific behavior.
UNAME = platform.system().lower()
# Base marker size (in points) for drawn selection dots; scaled by the
# figure size in ImageTagger.markersize.
MARKERSIZE = 12
# When True, paint the figure background black.
NIGHT_MODE = True
# Selection class number (keyboard digit) -> matplotlib color name.
SEL_CLASS_COLORS = {
1: 'red',
2: 'orange',
3: 'gold',
4: 'green',
5: 'blue',
6: 'indigo',
7: 'violet',
8: 'magenta',
9: 'cyan',
0: 'darkgreen',
}
# Selection class active before the user picks one.
SEL_CLASS_DEFAULT = 1
# Selection tool active at startup.
SEL_MODE_DEFAULT = 'point'
# Functions
def find_all_images(rootdir, suffixes=('.tif', '.png')):
    """Yield the image files directly under *rootdir*, sorted by name.

    :param rootdir: pathlib.Path of the directory to scan (non-recursive)
    :param suffixes: accepted file extensions (generalized from the
        hard-coded ('.tif', '.png'); default preserves old behavior)
    :return: generator of pathlib.Path objects
    """
    return (p for p in sorted(rootdir.iterdir())
            if p.is_file() and p.suffix in suffixes)
# Classes
class Crosshair(object):
    """ Draw a full-figure crosshair on the plot.

    The cross is two dashed Line2D objects drawn with blitting: the
    clean canvas region is saved in add() and restored by update() and
    remove().
    """

    def __init__(self, mode='off', window=None):
        """
        :param mode: initial state ('on'/'off'/True/False)
        :param window: object exposing .fig and .canvas (e.g. ImageTagger)
        """
        self.window = window
        # BUG FIX: should_draw must be initialized BEFORE set(mode) is
        # applied. Previously the constructor called set(mode) and then
        # reset should_draw to False, so Crosshair(mode='on') silently
        # started disabled (and a toggle mode crashed on the missing
        # attribute).
        self.should_draw = False
        self.cur_cross = None    # (horizontal, vertical) Line2D pair, or None
        self.cur_region = None   # saved background region for blit restore
        self.set(mode)

    def set(self, mode):
        """ Turn drawing 'on', 'off', or toggle it (any other value). """
        if mode in ('on', True):
            mode = True
        elif mode in ('off', False):
            mode = False
        else:  # Toggle
            mode = not self.should_draw
        self.should_draw = mode

    def add(self, x=0.0, y=0.0):
        """ Add a cross at these (figure pixel) coordinates """
        # bounds is (x0, y0, width, height)
        x0, y0, width, height = self.window.fig.get_window_extent().bounds
        horz_line = Line2D([x0, x0 + width], [y, y], linewidth=2,
                           linestyle='--', color=(0.6, 0.6, 0.6))
        vert_line = Line2D([x, x], [y0, y0 + height], linewidth=2,
                           linestyle='--', color=(0.6, 0.6, 0.6))
        horz_line.set_animated(True)
        vert_line.set_animated(True)
        # Save the clean canvas so update() can restore it before redrawing
        self.cur_region = self.window.canvas.copy_from_bbox(self.window.fig.bbox)
        self.window.fig.lines.append(horz_line)
        self.window.fig.lines.append(vert_line)
        self.cur_cross = horz_line, vert_line

    def remove(self):
        """ Remove the cross (no-op when none is drawn). """
        if self.cur_cross is None:
            return
        horz_line, vert_line = self.cur_cross
        self.window.canvas.restore_region(self.cur_region)
        self.cur_cross = None
        self.cur_region = None
        horz_line.set_animated(False)
        vert_line.set_animated(False)
        self.window.fig.lines.remove(horz_line)
        self.window.fig.lines.remove(vert_line)

    def update(self, x, y):
        """ Move the cross to (x, y) using blitting (no-op when hidden). """
        if self.cur_cross is None:
            return
        horz_line, vert_line = self.cur_cross
        horz_line.set_ydata([y, y])
        vert_line.set_xdata([x, x])
        self.window.canvas.restore_region(self.cur_region)
        self.window.fig.draw_artist(horz_line)
        self.window.fig.draw_artist(vert_line)
        self.window.canvas.blit(self.window.fig.bbox)
class ImageTagger(object):
def __init__(self,
sel_class=SEL_CLASS_DEFAULT,
sel_mode=SEL_MODE_DEFAULT):
""" Set up paths, the selection database, and per-session state.

:param sel_class: initially active selection class (keyboard digit)
:param sel_mode: initially active selection tool (e.g. 'point')

Raises OSError when the expected ./data directory is missing.
"""
# Use some file location lookup to find the data tables
# When frozen (e.g. PyInstaller), __file__ is unreliable; locate data
# relative to the executable instead.
if getattr(sys, 'frozen', False):
thisfile = pathlib.Path(sys.executable).resolve()
elif __file__:
thisfile = pathlib.Path(__file__).resolve()
rootdir = thisfile.parent / 'data'
if not rootdir.is_dir():
raise OSError('Cannot find root directory: {}'.format(rootdir))
self.rootdir = rootdir
# Milestone images used by maybe_draw_encouragement()
self.imagedir = rootdir.parent / 'images'
self.dbfile = rootdir / 'RegionDB.sqlite3'
print('Rootdir: {}'.format(self.rootdir))
print('DBfile: {}'.format(self.dbfile))
self.db = load_selection_db(self.dbfile)
self.records = list(find_all_images(rootdir))
self.annotated_records = self.db.find_annotated_records()
# Per-image state (reset by load_next_record)
self.cur_record = None
self.cur_record_idx = 0
self.cur_record_start = None
self.cur_region = None
self.cur_x0 = None
self.cur_y0 = None
# Active selection class and tool
self.cur_sel_class = sel_class
self.cur_sel_mode = sel_mode
# One of 'normal' | 'encouragement' | 'finished'
self.display_mode = 'normal'
self.help_objects = []
self.encourage_objects = []
self.dpi = None
self.cur_cross = Crosshair(mode='off', window=self)
self.shape_manager = gui.ShapeManager(window=self)
# Currently drawn artists, keyed by their shape tuples
self.points = {}
self.rects = {}
@property
def cur_filepath(self):
    """Current image path relative to rootdir as a str, or None if unset."""
    record = self.cur_record
    return None if record is None else str(record.relative_to(self.rootdir))
@property
def figsize(self):
    """Current figure size in inches, as (width, height)."""
    size = self.fig.get_size_inches()
    return size
@property
def markersize(self):
    """Marker size scaled with the figure so dots stay visible."""
    # Scale relative to a 38.4x21.3 inch reference figure
    width_scale = self.figsize[0] / 38.4
    height_scale = self.figsize[1] / 21.3
    return round(max(width_scale, height_scale) * MARKERSIZE)
def get_color(self, sel_class=None):
    """Return the marker color for *sel_class* (default: current class)."""
    key = self.cur_sel_class if sel_class is None else sel_class
    return SEL_CLASS_COLORS[key]
def load_window(self):
""" Create the figure, axis, and canvas sized to fill the screen. """
window = plt.get_current_fig_manager().window
screen_x, screen_y = None, None
# FIXME: Make this work with non-TkAgg backends
# wm_maxsize() is Tk-specific: largest usable window size in pixels
screen_x, screen_y = window.wm_maxsize()
print('Screen: {}x{}'.format(screen_x, screen_y))
self.dpi = int(mpl.rcParams['figure.dpi'])
print('DPI: {}'.format(self.dpi))
# Matplotlib sizes figures in inches: pixels / dpi
figsize = (screen_x / self.dpi, screen_y / self.dpi)
# Force the window to be as fullscreen as we can
self.fig = plt.gcf()
self.fig.set_size_inches(figsize[0], figsize[1])
self.fig.canvas.set_window_title('Cell Locator')
try:
# 'zoomed' is only supported by some Tk platforms; fall back otherwise
window.state('zoomed')
except tkinter.TclError:
window.state('normal')
plt.draw()
self.ax = self.fig.gca()
self.canvas = self.fig.canvas
if NIGHT_MODE:
self.fig.patch.set_facecolor('black')
# Disable the default shortcut keys so our own key bindings win
self.canvas.mpl_disconnect(self.canvas.manager.key_press_handler_id)
# Created lazily by the first load_image() call
self.ax_img = None
def load_image(self, step=1):
""" Load the image *step* entries away, wrapping around the record list.

:param step: offset from the current record index (may be negative)
"""
self.cur_record_idx = (self.cur_record_idx + step) % len(self.records)
self.cur_record = self.records[self.cur_record_idx]
# Timestamp used later to log how long this tile was viewed
self.cur_record_start = time.monotonic()
img = Image.open(str(self.cur_record))
img = np.asarray(img)
# Normalize grayscale / single-channel images to 3-channel RGB
if img.ndim == 2:
img = np.stack([img, img, img], axis=2)
elif img.shape[2] == 1:
img = np.concatenate([img, img, img], axis=2)
assert img.ndim == 3
assert img.shape[2] == 3
self.cur_image = img
if self.ax_img is None:
self.ax_img = self.ax.imshow(self.cur_image, aspect='equal')
else:
# Reuse the existing AxesImage: swap the data and extent in place
rows, cols = img.shape[:2]
self.ax_img.set_data(self.cur_image)
self.ax_img.set_extent((0, cols, rows, 0))
self.ax.set_xticks([])
self.ax.set_yticks([])
plt.tight_layout()
def load_bounds(self):
    """Recompute the on-screen image bbox and hand it to the shape manager."""
    # This one seems to actually follow the cells
    tight = self.ax.get_tightbbox(self.canvas)
    im_bbox = ((tight.x0, tight.y0), (tight.x1, tight.y1))
    # The shape manager also needs the data aspect ratio to map clicks
    aspect = self.ax.get_data_ratio()
    self.shape_manager.load_axis_bounds(im_bbox, aspect)
def connect(self):
    """Wire up the matplotlib event callbacks, remembering their ids."""
    mpl_connect = self.canvas.mpl_connect
    self.cid_close = mpl_connect('close_event', self.on_window_close)
    self.cid_press = mpl_connect('button_press_event', self.on_mouse_press)
    self.cid_keypress = mpl_connect('key_press_event', self.on_key_press)
    self.cid_resize = mpl_connect('resize_event', self.on_resize)
def clear_shapes(self, draw=True):
    """Remove every selection shape; redraw unless *draw* is falsy."""
    self.shape_manager.on_clear_all()
    if draw:
        self.canvas.draw()
def load_points(self):
    """Restore the saved points for the current image from the database."""
    for p_class, px, py in self.db.find_points(self.cur_filepath):
        self.shape_manager.on_point_complete(p_class, px, py)
    self.canvas.draw()
def save_points(self):
    """Persist the selected points and log the time spent on this image."""
    shapes = self.shape_manager.points
    self.db.set_points(self.cur_filepath,
                       classes=[s.sel_class for s in shapes],
                       points=[(s.x, s.y) for s in shapes])
    self.db.add_view(self.cur_filepath,
                     self.cur_record_start,
                     time.monotonic())
def draw_point(self, point_obj):
    """Draw one point tuple (class, x, y) as a colored figure marker."""
    if point_obj in self.points:
        return  # already drawn
    p_class, px, py = point_obj
    fx, fy = self.shape_manager.warp_to_figure(px, py)
    # Offset by the figure origin (bounds is x0, y0, width, height)
    x0, y0, _, _ = self.fig.get_window_extent().bounds
    marker = Line2D([fx + x0], [fy + y0],
                    markersize=self.markersize,
                    linestyle='-', marker='o',
                    color=self.get_color(p_class))
    self.fig.lines.append(marker)
    self.points[point_obj] = marker
def remove_point(self, point_obj):
    """Erase a drawn point; no-op when it was never drawn."""
    line = self.points.pop(point_obj, None)
    if line is not None:
        self.fig.lines.remove(line)
def load_last_index(self):
    """Resume from the most recently viewed image, defaulting to index 0."""
    last_record = self.db.get_last_viewed()
    if last_record is not None:
        last_record = last_record[0]
    matches = [i for i, r in enumerate(self.records) if r.name == last_record]
    # Fall back to the first image when the last-viewed name is missing
    # or ambiguous
    self.cur_record_idx = matches[0] if len(matches) == 1 else 0
def load_next_record(self, step=1):
    """Advance *step* images (wrapping), then restore points and bounds."""
    # Reset per-image state
    self.points = {}
    self.cur_record = None
    self.shape_manager.on_reset_actions()
    self.load_image(step=step)
    if self.cur_record is None:
        print('No more records to process...')
        plt.close()
        return
    self.load_bounds()
    self.load_points()
    self.canvas.draw()
def maybe_draw_encouragement(self):
""" Try to draw a screen to encourage the user.

Shown when the fraction of annotated tiles crosses a milestone; one
milestone exists per '<pct>.jpg' file in self.imagedir. Switches
display_mode to 'encouragement', or to 'finished' at 100%.
"""
if self.display_mode != 'normal':
return
# See if we've added any new annotated images since last save
annotated_records = self.db.find_annotated_records()
new_records = annotated_records - self.annotated_records
if new_records == set():
return
# Milestone percentages are encoded in the image file names
milestones = [float(p.stem) for p in self.imagedir.iterdir()
if p.suffix == '.jpg']
# Cool, now did we cross a milestone
pct_new = len(annotated_records) / len(self.records) * 100
pct_old = len(self.annotated_records) / len(self.records) * 100
print('{:0.1f}% done!'.format(pct_new))
new_milestone = None
for milestone in milestones:
if pct_new >= milestone and pct_old < milestone:
new_milestone = milestone
break
self.annotated_records = annotated_records
if new_milestone is None:
return
image_file = self.imagedir / '{:d}.jpg'.format(int(round(new_milestone)))
img = np.asarray(Image.open(str(image_file)))
rows, cols, _ = img.shape
# Okay, in here we need to draw an overlay
self.display_mode = 'encouragement' if new_milestone < 100 else 'finished'
encourage_objects = []
bbox = self.fig.get_window_extent().bounds
# NOTE(review): .bounds is (x0, y0, width, height); naming the last two
# x1/y1 only matches when the figure origin is (0, 0) — confirm.
x0, y0, x1, y1 = bbox
xct = (x1 + x0)/2
yct = (y1 + y0)/2
# Draw a black background over the image
bg_patch = Rectangle((x0, y0), (x1-x0), (y1-y0),
fill=True, alpha=0.9, color=(0, 0, 0), zorder=99)
encourage_objects.append(bg_patch)
self.fig.patches.append(bg_patch)
# Draw some encouraging text
title = self.fig.text(0.5, 0.9, '{:1.0f}% Complete!'.format(new_milestone),
color='white',
visible=True,
horizontalalignment='center',
family='sans-serif',
zorder=100,
fontsize=32)
encourage_objects.append(title)
# The footer text differs for the final milestone
if new_milestone >= 100:
enc_text = self.fig.text(0.5, 0.1, 'Press any key to exit',
color='white',
visible=True,
horizontalalignment='center',
family='sans-serif',
zorder=100,
fontsize=24)
else:
enc_text = self.fig.text(0.5, 0.1, 'Press any key to continue',
color='white',
visible=True,
horizontalalignment='center',
family='sans-serif',
zorder=100,
fontsize=24)
encourage_objects.append(enc_text)
# Scale the encouragement image to 65% of the figure height,
# preserving its aspect ratio
yext = abs(y1 - y0) * 0.65
xext = cols / rows * yext
simg = Image.fromarray(img)
simg = simg.resize((int(np.floor(xext)), int(np.floor(yext))))
simg = np.asarray(simg)
srows, scols, _ = simg.shape
# Center the scaled image on the figure
enc_img = self.fig.figimage(simg, xo=xct-scols//2, yo=yct-srows//2, zorder=100, alpha=1.0)
encourage_objects.append(enc_img)
# Remember everything drawn so clear_encouragement() can remove it
self.encourage_objects = encourage_objects
plt.draw()
def clear_encouragement(self):
""" Clear the encouragement display """
self.display_mode = 'normal'
for obj in self.encourage_objects:
if obj in self.fig.patches:
self.fig.patches.remove(obj)
if obj in self.fig.texts:
self.fig.texts.remove(obj)
if | |
is reachable
for i in range(0,4):
if (vm5_fixture.ping_to_ip(self.vm2_macvlan_ip.split('/')[0])):
ping_to_macvlan = True
break
self.logger.warn("Retrying ping")
assert ping_to_macvlan, "Ping to macvlan failed."
# checking evpn table
evpn_route = self.agent_inspect[vm5_node_ip].get_vna_evpn_route(
vm5_vrf_id,
vxlanid=self.vn2_vxlan_id,
mac=vm3_macvlan_mac_addr,
ip=self.vm2_macvlan_ip)['mac']
assert evpn_route == str(self.vn2_vxlan_id) + "-" + vm3_macvlan_mac_addr + \
"-" + self.vm2_macvlan_ip, "Mac route for macvlan1 is absent in EVPN table. "
# checking if route for macvlan2 is deleted from vm5 evpn table
try:
evpn_route = self.agent_inspect[vm5_node_ip].get_vna_evpn_route(
vm5_vrf_id,
vxlanid=self.vn2_vxlan_id,
mac=vm2_macvlan_mac_addr,
ip=self.vm2_macvlan_ip)['mac']
except TypeError:
evpn_route = None
assert not evpn_route, "Mac route for macvlan5 is present in EVPN table. "
# checking bridge table
peer = self.agent_inspect[vm5_node_ip].get_vna_layer2_route(
vm5_vrf_id, mac=vm3_macvlan_mac_addr)['routes'][0]['path_list'][0]['peer']
assert peer == "EVPN", "Peer is not EVPN."
# checking if route for macvlan2 is deleted from vm5 bridge table
try:
peer = self.agent_inspect[vm5_node_ip].get_vna_layer2_route(
vm5_vrf_id, mac=vm2_macvlan_mac_addr)['routes'][0]['path_list'][0]['peer']
except TypeError:
peer = None
assert not peer, "MAC1 bridge route is present"
# checking if route for macvlan3 is present in vm5 inet table
route = inspect_h.get_vna_route(
vrf_id=vm5_vrf_id,
ip=self.vm2_macvlan_ip.split("/")[0])
assert route, ('No route seen in inet table for %s' %
(self.vm2_macvlan_ip.split("/")[0]))
assert vm5_mpls_label != route['routes'][0]['path_list'][0]['label'], "Mpls label has not changed."
assert route['routes'][0]['path_list'][0]['nh']['type'] == 'tunnel', "Nh type is not tunnel."
# checking if route for macvlan3 is present vm5 Vrouter inet table
route = inspect_h.get_vrouter_route_table(vm5_vrf_id,
prefix=self.vm2_macvlan_ip.split('/')[0],
prefix_len='128',
get_nh_details=True,
v6=True)
assert route, ('No route seen in vrouter for %s' %
(self.vm2_macvlan_ip))
# checking stitched MAC addr
stitched_mac_cmd = 'contrail-tools rt --get %s --vrf %d --family inet6 | awk \'{print $6}\'| grep \':\'' % (
self.vm2_macvlan_ip, int(vm5_vrf_id))
output = self.inputs.run_cmd_on_server(
vm5_node_ip, stitched_mac_cmd).split("(")[0]
assert EUI(output, dialect=mac_unix_expanded) == EUI(
vm3_macvlan_mac_addr, dialect=mac_unix_expanded), "Stitched mac address is invalid."
return True
# end test_move_ip_across_computes_pkt_mode_l2l3
@preposttest_wrapper
def test_dynamically_disable_maciplearningflag(self):
    '''
    Description: Dynamically disable MAC-IP learning on VN and verify that all routes corresponding to learnt MAC-IP pairs are deleted
    Test steps:
        1. Create macvlan intf on vm4.
        2. Disable mac-ip learning flag on vn1 (the VN vm4 belongs to).
    Pass criteria:
        1. Ping from vm1 to vm4 macvlan intf should not go
        2. MAC route should be deleted in vm1 evpn table
        3. Derived bridge route with peer as EVPN is deleted in vm1
        4. POD IP is deleted from vm1 agent and vrouter inet table
    Maintainer : <EMAIL>
    '''
    # Create a macvlan interface on vm4 and give it an IPv6 address derived
    # from self.vm4_macvlan_ip (address portion + /64 prefix).
    cmds_vm4 = ['ip link add macvlan1 link eth0 type macvlan',
                'ip link set dev macvlan1 up',
                'ip -6 addr add %s dev macvlan1 scope global' % (self.vm4_macvlan_ip.split('/')[0] + "/64"),
                'ifup --force eth0']
    self.vm4_fixture.run_cmd_on_vm(cmds_vm4, as_sudo=True)
    # Turn MAC-IP learning off on the VN; the fixture call returns truthiness
    # of the config update, so assert it succeeded.
    assert self.vn1_fixture.set_mac_ip_learning(
        mac_ip_learning_enable=False)
    # Read the macvlan interface's MAC address from inside the guest.
    mac_cmd = ['ifconfig macvlan1 | grep HWaddr | awk \'{ print $5 }\'']
    vm4_macvlan_mac_addr = list(
        self.vm4_fixture.run_cmd_on_vm(mac_cmd).values())[0]
    # With learning disabled, vm1 must NOT be able to reach vm4's macvlan IP.
    assert not self.vm1_fixture.ping_to_ip(
        self.vm4_macvlan_ip.split('/')[0])
    # Verify the MAC route is absent from vm1's EVPN table.
    # get_vna_evpn_route returns None when the route is missing, so the
    # ['mac'] subscript raises TypeError — treated as "no route".
    vm1_node_ip = self.vm1_fixture.vm_node_ip
    vm1_vrf_id = self.get_vrf_id(self.vn1_fixture, self.vm1_fixture)
    try:
        evpn_route = self.agent_inspect[vm1_node_ip].get_vna_evpn_route(
            vm1_vrf_id,
            vxlanid=self.vn1_vxlan_id,
            mac=vm4_macvlan_mac_addr,
            ip=self.vm4_macvlan_ip)['mac']
    except TypeError:
        evpn_route = None
    assert not evpn_route, "Mac route for macvlan4 is present in EVPN table. "
    # Verify the derived bridge (layer-2) route is gone as well; same
    # TypeError-means-missing convention as above.
    try:
        peer = self.agent_inspect[vm1_node_ip].get_vna_layer2_route(
            vm1_vrf_id, mac=vm4_macvlan_mac_addr)['routes'][0]['path_list'][0]['peer']
    except TypeError:
        peer = None
    assert not peer, "MAC Bridge route is present "
    # Verify the learnt IP was removed from the agent's inet table.
    inspect_h = self.agent_inspect[vm1_node_ip]
    route = inspect_h.get_vna_route(
        vrf_id=vm1_vrf_id,
        ip=self.vm4_macvlan_ip.split("/")[0])
    assert not route, ('Route seen in vrouter for %s' %
                       (self.vm4_macvlan_ip.split("/")[0]))
    # Verify the route is also gone from the vrouter's (dataplane) inet6
    # table: the awk pulls the prefix-length column, so a host route would
    # print "128".
    route_ppl_cmd = 'contrail-tools rt --dump %d --family inet6 | grep %s | awk \'{print $2}\'' % (
        int(vm1_vrf_id), self.vm4_macvlan_ip.split('/')[0])
    output = self.inputs.run_cmd_on_server(vm1_node_ip, route_ppl_cmd)
    assert output != "128", "Route not deleted in vrouter inet table."
    # Cleanup: remove the macvlan interface created above.
    cmd = ['ip link delete macvlan1']
    self.vm4_fixture.run_cmd_on_vm(cmd, as_sudo=True)
# end test_dynamically_disable_maciplearningflag
@preposttest_wrapper
def test_change_fwding_mode(self):
    '''
    Description: dynamically change forwarding mode VN and verify that routes are added/deleted/updated accordingly for MAC-IP pair
    Test steps:
        1. launch pod1 on vm4
        2. Change fwd mode from l2_l3 to l2
    Pass criteria:
        1. When changed from l2 to l2_l3: vm4 macvlan ip is added to vm1 inet table
                                          MAC/IP route added to evpn table
        2. When changed from l2_l3 to l2: vm4 macvlan ip is deleted from vm1 inet table
                                          MAC/IP route deleted from evpn table
        3. On vrouter: flags are updated in vif --list
                       pod ip is added to inet table
    Maintainer : <EMAIL>
    '''
    # Confirm the starting state: vm1's vif carries both L2 and L3 flags
    # (i.e. the VN is currently in l2_l3 forwarding mode).
    # NOTE(review): the agent is looked up via compute_ips[0] while commands
    # run on vm1's own node — presumably vm1 is on the first compute; verify.
    vm1_node_ip = self.vm1_fixture.vm_node_ip
    vif_id = self.agent_inspect[self.inputs.host_data[self.inputs.compute_ips[0]]['name']].get_vna_intf_details(
        self.vm1_fixture.get_tap_intf_of_vm()[0]['name'])[0]['index']
    flag_cmd = "vif --get %s | awk {'print $4'} | grep Flags" % (vif_id)
    flag = self.inputs.run_cmd_on_server(
        vm1_node_ip, flag_cmd).split(":")[1]
    assert ("L2" in flag) and ("L3" in flag), "L3L2 mode is not enabled."
    # Create a macvlan interface on vm4 with an IPv6 address derived from
    # self.vm4_macvlan_ip (address portion + /64 prefix).
    cmds_vm4 = ['ip link add macvlan1 link eth0 type macvlan',
                'ip link set dev macvlan1 up',
                'ip -6 addr add %s dev macvlan1 scope global' % (self.vm4_macvlan_ip.split('/')[0] + "/64"),
                'ifup --force eth0']
    self.vm4_fixture.run_cmd_on_vm(cmds_vm4, as_sudo=True)
    # Read the macvlan interface's MAC address from inside the guest.
    mac_cmd = ['ifconfig macvlan1 | grep HWaddr | awk \'{ print $5 }\'']
    vm4_macvlan_mac_addr = list(
        self.vm4_fixture.run_cmd_on_vm(mac_cmd).values())[0]
    # In l2_l3 mode the macvlan IP must be reachable from vm1.
    assert self.vm1_fixture.ping_to_ip(self.vm4_macvlan_ip.split('/')[0])
    # The learnt MAC/IP pair must appear in vm1's EVPN table as
    # "<vxlan-id>-<mac>-<ip>".
    vm1_vrf_id = self.get_vrf_id(self.vn1_fixture, self.vm1_fixture)
    evpn_route = self.agent_inspect[vm1_node_ip].get_vna_evpn_route(
        vm1_vrf_id,
        vxlanid=self.vn1_vxlan_id,
        mac=vm4_macvlan_mac_addr,
        ip=self.vm4_macvlan_ip)['mac']
    assert evpn_route == str(self.vn1_vxlan_id) + "-" + vm4_macvlan_mac_addr + \
        "-" + self.vm4_macvlan_ip, "Mac route for macvlan4 is absent in EVPN table. "
    # The learnt IP must be present in vm1's agent inet table.
    inspect_h = self.agent_inspect[vm1_node_ip]
    route = inspect_h.get_vna_route(
        vrf_id=vm1_vrf_id,
        ip=self.vm4_macvlan_ip.split("/")[0])
    assert route, ('No route seen in inet table for %s' %
                   (self.vm4_macvlan_ip.split("/")[0]))
    # Now flip the VN's forwarding mode from l2_l3 to l2-only.
    self.vn1_fixture.add_forwarding_mode(
        project_fq_name=self.inputs.project_fq_name,
        vn_name=self.vn1_name,
        forwarding_mode="l2")
    # Re-check the vif flags: L2 must still be set after the mode change.
    vif_id = self.agent_inspect[self.inputs.host_data[self.inputs.compute_ips[0]]['name']].get_vna_intf_details(
        self.vm1_fixture.get_tap_intf_of_vm()[0]['name'])[0]['index']
    flag_cmd = "vif --get %s | awk {'print $4'} | grep Flags" % (vif_id)
    flag = self.inputs.run_cmd_on_server(
        vm1_node_ip, flag_cmd).split(":")[1]
    assert "L2" in flag, "L2 mode is not enabled."
    # The MAC/IP EVPN route must be withdrawn in l2-only mode.
    # get_vna_evpn_route returns None when missing, so the ['mac'] subscript
    # raises TypeError — treated as "no route".
    try:
        evpn_route = self.agent_inspect[vm1_node_ip].get_vna_evpn_route(
            vm1_vrf_id,
            vxlanid=self.vn1_vxlan_id,
            mac=vm4_macvlan_mac_addr,
            ip=self.vm4_macvlan_ip)['mac']
    except TypeError:
        evpn_route = None
    assert not evpn_route, "Mac route for macvlan4 is not deleted in EVPN table. "
    # The learnt IP must be withdrawn from the agent inet table as well.
    inspect_h = self.agent_inspect[vm1_node_ip]
    route = inspect_h.get_vna_route(
        vrf_id=vm1_vrf_id,
        ip=self.vm4_macvlan_ip.split("/")[0])
    assert not route, ('Route seen in inet table for %s' %
                       (self.vm4_macvlan_ip.split("/")[0]))
    # And withdrawn from the vrouter (dataplane) inet table.
    route = inspect_h.get_vrouter_route_table(vm1_vrf_id,
                                              prefix=self.vm4_macvlan_ip.split('/')[0],
                                              prefix_len='128',
                                              get_nh_details=True,
                                              v6=True)
    # BUGFIX: the assertion fires when a route IS still present, so the
    # message must say so (the old text "No route seen in vrouter" was
    # copy-pasted from the positive checks and was misleading on failure).
    assert not route, ('Route seen in vrouter for %s' %
                       (self.vm4_macvlan_ip))
    return True
# end test_change_fwding_mode
@preposttest_wrapper
def test_fifty_macvlans(self):
    '''
    Description: Creating 50 macvlans on a VMI and checking if 50 inet routes are updated.
    Test steps:
        1. Create 50 macvlan intfs on vm1
    Pass criteria:
        1. Ping between vm4 and macvlans should go thru fine.
        2. macvlan ip is added to inet route
    Maintainer : <EMAIL>
    '''
    # Create macvlan1..macvlan50 on vm1. Each interface gets an IPv6
    # address built from vm1's eth0 address: same /64 network, last
    # hextet offset by (5 + i) to avoid colliding with eth0 itself.
    for i in range(1, 51):
        macvlan_ip = ":".join(self.vm1_eth0_ip.split('/')[0].split(
            ':')[:-1]) + ":" + str(int(self.vm1_eth0_ip.split('/')[0].split(':')[-1]) + 5 + i)
        cmds_vm1 = ['ip link add macvlan%d link eth0 type macvlan' % i,
                    'ip link set dev macvlan%d up' % i,
                    'ip -6 addr add %s/64 dev macvlan%d scope global' % (macvlan_ip, i),
                    'ifup --force eth0']
        self.vm1_fixture.run_cmd_on_vm(cmds_vm1, as_sudo=True)
    vm4_node_ip = self.vm4_fixture.vm_node_ip
    vm4_vrf_id = self.get_vrf_id(self.vn1_fixture, self.vm4_fixture)
    inspect_h = self.agent_inspect[vm4_node_ip]
    # Verify each macvlan address: reachable from vm4 and present in
    # vm4's agent inet table. The address expression must match the one
    # used during creation above.
    for i in range(1, 51):
        macvlan_ip = ":".join(self.vm1_eth0_ip.split('/')[0].split(
            ':')[:-1]) + ":" + str(int(self.vm1_eth0_ip.split('/')[0].split(':')[-1]) + 5 + i)
        self.logger.info('Starting ping to macvlan%d' % i)
        # sometimes there is little loss in packets observed while pinging, retrying to ensure pod is reachable
        ping_to_macvlan = False
        # Up to 4 ping attempts per address before declaring failure.
        for j in range(0, 4):
            if(self.vm4_fixture.ping_to_ip(macvlan_ip)):
                ping_to_macvlan = True
                break
            self.logger.warn("Retrying ping")
        assert ping_to_macvlan, ("Ping to macvlan%d failed" % i)
        route = inspect_h.get_vna_route(vrf_id=vm4_vrf_id, ip=macvlan_ip)
        assert route, ('No route seen in inet table for %s' % (macvlan_ip))
    return True
# end test_fifty_macvlans
@preposttest_wrapper
def test_vrouter_agent_restart(self):
'''
Description: Routes are re-learnt when vrouter_agent container is restarted. Forwarding mode = L2L3
Test steps:
1. Create macvlan intf on vm2 and vm3.
Pass criteria:
1. After restart : MAC route should be present in evpn table
derived bridge route with peer as EVPN for MAC1 and MAC2
Maintainer : <EMAIL>
'''
cmds_vm2 = ['ip link add macvlan1 link eth0 type macvlan',
'ip link set dev macvlan1 up',
'ip -6 addr add %s dev macvlan1 scope global' % (self.vm2_macvlan_ip.split('/')[0] + "/64"),
'ifup --force eth0']
cmds_vm3 = ['ip link add macvlan1 link eth0 type macvlan',
'ip link set dev macvlan1 up',
'ip -6 addr add %s dev macvlan1 scope | |
# this is not going to overflow in CPython
raise OverflowError
except OverflowError:
msg = "too many decimal digits in format string"
raise ValueError(msg)
result += c - ord("0")
else:
break
i += 1
if i == start:
result = -1
return result, i
class TemplateFormatter(object):
    """Parser/renderer for str.format-style templates.

    Walks a template string, splitting it into literal runs and
    "{field!conversion:spec}" replacement fields, resolving each field
    against positional ``args`` / keyword ``kwargs`` and formatting it.
    ``formatter_parser`` / ``formatter_field_name_split`` expose the same
    machinery for ``string.Formatter``-style introspection instead of
    rendering.

    NOTE(review): relies on module-level ``_parse_int`` and
    ``StringBuilder``, and on a ``space`` object providing
    ``newtuple``/``newlist``/``iter``/``format`` and ``w_*`` singletons —
    an object-space abstraction defined elsewhere in this file.
    """

    # Auto number state: tracks whether replacement fields use automatic
    # ("{}") or manual ("{0}") numbering; mixing the two is an error.
    ANS_INIT = 1
    ANS_AUTO = 2
    ANS_MANUAL = 3

    def __init__(self, space, template):
        self.space = space
        self.empty = ""
        self.template = template
        self.parser_list_w = None  # used to be a class variable;
                                   # non-None switches to parse-only mode

    def build(self, args, kw):
        """Render the whole template against *args*/*kw* and return it."""
        self.args, self.kwargs = args, kw
        self.auto_numbering = 0
        self.auto_numbering_state = self.ANS_INIT
        # Max nesting depth 2: a spec may itself contain fields, once.
        return self._build_string(0, len(self.template), 2)

    def _build_string(self, start, end, level):
        # Guard against over-deep spec recursion before descending.
        out = StringBuilder()
        if not level:
            raise ValueError("Recursion depth exceeded")
        level -= 1
        s = self.template
        return self._do_build_string(start, end, level, out, s)

    def _do_build_string(self, start, end, level, out, s):
        """Scan s[start:end], copying literals and rendering fields."""
        last_literal = i = start
        while i < end:
            c = s[i]
            i += 1
            if c == "{" or c == "}":
                at_end = i == end
                # Find escaped "{" and "}": "{{" / "}}" emit one literal
                # brace and do not open a field.
                markup_follows = True
                if c == "}":
                    if at_end or s[i] != "}":
                        raise ValueError("Single '}'")
                    i += 1
                    markup_follows = False
                if c == "{":
                    if at_end:
                        raise ValueError("Single '{'")
                    if s[i] == "{":
                        i += 1
                        markup_follows = False
                # Attach literal data, ending just before the brace.
                out.append_slice(s, last_literal, i - 1)
                if not markup_follows:
                    last_literal = i
                    continue
                # Scan to the matching "}", tracking nesting so specs
                # containing "{...}" are captured whole.
                nested = 1
                field_start = i
                recursive = False
                while i < end:
                    c = s[i]
                    if c == "{":
                        recursive = True
                        nested += 1
                    elif c == "}":
                        nested -= 1
                        if not nested:
                            break
                    i += 1
                if nested:
                    raise ValueError("Unmatched '{'")
                rendered = self._render_field(field_start, i, recursive, level)
                out.append(rendered)
                i += 1
                last_literal = i
        out.append_slice(s, last_literal, end)
        return out.build()

    # This is only ever called if we're already unrolling _do_build_string
    def _parse_field(self, start, end):
        """Split a field body into (name, conversion, spec-start-index)."""
        s = self.template
        # Find ":" or "!" — whichever terminates the field name first.
        i = start
        while i < end:
            c = s[i]
            if c == ":" or c == "!":
                end_name = i
                if c == "!":
                    # "!x" conversion; may be followed by ":spec".
                    i += 1
                    if i == end:
                        w_msg = "expected conversion"
                        raise ValueError(w_msg)
                    conversion = s[i]
                    i += 1
                    if i < end:
                        if s[i] != ':':
                            w_msg = "expected ':' after format specifier"
                            raise ValueError(w_msg)
                        i += 1
                else:
                    conversion = None
                    i += 1
                return s[start:end_name], conversion, i
            i += 1
        # No "!" or ":" at all: whole body is the name, empty spec.
        return s[start:end], None, end

    def _get_argument(self, name):
        """Resolve a field name to its argument, then apply lookups."""
        # First, find the argument: the name runs up to the first "[" or ".".
        i = 0
        end = len(name)
        while i < end:
            c = name[i]
            if c == "[" or c == ".":
                break
            i += 1
        empty = not i
        if empty:
            index = -1
        else:
            # index == -1 after this means "not an integer": keyword lookup.
            index, stop = _parse_int(name, 0, i)
            if stop != i:
                index = -1
        use_numeric = empty or index != -1
        # Lock in auto vs. manual numbering on first numeric-style field.
        if self.auto_numbering_state == self.ANS_INIT and use_numeric:
            if empty:
                self.auto_numbering_state = self.ANS_AUTO
            else:
                self.auto_numbering_state = self.ANS_MANUAL
        if use_numeric:
            if self.auto_numbering_state == self.ANS_MANUAL:
                if empty:
                    msg = "switching from manual to automatic numbering"
                    raise ValueError(msg)
            elif not empty:
                msg = "switching from automatic to manual numbering"
                raise ValueError(msg)
        if empty:
            # Automatic numbering: take the next positional slot.
            index = self.auto_numbering
            self.auto_numbering += 1
        if index == -1:
            # Keyword argument lookup.
            kwarg = name[:i]
            arg_key = kwarg
            try:
                w_arg = self.kwargs[arg_key]
            except KeyError:
                raise KeyError(arg_key)
        else:
            # Positional argument lookup.
            try:
                w_arg = self.args[index]
            except IndexError:
                w_msg = "index out of range"
                raise IndexError(w_msg)
            except:
                raise
        return self._resolve_lookups(w_arg, name, i, end)

    def _resolve_lookups(self, w_obj, name, start, end):
        """Apply trailing ".attr" / "[item]" lookups from name[start:end].

        When ``w_obj`` is None the lookups are recorded into
        ``parser_list_w`` (as (is_attr, key) tuples) instead of applied —
        this is the field_name_split introspection path.
        """
        i = start
        while i < end:
            c = name[i]
            if c == ".":
                # Attribute access: collect the name up to next "[" or ".".
                i += 1
                start = i
                while i < end:
                    c = name[i]
                    if c == "[" or c == ".":
                        break
                    i += 1
                if start == i:
                    w_msg = "Empty attribute in format string"
                    raise ValueError(w_msg)
                w_attr = name[start:i]
                if w_obj is not None:
                    w_obj = getattr(w_obj, w_attr)
                else:
                    self.parser_list_w.append(self.space.newtuple([
                        self.space.w_True, w_attr]))
            elif c == "[":
                # Item access: collect everything up to the closing "]".
                got_bracket = False
                i += 1
                start = i
                while i < end:
                    c = name[i]
                    if c == "]":
                        got_bracket = True
                        break
                    i += 1
                if not got_bracket:
                    raise ValueError("Missing ']'")
                if name[start] == '{':
                    # CPython raise TypeError on '{0[{1}]}', pyjs converts
                    raise TypeError('no replacement on fieldname')
                # Integer-looking keys index; everything else is a string key.
                index, reached = _parse_int(name, start, i)
                if index != -1 and reached == i:
                    w_item = index
                else:
                    w_item = name[start:i]
                i += 1  # Skip "]"
                if w_obj is not None:
                    w_obj = w_obj[w_item]
                else:
                    self.parser_list_w.append(self.space.newtuple([
                        self.space.w_False, w_item]))
            else:
                msg = "Only '[' and '.' may follow ']'"
                raise ValueError(msg)
        return w_obj

    def formatter_field_name_split(self):
        """Split the template (a bare field name) into (first, rest-iter).

        ``first`` is an int for numeric names, else the leading name
        string; ``rest`` iterates the recorded attribute/item lookups.
        """
        name = self.template
        i = 0
        end = len(name)
        while i < end:
            c = name[i]
            if c == "[" or c == ".":
                break
            i += 1
        if i == 0:
            index = -1
        else:
            index, stop = _parse_int(name, 0, i)
            if stop != i:
                index = -1
        if index >= 0:
            w_first = index
        else:
            w_first = name[:i]
        #
        self.parser_list_w = []
        # w_obj=None puts _resolve_lookups in record-only mode.
        self._resolve_lookups(None, name, i, end)
        #
        return self.space.newtuple([w_first,
                                    self.space.iter(self.space.newlist(self.parser_list_w))])

    def _convert(self, w_obj, conversion):
        """Apply a "!r" / "!s" conversion to the looked-up object."""
        conv = conversion[0]
        if conv == "r":
            return repr(w_obj)
        elif conv == "s":
            return str(w_obj)
        else:
            # NOTE(review): CPython also accepts "!a" (ascii); not
            # supported here — confirm whether that is intentional.
            raise ValueError("invalid conversion")

    def _render_field(self, start, end, recursive, level):
        """Render one replacement field (template[start:end]) to a string."""
        name, conversion, spec_start = self._parse_field(start, end)
        spec = self.template[spec_start:end]
        # when used from formatter_parser(): record the
        # (literal, name, spec, conversion) tuple instead of rendering.
        if self.parser_list_w is not None:
            if level == 1:  # ignore recursive calls
                startm1 = start - 1
                # Invariant: fields are visited left-to-right, so the
                # literal run since the previous field is non-negative.
                assert startm1 >= self.last_end
                w_entry = self.space.newtuple([
                    self.template[self.last_end:startm1],
                    name,
                    spec,
                    conversion])
                self.parser_list_w.append(w_entry)
                self.last_end = end + 1
            return self.empty
        #
        w_obj = self._get_argument(name)
        if conversion is not None:
            w_obj = self._convert(w_obj, conversion)
        if recursive:
            # The spec itself contains "{...}" fields: expand them first.
            spec = self._build_string(spec_start, end, level)
        w_rendered = self.space.format(w_obj, spec)
        return str(w_rendered)

    def formatter_parser(self):
        """Iterate (literal, field, spec, conversion) tuples, as
        string.Formatter.parse does."""
        self.parser_list_w = []
        self.last_end = 0
        self._build_string(0, len(self.template), 2)
        #
        # Emit the trailing literal (with no field) if any text remains.
        if self.last_end < len(self.template):
            w_lastentry = self.space.newtuple([
                self.template[self.last_end:],
                self.space.w_None,
                self.space.w_None,
                self.space.w_None])
            self.parser_list_w.append(w_lastentry)
        return self.space.iter(self.space.newlist(self.parser_list_w))
# NOTE(review): empty attribute holder — presumably populated dynamically
# by the numeric-formatting code elsewhere in this file; confirm which
# fields are assigned before documenting them here.
class NumberSpec(object):
    pass
class BaseFormatter(object):
    """Abstract interface for type-specific __format__ implementations.

    Subclasses must override all three hooks; each raises
    NotImplementedError here.
    """

    def format_int_or_long(self, w_num, kind):
        """Format an integer; *kind* selects INT_KIND or LONG_KIND."""
        raise NotImplementedError

    def format_float(self, w_num):
        """Format a floating-point number."""
        raise NotImplementedError

    def format_complex(self, w_num):
        """Format a complex number."""
        raise NotImplementedError
# Integer-kind selectors passed to BaseFormatter.format_int_or_long.
INT_KIND = 1
LONG_KIND = 2

# Locale handling modes for thousands-separator formatting
# (no locale / the "n"-type default locale / the current locale).
NO_LOCALE = 1
DEFAULT_LOCALE = 2
CURRENT_LOCALE = 3
class Formatter(BaseFormatter):
"""__format__ implementation for builtin types."""
_grouped_digits = None
def __init__(self, space, spec):
self.space = space
self.empty = ""
self.spec = spec
def _is_alignment(self, c):
return (c == "<" or
c == ">" or
c == "=" or
c == "^")
def _is_sign(self, c):
return (c == " " or
c == "+" or
c == "-")
def _parse_spec(self, default_type, default_align):
self._fill_char = self._lit("\0")[0]
self._align = default_align
self._alternate = False
self._sign = "\0"
self._thousands_sep = False
self._precision = -1
the_type = default_type
spec = self.spec
if not spec:
return True
length = len(spec)
i = 0
got_align = True
if length - i >= 2 and self._is_alignment(spec[i + 1]):
self._align = spec[i + 1]
self._fill_char = spec[i]
i += 2
elif length - i >= 1 and self._is_alignment(spec[i]):
self._align = spec[i]
i += 1
else:
got_align = False
if length - i >= 1 and self._is_sign(spec[i]):
self._sign = spec[i]
i += 1
if length - i >= 1 and spec[i] == "#":
self._alternate = True
i += 1
if self._fill_char == "\0" and length - i >= 1 and spec[i] == "0":
self._fill_char = self._lit("0")[0]
if not got_align:
self._align = "="
i += 1
start_i = i
self._width, i = _parse_int(spec, i, length)
if length != i and spec[i] == ",":
self._thousands_sep = True
i += 1
if length != i and spec[i] == ".":
i += 1
self._precision, i = _parse_int(spec, i, length)
if self._precision == -1:
raise ValueError("no precision given")
if length - i > 1:
raise ValueError("invalid format spec")
if length - i == 1:
presentation_type = spec[i]
the_type = presentation_type
i += 1
self._type = the_type
if self._thousands_sep:
tp = self._type
if (tp == "d" or
tp == "e" or
tp == "f" or
tp == "g" or
tp == "E" or
tp == "G" or
tp == "%" or
tp == "F" or
| |
prop
# NOTE(review): appears to be Pulumi-codegen output; prefer regenerating
# from the CRD schema over hand-editing.
@pulumi.output_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersStartupProbeTcpSocket(dict):
    """
    TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported TODO: implement a realistic TCP lifecycle hook
    """
    def __init__(__self__, *,
                 port: 'outputs.SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersStartupProbeTcpSocketPort',
                 host: Optional[str] = None):
        """
        TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported TODO: implement a realistic TCP lifecycle hook
        :param 'SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersStartupProbeTcpSocketPortArgs' port: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
        :param str host: Optional: Host name to connect to, defaults to the pod IP.
        """
        pulumi.set(__self__, "port", port)
        # Optional field is omitted entirely (not set to None) when absent.
        if host is not None:
            pulumi.set(__self__, "host", host)

    @property
    @pulumi.getter
    def port(self) -> 'outputs.SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersStartupProbeTcpSocketPort':
        """
        Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
        """
        return pulumi.get(self, "port")

    @property
    @pulumi.getter
    def host(self) -> Optional[str]:
        """
        Optional: Host name to connect to, defaults to the pod IP.
        """
        return pulumi.get(self, "host")

    def _translate_property(self, prop):
        # Map camelCase wire names to snake_case Python attribute names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
# NOTE(review): appears to be Pulumi-codegen output for a schema type with
# no declared properties (port accepts an int or an IANA_SVC_NAME string
# on the wire); prefer regenerating over hand-editing.
@pulumi.output_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersStartupProbeTcpSocketPort(dict):
    def __init__(__self__):
        pass

    def _translate_property(self, prop):
        # Map camelCase wire names to snake_case Python attribute names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
# NOTE(review): appears to be Pulumi-codegen output; prefer regenerating
# from the CRD schema over hand-editing.
@pulumi.output_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersVolumeDevices(dict):
    """
    volumeDevice describes a mapping of a raw block device within a container.
    """
    def __init__(__self__, *,
                 device_path: str,
                 name: str):
        """
        volumeDevice describes a mapping of a raw block device within a container.
        :param str device_path: devicePath is the path inside of the container that the device will be mapped to.
        :param str name: name must match the name of a persistentVolumeClaim in the pod
        """
        pulumi.set(__self__, "device_path", device_path)
        pulumi.set(__self__, "name", name)

    @property
    @pulumi.getter(name="devicePath")
    def device_path(self) -> str:
        """
        devicePath is the path inside of the container that the device will be mapped to.
        """
        return pulumi.get(self, "device_path")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        name must match the name of a persistentVolumeClaim in the pod
        """
        return pulumi.get(self, "name")

    def _translate_property(self, prop):
        # Map camelCase wire names to snake_case Python attribute names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
# NOTE(review): appears to be Pulumi-codegen output; prefer regenerating
# from the CRD schema over hand-editing.
@pulumi.output_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecInitContainersVolumeMounts(dict):
    """
    VolumeMount describes a mounting of a Volume within a container.
    """
    def __init__(__self__, *,
                 mount_path: str,
                 name: str,
                 mount_propagation: Optional[str] = None,
                 read_only: Optional[bool] = None,
                 sub_path: Optional[str] = None,
                 sub_path_expr: Optional[str] = None):
        """
        VolumeMount describes a mounting of a Volume within a container.
        :param str mount_path: Path within the container at which the volume should be mounted. Must not contain ':'.
        :param str name: This must match the Name of a Volume.
        :param str mount_propagation: mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10.
        :param bool read_only: Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false.
        :param str sub_path: Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root).
        :param str sub_path_expr: Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive.
        """
        pulumi.set(__self__, "mount_path", mount_path)
        pulumi.set(__self__, "name", name)
        # Optional fields are omitted entirely (not set to None) when absent.
        if mount_propagation is not None:
            pulumi.set(__self__, "mount_propagation", mount_propagation)
        if read_only is not None:
            pulumi.set(__self__, "read_only", read_only)
        if sub_path is not None:
            pulumi.set(__self__, "sub_path", sub_path)
        if sub_path_expr is not None:
            pulumi.set(__self__, "sub_path_expr", sub_path_expr)

    @property
    @pulumi.getter(name="mountPath")
    def mount_path(self) -> str:
        """
        Path within the container at which the volume should be mounted. Must not contain ':'.
        """
        return pulumi.get(self, "mount_path")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        This must match the Name of a Volume.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="mountPropagation")
    def mount_propagation(self) -> Optional[str]:
        """
        mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10.
        """
        return pulumi.get(self, "mount_propagation")

    @property
    @pulumi.getter(name="readOnly")
    def read_only(self) -> Optional[bool]:
        """
        Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false.
        """
        return pulumi.get(self, "read_only")

    @property
    @pulumi.getter(name="subPath")
    def sub_path(self) -> Optional[str]:
        """
        Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root).
        """
        return pulumi.get(self, "sub_path")

    @property
    @pulumi.getter(name="subPathExpr")
    def sub_path_expr(self) -> Optional[str]:
        """
        Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive.
        """
        return pulumi.get(self, "sub_path_expr")

    def _translate_property(self, prop):
        # Map camelCase wire names to snake_case Python attribute names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
# NOTE(review): appears to be Pulumi-codegen output for a schema type with
# no declared properties (the overhead map is free-form on the wire);
# prefer regenerating over hand-editing.
@pulumi.output_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecOverhead(dict):
    def __init__(__self__):
        pass

    def _translate_property(self, prop):
        # Map camelCase wire names to snake_case Python attribute names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
# NOTE(review): appears to be Pulumi-codegen output; prefer regenerating
# from the CRD schema over hand-editing.
@pulumi.output_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecReadinessGates(dict):
    """
    PodReadinessGate contains the reference to a pod condition
    """
    def __init__(__self__, *,
                 condition_type: str):
        """
        PodReadinessGate contains the reference to a pod condition
        :param str condition_type: ConditionType refers to a condition in the pod's condition list with matching type.
        """
        pulumi.set(__self__, "condition_type", condition_type)

    @property
    @pulumi.getter(name="conditionType")
    def condition_type(self) -> str:
        """
        ConditionType refers to a condition in the pod's condition list with matching type.
        """
        return pulumi.get(self, "condition_type")

    def _translate_property(self, prop):
        # Map camelCase wire names to snake_case Python attribute names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class SeldonDeploymentSpecPredictorsComponentSpecsSpecSecurityContext(dict):
"""
SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field.
"""
def __init__(__self__, *,
fs_group: Optional[int] = None,
fs_group_change_policy: Optional[str] = None,
run_as_group: Optional[int] = None,
run_as_non_root: Optional[bool] = None,
run_as_user: Optional[int] = None,
se_linux_options: Optional['outputs.SeldonDeploymentSpecPredictorsComponentSpecsSpecSecurityContextSeLinuxOptions'] = None,
supplemental_groups: Optional[Sequence[int]] = None,
sysctls: Optional[Sequence['outputs.SeldonDeploymentSpecPredictorsComponentSpecsSpecSecurityContextSysctls']] = None,
windows_options: Optional['outputs.SeldonDeploymentSpecPredictorsComponentSpecsSpecSecurityContextWindowsOptions'] = None):
"""
SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field.
:param int fs_group: A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:
1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----
If unset, the Kubelet will not modify the ownership and permissions of any volume.
:param str fs_group_change_policy: fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are "OnRootMismatch" and "Always". If not specified defaults to "Always".
:param int run_as_group: The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.
:param bool run_as_non_root: Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
:param int run_as_user: The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.
:param 'SeldonDeploymentSpecPredictorsComponentSpecsSpecSecurityContextSeLinuxOptionsArgs' se_linux_options: The SELinux context to be applied to all containers. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.
:param Sequence[int] supplemental_groups: A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups | |
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (C) 2012-2016 GEM Foundation
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
"""
Module exports :class:`AbrahamsonSilva2008`.
"""
from __future__ import division
import numpy as np
from openquake.hazardlib.gsim.base import GMPE, CoeffsTable
from openquake.hazardlib import const
from openquake.hazardlib.imt import PGA, PGV, SA
class AbrahamsonSilva2008(GMPE):
"""
Implements GMPE developed by <NAME> and <NAME> and
published as "Summary of the Abrahamson & Silva NGA Ground-Motion
Relations" (2008, Earthquakes Spectra, Volume 24, Number 1, pages 67-97).
This class implements only the equations for mainshock/foreshocks/swarms
type events, that is the aftershock term (4th term in equation 1, page 74)
is set to zero. The constant displacement model (page 80) is also not
implemented (that is equation 1, page 74 is used for all periods and no
correction is applied for periods greater than the constant displacement
period). This class implements also the corrections (for standard
deviation and hanging wall term calculation) as described in:
http://peer.berkeley.edu/products/abrahamson-silva_nga_report_files/
AS08_NGA_errata.pdf
"""
#: Supported tectonic region type is active shallow crust, see paragraph
#: 'Data Set Selection', see page 68.
DEFINED_FOR_TECTONIC_REGION_TYPE = const.TRT.ACTIVE_SHALLOW_CRUST
#: Supported intensity measure types are spectral acceleration, peak
#: ground velocity and peak ground acceleration, see tables 5a and 5b
#: pages 84, 85, respectively.
DEFINED_FOR_INTENSITY_MEASURE_TYPES = set([
PGA,
PGV,
SA
])
#: Supported intensity measure component is orientation-independent
#: average horizontal :attr:`~openquake.hazardlib.const.IMC.GMRotI50`,
#: see abstract, page 67.
DEFINED_FOR_INTENSITY_MEASURE_COMPONENT = const.IMC.GMRotI50
#: Supported standard deviation types are inter-event, intra-event
#: and total, see paragraph "Equations for standard deviations", page 81.
DEFINED_FOR_STANDARD_DEVIATION_TYPES = set([
const.StdDev.TOTAL,
const.StdDev.INTER_EVENT,
const.StdDev.INTRA_EVENT
])
#: Required site parameters are Vs30, Vs30 type (measured or inferred),
#: and Z1.0, see paragraph 'Soil Depth Model', page 79, and table 6,
#: page 86.
REQUIRES_SITES_PARAMETERS = set(('vs30', 'vs30measured', 'z1pt0'))
#: Required rupture parameters are magnitude, rake, dip, ztor, and width
#: (see table 2, page 75)
REQUIRES_RUPTURE_PARAMETERS = set(('mag', 'rake', 'dip', 'ztor', 'width'))
#: Required distance measures are Rrup, Rjb and Rx (see Table 2, page 75).
REQUIRES_DISTANCES = set(('rrup', 'rjb', 'rx'))
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
    """
    See :meth:`superclass method
    <.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
    for spec of input and result values.
    """
    # Coefficient rows for the requested IMT and for PGA (the latter is
    # needed by the standard deviation model).
    coeffs = self.COEFFS[imt]
    coeffs_pga = self.COEFFS[PGA()]
    # Median PGA on reference rock (vs30 = 1100 m/s), which drives the
    # nonlinear site response term.
    pga1100 = np.exp(self._compute_imt1100(PGA(), sites, rup, dists))
    # Accumulate the terms of equation 1 one at a time, preserving the
    # original summation order.
    mean = self._compute_base_term(coeffs, rup, dists)
    mean = mean + self._compute_faulting_style_term(coeffs, rup)
    mean = mean + self._compute_site_response_term(coeffs, imt, sites,
                                                   pga1100)
    mean = mean + self._compute_hanging_wall_term(coeffs, dists, rup)
    mean = mean + self._compute_top_of_rupture_depth_term(coeffs, rup)
    mean = mean + self._compute_large_distance_term(coeffs, dists, rup)
    mean = mean + self._compute_soil_depth_term(coeffs, imt, sites.z1pt0,
                                                sites.vs30)
    stddevs = self._get_stddevs(coeffs, coeffs_pga, pga1100, rup, sites,
                                stddev_types)
    return mean, stddevs
def _compute_base_term(self, C, rup, dists):
    """
    Compute and return base model term, that is the first term in equation
    1, page 74. The calculation of this term is explained in paragraph
    'Base Model', page 75.
    """
    c1 = self.CONSTS['c1']
    # Effective distance with near-source saturation via constant c4.
    R = np.sqrt(dists.rrup ** 2 + self.CONSTS['c4'] ** 2)
    base_term = (C['a1'] +
                 C['a8'] * ((8.5 - rup.mag) ** 2) +
                 (C['a2'] + self.CONSTS['a3'] * (rup.mag - c1)) *
                 np.log(R))
    # Magnitude scaling changes slope at the hinge magnitude c1:
    # a4 below (or at) the hinge, a5 above it.
    slope = self.CONSTS['a4'] if rup.mag <= c1 else self.CONSTS['a5']
    return base_term + slope * (rup.mag - c1)
def _compute_faulting_style_term(self, C, rup):
    """
    Compute and return faulting style term, that is the sum of the second
    and third terms in equation 1, page 74.
    """
    # Rake ranges for each faulting mechanism come from table 2, page 75:
    # reverse events use a12, normal events use a13.
    is_reverse = 1.0 if 30 < rup.rake < 150 else 0.0
    is_normal = 1.0 if -120 < rup.rake < -60 else 0.0
    return C['a12'] * is_reverse + C['a13'] * is_normal
def _compute_site_response_term(self, C, imt, sites, pga1100):
    """
    Compute and return site response model term, that is the fifth term
    in equation 1, page 74.
    """
    vs30_star, _ = self._compute_vs30_star_factor(imt, sites.vs30)
    vlin = C['VLIN']
    a10 = C['a10']
    b = C['b']
    c = self.CONSTS['c']
    n = self.CONSTS['n']
    term = np.zeros_like(sites.vs30)
    # Nonlinear branch: sites softer than VLIN respond as a function of
    # the rock PGA level (pga1100).
    soft = sites.vs30 < vlin
    ratio = vs30_star[soft] / vlin
    term[soft] = (a10 * np.log(ratio) -
                  b * np.log(pga1100[soft] + c) +
                  b * np.log(pga1100[soft] + c * (ratio ** n)))
    # Linear branch: sites at or above VLIN.
    stiff = sites.vs30 >= vlin
    term[stiff] = (a10 + b * n) * np.log(vs30_star[stiff] / vlin)
    return term
def _compute_hanging_wall_term(self, C, dists, rup):
    """
    Compute and return hanging wall model term, that is the sixth term in
    equation 1, page 74. The calculation of this term is explained in
    paragraph 'Hanging-Wall Model', page 77.

    The term is the product of the hanging-wall indicator ``Fhw`` with
    the taper functions T1-T3 (geometry tapers), T4 (magnitude taper)
    and T5 (dip taper, from the errata), scaled by coefficient a14.
    """
    if rup.dip == 90.0:
        # Vertical fault: no hanging wall side, so the term vanishes.
        return np.zeros_like(dists.rx)
    else:
        # Fhw = 1 only for sites on the hanging-wall side (rx > 0).
        idx = dists.rx > 0
        Fhw = np.zeros_like(dists.rx)
        Fhw[idx] = 1
        # equation 8, page 77: taper in Rjb, reaching 0 at Rjb >= 30 km
        T1 = np.zeros_like(dists.rx)
        idx1 = (dists.rjb < 30.0) & (idx)
        T1[idx1] = 1.0 - dists.rjb[idx1] / 30.0
        # equation 9, page 77: taper over the surface projection of the
        # down-dip rupture width (W * cos(dip))
        T2 = np.ones_like(dists.rx)
        idx2 = ((dists.rx <= rup.width * np.cos(np.radians(rup.dip))) &
                (idx))
        T2[idx2] = (0.5 + dists.rx[idx2] /
                    (2 * rup.width * np.cos(np.radians(rup.dip))))
        # equation 10, page 78: taper with depth to top of rupture; note
        # idx3 requires rx > 0, so the division never runs when ztor == 0
        T3 = np.ones_like(dists.rx)
        idx3 = (dists.rx < rup.ztor) & (idx)
        T3[idx3] = dists.rx[idx3] / rup.ztor
        # equation 11, page 78: linear magnitude taper between M6 and M7
        if rup.mag <= 6.0:
            T4 = 0.0
        elif rup.mag > 6 and rup.mag < 7:
            T4 = rup.mag - 6
        else:
            T4 = 1.0
        # equation 5, in AS08_NGA_errata.pdf: dip taper for dip >= 30 deg
        if rup.dip >= 30:
            T5 = 1.0 - (rup.dip - 30.0) / 60.0
        else:
            T5 = 1.0
        return Fhw * C['a14'] * T1 * T2 * T3 * T4 * T5
def _compute_top_of_rupture_depth_term(self, C, rup):
    """
    Compute and return top of rupture depth term, that is the seventh term
    in equation 1, page 74. The calculation of this term is explained in
    paragraph 'Depth-to-Top of Rupture Model', page 78.
    """
    # The effect grows linearly with ztor and saturates at 10 km depth.
    if rup.ztor >= 10.0:
        return C['a16']
    return C['a16'] * rup.ztor / 10.0
def _compute_large_distance_term(self, C, dists, rup):
    """
    Compute and return large distance model term, that is the 8-th term
    in equation 1, page 74. The calculation of this term is explained in
    paragraph 'Large Distance Model', page 78.
    """
    # equation 15, page 79: magnitude taper T6, branch order reversed
    # relative to the paper but covering the same ranges.
    if rup.mag > 6.5:
        taper = 0.5
    elif rup.mag >= 5.5:
        taper = 0.5 * (6.5 - rup.mag) + 0.5
    else:
        taper = 1.0
    # equation 14, page 79: the correction only applies beyond 100 km
    term = np.zeros_like(dists.rrup)
    far = dists.rrup >= 100.0
    term[far] = C['a18'] * (dists.rrup[far] - 100.0) * taper
    return term
def _compute_soil_depth_term(self, C, imt, z1pt0, vs30):
    """
    Compute and return soil depth model term, that is the 9-th term in
    equation 1, page 74. The calculation of this term is explained in
    paragraph 'Soil Depth Model', page 79.
    """
    c2 = self.CONSTS['c2']
    a21 = self._compute_a21_factor(C, imt, z1pt0, vs30)
    median_z1pt0 = self._compute_median_z1pt0(vs30)
    # Scaling of Z1.0 relative to the median value expected for the
    # site's vs30.
    term = a21 * np.log((z1pt0 + c2) / (median_z1pt0 + c2))
    # Additional deep-soil correction only where Z1.0 >= 200 m.
    deep = z1pt0 >= 200
    term[deep] += (self._compute_a22_factor(imt) *
                   np.log(z1pt0[deep] / 200))
    return term
def _compute_imt1100(self, imt, sites, rup, dists):
    """
    Compute and return mean imt value for rock conditions
    (vs30 = 1100 m/s).
    """
    coeffs = self.COEFFS[imt]
    # Reference rock profile: every site is forced to vs30 = 1100 m/s.
    vs30_1100 = np.zeros_like(sites.vs30) + 1100
    vs30_star, _ = self._compute_vs30_star_factor(imt, vs30_1100)
    # Same term-by-term accumulation (and order) as the full mean, but
    # with the linear-only site response appropriate for vs30 = 1100.
    mean = self._compute_base_term(coeffs, rup, dists)
    mean = mean + self._compute_faulting_style_term(coeffs, rup)
    mean = mean + self._compute_hanging_wall_term(coeffs, dists, rup)
    mean = mean + self._compute_top_of_rupture_depth_term(coeffs, rup)
    mean = mean + self._compute_large_distance_term(coeffs, dists, rup)
    mean = mean + self._compute_soil_depth_term(coeffs, imt, sites.z1pt0,
                                                vs30_1100)
    # Site response term collapses to its linear branch at vs30 = 1100.
    mean = mean + ((coeffs['a10'] + coeffs['b'] * self.CONSTS['n']) *
                   np.log(vs30_star / coeffs['VLIN']))
    return mean
def _get_stddevs(self, C, C_PGA, pga1100, rup, sites, stddev_types):
"""
Return standard deviations as described in paragraph 'Equations for
standard deviation', | |
# utility_active.py
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as tick
# Color
from matplotlib.colors import LinearSegmentedColormap
import cartopy.crs as ccrs
projection = ccrs.Mollweide(central_longitude=0)
import matplotlib.colors as colors
# Colour-blind-friendly diverging palette anchors (RGB triplets in 0-1),
# interpolated into a high-resolution colormap below.
color_zesty_cbf = [(0.0, 0.10980392156862745, 0.30196078431372547),
(0.5019607843137255, 0.6862745098039216, 1.0),
(1, 1, 1),
(1.0, 0.5372549019607843, 0.30196078431372547),
(0.30196078431372547, 0.10196078431372549, 0.0)] # dark bluish -> bright blue -> white -> bright orange -> darker orange
# Large N gives an effectively continuous gradient between the anchors.
cm_zesty_cbf = LinearSegmentedColormap.from_list("zesty_cbf", color_zesty_cbf, N=10001)
class MidpointNormalize(colors.Normalize):
    """Colormap normalization that pins *midpoint* at the colormap centre.

    Maps [vmin, midpoint, vmax] linearly onto [0, 0.5, 1], so a diverging
    colormap stays centred (here on 0.0, see call sites) even when the
    data range is asymmetric about the midpoint.
    """
    def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
        # Data value that should map to 0.5 (colormap centre).
        self.midpoint = midpoint
        colors.Normalize.__init__(self, vmin, vmax, clip)

    def __call__(self, value, clip=None):
        # NOTE(review): the *clip* argument is ignored, as in the common
        # recipe; np.interp already clamps to the endpoint values outside
        # [vmin, vmax], so the result stays within [0, 1].
        x, y = [self.vmin, self.midpoint, self.vmax], [0.0, 0.5, 1.0]
        return np.ma.masked_array(np.interp(value, x, y))
def plot_while_learning(epoch):
# Plot learning curve
fig = plt.figure(figsize=(9,9), constrained_layout=True) # Initiate figure with constrained layout
gs = fig.add_gridspec(1, 2) # Add 1x2 grid
ax1 = fig.add_subplot(gs[0, :])
# Finalize plots
ax1.clear()
ax1.set_title('Error CN')
ax1.set_xlabel("Epoch")
ax1.set_xlim([np.min(epoch_range),N_epochs])
#ax1.set_ylim([0.0,np.max([E_valid_collect,E_train_collect])])
ax1.grid()
ax1.semilogy(epoch_range, E_train_collect[:,0], '-', color="C0", label = "E training")
ax1.semilogy(epoch_range, E_valid_collect[:,0], '--', color="C0", label = "E validation")
ax1.semilogy(epoch_range, C_train_collect[:,0], '-', color="C1", label = "C training")
ax1.semilogy(epoch_range, C_valid_collect[:,0], '--', color="C1", label = "C validation")
ax1.semilogy(epoch_range, Li_train_collect[:,0], '-', color="C2", label = "Li training")
ax1.semilogy(epoch_range, Li_valid_collect[:,0], '--', color="C2", label = "Li validation")
ax1.semilogy(epoch_range, sat_train_collect[:,0], '-', color="C3", label = "sat training")
ax1.semilogy(epoch_range, sat_valid_collect[:,0], '--', color="C3", label = "sat validation")
ax1.text(0.5, 0.9, "Current lr: " + str(optimizer.param_groups[0]["lr"]),
horizontalalignment='center', verticalalignment='center', transform=ax1.transAxes)
ax1.legend(loc="upper right")
fig.canvas.draw()
fig.savefig('nets/training_sequences/training_test_{}'.format(epoch), bbox_inches='tight', dpi = 100)
# Plot validation batch RMSE
sat_p = sat_in_v[:,:].permute(1,0).detach().cpu().numpy()
C_op = C_ov[:,:].permute(1,0).detach().cpu().numpy()
Li_op = Li_ov[:,:].permute(1,0).detach().cpu().numpy()
C_lp = C_lv[:,:].permute(1,0).detach().cpu().numpy()
Li_lp = Li_lv[:,:].permute(1,0).detach().cpu().numpy()
# Label
clip_op = Li_op.copy()
clip_op[:mt_util.shc_vec_len(n_cut_max)] += C_op[:mt_util.shc_vec_len(n_cut_max),:]
clip_lp = Li_lp.copy()
clip_lp[:mt_util.shc_vec_len(n_cut_max)] += C_lp[:mt_util.shc_vec_len(n_cut_max),:]
sat_op = Gr@clip_op
sat_lp = Gr@clip_lp
rmse_v_b = np.sqrt(np.mean((sat_lp-sat_op)**2,axis=1))
rmse_v = np.sqrt(np.mean((sat_lp-sat_op)**2,axis=0))
fig = plt.figure(figsize=(9,9), constrained_layout=True) # Initiate figure with constrained layout
gs = fig.add_gridspec(1, 2)
ax1 = fig.add_subplot(gs[0, 0])
ax1.clear()
ax1.set_title("Validation sat obs RMSE, mean over batch")
ax1.set_xlabel("[nT]")
ax1.set_ylabel("Count")
ax1.grid()
ax1.hist(rmse_v_b.reshape(-1),bins=21)
ax2 = fig.add_subplot(gs[0, 1])
ax2.clear()
ax2.set_title("Validation sat obs RMSE, mean over obs")
ax2.set_xlabel("[nT]")
ax2.set_ylabel("Count")
ax2.grid()
ax2.hist(rmse_v.reshape(-1),bins=21)
fig.canvas.draw()
fig.savefig('nets/training_sequences/rmse_val_test_{}'.format(epoch), bbox_inches='tight', dpi = 100)
# Plot training batch RMSE
sat_p = sat_in_t[:,:].permute(1,0).detach().cpu().numpy()
C_op = C_ot[:,:].permute(1,0).detach().cpu().numpy()
Li_op = Li_ot[:,:].permute(1,0).detach().cpu().numpy()
C_lp = C_lt[:,:].permute(1,0).detach().cpu().numpy()
Li_lp = Li_lt[:,:].permute(1,0).detach().cpu().numpy()
# Label
clip_op = Li_op.copy()
clip_op[:mt_util.shc_vec_len(n_cut_max)] += C_op[:mt_util.shc_vec_len(n_cut_max),:]
clip_lp = Li_lp.copy()
clip_lp[:mt_util.shc_vec_len(n_cut_max)] += C_lp[:mt_util.shc_vec_len(n_cut_max),:]
sat_op = Gr@clip_op
sat_lp = Gr@clip_lp
rmse_v_b = np.sqrt(np.mean((sat_lp-sat_op)**2,axis=1))
rmse_v = np.sqrt(np.mean((sat_lp-sat_op)**2,axis=0))
fig = plt.figure(figsize=(9,9), constrained_layout=True) # Initiate figure with constrained layout
gs = fig.add_gridspec(1, 2)
ax1 = fig.add_subplot(gs[0, 0])
ax1.clear()
ax1.set_title("Training sat obs RMSE, mean over batch")
ax1.set_xlabel("[nT]")
ax1.set_ylabel("Count")
ax1.grid()
ax1.hist(rmse_v_b.reshape(-1),bins=21)
ax2 = fig.add_subplot(gs[0, 1])
ax2.clear()
ax2.set_title("Training sat obs RMSE, mean over obs")
ax2.set_xlabel("[nT]")
ax2.set_ylabel("Count")
ax2.grid()
ax2.hist(rmse_v.reshape(-1),bins=21)
fig.canvas.draw()
fig.savefig('nets/training_sequences/rmse_tra_test_{}'.format(epoch), bbox_inches='tight', dpi = 100)
# Plot fit training
fig = plt.figure(figsize=(9,6), constrained_layout=True) # Initiate figure with constrained layout
gs = fig.add_gridspec(3, 3) # Add 3x3 grid
ax1 = fig.add_subplot(gs[0, 0], projection=projection)
ax2 = fig.add_subplot(gs[0, 1], projection=projection)
ax12 = fig.add_subplot(gs[0, 2])
ax3 = fig.add_subplot(gs[1, 0], projection=projection)
ax4 = fig.add_subplot(gs[1, 1], projection=projection)
ax34 = fig.add_subplot(gs[1, 2])
ax5 = fig.add_subplot(gs[2, 0], projection=projection)
ax6 = fig.add_subplot(gs[2, 1], projection=projection)
ax56 = fig.add_subplot(gs[2, 2])
sat_p = sat_in_t[0,:].detach().cpu().numpy()
C_op = C_ot[0,:].detach().cpu().numpy()
Li_op = Li_ot[0,:].detach().cpu().numpy()
C_lp = C_lt[0,:].detach().cpu().numpy()
Li_lp = Li_lt[0,:].detach().cpu().numpy()
# Input
ax1.clear()
ax1.set_title("Li+C input obs")
im1 = ax1.scatter(clip.grid_phi, 90-clip.grid_theta, s=10, c=sat_p, marker = "o",
transform=ccrs.PlateCarree(), rasterized=True, cmap=cm_zesty_cbf,
norm = MidpointNormalize(midpoint=0.0)) #, vmin = -5*10**4, vmax = 5*10**4
ax1.coastlines(linewidth = 0.2, color = (0.4,0.4,0.4))
ax1.axis('off')
# Label
clip_op = Li_op.copy()
#clip_op[:i_n_C] += C_op[:mt_util.shc_vec_len(20)]
clip_op[:mt_util.shc_vec_len(n_cut_max)] += C_op[:mt_util.shc_vec_len(n_cut_max)]
clip_lp = Li_lp.copy()
#clip_lp[:i_n_C] += C_lp
clip_lp[:mt_util.shc_vec_len(n_cut_max)] += C_lp[:mt_util.shc_vec_len(n_cut_max)]
sat_op = Gr@clip_op
sat_lp = Gr@clip_lp
ax2.clear()
ax2.set_title("Net output obs")
im2 = ax2.scatter(clip.grid_phi, 90-clip.grid_theta, s=10, c=sat_op, marker = "o",
transform=ccrs.PlateCarree(), rasterized=True, cmap=cm_zesty_cbf,
norm = MidpointNormalize(midpoint=0.0)) #, vmin = -5*10**4, vmax = 5*10**4
ax2.coastlines(linewidth = 0.2, color = (0.4,0.4,0.4))
ax2.axis('off')
ax12.clear()
ax12.set_title("Residuals")
ax12.hist((sat_op-sat_p).reshape(-1),bins=21)
# C Label
#C_lpm = Gr_C@C_lp
C_lpm = Gr_C[:,:mt_util.shc_vec_len(n_cut_max)]@C_lp[:mt_util.shc_vec_len(n_cut_max)]
ax3.clear()
ax3.set_title("Dynamo simulation core")
im3 = ax3.scatter(clip.grid_phi, 90-clip.grid_theta, s=10, c=C_lpm, marker = "o",
transform=ccrs.PlateCarree(), rasterized=True, cmap=cm_zesty_cbf,
norm = MidpointNormalize(midpoint=0.0)) #, vmin = -2*10**6, vmax = 2*10**6
ax3.coastlines(linewidth = 0.2, color = (0.4,0.4,0.4))
ax3.axis('off')
# C output
C_opm = Gr_C[:,:mt_util.shc_vec_len(n_cut_max)]@C_op[:mt_util.shc_vec_len(n_cut_max)]
#C_opm = Gr_C@C_op
ax4.clear()
ax4.set_title("Net output shc core")
im4 = ax4.scatter(clip.grid_phi, 90-clip.grid_theta, s=10, c=C_opm, marker = "o",
transform=ccrs.PlateCarree(), rasterized=True, cmap=cm_zesty_cbf,
norm = MidpointNormalize(midpoint=0.0)) #, vmin = -2*10**6, vmax = 2*10**6
ax4.coastlines(linewidth = 0.2, color = (0.4,0.4,0.4))
ax4.axis('off')
ax34.clear()
ax34.set_title("Residuals")
ax34.hist((C_opm-C_lpm).reshape(-1),bins=21)
# Li Label
Li_lpm = Gr_Li@Li_lp
ax5.clear()
ax5.set_title("Crustal lith")
im5 = ax5.scatter(clip.grid_phi, 90-clip.grid_theta, s=10, c=Li_lpm, marker = "o",
transform=ccrs.PlateCarree(), rasterized=True, cmap=cm_zesty_cbf,
norm = MidpointNormalize(midpoint=0.0)) #, vmin = -3*10**2, vmax = 3*10**2
ax5.coastlines(linewidth = 0.2, color = (0.4,0.4,0.4))
ax5.axis('off')
# Li output
Li_opm = Gr_Li@Li_op
ax6.clear()
ax6.set_title("Net output shc lith")
im6 = ax6.scatter(clip.grid_phi, 90-clip.grid_theta, s=10, c=Li_opm, marker = "o",
transform=ccrs.PlateCarree(), rasterized=True, cmap=cm_zesty_cbf,
norm = MidpointNormalize(midpoint=0.0)) #, vmin = -3*10**2, vmax = 3*10**2
ax6.coastlines(linewidth = 0.2, color = (0.4,0.4,0.4))
ax6.axis('off')
ax56.clear()
ax56.set_title("Residuals")
ax56.hist((Li_opm-Li_lpm).reshape(-1),bins=21)
# End
fig.canvas.draw()
fig.savefig('nets/training_sequences/fit_test_tra_{}'.format(epoch), bbox_inches='tight', dpi = 100)
# Plot fit
fig = plt.figure(figsize=(9,6), constrained_layout=True) # Initiate figure with constrained layout
gs = fig.add_gridspec(3, 3) # Add 3x3 grid
ax1 = fig.add_subplot(gs[0, 0], projection=projection)
ax2 = fig.add_subplot(gs[0, 1], projection=projection)
ax12 = fig.add_subplot(gs[0, 2])
ax3 = fig.add_subplot(gs[1, 0], projection=projection)
ax4 = fig.add_subplot(gs[1, 1], projection=projection)
ax34 = fig.add_subplot(gs[1, 2])
ax5 = fig.add_subplot(gs[2, 0], projection=projection)
ax6 = fig.add_subplot(gs[2, 1], projection=projection)
ax56 = fig.add_subplot(gs[2, 2])
sat_p = sat_in_v[0,:].detach().cpu().numpy()
C_op = C_ov[0,:].detach().cpu().numpy()
Li_op = Li_ov[0,:].detach().cpu().numpy()
C_lp = C_lv[0,:].detach().cpu().numpy()
Li_lp = Li_lv[0,:].detach().cpu().numpy()
# Input
ax1.clear()
ax1.set_title("Li+C input obs")
im1 = ax1.scatter(clip.grid_phi, 90-clip.grid_theta, s=10, c=sat_p, marker = "o",
transform=ccrs.PlateCarree(), rasterized=True, cmap=cm_zesty_cbf,
norm = MidpointNormalize(midpoint=0.0)) #, vmin = -5*10**4, vmax = 5*10**4
ax1.coastlines(linewidth = 0.2, color = (0.4,0.4,0.4))
ax1.axis('off')
# Label
clip_op = Li_op.copy()
#clip_op[:i_n_C] += C_op[:mt_util.shc_vec_len(20)]
clip_op[:mt_util.shc_vec_len(n_cut_max)] += C_op[:mt_util.shc_vec_len(n_cut_max)]
clip_lp = Li_lp.copy()
#clip_lp[:i_n_C] += C_lp
clip_lp[:mt_util.shc_vec_len(n_cut_max)] += C_lp[:mt_util.shc_vec_len(n_cut_max)]
sat_op = Gr@clip_op
sat_lp = Gr@clip_lp
ax2.clear()
ax2.set_title("Net output obs")
im2 = ax2.scatter(clip.grid_phi, 90-clip.grid_theta, s=10, c=sat_op, marker = "o",
transform=ccrs.PlateCarree(), rasterized=True, cmap=cm_zesty_cbf,
norm = MidpointNormalize(midpoint=0.0)) #, vmin = -5*10**4, vmax = 5*10**4
ax2.coastlines(linewidth = 0.2, color = (0.4,0.4,0.4))
ax2.axis('off')
ax12.clear()
ax12.set_title("Residuals")
ax12.hist((sat_op-sat_p).reshape(-1),bins=21)
# C Label
#C_lpm = Gr_C@C_lp
C_lpm = Gr_C[:,:mt_util.shc_vec_len(n_cut_max)]@C_lp[:mt_util.shc_vec_len(n_cut_max)]
ax3.clear()
ax3.set_title("Dynamo simulation core")
im3 = ax3.scatter(clip.grid_phi, 90-clip.grid_theta, s=10, c=C_lpm, marker = "o",
transform=ccrs.PlateCarree(), rasterized=True, cmap=cm_zesty_cbf,
norm = MidpointNormalize(midpoint=0.0)) #, vmin = -2*10**6, vmax = 2*10**6
ax3.coastlines(linewidth = 0.2, color = (0.4,0.4,0.4))
ax3.axis('off')
# C output
C_opm = Gr_C[:,:mt_util.shc_vec_len(n_cut_max)]@C_op[:mt_util.shc_vec_len(n_cut_max)]
#C_opm = Gr_C@C_op
ax4.clear()
ax4.set_title("Net output shc core")
im4 = ax4.scatter(clip.grid_phi, 90-clip.grid_theta, s=10, c=C_opm, marker = "o",
transform=ccrs.PlateCarree(), rasterized=True, cmap=cm_zesty_cbf,
norm = MidpointNormalize(midpoint=0.0)) #, vmin = -2*10**6, vmax = 2*10**6
ax4.coastlines(linewidth = 0.2, color = (0.4,0.4,0.4))
ax4.axis('off')
ax34.clear()
ax34.set_title("Residuals")
ax34.hist((C_opm-C_lpm).reshape(-1),bins=21)
# Li Label
Li_lpm = Gr_Li@Li_lp
ax5.clear()
ax5.set_title("Crustal lith")
im5 = ax5.scatter(clip.grid_phi, 90-clip.grid_theta, s=10, c=Li_lpm, marker = "o",
transform=ccrs.PlateCarree(), rasterized=True, cmap=cm_zesty_cbf,
norm = MidpointNormalize(midpoint=0.0)) #, vmin = -3*10**2, vmax = 3*10**2
ax5.coastlines(linewidth = 0.2, color = (0.4,0.4,0.4))
ax5.axis('off')
# Li output
Li_opm = Gr_Li@Li_op
ax6.clear()
ax6.set_title("Net output shc lith")
im6 = ax6.scatter(clip.grid_phi, 90-clip.grid_theta, s=10, c=Li_opm, marker = "o",
transform=ccrs.PlateCarree(), rasterized=True, cmap=cm_zesty_cbf,
norm = MidpointNormalize(midpoint=0.0)) #, vmin = -3*10**2, vmax = 3*10**2
ax6.coastlines(linewidth = 0.2, color = (0.4,0.4,0.4))
ax6.axis('off')
ax56.clear()
ax56.set_title("Residuals")
ax56.hist((Li_opm-Li_lpm).reshape(-1),bins=21)
# End
fig.canvas.draw()
fig.savefig('nets/training_sequences/fit_test_val_{}'.format(epoch), bbox_inches='tight', dpi = 100)
# P spec
C_op = C_ot[:5,:].detach().cpu().numpy()
Li_op = Li_ot[:5,:].detach().cpu().numpy()
C_lp = C_lt[:5,:].detach().cpu().numpy()
Li_lp = Li_lt[:5,:].detach().cpu().numpy()
nmax_pairs = np.ones(5,dtype=int)*int(n_max_C)
label = ["1","2","3","4","5"]
mt_util.plot_p_spec(C_op, clip.r_cmb, n_max_C, g_spec_compares = C_lp, nmax_pairs = nmax_pairs,
nmax_pairs_compare = nmax_pairs, spec_style="pair_compare", figsize=(9,9), label=label,
savefig = True, save_string = 'C_test_tra_{}'.format(epoch),
save_folder="nets/training_sequences/")
nmax_pairs = np.ones(5,dtype=int)*int(n_max_Li)
label = ["1","2","3","4","5"]
mt_util.plot_p_spec(Li_op, clip.a, n_max_Li, g_spec_compares = Li_lp, nmax_pairs = nmax_pairs,
nmax_pairs_compare = nmax_pairs, spec_style="pair_compare", figsize=(9,9), label=label,
savefig = True, save_string = 'Li_test_tra_{}'.format(epoch),
save_folder="nets/training_sequences/")
# P spec
C_op = C_ov[:5,:].detach().cpu().numpy()
Li_op = Li_ov[:5,:].detach().cpu().numpy()
C_lp = C_lv[:5,:].detach().cpu().numpy()
Li_lp = Li_lv[:5,:].detach().cpu().numpy()
nmax_pairs = np.ones(5,dtype=int)*int(n_max_C)
label = ["1","2","3","4","5"]
mt_util.plot_p_spec(C_op, clip.r_cmb, n_max_C, g_spec_compares = C_lp, nmax_pairs = nmax_pairs, | |
from membase.api.rest_client import RestConnection
from membase.helper.rebalance_helper import RebalanceHelper
from rebalance_new.rebalance_base import RebalanceBaseTest
from BucketLib.BucketOperations import BucketHelper
from rebalance_new import rebalance_base
from sdk_exceptions import SDKException
class RebalanceInOutTests(RebalanceBaseTest):
def setUp(self):
    """Delegate cluster/bucket fixture setup to RebalanceBaseTest."""
    super(RebalanceInOutTests, self).setUp()
def tearDown(self):
    """Delegate cluster/bucket fixture teardown to RebalanceBaseTest."""
    super(RebalanceInOutTests, self).tearDown()
def test_rebalance_in_out_after_mutation(self):
    """
    Rebalances nodes out and in of the cluster while doing mutations.
    Use different nodes_in and nodes_out params to have uneven add and deletion. Use 'zone'
    param to have nodes divided into server groups by having zone > 1.
    This test begins by loading a given number of items into the cluster. It then
    removes one node, rebalances that node out the cluster, and then rebalances it back
    in. During the rebalancing we update all of the items in the cluster. Once the
    node has been removed and added back we wait for the disk queues to drain, and
    then verify that there has been no data loss, sum(curr_items) match the curr_items_total.
    We then remove and add back two nodes at a time and so on until we have reached the point
    where we are adding back and removing at least half of the nodes.
    """
    # Shuffle the nodes if zone > 1 is specified.
    if self.zone > 1:
        self.shuffle_nodes_between_zones_and_rebalance()
    # Step 1: update all docs; atomicity mode uses the transactional
    # loader, otherwise async-load and assert no per-task doc-op failure.
    gen = self.get_doc_generator(0, self.num_items)
    if self.atomicity:
        self._load_all_buckets_atomicty(gen, "rebalance_only_update")
    else:
        tasks_info = self.bucket_util._async_load_all_buckets(
            self.cluster, gen, "update", 0,
            sdk_client_pool=self.sdk_client_pool)
        for task in tasks_info:
            self.task_manager.get_task_result(task)
        self.bucket_util.verify_doc_op_task_exceptions(
            tasks_info, self.cluster,
            sdk_client_pool=self.sdk_client_pool)
        self.bucket_util.log_doc_ops_task_failures(tasks_info)
        for task, task_info in tasks_info.items():
            self.assertFalse(
                task_info["ops_failed"],
                "Doc ops failed for task: {}".format(task.thread_name))
    # Step 2: pick the nodes to add/remove and the expected final set.
    servs_in = self.cluster.servers[self.nodes_init:self.nodes_init + self.nodes_in]
    servs_out = self.cluster.servers[self.nodes_init - self.nodes_out:self.nodes_init]
    result_nodes = list(set(self.cluster.servers[:self.nodes_init] + servs_in) - set(servs_out))
    if not self.atomicity:
        self.bucket_util._wait_for_stats_all_buckets()
        self.bucket_util.validate_docs_per_collections_all_buckets(
            timeout=self.wait_timeout)
    self.sleep(20)
    # Step 3: snapshot vbucket seqnos / failover logs / on-disk datasets
    # before the rebalance so they can be compared afterwards.
    prev_vbucket_stats = self.bucket_util.get_vbucket_seqnos(
        self.cluster.servers[:self.nodes_init], self.bucket_util.buckets)
    prev_failover_stats = self.bucket_util.get_failovers_logs(
        self.cluster.servers[:self.nodes_init], self.bucket_util.buckets)
    disk_replica_dataset, disk_active_dataset = self.bucket_util.get_and_compare_active_replica_data_set_all(
        self.cluster.servers[:self.nodes_init], self.bucket_util.buckets, path=None)
    self.bucket_util.compare_vbucketseq_failoverlogs(prev_vbucket_stats, prev_failover_stats)
    # Step 4: perform the swap rebalance (in + out in one operation).
    self.add_remove_servers_and_rebalance(servs_in, servs_out)
    self.sleep(30)
    if not self.atomicity:
        self.bucket_util.validate_docs_per_collections_all_buckets(
            timeout=self.wait_timeout)
        self.bucket_util.verify_cluster_stats(self.num_items, check_ep_items_remaining=True,
                                              timeout=self.wait_timeout)
    # Step 5: verify no data loss by comparing post-rebalance stats with
    # the pre-rebalance snapshots.
    new_failover_stats = self.bucket_util.compare_failovers_logs(prev_failover_stats, result_nodes, self.bucket_util.buckets)
    new_vbucket_stats = self.bucket_util.compare_vbucket_seqnos(prev_vbucket_stats, result_nodes, self.bucket_util.buckets,
                                                                perNode=False)
    self.bucket_util.compare_vbucketseq_failoverlogs(new_vbucket_stats, new_failover_stats)
    self.sleep(30)
    self.bucket_util.data_analysis_active_replica_all(disk_active_dataset, disk_replica_dataset, result_nodes, self.bucket_util.buckets,
                                                      path=None)
    self.bucket_util.verify_unacked_bytes_all_buckets()
    nodes = self.cluster.nodes_in_cluster
    #self.bucket_util.vb_distribution_analysis(servers=nodes, std=1.0, total_vbuckets=self.cluster_util.vbuckets)
def test_rebalance_in_out_with_failover_addback_recovery(self):
    """
    Rebalances nodes out and in with failover and full/delta recovery add back of a node
    Use different nodes_in and nodes_out params to have uneven add and deletion. Use 'zone'
    param to have nodes divided into server groups by having zone > 1.
    This test begins by loading a given number of items into the cluster. It then
    removes one node, rebalances that node out the cluster, and then rebalances it back
    in. During the rebalancing we update all of the items in the cluster. Once the
    node has been removed and added back we wait for the disk queues to drain, and
    then verify that there has been no data loss, sum(curr_items) match the curr_items_total.
    We then remove and add back two nodes at a time and so on until we have reached the point
    where we are adding back and removing at least half of the nodes.
    """
    recovery_type = self.input.param("recoveryType", "full")
    # Step 1: update all docs; atomicity mode uses the transactional
    # loader, otherwise async-load and assert no per-task doc-op failure.
    gen = self.get_doc_generator(0, self.num_items)
    if self.atomicity:
        self._load_all_buckets_atomicty(gen, "rebalance_only_update")
    else:
        tasks_info = self.bucket_util._async_load_all_buckets(
            self.cluster, gen, "update", 0,
            sdk_client_pool=self.sdk_client_pool)
        for task in tasks_info:
            self.task_manager.get_task_result(task)
    servs_in = self.cluster.servers[self.nodes_init:self.nodes_init + self.nodes_in]
    servs_out = self.cluster.servers[self.nodes_init - self.nodes_out:self.nodes_init]
    if not self.atomicity:
        self.bucket_util.verify_doc_op_task_exceptions(
            tasks_info, self.cluster,
            sdk_client_pool=self.sdk_client_pool)
        self.bucket_util.log_doc_ops_task_failures(tasks_info)
        for task, task_info in tasks_info.items():
            self.assertFalse(
                task_info["ops_failed"],
                "Doc ops failed for task: {}".format(task.thread_name))
        self.bucket_util._wait_for_stats_all_buckets()
        self.bucket_util.validate_docs_per_collections_all_buckets(
            timeout=self.wait_timeout)
    # Update replica value before performing rebalance in/out as given in conf file
    if self.replica_to_update:
        bucket_helper = BucketHelper(self.cluster.master)
        self.log.info("Updating replica count of bucket to {0}"
                      .format(self.replica_to_update))
        bucket_helper.change_bucket_props(
            self.bucket_util.buckets[0],
            replicaNumber=self.replica_to_update)
        # self.bucket_util.buckets[0].replicaNumber = self.replica_to_update
    self.sleep(20)
    # Snapshot vbucket seqnos / failover logs / on-disk datasets before
    # the failover so data loss can be detected afterwards.
    prev_vbucket_stats = self.bucket_util.get_vbucket_seqnos(self.cluster.servers[:self.nodes_init], self.bucket_util.buckets)
    prev_failover_stats = self.bucket_util.get_failovers_logs(self.cluster.servers[:self.nodes_init], self.bucket_util.buckets)
    disk_replica_dataset, disk_active_dataset = self.bucket_util.get_and_compare_active_replica_data_set_all(
        self.cluster.servers[:self.nodes_init], self.bucket_util.buckets, path=None)
    self.bucket_util.compare_vbucketseq_failoverlogs(prev_vbucket_stats, prev_failover_stats)
    self.rest = RestConnection(self.cluster.master)
    self.nodes = self.cluster.nodes_in_cluster
    chosen = self.cluster_util.pick_nodes(self.cluster.master, howmany=1)
    for node in servs_in:
        self.rest.add_node(self.cluster.master.rest_username, self.cluster.master.rest_password, node.ip, node.port)
    # Mark Node for failover
    self.sleep(30)
    success_failed_over = self.rest.fail_over(chosen[0].id, graceful=False)
    # Mark the failed-over node for recovery (full or delta per conf)
    if success_failed_over:
        self.rest.set_recovery_type(otpNode=chosen[0].id, recoveryType=recovery_type)
    self.sleep(30)
    try:
        self.shuffle_nodes_between_zones_and_rebalance(servs_out)
    # FIX: use the Python 2.6+/3-compatible `except ... as e` form instead
    # of the Python-2-only `except Exception, e`, and str(e) over
    # e.__str__().
    except Exception as e:
        # Delta recovery is expected to be rejected while nodes are being
        # added; any other rebalance failure is a genuine test failure.
        if "deltaRecoveryNotPossible" not in str(e):
            self.fail("Rebalance did not fail. Rebalance has to fail since no delta recovery should be possible"
                      " while adding nodes too")
def test_rebalance_in_out_with_failover(self):
    """
    Rebalances nodes out and in with failover
    Use different nodes_in and nodes_out params to have uneven add and deletion. Use 'zone'
    param to have nodes divided into server groups by having zone > 1.
    This test begins by loading a given number of items into the cluster. It then
    removes one node, rebalances that node out the cluster, and then rebalances it back
    in. During the rebalancing we update all of the items in the cluster. Once the
    node has been removed and added back we wait for the disk queues to drain, and
    then verify that there has been no data loss, sum(curr_items) match the curr_items_total.
    We then remove and add back two nodes at a time and so on until we have reached the point
    where we are adding back and removing at least half of the nodes.
    """
    # graceful vs. hard failover is driven by the conf parameter.
    fail_over = self.input.param("fail_over", False)
    # Step 1: update all docs; atomicity mode uses the transactional
    # loader, otherwise async-load and assert no per-task doc-op failure.
    gen = self.get_doc_generator(0, self.num_items)
    if self.atomicity:
        self._load_all_buckets_atomicty(gen, "rebalance_only_update")
    else:
        tasks_info = self.bucket_util._async_load_all_buckets(
            self.cluster, gen, "update", 0,
            sdk_client_pool=self.sdk_client_pool)
        for task in tasks_info:
            self.task_manager.get_task_result(task)
    servs_in = self.cluster.servers[self.nodes_init:self.nodes_init + self.nodes_in]
    servs_out = self.cluster.servers[self.nodes_init - self.nodes_out:self.nodes_init]
    if not self.atomicity:
        self.bucket_util.verify_doc_op_task_exceptions(
            tasks_info, self.cluster,
            sdk_client_pool=self.sdk_client_pool)
        self.bucket_util.log_doc_ops_task_failures(tasks_info)
        for task, task_info in tasks_info.items():
            self.assertFalse(
                task_info["ops_failed"],
                "Doc ops failed for task: {}".format(task.thread_name))
        self.bucket_util._wait_for_stats_all_buckets()
        self.bucket_util.validate_docs_per_collections_all_buckets(
            timeout=self.wait_timeout)
    # Update replica value before performing rebalance in/out
    if self.replica_to_update:
        bucket_helper = BucketHelper(self.cluster.master)
        # Update bucket replica to new value as given in conf file
        self.log.info("Updating replica count of bucket to {0}"
                      .format(self.replica_to_update))
        bucket_helper.change_bucket_props(
            self.bucket_util.buckets[0], replicaNumber=self.replica_to_update)
        # self.bucket_util.buckets[0].replicaNumber = self.replica_to_update
    self.sleep(20)
    # Snapshot vbucket seqnos / failover logs / on-disk datasets before
    # failover + rebalance for the post-run data-loss comparison.
    prev_vbucket_stats = self.bucket_util.get_vbucket_seqnos(self.cluster.servers[:self.nodes_init], self.bucket_util.buckets)
    prev_failover_stats = self.bucket_util.get_failovers_logs(self.cluster.servers[:self.nodes_init], self.bucket_util.buckets)
    disk_replica_dataset, disk_active_dataset = self.bucket_util.get_and_compare_active_replica_data_set_all(
        self.cluster.servers[:self.nodes_init], self.bucket_util.buckets, path=None)
    self.bucket_util.compare_vbucketseq_failoverlogs(prev_vbucket_stats, prev_failover_stats)
    self.rest = RestConnection(self.cluster.master)
    chosen = self.cluster_util.pick_nodes(self.cluster.master, howmany=1)
    # Expected final node set: initial + added - removed - failed-over.
    result_nodes = list(set(self.cluster.servers[:self.nodes_init] + servs_in) - set(servs_out))
    result_nodes = [node for node in result_nodes if node.ip != chosen[0].ip]
    for node in servs_in:
        self.rest.add_node(self.cluster.master.rest_username, self.cluster.master.rest_password, node.ip, node.port)
    # Mark Node for failover
    self.rest.fail_over(chosen[0].id, graceful=fail_over)
    self.shuffle_nodes_between_zones_and_rebalance(servs_out)
    self.cluster.nodes_in_cluster = result_nodes
    if not self.atomicity:
        # Verify no data loss against the pre-rebalance snapshots.
        self.bucket_util.verify_cluster_stats(
            self.num_items,
            check_ep_items_remaining=True,
            timeout=self.wait_timeout)
        self.bucket_util.compare_failovers_logs(prev_failover_stats,
                                                result_nodes,
                                                self.bucket_util.buckets)
        self.sleep(30)
        self.bucket_util.data_analysis_active_replica_all(
            disk_active_dataset, disk_replica_dataset, result_nodes,
            self.bucket_util.buckets, path=None)
        self.bucket_util.verify_unacked_bytes_all_buckets()
    nodes = self.cluster.nodes_in_cluster
    # self.bucket_util.vb_distribution_analysis(servers=nodes,
    # std=1.0, total_vbuckets=self.cluster_util.vbuckets)
def test_incremental_rebalance_in_out_with_mutation(self):
"""
Rebalances nodes out and in of the cluster while doing mutations.
Use 'zone' param to have nodes divided into server groups by having zone > 1.
This test begins by loading a given number of items into the cluster. It then
removes one node, rebalances that node out the cluster, and then rebalances it back
in. During the rebalancing we update all of the items in the cluster. Once the
node has been removed and added back we wait for the disk queues to drain, and
then verify that there has been no data loss, sum(curr_items) match the curr_items_total.
We then remove and add back two nodes at a time and so on until we have reached the point
where we are adding back and removing at least half of the nodes.
"""
self.add_remove_servers_and_rebalance(self.cluster.servers[self.nodes_init:self.num_servers], [])
self.doc_ops = "update"
self.gen_update = self.get_doc_generator(0, self.num_items)
for i in reversed(range(self.num_servers)[self.num_servers / 2:]):
# CRUDs while rebalance is running in parallel
tasks_info = self.loadgen_docs(retry_exceptions=rebalance_base.retry_exceptions)
self.add_remove_servers_and_rebalance([], self.cluster.servers[i:self.num_servers])
self.sleep(10)
for task in tasks_info:
self.task_manager.get_task_result(task)
self.bucket_util.verify_doc_op_task_exceptions(
tasks_info, self.cluster,
sdk_client_pool=self.sdk_client_pool)
self.bucket_util.log_doc_ops_task_failures(tasks_info)
for task, task_info in tasks_info.items():
self.assertFalse(
task_info["ops_failed"],
"Doc ops failed for task: {}".format(task.thread_name))
tasks_info = self.loadgen_docs(
retry_exceptions=rebalance_base.retry_exceptions)
self.add_remove_servers_and_rebalance(
self.cluster.servers[i:self.num_servers], [])
for task in tasks_info:
self.task_manager.get_task_result(task)
self.bucket_util.verify_doc_op_task_exceptions(
tasks_info, self.cluster,
sdk_client_pool=self.sdk_client_pool)
self.bucket_util.log_doc_ops_task_failures(tasks_info)
self.bucket_util.verify_cluster_stats(self.num_items, timeout=self.wait_timeout)
self.bucket_util.verify_unacked_bytes_all_buckets()
def test_incremental_rebalance_in_out_with_mutation_and_compaction(self):
"""
Rebalances nodes out and in of the cluster while doing mutations and compaction.
Use 'zone' param to have nodes divided into server groups by having zone > 1.
This test begins by loading a given number of items into the cluster. It then
removes one node, rebalances that node out the cluster, and then rebalances it back
in. During the rebalancing we update all of the items in the cluster. Once the
node has been removed and added back we wait for the disk queues to drain, and
then verify that there has been no data | |
= self._DealCalcPriceForSwap(propIGet, propIGive, 0)
if price == Deal.NO :
return self._DealReject()
if price < 0 :
price = -price
return self._DealAccept(monopyly.DealResponse(
action = monopyly.DealResponse.Action.ACCEPT,
minimum_cash_wanted = int(price)))
return self._DealAccept(monopyly.DealResponse(
action = monopyly.DealResponse.Action.ACCEPT,
maximum_cash_offered = int(price)))
def _NbrPropertiesPlayerNeedsForSet(self, player, prop) :
'''How many properties of the set type of prop does player need to own the set?'''
propSet = prop.property_set
nbrOwned = sum(1 for p in propSet.properties if p.owner == player)
return len(propSet.properties) - nbrOwned
    def deal_result(self, dealInfo):
        """Callback reporting the outcome of a previously proposed deal.

        NOTE(review): the `if TOURNAMENT :` guard was commented out but its
        bare `return` was left in place, so every statement below it is
        currently unreachable dead code — confirm this is intentional.
        """
        #if TOURNAMENT :
        return
        # --- unreachable below (see NOTE in docstring) ---
        if dealInfo == monopyly.PlayerAIBase.DealInfo.INVALID_DEAL_PROPOSED :
            raise Exception('invalid deal')
        if dealInfo == monopyly.PlayerAIBase.DealInfo.SUCCEEDED :
            self._nbrSuccessfulDeals += 1
        if self._dealInProgress is not None :
            self._DealResult(dealInfo)
            self._dealInProgress = None
def player_went_bankrupt(self, player):
for i, p in enumerate(self._otherPlayers) :
if p.name == player.name :
del self._otherPlayers[i]
break
def game_over(self, winner, maximumRoundsPlayed) :
if not TOURNAMENT :
print('turn %d. nbr auctions = %d, deals (all, me) = (%d, %d)' % (self._turnCounter, self._nbrAuctions, self._nbrSuccessfulDeals, self._nbrSuccessfulDealsWithMe))
def ai_error(self, message):
if not TOURNAMENT :
print(message)
os.abort()
'''
Called if the return value from any of the Player AI functions
was invalid. for example, if it was not of the expected type.
No response is required.
'''
#################################################################################
# Utilities — ideally moved to a separate module.
# Probabilities of landing on each square in one turn, measured empirically by
# running 1 AI for 500,000 turns and rounded to 3 decimal places.
# NOTE: 'Community Chest' and 'Chance' aggregate several board squares, which
# is why their frequencies are much higher than any single square's.
__probabilities = {
    'Mayfair' : 0.060,
    'Park Lane' : 0.049,
    'Regent Street' : 0.061,
    'Bond Street' : 0.056,
    'Oxford Street' : 0.059,
    'Trafalgar Square' : 0.073,
    'Leicester Square' : 0.062,
    'Coventry Street' : 0.061,
    'Piccadilly' : 0.060,
    'Strand' : 0.064,
    'Fleet Street' : 0.062,
    'Vine Street' : 0.069,
    'Marlborough Street' : 0.068,
    'Bow Street' : 0.064,
    'Northumberland Avenue' : 0.058,
    'Pall Mall' : 0.062,
    'Whitehall' : 0.053,
    'Pentonville Road' : 0.053,
    'Euston Road' : 0.054,
    'The Angel Islington' : 0.053,
    'Whitechapel Road' : 0.050,
    'Old Kent Road' : 0.060,
    'Community Chest' : 0.177,
    'Chance' : 0.171,
    'Jail' : 0.144,
    'Go' : 0.071,
    'Marylebone Station' : 0.070,
    'Free Parking' : 0.066,
    'Fenchurch Street Station' : 0.063,
    'Water Works' : 0.061,
    'Go To Jail' : 0.061,
    'Electric Company' : 0.055,
    'Liverpool Street Station' : 0.054,
    'Income Tax' : 0.054,
    'Kings Cross Station' : 0.052,
    'Super Tax' : 0.049,
}
# Expected-rent figures per property set, keyed by set enum.
# Each value is (P * set_rent_no_houses, P * set_rent_hotels) where P is the
# probability of landing on any square in the set in one turn (see
# __probabilities above).
__setInfo = {
    monopyly.squares.PropertySet.GREEN : (9.4, 231.2),
    monopyly.squares.PropertySet.YELLOW : (8.1, 213.9),
    monopyly.squares.PropertySet.RED: (7.5, 212.8),
    monopyly.squares.PropertySet.ORANGE : (5.9, 194.1),
    monopyly.squares.PropertySet.DARK_BLUE : (9.4, 193.3),
    monopyly.squares.PropertySet.PURPLE : (3.7, 138.0),
    monopyly.squares.PropertySet.LIGHT_BLUE: (2.1, 90.7),
    monopyly.squares.PropertySet.BROWN : (0.6, 37.5),
    monopyly.squares.PropertySet.STATION : (47.8, 47.8),  # stations have no houses/hotels
    monopyly.squares.PropertySet.UTILITY : (5, 5) # TODO: placeholder value
}
# Crude desirability ranking of property sets, 1 (least) to 10 (most).
# Used by SimpleSetScore() below.
__simpleSetScores = {
    monopyly.squares.PropertySet.UTILITY : 1,
    monopyly.squares.PropertySet.BROWN : 2,
    monopyly.squares.PropertySet.STATION : 3,
    monopyly.squares.PropertySet.LIGHT_BLUE : 4,
    monopyly.squares.PropertySet.PURPLE : 5,
    monopyly.squares.PropertySet.ORANGE : 6,
    monopyly.squares.PropertySet.RED : 7,
    monopyly.squares.PropertySet.YELLOW : 8,
    monopyly.squares.PropertySet.GREEN : 9,
    monopyly.squares.PropertySet.DARK_BLUE : 10,
}
def SimpleSetScore(propSetName):
    """Return the crude desirability rank (1-10) for the given property set."""
    table = __simpleSetScores
    return table[propSetName]
class PropertyInfo:
    """Landing probability and rent table for one board square."""

    def __init__(self, name, prob, rents):
        self.name = name
        self.prob = prob      # probability of landing on this square in one turn
        self.rents = rents    # [set rent (no houses), base rent, 1..4 houses, hotel]
        # Expected rent per turn for each house level.
        self.averageRents = [r * prob for r in rents]
__propertyInfo = {} # maps square name -> PropertyInfo; populated by InitialisePropertyInfo().
def InitialisePropertyInfo(squares):
    """Populate the module-level name -> PropertyInfo table from board squares.

    Streets derive their rent table from the board data; stations and
    utilities use fixed tables. Non-rentable squares are skipped.
    """
    for sq in squares:
        if isinstance(sq, monopyly.Street):
            # Prepend the "whole set, no houses" rent (double the base rent).
            rent_table = [sq.rents[0] * 2] + sq.rents
        elif isinstance(sq, monopyly.Station):
            rent_table = [200, 25, 200, 200, 200]
        elif isinstance(sq, monopyly.Utility):
            # 10x / 4x dice multipliers, average roll of 7.
            rent_table = [10 * 7, 4 * 7, 10 * 7, 10 * 7, 10 * 7]
        else:
            rent_table = []
        if rent_table:
            __propertyInfo[sq.name] = PropertyInfo(
                sq.name, __probabilities[sq.name], rent_table)
def GetPropertyInfo(propertyName):
    """Look up the PropertyInfo for a square by name (KeyError if unknown)."""
    table = __propertyInfo
    return table[propertyName]
def GetPropertySetAverageRents(propertySetName):
    """Return (P * set_rent_without_houses, P * set_rent_hotels), where P is
    the probability of landing on any square of the set in one turn."""
    return __setInfo[propertySetName]
def GetPropertySetScore(setEnum):
    """Score a property set by its expected hotel rent (second __setInfo field)."""
    _, hotel_rent = __setInfo[setEnum]
    return hotel_rent
def CanIAcceptDeal(propToGet, propToGive):
    """Decide whether a set-completing property swap is acceptable.

    Assumes the exchange gives each player a complete set. Returns True when
    the set we complete is worth at least as much as the set we hand over.

    IDIOM FIX: the original was an 11-branch if-chain where five branches
    (ORANGE..DARK_BLUE) had identical bodies; the checks are mutually
    exclusive equality tests on the same variables, so they are consolidated
    into membership tests with identical behavior.
    """
    PS = monopyly.PropertySet
    setToGet = propToGet.property_set.set_enum
    setToGive = propToGive.property_set.set_enum
    # Sets we are unwilling (or only narrowly willing) to acquire.
    if setToGet == PS.UTILITY:
        return False
    if setToGet == PS.STATION:
        return setToGive == PS.BROWN
    # Cheap or low-value giveaways are always fine.
    if setToGive in (PS.BROWN, PS.UTILITY):
        return True
    # Low/mid-value giveaways: accept anything better than BROWN.
    if setToGive in (PS.LIGHT_BLUE, PS.PURPLE, PS.STATION):
        return setToGet != PS.BROWN
    # High-value giveaways: only accept another high-value set in return.
    if setToGive in (PS.ORANGE, PS.RED, PS.YELLOW, PS.GREEN, PS.DARK_BLUE):
        return setToGet not in (PS.BROWN, PS.LIGHT_BLUE, PS.PURPLE)
    return False
class Deal :
NO = -1
# buckets for cash
CASH_1 = 0 # <200
CASH_2 = 1 # 200-500
CASH_3 = 2 # 500-1000
CASH_4 = 3 # >1000
# buckets for the prop set score comparison
MAJOR_DISADV = 0
MINOR_DISADV = 1
EQUAL = 2
MINOR_ADV = 3
MAJOR_ADV = 4
@staticmethod
def GetCashBucket(cash):
if cash < 200 :
return Deal.CASH_1
if cash < 500 :
return Deal.CASH_2
if cash < 1000 :
return Deal.CASH_3
if cash >= 1000 :
return Deal.CASH_4
@staticmethod
def GetScoreDiffBucket(score):
if score < -40 :
return Deal.MAJOR_DISADV
if score < -15 :
return Deal.MINOR_DISADV
if score > 40 :
return Deal.MAJOR_ADV
if score > 15 :
return Deal.MINOR_ADV
return Deal.EQUAL
__decisionTable = (
# To determine deal price. >0 means I will give money, <0 means I want money
# integer is a cash amount
# float is a multiplier to my cash (if > 0) , his cash (if < 0)
#
# MyCash,EnemyCash,Score, PRICE
(CASH_1, CASH_1, MAJOR_DISADV, NO ),
(CASH_1, CASH_1, MINOR_DISADV, NO ),
(CASH_1, CASH_1, EQUAL, 0 ),
(CASH_1, CASH_1, MINOR_ADV, 10 ),
(CASH_1, CASH_1, MAJOR_ADV, 0.25 ),
(CASH_1, CASH_2, MAJOR_DISADV, NO ),
(CASH_1, CASH_2, MINOR_DISADV, -0.5 ),
(CASH_1, CASH_2, EQUAL, -0.5 ),
(CASH_1, CASH_2, MINOR_ADV, -100 ),
(CASH_1, CASH_2, MAJOR_ADV, 0.25 ),
(CASH_1, CASH_3, MAJOR_DISADV, -0.8 ),
(CASH_1, CASH_3, MINOR_DISADV, -0.6 ),
(CASH_1, CASH_3, EQUAL, -0.4 ),
(CASH_1, CASH_3, MINOR_ADV, -0.4 ),
(CASH_1, CASH_3, MAJOR_ADV, -200 ),
(CASH_1, CASH_4, MAJOR_DISADV, -0.8 ),
(CASH_1, CASH_4, MINOR_DISADV, -0.6 ),
(CASH_1, CASH_4, EQUAL, -0.4 ),
(CASH_1, CASH_4, MINOR_ADV, -0.4 ),
(CASH_1, CASH_4, MAJOR_ADV, -400 ),
(CASH_2, CASH_1, MAJOR_DISADV, NO ),
(CASH_2, CASH_1, MINOR_DISADV, 0 ),
(CASH_2, CASH_1, EQUAL, 50 ),
(CASH_2, CASH_1, MINOR_ADV, 100 ),
(CASH_2, CASH_1, MAJOR_ADV, 0.5 ),
(CASH_2, CASH_2, MAJOR_DISADV, NO ),
(CASH_2, CASH_2, MINOR_DISADV, -0.5 ),
(CASH_2, CASH_2, EQUAL, 0 ),
(CASH_2, CASH_2, MINOR_ADV, 100 ),
(CASH_2, CASH_2, MAJOR_ADV, 0.25 ),
(CASH_2, CASH_3, MAJOR_DISADV, -0.8 ),
(CASH_2, CASH_3, MINOR_DISADV, -0.6 ),
(CASH_2, CASH_3, EQUAL, -0.4 ),
(CASH_2, CASH_3, MINOR_ADV, -0.4 ),
(CASH_2, CASH_3, MAJOR_ADV, -200 ),
(CASH_2, CASH_4, MAJOR_DISADV, -0.8 ),
(CASH_2, CASH_4, MINOR_DISADV, -0.6 ),
(CASH_2, CASH_4, EQUAL, -0.5 ),
(CASH_2, CASH_4, MINOR_ADV, -0.5 ),
(CASH_2, CASH_4, MAJOR_ADV, -0.3 ),
(CASH_3, CASH_1, MAJOR_DISADV, NO ),
(CASH_3, CASH_1, MINOR_DISADV, 0 ),
(CASH_3, CASH_1, EQUAL, 200 ),
(CASH_3, CASH_1, MINOR_ADV, 250 ),
(CASH_3, CASH_1, MAJOR_ADV, 0.5 ),
(CASH_3, CASH_2, MAJOR_DISADV, NO ),
(CASH_3, CASH_2, MINOR_DISADV, -0.5 ),
(CASH_3, CASH_2, EQUAL, 100 ),
(CASH_3, CASH_2, MINOR_ADV, 100 ),
(CASH_3, CASH_2, MAJOR_ADV, 0.5 ),
(CASH_3, CASH_3, MAJOR_DISADV, -0.7 ),
(CASH_3, CASH_3, MINOR_DISADV, -0.5 ),
(CASH_3, CASH_3, EQUAL, 100 ),
(CASH_3, CASH_3, MINOR_ADV, 100 ),
(CASH_3, CASH_3, MAJOR_ADV, 0.5 ),
(CASH_3, CASH_4, MAJOR_DISADV, -0.8 ),
(CASH_3, CASH_4, MINOR_DISADV, -0.6 ),
(CASH_3, CASH_4, EQUAL, -0.25 ),
(CASH_3, CASH_4, MINOR_ADV, 0 ),
(CASH_3, CASH_4, MAJOR_ADV, 0.3 ),
(CASH_4, CASH_1, MAJOR_DISADV, NO ),
(CASH_4, CASH_1, MINOR_DISADV, -0.3 ),
(CASH_4, CASH_1, EQUAL, 250 ),
(CASH_4, CASH_1, MINOR_ADV, 300 ),
(CASH_4, CASH_1, MAJOR_ADV, 0.5 ),
(CASH_4, CASH_2, MAJOR_DISADV, NO ),
(CASH_4, CASH_2, MINOR_DISADV, -0.3 ),
(CASH_4, CASH_2, EQUAL, 100 ),
(CASH_4, CASH_2, MINOR_ADV, 300 ),
(CASH_4, CASH_2, MAJOR_ADV, 0.4 ),
(CASH_4, CASH_3, MAJOR_DISADV, -0.8 ),
(CASH_4, CASH_3, MINOR_DISADV, -0.4 ),
(CASH_4, CASH_3, EQUAL, 200 ),
(CASH_4, CASH_3, MINOR_ADV, 300 ),
(CASH_4, CASH_3, MAJOR_ADV, 0.4 ),
(CASH_4, CASH_4, MAJOR_DISADV, -0.8 ),
(CASH_4, CASH_4, MINOR_DISADV, -0.4 ),
(CASH_4, CASH_4, EQUAL, 0 ),
(CASH_4, CASH_4, MINOR_ADV, 200 ),
(CASH_4, CASH_4, MAJOR_ADV, 0.4 ),
)
@classmethod
def CreateDecisionTree(cls) :
cls._decisionTree | |
= kwargs.get(
'ncbiTaxonId', None)
self.sourceAccessions = kwargs.get(
'sourceAccessions', None)
self.sourceDivergence = kwargs.get(
'sourceDivergence', None)
self.sourceURI = kwargs.get(
'sourceURI', None)
class ReferenceSet(ProtocolElement):
    """
    A set of `Reference` objects which together form a reference assembly
    (such as `GRCh38`), defining the common coordinate space used when
    comparing reference-aligned experimental data.
    """
    _schemaSource = """
{"type": "record", "name": "ReferenceSet", "namespace": "org.ga4gh.models", "doc": "", "fields":
[{"name": "id", "type": "string", "doc": ""}, {"name": "md5checksum", "type": "string", "doc": ""},
{"name": "ncbiTaxonId", "type": ["null", "int"], "doc": "", "default": null}, {"name":
"description", "type": ["null", "string"], "doc": "", "default": null}, {"name": "assemblyId",
"type": ["null", "string"], "doc": "", "default": null}, {"name": "sourceURI", "type": ["null",
"string"], "doc": "", "default": null}, {"name": "sourceAccessions", "type": {"type": "array",
"items": "string"}, "doc": ""}, {"name": "isDerived", "type": "boolean", "doc": "", "default":
false}]}
"""
    schema = avro_parse(_schemaSource)
    requiredFields = {
        "id",
        "md5checksum",
        "sourceAccessions",
    }

    @classmethod
    def isEmbeddedType(cls, fieldName):
        # This record embeds no other protocol types.
        return False

    @classmethod
    def getEmbeddedType(cls, fieldName):
        # No embedded types exist; fail exactly like the generated
        # `{}[fieldName]` lookup would.
        raise KeyError(fieldName)

    __slots__ = [
        'assemblyId', 'description', 'id', 'isDerived', 'md5checksum',
        'ncbiTaxonId', 'sourceAccessions', 'sourceURI'
    ]

    def __init__(self, **kwargs):
        # All attributes are optional keyword arguments; only `isDerived`
        # has a non-None default.
        _defaults = (
            ('assemblyId', None), ('description', None), ('id', None),
            ('isDerived', False), ('md5checksum', None),
            ('ncbiTaxonId', None), ('sourceAccessions', None),
            ('sourceURI', None),
        )
        for attr, default in _defaults:
            setattr(self, attr, kwargs.get(attr, default))
class SearchCallSetsRequest(SearchRequest):
    """
    Request body for `POST /callsets/search`, expressed as JSON.
    """
    _schemaSource = """
{"type": "record", "name": "SearchCallSetsRequest", "namespace": "org.ga4gh.methods", "doc": "",
"fields": [{"name": "variantSetId", "type": "string", "doc": ""}, {"name": "name", "type": ["null",
"string"], "doc": "", "default": null}, {"name": "pageSize", "type": ["null", "int"], "doc": "",
"default": null}, {"name": "pageToken", "type": ["null", "string"], "doc": "", "default": null}]}
"""
    schema = avro_parse(_schemaSource)
    requiredFields = {
        "variantSetId",
    }

    @classmethod
    def isEmbeddedType(cls, fieldName):
        # This record embeds no other protocol types.
        return False

    @classmethod
    def getEmbeddedType(cls, fieldName):
        # No embedded types exist; fail exactly like `{}[fieldName]` would.
        raise KeyError(fieldName)

    __slots__ = [
        'name', 'pageSize', 'pageToken', 'variantSetId'
    ]

    def __init__(self, **kwargs):
        # Every attribute is an optional keyword argument defaulting to None.
        for attr in self.__slots__:
            setattr(self, attr, kwargs.get(attr, None))
class SearchCallSetsResponse(SearchResponse):
    """
    This is the response from `POST /callsets/search` expressed as
    JSON.

    Generated protocol class: the embedded Avro schema is the source of
    truth for the field definitions below.
    """
    _schemaSource = """
{"type": "record", "name": "SearchCallSetsResponse", "namespace": "org.ga4gh.methods", "doc": "",
"fields": [{"name": "callSets", "type": {"type": "array", "items": {"type": "record", "name":
"CallSet", "namespace": "org.ga4gh.models", "doc": "", "fields": [{"name": "id", "type": "string",
"doc": ""}, {"name": "name", "type": ["null", "string"], "doc": "", "default": null}, {"name":
"sampleId", "type": ["null", "string"], "doc": ""}, {"name": "variantSetIds", "type": {"type":
"array", "items": "string"}, "doc": "", "default": []}, {"name": "created", "type": ["null",
"long"], "doc": "", "default": null}, {"name": "updated", "type": ["null", "long"], "doc": "",
"default": null}, {"name": "info", "type": {"type": "map", "values": {"type": "array", "items":
"string"}}, "doc": "", "default": {}}]}}, "doc": "", "default": []}, {"name": "nextPageToken",
"type": ["null", "string"], "doc": "", "default": null}]}
"""
    schema = avro_parse(_schemaSource)
    # NOTE: generated as an empty dict rather than a set; membership tests
    # still behave correctly.
    requiredFields = {}
    # Name of the repeated-value field for paging helpers.
    _valueListName = "callSets"

    @classmethod
    def isEmbeddedType(cls, fieldName):
        # 'callSets' elements are CallSet protocol objects.
        embeddedTypes = {
            'callSets': CallSet,
        }
        return fieldName in embeddedTypes

    @classmethod
    def getEmbeddedType(cls, fieldName):
        embeddedTypes = {
            'callSets': CallSet,
        }
        return embeddedTypes[fieldName]

    __slots__ = [
        'callSets', 'nextPageToken'
    ]

    def __init__(self, **kwargs):
        # callSets defaults to an empty list; nextPageToken to None.
        self.callSets = kwargs.get(
            'callSets', [])
        self.nextPageToken = kwargs.get(
            'nextPageToken', None)
class SearchDatasetsRequest(SearchRequest):
    """
    Request body for `POST /datasets/search`, expressed as JSON.
    """
    _schemaSource = """
{"type": "record", "name": "SearchDatasetsRequest", "namespace": "org.ga4gh.methods", "doc": "",
"fields": [{"name": "pageSize", "type": ["null", "int"], "doc": "", "default": null}, {"name":
"pageToken", "type": ["null", "string"], "doc": "", "default": null}]}
"""
    schema = avro_parse(_schemaSource)
    requiredFields = {}

    @classmethod
    def isEmbeddedType(cls, fieldName):
        # This record embeds no other protocol types.
        return False

    @classmethod
    def getEmbeddedType(cls, fieldName):
        # No embedded types exist; fail exactly like `{}[fieldName]` would.
        raise KeyError(fieldName)

    __slots__ = [
        'pageSize', 'pageToken'
    ]

    def __init__(self, **kwargs):
        # Both paging attributes are optional and default to None.
        for attr in self.__slots__:
            setattr(self, attr, kwargs.get(attr, None))
class SearchDatasetsResponse(SearchResponse):
    """
    This is the response from `POST /datasets/search` expressed as
    JSON.

    Generated protocol class: the embedded Avro schema is the source of
    truth for the field definitions below.
    """
    _schemaSource = """
{"type": "record", "name": "SearchDatasetsResponse", "namespace": "org.ga4gh.methods", "doc": "",
"fields": [{"name": "datasets", "type": {"type": "array", "items": {"type": "record", "name":
"Dataset", "namespace": "org.ga4gh.models", "doc": "", "fields": [{"name": "id", "type": "string",
"doc": ""}, {"name": "description", "type": ["null", "string"], "doc": "", "default": null}]}},
"doc": "", "default": []}, {"name": "nextPageToken", "type": ["null", "string"], "doc": "",
"default": null}]}
"""
    schema = avro_parse(_schemaSource)
    # NOTE: generated as an empty dict rather than a set; membership tests
    # still behave correctly.
    requiredFields = {}
    # Name of the repeated-value field for paging helpers.
    _valueListName = "datasets"

    @classmethod
    def isEmbeddedType(cls, fieldName):
        # 'datasets' elements are Dataset protocol objects.
        embeddedTypes = {
            'datasets': Dataset,
        }
        return fieldName in embeddedTypes

    @classmethod
    def getEmbeddedType(cls, fieldName):
        embeddedTypes = {
            'datasets': Dataset,
        }
        return embeddedTypes[fieldName]

    __slots__ = [
        'datasets', 'nextPageToken'
    ]

    def __init__(self, **kwargs):
        # datasets defaults to an empty list; nextPageToken to None.
        self.datasets = kwargs.get(
            'datasets', [])
        self.nextPageToken = kwargs.get(
            'nextPageToken', None)
class SearchReadGroupSetsRequest(SearchRequest):
    """
    Request body for `POST /readgroupsets/search`, expressed as JSON.
    """
    _schemaSource = """
{"type": "record", "name": "SearchReadGroupSetsRequest", "namespace": "org.ga4gh.methods", "doc":
"", "fields": [{"name": "datasetId", "type": "string", "doc": ""}, {"name": "name", "type": ["null",
"string"], "doc": "", "default": null}, {"name": "pageSize", "type": ["null", "int"], "doc": "",
"default": null}, {"name": "pageToken", "type": ["null", "string"], "doc": "", "default": null}]}
"""
    schema = avro_parse(_schemaSource)
    requiredFields = {
        "datasetId",
    }

    @classmethod
    def isEmbeddedType(cls, fieldName):
        # This record embeds no other protocol types.
        return False

    @classmethod
    def getEmbeddedType(cls, fieldName):
        # No embedded types exist; fail exactly like `{}[fieldName]` would.
        raise KeyError(fieldName)

    __slots__ = [
        'datasetId', 'name', 'pageSize', 'pageToken'
    ]

    def __init__(self, **kwargs):
        # Every attribute is an optional keyword argument defaulting to None.
        for attr in self.__slots__:
            setattr(self, attr, kwargs.get(attr, None))
class SearchReadGroupSetsResponse(SearchResponse):
    """
    This is the response from `POST /readgroupsets/search` expressed
    as JSON.

    Generated protocol class: the (large) embedded Avro schema inlines the
    full ReadGroupSet / ReadGroup / Experiment / ReadStats / Program record
    definitions and is the source of truth for the fields below.
    """
    _schemaSource = """
{"type": "record", "name": "SearchReadGroupSetsResponse", "namespace": "org.ga4gh.methods", "doc":
"", "fields": [{"name": "readGroupSets", "type": {"type": "array", "items": {"type": "record",
"name": "ReadGroupSet", "namespace": "org.ga4gh.models", "fields": [{"name": "id", "type": "string",
"doc": ""}, {"name": "datasetId", "type": ["null", "string"], "doc": "", "default": null}, {"name":
"name", "type": ["null", "string"], "doc": "", "default": null}, {"name": "stats", "type": ["null",
{"type": "record", "name": "ReadStats", "fields": [{"name": "alignedReadCount", "type": ["null",
"long"], "doc": "", "default": null}, {"name": "unalignedReadCount", "type": ["null", "long"],
"doc": "", "default": null}, {"name": "baseCount", "type": ["null", "long"], "doc": "", "default":
null}]}], "doc": "", "default": null}, {"name": "readGroups", "type": {"type": "array", "items":
{"type": "record", "name": "ReadGroup", "fields": [{"name": "id", "type": "string", "doc": ""},
{"name": "datasetId", "type": ["null", "string"], "doc": "", "default": null}, {"name": "name",
"type": ["null", "string"], "doc": "", "default": null}, {"name": "description", "type": ["null",
"string"], "doc": "", "default": null}, {"name": "sampleId", "type": ["null", "string"], "doc": ""},
{"name": "experiment", "type": ["null", {"type": "record", "name": "Experiment", "doc": "",
"fields": [{"name": "id", "type": "string", "doc": ""}, {"name": "name", "type": ["null", "string"],
"doc": "", "default": null}, {"name": "description", "type": ["null", "string"], "doc": "",
"default": null}, {"name": "recordCreateTime", "type": "string", "doc": ""}, {"name":
"recordUpdateTime", "type": "string", "doc": ""}, {"name": "runTime", "type": ["null", "string"],
"doc": "", "default": null}, {"name": "molecule", "type": ["null", "string"], "doc": "", "default":
null}, {"name": "strategy", "type": ["null", "string"], "doc": "", "default": null}, {"name":
"selection", "type": ["null", "string"], "doc": "", "default": null}, {"name": "library", "type":
["null", "string"], "doc": "", "default": null}, {"name": "libraryLayout", "type": ["null",
"string"], "doc": "", "default": null}, {"name": "instrumentModel", "type": ["null", "string"],
"doc": ""}, {"name": "instrumentDataFile", "type": ["null", "string"], "doc": "", "default": null},
{"name": "sequencingCenter", "type": ["null", "string"], "doc": ""}, {"name": "platformUnit",
"type": ["null", "string"], "doc": "", "default": null}, {"name": "info", "type": {"type": "map",
"values": {"type": "array", "items": "string"}}, "doc": "", "default": {}}]}], "doc": ""}, {"name":
"predictedInsertSize", "type": ["null", "int"], "doc": "", "default": null}, {"name": "created",
"type": ["null", "long"], "doc": "", "default": null}, {"name": "updated", "type": ["null", "long"],
"doc": "", "default": null}, {"name": "stats", "type": ["null", "ReadStats"], "doc": "", "default":
null}, {"name": "programs", "type": {"type": "array", "items": {"type": "record", "name": "Program",
"fields": [{"name": "commandLine", "type": ["null", "string"], "doc": "", "default": null}, {"name":
"id", "type": ["null", "string"], "doc": "", "default": null}, {"name": "name", "type": ["null",
"string"], "doc": "", "default": null}, {"name": "prevProgramId", "type": ["null", "string"], "doc":
"", "default": null}, {"name": "version", "type": ["null", "string"], "doc": "", "default":
null}]}}, "doc": "", "default": []}, {"name": "referenceSetId", "type": ["null", "string"], "doc":
"", "default": null}, {"name": "info", "type": {"type": "map", "values": {"type": "array", "items":
"string"}}, "doc": "", "default": {}}]}}, "doc": "", "default": []}]}}, "doc": "", "default": []},
{"name": "nextPageToken", "type": ["null", "string"], "doc": "", "default": null}]}
"""
    schema = avro_parse(_schemaSource)
    # NOTE: generated as an empty dict rather than a set; membership tests
    # still behave correctly.
    requiredFields = {}
    # Name of the repeated-value field for paging helpers.
    _valueListName = "readGroupSets"

    @classmethod
    def isEmbeddedType(cls, fieldName):
        # 'readGroupSets' elements are ReadGroupSet protocol objects.
        embeddedTypes = {
            'readGroupSets': ReadGroupSet,
        }
        return fieldName in embeddedTypes

    @classmethod
    def getEmbeddedType(cls, fieldName):
        embeddedTypes = {
            'readGroupSets': ReadGroupSet,
        }
        return embeddedTypes[fieldName]

    __slots__ = [
        'nextPageToken', 'readGroupSets'
    ]

    def __init__(self, **kwargs):
        # nextPageToken defaults to None; readGroupSets to an empty list.
        self.nextPageToken = kwargs.get(
            'nextPageToken', None)
        self.readGroupSets = kwargs.get(
            'readGroupSets', [])
class SearchReadsRequest(SearchRequest):
"""
This request maps to the body of `POST /reads/search` as JSON. If
a reference is specified, all queried `ReadGroup`s must be aligned
to `ReferenceSet`s containing that same `Reference`. If | |
# Setup
from __future__ import print_function
from rh_logger.api import logger
import rh_logger
import logging
import os
import numpy as np
import time
import sys
from scipy.spatial import distance
from scipy import spatial
import cv2
import argparse
from mb_aligner.common import utils
from rh_renderer import models
from mb_aligner.alignment.fine_matchers import PMCC_filter
import multiprocessing as mp
from rh_renderer.tilespec_affine_renderer import TilespecAffineRenderer
import threading
from scipy.spatial import cKDTree as KDTree
from collections import defaultdict
# import pyximport
# pyximport.install()
# from ..common import cv_wrap_module
threadLocal = threading.local()
class BlockMatcherPMCCDispatcher(object):
class BlockMatcherPMCC(object):
        def __init__(self, sec1, sec2, sec1_to_sec2_transform, **kwargs):
            """Set up scaled renderers for PMCC block matching of sec1 vs sec2.

            sec1/sec2 are section objects exposing a `tilespec`;
            sec1_to_sec2_transform maps sec1 coordinates into sec2 space.
            Keyword options control scaling, template/search sizes, PMCC
            acceptance thresholds, and optional CLAHE preprocessing.
            """
            self._scaling = kwargs.get("scaling", 0.2)
            self._template_size = kwargs.get("template_size", 200)
            # Search window defaults to 8x the template edge length.
            self._search_window_size = kwargs.get("search_window_size", 8 * self._template_size)
            logger.report_event("Actual template size: {} and window search size: {} (after scaling)".format(self._template_size * self._scaling, self._search_window_size * self._scaling), log_level=logging.INFO)
            # Parameters for PMCC filtering
            self._min_corr = kwargs.get("min_correlation", 0.2)
            self._max_curvature = kwargs.get("maximal_curvature_ratio", 10)
            self._max_rod = kwargs.get("maximal_ROD", 0.9)
            self._use_clahe = kwargs.get("use_clahe", False)
            if self._use_clahe:
                self._clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
            #self._debug_dir = kwargs.get("debug_dir", None)
            # Debug-image saving is off until set_debug_dir() is called.
            self._debug_save_matches = None
            # Half-edge lengths in scaled coordinates, used to build crops
            # centred on a point below.
            self._template_scaled_side = self._template_size * self._scaling / 2
            self._search_window_scaled_side = self._search_window_size * self._scaling / 2
            self._sec1 = sec1
            self._sec2 = sec2
            self._sec1_to_sec2_transform = sec1_to_sec2_transform
            # 2x3 affine that only scales (no rotation/translation).
            self._scale_transformation = np.array([
                [ self._scaling, 0., 0. ],
                [ 0., self._scaling, 0. ]
            ])
            # For section1 there will be a single renderer with transformation and scaling
            # (so sec1 pixels are rendered directly in scaled sec2 space).
            self._sec1_scaled_renderer = TilespecAffineRenderer(self._sec1.tilespec)
            self._sec1_scaled_renderer.add_transformation(self._sec1_to_sec2_transform.get_matrix())
            self._sec1_scaled_renderer.add_transformation(self._scale_transformation)
            # for section2 there will only be a single renderer (no need to transform back to sec1)
            self._sec2_scaled_renderer = TilespecAffineRenderer(self._sec2.tilespec)
            self._sec2_scaled_renderer.add_transformation(self._scale_transformation)
def set_debug_dir(self, debug_dir):
self._debug_save_matches = True
self._debug_dir = debug_dir
        def match_sec1_to_sec2_mfov(self, sec1_pts):
            """PMCC-match sec1 points against sec2.

            For each sec1 point, a template rendered from sec1 (already
            transformed into sec2 space) is searched for inside a larger
            window rendered from sec2 around the estimated location.

            Returns (valid_matches, invalid_matches) where
            valid_matches = [sec1_pts, sec2_pts, correlation_values] and
            invalid_matches = [sec1_pts, failure_reasons].
            """
            # Apply the mfov transformation to compute estimated location on sec2
            # (and scale into the renderers' scaled coordinate space).
            sec1_mfov_pts_on_sec2 = self._sec1_to_sec2_transform.apply(np.atleast_2d(sec1_pts)) * self._scaling
            valid_matches = [[], [], []]
            invalid_matches = [[], []]
            for sec1_pt, sec2_pt_estimated in zip(sec1_pts, sec1_mfov_pts_on_sec2):
                # Fetch the template around img1_point (after transformation)
                from_x1, from_y1 = sec2_pt_estimated - self._template_scaled_side
                to_x1, to_y1 = sec2_pt_estimated + self._template_scaled_side
                sec1_template, sec1_template_start_point = self._sec1_scaled_renderer.crop(from_x1, from_y1, to_x1, to_y1)
                # Fetch a large sub-image around img2_point (using search_window_scaled_size)
                from_x2, from_y2 = sec2_pt_estimated - self._search_window_scaled_side
                to_x2, to_y2 = sec2_pt_estimated + self._search_window_scaled_side
                sec2_search_window, sec2_search_window_start_point = self._sec2_scaled_renderer.crop(from_x2, from_y2, to_x2, to_y2)
                # execute the PMCC match
                # Do template matching
                # Skip degenerate crops (empty, or template not strictly
                # smaller than the search window).
                if np.any(np.array(sec2_search_window.shape) == 0) or np.any(np.array(sec1_template.shape) == 0):
                    continue
                if sec1_template.shape[0] >= sec2_search_window.shape[0] or sec1_template.shape[1] >= sec2_search_window.shape[1]:
                    continue
                if self._use_clahe:
                    # Contrast-normalize both patches before matching.
                    sec2_search_window_clahe = self._clahe.apply(sec2_search_window)
                    sec1_template_clahe = self._clahe.apply(sec1_template)
                    pmcc_result, reason, match_val = PMCC_filter.PMCC_match(sec2_search_window_clahe, sec1_template_clahe, min_correlation=self._min_corr, maximal_curvature_ratio=self._max_curvature, maximal_ROD=self._max_rod)
                else:
                    pmcc_result, reason, match_val = PMCC_filter.PMCC_match(sec2_search_window, sec1_template, min_correlation=self._min_corr, maximal_curvature_ratio=self._max_curvature, maximal_ROD=self._max_rod)
                if pmcc_result is None:
                    # On failure `reason` carries the rejection cause.
                    invalid_matches[0].append(sec1_pt)
                    invalid_matches[1].append(reason)
                    # debug_out_fname1 = "temp_debug/debug_match_sec1{}-{}_template.png".format(int(sec1_pt[0]), int(sec1_pt[1]), int(sec2_pt_estimated[0]), int(sec2_pt_estimated[1]))
                    # debug_out_fname2 = "temp_debug/debug_match_sec1{}-{}_search_window.png".format(int(sec1_pt[0]), int(sec1_pt[1]), int(sec2_pt_estimated[0]), int(sec2_pt_estimated[1]))
                    # cv2.imwrite(debug_out_fname1, sec1_template)
                    # cv2.imwrite(debug_out_fname2, sec2_search_window)
                else:
                    # Compute the location of the matched point on img2 in non-scaled coordinates
                    # NOTE(review): on success `reason` appears to hold the
                    # (row, col) of the best match, hence the [1],[0] swap to
                    # (x, y) — confirm against PMCC_filter.PMCC_match.
                    matched_location_scaled = np.array([reason[1], reason[0]]) + np.array([from_x2, from_y2]) + self._template_scaled_side
                    sec2_pt = matched_location_scaled / self._scaling
                    logger.report_event("{}: match found: {} and {} (orig assumption: {})".format(os.getpid(), sec1_pt, sec2_pt, sec2_pt_estimated / self._scaling), log_level=logging.DEBUG)
                    if self._debug_save_matches:
                        # Save the template and the matched cut-out for inspection.
                        debug_out_fname1 = os.path.join(self._debug_dir, "debug_match_sec1_{}-{}_sec2_{}-{}_image1.png".format(int(sec1_pt[0]), int(sec1_pt[1]), int(sec2_pt[0]), int(sec2_pt[1])))
                        debug_out_fname2 = os.path.join(self._debug_dir, "debug_match_sec1_{}-{}_sec2_{}-{}_image2.png".format(int(sec1_pt[0]), int(sec1_pt[1]), int(sec2_pt[0]), int(sec2_pt[1])))
                        cv2.imwrite(debug_out_fname1, sec1_template)
                        sec2_cut_out = sec2_search_window[int(reason[0]):int(reason[0] + 2 * self._template_scaled_side), int(reason[1]):int(reason[1] + 2 * self._template_scaled_side)]
                        cv2.imwrite(debug_out_fname2, sec2_cut_out)
                    valid_matches[0].append(np.array(sec1_pt))
                    valid_matches[1].append(sec2_pt)
                    valid_matches[2].append(match_val)
            return valid_matches, invalid_matches
def match_sec2_to_sec1_mfov(self, sec2_pts):
    """Template-match sec2 mesh points against sec1 (the reverse direction).

    sec2_pts: iterable of (x, y) points in unscaled sec2 coordinates.

    For each point: crop a small template around the scaled point from the
    sec2 renderer, crop a larger search window around the estimated location
    from the (transformed, scaled) sec1 renderer, and run PMCC template
    matching. Successful matches are mapped back to unscaled sec1
    coordinates through the inverse of the sec1->sec2 affine transform.

    Returns (valid_matches, invalid_matches):
        valid_matches   = [sec2 points, matched sec1 points, correlation values]
        invalid_matches = [sec2 points, rejection reasons]
    """
    # Assume that only sec1 renderer was transformed and not sec2 (and both scaled)
    sec2_pts = np.asarray(sec2_pts)
    sec2_pts_scaled = sec2_pts * self._scaling
    mat = self._sec1_to_sec2_transform.get_matrix()
    # The inverse affine matrix maps matched sec1 locations back to sec1 space
    inverse_mat = np.linalg.inv(mat)
    #inverse_model = BlockMatcherPMCC.inverse_transform(self._sec1_to_sec2_transform)
    #sec2_pts_on_sec1 = inverse_model.apply(sec2_pts)
    valid_matches = [[], [], []]
    invalid_matches = [[], []]
    for sec2_pt, sec2_pt_scaled in zip(sec2_pts, sec2_pts_scaled):
        # sec1_pt_estimated is after the sec1_to_sec2 transform
        sec1_pt_estimated = sec2_pt_scaled
        # Fetch the template around sec2_pt_scaled (no transformation, just scaling)
        from_x2, from_y2 = sec2_pt_scaled - self._template_scaled_side
        to_x2, to_y2 = sec2_pt_scaled + self._template_scaled_side
        sec2_template, sec2_template_start_point = self._sec2_scaled_renderer.crop(from_x2, from_y2, to_x2, to_y2)
        # Fetch a large sub-image around sec1_pt_estimated (after transformation, using search_window_scaled_size)
        from_x1, from_y1 = sec1_pt_estimated - self._search_window_scaled_side
        to_x1, to_y1 = sec1_pt_estimated + self._search_window_scaled_side
        sec1_search_window, sec1_search_window_start_point = self._sec1_scaled_renderer.crop(from_x1, from_y1, to_x1, to_y1)
        # execute the PMCC match
        # Do template matching
        # Skip degenerate crops: empty images, or a template that is not
        # strictly smaller than the search window
        if np.any(np.array(sec1_search_window.shape) == 0) or np.any(np.array(sec2_template.shape) == 0):
            continue
        if sec2_template.shape[0] >= sec1_search_window.shape[0] or sec2_template.shape[1] >= sec1_search_window.shape[1]:
            continue
        if self._use_clahe:
            # contrast-normalize both images before matching
            sec1_search_window_clahe = self._clahe.apply(sec1_search_window)
            sec2_template_clahe = self._clahe.apply(sec2_template)
            pmcc_result, reason, match_val = PMCC_filter.PMCC_match(sec1_search_window_clahe, sec2_template_clahe, min_correlation=self._min_corr, maximal_curvature_ratio=self._max_curvature, maximal_ROD=self._max_rod)
        else:
            pmcc_result, reason, match_val = PMCC_filter.PMCC_match(sec1_search_window, sec2_template, min_correlation=self._min_corr, maximal_curvature_ratio=self._max_curvature, maximal_ROD=self._max_rod)
        if pmcc_result is None:
            # Rejected: record the point together with the rejection reason
            invalid_matches[0].append(sec2_pt)
            invalid_matches[1].append(reason)
            # debug_out_fname1 = "temp_debug/debug_match_sec2{}-{}_template.png".format(int(sec2_pt[0]), int(sec2_pt[1]), int(sec1_pt_estimated[0]), int(sec1_pt_estimated[1]))
            # debug_out_fname2 = "temp_debug/debug_match_sec2{}-{}_search_window.png".format(int(sec2_pt[0]), int(sec2_pt[1]), int(sec1_pt_estimated[0]), int(sec1_pt_estimated[1]))
            # cv2.imwrite(debug_out_fname1, sec2_template)
            # cv2.imwrite(debug_out_fname2, sec1_search_window)
        else:
            # Compute the location of the matched point on img2 in non-scaled coordinates
            # (reason holds the (row, col) of the best match inside the window)
            matched_location_scaled = np.array([reason[1], reason[0]]) + np.array([from_x1, from_y1]) + self._template_scaled_side
            sec1_pt = matched_location_scaled / self._scaling
            # Map the matched sec1 point back through the inverse affine transform
            sec1_pt = np.dot(inverse_mat[:2,:2], sec1_pt) + inverse_mat[:2,2]
            logger.report_event("{}: match found: {} and {} (orig assumption: {})".format(os.getpid(), sec2_pt, sec1_pt, np.dot(inverse_mat[:2,:2], sec1_pt_estimated / self._scaling) + inverse_mat[:2,2]), log_level=logging.DEBUG)
            if self._debug_save_matches:
                debug_out_fname1 = os.path.join(self._debug_dir, "debug_match_sec2_{}-{}_sec1_{}-{}_image1.png".format(int(sec2_pt[0]), int(sec2_pt[1]), int(sec1_pt[0]), int(sec1_pt[1])))
                debug_out_fname2 = os.path.join(self._debug_dir, "debug_match_sec2_{}-{}_sec1_{}-{}_image2.png".format(int(sec2_pt[0]), int(sec2_pt[1]), int(sec1_pt[0]), int(sec1_pt[1])))
                cv2.imwrite(debug_out_fname1, sec2_template)
                # Cut the matched region out of the search window for inspection
                sec1_cut_out = sec1_search_window[int(reason[0]):int(reason[0] + 2 * self._template_scaled_side), int(reason[1]):int(reason[1] + 2 * self._template_scaled_side)]
                cv2.imwrite(debug_out_fname2, sec1_cut_out)
            valid_matches[0].append(sec2_pt)
            valid_matches[1].append(sec1_pt)
            valid_matches[2].append(match_val)
    return valid_matches, invalid_matches
def __init__(self, **kwargs):
    """Initialize the dispatcher.

    All keyword arguments are stored and later forwarded verbatim to each
    BlockMatcherPMCC instance (scaling, template_size, search_window_size,
    min_correlation, maximal_curvature_ratio, maximal_ROD, use_clahe, ...).

    Handled directly here:
        mesh_spacing (int): spacing of the match mesh points (default 1500).
        debug_dir (str or None): when given, a timestamped sub-directory is
            created under it and intermediate match images are saved there.
    """
    self._matcher_kwargs = kwargs
    self._mesh_spacing = kwargs.get("mesh_spacing", 1500)
    self._debug_dir = kwargs.get("debug_dir", None)
    if self._debug_dir is not None:
        logger.report_event("Debug mode - on", log_level=logging.INFO)
        # Create a unique debug directory for this run
        import datetime
        self._debug_dir = os.path.join(self._debug_dir, 'debug_matches_{}'.format(datetime.datetime.now().isoformat()))
        # BUG FIX: os.mkdirs does not exist (AttributeError); use os.makedirs.
        # exist_ok=True keeps a pre-existing directory from aborting the run.
        os.makedirs(self._debug_dir, exist_ok=True)
@staticmethod
def _is_point_in_img(img_bbox, point):
    """Return True when *point* lies strictly inside *img_bbox*.

    img_bbox is [min_x, max_x, min_y, max_y]; point is (x, y).
    """
    # TODO - instead of checking inside the bbox, need to check inside the polygon after transformation
    min_x, max_x, min_y, max_y = img_bbox[0], img_bbox[1], img_bbox[2], img_bbox[3]
    return bool(min_x < point[0] < max_x and min_y < point[1] < max_y)
@staticmethod
def sum_invalid_matches(invalid_matches):
if len(invalid_matches[1]) == 0:
return [0] * 5
hist, _ = np.histogram(invalid_matches[1], bins=5)
return hist
@staticmethod
def _perform_matching(sec1_mfov_tile_idx, sec1, sec2, sec1_to_sec2_mfov_transform, sec1_mfov_mesh_pts, sec2_mfov_mesh_pts, debug_dir, matcher_args):
    """Worker: run bidirectional fine block matching for a single sec1 mfov.

    Builds a fresh BlockMatcherPMCC for the (sec1, sec2) pair using the
    per-mfov transform, matches sec1 mesh points onto sec2 and sec2 mesh
    points onto sec1, and logs summary statistics for both directions.

    Returns (sec1_mfov_tile_idx, valid_matches_sec1_to_sec2, valid_matches_sec2_to_sec1).
    """
    # Previous thread-local caching of the matcher, kept for reference:
    # fine_matcher_key = "block_matcher_{},{},{}".format(sec1.canonical_section_name, sec2.canonical_section_name, sec1_mfov_tile_idx[0])
    # fine_matcher = getattr(threadLocal, fine_matcher_key, None)
    # if fine_matcher is None:
    #     fine_matcher = BlockMatcherPMCCDispatcher.BlockMatcherPMCC(sec1, sec2, sec1_to_sec2_mfov_transform, **matcher_args)
    #     if debug_dir is not None:
    #         fine_matcher.set_debug_dir(debug_dir)
    #
    #     setattr(threadLocal, fine_matcher_key, fine_matcher)
    fine_matcher = BlockMatcherPMCCDispatcher.BlockMatcherPMCC(sec1, sec2, sec1_to_sec2_mfov_transform, **matcher_args)
    if debug_dir is not None:
        fine_matcher.set_debug_dir(debug_dir)
    logger.report_event("Block-Matching+PMCC layers: {} with {} (mfov1 {}) {} mesh points1, {} mesh points2".format(sec1.canonical_section_name, sec2.canonical_section_name, sec1_mfov_tile_idx, len(sec1_mfov_mesh_pts), len(sec2_mfov_mesh_pts)), log_level=logging.INFO)
    logger.report_event("Block-Matching+PMCC layers: {} -> {}".format(sec1.canonical_section_name, sec2.canonical_section_name), log_level=logging.INFO)
    # forward direction: sec1 mesh points located on sec2
    valid_matches1, invalid_matches1 = fine_matcher.match_sec1_to_sec2_mfov(sec1_mfov_mesh_pts)
    logger.report_event("Block-Matching+PMCC layers: {} -> {} valid matches: {}, invalid_matches: {} {}".format(sec1.canonical_section_name, sec2.canonical_section_name, len(valid_matches1[0]), len(invalid_matches1[0]), BlockMatcherPMCCDispatcher.sum_invalid_matches(invalid_matches1)), log_level=logging.INFO)
    logger.report_event("Block-Matching+PMCC layers: {} <- {}".format(sec1.canonical_section_name, sec2.canonical_section_name), log_level=logging.INFO)
    # reverse direction: sec2 mesh points located on sec1
    valid_matches2, invalid_matches2 = fine_matcher.match_sec2_to_sec1_mfov(sec2_mfov_mesh_pts)
    logger.report_event("Block-Matching+PMCC layers: {} <- {} valid matches: {}, invalid_matches: {} {}".format(sec1.canonical_section_name, sec2.canonical_section_name, len(valid_matches2[0]), len(invalid_matches2[0]), BlockMatcherPMCCDispatcher.sum_invalid_matches(invalid_matches2)), log_level=logging.INFO)
    return sec1_mfov_tile_idx, valid_matches1, valid_matches2
# def inverse_transform(model):
# mat = model.get_matrix()
# new_model = models.AffineModel(np.linalg.inv(mat))
# return new_model
def match_layers_fine_matching(self, sec1, sec2, sec1_cache, sec2_cache, sec1_to_sec2_mfovs_transforms, pool):
starttime = time.time()
logger.report_event("Block-Matching+PMCC layers: {} with {} (bidirectional)".format(sec1.canonical_section_name, sec2.canonical_section_name), log_level=logging.INFO)
# take just the models (w/o the filtered match points)
sec1_to_sec2_mfovs_transforms = {k:v[0] for k, v in sec1_to_sec2_mfovs_transforms.items()}
# create a processes shared per-mfov transform from sec1 to sec2 (and from sec2 to sec1 too)
mfovs1_centers_sec2centers = [[], [], []] # lists of mfovs indexes, mfovs centers, and mfovs centers after transformation to sec2
missing_mfovs1_transforms_centers = [[], []] # lists of missing mfovs in sec1 and their centers
for mfov1 in sec1.mfovs():
mfov1_center = np.array([(mfov1.bbox[0] + mfov1.bbox[1])/2, (mfov1.bbox[2] + mfov1.bbox[3])/2])
if mfov1.mfov_index in sec1_to_sec2_mfovs_transforms and sec1_to_sec2_mfovs_transforms[mfov1.mfov_index] is not None:
mfovs1_centers_sec2centers[0].append(mfov1.mfov_index)
mfovs1_centers_sec2centers[1].append(mfov1_center)
sec1_mfov_model = sec1_to_sec2_mfovs_transforms[mfov1.mfov_index]
mfovs1_centers_sec2centers[2].append(sec1_mfov_model.apply(mfov1_center)[0])
else:
missing_mfovs1_transforms_centers[0].append(mfov1.mfov_index)
missing_mfovs1_transforms_centers[1].append(mfov1_center)
# # find the transformations from sec2 to sec1
# mfovs1_centers_sec2centers = [np.array(mfovs1_centers_sec2centers[0]), np.array(mfovs1_centers_sec2centers[1]), np.array(mfovs1_centers_sec2centers[2])]
# mfovs1_centers_sec2_kdtree = KDTree(mfovs1_centers_sec2centers[2])
# mfovs2_centers = [np.array([(mfov2.bbox[0] + mfov2.bbox[1])/2, (mfov2.bbox[2] + mfov2.bbox[3])/2]) for mfov2 in sec2.mfovs()]
# mfovs2_closest_centers_mfovs1_idxs = mfovs1_centers_sec2_kdtree.query(mfovs2_centers)[1]
# sec2_to_sec1_mfovs_transforms = {mfov2.mfov_index:
# inverse_transform(
# sec1_to_sec2_mfovs_transforms[
# mfovs1_centers_sec2centers[0][mfovs2_closest_centers_mfovs1_idxs[i]]
# ]
# )
# for i, mfov2 in enumerate(sec2.mfovs())}
# estimate the transformation for mfovs in sec1 that do not have one (look at closest neighbor)
if len(missing_mfovs1_transforms_centers[0]) > 0:
mfovs1_centers_sec1_kdtree = KDTree(mfovs1_centers_sec2centers[1])
mfovs1_missing_closest_centers_mfovs1_idxs = mfovs1_centers_sec1_kdtree.query(missing_mfovs1_transforms_centers[1])[1]
missing_mfovs1_sec2_centers = []
for i, (mfov1_index, mfov1_closest_mfov_idx) in enumerate(zip(missing_mfovs1_transforms_centers[0], mfovs1_missing_closest_centers_mfovs1_idxs)):
model = sec1_to_sec2_mfovs_transforms[
mfovs1_centers_sec2centers[0][mfov1_closest_mfov_idx]
]
sec1_to_sec2_mfovs_transforms[mfov1_index] = model
missing_mfovs1_sec2_centers.append(model.apply(np.atleast_2d(missing_mfovs1_transforms_centers[1][i]))[0])
# update the mfovs1_centers_sec2centers lists to include the | |
: 'disable',
'configured' : 'disable',
'starting' : 'disable',
'paused' : 'disable' }
}
# Translate drp alias to detector name
# For example: 'cam_1' -> 'cam'
def detector_name(drp_alias):
    """Translate a drp alias to its detector name, e.g. 'cam_1' -> 'cam'."""
    head, sep, _tail = drp_alias.rpartition('_')
    return head if sep else drp_alias
# Count the number of drp segments matching a detector name.
# If only_active=True, count only the active segments.
def segment_count(det_name, platform_dict, *, only_active=False):
    """Count drp segments whose alias maps to det_name.

    With only_active=True, segments whose 'active' flag is not 1 are skipped.
    A KeyError anywhere in the scan (e.g. no 'drp' section) stops counting
    and returns the count accumulated so far.
    """
    matches = 0
    try:
        for segment in platform_dict['drp'].values():
            if only_active and segment['active'] != 1:
                # skip inactive segment
                continue
            if detector_name(segment['proc_info']['alias']) == det_name:
                matches += 1
    except KeyError:
        pass
    return matches
def timestampStr():
    """Return current UTC time as 'seconds-nanoseconds' relative to the EPICS epoch."""
    now = datetime.now(timezone.utc)
    seconds = int(now.timestamp()) - POSIX_TIME_AT_EPICS_EPOCH
    nanoseconds = 1000 * now.microsecond
    return '%010d-%09d' % (seconds, nanoseconds)
def create_msg(key, msg_id=None, sender_id=None, body=None):
    """Build a control message dict.

    Parameters
    ----------
    key: str, message key (transition, request, or report type)
    msg_id: str or None; when None a fresh timestamp id is generated
    sender_id: opaque id of the sending process, or None
    body: dict payload; defaults to a new empty dict

    BUG FIX: the default for body used to be a shared mutable dict ({});
    it is now created per call so messages never alias each other's body.
    """
    if msg_id is None:
        msg_id = timestampStr()
    if body is None:
        body = {}
    msg = {'header': {
        'key': key,
        'msg_id': msg_id,
        'sender_id': sender_id},
        'body': body}
    return msg
def error_msg(message):
    """Wrap an error string in an 'error' message."""
    return create_msg('error', body={'err_info': message})
def fileReport_msg(path):
    """Wrap a data-file path in a 'fileReport' message."""
    return create_msg('fileReport', body={'path': path})
def progress_msg(transition, elapsed, total):
body = {'transition': transition, 'elapsed': int(elapsed), 'total': int(total)}
return create_msg('progress', body=body)
def back_pull_port(platform):
    """Base port for the backend PULL socket of the given platform."""
    return PORT_BASE + platform

def back_pub_port(platform):
    """Backend PUB socket port: PULL port + 10."""
    return back_pull_port(platform) + 10

def front_rep_port(platform):
    """Frontend REP socket port: PULL port + 20."""
    return back_pull_port(platform) + 20

def front_pub_port(platform):
    """Frontend PUB socket port: PULL port + 30."""
    return back_pull_port(platform) + 30
def get_readout_group_mask(body):
    """Return a bitmask with one bit set per readout group found in body['drp'].

    Entries lacking det_info/readout are skipped; a missing 'drp' section
    yields 0.
    """
    mask = 0
    for node_info in body.get('drp', {}).values():
        try:
            mask |= 1 << node_info['det_info']['readout']
        except KeyError:
            pass
    return mask
def wait_for_answers(socket, wait_time, msg_id):
    """Generator: yield messages arriving on socket within wait_time.

    Async report messages (keys listed in the global report_keys) are always
    yielded. Other messages are yielded only when their msg_id matches; when
    msg_id is None, the first non-report message's id becomes the reference.
    Iteration stops once wait_time (milliseconds, measured from the first
    poll) has elapsed with no message pending.

    Parameters
    ----------
    socket: zmq socket
    wait_time: int, wait time in milliseconds
    msg_id: int or None, expected msg_id of received messages
    """
    global report_keys
    remaining = wait_time
    start = time.time()
    while socket.poll(remaining) == zmq.POLLIN:
        try:
            msg = socket.recv_json()
        except Exception as ex:
            # malformed message: log and keep listening
            logging.error('recv_json(): %s' % ex)
            continue
        else:
            logging.debug('recv_json(): %s' % msg)
        # handle async reports
        if msg['header']['key'] in report_keys:
            yield msg
            continue
        # if msg_id is none take the msg_id of the first message as reference
        if msg_id is None:
            msg_id = msg['header']['msg_id']
        if msg['header']['msg_id'] == msg_id:
            yield msg
        else:
            logging.error('unexpected msg_id: got %s but expected %s' %
                          (msg['header']['msg_id'], msg_id))
        # shrink the poll budget by the time already spent
        remaining = max(0, int(wait_time - 1000*(time.time() - start)))
class CollectionManager():
def __init__(self, args):
    """Set up the collection manager: sockets, PVs, state machine, main loop.

    NOTE: this constructor also *runs* the main loop (self.run()) and only
    returns when the loop exits, at which point the slow-update thread is
    stopped.

    args: parsed command-line namespace (platform, user, PV base, timeouts, ...).
    """
    self.platform = args.p
    self.alias = args.u
    self.config_alias = args.C # e.g. BEAM/NOBEAM
    self.cfg_dbase = args.d
    self.xpm_master = args.x
    self.pv_base = args.B
    # ZMQ sockets: back_* talk to the DAQ processes, front_* to control clients
    self.context = zmq.Context(1)
    self.back_pull = self.context.socket(zmq.PULL)
    self.back_pub = self.context.socket(zmq.PUB)
    self.front_rep = self.context.socket(zmq.REP)
    self.front_pub = self.context.socket(zmq.PUB)
    self.back_pull.bind('tcp://*:%d' % back_pull_port(args.p))
    self.back_pub.bind('tcp://*:%d' % back_pub_port(args.p))
    self.front_rep.bind('tcp://*:%d' % front_rep_port(args.p))
    self.front_pub.bind('tcp://*:%d' % front_pub_port(args.p))
    self.slow_update_rate = args.S
    self.slow_update_enabled = False
    self.slow_update_exit = Event()
    self.phase2_timeout = args.T
    self.user = args.user
    self.password = <PASSWORD>  # NOTE(review): value redacted in this copy
    self.url = args.url
    self.experiment_name = None
    self.rollcall_timeout = args.rollcall_timeout
    self.bypass_activedet = False
    if args.r:
        # active detectors file from command line
        self.activedetfilename = args.r
    else:
        # default active detectors file
        homedir = os.path.expanduser('~')
        self.activedetfilename = '%s/.psdaq/p%d.activedet.json' % (homedir, self.platform)
    if self.activedetfilename == '/dev/null':
        # active detectors file bypassed
        self.bypass_activedet = True
        logging.warning("active detectors file disabled. Default settings will be used.")
    else:
        logging.info("active detectors file: %s" % self.activedetfilename)
    if self.slow_update_rate:
        # initialize slow update thread (started further below)
        self.slow_update_thread = Thread(target=self.slow_update_func, name='slowupdate')
    # initialize poll set
    self.poller = zmq.Poller()
    self.poller.register(self.back_pull, zmq.POLLIN)
    self.poller.register(self.front_rep, zmq.POLLIN)
    # initialize EPICS context
    self.ctxt = Context('pva')
    # name PVs
    self.pvListMsgHeader = []   # filled in at alloc
    self.pvListXPM = []         # filled in at alloc
    self.pv_xpm_base = self.pv_base + ':XPM:%d' % self.xpm_master
    self.pvGroupL0Enable = self.pv_xpm_base+':GroupL0Enable'
    self.pvGroupL0Disable = self.pv_xpm_base+':GroupL0Disable'
    self.pvGroupMsgInsert = self.pv_xpm_base+':GroupMsgInsert'
    self.pvGroupL0Reset = self.pv_xpm_base+':GroupL0Reset'
    self.groups = 0     # groups bitmask
    self.cmstate = {}
    self.phase1Info = {}
    self.level_keys = {'drp', 'teb', 'meb', 'control'}
    # parse instrument_name[:station_number]
    if ':' in args.P:
        self.instrument, station_number = args.P.split(':', maxsplit=1)
        try:
            self.station = int(station_number)
        except ValueError:
            logging.error("Invalid station number '%s', using platform" % station_number)
            self.station = self.platform
    else:
        self.instrument = args.P
        self.station = self.platform
    logging.debug('instrument=%s, station=%d' % (self.instrument, self.station))
    self.ids = set()
    # REQUEST-key handlers dispatched from service_requests()
    self.handle_request = {
        'selectplatform': self.handle_selectplatform,
        'getinstrument': self.handle_getinstrument,
        'getstate': self.handle_getstate,
        'storejsonconfig': self.handle_storejsonconfig,
        'getstatus': self.handle_getstatus
    }
    self.lastTransition = 'reset'
    self.recording = False
    # build the DAQ state machine; every transition gates on a condition_*
    # callback and publishes status afterwards
    self.collectMachine = Machine(self, DaqControl.states, initial='reset')
    self.collectMachine.add_transition('reset', '*', 'reset',
                                       conditions='condition_reset', after='report_status')
    self.collectMachine.add_transition('rollcall', ['reset', 'unallocated'], 'unallocated',
                                       conditions='condition_rollcall', after='report_status')
    self.collectMachine.add_transition('alloc', 'unallocated', 'allocated',
                                       conditions='condition_alloc', after='report_status')
    self.collectMachine.add_transition('dealloc', 'allocated', 'unallocated',
                                       conditions='condition_dealloc', after='report_status')
    self.collectMachine.add_transition('connect', 'allocated', 'connected',
                                       conditions='condition_connect', after='report_status')
    self.collectMachine.add_transition('disconnect', 'connected', 'allocated',
                                       conditions='condition_disconnect', after='report_status')
    self.collectMachine.add_transition('configure', 'connected', 'configured',
                                       conditions='condition_configure', after='report_status')
    self.collectMachine.add_transition('unconfigure', 'configured', 'connected',
                                       conditions='condition_unconfigure', after='report_status')
    self.collectMachine.add_transition('beginrun', 'configured', 'starting',
                                       conditions='condition_beginrun', after='report_status')
    self.collectMachine.add_transition('endrun', 'starting', 'configured',
                                       conditions='condition_endrun', after='report_status')
    self.collectMachine.add_transition('beginstep', 'starting', 'paused',
                                       conditions='condition_beginstep', after='report_status')
    self.collectMachine.add_transition('endstep', 'paused', 'starting',
                                       conditions='condition_endstep', after='report_status')
    self.collectMachine.add_transition('enable', 'paused', 'running',
                                       after=['after_enable', 'report_status'],
                                       conditions='condition_enable')
    self.collectMachine.add_transition('disable', 'running', 'paused',
                                       before='before_disable',
                                       conditions='condition_disable', after='report_status')
    # slowupdate is an internal transition
    # do not report status after slowupdate transition
    self.collectMachine.add_transition('slowupdate', 'running', None,
                                       conditions='condition_slowupdate')
    logging.info('Initial state = %s' % self.state)
    if self.slow_update_rate:
        # start slow update thread
        self.slow_update_enabled = False
        self.slow_update_thread.start()
    # start main loop (blocks until KeyboardInterrupt)
    self.run()
    if self.slow_update_rate:
        # stop slow update thread
        self.slow_update_exit.set()
        time.sleep(0.5)
#
# cmstate_levels - return copy of cmstate with only drp/teb/meb entries
#
def cmstate_levels(self):
    """Return a copy of cmstate restricted to the drp/teb/meb/control entries."""
    selected = self.cmstate.keys() & self.level_keys
    return {key: self.cmstate[key] for key in selected}
#
# pv_put -
#
def pv_put(self, pvName, val):
    """Write val to the EPICS PV pvName via self.ctxt.

    Returns True on success, False on timeout or any other failure.
    Errors are logged, never raised.
    """
    retval = False
    try:
        self.ctxt.put(pvName, val)
    except TimeoutError:
        logging.error("self.ctxt.put('%s', %d) timed out" % (pvName, val))
    except Exception:
        logging.error("self.ctxt.put('%s', %d) failed" % (pvName, val))
    else:
        retval = True
        logging.debug("self.ctxt.put('%s', %d)" % (pvName, val))
    return retval
def service_requests(self):
    """Receive one request from the front REP socket, dispatch it, and reply.

    msg['header']['key'] formats handled:
        setstate.STATE
        setconfig.CONFIG_ALIAS
        setrecord.RECORD_FLAG
        setbypass.BYPASS_FLAG
        TRANSITION
        REQUEST
    The set* handlers and transitions send their reply internally; plain
    requests are answered with the handler's return value. An unknown key
    produces an 'error' reply.
    """
    answer = None
    try:
        msg = self.front_rep.recv_json()
        key = msg['header']['key'].split(".")
        logging.debug("service_requests: key = %s" % key)
        body = msg['body']
        if key[0] == 'setstate':
            # handle_setstate() sends reply internally
            self.phase1Info.update(body)
            self.handle_setstate(key[1])
            answer = None
        elif key[0] == 'setconfig':
            # handle_setconfig() sends reply internally
            self.handle_setconfig(key[1])
            answer = None
        elif key[0] == 'setrecord':
            # handle_setrecord() sends reply internally
            if key[1] == '0':
                self.handle_setrecord(False)
            else:
                self.handle_setrecord(True)
            answer = None
        elif key[0] == 'setbypass':
            # handle_setbypass() sends reply internally
            if key[1] == '0':
                self.handle_setbypass(False)
            else:
                self.handle_setbypass(True)
            answer = None
        elif key[0] in DaqControl.transitions:
            # is body dict not-empty?
            if body:
                self.phase1Info[key[1]] = body
                # BUG FIX: was a bare `phase1Info`, which raised NameError
                # whenever a transition arrived with a non-empty body
                print('***', key[1], self.phase1Info)
            # send 'ok' reply before calling handle_trigger()
            self.front_rep.send_json(create_msg('ok'))
            # drop slowupdate transition if not in running state,
            # due to race condition between slowupdate and disable
            if key[0] == 'slowupdate' and self.state != 'running':
                logging.debug('dropped slowupdate transition in state %s' % self.state)
                return
            retval = self.handle_trigger(key[0], stateChange=False)
            answer = None
            try:
                # send error message, if any, to front_pub socket
                self.report_error(retval['body']['err_info'])
            except KeyError:
                pass
        else:
            answer = self.handle_request[key[0]](body)
    except KeyError:
        answer = create_msg('error')
    if answer is not None:
        self.front_rep.send_json(answer)
#
# register_file -
#
def register_file(self, body):
    """Publish a fileReport for body['path'] and register the file with the run database.

    Requires self.experiment_name to be set; logs and returns on any HTTP or
    application-level failure (best-effort, never raises).
    """
    if self.experiment_name is None:
        logging.error('register_file(): experiment_name is None')
        return
    path = body['path']
    logging.info('data file: %s' % path)
    # let front-end clients know about the new file
    self.front_pub.send_json(fileReport_msg(path))
    # register the file
    # url prefix: https://pswww.slac.stanford.edu/ws-auth/devlgbk/
    serverURLPrefix = "{0}lgbk/{1}/ws/".format(self.url, self.experiment_name)
    logging.debug('serverURLPrefix = %s' % serverURLPrefix)
    try:
        resp = requests.post(serverURLPrefix + "register_file", json=body,
                             auth=HTTPBasicAuth(self.user, self.password))
    except Exception as ex:
        logging.error('register_file error. HTTP request: %s' % ex)
    else:
        logging.debug("register_file response: %s" % resp.text)
        if resp.status_code == requests.codes.ok:
            # HTTP ok; the application-level result is in the JSON payload
            if resp.json().get("success", None):
                logging.debug("register_file success")
            else:
                logging.error("register_file failure")
        else:
            logging.error("register_file error: status code %d" % resp.status_code)
    return
#
# confirm_response -
#
def confirm_response(self, socket, wait_time, msg_id, ids, *, progress_txt=None):
    """Collect replies matching msg_id from the expected senders.

    socket: zmq socket to read from
    wait_time: overall timeout in milliseconds
    msg_id: expected msg_id (None = adopt the id of the first reply seen)
    ids: collection of sender ids still expected to answer; mutated in
         place (responders are removed as their replies arrive)
    progress_txt: when given, publish progress reports while waiting

    Returns (ids, msgs, reports): senders that never answered, the matching
    reply messages, and any async report messages seen meanwhile.
    """
    global report_keys
    logging.debug('confirm_response(): ids = %s' % ids)
    msgs = []
    reports = []
    begin_time = datetime.now(timezone.utc)
    end_time = begin_time + timedelta(milliseconds=wait_time)
    # poll in 1-second slices until everyone answered or the deadline passed
    while len(ids) > 0 and datetime.now(timezone.utc) < end_time:
        for msg in wait_for_answers(socket, 1000, msg_id):
            if msg['header']['key'] in report_keys:
                # async report (e.g. fileReport/error), not a reply
                reports.append(msg)
            elif msg['header']['sender_id'] in ids:
                msgs.append(msg)
                ids.remove(msg['header']['sender_id'])
                logging.debug('confirm_response(): removed %s from ids' % msg['header']['sender_id'])
            else:
                logging.debug('confirm_response(): %s not in ids' % msg['header']['sender_id'])
            if len(ids) == 0:
                break
        if progress_txt is not None:
            self.progressReport(begin_time, end_time, progress_txt=progress_txt)
    for ii in ids:
        logging.debug('id %s did not respond' % ii)
    return ids, msgs, reports
#
# process_reports
#
def process_reports(self, report_list):
    """Dispatch async report messages: file registrations and error reports."""
    for report in report_list:
        try:
            report_key = report['header']['key']
            if report_key == 'fileReport':
                self.register_file(report['body'])
            elif report_key == 'error':
                self.report_error(report['body']['err_info'])
        except KeyError as ex:
            logging.error('process_reports() KeyError: %s' % ex)
def service_status(self):
    """Receive one async status message from the back PULL socket and process it."""
    report = self.back_pull.recv_json()
    logging.debug('service_status() received msg \'%s\'' % report)
    self.process_reports([report])
def run(self):
    """Main loop: service front-end requests and back-end status until Ctrl-C."""
    try:
        while True:
            events = dict(self.poller.poll())
            if events.get(self.front_rep) == zmq.POLLIN:
                self.service_requests()
            elif events.get(self.back_pull) == zmq.POLLIN:
                self.service_status()
    except KeyboardInterrupt:
        logging.info('KeyboardInterrupt')
def handle_trigger(self, key, *, stateChange=True):
logging.debug('handle_trigger(\'%s\', stateChange=\'%s\') in state \'%s\'' % (key, stateChange, self.state))
| |
0x40, 0x81, 0x3E, 0x1C, 0x38, 0x71, 0xF0, 0x0B, 0x06, 0x07, 0x01,
0x03, 0x00, 0x41, 0x00, 0x20, 0x80, 0x10, 0x40, 0x08, 0x20, 0x04, 0x10,
0x02, 0x08, 0x01, 0x04, 0x00, 0x82, 0x00, 0x41, 0x00, 0x20, 0x80, 0x13,
0xF0, 0x3E, 0x07, 0xC0, 0x30, 0x60, 0x80, 0x22, 0x00, 0x24, 0x00, 0x50,
0x00, 0x60, 0x00, 0xC0, 0x01, 0x80, 0x03, 0x00, 0x05, 0x00, 0x12, 0x00,
0x22, 0x00, 0x83, 0x06, 0x01, 0xF0, 0x00, 0xF1, 0xFC, 0x05, 0xC1, 0x81,
0xC0, 0x10, 0x60, 0x02, 0x18, 0x00, 0xC4, 0x00, 0x11, 0x00, 0x04, 0x40,
0x01, 0x10, 0x00, 0x44, 0x00, 0x11, 0x80, 0x08, 0x60, 0x02, 0x14, 0x01,
0x04, 0xC1, 0x81, 0x0F, 0x80, 0x40, 0x00, 0x10, 0x00, 0x04, 0x00, 0x01,
0x00, 0x00, 0x40, 0x00, 0x10, 0x00, 0x3F, 0xC0, 0x00, 0x0F, 0xE3, 0xC6,
0x0E, 0x86, 0x00, 0xE1, 0x00, 0x18, 0xC0, 0x06, 0x20, 0x00, 0x88, 0x00,
0x22, 0x00, 0x08, 0x80, 0x02, 0x20, 0x00, 0x84, 0x00, 0x61, 0x00, 0x18,
0x20, 0x0A, 0x06, 0x0C, 0x80, 0x7C, 0x20, 0x00, 0x08, 0x00, 0x02, 0x00,
0x00, 0x80, 0x00, 0x20, 0x00, 0x08, 0x00, 0x02, 0x00, 0x0F, 0xF0, 0xF8,
0x7C, 0x11, 0x8C, 0x2C, 0x00, 0x70, 0x00, 0xC0, 0x01, 0x00, 0x02, 0x00,
0x04, 0x00, 0x08, 0x00, 0x10, 0x00, 0x20, 0x00, 0x40, 0x00, 0x80, 0x01,
0x00, 0x3F, 0xFC, 0x00, 0x0F, 0xD1, 0x83, 0x98, 0x04, 0x80, 0x24, 0x00,
0x30, 0x00, 0xF0, 0x00, 0xFC, 0x00, 0x30, 0x00, 0xE0, 0x03, 0x00, 0x1C,
0x01, 0xF0, 0x1A, 0x7F, 0x00, 0x08, 0x00, 0x08, 0x00, 0x08, 0x00, 0x08,
0x00, 0x08, 0x00, 0xFF, 0xFC, 0x08, 0x00, 0x08, 0x00, 0x08, 0x00, 0x08,
0x00, 0x08, 0x00, 0x08, 0x00, 0x08, 0x00, 0x08, 0x00, 0x08, 0x00, 0x08,
0x00, 0x08, 0x00, 0x08, 0x01, 0x06, 0x0F, 0x03, 0xF8, 0xF0, 0x3E, 0x08,
0x01, 0x04, 0x00, 0x82, 0x00, 0x41, 0x00, 0x20, 0x80, 0x10, 0x40, 0x08,
0x20, 0x04, 0x10, 0x02, 0x08, 0x01, 0x04, 0x00, 0x82, 0x00, 0x41, 0x00,
0xE0, 0x41, 0xD0, 0x1F, 0x8E, 0xFE, 0x0F, 0xE2, 0x00, 0x20, 0x60, 0x0C,
0x0C, 0x01, 0x80, 0x80, 0x20, 0x18, 0x0C, 0x01, 0x01, 0x00, 0x30, 0x60,
0x02, 0x08, 0x00, 0x41, 0x00, 0x0C, 0x60, 0x00, 0x88, 0x00, 0x19, 0x00,
0x01, 0x40, 0x00, 0x38, 0x00, 0xFC, 0x07, 0xE4, 0x00, 0x10, 0x80, 0x02,
0x18, 0x20, 0xC3, 0x0E, 0x18, 0x21, 0x42, 0x04, 0x28, 0x40, 0x8D, 0x88,
0x19, 0x93, 0x03, 0x22, 0x60, 0x2C, 0x68, 0x05, 0x85, 0x00, 0xA0, 0xA0,
0x1C, 0x1C, 0x01, 0x81, 0x80, 0x7C, 0x1F, 0x18, 0x03, 0x06, 0x03, 0x01,
0x83, 0x00, 0x63, 0x00, 0x1B, 0x00, 0x07, 0x00, 0x03, 0x80, 0x03, 0x60,
0x03, 0x18, 0x03, 0x06, 0x03, 0x01, 0x83, 0x00, 0x61, 0x00, 0x33, 0xF0,
0x7E, 0xFC, 0x1F, 0x90, 0x01, 0x8C, 0x00, 0x86, 0x00, 0xC1, 0x80, 0x40,
0xC0, 0x60, 0x20, 0x20, 0x18, 0x30, 0x04, 0x10, 0x03, 0x08, 0x00, 0x8C,
0x00, 0x64, 0x00, 0x16, 0x00, 0x0E, 0x00, 0x07, 0x00, 0x01, 0x00, 0x01,
0x80, 0x00, 0x80, 0x00, 0xC0, 0x00, 0x60, 0x00, 0x20, 0x07, 0xFE, 0x00,
0xFF, 0xF4, 0x01, 0x20, 0x09, 0x00, 0x80, 0x08, 0x00, 0x80, 0x08, 0x00,
0xC0, 0x04, 0x00, 0x40, 0x04, 0x00, 0x40, 0x14, 0x00, 0xA0, 0x07, 0xFF,
0xE0, 0x07, 0x0C, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
0x30, 0xC0, 0x30, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
0x0C, 0x07, 0xFF, 0xFF, 0xFF, 0x80, 0xE0, 0x30, 0x10, 0x10, 0x10, 0x10,
0x10, 0x10, 0x10, 0x10, 0x10, 0x08, 0x07, 0x0C, 0x10, 0x10, 0x10, 0x10,
0x10, 0x10, 0x10, 0x10, 0x10, 0x30, 0xE0, 0x1C, 0x00, 0x44, 0x0D, 0x84,
0x36, 0x04, 0x40, 0x07, 0x00 ]
FreeMono18pt7bGlyphs = [
[ 0, 0, 0, 21, 0, 1 ], # 0x20 ' '
[ 0, 4, 22, 21, 8, -21 ], # 0x21 '!'
[ 11, 11, 10, 21, 5, -20 ], # 0x22 '"'
[ 25, 14, 24, 21, 3, -21 ], # 0x23 '#'
[ 67, 13, 26, 21, 4, -22 ], # 0x24 '$'
[ 110, 15, 21, 21, 3, -20 ], # 0x25 '%'
[ 150, 12, 18, 21, 4, -17 ], # 0x26 '&'
[ 177, 4, 10, 21, 8, -20 ], # 0x27 '''
[ 182, 5, 25, 21, 10, -20 ], # 0x28 '('
[ 198, 5, 25, 21, 6, -20 ], # 0x29 ')'
[ 214, 13, 12, 21, 4, -20 ], # 0x2A '*'
[ 234, 15, 17, 21, 3, -17 ], # 0x2B '+'
[ 266, 7, 10, 21, 5, -4 ], # 0x2C ','
[ 275, 15, 1, 21, 3, -9 ], # 0x2D '-'
[ 277, 5, 5, 21, 8, -4 ], # 0x2E '.'
[ 281, 13, 26, 21, 4, -22 ], # 0x2F '/'
[ 324, 13, 21, 21, 4, -20 ], # 0x30 '0'
[ 359, 13, 21, 21, 4, -20 ], # 0x31 '1'
[ 394, 13, 21, 21, 3, -20 ], # 0x32 '2'
[ 429, 14, 21, 21, 3, -20 ], # 0x33 '3'
[ 466, 12, 21, 21, 4, -20 ], # 0x34 '4'
[ 498, 14, 21, 21, 3, -20 ], # 0x35 '5'
[ 535, 12, 21, 21, 5, -20 ], # 0x36 '6'
[ 567, 12, 21, 21, 4, -20 ], # 0x37 '7'
[ 599, 13, 21, 21, 4, -20 ], # 0x38 '8'
[ 634, 12, 21, 21, 5, -20 ], # 0x39 '9'
[ 666, 5, 15, 21, 8, -14 ], # 0x3A ':'
[ 676, 7, 20, 21, 5, -14 ], # 0x3B ';'
[ 694, 15, 16, 21, 3, -17 ], # 0x3C '<'
[ 724, 17, 6, 21, 2, -12 ], # 0x3D '='
[ 737, 15, 16, 21, 3, -17 ], # 0x3E '>'
[ 767, 12, 20, 21, 5, -19 ], # 0x3F '?'
[ 797, 13, 23, 21, 4, -20 ], # 0x40 '@'
[ 835, 21, 20, 21, 0, -19 ], # 0x41 'A'
[ 888, 18, 20, 21, 1, -19 ], # 0x42 'B'
[ 933, 17, 20, 21, 2, -19 ], # 0x43 'C'
[ 976, 16, 20, 21, 2, -19 ], # 0x44 'D'
[ 1016, 17, 20, 21, 1, -19 ], # 0x45 'E'
[ 1059, 17, 20, 21, 1, -19 ], # 0x46 'F'
[ 1102, 17, 20, 21, 2, -19 ], # 0x47 'G'
[ 1145, 16, 20, 21, 2, -19 ], # 0x48 'H'
[ 1185, 13, 20, 21, 4, -19 ], # 0x49 'I'
[ 1218, 17, 20, 21, 3, -19 ], # 0x4A 'J'
[ 1261, 18, 20, 21, 1, -19 ], # 0x4B 'K'
[ 1306, 15, 20, 21, 3, -19 ], # 0x4C 'L'
[ 1344, 19, 20, 21, 1, -19 ], # 0x4D 'M'
[ 1392, 18, 20, 21, 1, -19 ], # 0x4E 'N'
[ 1437, 17, 20, 21, 2, -19 ], # 0x4F 'O'
[ 1480, 16, 20, 21, 1, -19 ], # 0x50 'P'
[ 1520, 17, 24, 21, 2, -19 ], # 0x51 'Q'
[ 1571, 19, 20, 21, 1, -19 ], # 0x52 'R'
[ 1619, 14, 20, 21, 3, -19 ], # 0x53 'S'
[ 1654, 15, 20, 21, 3, -19 ], # 0x54 'T'
[ 1692, 17, 20, 21, 2, -19 ], # 0x55 'U'
[ 1735, 21, 20, 21, 0, -19 ], # 0x56 'V'
[ 1788, 19, 20, 21, 1, -19 ], # 0x57 'W'
[ 1836, 19, 20, 21, 1, -19 ], # 0x58 'X'
[ 1884, 17, 20, 21, 2, -19 ], # 0x59 'Y'
[ 1927, 13, 20, 21, 4, -19 ], # 0x5A 'Z'
[ 1960, 5, 25, 21, 10, -20 ], # 0x5B '['
[ 1976, 13, 26, 21, 4, -22 ], # 0x5C '\'
[ 2019, 5, 25, 21, 6, -20 ], # 0x5D ']'
[ 2035, 13, 9, 21, 4, -20 ], # 0x5E '^'
[ 2050, 21, 1, 21, 0, 4 ], # 0x5F '_'
[ 2053, 6, 5, 21, 5, -21 ], # 0x60 '`'
[ 2057, 16, 15, 21, 3, -14 ], # 0x61 'a'
[ 2087, 18, 21, 21, 1, -20 ], # 0x62 'b'
[ 2135, 15, 15, 21, 3, -14 | |
# TDMS/MvImport/MvCameraControl_header.py
# generated by 'xml2py'
# flags '-c -d -v C:\test_h\MvCameraControl.xml -o MvCameraControl_header.py'
from ctypes import *
STRING = c_char_p  # xml2py's alias for C 'char *' parameters

# ---------------------------------------------------------------------------
# SDK enumeration values, flattened to module-level ints by xml2py.
# Names (including vendor typos such as MV_ACQ_MODE_MUTLI and the
# MV_SAVE_IAMGE_TYPE enum below) mirror the shipped MvCameraControl C headers
# and must stay exactly as-is so this wrapper matches the binary SDK.
# PixelType_Gvsp_* values encode GigE Vision (GVSP) pixel-format codes.
# ---------------------------------------------------------------------------
PixelType_Gvsp_BayerBG12 = 17825811
MV_TRIGGER_SOURCE_LINE1 = 1
PixelType_Gvsp_Mono8_Signed = 17301506
MV_BALANCEWHITE_AUTO_ONCE = 2
PixelType_Gvsp_BayerGB8 = 17301514
MV_BALANCEWHITE_AUTO_OFF = 0
MV_Image_Jpeg = 2
PixelType_Gvsp_Mono12 = 17825797
MV_GAMMA_SELECTOR_SRGB = 2
PixelType_Gvsp_Coord3D_ABC32f_Planar = 39846081
PixelType_Gvsp_Coord3D_AC32f = 36176066
MV_EXPOSURE_AUTO_MODE_ONCE = 1
MV_GAMMA_SELECTOR_USER = 1
AM_WO = 2
PixelType_Gvsp_BayerBG10 = 17825807
PixelType_Gvsp_RGB10_Planar = 36700194
PixelType_Gvsp_BayerGB12 = 17825810
MV_BALANCEWHITE_AUTO_CONTINUOUS = 1
PixelType_Gvsp_BayerRG8 = 17301513
# Negative values are 32-bit format codes with the sign bit set, as produced
# when the C unsigned constant is read back through a signed int.
PixelType_Gvsp_COORD3D_DEPTH_PLUS_MASK = -2112094207
PixelType_Gvsp_RGB12_Planar = 36700195
PixelType_Gvsp_Mono10 = 17825795
PixelType_Gvsp_Undefined = -1
PixelType_Gvsp_BayerRG10_Packed = 17563687
PixelType_Gvsp_BayerGR16 = 17825838
PixelType_Gvsp_BayerBG12_Packed = 17563693
PixelType_Gvsp_BayerGB16 = 17825840
MV_TRIGGER_MODE_OFF = 0
PixelType_Gvsp_BayerRG16 = 17825839
PixelType_Gvsp_YCBCR709_411_8_CBYYCRYY = 34340930
PixelType_Gvsp_BayerBG16 = 17825841
PixelType_Gvsp_RGB8_Planar = 35127329
PixelType_Gvsp_RGB8_Packed = 35127316
PixelType_Gvsp_BGR8_Packed = 35127317
PixelType_Gvsp_RGBA8_Packed = 35651606
PixelType_Gvsp_YCBCR422_8_CBYCRY = 34603075
PixelType_Gvsp_RGB565_Packed = 34603061
PixelType_Gvsp_YCBCR422_8 = 34603067
PixelType_Gvsp_YUV444_Packed = 35127328
PixelType_Gvsp_YCBCR709_422_8_CBYCRY = 34603077
PixelType_Gvsp_YCBCR709_422_8 = 34603073
PixelType_Gvsp_RGB10_Packed = 36700184
PixelType_Gvsp_YCBCR8_CBYCR = 35127354
PixelType_Gvsp_YCBCR709_8_CBYCR = 35127360
PixelType_Gvsp_YCBCR601_411_8_CBYYCRYY = 34340927
IFT_IBoolean = 3
PixelType_Gvsp_RGB12_Packed = 36700186
PixelType_Gvsp_YUV422_YUYV_Packed = 34603058
PixelType_Gvsp_YCBCR601_422_8 = 34603070
PixelType_Gvsp_RGB16_Packed = 36700211
PixelType_Gvsp_BGR12_Packed = 36700187
PixelType_Gvsp_BayerGB12_Packed = 17563692
PixelType_Gvsp_BGR565_Packed = 34603062
PixelType_Gvsp_YCBCR601_422_8_CBYCRY = 34603076
PixelType_Gvsp_YUV411_Packed = 34340894
PixelType_Gvsp_BayerRG12_Packed = 17563691
PixelType_Gvsp_RGB10V1_Packed = 35651612
PixelType_Gvsp_YUV422_Packed = 34603039
MV_GAIN_MODE_ONCE = 1
MV_GAIN_MODE_CONTINUOUS = 2
MV_GAIN_MODE_OFF = 0
MV_GIGE_TRANSTYPE_MULTICAST = 1
MV_GIGE_TRANSTYPE_UNICAST = 0
AM_NI = 0
IFT_IValue = 0
PixelType_Gvsp_BGRA8_Packed = 35651607
MV_GIGE_TRANSTYPE_LIMITEDBROADCAST = 2
MV_GIGE_TRANSTYPE_CAMERADEFINED = 4
MV_GIGE_TRANSTYPE_SUBNETBROADCAST = 3
PixelType_Gvsp_BGR10_Packed = 36700185
MV_GIGE_TRANSTYPE_UNICAST_WITHOUT_RECV = 65536
V_Guru = 2
MV_GIGE_TRANSTYPE_UNICAST_DEFINED_PORT = 5
MV_GIGE_TRANSTYPE_MULTICAST_WITHOUT_RECV = 65537
PixelType_Gvsp_BayerGB10 = 17825806
IFT_ICategory = 8
PixelType_Gvsp_Coord3D_ABC32f = 39846080
MV_EXPOSURE_MODE_TRIGGER_WIDTH = 1
PixelType_Gvsp_BayerRG10 = 17825805
IFT_IEnumeration = 9
IFT_IFloat = 5
PixelType_Gvsp_RGB16_Planar = 36700196
PixelType_Gvsp_Mono14 = 17825829
IFT_IString = 6
PixelType_Gvsp_YCBCR411_8_CBYYCRYY = 34340924
PixelType_Gvsp_Mono12_Packed = 17563654
PixelType_Gvsp_Mono8 = 17301505
AM_CycleDetect = 6
PixelType_Gvsp_Mono4p = 17039417
PixelType_Gvsp_Mono10_Packed = 17563652
AM_Undefined = 5
MV_EXPOSURE_MODE_TIMED = 0
PixelType_Gvsp_BayerRG12 = 17825809
PixelType_Gvsp_BayerGR12 = 17825808
IFT_IEnumEntry = 10
AM_RW = 4
PixelType_Gvsp_Mono16 = 17825799
PixelType_Gvsp_BayerGR8 = 17301512
IFT_IInteger = 2
AM_RO = 3
MV_EXPOSURE_AUTO_MODE_OFF = 0
PixelType_Gvsp_Mono2p = 16908344
PixelType_Gvsp_BayerGR12_Packed = 17563690
PixelType_Gvsp_BayerGB10_Packed = 17563688
PixelType_Gvsp_BayerGR10_Packed = 17563686
PixelType_Gvsp_BayerBG10_Packed = 17563689
PixelType_Gvsp_YCBCR601_8_CBYCR = 35127357
IFT_IPort = 11
IFT_IBase = 1
V_Invisible = 3
V_Beginner = 0
PixelType_Gvsp_Jpeg = -2145910783
MV_Image_Undefined = 0
MV_EXPOSURE_AUTO_MODE_CONTINUOUS = 2
MV_Image_Bmp = 1
MV_TRIGGER_SOURCE_SOFTWARE = 7
IFT_IRegister = 7
MV_Image_Png = 3
MV_ACQ_MODE_SINGLE = 0
MV_Image_Tif = 4
V_Expert = 1
MV_ACQ_MODE_CONTINUOUS = 2
MV_ACQ_MODE_MUTLI = 1  # sic: 'MUTLI' spelling comes from the vendor header
PixelType_Gvsp_BayerGR10 = 17825804
AM_NA = 1
V_Undefined = 99
MV_TRIGGER_SOURCE_FrequencyConverter = 8
IFT_ICommand = 4
MV_TRIGGER_MODE_ON = 1
PixelType_Gvsp_RGB10V2_Packed = 35651613
PixelType_Gvsp_BayerBG8 = 17301515
MV_TRIGGER_SOURCE_LINE2 = 2
PixelType_Gvsp_RGB12V1_Packed = 35913780
MV_TRIGGER_SOURCE_LINE3 = 3
MV_TRIGGER_SOURCE_COUNTER0 = 4
MV_TRIGGER_SOURCE_LINE0 = 0
PixelType_Gvsp_Mono1p = 16842807

# ---------------------------------------------------------------------------
# <stdint.h>-style typedefs as resolved by xml2py on the generating host.
# NOTE(review): the c_long-based mappings (int_least64_t, intptr_t, ...)
# assume an LP64 platform; on 64-bit Windows c_long is 32 bits -- confirm
# before relying on these aliases there.
# ---------------------------------------------------------------------------
int8_t = c_int8
int16_t = c_int16
int32_t = c_int32
int64_t = c_int64
uint8_t = c_uint8
uint16_t = c_uint16
uint32_t = c_uint32
uint64_t = c_uint64
int_least8_t = c_byte
int_least16_t = c_short
int_least32_t = c_int
int_least64_t = c_long
uint_least8_t = c_ubyte
uint_least16_t = c_ushort
uint_least32_t = c_uint
uint_least64_t = c_ulong
int_fast8_t = c_byte
int_fast16_t = c_long
int_fast32_t = c_long
int_fast64_t = c_long
uint_fast8_t = c_ubyte
uint_fast16_t = c_ulong
uint_fast32_t = c_ulong
uint_fast64_t = c_ulong
intptr_t = c_long
uintptr_t = c_ulong
intmax_t = c_long
uintmax_t = c_ulong
# CameraParams.h 21
class _MV_GIGE_DEVICE_INFO_(Structure):
    """Identification info reported by a GigE Vision device during enumeration."""
    pass
_MV_GIGE_DEVICE_INFO_._fields_ = [
    # CameraParams.h 21
    ('nIpCfgOption', c_uint),
    ('nIpCfgCurrent', c_uint),
    ('nCurrentIp', c_uint),
    ('nCurrentSubNetMask', c_uint),
    ('nDefultGateWay', c_uint),  # sic: 'Defult' spelling comes from the vendor header
    # Fixed-size byte arrays; presumably NUL-padded ASCII strings -- TODO confirm encoding.
    ('chManufacturerName', c_ubyte * 32),
    ('chModelName', c_ubyte * 32),
    ('chDeviceVersion', c_ubyte * 32),
    ('chManufacturerSpecificInfo', c_ubyte * 48),
    ('chSerialNumber', c_ubyte * 16),
    ('chUserDefinedName', c_ubyte * 16),
    ('nNetExport', c_uint),
    ('nReserved', c_uint * 4),
]
MV_GIGE_DEVICE_INFO = _MV_GIGE_DEVICE_INFO_  # public alias matching the C typedef
# CameraParams.h 42
class _MV_USB3_DEVICE_INFO_(Structure):
    """Identification info reported by a USB3 Vision device."""
    pass
_MV_USB3_DEVICE_INFO_._fields_ = [
    # CameraParams.h 42
    ('CrtlInEndPoint', c_ubyte),  # sic: 'Crtl' spelling comes from the vendor header
    ('CrtlOutEndPoint', c_ubyte),
    ('StreamEndPoint', c_ubyte),
    ('EventEndPoint', c_ubyte),
    ('idVendor', c_ushort),
    ('idProduct', c_ushort),
    ('nDeviceNumber', c_uint),
    ('chDeviceGUID', c_ubyte * 64),
    ('chVendorName', c_ubyte * 64),
    ('chModelName', c_ubyte * 64),
    ('chFamilyName', c_ubyte * 64),
    ('chDeviceVersion', c_ubyte * 64),
    ('chManufacturerName', c_ubyte * 64),
    ('chSerialNumber', c_ubyte * 64),
    ('chUserDefinedName', c_ubyte * 64),
    ('nbcdUSB', c_uint),
    ('nReserved', c_uint * 3),
]
MV_USB3_DEVICE_INFO = _MV_USB3_DEVICE_INFO_
# CameraParams.h 64
class _MV_CC_DEVICE_INFO_(Structure):
    """Transport-independent device descriptor; transport details live in SpecialInfo."""
    pass
# CameraParams.h 76
class N19_MV_CC_DEVICE_INFO_3DOT_0E(Union):
    """Anonymous C union (name mangled by xml2py): GigE or USB3 device info."""
    pass
N19_MV_CC_DEVICE_INFO_3DOT_0E._fields_ = [
    # CameraParams.h 76
    ('stGigEInfo', MV_GIGE_DEVICE_INFO),
    ('stUsb3VInfo', MV_USB3_DEVICE_INFO),
]
_MV_CC_DEVICE_INFO_._fields_ = [
    # CameraParams.h 64
    ('nMajorVer', c_ushort),
    ('nMinorVer', c_ushort),
    ('nMacAddrHigh', c_uint),
    ('nMacAddrLow', c_uint),
    # NOTE(review): presumably selects which arm of SpecialInfo is valid -- confirm
    # against the SDK documentation before dispatching on it.
    ('nTLayerType', c_uint),
    ('nReserved', c_uint * 4),
    ('SpecialInfo', N19_MV_CC_DEVICE_INFO_3DOT_0E),
]
MV_CC_DEVICE_INFO = _MV_CC_DEVICE_INFO_
# CameraParams.h 86
class _MV_NETTRANS_INFO_(Structure):
    """Network transfer statistics."""
    pass
_MV_NETTRANS_INFO_._fields_ = [
    # CameraParams.h 86
    ('nReviceDataSize', int64_t),  # sic: 'Revice' (receive) spelling from vendor header
    ('nThrowFrameCount', c_int),
    ('nReserved', c_uint * 5),
]
MV_NETTRANS_INFO = _MV_NETTRANS_INFO_
# CameraParams.h 101
class _MV_CC_DEVICE_INFO_LIST_(Structure):
    """Result of device enumeration: nDeviceNum valid entries in pDeviceInfo."""
    pass
_MV_CC_DEVICE_INFO_LIST_._fields_ = [
    # CameraParams.h 101
    ('nDeviceNum', c_uint),
    # Fixed array of 256 pointers; only the first nDeviceNum are meaningful.
    ('pDeviceInfo', POINTER(MV_CC_DEVICE_INFO) * 256),
]
MV_CC_DEVICE_INFO_LIST = _MV_CC_DEVICE_INFO_LIST_
# CameraParams.h 110
class _MV_FRAME_OUT_INFO_(Structure):
    """Per-frame metadata for a grabbed image (legacy, superseded by the EX variant)."""
    pass
# values for enumeration 'MvGvspPixelType'
# C enums are plain ints in ctypes; the PixelType_Gvsp_* constants above are
# the legal values.
MvGvspPixelType = c_int # enum
_MV_FRAME_OUT_INFO_._fields_ = [
    # CameraParams.h 110
    ('nWidth', c_ushort),
    ('nHeight', c_ushort),
    ('enPixelType', MvGvspPixelType),
    ('nFrameNum', c_uint),
    ('nDevTimeStampHigh', c_uint),
    ('nDevTimeStampLow', c_uint),
    ('nReserved0', c_uint),
    ('nHostTimeStamp', int64_t),
    ('nFrameLen', c_uint),
    ('nLostPacket', c_uint),
    ('nReserved', c_uint * 2),
]
MV_FRAME_OUT_INFO = _MV_FRAME_OUT_INFO_
# CameraParams.h 129
class _MV_FRAME_OUT_INFO_EX_(Structure):
    """Extended per-frame metadata (chunk data, exposure/gain, RGB stats, ...)."""
    pass
_MV_FRAME_OUT_INFO_EX_._fields_ = [
    # CameraParams.h 129
    ('nWidth', c_ushort),
    ('nHeight', c_ushort),
    ('enPixelType', MvGvspPixelType),
    ('nFrameNum', c_uint),
    ('nDevTimeStampHigh', c_uint),
    ('nDevTimeStampLow', c_uint),
    ('nReserved0', c_uint),
    ('nHostTimeStamp', int64_t),
    ('nFrameLen', c_uint),
    ('nSecondCount', c_uint),
    ('nCycleCount', c_uint),
    ('nCycleOffset', c_uint),
    ('fGain', c_float),
    ('fExposureTime', c_float),
    ('nAverageBrightness', c_uint),
    ('nRed', c_uint),
    ('nGreen', c_uint),
    ('nBlue', c_uint),
    ('nFrameCounter', c_uint),
    ('nTriggerIndex', c_uint),
    ('nInput', c_uint),
    ('nOutput', c_uint),
    ('nOffsetX', c_ushort),
    ('nOffsetY', c_ushort),
    ('nChunkWidth', c_ushort),
    ('nChunkHeight', c_ushort),
    ('nLostPacket', c_uint),
    ('nReserved', c_uint * 39),
]
MV_FRAME_OUT_INFO_EX = _MV_FRAME_OUT_INFO_EX_
# CameraParams.h 176
class _MV_DISPLAY_FRAME_INFO_(Structure):
    """Arguments for rendering a frame into a native window handle (hWnd)."""
    pass
_MV_DISPLAY_FRAME_INFO_._fields_ = [
    # CameraParams.h 176
    ('hWnd', c_void_p),
    ('pData', POINTER(c_ubyte)),
    ('nDataLen', c_uint),
    ('nWidth', c_ushort),
    ('nHeight', c_ushort),
    ('enPixelType', MvGvspPixelType),
    ('nRes', c_uint * 4),
]
MV_DISPLAY_FRAME_INFO = _MV_DISPLAY_FRAME_INFO_
# values for enumeration 'MV_SAVE_IAMGE_TYPE'
# sic: 'IAMGE' misspelling is in the vendor header; MV_Image_* constants apply.
MV_SAVE_IAMGE_TYPE = c_int # enum
# CameraParams.h 198
class _MV_SAVE_IMAGE_PARAM_T_(Structure):
    """In/out parameters for converting a raw frame into an encoded image."""
    pass
_MV_SAVE_IMAGE_PARAM_T_._fields_ = [
    # CameraParams.h 198
    ('pData', POINTER(c_ubyte)),
    ('nDataLen', c_uint),
    ('enPixelType', MvGvspPixelType),
    ('nWidth', c_ushort),
    ('nHeight', c_ushort),
    ('pImageBuffer', POINTER(c_ubyte)),  # caller-provided output buffer
    ('nImageLen', c_uint),
    ('nBufferSize', c_uint),
    ('enImageType', MV_SAVE_IAMGE_TYPE),
]
MV_SAVE_IMAGE_PARAM = _MV_SAVE_IMAGE_PARAM_T_
# CameraParams.h 214
class _MV_SAVE_IMAGE_PARAM_T_EX_(Structure):
    """Extended image-save parameters (adds JPEG quality and interpolation method)."""
    pass
_MV_SAVE_IMAGE_PARAM_T_EX_._fields_ = [
    # CameraParams.h 214
    ('pData', POINTER(c_ubyte)),
    ('nDataLen', c_uint),
    ('enPixelType', MvGvspPixelType),
    ('nWidth', c_ushort),
    ('nHeight', c_ushort),
    ('pImageBuffer', POINTER(c_ubyte)),
    ('nImageLen', c_uint),
    ('nBufferSize', c_uint),
    ('enImageType', MV_SAVE_IAMGE_TYPE),
    ('nJpgQuality', c_uint),
    ('iMethodValue', c_uint),
    ('nReserved', c_uint * 3),
]
MV_SAVE_IMAGE_PARAM_EX = _MV_SAVE_IMAGE_PARAM_T_EX_
# CameraParams.h 236
class _MV_PIXEL_CONVERT_PARAM_T_(Structure):
    """In/out parameters for pixel-format conversion of a frame buffer."""
    pass
_MV_PIXEL_CONVERT_PARAM_T_._fields_ = [
    # CameraParams.h 236
    ('nWidth', c_ushort),
    ('nHeight', c_ushort),
    ('enSrcPixelType', MvGvspPixelType),
    ('pSrcData', POINTER(c_ubyte)),
    ('nSrcDataLen', c_uint),
    ('enDstPixelType', MvGvspPixelType),
    ('pDstBuffer', POINTER(c_ubyte)),
    ('nDstLen', c_uint),
    ('nDstBufferSize', c_uint),
    ('nRes', c_uint * 4),
]
MV_CC_PIXEL_CONVERT_PARAM = _MV_PIXEL_CONVERT_PARAM_T_
# ---------------------------------------------------------------------------
# C enums are represented as plain c_int; the legal values are the MV_*
# module-level constants defined near the top of this file.
# ---------------------------------------------------------------------------
# values for enumeration '_MV_CAM_ACQUISITION_MODE_'
_MV_CAM_ACQUISITION_MODE_ = c_int # enum
MV_CAM_ACQUISITION_MODE = _MV_CAM_ACQUISITION_MODE_
# values for enumeration '_MV_CAM_GAIN_MODE_'
_MV_CAM_GAIN_MODE_ = c_int # enum
MV_CAM_GAIN_MODE = _MV_CAM_GAIN_MODE_
# values for enumeration '_MV_CAM_EXPOSURE_MODE_'
_MV_CAM_EXPOSURE_MODE_ = c_int # enum
MV_CAM_EXPOSURE_MODE = _MV_CAM_EXPOSURE_MODE_
# values for enumeration '_MV_CAM_EXPOSURE_AUTO_MODE_'
_MV_CAM_EXPOSURE_AUTO_MODE_ = c_int # enum
MV_CAM_EXPOSURE_AUTO_MODE = _MV_CAM_EXPOSURE_AUTO_MODE_
# values for enumeration '_MV_CAM_TRIGGER_MODE_'
_MV_CAM_TRIGGER_MODE_ = c_int # enum
MV_CAM_TRIGGER_MODE = _MV_CAM_TRIGGER_MODE_
# values for enumeration '_MV_CAM_GAMMA_SELECTOR_'
_MV_CAM_GAMMA_SELECTOR_ = c_int # enum
MV_CAM_GAMMA_SELECTOR = _MV_CAM_GAMMA_SELECTOR_
# values for enumeration '_MV_CAM_BALANCEWHITE_AUTO_'
_MV_CAM_BALANCEWHITE_AUTO_ = c_int # enum
MV_CAM_BALANCEWHITE_AUTO = _MV_CAM_BALANCEWHITE_AUTO_
# values for enumeration '_MV_CAM_TRIGGER_SOURCE_'
_MV_CAM_TRIGGER_SOURCE_ = c_int # enum
MV_CAM_TRIGGER_SOURCE = _MV_CAM_TRIGGER_SOURCE_
# values for enumeration '_MV_GIGE_TRANSMISSION_TYPE_'
_MV_GIGE_TRANSMISSION_TYPE_ = c_int # enum
MV_GIGE_TRANSMISSION_TYPE = _MV_GIGE_TRANSMISSION_TYPE_
# CameraParams.h 377
class _MV_ALL_MATCH_INFO_(Structure):
    """Generic typed-info request: nType selects what pInfo points at."""
    pass
_MV_ALL_MATCH_INFO_._fields_ = [
    # CameraParams.h 377
    ('nType', c_uint),
    ('pInfo', c_void_p),  # points at a struct determined by nType (e.g. the *_DETECT structs below)
    ('nInfoSize', c_uint),
]
MV_ALL_MATCH_INFO = _MV_ALL_MATCH_INFO_
# CameraParams.h 387
class _MV_MATCH_INFO_NET_DETECT_(Structure):
    """Network-quality statistics for GigE streaming."""
    pass
_MV_MATCH_INFO_NET_DETECT_._fields_ = [
    # CameraParams.h 387
    ('nReviceDataSize', int64_t),  # sic: 'Revice' (receive) spelling from vendor header
    ('nLostPacketCount', int64_t),
    ('nLostFrameCount', c_uint),
    ('nReserved', c_uint * 5),
]
MV_MATCH_INFO_NET_DETECT = _MV_MATCH_INFO_NET_DETECT_
# CameraParams.h 396
class _MV_MATCH_INFO_USB_DETECT_(Structure):
    """Transfer-quality statistics for USB3 streaming."""
    pass
_MV_MATCH_INFO_USB_DETECT_._fields_ = [
    # CameraParams.h 396
    ('nReviceDataSize', int64_t),
    ('nRevicedFrameCount', c_uint),
    ('nErrorFrameCount', c_uint),
    ('nReserved', c_uint * 2),
]
MV_MATCH_INFO_USB_DETECT = _MV_MATCH_INFO_USB_DETECT_
# CameraParams.h 404
class _MV_IMAGE_BASIC_INFO_(Structure):
    """Width/height/frame-rate ranges plus the supported pixel-format list."""
    pass
_MV_IMAGE_BASIC_INFO_._fields_ = [
    # CameraParams.h 404
    # NOTE(review): nWidthValue/nWidthMin are c_ushort while the height fields
    # are c_uint; this mirrors the generated layout -- confirm against the
    # current CameraParams.h before assuming it is intentional.
    ('nWidthValue', c_ushort),
    ('nWidthMin', c_ushort),
    ('nWidthMax', c_uint),
    ('nWidthInc', c_uint),
    ('nHeightValue', c_uint),
    ('nHeightMin', c_uint),
    ('nHeightMax', c_uint),
    ('nHeightInc', c_uint),
    ('fFrameRateValue', c_float),
    ('fFrameRateMin', c_float),
    ('fFrameRateMax', c_float),
    ('enPixelType', c_uint),
    ('nSupportedPixelFmtNum', c_uint),
    ('enPixelList', c_uint * 64),  # first nSupportedPixelFmtNum entries are valid
    ('nReserved', c_uint * 8),
]
MV_IMAGE_BASIC_INFO = _MV_IMAGE_BASIC_INFO_
# values for enumeration 'MV_XML_InterfaceType'
MV_XML_InterfaceType = c_int # enum  (IFT_* constants)
# values for enumeration 'MV_XML_AccessMode'
MV_XML_AccessMode = c_int # enum  (AM_* constants)
# values for enumeration 'MV_XML_Visibility'
MV_XML_Visibility = c_int # enum  (V_* constants)
# CameraParams.h 500
class _MV_EVENT_OUT_INFO_(Structure):
    """Payload delivered to an event callback (device event name, id, timestamps)."""
    pass
_MV_EVENT_OUT_INFO_._fields_ = [
    # CameraParams.h 500
    ('EventName', c_char * 128),
    ('nEventID', c_ushort),
    ('nStreamChannel', c_ushort),
    ('nBlockIdHigh', c_uint),
    ('nBlockIdLow', c_uint),
    ('nTimestampHigh', c_uint),
    ('nTimestampLow', c_uint),
    ('pEventData', c_void_p),
    ('nEventDataSize', c_uint),
    ('nReserved', c_uint * 16),
]
MV_EVENT_OUT_INFO = _MV_EVENT_OUT_INFO_
# CameraParams.h 520
class _MV_CC_FILE_ACCESS_T(Structure):
    """Maps a local file (pUserFileName) to a file on the device (pDevFileName)."""
    pass
_MV_CC_FILE_ACCESS_T._fields_ = [
    # CameraParams.h 520
    ('pUserFileName', STRING),
    ('pDevFileName', STRING),
    ('nReserved', c_uint * 32),
]
MV_CC_FILE_ACCESS = _MV_CC_FILE_ACCESS_T
# CameraParams.h 529
class _MV_CC_FILE_ACCESS_PROGRESS_T(Structure):
    """Progress of a device file transfer: nCompleted of nTotal bytes."""
    pass
_MV_CC_FILE_ACCESS_PROGRESS_T._fields_ = [
    # CameraParams.h 529
    ('nCompleted', int64_t),
    ('nTotal', int64_t),
    ('nReserved', c_uint * 8),
]
MV_CC_FILE_ACCESS_PROGRESS = _MV_CC_FILE_ACCESS_PROGRESS_T
# CameraParams.h 538
class _MV_TRANSMISSION_TYPE_T(Structure):
    """GigE stream destination: transmission type plus destination IP/port."""
    pass
_MV_TRANSMISSION_TYPE_T._fields_ = [
    # CameraParams.h 538
    ('enTransmissionType', MV_GIGE_TRANSMISSION_TYPE),  # one of MV_GIGE_TRANSTYPE_*
    ('nDestIp', c_uint),
    ('nDestPort', c_ushort),
    ('nReserved', c_uint * 32),
]
MV_TRANSMISSION_TYPE = _MV_TRANSMISSION_TYPE_T
# CameraParams.h 548
class _MV_XML_NODE_FEATURE_(Structure):
    """Summary of one GenICam XML node (type, visibility, names, tooltip)."""
    pass
_MV_XML_NODE_FEATURE_._fields_ = [
    # CameraParams.h 548
    ('enType', MV_XML_InterfaceType),
    ('enVisivility', MV_XML_Visibility),  # sic: 'Visivility' spelling from vendor header
    ('strDescription', c_char * 512),
    ('strDisplayName', c_char * 64),
    ('strName', c_char * 64),
    ('strToolTip', c_char * 512),
    ('nReserved', c_uint * 4),
]
MV_XML_NODE_FEATURE = _MV_XML_NODE_FEATURE_
# CameraParams.h 561
class _MV_XML_NODES_LIST_(Structure):
    """Children of an XML node: nNodeNum valid entries in stNodes."""
    pass
_MV_XML_NODES_LIST_._fields_ = [
    # CameraParams.h 561
    ('nNodeNum', c_uint),
    ('stNodes', MV_XML_NODE_FEATURE * 128),
]
MV_XML_NODES_LIST = _MV_XML_NODES_LIST_
# CameraParams.h 569
class _MV_XML_FEATURE_Value_(Structure):
    """Descriptive metadata for an IValue feature node."""
    pass
_MV_XML_FEATURE_Value_._fields_ = [
    # CameraParams.h 569
    ('enType', MV_XML_InterfaceType),
    ('strDescription', c_char * 512),
    ('strDisplayName', c_char * 64),
    ('strName', c_char * 64),
    ('strToolTip', c_char * 512),
    ('nReserved', c_uint * 4),
]
MV_XML_FEATURE_Value = _MV_XML_FEATURE_Value_
# CameraParams.h 579
class _MV_XML_FEATURE_Base_(Structure):
    """Base feature info: only the access mode (AM_* constants)."""
    pass
_MV_XML_FEATURE_Base_._fields_ = [
    # CameraParams.h 579
    ('enAccessMode', MV_XML_AccessMode),
]
MV_XML_FEATURE_Base = _MV_XML_FEATURE_Base_
# CameraParams.h 584
class _MV_XML_FEATURE_Integer_(Structure):
    """IInteger feature node: value plus min/max/increment range."""
    pass
_MV_XML_FEATURE_Integer_._fields_ = [
    # CameraParams.h 584
    ('strName', c_char * 64),
    ('strDisplayName', c_char * 64),
    ('strDescription', c_char * 512),
    ('strToolTip', c_char * 512),
    ('enVisivility', MV_XML_Visibility),
    ('enAccessMode', MV_XML_AccessMode),
    ('bIsLocked', c_int),
    ('nValue', int64_t),
    ('nMinValue', int64_t),
    ('nMaxValue', int64_t),
    ('nIncrement', int64_t),
    ('nReserved', c_uint * 4),
]
MV_XML_FEATURE_Integer = _MV_XML_FEATURE_Integer_
# CameraParams.h 603
class _MV_XML_FEATURE_Boolean_(Structure):
    """IBoolean feature node: a single boolean value."""
    pass
_MV_XML_FEATURE_Boolean_._fields_ = [
    # CameraParams.h 603
    ('strName', c_char * 64),
    ('strDisplayName', c_char * 64),
    ('strDescription', c_char * 512),
    ('strToolTip', c_char * 512),
    ('enVisivility', MV_XML_Visibility),
    ('enAccessMode', MV_XML_AccessMode),
    ('bIsLocked', c_int),
    ('bValue', c_bool),
    ('nReserved', c_uint * 4),
]
MV_XML_FEATURE_Boolean = _MV_XML_FEATURE_Boolean_
# CameraParams.h 618
class _MV_XML_FEATURE_Command_(Structure):
pass
_MV_XML_FEATURE_Command_._fields_ | |
#!/usr/bin/python
# Copyright (c) 2020, 2021 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type  # force new-style classes on Python 2
# Standard Ansible module metadata (schema 1.1): this generated module is a
# community-supported preview.
ANSIBLE_METADATA = {
    "metadata_version": "1.1",
    "status": ["preview"],
    "supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_cloud_guard_target
short_description: Manage a Target resource in Oracle Cloud Infrastructure
description:
- This module allows the user to create, update and delete a Target resource in Oracle Cloud Infrastructure
- For I(state=present), creates a new Target
version_added: "2.9.0"
author: Oracle (@oracle)
options:
display_name:
description:
- DetectorTemplate Identifier
- Required for create using I(state=present).
- Required for update, delete when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is set.
- This parameter is updatable when C(OCI_USE_NAME_AS_IDENTIFIER) is not set.
type: str
aliases: ["name"]
compartment_id:
description:
- Compartment Identifier where the resource is created
- Required for create using I(state=present).
- Required for update when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is set.
- Required for delete when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is set.
type: str
description:
description:
- The target description.
type: str
target_resource_type:
description:
- possible type of targets(compartment/HCMCloud/ERPCloud)
- Required for create using I(state=present).
type: str
choices:
- "COMPARTMENT"
- "ERPCLOUD"
- "HCMCLOUD"
target_resource_id:
description:
- Resource ID which the target uses to monitor
- Required for create using I(state=present).
type: str
target_detector_recipes:
description:
- List of detector recipes to associate with target
- This parameter is updatable.
type: list
elements: dict
suboptions:
detector_recipe_id:
description:
- Identifier for DetectorRecipe.
type: str
detector_rules:
description:
- Overrides to be applied to Detector Rule associated with the target
type: list
elements: dict
suboptions:
detector_rule_id:
description:
- Identifier for DetectorRule.
type: str
required: true
details:
description:
- ""
type: dict
required: true
suboptions:
condition_groups:
description:
- Condition group corresponding to each compartment
type: list
elements: dict
suboptions:
compartment_id:
description:
- compartment associated with condition
type: str
required: true
condition:
description:
- ""
type: dict
required: true
suboptions:
kind:
description:
- Type of condition object
type: str
choices:
- "SIMPLE"
- "COMPOSITE"
required: true
parameter:
description:
- parameter Key
- Applicable when kind is 'SIMPLE'
type: str
operator:
description:
- type of operator
- Applicable when kind is 'SIMPLE'
type: str
choices:
- "IN"
- "NOT_IN"
- "EQUALS"
- "NOT_EQUALS"
value:
description:
- type of operator
- Applicable when kind is 'SIMPLE'
type: str
value_type:
description:
- type of value
- Applicable when kind is 'SIMPLE'
type: str
choices:
- "MANAGED"
- "CUSTOM"
left_operand:
description:
- ""
- Applicable when kind is 'COMPOSITE'
type: dict
suboptions:
kind:
description:
- Type of condition object
type: str
choices:
- "COMPOSITE"
- "SIMPLE"
required: true
composite_operator:
description:
- ""
- Applicable when kind is 'COMPOSITE'
type: str
choices:
- "AND"
- "OR"
right_operand:
description:
- ""
- Applicable when kind is 'COMPOSITE'
type: dict
suboptions:
kind:
description:
- Type of condition object
type: str
choices:
- "COMPOSITE"
- "SIMPLE"
required: true
target_detector_recipe_id:
description:
- Identifier for DetectorRecipe.
- This parameter is updatable.
type: str
target_responder_recipes:
description:
- List of responder recipes to associate with target
- This parameter is updatable.
type: list
elements: dict
suboptions:
responder_recipe_id:
description:
- Identifier for ResponderRecipe.
type: str
responder_rules:
description:
            - Override responder rules associated with responder recipe in a target.
type: list
elements: dict
suboptions:
responder_rule_id:
description:
- Identifier for ResponderRule.
type: str
required: true
details:
description:
- ""
type: dict
required: true
suboptions:
condition:
description:
- ""
type: dict
suboptions:
kind:
description:
- Type of condition object
type: str
choices:
- "SIMPLE"
- "COMPOSITE"
required: true
parameter:
description:
- parameter Key
- Applicable when kind is 'SIMPLE'
type: str
operator:
description:
- type of operator
- Applicable when kind is 'SIMPLE'
type: str
choices:
- "IN"
- "NOT_IN"
- "EQUALS"
- "NOT_EQUALS"
value:
description:
- type of operator
- Applicable when kind is 'SIMPLE'
type: str
value_type:
description:
- type of value
- Applicable when kind is 'SIMPLE'
type: str
choices:
- "MANAGED"
- "CUSTOM"
left_operand:
description:
- ""
- Applicable when kind is 'COMPOSITE'
type: dict
suboptions:
kind:
description:
- Type of condition object
type: str
choices:
- "COMPOSITE"
- "SIMPLE"
required: true
composite_operator:
description:
- ""
- Applicable when kind is 'COMPOSITE'
type: str
choices:
- "AND"
- "OR"
right_operand:
description:
- ""
- Applicable when kind is 'COMPOSITE'
type: dict
suboptions:
kind:
description:
- Type of condition object
type: str
choices:
- "COMPOSITE"
- "SIMPLE"
required: true
configurations:
description:
- Configurations associated with the ResponderRule
type: list
elements: dict
suboptions:
config_key:
description:
- Unique name of the configuration
type: str
required: true
name:
description:
- configuration name
type: str
required: true
value:
description:
- configuration value
type: str
required: true
mode:
description:
- Execution Mode for ResponderRule
type: str
choices:
- "AUTOACTION"
- "USERACTION"
target_responder_recipe_id:
description:
- Identifier for ResponderRecipe.
- This parameter is updatable.
type: str
lifecycle_state:
description:
- The current state of the DetectorRule.
- This parameter is updatable.
type: str
choices:
- "CREATING"
- "UPDATING"
- "ACTIVE"
- "INACTIVE"
- "DELETING"
- "DELETED"
- "FAILED"
freeform_tags:
description:
- "Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only.
Example: `{\\"bar-key\\": \\"value\\"}`"
- This parameter is updatable.
type: dict
defined_tags:
description:
- "Defined tags for this resource. Each key is predefined and scoped to a namespace.
Example: `{\\"foo-namespace\\": {\\"bar-key\\": \\"value\\"}}`"
- This parameter is updatable.
type: dict
target_id:
description:
- OCID of target
- Required for update using I(state=present) when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is not set.
- Required for delete using I(state=absent) when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is not set.
type: str
aliases: ["id"]
state:
description:
- The state of the Target.
- Use I(state=present) to create or update a Target.
- Use I(state=absent) to delete a Target.
type: str
required: false
default: 'present'
choices: ["present", "absent"]
extends_documentation_fragment: [ oracle.oci.oracle, oracle.oci.oracle_creatable_resource, oracle.oci.oracle_wait_options ]
"""
EXAMPLES = """
- name: Create target
oci_cloud_guard_target:
display_name: display_name_example
compartment_id: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
target_resource_type: COMPARTMENT
target_resource_id: "ocid1.targetresource.oc1..xxxxxxEXAMPLExxxxxx"
- name: Update target using name (when environment variable OCI_USE_NAME_AS_IDENTIFIER is set)
oci_cloud_guard_target:
display_name: display_name_example
compartment_id: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
lifecycle_state: CREATING
freeform_tags: {'Department': 'Finance'}
defined_tags: {'Operations': {'CostCenter': 'US'}}
- name: Update target
oci_cloud_guard_target:
display_name: display_name_example
target_id: "ocid1.target.oc1..xxxxxxEXAMPLExxxxxx"
- name: Delete target
oci_cloud_guard_target:
target_id: "ocid1.target.oc1..xxxxxxEXAMPLExxxxxx"
state: absent
- name: Delete target using name (when environment variable OCI_USE_NAME_AS_IDENTIFIER is set)
oci_cloud_guard_target:
display_name: display_name_example
compartment_id: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
state: absent
"""
RETURN = """
target:
description:
- Details of the Target resource acted upon by the current operation
returned: on success
type: complex
contains:
id:
description:
- Unique identifier that is immutable on creation
returned: on success
type: str
sample: "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx"
display_name:
description:
- Target Identifier, can be renamed
returned: on success
type: str
sample: display_name_example
compartment_id:
description:
- Compartment Identifier where the resource is created
returned: on success
type: str
sample: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
description:
description:
- The target description.
returned: on success
type: str
sample: description_example
target_resource_type:
description:
- possible type of targets
returned: on success
type: str
sample: COMPARTMENT
target_resource_id:
description:
- Resource ID which the target uses to monitor
returned: on success
type: str
sample: "ocid1.targetresource.oc1..xxxxxxEXAMPLExxxxxx"
recipe_count:
description:
- Total number of recipes attached to target
returned: on success
type: int
sample: 56
target_detector_recipes:
description:
- List of detector recipes associated with target
returned: on success
type: complex
contains:
id:
description:
- Ocid for detector recipe
returned: on success
type: str
sample: "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx"
display_name:
description:
- DisplayName of detector recipe
returned: on success
type: str
sample: display_name_example
description:
description:
- Detector recipe description
returned: on success
type: str
sample: description_example
compartment_id:
description:
- compartmentId of detector recipe
returned: on success
type: str
sample: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
detector_recipe_id:
description:
- Unique identifier for Detector Recipe of which this is an extension
returned: on success
type: str
sample: "ocid1.detectorrecipe.oc1..xxxxxxEXAMPLExxxxxx"
owner:
description:
- Owner of detector recipe
returned: on | |
<reponame>AssembleSoftware/IoTPy
"""
This module has implementations of the prime-number sieve of
Eratosthenes. See:
https://en.wikipedia.org/wiki/Sieve_of_Eratosthenes
EXAMPLE 1
The first example computes all primes up to N, for some positive
integer N. This example illustrates how one agent's action can
create other agents.
EXAMPLE 2
The second example computes all primes up to the N-th prime. This
example illustrates the interactions between two asynchronous
computations, one of which stops the other. The two agents are:
(1) An agent that generates primes until a shared variable, stop,
becomes True, and
(2) an agent that detects that N primes have been generated, and
then changes the value of the shared variable, stop, to True.
"""
import sys
import os
import math
sys.path.append("../")
from IoTPy.core.stream import Stream, _no_value
from IoTPy.agent_types.op import map_element
from IoTPy.agent_types.sink import sink_element
from IoTPy.agent_types.merge import merge_asynch
from IoTPy.helper_functions.recent_values import recent_values
def sieve(in_stream, prime_stream):
    """
    Prime-number sieve agent used by both examples.

    Parameters
    ----------
    in_stream: Stream
        Input stream of positive integers whose first element is assumed
        to be a prime number.
    prime_stream: Stream
        Stream to which discovered primes are appended.

    Notes
    -----
    A single sink agent reads in_stream. Let p be the first element it
    sees: p is appended to prime_stream, and a fresh sieve agent is
    spawned on a new stream carrying every later element of in_stream
    that is not a multiple of p. Each prime therefore causes one more
    agent to be created, forming a chain of filters. Many agents append
    to prime_stream, but at most one does so at a time.
    """
    #---------------------------------------------------------------
    # The stateful function encapsulated by the sink agent
    #---------------------------------------------------------------
    def pass_non_multiples(item, current_prime, prime_stream, out_stream):
        """
        Process one element of the input stream.

        current_prime is 0 until the first element arrives; after that it
        holds the first (prime) element forever. Elements not divisible
        by current_prime are forwarded to out_stream, which the next
        sieve stage consumes.
        """
        if current_prime == 0:
            # First element read: by assumption it is prime, so record it.
            prime_stream.append(item)
            # Latch the state to this prime; it never changes again.
            current_prime = item
            # Spawn the next sieve stage on the filtered stream.
            sieve(out_stream, prime_stream)
        # Forward elements that are not multiples of the latched prime.
        if item % current_prime:
            out_stream.append(item)
        # A stateful sink function must return the next state.
        return current_prime

    #---------------------------------------------------------------
    # Create the sink agent with initial state 0; prime_stream and a new
    # empty out_stream are passed through to the encapsulated function.
    #---------------------------------------------------------------
    sink_element(func=pass_non_multiples, in_stream=in_stream, state=0,
                 prime_stream=prime_stream, out_stream=Stream())
def primes_example_1(N):
    """
    Return a stream of all primes up to N.

    Parameters
    ----------
    N : int
        Integer greater than 2.

    Returns
    -------
    prime_stream: Stream
        Sequence of primes less than or equal to N.
    """
    # 1. Define the streams.
    source = Stream('integers from 2')
    prime_stream = Stream('prime numbers')
    # 2. Define the agents: the sieve chain grows as primes are found.
    sieve(source, prime_stream)
    # 3. Drive the network by feeding the candidate integers.
    source.extend(list(range(2, N)))
    return prime_stream
def primes_example_2(N):
    """
    Agent used in example 2 in which prime_stream is the sequence of
    primes up to the N-th prime

    Parameters
    ----------
    N: int
        positive integer

    Returns: first_N, prime_stream
    -------
    first_N: list
        The first N primes
    prime_stream: Stream
        Stream of prime numbers. May have more than N primes

    Notes
    -----
    sieve creates a single sink agent. The sink agent has a single
    input stream, in_stream. The agent encapsulates stateful function
    f which has an initial state of 0. (Sinks have no output streams.)

    Let the first element of in_stream be p. This agent assumes that p
    is a prime number. So, the agent appends p to prime_stream. Many
    agents append prime numbers to prime_stream, but at most one agent
    can do so at a time.

    When the agent discovers an element of in_stream that is not a
    multiple of p, the agent creates a new sieve agent which takes a
    new stream out_stream as its input stream. out_stream consists of
    elements of in_stream that are not multiples of p.
    """
    def execute_until_stop_message(v, state, function):
        # Apply *function* to each value arriving on state_stream (index 0)
        # until any message arrives on stop_stream (index 1); after that,
        # append nothing to the output stream.
        # state is a pair: (function's own state, finished_execution flag).
        function_state, finished_execution = state
        if finished_execution:
            # NOTE(review): this returns bare True as the next state, while
            # the unpack above expects a 2-tuple, so a further call after
            # stopping would raise. Likely intended:
            # (_no_value, (function_state, True)) — confirm.
            return (_no_value, True)
        index, input_value = v
        if index == 1:
            # This value is from stop_stream.
            # Make finished_execution become True because a message
            # was received on stop_stream.
            finished_execution = True
            # From now onwards, no messages are appended to the output
            # stream, and finished_execution remains True forever.
            return (_no_value, (function_state, True))
        # index is 0. So, this value is from state_stream.
        output_value, next_function_state = function(
            input_value, function_state)
        # next_state = (next_function_state, finished_execution)
        return output_value, (next_function_state, finished_execution)

    def generate_numbers_until_stop_message(index_and_value, state):
        # NOTE(review): unused below — superseded by the commented-out
        # merge_asynch call; kept for reference.
        # state is initially False and switches to True if a message
        # is received in stop_stream. If state becomes True then it
        # remains True thereafter. After state becomes True no values
        # are appended to the output stream.
        # The elements of the input stream are tuples: index and
        # value.
        # index is 0 for state_stream and 1 for stop_stream.
        index, value = index_and_value
        if index == 1:
            # This value is from stop_stream
            # Make state True because a message was received on
            # stop_stream.
            # From now onwards, no messages are appended to the output
            # stream, and state remains True.
            return (_no_value, True)
        # index is 0. So, this value is from state_stream.
        if state:
            # Do not append values to the output stream, and state
            # remains True
            return (_no_value, state)
        else:
            # Append the next value to the output stream, and state
            # remains False.
            return (value+1, state)

    def detect_finished_then_send_stop(v, state, N):
        # Count primes seen on prime_stream; when the count reaches N,
        # emit a single True on stop_stream (the 'stop' flag guarantees
        # the message is sent at most once).
        length, stop = state
        # If stop is True then computation must stop
        length += 1
        if length >= N and not stop:
            stop = True
            return (True, (length, stop))
        else:
            return (_no_value, (length, stop))

    def first_N_elements(in_stream, N, first_N):
        # Copy the first N elements of in_stream into the list first_N.
        def first_N_elements_of_stream(v, state, N, first_N):
            # state counts how many elements have been copied so far.
            if state < N:
                first_N.append(v)
                state += 1
            return state
        sink_element(func=first_N_elements_of_stream, in_stream=in_stream,
                     state=0, N=N, first_N=first_N)

    #-----------------------------------------------------------------
    # Define streams
    #-----------------------------------------------------------------
    state_stream = Stream(name='numbers 2, 3, 4, ...')
    stop_stream = Stream(name='stop!')
    prime_stream = Stream(name='prime numbers')
    first_N = []

    #-----------------------------------------------------------------
    # Define agents
    #-----------------------------------------------------------------
    # Create agent that generates 2, 3, 4... until it receives a
    # message on stop_stream
    ## merge_asynch(func=generate_numbers_until_stop_message,
    ##              in_streams=[state_stream, stop_stream],
    ##              out_stream=state_stream, state=False)
    def g(v, state):
        # Successor function fed through execute_until_stop_message:
        # maps each number v back onto state_stream as v+1.
        return v+1, state
    # Feedback loop: state_stream is both an input and the output, so the
    # agent keeps generating successive integers until stop_stream fires.
    merge_asynch(func=execute_until_stop_message,
                 in_streams=[state_stream, stop_stream],
                 out_stream=state_stream, state=(None, False),
                 function=g)
    # Create an agent that sieves state_stream to create prime_stream
    # which is a sequence of primes.
    # We do this by creating a sink agent that encapsulates a stateful
    # function f with an initial state of 0. Pass parameters
    # prime_stream and out_stream from the sink agent to its
    # encapsulated function f.
    sieve(in_stream=state_stream, prime_stream=prime_stream)
    # Create an agent that sends a message on stop_stream when the
    # length of prime_stream exceeds N.
    map_element(func=detect_finished_then_send_stop,
                in_stream=prime_stream, out_stream=stop_stream,
                state=(0, False), N=N)
    first_N_elements(in_stream=prime_stream, N=N, first_N=first_N)
    # Kick off the feedback loop: 2 is the first candidate number.
    state_stream.append(2)
    return first_N, prime_stream
def test():
scheduler = | |
None:
self.documentSchema = str()
self.tableSchema = str()
@property
def documentSchema(self) -> str:
"""Getter: The native espresso document schema."""
return self._inner_dict.get('documentSchema') # type: ignore
@documentSchema.setter
def documentSchema(self, value: str) -> None:
"""Setter: The native espresso document schema."""
self._inner_dict['documentSchema'] = value
@property
def tableSchema(self) -> str:
"""Getter: The espresso table schema definition."""
return self._inner_dict.get('tableSchema') # type: ignore
@tableSchema.setter
def tableSchema(self, value: str) -> None:
"""Setter: The espresso table schema definition."""
self._inner_dict['tableSchema'] = value
class FixedTypeClass(DictWrapper):
    """Fixed field type."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.schema.FixedType")

    def __init__(self) -> None:
        super().__init__()

    @classmethod
    def construct_with_defaults(cls) -> "FixedTypeClass":
        # Build an empty wrapper, then populate schema-declared defaults.
        obj = cls.construct({})
        obj._restore_defaults()
        return obj

    def _restore_defaults(self) -> None:
        # Marker type: no fields to reset.
        pass
class ForeignKeySpecClass(DictWrapper):
    """Description of a foreign key in a schema."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.schema.ForeignKeySpec")

    def __init__(self, foreignKey: Union["DatasetFieldForeignKeyClass", "UrnForeignKeyClass"]):
        super().__init__()
        self.foreignKey = foreignKey

    @classmethod
    def construct_with_defaults(cls) -> "ForeignKeySpecClass":
        # Build an empty wrapper, then populate schema-declared defaults.
        obj = cls.construct({})
        obj._restore_defaults()
        return obj

    def _restore_defaults(self) -> None:
        # The union's default is its first branch: a dataset-field key.
        self.foreignKey = DatasetFieldForeignKeyClass.construct_with_defaults()

    @property
    def foreignKey(self) -> Union["DatasetFieldForeignKeyClass", "UrnForeignKeyClass"]:
        """Foreign key definition in metadata schema."""
        return self._inner_dict.get('foreignKey')  # type: ignore

    @foreignKey.setter
    def foreignKey(self, value: Union["DatasetFieldForeignKeyClass", "UrnForeignKeyClass"]) -> None:
        """Set the foreign key definition in metadata schema."""
        self._inner_dict['foreignKey'] = value
class KafkaSchemaClass(DictWrapper):
    """Schema holder for kafka schema."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.schema.KafkaSchema")

    def __init__(self, documentSchema: str):
        super().__init__()
        self.documentSchema = documentSchema

    @classmethod
    def construct_with_defaults(cls) -> "KafkaSchemaClass":
        # Build an empty wrapper, then populate schema-declared defaults.
        obj = cls.construct({})
        obj._restore_defaults()
        return obj

    def _restore_defaults(self) -> None:
        self.documentSchema = ""

    @property
    def documentSchema(self) -> str:
        """The native kafka document schema. This is a human readable avro document schema."""
        return self._inner_dict.get('documentSchema')  # type: ignore

    @documentSchema.setter
    def documentSchema(self, value: str) -> None:
        """Set the native kafka document schema."""
        self._inner_dict['documentSchema'] = value
class KeyValueSchemaClass(DictWrapper):
    """Schema text of a key-value store schema."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.schema.KeyValueSchema")

    def __init__(self, keySchema: str, valueSchema: str):
        super().__init__()
        self.keySchema = keySchema
        self.valueSchema = valueSchema

    @classmethod
    def construct_with_defaults(cls) -> "KeyValueSchemaClass":
        # Build an empty wrapper, then populate schema-declared defaults.
        obj = cls.construct({})
        obj._restore_defaults()
        return obj

    def _restore_defaults(self) -> None:
        self.keySchema = ""
        self.valueSchema = ""

    @property
    def keySchema(self) -> str:
        """The raw schema for the key in the key-value store."""
        return self._inner_dict.get('keySchema')  # type: ignore

    @keySchema.setter
    def keySchema(self, value: str) -> None:
        """Set the raw schema for the key in the key-value store."""
        self._inner_dict['keySchema'] = value

    @property
    def valueSchema(self) -> str:
        """The raw schema for the value in the key-value store."""
        return self._inner_dict.get('valueSchema')  # type: ignore

    @valueSchema.setter
    def valueSchema(self, value: str) -> None:
        """Set the raw schema for the value in the key-value store."""
        self._inner_dict['valueSchema'] = value
class MapTypeClass(DictWrapper):
    """Map field type."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.schema.MapType")

    def __init__(self, keyType: Union[None, str] = None, valueType: Union[None, str] = None):
        super().__init__()
        self.keyType = keyType
        self.valueType = valueType

    @classmethod
    def construct_with_defaults(cls) -> "MapTypeClass":
        # Build an empty wrapper, then populate schema-declared defaults.
        obj = cls.construct({})
        obj._restore_defaults()
        return obj

    def _restore_defaults(self) -> None:
        # Defaults come straight from the Avro record schema's field map.
        self.keyType = self.RECORD_SCHEMA.field_map["keyType"].default
        self.valueType = self.RECORD_SCHEMA.field_map["valueType"].default

    @property
    def keyType(self) -> Union[None, str]:
        """Key type in a map"""
        return self._inner_dict.get('keyType')  # type: ignore

    @keyType.setter
    def keyType(self, value: Union[None, str]) -> None:
        """Set the key type in a map."""
        self._inner_dict['keyType'] = value

    @property
    def valueType(self) -> Union[None, str]:
        """Type of the value in a map"""
        return self._inner_dict.get('valueType')  # type: ignore

    @valueType.setter
    def valueType(self, value: Union[None, str]) -> None:
        """Set the type of the value in a map."""
        self._inner_dict['valueType'] = value
class MySqlDDLClass(DictWrapper):
    """Schema holder for MySql data definition language that describes an MySql table."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.schema.MySqlDDL")

    def __init__(self, tableSchema: str):
        super().__init__()
        self.tableSchema = tableSchema

    @classmethod
    def construct_with_defaults(cls) -> "MySqlDDLClass":
        # Build an empty wrapper, then populate schema-declared defaults.
        obj = cls.construct({})
        obj._restore_defaults()
        return obj

    def _restore_defaults(self) -> None:
        self.tableSchema = ""

    @property
    def tableSchema(self) -> str:
        """The native schema in the dataset's platform. This is a human readable (json blob) table schema."""
        return self._inner_dict.get('tableSchema')  # type: ignore

    @tableSchema.setter
    def tableSchema(self, value: str) -> None:
        """Set the native table schema."""
        self._inner_dict['tableSchema'] = value
class NullTypeClass(DictWrapper):
    """Null field type."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.schema.NullType")

    def __init__(self) -> None:
        super().__init__()

    @classmethod
    def construct_with_defaults(cls) -> "NullTypeClass":
        # Build an empty wrapper, then populate schema-declared defaults.
        obj = cls.construct({})
        obj._restore_defaults()
        return obj

    def _restore_defaults(self) -> None:
        # Marker type: no fields to reset.
        pass
class NumberTypeClass(DictWrapper):
    """Number data type: long, integer, short, etc.."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.schema.NumberType")

    def __init__(self) -> None:
        super().__init__()

    @classmethod
    def construct_with_defaults(cls) -> "NumberTypeClass":
        # Build an empty wrapper, then populate schema-declared defaults.
        obj = cls.construct({})
        obj._restore_defaults()
        return obj

    def _restore_defaults(self) -> None:
        # Marker type: no fields to reset.
        pass
class OracleDDLClass(DictWrapper):
    """Schema holder for oracle data definition language that describes an oracle table."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.schema.OracleDDL")

    def __init__(self, tableSchema: str):
        super().__init__()
        self.tableSchema = tableSchema

    @classmethod
    def construct_with_defaults(cls) -> "OracleDDLClass":
        # Build an empty wrapper, then populate schema-declared defaults.
        obj = cls.construct({})
        obj._restore_defaults()
        return obj

    def _restore_defaults(self) -> None:
        self.tableSchema = ""

    @property
    def tableSchema(self) -> str:
        """The native schema in the dataset's platform. This is a human readable (json blob) table schema."""
        return self._inner_dict.get('tableSchema')  # type: ignore

    @tableSchema.setter
    def tableSchema(self, value: str) -> None:
        """Set the native table schema."""
        self._inner_dict['tableSchema'] = value
class OrcSchemaClass(DictWrapper):
    """Schema text of an ORC schema."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.schema.OrcSchema")

    def __init__(self, schema: str):
        super().__init__()
        self.schema = schema

    @classmethod
    def construct_with_defaults(cls) -> "OrcSchemaClass":
        # Build an empty wrapper, then populate schema-declared defaults.
        obj = cls.construct({})
        obj._restore_defaults()
        return obj

    def _restore_defaults(self) -> None:
        self.schema = ""

    @property
    def schema(self) -> str:
        """The native schema for ORC file format."""
        return self._inner_dict.get('schema')  # type: ignore

    @schema.setter
    def schema(self, value: str) -> None:
        """Set the native ORC schema."""
        self._inner_dict['schema'] = value
class OtherSchemaClass(DictWrapper):
    """Schema holder for undefined schema types."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.schema.OtherSchema")

    def __init__(self, rawSchema: str):
        super().__init__()
        self.rawSchema = rawSchema

    @classmethod
    def construct_with_defaults(cls) -> "OtherSchemaClass":
        # Build an empty wrapper, then populate schema-declared defaults.
        obj = cls.construct({})
        obj._restore_defaults()
        return obj

    def _restore_defaults(self) -> None:
        self.rawSchema = ""

    @property
    def rawSchema(self) -> str:
        """The native schema in the dataset's platform."""
        return self._inner_dict.get('rawSchema')  # type: ignore

    @rawSchema.setter
    def rawSchema(self, value: str) -> None:
        """Set the native schema in the dataset's platform."""
        self._inner_dict['rawSchema'] = value
class PrestoDDLClass(DictWrapper):
    """Schema holder for presto data definition language that describes a presto view."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.schema.PrestoDDL")

    def __init__(self, rawSchema: str):
        super().__init__()
        self.rawSchema = rawSchema

    @classmethod
    def construct_with_defaults(cls) -> "PrestoDDLClass":
        # Build an empty wrapper, then populate schema-declared defaults.
        obj = cls.construct({})
        obj._restore_defaults()
        return obj

    def _restore_defaults(self) -> None:
        self.rawSchema = ""

    @property
    def rawSchema(self) -> str:
        """The raw schema in the dataset's platform. This includes the DDL and the columns extracted from DDL."""
        return self._inner_dict.get('rawSchema')  # type: ignore

    @rawSchema.setter
    def rawSchema(self, value: str) -> None:
        """Set the raw schema in the dataset's platform."""
        self._inner_dict['rawSchema'] = value
class RecordTypeClass(DictWrapper):
    """Record field type."""

    RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.schema.RecordType")

    def __init__(self) -> None:
        super().__init__()

    @classmethod
    def construct_with_defaults(cls) -> "RecordTypeClass":
        # Build an empty wrapper, then populate schema-declared defaults.
        obj = cls.construct({})
        obj._restore_defaults()
        return obj

    def _restore_defaults(self) -> None:
        # Marker type: no fields to reset.
        pass
class SchemaFieldClass(DictWrapper):
"""SchemaField to describe metadata related to dataset schema. Schema normalization rules: http://go/tms-schema"""
RECORD_SCHEMA = get_schema_type("com.linkedin.pegasus2avro.schema.SchemaField")
def __init__(self,
fieldPath: str,
type: "SchemaFieldDataTypeClass",
nativeDataType: str,
jsonPath: Union[None, str]=None,
nullable: Optional[bool]=None,
description: Union[None, str]=None,
recursive: Optional[bool]=None,
globalTags: Union[None, "GlobalTagsClass"]=None,
glossaryTerms: Union[None, "GlossaryTermsClass"]=None,
):
super().__init__()
self.fieldPath = fieldPath
self.jsonPath = jsonPath
if nullable is None:
# default: False
self.nullable = self.RECORD_SCHEMA.field_map["nullable"].default
else:
self.nullable = nullable
self.description = description
self.type = type
self.nativeDataType = nativeDataType
if recursive is None:
# default: False
self.recursive = self.RECORD_SCHEMA.field_map["recursive"].default
else:
self.recursive = recursive
self.globalTags = globalTags
self.glossaryTerms = glossaryTerms
@classmethod
def construct_with_defaults(cls) -> "SchemaFieldClass":
self = cls.construct({})
self._restore_defaults()
return self
def _restore_defaults(self) -> None:
self.fieldPath = str()
self.jsonPath = self.RECORD_SCHEMA.field_map["jsonPath"].default
self.nullable = self.RECORD_SCHEMA.field_map["nullable"].default
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.