input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
C . PRO A 1 66 ? 44.193 -1.538 12.100 1.00 10.80 ? 67 PRO A C 1
ATOM 309 O O . PRO A 1 66 ? 44.750 -2.270 11.328 1.00 13.57 ? 67 PRO A O 1
ATOM 310 C CB . PRO A 1 66 ? 42.074 -0.905 10.846 1.00 10.38 ? 67 PRO A CB 1
ATOM 311 C CG . PRO A 1 66 ? 42.256 -0.445 9.422 1.00 10.25 ? 67 PRO A CG 1
ATOM 312 C CD . PRO A 1 66 ? 43.613 0.102 9.229 1.00 11.57 ? 67 PRO A CD 1
ATOM 313 N N . MET A 1 67 ? 44.315 -1.669 13.394 1.00 10.27 ? 68 MET A N 1
ATOM 314 C CA . MET A 1 67 ? 45.126 -2.756 13.958 1.00 9.23 ? 68 MET A CA 1
ATOM 315 C C . MET A 1 67 ? 44.540 -4.176 13.771 1.00 9.78 ? 68 MET A C 1
ATOM 316 O O . MET A 1 67 ? 45.274 -5.190 13.735 1.00 9.27 ? 68 MET A O 1
ATOM 317 C CB . MET A 1 67 ? 45.258 -2.485 15.448 1.00 9.68 ? 68 MET A CB 1
ATOM 318 C CG . MET A 1 67 ? 46.138 -1.285 15.765 1.00 9.90 ? 68 MET A CG 1
ATOM 319 S SD . MET A 1 67 ? 46.258 -1.104 17.546 1.00 11.08 ? 68 MET A SD 1
ATOM 320 C CE . MET A 1 67 ? 44.647 -0.399 17.837 1.00 9.54 ? 68 MET A CE 1
ATOM 321 N N . CYS A 1 68 ? 43.229 -4.252 13.783 1.00 9.30 ? 69 CYS A N 1
ATOM 322 C CA . CYS A 1 68 ? 42.523 -5.509 13.926 1.00 10.25 ? 69 CYS A CA 1
ATOM 323 C C . CYS A 1 68 ? 43.160 -6.347 15.105 1.00 11.14 ? 69 CYS A C 1
ATOM 324 O O . CYS A 1 68 ? 43.441 -5.780 16.163 1.00 10.56 ? 69 CYS A O 1
ATOM 325 C CB . CYS A 1 68 ? 42.425 -6.268 12.610 1.00 10.80 ? 69 CYS A CB 1
ATOM 326 S SG . CYS A 1 68 ? 41.659 -5.247 11.375 1.00 12.67 ? 69 CYS A SG 1
ATOM 327 N N . SER A 1 69 ? 43.489 -7.608 14.900 1.00 10.51 ? 70 SER A N 1
ATOM 328 C CA . SER A 1 69 ? 43.867 -8.513 16.009 1.00 10.31 ? 70 SER A CA 1
ATOM 329 C C . SER A 1 69 ? 45.252 -8.268 16.443 1.00 9.50 ? 70 SER A C 1
ATOM 330 O O . SER A 1 69 ? 45.670 -8.853 17.474 1.00 9.51 ? 70 SER A O 1
ATOM 331 C CB . SER A 1 69 ? 43.753 -9.992 15.625 1.00 10.42 ? 70 SER A CB 1
ATOM 332 O OG . SER A 1 69 ? 42.412 -10.326 15.486 1.00 11.54 ? 70 SER A OG 1
ATOM 333 N N . THR A 1 70 ? 45.977 -7.369 15.791 1.00 8.37 ? 71 THR A N 1
ATOM 334 C CA . THR A 1 70 ? 47.339 -7.002 16.331 1.00 9.14 ? 71 THR A CA 1
ATOM 335 C C . THR A 1 70 ? 47.181 -6.282 17.652 1.00 9.49 ? 71 THR A C 1
ATOM 336 O O . THR A 1 70 ? 48.092 -6.238 18.437 1.00 10.83 ? 71 THR A O 1
ATOM 337 C CB . THR A 1 70 ? 48.270 -6.237 15.375 1.00 8.78 ? 71 THR A CB 1
ATOM 338 O OG1 . THR A 1 70 ? 47.744 -4.946 15.098 1.00 9.50 ? 71 THR A OG1 1
ATOM 339 C CG2 . THR A 1 70 ? 48.515 -7.100 14.064 1.00 7.78 ? 71 THR A CG2 1
ATOM 340 N N . SER A 1 71 ? 46.035 -5.695 17.864 1.00 9.32 ? 72 SER A N 1
ATOM 341 C CA . SER A 1 71 ? 45.722 -5.094 19.167 1.00 8.66 ? 72 SER A CA 1
ATOM 342 C C . SER A 1 71 ? 45.758 -6.023 20.319 1.00 9.01 ? 72 SER A C 1
ATOM 343 O O . SER A 1 71 ? 45.833 -5.597 21.460 1.00 9.51 ? 72 SER A O 1
ATOM 344 C CB . SER A 1 71 ? 44.383 -4.311 19.104 1.00 8.44 ? 72 SER A CB 1
ATOM 345 O OG . SER A 1 71 ? 43.288 -5.148 18.888 1.00 8.81 ? 72 SER A OG 1
ATOM 346 N N . LYS A 1 72 ? 45.604 -7.299 20.052 1.00 11.07 ? 73 LYS A N 1
ATOM 347 C CA . LYS A 1 72 ? 45.652 -8.340 21.097 1.00 10.68 ? 73 LYS A CA 1
ATOM 348 C C . LYS A 1 72 ? 46.932 -8.439 21.811 1.00 11.33 ? 73 LYS A C 1
ATOM 349 O O . LYS A 1 72 ? 46.885 -8.763 23.033 1.00 10.33 ? 73 LYS A O 1
ATOM 350 C CB . LYS A 1 72 ? 45.245 -9.767 20.538 1.00 11.25 ? 73 LYS A CB 1
ATOM 351 C CG . LYS A 1 72 ? 43.782 -9.782 20.045 1.00 10.95 ? 73 LYS A CG 1
ATOM 352 C CD . LYS A 1 72 ? 43.381 -11.011 19.247 1.00 11.74 ? 73 LYS A CD 1
ATOM 353 C CE . LYS A 1 72 ? 41.894 -10.923 19.018 1.00 12.26 ? 73 LYS A CE 1
ATOM 354 N NZ . LYS A 1 72 ? 41.423 -11.683 17.867 1.00 15.41 ? 73 LYS A NZ 1
ATOM 355 N N . VAL A 1 73 ? 48.039 -8.023 21.192 1.00 10.23 ? 74 VAL A N 1
ATOM 356 C CA . VAL A 1 73 ? 49.320 -7.932 21.911 1.00 10.44 ? 74 VAL A CA 1
ATOM 357 C C . VAL A 1 73 ? 49.195 -6.946 23.084 1.00 9.98 ? 74 VAL A C 1
ATOM 358 O O . VAL A 1 73 ? 49.620 -7.258 24.194 1.00 10.69 ? 74 VAL A O 1
ATOM 359 C CB . VAL A 1 73 ? 50.504 -7.581 21.050 1.00 10.87 ? 74 VAL A CB 1
ATOM 360 C CG1 . VAL A 1 73 ? 51.760 -7.415 21.902 1.00 12.39 ? 74 VAL A CG1 1
ATOM 361 C CG2 . VAL A 1 73 ? 50.596 -8.659 19.930 1.00 10.94 ? 74 VAL A CG2 1
ATOM 362 N N . MET A 1 74 ? 48.671 -5.747 22.798 1.00 9.49 ? 75 MET A N 1
ATOM 363 C CA . MET A 1 74 ? 48.497 -4.702 23.863 1.00 9.28 ? 75 MET A CA 1
ATOM 364 C C . MET A 1 74 ? 47.646 -5.214 25.013 1.00 10.45 ? 75 MET A C 1
ATOM 365 O O . MET A 1 74 ? 47.909 -4.924 26.210 1.00 9.95 ? 75 MET A O 1
ATOM 366 C CB . MET A 1 74 ? 47.882 -3.385 23.264 1.00 9.70 ? 75 MET A CB 1
ATOM 367 C CG . MET A 1 74 ? 48.122 -2.217 24.230 1.00 9.59 ? 75 MET A CG 1
ATOM 368 S SD . MET A 1 74 ? 49.816 -1.686 24.402 1.00 12.32 ? 75 MET A SD 1
ATOM 369 | |
2, 3, 4, 5, 6, 7, 8, 9, 10], "
"but got 11")
def test_printable_string(self):
    """UPER round-trips for PrintableString: unconstrained (A),
    fixed-size (B) and size-range constrained (C) variants, plus
    rejection of a character outside the PrintableString alphabet.
    """
    foo = asn1tools.compile_string(
        "Foo DEFINITIONS AUTOMATIC TAGS ::= "
        "BEGIN "
        "A ::= PrintableString "
        "B ::= PrintableString (SIZE (16)) "
        "C ::= PrintableString (SIZE (0..31)) "
        "END",
        'uper')

    # Each case is (type name, decoded value, expected UPER bytes).
    cases = [
        ('A',
         (string.ascii_uppercase
          + string.ascii_lowercase
          + string.digits
          + " '()+,-./:=?"),
         b'\x4a\x83\x0a\x1c\x48\xb1\xa3\xc8\x93\x2a\x5c\xc9\xb3\xa7\xd0\xa3'
         b'\x4a\x9d\x4a\xb5\xab\xd8\xb3\x6b\x0e\x2c\x79\x32\xe6\xcf\xa3\x4e'
         b'\xad\x7b\x36\xee\xdf\xc3\x8f\x2e\x7d\x3a\xf6\xef\xe3\xcf\xa6\x0c'
         b'\x59\x33\x68\xd5\xb3\x77\x0e\x50\x27\x50\xa5\x5a\xc5\xab\x97\xba'
         b'\x7a\xfc'),
        ('B',
         '0123456789abcdef',
         b'\x60\xc5\x93\x36\x8d\x5b\x37\x70\xe7\x0e\x2c\x79\x32\xe6'),
        ('C', '', b'\x00'),
        ('C', '2', b'\x0b\x20')
    ]

    for type_name, decoded, encoded in cases:
        self.assert_encode_decode(foo, type_name, decoded, encoded)

    # Bad character '[' should raise an exception.
    with self.assertRaises(asn1tools.EncodeError) as cm:
        foo.encode('A', '[')

    self.assertEqual(
        str(cm.exception),
        "expected a character in ' '()+,-./0123456789:=?ABCDEFGHIJKLMNO"
        "PQRSTUVWXYZabcdefghijklmnopqrstuvwxyz', but got '[' (0x5b)'")
def test_graphic_string(self):
    """UPER round-trips for an unconstrained GraphicString."""
    foo = asn1tools.compile_string(
        "Foo DEFINITIONS AUTOMATIC TAGS ::= "
        "BEGIN "
        "A ::= GraphicString "
        "END",
        'uper')

    # (type name, decoded value, expected UPER bytes)
    cases = [
        ('A', '', b'\x00'),
        ('A', '2', b'\x01\x32')
    ]

    for type_name, decoded, encoded in cases:
        self.assert_encode_decode(foo, type_name, decoded, encoded)
def test_sequence_of(self):
    """UPER round-trips for SEQUENCE OF with unconstrained (A), fixed (B),
    bounded (C) and extensible (D) size constraints, plus the
    not-yet-supported long length determinant.
    """
    foo = asn1tools.compile_string(
        "Foo DEFINITIONS AUTOMATIC TAGS ::= "
        "BEGIN "
        "A ::= SEQUENCE OF INTEGER "
        "B ::= SEQUENCE SIZE (2) OF INTEGER "
        "C ::= SEQUENCE SIZE (1..5) OF INTEGER "
        "D ::= SEQUENCE SIZE (1..2, ...) OF INTEGER "
        "END",
        'uper')

    # (type name, decoded value, expected UPER bytes)
    cases = [
        ('A', [], b'\x00'),
        ('A', [1], b'\x01\x01\x01'),
        ('A', [1, 2], b'\x02\x01\x01\x01\x02'),
        ('A', 1000 * [1, 2], b'\x87\xd0' + 1000 * b'\x01\x01\x01\x02'),
        ('B', [1, 2], b'\x01\x01\x01\x02'),
        ('B', [4663, 222322233], b'\x02\x12\x37\x04\x0d\x40\x5e\x39'),
        ('C', [1], b'\x00\x20\x20'),
        ('C', [1, 2], b'\x20\x20\x20\x20\x40'),
        ('D', [2, 1], b'\x40\x40\x80\x40\x40')
    ]

    for type_name, decoded, encoded in cases:
        self.assert_encode_decode(foo, type_name, decoded, encoded)

    # Long sequences are not yet supported.
    with self.assertRaises(NotImplementedError) as cm:
        foo.encode('A', 16384 * [1])

    self.assertEqual(str(cm.exception),
                     'Length determinant >=16384 is not yet supported.')
def test_real(self):
    """UPER round-trips for REAL: zeros, infinities, and binary-encoded
    values across a wide magnitude range.
    """
    foo = asn1tools.compile_string(
        "Foo DEFINITIONS AUTOMATIC TAGS ::= "
        "BEGIN "
        "A ::= REAL "
        "END",
        'uper')

    # (type name, decoded value, expected UPER bytes)
    cases = [
        ('A', 0.0, b'\x00'),
        ('A', -0.0, b'\x00'),
        ('A', float('inf'), b'\x01\x40'),
        ('A', float('-inf'), b'\x01\x41'),
        ('A', 1.0, b'\x03\x80\x00\x01'),
        ('A',
         1.1,
         b'\x09\x80\xcd\x08\xcc\xcc\xcc\xcc\xcc\xcd'),
        ('A',
         1234.5678,
         b'\x09\x80\xd6\x13\x4a\x45\x6d\x5c\xfa\xad'),
        ('A', 8, b'\x03\x80\x03\x01'),
        ('A', 0.625, b'\x03\x80\xfd\x05'),
        ('A',
         10000000000000000146306952306748730309700429878646550592786107871697963642511482159104,
         b'\x0a\x81\x00\xe9\x02\x92\xe3\x2a\xc6\x85\x59'),
        ('A',
         0.00000000000000000000000000000000000000000000000000000000000000000000000000000000000001,
         b'\x0a\x81\xfe\xae\x13\xe4\x97\x06\x5c\xd6\x1f')
    ]

    for type_name, decoded, encoded in cases:
        self.assert_encode_decode(foo, type_name, decoded, encoded)
def test_utc_time(self):
    """UPER round-trip for a UTCTime value."""
    foo = asn1tools.compile_string(
        "Foo DEFINITIONS AUTOMATIC TAGS ::= "
        "BEGIN "
        "A ::= UTCTime "
        "END",
        'uper')

    # (type name, decoded value, expected UPER bytes)
    cases = [
        ('A',
         '010203040506Z',
         b'\x0d\x60\xc5\x83\x26\x0c\xd8\x34\x60\xd5\x83\x6b\x40')
    ]

    for type_name, decoded, encoded in cases:
        self.assert_encode_decode(foo, type_name, decoded, encoded)
def test_generalized_time(self):
    """UPER round-trip for a GeneralizedTime value with fractional
    seconds."""
    foo = asn1tools.compile_string(
        "Foo DEFINITIONS AUTOMATIC TAGS ::= "
        "BEGIN "
        "A ::= GeneralizedTime "
        "END",
        'uper')

    # (type name, decoded value, expected UPER bytes)
    cases = [
        ('A',
         '20001231235959.999Z',
         b'\x13\x64\xc1\x83\x06\x2c\x99\xb1\x64\xcd\xab\x96\xae\x57\x39\x72'
         b'\xe6\xd0')
    ]

    for type_name, decoded, encoded in cases:
        self.assert_encode_decode(foo, type_name, decoded, encoded)
def test_enumerated(self):
    """UPER round-trips for ENUMERATED: single-value (A), extensible (B),
    out-of-order values with extension additions (C), and a large
    extension list exercising multi-byte enumeration indices (D).
    """
    foo = asn1tools.compile_string(
        "Foo DEFINITIONS AUTOMATIC TAGS ::= "
        "BEGIN "
        "A ::= ENUMERATED { one(1) } "
        "B ::= ENUMERATED { zero(0), one(1), ... } "
        "C ::= ENUMERATED { one(1), four(4), two(2), ..., six(6), nine(9) } "
        "D ::= ENUMERATED { a, ..., "
        "aa, ab, ac, ad, ae, af, ag, ah, ai, aj, ak, al, am, an, ao, ap, "
        "aq, ar, as, at, au, av, aw, ax, ay, az, ba, bb, bc, bd, be, bf, "
        "bg, bh, bi, bj, bk, bl, bm, bn, bo, bp, bq, br, bs, bt, bu, bv, "
        "bw, bx, by, bz, ca, cb, cc, cd, ce, cf, cg, ch, ci, cj, ck, cl, "
        "cm, cn, co, cp, cq, cr, cs, ct, cu, cv, cw, cx, cy, cz, da, db, "
        "dc, dd, de, df, dg, dh, di, dj, dk, dl, dm, dn, do, dp, dq, dr, "
        "ds, dt, du, dv, dw, dx, dy, dz, ea, eb, ec, ed, ee, ef, eg, eh, "
        "ei, ej, ek, el, em, en, eo, ep, eq, er, es, et, eu, ev, ew, ex, "
        "ey, ez, fa, fb, fc, fd, fe, ff, fg, fh, fi, fj, fk, fl, fm, fn, "
        "fo, fp, fq, fr, fs, ft, fu, fv, fw, fx, fy, fz, ga, gb, gc, gd, "
        "ge, gf, gg, gh, gi, gj, gk, gl, gm, gn, go, gp, gq, gr, gs, gt, "
        "gu, gv, gw, gx, gy, gz, ha, hb, hc, hd, he, hf, hg, hh, hi, hj, "
        "hk, hl, hm, hn, ho, hp, hq, hr, hs, ht, hu, hv, hw, hx, hy, hz, "
        "ia, ib, ic, id, ie, if, ig, ih, ii, ij, ik, il, im, in, io, ip, "
        "iq, ir, is, it, iu, iv, iw, ix, iy, iz, ja, jb, jc, jd, je, jf, "
        "jg, jh, ji, jj, jk, jl, jm, jn, jo, jp, jq, jr, js, jt, ju, jv, "
        "jw, jx, jy, jz } "
        "END",
        'uper')

    # (type name, decoded value, expected UPER bytes)
    cases = [
        ('A', 'one', b''),
        ('B', 'zero', b'\x00'),
        ('B', 'one', b'\x40'),
        ('C', 'one', b'\x00'),
        ('C', 'two', b'\x20'),
        ('C', 'four', b'\x40'),
        ('C', 'six', b'\x80'),
        ('C', 'nine', b'\x81'),
        ('D', 'aa', b'\x80'),
        ('D', 'cl', b'\xbf'),
        ('D', 'cm', b'\xc0\x50\x00'),
        ('D', 'jv', b'\xc0\x7f\xc0'),
        ('D', 'jw', b'\xc0\x80\x40\x00'),
        ('D', 'jz', b'\xc0\x80\x40\xc0')
    ]

    for type_name, decoded, encoded in cases:
        self.assert_encode_decode(foo, type_name, decoded, encoded)
def test_sequence(self):
foo = asn1tools.compile_string(
"Foo DEFINITIONS AUTOMATIC TAGS ::= "
"BEGIN "
"A ::= SEQUENCE {} "
"B ::= SEQUENCE { "
" a INTEGER DEFAULT 0 "
"} "
"C ::= SEQUENCE { "
" a BOOLEAN, "
" ... "
"} "
"D ::= SEQUENCE { "
" a BOOLEAN, "
" ..., "
" [[ "
" b BOOLEAN "
" ]] "
"} "
"E ::= SEQUENCE { "
" a BOOLEAN, "
" ..., "
" [[ "
" b BOOLEAN "
" ]], "
" ... "
"} "
"F ::= SEQUENCE { "
" a BOOLEAN, "
" ..., "
" [[ "
" b BOOLEAN "
" ]], "
" ..., "
" c BOOLEAN "
"} "
"G ::= SEQUENCE { "
" a BOOLEAN, "
" ..., "
" [[ "
" b BOOLEAN "
" ]], "
" [[ "
" c BOOLEAN "
" ]], "
" ..., "
" d BOOLEAN "
"} "
"H ::= SEQUENCE { "
" a BOOLEAN, "
" ..., "
" ... "
"} "
"I ::= SEQUENCE { "
" a BOOLEAN, "
" ..., "
" b BOOLEAN "
"} "
"J ::= SEQUENCE { "
" a BOOLEAN, "
" ..., "
" b BOOLEAN OPTIONAL "
"} "
"K ::= SEQUENCE { "
" a BOOLEAN, "
" ..., "
" b BOOLEAN, "
" c BOOLEAN "
"} "
"L ::= SEQUENCE { "
" a BOOLEAN, "
" ..., "
" [[ "
" b BOOLEAN, "
" c BOOLEAN "
" ]] "
"} "
"M ::= SEQUENCE { "
" a BOOLEAN, "
" ..., "
" [[ "
" b SEQUENCE { "
" a INTEGER"
" } OPTIONAL, "
" c BOOLEAN "
" ]] "
"} "
"N ::= SEQUENCE { "
" a BOOLEAN DEFAULT TRUE "
"} "
"O ::= SEQUENCE { "
" ..., "
" a BOOLEAN DEFAULT TRUE "
"} "
"P ::= SEQUENCE { "
" ..., "
" [[ "
" a BOOLEAN, "
" b BOOLEAN DEFAULT TRUE "
" ]] "
"} "
"Q ::= SEQUENCE { "
" a C, "
" b INTEGER "
"} "
"R ::= SEQUENCE { "
" a D, "
" b INTEGER "
"} "
"S ::= SEQUENCE { "
" a BOOLEAN, "
" ..., "
" b SEQUENCE { "
" a BOOLEAN, "
" b BOOLEAN OPTIONAL, "
" ... "
" } "
"} "
"END",
'uper')
datas = [
('A', {}, b''),
('O', {}, b'\x00'),
('B', {'a': 0}, b'\x00'),
('B', {'a': 1}, b'\x80\x80\x80'),
('C', {'a': True}, b'\x40'),
('D', {'a': True}, b'\x40'),
('E', {'a': True}, b'\x40'),
('H', {'a': True}, b'\x40'),
('I', {'a': True}, b'\x40'),
('J', {'a': True}, b'\x40'),
('K', {'a': True}, b'\x40'),
('L', {'a': True}, b'\x40'),
('M', {'a': True}, b'\x40'),
('N', {'a': True}, b'\x00'),
('N', {'a': | |
<reponame>gokceneraslan/ProDy<filename>prody/dynamics/mode.py
# -*- coding: utf-8 -*-
# ProDy: A Python Package for Protein Dynamics Analysis
#
# Copyright (C) 2010-2012 <NAME>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
"""This module defines classes for handling mode data.
.. _normalmode-operations:
*******************************************************************************
Normal Mode Algebra
*******************************************************************************
In this example we will compare modes from two ANMs for the same protein,
but everything applies to comparison of ANMs and PCAs (as long as they contain
same number of atoms).
Let's get started by getting ANM models for two related protein structures:
>>> from prody import *
>>> str1 = parsePDB('1p38')
>>> str2 = parsePDB('1r39')
**Find and align matching chains**
>>> matches = matchChains(str1, str2)
>>> match = matches[0]
>>> ch1 = match[0]
>>> ch2 = match[1]
Minimize RMSD by superposing ``ch2`` onto ``ch1``:
>>> ch2, t = superpose(ch2, ch1)
>>> # t is transformation, which is already applied to ch2
>>> rmsd = calcRMSD(ch1, ch2)
>>> print( '{0:.2f}'.format(rmsd) ) # Print rmsd with some formatting
0.90
**Get ANM models for each chain**
>>> anm1, ch1 = calcANM(ch1)
>>> anm2, ch2 = calcANM(ch2)
>>> print( anm1[0] )
Mode 1 from ANM 1p38
Let's rename these :class:`ANM` instances, so that they print short:
>>> anm1.setTitle('1p38_anm')
>>> anm2.setTitle('1r39_anm')
This is how they print now:
>>> print( anm1[0] )
Mode 1 from ANM 1p38_anm
>>> print( anm2[0] )
Mode 1 from ANM 1r39_anm
Calculate overlap
===============================================================================
We need Numpy in this part:
>>> import numpy as np
>>> np.set_printoptions(precision=3)
Multiplication of two :class:`Mode` instances returns dot product
of their eigenvectors. This dot product is the overlap or cosine correlation
between modes.
Let's calculate overlap for slowest modes:
>>> overlap = anm1[0] * anm2[0]
>>> print( '{0:.3f}'.format(overlap) )
-0.984
This shows that the magnitude of the overlap between these two modes is 0.98, which is not
surprising since ANM modes come from structures of the *same* protein.
To compare multiple modes, convert a list of modes to a :func:`numpy.array`:
>>> print( np.array(list(anm1[:3])) * np.array(list(anm2[:3])) )
[-0.98402119545 -0.98158348545 -0.991357811832]
This shows that slowest three modes are almost identical.
We could also generate a matrix of overlaps using :func:`numpy.outer`:
>>> outer = np.outer( np.array(list(anm1[:3])), np.array(list(anm2[:3])) )
>>> print( outer.astype(np.float64).round(2) )
[[-0.98 -0.14 -0. ]
[ 0.15 -0.98 0.08]
[ 0.01 -0.08 -0.99]]
This could also be printed in a pretty table format using
:func:`~.printOverlapTable`:
>>> printOverlapTable(anm1[:3], anm2[:3])
Overlap Table
ANM 1r39_anm
#1 #2 #3
ANM 1p38_anm #1 -0.98 -0.14 0.00
ANM 1p38_anm #2 +0.15 -0.98 +0.08
ANM 1p38_anm #3 +0.01 -0.08 -0.99
<BLANKLINE>
**Scaling**
:class:`Mode` instances can be scaled, but after this operation they will
become :class:`Vector` instances:
>>> anm1[0] * 10
<Vector: 10*(Mode 1 from ANM 1p38_anm)>
Linear combination
===============================================================================
It is also possible to linearly combine normal modes:
>>> anm1[0] * 3 + anm1[1] + anm1[2] * 2
<Vector: 3*(Mode 1 from ANM 1p38_anm) + Mode 2 from ANM 1p38_anm + 2*(Mode 3 \
from ANM 1p38_anm)>
Or, we could use eigenvalues for linear combination:
>>> lincomb = anm1[0] * anm1[0].getEigval() + anm1[1] * anm1[1].getEigval()
It is the name of the :class:`Vector` instance that keeps track of operations.
>>> print( lincomb.getTitle() )
0.148971269751*(Mode 1 from ANM 1p38_anm) + 0.24904210757*(Mode 2 from ANM \
1p38_anm)
Approximate a deformation vector
===============================================================================
Let's get the deformation vector between *ch1* and *ch2*:
>>> defvec = calcDeformVector(ch1, ch2)
>>> defvec_magnitude = abs(defvec)
>>> print( '{0:.2f}'.format(defvec_magnitude) )
16.69
Let's see how deformation projects onto ANM modes:
>>> print( np.array(list(anm1[:3])) * defvec )
[-5.60860594784 2.15393365959 -3.13701609199]
We can use these numbers to combine ANM modes:
>>> approximate_defvec = np.sum( (np.array(list(anm1[:3])) * defvec) * \
np.array(list(anm1[:3])) )
>>> print( approximate_defvec )
-5.60860594784*(Mode 1 from ANM 1p38_anm) + 2.15393365959*(Mode 2 from ANM \
1p38_anm) + -3.13701609199*(Mode 3 from ANM 1p38_anm)
Let's deform 1r39 chain along this approximate deformation vector and see
how RMSD changes:
>>> ch2.setCoords(ch2.getCoords() - approximate_defvec.getArrayNx3())
>>> rmsd = calcRMSD(ch1, ch2)
>>> print( '{0:.2f}'.format(rmsd) )
0.82
RMSD decreases from 0.90 A to 0.82 A.
"""
__author__ = '<NAME>'
__copyright__ = 'Copyright (C) 2010-2012 <NAME>'
import numpy as np
import prody
__all__ = ['Mode', 'Vector']
class VectorBase(object):
    """A base class for :class:`Mode` and :class:`Vector`.

    This base class defines some shared methods, such as scalar multiplication
    or addition of mode instances.

    Defined operations are:

        * Absolute value (abs(mode)) returns mode length
        * Additive inverse (-mode)
        * Mode addition (mode1 + mode2)
        * Mode subtraction (mode1 - mode2)
        * Scalar multiplication (x*mode or mode*x)
        * Division by a scalar (mode/x)
        * Dot product (mode1*mode2)
        * Power (mode**x)

    """

    __slots__ = []

    # Scalar types accepted by the arithmetic operators.  ``long`` only
    # exists on Python 2; on Python 3 the bare ``int`` type is unbounded,
    # so fall back to (int, float) there.  Without this guard every
    # operator raised NameError on Python 3.
    try:
        _SCALARS = (int, float, long)
    except NameError:
        _SCALARS = (int, float)

    def __abs__(self):
        """Return the Euclidean length (2-norm) of the vector."""
        return np.sqrt((self._getArray()**2).sum())

    def __neg__(self):
        """Return the additive inverse as a new :class:`Vector`."""
        return Vector(-self._getArray(), '-({0:s})'.format(str(self)),
                      self.is3d())

    def __div__(self, other):
        """Return a new :class:`Vector` scaled by ``1/other``."""
        if isinstance(other, self._SCALARS):
            return Vector(self._getArray() / other,
                          '({1:s})/{0}'.format(other, str(self)), self.is3d())
        else:
            raise TypeError('{0} is not a scalar'.format(other))

    def __idiv__(self, other):
        return self.__div__(other)

    # Python 3 dispatches ``/`` to __truediv__ (and ``/=`` to
    # __itruediv__); alias them so scalar division works on both majors.
    __truediv__ = __div__
    __itruediv__ = __idiv__

    def __mul__(self, other):
        """Return scaled mode or dot product between modes."""
        if isinstance(other, self._SCALARS):
            return Vector(other * self._getArray(),
                          '{0}*({1:s})'.format(other, str(self)), self.is3d())
        elif isinstance(other, VectorBase):
            # Dot product of the eigenvectors, i.e. the overlap between
            # the two modes.
            return np.dot(self._getArray(), other._getArray())
        else:
            raise TypeError('{0} is not a scalar or a mode'.format(other))

    def __rmul__(self, other):
        """Return scaled mode or dot product between modes."""
        if isinstance(other, self._SCALARS):
            return Vector(other * self._getArray(),
                          '{0}*({1:s})'.format(other, str(self)), self.is3d())
        elif isinstance(other, VectorBase):
            return np.dot(self._getArray(), other._getArray())
        else:
            raise TypeError('{0} is not a scalar or a mode'.format(other))

    def __imul__(self, other):
        return self.__mul__(other)

    def __add__(self, other):
        """Return elementwise sum as a new :class:`Vector`."""
        if isinstance(other, VectorBase):
            if len(self) != len(other):
                raise ValueError('modes do not have the same length')
            return Vector(self._getArray() + other._getArray(),
                          '{0:s} + {1:s}'.format(str(self), str(other)),
                          self.is3d())
        else:
            raise TypeError('{0} is not a mode instance'.format(other))

    def __radd__(self, other):
        if isinstance(other, VectorBase):
            if len(self) != len(other):
                raise ValueError('modes do not have the same length')
            return Vector(self._getArray() + other._getArray(),
                          '{0:s} + {1:s}'.format(str(other), str(self)),
                          self.is3d())
        else:
            raise TypeError('{0} is not a mode instance'.format(other))

    def __iadd__(self, other):
        return self.__add__(other)

    def __sub__(self, other):
        """Return elementwise difference as a new :class:`Vector`."""
        if isinstance(other, VectorBase):
            if len(self) != len(other):
                raise ValueError('modes do not have the same length')
            return Vector(self._getArray() - other._getArray(),
                          '{0:s} - {1:s}'.format(str(self), str(other)),
                          self.is3d())
        else:
            raise TypeError('{0} is not a mode instance'.format(other))

    def __rsub__(self, other):
        if isinstance(other, VectorBase):
            if len(self) != len(other):
                raise ValueError('modes do not have the same length')
            return Vector(other._getArray() - self._getArray(),
                          '{0:s} - {1:s}'.format(str(other), str(self)),
                          self.is3d())
        else:
            raise TypeError('{0} is not a mode instance'.format(other))

    def __isub__(self, other):
        return self.__sub__(other)

    def __pow__(self, other):
        """Return elementwise power as a new :class:`Vector`."""
        if isinstance(other, self._SCALARS):
            return Vector(self._getArray() ** other,
                          '({0:s})**{1}'.format(str(self), other), self.is3d())
        else:
            raise TypeError('{0} is not a scalar'.format(other))

    def getArray(self):
        """Return a copy of array.  Implemented by subclasses."""
        pass

    def _getArray(self):
        """Return array.  Implemented by subclasses."""
        pass

    def numAtoms(self):
        """Return number of atoms.  Implemented by subclasses."""
        pass

    def is3d(self):
        """Return true if vector is 3d.  Implemented by subclasses."""
        pass

    def getArrayNx3(self):
        """Return a copy of array with shape (N, 3)."""
        if self.is3d():
            return self.getArray().reshape((self.numAtoms(), 3))
        else:
            return self.getArray()

    def _getArrayNx3(self):
        """Return a copy of array with shape (N, 3)."""
        if self.is3d():
            return self._getArray().reshape((self.numAtoms(), 3))
        else:
            return self._getArray()
class Mode(VectorBase):
"""A class to provide access to and operations on mode data.
"""
__slots__ = ['_model', '_index']
def __init__(self, model, index):
    """Create a mode that is a view into one column of an NMA model.

    :arg model: a normal mode analysis instance
    :type model: :class:`~.ANM`, :class:`~.GNM`, or :class:`~.PCA`
    :arg index: index of the mode
    :type index: int"""

    self._model = model
    self._index = int(index)
def __len__(self):
    """Return the number of degrees of freedom of the parent model."""
    return self._model._dof
def __repr__(self):
    # Mode indices are stored 0-based but displayed 1-based.
    return '<Mode: {0:d} from {1:s}>'.format(self._index + 1,
                                             str(self._model))
def __str__(self):
    # 1-based mode number, consistent with __repr__.
    return 'Mode {0:d} from {1:s}'.format(self._index + 1, str(self._model))
def __int__(self):
    """Return the 0-based index of the mode in its model."""
    return self._index
def __float__(self):
    """Return the eigenvalue associated with this mode."""
    return self.getEigval()
def is3d(self):
    """Return ``True`` if mode instance is from a 3-dimensional model."""
    return self._model._is3d
def numAtoms(self):
    """Return number of atoms in the parent model."""
    return self._model._n_atoms
def numDOF(self):
"""Return number of degrees of freedom (three times the number of
atoms)."""
return | |
# Copyright (c) Hikvision Research Institute. All rights reserved.
import math
import cv2
import numpy as np
from numpy import random
import mmcv
from mmdet.datasets.pipelines import Resize as MMDetResize
from mmdet.datasets.pipelines import RandomFlip as MMDetRandomFlip
from mmdet.datasets.pipelines import RandomCrop as MMDetRandomCrop
from ..builder import PIPELINES
@PIPELINES.register_module()
class Resize(MMDetResize):
    """Resize images & bbox & mask & keypoint.

    Extends :class:`mmdet.datasets.pipelines.Resize` so that keypoint
    coordinates and instance areas are rescaled together with the image.

    Args:
        keypoint_clip_border (bool, optional): Whether to clip keypoints
            that fall outside the border of the resized image.
            Defaults to True.
    """

    def __init__(self, *args, keypoint_clip_border=True, **kwargs):
        super(Resize, self).__init__(*args, **kwargs)
        self.keypoint_clip_border = keypoint_clip_border

    def _resize_keypoints(self, results):
        """Rescale keypoints with ``results['scale_factor']``.

        Keypoints are stored flat as (x, y, visibility) triplets, hence
        the strided ``0::3`` / ``1::3`` indexing.
        """
        for field in results.get('keypoint_fields', []):
            kpts = results[field].copy()
            kpts[:, 0::3] = kpts[:, 0::3] * results['scale_factor'][0]
            kpts[:, 1::3] = kpts[:, 1::3] * results['scale_factor'][1]
            if self.keypoint_clip_border:
                img_shape = results['img_shape']
                kpts[:, 0::3] = np.clip(kpts[:, 0::3], 0, img_shape[1])
                kpts[:, 1::3] = np.clip(kpts[:, 1::3], 0, img_shape[0])
            results[field] = kpts

    def _resize_areas(self, results):
        """Rescale instance areas by the product of x and y scale factors."""
        for field in results.get('area_fields', []):
            results[field] = (results[field].copy()
                              * results['scale_factor'][0]
                              * results['scale_factor'][1])

    def __call__(self, results):
        """Resize image/bboxes/masks/seg via the parent class, then the
        keypoint and area fields.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Resized results; 'img_shape', 'pad_shape',
            'scale_factor' and 'keep_ratio' keys are added into the
            result dict.
        """
        results = super(Resize, self).__call__(results)
        self._resize_keypoints(results)
        self._resize_areas(results)
        return results

    def __repr__(self):
        repr_str = super(Resize, self).__repr__()[:-1] + ', '
        repr_str += f'keypoint_clip_border={self.keypoint_clip_border})'
        return repr_str
@PIPELINES.register_module()
class RandomFlip(MMDetRandomFlip):
    """Flip the image & bbox & mask & keypoint.

    If the input dict contains the key "flip", then the flag will be used,
    otherwise it will be randomly decided by a ratio specified in the init
    method.
    """

    def keypoint_flip(self, keypoints, img_shape, direction, flip_pairs):
        """Flip keypoints horizontally.

        Args:
            keypoints (numpy.ndarray): Per-instance keypoints stored flat
                as (x, y, visibility) triplets, shape (N, 3*K).
            img_shape (tuple[int]): Image shape (height, width).
            direction (str): Flip direction; only 'horizontal' is
                implemented.
            flip_pairs: Pairs of keypoint indices that exchange left/right
                roles when mirrored (e.g. left and right shoulder).

        Returns:
            numpy.ndarray: Flipped keypoints.
        """
        assert keypoints.shape[-1] % 3 == 0
        flipped = keypoints.copy()
        if direction == 'horizontal':
            width = img_shape[1]
            flipped = flipped.reshape(flipped.shape[0],
                                      flipped.shape[1] // 3, 3)
            # Mirror the x coordinate of visible keypoints only
            # (visibility flag > 0).
            visible = flipped[..., -1] > 0
            flipped[visible, 0] = width - flipped[visible, 0]
            # Swap left/right partner joints.
            for pair in flip_pairs:
                flipped[:, pair, :] = flipped[:, pair[::-1], :]
            flipped[..., 0] = np.clip(flipped[..., 0], 0, width)
            flipped = flipped.reshape(flipped.shape[0], keypoints.shape[1])
        elif direction == 'vertical':
            raise NotImplementedError
        elif direction == 'diagonal':
            raise NotImplementedError
        else:
            raise ValueError(f"Invalid flipping direction '{direction}'")
        return flipped

    def __call__(self, results):
        """Flip via the parent class, then flip the keypoint fields too.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Flipped results; 'flip' and 'flip_direction' keys are
            added into the result dict.
        """
        results = super(RandomFlip, self).__call__(results)
        if results['flip']:
            # flip keypoints
            for field in results.get('keypoint_fields', []):
                results[field] = self.keypoint_flip(
                    results[field], results['img_shape'],
                    results['flip_direction'],
                    results['ann_info']['flip_pairs'])
        return results
@PIPELINES.register_module()
class RandomCrop(MMDetRandomCrop):
    """Random crop the image & bboxes & masks & keypoints.

    The absolute `crop_size` is sampled based on `crop_type` and
    `image_size`, then the cropped results are generated.

    Args:
        kpt_clip_border (bool, optional): Whether to clip keypoints
            outside the border of the image. Defaults to True.

    Note:
        - The keys for bboxes, keypoints and areas must be aligned. That is,
          `gt_bboxes` corresponds to `gt_keypoints` and `gt_areas`, and
          `gt_bboxes_ignore` corresponds to `gt_keypoints_ignore` and
          `gt_areas_ignore`.
    """

    def __init__(self,
                 *args,
                 kpt_clip_border=True,
                 **kwargs):
        super(RandomCrop, self).__init__(*args, **kwargs)
        self.kpt_clip_border = kpt_clip_border
        # The key correspondence from bboxes to kpts and areas.
        self.bbox2kpt = {
            'gt_bboxes': 'gt_keypoints',
            'gt_bboxes_ignore': 'gt_keypoints_ignore'
        }
        self.bbox2area = {
            'gt_bboxes': 'gt_areas',
            'gt_bboxes_ignore': 'gt_areas_ignore'
        }

    def _crop_data(self, results, crop_size, allow_negative_crop):
        """Function to randomly crop images, bounding boxes, masks,
        semantic segmentation maps and keypoints.

        Args:
            results (dict): Result dict from loading pipeline.
            crop_size (tuple): Expected absolute size after cropping, (h, w).
            allow_negative_crop (bool): Whether to allow a crop that does
                not contain any bbox area. Default to False.

        Returns:
            dict: Randomly cropped results; 'img_shape' key in the result
            dict is updated according to the crop size.  ``None`` is
            returned when the crop leaves no usable ground truth and
            ``allow_negative_crop`` is False.
        """
        assert crop_size[0] > 0 and crop_size[1] > 0
        # BUGFIX: sample the crop window ONCE, from the first image field,
        # instead of re-sampling inside the loop below.  Previously each
        # entry in ``img_fields`` got an independent random offset, so
        # multiple image fields ended up cropped at different positions.
        # Behavior is unchanged for the common single-'img' case.
        img = results[results.get('img_fields', ['img'])[0]]
        margin_h = max(img.shape[0] - crop_size[0], 0)
        margin_w = max(img.shape[1] - crop_size[1], 0)
        offset_h = np.random.randint(0, margin_h + 1)
        offset_w = np.random.randint(0, margin_w + 1)
        crop_y1, crop_y2 = offset_h, offset_h + crop_size[0]
        crop_x1, crop_x2 = offset_w, offset_w + crop_size[1]

        # crop every image field with the same window
        for key in results.get('img_fields', ['img']):
            img = results[key][crop_y1:crop_y2, crop_x1:crop_x2, ...]
            img_shape = img.shape
            results[key] = img
        results['img_shape'] = img_shape

        # crop bboxes accordingly and clip to the image boundary
        for key in results.get('bbox_fields', []):
            # e.g. gt_bboxes and gt_bboxes_ignore
            bbox_offset = np.array([offset_w, offset_h, offset_w, offset_h],
                                   dtype=np.float32)
            bboxes = results[key] - bbox_offset
            if self.bbox_clip_border:
                bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, img_shape[1])
                bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, img_shape[0])
            valid_inds = (bboxes[:, 2] > bboxes[:, 0]) & (
                bboxes[:, 3] > bboxes[:, 1])
            # If the crop does not contain any gt-bbox area and
            # allow_negative_crop is False, skip this image.
            if (key == 'gt_bboxes' and not valid_inds.any()
                    and not allow_negative_crop):
                return None
            results[key] = bboxes[valid_inds, :]
            # label fields. e.g. gt_labels and gt_labels_ignore
            label_key = self.bbox2label.get(key)
            if label_key in results:
                results[label_key] = results[label_key][valid_inds]
            # mask fields, e.g. gt_masks and gt_masks_ignore
            mask_key = self.bbox2mask.get(key)
            if mask_key in results:
                results[mask_key] = results[mask_key][
                    valid_inds.nonzero()[0]].crop(
                        np.asarray([crop_x1, crop_y1, crop_x2, crop_y2]))
                if self.recompute_bbox:
                    results[key] = results[mask_key].get_bboxes()
            # keypoint fields, e.g. gt_keypoints
            kpt_key = self.bbox2kpt.get(key)
            if kpt_key in results:
                results[kpt_key] = results[kpt_key][valid_inds]
            # area fields, e.g. gt_areas
            area_key = self.bbox2area.get(key)
            if area_key in results:
                results[area_key] = results[area_key][valid_inds]

        # crop semantic seg
        for key in results.get('seg_fields', []):
            results[key] = results[key][crop_y1:crop_y2, crop_x1:crop_x2]

        # crop keypoints accordingly and clip to the image boundary
        for key in results.get('keypoint_fields', []):
            # e.g. gt_keypoints
            if len(results[key]) > 0:
                kpt_offset = np.array([offset_w, offset_h], dtype=np.float32)
                keypoints = results[key].copy()
                keypoints = keypoints.reshape(keypoints.shape[0], -1, 3)
                keypoints[..., :2] = keypoints[..., :2] - kpt_offset
                # A keypoint becomes invalid if it moved outside the crop
                # or its visibility flag is (effectively) zero.
                invalid_idx = (keypoints[..., 0] < 0).astype(np.int8) | \
                    (keypoints[..., 1] < 0).astype(np.int8) | \
                    (keypoints[..., 0] > img_shape[1]).astype(np.int8) | \
                    (keypoints[..., 1] > img_shape[0]).astype(np.int8) | \
                    (keypoints[..., 2] < 0.1).astype(np.int8)
                assert key == 'gt_keypoints'
                # Drop instances whose keypoints are all invalid, and keep
                # the aligned bbox/area/label entries in sync.
                gt_valid = ~invalid_idx.all(1)
                results['gt_bboxes'] = results['gt_bboxes'][gt_valid]
                results['gt_areas'] = results['gt_areas'][gt_valid]
                results['gt_labels'] = results['gt_labels'][gt_valid]
                keypoints[invalid_idx > 0, :] = 0
                keypoints = keypoints[gt_valid]
                if len(keypoints) == 0:
                    return None
                keypoints = keypoints.reshape(keypoints.shape[0], -1)
                if self.kpt_clip_border:
                    keypoints[:, 0::3] = np.clip(keypoints[:, 0::3], 0,
                                                 img_shape[1])
                    keypoints[:, 1::3] = np.clip(keypoints[:, 1::3], 0,
                                                 img_shape[0])
                results[key] = keypoints
        # assert len(results['gt_bboxes']) == len(results['gt_keypoints'])
        return results

    def __repr__(self):
        repr_str = super(RandomCrop, self).__repr__()[:-1] + ', '
        repr_str += f'kpt_clip_border={self.kpt_clip_border})'
        return repr_str
@PIPELINES.register_module()
class KeypointRandomAffine:
"""Random affine transform data augmentation.
This operation randomly generates affine transform matrix which including
rotation, translation, shear and scaling transforms.
Args:
max_rotate_degree (float): Maximum degrees of rotation transform.
Default: 10.
max_translate_ratio (float): Maximum ratio of translation.
Default: 0.1.
scaling_ratio_range (tuple[float]): Min and max ratio of
scaling transform. Default: (0.5, 1.5).
max_shear_degree (float): Maximum degrees of shear
transform. Default: 2.
border (tuple[int]): Distance from height and width sides of input
image to adjust output shape. Only used in mosaic dataset.
Default: (0, 0).
border_val (tuple[int]): Border padding values of 3 channels.
Default: (114, 114, 114).
min_bbox_size (float): Width and height threshold to filter bboxes.
If the height or width of a box is smaller than this value, it
will be removed. Default: 2.
min_area_ratio (float): Threshold of area ratio between
original bboxes and wrapped bboxes. If smaller than this value,
the box will be removed. Default: 0.2.
max_aspect_ratio (float): Aspect ratio of width and height
threshold to filter bboxes. If max(h/w, w/h) larger than this
value, the box will be removed.
"""
def __init__(self,
             max_rotate_degree=10.0,
             max_translate_ratio=0.1,
             scaling_ratio_range=(0.5, 1.5),
             max_shear_degree=2.0,
             border=(0, 0),
             border_val=(114, 114, 114),
             min_bbox_size=2,
             min_area_ratio=0.2,
             max_aspect_ratio=20):
    """Store affine-augmentation parameters; see the class docstring."""
    # Translation is a fraction of the image size, so it must lie in [0, 1].
    assert 0 <= max_translate_ratio <= 1
    # Scaling range must be a valid (min, max) interval with a positive lower bound.
    assert scaling_ratio_range[0] <= scaling_ratio_range[1]
    assert scaling_ratio_range[0] > 0
    self.max_rotate_degree = max_rotate_degree
    self.max_translate_ratio = max_translate_ratio
    self.scaling_ratio_range = scaling_ratio_range
    self.max_shear_degree = max_shear_degree
    self.border = border
    self.border_val = border_val
    self.min_bbox_size = min_bbox_size
    self.min_area_ratio = min_area_ratio
    self.max_aspect_ratio = max_aspect_ratio
def __call__(self, results):
img = results['img']
height = img.shape[0] + self.border[0] * 2
width = img.shape[1] + self.border[1] * 2
# Center
center_matrix = np.eye(3, dtype=np.float32)
center_matrix[0, 2] = -img.shape[1] / 2 # x translation (pixels)
center_matrix[1, 2] = -img.shape[0] / 2 # y translation (pixels)
| |
example (Car) doesn't need to use this
metaclass, as it doesn't have any properties that meet this condition.
"""
@dataclass
class VehicleWithWheels(metaclass=property_wizard):
_wheels: Union[int, str] = field(default=4)
@property
def wheels(self) -> int:
return self._wheels
@wheels.setter
def wheels(self, wheels: Union[int, str]):
self._wheels = int(wheels)
@dataclass
class Vehicle(VehicleWithWheels, metaclass=property_wizard):
_windows: Union[int, str] = field(default=6)
@property
def windows(self) -> int:
return self._windows
@windows.setter
def windows(self, windows: Union[int, str]):
self._windows = int(windows)
@dataclass
class Car(Vehicle):
my_list: List[str] = field(default_factory=list)
v = Car()
log.debug(v)
assert v.wheels == 4
assert v.windows == 6
assert v.my_list == []
# Note that my IDE complains here, and suggests `_wheels` as a possible
# keyword argument to the constructor method; however, that's wrong and
# will error if you try it way.
v = Car(wheels=3, windows=5, my_list=['hello', 'world'])
log.debug(v)
assert v.wheels == 3
assert v.windows == 5
assert v.my_list == ['hello', 'world']
v = Car('6', '7', ['testing'])
log.debug(v)
assert v.wheels == 6, 'The constructor should use our setter method'
assert v.windows == 7, 'The constructor should use our setter method'
assert v.my_list == ['testing']
v.wheels = '123'
assert v.wheels == 123, 'Expected assignment to use the setter method'
v.windows = '321'
assert v.windows == 321, 'Expected assignment to use the setter method'
# NOTE: the below test cases are added for coverage purposes
def test_property_wizard_with_public_property_and_underscored_field_without_default_value():
    """
    Using `property_wizard` when the dataclass has a public property, and an
    underscored field *without* a default value explicitly set.
    """
    @dataclass
    class Vehicle(metaclass=property_wizard):
        _wheels: Union[int, str]

        @property
        def wheels(self) -> int:
            return self._wheels

        @wheels.setter
        def wheels(self, wheels: Union[int, str]):
            self._wheels = int(wheels)

    # No explicit default: the metaclass is expected to fall back to the
    # zero value for the first annotated type, i.e. int() == 0.
    v = Vehicle()
    log.debug(v)
    assert v.wheels == 0
    v = Vehicle(wheels=3)
    log.debug(v)
    assert v.wheels == 3
    # String input should be coerced via the setter.
    v = Vehicle('6')
    log.debug(v)
    assert v.wheels == 6, 'The constructor should use our setter method'
    v.wheels = '123'
    assert v.wheels == 123, 'Expected assignment to use the setter method'
def test_property_wizard_with_public_property_and_underscored_field_with_default_factory():
    """
    Using `property_wizard` when the dataclass has a public property, and an
    underscored field has only `default_factory` set.
    """
    @dataclass
    class Vehicle(metaclass=property_wizard):
        _wheels: Union[int, str] = field(default_factory=str)

        @property
        def wheels(self) -> int:
            return self._wheels

        @wheels.setter
        def wheels(self, wheels: Union[int, str]):
            self._wheels = int(wheels)

    with pytest.raises(ValueError):
        # Setter raises ValueError, as `wheels` will be a string by default
        # (default_factory=str yields '', and int('') is invalid).
        _ = Vehicle()

    v = Vehicle(wheels=3)
    log.debug(v)
    assert v.wheels == 3
    v = Vehicle('6')
    log.debug(v)
    assert v.wheels == 6, 'The constructor should use our setter method'
    v.wheels = '123'
    assert v.wheels == 123, 'Expected assignment to use the setter method'
def test_property_wizard_with_public_property_and_underscored_field_without_default_or_default_factory():
    """
    Using `property_wizard` when the dataclass has a public property, and an
    underscored field has neither `default` or `default_factory` set.
    """
    @dataclass
    class Vehicle(metaclass=property_wizard):
        # Bare field(): same expected behavior as no default at all.
        _wheels: Union[int, str] = field()

        @property
        def wheels(self) -> int:
            return self._wheels

        @wheels.setter
        def wheels(self, wheels: Union[int, str]):
            self._wheels = int(wheels)

    v = Vehicle()
    log.debug(v)
    assert v.wheels == 0
    v = Vehicle(wheels=3)
    log.debug(v)
    assert v.wheels == 3
    v = Vehicle('6')
    log.debug(v)
    assert v.wheels == 6, 'The constructor should use our setter method'
    v.wheels = '123'
    assert v.wheels == 123, 'Expected assignment to use the setter method'
def test_property_wizard_with_underscored_property_and_public_field_without_default_value():
    """
    Using `property_wizard` when the dataclass has an underscored property,
    and a public field *without* a default value explicitly set.
    """
    @dataclass
    class Vehicle(metaclass=property_wizard):
        wheels: Union[int, str]

        # NOTE: as written this getter would recurse on itself; the
        # property_wizard metaclass is expected to rewire the underlying
        # attribute name, which the passing assertions below rely on.
        @property
        def _wheels(self) -> int:
            return self._wheels

        @_wheels.setter
        def _wheels(self, wheels: Union[int, str]):
            self._wheels = int(wheels)

    v = Vehicle()
    log.debug(v)
    assert v.wheels == 0
    v = Vehicle(wheels=3)
    log.debug(v)
    assert v.wheels == 3
    v = Vehicle('6')
    log.debug(v)
    assert v.wheels == 6, 'The constructor should use our setter method'
    v.wheels = '123'
    assert v.wheels == 123, 'Expected assignment to use the setter method'
def test_property_wizard_with_public_property_and_public_field_is_property():
    """
    Using `property_wizard` when the dataclass has an underscored property,
    and a public field is also defined as a property.
    """
    @dataclass
    class Vehicle(metaclass=property_wizard):
        # The value of `wheels` here will be ignored, since `wheels` is simply
        # re-assigned on the following property definition. Binding the name
        # to the `property` builtin makes `@wheels` below act like `@property`.
        wheels = property
        # Defines the default value for `wheels`, since it won't work if we
        # define it above. The `init=False` is needed since otherwise IDEs
        # seem to suggest `_wheels` as a parameter to the constructor method,
        # which shouldn't be the case.
        #
        # Note: if are *ok* with the default value for the type (0 in this
        # case), then you can remove the below line and annotate the above
        # line instead as `wheels: Union[int, str] = property`
        _wheels: Union[int, str] = field(default=4, init=False)

        @wheels
        def wheels(self) -> int:
            return self._wheels

        @wheels.setter
        def wheels(self, wheels: Union[int, str]):
            self._wheels = int(wheels)

    v = Vehicle()
    log.debug(v)
    assert v.wheels == 4
    v = Vehicle(wheels=3)
    log.debug(v)
    assert v.wheels == 3
    v = Vehicle('6')
    log.debug(v)
    assert v.wheels == 6, 'The constructor should use our setter method'
    v.wheels = '123'
    assert v.wheels == 123, 'Expected assignment to use the setter method'
def test_property_wizard_with_underscored_property_and_public_field_with_default():
    """
    Using `property_wizard` when the dataclass has an underscored property,
    and the public field has `default` set.
    """
    @dataclass
    class Vehicle(metaclass=property_wizard):
        wheels: Union[int, str] = field(default=2)

        @property
        def _wheels(self) -> int:
            return self._wheels

        @_wheels.setter
        def _wheels(self, wheels: Union[int, str]):
            self._wheels = int(wheels)

    # The explicit field default (2) should win over the type's zero value.
    v = Vehicle()
    log.debug(v)
    assert v.wheels == 2
    v = Vehicle(wheels=3)
    log.debug(v)
    assert v.wheels == 3
    v = Vehicle('6')
    log.debug(v)
    assert v.wheels == 6, 'The constructor should use our setter method'
    v.wheels = '123'
    assert v.wheels == 123, 'Expected assignment to use the setter method'
def test_property_wizard_with_underscored_property_and_public_field_with_default_factory():
    """
    Using `property_wizard` when the dataclass has an underscored property,
    and the public field has only `default_factory` set.
    """
    @dataclass
    class Vehicle(metaclass=property_wizard):
        wheels: Union[int, str] = field(default_factory=str)

        @property
        def _wheels(self) -> int:
            return self._wheels

        @_wheels.setter
        def _wheels(self, wheels: Union[int, str]):
            self._wheels = int(wheels)

    with pytest.raises(ValueError):
        # Setter raises ValueError, as `wheels` will be a string by default
        # (default_factory=str yields '', and int('') is invalid).
        _ = Vehicle()

    v = Vehicle(wheels=3)
    log.debug(v)
    assert v.wheels == 3
    v = Vehicle('6')
    log.debug(v)
    assert v.wheels == 6, 'The constructor should use our setter method'
    v.wheels = '123'
    assert v.wheels == 123, 'Expected assignment to use the setter method'
def test_property_wizard_with_underscored_property_and_public_field_without_default_or_default_factory():
    """
    Using `property_wizard` when the dataclass has an underscored property,
    and the public field has neither `default` or `default_factory` set.
    """
    @dataclass
    class Vehicle(metaclass=property_wizard):
        # Bare field(): same expected behavior as no default at all.
        wheels: Union[int, str] = field()

        @property
        def _wheels(self) -> int:
            return self._wheels

        @_wheels.setter
        def _wheels(self, wheels: Union[int, str]):
            self._wheels = int(wheels)

    v = Vehicle()
    log.debug(v)
    assert v.wheels == 0
    v = Vehicle(wheels=3)
    log.debug(v)
    assert v.wheels == 3
    v = Vehicle('6')
    log.debug(v)
    assert v.wheels == 6, 'The constructor should use our setter method'
    v.wheels = '123'
    assert v.wheels == 123, 'Expected assignment to use the setter method'
def test_property_wizard_where_annotated_type_contains_none():
    """
    Using `property_wizard` when the annotated type for the dataclass field
    associated with a property is here a :class:`Union` type that contains
    `None`. As such, the field is technically an `Optional` so the default
    value will be `None` if no value is specified via the constructor.
    """
    @dataclass
    class Vehicle(metaclass=property_wizard):
        wheels: Union[int, str, None]

        @property
        def _wheels(self) -> int:
            return self._wheels

        @_wheels.setter
        def _wheels(self, wheels: Union[int, str]):
            self._wheels = int(wheels)

    # TypeError: int() argument is `None`
    with pytest.raises(TypeError):
        _ = Vehicle()

    v = Vehicle(wheels=3)
    log.debug(v)
    assert v.wheels == 3
    v = Vehicle('6')
    log.debug(v)
    assert v.wheels == 6, 'The constructor should use our setter method'
    v.wheels = '123'
    assert v.wheels == 123, 'Expected assignment to use the setter method'
def test_property_wizard_with_literal_type():
"""
Using `property_wizard` when the dataclass field associated with a
property is annotated with a :class:`Literal` type.
"""
@dataclass
class Vehicle(metaclass=property_wizard):
# Annotate `wheels` as a literal that should only be set to 1 or 0
# (similar to how the binary numeral system works, for example)
#
# Note: we can assign a default value for `wheels` explicitly, so that
# the IDE doesn't complain when we omit the argument to the
# constructor method, but it's technically not required.
wheels: Literal[1, '1', 0, '0']
@property
def _wheels(self) -> int:
return self._wheels
@_wheels.setter
def _wheels(self, wheels: Union[int, str]):
self._wheels = int(wheels)
v = Vehicle()
log.debug(v)
assert v.wheels | |
r] = 1
r += 1
# reaction 86
# I5_out --> 0
N[I5_out, r] = -1
r += 1
# reaction 87
# I5_out --> 0
N[I5_out, r] = -1
"""
# not S2: I5_S2
"""
r += 1
# reaction 88
# 0 --> L_I5_S2
N[L_I5_S2, r] = 1
r += 1
# reaction 89
# L_I5_S2 --> 0
N[L_I5_S2, r] = -1
r += 1
# reaction 90
# 0 --> I5_out
N[I5_out, r] = 1
r += 1
# reaction 91
# I5_out --> 0
N[I5_out, r] = -1
r += 1
# reaction 92
# I5_out --> 0
N[I5_out, r] = -1
"""
# not I5: I5_I5
"""
r += 1
# reaction 93
# 0 --> L_I5_I5
N[L_I5_I5, r] = 1
r += 1
# reaction 94
# L_I5_I5 --> 0
N[L_I5_I5, r] = -1
r += 1
# reaction 95
# 0 --> I5_out
N[I5_out, r] = 1
r += 1
# reaction 96
# I5_out --> 0
N[I5_out, r] = -1
r += 1
# reaction 97
# I5_out --> 0
N[I5_out, r] = -1
##################### I6
"""
# not S0: I6_S0
"""
r += 1
# reaction 98
# 0 --> L_I6_S0
N[L_I6_S0, r] = 1
r += 1
# reaction 99
# L_I6_S0 --> 0
# (degradation of L_I6_S0; the original comment wrongly said "0 --> L_I6_S0",
# contradicting the -1 stoichiometry and the pattern of every sibling group)
N[L_I6_S0, r] = -1
r += 1
# reaction 100
# 0 --> I6_out
N[I6_out, r] = 1
r += 1
# reaction 101
# I6_out --> 0
N[I6_out, r] = -1
r += 1
# reaction 102
# I6_out --> 0
N[I6_out, r] = -1
"""
# not S1: I6_S1
"""
r += 1
# reaction 103
# 0 --> L_I6_S1
N[L_I6_S1, r] = 1
r += 1
# reaction 104
# L_I6_S1 --> 0
N[L_I6_S1, r] = -1
r += 1
# reaction 105
# 0 --> I6_out
# Fix: production must be credited to I6_out (as the comment says and as the
# matching reactions 90, 95, 100 and 113 do), not to L_I6_S1 — the original
# `N[L_I6_S1, r] = 1` was a copy-paste error.
N[I6_out, r] = 1
r += 1
# reaction 106
# I6_out --> 0
N[I6_out, r] = -1
r += 1
# reaction 107
# I6_out --> 0
N[I6_out, r] = -1
"""
# yes S2: I6_S2
"""
r += 1
# reaction 108
# 0 --> I6_out
N[I6_out, r] = 1
r += 1
# reaction 109
# I6_out --> 0
N[I6_out, r] = -1
r += 1
# reaction 110
# I6_out --> 0
N[I6_out, r] = -1
"""
# not I6: I6_I6
"""
r += 1
# reaction 111
# 0 --> L_I6_I6
N[L_I6_I6, r] = 1
r += 1
# reaction 112
# L_I6_I6 --> 0
N[L_I6_I6, r] = -1
r += 1
# reaction 113
# 0 --> I6_out
N[I6_out, r] = 1
r += 1
# reaction 114
# I6_out --> 0
N[I6_out, r] = -1
r += 1
# reaction 115
# I6_out --> 0
N[I6_out, r] = -1
##################### I7
"""
# not S0: I7_S0
"""
r += 1
# reaction 116
# 0 --> L_I7_S0
N[L_I7_S0, r] = 1
r += 1
# reaction 117
# L_I7_S0 --> 0
# (degradation of L_I7_S0; the original comment wrongly said "0 --> L_I7_S0",
# contradicting the -1 stoichiometry and the pattern of every sibling group)
N[L_I7_S0, r] = -1
r += 1
# reaction 118
# 0 --> I7_out
N[I7_out, r] = 1
r += 1
# reaction 119
# I7_out --> 0
N[I7_out, r] = -1
r += 1
# reaction 120
# I7_out --> 0
N[I7_out, r] = -1
"""
# not S1: I7_S1
"""
r += 1
# reaction 121
# 0 --> L_I7_S1
N[L_I7_S1, r] = 1
r += 1
# reaction 122
# L_I7_S1 --> 0
N[L_I7_S1, r] = -1
r += 1
# reaction 123
# 0 --> I7_out
# Fix: production must be credited to I7_out (as the comment says and as the
# matching reactions 118, 128 and 133 do), not to L_I7_S1 — the original
# `N[L_I7_S1, r] = 1` was a copy-paste error.
N[I7_out, r] = 1
r += 1
# reaction 124
# I7_out --> 0
N[I7_out, r] = -1
r += 1
# reaction 125
# I7_out --> 0
N[I7_out, r] = -1
"""
# not S2: I7_S2
"""
r += 1
# reaction 126
# 0 --> L_I7_S2
N[L_I7_S2, r] = 1
r += 1
# reaction 127
# L_I7_S2 --> 0
N[L_I7_S2, r] = -1
r += 1
# reaction 128
# 0 --> I7_out
N[I7_out, r] = 1
r += 1
# reaction 129
# I7_out --> 0
N[I7_out, r] = -1
r += 1
# reaction 130
# I7_out --> 0
N[I7_out, r] = -1
"""
# not I7: I7_I7
"""
r += 1
# reaction 131
# 0 --> L_I7_I7
N[L_I7_I7, r] = 1
r += 1
# reaction 132
# L_I7_I7 --> 0
N[L_I7_I7, r] = -1
r += 1
# reaction 133
# 0 --> I7_out
N[I7_out, r] = 1
r += 1
# reaction 134
# I7_out --> 0
N[I7_out, r] = -1
r += 1
# reaction 135
# I7_out --> 0
N[I7_out, r] = -1
##################### out
"""
# not I0: I0
"""
r += 1
# reaction 136
# 0 --> L_I0
N[L_I0, r] = 1
r += 1
# reaction 137
# L_I0 --> 0
N[L_I0, r] = -1
r += 1
# reaction 138
# 0 --> out
N[out, r] = 1
r += 1
# reaction 139
# out --> 0
N[out, r] = -1
r += 1
# reaction 140
# out --> 0
N[out, r] = -1
"""
# not I1: I1
"""
r += 1
# reaction 141
# 0 --> L_I1
N[L_I1, r] = 1
r += 1
# reaction 142
# L_I1 --> 0
N[L_I1, r] = -1
r += 1
# reaction 143
# 0 --> out
N[out, r] = 1
r += 1
# reaction 144
# out --> 0
N[out, r] = -1
r += 1
# reaction 145
# out --> 0
N[out, r] = -1
"""
# not I2: I2
"""
r += 1
# reaction 146
# 0 --> L_I2
N[L_I2, r] = 1
r += 1
# reaction 147
# L_I2 --> 0
N[L_I2, r] = -1
r += 1
# reaction 148
# 0 --> out
N[out, r] = 1
r += 1
# reaction 149
# out --> 0
N[out, r] = -1
r += 1
# reaction 150
# out --> 0
N[out, r] = -1
"""
# not I3: I3
"""
r += 1
# reaction 151
# 0 --> L_I3
N[L_I3, r] = 1
r += 1
# reaction 152
# L_I3 --> 0
N[L_I3, r] = -1
r += 1
# reaction 153
# 0 --> out
N[out, r] = 1
r += 1
# reaction 154
# out --> 0
N[out, r] = -1
r += 1
# reaction 155
# out --> 0
N[out, r] = -1
"""
# not I4: I4
"""
r += 1
# reaction 156
# 0 --> L_I4
N[L_I4, r] = 1
r += 1
# reaction 157
# L_I4 --> 0
N[L_I4, r] = -1
r += 1
# reaction 158
# 0 --> out
N[out, r] = 1
r += 1
# reaction 159
# out --> 0
N[out, r] = -1
r += 1
# reaction 160
# out --> 0
N[out, r] = -1
"""
# not I5: I5
"""
r += 1
# reaction 161
# 0 --> L_I5
N[L_I5, r] = 1
r += 1
# reaction 162
# L_I5 --> 0
N[L_I5, r] = -1
r += 1
# reaction 163
# 0 --> out
N[out, r] = 1
r += 1
# reaction 164
# out --> 0
N[out, r] = -1
r += 1
# reaction 165
# out --> 0
N[out, r] = -1
"""
# not I6: I6
"""
r += 1
# reaction 166
# 0 --> L_I6
N[L_I6, r] = 1
r += 1
# reaction 167
# L_I6 --> 0
N[L_I6, r] = -1
r += 1
# reaction 168
# 0 --> out
N[out, r] = 1
r += 1
| |
<gh_stars>10-100
# -*- coding: utf-8 -*-
import json
from datetime import datetime
from unittest.mock import Mock
import pytest
from django.core.exceptions import ValidationError
from django.db.models import Count
from django.test import TestCase
from django.urls import resolve, reverse
from djiffy.models import Canvas, Manifest
from derrida.books.models import (CreatorType, DerridaWork, DerridaWorkSection,
Instance, InstanceCatalogue, InstanceCreator,
InstanceLanguage, Journal, Language,
OwningInstitution, Publisher, Reference,
ReferenceType, Subject, Work, WorkLanguage,
WorkSubject)
from derrida.interventions.models import Intervention
from derrida.people.models import Person
from derrida.places.models import Place
class TestOwningInstitution(TestCase):
    """Tests for the OwningInstitution model."""
    fixtures = ['sample_work_data.json']

    def test_str(self):
        long_name = 'New York Society Library'
        short_name = 'NYSL'
        inst = OwningInstitution(name=long_name)
        # should use long name if no short name is set
        assert str(inst) == long_name
        inst.short_name = short_name
        assert str(inst) == short_name

    def test_instance_count(self):
        # test abstract book count mix-in via owning institution model
        # tests that html for admin form is rendered correctly
        pl = Place.objects.first()
        inst = OwningInstitution.objects.create(name='NYSL',
            place=pl)
        # new institution has no books associated; the admin link should
        # point at the filtered changelist and report a count of zero
        change_url = reverse('admin:books_instance_changelist')
        admin_book_count = inst.instance_count()
        assert change_url in admin_book_count
        assert 'id__exact=%s' % inst.pk in admin_book_count
        assert '0' in admin_book_count
        # create a book and associate it with the institution
        pub = Publisher.objects.create(name='<NAME>')
        wk = Work.objects.create(primary_title='Some title')
        instance = Instance.objects.create(work=wk)
        # publisher=pub, pub_place=pl,
        # is_extant=False, is_annotated=False, is_digitized=False)
        cat = InstanceCatalogue.objects.create(institution=inst,
            instance=instance, is_current=False)
        inst_book_count = inst.instance_count()
        assert change_url in inst_book_count
        assert 'id__exact=%s' % inst.pk in inst_book_count
        assert '1' in inst_book_count
class TestDerridaWork(TestCase):
    """Tests for the DerridaWork model."""

    def setUp(self):
        # get_or_create returns (object, created); only the side effect of
        # ensuring the row exists is needed here.
        testwork, c = DerridaWork.objects.get_or_create(
            short_title='Ceci n\'est pas un livre',
            full_citation=('Ceci n\'est pas un livre: '
                           'and other tales of deconstructionism'),
            is_primary=True,
        )

    def test_str(self):
        '''Test that DerridaWork produces its expected string'''
        short_title = 'Ceci n\'est pas un livre'
        testwork = DerridaWork.objects.get(short_title=short_title)
        assert str(testwork) == short_title
class TestDerridaSection(TestCase):
    """Tests for the DerridaWorkSection model."""

    def test_str(self):
        # String representation should simply be the section name.
        section = DerridaWorkSection(name='Chapter 1')
        assert str(section) == 'Chapter 1'
class TestReference(TestCase):
    """Tests for the Reference model."""
    fixtures = ['sample_work_data']

    def setUp(self):
        self.manif = Manifest.objects.create()
        self.la_vie = Instance.objects.get(work__short_title__contains="La vie")
        self.dg = DerridaWork.objects.get(pk=1)
        self.quotation = ReferenceType.objects.get(name='Quotation')

    def test_str(self):
        # Writing this out because complicated output
        desired_output = 'De la grammatologie, 110a: %s, 10s, Quotation' % \
            self.la_vie.display_title()
        reference = Reference.objects.create(
            instance=self.la_vie,
            derridawork=self.dg,
            derridawork_page='110',
            derridawork_pageloc='a',
            book_page='10s',
            reference_type=self.quotation
        )
        assert str(reference) == desired_output

    def test_get_absolute_url(self):
        ref = Reference.objects.create(
            instance=self.la_vie,
            derridawork=self.dg,
            derridawork_page='110',
            derridawork_pageloc='a',
            book_page='10s',
            reference_type=self.quotation
        )
        ref_url = ref.get_absolute_url()
        resolved_url = resolve(ref_url)
        assert resolved_url.url_name == 'reference'
        assert resolved_url.namespace == 'books'
        assert resolved_url.kwargs['derridawork_slug'] == self.dg.slug
        assert resolved_url.kwargs['page'] == ref.derridawork_page
        assert resolved_url.kwargs['pageloc'] == ref.derridawork_pageloc

    def test_instance_ids_with_digital_editions(self):
        # check static method, so we don't need cls or self.
        # (Fix: the original called the method an extra time and discarded
        # the result; one call per assertion is enough.)
        # no instances have associated canvases so this should return an
        # empty list as a JSON string
        data = Reference.instance_ids_with_digital_editions()
        assert json.loads(data) == []
        # add a canvas to la_vie, then it should appear in the list
        self.la_vie.digital_edition = self.manif
        self.la_vie.save()
        data = Reference.instance_ids_with_digital_editions()
        assert json.loads(data) == [self.la_vie.pk]

    def test_instance_slug(self):
        # create a reference
        ref = Reference.objects.create(
            instance=self.la_vie,
            derridawork=self.dg,
            derridawork_page='110',
            derridawork_pageloc='a',
            book_page='10s',
            reference_type=self.quotation
        )
        # not a book section (none in test set are)
        # should return the slug of its instance
        assert ref.instance_slug == self.la_vie.slug
        # make work into a book section as a 'collected in'
        la_vie_collected = Instance.objects.create(work=self.la_vie.work,
            slug='la-vie-collected')
        self.la_vie.collected_in = la_vie_collected
        self.la_vie.save()
        # should return the slug for the collection
        assert ref.instance_slug == la_vie_collected.slug

    def test_get_instance_url(self):
        # create a reference
        ref = Reference.objects.create(
            instance=self.la_vie,
            derridawork=self.dg,
            derridawork_page='110',
            derridawork_pageloc='a',
            book_page='10s',
            reference_type=self.quotation
        )
        # not a book section (none in test set are)
        # should return the url of its instance
        assert ref.instance_url == self.la_vie.get_absolute_url()
        # make work into a book section as a 'collected in'
        la_vie_collected = Instance.objects.create(work=self.la_vie.work,
            slug='la-vie-collected')
        self.la_vie.collected_in = la_vie_collected
        self.la_vie.save()
        # should return the url for the collection
        assert ref.instance_url == la_vie_collected.get_absolute_url()

    def test_book(self):
        ref = Reference.objects.create(
            instance=self.la_vie,
            derridawork=self.dg,
            derridawork_page='110',
            derridawork_pageloc='a',
            reference_type=self.quotation
        )
        # not a book section (none in test set are)
        # should return the instance
        assert ref.book == self.la_vie
        # create a book section and reassociate the reference
        vie_part_wk = Work.objects.create()
        vie_part = Instance.objects.create(work=vie_part_wk,
            collected_in=self.la_vie)
        ref.instance = vie_part
        # book should return the collected work
        assert ref.book == self.la_vie

    def test_get_section(self):
        # relies on section data from the sample_work_data fixture
        ref = Reference.objects.create(
            instance=self.la_vie,
            derridawork=self.dg,
            derridawork_page=110,
            derridawork_pageloc='a',
            book_page='10s',
            reference_type=self.quotation
        )
        assert ref.get_section() == 'Part 1'

    def test_chapter(self):
        ref = Reference.objects.create(
            instance=self.la_vie,
            derridawork=self.dg,
            derridawork_page=110,
            derridawork_pageloc='a',
            book_page='10s',
            reference_type=self.quotation
        )
        # Create sections to search through.
        # NOTE(review): both chapters share order=2 — possibly 2 and 3 were
        # intended; the page-range lookup below does not depend on order.
        DerridaWorkSection.objects.create(
            name='Part 1',
            order=1,
            derridawork=self.dg,
            start_page=None,
            end_page=None
        )
        DerridaWorkSection.objects.create(
            name='Chapter 1',
            order=2,
            derridawork=self.dg,
            start_page=1,
            end_page=107
        )
        DerridaWorkSection.objects.create(
            name='Chapter 3',
            order=2,
            derridawork=self.dg,
            start_page=108,
            end_page=120
        )
        # page 110 falls inside Chapter 3's range (108-120)
        assert ref.get_chapter() == 'Chapter 3'
class TestReferenceQuerySet(TestCase):
    """Tests for the custom Reference queryset methods."""
    fixtures = ['test_references.json']

    def setUp(self):
        self.ref_qs = Reference.objects.all()

    def test_order_by_source_page(self):
        # queryset ordering should match a plain Python sort on the pages
        pages = sorted([ref.derridawork_page for ref in self.ref_qs.all()])
        assert list(Reference.objects.order_by_source_page()
                    .values_list('derridawork_page', flat=True)) \
            == pages

    def test_order_by_author(self):
        authors = sorted(
            ['; '.join([str(p) for p in ref.instance.work.authors.all()])
             for ref in self.ref_qs])
        qs_authors = list(self.ref_qs.order_by_author() \
            .values_list('instance__work__authors__authorized_name', flat=True))
        # normalize missing authors (NULL -> '') to match the sorted join above
        qs_authors = ['' if name is None else name for name in qs_authors]
        assert qs_authors == authors

    def test_summary_values(self):
        ref = self.ref_qs.first()
        ref_values = self.ref_qs.summary_values().first()
        assert ref_values['id'] == ref.pk
        assert ref_values['instance__slug'] == ref.instance.slug
        assert ref_values['derridawork__slug'] == ref.derridawork.slug
        assert ref_values['derridawork_page'] == ref.derridawork_page
        assert ref_values['derridawork_pageloc'] == ref.derridawork_pageloc
        # no author by default
        assert 'author' not in ref_values
        # author should be included if requested
        ref_values = self.ref_qs.summary_values(include_author=True).first()
        assert ref_values['author'] == \
            ref.instance.work.authors.first().authorized_name
class TestWork(TestCase):
    """Tests for the Work model."""
    fixtures = ['sample_work_data.json']

    def test_str(self):
        la_vie = Work.objects.get(short_title__contains="La vie")
        assert '%s (%s)' % (la_vie.short_title, la_vie.year) \
            == str(la_vie)
        # no date
        la_vie.year = None
        # Fix: the original statement asserted a bare non-empty string, which
        # is always truthy; compare against str() as clearly intended.
        assert '%s (n.d.)' % (la_vie.short_title, ) == str(la_vie)

    def test_author_names(self):
        la_vie = Work.objects.get(short_title__contains="La vie")
        assert la_vie.author_names() == "L\u00e9vi-Strauss"

    def test_instance_count(self):
        la_vie = Work.objects.get(short_title__contains="La vie")
        inst_count = la_vie.instance_count()
        instance_list_url = reverse('admin:books_instance_changelist')
        assert instance_list_url in inst_count
        assert 'id__exact=%s' % la_vie.pk in inst_count
        assert '1' in inst_count
class TestInstance(TestCase):
fixtures = ['sample_work_data', 'test_references']
def test_display_title(self):
    """display_title prefers the instance's alternate title when set."""
    la_vie = Instance.objects.get(work__short_title__contains="La vie")
    # short title from work if no alternate title
    assert la_vie.display_title() == la_vie.work.short_title
    # alternate title from instance if set
    la_vie.alternate_title = 'Family Life'
    assert la_vie.display_title() == la_vie.alternate_title
def test_str(self):
    """Instance string representation is 'title (year)' or 'title (n.d.)'."""
    la_vie = Instance.objects.get(work__short_title__contains="La vie")
    assert '%s (%s)' % (la_vie.display_title(), la_vie.copyright_year) \
        == str(la_vie)
    # no date
    la_vie.copyright_year = None
    # Fix: the original statement asserted a bare non-empty string, which
    # is always truthy; compare against str() as clearly intended.
    assert '%s (n.d.)' % (la_vie.display_title(), ) == str(la_vie)
def test_generate_base_slug(self):
    """Base slug combines author, truncated title and year, slugified."""
    work = Work.objects.create(primary_title='Ulysses')
    inst = Instance(work=work)
    # short title, no author or year
    assert inst.generate_base_slug() == 'ulysses'
    # single-name author
    joyce = Person.objects.create(authorized_name='Joyce')
    work.authors.add(joyce)
    assert inst.generate_base_slug() == 'joyce-ulysses'
    # comma-delimited author name
    joyce.authorized_name = '<NAME>'
    joyce.save()
    assert inst.generate_base_slug() == 'joyce-ulysses'
    # work year - used if no instance copyright year
    work.year = 1922
    work.save()
    assert inst.generate_base_slug() == 'joyce-ulysses-1922'
    # copyright year used when available
    inst.copyright_year = 1950
    assert inst.generate_base_slug() == 'joyce-ulysses-1950'
    # long titles truncated to ten words
    work.primary_title = 'A portrait of the artist as a young man: ' + \
        ' the strange story of <NAME>'
    work.save()
    assert inst.generate_base_slug() == 'joyce-a-portrait-of-the-artist-as-a-young-man-1950'
    # should handle unicode in titles (accents stripped in slug)
    emil_wk = Work.objects.create(primary_title="Émile ou de l'éducation")
    emil = Instance(work=emil_wk)
    assert emil.generate_base_slug() == 'emile-ou-de-leducation'
    # handle unicode in author names
    zizek = Person.objects.create(authorized_name='Žižek')
    emil_wk.authors.add(zizek)
    assert emil.generate_base_slug() == 'zizek-emile-ou-de-leducation'
def test_generate_safe_slug(self):
    """Safe slug de-duplicates copies of the same edition via copy letters."""
    # should ignore itself when checking for duplicates
    la_vie = Instance.objects.get(work__short_title__contains="La vie")
    assert la_vie.generate_safe_slug() == la_vie.generate_base_slug()
    # new instance of the same edition, copy field not set, no other copies
    la_vie2 = Instance(work=la_vie.work, copyright_year=la_vie.copyright_year)
    assert la_vie2.generate_safe_slug() == '%s-B' % la_vie.generate_base_slug()
    # should also set copy value
    assert la_vie2.copy == 'B'
    # new instance of the same edition, with copy field set
    la_vie2 = Instance(work=la_vie.work, copyright_year=la_vie.copyright_year,
        copy='C')
    # should use copy field as set if it is unique
    assert la_vie2.generate_safe_slug() == \
        '%s-%s' % (la_vie.generate_base_slug(), la_vie2.copy)
    la_vie2.save()
    # create additional copy to test against multiple
    la_vie4 = Instance.objects.create(work=la_vie.work,
        copyright_year=la_vie.copyright_year, copy='D')
    # creating a new instance of the same edition with copy field unset
    la_vie3 = Instance(work=la_vie.work, copyright_year=la_vie.copyright_year)
    # should increment letter from the last copy letter encountered (D -> E)
    assert la_vie3.generate_safe_slug() == '%s-E' % la_vie.generate_base_slug()
    # should also set copy value
    assert la_vie3.copy == 'E'
def test_save(self):
    """Saving with an empty slug should populate it via generate_safe_slug."""
    # on save, if empty slug, should set one with generate safe slug
    la_vie = Instance.objects.get(work__short_title__contains="La vie")
    la_vie.slug = ''
    expected_slug = la_vie.generate_safe_slug()
    la_vie.save()
    la_vie.refresh_from_db()
    assert la_vie.slug == expected_slug
def test_get_absolute_url(self):
    """Instance URL should resolve to the books detail view by slug."""
    la_vie = Instance.objects.get(work__short_title__contains="La vie")
    item_url = la_vie.get_absolute_url()
    resolved_url = resolve(item_url)
    assert resolved_url.url_name == 'detail'
    assert resolved_url.namespace == 'books'
    assert resolved_url.kwargs['slug'] == la_vie.slug
def test_item_type(self):
la_vie = Instance.objects.get(work__short_title__contains="La vie")
assert la_vie.item_type == 'Book'
# journal article
la_vie.journal = Journal.objects.all().first()
| |
"""Type embeddings."""
import logging
import os
import time
import pandas as pd
import torch
import ujson as json
from bootleg import log_rank_0_debug, log_rank_0_info
from bootleg.embeddings import EntityEmb
from bootleg.layers.helper_modules import PositionAwareAttention
from bootleg.utils import data_utils, model_utils, utils
from bootleg.utils.model_utils import selective_avg
logger = logging.getLogger(__name__)
# Base type embedding
class TypeEmb(EntityEmb):
"""Type embedding base class.
Forward returns batch x M x K x dim relation embedding.
Args:
main_args: main args
emb_args: specific embedding args
entity_symbols: entity symbols
key: unique embedding key
cpu: bool of if one cpu or not
normalize: bool if normalize embeddings or not
dropout1d_perc: 1D dropout percent
dropout2d_perc: 2D dropout percent
Attributes:
merge_func: determines how the types for a single candidate will be merged.
Support average, softattn, and addattn. Specified in config.
"""
def __init__(
    self,
    main_args,
    emb_args,
    entity_symbols,
    key,
    cpu,
    normalize,
    dropout1d_perc,
    dropout2d_perc,
):
    """Initialize the type embedding; see the class docstring for the
    meaning of each argument."""
    super(TypeEmb, self).__init__(
        main_args=main_args,
        emb_args=emb_args,
        entity_symbols=entity_symbols,
        key=key,
        cpu=cpu,
        normalize=normalize,
        dropout1d_perc=dropout1d_perc,
        dropout2d_perc=dropout2d_perc,
    )
    # Reject unknown config keys early so typos in the config surface as
    # errors instead of being silently ignored.
    allowable_keys = {
        "max_types",
        "type_dim",
        "type_labels",
        "type_vocab",
        "merge_func",
        "attn_hidden_size",
        "regularize_mapping",
    }
    correct, bad_key = utils.assert_keys_in_dict(allowable_keys, emb_args)
    if not correct:
        raise ValueError(f"The key {bad_key} is not in {allowable_keys}")
    assert (
        "max_types" in emb_args
    ), "Type embedding requires max_types to be set in args"
    assert (
        "type_dim" in emb_args
    ), "Type embedding requires type_dim to be set in args"
    assert (
        "type_labels" in emb_args
    ), "Type embedding requires type_labels to be set in args. A Dict from QID -> TypeId or TypeName"
    assert (
        "type_vocab" in emb_args
    ), "Type embedding requires type_vocab to be set in args. A Dict from TypeName -> TypeId"
    assert (
        self.cpu is False
    ), f"We don't support putting type embeddings on CPU right now"
    # Default merge strategy; replaced by additive attention below when
    # merge_func == "addattn" is configured.
    self.merge_func = self.average_types
    self.orig_dim = emb_args.type_dim
    self.add_attn = None
    # Function for merging multiple types
    if "merge_func" in emb_args:
        assert emb_args.merge_func in ["average", "addattn",], (
            f"{key}: You have set the type merge_func to be {emb_args.merge_func} but"
            f" that is not in the allowable list of [average, addattn]"
        )
        if emb_args.merge_func == "addattn":
            if "attn_hidden_size" in emb_args:
                attn_hidden_size = emb_args.attn_hidden_size
            else:
                attn_hidden_size = 100
            # Softmax of types using the sentence context
            self.add_attn = PositionAwareAttention(
                input_size=self.orig_dim, attn_size=attn_hidden_size, feature_size=0
            )
            self.merge_func = self.add_attn_merge
    self.max_types = emb_args.max_types
    # Build (or load from the prep cache) the EID -> type-id lookup table.
    (
        eid2typeids_table,
        self.type2row_dict,
        num_types_with_unk,
        self.prep_file,
    ) = self.prep(
        data_config=main_args.data_config,
        emb_args=emb_args,
        entity_symbols=entity_symbols,
    )
    # Registered as a buffer (not a Parameter): moves with the module across
    # devices but is never trained or saved in the state dict.
    self.register_buffer("eid2typeids_table", eid2typeids_table, persistent=False)
    # self.eid2typeids_table.requires_grad = False
    # +1 accounts for the pad row appended after the unk-inclusive types.
    self.num_types_with_pad_and_unk = num_types_with_unk + 1
    # Regularization mapping goes from typeid to 2d dropout percent
    if "regularize_mapping" in emb_args:
        typeid2reg = torch.zeros(self.num_types_with_pad_and_unk)
    else:
        typeid2reg = None
    if not self.from_pretrained:
        if "regularize_mapping" in emb_args:
            # Per-type regularization and the generic 1D/2D dropout are
            # usually alternatives; warn when both are configured.
            if self.dropout1d_perc > 0 or self.dropout2d_perc > 0:
                logger.warning(
                    f"You have 1D or 2D regularization set with a regularize_mapping. Do you mean to do this?"
                )
            log_rank_0_info(
                logger,
                f"Using regularization mapping in enity embedding from {emb_args.regularize_mapping}",
            )
            typeid2reg = self.load_regularization_mapping(
                main_args.data_config,
                self.num_types_with_pad_and_unk,
                self.type2row_dict,
                emb_args.regularize_mapping,
            )
    self.register_buffer("typeid2reg", typeid2reg)
    assert self.eid2typeids_table.shape[1] == emb_args.max_types, (
        f"Something went wrong with loading type file."
        f" The given max types {emb_args.max_types} does not match that "
        f"of type table {self.eid2typeids_table.shape[1]}"
    )
    log_rank_0_debug(
        logger,
        f"{key}: Type embedding with {self.max_types} types with dim {self.orig_dim}. "
        f"Setting merge_func to be {self.merge_func.__name__} in type emb.",
    )
@classmethod
def prep(cls, data_config, emb_args, entity_symbols):
    """Prep the type id table.
    Loads a previously cached table from disk when allowed; otherwise
    builds it from the type label/vocab files and caches the result.
    Args:
        data_config: data config
        emb_args: embedding args
        entity_symbols: entity symbols
    Returns: torch tensor from EID to type IDS, type ID to row in type embedding matrix,
    and number of types with unk type
    """
    # Cache filename encodes the labels file and max_types so differing
    # configurations never collide on the same prep file.
    type_str = os.path.splitext(emb_args.type_labels.replace("/", "_"))[0]
    prep_dir = data_utils.get_emb_prep_dir(data_config)
    prep_file = os.path.join(
        prep_dir, f"type_table_{type_str}_{emb_args.max_types}.pt"
    )
    utils.ensure_dir(os.path.dirname(prep_file))
    if not data_config.overwrite_preprocessed_data and os.path.exists(prep_file):
        log_rank_0_debug(logger, f"Loading existing type table from {prep_file}")
        start = time.time()
        eid2typeids_table, type2row_dict, num_types_with_unk = torch.load(prep_file)
        log_rank_0_debug(
            logger,
            f"Loaded existing type table in {round(time.time() - start, 2)}s",
        )
    else:
        start = time.time()
        type_labels = os.path.join(data_config.emb_dir, emb_args.type_labels)
        type_vocab = os.path.join(data_config.emb_dir, emb_args.type_vocab)
        log_rank_0_debug(logger, f"Building type table from {type_labels}")
        eid2typeids_table, type2row_dict, num_types_with_unk = cls.build_type_table(
            type_labels=type_labels,
            type_vocab=type_vocab,
            max_types=emb_args.max_types,
            entity_symbols=entity_symbols,
        )
        # Persist for future runs with the same configuration.
        torch.save(
            (eid2typeids_table, type2row_dict, num_types_with_unk), prep_file
        )
        log_rank_0_debug(
            logger,
            f"Finished building and saving type table in {round(time.time() - start, 2)}s.",
        )
    return eid2typeids_table, type2row_dict, num_types_with_unk, prep_file
@classmethod
def build_type_table(cls, type_labels, type_vocab, max_types, entity_symbols):
    """Builds the EID to type ids table.
    Args:
        type_labels: QID to type ids or type names json mapping
        type_vocab: type name to type ids
        max_types: maximum number of types for an entity
        entity_symbols: entity symbols
    Returns: torch tensor from EID to type IDS, type ID to row in type embedding matrix,
    and number of types with unk type
    """
    with open(type_vocab) as f:
        vocab = json.load(f)
    all_type_ids = set(list(vocab.values()))
    assert (
        0 not in all_type_ids
    ), f"The type id of 0 is reserved for UNK type. Please offset the typeids by 1"
    # all eids are initially assigned to unk types
    # if they occur in the type file, then they are assigned the types in the file plus padded types
    eid2typeids = torch.zeros(
        entity_symbols.num_entities_with_pad_and_nocand, max_types
    )
    # Row 0 (the null/no-candidate row) explicitly stays all-unk.
    eid2typeids[0] = torch.zeros(1, max_types)
    max_type_id_all = max(all_type_ids)
    type_hit = 0
    type2row_dict = {}
    with open(type_labels) as f:
        qid2typeid = json.load(f)
        for qid, row_types in qid2typeid.items():
            if not entity_symbols.qid_exists(qid):
                continue
            # -1 marks padded slots; they are remapped to the pad row below.
            typeids = torch.ones(max_types) * -1
            if len(row_types) > 0:
                type_hit += 1
                # Type ids are expected to already be offset by 1 for the
                # unk row (asserted below); names go through the vocab map.
                typeids_list = []
                for type_id_or_name in row_types:
                    # If typename, map to typeid
                    if type(type_id_or_name) is str:
                        type_id = vocab[type_id_or_name]
                    else:
                        type_id = type_id_or_name
                    assert (
                        type_id > 0
                    ), f"Typeid for {qid} is 0. That is reserved. Please offset by 1"
                    assert (
                        type_id in all_type_ids
                    ), f"Typeid for {qid} isn't in vocab"
                    typeids_list.append(type_id)
                    # Identity mapping: the embedding-matrix row for a type
                    # is the type id itself.
                    type2row_dict[type_id] = type_id
                # Truncate to at most max_types types per entity.
                num_types = min(len(typeids_list), max_types)
                typeids[:num_types] = torch.tensor(typeids_list)[:num_types]
                eid2typeids[entity_symbols.get_eid(qid)] = typeids
    # + 1 bc we need to account for pad row
    labeled_num_types = max_type_id_all + 1
    # assign padded types to the last row of the type embedding
    # NOTE(review): the assert below is vacuously true as written (both
    # sides equal max_type_id_all + 1); it presumably intended to verify the
    # labels file introduced no type id beyond the vocab -- confirm intent.
    assert (max_type_id_all + 1) <= labeled_num_types
    eid2typeids[eid2typeids == -1] = labeled_num_types
    log_rank_0_debug(
        logger,
        f"{round(type_hit/entity_symbols.num_entities, 2)*100}% of entities are assigned types",
    )
    return eid2typeids.long(), type2row_dict, labeled_num_types
@classmethod
def load_regularization_mapping(
    cls, data_config, num_types_with_pad_and_unk, type2row_dict, reg_file
):
    """Reads in a csv file with columns [typeid, regularization].
    In the forward pass, the type embedding row for typeid will be
    regularized with probability regularization.
    Args:
        data_config: data config
        num_types_with_pad_and_unk: number of types including pad and unk rows
        type2row_dict: Dict from typeID to row id in the type embedding matrix
        reg_file: regularization csv file
    Returns: Tensor where each entry is the regularization value for that type row
    """
    # Cache the parsed mapping alongside the other preprocessed data.
    reg_str = os.path.splitext(os.path.basename(reg_file.replace("/", "_")))[0]
    prep_dir = data_utils.get_data_prep_dir(data_config)
    prep_file = os.path.join(prep_dir, f"type_regularization_mapping_{reg_str}.pt")
    utils.ensure_dir(os.path.dirname(prep_file))
    log_rank_0_debug(logger, f"Looking for regularization mapping in {prep_file}")
    if not data_config.overwrite_preprocessed_data and os.path.exists(prep_file):
        log_rank_0_debug(
            logger,
            f"Loading existing entity regularization mapping from {prep_file}",
        )
        start = time.time()
        typeid2reg = torch.load(prep_file)
        log_rank_0_debug(
            logger,
            f"Loaded existing entity regularization mapping in {round(time.time() - start, 2)}s",
        )
    else:
        start = time.time()
        log_rank_0_debug(
            logger, f"Building entity regularization mapping from {reg_file}"
        )
        typeid2reg_raw = pd.read_csv(reg_file)
        assert (
            "typeid" in typeid2reg_raw.columns
            and "regularization" in typeid2reg_raw.columns
        ), f"Expected typeid and regularization as the column names for {reg_file}"
        # default of no mask
        typeid2reg_arr = [0.0] * num_types_with_pad_and_unk
        for row_idx, row in typeid2reg_raw.iterrows():
            # Happens when we filter QIDs not in our entity db and the max typeid is smaller than the total number
            if int(row["typeid"]) not in type2row_dict:
                continue
            typeid = type2row_dict[int(row["typeid"])]
            typeid2reg_arr[typeid] = row["regularization"]
        typeid2reg = torch.Tensor(typeid2reg_arr)
        torch.save(typeid2reg, prep_file)
        log_rank_0_debug(
            logger,
            f"Finished building and saving entity regularization mapping in {round(time.time() - start, 2)}s.",
        )
    return typeid2reg
def _selective_avg_types(self, type_ids, embeds):
    """Selects the average embedding, ignoring padded types.
    Args:
        type_ids: type ids (4-D; the last dim holds the K type slots --
            the .sum(3) below relies on that)
        embeds: embeddings, one per type slot
    Returns: average embedding
    """
    # mask of True means keep in the average: exclude the pad row (largest
    # id) and the unk row (id 0).
    mask = (type_ids < (self.num_types_with_pad_and_unk - 1)) & (type_ids > 0)
    average_val = selective_avg(mask, embeds)
    # Candidates whose type slots are *all* unk would average to zero;
    # substitute the unk-type embedding for those instead.
    num_unk_types = (type_ids == 0).sum(3) == type_ids.shape[-1]
    unk_types = torch.where(
        num_unk_types.unsqueeze(3),
        embeds[:, :, :, 0],
        torch.zeros_like(average_val),
    )
    return average_val + unk_types
def average_types(self, batch_type_ids, batch_type_emb, extras=None):
"""Averages the type embeddings for each candidate.
Args:
batch_type_ids: type ids for a batch
batch_type_emb: type embeddings for a batch
extras: extras to allow for | |
# Source: AquaBindi/libsass-python-freebsd -- sass.py (extraction artifact converted to comment)
""":mod:`sass` --- Binding of ``libsass``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This simple C extension module provides a very simple binding of ``libsass``,
which is written in C/C++. It contains only one function and one exception
type.
>>> import sass
>>> sass.compile(string='a { b { color: blue; } }')
'a b {\n color: blue; }\n'
"""
from __future__ import absolute_import
import collections
import inspect
import os
import os.path
import re
import sys
import warnings
from six import string_types, text_type
from _sass import OUTPUT_STYLES, compile_filename, compile_string
__all__ = ('MODES', 'OUTPUT_STYLES', 'SOURCE_COMMENTS', 'CompileError',
           'SassColor', 'SassError', 'SassFunction', 'SassList', 'SassMap',
           'SassNumber', 'SassWarning', 'and_join', 'compile')
__version__ = '0.8.0'
#: (:class:`collections.Mapping`) The dictionary of output styles.
#: Keys are output name strings, and values are flag integers.
# Re-bound at module level (value comes from the _sass C extension) so the
# Sphinx ``#:`` doc comment above attaches to it.
OUTPUT_STYLES = OUTPUT_STYLES
#: (:class:`collections.Mapping`) The dictionary of source comments styles.
#: Keys are mode names, and values are corresponding flag integers.
#:
#: .. versionadded:: 0.4.0
#:
#: .. deprecated:: 0.6.0
SOURCE_COMMENTS = {'none': 0, 'line_numbers': 1, 'default': 1, 'map': 2}
#: (:class:`collections.Set`) The set of keywords :func:`compile()` can take.
MODES = set(['string', 'filename', 'dirname'])
class CompileError(ValueError):
    """Raised by :func:`compile()` when compilation fails for any reason.
    Subclasses :exc:`exceptions.ValueError`.
    """
def mkdirp(path):
    """Create *path* (with parents) like ``mkdir -p``; a pre-existing
    directory is not an error."""
    try:
        os.makedirs(path)
    except OSError:
        # Tolerate only the already-exists case (including races where
        # another process created it); re-raise everything else.
        if not os.path.isdir(path):
            raise
class SassFunction(object):
    """Custom function for Sass.  Instances can also be built with
    :meth:`from_lambda()` and :meth:`from_named_function()`.
    :param name: the function name
    :type name: :class:`str`
    :param arguments: the argument names
    :type arguments: :class:`collections.Sequence`
    :param callable_: the actual function to be called
    :type callable_: :class:`collections.Callable`
    .. versionadded:: 0.7.0
    """
    __slots__ = 'name', 'arguments', 'callable_'
    @classmethod
    def from_lambda(cls, name, lambda_):
        """Wrap the anonymous ``lambda_`` as a :class:`SassFunction`.
        Lambdas carry no usable name of their own, so ``name`` must be
        supplied; the argument list is discovered via introspection.
        :param name: the function name
        :type name: :class:`str`
        :param lambda_: the actual lambda function to be called
        :type lambda_: :class:`types.LambdaType`
        :returns: a custom function wrapper of the ``lambda_`` function
        :rtype: :class:`SassFunction`
        """
        argspec = inspect.getargspec(lambda_)
        # Sass signatures cannot express *args/**kwargs or defaults.
        unsupported = argspec.varargs or argspec.keywords or argspec.defaults
        if unsupported:
            raise TypeError(
                'functions cannot have starargs or defaults: {0} {1}'.format(
                    name, lambda_
                )
            )
        return cls(name, argspec.args, lambda_)
    @classmethod
    def from_named_function(cls, function):
        """Wrap the named ``function`` as a :class:`SassFunction`; name and
        arguments are both discovered via introspection.
        :param function: the named function to be called
        :type function: :class:`types.FunctionType`
        :returns: a custom function wrapper of the ``function``
        :rtype: :class:`SassFunction`
        """
        function_name = getattr(function, '__name__', '')
        if not function_name:
            raise TypeError('function must be named')
        return cls.from_lambda(function_name, function)
    def __init__(self, name, arguments, callable_):
        # Validate in a fixed order so error behavior stays predictable.
        if not isinstance(name, string_types):
            raise TypeError('name must be a string, not ' + repr(name))
        elif not isinstance(arguments, collections.Sequence):
            raise TypeError('arguments must be a sequence, not ' +
                            repr(arguments))
        elif not callable(callable_):
            raise TypeError(repr(callable_) + ' is not callable')
        self.name = name
        # Normalize every argument name to carry the Sass '$' sigil.
        prefixed = []
        for arg in arguments:
            prefixed.append(arg if arg.startswith('$') else '$' + arg)
        self.arguments = tuple(prefixed)
        self.callable_ = callable_
    @property
    def signature(self):
        """Signature string of the function."""
        joined_args = ', '.join(self.arguments)
        return '{0}({1})'.format(self.name, joined_args)
    def __call__(self, *args, **kwargs):
        # Delegate straight to the wrapped callable.
        return self.callable_(*args, **kwargs)
    def __str__(self):
        return self.signature
def compile_dirname(
    search_path, output_path, output_style, source_comments, include_paths,
    precision, custom_functions,
):
    """Compile every non-partial ``.scss`` file under *search_path* into a
    mirrored tree of ``.css`` files under *output_path*.
    Returns ``(True, None)`` on success, or ``(False, error_message)`` for
    the first file that ``compile_filename`` rejects.
    """
    fs_encoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
    for dirpath, _, filenames in os.walk(search_path):
        # Keep only .scss sources; a leading underscore marks a Sass
        # partial, which is only ever @import-ed, never compiled directly.
        filenames = [
            filename for filename in filenames
            if filename.endswith('.scss') and not filename.startswith('_')
        ]
        for filename in filenames:
            input_filename = os.path.join(dirpath, filename)
            # Mirror the source tree layout under output_path.
            relpath_to_file = os.path.relpath(input_filename, search_path)
            output_filename = os.path.join(output_path, relpath_to_file)
            output_filename = re.sub('.scss$', '.css', output_filename)
            # The C extension expects a byte-string path.
            input_filename = input_filename.encode(fs_encoding)
            s, v, _ = compile_filename(
                input_filename, output_style, source_comments, include_paths,
                precision, None, custom_functions,
            )
            if s:
                v = v.decode('UTF-8')
                mkdirp(os.path.dirname(output_filename))
                with open(output_filename, 'w') as output_file:
                    output_file.write(v)
            else:
                # Abort on the first compilation failure.
                return False, v
    return True, None
def compile(**kwargs):
"""There are three modes of parameters :func:`compile()` can take:
``string``, ``filename``, and ``dirname``.
The ``string`` parameter is the most basic way to compile SASS.
It simply takes a string of SASS code, and then returns a compiled
CSS string.
:param string: SASS source code to compile. it's exclusive to
``filename`` and ``dirname`` parameters
:type string: :class:`str`
:param output_style: an optional coding style of the compiled result.
choose one of: ``'nested'`` (default), ``'expanded'``,
``'compact'``, ``'compressed'``
:type output_style: :class:`str`
:param source_comments: whether to add comments about source lines.
:const:`False` by default
:type source_comments: :class:`bool`
:param include_paths: an optional list of paths to find ``@import``\ ed
SASS/CSS source files
:type include_paths: :class:`collections.Sequence`, :class:`str`
:param precision: optional precision for numbers. :const:`5` by default.
:type precision: :class:`int`
:param custom_functions: optional mapping of custom functions.
see also below `custom functions
<custom-functions>`_ description
:type custom_functions: :class:`collections.Set`,
:class:`collections.Sequence`,
:class:`collections.Mapping`
:returns: the compiled CSS string
:rtype: :class:`str`
:raises sass.CompileError: when it fails for any reason
(for example the given SASS has broken syntax)
The ``filename`` is the most commonly used way. It takes a string of
SASS filename, and then returns a compiled CSS string.
:param filename: the filename of SASS source code to compile.
it's exclusive to ``string`` and ``dirname`` parameters
:type filename: :class:`str`
:param output_style: an optional coding style of the compiled result.
choose one of: ``'nested'`` (default), ``'expanded'``,
``'compact'``, ``'compressed'``
:type output_style: :class:`str`
:param source_comments: whether to add comments about source lines.
:const:`False` by default
:type source_comments: :class:`bool`
:param source_map_filename: use source maps and indicate the source map
output filename. :const:`None` means not
using source maps. :const:`None` by default.
note that it implies ``source_comments``
is also :const:`True`
:type source_map_filename: :class:`str`
:param include_paths: an optional list of paths to find ``@import``\ ed
SASS/CSS source files
:type include_paths: :class:`collections.Sequence`, :class:`str`
:param precision: optional precision for numbers. :const:`5` by default.
:type precision: :class:`int`
:param custom_functions: optional mapping of custom functions.
see also below `custom functions
<custom-functions>`_ description
:type custom_functions: :class:`collections.Set`,
:class:`collections.Sequence`,
:class:`collections.Mapping`
:returns: the compiled CSS string, or a pair of the compiled CSS string
and the source map string if ``source_comments='map'``
:rtype: :class:`str`, :class:`tuple`
:raises sass.CompileError: when it fails for any reason
(for example the given SASS has broken syntax)
:raises exceptions.IOError: when the ``filename`` doesn't exist or
cannot be read
The ``dirname`` is useful for automation. It takes a pair of paths.
The first of the ``dirname`` pair refers the source directory, contains
several SASS source files to compiled. SASS source files can be nested
in directories. The second of the pair refers the output directory
that compiled CSS files would be saved. Directory tree structure of
the source directory will be maintained in the output directory as well.
If ``dirname`` parameter is used the function returns :const:`None`.
:param dirname: a pair of ``(source_dir, output_dir)``.
it's exclusive to ``string`` and ``filename``
parameters
:type dirname: :class:`tuple`
:param output_style: an optional coding style of the compiled result.
choose one of: ``'nested'`` (default), ``'expanded'``,
``'compact'``, ``'compressed'``
:type output_style: :class:`str`
:param source_comments: whether to add comments about source lines.
:const:`False` by default
:type source_comments: :class:`bool`
:param include_paths: an optional list of paths to find ``@import``\ ed
SASS/CSS source files
:type include_paths: :class:`collections.Sequence`, :class:`str`
:param precision: optional precision for numbers. :const:`5` by default.
:type precision: :class:`int`
:param custom_functions: optional mapping of custom functions.
see also below `custom functions
<custom-functions>`_ description
:type custom_functions: :class:`collections.Set`,
:class:`collections.Sequence`,
:class:`collections.Mapping`
:raises sass.CompileError: when it fails for any reason
(for example the given SASS has broken syntax)
.. _custom-functions:
The ``custom_functions`` parameter can take three types of forms:
:class:`~collections.Set`/:class:`~collections.Sequence` of \
:class:`SassFunction`\ s
It is the most general form. Although pretty verbose, it can take
any kind of callables like type objects, unnamed functions,
and user-defined callables.
.. code-block:: python
sass.compile(
...,
custom_functions={
sass.SassFunction('func-name', ('$a', '$b'), some_callable),
...
}
)
:class:`~collections.Mapping` of names to functions
Less general, but easier-to-use form. Although it's not it can take
any kind of callables, it can take any kind of *functions* defined
using :keyword:`def`/:keyword:`lambda` syntax.
It cannot take callables other than them since inspecting arguments
is not always available for every kind of callables.
.. code-block:: python
sass.compile(
...,
custom_functions={
'func-name': lambda a, b: ...,
...
}
)
:class:`~collections.Set`/:class:`~collections.Sequence` of \
named functions
Not general, but the easiest-to-use form for *named* functions.
It can take only named functions, defined using :keyword:`def`.
It cannot take lambdas sinc names are unavailable for them.
.. code-block:: python
def func_name(a, b):
return ...
sass.compile(
...,
custom_functions={func_name}
)
.. versionadded:: 0.4.0
Added ``source_comments`` and ``source_map_filename`` parameters.
.. versionchanged:: 0.6.0
The | |
0
needNodes[nexti][offset] = True
# Will need this offset when variable evaluates to 1
noffset = esys.mbox.add(offset, nnz[previ])
needNodes[nexti][noffset] = True
previ = nexti
# Now build BDD from bottom up
rilist = list(ilist)
rilist.reverse()
if esys.mbox.modulus <= modulusAuto:
# Unbounded range. Need to consider all possible offsets
lasti = rilist[0]
needLeaves = {}
for offset in needNodes[lasti].keys():
needLeaves[offset] = True
noffset = offset + nnz[lasti]
needLeaves[noffset] = True
valueList = needLeaves.keys()
else:
valueList = esys.mbox.values()
leafList = { offset : (esys.manager.leaf1 if offset == ncval else esys.manager.leaf0) for offset in valueList }
nodes = { i : {} for i in rilist }
lasti = rilist[0]
for offset in needNodes[lasti].keys():
low = leafList[offset]
noffset = esys.mbox.add(offset, nnz[lasti])
high = leafList[noffset]
var = esys.varMap[lasti]
root = low if low == high else esys.manager.findOrMake(var, high, low)
nodes[lasti][offset] = root
nexti = lasti
for previ in rilist[1:]:
for offset in needNodes[previ].keys():
low = nodes[nexti][offset]
noffset = esys.mbox.add(offset, nnz[previ])
high = nodes[nexti][noffset]
var = esys.varMap[previ]
root = low if low == high else esys.manager.findOrMake(var, high, low)
nodes[previ][offset] = root
nexti = previ
self.root = nodes[ilist[0]][0]
self.size = esys.manager.getSize(self.root)
# Remove reference to BDD when no longer needed
def retireBdd(self):
    # Dropping the root reference lets the BDD manager reclaim the nodes.
    self.root = None
    self.size = 0
# Does this equation have no solution with modular arithmetic
def isInfeasible(self):
    # Infeasible exactly when every coefficient is zero (len() counts the
    # nonzero terms) while the constant term is nonzero.
    return len(self) == 0 and self.cval != 0
def __str__(self):
    # Human-readable sparse rendering (delegates to formatSparse).
    return self.formatSparse()
# Maintain set of sparse equations, including index from each index i to those equations having nonzero value there
class EquationSet:
    # Unique ID assigned when registered
    nextId = 1
    # Mapping from id to equation
    equDict = {}
    # Mapping from index to list of equation IDs having nonzero entry at that index
    nzMap = {}
    # Total number of nonzero terms added
    termCount = 0
    # Largest equation added
    termMax = 0
    def __init__(self, elist = [], writer = None):
        self.nextId = 1
        self.writer = writer if writer is not None else SimpleWriter()
        self.equDict = {}
        self.nzMap = {}
        self.termCount = 0
        self.termMax = 0
        for equation in elist:
            self.addEquation(equation)
    def addIndex(self, eid, idx):
        # Append eid to the posting list for idx, creating it on first use.
        self.nzMap.setdefault(idx, []).append(eid)
    def removeIndex(self, eid, idx):
        remaining = [other for other in self.nzMap[idx] if other != eid]
        if remaining:
            self.nzMap[idx] = remaining
        else:
            # Drop empty posting lists so currentIndices stays accurate.
            del self.nzMap[idx]
    def analyzeEquation(self, e):
        # Track aggregate statistics over all equations ever added.
        size = len(e)
        self.termCount += size
        if size > self.termMax:
            self.termMax = size
    def addEquation(self, e, assignId = False):
        eid = self.nextId
        self.nextId += 1
        if assignId:
            e.evalId = eid
        self.equDict[eid] = e
        for idx in e.nz:
            self.addIndex(eid, idx)
        self.analyzeEquation(e)
        return eid
    def removeEquation(self, eid):
        equation = self[eid]
        equation.id = None
        for idx in equation.nz:
            self.removeIndex(eid, idx)
        del self.equDict[eid]
    def lookup(self, idx):
        # Equations with a nonzero entry at idx; empty list when none.
        return self.nzMap.get(idx, [])
    def rootList(self):
        return [equation.root for equation in self.equDict.values()]
    def __getitem__(self, id):
        return self.equDict[id]
    def __len__(self):
        return len(self.equDict)
    def currentEids(self):
        return list(self.equDict)
    def currentIndices(self):
        return list(self.nzMap)
    def show(self):
        for eid in sorted(self.currentEids()):
            self.writer.write(" #%d:%s\n" % (eid, str(self[eid])))
    # How many total equations have been generated
    def equationCount(self):
        return self.nextId - 1
# Support pivot selection
# Must update scores whenever possible change to any nonzero in same row as pivot.
class PivotHelper:
    # Tracked indices.  Each carries a generation marking the most recent
    # score computation; older queue entries are ignored when popped.
    generationMap = {}
    # Indices affected since the last pivot selection (dict used as a set).
    touchedSet = {}
    # Priority queue holding tuples (score, idx, ..., generation).
    pqueue = None
    # Callback producing the score tuple for an index (score first).
    evalFunction = None
    def __init__(self, evalFunction):
        self.evalFunction = evalFunction
        self.generationMap = {}
        self.touchedSet = {}
        self.pqueue = queue.PriorityQueue()
    def touch(self, ids):
        for idx in ids:
            self.touchedSet[idx] = True
            # First touch registers the index at generation 0.
            self.generationMap.setdefault(idx, 0)
    def deleteIndex(self, id):
        # Quietly forget the index; stale queue entries get skipped later.
        self.generationMap.pop(id, None)
        self.touchedSet.pop(id, None)
    def update(self):
        # Re-score every touched index and enqueue with a fresh generation.
        for idx in list(self.touchedSet):
            scored = self.evalFunction(idx)
            self.generationMap[idx] += 1
            self.pqueue.put(scored + (self.generationMap[idx],))
        self.touchedSet = {}
    def select(self):
        self.update()
        while not self.pqueue.empty():
            qtup = self.pqueue.get()
            idx = qtup[1]
            generation = qtup[-1]
            # Accept only live indices whose generation is current.
            if idx is not None and self.generationMap.get(idx) == generation:
                return qtup[:-1]
        raise QueueException("Queue emptied out without finding pivot")
# System of equations.
# Support LU decomposition of Gaussian elimination to see if system has any solutions
class EquationSystem:
# Variable count: number of variables in the system
N = 10
# Arithmetic modulus (modulusAuto selects it automatically)
modulus = modulusAuto
verbose = False
# Class to support math operations
mbox = None
writer = None
## Solver state
# Eliminated equations
sset = None
# Remaining equations
rset = None
# When not doing justifications, record equations and their dependencies for future justification
# Set of equations involved in evaluation, including the leaf equations
eset = None
# Each element is tuple (eid, [opidlist]). For original equations, opidlist is empty
justificationSteps = []
# Supporting BDD operation
manager = None
# Mapping from variable Id to variable
varMap = None
# Mapping from variable Id to level
levelMap = None
# Supporting pivot selection
pivotHelper = None
## Accumulating data
# Mapping from variable ID to True
varUsed = {}
# Total number of elimination steps
# (NOTE(review): an orphaned "Number of equations" comment sat here with no
# matching attribute; equation counts live in the EquationSet members.)
stepCount = 0
# Number of times pivot evaluation computed
pivotEvaluationCount = 0
# Sum of pivot degrees
pivotDegreeSum = 0
# Max of pivot degrees
pivotDegreeMax = 0
# Total number of vector operations
combineCount = 0
def __init__(self, N, modulus = modulusAuto, verbose = True, manager = None, writer = None):
    """Set up an empty system over N variables.
    manager, when supplied, is the BDD manager used to build and justify
    equation BDDs; writer defaults to a SimpleWriter.
    """
    self.N = N
    self.modulus = modulus
    self.verbose = verbose
    self.justificationSteps = []
    self.manager = manager
    if manager is not None:
        # Cache id -> variable and id -> level lookups from the manager.
        self.varMap = { var.id : var for var in manager.variables }
        self.levelMap = { var.id : var.level for var in manager.variables }
    self.writer = SimpleWriter() if writer is None else writer
    self.mbox = ModMath(modulus)
    # sset: eliminated; rset: remaining; eset: evaluation record.
    self.sset = EquationSet(writer = self.writer)
    self.rset = EquationSet(writer = self.writer)
    self.eset = EquationSet(writer = self.writer)
    self.pivotHelper = PivotHelper(self.evaluatePivot)
    self.varUsed = {}
    self.stepCount = 0
    self.pivotEvaluationCount = 0
    self.pivotDegreeSum = 0
    self.pivotDegreeMax = 0
    self.combineCount = 0
# Add new equation to main set
def addInitialEquation(self, e):
    """Register equation e in the remaining set, mark its variables used,
    and touch its indices for pivot scoring.  Returns the new eid."""
    eid = self.rset.addEquation(e)
    for i in e.nz:
        self.varUsed[i] = True
    self.pivotHelper.touch(e.indices())
    # delayJustification is a module-level flag (defined outside this view):
    # when set, record the equation for later proof generation.
    if delayJustification:
        evid = self.eset.addEquation(e, assignId = True)
        self.justificationSteps.append((evid,[]))
    if self.manager is not None:
        # Build the BDD representation eagerly when a manager is available.
        e.buildBdd(self)
    return eid
# Construct BDD representation of equation and generate its justification
# operandList is list of equations from which this one was derived
def justifyEquation(self, e, operandList):
e.buildBdd(self)
rvList = [(eq.root,eq.validation) for eq in operandList]
done = False
while not done and len(rvList) > 2:
r1,v1 = rvList[0]
r2,v2 = rvList[1]
validation = None
antecedents = [v1,v2]
nr,imp = self.manager.applyAndJustify(r1, r2)
if nr == self.manager.leaf0:
comment = "Validation of Empty clause"
done = True
else:
comment = "Validation of %s" % nr.label()
if imp == resolver.tautologyId:
if nr == r1:
validation = v1
elif nr == r2:
validation = v2
else:
antecedents += [imp]
if validation is None:
validation = self.manager.prover.createClause([nr.id], antecedents, comment)
rvList = [(nr,validation)] + rvList[2:]
if not done:
if len(rvList) == 2:
# Do final conjunction and implication in combination
r1,v1 = rvList[0]
r2,v2 = rvList[1]
antecedents = [v1,v2]
check, implication = self.manager.applyAndJustifyImply(r1, r2, e.root)
if not check:
raise ProofGenerationException("Implication failed when spawning equation %s: %s % %s -/-> %s\n" % (str(e), r1.label(), r2.label(), e.root.label()))
else:
r1, v1 = rvList[0]
antecedents = [v1]
check, implication = self.manager.justifyImply(r1, e.root)
if not check:
raise ProofGenerationException("Implication failed when spawning equation %s: %s -/-> %s\n" % (str(e), r1.label(), e.root.label()))
if implication != resolver.tautologyId:
antecedents += [implication]
done = e.root == self.manager.leaf0
if done:
comment = | |
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2016 MIT Probabilistic Computing Project
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import itertools
import math
import warnings
from collections import defaultdict
from math import lgamma
from math import log
import numpy as np
from cgpm.utils import validation as vu
from cgpm.cgpm import CGpm
# Number of positional arguments in the generic CGpm.simulate signature.
# Use __code__ (available on Python 2.6+ methods and Python 3 functions)
# instead of the Python-2-only func_code alias, which was removed in Py3.
CGPM_SIMULATE_NARGS = CGpm.simulate.__code__.co_argcount
# Default palette for plotting cluster curves; curve_color falls back to
# gray past the end of this list.
colors = ['red', 'blue', 'green', 'magenta', 'orange', 'purple', 'brown',
    'black']
def gen_rng(seed=None):
    """Return a numpy RandomState; a random seed is drawn when none is given."""
    if seed is not None:
        return np.random.RandomState(seed)
    return np.random.RandomState(np.random.randint(low=1, high=2**31))
def get_prng(seed=None):
    """Alias of gen_rng: return a (possibly randomly seeded) RandomState."""
    actual_seed = np.random.randint(low=1, high=2**31) if seed is None else seed
    return np.random.RandomState(actual_seed)
def curve_color(k):
    """(color, alpha) pair for the k-th curve; gray fallback past the palette."""
    if k < len(colors):
        return (colors[k], .7)
    return ('gray', .3)
def merged(*dicts):
    """Merge the given dicts left to right; later keys win on collision."""
    combined = {}
    for mapping in dicts:
        combined.update(mapping)
    return combined
def mergedl(dicts):
    """Merge a list (iterable) of dicts; thin wrapper over merged()."""
    return merged(*dicts)
def lchain(*args):
    """Concatenate the given iterables into a single list."""
    return list(itertools.chain.from_iterable(args))
def flatten_cgpms(cgpms, tpe):
    """Expand each cgpm of type `tpe` into its constituent .cgpms, in order;
    other entries pass through unchanged."""
    flat = []
    for cgpm in cgpms:
        if isinstance(cgpm, tpe):
            flat.extend(cgpm.cgpms)
        else:
            flat.append(cgpm)
    return flat
def is_disjoint(*args):
    """True when no element is common to *all* of the given collections."""
    return not set.intersection(*map(set, args))
def get_intersection(left, right):
    """Restrict `right` (dict or list) to the keys/items present in `left`.

    When `right` is None, returns an empty container whose type mirrors
    `left` (dict -> {}, otherwise []).
    """
    if right is None:
        return {} if isinstance(left, dict) else []
    if isinstance(right, dict):
        return {key: val for key, val in right.items() if key in left}
    if isinstance(right, list):
        return [item for item in right if item in left]
    assert False, 'Unknown args type.'
def log_normalize(logp):
    """Normalize an array of log probabilities (sums to 1 in prob space)."""
    total = logsumexp(logp)
    return np.subtract(logp, total)
def normalize(p):
    """Rescale a vector of nonnegative weights so it sums to one."""
    weights = np.asarray(p, dtype=float)
    return weights / sum(p)
def logp_crp(N, Nk, alpha):
    """Return the log normalized P(N,K|alpha), where N is the number of
    customers and K is the number of tables.
    http://gershmanlab.webfactional.com/pubs/GershmanBlei12.pdf#page=4 (eq 8)
    """
    # Builtin `sum` rather than `np.sum`: feeding a generator to np.sum is
    # deprecated and, depending on the numpy version, may warn or fail to
    # reduce; the addends are plain floats anyway.
    return len(Nk)*log(alpha) + sum(lgamma(c) for c in Nk) \
        + lgamma(alpha) - lgamma(N+alpha)
def logp_crp_unorm(N, K, alpha):
    """Log *unnormalized* P(N,K|alpha): only the alpha-dependent terms of the
    CRP mass, avoiding per-table work when comparing alpha values."""
    log_alpha = log(alpha)
    return K*log_alpha + lgamma(alpha) - lgamma(N+alpha)
def logp_crp_gibbs(Nk, Z, i, alpha, m):
    """Compute the CRP probabilities for a Gibbs transition of customer i,
    with table counts Nk, table assignments Z, and m auxiliary tables.

    Returns log probabilities over existing tables (sorted key order when Nk
    is a dict) followed by the auxiliary tables. When customer i is a
    singleton, its own table plays the role of one auxiliary table, so only
    m-1 extra entries are appended.
    """
    # XXX F ME
    # `range` instead of `xrange`: works on both Python 2 and 3.
    K = sorted(Nk) if isinstance(Nk, dict) else range(len(Nk))
    singleton = Nk[Z[i]] == 1
    m_aux = m-1 if singleton else m
    p_table_aux = alpha/float(m)
    # Customer i is removed from its table: a singleton's table becomes an
    # auxiliary table, otherwise its count drops by one.
    p_current = lambda: p_table_aux if singleton else Nk[Z[i]]-1
    p_other = lambda t: Nk[t]
    p_table = lambda t: p_current() if t == Z[i] else p_other(t)
    return [log(p_table(t)) for t in K] + [log(p_table_aux)]*m_aux
def logp_crp_fresh(N, Nk, alpha, m=1):
    """Log CRP seating probabilities for a brand-new customer i=N+1, given
    table counts Nk, N=sum(Nk) existing customers, and m auxiliary tables."""
    numerators = np.log(Nk + [alpha/m]*m)
    denominator = log(N + alpha)
    return numerators - denominator
def log_pflip(logp, array=None, size=None, rng=None):
    """Categorical draw given *log* probabilities; see `pflip`."""
    probs = np.exp(log_normalize(logp))
    return pflip(probs, array=array, size=size, rng=rng)
def pflip(p, array=None, size=None, rng=None):
    """Categorical draw from a vector p of (possibly unnormalized) probs.

    `array` holds the outcome labels (defaults to indices), `size` requests
    multiple draws, and `rng` is a RandomState (a fresh one is created when
    omitted).
    """
    if array is None:
        array = range(len(p))
    if len(p) == 1:
        # Degenerate distribution: answer directly, consuming no randomness.
        return array[0] if size is None else [array[0]] * size
    rng = gen_rng() if rng is None else rng
    p = normalize(p)
    if math.fabs(1.-sum(p)) > 10.**(-8.):
        warnings.warn('pflip probability vector sums to %f.' % sum(p))
    return rng.choice(array, size=size, p=p)
def logsumexp(array):
    """Numerically stable log(sum(exp(a) for a in array)).

    Ported from probcomp/bayeslite math_util.
    """
    if not len(array):
        return float('-inf')
    m = max(array)
    # m = +inf: every addend is +inf, so sum and log are too. m = -inf:
    # every addend is zero and the log is -inf. Mixing +inf with -inf, or
    # any NaN input, falls through so the arithmetic below yields NaN.
    no_nans = all(not math.isnan(a) for a in array)
    if math.isinf(m) and min(array) != -m and no_nans:
        return m
    # a <= m for every a, hence exp(a - m) <= 1: no overflow possible.
    return m + math.log(sum(math.exp(a - m) for a in array))
def logmeanexp(array):
    """Log of the arithmetic mean of exp(array), computed stably.

    Ported from probcomp/bayeslite math_util.
    """
    inf = float('inf')
    if len(array) == 0:
        # logsumexp would DTRT (-inf), but math.log(len(array)) would fail.
        return -inf
    # -inf entries are exact log-0: they contribute nothing to the sum but
    # still count toward the mean's denominator. Stripping them (rather than
    # passing them to logsumexp) avoids a spurious NaN when +inf values are
    # also present: averaging exp(-inf)=0 with exp(+inf)=+inf should give
    # +inf, not NaN. NaNs are deliberately kept (`not a == -inf` is true for
    # NaN) so they propagate.
    kept = [a for a in array if not a == -inf]
    # log(mean(p)) = log(sum(p)/n) = logsumexp(log p) - log(n).
    return logsumexp(kept) - math.log(len(array))
def logmeanexp_weighted(log_A, log_W):
    """Log of the W-weighted mean of exp(log_A), entirely in log space:

        log (sum_i W_i A_i / sum_i W_i)
          = logsumexp(log W_i + log A_i) - logsumexp(log W_i)

    Ported from probcomp/bayeslite math_util. Pathological inputs
    (infinities, NaNs) receive no special handling here.
    """
    assert len(log_W) == len(log_A)
    joint = [lw + la for lw, la in zip(log_W, log_A)]
    return logsumexp(joint) - logsumexp(log_W)
def log_linspace(a, b, n):
    """Return n points from a to b, evenly spaced on the log scale."""
    grid = np.linspace(log(a), log(b), n)
    return np.exp(grid)
def log_nCk(n, k):
    """log(choose(n,k)) computed through lgamma to avoid overflow."""
    if k == 0 or n == 0 or n == k:
        return 0
    # log x! = log(x) + lgamma(x), since lgamma(x) = log (x-1)!.
    log_fact = lambda x: log(x) + lgamma(x)
    return log_fact(n) - log_fact(k) - log_fact(n-k)
def simulate_crp(N, alpha, rng=None):
    """Generate a random N-length partition from the CRP with parameter alpha.

    Returns a list of 0-indexed, contiguous table assignments; customer 0 is
    always seated at table 0.
    """
    if rng is None:
        rng = gen_rng()
    assert N > 0 and alpha > 0.
    alpha = float(alpha)
    partition = [0]*N
    Nk = [1]    # Table occupancy counts.
    # `range` rather than `xrange`: works on both Python 2 and 3.
    for i in range(1, N):
        K = len(Nk)
        ps = np.zeros(K+1)
        for k in range(K):
            ps[k] = float(Nk[k])
        ps[K] = alpha
        # i customers are already seated (sum(Nk) == i), so the CRP
        # normalizer is i + alpha. The previous `i - 1 + alpha` was off by
        # one; the draws were unaffected (pflip renormalizes) but pflip's
        # mis-normalization warning fired on every iteration.
        ps /= (float(i) + alpha)
        assignment = pflip(ps, rng=rng)
        if assignment == K:
            Nk.append(1)            # Open a new table.
        elif assignment < K:
            Nk[assignment] += 1     # Join an existing table.
        else:
            raise ValueError("Invalid assignment: %i, max=%i" % (assignment, K))
        partition[i] = assignment
    assert max(partition)+1 == len(Nk)
    assert len(partition) == N
    assert sum(Nk) == N
    return partition
def simulate_crp_constrained(N, alpha, Cd, Ci, Rd, Ri, rng=None):
"""Simulates a CRP with N customers and concentration alpha. Cd is a list,
where each entry is a list of friends. Ci is a list of tuples, where each
tuple is a pair of enemies."""
if rng is None:
rng = gen_rng()
vu.validate_crp_constrained_input(N, Cd, Ci, Rd, Ri)
assert N > 0 and alpha > 0.
# Initial partition.
Z = [-1]*N
# Friends dictionary from Cd.
friends = {col: block for block in Cd for col in block}
# Assign customers.
for cust in xrange(N):
# If the customer has been assigned, skip.
if Z[cust] > -1:
continue
# Find valid tables for cust and friends.
assert all(Z[f] == -1 for f in friends.get(cust, [cust]))
prob_table = [0] * (max(Z)+1)
for t in xrange(max(Z)+1):
# Current customers at table t.
| |
from unittest.mock import Mock
from twisted.cred import portal
from twisted.cred.checkers import AllowAnonymousAccess, InMemoryUsernamePasswordDatabaseDontUse
from twisted.internet import reactor, defer
from twisted.spread import pb
from jasmin.interceptor.configs import InterceptorPBConfig, InterceptorPBClientConfig
from jasmin.interceptor.interceptor import InterceptorPB
from jasmin.interceptor.proxies import InterceptorPBProxy
from jasmin.protocols.smpp.stats import SMPPServerStatsCollector
from jasmin.routing.Filters import TagFilter
from jasmin.routing.Interceptors import DefaultInterceptor
from jasmin.routing.Routes import StaticMTRoute
from jasmin.routing.jasminApi import *
from jasmin.routing.proxies import RouterPBProxy
from tests.routing.test_router import SubmitSmTestCaseTools
from tests.routing.test_router_smpps import SMPPClientTestCases
from jasmin.tools.cred.portal import JasminPBRealm
from jasmin.tools.spread.pb import JasminPBPortalRoot
from smpp.pdu import pdu_types
@defer.inlineCallbacks
def waitFor(seconds):
    """Pause the inlineCallbacks chain for `seconds`, letting the reactor run."""
    d = defer.Deferred()
    reactor.callLater(seconds, d.callback, None)
    yield d
class ProvisionWithoutInterceptorPB:
    """Test mixin that provisions a default MT interceptor whose script is
    deliberately broken, without starting any InterceptorPB server."""
    # Deliberately invalid Python source: executing it in the interceptor
    # must fail.
    script = 'Default script that generates a syntax error !'

    @defer.inlineCallbacks
    def setUp(self):
        # Set up the SMPP client harness, forwarding the interceptor client
        # when a subclass (ProvisionInterceptorPB) has created one.
        if hasattr(self, 'ipb_client'):
            yield SMPPClientTestCases.setUp(self, interceptorpb_client=self.ipb_client)
        else:
            yield SMPPClientTestCases.setUp(self)
        # Connect to RouterPB
        yield self.connect('127.0.0.1', self.pbPort)
        # Provision mt interceptor
        self.mt_interceptor = MTInterceptorScript(self.script)
        yield self.mtinterceptor_add(DefaultInterceptor(self.mt_interceptor), 0)
        # Disconnect from RouterPB
        self.disconnect()
        # Get stats singletons
        self.stats_smpps = SMPPServerStatsCollector().get(cid=self.smpps_config.id)

    @defer.inlineCallbacks
    def tearDown(self):
        yield SMPPClientTestCases.tearDown(self)
class ProvisionInterceptorPB(ProvisionWithoutInterceptorPB):
    """Extends ProvisionWithoutInterceptorPB by launching an InterceptorPB
    server and providing a lazily-connected client proxy for it."""
    @defer.inlineCallbacks
    def setUp(self, authentication=False):
        "This will launch InterceptorPB and provide a client connected to it."
        # Launch a client in a disconnected state
        # it will be connected on demand through the self.ipb_connect() method
        self.ipb_client = InterceptorPBProxy()
        yield ProvisionWithoutInterceptorPB.setUp(self)
        # Initiating config objects without any filename
        # will lead to setting defaults and that's what we
        # need to run the tests
        InterceptorPBConfigInstance = InterceptorPBConfig()
        # Launch the interceptor server
        pbInterceptor_factory = InterceptorPB(InterceptorPBConfigInstance)
        # Configure portal
        p = portal.Portal(JasminPBRealm(pbInterceptor_factory))
        if not authentication:
            p.registerChecker(AllowAnonymousAccess())
        else:
            c = InMemoryUsernamePasswordDatabaseDontUse()
            # NOTE(review): `md5` is not imported explicitly in this module;
            # presumably it is re-exported by the star import from
            # jasmin.routing.jasminApi -- confirm, otherwise this branch
            # raises NameError when authentication=True.
            c.addUser('test_user', md5('test_password'.encode('ascii')).digest())
            p.registerChecker(c)
        jPBPortalRoot = JasminPBPortalRoot(p)
        # Listen on an OS-assigned port (0) and remember it for the clients.
        self.pbInterceptor_server = reactor.listenTCP(0, pb.PBServerFactory(jPBPortalRoot))
        self.pbInterceptor_port = self.pbInterceptor_server.getHost().port

    @defer.inlineCallbacks
    def ipb_connect(self, config=None):
        # Connect self.ipb_client to the InterceptorPB server; anonymous
        # unless the provided config carries a username.
        if config is None:
            # Default test config (username is None for anonymous connection)
            config = InterceptorPBClientConfig()
            config.username = None
            config.port = self.pbInterceptor_port
        if config.username is not None:
            yield self.ipb_client.connect(
                config.host,
                config.port,
                config.username,
                config.password
            )
        else:
            yield self.ipb_client.connect(
                config.host,
                config.port
            )

    @defer.inlineCallbacks
    def tearDown(self):
        yield ProvisionWithoutInterceptorPB.tearDown(self)
        # Disconnect ipb and shutdown pbInterceptor_server
        if self.ipb_client.isConnected:
            self.ipb_client.disconnect()
        yield self.pbInterceptor_server.stopListening()
class SmppsSubmitSmNoInterceptorPBTestCases(ProvisionWithoutInterceptorPB, RouterPBProxy, SMPPClientTestCases,
                                            SubmitSmTestCaseTools):
    """An interceptor is provisioned but no InterceptorPB client is set:
    delivery must fail with ESME_RSYSERR and count as an interceptor error."""
    @defer.inlineCallbacks
    def test_interceptorpb_not_set(self):
        # Snapshot stats counters so assertions below are relative.
        _ic = self.stats_smpps.get('interceptor_count')
        _iec = self.stats_smpps.get('interceptor_error_count')
        yield self.connect('127.0.0.1', self.pbPort)
        yield self.prepareRoutingsAndStartConnector()
        # Bind
        yield self.smppc_factory.connectAndBind()
        # Install mocks
        self.smpps_factory.lastProto.sendPDU = Mock(wraps=self.smpps_factory.lastProto.sendPDU)
        # Send a SMS MT through smpps interface
        yield self.smppc_factory.lastProto.sendDataRequest(self.SubmitSmPDU)
        # Wait 3 seconds for submit_sm_resp
        yield waitFor(3)
        # Unbind & Disconnect
        yield self.smppc_factory.smpp.unbindAndDisconnect()
        yield self.stopSmppClientConnectors()
        # Run tests on final destination smpp server (third party mocker):
        # nothing must have been delivered.
        self.assertEqual(0, len(self.SMSCPort.factory.lastClient.submitRecords))
        # Run tests on Jasmin's SMPPs
        self.assertEqual(self.smpps_factory.lastProto.sendPDU.call_count, 2)
        # smpps response was a submit_sm_resp with ESME_RSYSERR and no message_id
        response_pdu = self.smpps_factory.lastProto.sendPDU.call_args_list[0][0][0]
        self.assertEqual(response_pdu.id, pdu_types.CommandId.submit_sm_resp)
        self.assertEqual(response_pdu.seqNum, 2)
        self.assertEqual(response_pdu.status, pdu_types.CommandStatus.ESME_RSYSERR)
        self.assertTrue('message_id' not in response_pdu.params)
        self.assertEqual(_ic, self.stats_smpps.get('interceptor_count'))
        self.assertEqual(_iec + 1, self.stats_smpps.get('interceptor_error_count'))
class SmppsSubmitSmInterceptionTestCases(ProvisionInterceptorPB, RouterPBProxy, SMPPClientTestCases,
SubmitSmTestCaseTools):
update_message_sript = "routable.pdu.params['short_message'] = 'Intercepted message'"
raise_any_exception = "raise Exception('Exception from interceptor script')"
return_ESME_RINVESMCLASS = "smpp_status = 67"
return_HTTP_300 = "http_status = 300"
    @defer.inlineCallbacks
    def test_interceptorpb_not_connected(self):
        """Interceptor provisioned but the InterceptorPB client never
        connected: delivery fails with ESME_RSYSERR and the error counter
        increments."""
        _ic = self.stats_smpps.get('interceptor_count')
        _iec = self.stats_smpps.get('interceptor_error_count')
        yield self.connect('127.0.0.1', self.pbPort)
        yield self.prepareRoutingsAndStartConnector()
        # Bind
        yield self.smppc_factory.connectAndBind()
        # Install mocks
        self.smpps_factory.lastProto.sendPDU = Mock(wraps=self.smpps_factory.lastProto.sendPDU)
        # Send a SMS MT through smpps interface
        yield self.smppc_factory.lastProto.sendDataRequest(self.SubmitSmPDU)
        # Wait 3 seconds for submit_sm_resp
        yield waitFor(3)
        # Unbind & Disconnect
        yield self.smppc_factory.smpp.unbindAndDisconnect()
        yield self.stopSmppClientConnectors()
        # Run tests on final destination smpp server (third party mocker)
        self.assertEqual(0, len(self.SMSCPort.factory.lastClient.submitRecords))
        # Run tests on Jasmin's SMPPs
        self.assertEqual(self.smpps_factory.lastProto.sendPDU.call_count, 2)
        # smpps response was a submit_sm_resp with ESME_RSYSERR and no message_id
        response_pdu = self.smpps_factory.lastProto.sendPDU.call_args_list[0][0][0]
        self.assertEqual(response_pdu.id, pdu_types.CommandId.submit_sm_resp)
        self.assertEqual(response_pdu.seqNum, 2)
        self.assertEqual(response_pdu.status, pdu_types.CommandStatus.ESME_RSYSERR)
        self.assertTrue('message_id' not in response_pdu.params)
        self.assertEqual(_ic, self.stats_smpps.get('interceptor_count'))
        self.assertEqual(_iec + 1, self.stats_smpps.get('interceptor_error_count'))
    @defer.inlineCallbacks
    def test_syntax_error(self):
        """The default (syntactically broken) interceptor script makes the
        interceptor fail: delivery ends with ESME_RSYSERR."""
        _ic = self.stats_smpps.get('interceptor_count')
        _iec = self.stats_smpps.get('interceptor_error_count')
        # Connect to InterceptorPB
        yield self.ipb_connect()
        yield self.connect('127.0.0.1', self.pbPort)
        yield self.prepareRoutingsAndStartConnector()
        # Bind
        yield self.smppc_factory.connectAndBind()
        # Install mocks
        self.smpps_factory.lastProto.sendPDU = Mock(wraps=self.smpps_factory.lastProto.sendPDU)
        # Send a SMS MT through smpps interface
        yield self.smppc_factory.lastProto.sendDataRequest(self.SubmitSmPDU)
        # Wait 3 seconds for submit_sm_resp
        yield waitFor(3)
        # Unbind & Disconnect
        yield self.smppc_factory.smpp.unbindAndDisconnect()
        yield self.stopSmppClientConnectors()
        # Run tests on final destination smpp server (third party mocker)
        self.assertEqual(0, len(self.SMSCPort.factory.lastClient.submitRecords))
        # Run tests on Jasmin's SMPPs
        self.assertEqual(self.smpps_factory.lastProto.sendPDU.call_count, 2)
        # smpps response was a submit_sm_resp with ESME_RSYSERR and no message_id
        response_pdu = self.smpps_factory.lastProto.sendPDU.call_args_list[0][0][0]
        self.assertEqual(response_pdu.id, pdu_types.CommandId.submit_sm_resp)
        self.assertEqual(response_pdu.seqNum, 2)
        self.assertEqual(response_pdu.status, pdu_types.CommandStatus.ESME_RSYSERR)
        self.assertTrue('message_id' not in response_pdu.params)
        self.assertEqual(_ic, self.stats_smpps.get('interceptor_count'))
        self.assertEqual(_iec + 1, self.stats_smpps.get('interceptor_error_count'))
    @defer.inlineCallbacks
    def test_success(self):
        """A valid interceptor script rewrites short_message; delivery
        succeeds with ESME_ROK and the success counter increments."""
        _ic = self.stats_smpps.get('interceptor_count')
        _iec = self.stats_smpps.get('interceptor_error_count')
        # Re-provision interceptor with correct script
        # Connect to RouterPB
        yield self.connect('127.0.0.1', self.pbPort)
        mt_interceptor = MTInterceptorScript(self.update_message_sript)
        yield self.mtinterceptor_add(DefaultInterceptor(mt_interceptor), 0)
        # Disconnect from RouterPB
        self.disconnect()
        # Connect to InterceptorPB
        yield self.ipb_connect()
        yield self.connect('127.0.0.1', self.pbPort)
        yield self.prepareRoutingsAndStartConnector()
        # Bind
        yield self.smppc_factory.connectAndBind()
        # Install mocks
        self.smpps_factory.lastProto.sendPDU = Mock(wraps=self.smpps_factory.lastProto.sendPDU)
        # Send a SMS MT through smpps interface
        yield self.smppc_factory.lastProto.sendDataRequest(self.SubmitSmPDU)
        # Wait 3 seconds for submit_sm_resp
        yield waitFor(3)
        # Unbind & Disconnect
        yield self.smppc_factory.smpp.unbindAndDisconnect()
        yield self.stopSmppClientConnectors()
        # Run tests on final destination smpp server (third party mocker):
        # exactly one message delivered.
        self.assertEqual(1, len(self.SMSCPort.factory.lastClient.submitRecords))
        # Run tests on Jasmin's SMPPs
        self.assertEqual(self.smpps_factory.lastProto.sendPDU.call_count, 2)
        # smpps response was a submit_sm_resp with ESME_ROK
        response_pdu = self.smpps_factory.lastProto.sendPDU.call_args_list[0][0][0]
        self.assertEqual(response_pdu.id, pdu_types.CommandId.submit_sm_resp)
        self.assertEqual(response_pdu.seqNum, 2)
        self.assertEqual(response_pdu.status, pdu_types.CommandStatus.ESME_ROK)
        self.assertNotEqual(None, response_pdu.params['message_id'])
        # Message content has been updated
        self.assertEqual(b'Intercepted message',
                         self.SMSCPort.factory.lastClient.submitRecords[0].params['short_message'])
        self.assertEqual(_ic + 1, self.stats_smpps.get('interceptor_count'))
        self.assertEqual(_iec, self.stats_smpps.get('interceptor_error_count'))
    @defer.inlineCallbacks
    def test_any_exception_from_script(self):
        """A script raising an arbitrary exception yields ESME_RSYSERR and
        increments the error counter."""
        _ic = self.stats_smpps.get('interceptor_count')
        _iec = self.stats_smpps.get('interceptor_error_count')
        # Re-provision interceptor with script raising an exception
        # Connect to RouterPB
        yield self.connect('127.0.0.1', self.pbPort)
        mt_interceptor = MTInterceptorScript(self.raise_any_exception)
        yield self.mtinterceptor_add(DefaultInterceptor(mt_interceptor), 0)
        # Disconnect from RouterPB
        self.disconnect()
        # Connect to InterceptorPB
        yield self.ipb_connect()
        yield self.connect('127.0.0.1', self.pbPort)
        yield self.prepareRoutingsAndStartConnector()
        # Bind
        yield self.smppc_factory.connectAndBind()
        # Install mocks
        self.smpps_factory.lastProto.sendPDU = Mock(wraps=self.smpps_factory.lastProto.sendPDU)
        # Send a SMS MT through smpps interface
        yield self.smppc_factory.lastProto.sendDataRequest(self.SubmitSmPDU)
        # Wait 3 seconds for submit_sm_resp
        yield waitFor(3)
        # Unbind & Disconnect
        yield self.smppc_factory.smpp.unbindAndDisconnect()
        yield self.stopSmppClientConnectors()
        # Run tests on final destination smpp server (third party mocker)
        self.assertEqual(0, len(self.SMSCPort.factory.lastClient.submitRecords))
        # Run tests on Jasmin's SMPPs
        self.assertEqual(self.smpps_factory.lastProto.sendPDU.call_count, 2)
        # smpps response was a submit_sm_resp with ESME_RSYSERR and no message_id
        response_pdu = self.smpps_factory.lastProto.sendPDU.call_args_list[0][0][0]
        self.assertEqual(response_pdu.id, pdu_types.CommandId.submit_sm_resp)
        self.assertEqual(response_pdu.seqNum, 2)
        self.assertEqual(response_pdu.status, pdu_types.CommandStatus.ESME_RSYSERR)
        self.assertTrue('message_id' not in response_pdu.params)
        self.assertEqual(_ic, self.stats_smpps.get('interceptor_count'))
        self.assertEqual(_iec + 1, self.stats_smpps.get('interceptor_error_count'))
    @defer.inlineCallbacks
    def test_ESME_RINVESMCLASS_from_script(self):
        """A script setting smpp_status = 67 makes Jasmin return
        ESME_RINVESMCLASS to the client; counted as an interceptor error."""
        _ic = self.stats_smpps.get('interceptor_count')
        _iec = self.stats_smpps.get('interceptor_error_count')
        # Re-provision interceptor with script returning a ESME_RINVESMCLASS
        # Connect to RouterPB
        yield self.connect('127.0.0.1', self.pbPort)
        mt_interceptor = MTInterceptorScript(self.return_ESME_RINVESMCLASS)
        yield self.mtinterceptor_add(DefaultInterceptor(mt_interceptor), 0)
        # Disconnect from RouterPB
        self.disconnect()
        # Connect to InterceptorPB
        yield self.ipb_connect()
        yield self.connect('127.0.0.1', self.pbPort)
        yield self.prepareRoutingsAndStartConnector()
        # Bind
        yield self.smppc_factory.connectAndBind()
        # Install mocks
        self.smpps_factory.lastProto.sendPDU = Mock(wraps=self.smpps_factory.lastProto.sendPDU)
        # Send a SMS MT through smpps interface
        yield self.smppc_factory.lastProto.sendDataRequest(self.SubmitSmPDU)
        # Wait 3 seconds for submit_sm_resp
        yield waitFor(3)
        # Unbind & Disconnect
        yield self.smppc_factory.smpp.unbindAndDisconnect()
        yield self.stopSmppClientConnectors()
        # Run tests on final destination smpp server (third party mocker)
        self.assertEqual(0, len(self.SMSCPort.factory.lastClient.submitRecords))
        # Run tests on Jasmin's SMPPs
        self.assertEqual(self.smpps_factory.lastProto.sendPDU.call_count, 2)
        # smpps response was a submit_sm_resp with ESME_RINVESMCLASS and no message_id
        response_pdu = self.smpps_factory.lastProto.sendPDU.call_args_list[0][0][0]
        self.assertEqual(response_pdu.id, pdu_types.CommandId.submit_sm_resp)
        self.assertEqual(response_pdu.seqNum, 2)
        self.assertEqual(response_pdu.status, pdu_types.CommandStatus.ESME_RINVESMCLASS)
        self.assertTrue('message_id' not in response_pdu.params)
        self.assertEqual(_ic, self.stats_smpps.get('interceptor_count'))
        self.assertEqual(_iec + 1, self.stats_smpps.get('interceptor_error_count'))
    @defer.inlineCallbacks
    def test_HTTP_300_from_script(self):
        "Will ensure if script defines only http error it will implicitly cause a smpp ESME_RUNKNOWNERR error"
        _ic = self.stats_smpps.get('interceptor_count')
        _iec = self.stats_smpps.get('interceptor_error_count')
        # Re-provision interceptor with script returning a HTTP 300
        # Connect to RouterPB
        yield self.connect('127.0.0.1', self.pbPort)
        mt_interceptor = MTInterceptorScript(self.return_HTTP_300)
        yield self.mtinterceptor_add(DefaultInterceptor(mt_interceptor), 0)
        # Disconnect from RouterPB
        self.disconnect()
        # Connect to InterceptorPB
        yield self.ipb_connect()
        yield self.connect('127.0.0.1', self.pbPort)
        yield self.prepareRoutingsAndStartConnector()
        # Bind
        yield self.smppc_factory.connectAndBind()
        # Install mocks
        self.smpps_factory.lastProto.sendPDU = Mock(wraps=self.smpps_factory.lastProto.sendPDU)
        # Send a SMS MT through smpps interface
        yield self.smppc_factory.lastProto.sendDataRequest(self.SubmitSmPDU)
        # Wait 3 seconds for submit_sm_resp
        yield waitFor(3)
        # Unbind & Disconnect
        yield self.smppc_factory.smpp.unbindAndDisconnect()
        yield self.stopSmppClientConnectors()
        # Run tests on final destination smpp server (third party mocker)
        self.assertEqual(0, len(self.SMSCPort.factory.lastClient.submitRecords))
        # Run tests on Jasmin's SMPPs
        self.assertEqual(self.smpps_factory.lastProto.sendPDU.call_count, 2)
        # smpps response was a submit_sm_resp with ESME_RUNKNOWNERR and no message_id
        response_pdu = self.smpps_factory.lastProto.sendPDU.call_args_list[0][0][0]
        self.assertEqual(response_pdu.id, pdu_types.CommandId.submit_sm_resp)
        self.assertEqual(response_pdu.seqNum, 2)
        self.assertEqual(response_pdu.status, pdu_types.CommandStatus.ESME_RUNKNOWNERR)
        self.assertTrue('message_id' not in response_pdu.params)
        self.assertEqual(_ic, self.stats_smpps.get('interceptor_count'))
        self.assertEqual(_iec + 1, self.stats_smpps.get('interceptor_error_count'))
@defer.inlineCallbacks
def test_tagging(self):
"""Refs #495
Will tag message inside interceptor script and assert
routing based tagfilter were correctly done
"""
# Re-provision interceptor with correct script
# Connect to RouterPB
yield self.connect('127.0.0.1', self.pbPort)
mt_interceptor = MTInterceptorScript("routable.addTag(10)")
yield self.mtinterceptor_add(DefaultInterceptor(mt_interceptor), 0)
# Disconnect from RouterPB
self.disconnect()
# Connect to InterceptorPB
yield self.ipb_connect()
yield self.connect('127.0.0.1', self.pbPort)
yield self.prepareRoutingsAndStartConnector()
# Change routing rules by shadowing (high order value) default route with a
# static route having a tagfilter
yield self.mtroute_flush()
yield self.mtroute_add(StaticMTRoute([TagFilter(10)], self.c1, 0.0), 1000)
# Bind
yield self.smppc_factory.connectAndBind()
# Send a SMS MT through smpps interface
yield self.smppc_factory.lastProto.sendDataRequest(self.SubmitSmPDU)
# Wait 3 seconds for submit_sm_resp
yield waitFor(3)
# Unbind & Disconnect
yield self.smppc_factory.smpp.unbindAndDisconnect()
yield self.stopSmppClientConnectors()
# Run tests on | |
import os
import numpy as np
import pandas as pd
import six
import csv
from .core import pop_na, STReader, STBuffer
from .core import SortOrder
from .core import csv_headers
from bmtk.utils.io import bmtk_world_comm
"""
class DiskBuffer(object):
def __init__(self, cache_dir, **kwargs):
self.cache_dir = cache_dir
self.mpi_rank = kwargs.get('MPI_rank', MPI_rank)
self.mpi_size = kwargs.get('MPI_size', MPI_size)
self.cached_fname = os.path.join(self.cache_dir, '.spikes.cache.node{}.csv'.format(self.mpi_rank))
if self.mpi_rank == 0:
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
barrier()
self.cached_fhandle = open(self.cached_fname, 'w')
self._csv_reader = None
def append(self, timestamp, population, node_id):
self.cached_fhandle.write('{} {} {}\n'.format(timestamp, population, node_id))
def extend(self, timestamps, population, node_id):
pass
def __iter__(self):
self.cached_fhandle.flush()
barrier()
self._csv_reader = csv.reader(open(self.cached_fname, 'r'), delimiter=' ')
return self
def __next__(self):
#try:
r = next(self._csv_reader)
return [np.float64(r[0]), r[1], np.uint64(r[2])]
#except StopIteration:
# return StopIteration
#exit()
#return next(self._csv_reader)
next = __next__
class MemoryBuffer(object):
pass
class STBufferedWriter(STBuffer, STReader):
def __init__(self, buffer_dir=None, default_pop=None, **kwargs):
if default_pop is None or isinstance(default_pop, six.string_types) or np.isscalar(default_pop):
self.default_pop = pop_na
elif len(default_pop) == 1:
self.default_pop = default_pop[0]
else:
self.default_pop = Exception
self._buffer = DiskBuffer('cache')
# self._populations = set([default_pop])
self._populations_counts = {self.default_pop: 0}
self._units = kwargs.get('units', 'ms')
def add_spike(self, node_id, timestamp, population=None):
if population is None:
population = self.default_pop
if population not in self._populations_counts:
self._populations_counts[population] = 0
self._populations_counts[population] += 1
# self._populations.add(population)
self._buffer.write(timestamp=timestamp, population=population, node_id=node_id)
def add_spikes(self, node_ids, timestamps, population=None, **kwargs):
if population is None:
population = self.default_pop
if np.isscalar(node_ids):
for ts in timestamps:
self.add_spike(node_ids, ts, population)
else:
if len(node_ids) != len(timestamps):
raise Exception('timestamps and node_ids must be of the same length.')
for node_id, ts in zip(node_ids, timestamps):
self.add_spike(node_id, ts, population)
def import_spikes(self, obj):
pass
def flush(self):
pass
@property
def populations(self):
return list(self._populations_counts.keys())
@property
def units(self):
return self._units
@units.setter
def units(self, v):
self._units = v
def nodes(self, populations=None):
raise NotImplementedError()
def n_spikes(self, population=None):
return self._populations_counts[population]
def time_range(self, populations=None):
raise NotImplementedError()
def get_times(self, node_id, population=None, time_window=None, **kwargs):
raise NotImplementedError()
def to_dataframe(self, node_ids=None, populations=None, time_window=None, sort_order=SortOrder.none, **kwargs):
raise NotImplementedError()
def spikes(self, node_ids=None, populations=None, time_window=None, sort_order=SortOrder.none, **kwargs):
if sort_order == SortOrder.by_time or sort_order == SortOrder.by_time:
raise Exception("Can't sort by time or node-id")
for n in self._buffer:
if n[1] == populations:
yield n
raise StopIteration
def __len__(self):
return len(self.to_dataframe())
"""
def _spikes_filter1(p, t, time_window, populations):
return p in populations and time_window[0] <= t <= time_window[1]
def _spikes_filter2(p, t, populations):
return p in populations
def _spikes_filter3(p, t, time_window):
return time_window[0] <= t <= time_window[1]
def _create_filter(populations, time_window):
from functools import partial
if populations is None and time_window is None:
return lambda p, t: True
if populations is None:
return partial(_spikes_filter3, time_window=time_window)
populations = [populations] if np.isscalar(populations) else populations
if time_window is None:
return partial(_spikes_filter2, populations=populations)
else:
return partial(_spikes_filter1, populations=populations, time_window=time_window)
class STMemoryBuffer(STBuffer, STReader):
    """ A Class for creating, storing and reading multi-population spike-trains - especially for saving the spikes of a
    large scale network simulation. Keeps a running tally of the (timestamp, population-name, node_id) for each
    individual spike.
    The spikes are stored in memory and very large and/or epiletic simulations may run into memory issues. Not designed
    to work with parallel simulations.
    """
    def __init__(self, default_population=None, **kwargs):
        # Population used when add_spike()/add_spikes() is called without one.
        self._default_population = default_population or pop_na
        # TODO: look into storing data using numpy arrays or pandas series.
        self._node_ids = []
        self._timestamps = []
        self._populations = []
        self._pop_counts = {self._default_population: 0}  # spikes per population
        self._units = kwargs.get('units', 'ms')  # backwards-compatible default

    def add_spike(self, node_id, timestamp, population=None, **kwargs):
        """Record a single (node_id, timestamp) spike event."""
        population = population or self._default_population
        self._node_ids.append(node_id)
        self._timestamps.append(timestamp)
        self._populations.append(population)
        self._pop_counts[population] = self._pop_counts.get(population, 0) + 1

    def add_spikes(self, node_ids, timestamps, population=None, **kwargs):
        """Record many spikes; a scalar node_ids is broadcast over timestamps."""
        if np.isscalar(node_ids):
            node_ids = [node_ids]*len(timestamps)
        for node_id, ts in zip(node_ids, timestamps):
            self.add_spike(node_id, ts, population)

    def import_spikes(self, obj, **kwargs):
        pass

    def flush(self):
        pass  # not necessary since everything is stored in memory

    def close(self):
        pass  # don't need to do anything

    @property
    def populations(self):
        """Population names seen so far (including the default)."""
        return list(self._pop_counts.keys())

    def nodes(self, populations=None):
        """Unique node ids recorded so far (populations filter unimplemented)."""
        return list(set(self._node_ids))

    @property
    def units(self):
        """Time units of the stored timestamps (e.g. 'ms')."""
        return self._units

    @units.setter
    def units(self, v):
        self._units = v

    def n_spikes(self, population=None):
        """Spike count for `population`, defaulting to the default population.

        Fix: previously a None population looked up the literal key None and
        always returned 0, even when spikes had been counted under the
        default population.
        """
        population = population or self._default_population
        return self._pop_counts.get(population, 0)

    def time_range(self, populations=None):
        """(min, max) timestamp over all recorded spikes."""
        return np.min(self._timestamps), np.max(self._timestamps)

    def get_times(self, node_id, population=None, time_window=None, **kwargs):
        """All spike times for `node_id` in `population`, optionally windowed."""
        population = population or self._default_population
        mask = (np.array(self._node_ids) == node_id) & (np.array(self._populations) == population)
        ts = np.array(self._timestamps)
        if time_window:
            mask &= (time_window[0] <= ts) & (ts <= time_window[1])
        return ts[mask]

    def to_dataframe(self, node_ids=None, populations=None, time_window=None, sort_order=SortOrder.none, **kwargs):
        # TODO: Filter by population, node-id and time
        # TODO: Sort dataframe if needed
        return pd.DataFrame({'node_id': self._node_ids, 'population': self._populations, 'timestamps': self._timestamps})

    def spikes(self, node_ids=None, populations=None, time_window=None, sort_order=SortOrder.none, **kwargs):
        """Yield (timestamp, population, node_id), optionally filtered/sorted."""
        if sort_order == SortOrder.by_time:
            sort_indx = np.argsort(self._timestamps)
        elif sort_order == SortOrder.by_id:
            # Fix: this branch previously re-tested SortOrder.by_time, which
            # made sorting by node id unreachable.
            sort_indx = np.argsort(self._node_ids)
        else:
            sort_indx = range(len(self._timestamps))
        keep = _create_filter(populations, time_window)
        for i in sort_indx:
            t = self._timestamps[i]
            p = self._populations[i]
            if keep(p=p, t=t):
                yield t, p, self._node_ids[i]
        return

    def __len__(self):
        return len(self.to_dataframe())
class STCSVBuffer(STBuffer, STReader):
    """ A Class for creating, storing and reading multi-population spike-trains - especially for saving the spikes of a
    large scale network simulation. Keeps a running tally of the (timestamp, population-name, node_id) for each
    individual spike.

    Uses a caching mechanism to periodically save spikes to the disk. Will encure a runtime performance penality but
    will always have an upper bound on the maximum memory used.

    If running parallel simulations should use the STMPIBuffer adaptor instead.
    """
    def __init__(self, cache_dir=None, default_population=None, **kwargs):
        self._default_population = default_population or pop_na

        # Keep a file handle open for writing spike information
        self._cache_dir = cache_dir or '.'
        self._buffer_filename = self._cache_fname(self._cache_dir)
        self._buffer_handle = open(self._buffer_filename, 'w')

        # population name -> running spike count (kept in memory; the spikes
        # themselves live only on disk)
        self._pop_counts = {self._default_population: 0}
        self._nspikes = 0
        self._units = kwargs.get('units', 'ms')

    def _cache_fname(self, cache_dir):
        """Return the cache-file path, creating the cache directory if missing."""
        # TODO: Potential problem if multiple SpikeTrains are opened at the same time, add salt to prevent collisions
        if not os.path.exists(self._cache_dir):
            # FIX: os.mkdirs() does not exist; the original raised
            # AttributeError whenever the cache dir was missing.
            os.makedirs(self._cache_dir)
        return os.path.join(cache_dir, '.bmtk.spikes.cache.csv')

    def add_spike(self, node_id, timestamp, population=None, **kwargs):
        """Record one spike; it is written straight to the cache file."""
        population = population or pop_na

        # NOTE: I looked into using a in-memory buffer to save data and caching only when they reached a threshold,
        # however on my computer it was actually slower than just calling file.write() each time. Likely the python
        # file writer is more efficent than what I could write. However still would like to benchmark on a NSF.
        self._buffer_handle.write('{} {} {}\n'.format(timestamp, population, node_id))
        self._nspikes += 1
        self._pop_counts[population] = self._pop_counts.get(population, 0) + 1

    def add_spikes(self, node_ids, timestamps, population=None, **kwargs):
        """Record many spikes; node_ids may be a scalar (one node, many times)
        or a sequence zipped pairwise with timestamps."""
        if np.isscalar(node_ids):
            for ts in timestamps:
                self.add_spike(node_ids, ts, population)
        else:
            for node_id, ts in zip(node_ids, timestamps):
                self.add_spike(node_id, ts, population)

    @property
    def populations(self):
        """Names of populations seen so far (always includes the default)."""
        return list(self._pop_counts.keys())

    @property
    def units(self):
        """Units of the spike timestamps (e.g. 'ms')."""
        return self._units

    @units.setter
    def units(self, v):
        self._units = v

    def nodes(self, populations=None):
        """Return the unique node ids that have at least one recorded spike."""
        # FIX: the original returned list(set(self._node_ids)), but this class
        # never defines _node_ids (spikes live on disk) and always raised
        # AttributeError. Scan the cache file instead.
        return list({node_id for _, _, node_id in self.spikes(populations=populations)})

    def n_spikes(self, population=None):
        """Spike count for *population* (the default population when None)."""
        population = population or self._default_population
        return self._pop_counts.get(population, 0)

    def time_range(self, populations=None):
        """Return (min, max) spike timestamp, or (None, None) when empty."""
        # FIX: the original referenced self._timestamps, which this class never
        # defines; compute the range by streaming the cached spikes.
        t_min, t_max = None, None
        for t, _, _ in self.spikes(populations=populations):
            t_min = t if t_min is None or t < t_min else t_min
            t_max = t if t_max is None or t > t_max else t_max
        return t_min, t_max

    def get_times(self, node_id, population=None, time_window=None, **kwargs):
        """Spike times of one node, optionally filtered by population/window."""
        # FIX: the original compared t[1] (the population name) to node_id, so
        # it always returned an empty array; the node id is element 2. It also
        # passed `population=` (swallowed by **kwargs) instead of `populations=`.
        return np.array([t[0] for t in self.spikes(populations=population, time_window=time_window) if t[2] == node_id])

    def to_dataframe(self, node_ids=None, populations=None, time_window=None, sort_order=SortOrder.none, **kwargs):
        """Load the cached spikes into a DataFrame (node_id, population, timestamps).

        Previously raised NotImplementedError; now built from the cache file.
        A None filter means "keep everything".
        """
        self.flush()
        self._sort_buffer_file(self._buffer_filename, sort_order)
        df = pd.read_csv(self._buffer_filename, sep=' ', names=['timestamps', 'population', 'node_id'])
        df = df[['node_id', 'population', 'timestamps']]
        if populations is not None:
            wanted_pops = [populations] if isinstance(populations, str) else list(populations)
            df = df[df['population'].isin(wanted_pops)]
        if node_ids is not None:
            wanted_ids = [node_ids] if np.isscalar(node_ids) else list(node_ids)
            df = df[df['node_id'].isin(wanted_ids)]
        if time_window is not None:
            df = df[(df['timestamps'] >= time_window[0]) & (df['timestamps'] <= time_window[1])]
        return df

    def flush(self):
        """Flush pending writes so the on-disk cache is complete."""
        self._buffer_handle.flush()

    def close(self):
        """Close the write handle and delete the cache file."""
        self._buffer_handle.close()
        if os.path.exists(self._buffer_filename):
            os.remove(self._buffer_filename)

    def spikes(self, node_ids=None, populations=None, time_window=None, sort_order=SortOrder.none, **kwargs):
        """Iterate the cached spikes as (timestamp, population, node_id) tuples."""
        self.flush()
        self._sort_buffer_file(self._buffer_filename, sort_order)
        # Renamed from `filter` to avoid shadowing the builtin.
        spike_filter = _create_filter(populations, time_window)
        with open(self._buffer_filename, 'r') as csvfile:
            csv_reader = csv.reader(csvfile, delimiter=' ')
            for row in csv_reader:
                t = float(row[0])
                p = row[1]
                if spike_filter(p=p, t=t):
                    yield t, p, int(row[2])

    def _sort_buffer_file(self, file_name, sort_order):
        """Sort the on-disk cache in place by time or node id; no-op otherwise."""
        if sort_order == SortOrder.by_time:
            sort_col = 'time'
        elif sort_order == SortOrder.by_id:
            sort_col = 'node'
        else:
            return

        tmp_spikes_ds = pd.read_csv(file_name, sep=' ', names=['time', 'population', 'node'])
        tmp_spikes_ds = tmp_spikes_ds.sort_values(by=sort_col)
        tmp_spikes_ds.to_csv(file_name, sep=' ', index=False, header=False)
class STMPIBuffer(STCSVBuffer):
def __init__(self, cache_dir=None, default_population=None, **kwargs):
    """MPI-aware spike buffer: every rank writes to its own cache file.

    :param cache_dir: directory holding the per-rank cache files.
    :param default_population: population name used when none is given.
    :param kwargs: MPI_rank / MPI_size override the global communicator's
        values (useful for testing); also forwarded to the base class.
    """
    # NOTE: rank/size must be assigned *before* calling the base constructor,
    # because STCSVBuffer.__init__ invokes _cache_fname(), which reads
    # self.mpi_rank to build the per-rank file name.
    self.mpi_rank = kwargs.get('MPI_rank', bmtk_world_comm.MPI_rank)
    self.mpi_size = kwargs.get('MPI_size', bmtk_world_comm.MPI_size)
    super(STMPIBuffer, self).__init__(cache_dir, default_population=default_population, **kwargs)
def _cache_fname(self, cache_dir):
    """Return this rank's private cache-file path.

    Rank 0 creates the cache directory; all ranks then synchronize on a
    barrier (a collective call every rank must reach) so no rank opens its
    file before the directory exists.
    """
    if self.mpi_rank == 0:
        if not os.path.exists(self._cache_dir):
            # FIX: os.mkdirs() does not exist in the standard library; the
            # original raised AttributeError when the dir was missing.
            os.makedirs(self._cache_dir)
    bmtk_world_comm.barrier()
    return os.path.join(self._cache_dir, '.bmtk.spikes.cache.node{}.csv'.format(self.mpi_rank))
def _all_cached_files(self):
    """Paths of every rank's cache file (some may not exist yet).

    FIX: uses self.mpi_size — which honors the MPI_size kwarg accepted by
    __init__ — instead of the global bmtk_world_comm.MPI_size, so buffers
    constructed with an explicit size scan the matching set of files.
    """
    return [os.path.join(self._cache_dir, '.bmtk.spikes.cache.node{}.csv'.format(r)) for r in range(self.mpi_size)]
@property
def populations(self):
    """All population names found across every rank's cache file."""
    self._gather()
    return [pop_name for pop_name in self._pop_counts]
def n_spikes(self, population=None):
    """Spike count for *population*, aggregated across all rank files.

    Falls back to the buffer's default population when None, consistent with
    the rest of the reader API. Returns 0 for unknown populations.
    """
    self._gather()
    # FIX: a None population previously looked up the literal key None.
    population = population or self._default_population
    return self._pop_counts.get(population, 0)
def _gather(self):
    """Rebuild the per-population spike counts by scanning every rank's cache file."""
    self._pop_counts = {}
    for cache_path in self._all_cached_files():
        if not os.path.exists(cache_path):
            continue
        with open(cache_path, 'r') as fh:
            for record in csv.reader(fh, delimiter=' '):
                pop_name = record[1]
                self._pop_counts[pop_name] = self._pop_counts.get(pop_name, 0) + 1
def spikes(self, node_ids=None, populations=None, time_window=None, sort_order=SortOrder.none, **kwargs):
    """Iterate spikes from all ranks' cache files as (time, population, node_id).

    For sorted iteration, each per-rank file is first sorted on disk and the
    streams are then combined by self._sorted_itr (defined outside this view;
    presumably a k-way merge over the pre-sorted files — confirm). Unsorted
    iteration simply chains the files in rank order via self._unsorted_itr.
    """
    self.flush()
    filter = _create_filter(populations, time_window)  # NOTE: shadows the builtin `filter`
    if sort_order == SortOrder.by_time or sort_order == SortOrder.by_id:
        for file_name in self._all_cached_files():
            if not os.path.exists(file_name):
                continue
            self._sort_buffer_file(file_name, sort_order)
        # The second argument selects the merge key inside _sorted_itr:
        # 0 for the time column, 1 otherwise (presumably the node id — confirm
        # against _sorted_itr, which is not visible here).
        return self._sorted_itr(filter, 0 if sort_order == SortOrder.by_time else 1)
    else:
        return self._unsorted_itr(filter)
def _unsorted_itr(self, filter):
for fn | |
# Copyright 2018-2021 Streamlit Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Streamlit.
How to use Streamlit in 3 seconds:
1. Write an app
>>> import streamlit as st
>>> st.write(anything_you_want)
2. Run your app
$ streamlit run my_script.py
3. Use your app
A new tab will open on your browser. That's your Streamlit app!
4. Modify your code, save it, and watch changes live on your browser.
Take a look at the other commands in this module to find out what else
Streamlit can do:
>>> dir(streamlit)
Or try running our "Hello World":
$ streamlit hello
For more detailed info, see https://docs.streamlit.io.
"""
# IMPORTANT: Prefix with an underscore anything that the user shouldn't see.
# NOTE: You'll see lots of "noqa: F821" in this file. That's because we
# manually mess with the local namespace so the linter can't know that some
# identifiers actually exist in the namespace.
# Must be at the top, to avoid circular dependency.
from streamlit import logger as _logger
from streamlit import config as _config
from streamlit.proto.RootContainer_pb2 import RootContainer
_LOGGER = _logger.get_logger("root")
# Give the package a version.
import pkg_resources as _pkg_resources
from typing import List
# This used to be pkg_resources.require('streamlit') but it would cause
# pex files to fail. See #394 for more details.
__version__ = _pkg_resources.get_distribution("streamlit").version
import contextlib as _contextlib
import re as _re
import sys as _sys
import textwrap as _textwrap
import threading as _threading
import traceback as _traceback
import urllib.parse as _parse
import click as _click
from streamlit import code_util as _code_util
from streamlit import env_util as _env_util
from streamlit import source_util as _source_util
from streamlit import string_util as _string_util
from streamlit.delta_generator import DeltaGenerator as _DeltaGenerator
from streamlit.report_thread import add_report_ctx as _add_report_ctx
from streamlit.report_thread import get_report_ctx as _get_report_ctx
from streamlit.script_runner import StopException
from streamlit.script_runner import RerunException as _RerunException
from streamlit.script_request_queue import RerunData as _RerunData
from streamlit.errors import StreamlitAPIException
from streamlit.proto import ForwardMsg_pb2 as _ForwardMsg_pb2
# Modules that the user should have access to. These are imported with "as"
# syntax pass mypy checking with implicit_reexport disabled.
from streamlit.caching import cache as cache # noqa: F401
# This is set to True inside cli._main_run(), and is False otherwise.
# If False, we should assume that DeltaGenerator functions are effectively
# no-ops, and adapt gracefully.
_is_running_with_streamlit: bool = False
def _update_logger():
    """Re-apply logging configuration from the current config options."""
    level = _config.get_option("logger.level")
    _logger.set_log_level(level.upper())
    _logger.update_formatter()
    _logger.init_tornado_logs()
# Make this file only depend on config option in an asynchronous manner. This
# avoids a race condition when another file (such as a test file) tries to pass
# in an alternative config.
_config.on_config_parsed(_update_logger, True)
# Root containers: the module-level `st.*` element functions below write to
# `_main`; `st.sidebar.*` writes to the sidebar container, parented to `_main`.
_main = _DeltaGenerator(root_container=RootContainer.MAIN)
sidebar = _DeltaGenerator(root_container=RootContainer.SIDEBAR, parent=_main)
# DeltaGenerator methods:
# Re-export the element-writing methods of the main container as module-level
# functions, so users can call e.g. `st.text(...)` directly. Note that `help`,
# `map` and `json` intentionally shadow builtins/stdlib names at module scope.
altair_chart = _main.altair_chart # noqa: E221
area_chart = _main.area_chart # noqa: E221
audio = _main.audio # noqa: E221
balloons = _main.balloons # noqa: E221
bar_chart = _main.bar_chart # noqa: E221
bokeh_chart = _main.bokeh_chart # noqa: E221
button = _main.button # noqa: E221
checkbox = _main.checkbox # noqa: E221
code = _main.code # noqa: E221
dataframe = _main.dataframe # noqa: E221
date_input = _main.date_input # noqa: E221
pydeck_chart = _main.pydeck_chart # noqa: E221
empty = _main.empty # noqa: E221
error = _main.error # noqa: E221
exception = _main.exception # noqa: E221
file_uploader = _main.file_uploader # noqa: E221
graphviz_chart = _main.graphviz_chart # noqa: E221
header = _main.header # noqa: E221
help = _main.help # noqa: E221
image = _main.image # noqa: E221
info = _main.info # noqa: E221
json = _main.json # noqa: E221
latex = _main.latex # noqa: E221
line_chart = _main.line_chart # noqa: E221
map = _main.map # noqa: E221
markdown = _main.markdown # noqa: E221
multiselect = _main.multiselect # noqa: E221
number_input = _main.number_input # noqa: E221
plotly_chart = _main.plotly_chart # noqa: E221
progress = _main.progress # noqa: E221
pyplot = _main.pyplot # noqa: E221
radio = _main.radio # noqa: E221
selectbox = _main.selectbox # noqa: E221
select_slider = _main.select_slider # noqa: E221
slider = _main.slider # noqa: E221
subheader = _main.subheader # noqa: E221
success = _main.success # noqa: E221
table = _main.table # noqa: E221
text = _main.text # noqa: E221
text_area = _main.text_area # noqa: E221
text_input = _main.text_input # noqa: E221
time_input = _main.time_input # noqa: E221
title = _main.title # noqa: E221
vega_lite_chart = _main.vega_lite_chart # noqa: E221
video = _main.video # noqa: E221
warning = _main.warning # noqa: E221
write = _main.write # noqa: E221
color_picker = _main.color_picker # noqa: E221
# Config
get_option = _config.get_option
from streamlit.commands.page_config import set_page_config
def _beta_warning(func, date):
    """Wrap a function that has graduated out of beta.

    The wrapper behaves exactly like *func*, but additionally shows an
    st.warning telling the user the `beta_` alias will be removed.

    Parameters
    ----------
    func: function
        The `st.` function that used to be in beta.
    date: str
        A date like "2020-01-01", indicating the last day we'll guarantee
        support for the beta_ prefix.
    """

    def wrapped(*args, **kwargs):
        # Note: Since we use a wrapper, beta_ functions will not autocomplete
        # correctly on VSCode.
        result = func(*args, **kwargs)
        message = (
            f"`st.{func.__name__}` has graduated out of beta. "
            f"On {date}, the beta_ version will be removed.\n\n"
            f"Before then, update your code from `st.beta_{func.__name__}` to `st.{func.__name__}`."
        )
        warning(message)
        return result

    # Update the wrapped func's name & docstring so st.help does the right thing
    wrapped.__name__ = f"beta_{func.__name__}"
    wrapped.__doc__ = func.__doc__
    return wrapped
# Pre-release layout APIs, exposed under the `beta_` prefix.
beta_container = _main.beta_container # noqa: E221
beta_expander = _main.beta_expander # noqa: E221
beta_columns = _main.beta_columns # noqa: E221
def set_option(key, value):
    """Set a config option from within a running script.

    Currently, only the following config options can be set within the script
    itself:
        * client.caching
        * client.displayEnabled
        * deprecation.*

    Calling with any other options will raise StreamlitAPIException.

    Run `streamlit config show` in the terminal to see all available options.

    Parameters
    ----------
    key : str
        The config option key of the form "section.optionName". To see all
        available options, run `streamlit config show` on a terminal.
    value
        The new value to assign to this config option.
    """
    option = _config._config_options[key]
    if not option.scriptable:
        # Guard clause: reject non-scriptable options up front.
        raise StreamlitAPIException(
            "{key} cannot be set on the fly. Set as command line option, e.g. streamlit run script.py --{key}, or in config.toml instead.".format(
                key=key
            )
        )
    _config.set_option(key, value)
def experimental_show(*args):
    """Write arguments and *argument names* to your app for debugging purposes.

    Show() has similar properties to write():
        1. You can pass in multiple arguments, all of which will be debugged.
        2. It returns None, so it's "slot" in the app cannot be reused.

    Note: This is an experimental feature. See
    https://docs.streamlit.io/en/latest/api.html#pre-release-features for more information.

    Parameters
    ----------
    *args : any
        One or many objects to debug in the App.

    Example
    -------
    >>> dataframe = pd.DataFrame({
    ...     'first column': [1, 2, 3, 4],
    ...     'second column': [10, 20, 30, 40],
    ... }))
    >>> st.experimental_show(dataframe)

    Notes
    -----
    This is an experimental feature with usage limitations:
    - The method must be called with the name `show`.
    - Must be called in one line of code, and only once per line.
    - When passing multiple arguments the inclusion of `,` or `)` in a string
      argument may cause an error.
    """
    if not args:
        return
    try:
        import inspect
        # Get the calling line of code
        current_frame = inspect.currentframe()
        if current_frame is None:
            warning("`show` not enabled in the shell")
            return
        # Index 3 of getframeinfo() is `code_context`: the caller's source
        # line(s). It is None when source is unavailable (e.g. in a REPL).
        lines = inspect.getframeinfo(current_frame.f_back)[3]
        if not lines:
            warning("`show` not enabled in the shell")
            return
        # Parse arguments from the line (text after the literal "show")
        line = lines[0].split("show", 1)[1]
        inputs = _code_util.get_method_args_from_code(args, line)
        # Escape markdown and add deltas
        for idx, input in enumerate(inputs):  # NOTE: `input` shadows the builtin
            escaped = _string_util.escape_markdown(input)
            markdown("**%s**" % escaped)
            write(args[idx])
    except Exception:
        # Broad on purpose: a debugging helper must never crash the user's
        # app; render the error in the app instead.
        _, exc, exc_tb = _sys.exc_info()
        exception(exc)
def experimental_get_query_params():
"""Return the query parameters that is currently showing in the browser's URL bar.
Returns
-------
dict
The current query parameters as a dict. "Query parameters" are the part of the URL that comes
after the first "?".
Example
-------
Let's say the user's web browser is at
`http://localhost:8501/?show_map=True&selected=asia&selected=america`.
Then, you can get the query parameters using the following:
>>> st.experimental_get_query_params()
{"show_map": ["True"], "selected": ["asia", "america"]}
Note that the values in the returned dict are *always* lists. This is
because we internally use Python's urllib.parse.parse_qs(), which behaves
this way. | |
def callback(response):
current = response.context['upload_stream_current']
total = response.context['data_stream_total']
if current is not None:
progress.append((current, total))
create_resp = await blob.upload_blob(
data, blob_type=BlobType.PageBlob, raw_response_hook=callback)
props = await blob.get_blob_properties()
# Assert
await self.assertBlobEqual(self.container_name, blob.blob_name, data, bsc)
self.assertEqual(props.etag, create_resp.get('etag'))
self.assertEqual(props.last_modified, create_resp.get('last_modified'))
self.assert_upload_progress(LARGE_BLOB_SIZE, self.config.max_page_size, progress)
@pytest.mark.live_test_only
@BlobPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_create_blob_from_bytes_with_index(self, storage_account_name, storage_account_key):
    """Upload a bytes slice starting at an offset; the blob must equal data[index:]."""
    # parallel tests introduce random order of requests, can only run live
    bsc = BlobServiceClient(self.account_url(storage_account_name, "blob"), credential=storage_account_key, connection_data_block_size=4 * 1024, max_page_size=4 * 1024, transport=AiohttpTestTransport())
    await self._setup(bsc)
    blob = self._get_blob_reference(bsc)
    data = self.get_random_bytes(LARGE_BLOB_SIZE)
    index = 1024
    # Act
    await blob.upload_blob(data[index:], blob_type=BlobType.PageBlob)
    # Assert
    await self.assertBlobEqual(self.container_name, blob.blob_name, data[1024:], bsc)
@BlobPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_create_blob_from_bytes_with_index_and_count(self, storage_account_name, storage_account_key):
    """Upload `count` bytes from an offset; the blob must hold exactly that slice."""
    bsc = BlobServiceClient(self.account_url(storage_account_name, "blob"), credential=storage_account_key, connection_data_block_size=4 * 1024, max_page_size=4 * 1024, transport=AiohttpTestTransport())
    await self._setup(bsc)
    blob = self._get_blob_reference(bsc)
    data = self.get_random_bytes(LARGE_BLOB_SIZE)
    index = 512
    count = 1024
    # Act
    create_resp = await blob.upload_blob(data[index:], length=count, blob_type=BlobType.PageBlob)
    props = await blob.get_blob_properties()
    # Assert
    await self.assertBlobEqual(self.container_name, blob.blob_name, data[index:index + count], bsc)
    self.assertEqual(props.etag, create_resp.get('etag'))
    self.assertEqual(props.last_modified, create_resp.get('last_modified'))
@pytest.mark.live_test_only
@BlobPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_create_blob_from_path(self, storage_account_name, storage_account_key):
    """Upload a page blob from a file on disk and verify content and properties."""
    # parallel tests introduce random order of requests, can only run live
    bsc = BlobServiceClient(self.account_url(storage_account_name, "blob"), credential=storage_account_key, connection_data_block_size=4 * 1024, max_page_size=4 * 1024, transport=AiohttpTestTransport())
    await self._setup(bsc)
    blob = self._get_blob_reference(bsc)
    data = self.get_random_bytes(LARGE_BLOB_SIZE)
    # uuid suffix keeps parallel test runs from clobbering each other's files
    FILE_PATH = 'create_blob_from_p.temp.{}.dat'.format(str(uuid.uuid4()))
    with open(FILE_PATH, 'wb') as stream:
        stream.write(data)
    # Act
    with open(FILE_PATH, 'rb') as stream:
        create_resp = await blob.upload_blob(stream, blob_type=BlobType.PageBlob)
    props = await blob.get_blob_properties()
    # Assert
    await self.assertBlobEqual(self.container_name, blob.blob_name, data, bsc)
    self.assertEqual(props.etag, create_resp.get('etag'))
    self.assertEqual(props.last_modified, create_resp.get('last_modified'))
    self._teardown(FILE_PATH)
@pytest.mark.live_test_only
@BlobPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_create_blob_from_path_with_progress(self, storage_account_name, storage_account_key):
    """Upload from a file while collecting progress callbacks, then validate them."""
    # parallel tests introduce random order of requests, can only run live
    bsc = BlobServiceClient(self.account_url(storage_account_name, "blob"), credential=storage_account_key, connection_data_block_size=4 * 1024, max_page_size=4 * 1024, transport=AiohttpTestTransport())
    await self._setup(bsc)
    blob = self._get_blob_reference(bsc)
    data = self.get_random_bytes(LARGE_BLOB_SIZE)
    FILE_PATH = 'create_blob_from_path_with_p.temp.{}.dat'.format(str(uuid.uuid4()))
    with open(FILE_PATH, 'wb') as stream:
        stream.write(data)
    # Act
    progress = []
    def callback(response):
        # raw_response_hook: record (bytes-sent-so-far, total) for each request
        current = response.context['upload_stream_current']
        total = response.context['data_stream_total']
        if current is not None:
            progress.append((current, total))
    with open(FILE_PATH, 'rb') as stream:
        await blob.upload_blob(stream, blob_type=BlobType.PageBlob, raw_response_hook=callback)
    # Assert
    await self.assertBlobEqual(self.container_name, blob.blob_name, data, bsc)
    self.assert_upload_progress(len(data), self.config.max_page_size, progress)
    self._teardown(FILE_PATH)
@pytest.mark.live_test_only
@BlobPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_create_blob_from_stream(self, storage_account_name, storage_account_key):
    """Upload a page blob from an open stream with an explicit length."""
    # parallel tests introduce random order of requests, can only run live
    bsc = BlobServiceClient(self.account_url(storage_account_name, "blob"), credential=storage_account_key, connection_data_block_size=4 * 1024, max_page_size=4 * 1024, transport=AiohttpTestTransport())
    await self._setup(bsc)
    blob = self._get_blob_reference(bsc)
    data = self.get_random_bytes(LARGE_BLOB_SIZE)
    FILE_PATH = '_create_blob_from_s.temp.{}.dat'.format(str(uuid.uuid4()))
    with open(FILE_PATH, 'wb') as stream:
        stream.write(data)
    # Act
    blob_size = len(data)
    with open(FILE_PATH, 'rb') as stream:
        create_resp = await blob.upload_blob(stream, length=blob_size, blob_type=BlobType.PageBlob)
    props = await blob.get_blob_properties()
    # Assert
    await self.assertBlobEqual(self.container_name, blob.blob_name, data[:blob_size], bsc)
    self.assertEqual(props.etag, create_resp.get('etag'))
    self.assertEqual(props.last_modified, create_resp.get('last_modified'))
    self._teardown(FILE_PATH)
@pytest.mark.live_test_only
@BlobPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_create_blob_from_stream_with_empty_pages(self, storage_account_name, storage_account_key):
    """Mostly-zero data: the uploader should skip empty pages, leaving two valid ranges."""
    # parallel tests introduce random order of requests, can only run live
    bsc = BlobServiceClient(self.account_url(storage_account_name, "blob"), credential=storage_account_key, connection_data_block_size=4 * 1024, max_page_size=4 * 1024, transport=AiohttpTestTransport())
    # data is almost all empty (0s) except two ranges
    await self._setup(bsc)
    blob = self._get_blob_reference(bsc)
    data = bytearray(LARGE_BLOB_SIZE)
    data[512: 1024] = self.get_random_bytes(512)
    data[8192: 8196] = self.get_random_bytes(4)
    FILE_PATH = '_with_empty_pages.temp.{}.dat'.format(str(uuid.uuid4()))
    with open(FILE_PATH, 'wb') as stream:
        stream.write(data)
    # Act
    blob_size = len(data)
    with open(FILE_PATH, 'rb') as stream:
        create_resp = await blob.upload_blob(stream, length=blob_size, blob_type=BlobType.PageBlob)
    props = await blob.get_blob_properties()
    # Assert
    # the uploader should have skipped the empty ranges
    await self.assertBlobEqual(self.container_name, blob.blob_name, data[:blob_size], bsc)
    ranges = await blob.get_page_ranges()
    # get_page_ranges returns a (valid_ranges, cleared_ranges) pair — unpack it
    page_ranges, cleared = list(ranges)
    self.assertEqual(len(page_ranges), 2)
    # ranges are aligned to the 4KiB max_page_size configured on the client
    self.assertEqual(page_ranges[0]['start'], 0)
    self.assertEqual(page_ranges[0]['end'], 4095)
    self.assertEqual(page_ranges[1]['start'], 8192)
    self.assertEqual(page_ranges[1]['end'], 12287)
    self.assertEqual(props.etag, create_resp.get('etag'))
    self.assertEqual(props.last_modified, create_resp.get('last_modified'))
    self._teardown(FILE_PATH)
@pytest.mark.live_test_only
@BlobPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_create_blob_from_stream_non_seekable(self, storage_account_name, storage_account_key):
    """Upload from a non-seekable stream; requires max_concurrency=1."""
    # parallel tests introduce random order of requests, can only run live
    bsc = BlobServiceClient(self.account_url(storage_account_name, "blob"), credential=storage_account_key, connection_data_block_size=4 * 1024, max_page_size=4 * 1024, transport=AiohttpTestTransport())
    await self._setup(bsc)
    blob = self._get_blob_reference(bsc)
    data = self.get_random_bytes(LARGE_BLOB_SIZE)
    FILE_PATH = 'blob_from_stream_non_see.temp.{}.dat'.format(str(uuid.uuid4()))
    with open(FILE_PATH, 'wb') as stream:
        stream.write(data)
    # Act
    blob_size = len(data)
    with open(FILE_PATH, 'rb') as stream:
        non_seekable_file = StoragePageBlobAsyncTest.NonSeekableFile(stream)
        # max_concurrency=1: parallel upload would need to seek the source
        await blob.upload_blob(
            non_seekable_file,
            length=blob_size,
            max_concurrency=1,
            blob_type=BlobType.PageBlob)
    # Assert
    await self.assertBlobEqual(self.container_name, blob.blob_name, data[:blob_size], bsc)
    self._teardown(FILE_PATH)
@pytest.mark.live_test_only
@BlobPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_create_blob_from_stream_with_progress(self, storage_account_name, storage_account_key):
    """Upload from a stream while recording progress callbacks, then validate them."""
    # parallel tests introduce random order of requests, can only run live
    bsc = BlobServiceClient(self.account_url(storage_account_name, "blob"), credential=storage_account_key, connection_data_block_size=4 * 1024, max_page_size=4 * 1024, transport=AiohttpTestTransport())
    await self._setup(bsc)
    blob = self._get_blob_reference(bsc)
    data = self.get_random_bytes(LARGE_BLOB_SIZE)
    FILE_PATH = 'rom_stream_with_progress.temp.{}.dat'.format(str(uuid.uuid4()))
    with open(FILE_PATH, 'wb') as stream:
        stream.write(data)
    # Act
    progress = []
    def callback(response):
        # raw_response_hook: record (bytes-sent-so-far, total) per request
        current = response.context['upload_stream_current']
        total = response.context['data_stream_total']
        if current is not None:
            progress.append((current, total))
    blob_size = len(data)
    with open(FILE_PATH, 'rb') as stream:
        await blob.upload_blob(
            stream, length=blob_size, blob_type=BlobType.PageBlob, raw_response_hook=callback)
    # Assert
    await self.assertBlobEqual(self.container_name, blob.blob_name, data[:blob_size], bsc)
    self.assert_upload_progress(len(data), self.config.max_page_size, progress)
    self._teardown(FILE_PATH)
@pytest.mark.live_test_only
@BlobPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_create_blob_from_stream_truncated(self, storage_account_name, storage_account_key):
    """Upload fewer bytes than the stream holds; only the first `length` bytes land."""
    # parallel tests introduce random order of requests, can only run live
    bsc = BlobServiceClient(self.account_url(storage_account_name, "blob"), credential=storage_account_key, connection_data_block_size=4 * 1024, max_page_size=4 * 1024, transport=AiohttpTestTransport())
    await self._setup(bsc)
    blob = self._get_blob_reference(bsc)
    data = self.get_random_bytes(LARGE_BLOB_SIZE)
    FILE_PATH = '_create_blob_from_stream_trunc.temp.{}.dat'.format(str(uuid.uuid4()))
    with open(FILE_PATH, 'wb') as stream:
        stream.write(data)
    # Act
    blob_size = len(data) - 512
    with open(FILE_PATH, 'rb') as stream:
        await blob.upload_blob(stream, length=blob_size, blob_type=BlobType.PageBlob)
    # Assert
    await self.assertBlobEqual(self.container_name, blob.blob_name, data[:blob_size], bsc)
    self._teardown(FILE_PATH)
@pytest.mark.live_test_only
@BlobPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_create_blob_from_stream_with_progress_truncated(self, storage_account_name, storage_account_key):
    """Truncated stream upload with progress callbacks; totals must match `length`."""
    # parallel tests introduce random order of requests, can only run live
    bsc = BlobServiceClient(self.account_url(storage_account_name, "blob"), credential=storage_account_key, connection_data_block_size=4 * 1024, max_page_size=4 * 1024, transport=AiohttpTestTransport())
    await self._setup(bsc)
    blob = self._get_blob_reference(bsc)
    data = self.get_random_bytes(LARGE_BLOB_SIZE)
    FILE_PATH = 'from_stream_with_progress_truncated.temp.{}.dat'.format(str(uuid.uuid4()))
    with open(FILE_PATH, 'wb') as stream:
        stream.write(data)
    # Act
    progress = []
    def callback(response):
        # raw_response_hook: record (bytes-sent-so-far, total) per request
        current = response.context['upload_stream_current']
        total = response.context['data_stream_total']
        if current is not None:
            progress.append((current, total))
    blob_size = len(data) - 512
    with open(FILE_PATH, 'rb') as stream:
        await blob.upload_blob(
            stream, length=blob_size, blob_type=BlobType.PageBlob, raw_response_hook=callback)
    # Assert
    await self.assertBlobEqual(self.container_name, blob.blob_name, data[:blob_size], bsc)
    self.assert_upload_progress(blob_size, self.config.max_page_size, progress)
    self._teardown(FILE_PATH)
@BlobPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_create_blob_with_md5_small(self, storage_account_name, storage_account_key):
    """validate_content=True must not raise for a small (single-request) upload."""
    bsc = BlobServiceClient(self.account_url(storage_account_name, "blob"), credential=storage_account_key, connection_data_block_size=4 * 1024, max_page_size=4 * 1024, transport=AiohttpTestTransport())
    await self._setup(bsc)
    blob = self._get_blob_reference(bsc)
    data = self.get_random_bytes(512)
    # Act
    await blob.upload_blob(data, validate_content=True, blob_type=BlobType.PageBlob)
    # Assert
    # (no explicit assert — success means the MD5 validation did not raise)
@pytest.mark.live_test_only
@BlobPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_create_blob_with_md5_large(self, storage_account_name, storage_account_key):
    """validate_content=True must not raise for a large (chunked) upload.

    FIX: the client was constructed twice; the second construction discarded
    the client that _setup() had just prepared. Build it once.
    """
    bsc = BlobServiceClient(self.account_url(storage_account_name, "blob"), credential=storage_account_key, connection_data_block_size=4 * 1024, max_page_size=4 * 1024, transport=AiohttpTestTransport())
    await self._setup(bsc)
    blob = self._get_blob_reference(bsc)
    data = self.get_random_bytes(LARGE_BLOB_SIZE)
    # Act
    await blob.upload_blob(data, validate_content=True, blob_type=BlobType.PageBlob)
    # Assert
    # (no explicit assert — success means the MD5 validation did not raise)
@pytest.mark.skip(reason="Failing live test https://github.com/Azure/azure-sdk-for-python/issues/10473")
@pytest.mark.live_test_only
@BlobPreparer()
@AsyncStorageTestCase.await_prepared_test
async def test_incremental_copy_blob(self, storage_account_name, storage_account_key):
    """Start an incremental copy from a page-blob snapshot (via SAS) and await success."""
    # parallel tests introduce random order of requests, can only run live
    bsc = BlobServiceClient(self.account_url(storage_account_name, "blob"), credential=storage_account_key, connection_data_block_size=4 * 1024, max_page_size=4 * 1024, transport=AiohttpTestTransport())
    await self._setup(bsc)
    source_blob = await self._create_blob(bsc, 2048)
    data = self.get_random_bytes(512)
    resp1 = await source_blob.upload_page(data, offset=0, length=512)
    resp2 = await source_blob.upload_page(data, offset=1024, length=512)
    source_snapshot_blob = await source_blob.create_snapshot()
    # Incremental copy requires a snapshot as the source, authorized via SAS
    snapshot_blob = BlobClient.from_blob_url(
        source_blob.url, credential=source_blob.credential, snapshot=source_snapshot_blob)
    sas_token = generate_blob_sas(
        snapshot_blob.account_name,
        snapshot_blob.container_name,
        snapshot_blob.blob_name,
        snapshot=snapshot_blob.snapshot,
        account_key=snapshot_blob.credential.account_key,
        permission=BlobSasPermissions(read=True),
        expiry=datetime.utcnow() + timedelta(hours=1),
    )
    sas_blob = BlobClient.from_blob_url(snapshot_blob.url, credential=sas_token)
    # Act
    dest_blob = bsc.get_blob_client(self.container_name, 'dest_blob')
    copy = await dest_blob.start_copy_from_url(sas_blob.url, incremental_copy=True)
    # Assert
    self.assertIsNotNone(copy)
    self.assertIsNotNone(copy['copy_id'])
    self.assertEqual(copy['copy_status'], 'pending')
    copy_blob = await self._wait_for_async_copy(dest_blob)
    self.assertEqual(copy_blob.copy.status, 'success')
    self.assertIsNotNone(copy_blob.copy.destination_snapshot)
    # strip off protocol
    self.assertTrue(copy_blob.copy.source.endswith(sas_blob.url[5:]))
@pytest.mark.live_test_only
@BlobPreparer()
async def test_blob_tier_on_create(self, premium_storage_account_name, premium_storage_account_key):
    """Verify premium page-blob tiers (P4/P6/P10) can be set via three creation APIs."""
    # Test can only run live
    bsc = BlobServiceClient(self.account_url(premium_storage_account_name, "blob"), credential=premium_storage_account_key, connection_data_block_size=4 * 1024, max_page_size=4 * 1024, transport=AiohttpTestTransport())
    await self._setup(bsc)
    url = self.account_url(premium_storage_account_name, "blob")
    credential = premium_storage_account_key
    pbs = BlobServiceClient(url, credential=credential, transport=AiohttpTestTransport())
    # FIX: bind everything the finally-clause references *before* entering the
    # try-block. Previously `container` and FILE_PATH were assigned inside
    # `try`, so an early failure raised NameError in `finally`, masking the
    # original exception. (None of these assignments perform I/O.)
    container_name = self.get_resource_name('utpremiumcontainer')
    container = pbs.get_container_client(container_name)
    FILE_PATH = 'test_blob_tier_on_creat.temp.{}.dat'.format(str(uuid.uuid4()))
    try:
        if self.is_live:
            await container.create_container()
        # test create_blob API
        blob = self._get_blob_reference(bsc)
        pblob = pbs.get_blob_client(container_name, blob.blob_name)
        await pblob.create_page_blob(1024, premium_page_blob_tier=PremiumPageBlobTier.P4)
        props = await pblob.get_blob_properties()
        self.assertEqual(props.blob_tier, PremiumPageBlobTier.P4)
        self.assertFalse(props.blob_tier_inferred)
        # test create_blob_from_bytes API
        blob2 = self._get_blob_reference(bsc)
        pblob2 = pbs.get_blob_client(container_name, blob2.blob_name)
        byte_data = self.get_random_bytes(1024)
        await pblob2.upload_blob(
            byte_data,
            premium_page_blob_tier=PremiumPageBlobTier.P6,
            blob_type=BlobType.PageBlob,
            overwrite=True)
        props2 = await pblob2.get_blob_properties()
        self.assertEqual(props2.blob_tier, PremiumPageBlobTier.P6)
        self.assertFalse(props2.blob_tier_inferred)
        # test create_blob_from_path API
        blob3 = self._get_blob_reference(bsc)
        pblob3 = pbs.get_blob_client(container_name, blob3.blob_name)
        with open(FILE_PATH, 'wb') as stream:
            stream.write(byte_data)
        with open(FILE_PATH, 'rb') as stream:
            await pblob3.upload_blob(
                stream,
                blob_type=BlobType.PageBlob,
                premium_page_blob_tier=PremiumPageBlobTier.P10,
                overwrite=True)
        props3 = await pblob3.get_blob_properties()
        self.assertEqual(props3.blob_tier, PremiumPageBlobTier.P10)
        self.assertFalse(props3.blob_tier_inferred)
    finally:
        await container.delete_container()
        self._teardown(FILE_PATH)
@BlobPreparer()
async def test_blob_tier_set_tier_api(self, premium_storage_account_name, premium_storage_account_key):
bsc = BlobServiceClient(self.account_url(premium_storage_account_name, "blob"), credential=premium_storage_account_key, connection_data_block_size=4 * 1024, max_page_size=4 * 1024, transport=AiohttpTestTransport())
await self._setup(bsc)
url = self.account_url(premium_storage_account_name, "blob")
credential = premium_storage_account_key
pbs = BlobServiceClient(url, credential=credential, transport=AiohttpTestTransport())
try:
container_name = self.get_resource_name('utpremiumcontainer')
container = pbs.get_container_client(container_name)
if self.is_live:
try:
await container.create_container()
except ResourceExistsError:
pass
blob = self._get_blob_reference(bsc)
pblob = pbs.get_blob_client(container_name, blob.blob_name)
await pblob.create_page_blob(1024)
blob_ref = await pblob.get_blob_properties()
self.assertEqual(PremiumPageBlobTier.P10, blob_ref.blob_tier)
self.assertIsNotNone(blob_ref.blob_tier)
self.assertTrue(blob_ref.blob_tier_inferred)
pcontainer = pbs.get_container_client(container_name)
blobs = []
async for b in pcontainer.list_blobs():
blobs.append(b)
# Assert
| |
r=experiment({'host_os':hos, 'target_os':tos, 'device_id':tdid, 'out':oo,
'program_uoa':cfg['programs_uoa']['hashjoin-ph-8'],
'env':{'CK_COMPILE_TYPE':'no'},
'deps':deps,
'quiet':q, 'record':rec, 'record_repo_uoa':rruid, 'record_data_uoa':rduid, 'os_abi':os_abi,
'title':'',
'subtitle':'Validating hashjoin-ph-8 no prefetching:',
'key':'figure-4-hashjoin-ph-8-no-prefetching', 'results':results})
if r['return']>0:
log({'string':''})
log({'string':'Experiment failed ('+r['error']+')'})
r=experiment({'host_os':hos, 'target_os':tos, 'device_id':tdid, 'out':oo,
'program_uoa':cfg['programs_uoa']['hashjoin-ph-8'],
'env':{'CK_COMPILE_TYPE':'auto'},
'deps':deps,
'quiet':q, 'record':rec, 'record_repo_uoa':rruid, 'record_data_uoa':rduid, 'os_abi':os_abi,
'title':'',
'subtitle':'Validating hashjoin-ph-8 auto prefetching:',
'key':'figure-4-hashjoin-ph-8-auto', 'results':results})
if r['return']>0:
log({'string':''})
log({'string':'Experiment failed ('+r['error']+')'})
r=experiment({'host_os':hos, 'target_os':tos, 'device_id':tdid, 'out':oo,
'program_uoa':cfg['programs_uoa']['hashjoin-ph-8'],
'env':{'CK_COMPILE_TYPE':'man'},
'deps':deps,
'quiet':q, 'record':rec, 'record_repo_uoa':rruid, 'record_data_uoa':rduid, 'os_abi':os_abi,
'title':'',
'subtitle':'Validating hashjoin-ph-8 manual prefetching:',
'key':'figure-4-hashjoin-ph-8-man', 'results':results})
if r['return']>0:
log({'string':''})
log({'string':'Experiment failed ('+r['error']+')'})
r=experiment({'host_os':hos, 'target_os':tos, 'device_id':tdid, 'out':oo,
'program_uoa':cfg['programs_uoa']['graph500'],
'env':{'CK_COMPILE_TYPE':'no'},
'cmd':'s16e10',
'deps':deps,
'quiet':q, 'record':rec, 'record_repo_uoa':rruid, 'record_data_uoa':rduid, 'os_abi':os_abi,
'title':'',
'subtitle':'Validating graph500 no prefetching:',
'key':'figure-4-graph500-s16-no-prefetching', 'results':results})
if r['return']>0:
log({'string':''})
log({'string':'Experiment failed ('+r['error']+')'})
r=experiment({'host_os':hos, 'target_os':tos, 'device_id':tdid, 'out':oo,
'program_uoa':cfg['programs_uoa']['graph500'],
'env':{'CK_COMPILE_TYPE':'auto'},
'cmd':'s16e10',
'deps':deps,
'quiet':q, 'record':rec, 'record_repo_uoa':rruid, 'record_data_uoa':rduid, 'os_abi':os_abi,
'title':'',
'subtitle':'Validating graph500 auto prefetching:',
'key':'figure-4-graph500-s16-auto', 'results':results})
if r['return']>0:
log({'string':''})
log({'string':'Experiment failed ('+r['error']+')'})
r=experiment({'host_os':hos, 'target_os':tos, 'device_id':tdid, 'out':oo,
'program_uoa':cfg['programs_uoa']['graph500'],
'env':{'CK_COMPILE_TYPE':'man-inorder'},
'cmd':'s16e10',
'deps':deps,
'quiet':q, 'record':rec, 'record_repo_uoa':rruid, 'record_data_uoa':rduid, 'os_abi':os_abi,
'title':'',
'subtitle':'Validating graph500 manual in order prefetching:',
'key':'figure-4-graph500-s16-man-inorder', 'results':results})
if r['return']>0:
log({'string':''})
log({'string':'Experiment failed ('+r['error']+')'})
r=experiment({'host_os':hos, 'target_os':tos, 'device_id':tdid, 'out':oo,
'program_uoa':cfg['programs_uoa']['graph500'],
'env':{'CK_COMPILE_TYPE':'man-outoforder'},
'cmd':'s16e10',
'deps':deps,
'quiet':q, 'record':rec, 'record_repo_uoa':rruid, 'record_data_uoa':rduid, 'os_abi':os_abi,
'title':'',
'subtitle':'Validating graph500 manual out of order prefetching:',
'key':'figure-4-graph500-s16-man-outoforder', 'results':results})
if r['return']>0:
log({'string':''})
log({'string':'Experiment failed ('+r['error']+')'})
r=experiment({'host_os':hos, 'target_os':tos, 'device_id':tdid, 'out':oo,
'program_uoa':cfg['programs_uoa']['graph500'],
'env':{'CK_COMPILE_TYPE':'no'},
'cmd':'s21e10',
'deps':deps,
'quiet':q, 'record':rec, 'record_repo_uoa':rruid, 'record_data_uoa':rduid, 'os_abi':os_abi,
'title':'',
'subtitle':'Validating graph500 no prefetching:',
'key':'figure-4-graph500-s21-no-prefetching', 'results':results})
if r['return']>0:
log({'string':''})
log({'string':'Experiment failed ('+r['error']+')'})
r=experiment({'host_os':hos, 'target_os':tos, 'device_id':tdid, 'out':oo,
'program_uoa':cfg['programs_uoa']['graph500'],
'env':{'CK_COMPILE_TYPE':'auto'},
'cmd':'s21e10',
'deps':deps,
'quiet':q, 'record':rec, 'record_repo_uoa':rruid, 'record_data_uoa':rduid, 'os_abi':os_abi,
'title':'',
'subtitle':'Validating graph500 auto prefetching:',
'key':'figure-4-graph500-s21-auto', 'results':results})
if r['return']>0:
log({'string':''})
log({'string':'Experiment failed ('+r['error']+')'})
r=experiment({'host_os':hos, 'target_os':tos, 'device_id':tdid, 'out':oo,
'program_uoa':cfg['programs_uoa']['graph500'],
'env':{'CK_COMPILE_TYPE':'man-inorder'},
'cmd':'s21e10',
'deps':deps,
'quiet':q, 'record':rec, 'record_repo_uoa':rruid, 'record_data_uoa':rduid, 'os_abi':os_abi,
'title':'',
'subtitle':'Validating graph500 manual in order prefetching:',
'key':'figure-4-graph500-s21-man-inorder', 'results':results})
if r['return']>0:
log({'string':''})
log({'string':'Experiment failed ('+r['error']+')'})
r=experiment({'host_os':hos, 'target_os':tos, 'device_id':tdid, 'out':oo,
'program_uoa':cfg['programs_uoa']['graph500'],
'env':{'CK_COMPILE_TYPE':'man-outoforder'},
'cmd':'s21e10',
'deps':deps,
'quiet':q, 'record':rec, 'record_repo_uoa':rruid, 'record_data_uoa':rduid, 'os_abi':os_abi,
'title':'',
'subtitle':'Validating graph500 manual out of order prefetching:',
'key':'figure-4-graph500-s21-man-outoforder', 'results':results})
if r['return']>0:
log({'string':''})
log({'string':'Experiment failed ('+r['error']+')'})
# Reproducing Figure 5 ###################################################################################
r=experiment({'host_os':hos, 'target_os':tos, 'device_id':tdid, 'out':oo,
'program_uoa':cfg['programs_uoa']['nas-cg'],
'env':{'CK_COMPILE_TYPE':'no'},
'deps':deps,
'quiet':q, 'record':rec, 'record_repo_uoa':rruid, 'record_data_uoa':rduid, 'os_abi':os_abi,
'title':'Reproducing experiments for Figure 5',
'subtitle':'Validating nas-cg no prefetching:',
'key':'figure-5-nas-cg-no-prefetching', 'results':results})
if r['return']>0:
log({'string':''})
log({'string':'Experiment failed ('+r['error']+')'})
r=experiment({'host_os':hos, 'target_os':tos, 'device_id':tdid, 'out':oo,
'program_uoa':cfg['programs_uoa']['nas-cg'],
'env':{'CK_COMPILE_TYPE':'auto'},
'deps':deps,
'quiet':q, 'record':rec, 'record_repo_uoa':rruid, 'record_data_uoa':rduid, 'os_abi':os_abi,
'title':'',
'subtitle':'Validating nas-cg auto prefetching:',
'key':'figure-5-nas-cg-auto', 'results':results})
if r['return']>0:
log({'string':''})
log({'string':'Experiment failed ('+r['error']+')'})
r=experiment({'host_os':hos, 'target_os':tos, 'device_id':tdid, 'out':oo,
'program_uoa':cfg['programs_uoa']['nas-cg'],
'env':{'CK_COMPILE_TYPE':'auto-nostride'},
'deps':deps,
'quiet':q, 'record':rec, 'record_repo_uoa':rruid, 'record_data_uoa':rduid, 'os_abi':os_abi,
'title':'',
'subtitle':'Validating nas-cg auto-nostride prefetching:',
'key':'figure-5-nas-cg-auto-nostride', 'results':results})
if r['return']>0:
log({'string':''})
log({'string':'Experiment failed ('+r['error']+')'})
r=experiment({'host_os':hos, 'target_os':tos, 'device_id':tdid, 'out':oo,
'program_uoa':cfg['programs_uoa']['nas-is'],
'env':{'CK_COMPILE_TYPE':'no'},
'deps':deps,
'quiet':q, 'record':rec, 'record_repo_uoa':rruid, 'record_data_uoa':rduid, 'os_abi':os_abi,
'title':'',
'subtitle':'Validating nas-is no prefetching:',
'key':'figure-5-nas-is-no-prefetching', 'results':results})
if r['return']>0:
log({'string':''})
log({'string':'Experiment failed ('+r['error']+')'})
r=experiment({'host_os':hos, 'target_os':tos, 'device_id':tdid, 'out':oo,
'program_uoa':cfg['programs_uoa']['nas-is'],
'env':{'CK_COMPILE_TYPE':'auto'},
'deps':deps,
'quiet':q, 'record':rec, 'record_repo_uoa':rruid, 'record_data_uoa':rduid, 'os_abi':os_abi,
'title':'',
'subtitle':'Validating nas-is auto prefetching:',
'key':'figure-5-nas-is-auto', 'results':results})
if r['return']>0:
log({'string':''})
log({'string':'Experiment failed ('+r['error']+')'})
r=experiment({'host_os':hos, 'target_os':tos, 'device_id':tdid, 'out':oo,
'program_uoa':cfg['programs_uoa']['nas-is'],
'env':{'CK_COMPILE_TYPE':'auto-nostride'},
'deps':deps,
'quiet':q, 'record':rec, 'record_repo_uoa':rruid, 'record_data_uoa':rduid, 'os_abi':os_abi,
'title':'',
'subtitle':'Validating nas-is auto-nostride prefetching:',
'key':'figure-5-nas-is-auto-nostride', 'results':results})
if r['return']>0:
log({'string':''})
log({'string':'Experiment failed ('+r['error']+')'})
r=experiment({'host_os':hos, 'target_os':tos, 'device_id':tdid, 'out':oo,
'program_uoa':cfg['programs_uoa']['randacc'],
'env':{'CK_COMPILE_TYPE':'no'},
'deps':deps,
'quiet':q, 'record':rec, 'record_repo_uoa':rruid, 'record_data_uoa':rduid, 'os_abi':os_abi,
'title':'',
'subtitle':'Validating randacc no prefetching:',
'key':'figure-5-randacc-no-prefetching', 'results':results})
if r['return']>0:
log({'string':''})
log({'string':'Experiment failed ('+r['error']+')'})
r=experiment({'host_os':hos, 'target_os':tos, 'device_id':tdid, 'out':oo,
'program_uoa':cfg['programs_uoa']['randacc'],
'env':{'CK_COMPILE_TYPE':'auto'},
'deps':deps,
'quiet':q, 'record':rec, 'record_repo_uoa':rruid, 'record_data_uoa':rduid, 'os_abi':os_abi,
'title':'',
'subtitle':'Validating randacc auto prefetching:',
'key':'figure-5-randacc-auto', 'results':results})
if r['return']>0:
log({'string':''})
log({'string':'Experiment failed ('+r['error']+')'})
r=experiment({'host_os':hos, 'target_os':tos, 'device_id':tdid, 'out':oo,
'program_uoa':cfg['programs_uoa']['randacc'],
'env':{'CK_COMPILE_TYPE':'auto-nostride'},
'deps':deps,
'quiet':q, 'record':rec, 'record_repo_uoa':rruid, 'record_data_uoa':rduid, 'os_abi':os_abi,
'title':'',
'subtitle':'Validating randacc auto-nostride prefetching:',
'key':'figure-5-randacc-auto-nostride', 'results':results})
if r['return']>0:
log({'string':''})
log({'string':'Experiment failed ('+r['error']+')'})
r=experiment({'host_os':hos, 'target_os':tos, 'device_id':tdid, 'out':oo,
'program_uoa':cfg['programs_uoa']['hashjoin-ph-2'],
'env':{'CK_COMPILE_TYPE':'no'},
'deps':deps,
'quiet':q, 'record':rec, 'record_repo_uoa':rruid, 'record_data_uoa':rduid, 'os_abi':os_abi,
'title':'',
'subtitle':'Validating hashjoin-ph-2 no prefetching:',
'key':'figure-5-hashjoin-ph-2-no-prefetching', 'results':results})
if r['return']>0:
log({'string':''})
log({'string':'Experiment failed ('+r['error']+')'})
r=experiment({'host_os':hos, 'target_os':tos, 'device_id':tdid, 'out':oo,
'program_uoa':cfg['programs_uoa']['hashjoin-ph-2'],
'env':{'CK_COMPILE_TYPE':'auto'},
'deps':deps,
'quiet':q, 'record':rec, 'record_repo_uoa':rruid, 'record_data_uoa':rduid, 'os_abi':os_abi,
'title':'',
'subtitle':'Validating hashjoin-ph-2 auto prefetching:',
'key':'figure-5-hashjoin-ph-2-auto', 'results':results})
if r['return']>0:
log({'string':''})
log({'string':'Experiment failed ('+r['error']+')'})
r=experiment({'host_os':hos, 'target_os':tos, 'device_id':tdid, 'out':oo,
'program_uoa':cfg['programs_uoa']['hashjoin-ph-2'],
'env':{'CK_COMPILE_TYPE':'auto-nostride'},
'deps':deps,
'quiet':q, 'record':rec, 'record_repo_uoa':rruid, 'record_data_uoa':rduid, 'os_abi':os_abi,
'title':'',
'subtitle':'Validating hashjoin-ph-2 auto-nostride prefetching:',
'key':'figure-5-hashjoin-ph-2-auto-nostride', 'results':results})
if r['return']>0:
log({'string':''})
log({'string':'Experiment failed ('+r['error']+')'})
r=experiment({'host_os':hos, 'target_os':tos, 'device_id':tdid, 'out':oo,
'program_uoa':cfg['programs_uoa']['hashjoin-ph-8'],
'env':{'CK_COMPILE_TYPE':'no'},
'deps':deps,
'quiet':q, 'record':rec, 'record_repo_uoa':rruid, 'record_data_uoa':rduid, 'os_abi':os_abi,
'title':'',
'subtitle':'Validating hashjoin-ph-8 no prefetching:',
'key':'figure-5-hashjoin-ph-8-no-prefetching', 'results':results})
if r['return']>0:
log({'string':''})
log({'string':'Experiment failed ('+r['error']+')'})
r=experiment({'host_os':hos, 'target_os':tos, 'device_id':tdid, 'out':oo,
'program_uoa':cfg['programs_uoa']['hashjoin-ph-8'],
'env':{'CK_COMPILE_TYPE':'auto'},
'deps':deps,
'quiet':q, 'record':rec, 'record_repo_uoa':rruid, 'record_data_uoa':rduid, 'os_abi':os_abi,
'title':'',
'subtitle':'Validating hashjoin-ph-8 auto prefetching:',
'key':'figure-5-hashjoin-ph-8-auto', 'results':results})
if r['return']>0:
log({'string':''})
log({'string':'Experiment failed ('+r['error']+')'})
r=experiment({'host_os':hos, 'target_os':tos, 'device_id':tdid, 'out':oo,
'program_uoa':cfg['programs_uoa']['hashjoin-ph-8'],
'env':{'CK_COMPILE_TYPE':'auto-nostride'},
'deps':deps,
'quiet':q, 'record':rec, 'record_repo_uoa':rruid, 'record_data_uoa':rduid, 'os_abi':os_abi,
'title':'',
'subtitle':'Validating hashjoin-ph-8 auto-nostride prefetching:',
'key':'figure-5-hashjoin-ph-8-auto-nostride', 'results':results})
if r['return']>0:
log({'string':''})
log({'string':'Experiment failed ('+r['error']+')'})
r=experiment({'host_os':hos, 'target_os':tos, 'device_id':tdid, 'out':oo,
'program_uoa':cfg['programs_uoa']['graph500'],
'env':{'CK_COMPILE_TYPE':'no'},
'cmd':'s16e10',
'deps':deps,
'quiet':q, 'record':rec, 'record_repo_uoa':rruid, 'record_data_uoa':rduid, 'os_abi':os_abi,
'title':'',
'subtitle':'Validating graph500 no prefetching:',
'key':'figure-5-graph500-s16-no-prefetching', 'results':results})
if r['return']>0:
log({'string':''})
log({'string':'Experiment failed ('+r['error']+')'})
r=experiment({'host_os':hos, 'target_os':tos, 'device_id':tdid, 'out':oo,
'program_uoa':cfg['programs_uoa']['graph500'],
'env':{'CK_COMPILE_TYPE':'auto'},
'cmd':'s16e10',
'deps':deps,
'quiet':q, 'record':rec, 'record_repo_uoa':rruid, 'record_data_uoa':rduid, 'os_abi':os_abi,
'title':'',
'subtitle':'Validating graph500 auto prefetching:',
'key':'figure-5-graph500-s16-auto', 'results':results})
if r['return']>0:
log({'string':''})
log({'string':'Experiment failed ('+r['error']+')'})
r=experiment({'host_os':hos, 'target_os':tos, 'device_id':tdid, 'out':oo,
'program_uoa':cfg['programs_uoa']['graph500'],
'env':{'CK_COMPILE_TYPE':'auto-nostride'},
'cmd':'s16e10',
'deps':deps,
'quiet':q, 'record':rec, 'record_repo_uoa':rruid, 'record_data_uoa':rduid, 'os_abi':os_abi,
'title':'',
'subtitle':'Validating graph500 auto-nostride prefetching:',
'key':'figure-5-graph500-s16-auto-nostride', 'results':results})
if r['return']>0:
log({'string':''})
log({'string':'Experiment failed ('+r['error']+')'})
r=experiment({'host_os':hos, 'target_os':tos, 'device_id':tdid, 'out':oo,
'program_uoa':cfg['programs_uoa']['graph500'],
'env':{'CK_COMPILE_TYPE':'no'},
'cmd':'s21e10',
'deps':deps,
'quiet':q, 'record':rec, 'record_repo_uoa':rruid, 'record_data_uoa':rduid, 'os_abi':os_abi,
'title':'',
'subtitle':'Validating graph500 no prefetching:',
'key':'figure-5-graph500-s21-no-prefetching', 'results':results})
if r['return']>0:
log({'string':''})
log({'string':'Experiment failed ('+r['error']+')'})
r=experiment({'host_os':hos, 'target_os':tos, 'device_id':tdid, 'out':oo,
'program_uoa':cfg['programs_uoa']['graph500'],
'env':{'CK_COMPILE_TYPE':'auto'},
'cmd':'s21e10',
'deps':deps,
'quiet':q, 'record':rec, 'record_repo_uoa':rruid, 'record_data_uoa':rduid, 'os_abi':os_abi,
'title':'',
'subtitle':'Validating graph500 auto prefetching:',
'key':'figure-5-graph500-s21-auto', 'results':results})
if r['return']>0:
log({'string':''})
log({'string':'Experiment failed ('+r['error']+')'})
r=experiment({'host_os':hos, 'target_os':tos, 'device_id':tdid, 'out':oo,
'program_uoa':cfg['programs_uoa']['graph500'],
'env':{'CK_COMPILE_TYPE':'auto-nostride'},
'cmd':'s21e10',
'deps':deps,
'quiet':q, 'record':rec, 'record_repo_uoa':rruid, 'record_data_uoa':rduid, 'os_abi':os_abi,
'title':'',
'subtitle':'Validating graph500 auto-nostride prefetching:',
'key':'figure-5-graph500-s21-auto-nostride', 'results':results})
if r['return']>0:
log({'string':''})
log({'string':'Experiment failed ('+r['error']+')'})
# Reproducing Figure 6 ###################################################################################
r=experiment({'host_os':hos, 'target_os':tos, 'device_id':tdid, 'out':oo,
'program_uoa':cfg['programs_uoa']['nas-is'],
'env':{'CK_COMPILE_TYPE':'no'},
'deps':deps,
'quiet':q, 'record':rec, 'record_repo_uoa':rruid, 'record_data_uoa':rduid, 'os_abi':os_abi,
'title':'Reproducing experiments for Figure 5',
'subtitle':'Validating nas-is no prefetching:',
'key':'figure-6-nas-is-no-prefetching', 'results':results})
if r['return']>0:
log({'string':''})
log({'string':'Experiment failed ('+r['error']+')'})
for x in [2, 4, 8, 16, 32, 64, 128, 256]:
r=experiment({'host_os':hos, 'target_os':tos, 'device_id':tdid, 'out':oo,
'program_uoa':cfg['programs_uoa']['nas-is'],
'env':{'CK_COMPILE_TYPE':'offset', 'CK_FETCHDIST':str(x)},
'deps':deps,
'quiet':q, 'record':rec, 'record_repo_uoa':rruid, 'record_data_uoa':rduid, 'os_abi':os_abi,
'title':'',
'subtitle':'Validating nas-is prefetching distance: ' + str(x),
'key':'figure-6-nas-is-prefetching-dist-' + str(x), 'results':results})
if r['return']>0:
log({'string':''})
log({'string':'Experiment failed ('+r['error']+')'})
r=experiment({'host_os':hos, 'target_os':tos, 'device_id':tdid, 'out':oo,
'program_uoa':cfg['programs_uoa']['nas-cg'],
'env':{'CK_COMPILE_TYPE':'no'},
'deps':deps,
'quiet':q, 'record':rec, 'record_repo_uoa':rruid, 'record_data_uoa':rduid, 'os_abi':os_abi,
'title':'',
'subtitle':'Validating nas-cg no prefetching:',
'key':'figure-6-nas-cg-no-prefetching', 'results':results})
if r['return']>0:
log({'string':''})
log({'string':'Experiment failed ('+r['error']+')'})
for x in [2, 4, 8, 16, 32, 64, 128, 256]:
r=experiment({'host_os':hos, 'target_os':tos, 'device_id':tdid, 'out':oo,
'program_uoa':cfg['programs_uoa']['nas-cg'],
'env':{'CK_COMPILE_TYPE':'offset', 'CK_FETCHDIST':str(x)},
'deps':deps,
'quiet':q, 'record':rec, 'record_repo_uoa':rruid, 'record_data_uoa':rduid, 'os_abi':os_abi,
'title':'',
'subtitle':'Validating nas-cg prefetching distance: ' + str(x),
'key':'figure-6-nas-cg-prefetching-dist-' + str(x), 'results':results})
if r['return']>0:
log({'string':''})
log({'string':'Experiment failed ('+r['error']+')'})
r=experiment({'host_os':hos, 'target_os':tos, 'device_id':tdid, 'out':oo,
'program_uoa':cfg['programs_uoa']['randacc'],
'env':{'CK_COMPILE_TYPE':'no'},
'deps':deps,
'quiet':q, 'record':rec, 'record_repo_uoa':rruid, 'record_data_uoa':rduid, 'os_abi':os_abi,
'title':'',
'subtitle':'Validating randacc no prefetching:',
'key':'figure-6-randacc-no-prefetching', 'results':results})
if r['return']>0:
log({'string':''})
log({'string':'Experiment failed ('+r['error']+')'})
for x in [2, 4, 8, 16, 32, 64, 128, 256]:
r=experiment({'host_os':hos, 'target_os':tos, 'device_id':tdid, 'out':oo,
'program_uoa':cfg['programs_uoa']['randacc'],
'env':{'CK_COMPILE_TYPE':'offset', 'CK_FETCHDIST':str(x)},
'deps':deps,
'quiet':q, 'record':rec, 'record_repo_uoa':rruid, 'record_data_uoa':rduid, 'os_abi':os_abi,
'title':'',
'subtitle':'Validating randacc prefetching distance: ' + str(x),
'key':'figure-6-randacc-prefetching-dist-' + str(x), 'results':results})
if r['return']>0:
log({'string':''})
log({'string':'Experiment failed ('+r['error']+')'})
r=experiment({'host_os':hos, 'target_os':tos, 'device_id':tdid, 'out':oo,
'program_uoa':cfg['programs_uoa']['hashjoin-ph-2'],
'env':{'CK_COMPILE_TYPE':'no'},
'deps':deps,
'quiet':q, 'record':rec, 'record_repo_uoa':rruid, 'record_data_uoa':rduid, 'os_abi':os_abi,
'title':'',
'subtitle':'Validating hashjoin-ph-2 no prefetching:',
'key':'figure-6-hashjoin-ph-2-no-prefetching', 'results':results})
if r['return']>0:
log({'string':''})
log({'string':'Experiment failed ('+r['error']+')'})
for x in [2, 4, 8, 16, 32, 64, 128, 256]:
r=experiment({'host_os':hos, 'target_os':tos, 'device_id':tdid, 'out':oo,
'program_uoa':cfg['programs_uoa']['hashjoin-ph-2'],
'env':{'CK_COMPILE_TYPE':'offset', 'CK_FETCHDIST':str(x)},
'deps':deps,
'quiet':q, 'record':rec, 'record_repo_uoa':rruid, 'record_data_uoa':rduid, 'os_abi':os_abi,
'title':'',
'subtitle':'Validating hashjoin-ph-2 prefetching distance: ' + str(x),
'key':'figure-6-hashjoin-ph-2-prefetching-dist-' + str(x), 'results':results})
if r['return']>0:
log({'string':''})
log({'string':'Experiment failed ('+r['error']+')'})
# Reproducing Figure 7 ###################################################################################
r=experiment({'host_os':hos, 'target_os':tos, 'device_id':tdid, 'out':oo,
'program_uoa':cfg['programs_uoa']['hashjoin-ph-8'],
'env':{'CK_COMPILE_TYPE':'no'},
'deps':deps,
'quiet':q, 'record':rec, 'record_repo_uoa':rruid, 'record_data_uoa':rduid, 'os_abi':os_abi,
'title':'Reproducing Experiments for Figure 7',
'subtitle':'Validating hashjoin-ph-8 no prefetching:',
'key':'figure-7-hashjoin-ph-8-no-prefetching', 'results':results})
if r['return']>0:
log({'string':''})
log({'string':'Experiment failed ('+r['error']+')'})
for x in [1, 2, 3, 4]:
r=experiment({'host_os':hos, 'target_os':tos, 'device_id':tdid, 'out':oo,
'program_uoa':cfg['programs_uoa']['hashjoin-ph-8'],
'env':{'CK_COMPILE_TYPE':'prefetches', 'CK_NUMPREFETCHES':str(x)},
'deps':deps,
'quiet':q, 'record':rec, 'record_repo_uoa':rruid, 'record_data_uoa':rduid, 'os_abi':os_abi,
'title':'',
'subtitle':'Validating hashjoin-ph-8 prefetching elements: ' + str(x),
'key':'figure-7-hashjoin-ph-8-prefetching-elements-' + str(x), 'results':results})
if r['return']>0:
log({'string':''})
log({'string':'Experiment failed ('+r['error']+')'})
return {'return':0}
##############################################################################
# open PLUTON dashboard
def dashboard(i):
    """
    Open the PLUTON dashboard by delegating to the CK 'browser' action.

    Input:  {
            }
    Output: {
              return  - return code = 0, if successful
                                    > 0, if error
              (error) - error text if return > 0
            }
    """
    # Rewrite the request in place, then hand it off to the CK kernel.
    i.update({
        'action': 'browser',
        'cid': '',
        'module_uoa': '',
        'template': 'cgo2017',
    })
    return ck.access(i)
##############################################################################
# show experiment dashboard
def show(i):
"""
Input: {
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
import os
import copy
st=''
ckey=''
h='<center>\n'
h+='\n\n<script language="JavaScript">function copyToClipboard (text) {window.prompt ("Copy to clipboard: Ctrl+C, Enter", text);}</script>\n\n'
# Check host URL prefix and default | |
of objects.
Args:
clean_magnitude1, clean_magnitude2 (list of floats) -- Objects with flag values of zero and/or quality cuts performed.
error1, error2 (list of floats) -- 1 and 2 refer to the matched catalogs.
Returns:
binned_hax_mag_median (list of floats) -- List of medians of the horizontal axis magnitude in each bin.
binned_vax_mag_median (list of floats) -- List of medians of the vertical axis magnitude in each bin. Vertical axis is computed via clean_magnitude1 - clean_magnitude2.
binned_err_median (list of floats) -- Median of the error in each bin.
bins (list of floats) -- Bins used. Binned according to horizontal axis.
binned_hax_mag_list, binned_vax_mag_list, binned_err_list (list of lists of floats) -- Stores values in each bin (horizontal axis magnitude, vertical axis magnitude, error, respectively).
"""
### !!!!! Comment this block out if errors are to be computed using both catalogs regardless of origin (measured catalog or truth catalog) ###
if 'meas' in AXLABEL1 and 'meas' not in AXLABEL2:
error2 = np.zeros(len(error1))
if PRINTOUTS:
print 'Using measured catalog (catalog1) for error calculation ... '
if 'meas' in AXLABEL2 and 'meas' not in AXLABEL1:
error1 = np.zeros(len(error2))
if PRINTOUTS:
print 'Using measured catalog (catalog2) for error calculation ... '
if 'meas' in AXLABEL1 and 'meas' in AXLABEL2:
if PRINTOUTS:
print 'Using measured catalog (catalog1 AND catalog2) for error calculation ... '
if 'true' in AXLABEL1 and 'true' in AXLABEL2:
sys.exit('Errors are to be computed using the measured catalog(s), not the truth catalog(s).')
### Define bins ###
step = 0.5
# Find the absolute min and max of the magnitudes in the matched catalog #
limlow1, limlow2 = min(clean_magnitude1), min(clean_magnitude2)
limhigh1, limhigh2 = max(clean_magnitude1), max(clean_magnitude2)
limlow, limhigh = min([limlow1, limlow2]), max([limhigh1, limhigh2])
# Define bins limits by ints #
limlow, limhigh = int(limlow), int(limhigh)
# Introduce magnitude cutoff to tame errors #
if 'gal' in MATCH_CAT1 or 'gal' in MATCH_CAT2:
limhigh = 26
if 'star' in MATCH_CAT1 or 'star' in MATCH_CAT2:
limhigh = 24
if PRINTOUTS:
print 'Forcing magnitudes to be binned with max ', limhigh, '...'
bins = np.arange(limlow, limhigh, step)
# Stores median of values in each bin #
binned_hax_mag_median, binned_vax_mag_median, binned_err_median = [], [], []
# List of lists. Stores all values in each bin #
binned_hax_mag_list, binned_vax_mag_list, binned_err_list = [], [], []
counter_empty_bin = 0
# Bin magnitude errors according to the magnitude on the horizontal axis #
if SWAP_HAX:
hax_mag = clean_magnitude2
if SWAP_HAX is False:
hax_mag = clean_magnitude1
# Magnitude on the vertical axis (vax) #
vax_mag = np.array(clean_magnitude1) - np.array(clean_magnitude2)
### Write filter header to log file ###
FD_MAG_BINS.write('Filter: ' + str(filter_name) + '\n')
### Populate each bin ###
for j in np.arange(limlow, limhigh, step):
binned_hax_mag_temp, binned_vax_mag_temp, binned_err_temp, counter_err = [], [], [], 0
for i in np.arange(0, len(clean_magnitude1)):
# Do not calculate errors using outlier magnitudes (chosen to be |Delta-M| > 3). Bin magnitude errors according to the magnitude on the horizontal axis of the plot #
if hax_mag[i] >= j and hax_mag[i] < j+step and abs(vax_mag[i]) < 3:
binned_err_temp.append((error1[i]**2 + error2[i]**2)**0.5)
binned_hax_mag_temp.append(hax_mag[i])
binned_vax_mag_temp.append(vax_mag[i])
counter_err += 1
# Written in log file, hence 'minor' #
if PRINTOUTS_MINOR:
print ' For magnitude, number of objects in bin ', round(j, 2), '-', round(j+step, 2), ': ', counter_err, '...'
### Write to log file ###
if counter_err == 0:
write_median, write_err = None, None
if counter_err > 0:
write_median, write_err = np.median(binned_hax_mag_temp), np.median(binned_err_temp)
FD_MAG_BINS.write(str(counter_err) + '\t' + str(round(j, 2)) + '\t' + str(round(j+step, 2)) + '\t' + str(write_median)+ '\t' + str(write_err) + '\n')
### Tame error calculation and normalization by adding zeros to empty bins and bins with a small number of points ###
# Define 'small' #
if STACK_REALIZATIONS:
CONST = 30
if STACK_REALIZATIONS is False:
CONST = 10
if counter_err <= CONST:
counter_empty_bin += 1
binned_err_median.append(None)
binned_hax_mag_median.append(None)
binned_vax_mag_median.append(None)
# Add to list of lists to keep bin structure #
binned_err_list.append(None)
binned_hax_mag_list.append(None)
binned_vax_mag_list.append(None)
if counter_err > CONST:
binned_err_median.append(np.median(binned_err_temp))
binned_hax_mag_median.append(np.median(binned_hax_mag_temp))
binned_vax_mag_median.append(np.median(binned_vax_mag_temp))
# Add to list of lists to keep bin structure #
binned_err_list.append(binned_err_temp)
binned_hax_mag_list.append(binned_hax_mag_temp)
binned_vax_mag_list.append(binned_vax_mag_temp)
if PRINTOUTS:
if SWAP_HAX:
print 'Binned clean_magnitude2 with step size: ', step, ', and minimum: ', limlow, ', and maximum: ', limhigh, '...'
if SWAP_HAX is False:
print 'Binned clean_magnitude1 with step size: ', step, ', and minimum: ', limlow, ', and maximum: ', limhigh, '...'
print ' Calculated errors using objects where |DeltaM| < 3 ... '
print ' Excluded bins with less than ', CONST, ' objects ... \n'
return binned_hax_mag_median, binned_vax_mag_median, binned_err_median, bins, binned_hax_mag_list, binned_vax_mag_list, binned_err_list
def normalize_plot_maintain_bin_structure(clean_magnitude1, clean_magnitude2, error1, error2, filter_name):
    """Normalize the vertical axis by the binned median error while keeping the bin structure.

    Args:
        clean_magnitude1, clean_magnitude2 (list of floats) -- Flag-cleaned magnitudes; 1 and 2 refer to the matched catalogs.
        error1, error2 (list of floats) -- Magnitude errors for each catalog.
        filter_name (str) -- Band identifier, forwarded to the binning routine.
    Returns:
        norm_dm_list (list of list of floats) -- Normalized delta-magnitudes per bin; None marks an empty bin or one with few objects.
        bins (list of floats) -- Bins used in the error calculation.
        hax_mag_list (list of list of floats) -- Horizontal-axis magnitudes per bin; None placeholders as above.
    """
    # [2:-1] selects (binned_err_median, bins, binned_hax_mag_list, binned_vax_mag_list)
    # from the 7-tuple returned by the binning routine.
    binned_err_median, bins, binned_hax_mag_list, binned_vax_mag_list = bin_and_cut_measured_magnitude_error(clean_magnitude1=clean_magnitude1, clean_magnitude2=clean_magnitude2, error1=error1, error2=error2, filter_name=filter_name)[2:-1]
    norm_dm_list, hax_mag_list = [], []
    # Walk the binned lists in lockstep so the bin structure is preserved.
    for median_err, hax_bin, vax_bin in zip(binned_err_median, binned_hax_mag_list, binned_vax_mag_list):
        if median_err is None:
            # Placeholder kept for an empty bin or one with too few objects.
            norm_dm_list.append(None)
            hax_mag_list.append(None)
        else:
            # Normalize each Delta-Magnitude by the bin's median error.
            norm_dm_list.append([dm / median_err for dm in vax_bin])
            hax_mag_list.append([hax_bin[idx] for idx in range(len(vax_bin))])
    return norm_dm_list, bins, hax_mag_list
def normalize_plot(norm_delta_mag_list, bins, hax_mag_list):
    """Normalize plot to 1-sigma curve using tame magnitude errors only (use bin_and_cut_measured_magnitude_error()).

    Args:
        norm_delta_mag_list (list of list of floats) -- Normalized delta magnitudes in each bin; None entries are
            placeholders for empty or small bins. Modified in place: None placeholders are removed.
        bins (list of floats) -- Bins used in error calculation (ascending, from np.arange upstream).
        hax_mag_list (list of list of floats) -- Magnitudes on the horizontal axis, bin structure preserved.
            Modified in place like norm_delta_mag_list.
    Returns:
        norm_dm (np.array of floats) -- Delta-Magnitude normalized by error. Delta-Magnitude computed via magnitude1 - magnitude2.
        hax_mag (np.array of floats) -- Magnitude to be plotted on the horizontal axis.
        bins (list of floats) -- The input bins, unchanged.
    """
    ### Remove None placeholders so that lists can be flattened. Placeholders mark empty or small bins. ###
    norm_delta_mag_list[:] = [temp for temp in norm_delta_mag_list if temp is not None]
    hax_mag_list[:] = [temp for temp in hax_mag_list if temp is not None]
    ### Flatten lists ###
    hax_mag = [item for sublist in hax_mag_list for item in sublist]
    norm_dm = [item for sublist in norm_delta_mag_list for item in sublist]
    ### Keep only objects within the binned magnitude range ###
    # BUG FIX: the previous per-bin double loop tested bins[b] <= mag <= bins[b+1]
    # with BOTH edges inclusive, so an object sitting exactly on an interior bin
    # edge matched two consecutive bins and its index was appended twice,
    # double-counting it downstream. A single inclusive test against the overall
    # range [bins[0], bins[-1]] selects the same objects exactly once.
    # NOTE(review): output order now follows the flattened input order rather than
    # bin-grouped order; the visible consumer (one_sigma_counter) is
    # order-insensitive -- confirm other callers are too.
    norm_dm, hax_mag = np.array(norm_dm), np.array(hax_mag)
    keep = (hax_mag >= bins[0]) & (hax_mag <= bins[-1])
    norm_dm, hax_mag = norm_dm[keep], hax_mag[keep]
    return norm_dm, hax_mag, bins
def one_sigma_counter(norm_delta_mag, clean_magnitude1, bins, hax_mag):
    """Count the objects that lie within the 1-sigma envelope, where 1-sigma is
    defined by the measured magnitude error. Only called when NORMALIZE is True.

    Args:
        norm_delta_mag (list of floats) -- Normalized Delta-Magnitude.
        clean_magnitude1 (list of floats) -- Magnitudes with flags/cuts removed;
            used only in the printed bookkeeping fraction.
        bins (list of floats) -- Bins used in the error calculation.
        hax_mag (list of floats) -- Horizontal-axis magnitude per object.
    Returns:
        counter_1sig (int) -- Number of objects within the 1-sigma curve.
    """
    counter_1sig = 0
    # Cutoffs were introduced in error calculation. Consider only points not cutoff #
    maglow, maghigh = min(bins), max(bins)
    hax_mag, norm_delta_mag = np.array(hax_mag), np.array(norm_delta_mag)
    # Restrict to objects inside the binned magnitude range
    norm_delta_mag = norm_delta_mag[(hax_mag >= maglow) & (hax_mag <= maghigh)]
    # |normalized delta-mag| < 1 means the object sits inside the 1-sigma envelope
    for k in norm_delta_mag:
        if abs(k) < 1.0:
            counter_1sig += 1
    if PRINTOUTS:
        print 'Fraction of objects within 1-sigma: ', counter_1sig, ' / ', len(norm_delta_mag), ' = ', str(float(counter_1sig) / len(norm_delta_mag))
        print ' Fraction of objects considered (objects plotted on normalized plot / objects plotted on scatter plot): ', str(float(len(norm_delta_mag)) / len(clean_magnitude1)), '\n'
    return counter_1sig
def get_flag_type(df, k):
    """Print the type() of each flag header once.

    Args:
        df (pandas DataFrame) -- Catalog containing the columns named in FLAG_HDR_LIST.
        k (int) -- Counter so the printout is not repeated; only k == 0 prints.
    Returns:
        0
    """
    if k == 0:
        for flag_hdr in FLAG_HDR_LIST:
            print 'HEADER:', str(flag_hdr), ' -- EXAMPLE:', df[flag_hdr][0], ' -- TYPE:', type(df[flag_hdr][0])
        # NOTE(review): this increments a local only -- the caller's counter is
        # not advanced by this function; confirm the caller tracks its own k.
        k += 1
    return 0
def get_color(filter_name):
    """Color code plots so that each griz band is a different color.

    Args:
        filter_name (str) -- Allowed values: 'g' 'r' 'i' 'z'.
    Returns:
        color (str) -- Color name for scatter/curve plots.
        cmap (str) -- Colormap name used for Delta-Magnitude colorbars.
    Raises:
        ValueError -- If filter_name is not one of 'g' 'r' 'i' 'z'.
    """
    # Earlier versions used 'purple'/'Purples' for i and 'blue'/'Blues' for z.
    color_cmap = {
        'g': ('green', 'Greens'),
        'r': ('orange', 'Oranges'),
        'i': ('darkgrey', 'Greys'),
        'z': ('navy', 'Blues'),
    }
    try:
        return color_cmap[filter_name]
    except KeyError:
        # Previously an unknown band fell through the if-chain and surfaced as
        # an opaque UnboundLocalError; fail loudly with the offending value.
        raise ValueError("filter_name must be one of 'g' 'r' 'i' 'z', got %r" % (filter_name,))
def logger(delta_mag, tile_name, filter_name, realization_number, clean_magnitude1, full_magnitude1, bins, hax_mag):
"""Write to log files to record number of objects plotted and number of objects within 1sigma.
Args:
filter_name (str) -- Allowed values: 'g' 'r' 'i' 'z'.
clean_magnitude1 (list of floats) -- Objects with nonzero flags and/or quality cuts removed.
full_magnitude (list of floats) -- Values read | |
= vstruct.VArray([ v_uint32() for i in xrange(15) ])
self.SecondLevelCacheSize = v_uint32()
self.HalReserved = vstruct.VArray([ v_uint32() for i in xrange(16) ])
self.Unused2 = v_uint32()
self._pad0108 = v_bytes(size=4)
self.KdVersionBlock = v_ptr64()
self.Unused3 = v_ptr64()
self.PcrAlign1 = vstruct.VArray([ v_uint32() for i in xrange(24) ])
self._pad0180 = v_bytes(size=8)
self.Prcb = KPRCB()
class IMAGE_FILE_HEADER(vstruct.VStruct):
    """Vstruct byte layout of the PE/COFF IMAGE_FILE_HEADER; field order is significant."""
    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.Machine = v_uint16()
        self.NumberOfSections = v_uint16()
        self.TimeDateStamp = v_uint32()
        self.PointerToSymbolTable = v_uint32()
        self.NumberOfSymbols = v_uint32()
        self.SizeOfOptionalHeader = v_uint16()
        self.Characteristics = v_uint16()
class CM_KEY_INDEX(vstruct.VStruct):
    """Vstruct byte layout of the Windows CM_KEY_INDEX structure; the trailing List array is declared with one element."""
    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.Signature = v_uint16()
        self.Count = v_uint16()
        self.List = vstruct.VArray([ v_uint32() for i in xrange(1) ])
class FILE_STANDARD_INFORMATION(vstruct.VStruct):
    """Vstruct byte layout of the Windows FILE_STANDARD_INFORMATION structure; field order is significant."""
    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.AllocationSize = LARGE_INTEGER()
        self.EndOfFile = LARGE_INTEGER()
        self.NumberOfLinks = v_uint32()
        self.DeletePending = v_uint8()
        self.Directory = v_uint8()
        self._pad0018 = v_bytes(size=2)
class RELATION_LIST(vstruct.VStruct):
    """Vstruct byte layout of the Windows RELATION_LIST structure; Entries is declared with one element."""
    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.Count = v_uint32()
        self.TagCount = v_uint32()
        self.FirstLevel = v_uint32()
        self.MaxLevel = v_uint32()
        self.Entries = vstruct.VArray([ v_ptr64() for i in xrange(1) ])
class PI_RESOURCE_ARBITER_ENTRY(vstruct.VStruct):
    """Vstruct byte layout of the Windows PI_RESOURCE_ARBITER_ENTRY structure; _pad fields fix alignment."""
    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.DeviceArbiterList = LIST_ENTRY()
        self.ResourceType = v_uint8()
        self._pad0018 = v_bytes(size=7)
        self.ArbiterInterface = v_ptr64()
        self.DeviceNode = v_ptr64()
        self.ResourceList = LIST_ENTRY()
        self.BestResourceList = LIST_ENTRY()
        self.BestConfig = LIST_ENTRY()
        self.ActiveArbiterList = LIST_ENTRY()
        self.State = v_uint8()
        self.ResourcesChanged = v_uint8()
        self._pad0070 = v_bytes(size=6)
class _unnamed_26939(vstruct.VStruct):
    """Vstruct layout of an anonymous Windows sub-structure (auto-generated name)."""
    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.VetoType = v_uint32()
        self.DeviceIdVetoNameBuffer = vstruct.VArray([ v_uint16() for i in xrange(1) ])
        self._pad0008 = v_bytes(size=2)
class _unnamed_26936(vstruct.VStruct):
    """Vstruct layout of an anonymous Windows sub-structure (auto-generated name)."""
    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.NotificationCode = v_uint32()
        self.NotificationData = v_uint32()
class _unnamed_21404(vstruct.VStruct):
    """Vstruct layout of an anonymous Windows sub-structure (auto-generated name)."""
    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.PowerState = v_uint32()
class _unnamed_26934(vstruct.VStruct):
    """Vstruct layout of an anonymous Windows sub-structure (auto-generated name)."""
    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.Notification = v_ptr64()
class TOKEN_SOURCE(vstruct.VStruct):
    """Vstruct byte layout of the Windows TOKEN_SOURCE structure (8-byte source name plus LUID)."""
    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.SourceName = vstruct.VArray([ v_uint8() for i in xrange(8) ])
        self.SourceIdentifier = LUID()
class _unnamed_25968(vstruct.VStruct):
    """Vstruct layout of an anonymous Windows sub-structure (auto-generated name)."""
    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.DeviceClass = _unnamed_26924()
        self._pad0020 = v_bytes(size=12)
class _unnamed_22291(vstruct.VStruct):
    """Vstruct layout of an anonymous Windows sub-structure (auto-generated name)."""
    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.NumberOfSystemCacheViews = v_uint32()
        self.WritableUserReferences = v_uint32()
        self.SubsectionRoot = v_ptr64()
class _unnamed_26931(vstruct.VStruct):
    """Vstruct layout of an anonymous Windows sub-structure (auto-generated name)."""
    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.NotificationStructure = v_ptr64()
        self.DeviceIds = vstruct.VArray([ v_uint16() for i in xrange(1) ])
        self._pad0010 = v_bytes(size=6)
class TRACE_ENABLE_CONTEXT(vstruct.VStruct):
    """Vstruct byte layout of the Windows TRACE_ENABLE_CONTEXT structure; field order is significant."""
    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.LoggerId = v_uint16()
        self.Level = v_uint8()
        self.InternalFlag = v_uint8()
        self.EnableFlags = v_uint32()
class _unnamed_27360(vstruct.VStruct):
    """Vstruct layout of an anonymous Windows sub-structure (auto-generated name)."""
    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.CellData = CELL_DATA()
class PEB_LDR_DATA(vstruct.VStruct):
    """Vstruct byte layout of the Windows PEB_LDR_DATA structure (loader module lists)."""
    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.Length = v_uint32()
        self.Initialized = v_uint8()
        self._pad0008 = v_bytes(size=3)
        self.SsHandle = v_ptr64()
        self.InLoadOrderModuleList = LIST_ENTRY()
        self.InMemoryOrderModuleList = LIST_ENTRY()
        self.InInitializationOrderModuleList = LIST_ENTRY()
        self.EntryInProgress = v_ptr64()
        self.ShutdownInProgress = v_uint8()
        self._pad0050 = v_bytes(size=7)
        self.ShutdownThreadId = v_ptr64()
class DBGKD_WRITE_BREAKPOINT64(vstruct.VStruct):
    """Vstruct byte layout of the Windows DBGKD_WRITE_BREAKPOINT64 structure."""
    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.BreakPointAddress = v_uint64()
        self.BreakPointHandle = v_uint32()
        self._pad0010 = v_bytes(size=4)
class FSRTL_ADVANCED_FCB_HEADER(vstruct.VStruct):
    """Vstruct byte layout of the Windows FSRTL_ADVANCED_FCB_HEADER structure; field order is significant."""
    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.NodeTypeCode = v_uint16()
        self.NodeByteSize = v_uint16()
        self.Flags = v_uint8()
        self.IsFastIoPossible = v_uint8()
        self.Flags2 = v_uint8()
        self.Reserved = v_uint8()
        self.Resource = v_ptr64()
        self.PagingIoResource = v_ptr64()
        self.AllocationSize = LARGE_INTEGER()
        self.FileSize = LARGE_INTEGER()
        self.ValidDataLength = LARGE_INTEGER()
        self.FastMutex = v_ptr64()
        self.FilterContexts = LIST_ENTRY()
        self.PushLock = EX_PUSH_LOCK()
        self.FileContextSupportPointer = v_ptr64()
class _unnamed_21271(vstruct.VStruct):
    """Vstruct layout of an anonymous Windows sub-structure (auto-generated name)."""
    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.Vpb = v_ptr64()
        self.DeviceObject = v_ptr64()
class DIAGNOSTIC_BUFFER(vstruct.VStruct):
    """Vstruct byte layout of the Windows DIAGNOSTIC_BUFFER structure; field order is significant."""
    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.Size = v_uint64()
        self.CallerType = v_uint32()
        self._pad0010 = v_bytes(size=4)
        self.ProcessImageNameOffset = v_uint64()
        self.ProcessId = v_uint32()
        self.ServiceTag = v_uint32()
        self.ReasonOffset = v_uint64()
class MM_PAGE_ACCESS_INFO_FLAGS(vstruct.VStruct):
    """Vstruct byte layout of the Windows MM_PAGE_ACCESS_INFO_FLAGS structure."""
    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.File = _unnamed_25217()
class SECURITY_SUBJECT_CONTEXT(vstruct.VStruct):
    """Vstruct byte layout of the Windows SECURITY_SUBJECT_CONTEXT structure."""
    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.ClientToken = v_ptr64()
        self.ImpersonationLevel = v_uint32()
        self._pad0010 = v_bytes(size=4)
        self.PrimaryToken = v_ptr64()
        self.ProcessAuditId = v_ptr64()
class _unnamed_22464(vstruct.VStruct):
    """Vstruct layout of an anonymous Windows sub-structure (auto-generated name)."""
    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.Balance = v_uint64()
class X86_DBGKD_CONTROL_SET(vstruct.VStruct):
    """Vstruct byte layout of the Windows X86_DBGKD_CONTROL_SET structure."""
    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.TraceFlag = v_uint32()
        self.Dr7 = v_uint32()
        self.CurrentSymbolStart = v_uint32()
        self.CurrentSymbolEnd = v_uint32()
class PROFILE_PARAMETER_BLOCK(vstruct.VStruct):
    """Vstruct byte layout of the Windows PROFILE_PARAMETER_BLOCK structure."""
    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.Status = v_uint16()
        self.Reserved = v_uint16()
        self.DockingState = v_uint16()
        self.Capabilities = v_uint16()
        self.DockID = v_uint32()
        self.SerialNumber = v_uint32()
class ALPC_MESSAGE_ATTRIBUTES(vstruct.VStruct):
    """Vstruct byte layout of the Windows ALPC_MESSAGE_ATTRIBUTES structure."""
    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.AllocatedAttributes = v_uint32()
        self.ValidAttributes = v_uint32()
class POP_THERMAL_ZONE_METRICS(vstruct.VStruct):
    """Vstruct byte layout of the Windows POP_THERMAL_ZONE_METRICS structure."""
    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.MetricsResource = ERESOURCE()
        self.ActiveCount = v_uint32()
        self.PassiveCount = v_uint32()
        self.LastActiveStartTick = LARGE_INTEGER()
        self.AverageActiveTime = LARGE_INTEGER()
        self.LastPassiveStartTick = LARGE_INTEGER()
        self.AveragePassiveTime = LARGE_INTEGER()
        self.StartTickSinceLastReset = LARGE_INTEGER()
class PCW_DATA(vstruct.VStruct):
    """Vstruct byte layout of the Windows PCW_DATA structure."""
    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.Data = v_ptr64()
        self.Size = v_uint32()
        self._pad0010 = v_bytes(size=4)
class DEVICE_RELATIONS(vstruct.VStruct):
    """Vstruct byte layout of the Windows DEVICE_RELATIONS structure; Objects is declared with one element."""
    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.Count = v_uint32()
        self._pad0008 = v_bytes(size=4)
        self.Objects = vstruct.VArray([ v_ptr64() for i in xrange(1) ])
class ETW_PROVIDER_TABLE_ENTRY(vstruct.VStruct):
    """Vstruct byte layout of the Windows ETW_PROVIDER_TABLE_ENTRY structure."""
    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.RefCount = v_uint32()
        self.State = v_uint32()
        self.RegEntry = v_ptr64()
        self.Caller = v_ptr64()
class MMSUBSECTION_FLAGS(vstruct.VStruct):
    """Vstruct byte layout of the Windows MMSUBSECTION_FLAGS structure."""
    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.SubsectionAccessed = v_uint16()
        self.SubsectionStatic = v_uint16()
class INTERFACE(vstruct.VStruct):
    """Vstruct byte layout of the Windows INTERFACE structure."""
    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.Size = v_uint16()
        self.Version = v_uint16()
        self._pad0008 = v_bytes(size=4)
        self.Context = v_ptr64()
        self.InterfaceReference = v_ptr64()
        self.InterfaceDereference = v_ptr64()
class STRING32(vstruct.VStruct):
    """Vstruct byte layout of the Windows STRING32 structure (counted string with 32-bit buffer pointer)."""
    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.Length = v_uint16()
        self.MaximumLength = v_uint16()
        self.Buffer = v_uint32()
class WMI_LOGGER_CONTEXT(vstruct.VStruct):
    """Vstruct byte layout of the Windows WMI_LOGGER_CONTEXT (ETW logger) structure; field order and _pad fields fix the layout."""
    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.LoggerId = v_uint32()
        self.BufferSize = v_uint32()
        self.MaximumEventSize = v_uint32()
        self.CollectionOn = v_uint32()
        self.LoggerMode = v_uint32()
        self.AcceptNewEvents = v_uint32()
        self.GetCpuClock = v_ptr64()
        self.StartTime = LARGE_INTEGER()
        self.LogFileHandle = v_ptr64()
        self.LoggerThread = v_ptr64()
        self.LoggerStatus = v_uint32()
        self._pad0040 = v_bytes(size=4)
        self.NBQHead = v_ptr64()
        self.OverflowNBQHead = v_ptr64()
        self.QueueBlockFreeList = SLIST_HEADER()
        self.GlobalList = LIST_ENTRY()
        self.BatchedBufferList = v_ptr64()
        self.LoggerName = UNICODE_STRING()
        self.LogFileName = UNICODE_STRING()
        self.LogFilePattern = UNICODE_STRING()
        self.NewLogFileName = UNICODE_STRING()
        self.ClockType = v_uint32()
        self.MaximumFileSize = v_uint32()
        self.LastFlushedBuffer = v_uint32()
        self.FlushTimer = v_uint32()
        self.FlushThreshold = v_uint32()
        self._pad00d0 = v_bytes(size=4)
        self.ByteOffset = LARGE_INTEGER()
        self.MinimumBuffers = v_uint32()
        self.BuffersAvailable = v_uint32()
        self.NumberOfBuffers = v_uint32()
        self.MaximumBuffers = v_uint32()
        self.EventsLost = v_uint32()
        self.BuffersWritten = v_uint32()
        self.LogBuffersLost = v_uint32()
        self.RealTimeBuffersDelivered = v_uint32()
        self.RealTimeBuffersLost = v_uint32()
        self._pad0100 = v_bytes(size=4)
        self.SequencePtr = v_ptr64()
        self.LocalSequence = v_uint32()
        self.InstanceGuid = GUID()
        self.FileCounter = v_uint32()
        self.BufferCallback = v_ptr64()
        self.PoolType = v_uint32()
        self._pad0130 = v_bytes(size=4)
        self.ReferenceTime = ETW_REF_CLOCK()
        self.Consumers = LIST_ENTRY()
        self.NumConsumers = v_uint32()
        self._pad0158 = v_bytes(size=4)
        self.TransitionConsumer = v_ptr64()
        self.RealtimeLogfileHandle = v_ptr64()
        self.RealtimeLogfileName = UNICODE_STRING()
        self.RealtimeWriteOffset = LARGE_INTEGER()
        self.RealtimeReadOffset = LARGE_INTEGER()
        self.RealtimeLogfileSize = LARGE_INTEGER()
        self.RealtimeLogfileUsage = v_uint64()
        self.RealtimeMaximumFileSize = v_uint64()
        self.RealtimeBuffersSaved = v_uint32()
        self._pad01a8 = v_bytes(size=4)
        self.RealtimeReferenceTime = ETW_REF_CLOCK()
        self.NewRTEventsLost = v_uint32()
        self._pad01c0 = v_bytes(size=4)
        self.LoggerEvent = KEVENT()
        self.FlushEvent = KEVENT()
        self.FlushTimeOutTimer = KTIMER()
        self.FlushDpc = KDPC()
        self.LoggerMutex = KMUTANT()
        self.LoggerLock = EX_PUSH_LOCK()
        self.BufferListSpinLock = v_uint64()
        self.ClientSecurityContext = SECURITY_CLIENT_CONTEXT()
        self.SecurityDescriptor = EX_FAST_REF()
        self.BufferSequenceNumber = v_uint64()
        self.Flags = v_uint32()
        self.RequestFlag = v_uint32()
        self.HookIdMap = RTL_BITMAP()
        self._pad0330 = v_bytes(size=8)
class THREAD_PERFORMANCE_DATA(vstruct.VStruct):
    """Vstruct byte layout of the Windows THREAD_PERFORMANCE_DATA structure."""
    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.Size = v_uint16()
        self.Version = v_uint16()
        self.ProcessorNumber = PROCESSOR_NUMBER()
        self.ContextSwitches = v_uint32()
        self.HwCountersCount = v_uint32()
        self.UpdateCount = v_uint64()
        self.WaitReasonBitMap = v_uint64()
        self.HardwareCounters = v_uint64()
        self.CycleTime = COUNTER_READING()
        self.HwCounters = vstruct.VArray([ COUNTER_READING() for i in xrange(16) ])
class IO_STACK_LOCATION(vstruct.VStruct):
    """Vstruct byte layout of the Windows IO_STACK_LOCATION (IRP stack entry) structure."""
    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.MajorFunction = v_uint8()
        self.MinorFunction = v_uint8()
        self.Flags = v_uint8()
        self.Control = v_uint8()
        self._pad0008 = v_bytes(size=4)
        self.Parameters = _unnamed_21040()
        self.DeviceObject = v_ptr64()
        self.FileObject = v_ptr64()
        self.CompletionRoutine = v_ptr64()
        self.Context = v_ptr64()
class DBGKD_READ_WRITE_MSR(vstruct.VStruct):
    """Vstruct byte layout of the Windows DBGKD_READ_WRITE_MSR structure."""
    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.Msr = v_uint32()
        self.DataValueLow = v_uint32()
        self.DataValueHigh = v_uint32()
class ARBITER_QUERY_CONFLICT_PARAMETERS(vstruct.VStruct):
    """Vstruct byte layout of the Windows ARBITER_QUERY_CONFLICT_PARAMETERS structure."""
    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.PhysicalDeviceObject = v_ptr64()
        self.ConflictingResource = v_ptr64()
        self.ConflictCount = v_ptr64()
        self.Conflicts = v_ptr64()
class _unnamed_27931(vstruct.VStruct):
    """Vstruct layout of an anonymous Windows sub-structure (auto-generated name)."""
    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.ResourceToRelease = v_ptr64()
class IMAGE_DATA_DIRECTORY(vstruct.VStruct):
    """Vstruct byte layout of the PE IMAGE_DATA_DIRECTORY structure (RVA + size)."""
    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.VirtualAddress = v_uint32()
        self.Size = v_uint32()
class FILE_OBJECT(vstruct.VStruct):
    """Vstruct byte layout of the Windows FILE_OBJECT structure; field order and _pad fields fix the layout."""
    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.Type = v_uint16()
        self.Size = v_uint16()
        self._pad0008 = v_bytes(size=4)
        self.DeviceObject = v_ptr64()
        self.Vpb = v_ptr64()
        self.FsContext = v_ptr64()
        self.FsContext2 = v_ptr64()
        self.SectionObjectPointer = v_ptr64()
        self.PrivateCacheMap = v_ptr64()
        self.FinalStatus = v_uint32()
        self._pad0040 = v_bytes(size=4)
        self.RelatedFileObject = v_ptr64()
        self.LockOperation = v_uint8()
        self.DeletePending = v_uint8()
        self.ReadAccess = v_uint8()
        self.WriteAccess = v_uint8()
        self.DeleteAccess = v_uint8()
        self.SharedRead = v_uint8()
        self.SharedWrite = v_uint8()
        self.SharedDelete = v_uint8()
        self.Flags = v_uint32()
        self._pad0058 = v_bytes(size=4)
        self.FileName = UNICODE_STRING()
        self.CurrentByteOffset = LARGE_INTEGER()
        self.Waiters = v_uint32()
        self.Busy = v_uint32()
        self.LastLock = v_ptr64()
        self.Lock = KEVENT()
        self.Event = KEVENT()
        self.CompletionContext = v_ptr64()
        self.IrpListLock = v_uint64()
        self.IrpList = LIST_ENTRY()
        self.FileObjectExtension = v_ptr64()
class PPM_IDLE_STATES(vstruct.VStruct):
    """Vstruct byte layout of the Windows PPM_IDLE_STATES structure; State is declared with one element."""
    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.Count = v_uint32()
        self.Flags = _unnamed_25467()
        self.TargetState = v_uint32()
        self.ActualState = v_uint32()
        self.OldState = v_uint32()
        self._pad0018 = v_bytes(size=4)
        self.TargetProcessors = KAFFINITY_EX()
        self.State = vstruct.VArray([ PPM_IDLE_STATE() for i in xrange(1) ])
class MMWSLE_HASH(vstruct.VStruct):
    """Vstruct byte layout of the Windows MMWSLE_HASH structure."""
    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.Index = v_uint32()
class _unnamed_27702(vstruct.VStruct):
    """Vstruct layout of an anonymous Windows sub-structure (auto-generated name)."""
    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.AsULONG = v_uint32()
class MMPTE_PROTOTYPE(vstruct.VStruct):
    """Vstruct byte layout of the Windows MMPTE_PROTOTYPE structure."""
    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.Valid = v_uint64()
class VF_TARGET_DRIVER(vstruct.VStruct):
    """Vstruct byte layout of the Windows VF_TARGET_DRIVER (driver verifier) structure."""
    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.TreeNode = VF_AVL_TREE_NODE()
        self.u1 = _unnamed_26860()
        self.VerifiedData = v_ptr64()
class PCW_CALLBACK_INFORMATION(vstruct.VStruct):
    """Vstruct byte layout of the Windows PCW_CALLBACK_INFORMATION structure."""
    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.AddCounter = PCW_COUNTER_INFORMATION()
        self._pad0028 = v_bytes(size=24)
class KENLISTMENT(vstruct.VStruct):
    """Vstruct byte layout of the Windows KENLISTMENT (KTM enlistment) structure; History is declared with 20 entries."""
    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.cookie = v_uint32()
        self._pad0008 = v_bytes(size=4)
        self.NamespaceLink = KTMOBJECT_NAMESPACE_LINK()
        self.EnlistmentId = GUID()
        self.Mutex = KMUTANT()
        self.NextSameTx = LIST_ENTRY()
        self.NextSameRm = LIST_ENTRY()
        self.ResourceManager = v_ptr64()
        self.Transaction = v_ptr64()
        self.State = v_uint32()
        self.Flags = v_uint32()
        self.NotificationMask = v_uint32()
        self._pad00b8 = v_bytes(size=4)
        self.Key = v_ptr64()
        self.KeyRefCount = v_uint32()
        self._pad00c8 = v_bytes(size=4)
        self.RecoveryInformation = v_ptr64()
        self.RecoveryInformationLength = v_uint32()
        self._pad00d8 = v_bytes(size=4)
        self.DynamicNameInformation = v_ptr64()
        self.DynamicNameInformationLength = v_uint32()
        self._pad00e8 = v_bytes(size=4)
        self.FinalNotification = v_ptr64()
        self.SupSubEnlistment = v_ptr64()
        self.SupSubEnlHandle = v_ptr64()
        self.SubordinateTxHandle = v_ptr64()
        self.CrmEnlistmentEnId = GUID()
        self.CrmEnlistmentTmId = GUID()
        self.CrmEnlistmentRmId = GUID()
        self.NextHistory = v_uint32()
        self.History = vstruct.VArray([ KENLISTMENT_HISTORY() for i in xrange(20) ])
        self._pad01e0 = v_bytes(size=4)
class HEAP_SUBSEGMENT(vstruct.VStruct):
def __init__(self):
| |
# nomic/proposal.py
from collections import OrderedDict
from dataclasses import dataclass
from datetime import datetime
from enum import Enum
from typing import Optional, Set
import discord
import functools
from .gameflags import GameFlagsManager
from .playerdict import PlayerDict
from .repoman import GameRepoManager
from constants import colors, emoji, info, strings
import utils
class ProposalStatus(Enum):
    """Lifecycle state of a proposal; the values are the strings stored on disk."""
    VOTING = 'voting'
    PASSED = 'passed'
    FAILED = 'failed'
    DELETED = 'deleted'
# Maps user-typed vote commands to canonical vote names. 'remove' is a
# pseudo-vote that clears a player's existing vote rather than casting one.
VOTE_ALIASES = {
    '+': 'for',
    '-': 'against',
    'abstain': 'abstain',
    'against': 'against',
    'del': 'remove',
    'delete': 'remove',
    'for': 'for',
    'remove': 'remove',
    'rm': 'remove',
}
# Canonical vote names, in the order the embed fields are rendered.
VOTE_TYPES = ('for', 'against', 'abstain')
@dataclass
class _Proposal:
    # NOTE(review): "'ProposalManager' and GameFlagsManager" evaluates to just
    # GameFlagsManager at runtime; it appears intended to document that `game`
    # must provide both manager interfaces -- confirm.
    game: 'ProposalManager' and GameFlagsManager
    n: int
    author: discord.Member
    content: str
    status: ProposalStatus = ProposalStatus.VOTING
    message_id: Optional[int] = None
    votes: PlayerDict = None  # replaced with a real PlayerDict in Proposal.__init__
    timestamp: int = None  # defaulted to utils.now() in Proposal.__init__
@functools.total_ordering
class Proposal(_Proposal):
    """A dataclass representing a Nomic proposal.

    Attributes:
    - game
    - n -- integer; proposal ID number
    - author -- discord.Member
    - content -- string
    Optional attributes:
    - status (default ProposalStatus.VOTING)
    - message_id (default None) -- discord.Message or the ID of one (converted
      to integer ID)
    - votes (default {}) -- PlayerDict of ints; positive numbers are votes
      for, negative numbers are votes against, and zero is an abstention
    - timestamp (default now)
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Authors loaded from disk may be raw IDs; resolve to a Member.
        if not isinstance(self.author, discord.Member):
            self.author = self.game.get_member(self.author)
        # if isinstance(self.message_id, discord.Message):
        #     self.message_id = self.message_id.id
        self.votes = PlayerDict(self.game, self.votes)
        self.status = ProposalStatus(self.status)
        if self.timestamp is None:
            self.timestamp = utils.now()

    def export(self) -> dict:
        """Return a JSON/YAML-serializable snapshot of this proposal."""
        return OrderedDict(
            n=self.n,
            author=self.author and self.author.id,
            content=self.content,
            status=self.status.value,
            message_id=self.message_id,
            votes=self.votes.export(),
            timestamp=self.timestamp,
        )

    async def set_vote(self, player: discord.Member, new_vote_amount: int):
        """Set (or clear, with None) a player's vote, honoring the game flags.

        Returns True when the vote was applied, False when voting is closed or
        vote changes are disallowed. Refreshes the message and saves the game.
        """
        self.game.assert_locked()
        if self.status != ProposalStatus.VOTING:
            return False
        # Abstention (0) is treated as vote removal when abstaining is disabled.
        if new_vote_amount == 0 and not self.game.flags.allow_vote_abstain:
            new_vote_amount = None
        if player in self.votes and not self.game.flags.allow_vote_change:
            return False
        # Clamp multi-votes to +/-1 when multi-voting is disabled.
        if new_vote_amount and abs(new_vote_amount) > 1 and not self.game.flags.allow_vote_multi:
            new_vote_amount //= abs(new_vote_amount)
        self.votes[player] = new_vote_amount
        if new_vote_amount is None:
            del self.votes[player]
        await self.refresh()
        self.game.save()
        return True

    async def vote_for(self, player: discord.Member, amount: int = 1):
        """Add votes for; an existing vote against is cleared instead."""
        old_vote_amount = self.votes.get(player)
        if old_vote_amount is None:
            new_vote_amount = amount
        elif old_vote_amount < 0:
            new_vote_amount = None
        else:
            new_vote_amount = old_vote_amount + amount
        return await self.set_vote(player, new_vote_amount)

    async def vote_against(self, player: discord.Member, amount: int = 1):
        """Add votes against; an existing vote for is cleared instead."""
        old_vote_amount = self.votes.get(player)
        if old_vote_amount is None:
            new_vote_amount = -amount
        elif old_vote_amount > 0:
            new_vote_amount = None
        else:
            new_vote_amount = old_vote_amount - amount
        return await self.set_vote(player, new_vote_amount)

    async def vote_abstain(self, player: discord.Member):
        """Toggle an abstention (0); abstaining twice clears the vote."""
        old_vote_amount = self.votes.get(player)
        if old_vote_amount == 0:
            new_vote_amount = None
        else:
            new_vote_amount = 0
        return await self.set_vote(player, new_vote_amount)

    async def vote_abstain_or_remove(self, player: discord.Member):
        """Abstain when the player has no vote; otherwise remove their vote."""
        old_vote_amount = self.votes.get(player)
        if old_vote_amount is None:
            new_vote_amount = 0
        else:
            new_vote_amount = None
        return await self.set_vote(player, new_vote_amount)

    async def vote_remove(self, player: discord.Member):
        """Remove the player's vote entirely."""
        return await self.set_vote(player, None)

    @property
    def votes_for(self) -> int:
        # Positive amounts are votes for.
        return sum(v for v in self.votes.values() if v > 0)

    @property
    def votes_against(self) -> int:
        # Negative amounts are votes against; negate to report a positive count.
        return -sum(v for v in self.votes.values() if v < 0)

    @property
    def votes_abstain(self) -> int:
        # Zero amounts are abstentions.
        return sum(v == 0 for v in self.votes.values())

    async def set_status(self, new_status: ProposalStatus):
        """Change the proposal status, refresh its message, and save."""
        self.game.assert_locked()
        self.status = new_status
        await self.refresh()
        self.game.save()

    async def set_content(self, new_content: str):
        """Change the proposal text, refresh its message, and save."""
        self.game.assert_locked()
        self.content = new_content
        await self.refresh()
        self.game.save()

    async def refresh(self):
        """Re-render this proposal's Discord message."""
        await self.game.refresh_proposal(self)

    async def repost(self):
        """Delete and repost this proposal's Discord message."""
        await self.game.repost_proposal(self)

    async def fetch_message(self) -> discord.Message:
        """Fetch this proposal's Discord message, or None when unavailable."""
        try:
            return await self.game.proposals_channel.fetch_message(self.message_id)
        except (discord.NotFound, discord.Forbidden, discord.HTTPException):
            return None

    @property
    def discord_link(self) -> str:
        """Deep link to this proposal's message in the proposals channel."""
        return utils.discord.MESSAGE_LINK_FORMAT.format(
            guild=self.game.guild,
            channel=self.game.proposals_channel,
            message_id=self.message_id,
        )

    @property
    def github_link(self):
        """Link to this proposal's anchor in the repo's proposals.md."""
        return f'{info.GITHUB_REPO_LINK}/blob/{self.game.repo.name}/proposals.md#{self.n}'

    @property
    def embed(self) -> discord.Embed:
        """Return an embed displaying this proposal."""
        # Make the title; e.g. "Proposal #10 -- Passed"
        title = f"Proposal #{self.n}"
        if self.status != ProposalStatus.VOTING:
            title += " \N{EM DASH} "
            title += self.status.value.capitalize()
        if self.status == ProposalStatus.DELETED:
            return discord.Embed(
                color=colors.DELETED,
                title=title,
            )
        embed = discord.Embed(
            color={
                ProposalStatus.VOTING: colors.INFO,
                ProposalStatus.PASSED: colors.SUCCESS,
                ProposalStatus.FAILED: colors.ERROR,
            }[self.status],
            title=title,
            description=self.content,
            timestamp=datetime.fromtimestamp(self.timestamp),
        )
        # Make an embed field for each type of vote
        for vote_type in VOTE_TYPES:
            total = 0
            value = ''
            # Count the votes and list the users
            for player, vote_amount in self.votes.items():
                if vote_type == 'for':
                    if vote_amount <= 0:
                        continue
                elif vote_type == 'against':
                    if vote_amount >= 0:
                        continue
                    # Report vote-against magnitudes as positive numbers.
                    vote_amount *= -1
                elif vote_type == 'abstain':
                    if vote_amount != 0:
                        continue
                    vote_amount = 1
                value += player.mention
                if vote_amount > 1:
                    value += f" ({vote_amount}x)"
                value += "\n"
                total += vote_amount
            name = vote_type.capitalize()
            if total:
                name += f" ({total})"
            # Hide the abstain field entirely when nobody abstained.
            if vote_type == 'abstain' and total == 0:
                continue
            embed.add_field(
                name=name,
                value=value or strings.EMPTY_LIST,
                inline=True,
            )
        # Set the footer
        embed.set_footer(**utils.discord.embed_happened_footer("Submitted", self.author))
        return embed

    @property
    def markdown(self):
        """Render this proposal as a proposals.md section with an HTML anchor."""
        s = f"<a name='{self.n}'/>"
        s += "\n\n"
        s += f"## #{self.n}"
        if self.status != ProposalStatus.VOTING:
            s += f" \N{EM DASH} {self.status.value.capitalize()}"
        s += "\n\n"
        if self.status != ProposalStatus.DELETED:
            s += self.content
            s += "\n\n"
        return s

    def __str__(self):
        return f"proposal #{self.n}"

    def __lt__(self, other):
        return self.n < other.n

    def __eq__(self, other):
        return type(self) == type(other) and self.n == other.n

    def __hash__(self):
        # None of these values should ever change, and they should uniquely
        # identify this proposal.
        return hash((self.game.guild.id, self.n, self.timestamp))
class ProposalManager(GameRepoManager):
def load(self):
db = self.get_db('proposals')
self.proposals_channel = db.get('channel')
if self.proposals_channel:
self.proposals_channel = self.guild.get_channel(self.proposals_channel)
self.proposals = []
if db.get('proposals'):
for proposal in db['proposals']:
self.proposals.append(Proposal(game=self, **proposal))
def save(self):
db = self.get_db('proposals')
db.replace(OrderedDict(
channel=self.proposals_channel and self.proposals_channel.id,
proposals=[p.export() for p in self.proposals],
))
db.save()
with open(self.get_file('proposals.md'), 'w') as f:
f.write(f"# {self.guild.name} \N{EM DASH} Proposals")
f.write('\n\n')
for p in self.proposals:
f.write(p.markdown)
async def commit_proposals_and_log(self,
agent: discord.Member,
action: str,
proposal: Proposal,
post: str = '',
link_to_proposal: bool = True,
**kwargs):
"""Commit the proposals Markdown file and log the event."""
if await self.repo.is_clean('proposals.md'):
return
commit_msg = markdown_msg = f"{utils.discord.fake_mention(agent)} {action} "
commit_msg += str(proposal)
if link_to_proposal:
markdown_msg += f"[{proposal}](../proposals.md#{proposal.n})"
else:
markdown_msg += str(proposal)
await self.commit('proposals.md', msg=commit_msg + post)
await self.log(markdown_msg + post, **kwargs)
async def refresh_proposal(self, *proposals: Proposal):
"""Update the messages for one or more proposals.
May throw `TypeError`, `ValueError`, or `discord.Forbidden` exceptions.
"""
self.assert_locked()
for proposal in sorted(set(proposals)):
try:
m = await proposal.fetch_message()
await m.clear_reactions()
await m.edit(embed=proposal.embed)
if proposal.status == ProposalStatus.VOTING:
await m.add_reaction(emoji.VOTE_FOR)
await m.add_reaction(emoji.VOTE_AGAINST)
await m.add_reaction(emoji.VOTE_ABSTAIN)
except discord.NotFound:
await self.repost_proposal(proposal)
return
async def repost_proposal(self, *proposals: Proposal):
"""Remove and repost the messages for one or more proposals.
May throw `TypeError`, `ValueError`, or `discord.Forbidden` exceptions.
"""
self.assert_locked()
proposal_range = range(min(proposals).n, len(self.proposals) + 1)
proposals = list(map(self.get_proposal, proposal_range))
proposal_messages = []
for proposal in proposals:
m = await proposal.fetch_message()
if m:
proposal_messages.append(m)
if proposal_messages:
await utils.discord.safe_bulk_delete(proposal_messages)
for proposal in proposals:
m = await self.proposals_channel.send(embed=discord.Embed(
color=colors.TEMPORARY,
title=f"Preparing proposal #{proposal.n}\N{HORIZONTAL ELLIPSIS}",
))
proposal.message_id = m.id
self.save()
await self.refresh_proposal(*proposals)
def has_proposal(self, n: int) -> bool:
return isinstance(n, int) and 1 <= n <= len(self.proposals)
def get_proposal(self, n: int) -> Optional[Proposal]:
if self.has_proposal(n):
return self.proposals[n - 1]
async def get_proposal_messages(self) -> Set[discord.Message]:
messages = set()
for proposal in self.proposals:
messages.add(await proposal.fetch_message())
return messages
async def add_proposal(self, **kwargs):
self.assert_locked()
n = len(self.proposals) + 1
new_proposal = Proposal(game=self, n=n, **kwargs)
self.proposals.append(new_proposal)
# ProposalManager.repost_proposal() calls BaseGame.save() so we
# don't have to do that here.
await self.repost_proposal(new_proposal)
return new_proposal
async def permadel_proposal(self, proposal: Proposal):
self.assert_locked()
if not proposal.n == len(self.proposals):
raise RuntimeError("Cannot delete any proposal other than the last one")
del self.proposals[proposal.n - 1]
self.save()
await (await proposal.fetch_message()).delete()
async def log_proposal_submit(self,
agent: discord.Member,
proposal: Proposal):
await self.commit_proposals_and_log(
agent, "submitted", proposal, link_to_commit=True
)
async def log_proposal_permadel(self,
agent: discord.Member,
proposal: Proposal):
await self.commit_proposals_and_log(
agent, "permanently deleted", proposal, link_to_proposal=False, link_to_commit=True
)
async def log_proposal_change_status(self,
agent: discord.Member,
proposal: Proposal):
if proposal.status == ProposalStatus.VOTING:
action = "reopened"
else:
action = proposal.status.value
await self.commit_proposals_and_log(
agent, action, proposal, link_to_commit=True
)
async def log_proposal_change_content(self,
agent: discord.Member,
proposal: Proposal):
await self.commit_proposals_and_log(
agent, "edited", proposal, link_to_commit=True
)
async def log_proposal_vote(self,
agent: discord.Member,
proposal: Proposal,
player: discord.Member,
old_vote_amount: Optional[int],
new_vote_amount: Optional[int]):
if old_vote_amount == new_vote_amount:
return
if new_vote_amount is None:
action = "removed their vote from"
elif old_vote_amount is not None:
action = "changed their vote on"
elif new_vote_amount == 0:
action = "abstained on"
elif new_vote_amount > 0:
action = "voted for"
elif new_vote_amount < 0:
action = "voted against"
else:
action = "WTFed"
if player != agent:
post = f" on behalf of {utils.discord.fake_mention(player)}"
else:
post = ''
if abs(old_vote_amount or 0) > 1 | |
or negative for a win by the minimizer player.
Only needed for terminal states."""
jersi_rewards = self.__jersi_state.get_rewards()
if jersi_rewards[self.__maximizer_player] == Reward.DRAW:
mcts_reward = 0
elif jersi_rewards[self.__maximizer_player] == Reward.WIN:
mcts_reward = 1
else:
mcts_reward = -1
return mcts_reward
    def getPossibleActions(self):
        """mcts interface: return the legal actions of the wrapped jersi state."""
        return self.__jersi_state.get_actions()
    def takeAction(self, action):
        """mcts interface: return a new MctsState after applying *action*."""
        return MctsState(self.__jersi_state.take_action(action), self.__maximizer_player)
class MinimaxState:
    """Adapter exposing a jersi state through the minimax searcher's interface.

    Wraps a jersi game state together with the player the searcher maximizes
    for, and translates jersi rewards into the +1/0/-1 values minimax expects.
    """

    __slots__ = ('__jersi_state', '__maximizer_player')

    def __init__(self, jersi_state, maximizer_player):
        self.__jersi_state = jersi_state
        self.__maximizer_player = maximizer_player

    def get_jersi_state(self):
        """Return the wrapped jersi state."""
        return self.__jersi_state

    def get_current_jersi_maximizer_player(self):
        """Return the jersi player this searcher maximizes for."""
        return self.__maximizer_player

    def get_current_player(self):
        """Return 1 when the maximizer is to move, or -1 for the minimizer."""
        to_move = self.__jersi_state.get_current_player()
        return 1 if to_move == self.__maximizer_player else -1

    def is_terminal(self):
        """Return whether the wrapped jersi state is terminal."""
        return self.__jersi_state.is_terminal()

    def get_reward(self):
        """Return 0 for a draw, 1 for a maximizer win, -1 for a minimizer win.

        Only needed for terminal states.
        """
        maximizer_reward = self.__jersi_state.get_rewards()[self.__maximizer_player]
        if maximizer_reward == Reward.DRAW:
            return 0
        if maximizer_reward == Reward.WIN:
            return 1
        return -1

    def get_actions(self, shuffle):
        """Return the legal actions, optionally shuffled by the jersi state."""
        return self.__jersi_state.get_actions(shuffle)

    def take_action(self, action):
        """Return a new MinimaxState reached by applying *action*."""
        return MinimaxState(self.__jersi_state.take_action(action), self.__maximizer_player)
def extractStatistics(mcts_searcher, action):
    """Collect visit/reward counters from an MCTS tree for one root action.

    Returns a dict with the root node's and the given child action's
    visit counts and cumulated rewards.
    """
    root = mcts_searcher.root
    child = root.children[action]
    return {'rootNumVisits': root.numVisits,
            'rootTotalReward': root.totalReward,
            'actionNumVisits': child.numVisits,
            'actionTotalReward': child.totalReward}
def jersiSelectAction(action_names):
    """Pick one action name, biasing the choice toward captures and stacks.

    Drop actions (names without '-' or '=') are chosen only with a small
    probability when move actions exist; moves are sampled with weights
    derived from their notation marks ('!' capture, '=' stack, '-' move).

    :param action_names: non-empty sequence of action name strings
    :return: one element of action_names
    :raises IndexError: when action_names is empty
    """

    def score_move_name(move_name):
        # Encode the notation marks as digit strings, then read them back as
        # numbers: a run of '!' scores 100, each '=' contributes a '10'
        # digit pair, each '-' a '1' digit.
        captures = re.sub(r"[^!]", "", move_name)
        captures = re.sub(r"!+", "100", captures)
        stacks = re.sub(r"[^=]", "", move_name).replace("=", "10")
        cubes = re.sub(r"[^-]", "", move_name).replace("-", "1")

        move_score = 0
        if captures != "":
            move_score += float(captures)
        if stacks != "":
            move_score += float(stacks)
        if cubes != "":
            move_score += float(cubes)
        return move_score

    # Moves contain a '-' or '=' in their notation; drops do not.
    move_names = [name for name in action_names if re.match(r"^.*[-=].*$", str(name))]
    drop_names = [name for name in action_names if not re.match(r"^.*[-=].*$", str(name))]

    drop_probability = 0.05

    # Fixed: when only drop actions exist, the original still tried
    # random.choices() on the empty move list ~95% of the time and crashed.
    if len(drop_names) != 0 and (len(move_names) == 0 or random.random() <= drop_probability):
        action_name = random.choice(drop_names)
    else:
        move_weights = list(map(score_move_name, move_names))
        action_name = random.choices(move_names, weights=move_weights, k=1)[0]

    return action_name
def jersiRandomPolicy(state):
    """Roll out a game by playing random (capture-biased) actions until a
    terminal state is reached, then return that state's reward.

    Used as an MCTS rollout policy.
    """
    while not state.isTerminal():
        try:
            jersi_state = state.get_jersi_state()
            selected_name = jersiSelectAction(jersi_state.get_action_names())
            action = jersi_state.get_action_by_name(selected_name)
        except IndexError:
            raise Exception("Non-terminal state has no possible actions: " + str(state))
        state = state.takeAction(action)
    return state.getReward()
class HumanSearcher():
    """Searcher driven by a human: the action comes either from the GUI
    (pushed via set_action_simple_name) or from a command-line prompt."""

    __slots__ = ('__name', '__action_simple_name', '__use_command_line')

    def __init__(self, name):
        self.__name = name
        self.__action_simple_name = None
        self.__use_command_line = False

    def get_name(self):
        """Return the searcher's display name."""
        return self.__name

    def is_interactive(self):
        """A human searcher always requires interaction."""
        return True

    def use_command_line(self, condition):
        """Enable or disable the command-line input mode."""
        assert condition in (True, False)
        self.__use_command_line = condition

    def set_action_simple_name(self, action_name):
        """Record the action chosen through the GUI (GUI mode only)."""
        assert not self.__use_command_line
        self.__action_simple_name = action_name

    def search(self, state):
        """Return the action chosen by the human for *state*."""
        if self.__use_command_line:
            return self.__search_using_command_line(state)
        action = state.get_action_by_simple_name(self.__action_simple_name)
        self.__action_simple_name = None
        return action

    def __search_using_command_line(self, state):
        """Prompt until a valid action notation is typed; return that action."""
        assert self.__use_command_line
        valid_names = state.get_action_simple_names()

        action_validated = False
        while not action_validated:
            action_input = Notation.simplify_notation(input("HumanSearcher: action? "))
            (action_validated, validation_message) = Notation.validate_simple_notation(action_input, valid_names)
            print(validation_message)

        action = state.get_action_by_simple_name(action_input)
        print(f"HumanSearcher: action {action} has been selected")
        return action
class RandomSearcher():
    """Searcher that plays uniformly at random, with only a small
    probability of picking a drop action while moves are available."""

    __slots__ = ('__name')

    def __init__(self, name):
        self.__name = name

    def get_name(self):
        """Return the searcher's display name."""
        return self.__name

    def is_interactive(self):
        """Random play needs no human interaction."""
        return False

    def search(self, state):
        """Return a random action for *state*: a move with probability
        ~0.95 when both kinds exist, otherwise whatever kind is available."""
        (drop_actions, move_actions) = partition(lambda x: re.match(r"^.*[-=].*$", str(x)),
                                                 state.get_actions())
        drop_actions = list(drop_actions)
        move_actions = list(move_actions)

        drop_probability = 0.05

        if len(move_actions) == 0:
            return random.choice(drop_actions)
        if len(drop_actions) != 0 and random.random() <= drop_probability:
            return random.choice(drop_actions)
        return random.choice(move_actions)
class MinimaxSearcher():
    """Searcher running a depth-limited negamax with alpha-beta pruning
    (negascout/PVS null-window variant) over MinimaxState, using a
    hand-tuned evaluation function and optional sampling of children."""

    __slots__ = ('__name', '__max_depth', '__max_children',
                 '__distance_weight', '__capture_weight', '__center_weight')

    # Evaluation weights tuned per search depth.
    default_weights_by_depth = dict()

    default_weights_by_depth[1] = {'distance_weight':100,
                                   'capture_weight':1_200,
                                   'center_weight':400}

    default_weights_by_depth[2] = {'distance_weight':100,
                                   'capture_weight':1_600,
                                   'center_weight':100}

    def __init__(self, name, max_depth=1, max_children=None,
                 distance_weight=None, capture_weight=None, center_weight=None):
        """Create a minimax searcher.

        :param name: display name of the searcher
        :param max_depth: search depth in plies (>= 1)
        :param max_children: optional cap on the number of child actions
            explored at each node (see negamax)
        :param distance_weight/capture_weight/center_weight: evaluation
            weights; when None, the tuned defaults for max_depth are used
        """
        assert max_depth >= 1

        # Depths without tuned defaults borrow the depth-2 weights.
        if max_depth in MinimaxSearcher.default_weights_by_depth:
            default_weights = MinimaxSearcher.default_weights_by_depth[max_depth]
        else:
            default_weights = MinimaxSearcher.default_weights_by_depth[2]

        self.__name = name
        self.__max_depth = max_depth
        self.__max_children = max_children

        if distance_weight is not None:
            self.__distance_weight = distance_weight
        else:
            self.__distance_weight = default_weights['distance_weight']

        if capture_weight is not None:
            self.__capture_weight = capture_weight
        else:
            self.__capture_weight = default_weights['capture_weight']

        if center_weight is not None:
            self.__center_weight = center_weight
        else:
            self.__center_weight = default_weights['center_weight']

    def get_name(self):
        """Return the searcher's display name."""
        return self.__name

    def is_interactive(self):
        """Minimax search needs no human interaction."""
        return False

    def search(self, state):
        """Return one of the best actions for *state*, picked at random
        among all root actions sharing the best negamax value."""
        initial_state = MinimaxState(state, state.get_current_player())
        initial_player = initial_state.get_current_player()

        (best_value, action_values) = self.negamax(state=initial_state,
                                                   player=initial_player,
                                                   return_action_values=True)

        # Keep every root action whose value ties with the best one.
        best_actions = list()
        for (action, action_value) in action_values.items():
            if action_value == best_value:
                best_actions.append(action)
            else:
                pass

        print("%d best_actions with best value %.1f" % (len(best_actions), best_value))
        action = random.choice(best_actions)
        return action

    def state_value(self, state):
        """Evaluate *state* from the maximizer's point of view.

        Terminal states return +/- infinity (a draw counts as a win);
        otherwise the value is a weighted sum of three normalized features:
        king distance difference, capture difference and central presence.
        """
        jersi_state = state.get_jersi_state()
        jersi_maximizer_player = state.get_current_jersi_maximizer_player()

        # if needed then evaluate as jersi_maximizer_player = Player.WHITE
        # and use a sign
        if jersi_maximizer_player == Player.WHITE:
            player_sign = 1
        else:
            player_sign = -1

        value = 0

        if state.is_terminal():
            rewards = jersi_state.get_rewards()

            if rewards[jersi_maximizer_player] == Reward.DRAW:
                # consider a draw as a victory
                value = INFINITY_POSITIVE

            elif rewards[jersi_maximizer_player] == Reward.WIN:
                value = INFINITY_POSITIVE

            else:
                value = INFINITY_NEGATIVE

        else:
            # white and black activated fighters are used as indicators of parts of the game:
            # opening part, middle part and final part
            fighter_counts = jersi_state.get_fighter_counts()

            if fighter_counts[Player.BLACK] == 0:
                # black cannot capture anymore: rushing the king home dominates
                white_distance_importance = 1000
                white_capture_importance = 0
                white_center_importance = 0

            elif fighter_counts[Player.BLACK] <= 5:
                white_distance_importance = 10
                white_capture_importance = 10
                white_center_importance = 1

            else:
                white_distance_importance = 1
                white_capture_importance = 1
                white_center_importance = 1

            if fighter_counts[Player.WHITE] == 0:
                black_distance_importance = 1000
                black_capture_importance = 0
                black_center_importance = 0

            elif fighter_counts[Player.WHITE] <= 5:
                black_distance_importance = 10
                black_capture_importance = 10
                black_center_importance = 1

            else:
                black_distance_importance = 1
                black_capture_importance = 1
                black_center_importance = 1

            # white and black distances to their goals or ends
            king_distances = jersi_state.get_king_end_distances()
            distance_difference = player_sign*(king_distances[Player.BLACK] - king_distances[Player.WHITE])

            # white and black with captured status
            capture_counts = jersi_state.get_capture_counts()
            capture_difference = player_sign*(capture_counts[Player.BLACK] - capture_counts[Player.WHITE])

            # white and black fighter cubes in the central zone
            white_center_count = 0
            black_center_count = 0
            hexagon_bottom = jersi_state.get_hexagon_bottom()
            hexagon_top = jersi_state.get_hexagon_top()

            for hexagon_index in jersi_state.get_center_hexagon_indices():
                for cube_index in [hexagon_bottom[hexagon_index], hexagon_top[hexagon_index]]:
                    if cube_index != Null.CUBE:
                        cube = Cube.all[cube_index]

                        if cube.sort in (CubeSort.FOOL, CubeSort.PAPER, CubeSort.ROCK, CubeSort.SCISSORS):
                            if cube.player == Player.WHITE:
                                white_center_count += 1

                            elif cube.player == Player.BLACK:
                                black_center_count += 1
                    else:
                        # empty slot: stop scanning this hexagon (presumably an
                        # empty bottom implies an empty top — TODO confirm)
                        break

            center_difference = player_sign*(white_center_count - black_center_count)

            # normalize each feature in the intervall [-1, +1]
            distance_norm = 8
            capture_norm = 16
            center_norm = 17

            distance_difference = distance_difference/distance_norm
            capture_difference = capture_difference/capture_norm
            center_difference = center_difference/center_norm

            # account for importances
            distance_difference *= white_distance_importance*black_distance_importance
            capture_difference *= white_capture_importance*black_capture_importance
            center_difference *= white_center_importance*black_center_importance

            # synthesis
            value += self.__distance_weight*distance_difference
            value += self.__capture_weight*capture_difference
            value += self.__center_weight*center_difference

        return value

    def negamax(self, state, player, depth=None, alpha=None, beta=None, return_action_values=False):
        """Negamax with alpha-beta pruning and negascout null-window probing.

        :param state: MinimaxState to evaluate
        :param player: +1 when the mover of *state* is the maximizer, else -1
        :param depth: remaining plies; defaults to the searcher's max depth
        :param alpha/beta: pruning window; default to the full window
        :param return_action_values: when True (root call), also return a
            dict mapping each explored action to its value
        :return: the state value, or (value, action_values) at the root
        """

        def score_action(action):
            # Number of '!' marks (captures) in the action notation; used
            # to explore captures first and improve pruning.
            captures = re.sub(r"[^!]", "", str(action))
            return len(captures)

        use_negascout = True
        use_sort = True

        if alpha is None:
            alpha = INFINITY_NEGATIVE

        if beta is None:
            beta = INFINITY_POSITIVE

        if depth is None:
            depth = self.__max_depth

        if depth == 0 or state.is_terminal():
            # print("negamax at depth %d evaluating leaf state" % depth)
            return player*self.state_value(state)

        if return_action_values:
            action_values = dict()

        actions = state.get_actions(shuffle=False)

        # reduce the number of children
        if (self.__max_children is not None and len(actions) > self.__max_children):
            (drop_actions, move_actions) = partition(lambda x: re.match(r"^.*[-=].*$", str(x)), actions)
            drop_actions = list(drop_actions)
            move_actions = list(move_actions)

            if len(move_actions) > self.__max_children:
                # sample the move actions according to their destination hexagons
                move_actions.sort(key=lambda x: re.sub(r"/[kK]:..$", "", str(x)).replace("!", "")[-2:])
                selected_move_actions = list()
                for action_chunk in chunks(move_actions, self.__max_children):
                    selected_move_actions.append(random.choice(action_chunk))
                move_actions = selected_move_actions

            if len(drop_actions) != 0:
                drop_count = self.__max_children - len(move_actions)
                # >> let us admit some tolerance regarding the __max_children criterion
                # >> by adding a small fraction of drop actions
                drop_probability = 0.05
                drop_count = max(drop_count, int(math.ceil(drop_probability*len(move_actions))))
                drop_actions = random.choices(drop_actions, k=drop_count)
                actions = move_actions + drop_actions
            else:
                actions = move_actions

        # try to optimize alpha beta and negascout by ordering capture at first positions
        if use_sort:
            actions.sort(key=score_action, reverse=True)

        value = INFINITY_NEGATIVE

        for action in actions:
            # print("negamax at depth %d evaluating action %s" % (depth, action))
            child_state = state.take_action(action)

            if use_negascout:
                # search with a null window
                # print("negascout: search with a null window")
                child_value = -self.negamax(state=child_state, player=-player, depth=depth - 1,
                                            alpha=-alpha - 1, beta=-alpha)

                # if it failed high, do a full re-search
                if alpha < child_value < beta:
                    # print("negascout: full re-search")
                    child_value = -self.negamax(state=child_state, player=-player, depth=depth - 1,
                                                alpha=-beta, beta=-child_value)
            else:
                child_value = -self.negamax(state=child_state, player=-player, depth=depth - 1,
                                            alpha=-beta, beta=-alpha)

            value = max(value, child_value)

            if return_action_values:
                action_values[action] = child_value

            alpha = max(alpha, value)

            if alpha >= beta:
                # beta cut-off: the opponent would avoid this line anyway
                break

        if return_action_values:
            return (value, action_values)
        else:
            return value
class MctsSearcher():
__slots__ = ('__name', '__time_limit', '__iteration_limit', '__capture_weight', '__searcher')
def __init__(self, name, time_limit=None, iteration_limit=None, rolloutPolicy=mcts.randomPolicy):
self.__name = name
default_time_limit = 1_000
assert time_limit is None or iteration_limit is None
if time_limit is None and iteration_limit is None:
time_limit = | |
* 2, y_offset * 2),
special_flags=pygame.BLEND_RGB_ADD)
pyimage_copy[i] = surf
except Exception as error:
messagebox.showerror(
"Error", "An error occurred during the RGB split process\nError: %s\n"
"The RGB split effect will be disregarded" % error)
return pyimage_post_processing
return pyimage_copy
def add_transition_effect(self, top, progress, progress_label, pyimage_copy):
"""
RETURN THE A LIST OF SURFACES WITH THE ADDITIONAL EFFECT (TRANSITION) OR RETURN PYIMAGE_COPY UNCHANGED
:param top: tkinter toplevel window; This window will be used for displaying the overall process
:param progress: ttk progressbar; Bar showing the overall process
:param progress_label: tkinter label showing the name of the effect, here transition effect
:param pyimage_copy: Python list containing all the pygame surfaces to be processed
:return: Return a list containing all the pygame surface post processing or unchanged if an error occur.
"""
# SAVE THE SURFACE PRIOR TRANSITION EFFECT
pyimage_post_processing = pyimage_copy.copy()
if self.gl.transition_checkbox.get() is True:
try:
start = int(self.gl.transition_start_frame.get())
except:
messagebox.showwarning("Error", "Transition effect\nIncorrect start frame value")
return pyimage_post_processing
try:
end = int(self.gl.transition_end_frame.get())
except:
messagebox.showwarning("Error", "Transition effect\nIncorrect end frame value")
return pyimage_post_processing
if start > end:
messagebox.showwarning("Error", "Transition effect\nstart frame cannot be > end frame")
return pyimage_post_processing
if end < start:
messagebox.showwarning("Error", "Transition effect\nend frame cannot be < start frame")
return pyimage_post_processing
if start < 0:
start = 0
if end > len(self.gl.pyimage):
end = len(self.gl.pyimage)
surface2 = self.gl.transition_texture
w, h = pyimage_copy[0].get_size()
surface2 = pygame.transform.smoothscale(surface2, (w, h))
if self.gl.input_format_32bit.get():
# print("Adding transition effect for 32-bit format image...")
for i in range(start, end):
try:
if self.gl.cancel:
raise Exception("Process aborted by user")
progress_label.configure(text='transition effect')
progress['value'] = i * 400 / (end - start)
progress.update()
top.update_idletasks()
pyimage_copy[i] = blend_to_textures_32c(pyimage_copy[i], surface2, (100 / end) * i)
except Exception as error:
messagebox.showerror(
"Error", "An error occurred during the transition process\nError: %s\n"
"The transition effect will be disregarded" % error)
return pyimage_post_processing
else:
# print("Adding transition effect for 24-bit format image...")
for i in range(start, end):
try:
if self.gl.cancel:
raise Exception("Process aborted by user")
progress_label.configure(text='transition effect')
progress['value'] = i * 400 / (end - start)
progress.update()
top.update_idletasks()
pyimage_copy[i] = blend_to_textures_24c(pyimage_copy[i], surface2, (100.0 / end) * i)
except Exception as error:
messagebox.showerror(
"Error", "An error occurred during the transition process\nError: %s\n"
"The transition effect will be disregarded" % error)
return pyimage_post_processing
return pyimage_copy
def add_glitch_effect(self, top, progress, progress_label, pyimage_copy):
"""
RETURN THE A LIST OF SURFACES WITH THE ADDITIONAL EFFECT (GLITCH) OR RETURN PYIMAGE_COPY UNCHANGED
:param top: tkinter toplevel window; This window will be used for displaying the overall process
:param progress: ttk progressbar; Bar showing the overall process
:param progress_label: tkinter label showing the name of the effect, here glitch effect
:param pyimage_copy: Python list containing all the pygame surfaces to be processed
:return: Return a list containing all the pygame surface post processing or unchanged if an error occur.
"""
# SAVE THE SURFACE PRIOR GLITCH EFFECT
pyimage_post_processing = pyimage_copy.copy()
if self.gl.glitch_checkbox.get() is True:
try:
start = int(self.gl.glitch_start_frame.get())
except:
messagebox.showwarning("Error", "Glitch effect\nIncorrect start frame value")
return pyimage_post_processing
try:
end = int(self.gl.glitch_end_frame.get())
except:
messagebox.showwarning("Error", "Glitch effect\nIncorrect end frame value")
return pyimage_post_processing
if start > end:
messagebox.showwarning("Error", "Glitch effect\nstart frame cannot be > end frame")
return pyimage_post_processing
if end < start:
messagebox.showwarning("Error", "Glitch effect\nend frame cannot be < start frame")
return pyimage_post_processing
if start < 0:
start = 0
if end > len(self.gl.pyimage):
end = len(self.gl.pyimage)
if self.gl.glitch_horizontal.get():
if self.gl.input_format_32bit.get():
# print("Adding glitch effect for 32-bit format image...")
for i in range(start, end):
try:
if self.gl.cancel:
raise Exception("Process aborted by user")
progress_label.configure(text='glitch effect')
progress['value'] = i * 400 / (end - start)
progress.update()
top.update_idletasks()
pyimage_copy[i] = horizontal_glitch32(
pyimage_copy[i].convert_alpha(), 1, 0.1, 10)
except Exception as error:
messagebox.showerror(
"Error", "An error occurred during the glitch process\nError: %s\n"
"The glitch effect will be disregarded" % error)
return pyimage_post_processing
else:
# print("Adding glitch effect for 24-bit format image...")
for i in range(start, end):
try:
if self.gl.cancel:
raise Exception("Process aborted by user")
progress_label.configure(text='glitch effect')
progress['value'] = i * 400 / (end - start)
progress.update()
top.update_idletasks()
pyimage_copy[i] = horizontal_glitch24(pyimage_copy[i].convert(),
1, 0.3, (50 + (360 / 30) * i) % 20)
except Exception as error:
messagebox.showerror(
"Error", "An error occurred during the glitch process\n Error: %s\n"
"The glitch effect will be disregarded" % error)
return pyimage_post_processing
if self.gl.glitch_vertical.get():
if self.gl.input_format_32bit.get():
# print("Adding glitch effect for 32-bit format image...")
for i in range(start, end):
try:
if self.gl.cancel:
raise Exception("Process aborted by user")
progress_label.configure(text='glitch effect')
progress['value'] = i * 400 / (end - start)
progress.update()
top.update_idletasks()
pyimage_copy[i] = vertical_glitch32_c(
pyimage_copy[i].convert_alpha(), 1, 0.1, 10).convert_alpha()
except Exception as error:
messagebox.showerror(
"Error", "An error occurred during the glitch process\nError: %s\n"
"The glitch effect will be disregarded" % error)
return pyimage_post_processing
else:
# print("Adding glitch effect for 24-bit format image...")
for i in range(start, end):
try:
if self.gl.cancel:
raise Exception("Process aborted by user")
progress_label.configure(text='glitch effect')
progress['value'] = i * 400 / (end - start)
progress.update()
top.update_idletasks()
pyimage_copy[i] = vertical_glitch24_c(pyimage_copy[i].convert(),
1, 0.3, (50 + (360 / 30) * i) % 20)
except Exception as error:
messagebox.showerror(
"Error", "An error occurred during the glitch process\nError: %s\n"
"The glitch effect will be disregarded" % error)
return pyimage_post_processing
return pyimage_copy
    def add_sepia_effect(self, top, progress, progress_label, pyimage_copy):
        """
        Return a list of surfaces with a sepia effect applied to every frame,
        or return the pre-effect copy unchanged when disabled or on error.

        :param top: tkinter toplevel window used for displaying the overall process
        :param progress: ttk progressbar showing the overall process
        :param progress_label: tkinter label showing the name of the effect
        :param pyimage_copy: Python list containing all the pygame surfaces to be processed
        :return: processed surfaces, or the pre-effect list when an error occurs
        """
        # SAVE THE SURFACE PRIOR SEPIA EFFECT (returned on any failure)
        pyimage_post_processing = pyimage_copy.copy()
        if bool(self.gl.sepia.get()):
            if bool(self.gl.input_format_32bit.get()):
                # Adding sepia effect for 32-bit format image
                for i in range(0, len(pyimage_copy)):
                    try:
                        if self.gl.cancel:
                            raise Exception("Process aborted by user")
                        progress_label.configure(text='sepia effect')
                        progress['value'] = i * 400 / len(pyimage_copy)
                        progress.update()
                        top.update_idletasks()
                        # NOTE(review): this 32-bit branch calls sepia24_c, unlike
                        # add_greyscale_effect which uses a *32_c variant here —
                        # confirm whether a sepia32_c function was intended.
                        pyimage_copy[i] = sepia24_c(pyimage_copy[i])
                    except Exception as error:
                        messagebox.showerror(
                            "Error", "An error occurred during the sepia process\nError: %s\n"
                                     "The sepia effect will be disregarded" % error)
                        return pyimage_post_processing
            elif bool(self.gl.input_format_24bit.get()):
                # Adding sepia effect for 24-bit format image
                for i in range(0, len(pyimage_copy)):
                    try:
                        if self.gl.cancel:
                            raise Exception("Process aborted by user")
                        progress_label.configure(text='sepia effect')
                        progress['value'] = i * 400 / len(pyimage_copy)
                        progress.update()
                        top.update_idletasks()
                        pyimage_copy[i] = sepia24_c(pyimage_copy[i])
                    except Exception as error:
                        messagebox.showerror(
                            "Error", "An error occurred during the sepia process\nError: %s\n"
                                     "The sepia effect will be disregarded" % error)
                        return pyimage_post_processing
        return pyimage_copy
def add_greyscale_effect(self, top, progress, progress_label, pyimage_copy):
# SAVE THE SURFACE PRIOR GLITCH EFFECT
pyimage_post_processing = pyimage_copy.copy()
if bool(self.gl.greyscale.get()):
if bool(self.gl.input_format_32bit.get()):
# print("Adding greyscale effect for 32-bit format image...")
for i in range(0, len(pyimage_copy)):
try:
if self.gl.cancel:
raise Exception("Process aborted by user")
progress_label.configure(text='greyscale effect')
progress['value'] = i * 400 / len(pyimage_copy)
progress.update()
top.update_idletasks()
pyimage_copy[i] = greyscale32_c(pyimage_copy[i])
except Exception as error:
messagebox.showerror(
"Error", "An error occurred during the greyscale process\nError: %s\n"
"The greyscale effect will be disregarded" % error)
return pyimage_post_processing
elif bool(self.gl.input_format_24bit.get()):
# print("Adding greyscale effect for 24-bit format image...")
for i in range(0, len(pyimage_copy)):
try:
if self.gl.cancel:
raise Exception("Process aborted by user")
progress_label.configure(text='greyscale effect')
progress['value'] = i * 400 / len(pyimage_copy)
progress.update()
top.update_idletasks()
pyimage_copy[i] = greyscale24_c(pyimage_copy[i])
except Exception as error:
messagebox.showerror(
"Error", "An error occurred during the greyscale process\nError: %s\n"
"The greyscale effect will be disregarded" % error)
return pyimage_post_processing
return pyimage_copy
def add_pixelated_effect(self, top, progress, progress_label, pyimage_copy):
# SAVE THE SURFACE PRIOR GLITCH EFFECT
pyimage_post_processing = pyimage_copy.copy()
w, h = pyimage_copy[0].get_size()
try:
px_block = self.gl.pixel_size.get()
if px_block < 4:
raise ValueError
if px_block not in [4, 8, 16, 32]:
raise ValueError
except:
messagebox.showwarning("Error", "pixel effect\nIncorrect pixel block size value\n"
" The value must be either [4, 8, 16, 32]")
return pyimage_post_processing
if bool(self.gl.pixel.get()):
if bool(self.gl.input_format_32bit.get()):
# print("Adding pixelate effect for 32-bit format image...")
for i in range(0, len(pyimage_copy)):
try:
if self.gl.cancel:
raise Exception("Process aborted by user")
progress_label.configure(text='pixelate effect')
progress['value'] = i * 400 / len(pyimage_copy)
progress.update()
top.update_idletasks()
subs = create_pixel_blocks_rgba(pyimage_copy[i], px_block, int(w / px_block), int(h / px_block))
ii = 0
for surface in subs:
# avg=pygame.transform.average_color(surface)
# surface.fill(avg)
subs[ii] = pixelate32(surface)
ii += 1
new_surface = pygame.Surface((w, w)).convert_alpha()
new_surface.fill((0, 0, 0, 0))
# RECONSTRUCTION (ASSEMBLING ALL THE PIXELATED BLOCKS
ii = 0
jj = 0
for pixel_block in subs:
new_surface.blit(pixel_block, (ii, jj))
ii += px_block
if ii >= w:
jj += px_block
ii = 0
pyimage_copy[i] = new_surface.convert_alpha()
except Exception as error:
messagebox.showerror(
"Error", "An error | |
import requests
try:
import elementtree.ElementTree as ET
except ImportError:
import xml.etree.ElementTree as ET
class BasecampError(Exception):
    """Raised when the Basecamp API answers with an unexpected HTTP status;
    the response body is stored in the client's last_error attribute."""
    pass
class Basecamp():
def __init__(self, url, apikey):
self.baseURL = url
if self.baseURL[-1] == '/':
self.baseURL = self.baseURL[:-1]
self.apikey = apikey
def _request(self, path, data=None, put=False, post=False, delete=False,
get=False, return_response=False):
if isinstance(data, ET._ElementInterface):
data = ET.tostring(data)
url = self.baseURL + path
headers = {'Content-Type': 'application/xml',
'Authorization': 'Bearer %s' % self.apikey}
if post:
answer = requests.post(url, data, headers=headers)
elif put:
if not data:
headers['content-length'] = '0'
answer = requests.put(url, data, headers=headers)
elif delete:
answer = requests.delete(url, headers=headers)
else:
answer = requests.get(url, headers=headers)
if ( (post and answer.status_code != 201) or
(not post and answer.status_code != 200) ):
self.last_error = self._read_answer(answer)
raise BasecampError()
if return_response:
return answer
return self._read_answer(answer)
def _read_answer(self, answer):
# compatible with python2.7
try:
return answer.text
except AttributeError:
return answer.read()
    def get_last_error(self):
        """Return the body of the last failed API answer (set by _request)."""
        return self.last_error
# ---------------------------------------------------------------- #
# General
def company(self, company_id):
"""
This will return the information for the referenced company.
"""
path = '/companies/%u.xml' % company_id
return self._request(path)
def companies(self):
"""
This will return a list of all companies visible to the
requesting user.
"""
path = '/companies.xml'
return self._request(path)
def companies_per_project(self, project_id):
"""
This will return a list of all companies associated with the gieven
project.
"""
path = '/companies/%u/companies.xml' % project_id
return self._request(path)
# ---------------------------------------------------------------- #
# Categories
def file_categories(self, project_id):
"""
This will return an alphabetical list of all file categories in the
referenced project.
"""
path = '/projects/%u/categories.xml?type=attachment' % project_id
return self._request(path)
def message_categories(self, project_id):
"""
This will return an alphabetical list of all message categories in the
referenced project.
"""
path = '/projects/%u/categories.xml?type=post' % project_id
return self._request(path)
# ---------------------------------------------------------------- #
# People
def me(self):
"""
This will return the currently logged in person (you)
"""
path = '/me.xml'
return self._request(path)
def people(self):
"""
This will return all of the people visible to (and including) the
requesting user..
"""
path = '/people.xml'
return self._request(path)
def people_per_company(self, company_id):
"""
This will return all of the people in the given company.
"""
path = '/companies/%u/people.xml' % company_id
return self._request(path)
def people_per_project(self, project_id):
"""
This will return all of the people in the given project.
"""
path = '/projects/%u/people.xml' % project_id
return self._request(path)
def person(self, person_id):
"""
This will return information about the referenced person.
"""
path = '/people/%u.xml' % person_id
return self._request(path)
# ---------------------------------------------------------------- #
# Projects
def projects(self):
"""
This will return a list of all active, on-hold, and archived projects
that you have access to. The list is not ordered.
"""
path = '/projects.xml'
return self._request(path)
def project_count(self):
"""
This will return a count of all projects, by project status.
If there are no projects with a particular status, that status entry
will be omitted from the report
"""
path = '/projects/count.xml'
return self._request(path)
def project(self, project_id):
"""
This will return a single project identified by its integer ID
"""
path = '/projects/%u.xml' % project_id
return self._request(path)
# ---------------------------------------------------------------- #
# Messages
def messages_per_project(self, project_id, offset=0):
"""
This will return the 25 most recent messages in the given project
skipping a number of messages defined by offset.
"""
path = '/projects/%u/posts.xml?n=%s' % (project_id, offset)
return self._request(path)
def messages_per_category(self, project_id, category_id):
"""
This will return the 25 most recent messages in the given project
for the given category.
"""
path = '/projects/%u/cat/%s/posts.xml' % (project_id, category_id)
return self._request(path)
def messages_archived(self, project_id):
"""
This will return a summary for each message in the given project.
Note that a summary record includes only a few bits of
information about a messag, not the complete record.
"""
path = '/projects/%u/posts/archive.xml' % project_id
return self._request(path)
def messages_archived_per_category(self, project_id, category_id):
"""
This will return a summary for each message in the given project
for the given category. Note that a summary record includes
only a few bits of information about a messag,
not the complete record.
"""
path = '/projects/%u/cat/%s/posts/archive.xml' % (
project_id, category_id)
return self._request(path)
def message(self, message_id):
"""
This will return a single message record identified by its integer ID.
"""
path = '/posts/%u.xml' % message_id
return self._request(path)
def _create_message_post_elem(self, category_id, title, body,
private=False, notify=False):
post = ET.Element('post')
ET.SubElement(post, 'category-id').text = str(int(category_id) if category_id else '')
ET.SubElement(post, 'title').text = unicode(title)
ET.SubElement(post, 'body').text = unicode(body)
if notify:
ET.SubElement(post, 'notify-about-changes').text = '1'
#ET.SubElement(post, 'extended-body').text = str(extended_body)
#if bool(use_textile):
# ET.SubElement(post, 'use-textile').text = '1'
ET.SubElement(post, 'private').text = '1' if private else '0'
return post
def create_message(self, project_id, category_id, title, body,
private=False, notify=None, attachments=None):
"""
Creates a new message, optionally sending notifications to a selected
list of people. Note that you can also upload files using this
function, but you need to upload the files first and then attach them.
See the description at the top of this document for more information.
"""
path = '/projects/%u/posts.xml' % project_id
req = ET.Element('request')
req.append(self._create_message_post_elem(category_id, title, body,
private))
if notify:
for person_id in notify:
ET.SubElement(req, 'notify').text = str(int(person_id))
# TODO: Implement attachments.
if attachments is not None:
raise NotSupportedErr('Attachments are currently not implemented.')
##for attachment in attachments:
## attms = ET.SubElement(req, 'attachments')
## if attachment['name']:
## ET.SubElement(attms, 'name').text = str(attachment['name'])
## file_ = ET.SubElement(attms, 'file')
## ET.SubElement(file_, 'file').text = str(attachment['temp_id'])
## ET.SubElement(file_, 'content-type').text \
## = str(attachment['content_type'])
## ET.SubElement(file_, 'original-filename').text \
## = str(attachment['original_filename'])
return self._request(path, req, post=True, return_response=True)
def update_message(self, message_id, category_id, title, body,
private=False, notify=None):
"""
Updates an existing message, optionally sending notifications to a
selected list of people. Note that you can also upload files using
this function, but you have to format the request as
multipart/form-data. (See the ruby Basecamp API wrapper for an example
of how to do this.)
"""
path = '/posts/%u.xml' % message_id
req = ET.Element('request')
req.append(self._create_message_post_elem(category_id, title, body,
private, notify))
if notify is not None:
for person_id in notify:
ET.SubElement(req, 'notify').text = str(int(person_id))
return self._request(path, req, put=True)
def delete_message(self, message_id):
"""
Delete the specified message from the project.
"""
path = '/posts/%u.xml' % message_id
return self._request(path, delete=True)
# ---------------------------------------------------------------- #
# Comments
def comments(self, resource, resource_id):
"""
Return a list of the 50 most recent comments associated with
the specified resource, where the resource named in the URL
can be one of posts, milestones, or todo_items. For example,
to fetch the most recent comments for the todo item with an
id of 1, you would use the path: /todo_items/1/comments.xml.
"""
path = '/%s/%u/comments.xml' % (resource, resource_id)
return self._request(path)
def comment(self, comment_id):
"""
Retrieve a specific comment by its id.
"""
path = '/comments/%u.xml' % comment_id
return self._request(path)
def create_comment(self, resource, resource_id, body):
    """
    Create a new comment, associating it with a specific resource, where
    the resource named in the URL is one of posts, milestones or
    todo_items.  For example, a comment on the milestone with an ID of 1
    is created via the path: /milestones/1/comments.xml.
    """
    path = '/%s/%u/comments.xml' % (resource, resource_id)
    # The API expects a bare <comment> element (not wrapped in <request>).
    comment_elem = ET.Element('comment')
    ET.SubElement(comment_elem, 'body').text = unicode(body)
    return self._request(path, comment_elem, post=True, return_response=True)
def update_comment(self, comment_id, body):
    """
    Update a specific comment; used to edit the content of an existing
    comment.
    """
    req = ET.Element('request')
    comment_elem = ET.SubElement(req, 'comment')
    ET.SubElement(comment_elem, 'body').text = unicode(body)
    return self._request('/comments/%u.xml' % comment_id, req, put=True)
def delete_comment(self, comment_id):
    """
    Remove the comment with the given id.
    """
    return self._request('/comments/%u.xml' % comment_id, delete=True)
# ---------------------------------------------------------------- #
# Lists
def todo_lists(self, responsable_party=''):
"""
Returns a list of todo-list records, with todo-item records that
are assigned to the given "responsible party". If no responsible
party is given, the current user is assumed to be the responsible
party. The responsible party may be changed by setting the
"responsible_party" query parameter to a blank string
(for unassigned items), a person-id, or a company-id prefixed by
a "c" (e.g., c1234).
"""
path = '/todo_lists.xml?responsible_party=%u' % responsable_party
| |
from plotly import tools as ptools
import plotly.io as pio
import plotly.graph_objs as go
import esppy.espapi.connections as connections
import esppy.espapi.tools as tools
import esppy.espapi.viewers as viewers
from esppy.espapi.tools import Options
import esppy.espapi.colors as colors
from base64 import b64encode
import sys
import threading
import datetime
import base64
import ipywidgets as widgets
import ipyleaflet as maps
import numpy as np
import logging
import re
import math
import random
pio.templates.default = "none"
class Visuals(Options):
    """
    Factory for ESP visualization widgets (charts, maps, tables, viewers).

    Keeps a registry of every visual it creates and acts as the delegate
    for their datasources, routing data/info/stats events to the matching
    visuals.  Also holds shared presentation state: the color palette,
    common chart style, header (title) style and dashboard layout options.
    """

    # Prefix marking inline image data carried in event fields.
    _dataHeader = "_data://"

    def __init__(self, **kwargs):
        Options.__init__(self, **kwargs)
        self._visuals = []
        self._border = None
        # Color palette: an explicit colormap option wins, then an explicit
        # list of colors, otherwise the default palette.
        if self.hasOpt("colormap"):
            self._colors = colors.Colors(colormap=self.getOpt("colormap"))
        elif self.hasOpt("colors"):
            self._colors = colors.Colors(colors=self.getOpt("colors"))
        else:
            self._colors = colors.Colors(colormap="")
        self._chartStyle = tools.Options()
        self._css = self.getOpt("css", "visuals")
        self._headerStyle = tools.Options(font_family="Arial, Helvetica, sans-serif", font_size="12pt")
        self._dashboardLayout = tools.Options()
        self._axisWidth = 1

    def setDashboardLayout(self, **kwargs):
        """Record layout options (width, border, padding, ...) applied to dashboards."""
        # Fixed: the original built a throwaway Options object from kwargs
        # that was never used.
        for key, value in kwargs.items():
            self._dashboardLayout.setOpt(key, value)

    def setLayoutValues(self, layout, opts):
        """Copy the recognized sizing/spacing options onto an ipywidgets layout."""
        for key, value in opts.items():
            if key == "width":
                layout.width = value
            elif key == "height":
                layout.height = value
            elif key == "border":
                layout.border = value
            elif key == "margin":
                layout.margin = value
            elif key == "padding":
                layout.padding = value

    def optionSet(self, name, value):
        """Options callback: rebuild the palette when the colormap option changes."""
        if name == "colormap":
            self._colors = colors.Colors(colormap=value)

    def setTitleStyle(self, **kwargs):
        """Set style options (font_family, font_size, ...) used for chart titles."""
        self._headerStyle.setOpts(**kwargs)

    def setChartStyle(self, **kwargs):
        """Set plotly layout options shared by every chart created here."""
        self._chartStyle.setOpts(**kwargs)

    def _createVisual(self, cls, datasource, layout, **kwargs):
        # Shared construction path for all datasource-driven visuals:
        # listen to the datasource, build and initialize the visual, then
        # register it so data/info/stats events get routed to it.
        datasource.addDelegate(self)
        visual = cls(self, datasource, layout, **kwargs)
        visual.create()
        self._visuals.append(visual)
        return(visual)

    def createBarChart(self, datasource, layout=None, **kwargs):
        """Create a bar chart bound to the datasource."""
        return(self._createVisual(BarChart, datasource, layout, **kwargs))

    def createLineChart(self, datasource, layout=None, **kwargs):
        """Create a line chart bound to the datasource."""
        return(self._createVisual(LineChart, datasource, layout, **kwargs))

    def createScatterPlot(self, datasource, layout=None, **kwargs):
        """Create a scatter plot bound to the datasource."""
        return(self._createVisual(ScatterPlot, datasource, layout, **kwargs))

    def createTimeSeries(self, datasource, layout=None, **kwargs):
        """Create a time series chart bound to the datasource."""
        return(self._createVisual(TimeSeries, datasource, layout, **kwargs))

    def createBubbleChart(self, datasource, layout=None, **kwargs):
        """Create a bubble chart bound to the datasource."""
        return(self._createVisual(BubbleChart, datasource, layout, **kwargs))

    def createPieChart(self, datasource, layout=None, **kwargs):
        """Create a pie chart bound to the datasource."""
        return(self._createVisual(PieChart, datasource, layout, **kwargs))

    def createMap(self, datasource, layout=None, **kwargs):
        """Create a map visual bound to the datasource."""
        return(self._createVisual(Map, datasource, layout, **kwargs))

    def createGauge(self, datasource, layout=None, **kwargs):
        """Create a gauge visual bound to the datasource."""
        return(self._createVisual(Gauge, datasource, layout, **kwargs))

    def createCompass(self, datasource, layout=None, **kwargs):
        """Create a compass visual bound to the datasource."""
        return(self._createVisual(Compass, datasource, layout, **kwargs))

    def createTable(self, datasource, layout=None, **kwargs):
        """Create a table visual bound to the datasource."""
        return(self._createVisual(Table, datasource, layout, **kwargs))

    def createImageViewer(self, datasource, layout=None, **kwargs):
        """Create an image viewer bound to the datasource."""
        return(self._createVisual(ImageViewer, datasource, layout, **kwargs))

    def createImages(self, datasource, layout=None, **kwargs):
        """Create an images visual bound to the datasource."""
        return(self._createVisual(Images, datasource, layout, **kwargs))

    def createControls(self, datasource, layout=None, **kwargs):
        """Create a control panel bound to the datasource."""
        return(self._createVisual(Controls, datasource, layout, **kwargs))

    def createDashboard(self, **kwargs):
        """Create an empty full-width dashboard container."""
        return(Dashboard(self, layout=widgets.Layout(width="100%"), **kwargs))

    def createWrapper(self, widget, layout=None, **kwargs):
        """Wrap an arbitrary ipywidgets widget so it can live in a dashboard."""
        # Wrappers have no datasource, so they bypass _createVisual.
        wrapper = Wrapper(self, widget, layout, **kwargs)
        self._visuals.append(wrapper)
        wrapper.create()
        return(wrapper)

    def createModelViewer(self, connection, layout=None, **kwargs):
        """Create a viewer displaying the ESP model of the connection."""
        return(viewers.ModelViewer(self, connection, layout, **kwargs))

    def createLogViewer(self, connection, layout=None, **kwargs):
        """Create a viewer displaying the server log of the connection."""
        return(viewers.LogViewer(self, connection, layout, **kwargs))

    def createStatsViewer(self, connection, layout=None, **kwargs):
        """Create a viewer displaying server statistics of the connection."""
        return(viewers.StatsViewer(self, connection, layout, **kwargs))

    def dataChanged(self, datasource, data, clear):
        """Datasource delegate: redraw every visual bound to the datasource."""
        for v in self._visuals:
            if v._datasource == datasource:
                v.draw(data, clear)

    def infoChanged(self, datasource):
        """Datasource delegate: push updated info to every bound visual."""
        for v in self._visuals:
            if v._datasource == datasource:
                v.info(datasource.getInfo())

    def handleStats(self, datasource):
        """Datasource delegate: redraw bound visuals when statistics arrive."""
        for v in self._visuals:
            if v._datasource == datasource:
                v.draw()

    def clear(self):
        """Forget every visual created so far."""
        self._visuals = []

    def getHeaderStyle(self):
        """Return the header style options rendered as a CSS declaration string."""
        # Option keys use underscores (font_family); CSS wants dashes.
        return(";".join(k.replace("_", "-") + ":" + v
                        for k, v in self._headerStyle.options.items()))

    def formatTitle(self, text):
        """Wrap the title text in a centered <div> carrying the header style."""
        content = "<div style='" + self.getHeaderStyle() + ";text-align:center'>"
        if text is None:
            content += " "
        else:
            content += text
        content += "</div>"
        return(content)

    @property
    def css(self):
        """CSS class prefix shared by every widget created by this factory."""
        return(self._css)
class Chart(Options,widgets.Box):
    """
    Base class for all chart widgets: an ipywidgets Box holding a header
    (HTML title), a content area (normally a plotly FigureWidget) and a
    footer (optional control panel), optionally bound to an ESP datasource.
    Subclasses implement createContent() to build the plotly traces and
    draw() to refresh them from the datasource.
    """
    def __init__(self,visuals,datasource = None,layout = None,**kwargs):
        Options.__init__(self,**kwargs)
        if layout != None:
            widgets.Box.__init__(self,layout=layout)
        else:
            widgets.Box.__init__(self)
        # Explicit width/height options override the supplied layout.
        if self.hasOpt("width"):
            self.layout.width = self.getOpt("width")
        if self.hasOpt("height"):
            self.layout.height = self.getOpt("height")
        self._visuals = visuals
        self._datasource = datasource
        self.add_class(self._visuals.css + "_chart")
        # Optional user-supplied CSS class; mirrored onto each sub-widget
        # with a suffix (_container, _header, _content, _footer).
        self._class = self.getOpt("css")
        if self._class != None:
            self.add_class(self._class)
        self._container = widgets.Box(layout=widgets.Layout(width="100%",display="inline_flex",flex_flow="column",justify_content="center"))
        self._container.add_class(self._visuals.css + "_container")
        if self._class != None:
            self._container.add_class(self._class + "_container")
        self._header = widgets.HTML(layout=widgets.Layout(overflow="hidden",width="100%",margin="0"))
        self._header.add_class(self._visuals.css + "_header")
        if self._class != None:
            self._header.add_class(self._class + "_header")
        self._content = widgets.Box(layout=widgets.Layout(width="100%",display="inline_flex",flex_flow="row wrap",justify_content="center",align_items="center"))
        #self._content = widgets.Box(layout=widgets.Layout(width="100%",display="inline_flex",flex_flow="row wrap",justify_content="center",align_items="flex-start"))
        self._content.add_class(self._visuals.css + "_content")
        if self._class != None:
            self._content.add_class(self._class + "_content")
        self._footer = widgets.Box(layout=widgets.Layout(width="100%"))
        self._footer.add_class(self._visuals.css + "_footer")
        if self._class != None:
            self._footer.add_class(self._class + "_footer")
        # NOTE(review): this always-true branch installs a placeholder Text
        # widget reading "footer" -- looks like leftover debug code; confirm
        # before removing (setDisplay() may overwrite it with the controls).
        if True:
            self._footer.children = [widgets.Text(value="footer")]
        self.children = [self._container]
        self._figure = None
        self._data = None
        self._layout = None
        self._controls = None
        self._chartStyle = tools.Options()
        self._delegates = []
        self.setDisplay()
    def addDelegate(self,delegate):
        # Register a delegate (e.g. per-point color provider) and redraw so
        # its styling takes effect immediately.
        tools.addTo(self._delegates,delegate)
        if self._figure is not None:
            self.draw()
    def removeDelegate(self,delegate):
        # Unregister a delegate and redraw without its styling.
        tools.removeFrom(self._delegates,delegate)
        if self._figure is not None:
            self.draw()
    def setDisplay(self):
        # Assemble header/content/footer according to the show_* options;
        # the content area gets whatever height percentage remains (each
        # enabled header/footer takes 10%).
        children = []
        height = 100
        if self.getOpt("show_header",True):
            children.append(self._header)
            height -= 10
        children.append(self._content)
        if self.getOpt("show_controls",False):
            if self._controls == None:
                self._controls = ControlPanel(self._datasource)
            self._footer.children = [self._controls]
            children.append(self._footer)
            height -= 10
        self._content.layout.height = str(height) + "%"
        self._container.children = children
    def setOpts(self,**kwargs):
        # Changing options rebuilds the chart from scratch.
        Options.setOpts(self,**kwargs)
        self.create()
    def setChartStyle(self,**kwargs):
        # Per-chart plotly layout overrides (applied after the shared
        # Visuals chart style in addChartStyle()).
        self._chartStyle.setOpts(**kwargs)
    def setWidth(self,value):
        # Width option; takes effect on the next create().
        self.setOpt("width",value)
    def setHeight(self,value):
        # Height option; takes effect on the next create().
        self.setOpt("height",value)
    def createContent(self):
        # Subclasses populate self._data with plotly traces here.
        pass
    def create(self):
        """Build the plotly figure from the subclass traces and draw it."""
        self.createContent()
        if self._data != None:
            self._layout = go.Layout()
            margin = 20
            self._layout["margin"] = dict(l=margin,r=margin,b=margin,t=margin)
            # Optional fixed axis ranges.
            xRange = self.getOpt("xrange")
            if xRange != None:
                self._layout["xaxis"]["range"] = xRange
            yRange = self.getOpt("yrange")
            if yRange != None:
                self._layout["yaxis"]["range"] = yRange
            self._layout["xaxis"]["showticklabels"] = self.getOpt("showticks",True)
            self._layout["xaxis"]["showline"] = False
            self.addChartStyle()
            self._figure = go.FigureWidget(data=self._data,layout=self._layout)
            self._figure.add_class(self._visuals.css + "_figure")
            self._content.children = [self._figure]
            self.draw(None,True)
    def addChartStyle(self):
        # Apply shared (Visuals-level) style first, then per-chart style so
        # the chart's own settings win.
        for name,value in self._visuals._chartStyle.items():
            self._layout[name] = value
        for name,value in self._chartStyle.items():
            self._layout[name] = value
    def setTitle(self,title = None):
        """Render the chart title (option, or datasource path) into the header."""
        if title == None:
            title = self.getOpt("title")
        if self._datasource != None:
            if title == None:
                title = self._datasource._path
            # Event collections show paging information in the title.
            if isinstance(self._datasource,connections.EventCollection):
                if self._datasource._pages > 1:
                    title += " (Page " + str(self._datasource._page + 1) + " of " + str(self._datasource._pages) + ")"
            # An active filter is appended on its own line.
            filter = self._datasource.getOpt("filter")
            if filter != None:
                title += "<br>"
                title += filter
        self._header.value = self._visuals.formatTitle(title)
    def createMarkers(self):
        """
        Build the plotly marker dict (size/color scaling) and hover text
        from the chart's size/color options and the datasource values.
        """
        o = {}
        marker = {}
        keys = self._datasource.getKeyValues()
        text = []
        for i,key in enumerate(keys):
            text.append(key)
        size = None
        # The size option may be a fixed number or a field name; only a
        # non-numeric value is treated as a field to read per-point sizes.
        value = self.getOpt("size")
        if value != None:
            try:
                num = int(value)
                marker["size"] = num
            except:
                size = value
        color = self.getOpt("color")
        if size != None or color != None:
            if size != None:
                s = self._datasource.getValues(size)
                if s != None and len(s) > 0:
                    maxsize = 60.
                    minsize = 5
                    marker["size"] = s
                    marker["sizemode"] = "area"
                    # Plotly's recommended sizeref formula for area scaling.
                    marker["sizeref"] = 2. * max(s) / (maxsize ** 2)
                    marker["sizemin"] = minsize
                    for i,v in enumerate(s):
                        text[i] += "<br>"
                        text[i] += size + "=" + str(v)
            color = self.getOpt("color")
            if color != None:
                s = self._datasource.getValues(color)
                if s != None and len(s) > 0:
                    marker["color"] = s
                    marker["showscale"] = True
                    marker["colorscale"] = self._visuals._colors.colorscale
                    if size == None or color != size:
                        for i,v in enumerate(s):
                            # NOTE(review): "v in size == False" is a chained
                            # comparison -- (v in size) and (size == False) --
                            # which is always False, so this color text is
                            # never appended.  Probably meant
                            # "size != None and v not in size"; confirm intent.
                            if size != None and v in size == False:
                                text[i] += "<br>"
                                text[i] += color + "=" + str(v)
        #marker["line"] = {"width":2,"color":"#ff0000"}
        o["marker"] = marker
        o["text"] = text
        return(o)
    def info(self,data):
        # Datasource info callback: refresh controls and the title.
        if self._controls != None:
            self._controls.processInfo()
        self.setTitle()
    def getValues(self,name):
        """Return the option's value normalized to a list (possibly empty)."""
        values = []
        value = self.getOpt(name)
        if value != None:
            if type(value) is list:
                for v in value:
                    values.append(v)
            else:
                values.append(value)
        return(values)
class BarChart(Chart):
    """
    Bar chart bound to an ESP datasource; supports vertical (default) and
    horizontal orientation, multiple y-value series and per-bar coloring
    through delegates implementing get_bar_color().
    """
    def __init__(self,visuals,datasource,layout,**kwargs):
        Chart.__init__(self,visuals,datasource,layout,**kwargs)
    def createContent(self):
        # One Bar trace per configured y value, seeded with placeholder
        # data; draw() fills in real values from the datasource.
        values = self.getValues("y")
        colors = self._visuals._colors.getFirst(len(values))
        opacity = self.getOpt("opacity")
        self._data = []
        orientation = self.getOpt("orientation","vertical")
        if orientation == "horizontal":
            # NOTE(review): the opacity option is ignored for horizontal
            # bars -- confirm whether that is intentional.
            for i,v in enumerate(values):
                self._data.append(go.Bar(x=[0],y=[""],name=v,orientation="h",marker_color=colors[i]))
        else:
            for i,v in enumerate(values):
                self._data.append(go.Bar(x=[""],y=[0],name=v,opacity=opacity,marker_color=colors[i]))
    def draw(self,data = None,clear = False):
        """Refresh the bar traces from the datasource (called on data events)."""
        if self._figure == None:
            return
        self._figure.update_xaxes(showline=True,linewidth=self._visuals._axisWidth)
        self._figure.update_yaxes(showline=True,linewidth=self._visuals._axisWidth)
        x = self.getValues("x")
        values = self.getValues("y")
        orientation = self.getOpt("orientation","vertical")
        marker = {}
        if orientation == "horizontal":
            # With explicit x fields, aggregate values by those fields;
            # otherwise plot straight key/value pairs from the datasource.
            if len(x) > 0:
                try:
                    data = self._datasource.getValuesBy(x,values)
                except:
                    return
                for i,v in enumerate(values):
                    self._figure.data[i].x = data["values"][v]
                    self._figure.data[i].y = data["keys"]
            else:
                keys = self._datasource.getKeyValues()
                for i,v in enumerate(values):
                    y = self._datasource.getValues(v)
                    self._figure.data[i].x = y
                    self._figure.data[i].y = keys
        else:
            if len(x) > 0:
                try:
                    data = self._datasource.getValuesBy(x,values)
                except:
                    return
                for i,v in enumerate(values):
                    self._figure.data[i].x = data["keys"]
                    self._figure.data[i].y = data["values"][v]
            else:
                keys = self._datasource.getKeyValues()
                if len(keys) == 0:
                    # No data yet: keep placeholder traces so the axes render.
                    for i,v in enumerate(values):
                        self._figure.data[i].x = [""]
                        self._figure.data[i].y = [0]
                else:
                    for i,v in enumerate(values):
                        y = self._datasource.getValues(v)
                        self._figure.data[i].x = keys
                        self._figure.data[i].y = y
        # Per-bar coloring via delegates that implement get_bar_color().
        markers = None
        for d in self._delegates:
            if tools.supports(d,"get_bar_color"):
                markers = []
                # NOTE(review): "colors" is initialized once per delegate but
                # shared across the per-series loops below, so later series
                # accumulate the colors of earlier ones -- confirm whether it
                # should be reset inside each "for i,v" iteration.
                colors = []
                if orientation == "horizontal":
                    for i,v in enumerate(values):
                        marker = {}
                        for j in range(0,len(self._figure.data[i].y)):
                            color = d.get_bar_color(self._figure.data[i].y[j],self._figure.data[i].x[j])
                            colors.append(color)
                        marker["color"] = colors
                        markers.append(marker)
                else:
                    for i,v in enumerate(values):
                        marker = {}
                        for j in range(0,len(self._figure.data[i].x)):
                            color = d.get_bar_color(self._figure.data[i].x[j],self._figure.data[i].y[j])
                            colors.append(color)
                        marker["color"] = colors
                        markers.append(marker)
        if markers is not None:
            for i,m in enumerate(markers):
                self._figure.data[i].marker = markers[i]
        self._figure.update_xaxes(automargin=True)
        self._figure.update_yaxes(automargin=True)
        self.setTitle()
class LineChart(Chart):
def __init__(self,visuals,datasource,layout,**kwargs):
    # Straight delegation; LineChart only customizes content creation.
    Chart.__init__(self,visuals,datasource,layout,**kwargs)
def createContent(self):
    # One Scatter trace per configured y value, seeded with placeholder
    # data; draw() fills in real values from the datasource.
    values = self.getValues("y")
    self._data = []
    width = self.getOpt("line_width",2)
    shape = "linear"
    if self.getOpt("curved",False):
        shape = "spline"
    line = {"width":width,"shape":shape}
    fill = self.getOpt("fill",False)
    colors = self._visuals._colors.getFirst(len(values))
    # Filled charts hide the line itself; otherwise the mode option (if
    # any) overrides the default "lines".
    mode = "lines"
    if fill:
        mode = "none"
    elif self.hasOpt("mode"):
        mode = self.getOpt("mode")
    for i,v in enumerate(values):
        if fill:
            # First filled series fills down to zero; subsequent series
            # stack by filling to the previous trace.
            if i == 0:
                self._data.append(go.Scatter(x=[""],y=[0],name=v,mode=mode,fill="tozeroy",fillcolor=colors[i]))
            else:
                self._data.append(go.Scatter(x=[""],y=[0],name=v,mode=mode,fill="tonexty",fillcolor=colors[i]))
        else:
            # NOTE(review): the same "line" dict object is mutated and
            # reused for every trace -- all traces end up referencing the
            # color assigned last unless plotly copies it; confirm.
            line["color"] = colors[i]
            self._data.append(go.Scatter(x=[""],y=[0],name=v,mode=mode,line=line))
def draw(self,data = None,clear | |
Ref:
# https://wiki.python.org/moin/TimeComplexity
# Algorithm Cheat Sheet:
# http://bigocheatsheet.com/
# List:
# index l[i] O(1)
# store l[i]=1 O(1)
# len len(l) O(1)
# l.append(1) O(1) *avrg/amortized.
# sort l.sort() O(n)
# Set:
# av worste
# add O(1)
# in O(1) O(n)?
# pop O(1)
pass
def bit_wise_ops():
    # Bitwise operator crib notes.
    # https://wiki.python.org/moin/BitwiseOperators
    #   A << 1   shift bits left by 1
    #   B >> 1   shift bits right by 1
    #   A & B    and;  A | B  or;  A ^ B  xor;  ~A  complement
    pass
def module_import():
    # Importing modules:
    #   import module_name
    #   from module_name import name , .. | *
    pass
def asterisk_usage():
    # The asterisk has many uses:
    #   *   -> list / positional args
    #   **  -> dict / keyword args
    #   In a def: collect extra arguments into a list/dict.
    #   At a call site: unpack a list/dict, e.g. myFunc(*[1,2,3]) -> myFunc(1,2,3)
    #   In assignment targets: dynamically assign the remainder to a list/dict.
    # https://medium.com/understand-the-python/understanding-the-asterisk-of-python-8b9daaa4a558
    # Zip/Unzip used for unpackaging/packaging lists:
    list(zip([1, 2, 3], ["a", "b", "c"]))  # [(1, 'a'), (2, 'b'), (3, 'c')]
    # Ex: https://www.hackerrank.com/challenges/zipped/problem
    pass
def stdin_console_input():
    # Reading from stdin:
    #   myvar = input()   # python 2: evaluates the expression. python 3: returns a string; use eval(..) if needed.
    #   print (myvar)     # 1 + 1 -> 2
    #   Problem: https://www.hackerrank.com/challenges/input/problem
    #   myvar2 = raw_input()  # Python 2: returns a string. python 3: removed.
    #   print (myvar2)    # 1 + 1 -> '1 + 1'
    # Splitting input into list/dic:
    #   Problem: https://www.hackerrank.com/challenges/finding-the-percentage/problem
    pass
def Classes():
    # c88fb2fff01f444ca67d892a202078c3 -> cheatcheet
    class myClass:
        def __init__(self, val):
            self.val = val

        def __repr__(self):
            return self.val

    # Instances can be augmented with new attributes on the fly:
    instance = myClass("MyVal")
    hasattr(instance, "myAttr")  # False
    instance.myAttr = "hello"
    hasattr(instance, "myAttr")  # True
    # ref: https://stackoverflow.com/questions/610883/how-to-know-if-an-object-has-an-attribute-in-python
def class_basic():
    # Minimal class with a class-level attribute:
    #   class MyClass:
    #       #someVar = 5  # Instance var sample.
    #
    #   my = MyClass();
    #   print (my.x)  # 5
    #   my.x = 10
    #   print (my.x)  # 10
    pass
class class_for_data:  # Plain class used as a data bag. See also namedtuple.
    def __init__(self, *arg_list, **kwargs):
        # Fixed demo attributes plus whatever the caller supplied.
        self.args = arg_list
        self.kwargs = kwargs
        self.a = "a"
        self.b = "b"
    # d = class_for_data(1,2,3, leo=":-)")
    # print d.a, d.b, d.args, d.kwargs
def class_with_constructor():
    # Class with constructor:
    #   class Person:
    #       def __init__(self, name, age):
    #           self.name = name
    #           self.age = age
    #
    #   p = Person("Leo", 31)
    #   print (p.name)
    #   print (p.age)
    pass
def class_repr_tostr():
    # Proper printing of class when called with print/str(myClass).
    class myClass:
        # BUGFIX: the hook must be named __repr__ (double trailing
        # underscores); "__repr" is just an ordinary method and would
        # never be called by print()/str().
        def __repr__(self):
            return "tostr like"
def class_inheritence():
    # Inheritance demo: two subclasses sharing a base that defines
    # identity (eq/hash) by name.
    # Ref: https://www.w3schools.com/python/python_inheritance.asp
    class Car:
        def __init__(self, name):
            self.name = name

        def __hash__(self):
            return hash(self.name)

        def __eq__(self, other):
            return self.name == other.name

    class Audi(Car):
        def __init__(self, name, ring_count):
            super().__init__(name)  # Note, super() with brackets.
            self.ring_count = ring_count

        def __repr__(self):
            # BUGFIX: format string was missing its closing parenthesis.
            return "Audi(name={}, ring_count={})".format(self.name, self.ring_count)

    class BMW(Car):
        def __init__(self, name, coolness_factor):
            super().__init__(name)
            self.coolness_factor = coolness_factor

        def __repr__(self):
            # BUGFIX: format string was missing its closing parenthesis.
            return "BMW(name={}, coolness_factor={})".format(self.name, self.coolness_factor)

    a = Audi("A4", 5)
    a2 = Audi("A4", 3)
    b = BMW("Model 5", 42)
    s = set()
    s.add(a)
    s.add(a2)  # Same name as a -> treated as duplicate by eq/hash.
    s.add(b)
    print(s)  # {BMW(name=Model 5, coolness_factor=42), Audi(name=A4, ring_count=5)}
def hashing_of_classes():
    """Notes and demo: why __eq__ and __hash__ must be implemented together."""
    # Default Object implementation: (Pseudo code)
    # class object:
    #     def __eq___(self, other):
    #         return id(self) == id(other)  # Default __eq__ compares by id. Thus default class __eq__ only true on self-id comparison
    #     def __hash__(self):
    #         return hash(id(self)) / 16  # ish... Default __hash__ compares by hash value of id to guarantee uniqueness.
    # For dict/set to find things like d[a], it uses hash values. So the following must hold:
    #     if a == b -> hash(a) == hash(b)
    # Thus if we override __eq__, we must also implement __hash__
    # Ref/Article: https://hynek.me/articles/hashes-and-equality/  # when hashing breaks...

    # Example:
    # Node class that is unique on val & color. But we can change visited state without breaking set/dict & comparison.
    # (Useful for graph traversals)
    class node:
        def __init__(self, val, color):
            self.val = val  # Immutable
            self.color = color  # Immutable
            self.visited = False  # Mutable

        def __repr__(self):
            return "Node: {} {} {}".format(self.val, self.color, self.visited)

        # eq Must be implemented for set()/dict to determine duplicates. Otherwise obj id used for eq. Obj id is mostly unique.
        # I.e otherwise node(1) == node(1) -> False.
        def __eq__(self, other):
            return self.val == other.val and self.color == other.color

        # Must be implemented if __eq__ is overridden so that property holds: if a==b, hash(a)==hash(b)
        # (Otherwise you get "TypeError: unhashable type:" in python3. python2 allows)
        # hash value must not change in lifetime of object.
        def __hash__(self):
            return hash((self.val, self.color))  # Unique data in a tuple, since tuples are immutable.
        # Ref: https://stackoverflow.com/questions/2909106/whats-a-correct-and-good-way-to-implement-hash

    # NOTE(review): this line actually EXECUTES the whole dataclases() demo
    # (prints included) as a side effect; it reads like it was meant to be a
    # "see dataclases() ..." cross-reference comment -- confirm intent.
    see = dataclases()  # for similar implementation with annotation.
    a = node(1, "Red")
    b = node(2, "Blue")
    aa = node(1, "Red")
    aa.visited = True
    print("a == aa", a == aa)  # -> True. (w/o eq/hash, -> False).
    s = set([a, b, aa])  # -> Node 1 & 2. (w/o eq/hash, -> Node 1,1,2)
    print(s)
def dataclases():
    """Cheat-sheet for the @dataclass decorator (Python >= 3.7)."""
    # Py >= 3.7
    from dataclasses import dataclass, field
    from typing import Any, List, Set, Dict, Deque, DefaultDict, Tuple  # for primitives, just use int/float/bool/str/bytes

    # NOTE(review): "name" and "edges" are annotated twice below; because
    # annotation order keeps the FIRST occurrence, fields with defaults end
    # up before "edges2"/"generic" (which have none) and @dataclass is
    # expected to raise "non-default argument follows default argument" when
    # this class is created -- confirm and split the demo if so.
    @dataclass()
    class Node:
        name: str  # must pass when creating. Node("mynode", set())
        edges: Set
        edges2: Set['Node']  # recursive structure reference. Auto-predict works better.
        generic: Any  # can be any field.
        # Default for primitives
        visited: bool = False  # type annotation with default value.
        name: str = "marry"  # w/ default.
        value = 1  # default value w/o type hints.
        # Default for objects must use default_factory=FUNCTION to initialize
        edges: Set = field(default_factory=set)  # set/list/dict. [1]
        file_perm: List = field(default_factory=lambda: ['r', 'w', 'x'])

    # @dataclases() arg reference (plain assignments used as documentation):
    eq=True  # Generate __eq__ method.
    unsafe_hash=False  # Can be overriden to provide hashable func.
    frozen=False  # Immutable, adds hashfunction.
    order=False  # generate __lt__ __le__ __gt__ __ge__. Tuple made out of attributes for cmp.
    repr=True  # should __repr__ method be created?
    # ## Recursive self structure reference. (e.g node in linked list/Tree). -> Put into quotes.
    # Field reference:
    # field(
    #     default=                   # If provided, sets default value
    #     default_factory=FUNCTION   # If provided, called during instantiation
    #     compare=True,              # should field be used in comparison?
    #     hash=None                  # Default: inherits compare. True/False if hash to be computed.
    #     repr=True                  # Referenced by __repr__ method?
    # )
    # Readings:
    # Basics/overview:
    #   https://realpython.com/python-data-classes/
    #
    # Basics, comparisons, frozen, inheritence:
    #   https://medium.com/mindorks/understanding-python-dataclasses-part-1-c3ccd4355c34
    # default_factory, excluding fields from comparison, exclude from representation, omit from init.
    #   https://medium.com/mindorks/understanding-python-dataclasses-part-2-660ecc11c9b8
    # References
    #   https://docs.python.org/3/library/dataclasses.html
    #   [1] https://stackoverflow.com/questions/53632152/why-cant-dataclasses-have-mutable-defaults-in-their-class-attributes-declaratio
    #   https://docs.python.org/3/library/dataclasses.html#mutable-default-values

    # ########################
    # ## For use in Sets/Dicts (making them hashable)
    # ########################
    # 1) Make Immutable
    @dataclass(frozen=True)  # -> Immutable == Hashable
    class node:
        val: int

    # 2) Override eq/hash manually.
    from dataclasses import dataclass
    @dataclass()
    class node:
        val: int
        color: int
        visited: bool = False

        def __eq__(self, other):
            return self.val == other.val and self.color == other.color

        def __hash__(self):
            return hash((self.val, self.color))

    # 3) Use unsafe_hash=True and compare=False
    # To hash based on some values, use unsafe_hash=True and manually exclude
    from dataclasses import dataclass, field
    @dataclass(unsafe_hash=True)
    class node:
        x: int
        visit_count: int = field(default=10, compare=False)  # hash inherits compare setting. So valid.
        # visit_count: int = field(default=False, hash=False)  # also valid. Arguably easier to read, but can break some compare code.
        # visit_count: int = False  # if mutated, hashing breaks. (3* printed)

    s = set()
    n = node(1)
    s.add(n)
    if n in s: print("1* n in s")
    n.visit_count = 11  # excluded from hash/compare, so membership survives.
    if n in s:
        print("2* n still in s")
    else:
        print("3* n is lost to the void because hashing broke.")

    # ########################
    # ## Post init actions: (maybe use proper class at this point.)
    # ########################
    from math import sqrt
    @dataclass()
    class point:
        x: int
        y: int

        def __post_init__(self):  # Override this guy.
            self.dist = sqrt(self.x ** 2 + self.y ** 2)
    # p = point(2, 2)
    # p.dist
    # # 2.8284271247461903
def exception_handling():
# # Exception handing
try:
1 / 0
except (ValueError, ZeroDivisionError) as e:
print(e)
except Exception as e: # Catch all exceptions.
print(e)
# | |
# Source repository: wwydmanski/Chemformer
import torch
from rdkit import Chem, RDLogger
class DecodeSampler:
def __init__(self, tokeniser, max_seq_len, length_norm=None):
    """Build a sampler around *tokeniser*, producing sequences of at most
    *max_seq_len* tokens; *length_norm* optionally normalises beam scores."""
    self.tokeniser = tokeniser
    self.max_seq_len = max_seq_len
    self.length_norm = length_norm

    assert max_seq_len > 1, f"Max sequence must be at least 2, got {max_seq_len}"

    # Cache the special-token ids used on every decode step.
    vocab = self.tokeniser.vocab
    self.begin_token_id = vocab[self.tokeniser.begin_token]
    self.pad_token_id = vocab[self.tokeniser.pad_token]
    self.end_token_id = vocab[self.tokeniser.end_token]

    # Log likelihood assigned to disallowed tokens.
    self.bad_token_ll = -1e5

    # Silence RDKit's logging globally.
    RDLogger.DisableLog("rdApp.*")
def decode(self, decode_fn, batch_size, sampling_alg="greedy", device="cpu", **kwargs):
""" Sample a molecule from a model by calling the decode function argument
Args:
decode_fn: A function mapping a batched sequence of token identifiers and their associated pad masks
to a log probability distribution over possible next tokens
batch_size: The number of elements to pass into the decode function in one batch
sampling_alg: Algorithm to use for sampling from the model
Returns:
(SMILES of sampled molecules (List[str]), log likelihoods (List[float]))
"""
if sampling_alg == "greedy":
output = self.greedy_decode(decode_fn, batch_size, device)
elif sampling_alg == "beam":
output = self.beam_decode(decode_fn, batch_size, device, kwargs)
else:
raise ValueError(f"Unknown sampling algorithm {sampling_alg}")
return output
def greedy_decode(self, decode_fn, batch_size, device="cpu"):
    """ Sample molecules from the model using greedy search

    Args:
        decode_fn (fn): Function used to apply tokens to model and produce log probability distribution
        batch_size (int): Number of molecules to sample
        device: Torch device to create tensors on

    Returns:
        (List[str], List[float]): Tuple of (molecules, their log likelihoods)
    """

    # Create tensors which will be reused.
    # token_ids and pad_mask are laid out [max_seq_len, batch_size]; every
    # sequence starts with the begin token followed by padding.
    token_ids = [self.begin_token_id] + ([self.pad_token_id] * (self.max_seq_len - 1))
    token_ids = [token_ids] * batch_size
    token_ids = torch.tensor(token_ids, device=device).transpose(0, 1)
    pad_mask = torch.zeros((self.max_seq_len, batch_size), device=device, dtype=torch.bool)
    log_lhs = torch.zeros((batch_size))

    # Iteratively apply the tokens to the model and build up the sequence
    for i in range(1, self.max_seq_len):
        token_ids_seq = token_ids[:i, :]
        pad_mask_seq = pad_mask[:i, :]

        # Sample next id for each element in the batch (greedy argmax over
        # the distribution of the final position).
        output_dist = decode_fn(token_ids_seq, pad_mask_seq)
        probs, output_ids = output_dist.max(dim=2)
        new_ids = output_ids[-1, :]
        new_probs = probs[-1, :]

        # Generate next elements in the pad mask. An element is padded if:
        # 1. The previous token is an end token
        # 2. The previous token is a pad token
        is_end_token = token_ids[i-1, :] == self.end_token_id
        is_pad_token = token_ids[i-1, :] == self.pad_token_id
        new_pad_mask = torch.logical_or(is_end_token, is_pad_token)

        # Break if sampling is complete (every sequence has finished)
        if new_pad_mask.sum().item() == new_pad_mask.numel():
            break

        # Ensure all sequences contain an end token when length runs out
        if i == self.max_seq_len - 1:
            new_ids[~new_pad_mask] = self.end_token_id

        # Set the token to pad where required, update the token ids and update lls
        # NOTE(review): new_probs is accumulated for every batch element,
        # including already-padded sequences -- confirm whether finished
        # sequences should stop contributing to log_lhs here.
        new_ids[new_pad_mask] = self.pad_token_id
        token_ids[i, :] = new_ids
        pad_mask[i, :] = new_pad_mask
        log_lhs += new_probs.cpu()

    # Convert back to [batch_size, seq_len] and detokenise into SMILES.
    tokens = token_ids.transpose(0, 1).tolist()
    tokens = self.tokeniser.convert_ids_to_tokens(tokens)
    mol_strs = self.tokeniser.detokenise(tokens)
    log_lhs = log_lhs.tolist()

    return mol_strs, log_lhs
def beam_decode(self, decode_fn, batch_size, device="cpu", k=5):
    """ Sample molecules from the model using beam search

    Samples molecules by iteratively building up the sequence of SMILES characters using beam search.
    Molecules are returned in a 2D list where batch_size is the outer dimension and k is the inner dimension.

    Args:
        decode_fn (fn): Function used to apply tokens to model and produce log probability distribution
        batch_size (int): Number of molecules to sample
        device: Torch device to create tensors on
        k (int): Number of beams

    Returns:
        (List[List[str]], List[List[float]]): Tuple of (molecules, their log likelihoods)
    """

    # Create tensors which will be reused.
    # token_ids and pad_mask are laid out [max_seq_len, batch_size].
    token_ids = [self.begin_token_id] + ([self.pad_token_id] * (self.max_seq_len - 1))
    token_ids = [token_ids] * batch_size
    token_ids = torch.tensor(token_ids, device=device).transpose(0, 1)
    pad_mask = torch.zeros((self.max_seq_len, batch_size), device=device, dtype=torch.bool)

    ts = token_ids[:1, :]
    ms = pad_mask[:1, :]
    ll = torch.zeros((batch_size))

    # Apply starting token to model to get a distribution over next tokens,
    # then seed each of the k beams with one of the top-k first tokens.
    first_lls = self._beam_step(decode_fn, ts, ms, ll)
    top_lls, top_idxs = torch.topk(first_lls, k, dim=1)
    top_ids = list(top_idxs.T)

    # Setup tensors for each beam which will be reused
    token_ids_list = [token_ids.clone() for _ in range(k)]
    pad_mask_list = [pad_mask.clone() for _ in range(k)]
    lls_list = list(top_lls.cpu().T)

    for beam_idx, ids in enumerate(top_ids):
        token_ids_list[beam_idx][1, :] = ids
        pad_mask_list[beam_idx][1, :] = 0

    # Grow every beam one token at a time until all beams are complete.
    for i in range(2, self.max_seq_len):
        complete = self._update_beams_(i, decode_fn, token_ids_list, pad_mask_list, lls_list)
        if complete:
            break

    # Detokenise every beam back into SMILES strings.
    tokens_list = [token_ids.transpose(0, 1).tolist() for token_ids in token_ids_list]
    tokens_list = [self.tokeniser.convert_ids_to_tokens(tokens) for tokens in tokens_list]
    mol_strs_list = [self.tokeniser.detokenise(tokens) for tokens in tokens_list]
    log_lhs_list = [log_lhs.tolist() for log_lhs in lls_list]

    # Transpose and sort list of molecules based on ll
    new_mol_strs = self._transpose_list(mol_strs_list)
    new_log_lhs = self._transpose_list(log_lhs_list)
    sorted_mols, sorted_lls = self._sort_beams(new_mol_strs, new_log_lhs)

    return sorted_mols, sorted_lls
def _update_beams_(self, i, decode_fn, token_ids_list, pad_mask_list, lls_list):
    """ Update beam tokens and pad mask in-place using a single decode step

    Updates token ids and pad mask in-place by producing the probability distribution over next tokens
    and choosing the top k (number of beams) log likelihoods to choose the next tokens.
    Sampling is complete if every batch element in every beam has produced an end token.

    Args:
        i (int): The current iteration counter
        decode_fn (fn): Function used to apply tokens to model and produce log probability distribution
        token_ids_list (List[torch.Tensor]): List of token_ids, each of shape [seq_len, batch_size]
        pad_mask_list (List[torch.Tensor]): List of pad_masks, each of shape [seq_len, batch_size]
        lls_list (List[torch.Tensor]): List of log likelihoods, each of shape [batch_size]

    Returns:
        (bool): Specifies whether all of the beams are complete
    """
    assert len(token_ids_list) == len(pad_mask_list) == len(lls_list)
    num_beams = len(token_ids_list)

    # Truncate each beam's tensors to the i tokens generated so far
    ts = [token_ids[:i, :] for token_ids in token_ids_list]
    ms = [pad_mask[:i, :] for pad_mask in pad_mask_list]

    # Apply current seqs to model to get a distribution over next tokens
    # new_lls is a tensor of shape [batch_size, vocab_size * num_beams]
    new_lls = [self._beam_step(decode_fn, t, m, lls) for t, m, lls in zip(ts, ms, lls_list)]
    # Length-normalised scores are used only for ranking the candidates;
    # the raw (un-normalised) lls are what get carried forward
    norm_lls = [self._norm_length(lls, mask) for lls, mask in zip(new_lls, ms)]
    _, vocab_size = tuple(norm_lls[0].shape)
    new_lls = torch.cat(new_lls, dim=1)
    norm_lls = torch.cat(norm_lls, dim=1)

    # Keep lists (of length num_beams) of tensors of shape [batch_size]
    top_lls, top_idxs = torch.topk(norm_lls, num_beams, dim=1)
    # Flat index over the concatenated vocab decomposes into
    # (token id, originating beam index)
    new_ids_list = list((top_idxs % vocab_size).T)
    beam_idxs_list = list((top_idxs // vocab_size).T)
    # Gather the un-normalised lls of the selected candidates, per batch row
    top_lls = [new_lls[b_idx, idx] for b_idx, idx in enumerate(list(top_idxs))]
    top_lls = torch.stack(top_lls).T

    beam_complete = []
    new_ts_list = []
    new_pm_list = []
    new_lls_list = []

    # Set the sampled tokens, pad masks and log likelihoods for each of the new beams
    for new_beam_idx, (new_ids, beam_idxs, lls) in enumerate(zip(new_ids_list, beam_idxs_list, top_lls)):
        # Get the previous sequences corresponding to the new beams
        token_ids = [token_ids_list[beam_idx][:, b_idx] for b_idx, beam_idx in enumerate(beam_idxs)]
        token_ids = torch.stack(token_ids).transpose(0, 1)

        # Generate next elements in the pad mask. An element is padded if:
        # 1. The previous token is an end token
        # 2. The previous token is a pad token
        is_end_token = token_ids[i-1, :] == self.end_token_id
        is_pad_token = token_ids[i-1, :] == self.pad_token_id
        new_pad_mask = torch.logical_or(is_end_token, is_pad_token)
        # A beam is complete when every batch element is padded
        beam_complete.append(new_pad_mask.sum().item() == new_pad_mask.numel())

        # Ensure all sequences contain an end token
        if i == self.max_seq_len - 1:
            new_ids[~new_pad_mask] = self.end_token_id

        # Set the tokens to pad if an end token as already been produced
        new_ids[new_pad_mask] = self.pad_token_id
        token_ids[i, :] = new_ids

        # Generate full pad mask sequence for new token sequence
        pad_mask = [pad_mask_list[beam_idx][:, b_idx] for b_idx, beam_idx in enumerate(beam_idxs)]
        pad_mask = torch.stack(pad_mask).transpose(0, 1)
        pad_mask[i, :] = new_pad_mask

        # Add tokens, pad mask and lls to list to be updated after all beams have been processed
        new_ts_list.append(token_ids)
        new_pm_list.append(pad_mask)
        new_lls_list.append(lls)

    complete = sum(beam_complete) == len(beam_complete)

    # Update all tokens, pad masks and lls
    if not complete:
        for beam_idx, (ts, pm, lls) in enumerate(zip(new_ts_list, new_pm_list, new_lls_list)):
            token_ids_list[beam_idx] = ts
            pad_mask_list[beam_idx] = pm
            lls_list[beam_idx] = lls

    return complete
def _beam_step(self, decode_fn, tokens, mask, lls):
""" Apply tokens to model to produce the log likelihoods for the full sequence
A single iteration of decode is applied to the model to produce the next tokens in the sequences
and the log likelihoods for the entire sequences (including the next token)
The lls are returned as a distribution over all possible next tokens
Args:
decode_fn (fn): Function used to apply tokens to model and produce log probability distribution
tokens (torch.Tensor): Tensor of shape [seq_len, batch_size] containing the current token ids
mask (torch.Tensor): BoolTensor of shape [seq_len, batch_size] containing the padding mask
lls (torch.Tensor): Tensor of shape [batch_size] containing log likelihoods | |
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# @uthor : TheAsouka
# BTC address : 16Jg3VFJsWVkip2c5yCHo2bEBustkxQZpW
# ETH address : 0x3FF703Af55f5Ee0870f44A51d08f753A1f192Fcd
# Feel free to be generous
import re, sys, os, json, argparse, time
from urllib.request import urlopen
from random import randint
def Argument_Parser():
    """Build the command-line parser and return the parsed arguments."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-n", "--number",
        help="Number of currencies to display in the main board",
        type=int, nargs=1)
    parser.add_argument(
        "-f", "--file",
        help=("Your portfolio file, formatted like this : "
              "{\"EUR Invested\":XXX,\"CoinName\":XXX, ..., \"Bitcoin\":18}"),
        type=argparse.FileType('r'), nargs='?')
    parser.add_argument(
        "-c", "--coiname",
        help="Name of the specific coin you want to display. e.g : zcash , bitcoin-cash")
    return parser.parse_args()
def Get_JSON(s_API_url):
    """Retrieve market values from CMC.com API,
    and store them in a list of dictionaries

    Exits the program with a friendly message when the API is unreachable.
    """
    try:
        b_source = urlopen(s_API_url).read()
    # BUG FIX: was a bare `except`, which also swallowed SystemExit and
    # KeyboardInterrupt. URLError/HTTPError and socket errors are all
    # subclasses of OSError on Python 3.
    except OSError:
        print("Error : Can't connect to the API, please check your internet connection.")
        sys.exit(0)
    l_JSON_data = json.loads(b_source)
    return l_JSON_data
def Get_Desired_Values(i_rank, l_data):
    """Retrieve desired values of a coin in list returned by GetJSON()"""
    l_key = [
        "rank",
        "name",
        "symbol",
        "price_btc",
        "price_usd",
        "price_eur",
        "market_cap_usd",
        "percent_change_1h",
        "percent_change_24h",
        "percent_change_7d"]
    # Project the coin mapping onto the fixed column order above.
    d_coin = l_data[i_rank]
    return [d_coin[s_key] for s_key in l_key]
def Make_a_Frame(s_to_frame, i_max_lenght):
    """Centre *s_to_frame* between two pipes in a field of roughly
    *i_max_lenght* characters, for clean column display."""
    pad = int((i_max_lenght - len(s_to_frame)) / 2)
    fill = " " * pad
    # Parity of the would-be frame decides whether one trailing space is
    # dropped, preventing misalignments when strings are sized differently.
    if (2 + 2 * len(fill) + len(s_to_frame)) % 2 != 0:
        return "|" + fill + s_to_frame + fill[:-1] + "|"
    return "|" + fill + s_to_frame + fill + "|"
def Colorize(s_to_colorize):
    """Wrap a value in ANSI colour codes: red when it contains a minus
    sign (negative change), green otherwise."""
    # FIX: the original compiled the regex "\-" (an invalid-escape warning
    # on modern Python) just to look for a literal '-'; a substring test
    # is equivalent and cheaper.
    if "-" in s_to_colorize:
        return "\033[1;31m" + s_to_colorize + "\033[0;m"  # RED
    return "\033[1;32m" + s_to_colorize + "\033[0;m"  # GREEN
def Display_Time():
    """Print the data source and the current local time; return the
    formatted timestamp (dd/mm/YYYY HH:MM:SS)."""
    print("Source : www.coinmarketcap.com")
    s_now = time.strftime("%d/%m/%Y %H:%M:%S")
    print(s_now)
    return s_now
def Display_Header(l_max_length):
    """Print the column-title row of the main board.

    The separator width is sum(l_max_length) - 26 because colourised cells
    carry ~15 invisible ANSI characters each; -26 makes the rule fit what
    is actually visible.
    """
    l_categories = [
        "Rank",
        "Name",
        "Symbol",
        "BTC Price",
        "USD Price",
        "EUR Price",
        "Marketcap M$",
        "\0" * 6 + "1h" + "\0" * 6,   # NUL padding keeps these headers
        "\0" * 6 + "24h" + "\0" * 6,  # as wide as the colourised values
        "\0" * 6 + "7d" + "\0" * 6]   # below them, avoiding misalignment
    separator = "=" * (sum(l_max_length) - 26)
    print(separator)
    for i, cat in enumerate(l_categories):
        print(Make_a_Frame(cat, l_max_length[i]), end='')
    print("\r")
    print(separator)
def Display_Main_Board(s_coin_number, l_market_values, l_max_length):
    """Display properly the main board (top marketcap): one framed row per
    coin for the first *s_coin_number* currencies."""
    for rank in range(int(s_coin_number)):
        for col, element in enumerate(Get_Desired_Values(rank, l_market_values)):
            cell = Element_Modifier(element, col)
            print(Make_a_Frame(cell, l_max_length[col]), end='')
        print("\r")  # new line once every value of a coin is displayed
    print("=" * (sum(l_max_length) - 26))
def Display_Header_Portfolio(l_max_length):
    """Print the title and column-header rows of the portfolio board."""
    print("=" * (sum(l_max_length) - 26))
    print("\n" + Make_a_Frame("Your portfolio", 16))  # fixed-width title
    l_categories = [
        "Name",
        "Symbol",
        "Quantity",
        "% Holdings",
        "BTC Value",
        "USD Value",
        "EUR Value"]
    bar = "=" * (len(l_categories) * 16)
    print(bar)
    for cat in l_categories:
        print(Make_a_Frame(cat, 15), end='')
    print("\r")
    print(bar)
    # TODO: use a single function to display both board headers.
def Display_Portfolio_Board(l_l_proper_values_coins):
    """Print one framed row per portfolio coin, then a closing rule."""
    for l_coin in l_l_proper_values_coins:
        row = ''.join(Make_a_Frame(element, 15) for element in l_coin)
        print(row, end='')
        print("\r")
    print("=" * 112)
def Element_Modifier(s_element, i_list_index):
    """Normalise a raw API value for display, based on its column index."""
    if i_list_index == 3:
        # BTC price: right-pad with zeros to a fixed 10-character width.
        s_element = s_element.ljust(10, "0")
    elif i_list_index in (4, 5):
        # USD / EUR price rounded to 3 decimals.
        s_element = str(round(float(s_element), 3))
    elif i_list_index == 6:
        # Marketcap expressed in millions of dollars.
        s_element = str(round(float(s_element) / 10**6, 3))
    elif i_list_index in (7, 8, 9):
        # Percentage changes get red/green colouring.
        s_element = Colorize(s_element)
    return s_element
def Get_Portfolio_Values(o_portfolio_file, i_coin_number, l_market_values, l_max_length):
    """Get coins and their values in portfolio file

    Reads the portfolio JSON from *o_portfolio_file*, matches each holding
    against the already-fetched market data (*l_market_values*, first
    *i_coin_number* entries), fetches any other coin individually from the
    API, and returns (list of per-coin value lists, fiat amount invested).
    Each returned row ends up as [name, symbol, quantity, price_btc,
    price_usd, price_eur] after the trimming loop at the bottom.
    """
    re_exclude = re.compile("\#")
    # You can exclude a currency in your portfolio by adding a # before the name
    # (e.g : "#Ripple":100)
    i = 0
    i_rank = 0
    l_l_values_coins_portfolio = []
    try:
        d_portfolio_values = json.load(o_portfolio_file)
    except:
        # NOTE(review): bare except also hides KeyboardInterrupt/SystemExit
        print("Your portfolio file is not formatted correctly.")
        print("May be a comma at the end, try to remove it")
        print("The portfolio file has to be formatted like this : ")
        print("{\"EUR Invested\":XXX,\"CoinName\":XXX, ..., \"Bitcoin\":18}")
        sys.exit(0)
    for s_currency, f_quantity in d_portfolio_values.items():
        if re.match(re_exclude, s_currency):
            continue
        f_quantity = float(f_quantity)  # Some values are integers
        # Scan the coins already displayed on the main board for a name match.
        # NOTE(review): if i_coin_number is 0 this loop never runs and
        # b_isnew is referenced below while unbound — confirm callers
        # always pass a positive count.
        while i != i_coin_number:
            if s_currency == str(Get_Desired_Values(i, l_market_values)[1]):
                # If the coin is already displayed
                b_isnew = False
                i_rank = i
                break
            else:
                b_isnew = True
            i += 1
        i = 0
        if s_currency == "EUR Invested":
            # Special key: total fiat invested, not a coin
            f_fiat_investment = f_quantity
            pass
        elif b_isnew == True:
            n = 0
            # Coins that aren't displayed yet
            # Need to call the API to retrieve their values
            l_new_coin_values = Get_Desired_Values(0, Get_JSON("https://api.coinmarketcap.com/v1/ticker/" + s_currency + "?convert=EUR"))
            # Display in main board coins in portfolio that aren't displayed yet
            for s_values in l_new_coin_values:
                s_values = Element_Modifier(s_values, n)
                print(Make_a_Frame(s_values, l_max_length[n]), end='')
                n += 1
            print("\r")
            l_new_coin_values.append(f_quantity)  # Add quantity of a coin in list
            # Add list of values of a coin into a general list
            l_l_values_coins_portfolio.append(l_new_coin_values)
        elif b_isnew == False:
            # Coins that are already displayed
            l_displayed_coins = Get_Desired_Values(i_rank, l_market_values)
            l_displayed_coins.append(f_quantity)
            l_l_values_coins_portfolio.append(l_displayed_coins)
    o_portfolio_file.close()
    for l_coin_values in l_l_values_coins_portfolio:
        # Row is [rank, name, symbol, btc, usd, eur, mcap, 1h, 24h, 7d, qty];
        # drop rank and the four change/mcap columns, then move qty to idx 2.
        l_coin_values.pop(0)  # Remove undesired values got from Get_Desired_Values()
        l_coin_values.pop(5)
        l_coin_values.pop(5)
        l_coin_values.pop(5)
        l_coin_values.pop(5)
        l_coin_values.insert(2, l_coin_values[5])  # Copy f_quantity to another index
        l_coin_values.pop()  # Remove duplicate f_quantity
    # list of lists
    # NOTE(review): f_fiat_investment is unbound here if the portfolio has
    # no "EUR Invested" entry — confirm that key is mandatory.
    return l_l_values_coins_portfolio, f_fiat_investment
def Process_Portfolio_Values(l_l_values_coins_portfolio):
    """Process portfolio values to get total values, % holdings ...

    Input rows are [name, symbol, quantity, price_btc, price_usd,
    price_eur] (see Get_Portfolio_Values). Rows are rewritten in place to
    [name, symbol, quantity, % holdings, BTC value, USD value, EUR value]
    and the grand totals are accumulated alongside.
    """
    # Variables to return
    l_l_proper_values_coins = []
    f_total_BTC_value = 0.0
    f_total_USD_value = 0.0
    f_total_EUR_value = 0.0
    for l_coin_values in l_l_values_coins_portfolio:
        # quantity * unit price, per denomination
        s_BTC_value = round(l_coin_values[2] * float(l_coin_values[3]), 8)
        i_len_s_BTC_value = len(str(s_BTC_value))
        if i_len_s_BTC_value != 10:
            # To have satoshi value (8 decimals): right-pad with zeros
            s_BTC_value = str(s_BTC_value) + "0" * (10 - i_len_s_BTC_value)
        f_USD_value = l_coin_values[2] * float(l_coin_values[4])
        f_USD_value = round(f_USD_value, 2)
        f_EUR_value = l_coin_values[2] * float(l_coin_values[5])
        f_EUR_value = round(f_EUR_value, 2)
        # Calculate portfolio total value
        f_total_BTC_value = f_total_BTC_value + float(s_BTC_value)
        f_total_USD_value = f_total_USD_value + f_USD_value
        f_total_EUR_value = f_total_EUR_value + f_EUR_value
        # Put right values at right places (overwriting the unit prices)
        l_coin_values[2] = str(l_coin_values[2])
        l_coin_values[4] = str(s_BTC_value)
        l_coin_values[5] = str(f_USD_value)
        l_coin_values.append(str(f_EUR_value))
    for l_coin_values in l_l_values_coins_portfolio:
        # Calculate in % representation of a coin in total portfolio value
        # (index 5 now holds the USD value written in the loop above)
        s_coin_holding = round((float(l_coin_values[5]) / f_total_USD_value) * 100, 2)
        l_coin_values[3] = str(s_coin_holding)
        l_l_proper_values_coins.append(l_coin_values)
    return l_l_proper_values_coins, f_total_BTC_value, f_total_USD_value, f_total_EUR_value
def Random_Sentence():
    """Pick a random sentence to display in the portfolio summary.

    Feel free to add some, max 20 chars long.
    """
    l_words = [
        "Are you broke ?",
        "Are you rich ?",
        "1-800-273-8255",
        "01 45 39 40 00",
        "Let's go to Palawan",
        "Don't cry",
        "HODL",
        "Time to sell.",
        "Buy more",
        "Shitcoins only",
        "It's a scam.",
        "Learn the tech",
        "Read whitepapers",
        "We <3 Blockchain",
        "Amazing script !",
        "Send me some.",
        "<NAME>",
        "<NAME>",
        "<NAME>",
        "<NAME>",
        "<NAME>"]
    return l_words[randint(0, len(l_words) - 1)]
def Portfolio_Summary(t_proper_portfolio_values, f_fiat_investment):
    """Display the summary of your portfolio, to quickly know if you are
    rich or poor; returns the summary as a dict (used for logging)."""
    print("\r")
    print("=" * 44)
    print(Make_a_Frame("Portfolio Summary", 21) + Make_a_Frame(Random_Sentence(), 21))
    print("=" * 44)
    f_btc_total = round(t_proper_portfolio_values[1], 8)
    f_usd_total = round(t_proper_portfolio_values[2], 2)
    f_eur_total = round(t_proper_portfolio_values[3], 2)
    f_profit = round(f_eur_total - f_fiat_investment, 2)
    f_pct_profit = round((f_eur_total * 100) / f_fiat_investment - 100, 2)
    l_summary_categories = [
        "Investment",
        "BTC Value",
        "USD Value",
        "EUR Value",
        "EUR Profit",
        "%%% Profit"]
    l_values = [f_fiat_investment, f_btc_total, f_usd_total,
                f_eur_total, f_profit, f_pct_profit]
    d_portfolio_summary = {}
    for s_cat, value in zip(l_summary_categories, l_values):
        print(Make_a_Frame(s_cat, 21) + Make_a_Frame(str(value), 21), end='')
        # Collected so Write_Logs() can persist the summary
        d_portfolio_summary[s_cat] = value
        print("\r")
    print("=" * 44)
    return d_portfolio_summary
def Write_Logs(d_portfolio_summary, s_currentime):
    """Append a timestamped portfolio summary line to portfolog.txt.

    The log file is created next to the script so repeated runs
    accumulate history.
    """
    log_path = str(os.path.dirname(sys.argv[0])) + "/portfolog.txt"
    # FIX: use a context manager so the handle is closed even if the
    # write raises (the original leaked it in that case).
    with open(log_path, "a+") as file_log:
        file_log.write(s_currentime + " : " + str(d_portfolio_summary) + "\n")
def main(args):
### | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import copy
import json
import logging
import os
import re
try:
import six
except ImportError:
from ansible.module_utils import six
import sys
import time
import traceback
from threading import Thread
from subprocess import PIPE, Popen
from collections import OrderedDict
try:
from queue import Queue
except ImportError:
from Queue import Queue
# Short aliases for the os.path helpers used throughout this script
J = os.path.join
B = os.path.basename
D = os.path.dirname
A = os.path.abspath
R = os.path.relpath
OW = os.getcwd()  # working directory at import time
W = A(R(D(__file__)))  # directory containing this script
TOP = D(W)  # parent of the script directory
RE_F = re.U | re.M  # default regex flags
N = os.path.basename(__file__)
BN = os.path.basename(N)
# Prefer the invoked program name when it matches this script's basename
if sys.argv and sys.argv[0].endswith(os.path.sep+BN):
    N = sys.argv[0]
# Global per-image build status buckets, filled during the build run
_STATUS = OrderedDict([('error', OrderedDict()), ('success', OrderedDict()),
                       ('message', OrderedDict()), ('skip', OrderedDict())])
_HELP = '''\
Build images
Main usage:
- {N} [--images packer__toto.json] [--skip-images packer_toto.json]
- Iterate and run builds through packer or docker all the images found
inside $docker_folder/IMAGES.json
Other usages:
- {N} --generate-images \\
[--packer-template $docker_folder/packer.json]
- rewrite the packer template to $docker_folder/packer/IMG_X.json
- {N} --list
- List all available images
Details in REPO: /doc/docker_chain_build.md
'''.format(N=N)
BUILDER_TYPES = ['packer', 'dockerfile']
DEFAULT_BUILDER_TYPE = 'packer'
# Strips a leading "setups." from directory-derived image names
NAME_SANITIZER = re.compile('setups\.',
                            flags=RE_F)
# Known transient packer failure that warrants an automatic retry
PACKER_RETRY_CHECK = re.compile(
    ("/tmp/script[^.]+.sh: not found.*"
     "Build 'docker' errored: "
     "Script exited with non-zero exit status: 127"),
    flags=re.M | re.U | re.S)
# Parses docker image references of the form [host:port/][repo/]image[:tag]
IMG_PARSER = re.compile('('
                        '(?P<registry>'
                        '(?P<registry_host>[^:]+)'
                        ':'
                        '(?P<registry_port>[^/]+)'
                        ')'
                        '/)?'
                        '('
                        '(?P<repo>[^/:]+)'
                        '/)?'
                        '(?P<image>[^/:]+)'
                        '(:'
                        '(?P<tag>.*)'
                        ')?'
                        '$',
                        flags=RE_F)
_LOGGER = 'cops.dockerbuilder'  # default logger name
_LOGGER_FMT = '%(asctime)s:%(levelname)s:%(name)s:%(message)s'
_LOGGER_DFMT = '%m/%d/%Y %I:%M:%S %p'
def test_image_matcher():
    """Self-test for IMG_PARSER: valid references parse into the expected
    parts; malformed references do not match at all."""
    assert IMG_PARSER.match('b/a:1').groupdict() == {
        'image': 'a', 'repo': 'b', 'registry_host': None,
        'tag': '1', 'registry': None, 'registry_port': None}
    assert IMG_PARSER.match('b').groupdict() == {
        'image': 'b', 'repo': None, 'registry_host': None,
        'tag': None, 'registry': None, 'registry_port': None}
    assert IMG_PARSER.match('b:1').groupdict() == {
        'image': 'b', 'repo': None, 'registry_host': None,
        'tag': '1', 'registry': None, 'registry_port': None}
    assert IMG_PARSER.match('b/a').groupdict() == {
        'image': 'a', 'repo': 'b', 'registry_host': None,
        'tag': None, 'registry': None, 'registry_port': None}
    # BUG FIX: this comparison lacked `assert` (its result was silently
    # discarded) and its expected registry/port values were wrong.
    assert IMG_PARSER.match('c:2/b/a:1').groupdict() == {
        'image': 'a', 'repo': 'b', 'registry_host': 'c',
        'tag': '1', 'registry': 'c:2', 'registry_port': '2'}
    # BUG FIX: the original list had no commas, so the five strings were
    # concatenated into a single literal and only that one was exercised.
    for i in [
        ':b',
        '/a',
        '/b/a',
        '/b/a:1',
        'c/b/a:1',
    ]:
        try:
            IMG_PARSER.match(i).groupdict()
            raise ValueError(i)
        except AttributeError:
            # match() returned None, as expected for malformed refs
            pass
def read_output(pipe, funcs):
    """Drain *pipe* line by line, passing each UTF-8-decoded line to every
    callback in *funcs*, then close the pipe.

    BUG FIX: the sentinel must be ``b''`` — the subprocess pipes yield
    bytes, and on Python 3 ``b'' != ''`` so the original ``''`` sentinel
    never terminated the loop at EOF. (On Python 2, ``b''`` is ``''``, so
    behaviour there is unchanged.)
    """
    for line in iter(pipe.readline, b''):
        for func in funcs:
            func(line.decode('utf-8'))
    pipe.close()
def write_output(get):
    """Pull lines from *get* (a queue getter) until None, echoing each one
    to stdout; used as the single writer thread in run_cmd."""
    line = get()
    while line is not None:
        sys.stdout.write(line)
        line = get()
def run_cmd(command,
            shell=True,
            cwd=None,
            env=None,
            stdout=None,
            stderr=None,
            passthrough=True):
    """Run *command* in a subprocess and capture its output.

    With passthrough=True the child's stdout/stderr are streamed to this
    process's stdout live (two reader threads feed a queue consumed by a
    writer thread) while still being accumulated in memory.

    Returns (returncode, (stdout_text, stderr_text)).
    """
    if stderr is None:
        stderr = PIPE
    if stdout is None:
        stdout = PIPE
    if env is None:
        env = os.environ.copy()
    outs, errs = None, None
    proc = Popen(
        command,
        cwd=cwd,
        env=env,
        shell=shell,
        close_fds=True,
        stdout=stdout,
        stderr=stderr,
        bufsize=1)  # line-buffered pipes
    if passthrough:
        outs, errs = [], []
        q = Queue()
        # One reader thread per pipe; each echoes into the shared queue
        # (for live display) and appends to its capture list
        stdout_thread = Thread(
            target=read_output, args=(proc.stdout, [q.put, outs.append]))
        stderr_thread = Thread(
            target=read_output, args=(proc.stderr, [q.put, errs.append]))
        writer_thread = Thread(target=write_output, args=(q.get,))
        for t in (stdout_thread, stderr_thread, writer_thread):
            t.daemon = True
            t.start()
        proc.wait()
        for t in (stdout_thread, stderr_thread):
            t.join()
        q.put(None)  # sentinel stops the writer thread
        outs = ''.join(outs)
        errs = ''.join(errs)
    else:
        # Simple blocking capture without live echo
        outs, errs = proc.communicate()
        outs = '' if outs is None else outs.decode('utf-8')
        errs = '' if errs is None else errs.decode('utf-8')
    rc = proc.returncode
    return (rc, (outs, errs))
def splitstrip(a, *s):
return [b.strip() for b in a.split(*s)]
def shellexec(cmd, quiet=False, *args):
    """Log *cmd* (and any extra args) at debug level, then run it through
    run_cmd; output is streamed live unless *quiet* is set."""
    template = 'shellexec {0}' if not args else 'shellexec {0} {1}'
    debug(template.format(cmd, args))
    return run_cmd(cmd, passthrough=not quiet)
def setup_logging(fmt=_LOGGER_FMT, datefmt=_LOGGER_DFMT, level=logging.INFO):
    """Initialise root logging with this module's default format/level."""
    logging.basicConfig(level=level, format=fmt, datefmt=datefmt)
def log(msg, name=_LOGGER, level='info'):
    """Emit *msg* on logger *name* at *level* (a logger method name,
    case-insensitive); returns whatever the logging call returns."""
    emit = getattr(logging.getLogger(name), level.lower())
    return emit(msg)
def debug(*a, **kwargs):
    """Shortcut for log(..., level='debug')."""
    return log(*a, **dict(kwargs, level='debug'))
def parse_docker_images(images, images_file='images.json'):
    """Validate and normalise the 'images' entries of an images mapping.

    Fills in defaults (builder type, name, version, file paths, working
    dir), expands ``{placeholder}`` templates, and collects per-image
    errors. Returns (images, errors) where images['images'] keeps only the
    entries that produced no errors.
    """
    images_file = A(images_file)
    images_folder = D(images_file)
    parsed_images = []
    gerrors = []
    if 'images' not in images:
        gerrors.append(("No 'images' in images data'", images))
    else:
        for img in images.pop('images'):
            errors = []
            if not isinstance(img, dict):
                errors.append(('NOT_A_MAPPING', img))
            img['images_file'] = images_file
            # Either 'dockerfile' or the legacy 'docker_file' key selects a
            # Dockerfile-based build
            dockerfile = img.get(
                'dockerfile',
                img.get('docker_file', None))
            try:
                if dockerfile:
                    builder_type = 'docker'
                else:
                    builder_type = img['builder_type']
            except KeyError:
                builder_type = DEFAULT_BUILDER_TYPE
            # 'docker' is accepted as an alias for 'dockerfile'
            builder_type = {'docker': 'dockerfile'}.get(
                builder_type, builder_type)
            img['builder_type'] = builder_type
            try:
                if builder_type:
                    assert builder_type in BUILDER_TYPES
            except (ValueError, AssertionError,):
                builder_type = None
                errors.append(('INVALID_BUILDER_TYPE', img))
            #
            # Default name: current-directory basename, sans "setups." prefix
            name = img.get('name', None)
            if not name:
                name = NAME_SANITIZER.sub('', os.path.basename(OW))
            img['name'] = name
            if not img['name']:
                errors.append(('NO_NAME', img))
            #
            version = img.get('version', None)
            image_file = img.get(
                'file', dockerfile)
            if not image_file:
                # Derive the template/Dockerfile filename from name/version
                if builder_type in ['packer']:
                    if version:
                        formatter = '{0}-{1}.json'
                    else:
                        formatter = '{0}.json'
                elif builder_type in ['dockerfile']:
                    if version:
                        formatter = 'Dockerfile.{0}-{1}'
                    else:
                        formatter = 'Dockerfile'
                else:
                    raise Exception(
                        'no valid builder type for {0}'.format(img))
                image_file = formatter.format(name, version,
                                              name=name, version=version)
            img['file'] = image_file
            # Relative packer templates live under <images_folder>/packer/
            if (
                builder_type in ['packer'] and
                image_file and not
                image_file.startswith(os.path.sep)
            ):
                img['fimage_file'] = J(
                    images_folder, builder_type, image_file)
            if not image_file:
                errors.append(('NO_IMAGE_FILE_ERROR', img))
            #
            if builder_type in ['dockerfile']:
                # Dockerfile builds must carry a parsable docker tag; the
                # tag's image/tag parts override name/version when present
                tag = None
                try:
                    tag = img['tag']
                except KeyError:
                    errors.append(('NO_TAG', img))
                # NOTE(review): if 'tag' is missing, tag stays None and
                # IMG_PARSER.match(None) raises TypeError — confirm tags
                # are always present in practice
                img_info, img_parts = IMG_PARSER.match(tag), None
                if tag and not img_info:
                    errors.append(('INVALID_DOCKER_TAG', img))
                else:
                    img_parts = img_info.groupdict()
                    img['img_parts'] = img_parts
                    new_name = img_parts.get('image', None)
                    new_tag = img_parts.get('tag', None)
                    if new_tag:
                        img['version'] = version = new_tag
                    if new_name:
                        img['name'] = name = new_name
                if '{' in image_file and '}' in image_file:
                    image_file = image_file.format(name, version, **img)
                    img['file'] = image_file
            if not version:
                # Fall back to the file name without its extension
                version = '.'.join(img['file'].split('.')[:-1]).strip()
                img['version'] = version
            if not img['version']:
                errors.append(('NO_VERSION', img))
            try:
                working_dir = img['working_dir']
            except KeyError:
                working_dir = J(images_folder, '..')
            img['working_dir'] = A(working_dir)
            # Relative Dockerfiles are resolved against the working dir
            if (
                builder_type in ['dockerfile'] and
                image_file and not
                image_file.startswith(os.path.sep)
            ):
                img['fimage_file'] = J(
                    img['working_dir'], image_file)
            if not os.path.isdir(img['working_dir']):
                errors.append(
                    ('working dir {0} is not a dir'.format(img['working_dir']),
                     img))
            extra_args = img.setdefault('extra_args', '')
            if '{' in extra_args and '}' in extra_args:
                img['extra_args'] = extra_args.format(**img)
            if not errors:
                parsed_images.append(img)
            else:
                gerrors.extend(errors)
        images['images'] = parsed_images
    return images, gerrors
def parse_images_file(images_file):
    """Load and validate an images JSON file.

    Returns (images, errors): on a missing or unparsable file the images
    mapping stays empty and errors describes the failure; otherwise the
    content is handed to parse_docker_images for normalisation.
    """
    images, errors = OrderedDict(), []
    fimages_file = A(images_file)
    debug('parse_images_file: {0}'.format(fimages_file))
    if not os.path.exists(fimages_file):
        # BUG FIX: the original recorded this error but then opened the
        # file anyway, raising an unhandled IOError/FileNotFoundError.
        errors.append(('Images file {0} does not exists', fimages_file))
        return images, errors
    # 'with' ensures the handle is closed (the original leaked it)
    with open(fimages_file) as fic:
        try:
            images = json.loads(fic.read())
        except (Exception,):
            print(traceback.format_exc())
            errors.append(('IMAGES_FILE_NOT_A_JSON', fimages_file))
        else:
            images, errors = parse_docker_images(images, fimages_file)
    return images, errors
class CharsetError(Exception):
    """Raised when a value cannot be encoded with any known charset."""

    # The offending value is attached here by the raiser for later display.
    val = None
def try_charsets(val, charsets=None):
    """Encode text *val* with the first charset that accepts it.

    Non-text values (bytes, numbers, ...) are returned untouched. If no
    charset can encode the text, a CharsetError carrying the value in its
    ``val`` attribute is raised.
    """
    if charsets is None:
        # BUG FIX: 'iso-8859-15 ' had a trailing space, which makes
        # encode() raise LookupError (an unknown codec) instead of the
        # UnicodeEncodeError this function catches.
        charsets = ['utf-8', 'utf-16',
                    'iso-8859-15', 'iso-8859-1',
                    'cp1252', 'cp1253', 'cp1254', 'cp1255', 'cp1256']
    charsets = charsets[:]
    # BUG FIX: the original tested `isinstance(val, unicode)`, which is a
    # NameError on Python 3; `str` is the py3 text type.
    if not isinstance(val, str):
        return val
    while charsets:
        charset = charsets.pop(0)
        try:
            return val.encode(charset)
        except UnicodeEncodeError:
            pass
    exc = CharsetError(u'Cannot make msg from output (charset problem)')
    exc.val = val
    raise exc
def _build(cmd,
           img,
           fmt=True,
           squash=False,
           builder_args=None,
           build_retries=None,
           build_retry_check=None,
           build_retry_delay=None,
           quiet=False,
           *a, **kw):
    """Run one image build command, retrying on known transient failures.

    Returns (status, msg): status is True on success, msg collects the
    returncode/stdout/stderr of the last attempt (or a synthetic report
    when the image file is missing).
    """
    if build_retry_delay is None:
        build_retry_delay = 1
    if build_retry_check is None:
        # Packer builds retry on a known transient failure signature
        if img['builder_type'] == 'packer':
            build_retry_check = PACKER_RETRY_CHECK
    if build_retries is None:
        if img['builder_type'] == 'packer':
            build_retries = 10
        else:
            build_retries = 1
    # Work on copies so callers' image mappings are never mutated
    img = copy.deepcopy(img)
    iargs = copy.deepcopy(img)
    iargs.update(img)
    iargs.setdefault('builder_args', builder_args or '')
    if fmt:
        # Expand {placeholders} in the command from the image mapping
        cmd = cmd.format(**iargs)
    img_file = iargs['fimage_file']
    try:
        os.stat(img_file)
    except (OSError, IOError):
        # Missing template/Dockerfile: fail without running anything
        status = False
        msg = ('retcode: {0}\n'
               'OUT: {1}\n'
               'ERR: {2}\n'.format(
                   -1,
                   '',
                   'IMAGEFILE_DOES_NOT_EXIST: {0}'.format(
                       img_file)))
    else:
        for i in range(build_retries):
            if i > 0:
                # msg here is the report of the previous failed attempt
                log(msg)
                log('Retry: {0}'.format(i))
            log('Running: {0}'.format(cmd))
            ret = shellexec(cmd, quiet=quiet)
            if ret[0] == 0:
                status = True
            else:
                status = False
            # Assemble the attempt report, tolerating odd output encodings
            parts = [('retcode:', "{0}".format(ret[0])),
                     ('OUT:', ret[1][0]),
                     ('ERR:', ret[1][1])]
            msg = ''
            for label, val in parts:
                try:
                    msg += '{0} '.format(label)
                    # NOTE(review): try_charsets may return bytes here,
                    # and str += bytes raises TypeError on Python 3 —
                    # confirm outputs are always str in practice
                    msg += try_charsets(val)
                    msg += '\n'
                except (CharsetError,) as exc:
                    print(
                        u'ERROR {0}: Cannot make msg from output '
                        '(charset problem)'.format(label))
                    try:
                        print(exc.val)
                    except Exception:
                        # dont interrupt a whole build just for a failed print
                        pass
            if status:
                break
            else:
                # Retry only when the failure matches the known pattern
                if build_retry_check:
                    out_match = build_retry_check.search(ret[1][0])
                    err_match = build_retry_check.search(ret[1][1])
                    if (out_match or err_match):
                        time.sleep(build_retry_delay)
                    else:
                        break
                else:
                    break
    return status, msg
def packer_build(img, *a, **kw):
    """Build *img* with packer from its working dir; retries default to 10."""
    kw.setdefault('build_retries', 10)
    command = ('cd \'{working_dir}\' &&'
               ' packer build {builder_args} {extra_args} {fimage_file}')
    return _build(command, img, *a, **kw)
def dockerfile_build(img, squash=False, *a, **kw):
    """Build *img* with docker build; when *squash* is requested, try
    --squash first and fall back to a plain build if that flag fails."""
    plain = ('docker build {builder_args} {extra_args}'
             ' -f {fimage_file} -t {tag} {working_dir}')
    squashed = ('docker build {builder_args} --squash {extra_args}'
                ' -f {fimage_file} -t {tag} {working_dir}')
    cmds = ([squashed] if squash else []) + [plain]
    ret = None
    for cmd in cmds:
        ret = _build(cmd, img, *a, **kw)
        # Fall through to the plain build if --squash was the culprit
        if squash and not ret[0] and '--squash' in ret[1]:
            continue
        break
    return ret
def _print(data, level=0):
if isinstance(data, (dict,)):
for k, item in data.items():
print('{1}{0}:'.format(k, ' '*level))
_print(item, level+2)
pass
if isinstance(data, (tuple, list, set)):
for item in data:
_print(item, level+2)
else:
data = '{0}'.format(data).replace('\n', '\n{0}'.format(' '*level))
print('{1}{0}'.format(data, ' '*level))
def print_status(status, quiet=False):
k = 'success'
if status.get(k, None):
print("\n"+k.capitalize())
for img in status[k]:
data = status[k][img]
if quiet:
data = (True, '')
print(" "+img)
_print(data, level=2)
for k in ['message', 'error', 'skip']:
if status.get(k, None):
print("\n"+k.capitalize())
for img in status[k]:
| |
#!/usr/bin/env python
"""The JIP Pipeline module contains the classs and functions
used to create pipeline graphs
"""
import collections
import os
from jip.options import Option
from jip.tools import Tool
from jip.profiles import Profile
from jip.logger import getLogger
from jip.templates import render_template
import jip.tools
log = getLogger('jip.pipelines')
class Job(Profile):
    """Container class that wraps job meta-data.

    The pipeline job extends the general :class:`jip.profiles.Profile`, and
    extends it in a way that you can create new pipeline nodes from the job.
    Those nodes will then hold a reference to the profile and all customization
    on the profile will be applied to the node.
    """
    def __init__(self, pipeline=None, **kwargs):
        Profile.__init__(self, **kwargs)
        self._pipeline = pipeline  # owning Pipeline, if any
        self._node = None  # node this job profile is attached to
        self._in_pipeline_name = None  # first name assigned within a pipeline

    @classmethod
    def from_profile(cls, profile, pipeline):
        """Create a pipeline-bound Job from a plain Profile instance."""
        job = cls(pipeline=pipeline, **(profile.__dict__))
        return job

    def __getstate__(self):
        # Drop pipeline/node back-references so the job pickles without
        # dragging the whole graph along; Pipeline.__setstate__ restores them
        data = self.__dict__.copy()
        data['_pipeline'] = None
        data['_node'] = None
        return data

    # override the name setter in order to delegate switching names to
    # the jobs node
    @Profile.name.setter
    def name(self, name):
        self._name = name
        if self._in_pipeline_name is None:
            # Remember the first name ever assigned
            self._in_pipeline_name = name
        else:
            # Later renames keep propagating the original in-pipeline name
            name = self._in_pipeline_name
        if self._node is not None and self._pipeline is not None:
            self._pipeline._apply_node_name(self._node, name)

    def _render_job_name(self, job):
        # Render the (possibly templated) display name for *job*, preferring
        # the in-pipeline name, then this job's node name, then the tool name
        ctx = {}
        for o in job.tool.options:
            ctx[o.name] = o
        if self._in_pipeline_name:
            name = self._in_pipeline_name
        else:
            name = self._node._name if self._node else self.name
        if not name:
            name = job._tool.name
        name = render_template(
            "%s%s" % ("" if not self.prefix else self.prefix, name), **ctx
        )
        # set name
        if self._pipeline and self._node:
            self._pipeline._apply_node_name(self._node, name)
            return self._node.name
        return name

    def _render_name(self):
        # Like _render_job_name, but for this job's own node; falls back to
        # the plain name when the job is not attached to a pipeline node
        if not self._pipeline or not self._node:
            return self.name
        ctx = {}
        for o in self._node._tool.options:
            ctx[o.name] = o
        if self._in_pipeline_name:
            name = self._in_pipeline_name
        else:
            name = self._node._name
        name = render_template(
            "%s%s" % ("" if not self.prefix else self.prefix, name), **ctx
        )
        return name

    def __call__(self, *args, **kwargs):
        # Clones keep the pipeline reference and the original in-pipeline name
        clone = Profile.__call__(self, *args, **kwargs)
        clone._pipeline = self._pipeline
        clone._in_pipeline_name = self._in_pipeline_name
        if clone._in_pipeline_name is None:
            clone._in_pipeline_name = clone.name
        return clone

    def run(self, *args, **kwargs):
        """Delegates to :py:meth:`Pipeline.run` and runs the specified tool
        using this job environment configuration

        :param args: args passed on to the pipeline ``run`` method
        :param kwargs: kwargs passed on to the pipeline ``run`` method
        :returns: the newly created node
        :rtype: :class:`Node`
        """
        if len(args) > 1:
            raise ValueError("You can only pass one tool to a job run !")
        node = args[0]
        return self._pipeline.run(node, _job=self, **kwargs)

    def bash(self, command, **kwargs):
        """Create a new ``bash`` job.

        :param command: the bash command
        :param kwargs: keyword arguments passed on the bash job
        :returns: the newly created node
        :rtype: :class:`Node`
        """
        return self.run('bash', cmd=command, **kwargs)
class Pipeline(object):
    """A pipeline is a directed acyclic graph of Nodes and edges"""
    def __init__(self, cwd=None):
        self._nodes = {}  # maps tool -> Node (see __setstate__)
        self._edges = set([])
        self._job = Job(self, working_dir=cwd)  # default job profile
        self._current_job = self._job
        self._component_index = {}
        self._cleanup_nodes = []
        self._name = None
        self.excludes = []
        self._node_index = 0  # unique steadily increasing number
        self._utils = None  # created lazily by the `utils` property
        self._cwd = self._job.working_dir
        self._pipeline_name = None  # user-defined name, see pipeline_name()
def __getstate__(self):
    # Pickle only the essential state: job profiles, cwd, name, node
    # counter and the nodes; edges and caches are rebuilt on load
    data = {}
    data['_job'] = self._job
    data['_cwd'] = self._cwd
    data['_current_job'] = self._current_job
    data['_name'] = self._name
    data['_node_index'] = self._node_index
    data['_nodes'] = list(self._nodes.values())
    return data

def __setstate__(self, data):
    ## update dict
    # Recreate the transient containers dropped by __getstate__
    self.__dict__['_cwd'] = data['_cwd']
    self.__dict__['_edges'] = set([])
    self.__dict__['_component_index'] = {}
    self.__dict__['_cleanup_nodes'] = []
    self.__dict__['excludes'] = []
    self.__dict__['_utils'] = None
    self.__dict__['_job'] = data['_job']
    self.__dict__['_current_job'] = data['_current_job']
    self.__dict__['_name'] = data['_name']
    self.__dict__['_node_index'] = data['_node_index']
    # Re-attach the job profiles to this pipeline instance
    self.__dict__['_job']._pipeline = self
    self.__dict__['_current_job']._pipeline = self
    ###############################################
    # update nodes
    ###############################################
    nodes = {}
    for node in data['_nodes']:
        # Restore the back-references stripped during pickling
        node._graph = self
        node._job._pipeline = self
        node._job._node = node
        tool = node._tool
        nodes[tool] = node
        # The edge set is rebuilt from each node's own edge list
        for e in node._edges:
            self._edges.add(e)
    self.__dict__['_nodes'] = nodes
def __len__(self):
return len(self._nodes)
def __exit__(self, *args, **kwargs):
pass
def __enter__(self):
return self
@property
def utils(self):
    """Lazily created :class:`jip.tools.PythonBlockUtils` helper bound
    to this pipeline instance.
    """
    if self._utils is not None:
        return self._utils
    helper = jip.tools.PythonBlockUtils(None, locals())
    helper._pipeline = self
    self._utils = helper
    return helper
@property
def edges(self):
    """All edges of the current pipeline graph.

    :getter: returns the edges as a list of :class:`Edge`
    :type: list of :class:`Edge`
    """
    return [edge for edge in self._edges]
def pipeline_name(self, name):
    """Set the user defined name of the pipeline and propagate it to
    every node in the graph.

    :param name: the user defined name of the pipeline; ``None`` is
                 ignored
    :type name: string
    """
    if name is not None:
        self._pipeline_name = name
        for node in self.nodes():
            node._pipeline_name = name
def name(self, name):
    """Set the name of the pipeline and ensure that all
    nodes in the pipeline reference the pipeline name.

    :param name: the name of the pipeline; ``None`` is ignored
    :type name: string
    """
    if name is None:
        return
    # Bug fix: the pipeline's own name was never stored, although the
    # docstring promises it and ``_name`` is part of the pickled state
    # (see __getstate__). Keep it in sync with the nodes.
    self._name = name
    for n in self.nodes():
        # nodes store the pipeline *name* in ``_pipeline``; the graph
        # object itself is referenced via ``_graph`` (see __setstate__)
        n._pipeline = name
def job(self, *args, **kwargs):
    """Create a new job profile.

    Calling this method only creates a new job profile; it is not
    applied to any node in the graph. Nodes can however be created
    *from* the profile, via :py:meth:`Job.run` or :py:meth:`Job.bash`.
    Such nodes receive a copy of the profile, and its properties are
    applied before job execution.

    :param args: args passed to :class:`Job`
    :param kwargs: kwargs passed to :class:`Job`
    :returns: new job profile
    :rtype: :class:`Job`
    """
    # the default job profile is callable and yields a customized copy
    profile = self._job(*args, **kwargs)
    return profile
def run(self, _tool_name, _job=None, **kwargs):
    """Find the tool specified by name and add it as a node to the pipeline
    graph.

    All additional keyword arguments are passed as option configuration to
    the tool instance, allowing you to configure your tool when you create
    it.

    Note that the tools :py:meth:`~jip.tools.Tool.validate` method is
    called here silently. Exceptions are caught and logged. This is
    necessary to allow tools to initialize themselves when they are added
    to a pipeline.

    :param _tool_name: a :class:`~jip.tools.Tool` instance or a tool name
    :param _job: optional job profile applied to the new node
    :param kwargs: all keyword arguments are passed to the tool as option
                   configurations
    :returns: the newly added node
    :rtype: :class:`Node`
    :raises jip.tool.ToolNotFoundException: if the specified tool could not
        be found
    """
    if not isinstance(_tool_name, Tool):
        from jip import find
        tool = find(_tool_name)
    else:
        tool = _tool_name
    node = self.add(tool, _job=_job)

    # add options if specified in kwargs
    def _add_opts(option_type):
        # register options of the given kind ("_inputs", "_outputs" or
        # "_options") on the node's tool and assign their values
        def _add(opts, name, kwargs=None):
            kwargs = kwargs if kwargs else {}
            if option_type == "_inputs":
                opts.add_input(name, **kwargs)
            elif option_type == '_outputs':
                opts.add_output(name, **kwargs)
            else:
                opts.add_option(name, **kwargs)
        if option_type in kwargs:
            # bug fix: ``iteritems()`` is Python-2 only; ``items()``
            # behaves identically here on both Python 2 and 3
            for name, value in kwargs[option_type].items():
                opts = node._tool.options
                if isinstance(value, dict):
                    # get and remove any value set here,
                    # otherwise this will influence the nargs
                    # setting of the new option. We set the
                    # value later anyways. We remove it from the
                    # dict only if nargs is set. That means that
                    # nargs will dominate
                    v = None
                    if "value" in value:
                        v = value["value"]
                        if "nargs" in value:
                            del value["value"]
                    _add(opts, name, value)
                    if v is not None:
                        node.set(name, v, allow_stream=False)
                else:
                    _add(opts, name)
                    node.set(name, value, allow_stream=False)
            del kwargs[option_type]
    _add_opts("_inputs")
    _add_opts("_outputs")
    _add_opts("_options")
    # remaining keyword arguments are plain option value assignments
    for k, v in kwargs.items():
        node.set(k, v, allow_stream=False)
    return node
def bash(self, command, **kwargs):
    """Create a *bash* job node that executes a shell command.

    This is a fast way to build pipelines from shell commands: the
    command string is wrapped in the *bash tool*, which defines
    ``input``, ``output`` and ``outfile``. Input and output default to
    stdin and stdout.

    :param command: the bash command to execute
    :type command: string
    :param kwargs: arguments passed into the context used to render the
                   bash command. ``input``, ``output``, and ``outfile``
                   are passed as options to the *bash* tool that is
                   used to run the command
    :returns: a new pipeline node that represents the bash job
    :rtype: :class:`jip.pipelines.Node`
    """
    helper = self.utils
    return helper.bash(command, **kwargs)
def add(self, tool, _job=None):
"""Add a tool or a node to the pipeline. If the given value
is not a node, it is wrapped in a new node instance and then added
to the pipeline. The newly created node is returned.
Note that the nodes uniquely map to tool instances. You can not
add the same instance twice to the pipeline. Instead, no new
node will be added and the already existing node will be returned.
:param tool: the tool or | |
# logios/hyphenate.py
""" Hyphenation, using Frank Liang's algorithm.
This module provides a single function to hyphenate words. hyphenate_word takes
a string (the word), and returns a list of parts that can be separated by hyphens.
>>> hyphenate_word("hyphenation")
['hy', 'phen', 'ation']
>>> hyphenate_word("supercalifragilisticexpialidocious")
['su', 'per', 'cal', 'ifrag', 'ilis', 'tic', 'ex', 'pi', 'ali', 'do', 'cious']
>>> hyphenate_word("project")
['project']
Ned Batchelder, July 2007.
This Python code is in the public domain.
"""
import re
__version__ = '1.0.20070709'
class Hyphenator:
    """Hyphenate words using Frank Liang's pattern-trie algorithm.

    ``patterns`` is a whitespace separated list of Liang patterns such as
    ``a1bc3d4``; ``exceptions`` is a whitespace separated list of
    pre-hyphenated words such as ``pro-ject``.
    """

    def __init__(self, patterns, exceptions=''):
        # the trie: one nested dict level per pattern letter, with the
        # point list stored under the key ``None`` at the leaf
        self.tree = {}
        for pat in patterns.split():
            self._insert_pattern(pat)
        # exception words map directly to a ready-made point array
        self.exceptions = {}
        for ex in exceptions.split():
            # convert the hyphenated spelling into a point array
            pts = [0] + [int(ch == '-') for ch in re.split(r"[a-z]", ex)]
            self.exceptions[ex.replace('-', '')] = pts

    def _insert_pattern(self, pattern):
        # split a pattern like 'a1bc3d4' into its letters 'abcd' and the
        # interleaved priority points [0, 1, 0, 3, 4]
        letters = re.sub('[0-9]', '', pattern)
        points = [int(d or 0) for d in re.split("[.a-z]", pattern)]
        # walk/extend the trie one letter at a time; the leaf holds the
        # point list under the key None
        node = self.tree
        for ch in letters:
            node = node.setdefault(ch, {})
        node[None] = points

    def hyphenate_word(self, word):
        """ Given a word, returns a list of pieces, broken at the possible
        hyphenation points.
        """
        if len(word) <= 4:
            # short words are never hyphenated
            return [word]
        lowered = word.lower()
        if lowered in self.exceptions:
            # exceptions carry a pre-computed point array
            points = self.exceptions[lowered]
        else:
            work = '.' + lowered + '.'
            points = [0] * (len(work) + 1)
            for start in range(len(work)):
                node = self.tree
                for ch in work[start:]:
                    if ch not in node:
                        break
                    node = node[ch]
                    if None in node:
                        # merge this pattern's points, keeping maxima
                        for offset, value in enumerate(node[None]):
                            idx = start + offset
                            if value > points[idx]:
                                points[idx] = value
        # no hyphens within the first two or the last two characters
        points[1] = points[2] = points[-2] = points[-3] = 0
        # odd point values mark the legal break positions
        pieces = ['']
        for ch, pt in zip(word, points[2:]):
            pieces[-1] += ch
            if pt % 2:
                pieces.append('')
        return pieces
patterns = (
# Knuth and Liang's original hyphenation patterns from classic TeX.
# In the public domain.
"""
.ach4 .ad4der .af1t .al3t .am5at .an5c .ang4 .ani5m .ant4 .an3te .anti5s .ar5s
.ar4tie .ar4ty .as3c .as1p .as1s .aster5 .atom5 .au1d .av4i .awn4 .ba4g .ba5na
.bas4e .ber4 .be5ra .be3sm .be5sto .bri2 .but4ti .cam4pe .can5c .capa5b .car5ol
.ca4t .ce4la .ch4 .chill5i .ci2 .cit5r .co3e .co4r .cor5ner .de4moi .de3o .de3ra
.de3ri .des4c .dictio5 .do4t .du4c .dumb5 .earth5 .eas3i .eb4 .eer4 .eg2 .el5d
.el3em .enam3 .en3g .en3s .eq5ui5t .er4ri .es3 .eu3 .eye5 .fes3 .for5mer .ga2
.ge2 .gen3t4 .ge5og .gi5a .gi4b .go4r .hand5i .han5k .he2 .hero5i .hes3 .het3
.hi3b .hi3er .hon5ey .hon3o .hov5 .id4l .idol3 .im3m .im5pin .in1 .in3ci .ine2
.in2k .in3s .ir5r .is4i .ju3r .la4cy .la4m .lat5er .lath5 .le2 .leg5e .len4
.lep5 .lev1 .li4g .lig5a .li2n .li3o .li4t .mag5a5 .mal5o .man5a .mar5ti .me2
.mer3c .me5ter .mis1 .mist5i .mon3e .mo3ro .mu5ta .muta5b .ni4c .od2 .odd5
.of5te .or5ato .or3c .or1d .or3t .os3 .os4tl .oth3 .out3 .ped5al .pe5te .pe5tit
.pi4e .pio5n .pi2t .pre3m .ra4c .ran4t .ratio5na .ree2 .re5mit .res2 .re5stat
.ri4g .rit5u .ro4q .ros5t .row5d .ru4d .sci3e .self5 .sell5 .se2n .se5rie .sh2
.si2 .sing4 .st4 .sta5bl .sy2 .ta4 .te4 .ten5an .th2 .ti2 .til4 .tim5o5 .ting4
.tin5k .ton4a .to4p .top5i .tou5s .trib5ut .un1a .un3ce .under5 .un1e .un5k
.un5o .un3u .up3 .ure3 .us5a .ven4de .ve5ra .wil5i .ye4 4ab. a5bal a5ban abe2
ab5erd abi5a ab5it5ab ab5lat ab5o5liz 4abr ab5rog ab3ul a4car ac5ard ac5aro
a5ceou ac1er a5chet 4a2ci a3cie ac1in a3cio ac5rob act5if ac3ul ac4um a2d ad4din
ad5er. 2adi a3dia ad3ica adi4er a3dio a3dit a5diu ad4le ad3ow ad5ran ad4su 4adu
a3duc ad5um ae4r aeri4e a2f aff4 a4gab aga4n ag5ell age4o 4ageu ag1i 4ag4l ag1n
a2go 3agog ag3oni a5guer ag5ul a4gy a3ha a3he ah4l a3ho ai2 a5ia a3ic. ai5ly
a4i4n ain5in ain5o ait5en a1j ak1en al5ab al3ad a4lar 4aldi 2ale al3end a4lenti
a5le5o al1i al4ia. ali4e al5lev 4allic 4alm a5log. a4ly. 4alys 5a5lyst 5alyt
3alyz 4ama am5ab am3ag ama5ra am5asc a4matis a4m5ato am5era am3ic am5if am5ily
am1in ami4no a2mo a5mon amor5i amp5en a2n an3age 3analy a3nar an3arc anar4i
a3nati 4and ande4s an3dis an1dl an4dow a5nee a3nen an5est. a3neu 2ang ang5ie
an1gl a4n1ic a3nies an3i3f an4ime a5nimi a5nine an3io a3nip an3ish an3it a3niu
an4kli 5anniz ano4 an5ot anoth5 an2sa an4sco an4sn an2sp ans3po an4st an4sur
antal4 an4tie 4anto an2tr an4tw an3ua an3ul a5nur 4ao apar4 ap5at ap5ero a3pher
4aphi a4pilla ap5illar ap3in ap3ita a3pitu a2pl apoc5 ap5ola apor5i apos3t
aps5es a3pu aque5 2a2r ar3act a5rade ar5adis ar3al a5ramete aran4g ara3p ar4at
a5ratio ar5ativ a5rau ar5av4 araw4 arbal4 ar4chan ar5dine ar4dr ar5eas a3ree
ar3ent a5ress ar4fi ar4fl ar1i ar5ial ar3ian a3riet ar4im ar5inat ar3io ar2iz
ar2mi ar5o5d a5roni a3roo ar2p ar3q arre4 ar4sa ar2sh 4as. as4ab as3ant ashi4
a5sia. a3sib a3sic 5a5si4t ask3i as4l a4soc as5ph as4sh as3ten as1tr asur5a a2ta
at3abl at5ac at3alo at5ap ate5c at5ech at3ego at3en. at3era ater5n a5terna
at3est at5ev 4ath ath5em a5then at4ho ath5om 4ati. a5tia at5i5b at1ic at3if
ation5ar at3itu a4tog a2tom at5omiz a4top a4tos a1tr at5rop at4sk at4tag at5te
at4th a2tu at5ua at5ue at3ul at3ura a2ty au4b augh3 au3gu au4l2 aun5d au3r
au5sib aut5en au1th a2va av3ag a5van ave4no av3era av5ern av5ery av1i avi4er
av3ig av5oc a1vor 3away aw3i aw4ly aws4 ax4ic ax4id ay5al aye4 ays4 azi4er azz5i
5ba. bad5ger ba4ge bal1a ban5dag ban4e ban3i barbi5 bari4a bas4si 1bat ba4z 2b1b
b2be b3ber bbi4na 4b1d 4be. beak4 beat3 4be2d be3da be3de be3di be3gi be5gu 1bel
be1li be3lo 4be5m be5nig be5nu 4bes4 be3sp be5str 3bet bet5iz be5tr be3tw be3w
be5yo 2bf 4b3h bi2b bi4d 3bie bi5en bi4er 2b3if 1bil bi3liz bina5r4 bin4d bi5net
bi3ogr bi5ou bi2t 3bi3tio bi3tr 3bit5ua b5itz b1j bk4 b2l2 blath5 b4le. blen4
5blesp b3lis b4lo blun4t 4b1m 4b3n bne5g 3bod bod3i bo4e bol3ic bom4bi bon4a
bon5at 3boo 5bor. 4b1ora bor5d 5bore 5bori 5bos4 b5ota both5 bo4to bound3 4bp
4brit broth3 2b5s2 bsor4 2bt bt4l b4to b3tr buf4fer bu4ga bu3li bumi4 bu4n
bunt4i bu3re bus5ie buss4e 5bust 4buta 3butio b5uto b1v 4b5w 5by. bys4 1ca
cab3in ca1bl cach4 ca5den 4cag4 2c5ah ca3lat cal4la call5in 4calo can5d can4e
can4ic can5is can3iz can4ty cany4 ca5per car5om cast5er cas5tig 4casy ca4th
4cativ cav5al c3c ccha5 cci4a ccompa5 ccon4 ccou3t 2ce. 4ced. 4ceden 3cei 5cel.
3cell 1cen 3cenc 2cen4e 4ceni 3cent 3cep ce5ram 4cesa 3cessi ces5si5b ces5t cet4
c5e4ta cew4 2ch 4ch. 4ch3ab 5chanic ch5a5nis che2 cheap3 4ched che5lo 3chemi
ch5ene ch3er. ch3ers 4ch1in 5chine. ch5iness 5chini 5chio 3chit chi2z 3cho2
ch4ti 1ci 3cia ci2a5b cia5r ci5c 4cier 5cific. 4cii ci4la 3cili 2cim 2cin c4ina
3cinat cin3em c1ing c5ing. 5cino cion4 4cipe ci3ph 4cipic 4cista 4cisti 2c1it
cit3iz 5ciz ck1 ck3i 1c4l4 4clar c5laratio 5clare cle4m 4clic clim4 cly4 c5n 1co
co5ag coe2 2cog co4gr coi4 co3inc col5i 5colo col3or com5er con4a c4one con3g
con5t co3pa cop3ic co4pl 4corb coro3n cos4e cov1 cove4 cow5a coz5e co5zi c1q
cras5t 5crat. 5cratic cre3at 5cred 4c3reta cre4v cri2 cri5f c4rin cris4 5criti
cro4pl crop5o cros4e cru4d 4c3s2 2c1t cta4b ct5ang c5tant c2te c3ter c4ticu
ctim3i ctu4r c4tw cud5 c4uf c4ui cu5ity 5culi cul4tis 3cultu cu2ma c3ume cu4mi
3cun cu3pi cu5py cur5a4b cu5ria 1cus cuss4i 3c4ut cu4tie 4c5utiv 4cutr 1cy cze4
1d2a 5da. 2d3a4b dach4 4daf 2dag da2m2 dan3g dard5 dark5 4dary 3dat 4dativ 4dato
5dav4 dav5e 5day d1b d5c d1d4 2de. deaf5 deb5it de4bon decan4 de4cil de5com
2d1ed 4dee. de5if deli4e del5i5q de5lo d4em 5dem. 3demic dem5ic. de5mil de4mons
demor5 1den de4nar de3no denti5f de3nu de1p de3pa depi4 de2pu d3eq d4erh 5derm
dern5iz der5s des2 d2es. de1sc de2s5o des3ti de3str de4su de1t de2to de1v dev3il
4dey 4d1f d4ga d3ge4t dg1i d2gy d1h2 5di. 1d4i3a dia5b di4cam d4ice 3dict 3did
5di3en d1if di3ge di4lato d1in 1dina 3dine. 5dini di5niz 1dio dio5g di4pl dir2
di1re dirt5i dis1 5disi d4is3t d2iti 1di1v d1j d5k2 4d5la 3dle. 3dled 3dles.
4dless 2d3lo 4d5lu 2dly d1m 4d1n4 1do 3do. do5de 5doe 2d5of d4og do4la doli4
do5lor dom5iz do3nat doni4 doo3d dop4p d4or 3dos 4d5out do4v 3dox d1p 1dr
drag5on 4drai dre4 drea5r 5dren dri4b dril4 dro4p 4drow 5drupli 4dry 2d1s2 ds4p
d4sw d4sy d2th 1du d1u1a du2c d1uca duc5er 4duct. 4ducts du5el du4g d3ule dum4be
du4n 4dup du4pe d1v d1w d2y 5dyn dy4se dys5p e1a4b e3act | |
= gcp.compute.NetworkPeering("peering2",
network=producer_network.id,
peer_network=consumer_network.id,
opts=pulumi.ResourceOptions(provider=google_beta))
hc = gcp.compute.HealthCheck("hc",
check_interval_sec=1,
timeout_sec=1,
tcp_health_check=gcp.compute.HealthCheckTcpHealthCheckArgs(
port=80,
),
opts=pulumi.ResourceOptions(provider=google_beta))
backend = gcp.compute.RegionBackendService("backend",
region="us-central1",
health_checks=[hc.id],
opts=pulumi.ResourceOptions(provider=google_beta))
default = gcp.compute.ForwardingRule("default",
region="us-central1",
load_balancing_scheme="INTERNAL",
backend_service=backend.id,
all_ports=True,
network=producer_network.name,
subnetwork=producer_subnetwork.name,
opts=pulumi.ResourceOptions(provider=google_beta))
route_ilb = gcp.compute.Route("route-ilb",
dest_range="0.0.0.0/0",
network=consumer_network.name,
next_hop_ilb=default.ip_address,
priority=2000,
tags=[
"tag1",
"tag2",
],
opts=pulumi.ResourceOptions(provider=google_beta,
depends_on=[
peering1,
peering2,
]))
```
## Import
Route can be imported using any of these accepted formats
```sh
$ pulumi import gcp:compute/route:Route default projects/{{project}}/global/routes/{{name}}
```
```sh
$ pulumi import gcp:compute/route:Route default {{project}}/{{name}}
```
```sh
$ pulumi import gcp:compute/route:Route default {{name}}
```
:param str resource_name: The name of the resource.
:param RouteArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
    """Dispatch between the two supported constructor call styles.

    The arguments are inspected: when they match a ``RouteArgs``
    bundle, its fields are unpacked and forwarded to
    ``_internal_init``; otherwise the raw positional/keyword arguments
    are passed through unchanged.
    """
    parsed_args, opts = _utilities.get_resource_args_opts(
        RouteArgs, pulumi.ResourceOptions, *args, **kwargs)
    if parsed_args is None:
        __self__._internal_init(resource_name, *args, **kwargs)
    else:
        __self__._internal_init(resource_name, opts, **parsed_args.__dict__)
def _internal_init(__self__,
                   resource_name: str,
                   opts: Optional[pulumi.ResourceOptions] = None,
                   description: Optional[pulumi.Input[str]] = None,
                   dest_range: Optional[pulumi.Input[str]] = None,
                   name: Optional[pulumi.Input[str]] = None,
                   network: Optional[pulumi.Input[str]] = None,
                   next_hop_gateway: Optional[pulumi.Input[str]] = None,
                   next_hop_ilb: Optional[pulumi.Input[str]] = None,
                   next_hop_instance: Optional[pulumi.Input[str]] = None,
                   next_hop_instance_zone: Optional[pulumi.Input[str]] = None,
                   next_hop_ip: Optional[pulumi.Input[str]] = None,
                   next_hop_vpn_tunnel: Optional[pulumi.Input[str]] = None,
                   priority: Optional[pulumi.Input[int]] = None,
                   project: Optional[pulumi.Input[str]] = None,
                   tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                   __props__=None):
    """Shared constructor implementation that registers the Route
    resource with the pulumi engine.

    Validates the resource options, builds the ``RouteArgs`` property
    bag from the keyword arguments and delegates registration to the
    pulumi ``CustomResource`` base initializer.
    """
    # Normalize resource options: default instance, version stamp.
    if opts is None:
        opts = pulumi.ResourceOptions()
    if not isinstance(opts, pulumi.ResourceOptions):
        raise TypeError('Expected resource options to be a ResourceOptions instance')
    if opts.version is None:
        opts.version = _utilities.get_version()
    # __props__ may only be supplied when looking up an existing
    # resource (opts.id set); otherwise build the property bag here.
    if opts.id is None:
        if __props__ is not None:
            raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
        __props__ = RouteArgs.__new__(RouteArgs)
        __props__.__dict__["description"] = description
        # dest_range and network are required unless resolving by URN
        if dest_range is None and not opts.urn:
            raise TypeError("Missing required property 'dest_range'")
        __props__.__dict__["dest_range"] = dest_range
        __props__.__dict__["name"] = name
        if network is None and not opts.urn:
            raise TypeError("Missing required property 'network'")
        __props__.__dict__["network"] = network
        __props__.__dict__["next_hop_gateway"] = next_hop_gateway
        __props__.__dict__["next_hop_ilb"] = next_hop_ilb
        __props__.__dict__["next_hop_instance"] = next_hop_instance
        __props__.__dict__["next_hop_instance_zone"] = next_hop_instance_zone
        __props__.__dict__["next_hop_ip"] = next_hop_ip
        __props__.__dict__["next_hop_vpn_tunnel"] = next_hop_vpn_tunnel
        __props__.__dict__["priority"] = priority
        __props__.__dict__["project"] = project
        __props__.__dict__["tags"] = tags
        # output-only properties start unresolved
        __props__.__dict__["next_hop_network"] = None
        __props__.__dict__["self_link"] = None
    super(Route, __self__).__init__(
        'gcp:compute/route:Route',
        resource_name,
        __props__,
        opts)
@staticmethod
def get(resource_name: str,
        id: pulumi.Input[str],
        opts: Optional[pulumi.ResourceOptions] = None,
        description: Optional[pulumi.Input[str]] = None,
        dest_range: Optional[pulumi.Input[str]] = None,
        name: Optional[pulumi.Input[str]] = None,
        network: Optional[pulumi.Input[str]] = None,
        next_hop_gateway: Optional[pulumi.Input[str]] = None,
        next_hop_ilb: Optional[pulumi.Input[str]] = None,
        next_hop_instance: Optional[pulumi.Input[str]] = None,
        next_hop_instance_zone: Optional[pulumi.Input[str]] = None,
        next_hop_ip: Optional[pulumi.Input[str]] = None,
        next_hop_network: Optional[pulumi.Input[str]] = None,
        next_hop_vpn_tunnel: Optional[pulumi.Input[str]] = None,
        priority: Optional[pulumi.Input[int]] = None,
        project: Optional[pulumi.Input[str]] = None,
        self_link: Optional[pulumi.Input[str]] = None,
        tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None) -> 'Route':
    """
    Look up an existing Route resource by name and provider id, seeding
    its state with the given optional property values.

    :param str resource_name: The unique name of the resulting resource.
    :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
    :param pulumi.ResourceOptions opts: Options for the resource.
    :param pulumi.Input[str] description: Optional description of this resource.
    :param pulumi.Input[str] dest_range: Destination IPv4 range of outgoing packets this route applies to.
    :param pulumi.Input[str] name: Client-supplied, RFC1035-compliant resource name (1-63 chars,
           matching `a-z?`: starts with a lowercase letter,
           continues with dashes, lowercase letters or digits, no trailing dash).
    :param pulumi.Input[str] network: The network that this route applies to.
    :param pulumi.Input[str] next_hop_gateway: URL of the gateway handling matching packets; only the
           internet gateway may be given, as a full/partial URL or the literal
           string `default-internet-gateway`.
    :param pulumi.Input[str] next_hop_ilb: IP address or (partial/full) URL of an INTERNAL
           forwarding rule handling matching packets. The IP-address form is
           beta-only, works for rules in the same or a peered VPC, and requires
           a public (non-RFC 1918) destination range.
    :param pulumi.Input[str] next_hop_instance: URL (full/partial) of an instance handling
           matching packets, or just the instance name with the zone given in
           `next_hop_instance_zone`.
    :param pulumi.Input[str] next_hop_instance_zone: (Optional when `next_hop_instance` is
           specified) Zone of that instance; omit if `next_hop_instance` is a URL.
    :param pulumi.Input[str] next_hop_ip: Network IP address of an instance that should handle
           matching packets.
    :param pulumi.Input[str] next_hop_network: URL to a Network that should handle matching packets.
    :param pulumi.Input[str] next_hop_vpn_tunnel: URL to a VpnTunnel that should handle matching packets.
    :param pulumi.Input[int] priority: Route priority, used to break ties between routes of equal
           prefix length (lowest value wins). Default 1000; valid range 0-65535.
    :param pulumi.Input[str] project: The ID of the project in which the resource belongs; the
           provider project is used if not given.
    :param pulumi.Input[str] self_link: The URI of the created resource.
    :param pulumi.Input[Sequence[pulumi.Input[str]]] tags: Instance tags to which this route applies.
    """
    opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
    state = _RouteState.__new__(_RouteState)
    # seed the state bag with every supplied property value
    for key, value in (
            ("description", description),
            ("dest_range", dest_range),
            ("name", name),
            ("network", network),
            ("next_hop_gateway", next_hop_gateway),
            ("next_hop_ilb", next_hop_ilb),
            ("next_hop_instance", next_hop_instance),
            ("next_hop_instance_zone", next_hop_instance_zone),
            ("next_hop_ip", next_hop_ip),
            ("next_hop_network", next_hop_network),
            ("next_hop_vpn_tunnel", next_hop_vpn_tunnel),
            ("priority", priority),
            ("project", project),
            ("self_link", self_link),
            ("tags", tags)):
        state.__dict__[key] = value
    return Route(resource_name, opts=opts, __props__=state)
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
    """An optional, user-supplied description of this resource."""
    return pulumi.get(self, "description")
@property
@pulumi.getter(name="destRange")
def dest_range(self) -> pulumi.Output[str]:
    """Destination IPv4 range of the outgoing packets this route
    applies to (only IPv4 is supported).
    """
    return pulumi.get(self, "dest_range")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
    """Client-supplied resource name: 1-63 characters, RFC1035
    compliant, matching the regular expression `a-z?`
    — it starts with a lowercase letter and continues with dashes,
    lowercase letters or digits, with no trailing dash.
    """
    return pulumi.get(self, "name")
@property
@pulumi.getter
def network(self) -> pulumi.Output[str]:
    """The network this route applies to."""
    return pulumi.get(self, "network")
@property
@pulumi.getter(name="nextHopGateway")
def next_hop_gateway(self) -> pulumi.Output[Optional[str]]:
    """URL of the gateway handling matching packets. Only the internet
    gateway may be specified, either as a full or partial URL
    (`https://www.googleapis.com/compute/v1/projects/project/global/gateways/default-internet-gateway`,
    `projects/project/global/gateways/default-internet-gateway`,
    `global/gateways/default-internet-gateway`) or as the literal
    string `default-internet-gateway`.
    """
    return pulumi.get(self, "next_hop_gateway")
@property
@pulumi.getter(name="nextHopIlb")
def next_hop_ilb(self) -> pulumi.Output[Optional[str]]:
    """IP address or URL of a loadBalancingScheme=INTERNAL forwarding
    rule that should handle matching packets.

    With the GA provider only the partial/full URL forms are valid,
    e.g. `10.128.0.56`,
    `https://www.googleapis.com/compute/v1/projects/project/regions/region/forwardingRules/forwardingRule`,
    or `regions/region/forwardingRules/forwardingRule`. With the beta
    provider the IP address of a forwarding rule from the same VPC or
    any peered VPC may also be given, but only when the
    destinationRange is a public (non-RFC 1918) IP CIDR range.
    """
    return pulumi.get(self, "next_hop_ilb")
@property
@pulumi.getter(name="nextHopInstance")
def next_hop_instance(self) -> pulumi.Output[Optional[str]]:
"""
URL to an instance that should handle matching packets.
You can specify this as a full or partial URL. For example:
| |
# ******************************************************************************
# pysimm.forcefield.charmm module
# ******************************************************************************
#
# ******************************************************************************
# License
# ******************************************************************************
# The MIT License (MIT)
#
# Copyright (c) 2020
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import json
import os
import re
import sys
from itertools import permutations
import numpy
from . import gasteiger
from .. import error_print, verbose_print, debug_print
from ..system import Angle, Dihedral, Improper, ParticleType
from ..system import BondType, AngleType
from .forcefield import Forcefield
from ..utils import ItemContainer
class Charmm(Forcefield):
"""pysimm.forcefield.Charmm
Forcefield object with typing rules for CHARMM model.
By default reads data file in forcefields subdirectory.
Attributes:
ff_name: charmm
pair_style: lj/charmm
ff_class: 1
"""
def __init__(self, db_file=None):
    """Initialize the CHARMM force field from its JSON parameter
    database (defaults to ``data/forcefields/charmm.json``) and set the
    LAMMPS style strings for this force field class.

    Args:
        db_file: path to the force field database file; when falsy
            (but not ``False``) the bundled charmm.json is used
    """
    if not db_file and db_file is not False:
        here = os.path.dirname(os.path.realpath(__file__))
        db_file = os.path.join(here, os.pardir, 'data',
                               'forcefields', 'charmm.json')
    Forcefield.__init__(self, db_file)
    # NOTE(review): when db_file is False the open() below would fail;
    # presumably callers never pass False here -- confirm.
    with open(db_file) as fh:
        db = json.loads(fh.read())
    # off-diagonal (NBFIX) pair coefficients, keyed by type-name pairs
    self.nbfix_types = ItemContainer()
    for entry in db.get('nbfix_types'):
        self.nbfix_types.add(ParticleType(**entry))
    self.name = 'charmm'
    self.pair_style = 'lj/charmm'
    self.bond_style = 'harmonic'
    self.angle_style = 'charmm'
    self.dihedral_style = 'fourier'
    self.improper_style = 'harmonic'
    self.ff_class = '1'
def assign_ptypes(self, s):
    """pysimm.forcefield.Charmm.assign_ptypes

    Charmm specific particle typing rules.
    Requires :class:`~pysimm.system.System` object
    :class:`~pysimm.system.Particle` objects have bonds defined.
    *** use System.add_particle_bonding() to ensure this ***
    *** Not entirely inclusive - some atom types not used ***

    Args:
        s: :class:`~pysimm.system.System`

    Returns:
        None on success; the offending :class:`~pysimm.system.Particle`
        when a particle cannot be typed (typing aborts early)
    """
    s.pair_style = self.pair_style
    s.add_particle_bonding()
    # treat bonds with an undefined order as single bonds
    for b in s.bonds:
        if not b.order:
            b.order = 1
    # cache per-particle bonding info used by the typing rules below
    for p in s.particles:
        p.bond_orders = [x.order for x in p.bonds]
        if None in p.bond_orders:
            error_print('error: bond orders are not set')
        p.bond_elements = [x.a.elem if p is x.b else x.b.elem for x in p.bonds]
        p.nbonds = len(p.bond_elements)
        if p.linker:
            # linker particles carry one extra implicit bond
            p.nbonds += 1
    for p in s.particles:
        if not p.type_name:
            if p.elem == 'C':
                # sp3 carbon: four bonds, each of order 1.
                # (bug fix: the original ``all(p.bond_orders) == 1`` only
                # tested the truthiness of the list entries, not that
                # every order equals 1)
                if all(o == 1 for o in p.bond_orders) and (p.nbonds == 4):
                    n_partcls = [p_ for p_ in p.bonded_to if p_.elem == 'N']
                    if len(n_partcls) > 0 and (n_partcls[0].nbonds == 4) and (set(n_partcls[0].bond_elements) == {'C'}):
                        p.type_name = 'CG324'
                    else:
                        rng_count = __detect_rings__(p, [5, 6])
                        if rng_count == 0:  # linear sp3 carbon
                            hcount = p.bond_elements.count('H')
                            p.type_name = 'CG3{}1'.format(hcount)
                        elif rng_count > 0:  # tetrahydrofuran (THF) or tetrahydropyran (THP)
                            # NOTE(review): the original used
                            # 'CG3C52'.format(rng_count) — a no-op format
                            # call; a ring-size-dependent name (e.g.
                            # 'CG3C{}2') may have been intended — confirm
                            p.type_name = 'CG3C52'
                if ('A' in p.bond_orders) or (4 in p.bond_orders):
                    # aromatic carbon
                    p.type_name = 'CG2R61'
                if (p.nbonds == 3):  # carbonyl C condition
                    if set(p.bond_elements) == {'O', 'C', 'N'}:  # in amide
                        p.type_name = 'CG2O1'
                    if p.bond_elements.count('O') == 2:  # carbonyl C in esters or acids
                        tmp_part = [sb_p for sb_p in p.bonded_to if (sb_p.elem == 'O') and sb_p.nbonds == 2]
                        if len(tmp_part) > 0:  # deprotonated
                            p.type_name = 'CG2O2'
                        else:  # protonated
                            p.type_name = 'CG2O3'
                    if set(p.bond_elements) == {'O', 'C', 'H'}:  # carbonyl C in aldehyde
                        p.type_name = 'CG2O4'
                    if (p.bond_elements.count('O') == 1) and (p.bond_elements.count('C') == 2):  # in ketones
                        p.type_name = 'CG2O5'
            elif p.elem == 'O':
                # ethers, esters: two single bonds (same bug fix as above)
                if (p.nbonds == 2) and all(o == 1 for o in p.bond_orders):
                    if p.bond_elements.count('C') == 2:
                        is_ester = False
                        for p_ in p.bonded_to:
                            if (p_.bond_elements.count('O') == 2) and (p_.nbonds == 3):
                                is_ester = True
                        if is_ester:
                            p.type_name = 'OG302'
                        else:
                            p.type_name = 'OG301'
                            # cyclic ethers (THF/THP) override the
                            # linear-ether type
                            rng_count = __detect_rings__(p, [5, 6])
                            if rng_count > 0:
                                p.type_name = 'OG3C{}1'.format(rng_count)
                if (p.nbonds == 1) and ('C' in p.bond_elements):  # sp2 oxygen
                    p_ = [t for t in p.bonded_to][0]
                    if set(p_.bond_elements) == {'O', 'C', 'N'}:  # in amide
                        p.type_name = 'OG2D1'
                    if p_.bond_elements.count('O') == 2:  # in acids
                        tmp_part = [sb_p for sb_p in p_.bonded_to if (sb_p.elem == 'O') and sb_p.nbonds == 2]
                        if len(tmp_part) > 0:
                            p.type_name = 'OG2D1'
                        else:
                            p.type_name = 'OG2D2'
                    if p_.bond_elements.count('C') == 2:  # in ketones
                        p.type_name = 'OG2D3'
                if ('S' in p.bond_elements) or ('P' in p.bond_elements):  # phosphate or sulfate
                    p.type_name = 'OG2P1'
                if (p.nbonds == 2) and (set(p.bond_elements) == {'C', 'H'}):
                    p_ = [t for t in p.bonded_to if t.elem != 'H'][0]
                    if p_.bond_elements.count('O') == 2:  # in acids
                        p.type_name = 'OG2D1'
                    if p_.bond_elements.count('O') == 1:  # hydroxyl oxygen
                        p.type_name = 'OG311'
                if (p.nbonds == 2) and all([t == 'H' for t in p.bond_elements]):  # water oxygen
                    p.type_name = 'OT'
                    for sb_p in p.bonded_to:  # type all hydrogens connected to this atom
                        if sb_p.elem == 'H':
                            sb_p.type_name = 'HT'
            elif p.elem == 'N':
                if (p.nbonds == 1) and ('C' in p.bond_elements):  # nitrile (or cyano) group
                    p.type_name = 'NG1T1'
                if (p.nbonds == 3) and (set(p.bond_elements) == {'H', 'N'}):  # hydrazine
                    p.type_name = 'NG3N1'
                if (p.nbonds == 3) and ('C' in p.bond_elements):  # amide
                    p.type_name = 'NG2S{}'.format(p.bond_elements.count('H'))
                if (p.nbonds == 4):
                    p.type_name = 'NG3P{}'.format(p.bond_elements.count('H'))
            elif p.elem == 'H':
                if p.bond_elements[0] == 'N':
                    p.type_name = 'HGP1'
                if p.bond_elements[0] == 'O':
                    p.type_name = 'HGP1'
                if p.bond_elements[0] == 'C':
                    host = [p_ for p_ in p.bonded_to][0]
                    nitrogen = [p_ for p_ in host.bonded_to if p_.elem == 'N']
                    if len(nitrogen) > 0 and (nitrogen[0].nbonds == 4):
                        p.type_name = 'HGP5'
                    else:
                        if ('A' in host.bond_orders) or (4 in host.bond_orders):
                            p.type_name = 'HGR61'
                        else:
                            hcount = [pt for pt in p.bonded_to][0].bond_elements.count('H')
                            p.type_name = 'HGA{}'.format(hcount)
            elif p.elem == 'S':
                if p.nbonds == 4:
                    p.type_name = 'SG3O{}'.format(4 - p.bond_elements.count('O'))
            else:
                print('cant type particle %s' % p.tag)
                return p
    # collect the force field parameter objects for all assigned names
    all_types = set()
    for p in s.particles:
        tmp = self.particle_types.get(p.type_name)
        if len(tmp) > 0:
            all_types.add(tmp[0])
        else:
            debug_print('Current version of CHARMM-FF database file does not contain \'{}\' particle type'.format(p.type_name))
    # copy the types into the system so it owns its own parameters
    for pt in all_types:
        s.particle_types.add(pt.copy())
    for p in s.particles:
        pt = s.particle_types.get(p.type_name)
        if pt:
            p.type = pt[0]
    self.assign_extra_ljtypes(s)
def assign_extra_ljtypes(self, s):
    """pysimm.forcefield.Charmm.assign_extra_ljtypes

    Addition to normal force field setup: filling up the non-diagonal interaction pair
    coefficients (coefficients for interaction of particles of different type).

    Assumes that all :class:`~pysimm.system.ParticleType` are defined for all particles in s

    Args:
        s: :class:`~pysimm.system.System`

    Returns:
        None
    """
    loc_lj_types = set()
    # scan every ordered pair of distinct particle types; both name orders are
    # tried against the NBFIX table because its string keys may be order-sensitive
    for p in s.particle_types:
        for p_ in s.particle_types:
            if p != p_:
                # canonical (sorted) tag pair identifies the unordered type pair
                atm_type = tuple(sorted([p.tag, p_.tag]))
                # skip pairs we have already resolved in the other order
                if not(atm_type in [at.atm_types for at in loc_lj_types]):
                    tmp = self.nbfix_types.get(','.join([p.name, p_.name]))
                    if len(tmp) > 0:
                        to_add = tmp[0].copy()
                        to_add.atm_types = atm_type
                        loc_lj_types.add(to_add)
    # lazily create the container on the system the first time NBFIXes are added
    if not s.nbfix_types:
        s.nbfix_types = ItemContainer()
    for ljt in loc_lj_types:
        if not s.nbfix_types.get(ljt.name):
            s.nbfix_types.add(ljt)
def assign_btypes(self, s):
    """pysimm.forcefield.Charmm.assign_btypes

    Charmm specific bond typing rules.
    Requires :class:`~pysimm.system.System` object :class:`~pysimm.system.Particle` objects have bonds, type and type.name defined.

    *** use after assign_ptypes ***

    Args:
        s: :class:`~pysimm.system.System`

    Returns:
        None
    """
    all_types = set()
    s.bond_style = self.bond_style
    for b in s.bonds:
        # bond types are keyed by the two participating particle type names
        bt = self.bond_types.get('%s,%s' % (b.a.type.name, b.b.type.name))
        if bt:
            b.type_name = bt[0].name
        else:
            # NOTE(review): aborts typing on the first unmatched bond and
            # returns that bond to the caller (mirrors assign_ptypes behavior)
            print('couldnt type this bond %s,%s'
                  % (b.a.type.name, b.b.type.name))
            return b
        all_types.add(self.bond_types.get(b.type_name)[0])
    # copy force-field bond types into the system, then point bonds at them
    for bt in all_types:
        bt = bt.copy()
        s.bond_types.add(bt)
    for b in s.bonds:
        bt = s.bond_types.get(b.type_name)
        if bt:
            b.type = bt[0]
def assign_atypes(self, s):
"""pysimm.forcefield.Charmm.assign_atypes
Charmm specific angle typing rules.
Requires :class:`~pysimm.system.System` object :class:`~pysimm.system.Particle` objects have bonds, type and type.name defined.
*** use after assign_ptypes ***
Args:
s: :class:`~pysimm.system.System`
Returns:
None
"""
all_types = set()
s.angle_style = self.angle_style
s.add_particle_bonding()
for p in s.particles:
for p1 in p.bonded_to:
for p2 in p.bonded_to:
if p1 is not p2:
unique = True
for a in s.angles:
if ((a.a is p1 and a.b is p and a.c is p2) or
(a.a is p2 and a.b is p and a.c is p1)):
unique = False
if unique:
at = self.angle_types.get('%s,%s,%s'
% (p1.type.name,
p.type.name,
p2.type.name))
if at:
s.angles.add(Angle(type_name=at[0].name,
| |
teams.session_id == session_id).first()
tid = result.id
new_student = students(id=id,
tid=tid,
session_id=session_id,
name=name,
email_address=email_address,
is_lead=False,
midterm_done=False,
final_done=False,
active="open")
db.session.add(new_student)
db.session.commit()
except exc.SQLAlchemyError:
handle_exception()
return False
return True
def get_students(self, tid):
    """
    Fetch the names of every student on a given team.

    Input: team id as tid
    Output: list of student names on success, None on a database error
    """
    try:
        names = []
        for record in students.query.filter_by(tid=tid):
            names.append(record.name)
    except exc.SQLAlchemyError:
        handle_exception()
        return None
    return names
def get_team_members(self, tid):
    """
    Retrieve every student record belonging to one team.

    Input: team id as tid
    Output: list of student objects on success, None on a database error
    """
    try:
        member_query = students.query.filter_by(tid=tid).distinct()
        members = member_query.all()
    except exc.SQLAlchemyError:
        handle_exception()
        return None
    return members
def get_students_in_session(self, session_id):
    """
    Gets a list of students in the given session, ordered by team id (ascending)

    Input: session_id
    Output: the list of students, or None on a database error
    """
    # https://stackoverflow.com/questions/4186062/sqlalchemy-order-by-descending
    # https://docs.sqlalchemy.org/en/13/orm/query.html
    try:
        results = students.query.filter(
            students.session_id == session_id).order_by(students.tid.asc()).all()
    except exc.SQLAlchemyError:
        handle_exception()
        return None
    return results
def get_user_sessions(self, student_id):
    """
    Returns all capstone sessions that a user belongs to

    Input: student_id: The database id of the student to retrieve capstone session ids for
    output: a list of capstone session objects, one per record the student belongs to
            (None on a database error)
    """
    try:
        results = []  # collected capstone session objects
        # get all matching student records (one per session membership)
        student_records = students.query.filter_by(id=student_id).all()
        # NOTE(review): .all() returns a list (possibly empty), so this
        # None-check is believed to always pass -- confirm before removing
        if student_records is not None:
            # for each record, add the capstone the id points to
            for rec in student_records:
                cap = capstone_session().get_sess_by_id(rec.session_id)
                if cap is not None:
                    results.append(cap)
            return results
    except exc.SQLAlchemyError:
        handle_exception()
        return None
def get_student_in_session(self, sid, session_id):
    """
    Look up one student record by id within a capstone session.

    Input: student id as sid, session id
    Output: the matching student (or None if absent / on a database error)
    """
    try:
        match = students.query.filter(students.id == sid,
                                      students.session_id == session_id).first()
        return match
    except exc.SQLAlchemyError:
        handle_exception()
        return None
def remove_student(self, sts, t_name, session_id):
    """
    Remove a list of selected students from a team.

    Input: sts -- list of student names, t_name -- team name, session_id
    Output: False if the inputs are missing or a database error occurs;
            True once every listed student has been archived and deleted
    """
    try:
        if t_name is None or sts is None:
            return False
        # archive helper: records each student before deletion
        removed_student = removed_students()
        team = teams.query.filter(teams.name == t_name,
                                  teams.session_id == session_id).first()
        for i in sts:
            # NOTE(review): if a name does not match, `student` is None and
            # add_student below would raise AttributeError (not caught here) --
            # confirm inputs are validated upstream
            student = students.query.filter(students.name == i,
                                            students.tid == team.id,
                                            students.session_id == session_id).first()
            removed_student.add_student(student)
            # re-fetch by id, then delete and commit per student
            st = students.query.filter(students.id == student.id,
                                       students.session_id == session_id).first()
            db.session.delete(st)
            db.session.commit()
    except exc.SQLAlchemyError:
        handle_exception()
        return False
    return True
def validate(self, id):
    """
    Match a CAS username against a student id in the database.

    Input: student id
    Output: the student object when found, False otherwise
    """
    found = None
    try:
        found = students.query.filter_by(id=id).first()
    except exc.SQLAlchemyError:
        handle_exception()
    if found is None:
        return False
    return found
def get_student(self, s_id):
    """
    Get the single student record matching the id passed in.

    Input: student id of the student to retrieve
    Output: the first matching student object, or None when absent or on error
    """
    try:
        return students.query.filter_by(id=s_id).first()
    except exc.SQLAlchemyError:
        handle_exception()
        return None
def update_team(self, name, s_id, t_id):
    """
    Move a student (matched by name within a session) onto another team.

    Input: name -- student name, s_id -- capstone session id, t_id -- new team id
    Output: True on success, False on a database error
    """
    try:
        students.query.filter_by(name=name,
                                 session_id=s_id).\
            update(dict(tid=t_id))
        db.session.commit()
        return True
    except exc.SQLAlchemyError:
        handle_exception()
        return False
def check_team_lead(self, s_id, sess_id):
    """
    Check whether the given student is the team lead within a session.

    Input: s_id -- student id; sess_id -- capstone session id
    Output: True if the student exists and is flagged as team lead,
            False otherwise (including when no such student is found)
    """
    try:
        student = students.query.filter(students.id == s_id, students.session_id == sess_id).first()
        # first() returns None when there is no match; the old code would
        # raise AttributeError here instead of answering False
        if student is None:
            return False
        return bool(student.is_lead)
    except exc.SQLAlchemyError:
        handle_exception()
        return False
def get_unassigned_students(self, s_id):
    """
    List the students of a session that sit on the placeholder (empty-name) team.

    Input: session id as s_id
    Output: list of student objects without a real team, or None
    """
    try:
        placeholder = teams.query.filter_by(name="", session_id=s_id).first()
        if not placeholder:
            return None
        return students.query.filter_by(session_id=s_id, tid=placeholder.id).all()
    # https://stackoverflow.com/questions/6470428/catch-multiple-exceptions-in-one-line-except-block
    except (exc.SQLAlchemyError, AttributeError):
        handle_exception()
        return None
def edit_student(self, id, new_name, new_email):
    """
    Update a student's name and/or email address across all of their records.

    Input: id -- current user id; new_name / new_email -- replacement values,
           either may be '' to leave that field unchanged
    Output: True on success, False when no records exist or on a database error
    """
    try:
        # Find every record for this student (one per session membership)
        records = students.query.filter(students.id == id).all()
        # .all() returns a list; an empty list means the student does not
        # exist (the old `is None` check could never fire)
        if not records:
            return False
        # Change name and/or email, if either of them are non-blank
        for rec in records:
            if new_name != '':
                rec.name = new_name
            if new_email != '':
                rec.email_address = new_email
        db.session.commit()
        return True
    except exc.SQLAlchemyError:
        handle_exception()
        return False
def set_lead(self, session_id, team_name, lead):
    """
    Mark one student of a team as its lead and clear the flag on teammates.

    Input: chosen session id, team name and the lead student's name
    Output: True on success; False for bad input, a missing team, or a db error
    """
    # Sanity check inputs
    if lead is None or team_name is None:
        return False
    try:
        # Locate the team within the session
        team = teams.query.filter(teams.session_id == session_id, teams.name == team_name).first()
        if team is None:
            return False
        # Flip the lead flag for every member of that team
        for member in students.query.filter(students.tid == team.id).all():
            member.is_lead = (member.name == lead)
        db.session.commit()
        return True
    except exc.SQLAlchemyError:
        handle_exception()
        return False
def set_active(self, session_id, option):
    """
    Set the `active` attribute on every student of a capstone session.

    For a student to be able to access their reviews, "open" must be set.
    Inputs: capstone session id; option as 'open' (allow review submission)
            or 'close' (disallow it).
    Outputs: True on success, False for an unknown option or database error.
    """
    try:
        roster = students.query.filter(students.session_id == session_id).all()
        # only the two known options are accepted; anything else is a no-op
        if option not in ('open', 'close'):
            return False
        for member in roster:
            member.active = option
        db.session.commit()
        return True
    except exc.SQLAlchemyError:
        handle_exception()
        return False
class capstone_session(db.Model):
    """Model for one capstone session (a term/year offering) and its helpers."""

    # reuse the table already declared in the shared SQLAlchemy metadata
    __table__ = db.Model.metadata.tables['capstone_session']
def get_max(self):
    """
    Compute the id to use for a newly added session.

    Returns 1 when the table is empty (or the query fails), max id + 1 otherwise.
    """
    try:
        current_max = db.session.query(func.max(capstone_session.id)).scalar()
    except exc.SQLAlchemyError:
        handle_exception()
        current_max = None
    return 1 if current_max is None else current_max + 1
def insert_session(self, term, year, professor_id):
    """
    Add a current session (only if it wasn't in the database)

    Input: starting term and year of the session, and the professor's id
    Output: return id of the added session
    """
    term = term.strip().lower()
    year = year.strip().lower()
    e_term = None
    e_year = 0
    terms = ["fall", "winter", "spring", "summer"]
    # the end term is the term that follows the start term (wrapping around)
    for i in range(len(terms)):
        if terms[i] == term:
            e_term = terms[(i+1) % 4]
            e_term = e_term.capitalize()
            # a fall start rolls the end year into the next calendar year
            # NOTE(review): e_year is an int here but a str in the else
            # branch -- confirm the column coerces both
            if term == 'fall':
                e_year = int(year)+1
            else:
                e_year = year
    id = self.get_max()
    term = term.capitalize()
    year = year.capitalize()
    prof_id = professor_id.lower()
    new_sess = capstone_session(id=id,
                                start_term=term,
                                start_year=year,
                                end_term=e_term,
                                end_year=e_year,
                                professor_id=prof_id)
    db.session.add(new_sess)
    db.session.commit()
    return id
def remove_session(self, session_id):
    """
    Remove an entire session along with all of its teams and students.

    Input: session id
    Output: True on success, False on a database error
    """
    try:
        team = teams()
        session_teams = team.query.filter_by(session_id=session_id).all()
        del_session = capstone_session.query.filter(capstone_session.id == session_id).first()
        # tear down each team first (this also removes its students)
        for t in session_teams:
            team.remove_team_from_session(t.name, session_id)
        db.session.delete(del_session)
        db.session.commit()
        return True
    except exc.SQLAlchemyError:
        handle_exception()
        # return False (not None) for consistency with the other mutators
        return False
def get_sess_by_id(self, id):
    """
    Fetch the capstone session object with the given id.

    inputs: id of the capstone session to retrieve
    outputs: the session object if found, None otherwise (or on a db error)
    """
    result = None
    try:
        result = capstone_session.query.filter_by(id=id).first()
    except exc.SQLAlchemyError:
        handle_exception()
    return result
def check_term_name(self, s_term):
    """
    Check whether a term name is one of the four valid terms.

    Input: start term of a new session (any case, surrounding spaces allowed)
    Output: True if valid, False otherwise
    """
    # a membership test replaces the original index-based loop
    return s_term.strip().lower() in ("fall", "winter", "spring", "summer")
def check_term_year(self, s_year):
    """
    Check whether the start year of a new session is a valid (digits-only) year.

    Input: start year of new session, as a string
    Output: True when s_year consists solely of digits, False otherwise
    """
    # str.isdigit already yields the boolean we want
    return s_year.isdigit()
def check_session_id_valid(self, v_id):
"""
Checks if the returned session ID is greater than
or equal to 0
"""
check_id = v_id.isdigit()
| |
cluster.
remove_overlap
If `True`, remove ambiguous samples. Otherwise, assign them to the most likely cluster.
raise_threshold
If a cluster is assigned less than ``raise_threshold x n_most_likely`` samples, raise an
exception. Set to `None` if you only want to raise if there is an empty cluster.
check_row_sums
Check whether rows in `a_fuzzy` sum to one. The one situation where we don't do this is when
we have selected a couple of main states and we don't want to re-distribute probability mass.
Returns
-------
:class:`numpy.ndarray`, :class:`numpy.ndarray`
    Boolean matrix of the same shape as `a_fuzzy`, assigning a subset of the samples to clusters, and
    an array of clusters with less than `n_most_likely` samples assigned, respectively.
"""
# check the inputs
n_samples, n_clusters = a_fuzzy.shape
if not isinstance(a_fuzzy, np.ndarray):
raise TypeError(
f"Expected `a_fuzzy` to be of type `numpy.ndarray`, got `{type(a_fuzzy).__name__!r}`."
)
a_fuzzy = np.asarray(a_fuzzy) # convert to array from lineage classs, don't copy
if check_row_sums:
if n_clusters != 1 and not np.allclose(
a_fuzzy.sum(1), 1, rtol=1e6 * EPS, atol=1e6 * EPS
):
raise ValueError("Rows in `a_fuzzy` do not sum to `1`.")
if n_most_likely > int(n_samples / n_clusters):
raise ValueError(
f"You've selected `{n_most_likely}` cells, please decrease this to at most "
f"`{int(n_samples / n_clusters)}` cells for your dataset."
)
# initialise
n_raise = (
1
if raise_threshold is None
else np.max([int(raise_threshold * n_most_likely), 1])
)
logg.debug(f"Raising an exception if there are less than `{n_raise}` cells.")
# initially select `n_most_likely` samples per cluster
sample_assignment = {
cl: fuzzy_assignment.argpartition(-n_most_likely)[-n_most_likely:]
for cl, fuzzy_assignment in enumerate(a_fuzzy.T)
}
# create the one-hot encoded discrete clustering
a_discrete = np.zeros(
a_fuzzy.shape, dtype=bool
) # don't use `zeros_like` - it also copies the dtype
for ix in range(n_clusters):
a_discrete[sample_assignment[ix], ix] = True
# handle samples assigned to more than one cluster
critical_samples = np.where(a_discrete.sum(1) > 1)[0]
for sample_ix in critical_samples:
if remove_overlap:
a_discrete[sample_ix, :] = _one_hot(n_clusters)
else:
candidate_ixs = np.where(a_discrete[sample_ix, :])[0]
most_likely_ix = candidate_ixs[
np.argmax(a_fuzzy[sample_ix, list(a_discrete[sample_ix, :])])
]
a_discrete[sample_ix, :] = _one_hot(n_clusters, most_likely_ix)
# check how many samples this left for each cluster
n_samples_per_cluster = a_discrete.sum(0)
if raise_threshold is not None:
if (n_samples_per_cluster < n_raise).any():
min_samples = np.min(n_samples_per_cluster)
raise ValueError(
f"Discretizing leads to a cluster with `{min_samples}` samples, less than the threshold which is "
f"`{n_raise}` samples. Consider recomputing the fuzzy clustering."
)
if (n_samples_per_cluster > n_most_likely).any():
raise ValueError("Assigned more samples than requested.")
critical_clusters = np.where(n_samples_per_cluster < n_most_likely)[0]
return a_discrete, critical_clusters
def _series_from_one_hot_matrix(
membership: np.array,
index: Optional[Iterable] = None,
names: Optional[Iterable] = None,
) -> pd.Series:
"""
Create a pandas Series based on a one-hot encoded matrix.
Parameters
----------
membership
One-hot encoded membership matrix, of shape `(n_samples x n_clusters)` i.e. a `1` in position `i, j`
signifies that sample `i` belongs to cluster `j`.
index
Index for the Series. Careful, if this is not given, categories are removed when writing to AnnData.
Returns
-------
:class:`pandas.Series`
Series, indicating cluster membership for each sample. The data type of the categories is :class:`str`
and samples that belong to no cluster are assigned `NaN`.
"""
n_samples, n_clusters = membership.shape
if not isinstance(membership, np.ndarray):
raise TypeError(
f"Expected `membership` to be of type `numpy.ndarray`, found `{type(membership).__name__!r}`."
)
membership = np.asarray(
membership
) # change the type in case a lineage object was passed.
if membership.dtype != bool:
raise TypeError(
f"Expected `membership`'s elements to be boolean, found `{membership.dtype.name!r}`."
)
if not np.all(membership.sum(axis=1) <= 1):
raise ValueError("Not all items are one-hot encoded or empty.")
if (membership.sum(0) == 0).any():
logg.warning(f"Detected {np.sum((membership.sum(0) == 0))} empty categories")
if index is None:
index = range(n_samples)
if names is not None:
if len(names) != n_clusters:
raise ValueError(
f"Shape mismatch, length of `names` is `{len(names)}`, but `n_clusters={n_clusters}`."
)
else:
names = np.arange(n_clusters).astype("str")
target_series = pd.Series(index=index, dtype="category")
for vec, name in zip(membership.T, names):
target_series = target_series.cat.add_categories(name)
target_series[np.where(vec)[0]] = name
return target_series
def _get_cat_and_null_indices(
cat_series: Series,
) -> Tuple[np.ndarray, np.ndarray, Dict[Any, np.ndarray]]:
"""
Given a categorical :class:`pandas.Series`, get the indices corresponding to categories and `NaNs`.
Parameters
----------
cat_series
Series that contains categorical annotations.
Returns
-------
:class: `numpy.ndarray`
Array containing the indices of elements corresponding to categories in ``cat_series``.
:class: `numpy.ndarray`
Array containing the indices of elements corresponding to NaNs in ``cat_series``.
:class:`dict`
Dict containing categories of ``cat_series`` as keys and an array of corresponding indices as values.
"""
# check the dtype
if cat_series.dtype != "category":
raise TypeError(
f"Expected `cat_series` to be categorical, found `{cat_series.dtype.name!r}`."
)
# define a dict that has category names as keys and arrays of indices as values
lookup_dict = {
cat: np.where(cat_series == cat)[0] for cat in cat_series.cat.categories
}
all_indices = np.arange(len(cat_series))
# collect all category indices
cat_indices = np.concatenate(list(lookup_dict.values()))
# collect all null indices (the ones where we have NaN in `cat_series`)
null_indices = np.array(list(set(all_indices) - set(cat_indices)))
# check that null indices and cat indices are unique
assert (
np.unique(cat_indices, return_counts=True)[1] == 1
).all(), "Cat indices are not unique."
assert (
np.unique(null_indices, return_counts=True)[1] == 1
).all(), "Null indices are not unique."
# check that there is no overlap
assert (
len(set(cat_indices).intersection(set(null_indices))) == 0
), "Cat and null indices overlap."
# check that their untion is the set of all indices
assert set(cat_indices).union(set(null_indices)) == set(
all_indices
), "Some indices got lost on the way."
return cat_indices, null_indices, lookup_dict
def _check_estimator_type(estimator: Any) -> None:
    """Raise :class:`TypeError` unless ``estimator`` is a ``BaseEstimator`` subclass."""
    # prevents cyclic import
    from cellrank.tl.estimators._base_estimator import BaseEstimator

    if not isinstance(estimator, type):
        raise TypeError(
            f"Expected estimator to be a class, found `{type(estimator).__name__!r}`."
        )

    if not issubclass(estimator, BaseEstimator):
        # `estimator` is a class here, so report its own name -- the old code
        # printed the metaclass name via `type(estimator).__name__`
        raise TypeError(
            f"Expected estimator to be a subclass of `cellrank.tl.estimators.BaseEstimator`, "
            f"found `{estimator.__name__!r}`."
        )
def _calculate_absorption_time_moments(
    Q: Union[np.ndarray, spmatrix],
    trans_indices: np.ndarray,
    n: int,
    calculate_variance: bool = False,
    **kwargs,
) -> Tuple[np.ndarray, Optional[np.ndarray]]:
    """
    Calculate the mean time until absorption and optionally its variance.

    Parameters
    ----------
    Q
        Transient-transient submatrix of the transition matrix.
    trans_indices
        Transient indices.
    n
        Number of states of the full transition matrix.
    calculate_variance
        Whether to calculate also the variance of time to absorption, not only mean.
    kwargs
        Keyword arguments for :func:`cellrank.tl._lin_solver._solver_lin_system`.

    Returns
    -------
    Mean time until absorption and optionally its variance, based on ``calculate_variance``.
    """
    # `n_jobs` is reserved for the variance solve below; the mean solve runs
    # with n_jobs=1
    n_jobs = kwargs.pop("n_jobs", None)
    solve_kwargs = _filter_kwargs(_solve_lin_system, **kwargs)

    logg.debug("Calculating mean time to absorption to any absorbing state")
    # solve the linear system with right-hand side of all ones (use_eye=True
    # makes the solver work with I - Q)
    m = _solve_lin_system(
        Q,
        np.ones((Q.shape[0],), dtype=np.float32),
        n_jobs=1,
        use_eye=True,
        **solve_kwargs,
    ).squeeze()
    # scatter the transient-state means back into a full-length (n,) vector;
    # absorbing states keep 0
    mean = np.zeros(n, dtype=np.float32)
    var = None
    mean[trans_indices] = m

    if calculate_variance:
        logg.debug(
            "Calculating variance of mean time to absorption to any absorbing state"
        )

        I = speye(Q.shape[0]) if issparse(Q) else np.eye(Q.shape[0])  # noqa
        A_t = (I + Q).T
        B_t = (I - Q).T

        logg.debug("Solving equation (1/2)")
        X = _solve_lin_system(A_t, B_t, n_jobs=n_jobs, **kwargs).T
        # second right-hand side used by the variance computation
        y = m - X @ (m ** 2)

        logg.debug("Solving equation (2/2)")
        v = _solve_lin_system(X, y, use_eye=False, n_jobs=1, **solve_kwargs).squeeze()
        assert np.all(v >= 0), f"Encountered negative variance: `{v[v < 0]}`."

        var = np.zeros(n, dtype=np.float32)
        var[trans_indices] = v

    return mean, var
def _calculate_lineage_absorption_time_means(
Q: csr_matrix,
R: csr_matrix,
trans_indices: np.ndarray,
ixs: Dict[str, np.ndarray],
lineages: Dict[Sequence[str], str],
index: pd.Index,
**kwargs: Any,
) -> pd.DataFrame:
"""
Calculate the mean time until absorption and optionally its variance for specific lineages or their combinations.
Parameters
----------
Q
Transient-transient submatrix of the transition matrix.
R
Transient-recurrent submatrix of the transition matrix.
trans_indices
Transient indices.
n
Number of states of the full transition matrix.
ixs
Mapping of names of absorbing states and their indices in the full transition matrix.
lineages
Lineages for which to calculate the mean time until absorption moments.
kwargs
Keyword arguments for :func:`cellrank.tl._lin_solver._solver_lin_system`.
Returns
-------
:class:`pandas.DataFrame`
A :class:`pandas.DataFrame. with means and optionally variances of
mean time to absorption for each lineage in ``lineages``.
Uses more efficient implementation if compute the time for all lineages.
"""
n = len(index)
res = pd.DataFrame(index=index)
if len(lineages) == 1 and set(next(iter(lineages.keys()))) == set(ixs.keys()):
# use faster implementation in this case
name = ", ".join(ixs.keys())
res[f"{name} mean"], var = _calculate_absorption_time_moments(
Q,
trans_indices,
n,
calculate_variance=next(iter(lineages.values())) == "var",
**kwargs,
)
if var is not None:
res[f"{name} var"] = var
return res
res = pd.DataFrame()
tmp_ixs, cnt = {}, 0
for k, ix in ixs.items():
# get the indices to B | |
import collections
import fastzbarlight
import itertools
import logging
from multiprocessing import pool
import numpy as np
import time
import threading
# import psutil
import sys
from collections import namedtuple
from gym.utils import reraise
import re
from universe import error, pyprofile, spaces
# TODO: prefix the loggers
logger = logging.getLogger(__name__)
extra_logger = logging.getLogger('universe.extra.'+__name__)
def show(ob):
    """Open the observation `ob` (an array-like image) in PIL's default viewer."""
    from PIL import Image
    Image.fromarray(ob).show()
def standard_error(ary, axis, scale=1):
    """Return the (scaled) standard error of `ary` along `axis`.

    The standard deviation is divided by sqrt(len(ary) - 1) when more than one
    sample is present; with a single sample the bare std (0) is returned.
    """
    scaled = np.array(ary) * scale
    deviation = np.std(scaled, axis=axis)
    if len(scaled) <= 1:
        return deviation
    return deviation / np.sqrt(len(scaled) - 1)
def extract_timestamp(observation):
    """Decode a big-endian millisecond timestamp spread over two byte rows.

    The bytes of observation[0] followed by observation[1] form one big-endian
    integer of milliseconds, returned here as seconds (float).
    """
    total = 0
    for chunk in (observation[0], observation[1]):
        for byte in chunk:
            total = 256 * total + byte
    return total / 1000.
class MetadataDecoder(object):
    """Factory for metadata decoders, selected by the encoding's `type` field."""

    @classmethod
    def build(cls, metadata_encoding, pool, qr_pool, label):
        # pop `type` from a copy so the caller's dict is left untouched and the
        # remaining keys can be forwarded as keyword arguments
        config = metadata_encoding.copy()
        kind = config.pop('type')

        if kind == 'qrcode':
            return QRCodeMetadataDecoder(label=label, pool=pool, qr_pool=qr_pool, **config)
        if kind == 'pixels':
            return PixelsMetadataDecoder(label=label)
        raise error.Error('Invalid encoding: {}'.format(kind))
class AsyncDecode(object):
    """Schedules metadata decoding on a worker pool, returning finished results
    as they become ready and skipping frames whose metadata region is unchanged.
    """

    pool = None

    def __init__(self, pool, qr_pool, method, x, y, width, height):
        # crop rectangle of the metadata region within the observation
        self.x = x
        self.y = y
        self.width = width
        self.height = height

        self._last_img = None  # last grayscale crop scheduled (cache key)
        self.method = method   # decode callable executed on the qr pool
        self.results = []
        self.deque = collections.deque()  # in-flight AsyncResult objects
        self.pool = pool
        self.qr_pool = qr_pool

    def __call__(self, img, available_at):
        # Choose the return value: the oldest finished decode, else False
        if len(self.deque) > 0 and self.deque[0].ready():
            last = self.deque.popleft()
            res = last.get()
            if res is not None:
                pyprofile.timing('vnc_env.diagnostics.async_decode.latency', time.time() - res['available_at'])
        else:
            res = False
        pyprofile.gauge('vnc_env.diagnostics.async_decode.queue_depth', len(self.deque))

        # Just grayscale it by keeping only one component. Should be
        # good enough as this region is black and white anyway.
        grayscale = img[self.y:self.y+self.height, self.x:self.x+self.width, 0]

        # Only schedule a new decode when the region actually changed
        match = np.array_equal(self._last_img, grayscale)
        if not match:
            pyprofile.incr('vnc_env.diagnostics.async_decode.schedule')
            # sneakily copy if numpy hasn't, so it can be cached
            self._last_img = np.ascontiguousarray(grayscale)
            # renamed from `async`, which is a reserved keyword in Python 3.7+
            # and made this module fail to parse
            pending = self.qr_pool.apply_async(self.method, (self._last_img, time.time(), available_at))
            self.deque.append(pending)
        else:
            pyprofile.incr('vnc_env.diagnostics.async_decode.cache_hit')
        return res
class QRCodeMetadataDecoder(MetadataDecoder):
    """Decodes environment metadata from a QR code region of the observation.

    The actual scan runs asynchronously on a worker pool via AsyncDecode.
    """

    def __init__(self, pool, qr_pool, x, y, width, height, label):
        # asynchronous: results arrive on a later call to decode()
        self.flag_synchronous = False

        # crop rectangle of the QR code inside the observation
        self.x = x
        self.y = y
        self.width = width
        self.height = height
        self.label = label
        self.decode = AsyncDecode(pool, qr_pool, self._decode, x, y, width, height)

    def _decode(self, observation, start, available_at):
        # This method gets wrapped by AsyncDecode.__call__
        with pyprofile.push('vnc_env.diagnostics.QRCodeMetadataDecoder.qr_code_scanner'):
            encoded = fastzbarlight.qr_code_scanner(observation.tobytes(), self.width, self.height)
        if encoded is None:
            # Failed to parse!
            return
        if encoded.startswith(b'v1:'):
            encoded = encoded.decode('utf-8')
            if len(encoded) != len('v1:') + 12 + 12:
                raise error.Error('Bad length for metadata from environment: {}'.format(encoded))
            encoded = encoded[len('v1:'):]
            # two 12-hex-digit millisecond timestamps, converted to seconds
            last_update = int(encoded[:12], 16) / 1000.0
            last_action = int(encoded[12:24], 16) / 1000.
            return {
                # Timestamp on the image
                'now': last_update,
                # When the last probe was received
                'probe_received_at': last_action,
                'processing_start': start,
                'processing_end': time.time(),
                'available_at': available_at,
            }
        else:
            raise error.Error('Bad version string for metadata from environment: {}'.format(encoded))
class PixelsMetadataDecoder(MetadataDecoder):
    """Reads metadata timestamps directly from pixels, located via a fixed anchor pattern."""

    def __init__(self, label):
        # synchronous: decode() returns a result immediately rather than via a pool
        self.flag_synchronous = True
        # 2x2 RGB anchor pattern marking the top-left of the metadata block
        self.anchor = np.array([
            [(0x12, 0x34, 0x56), (0x78, 0x90, 0xab)],
            [(0x23, 0x45, 0x67), (0x89, 0x0a, 0xbc)],
        ], dtype=np.uint8)
        self.location = None           # (y, x) of the anchor, once found
        self.last_search_metadata = 0  # time.time() of the last anchor search
        self.label = label

    def _check_location(self, observation, location):
        # True when the full 2x2 anchor pattern is present at `location`
        y, x = location
        return np.all(observation[y:y+2, x:x+2] == self.anchor)

    def _find_metadata_location(self, observation):
        # scan for the anchor's top-left pixel; return (y, x) or False if absent
        ys, xs = np.where(np.all(observation == self.anchor[0, 0], axis=-1))
        if len(ys) == 0:
            extra_logger.info('[%s] Could not find metadata anchor pixel', self.label)
            return False
        # TODO: handle multiple hits
        assert len(ys) == 1
        location = (ys[0], xs[0])
        assert self._check_location(observation, location)
        extra_logger.info('[%s] Found metadata anchor pixel: %s', self.label, location)
        return location

    def _should_search_metadata(self):
        # rate-limit anchor searches to at most once per second
        return time.time() - self.last_search_metadata > 1

    def decode(self, observation, available_at=None):
        """Decode the two timestamps stored in the pixels following the anchor.

        Returns a metadata dict, or False when the anchor cannot be located.
        """
        start = time.time()
        # metadata pixel location hasn't been initialized or it has moved
        if not self.location or not self._check_location(observation,
                                                         self.location):
            # only search for metadata occasionally
            if self._should_search_metadata():
                self.location = self._find_metadata_location(observation)
                self.last_search_metadata = time.time()
            if not self.location:
                return False  # False translates to None in DiagnosticsInstance
        y, x = self.location
        # timestamps are packed into the pixels just right of the anchor --
        # see extract_timestamp for the byte layout
        now = extract_timestamp(observation[y, x+2:x+4])
        probe_received_at = extract_timestamp(observation[y, x+4:x+6])
        return {
            'now': now,
            'probe_received_at': probe_received_at,
            'processing_start': start,
            'processing_end': time.time(),
            'available_at': available_at,
        }
class Diagnostics(object):
    """Fans diagnostics work (action probes, metadata decoding) out over the
    per-environment DiagnosticsInstance objects.
    """

    def __init__(self, n, probe_key, ignore_clock_skew=False, metadata_encoding=None, disable_action_probes=False):
        # Each QR code takes about 1ms (and updates at 5fps). We do
        # our best to ensure the QR is processed in time for the next
        # step call (n/16 would put us right at the threshold).
        self.pool = pool.ThreadPool(max(int(n/4), 1))
        self.qr_pool = pool.ThreadPool(max(int(n/8), 1))
        self.lock = threading.RLock()

        self.instance_n = [None] * n
        self.ignore_clock_skew = ignore_clock_skew
        self.disable_action_probes = disable_action_probes
        self.metadata_encoding = metadata_encoding
        self.update(probe_key=probe_key, metadata_encoding=metadata_encoding)

    # only used in flashgames right now
    def update(self, probe_key, metadata_encoding):
        """Propagate a new probe key / metadata encoding to all live instances."""
        self.probe_key = probe_key
        self.metadata_encoding = metadata_encoding
        for instance in self.instance_n:
            if instance is not None:
                instance.update(probe_key=self.probe_key, metadata_encoding=self.metadata_encoding)

    def connect(self, i, network=None, label=None):
        # This should technically be synchronized
        self.instance_n[i] = DiagnosticsInstance(i, network, self.probe_key, self.ignore_clock_skew, self.metadata_encoding, disable_action_probes=self.disable_action_probes, qr_pool=self.qr_pool, pool=self.pool, label=label)

    def close(self, i=None):
        """Drop one instance, or (with no argument) close the pools and all instances."""
        if i is not None:
            self.instance_n[i] = None
        else:
            self.pool.close()
            self.qr_pool.close()
            for i in range(len(self.instance_n)):
                self.close(i)
            self.instance_n = None

    def add_probe(self, action_n, mask_n):
        if self.disable_action_probes or self.instance_n is None:
            return
        for instance, action, mask in zip(self.instance_n, action_n, mask_n):
            # Important that masking prevents us from adding probes. (This
            # avoids us e.g. filling in backticks into text boxes as the
            # environment boots.)
            if mask and instance:
                instance.add_probe(action)

    def add_metadata(self, observation_n, info_n, available_at=None):
        """Mutates the info_n dictionary."""
        if self.instance_n is None:
            return
        with pyprofile.push('vnc_env.diagnostics.Diagnostics.add_metadata'):
            # renamed from `async`, a reserved keyword in Python 3.7+ that made
            # this module fail to parse
            pending = self.pool.imap_unordered(
                self._add_metadata_i,
                zip(self.instance_n, observation_n, info_n, [available_at] * len(observation_n)))
            list(pending)

    def _add_metadata_i(self, args):
        instance, observation, info, now = args
        if instance is None or observation is None:
            return
        instance.add_metadata(observation, info, now)

    def extract_metadata(self, observation_n):
        return [instance._extract_metadata(observation)
                for instance, observation in zip(self.instance_n, observation_n)]

    def clear_probes_when_done(self, done_n):
        if self.instance_n is None:  # if we've been closed there's nothing to do
            return
        for instance, done in zip(self.instance_n, done_n):
            if done:
                instance.clear_probe()
class DiagnosticsInstance(object):
    """Per-environment diagnostics state: action-probe latency and metadata decoding."""

    # 2x2 grayscale anchor pattern (one constant per channel) for pixel metadata
    anchor = np.array([
        [(0x12, 0x12, 0x12), (0x78, 0x78, 0x78)],
        [(0x23, 0x23, 0x23), (0x89, 0x89, 0x89)],
    ], dtype=np.uint8)
    zero_clock_skew = np.zeros([2])
def __init__(self, i, network, probe_key, ignore_clock_skew, metadata_encoding, disable_action_probes, pool, qr_pool, label=None):
    '''
    network - either Network() object used to get clock skew, or None.
              If None, we skip measuring clock skew, and skip measuring
              diagnostics which rely on clock skew.
    '''
    if network is None:
        # without a network we cannot measure skew, so it must be ignored
        assert ignore_clock_skew
    self.ignore_clock_skew = ignore_clock_skew

    self.label = label
    self.i = i
    self.network = network
    self.probe_sent_at = None      # local time
    self.probe_received_at = None  # remote time
    self.action_latency_skewed = None
    self.last_observation_timestamp = None
    self.disable_action_probes = disable_action_probes
    self.pool = pool
    self.qr_pool = qr_pool
    # tri-state: None until first decode attempt, then True/False
    self.could_read_metadata = None

    self.update(probe_key=probe_key, metadata_encoding=metadata_encoding)
def update(self, probe_key, metadata_encoding):
    """Rebuild the probe key events and the metadata decoder."""
    key_down = spaces.KeyEvent(probe_key, down=True).compile()
    key_up = spaces.KeyEvent(probe_key, down=False).compile()
    self.probe = [key_down, key_up]
    if metadata_encoding is None:
        self.metadata_decoder = None
    else:
        self.metadata_decoder = MetadataDecoder.build(
            metadata_encoding, pool=self.pool, qr_pool=self.qr_pool,
            label=self.label)
def clear_probe(self):
    """Forget any in-flight action-latency probe."""
    self.probe_sent_at, self.probe_received_at = None, None
def add_probe(self, action):
    """Append the latency-probe key events to `action` unless one is pending."""
    network = self.network
    if network is not None and not network.active():
        # Can't usefully time a probe without an active network watcher.
        return
    sent_at = self.probe_sent_at
    # Expire a probe that has been outstanding for more than 10 seconds.
    if sent_at is not None and sent_at + 10 < time.time():
        extra_logger.warn('[%s] Probe to determine action latency timed out (was sent %s). (This is harmless, but worth knowing about.)', self.label, sent_at)
        self.probe_sent_at = None
    if self.probe_sent_at is None:
        extra_logger.debug('[%s] Sending out new action probe: %s', self.label, self.probe)
        self.probe_sent_at = time.time()
        # Mutates the caller's action list on purpose.
        action += self.probe
    assert self.probe_sent_at is not None
def add_metadata(self, observation, info, available_at=None):
"""Extract metadata from a pixel observation and add it to the info
"""
observation = observation['vision']
if observation is None: return
if self.network is not None and not self.network.active():
return
elif self.metadata_decoder is None:
return
elif observation is None:
return
# should return a dict with now/probe_received_at keys
with pyprofile.push('vnc_env.diagnostics.DiagnosticsInstance.add_metadata.decode'):
metadata = self.metadata_decoder.decode(observation, available_at=available_at)
if metadata is False:
# No metadata ready, though it doesn't mean parsing failed
metadata = None
elif metadata is None:
if self.could_read_metadata:
self.could_read_metadata = False
extra_logger.info('[%s] Stopped being able to read metadata (expected when environment resets)', self.label)
elif not self.could_read_metadata:
self.could_read_metadata = True
extra_logger.info('[%s] Started being able to read metadata', self.label)
if self.metadata_decoder.flag_synchronous and metadata is not None:
info['diagnostics.image_remote_time'] = metadata['now']
local_now = time.time()
if self.network is None:
# Assume the clock skew is zero. Should only be run on the
# same machine as the VNC server, such as the universe
# instance inside of the environment containers.
real_clock_skew = self.zero_clock_skew
else:
# Note: this is a 2-length vector of (min, max), so anything added to
# it is also going to be a 2-length vector.
# Most of the diagnostics below | |
get_redirected_drug_name(xml_tree)
return get_drug_via_wikipedia(redirected_drug_name)
else:
pc_mol_uri = "http://pubchem.ncbi.nlm.nih.gov" + \
"/summary/summary.cgi?cid=" + \
mol_name + "&disopt=DisplaySDF"
file_name = "pc-" + mol_name + ".mol"
coot_get_url(pc_mol_uri, file_name)
else:
db_mol_uri = "http://www.drugbank.ca/structures/structures/small_molecule_drugs/" + \
mol_name + ".mol"
file_name = mol_name + ".mol"
coot_get_url(db_mol_uri, file_name)
return file_name
def get_SMILES_for_comp_id_from_pdbe(comp_id):
    """Return the SMILES string for monomer comp_id, or False on failure.

    Tries, in order: the already-loaded dictionaries
    (SMILES_for_comp_id), a cached coot-download/PDBe-<comp_id>.cif
    file, and finally a download of the PDBe chemical-dictionary entry
    over FTP.
    """
    if not isinstance(comp_id, str):
        return False
    else:
        s = SMILES_for_comp_id(comp_id)
        if isinstance(s, str):
            # Already known to Coot; no file access needed.
            return s
        else:
            cif_file_name = os.path.join("coot-download",
                                         "PDBe-" + comp_id + ".cif")
            url = "ftp://ftp.ebi.ac.uk/pub/databases/msd/pdbechem/files/mmcif/" + \
                  comp_id + \
                  ".cif"
            make_directory_maybe("coot-download")
            if os.path.isfile(cif_file_name):
                # try the filesystem cache
                l = os.stat(cif_file_name).st_size
                if (l > 0):
                    read_cif_dictionary(cif_file_name)
                    s2 = SMILES_for_comp_id(comp_id)
                    if isinstance(s2, str):
                        return s2
                    # NOTE: if the cached file did not yield a SMILES we
                    # fall through to the network download below.
                else:
                    # give a dialog, saying that the file will not be
                    # overwritten
                    msg = cif_file_name + \
                          " exists but is empty." + \
                          "\nNot overwriting."
                    info_dialog(msg)
                    return False
            # use network then
            print "BL INFO:: getting url:", url
            state = coot_get_url(url, cif_file_name)
            if (state != 0):
                # non-zero state means the download failed
                msg = "Problem downloading\n" + \
                      url + "\n to file \n" + \
                      cif_file_name + \
                      "."
                info_dialog(msg)
                return False
            else:
                read_cif_dictionary(cif_file_name)
                s = SMILES_for_comp_id(comp_id)
                if isinstance(s, str):
                    return s
            # something probably went wrong if we got to here
            return False
# # load the redefining functions
# try:
# load_from_search_load_path("redefine_functions.py")
# # import redefine_functions
# except:
# print "load_from_search_load_path() of redefine_functions.py failed"
# pass
# Add file with filename to preferences directory
#
def file_to_preferences(filename):
    """Copy the file filename from the Coot python pkgdatadir directory
    to the user's preferences directory and execute it.

    Does nothing (beyond a status-bar message) when the reference file
    cannot be located or when a copy is already installed.
    """
    import shutil

    # Depending on where/how Coot was built, pkgdatadir may not be
    # available or may point the wrong way.
    # FIX: this used to assign to `pgkdata_dir` (typo) and then read the
    # undefined name `pkgdatadir`, raising NameError on every call.
    pkgdatadir = get_pkgdatadir()
    ref_py = False
    if os.path.isdir(pkgdatadir):
        ref_py = os.path.join(pkgdatadir, "python", filename)
    else:
        # Don't have an accessible pkgdatadir, so guess the install place.
        python_dir = os.getenv("COOT_PYTHON_DIR")
        # Guard against COOT_PYTHON_DIR being unset (os.path.isdir(None)
        # raises).
        if python_dir and os.path.isdir(python_dir):
            ref_py = os.path.join(python_dir, "python", filename)
    if not ref_py or not os.path.exists(ref_py):
        add_status_bar_text("Missing reference template key bindings.")
    else:
        # happy path
        home = os.getenv("HOME")
        if is_windows():
            home = os.getenv("COOT_HOME")
        if isinstance(home, str):
            pref_dir = os.path.join(home, ".coot-preferences")
            if not os.path.isdir(pref_dir):
                make_directory_maybe(pref_dir)
            pref_file = os.path.join(pref_dir, filename)
            # don't install it if it is already in place.
            if os.path.isfile(pref_file):
                s = "keybinding file " + pref_file + \
                    " already exists. Not overwritten."
                add_status_bar_text(s)
            else:
                shutil.copyfile(ref_py, pref_file)
                if os.path.isfile(pref_file):
                    execfile(pref_file, globals())
# add terminal residue is the normal thing we do with an aligned
# sequence, but also we can try to find the residue type of a
# residue in the middle of the chain that is modelled as an ALA, say.
#
# PE comment - not sure why needed.
#find_aligned_residue_type = find_terminal_residue_type
def using_gui():
    """Return a truthy value when Coot is running with a GUI.

    We shall see if coot_main_menubar is defined in guile, or whether the
    coot_python module has been loaded on the python side.
    """
    ret = False
    if coot_has_guile():
        ret = run_scheme_command("(defined? 'coot-main-menubar)")
    if not ret:
        # `in` works on Python 2 and 3; dict.has_key() is Python-2-only.
        ret = "coot_python" in globals()  # coot_main_menubar is not a global var
    return ret
############################################################################################
# end of Paul's scripting
############################################################################################
#
# some BL functions
#
############################################################################################
#
# for easier switching on of GL lighting on surfaces:
#
def GL_light_on():
    """Switch on GL lighting for surfaces."""
    set_do_GL_lighting(1)
    do_GL_lighting_state()
# and to turn it off
#
def GL_light_off():
    """Switch off GL lighting for surfaces."""
    set_do_GL_lighting(0)
    do_GL_lighting_state()
# Helper functions to set B-Factors
# set B-factor to bval for molecule imol
#
def set_b_factor_molecule(imol, bval):
    """Set the B-factor of every residue in molecule imol to bval."""
    for ch_id in chain_ids(imol):
        n_residues = chain_n_residues(ch_id, imol)
        first_res = seqnum_from_serial_number(imol, ch_id, 0)
        last_res = seqnum_from_serial_number(imol, ch_id, n_residues - 1)
        set_b_factor_residue_range(imol, ch_id, first_res, last_res, bval)
# reset B-factor for molecule imol to default value
#
def reset_b_factor_molecule(imol):
    """Reset the B-factor of every residue in imol to the default value."""
    for ch_id in chain_ids(imol):
        n_residues = chain_n_residues(ch_id, imol)
        first_res = seqnum_from_serial_number(imol, ch_id, 0)
        last_res = seqnum_from_serial_number(imol, ch_id, n_residues - 1)
        set_b_factor_residue_range(imol, ch_id, first_res, last_res,
                                   default_new_atoms_b_factor())
# reset B-factor for active residue to default value
#
def reset_b_factor_active_residue():
active_atom = active_residue()
if not active_atom:
print "No active atom"
else:
imol = active_atom[0]
chain_id = active_atom[1]
res_no = active_atom[2]
ins_code = active_atom[3]
atom_name = active_atom[4]
alt_conf = active_atom[5]
set_b_factor_residue_range(imol, chain_id, res_no, res_no, default_new_atoms_b_factor())
# BL module to find exe files
# we need this for popen as it requires the full path of the exe file
# we use arguments and keyword:
#
# program_name : name of exe to find
#
# args (i.e. path_names) : path name to search (usually "PATH", then maybe CCP4_BIN, ...,
# can be a single path as well)
# kwargs : for some extra arguments to
# add_extensions=[list] pass extra extensions to be tested
# only_extension=str use only this one to test
# no_disk_search=bool Dont bother searching the disk
# screen_info=bool print info etc in console
#
# then we search everywhere
#
# on OS where "which" is available we use this first, rather than
# searching in PATH etc.
#
# returns full path of exe when successful, False otherwise
#
def find_exe(program_name, *args, **kwargs):
import os, string
global search_disk
search_disk = None
info = True
# we shall check for full path names first
if (os.path.isfile(program_name)):
return os.path.abspath(program_name)
# if Unix we use which and python's command module to locate the
# executable (indepent if PATH was given); commands only available on
# unix! May use subprocess at some point...
# if the program is not found with which we use the usual way...
if (os.name == 'posix'):
import commands
program_exe = commands.getoutput('which ' + program_name)
if (os.path.isfile(program_exe)):
return program_exe
if (len(args) > 0):
path_ls = args
else:
# no extra PATH given, should at least check in this dir
# and in PATH
path_ls = ["PATH", os.getcwd()]
# setting of OS specific path properties
extensions = []
drives_ls = ["/"]
program_name_noext = program_name
# some windows magic
if (os.name == 'nt'):
drives_ls = get_windows_drives()
program_name_noext = strip_extension(program_name)
file_ext = file_name_extension(program_name)
# if extenion is explicitly given - only use this one
# otherwise try all possible ones on Windows, i.e PATHEXT
if (file_ext):
extensions = [file_ext]
else:
tmp_ext = os.environ["PATHEXT"].split(os.pathsep)
# list of extensions (no dot) only
extensions = map(lambda ext: ext[1:], tmp_ext)
if "only_extension" in kwargs:
if kwargs["only_extension"]:
extensions = kwargs["only_extension"]
if "add_extension" in kwargs:
extensions += kwargs["add_extensions"]
if "screen_info" in kwargs:
info = kwargs["screen_info"]
program_names = [program_name_noext]
if extensions:
program_names = map(lambda ext: program_name_noext + "." + ext,
extensions)
# usually we want to have the one with extension, if there is one
program_names += [program_name_noext]
for file_name in program_names:
# search the extra Paths
for search_path in path_ls:
if (os.path.isdir(search_path)):
# we have a single file name, not environ var
program_exe = os.path.join(search_path, file_name)
if (os.path.isfile(program_exe)):
if info:
print "BL INFO:: We found ", program_exe
return program_exe
else:
try:
primary_path = os.environ[search_path]
for path in string.split(primary_path, os.pathsep):
program_exe = os.path.join(path, file_name)
if (os.path.isfile(program_exe)):
if info:
print "BL INFO:: We found ", program_exe
return program_exe
except:
if info:
print "BL WARNING:: {0!s} not defined!".format(search_path)
# BL says: before we search everywhere we might want to ask
# the user if he actually wishes to do so!
# lets insert a small pygtk dialog and ask!
# only if graphics
if "no_disk_search" in kwargs:
no_search = kwargs["no_disk_search"]
else:
no_search = False
search_disk = False
if (use_gui_qm and not no_search):
search_disk = search_disk_dialog(program_name, path_ls)
if search_disk:
# search everywhere
for drive in drives_ls:
for root, dir, file in os.walk(drive):
program_exe = os.path.join(root, program_name)
if (os.path.isfile(program_exe)):
return program_exe
else:
if info:
print "BL INFO:: we don't search the whole disk for", program_name_noext
if info:
print "BL WARNING:: We cannot find {0!s} anywhere! Program {1!s} won't run!".format(program_name_noext, program_name_noext)
return False
# for running online docs
def open_url(url):
import webbrowser
try:
webbrowser.open(url,1,1)
except:
print "BL WARNING:: Cannot open the URL {0!s} in webbrowser {1!s}!".format(url, webbrowser.get())
# to reload modules
def reload_module(name):
    """Re-execute the python file `name` found in COOT_PYTHON_DIR."""
    import os
    coot_python_dir = os.getenv('COOT_PYTHON_DIR')
    module_file = os.path.join(coot_python_dir, name)
    execfile(module_file)
# to make print a function:
def printf(*args):
    # Python-2 print statement with a trailing comma: writes each arg
    # separated by spaces and suppresses the newline.
    for arg in args:
        print arg,
# to print elements of a list:
def printl(ls):
    """Print each element of ls via printf (space separated, no newline)."""
    for element in ls:
        printf(element)
# Where cmd is e.g. "bltwish"
# args is list, e.g. [loggraph, "refmac.log"]
#
# in python < 2.4 (and if no logfile)
#
# Returns the pid or False if failed.
#
# in python >= 2.4 (and with logfile)
#
# Returns the process and the open log file object
#
# uses os.spawn if python version < 2.4 otherwise subprocess
#
def run_concurrently(cmd, args=None, data_list=None, logfile=None, screen_flag=False):
if args is None:
args = []
import sys, string, os
major, minor, micro, releaselevel, serial = sys.version_info
cmd_execfile = ""
if not(command_in_path_qm(cmd)):
print "command ", cmd, " not found in $PATH!"
print "BL INFO:: Maybe we'll find it somewhere else later..."
else:
cmd_execfile = find_exe(cmd,"CCP4_BIN","PATH")
| |
<filename>zfit/_loss/binnedloss.py
# Copyright (c) 2021 zfit
from typing import Iterable, Optional, Set
import numpy as np
import tensorflow as tf
from uhi.typing.plottable import PlottableHistogram
from .. import z
from ..core.interfaces import ZfitBinnedData, ZfitBinnedPDF
from ..core.loss import BaseLoss
from ..util import ztyping
from ..util.checks import NONE
from ..util.container import convert_to_container
from ..util.warnings import warn_advanced_feature
from ..util.ztyping import OptionsInputType, ConstraintsInputType
from ..z import numpy as znp
@z.function(wraps='tensor')
def _spd_transform(values, probs, variances):
    """Scaled Poisson distribution from Bohm and Zech, NIMA 748 (2014) 1-6.

    Rescales the observed counts (and the model probabilities with them)
    by values/variances so the scaled counts carry the statistical weight
    implied by the given variances.
    """
    # reciprocal_no_nan maps zero variances to a zero scale factor.
    scale = values * tf.math.reciprocal_no_nan(variances)
    scaled_values = values * scale
    scaled_probs = probs * scale
    return scaled_values, scaled_probs
@z.function(wraps='tensor')
def poisson_loss_calc(probs, values, log_offset=None, variances=None):
    """Per-bin negative log Poisson term.

    probs: expected (model) counts per bin.
    values: observed counts per bin.
    log_offset: optional constant added to every bin's term.
    variances: optional bin variances; when given, the scaled-Poisson
        (Bohm/Zech) transform is applied first.
    """
    if variances is not None:
        values, probs = _spd_transform(values, probs, variances=variances)
    # Tiny epsilon keeps the logarithm finite for empty bins.
    eps = znp.asarray(1e-307, dtype=znp.float64)
    values = values + eps
    probs = probs + eps
    log_probs = znp.log(probs)
    poisson_term = tf.nn.log_poisson_loss(values,  # TODO: correct offset
                                          log_probs,
                                          compute_full_loss=False)  # TODO: optimization?
    if log_offset is not None:
        poisson_term = poisson_term + log_offset
    return poisson_term
class BaseBinned(BaseLoss):
    """Common base class for binned losses.

    Normalizes the model/data inputs to containers, converts any
    PlottableHistogram to zfit BinnedData and verifies that both the
    PDFs and the datasets are binned before delegating to BaseLoss.
    """

    def __init__(self,
                 model: ztyping.BinnedPDFInputType,
                 data: ztyping.BinnedDataInputType,
                 constraints: ConstraintsInputType = None,
                 options: OptionsInputType = None):
        """Initialize a binned loss.

        Args:
            model: Binned PDF(s); every entry must be a ZfitBinnedPDF.
            data: Binned dataset(s); PlottableHistograms are converted.
            constraints: Auxiliary measurements added as likelihood terms.
            options: Additional options (as a dict) for the loss.

        Raises:
            ValueError: if any model is not a binned PDF or any dataset
                is not (convertible to) binned data.
        """
        model = convert_to_container(model)
        data = convert_to_container(data)
        from zfit._data.binneddatav1 import BinnedData
        # Anything satisfying the UHI PlottableHistogram protocol that is
        # not already zfit binned data gets converted here.
        data = [
            BinnedData.from_hist(d)
            if (isinstance(d, PlottableHistogram) and not isinstance(d, ZfitBinnedData)) else d
            for d in data
        ]
        not_binned_pdf = [mod for mod in model if not isinstance(mod, ZfitBinnedPDF)]
        not_binned_data = [dat for dat in data if not isinstance(dat, ZfitBinnedData)]
        not_binned_pdf_msg = ("The following PDFs are not binned but need to be. They can be wrapped in an "
                              f"BinnedFromUnbinnedPDF. {not_binned_pdf} ")
        not_binned_data_msg = (
            "The following datasets are not binned but need to be. They can be converted to a binned "
            f"using the `to_binned` method. {not_binned_data}")
        error_msg = ""
        if not_binned_pdf:
            error_msg += not_binned_pdf_msg
        if not_binned_data:
            error_msg += not_binned_data_msg
        if error_msg:
            raise ValueError(error_msg)
        super().__init__(model=model, data=data, constraints=constraints, fit_range=None, options=options)

    def create_new(self,
                   model: ztyping.BinnedPDFInputType = NONE,
                   data: ztyping.BinnedDataInputType = NONE,
                   constraints: ConstraintsInputType = NONE,
                   options: OptionsInputType = NONE):
        r"""Create a new binned loss of this type. This is preferable over creating a new instance in most cases.

        Internals, such as certain optimizations will be shared and therefore the loss is made comparable.

        If something is not given, it will be taken from the current loss.

        Args:
            model: |@doc:loss.binned.init.model| Binned PDF(s) that return the normalized probability
               (`rel_counts` or `counts`) for
               *data* under the given parameters.
               If multiple model and data are given, they will be used
               in the same order to do a simultaneous fit. |@docend:loss.binned.init.model|
            data: |@doc:loss.binned.init.data| Binned dataset that will be given to the *model*.
               If multiple model and data are given, they will be used
               in the same order to do a simultaneous fit. |@docend:loss.binned.init.data|
            constraints: |@doc:loss.init.constraints| Auxiliary measurements ("constraints")
               that add a likelihood term to the loss.

               .. math::
                 \mathcal{L}(\theta) = \mathcal{L}_{unconstrained} \prod_{i} f_{constr_i}(\theta)

               Usually, an auxiliary measurement -- by its very nature -- should only be added once
               to the loss. zfit does not automatically deduplicate constraints if they are given
               multiple times, leaving the freedom for arbitrary constructs.

               Constraints can also be used to restrict the loss by adding any kinds of penalties. |@docend:loss.init.constraints|
            options: |@doc:loss.init.options| Additional options (as a dict) for the loss.
               Current possibilities include:

               - 'subtr_const' (default True): subtract from each points
                 log probability density a constant that
                 is approximately equal to the average log probability
                 density in the very first evaluation before
                 the summation. This brings the initial loss value closer to 0 and increases,
                 especially for large datasets, the numerical stability.

                 The value will be stored with 'subtr_const_value' and can also be given
                 directly.

                 The subtraction should not affect the minimum as the absolute
                 value of the NLL is meaningless. However,
                 with this switch on, one cannot directly compare
                 different likelihoods absolute value as the constant
                 may differ! Use `create_new` in order to have a comparable likelihood
                 between different losses

               These settings may extend over time. In order to make sure that a loss is the
               same under the same data, make sure to use `create_new` instead of instantiating
               a new loss as the former will automatically overtake any relevant constants
               and behavior. |@docend:loss.init.options|

        Returns:
        """
        if model is NONE:
            model = self.model
        if data is NONE:
            data = self.data
        if constraints is NONE:
            constraints = self.constraints
            # Copy so the new loss does not share the mutable container.
            if constraints is not None:
                constraints = constraints.copy()
        if options is NONE:
            options = self._options
            if isinstance(options, dict):
                options = options.copy()
        return type(self)(model=model, data=data, constraints=constraints, options=options)
class ExtendedBinnedNLL(BaseBinned):
    """Extended binned negative log likelihood: one Poisson term per bin."""

    def __init__(self,
                 model: ztyping.BinnedPDFInputType,
                 data: ztyping.BinnedDataInputType,
                 constraints: ConstraintsInputType = None,
                 options: OptionsInputType = None):
        r"""Extended binned likelihood using the expected number of events per bin with a poisson probability.

        |@doc:loss.init.explain.spdtransform| A scaled Poisson distribution is
        used as described by Bohm and Zech, NIMA 748 (2014) 1-6 |@docend:loss.init.explain.spdtransform|

        The binned likelihood is defined as

        .. math::
            \mathcal{L} = \prod_i \mathrm{Poiss}(N_{databin_i}; N_{modelbin_i})
            = \prod_i \frac{N_{modelbin_i}^{N_{databin_i}} e^{- N_{modelbin_i}}}{N_{databin_i}!}

        where :math:`databin_i` is the :math:`i^{th}` bin in the data and
        :math:`modelbin_i` is the :math:`i^{th}` bin of the model, the expected counts.

        |@doc:loss.init.explain.simultaneous| A simultaneous fit can be performed by giving one or more `model`, `data`, to the loss. The
        length of each has to match the length of the others

        .. math::
            \mathcal{L}_{simultaneous}(\theta | {data_0, data_1, ..., data_n})
            = \prod_{i} \mathcal{L}(\theta_i, data_i)

        where :math:`\theta_i` is a set of parameters and
        a subset of :math:`\theta` |@docend:loss.init.explain.simultaneous|

        |@doc:loss.init.explain.negativelog| For optimization purposes, it is often easier
        to minimize a function and to use a log transformation. The actual loss is given by

        .. math::
            \mathcal{L} = - \sum_{i}^{n} ln(f(\theta|x_i))

        and therefore being called "negative log ..." |@docend:loss.init.explain.negativelog|

        Args:
            model: |@doc:loss.binned.init.model| Binned PDF(s) that return the normalized probability
               (`rel_counts` or `counts`) for
               *data* under the given parameters.
               If multiple model and data are given, they will be used
               in the same order to do a simultaneous fit. |@docend:loss.binned.init.model|
            data: |@doc:loss.binned.init.data| Binned dataset that will be given to the *model*.
               If multiple model and data are given, they will be used
               in the same order to do a simultaneous fit. |@docend:loss.binned.init.data|
            constraints: |@doc:loss.init.constraints| Auxiliary measurements ("constraints")
               that add a likelihood term to the loss.

               .. math::
                 \mathcal{L}(\theta) = \mathcal{L}_{unconstrained} \prod_{i} f_{constr_i}(\theta)

               Usually, an auxiliary measurement -- by its very nature -- should only be added once
               to the loss. zfit does not automatically deduplicate constraints if they are given
               multiple times, leaving the freedom for arbitrary constructs.

               Constraints can also be used to restrict the loss by adding any kinds of penalties. |@docend:loss.init.constraints|
            options: |@doc:loss.init.options| Additional options (as a dict) for the loss.
               Current possibilities include:

               - 'subtr_const' (default True): subtract from each points
                 log probability density a constant that
                 is approximately equal to the average log probability
                 density in the very first evaluation before
                 the summation. This brings the initial loss value closer to 0 and increases,
                 especially for large datasets, the numerical stability.

                 The value will be stored with 'subtr_const_value' and can also be given
                 directly.

                 The subtraction should not affect the minimum as the absolute
                 value of the NLL is meaningless. However,
                 with this switch on, one cannot directly compare
                 different likelihoods absolute value as the constant
                 may differ! Use `create_new` in order to have a comparable likelihood
                 between different losses

               These settings may extend over time. In order to make sure that a loss is the
               same under the same data, make sure to use `create_new` instead of instantiating
               a new loss as the former will automatically overtake any relevant constants
               and behavior. |@docend:loss.init.options|
        """
        # errordef convention for a negative log-likelihood loss.
        self._errordef = 0.5
        super().__init__(model=model, data=data, constraints=constraints, options=options)

    @z.function(wraps='loss')
    def _loss_func(self, model: Iterable[ZfitBinnedPDF], data: Iterable[ZfitBinnedData],
                   fit_range, constraints, log_offset):
        """Sum the per-bin Poisson terms over all (model, data) pairs, plus constraints."""
        poisson_terms = []
        for mod, dat in zip(model, data):
            values = dat.values(  # TODO: right order of model and data?
                # obs=mod.obs
            )
            variances = dat.variances()
            probs = mod.counts(dat)
            poisson_term = poisson_loss_calc(probs, values, log_offset, variances)
            poisson_terms.append(poisson_term)  # TODO: change None
        nll = znp.sum(poisson_terms)
        if constraints:
            constraints = z.reduce_sum([c.value() for c in constraints])
            nll += constraints
        return nll

    @property
    def is_extended(self):
        """bool: whether the loss is extended (always True for this class)."""
        return True

    def _get_params(self, floating: Optional[bool] = True, is_yield: Optional[bool] = None,
                    extract_independent: Optional[bool] = True) -> Set["ZfitParameter"]:
        """Delegate parameter collection (incl. yields) to the base loss."""
        return super()._get_params(floating, is_yield, extract_independent)
class BinnedNLL(BaseBinned):
def __init__(self,
model: ztyping.BinnedPDFInputType,
data: ztyping.BinnedDataInputType,
constraints: ConstraintsInputType = None,
| |
{}
# self._cache_candidate = {}
# if weights != -1:
# self.electionData.weights = weights
# self._cache_voter = {}
# self._cache_candidate = {}
# if order is not None:
# self.electionData.order = order
# if candidates is not None:
# self.electionData.candidates = candidates
# self._cache_candidate = {}
# if winners is not None:
# self.electionData.winners = winners
# if ballots is not None:
# self.electionData.ballots = ballots
# if ties is not None:
# self.electionData.ties = ties
# ### Calculate voter distances
# calculate = False
# if distances is None:
# if ((self.electionData.candidates is not None) and
# (self.electionData.voters is not None)):
# calculate = True
# else:
# self.electionData.distances = distances
# if calculate:
# self.electionData.distances = vcalcs.voter_distances(
# voters=self.electionData.voters,
# candidates=self.electionData.candidates,
# weights=self.electionData.weights,
# order=self.electionData.order,
# )
# self.electionData.set(**kwargs)
# return
_default_categories = [
'voters',
'candidates',
'winner',
'winner_categories',
'ballot'
]
def set_categories(self, names, fulloutput=False):
    """Set output categories to output.

    Parameters
    ----------
    names : list of str
        Output category names.
    fulloutput : bool, optional
        If True output all available outputs. The default is False.

    Returns
    -------
    None.
    """
    # `if fulloutput:` instead of the anti-idiom `== True`.
    if fulloutput:
        names = self.get_categories()
    self._output_categories = names
    return
def get_categories(self):
    """Retrieve available output categories."""
    available = self._default_categories
    return available
# def add_output(self, output, name='', cache='_cache_result'):
# """Add an output object.
# This output's base class must be :class:`~votesim.metrics.BaseStats`.
# Parameters
# ----------
# name : str
# Name of output
# output : subtype of :class:`~votesim.metrics.BaseStats`
# User defined output. Define this output by creating a class
# inherited from :class:`~votesim.metrics.BaseStats`
# cache : str
# Name of output cache to store results. This determines when
# output is retained and when it is deleted and regenerated
# during election model creation. The options are
# - '_cache_voter' - Clear cache when voter data changes (least aggressive)
# - '_cache_candidate' - Clear cache when candidate data changes
# - '_cache_result' - Clear cache after every election (most aggressive)
# Returns
# -------
# None.
# """
# if name == '':
# try:
# name = getattr(output, 'name')
# except AttributeError:
# name = output.__name__.lower()
# if hasattr(self, name):
# s = 'Name "%s" for output already taken. Use another' % name
# raise ValueError(s)
# if type(output) is type:
# # Set cache decorator. The default clears cache every new election.
# output = utilities.lazy_property2(cache)(output)
# else:
# utilities.modify_lazy_property(instance=self,
# name=name,
# value=output,
# dictname=cache)
# setattr(self, name, output)
# #self._default_categories.append(name)
# self._output_categories.append(name)
# return
def get_dict(self):
    """Return {category name: that category's statistics dict} for the
    configured output categories."""
    return {name: getattr(self, name)._dict
            for name in self._output_categories}
def get_docs(self):
    """Return {category name: that category's statistic descriptions} for
    the configured output categories."""
    return {name: getattr(self, name)._docs
            for name in self._output_categories}
# def calculate_distance(self, data):
# """Re-calculate distance as the distance from Election may have error."""
# distances = vcalcs.voter_distances(
# voters=data.voters.pref,
# candidates=data.candidates.pref,
# weights=data.voters.weights,
# order=data.voters.order,
# )
# return distances
@utilities.lazy_property2('_cache_result')
def winner(self):
    """See :class:`~votesim.metrics.WinnerStats`."""
    stats = WinnerStats(self)
    return stats
@utilities.lazy_property2('_cache_result')
def winner_categories(self):
    """See :class:`~votesim.metrics.WinnerCategories`."""
    categories = WinnerCategories(self)
    return categories
@utilities.lazy_property2('_cache_result')
def ballot(self):
    """See :class:`~votesim.metrics.BallotStats`."""
    stats = BallotStats(self)
    return stats
def copy(self):
    """Return a deep copy of this object."""
    duplicate = copy.deepcopy(self)
    return duplicate
class WinnerStats(BaseStats):
    """Winner output statistics."""

    def _reinit(self):
        # Pull the pieces of the election output that these statistics read.
        stats = self._electionStats
        self._candidate_regrets = stats.candidates.regrets
        self._data = stats._election_data
        self._winners = self._data.winners
        self._name = 'winner'
        return

    @utilities.lazy_property
    def regret(self):
        """Overall satisfaction of all winners for all voters."""
        winner_regrets = self._candidate_regrets[self._winners]
        return np.mean(winner_regrets)

    @utilities.lazy_property
    def regret_efficiency_candidate(self):
        """Voter satisfaction efficiency, compared to random candidate."""
        candidates = self._electionStats.candidates
        R = candidates.regret_avg   # random-candidate baseline
        B = candidates.regret_best  # best-possible candidate
        U = self.regret
        return (U - R) / (B - R)

    @utilities.lazy_property
    def regret_efficiency_voter(self):
        """Voter satisfaction.

        VSE equation normalized to voter
        population regret of an ideal winner vs a random voter.
        """
        stats = self._electionStats
        R2 = stats.voters.regret_random_avg
        R1 = stats.voters.regret_median
        B = stats.candidates.regret_best
        U = self.regret
        return 1.0 - abs(U - B) / (R2 - R1)

    @utilities.lazy_property
    def regret_normed(self):
        """Voter regret normalized to ideal."""
        median_regret = self._electionStats.voters.regret_median
        return self.regret / median_regret - 1

    @property
    def winners(self):
        """int array: Index location of winners."""
        return self._data.winners

    @property
    def ties(self):
        """int array: Index location of ties."""
        return self._data.ties
class WinnerCategories(BaseStats):
    """Determine whether majority, condorcet, or utility winner was elected."""

    def _reinit(self):
        self._winners = self._electionStats._election_data.winners
        self._name = 'winner_categories'
        return

    @utilities.lazy_property
    def is_condorcet(self):
        """bool: check whether condorcet winner was elected."""
        idx = self._electionStats.candidates.winner_condorcet
        return bool(self._winners[0] == idx)

    @utilities.lazy_property
    def is_majority(self):
        """bool: check if majority winner was elected."""
        idx = self._electionStats.candidates.winner_majority
        return bool(self._winners[0] == idx)

    @utilities.lazy_property
    def is_utility(self):
        """bool: check if utility winner was elected."""
        idx = self._electionStats.candidates.winner_utility
        return bool(self._winners[0] == idx)
class BallotStats(BaseStats):
    """Ballot marking statistics."""

    def _reinit(self):
        self._ballots = self._electionStats._election_data.ballots
        self._name = 'ballot'
        return

    @utilities.lazy_property2('_cache_ballot')
    def _ballot_stats(self) -> dict:
        """Compute all ballot-marking statistics in a single pass."""
        ballots = np.atleast_2d(self._ballots)
        ballot_num, candidate_num = ballots.shape

        # Get number of candidates marked for each ballot
        marked_array = np.sum(ballots > 0, axis=1)

        # Get ballots where bullet voting happened
        bullet_num = np.sum(marked_array == 1)
        bullet_ratio = bullet_num / ballot_num

        # Get ballots where all but one candidate is marked
        full_num = np.sum(marked_array >= (candidate_num - 1))
        full_ratio = full_num / ballot_num

        marked_num = np.sum(marked_array)
        marked_avg = np.mean(marked_array)
        marked_std = np.std(marked_array)

        d = {}
        d['ballot.bullet.num'] = bullet_num
        d['ballot.bullet.ratio'] = bullet_ratio
        d['ballot.full.num'] = full_num
        d['ballot.full.ratio'] = full_ratio
        d['ballot.marked.num'] = marked_num
        d['ballot.marked.avg'] = marked_avg
        d['ballot.marked.std'] = marked_std
        return d

    @property
    def bullet_num(self):
        """Number of ballots where voters only bullet voted for 1 candidate."""
        return self._ballot_stats['ballot.bullet.num']

    @property
    def bullet_ratio(self):
        """Ratio of ballots where voters only bullet voted for 1 candidate."""
        return self._ballot_stats['ballot.bullet.ratio']

    @property
    def full_num(self):
        """Number of ballots where all but one candidate is marked."""
        # FIX: used to return 'ballot.bullet.ratio' (copy-paste bug).
        return self._ballot_stats['ballot.full.num']

    @property
    def full_ratio(self):
        """Ratio of ballots where all but one candidate is marked."""
        # FIX: used to return 'ballot.bullet.ratio' (copy-paste bug).
        return self._ballot_stats['ballot.full.ratio']

    @property
    def marked_num(self):
        """Total number of marked candidates for all ballots."""
        return self._ballot_stats['ballot.marked.num']

    @property
    def marked_avg(self):
        """Average number of marked candidates per ballot."""
        return self._ballot_stats['ballot.marked.avg']

    @property
    def marked_std(self):
        """Std deviation of marked candidates per ballot."""
        return self._ballot_stats['ballot.marked.std']
class PrRegret(BaseStats):
    """Metrics for proportional representation.

    Regret here is the preference distance from each voter to their
    nearest winner, aggregated in various ways.
    """

    def _reinit(self):
        """Cache distance and winner data from the parent ElectionStats."""
        election = self._electionStats._election_data
        candidates = self._electionStats._candidate_data
        self._distances = candidates.distances
        self._num_voters, self._num_candidates = self._distances.shape
        self._num_winners = len(election.winners)
        self._winners = election.winners
        self._name = 'pr_regret'
        return

    @utilities.decorators.lazy_property
    def _nearest_winners(self):
        """(a,) array: index locations of the nearest winners for each voter.

        For `a` total voters.
        """
        return np.argmin(self._distances[:, self._winners], axis=1)

    @utilities.decorators.lazy_property
    def _nearest_winner_distances(self):
        """(a,) array: preference distance from each voter to their nearest winner."""
        voter_rows = np.arange(self._num_voters)
        return self._distances[voter_rows, self._nearest_winners]

    @utilities.decorators.lazy_property
    def avg_regret(self):
        """float: Average voter regret for his nearest winner."""
        total = np.sum(self._nearest_winner_distances)
        return (total / self._num_voters) * self._num_winners

    @utilities.decorators.lazy_property
    def winners_regret(self):
        """(b,) array: Avg voter regrets for each winner."""
        nearest = self._nearest_winners
        distances = self._nearest_winner_distances
        totals = np.array([
            np.sum(distances[nearest == winner])
            for winner in range(self._num_winners)
        ])
        return totals / self._num_voters * self._num_winners

    @utilities.decorators.lazy_property
    def winners_regret_std(self):
        """float: Standard deviation of nearest regrets for each winner.

        An ideal proportional system ought to have low std deviation.
        """
        return np.std(self.winners_regret)

    @utilities.decorators.lazy_property
    def std_num_voters_per_winner(self):
        """float: Standard deviation of number of nearest voters for each winner."""
        counts = [
            np.sum(winner == self._nearest_winners)
            for winner in range(self._num_winners)
        ]
        ideal_share = self._num_voters / self._num_winners
        return np.std(counts) / ideal_share
def candidate_regrets(voters, candidates, weights=None, order=1):
"""Calculate the voter regret for each candidate or winner.
Parameters
----------
voters : array (a, n)
Voter preferences; n-dimensional voter cardinal preferences for n issues.
candidates : array (b, n)
Candidate preferences for `b` candidates and `n`-dimensional issues.
Returns
-------
out : (b,) array
Average preference distance | |
import json
from datetime import datetime
import hashlib
import hmac
from django.core.exceptions import ImproperlyConfigured
from django.test import override_settings, tag
from django.utils.timezone import utc
from mock import ANY
from anymail.exceptions import AnymailConfigurationError
from anymail.signals import AnymailTrackingEvent
from anymail.webhooks.mailgun import MailgunTrackingWebhookView
from .webhook_cases import WebhookTestCase, WebhookBasicAuthTestsMixin
# Signing key used to sign and verify webhook payloads throughout these tests.
TEST_WEBHOOK_SIGNING_KEY = 'TEST_WEBHOOK_SIGNING_KEY'
def mailgun_signature(timestamp, token, webhook_signing_key):
    """Generates a Mailgun webhook signature.

    The signature is the hex HMAC-SHA256 of timestamp concatenated with
    token, keyed by the webhook signing key.
    """
    # https://documentation.mailgun.com/en/latest/user_manual.html#securing-webhooks
    message = '{timestamp}{token}'.format(timestamp=timestamp, token=token)
    digest = hmac.new(key=webhook_signing_key.encode('ascii'),
                      msg=message.encode('ascii'),
                      digestmod=hashlib.sha256)
    return digest.hexdigest()
def mailgun_sign_payload(data, webhook_signing_key=TEST_WEBHOOK_SIGNING_KEY):
    """Add or complete Mailgun webhook signature block in data dict.

    Modifies the dict in place and returns it. Existing 'token' and
    'timestamp' entries are kept; missing ones get test defaults (the
    timestamp defaults to the event-data timestamp, truncated to seconds).
    """
    event_data = data.get('event-data', {})
    sig_block = data.setdefault('signature', {})
    token = sig_block.setdefault('token', '<KEY>')
    default_timestamp = str(int(float(event_data.get('timestamp', '1234567890.123'))))
    timestamp = sig_block.setdefault('timestamp', default_timestamp)
    sig_block['signature'] = mailgun_signature(timestamp, token, webhook_signing_key=webhook_signing_key)
    return data
def mailgun_sign_legacy_payload(data, webhook_signing_key=TEST_WEBHOOK_SIGNING_KEY):
    """Add a Mailgun (legacy-format) webhook signature to data dict.

    Modifies the dict in place and returns it; missing 'timestamp' and
    'token' fields get test defaults first.
    """
    timestamp = data.setdefault('timestamp', '1234567890')
    token = data.setdefault('token', '<KEY>')
    data['signature'] = mailgun_signature(timestamp, token, webhook_signing_key=webhook_signing_key)
    return data
def querydict_to_postdict(qd):
    """Converts a Django QueryDict to a TestClient.post(data)-style dict.

    Single-value fields appear as normal.
    Multi-value fields appear as a list (differs from QueryDict.dict).
    """
    result = {}
    for key, values in qd.lists():
        result[key] = values if len(values) > 1 else values[0]
    return result
@tag('mailgun')
class MailgunWebhookSettingsTestCase(WebhookTestCase):
    """Tests for Mailgun webhook signing-key configuration options."""

    def _post_tracking(self, payload):
        # Helper: POST a JSON payload to the tracking webhook endpoint.
        return self.client.post('/anymail/mailgun/tracking/',
                                content_type="application/json",
                                data=json.dumps(payload))

    def test_requires_webhook_signing_key(self):
        with self.assertRaisesMessage(ImproperlyConfigured, "MAILGUN_WEBHOOK_SIGNING_KEY"):
            self._post_tracking(mailgun_sign_payload({'event-data': {'event': 'delivered'}}))

    @override_settings(
        ANYMAIL_MAILGUN_API_KEY='TEST_API_KEY',
        ANYMAIL_MAILGUN_WEBHOOK_SIGNING_KEY='TEST_WEBHOOK_SIGNING_KEY',
    )
    def test_webhook_signing_is_different_from_api_key(self):
        """Webhooks should use MAILGUN_WEBHOOK_SIGNING_KEY, not MAILGUN_API_KEY, if both provided"""
        signed = mailgun_sign_payload({'event-data': {'event': 'delivered'}},
                                      webhook_signing_key='TEST_WEBHOOK_SIGNING_KEY')
        response = self._post_tracking(signed)
        self.assertEqual(response.status_code, 200)

    @override_settings(ANYMAIL_MAILGUN_API_KEY='TEST_API_KEY')
    def test_defaults_webhook_signing_to_api_key(self):
        """Webhooks should default to MAILGUN_API_KEY if MAILGUN_WEBHOOK_SIGNING_KEY not provided"""
        signed = mailgun_sign_payload({'event-data': {'event': 'delivered'}},
                                      webhook_signing_key='TEST_API_KEY')
        response = self._post_tracking(signed)
        self.assertEqual(response.status_code, 200)

    def test_webhook_signing_key_view_params(self):
        """Webhook signing key can be provided as a view param"""
        view = MailgunTrackingWebhookView.as_view(webhook_signing_key='VIEW_SIGNING_KEY')
        self.assertEqual(view.view_class(**view.view_initkwargs).webhook_signing_key,
                         b'VIEW_SIGNING_KEY')
        # Can also use `api_key` param for backwards compatiblity with earlier Anymail versions
        view = MailgunTrackingWebhookView.as_view(api_key='VIEW_API_KEY')
        self.assertEqual(view.view_class(**view.view_initkwargs).webhook_signing_key,
                         b'VIEW_API_KEY')
@tag('mailgun')
@override_settings(ANYMAIL_MAILGUN_WEBHOOK_SIGNING_KEY=TEST_WEBHOOK_SIGNING_KEY)
class MailgunWebhookSecurityTestCase(WebhookTestCase, WebhookBasicAuthTestsMixin):
    """Signature-verification tests for the Mailgun tracking webhook."""

    # No basic-auth warning: we check the webhook signature instead.
    should_warn_if_no_auth = False

    def _post_event(self, payload):
        # Helper: POST a JSON payload to the tracking webhook endpoint.
        return self.client.post('/anymail/mailgun/tracking/',
                                content_type="application/json",
                                data=json.dumps(payload))

    def call_webhook(self):
        return self._post_event(mailgun_sign_payload({'event-data': {'event': 'delivered'}}))

    # Additional tests are in WebhookBasicAuthTestsMixin

    def test_verifies_correct_signature(self):
        response = self._post_event(mailgun_sign_payload({'event-data': {'event': 'delivered'}}))
        self.assertEqual(response.status_code, 200)

    def test_verifies_missing_signature(self):
        response = self._post_event({'event-data': {'event': 'delivered'}})
        self.assertEqual(response.status_code, 400)

    def test_verifies_bad_signature(self):
        payload = mailgun_sign_payload({'event-data': {'event': 'delivered'}},
                                       webhook_signing_key="wrong signing key")
        self.assertEqual(self._post_event(payload).status_code, 400)
@tag('mailgun')
@override_settings(ANYMAIL_MAILGUN_WEBHOOK_SIGNING_KEY=TEST_WEBHOOK_SIGNING_KEY)
class MailgunTestCase(WebhookTestCase):
    """Payload-normalization tests for Mailgun's JSON tracking webhooks."""
    # Tests for Mailgun's new webhooks (announced 2018-06-29)

    def test_delivered_event(self):
        # This is an actual, complete (sanitized) "delivered" event as received from Mailgun.
        # (For brevity, later tests omit several payload fields that aren't used by Anymail.)
        raw_event = mailgun_sign_payload({
            "signature": {
                "timestamp": "1534108637",
                "token": "<KEY>",
                "signature": "...",
            },
            "event-data": {
                "tags": [],
                "timestamp": 1534108637.153125,
                "storage": {
                    "url": "https://sw.api.mailgun.net/v3/domains/example.org/messages/eyJwI...",
                    "key": "eyJwI...",
                },
                "recipient-domain": "example.com",
                "id": "hTWCTD81RtiDN-...",
                "campaigns": [],
                "user-variables": {},
                "flags": {
                    "is-routed": False,
                    "is-authenticated": True,
                    "is-system-test": False,
                    "is-test-mode": False,
                },
                "log-level": "info",
                "envelope": {
                    "sending-ip": "333.123.123.200",
                    "sender": "<EMAIL>",
                    "transport": "smtp",
                    "targets": "<EMAIL>",
                },
                "message": {
                    "headers": {
                        "to": "<EMAIL>",
                        "message-id": "20180812211713.<EMAIL>",
                        "from": "<EMAIL>",
                        "subject": "Testing",
                    },
                    "attachments": [],
                    "size": 809,
                },
                "recipient": "<EMAIL>",
                "event": "delivered",
                "delivery-status": {
                    "tls": True,
                    "mx-host": "smtp-in.example.com",
                    "attempt-no": 1,
                    "description": "",
                    "session-seconds": 3.5700838565826416,
                    "utf8": True,
                    "code": 250,
                    "message": "OK",
                    "certificate-verified": True,
                },
            },
        })
        response = self.client.post('/anymail/mailgun/tracking/',
                                    data=json.dumps(raw_event), content_type='application/json')
        self.assertEqual(response.status_code, 200)
        kwargs = self.assert_handler_called_once_with(self.tracking_handler, sender=MailgunTrackingWebhookView,
                                                      event=ANY, esp_name='Mailgun')
        event = kwargs['event']
        self.assertIsInstance(event, AnymailTrackingEvent)
        self.assertEqual(event.event_type, "delivered")
        self.assertEqual(event.timestamp, datetime(2018, 8, 12, 21, 17, 17, microsecond=153125, tzinfo=utc))
        self.assertEqual(event.message_id, "<20180812211713.<EMAIL>>")
        # Note that Anymail uses the "token" as its normalized event_id:
        self.assertEqual(event.event_id, "651869375b9df3c98fc15c4889b102119add1235c38fc92824")
        # ... if you want the Mailgun "event id", that's available through the raw esp_event:
        self.assertEqual(event.esp_event["event-data"]["id"], "hTWCTD81RtiDN-...")
        self.assertEqual(event.recipient, "<EMAIL>")
        self.assertEqual(event.esp_event, raw_event)
        self.assertEqual(event.tags, [])
        self.assertEqual(event.metadata, {})

    def test_failed_permanent_event(self):
        # A permanent "failed" event normalizes to Anymail's "bounced" type.
        raw_event = mailgun_sign_payload({
            "event-data": {
                "event": "failed",
                "severity": "permanent",
                "reason": "bounce",
                "recipient": "<EMAIL>",
                "timestamp": 1534110422.389832,
                "log-level": "error",
                "message": {
                    "headers": {
                        "to": "<EMAIL>",
                        "message-id": "20180812214658.1.<EMAIL>",
                        "from": "<NAME> ",
                    },
                },
                "delivery-status": {
                    "tls": True,
                    "mx-host": "aspmx.l.example.org",
                    "attempt-no": 1,
                    "description": "",
                    "session-seconds": 2.952177047729492,
                    "utf8": True,
                    "code": 550,
                    "message": "5.1.1 The email account that you tried to reach does not exist. Please try\n"
                               "5.1.1 double-checking the recipient's email address for typos",
                    "certificate-verified": True
                }
            },
        })
        response = self.client.post('/anymail/mailgun/tracking/',
                                    data=json.dumps(raw_event), content_type='application/json')
        self.assertEqual(response.status_code, 200)
        kwargs = self.assert_handler_called_once_with(self.tracking_handler, sender=MailgunTrackingWebhookView,
                                                      event=ANY, esp_name='Mailgun')
        event = kwargs['event']
        self.assertEqual(event.event_type, "bounced")
        self.assertEqual(event.recipient, "<EMAIL>")
        self.assertEqual(event.reject_reason, "bounced")
        self.assertEqual(event.description, "")
        self.assertEqual(event.mta_response,
                         "5.1.1 The email account that you tried to reach does not exist. Please try\n"
                         "5.1.1 double-checking the recipient's email address for typos")

    def test_failed_temporary_event(self):
        # A temporary "failed" event normalizes to Anymail's "deferred" type.
        raw_event = mailgun_sign_payload({
            "event-data": {
                "event": "failed",
                "severity": "temporary",
                "reason": "generic",
                "timestamp": 1534111899.659519,
                "log-level": "warn",
                "message": {
                    "headers": {
                        "to": "<EMAIL>",
                        "message-id": "20180812214638.<EMAIL>",
                        "from": "Test Sender ",
                        "subject": "Testing"
                    },
                },
                "recipient": "<EMAIL>",
                "delivery-status": {
                    "attempt-no": 3,
                    "description": "No MX for nomx.example.com",
                    "session-seconds": 0.0,
                    "retry-seconds": 1800,
                    "code": 498,
                    "message": "No MX for nomx.example.com"
                }
            },
        })
        response = self.client.post('/anymail/mailgun/tracking/',
                                    data=json.dumps(raw_event), content_type='application/json')
        self.assertEqual(response.status_code, 200)
        kwargs = self.assert_handler_called_once_with(self.tracking_handler, sender=MailgunTrackingWebhookView,
                                                      event=ANY, esp_name='Mailgun')
        event = kwargs['event']
        self.assertEqual(event.event_type, "deferred")
        self.assertEqual(event.recipient, "<EMAIL>")
        self.assertEqual(event.reject_reason, "other")
        self.assertEqual(event.description, "No MX for nomx.example.com")
        self.assertEqual(event.mta_response, "No MX for nomx.example.com")

    def test_failed_greylisted_event(self):
        # Greylisting is a temporary failure, so it also maps to "deferred".
        raw_event = mailgun_sign_payload({
            "event-data": {
                "event": "failed",
                "severity": "temporary",
                "reason": "greylisted",
                "timestamp": 1534111899.659519,
                "log-level": "warn",
                "message": {
                    "headers": {
                        "to": "<EMAIL>",
                        "message-id": "<EMAIL>12214638.<EMAIL>",
                        "from": "Test Sender ",
                        "subject": "Testing"
                    },
                },
                "recipient": "<EMAIL>",
                "delivery-status": {
                    "mx-host": "mx.example.com",
                    "attempt-no": 1,
                    "description": "Recipient address rejected: Greylisted",
                    "session-seconds": 0.0,
                    "retry-seconds": 300,
                    "code": 450,
                    "message": "Recipient address rejected: Greylisted"
                }
            },
        })
        response = self.client.post('/anymail/mailgun/tracking/',
                                    data=json.dumps(raw_event), content_type='application/json')
        self.assertEqual(response.status_code, 200)
        kwargs = self.assert_handler_called_once_with(self.tracking_handler, sender=MailgunTrackingWebhookView,
                                                      event=ANY, esp_name='Mailgun')
        event = kwargs['event']
        self.assertEqual(event.event_type, "deferred")
        self.assertEqual(event.recipient, "<EMAIL>")
        self.assertEqual(event.reject_reason, "other")
        self.assertEqual(event.description, "Recipient address rejected: Greylisted")
        self.assertEqual(event.mta_response, "Recipient address rejected: Greylisted")

    def test_rejected_event(self):
        # (The "rejected" event is documented and appears in Mailgun dashboard logs,
        # but it doesn't appear to be delivered through webhooks as of 8/2018.)
        # Note that this payload lacks the recipient field present in all other events.
        raw_event = mailgun_sign_payload({
            "event-data": {
                "event": "rejected",
                "timestamp": 1529704976.104692,
                "log-level": "warn",
                "reject": {
                    "reason": "Sandbox subdomains are for test purposes only.",
                    "description": "",
                },
                "message": {
                    "headers": {
                        "to": "Recipient Name <<EMAIL>>",
                        "message-id": "20180622220256.1.<EMAIL>",
                        "from": "<EMAIL>",
                        "subject": "Test Subject"
                    },
                },
            },
        })
        response = self.client.post('/anymail/mailgun/tracking/',
                                    data=json.dumps(raw_event), content_type='application/json')
        self.assertEqual(response.status_code, 200)
        kwargs = self.assert_handler_called_once_with(self.tracking_handler, sender=MailgunTrackingWebhookView,
                                                      event=ANY, esp_name='Mailgun')
        event = kwargs['event']
        self.assertEqual(event.event_type, "rejected")
        self.assertEqual(event.reject_reason, "other")
        self.assertEqual(event.description, "Sandbox subdomains are for test purposes only.")
        # Recipient is recovered from the message "to" header in this case:
        self.assertEqual(event.recipient, "<EMAIL>")

    def test_complained_event(self):
        raw_event = mailgun_sign_payload({
            "event-data": {
                "event": "complained",
                "id": "ncV2XwymRUKbPek_MIM-Gw",
                "timestamp": 1377214260.049634,
                "log-level": "warn",
                "recipient": "<EMAIL>",
                "message": {
                    "headers": {
                        "to": "<EMAIL>",
                        "message-id": "20130718032413.263EE2E092<EMAIL>",
                        "from": "<NAME> <<EMAIL>>",
                        "subject": "We are not spammer",
                    },
                },
            },
        })
        response = self.client.post('/anymail/mailgun/tracking/',
                                    data=json.dumps(raw_event), content_type='application/json')
        self.assertEqual(response.status_code, 200)
        kwargs = self.assert_handler_called_once_with(self.tracking_handler, sender=MailgunTrackingWebhookView,
                                                      event=ANY, esp_name='Mailgun')
        event = kwargs['event']
        self.assertEqual(event.event_type, "complained")
        self.assertEqual(event.recipient, "<EMAIL>")

    def test_unsubscribed_event(self):
        raw_event = mailgun_sign_payload({
            "event-data": {
                "event": "unsubscribed",
                "id": "W3X4JOhFT-OZidZGKKr9iA",
                "timestamp": 1377213791.421473,
                "log-level": "info",
                "recipient": "<EMAIL>",
                "message": {
                    "headers": {
                        "message-id": "20130822232216.13966.<EMAIL>"
                    }
                },
            },
        })
        response = self.client.post('/anymail/mailgun/tracking/',
                                    data=json.dumps(raw_event), content_type='application/json')
        self.assertEqual(response.status_code, 200)
        kwargs = self.assert_handler_called_once_with(self.tracking_handler, sender=MailgunTrackingWebhookView,
                                                      event=ANY, esp_name='Mailgun')
        event = kwargs['event']
        self.assertEqual(event.event_type, "unsubscribed")
        self.assertEqual(event.recipient, "<EMAIL>")

    def test_opened_event(self):
        raw_event = mailgun_sign_payload({
            "event-data": {
                "event": "opened",
                "timestamp": 1534109600.089676,
                "recipient": "<EMAIL>",
                "tags": ["welcome", "variation-A"],
                "user-variables": {
                    "cohort": "2018-08-B",
                    "user_id": "123456"
                },
                "message": {
                    # Mailgun *only* includes the message-id header for opened, clicked events...
                    "headers": {
                        "message-id": "20180812213139.1.<EMAIL>"
                    }
                },
                "geolocation": {
                    "country": "US",
                    "region": "CA",
                    "city": "San Francisco"
                },
                "ip": "888.222.444.111",
                "client-info": {
                    "client-type": "browser",
                    "client-os": "OS X",
                    "device-type": "desktop",
                    "client-name": "Chrome",
                    "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6)..."
                },
            }
        })
        response = self.client.post('/anymail/mailgun/tracking/',
                                    data=json.dumps(raw_event), content_type='application/json')
        self.assertEqual(response.status_code, 200)
        kwargs = self.assert_handler_called_once_with(self.tracking_handler, sender=MailgunTrackingWebhookView,
                                                      event=ANY, esp_name='Mailgun')
        event = kwargs['event']
        self.assertEqual(event.event_type, "opened")
        self.assertEqual(event.recipient, "<EMAIL>")
        # Mailgun "tags" and "user-variables" become Anymail tags/metadata:
        self.assertEqual(event.tags, ["welcome", "variation-A"])
        self.assertEqual(event.metadata, {"cohort": "2018-08-B", "user_id": "123456"})

    def test_clicked_event(self):
        raw_event = mailgun_sign_payload({
            "event-data": {
                "event": "clicked",
                "timestamp": 1534109600.089676,
                "recipient": "<EMAIL>",
                "url": "https://example.com/test"
            }
        })
        response = self.client.post('/anymail/mailgun/tracking/',
                                    data=json.dumps(raw_event), content_type='application/json')
        self.assertEqual(response.status_code, 200)
        kwargs = self.assert_handler_called_once_with(self.tracking_handler, sender=MailgunTrackingWebhookView,
                                                      event=ANY, esp_name='Mailgun')
        event = kwargs['event']
        self.assertEqual(event.event_type, "clicked")
        self.assertEqual(event.click_url, "https://example.com/test")
@tag('mailgun')
@override_settings(ANYMAIL_MAILGUN_WEBHOOK_SIGNING_KEY=TEST_WEBHOOK_SIGNING_KEY)
class MailgunLegacyTestCase(WebhookTestCase):
# Tests for Mailgun's "legacy" webhooks
# (which were the only webhooks available prior to Anymail 4.0)
def test_delivered_event(self):
raw_event = mailgun_sign_legacy_payload({
'domain': 'example.com',
'message-headers': json.dumps([
["Sender", "from=example.com"],
["Date", "Thu, 21 Apr 2016 17:55:29 +0000"],
["X-Mailgun-Sid", "WyIxZmY4ZSIsICJtZWRtdW5kc0BnbWFpbC5jb20iLCAiZjFjNzgyIl0="],
["Received", "by luna.mailgun.net with HTTP; Thu, 21 Apr 2016 17:55:29 +0000"],
["Message-Id", "<<EMAIL>0421175529.19495.<EMAIL>>"],
["To", "<EMAIL>"],
["From", "<EMAIL>"],
["Subject", "Webhook testing"],
["Mime-Version", "1.0"],
["Content-Type", ["multipart/alternative", {"boundary": "74fb561763da440d8e6a034054974251"}]]
]),
'X-Mailgun-Sid': 'WyIxZmY4ZSIsICJtZWRtdW5kc0BnbWFpbC5jb20iLCAiZjFjNzgyIl0=',
'token': '06c96bafc3f42a66b9edd546347a2fe18dc23461fe80dc52f0',
'timestamp': '1461261330',
'Message-Id': '<20160421175529.19495.8903<EMAIL>>',
'recipient': '<EMAIL>',
'event': 'delivered',
})
| |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import unittest
import warnings
import numpy as np
import random
import six
import time
import itertools
import collections
from collections import defaultdict
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.backward import append_backward
from paddle.fluid.op import Operator
from paddle.fluid.executor import Executor
from paddle.fluid.framework import Program, OpProtoHolder, Variable
from testsuite import create_op, set_input, append_input_output, append_loss_ops
from paddle.fluid import unique_name
from white_list import op_accuracy_white_list, check_shape_white_list, compile_vs_runtime_white_list, no_check_set_white_list
def _set_use_system_allocator(value=None):
    """Set the FLAGS_use_system_allocator global flag; returns the old value.

    Passing None leaves the flag unchanged (it is rewritten to its
    current value).
    """
    flag_name = "FLAGS_use_system_allocator"
    old_value = core.globals()[flag_name]
    if value is None:
        value = old_value
    core.globals()[flag_name] = value
    return old_value
def randomize_probability(batch_size, class_num, dtype='float32'):
    """Return a (batch_size, class_num) array of random probabilities.

    Each row is drawn uniformly from [0.1, 1.0) and then normalized so it
    sums to 1 (a valid categorical distribution per row).

    Improvement: replaces the per-row Python loop over six.moves.xrange
    with an equivalent vectorized broadcast division.
    """
    prob = np.random.uniform(
        0.1, 1.0, size=(batch_size, class_num)).astype(dtype)
    # Normalize each row by its sum (same elementwise ops as the old loop).
    prob /= prob.sum(axis=1, keepdims=True)
    return prob
def get_numeric_gradient(place,
                         scope,
                         op,
                         inputs,
                         input_to_check,
                         output_names,
                         delta=0.005,
                         in_place=False):
    """Numerically estimate d(mean of outputs)/d(input_to_check) using
    central finite differences, perturbing one tensor element at a time.

    Returns an ndarray with the same shape as the checked tensor.
    Supports FP16/FP32/FP64 tensors only; raises ValueError otherwise.
    """
    # FIXME: change this method by compile time concepts
    set_input(scope, op, inputs, place)

    def product(dim):
        # Total number of elements = product of all dimensions.
        return six.moves.reduce(lambda a, b: a * b, dim, 1)

    tensor_to_check = scope.find_var(input_to_check).get_tensor()
    tensor_size = product(tensor_to_check.shape())
    # Function attribute counting how many "large" (>= 100 element) inputs
    # were gradient-checked; OpTest.tearDownClass inspects it to enforce
    # the shape whitelist.
    if not hasattr(get_numeric_gradient, 'check_shape_time'):
        get_numeric_gradient.check_shape_time = 0
    if tensor_size >= 100:
        get_numeric_gradient.check_shape_time += 1
    tensor_to_check_dtype = tensor_to_check._dtype()
    if tensor_to_check_dtype == core.VarDesc.VarType.FP32:
        tensor_to_check_dtype = np.float32
    elif tensor_to_check_dtype == core.VarDesc.VarType.FP64:
        tensor_to_check_dtype = np.float64
    elif tensor_to_check_dtype == core.VarDesc.VarType.FP16:
        tensor_to_check_dtype = np.float16
        # set delta as np.float16, will automatic convert to float32, float64
        delta = np.array(delta).astype(np.float16)
    else:
        raise ValueError("Not supported data type " + str(
            tensor_to_check_dtype))

    def get_output():
        # Run the op, then average the means of all requested outputs.
        sum = []
        op.run(scope, place)
        for output_name in output_names:
            sum.append(
                np.array(scope.find_var(output_name).get_tensor()).astype(
                    tensor_to_check_dtype).mean())
        return tensor_to_check_dtype(np.array(sum).sum() / len(output_names))

    gradient_flat = np.zeros(shape=(tensor_size, ), dtype=tensor_to_check_dtype)

    def __get_elem__(tensor, i):
        # Read flat element i (FP16 has no direct element accessor, so it
        # round-trips through a numpy copy).
        if tensor_to_check_dtype == np.float16:
            numpy_tensor = np.array(tensor).astype(np.float16)
            numpy_tensor = numpy_tensor.flatten()
            return numpy_tensor[i]
        elif tensor_to_check_dtype == np.float32:
            return tensor._get_float_element(i)
        else:
            return tensor._get_double_element(i)

    def __set_elem__(tensor, i, e):
        # Write flat element i (FP16 path rewrites the whole tensor).
        if tensor_to_check_dtype == np.float16:
            numpy_tensor = np.array(tensor).astype(np.float16)
            shape = numpy_tensor.shape
            numpy_tensor = numpy_tensor.flatten()
            numpy_tensor[i] = e
            numpy_tensor = numpy_tensor.reshape(shape)
            tensor.set(numpy_tensor, place)
        elif tensor_to_check_dtype == np.float32:
            tensor._set_float_element(i, e)
        else:
            tensor._set_double_element(i, e)

    # we only compute gradient of one element each time.
    # we use a for loop to compute the gradient of every element.
    for i in six.moves.xrange(tensor_size):
        if in_place:
            set_input(scope, op, inputs, place)
        # get one input element throw it's index i.
        origin = __get_elem__(tensor_to_check, i)
        # add delta to it, run op and then get the sum of the result tensor.
        x_pos = origin + delta
        __set_elem__(tensor_to_check, i, x_pos)
        y_pos = get_output()
        if in_place:
            set_input(scope, op, inputs, place)
        x_neg = origin - delta
        __set_elem__(tensor_to_check, i, x_neg)
        y_neg = get_output()
        # Restore the original value before moving to the next element.
        __set_elem__(tensor_to_check, i, origin)
        # Central difference: (f(x+d) - f(x-d)) / (2d).
        gradient_flat[i] = (y_pos - y_neg) / delta / 2
    return gradient_flat.reshape(tensor_to_check.shape())
def skip_check_grad_ci(reason=None):
    """Decorator to skip check_grad CI.

    Check_grad is required for Op test cases. However, there are some special
    cases that do not need to do check_grad. This decorator is used to skip the
    check_grad of the above cases.

    Note: the execution of unit test will not be skipped. It just avoids
    check_grad checking in tearDownClass method by setting a
    `no_need_check_grad` flag on the decorated class.

    Example:
        @skip_check_grad_ci(reason="For inference, check_grad is not required.")
        class TestInference(OpTest):
    """
    # A human-readable reason is mandatory so skips stay auditable.
    if not isinstance(reason, str):
        raise AssertionError("The reason for skipping check_grad is required.")

    def decorator(test_cls):
        test_cls.no_need_check_grad = True
        return test_cls

    return decorator
class OpTest(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        '''Fix random seeds to remove randomness from tests'''
        # Save RNG states so tearDownClass can restore them.
        cls._np_rand_state = np.random.get_state()
        cls._py_rand_state = random.getstate()
        # Per-class bookkeeping consumed by try_call_once / tearDownClass.
        cls.call_once = False
        cls.dtype = None
        cls.outputs = {}
        np.random.seed(123)
        random.seed(124)
        # Force the system allocator on; keep the old flag to restore later.
        cls._use_system_allocator = _set_use_system_allocator(True)
    @classmethod
    def tearDownClass(cls):
        """Restore random seeds"""
        np.random.set_state(cls._np_rand_state)
        random.setstate(cls._py_rand_state)
        _set_use_system_allocator(cls._use_system_allocator)

        def is_empty_grad_op(op_type):
            # True when no usable grad kernel is registered for this op.
            # For MKLDNN-flagged tests, only MKLDNN grad kernels count.
            all_op_kernels = core._get_all_register_op_kernels()
            grad_op = op_type + '_grad'
            if grad_op in all_op_kernels.keys():
                if hasattr(cls, "use_mkldnn") and cls.use_mkldnn == True:
                    grad_op_kernels = all_op_kernels[grad_op]
                    for grad_op_kernel in grad_op_kernels:
                        if 'MKLDNN' in grad_op_kernel:
                            return False
                else:
                    return False
            return True

        if not hasattr(cls, "op_type"):
            raise AssertionError(
                "This test do not have op_type in class attrs,"
                " please set self.__class__.op_type=the_real_op_type manually.")

        # case in NO_FP64_CHECK_GRAD_CASES and op in NO_FP64_CHECK_GRAD_OP_LIST should be fixed
        if not hasattr(cls, "no_need_check_grad") \
            and not is_empty_grad_op(cls.op_type):
            if cls.dtype is None or \
                (cls.dtype == np.float16 \
                 and cls.op_type not in op_accuracy_white_list.NO_FP16_CHECK_GRAD_OP_LIST \
                 and not hasattr(cls, "exist_check_grad")):
                raise AssertionError("This test of %s op needs check_grad." %
                                     cls.op_type)
            if cls.dtype in [np.float32, np.float64] \
                and cls.op_type not in op_accuracy_white_list.NO_FP64_CHECK_GRAD_OP_LIST \
                and not hasattr(cls, 'exist_fp64_check_grad'):
                raise AssertionError(
                    "This test of %s op needs check_grad with fp64 precision." %
                    cls.op_type)
        # NOTE(review): the shape check below reads OpTest.op_type (the base
        # class attribute) rather than cls.op_type -- presumably relying on
        # _append_ops writing self.__class__.op_type; confirm before changing.
        if hasattr(get_numeric_gradient, 'check_shape_time') \
            and get_numeric_gradient.check_shape_time == 0 \
            and OpTest.op_type not in check_shape_white_list.NOT_CHECK_OP_LIST \
            and OpTest.op_type not in check_shape_white_list.NEED_TO_FIX_OP_LIST:
            raise AssertionError(
                "At least one input's shape should be large than or equal to 100 for "
                + OpTest.op_type + " Op.")
def try_call_once(self, data_type):
if not self.call_once:
self.call_once = True
self.dtype = data_type
def infer_dtype_from_inputs_outputs(self, inputs, outputs):
def is_np_data(input):
return isinstance(input, (np.ndarray, np.generic))
def infer_dtype(numpy_dict, dtype_set):
assert isinstance(
numpy_dict,
dict), "self.inputs, self.outputs must be numpy_dict"
# the inputs are as follows:
# case 1: inputs = {'X': x}
# case 2: inputs = {'X': (x, x_lod)}
# case 3: inputs = {"X": [("x0", x0), ("x1", x1), ("x2", x2)]}
# case 4: inputs = {'X': [("x1", (x1, [x1_lod1])), ("x2", (x2, [x2_.lod2]))]}
# TODO(juncaipeng) infer dtype from inputs maybe obtain wrong type.
for _, var_value in six.iteritems(numpy_dict):
if is_np_data(var_value): # case 1
dtype_set.add(var_value.dtype)
elif isinstance(var_value, (list, tuple)): # case 2, 3, 4
for sub_val_value in var_value:
if is_np_data(sub_val_value): # case 2
dtype_set.add(sub_val_value.dtype)
elif len(sub_val_value) > 1 and is_np_data(
sub_val_value[1]): # case 3
dtype_set.add(sub_val_value[1].dtype)
elif len(sub_val_value) > 1 and isinstance(sub_val_value[1], (list, tuple)) \
and is_np_data(sub_val_value[1][0]): # case 4
dtype_set.add(sub_val_value[1][0].dtype)
# infer dtype from inputs, and dtype means the precision of the test
# collect dtype of all inputs
dtype_set = set()
infer_dtype(inputs, dtype_set)
dtype_list = [
np.dtype(np.float64), np.dtype(np.float32), np.dtype(np.float16),
np.dtype(np.int64), np.dtype(np.int32), np.dtype(np.int16),
np.dtype(np.int8), np.dtype(np.uint8), np.dtype(np.bool)
]
# check the dtype in dtype_list in order, select the first dtype that in dtype_set
for dtype in dtype_list:
if dtype in dtype_set:
self.dtype = dtype
break
# save dtype in class attr
self.__class__.dtype = self.dtype
def feed_var(self, input_vars, place):
feed_map = {}
for var_name in input_vars:
if isinstance(input_vars[var_name], list):
for name, np_value in self.inputs[var_name]:
tensor = core.LoDTensor()
if isinstance(np_value, tuple):
tensor.set(np_value[0], place)
tensor.set_recursive_sequence_lengths(np_value[1])
else:
tensor.set(np_value, place)
feed_map[name] = tensor
else:
tensor = core.LoDTensor()
if isinstance(self.inputs[var_name], tuple):
tensor.set(self.inputs[var_name][0], place)
tensor.set_recursive_sequence_lengths(self.inputs[var_name][
1])
else:
tensor.set(self.inputs[var_name], place)
feed_map[var_name] = tensor
return feed_map
def _append_ops(self, block):
self.__class__.op_type = self.op_type # for ci check, please not delete it for now
if hasattr(self, "use_mkldnn"):
self.__class__.use_mkldnn = self.use_mkldnn
op_proto = OpProtoHolder.instance().get_op_proto(self.op_type)
"infer datatype from inputs and outputs for this test case"
self.infer_dtype_from_inputs_outputs(self.inputs, self.outputs)
inputs = append_input_output(block, op_proto, self.inputs, True,
self.dtype)
outputs = append_input_output(block, op_proto, self.outputs, False,
self.dtype)
if hasattr(self, "cache_name_list"):
for name in self.cache_name_list:
inputs[name] = block.create_var(
name=name,
persistable=True,
type=core.VarDesc.VarType.RAW,
stop_gradient=True)
op = block.append_op(
type=self.op_type,
inputs=inputs,
outputs=outputs,
attrs=self.attrs if hasattr(self, "attrs") else dict())
# infer variable type and infer shape in compile-time
op.desc.infer_var_type(block.desc)
op.desc.infer_shape(block.desc)
return op
def _get_io_vars(self, block, numpy_inputs):
inputs = {}
for name, value in six.iteritems(numpy_inputs):
if isinstance(value, list):
var_list = [
block.var(sub_name) for sub_name, sub_value in value
]
inputs[name] = var_list
else:
inputs[name] = block.var(name)
return inputs
    def _get_inputs(self, block):
        # Resolve self.inputs slot names to Variables in `block`.
        return self._get_io_vars(block, self.inputs)
    def _get_outputs(self, block):
        # Resolve self.outputs slot names to Variables in `block`.
        return self._get_io_vars(block, self.outputs)
    def calc_output(self, place):
        # Run the op on `place`; returns only the outputs from _calc_output
        # (its second return value is discarded).
        outs, _ = self._calc_output(place)
        return outs
def _create_var_from_numpy(self, value):
if isinstance(value, tuple):
data = value[0]
lod = value[1]
v = fluid.dygraph.base.to_variable(value=data)
v.value().get_tensor().set_recursive_sequence_lengths(lod)
return v
else:
return fluid.dygraph.base.to_variable(value)
def append_input_output_for_dygraph(self, op_proto, np_list, is_input,
if_return_inputs_grad_dict, block):
def create_var(np_value, name, is_input, if_return_inputs_grad_dict):
np_value_temp = np_value
has_lod = False
lod_temp = None
if isinstance(np_value, tuple):
np_value_temp = np_value[0]
has_lod = True
lod_temp = np_value[1]
if is_input:
v = self._create_var_from_numpy(np_value_temp)
if if_return_inputs_grad_dict:
v.stop_gradient = False
if has_lod:
v.value().get_tensor().set_recursive_sequence_lengths(
lod_temp)
else:
v = block.create_var(
name=name,
dtype=np_value_temp.dtype,
type=core.VarDesc.VarType.LOD_TENSOR,
persistable=False,
stop_gradient=False)
return v
| |
<filename>tests/test_cart.py
import json
from unittest.mock import MagicMock, Mock
from uuid import uuid4
import pytest
from django.contrib.auth.models import AnonymousUser
from django.core import signing
from django.core.exceptions import ObjectDoesNotExist
from django.http import HttpResponse
from django.urls import reverse
from measurement.measures import Weight
from prices import Money, TaxedMoney
from saleor.checkout import forms, utils
from saleor.checkout.context_processors import cart_counter
from saleor.checkout.models import Cart
from saleor.checkout.utils import (
add_variant_to_cart, change_cart_user, find_open_cart_for_user)
from saleor.checkout.views import clear_cart, update_cart_line
from saleor.core.exceptions import InsufficientStock
from saleor.core.utils.taxes import ZERO_TAXED_MONEY
from saleor.discount.models import Sale
from saleor.shipping.utils import get_shipping_price_estimate
@pytest.fixture()
def cart_request_factory(rf, monkeypatch):
    """Factory for GET requests that carry a user (anonymous by default),
    the active discounts, no taxes, and a stubbed signed cart-token cookie."""
    def create_request(user=None, token=None):
        request = rf.get(reverse('home'))
        if user is None:
            request.user = AnonymousUser()
        else:
            request.user = user
        request.discounts = Sale.objects.all()
        request.taxes = None
        # Pretend the given token was read from the signed cookie.
        monkeypatch.setattr(
            request, 'get_signed_cookie', Mock(return_value=token))
        return request
    return create_request
@pytest.fixture()
def anonymous_cart(db):
    """An open cart that belongs to no user."""
    cart, _created = Cart.objects.get_or_create(user=None)
    return cart
@pytest.fixture()
def user_cart(customer_user):
    """An open cart assigned to the customer user."""
    cart, _created = Cart.objects.get_or_create(user=customer_user)
    return cart
@pytest.fixture()
def local_currency(monkeypatch):
    """Disable currency conversion in the cart views: to_local_currency
    returns the given price unchanged."""
    monkeypatch.setattr(
        'saleor.checkout.views.to_local_currency',
        lambda price, currency: price)
def test_get_or_create_anonymous_cart_from_token(anonymous_cart, user_cart):
    """An existing anonymous cart is reused; an unknown token creates a new
    anonymous cart; a user-owned cart's token is never returned."""
    queryset = Cart.objects.all()
    carts = list(queryset)
    cart = utils.get_or_create_anonymous_cart_from_token(anonymous_cart.token)
    assert Cart.objects.all().count() == 2
    assert cart == anonymous_cart
    # test against new token
    cart = utils.get_or_create_anonymous_cart_from_token(uuid4())
    assert Cart.objects.all().count() == 3
    assert cart not in carts
    assert cart.user is None
    cart.delete()
    # test against getting cart assigned to user
    cart = utils.get_or_create_anonymous_cart_from_token(user_cart.token)
    assert Cart.objects.all().count() == 3
    assert cart not in carts
    assert cart.user is None
def test_get_or_create_user_cart(
        customer_user, anonymous_cart, user_cart, admin_user):
    """The user's open cart is reused; a cart already assigned to another
    user is picked up rather than duplicated."""
    cart = utils.get_or_create_user_cart(customer_user)
    assert Cart.objects.all().count() == 2
    assert cart == user_cart
    # test against creating new carts
    Cart.objects.create(user=admin_user)
    queryset = Cart.objects.all()
    carts = list(queryset)
    cart = utils.get_or_create_user_cart(admin_user)
    assert Cart.objects.all().count() == 3
    assert cart in carts
    assert cart.user == admin_user
def test_get_anonymous_cart_from_token(anonymous_cart, user_cart):
    """Lookup never creates carts: unknown tokens and user-owned tokens
    both yield None."""
    cart = utils.get_anonymous_cart_from_token(anonymous_cart.token)
    assert Cart.objects.all().count() == 2
    assert cart == anonymous_cart
    # test against new token
    cart = utils.get_anonymous_cart_from_token(uuid4())
    assert Cart.objects.all().count() == 2
    assert cart is None
    # test against getting cart assigned to user
    cart = utils.get_anonymous_cart_from_token(user_cart.token)
    assert Cart.objects.all().count() == 2
    assert cart is None
def test_get_user_cart(anonymous_cart, user_cart, admin_user, customer_user):
    """The customer's open cart is returned without creating new carts."""
    cart = utils.get_user_cart(customer_user)
    assert Cart.objects.all().count() == 2
    assert cart == user_cart
def test_get_or_create_cart_from_request(
        cart_request_factory, monkeypatch, customer_user):
    """Authenticated requests are routed to get_or_create_user_cart,
    anonymous requests to get_or_create_anonymous_cart_from_token."""
    token = uuid4()
    queryset = Cart.objects.all()
    request = cart_request_factory(user=customer_user, token=token)
    user_cart = Cart(user=customer_user)
    anonymous_cart = Cart()
    mock_get_for_user = Mock(return_value=user_cart)
    mock_get_for_anonymous = Mock(return_value=anonymous_cart)
    monkeypatch.setattr(
        'saleor.checkout.utils.get_or_create_user_cart', mock_get_for_user)
    monkeypatch.setattr(
        'saleor.checkout.utils.get_or_create_anonymous_cart_from_token',
        mock_get_for_anonymous)
    returned_cart = utils.get_or_create_cart_from_request(request, queryset)
    mock_get_for_user.assert_called_once_with(customer_user, queryset)
    assert returned_cart == user_cart
    # Anonymous request: the cookie token path must be used instead.
    request = cart_request_factory(user=None, token=token)
    returned_cart = utils.get_or_create_cart_from_request(request, queryset)
    mock_get_for_anonymous.assert_called_once_with(token, queryset)
    assert returned_cart == anonymous_cart
def test_get_cart_from_request(
        monkeypatch,
        customer_user,
        cart_request_factory
):
    """
    PDP.M1

    get_cart_from_request returns the user's cart when one exists and
    otherwise falls back to an unsaved Cart instance (nothing persisted);
    anonymous requests go through the cookie-token lookup instead.
    """
    queryset = Cart.objects.all()
    token = uuid4()
    request = cart_request_factory(user=customer_user, token=token)
    user_cart = Cart(user=customer_user)
    mock_get_for_user = Mock(return_value=user_cart)
    monkeypatch.setattr(
        'saleor.checkout.utils.get_user_cart', mock_get_for_user)
    returned_cart = utils.get_cart_from_request(request, queryset)
    mock_get_for_user.assert_called_once_with(customer_user, queryset)
    assert returned_cart == user_cart
    # No cart found for the user: a fresh, unsaved cart must be returned.
    mock_get_for_user = Mock(return_value=None)
    monkeypatch.setattr(
        'saleor.checkout.utils.get_user_cart', mock_get_for_user)
    returned_cart = utils.get_cart_from_request(request, queryset)
    mock_get_for_user.assert_called_once_with(customer_user, queryset)
    assert not Cart.objects.filter(token=returned_cart.token).exists()
    # Anonymous request: the signed-cookie token lookup is used.
    anonymous_cart = Cart()
    mock_get_for_anonymous = Mock(return_value=anonymous_cart)
    monkeypatch.setattr(
        'saleor.checkout.utils.get_anonymous_cart_from_token',
        mock_get_for_anonymous)
    request = cart_request_factory(user=None, token=token)
    returned_cart = utils.get_cart_from_request(request, queryset)
    mock_get_for_user.assert_called_once_with(customer_user, queryset)
    assert returned_cart == anonymous_cart
    # Token lookup misses: again an unsaved cart is returned.
    mock_get_for_anonymous = Mock(return_value=None)
    monkeypatch.setattr(
        'saleor.checkout.utils.get_anonymous_cart_from_token',
        mock_get_for_anonymous)
    returned_cart = utils.get_cart_from_request(request, queryset)
    assert not Cart.objects.filter(token=returned_cart.token).exists()
def test_find_and_assign_anonymous_cart(anonymous_cart, customer_user, client):
    """Logging in with an anonymous cart token in the cookie transfers
    that cart to the authenticated user."""
    cart_token = anonymous_cart.token
    # Anonymous user has a cart with token stored in cookie
    value = signing.get_cookie_signer(salt=utils.COOKIE_NAME).sign(cart_token)
    client.cookies[utils.COOKIE_NAME] = value
    # Anonymous logs in
    response = client.post(
        reverse('account:login'),
        {'username': customer_user.email, 'password': 'password'}, follow=True)
    assert response.context['user'] == customer_user
    # User should have only one cart, the same as he had previously in
    # anonymous session
    authenticated_user_carts = customer_user.carts.all()
    assert authenticated_user_carts.count() == 1
    assert authenticated_user_carts[0].token == cart_token
def test_login_without_a_cart(customer_user, client):
    """Logging in with no cart cookie must not create a cart for the user."""
    assert utils.COOKIE_NAME not in client.cookies
    response = client.post(
        reverse('account:login'),
        {'username': customer_user.email, 'password': 'password'}, follow=True)
    assert response.context['user'] == customer_user
    authenticated_user_carts = customer_user.carts.all()
    assert authenticated_user_carts.count() == 0
def test_login_with_incorrect_cookie_token(customer_user, client):
    """A signed cookie holding a bogus token is ignored on login: no cart
    gets assigned to the user."""
    value = signing.get_cookie_signer(salt=utils.COOKIE_NAME).sign('incorrect')
    client.cookies[utils.COOKIE_NAME] = value
    response = client.post(
        reverse('account:login'),
        {'username': customer_user.email, 'password': 'password'}, follow=True)
    assert response.context['user'] == customer_user
    authenticated_user_carts = customer_user.carts.all()
    assert authenticated_user_carts.count() == 0
def test_find_and_assign_anonymous_cart_and_close_opened(
        customer_user, user_cart, anonymous_cart, cart_request_factory):
    """Assigning an anonymous cart to a user removes the user's previously
    open cart."""
    token = anonymous_cart.token
    token_user = user_cart.token
    request = cart_request_factory(user=customer_user, token=token)
    # find_and_assign_anonymous_cart is a decorator factory: wrap a dummy
    # view whose response supports delete_cookie, then call it.
    utils.find_and_assign_anonymous_cart()(
        lambda request: Mock(delete_cookie=lambda name: None))(request)
    token_cart = Cart.objects.filter(token=token).first()
    user_cart = Cart.objects.filter(token=token_user).first()
    assert token_cart is not None
    assert token_cart.user.pk == customer_user.pk
    assert not user_cart
def test_adding_without_checking(cart, product):
    """check_quantity=False skips the stock check, so an oversized
    quantity is still added as one line."""
    variant = product.variants.get()
    add_variant_to_cart(cart, variant, 1000, check_quantity=False)
    assert len(cart) == 1
def test_adding_zero_quantity(cart, product):
    """Adding quantity 0 must not create a cart line."""
    variant = product.variants.get()
    add_variant_to_cart(cart, variant, 0)
    assert len(cart) == 0
def test_adding_same_variant(cart, product, taxes):
    """Adding the same variant twice merges into one line with the summed
    quantity, and the subtotal reflects taxes."""
    variant = product.variants.get()
    add_variant_to_cart(cart, variant, 1)
    add_variant_to_cart(cart, variant, 2)
    assert len(cart) == 1
    assert cart.quantity == 3
    cart_total = TaxedMoney(net=Money('24.39', 'USD'), gross=Money(30, 'USD'))
    assert cart.get_subtotal(taxes=taxes) == cart_total
def test_replacing_same_variant(cart, product):
    """replace=True overwrites the line quantity instead of adding to it."""
    variant = product.variants.get()
    add_variant_to_cart(cart, variant, 1, replace=True)
    add_variant_to_cart(cart, variant, 2, replace=True)
    assert len(cart) == 1
    assert cart.quantity == 2
def test_adding_invalid_quantity(cart, product):
    """Negative quantities are rejected with ValueError."""
    variant = product.variants.get()
    with pytest.raises(ValueError):
        add_variant_to_cart(cart, variant, -1)
def test_getting_line(cart, product):
    """get_line returns None before the variant is added and the matching
    line afterwards."""
    variant = product.variants.get()
    assert cart.get_line(variant) is None
    add_variant_to_cart(cart, variant)
    assert cart.lines.get() == cart.get_line(variant)
def test_shipping_detection(cart, product):
    """An empty cart needs no shipping; one with a physical variant does."""
    assert not cart.is_shipping_required()
    variant = product.variants.get()
    add_variant_to_cart(cart, variant, replace=True)
    assert cart.is_shipping_required()
def test_cart_counter(monkeypatch):
    """The context processor exposes the request cart's quantity as
    'cart_counter'."""
    fake_cart = Mock(quantity=4)
    monkeypatch.setattr(
        'saleor.checkout.context_processors.get_cart_from_request',
        Mock(return_value=fake_cart))
    context = cart_counter(Mock())
    assert context == {'cart_counter': 4}
def test_get_prices_of_discounted_products(cart_with_item):
    """Each unit of a discounted line contributes one price entry."""
    discounted_line = cart_with_item.lines.first()
    discounted_product = discounted_line.variant.product
    prices = utils.get_prices_of_discounted_products(
        cart_with_item, [discounted_product])
    # One unit price per item in the line (local was misspelled
    # "excepted_value"; loop variable is unused).
    expected_value = [
        discounted_line.variant.get_price()
        for _ in range(discounted_line.quantity)]
    assert list(prices) == expected_value
def test_contains_unavailable_variants():
    """A cart is flagged iff some line's variant fails its stock check."""
    out_of_stock = Mock(
        check_quantity=Mock(side_effect=InsufficientStock('')))
    fake_cart = MagicMock()
    fake_cart.__iter__ = Mock(
        return_value=iter([Mock(variant=out_of_stock)]))
    assert utils.contains_unavailable_variants(fake_cart)
    in_stock = Mock(check_quantity=Mock())
    fake_cart.__iter__ = Mock(return_value=iter([Mock(variant=in_stock)]))
    assert not utils.contains_unavailable_variants(fake_cart)
def test_remove_unavailable_variants(cart, product):
    """Lines whose variant dropped to zero stock are removed from the cart."""
    variant = product.variants.get()
    add_variant_to_cart(cart, variant)
    variant.quantity = 0
    variant.save()
    utils.remove_unavailable_variants(cart)
    assert len(cart) == 0
def test_check_product_availability_and_warn(
        monkeypatch, cart, product):
    """When unavailable variants are detected, the cart gets cleaned up;
    otherwise it is left untouched."""
    variant = product.variants.get()
    add_variant_to_cart(cart, variant)
    monkeypatch.setattr(
        'django.contrib.messages.warning', Mock(warning=Mock()))
    # No unavailable variants: the cart is left as-is.
    monkeypatch.setattr(
        'saleor.checkout.utils.contains_unavailable_variants',
        Mock(return_value=False))
    utils.check_product_availability_and_warn(MagicMock(), cart)
    assert len(cart) == 1
    # Unavailable variants present: the removal hook runs (stubbed to
    # zero out the line quantity).
    monkeypatch.setattr(
        'saleor.checkout.utils.contains_unavailable_variants',
        Mock(return_value=True))
    monkeypatch.setattr(
        'saleor.checkout.utils.remove_unavailable_variants',
        lambda c: add_variant_to_cart(cart, variant, 0, replace=True))
    utils.check_product_availability_and_warn(MagicMock(), cart)
    assert len(cart) == 0
def test_add_to_cart_form(cart, product):
    """A valid AddToCartForm adds the variant; the base form raises
    NotImplementedError when get_variant is not provided, and empty data
    is invalid."""
    variant = product.variants.get()
    add_variant_to_cart(cart, variant, 3)
    data = {'quantity': 1}
    form = forms.AddToCartForm(data=data, cart=cart, product=product)
    form.get_variant = Mock(return_value=variant)
    assert form.is_valid()
    form.save()
    assert cart.lines.count() == 1
    assert cart.lines.filter(variant=variant).exists()
    # Without patching get_variant the base implementation must raise.
    with pytest.raises(NotImplementedError):
        data = {'quantity': 1}
        form = forms.AddToCartForm(data=data, cart=cart, product=product)
        form.is_valid()
    # Missing quantity makes the form invalid.
    data = {}
    form = forms.AddToCartForm(data=data, cart=cart, product=product)
    assert not form.is_valid()
def test_form_when_variant_does_not_exist():
    """The form is invalid when variant resolution raises
    ObjectDoesNotExist."""
    lines = []
    fake_cart = Mock(
        add=lambda variant, quantity: lines.append(Mock()),
        get_line=Mock(return_value=Mock(quantity=1)))
    form = forms.AddToCartForm(
        data={'quantity': 1}, cart=fake_cart, product=Mock())
    form.get_variant = Mock(side_effect=ObjectDoesNotExist)
    assert not form.is_valid()
@pytest.mark.parametrize('track_inventory', (True, False))
def test_add_to_cart_form_when_insufficient_stock(product, track_inventory):
    """Insufficient stock only invalidates the form when the variant
    tracks inventory."""
    variant = product.variants.first()
    variant.track_inventory = track_inventory
    variant.save()
    cart_lines = []
    # Fake cart already holding 49 units of the variant.
    cart = Mock(
        add=lambda variant, quantity: cart_lines.append(variant),
        get_line=Mock(return_value=Mock(quantity=49)))
    form = forms.AddToCartForm(data={'quantity': 1}, cart=cart, product=Mock())
    form.get_variant = Mock(return_value=variant)
    if track_inventory:
        assert not form.is_valid()
    else:
        assert form.is_valid()
def test_replace_cart_line_form(cart, product):
    """ReplaceCartLineForm overwrites the line's quantity on save."""
    variant = product.variants.get()
    initial_quantity = 1
    replaced_quantity = 4
    add_variant_to_cart(cart, variant, initial_quantity)
    data = {'quantity': replaced_quantity}
    form = forms.ReplaceCartLineForm(data=data, cart=cart, variant=variant)
    assert form.is_valid()
    form.save()
    assert cart.quantity == replaced_quantity
def test_replace_cartline_form_when_insufficient_stock(
        monkeypatch, cart, product):
    """With insufficient stock the form is invalid, saving it fails, and
    the cart quantity stays unchanged."""
    variant = product.variants.get()
    initial_quantity = 1
    replaced_quantity = 4
    add_variant_to_cart(cart, variant, initial_quantity)
    exception_mock = InsufficientStock(
        Mock(quantity_available=2))
    monkeypatch.setattr(
        'saleor.product.models.ProductVariant.check_quantity',
        Mock(side_effect=exception_mock))
    data = {'quantity': replaced_quantity}
    form = forms.ReplaceCartLineForm(data=data, cart=cart, variant=variant)
    assert not form.is_valid()
    # Saving an invalid form has no cleaned quantity -> KeyError.
    with pytest.raises(KeyError):
        form.save()
    assert cart.quantity == initial_quantity
def test_view_empty_cart(client, request_cart):
    """The cart index renders fine for an empty cart."""
    response = client.get(reverse('cart:index'))
    assert response.status_code == 200
def test_view_cart_without_taxes(client, request_cart_with_item):
    """Without tax configuration the rendered line totals carry no tax."""
    response = client.get(reverse('cart:index'))
    response_cart_line = response.context[0]['cart_lines'][0]
    cart_line = request_cart_with_item.lines.first()
    assert not response_cart_line['get_total'].tax.amount
    assert response_cart_line['get_total'] == cart_line.get_total()
    assert response.status_code == 200
def test_view_cart_with_taxes(
        settings, client, request_cart_with_item, vatlayer):
    """With vatlayer taxes configured the rendered line totals include tax."""
    settings.DEFAULT_COUNTRY = 'PL'
    response = client.get(reverse('cart:index'))
    response_cart_line = response.context[0]['cart_lines'][0]
    cart_line = request_cart_with_item.lines.first()
    assert response_cart_line['get_total'].tax.amount
    assert response_cart_line['get_total'] == cart_line.get_total(
        taxes=vatlayer)
    assert response.status_code == 200
def test_view_update_cart_quantity(
        client, local_currency, request_cart_with_item):
    """The AJAX update-line view changes the line quantity."""
    variant = request_cart_with_item.lines.get().variant
    response = client.post(
        reverse('cart:update-line', kwargs={'variant_id': variant.pk}),
        data={'quantity': 3}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
    assert response.status_code == 200
    assert request_cart_with_item.quantity == 3
def test_view_update_cart_quantity_with_taxes(
        client, local_currency, request_cart_with_item, vatlayer):
    """The AJAX update-line view also works when taxes are configured."""
    variant = request_cart_with_item.lines.get().variant
    response = client.post(
        reverse('cart:update-line', kwargs={'variant_id': variant.id}),
        {'quantity': 3}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
    assert response.status_code == 200
    assert request_cart_with_item.quantity == 3
def test_view_invalid_update_cart(client, request_cart_with_item):
    """Posting no quantity yields a 400 with an 'error' payload and leaves
    the cart unchanged."""
    variant = request_cart_with_item.lines.get().variant
    response = client.post(
        reverse('cart:update-line', kwargs={'variant_id': variant.pk}),
        data={}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
    resp_decoded = json.loads(response.content.decode('utf-8'))
    assert response.status_code == 400
    assert 'error' in resp_decoded.keys()
    assert request_cart_with_item.quantity == 1
def test_cart_page_without_openexchagerates(
        client, request_cart_with_item, settings):
    """Without an exchange-rates API key no local total is computed.
    NOTE(review): "openexchagerates" in the test name is a typo for
    "openexchangerates"; kept to avoid renaming the test."""
    settings.OPENEXCHANGERATES_API_KEY = None
    response = client.get(reverse('cart:index'))
    context = response.context
    assert context['local_cart_total'] is None
def test_cart_page_with_openexchagerates(
        client, monkeypatch, request_cart_with_item, settings):
    """With an API key the local total is only shown once conversion rates
    are available (stubbed here as PLN at rate 2)."""
    settings.DEFAULT_COUNTRY = 'PL'
    settings.OPENEXCHANGERATES_API_KEY = 'fake-key'
    response = client.get(reverse('cart:index'))
    context = response.context
    assert context['local_cart_total'] is None
    monkeypatch.setattr(
        'django_prices_openexchangerates.models.get_rates',
        lambda c: {'PLN': Mock(rate=2)})
    response = client.get(reverse('cart:index'))
    context = response.context
    assert context['local_cart_total'].currency == 'PLN'
def test_cart_summary_page(settings, client, request_cart_with_item, vatlayer):
    """The summary view exposes quantity, taxed total and per-line data."""
    settings.DEFAULT_COUNTRY = 'PL'
    response = client.get(reverse('cart:summary'))
    assert response.status_code == 200
    content = response.context
    assert content['quantity'] == request_cart_with_item.quantity
    cart_total = request_cart_with_item.get_subtotal(taxes=vatlayer)
    assert content['total'] == cart_total
    assert len(content['lines']) == 1
    cart_line = content['lines'][0]
    variant = request_cart_with_item.lines.get().variant
    assert cart_line['variant'] == variant
    assert cart_line['quantity'] == 1
def test_cart_summary_page_empty_cart(client, request_cart):
    """The summary view renders with zero quantity for an empty cart."""
    response = client.get(reverse('cart:summary'))
    assert response.status_code == 200
    data = response.context
    assert data['quantity'] == 0
def test_cart_line_total_with_discount_and_taxes(
        sale, request_cart_with_item, taxes):
    """Line totals apply active sales and taxes together."""
    sales = Sale.objects.all()
    line = request_cart_with_item.lines.first()
    assert line.get_total(discounts=sales, taxes=taxes) == TaxedMoney(
        net=Money('4.07', 'USD'), gross=Money('5.00', 'USD'))
def test_find_open_cart_for_user(customer_user, user_cart):
    """The user's single open cart is found."""
    assert find_open_cart_for_user(customer_user) == user_cart
cart | |
contact.site
i_visit_id, j_visit_id = contact.id_tup
'''Check status of both individuals'''
i_has_valid_status = (
# not dead
(not (self.state['dead'][idxi] and self.state_started_at['dead'][idxi] <= start_contact)) and
# not hospitalized at time of contact
(not (self.state['hosp'][idxi] and self.state_started_at['hosp'][idxi] <= start_contact))
)
j_has_valid_status = (
# not dead
(not (self.state['dead'][idxj] and self.state_started_at['dead'][idxj] <= start_contact)) and
# not hospitalized at time of contact
(not (self.state['hosp'][idxj] and self.state_started_at['hosp'][idxj] <= start_contact))
)
if (not i_has_valid_status) or (not j_has_valid_status):
return False
'''Check contact tracing channels'''
# check if i is compliant with digital tracing
is_i_compliant = self.measure_list.is_compliant(
ComplianceForAllMeasure,
# to be consistent with general `is_i_compliant` check outside, don't use `start_contact`
t=max(end_contact, 0.0), j=idxi)
# check if j is compliant with digital tracing
is_j_compliant = self.measure_list.is_compliant(
ComplianceForAllMeasure,
# to be consistent with `is_i_compliant` check, don't use `start_contact`
t=max(end_contact, 0.0), j=idxj)
# Check if site at which contact happened has a beacon for beacon tracing
if self.mob.beacon_config is not None:
site_has_beacon = self.mob.site_has_beacon[site_id]
else:
site_has_beacon = False
# Contacts can be identified if one of the following is true:
# 1) i and j are compliant with digital tracing (require P2P tracing or location-based tracing with beacon at site)
digital_tracable = is_i_compliant and is_j_compliant and ((self.mob.beacon_config is None) or site_has_beacon)
contact_tracable = digital_tracable
if not contact_tracable:
return False
'''Check SocialDistancing measures'''
is_i_contained = self.is_person_home_from_visit_due_to_measure(
t=start_contact, i=idxi, visit_id=i_visit_id,
site_type=self.site_dict[self.site_type[site_id]])
is_j_contained = self.is_person_home_from_visit_due_to_measure(
t=start_contact, i=idxj, visit_id=j_visit_id,
site_type=self.site_dict[self.site_type[site_id]])
if is_i_contained or is_j_contained:
return False
# if all of the above checks passed, then contact is valid
return True
def __is_sib_tracing_contact_valid_households(self, i, j, house, t_day):
    """Decide whether the household contact between individuals `i` and `j`
    on day `t_day` is valid for (sib) tracing.

    Returns False when either individual was dead or hospitalized before
    the contact started, or when either was home-isolated by a household
    social distancing measure; True otherwise.
    NOTE(review): the `house` argument is accepted but not used here —
    confirm whether it is needed by callers.
    """
    # start_contact is set to the start time of the day
    # !! VALUES STILL HARDCODED !!
    start_contact = t_day * TO_HOURS + 0.5 * TO_HOURS
    # day 0 is potentially longer
    if t_day == 0:
        start_contact = 0
    '''Check status of both individuals'''
    i_has_valid_status = (
        # not dead
        (not (self.state['dead'][i] and self.state_started_at['dead'][i] <= start_contact)) and
        # not hospitalized at time of contact
        (not (self.state['hosp'][i] and self.state_started_at['hosp'][i] <= start_contact))
    )
    j_has_valid_status = (
        # not dead
        (not (self.state['dead'][j] and self.state_started_at['dead'][j] <= start_contact)) and
        # not hospitalized at time of contact
        (not (self.state['hosp'][j] and self.state_started_at['hosp'][j] <= start_contact))
    )
    if (not i_has_valid_status) or (not j_has_valid_status):
        return False
    '''Check SocialDistancing measures'''
    # Each individual is considered home-isolated if ANY of the household
    # measures (positive case, smart tracing, symptomatic after tracing)
    # contains them at the contact start time.
    is_i_home_isolated = (
        self.measure_list.is_contained(
            SocialDistancingForPositiveMeasureHousehold, t=start_contact, j=i,
            state_posi_started_at=self.state_started_at['posi'],
            state_posi_ended_at=self.state_ended_at['posi'],
            state_resi_started_at=self.state_started_at['resi'],
            state_dead_started_at=self.state_started_at['dead']) or
        self.measure_list.is_contained(
            SocialDistancingForSmartTracingHousehold, t=start_contact, j=i,
            state_nega_started_at=self.state_started_at['nega'],
            state_nega_ended_at=self.state_ended_at['nega']) or
        self.measure_list.is_contained(
            SocialDistancingSymptomaticAfterSmartTracingHousehold, t=start_contact, j=i,
            state_isym_started_at=self.state_started_at['isym'],
            state_isym_ended_at=self.state_ended_at['isym'],
            state_nega_started_at=self.state_started_at['nega'],
            state_nega_ended_at=self.state_ended_at['nega'])
    )
    is_j_home_isolated = (
        self.measure_list.is_contained(
            SocialDistancingForPositiveMeasureHousehold, t=start_contact, j=j,
            state_posi_started_at=self.state_started_at['posi'],
            state_posi_ended_at=self.state_ended_at['posi'],
            state_resi_started_at=self.state_started_at['resi'],
            state_dead_started_at=self.state_started_at['dead']) or
        self.measure_list.is_contained(
            SocialDistancingForSmartTracingHousehold, t=start_contact, j=j,
            state_nega_started_at=self.state_started_at['nega'],
            state_nega_ended_at=self.state_ended_at['nega']) or
        self.measure_list.is_contained(
            SocialDistancingSymptomaticAfterSmartTracingHousehold, t=start_contact, j=j,
            state_isym_started_at=self.state_started_at['isym'],
            state_isym_ended_at=self.state_ended_at['isym'],
            state_nega_started_at=self.state_started_at['nega'],
            state_nega_ended_at=self.state_ended_at['nega'])
    )
    if is_i_home_isolated or is_j_home_isolated:
        return False
    # if all of the above checks passed, then contact is valid
    return True
def __is_tracing_contact_valid(self, *, t, i, contact):
    """
    Compute whether a contact of individual i at time t is valid
    This is called with `i` being the infector.

    A contact is valid only if: (1) both individuals were alive and not
    hospitalized when it started and `j` is not already positive at
    tracing time `t`; (2) the contact is reachable through at least one
    tracing channel (digital, manual interview, or beacon combinations —
    see the enumeration below); and (3) neither party was kept home by a
    social distancing measure for this visit.
    """
    start_contact = contact.t_from
    j = contact.indiv_i
    site_id = contact.site
    j_visit_id, i_visit_id = contact.id_tup
    site_type = self.mob.site_dict[self.mob.site_type[site_id]]
    '''Check status of both individuals'''
    i_has_valid_status = (
        # not dead
        (not (self.state['dead'][i] and self.state_started_at['dead'][i] <= start_contact)) and
        # not hospitalized at time of contact
        (not (self.state['hosp'][i] and self.state_started_at['hosp'][i] <= start_contact))
    )
    j_has_valid_status = (
        # not dead
        (not (self.state['dead'][j] and self.state_started_at['dead'][j] <= start_contact)) and
        # not hospitalized at time of contact
        (not (self.state['hosp'][j] and self.state_started_at['hosp'][j] <= start_contact)) and
        # not positive at time of tracing
        (not (self.state['posi'][j] and self.state_started_at['posi'][j] <= t))
    )
    if (not i_has_valid_status) or (not j_has_valid_status):
        return False
    '''Check contact tracing channels'''
    # check if i is compliant with digital tracing
    is_i_compliant = self.measure_list.is_compliant(
        ComplianceForAllMeasure,
        # to be consistent with general `is_i_compliant` check outside, don't use `start_contact`
        t=max(t - self.smart_tracing_contact_delta, 0.0), j=i)
    # check if j is compliant with digital tracing
    is_j_compliant = self.measure_list.is_compliant(
        ComplianceForAllMeasure,
        # to be consistent with `is_i_compliant` check, don't use `start_contact`
        t=max(t - self.smart_tracing_contact_delta, 0.0), j=j)
    # check if i is compliant with manual tracing (offline/digital) and recalls site they visited
    i_recalls_visit = self.measure_list.is_active(
        ManualTracingForAllMeasure,
        t=start_contact,  # t not needed for the visit, but only for whether measure is active
        j=i,
        j_visit_id=i_visit_id)  # `i_visit_id` queries whether `i` recalls this specific visit
    # check if j can be traced with offline manual tracing
    is_j_manually_tracable = self.measure_list.is_active(
        ManualTracingReachabilityForAllMeasure,
        # to be consistent with `is_i_compliant` check, don't use `start_contact`
        t=max(t - self.smart_tracing_contact_delta, 0.0), j=j,
        j_visit_id=j_visit_id,
        site_type=site_type)
    # Check if site at which contact happened has a beacon for beacon tracing
    if self.mob.beacon_config is not None:
        site_has_beacon = self.mob.site_has_beacon[site_id]
    else:
        site_has_beacon = False
    # Contacts can be identified if one of the following is true:
    # 1) i and j are compliant with digital tracing (require P2P tracing or location-based tracing with beacon at site)
    # 2) i recalls visit in manual contact interview and j is offline manually reachable e.g. via phone
    # 3) i recalls visit in manual contact interview and j is compliant with beacon tracing and the site at which
    #    the contact happened has a beacon
    # 4) i is compliant with beacon tracing and j is manually reachable
    digital_tracable = is_i_compliant and is_j_compliant and ((self.mob.beacon_config is None) or site_has_beacon)
    offline_manual_tracable = i_recalls_visit and is_j_manually_tracable
    manual_beacon_tracable = i_recalls_visit and is_j_compliant and site_has_beacon
    beacon_manual_reachable = is_i_compliant and site_has_beacon and is_j_manually_tracable
    contact_tracable = (digital_tracable or offline_manual_tracable or
                        manual_beacon_tracable or beacon_manual_reachable)
    if not contact_tracable:
        return False
    '''Check SocialDistancing measures'''
    is_i_contained = self.is_person_home_from_visit_due_to_measure(
        t=start_contact, i=i, visit_id=i_visit_id,
        site_type=self.site_dict[self.site_type[site_id]])
    is_j_contained = self.is_person_home_from_visit_due_to_measure(
        t=start_contact, i=j, visit_id=j_visit_id,
        site_type=self.site_dict[self.site_type[site_id]])
    if is_i_contained or is_j_contained:
        return False
    # if all of the above checks passed, then contact is valid
    return True
def __compute_empirical_survival_probability(self, *, t, i, j, contacts_i_j, base_rate=1.0, ignore_sites=False):
    """ Compute empirical survival probability of individual j due to node i at time t

    Accumulates an exposure term for every contact between `i` and `j`
    that starts before `t`, using the site-aware estimate when the site
    has a beacon (unless `ignore_sites`), and returns exp(-sum).

    :param t: time of the tracing action.
    :param i: infector index.
    :param j: individual at risk due to `i`.
    :param contacts_i_j: iterable of contact records between i and j,
        assumed ordered by start time (iteration breaks at the first
        contact starting at or after `t`).
    :param base_rate: multiplicative factor on the exposure rate.
    :param ignore_sites: if True, never use site-specific information.
    :return: survival probability in (0, 1].
    """
    s = 0
    for contact in contacts_i_j:
        t_start = contact.t_from
        t_end = contact.t_to
        t_end_direct = contact.t_to_direct
        site = contact.site
        # break if next contact starts after t
        if t_start >= t:
            break
        # check whether this computation has access to site information
        if self.mob.site_has_beacon[site] and not ignore_sites:
            s += self.__survival_prob_contribution_with_site(
                i=i, j=j, site=site, t=t, t_start=t_start,
                t_end_direct=t_end_direct, t_end=t_end, base_rate=base_rate)
        else:
            s += self.__survival_prob_contribution_no_site(
                t=t, t_start=t_start, t_end_direct=t_end_direct, base_rate=base_rate)
    # survival probability
    survival_prob = np.exp(-s)
    return survival_prob
def __survival_prob_contribution_no_site(self, *, t, t_start, t_end_direct, base_rate):
    """Computes empirical survival probability estimate when no site information
    such as non-contemporaneous contact is known.

    t: time of tracing action (upper bound on visit time)
    t_start: start of contact
    t_end_direct: end of direct contact

    Returns the exposure contribution (a non-negative rate * duration
    term) for this contact; 0.0 when the direct-contact window is empty.
    """
    # only consider direct contact
    if min(t_end_direct, t) >= t_start:
        # assume infector was at site entire `delta` time window
        # before j arrived by lack of information otherwise
        return (min(t_end_direct, t) - t_start) * base_rate * self.betas_weighted_mean * self.__kernel_term(- self.delta, 0.0, 0.0)
    else:
        return 0.0
def __survival_prob_contribution_with_site(self, *, i, j, site, t, t_start, t_end_direct, t_end, base_rate):
"""Computes exact empirical survival probability estimate when site information
such as site-specific transmission rate and
such as non-contemporaneous contact is known.
i: infector
j: individual at risk due to `i`
site: site
t: time of tracing action (upper bound on visit time)
t_start: start of contact
t_end_direct: end of direct contact
t_end: end of contact
base_rate: base rate
"""
# query visit of infector i that resulted in the contact
inf_visit_ = list(self.mob.list_intervals_in_window_individual_at_site(
indiv=i, site=site, t0=t_end_direct, t1=t_end_direct))
assert(len(inf_visit_) == 1)
inf_from, inf_to = inf_visit_[0].left, inf_visit_[0].right
# query visit of j that resulted in the contact
j_visit_ = list(self.mob.list_intervals_in_window_individual_at_site(
indiv=j, site=site, t0=t_start, t1=t_start))
assert(len(j_visit_) == 1)
j_from, j_to = j_visit_[0].left, j_visit_[0].right
# BetaMultiplier measures
beta_fact = 1.0
beta_mult_measure = self.measure_list.find(BetaMultiplierMeasureBySite, t=t_start)
beta_fact *= beta_mult_measure.beta_factor(k=site, t=t_start) \
if beta_mult_measure else 1.0
beta_mult_measure = self.measure_list.find(BetaMultiplierMeasureByType, t=t_start)
beta_fact *= (beta_mult_measure.beta_factor(typ=self.site_dict[self.site_type[site]], t=t_start)
if beta_mult_measure else 1.0)
beta_mult_measure = self.measure_list.find(UpperBoundCasesBetaMultiplier, t=t)
beta_fact *= (beta_mult_measure.beta_factor(typ=self.site_dict[self.site_type[site]], t=t, t_pos_tests=self.t_pos_tests) \
if beta_mult_measure | |
<reponame>redwankarimsony/SSD-Mobilenet-People-Detection
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 8 15:45:16 2019
@author: viswanatha
"""
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Conv2d, Sequential, ModuleList, ReLU
import torch
from mobilenet_ssd_priors import *
from MobileNetV2 import MobileNetV2, MobileNetV2_pretrained
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class MobileNetV1(nn.Module):
    """MobileNet v1 backbone: a strided stem convolution followed by a
    stack of depthwise-separable blocks, global average pooling and a
    linear classifier.

    :param num_classes: size of the final classification layer.
    """

    def __init__(self, num_classes=1000):
        super(MobileNetV1, self).__init__()

        def conv_bn(in_ch, out_ch, stride):
            # Standard 3x3 convolution + batch norm + ReLU.
            return nn.Sequential(
                nn.Conv2d(in_ch, out_ch, 3, stride, 1, bias=False),
                nn.BatchNorm2d(out_ch),
                nn.ReLU(inplace=True),
            )

        def conv_dw(in_ch, out_ch, stride):
            # Depthwise 3x3 convolution followed by a 1x1 pointwise
            # convolution, each with batch norm + ReLU.
            return nn.Sequential(
                nn.Conv2d(in_ch, in_ch, 3, stride, 1, groups=in_ch, bias=False),
                nn.BatchNorm2d(in_ch),
                nn.ReLU(inplace=True),
                nn.Conv2d(in_ch, out_ch, 1, 1, 0, bias=False),
                nn.BatchNorm2d(out_ch),
                nn.ReLU(inplace=True),
            )

        # (in_channels, out_channels, stride) for each depthwise block.
        dw_config = [
            (32, 64, 1),
            (64, 128, 2),
            (128, 128, 1),
            (128, 256, 2),
            (256, 256, 1),
            (256, 512, 2),
            (512, 512, 1),
            (512, 512, 1),
            (512, 512, 1),
            (512, 512, 1),
            (512, 512, 1),
            (512, 1024, 2),
            (1024, 1024, 1),
        ]
        layers = [conv_bn(3, 32, 2)]
        layers.extend(conv_dw(i, o, s) for i, o, s in dw_config)
        self.model = nn.Sequential(*layers)
        self.fc = nn.Linear(1024, num_classes)

    def forward(self, x):
        """Run the backbone and classifier. The 7x7 average pool assumes
        224x224 inputs (final feature map is 1024x7x7)."""
        features = self.model(x)
        pooled = F.avg_pool2d(features, 7)
        flat = pooled.view(-1, 1024)
        return self.fc(flat)
class PredictionConvolutions(nn.Module):
    """
    Convolutions to predict class scores and bounding boxes using lower and
    higher-level feature maps.

    The bounding boxes (locations) are predicted as encoded offsets w.r.t.
    each of the 8732 prior (default) boxes; see 'cxcy_to_gcxgcy' in utils.py
    for the encoding definition.  The class scores represent the scores of
    each object class in each of the 8732 bounding boxes located; a high
    score for 'background' means no object.
    """

    # Feature map -> number of prior boxes per spatial position
    # (4 boxes means 4 aspect ratios at that position, etc.).
    N_BOXES = {'conv4_3': 4, 'conv7': 6, 'conv8_2': 6,
               'conv9_2': 6, 'conv10_2': 4, 'conv11_2': 4}

    # Feature map -> input channel count, per supported backbone.  The two
    # former copy-pasted branches differed only in these numbers.
    IN_CHANNELS = {
        'MobileNetV2': {'conv4_3': 96, 'conv7': 1280, 'conv8_2': 512,
                        'conv9_2': 256, 'conv10_2': 256, 'conv11_2': 256},
        'MobileNetV1': {'conv4_3': 512, 'conv7': 1024, 'conv8_2': 512,
                        'conv9_2': 256, 'conv10_2': 256, 'conv11_2': 256},
    }

    def __init__(self, n_classes, backbone_net):
        """
        :param n_classes: number of different types of objects
        :param backbone_net: 'MobileNetV1' or 'MobileNetV2'
        :raises ValueError: for an unsupported backbone.  (The original code
            silently created no layers, deferring the failure to forward().)
        """
        super(PredictionConvolutions, self).__init__()
        self.n_classes = n_classes
        if backbone_net not in self.IN_CHANNELS:
            raise ValueError('Unsupported backbone for PredictionConvolutions: %r'
                             % backbone_net)
        channels = self.IN_CHANNELS[backbone_net]
        # Localization heads (4 encoded offsets per box) and classification
        # heads (n_classes scores per box), one pair per feature map.
        for fmap, boxes in self.N_BOXES.items():
            setattr(self, 'loc_' + fmap,
                    nn.Conv2d(channels[fmap], boxes * 4, kernel_size=3, padding=1))
            setattr(self, 'cl_' + fmap,
                    nn.Conv2d(channels[fmap], boxes * n_classes, kernel_size=3, padding=1))
        # Initialize convolutions' parameters
        self.init_conv2d()

    def init_conv2d(self):
        """Xavier-initialize all prediction convolutions; zero the biases."""
        for c in self.children():
            if isinstance(c, nn.Conv2d):
                nn.init.xavier_uniform_(c.weight)
                nn.init.constant_(c.bias, 0.)

    @staticmethod
    def _flat_pred(conv, feats, last_dim):
        """Apply ``conv``, move channels last (to match prior-box order after
        .view()), and flatten the spatial grid to (N, boxes_here, last_dim)."""
        out = conv(feats).permute(0, 2, 3, 1).contiguous()
        return out.view(out.size(0), -1, last_dim)

    def forward(self, conv4_3_feats, conv7_feats, conv8_2_feats, conv9_2_feats,
                conv10_2_feats, conv11_2_feats):
        """
        Predict box offsets and class scores from the six feature maps.

        :return: (locs, classes_scores) with shapes (N, 8732, 4) and
            (N, 8732, n_classes).  Per-map box counts: 5776 (conv4_3),
            2166 (conv7), 600, 150, 36 and 4 — concatenated in feature-map
            order so they line up with the prior boxes.
        """
        feats = [conv4_3_feats, conv7_feats, conv8_2_feats,
                 conv9_2_feats, conv10_2_feats, conv11_2_feats]
        names = ['conv4_3', 'conv7', 'conv8_2', 'conv9_2', 'conv10_2', 'conv11_2']
        locs = torch.cat(
            [self._flat_pred(getattr(self, 'loc_' + n), f, 4)
             for n, f in zip(names, feats)], dim=1)  # (N, 8732, 4)
        classes_scores = torch.cat(
            [self._flat_pred(getattr(self, 'cl_' + n), f, self.n_classes)
             for n, f in zip(names, feats)], dim=1)  # (N, 8732, n_classes)
        return locs, classes_scores
#auxiliary_conv = [256, 'S', 512, 128, 'S', 256, 128, 256, 128, 256]
class AuxillaryConvolutions(nn.Module):
    """SSD auxiliary feature layers.

    Starting from the backbone's final (10x10) feature map, four conv blocks
    progressively halve the spatial size, producing the 5x5, 3x3, 2x2 and
    1x1 maps consumed by the prediction heads.
    """

    # Backbone -> channel count of the feature map fed to the first block.
    # The two original copy-pasted branches differed only in this number.
    FIRST_IN_CHANNELS = {'MobileNetV2': 1280, 'MobileNetV1': 1024}

    def __init__(self, backbone_net):
        """
        :param backbone_net: 'MobileNetV1' or 'MobileNetV2'
        :raises ValueError: for an unsupported backbone (the original code
            silently created no layers).
        """
        super(AuxillaryConvolutions, self).__init__()
        if backbone_net not in self.FIRST_IN_CHANNELS:
            raise ValueError('Unsupported backbone for AuxillaryConvolutions: %r'
                             % backbone_net)
        first_in = self.FIRST_IN_CHANNELS[backbone_net]
        self.extras = ModuleList([
            Sequential(
                Conv2d(in_channels=first_in, out_channels=256, kernel_size=1),
                ReLU(),
                Conv2d(in_channels=256, out_channels=512, kernel_size=3, stride=2, padding=1),
                ReLU()
            ),
            Sequential(
                Conv2d(in_channels=512, out_channels=128, kernel_size=1),
                ReLU(),
                Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=2, padding=1),
                ReLU()
            ),
            Sequential(
                Conv2d(in_channels=256, out_channels=128, kernel_size=1),
                ReLU(),
                Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=2, padding=1),
                ReLU()
            ),
            Sequential(
                Conv2d(in_channels=256, out_channels=128, kernel_size=1),
                ReLU(),
                Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=2, padding=1),
                ReLU()
            )
        ])
        self.init_conv2d()

    def init_conv2d(self):
        """Xavier-initialize every Conv2d weight and zero its bias.

        Bug fix: the original iterated only direct children (the ModuleList
        and the Sequential blocks), so ``isinstance(layer, nn.Conv2d)`` never
        matched and no layer was ever initialized; it also referenced
        ``c.bias`` (a container, which has no bias) instead of the conv
        layer's bias.  ``self.modules()`` iterates recursively and reaches
        the Conv2d layers.
        """
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.xavier_uniform_(m.weight)
                nn.init.constant_(m.bias, 0.)

    def forward(self, inp_features_10x10):
        """Run the extra blocks on the 10x10 backbone features.

        :return: tuple (features_5x5, features_3x3, features_2x2, features_1x1)
        """
        features = []
        x = inp_features_10x10
        for block in self.extras:
            x = block(x)
            features.append(x)
        return tuple(features)
class SSD(nn.Module):
def __init__(self, num_classes, backbone_network):
super(SSD, self).__init__()
self.num_classes = num_classes
self.priors = torch.FloatTensor(priors).to(device)
#self.base_net = MobileNetV1().model
self.backbone_net = backbone_network
if self.backbone_net == 'MobileNetV1':
self.base_net = MobileNetV1().model
elif self.backbone_net == 'MobileNetV2':
self.base_net = MobileNetV2_pretrained('mobilenet_v2.pth.tar').model
else:
raise('SSD cannot be created with the provided base network')
#self.base_net = MobileNetV2()
self.aux_network = AuxillaryConvolutions(self.backbone_net)
self.prediction_network = PredictionConvolutions(num_classes, self.backbone_net)
def forward(self, image):
x= image
if self.backbone_net == 'MobileNetV1':
source_layer_indexes = [
12,
14,]
start_layer_index = 0
flag = 0
x = x.to('cuda')
| |
# GitHub stars: 100-1000
#//
#//----------------------------------------------------------------------
#// Copyright 2007-2011 Mentor Graphics Corporation
#// Copyright 2007-2011 Cadence Design Systems, Inc.
#// Copyright 2010 Synopsys, Inc.
#// Copyright 2019-2020 Tuomas Poikela (tpoikela)
#// All Rights Reserved Worldwide
#//
#// Licensed under the Apache License, Version 2.0 (the
#// "License"); you may not use this file except in
#// compliance with the License. You may obtain a copy of
#// the License at
#//
#// http://www.apache.org/licenses/LICENSE-2.0
#//
#// Unless required by applicable law or agreed to in
#// writing, software distributed under the License is
#// distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
#// CONDITIONS OF ANY KIND, either express or implied. See
#// the License for the specific language governing
#// permissions and limitations under the License.
#//----------------------------------------------------------------------
from ..base.uvm_component import UVMComponent
from ..base.uvm_object_globals import UVM_NO_ACTION
from ..tlm1 import (UVMAnalysisPort, UVMGetPeekExport, UVMMasterImp,
UVMPutExport, UVMSlaveImp, UVMTLMFIFO)
from ..base.uvm_port_base import (s_connection_error_id)
#//------------------------------------------------------------------------------
#// Title: TLM Channel Classes
#//------------------------------------------------------------------------------
#// This section defines built-in TLM channel classes.
#//------------------------------------------------------------------------------
#//------------------------------------------------------------------------------
#//
#// CLASS: uvm_tlm_req_rsp_channel #(REQ,RSP)
#//
#// The uvm_tlm_req_rsp_channel contains a request FIFO of type ~REQ~ and a response
#// FIFO of type ~RSP~. These FIFOs can be of any size. This channel is
#// particularly useful for dealing with pipelined protocols where the request
#// and response are not tightly coupled.
#//
#// Type parameters:
#//
#// REQ - Type of the request transactions conveyed by self channel.
#// RSP - Type of the response transactions conveyed by self channel.
#//
#//------------------------------------------------------------------------------
class UVMTLMReqRspChannel(UVMComponent):
    """
    Python port of ``uvm_tlm_req_rsp_channel #(REQ,RSP)``.

    The channel contains a request FIFO (type REQ) and a response FIFO
    (type RSP).  The FIFOs can be of any size.  It is particularly useful
    for pipelined protocols where the request and response are not tightly
    coupled.

    Exports:
      - ``put_request_export``: blocking and non-blocking put into the
        request FIFO (``put``, ``can_put``, ``try_put``).
      - ``get_peek_request_export``: blocking and non-blocking get/peek from
        the request FIFO (``get``, ``can_get``, ``try_get``, ``peek``,
        ``can_peek``, ``try_peek``).
      - ``put_response_export`` / ``get_peek_response_export``: the same two
        interfaces for the response FIFO.
      - ``master_export``: combined put-request / get-peek-response
        interface for masters.
      - ``slave_export``: combined get-peek-request / put-response
        interface for slaves.

    Analysis ports:
      - ``request_ap``: writes every transaction put into the request FIFO
        to all connected analysis exports/imps.
      - ``response_ap``: likewise for the response FIFO.

    Numerous ``blocking_*``/``nonblocking_*`` aliases of these exports are
    kept for backward compatibility; see ``create_aliased_exports``.
    """

    # UVM type name (mirrors the parameterized SystemVerilog name).
    type_name = "uvm_tlm_req_rsp_channel #(REQ,RSP)"

    def __init__(self, name, parent=None, request_fifo_size=1,
                 response_fifo_size=1):
        """
        ``name`` and ``parent`` are the standard <uvm_component> constructor
        arguments (``parent`` must be None for a static top-level component).
        The last two arguments give the request and response FIFO sizes,
        both defaulting to 1.
        """
        super().__init__(name, parent)
        # Backing FIFOs.
        self.m_request_fifo = UVMTLMFIFO("request_fifo", self, request_fifo_size)
        self.m_response_fifo = UVMTLMFIFO("response_fifo", self, response_fifo_size)
        # Analysis ports mirroring FIFO traffic.
        self.request_ap = UVMAnalysisPort("request_ap", self)
        self.response_ap = UVMAnalysisPort("response_ap", self)
        # Primary exports.
        self.put_request_export = UVMPutExport("put_request_export", self)
        self.get_peek_request_export = UVMGetPeekExport("get_peek_request_export", self)
        self.put_response_export = UVMPutExport("put_response_export", self)
        self.get_peek_response_export = UVMGetPeekExport("get_peek_response_export", self)
        # Combined master/slave interfaces.
        self.master_export = UVMMasterImp("master_export", self, self.m_request_fifo,
            self.m_response_fifo)
        self.slave_export = UVMSlaveImp("slave_export", self, self.m_request_fifo,
            self.m_response_fifo)
        self.create_aliased_exports()
        # Connections are only made in connect_phase; suppress spurious
        # connection-error reports until then.
        self.set_report_id_action_hier(s_connection_error_id, UVM_NO_ACTION)

    def connect_phase(self, phase):
        """Wire the exports and analysis ports to the backing FIFOs."""
        # Request side.
        self.put_request_export.connect(self.m_request_fifo.put_export)
        self.get_peek_request_export.connect(self.m_request_fifo.get_peek_export)
        self.m_request_fifo.put_ap.connect(self.request_ap)
        # Response side.
        self.put_response_export.connect(self.m_response_fifo.put_export)
        self.get_peek_response_export.connect(self.m_response_fifo.get_peek_export)
        self.m_response_fifo.put_ap.connect(self.response_ap)

    def create_aliased_exports(self):
        """Create the backward-compatibility aliases of the primary exports."""
        aliases = [
            (self.put_request_export, (
                'blocking_put_request_export',
                'nonblocking_put_request_export')),
            (self.get_peek_request_export, (
                'get_request_export',
                'blocking_get_request_export',
                'nonblocking_get_request_export',
                'peek_request_export',
                'blocking_peek_request_export',
                'nonblocking_peek_request_export',
                'blocking_get_peek_request_export',
                'nonblocking_get_peek_request_export')),
            (self.put_response_export, (
                'blocking_put_response_export',
                'nonblocking_put_response_export')),
            (self.get_peek_response_export, (
                'get_response_export',
                'blocking_get_response_export',
                'nonblocking_get_response_export',
                'peek_response_export',
                'blocking_peek_response_export',
                'nonblocking_peek_response_export',
                'blocking_get_peek_response_export',
                'nonblocking_get_peek_response_export')),
            (self.master_export, (
                'blocking_master_export',
                'nonblocking_master_export')),
            (self.slave_export, (
                'blocking_slave_export',
                'nonblocking_slave_export')),
        ]
        for target, names in aliases:
            for alias in names:
                setattr(self, alias, target)

    def get_type_name(self):
        """Return the UVM type name of this channel."""
        return UVMTLMReqRspChannel.type_name

    def create(self, name=""):
        """Factory method: return a new UVMTLMReqRspChannel instance."""
        return UVMTLMReqRspChannel(name)
#//------------------------------------------------------------------------------
#//
#// CLASS: uvm_tlm_transport_channel #(REQ,RSP)
#//
#// A uvm_tlm_transport_channel is a <uvm_tlm_req_rsp_channel #(REQ,RSP)> that implements
#// the transport interface. It is useful when modeling a non-pipelined bus at
#// the transaction level. Because the requests and responses have a tightly
#// coupled one-to-one relationship, the request and response FIFO sizes are both
#// set to one.
#//
#//------------------------------------------------------------------------------
#
#class uvm_tlm_transport_channel #(type REQ=int, type RSP=REQ)
# extends uvm_tlm_req_rsp_channel #(REQ, RSP)
#
# typedef uvm_tlm_transport_channel #(REQ, RSP) this_type
#
# // Port: transport_export
# //
# // The put_export provides both the blocking and non-blocking transport
# // interface methods to the response FIFO:
# //
# //| def transport(self,REQ request, output RSP response)
# //| def bit nb_transport(self,REQ request, output RSP response):
# //
# // Any transport port variant can connect to and send requests and retrieve
# // responses via self export, provided the transaction types match. Upon
# // return, the response argument carries the | |
# Repository: gamtiq/leo-editor
#@+leo-ver=5-thin
#@+node:tbrown.20080613095157.2: * @file ../plugins/active_path.py
#@+<< docstring >>
#@+node:tbrown.20080613095157.3: ** << docstring >> (active_path)
r'''Synchronizes \@path nodes with folders.
If a node is named '\@path *<path_to_folder>*', the content (file and folder
names) of the folder and the children of that node will synchronized whenever
you double-click the node.
For files not previously seen in a folder a new node will appear on top of the
children list (with a mark).
Folders appear in the list as /foldername/. If you double click on the folder
node, it will have children added to it based on the contents of the folder on
disk. These folders have the '@path' directive as the first line of their body
text.
When files are deleted from the folder and the list is updated by double
clicking the files will appear in the list as *filename* (or */foldername/*).
You can describe files and directories in the body of the nodes.
You can organize files and directories with organizer nodes, an organizer node
name cannot contain with '/'.
Files and folders can be created by entering a node with the required name as
its headline (must start and/or end with "/" for a folder) and then double
clicking on the node.
\@auto nodes can be set up for existing files can be loaded by double clicking
on the node. If you prefer \@shadow or something else use the
"active_path_attype" setting, without the "@".
There are commands on the Plugins active_path submenu:
- show path - show the current path
- set absolute path - changes a node "/dirname/" to "@path
/absolute/path/to/dirname".
- purge vanished (recursive) - remove *entries*
- update recursive - recursive load of directories, use with caution on large
file systems
- pick dir - select a folder interactively to make a new top level @path node
- mark-content - mark outline content in the @path tree, as opposed to
filesystem content. Useful if you want to delete the @path tree to check for
content not on the filesystem first
If you want to use an input other than double clicking a node set
active_path_event to a value like 'hypercclick1' or 'headrclick1'.
There are @settings for ignoring directory entries and automatically loading
files. ``re.search`` is used, rather than ``re.match``, so patterns need only
match part of the filename, not the whole filename.
The body of the @setting ``@data active_path_ignore`` is a list of regex
patterns, one per line. Directory entries matching any pattern in the list will
be ignored. The names of directories used for matching will have forward slashes
around them ('/dirname/'), so patterns can use this to distinguish between
directories and files.
The body of the @setting ``@data active_path_autoload`` is a list of regex
patterns, one per line. File entries matching any pattern in the list will be
loaded automatically. This works only with files, not directories (but you can
load directories recursively anyway).
Autoloading can be toggled with `active-path-toggle-autoload`, autoloading
defaults to initially on unless @bool active-path-do-autoload = False.
Set ``@bool active_path_load_docstring = True`` to have active_path load the
docstring of .py files automatically. These nodes start with the special
string::
@language rest # AUTOLOADED DOCSTRING
which must be left intact if you want active path to be able to double-click
load the file later.
\@float active_path_timeout_seconds (default 10.) controls the maximum time
active_path will spend on a recursive operation.
\@int active_path_max_size (default 1000000) controls the maximum size file
active_path will open without query.
Per Folder file/folder inclusion and exclusion by adding flags to the body of an
active path folder (either ``@`` or ``/*/``), can include multiple ``inc=`` and
``exc=`` flags:
- ``excdirs`` - excludes all directories
- ``excfiles`` - excludes all files
- ``inc=`` - a single item or comma separated list of strings to include in the
list of files/folders
- ``exc=`` - a single item or comma separated list of strings to exclude in the
list of files/folders
- ``re`` - search using regular expressions (otherwise a case-sensitive 'in'
comparison)
active_path is a rewrite of the at_directory plugin to use \@path directives
(which influence \@auto and other \@file type directives), and to handle
sub-folders more automatically.
'''
#@-<< docstring >>
#@+<< imports >>
#@+node:ekr.20140612210500.17669: ** << imports >>
import leo.core.leoGlobals as g
import leo.core.leoPlugins as leoPlugins
# uses leoPlugins.TryNext
import ast # for docstring loading
import os
import re
import shutil
import time # for recursion bailout
# from leo.plugins.plugins_menu import PlugIn
# if g.app.gui.guiName() == "qt":
# from leo.core.leoQt import isQt5,QtCore
#@-<< imports >>
testing = False
#@+others
#@+node:tbrown.20091128094521.15048: ** init
def init():
    '''Return True if the plugin has loaded successfully.'''
    # Per-commander setup is deferred until a Leo frame (and its commander)
    # actually exists.
    g.registerHandler('after-create-leo-frame', attachToCommander)
    g.act_on_node.add(active_path_act_on_node, priority = 90)
    g.plugin_signon(__name__)
    if g.app.gui.guiName() == "qt":
        # The right-click "Path" submenu is only available under the Qt GUI.
        g.tree_popup_handlers.append(popup_entry)
    return True
#@+node:tbrown.20091128094521.15047: ** attachToCommander
# defer binding event until c exists
def attachToCommander(t, k):
    """Bind the configured click event for a newly created commander and
    build its per-commander settings dict ``c.__active_path``."""
    c = k.get('c')
    event = c.config.getString('active-path-event') or "headdclick1"
    # pylint: disable=unnecessary-lambda
    g.registerHandler(event, lambda t, k: onSelect(t, k))

    # Not using a proper class, so keep all per-commander state in one dict.
    settings = {
        'ignore': [],
        'autoload': [],
        'do_autoload': c.config.getBool('active-path-do-autoload', default=True),
    }
    ignore_data = c.config.getData('active_path_ignore')
    if ignore_data:
        settings['ignore'] = [re.compile(pat, re.IGNORECASE)
                              for pat in ignore_data]
    autoload_data = c.config.getData('active_path_autoload')
    if autoload_data:
        settings['autoload'] = [re.compile(pat, re.IGNORECASE)
                                for pat in autoload_data]
    settings['load_docstring'] = bool(c.config.getBool('active-path-load-docstring'))
    # Falsy (unset or zero) config values fall back to the defaults,
    # exactly as the original if/else pairs did.
    settings['timeout'] = c.config.getFloat('active-path-timeout-seconds') or 10.
    settings['max_size'] = c.config.getInt('active-path-max-size') or 1000000
    settings['DS_SENTINEL'] = "@language rest # AUTOLOADED DOCSTRING"
    c.__active_path = settings
#@+node:tbrown.20091128094521.15042: ** popup_entry (active_path)
def popup_entry(c,p,menu):
    '''Populate the Path submenu of the popup.

    Adds one action per registered ``active-path-*`` minibuffer command.
    '''
    pathmenu = menu.addMenu("Path")
    d = g.global_commands_dict
    for key in d:
        if key.startswith('active-path'):
            a = pathmenu.addAction(key)
            command = d.get(key)
            # Bind ``command`` and ``c`` as default arguments so each action
            # keeps its own command — avoids the late-binding-closure pitfall
            # where every action would invoke the last command of the loop.
            def active_path_wrapper(aBool,command=command,c=c):
                event = {'c':c}
                command(event)
            a.triggered.connect(active_path_wrapper)
#@+node:tbrown.20091128094521.15037: ** isDirNode
def isDirNode(p):
    """True if the headline of node p denotes a directory.

    Matches '@path <dir>' headlines, headlines starting with '/', and
    non-directive headlines ending with '/' (the '/foo/' form *assumes* an
    @path directive in the body).
    """
    headline = p.h.strip()
    if p.h.startswith('@path '):
        return True
    if headline.startswith('/'):
        return True
    return headline.endswith('/') and not headline.startswith('@')
#@+node:tbrown.20091128094521.15039: ** isFileNode
def isFileNode(p):
    """Really isEligibleToBecomeAFileNode: True for a childless,
    non-directive node whose parent is a directory node and whose body is
    empty or an autoloaded docstring."""
    if p.h.strip().startswith('@') or p.hasChildren():
        return False
    if isDirNode(p) or not isDirNode(p.parent()):
        return False
    # Body must be empty, or start with the autoload sentinel (hard-coded
    # here because no commander ``c`` is available in this helper).
    return (not p.b.strip() or
        p.b.startswith("@language rest # AUTOLOADED DOCSTRING"))
#@+node:jlunz.20150611151435.1: ** inAny
def inAny(item, group, regEx=False):
    """Return True if any word in ``group`` occurs in ``item``.

    With regEx=True each word is treated as a regular-expression pattern
    (``re.search``); otherwise a plain substring test is used.
    """
    if regEx:
        matches = (re.search(word, item) for word in group)
    else:
        matches = (word in item for word in group)
    return any(matches)
#@+node:jlunz.20150611151003.1: ** checkIncExc
def checkIncExc(item, inc, exc, regEx):
    """Decide whether ``item`` passes the include/exclude filters.

    With only an include list, the item must match it; with only an exclude
    list, the item must not match it; with both lists or neither,
    everything passes.
    """
    has_inc, has_exc = bool(inc), bool(exc)
    if has_inc == has_exc:
        return True
    if has_inc:
        return inAny(item, inc, regEx)
    return not inAny(item, exc, regEx)
#@+node:tbrown.20091129085043.9329: ** inReList
def inReList(txt, lst):
    """Return True if any compiled pattern in ``lst`` matches ``txt``.

    ``Pattern.search`` is used, so a pattern need only match part of the
    text.  Idiomatic ``any()`` replaces the original manual loop-and-return.
    """
    return any(pat.search(txt) for pat in lst)
#@+node:tbrown.20091128094521.15040: ** subDir
def subDir(d, p):
if p.h.strip().startswith('@path'):
p = p.h.split(None,1)
if len(p) != 2:
return None
p = p[1]
elif p.b.strip().startswith('@path'):
p = p.b.split('\n',1)[0].split(None,1)
if len(p) != 2:
return None
p = p[1]
else:
p = p.h.strip(' /')
return os.path.join(d,p)
#@+node:tbrown.20080613095157.4: ** onSelect
def onSelect(tag, keywords):
    """Determine if a file or directory node was clicked, and the path."""
    c = keywords.get('c') or keywords.get('new_c')
    if not c:
        return None
    p = keywords.get("p")
    p.expand()
    pos = p.copy()
    path = getPath(c, p)
    if not path:
        return None
    if sync_node_to_folder(c, pos, path):
        c.redraw()
        return True
    return None
#@+node:tbrown.20080616153649.4: ** getPath
def getPath(c, p):
    """Return the filesystem path for node p, or None when p has no
    full-fledged '@path' ancestor."""
    if not any(n.h.startswith('@path') for n in p.self_and_parents()):
        return None  # must have a full fledged @path in parents
    aList = g.get_directives_dict_list(p)
    path = c.scanAtPathDirectives(aList)
    if not isDirNode(p):
        # Append the file name: for '@auto foo.py' style headlines use the
        # part after the directive, otherwise the whole headline.
        parts = p.h.split(None, 1)
        if parts[0].startswith('@') and len(parts) == 2:
            path = os.path.join(path, parts[1])
        else:
            path = os.path.join(path, p.h.strip())
    return path
#@+node:tbrown.20090219133655.230: ** getPathOld
def getPathOld(p):
    """Dead code: superseded by getPath(), which relies on Leo's own @path
    directive scanning.  Kept for reference only.

    Walks from p toward the root collecting directory-name components and
    returns an expanded filesystem path once an '@path' headline is found;
    returns None otherwise.
    """
    # NOT USED, my version which does its own @path scanning
    p = p.copy()
    path = []  # path components collected while walking toward the root
    while p:
        h = p.h
        if g.match_word(h,0,"@path"): # top of the tree
            path.insert(0,os.path.expanduser(h[6:].strip()))
            d = os.path.join(*path)
            return d
        if h.startswith('@'): # some other directive, run away
            break
        elif isDirNode(p): # a directory
            path.insert(0,h.strip('/*'))
        elif not p.hasChildren(): # a leaf node, assume a file
            path.insert(0,h.strip('*'))
        p = p.parent()
    # Fell off the root (or hit a non-@path directive) without finding @path.
    return None
#@+node:tbrown.20080613095157.5: ** flattenOrganizers
def flattenOrganizers(p):
    """Yield the children of p, recursing through organizer nodes.

    In the following example nodeA's children are nodes B, F, and G::

       /nodeA/
          nodeB
          /nodeC/
             nodeD
             nodeE
          oldStuff
             nodeF
             nodeG
    """
    for child in p.children():
        yield child
        # Organizer nodes (neither directories nor directives) are
        # transparent: their descendants count as direct children.
        if not isDirNode(child) and not child.h.startswith('@'):
            yield from flattenOrganizers(child)
#@+node:tbrown.20080613095157.6: ** sync_node_to_folder
def sync_node_to_folder(c,parent,d,updateOnly=False, recurse=False):
"""Decide whether we're opening or creating a file or a folder"""
if (
not updateOnly and
not recurse and
isDirNode(parent) and not parent.h.strip().startswith('@path') and
not parent.b.strip().startswith('@path')
):
createDir(c,parent,d)
return True # even if it didn't happen, else get stuck in edit mode w/o focus
if os.path.isdir(d):
if | |
text to long list
keywords_text.extend(filter_dict_page(parsed_pagetext, all_keywords)) # Filter using keywords
ideology_text.extend(filter_dict_page(parsed_pagetext, all_ideol)) # Filter using ideology words
logging.info("Successfully parsed and filtered file " + str(file) + "...")
return webtext,keywords_text,ideology_text
def filter_dict_page(pagetext_list, keyslist):
    """Filter parsed web text to the strings that contain a term from keyslist.

    Matching is case-insensitive and whole-word: a key matches only when it
    appears as a standalone word (or word sequence, for multi-word keys),
    never as a fragment of a longer word.

    Args:
        pagetext_list: list of strings parsed from one .html page.
        keyslist: iterable of key words/phrases to search for.

    Returns:
        List of the original strings (case preserved, order preserved) that
        contain at least one key; each matching string appears exactly once,
        even when several keys hit it (the old version appended duplicates).
    """
    # Compile one case-insensitive whole-word pattern per key, once per page.
    # (The old code rebuilt the lowered key list inside the string loop.)
    # The (?<!\w)/(?!\w) guards also let multi-word keys and punctuation-
    # adjacent words match, which the old `key in string.split(' ')` check
    # could never do.
    patterns = [
        re.compile(r'(?<!\w)' + re.escape(str(key).lower()) + r'(?!\w)')
        for key in keyslist
    ]
    filteredtext = []  # Strings of the page that matched at least one key
    for string in pagetext_list:
        lowered = str(string).lower()
        if any(pattern.search(lowered) for pattern in patterns):
            filteredtext.append(string)  # keep original case, append once
    return filteredtext
#logging.info("Output of filter_keywords_page with keywords:\n" + str(filter_dict_page(example_textlist, all_keywords)))
#logging.info("Output of filter_keywords_page with ideology words:\n\n" + str(filter_dict_page(example_textlist, all_ideol)))
def dictify_webtext(school_dict):
    """OBSOLETE. Kept here for purposes of comparison.
    Reads parsing output from text files and saves to school_dict multiple parsing outputs:
    webtext, keywords_text, ideology_text, file_count, etc.

    Mutates school_dict in place and also increments the module-level counter
    `itervar`. Returns school_dict on success, None on any failure (missing
    folder or load error); failures are signaled via the *_flag entries.
    """
    # Allow function to access these variables already defined outside the function (globally)
    global itervar,numschools,parsed,wget_dataloc,URL_var,NAME_var,ADDR_var,save_dir
    datalocation = wget_dataloc # Define path to local data storage
    school_name, school_address, school_URL = school_dict[NAME_var], school_dict[ADDR_var], school_dict[URL_var] # Define varnames
    itervar+=1 # Count this school
    print("Loading into dict parsing output for " + str(school_name) + ", which is school #" + str(itervar) + " of " + str(numschools) + "...")
    # Reset outputs/flags before loading, so stale values never survive a failure.
    school_dict["webtext"], school_dict["keywords_text"], school_dict["ideology_text"] = [[] for _ in range(3)]
    school_dict["duplicate_flag"], school_dict["parse_error_flag"], school_dict["wget_fail_flag"] = [0 for _ in range(3)]
    school_dict['ess_strength'],school_dict['prog_strength'] = [0.0 for _ in range(2)]
    # Folder naming convention: "<name>_<2-letter state code sliced from address>"
    folder_name = re.sub(" ","_",(school_dict[NAME_var]+" "+school_dict[ADDR_var][-8:-6])) # This gives name and state separated by "_"
    school_folder = datalocation + folder_name + "/"
    error_file = school_folder + "error_flags.txt" # Define file path for error text log
    if school_URL==school_name:
        school_URL = folder_name # Workaround for full_schooldata, which doesn't yet have URLs
    # Check if folder exists. If not, exit function
    if not (os.path.exists(school_folder) or os.path.exists(school_folder.lower()) or os.path.exists(school_folder.upper())):
        print("  !! NO DIRECTORY FOUND matching " + str(school_folder) + ". Aborting dictify function...")
        school_dict['wget_fail_flag'] = 1
        return
    try:
        # Load school parse output from disk into dictionary
        school_dict["webtext"] = load_list(school_folder + "webtext.txt")
        school_dict["keywords_text"] = load_list(school_folder + "keywords_text.txt")
        school_dict["ideology_text"] = load_list(school_folder + "ideology_text.txt")
        """ # Comment out until dict_count is run
        school_dict["ess_count"] = load_list(school_folder + "ess_count.txt")
        school_dict["prog_count"] = load_list(school_folder + "prog_count.txt")
        school_dict["rit_count"] = load_list(school_folder + "rit_count.txt")
        school_dict['ess_strength'] = float(school_dict['ess_count'])/float(school_dict['rit_count'])
        school_dict['prog_strength'] = float(school_dict['prog_count'])/float(school_dict['rit_count'])
        """
        # load error_file as a list with four pieces, the last element of each of which is the flag value itself:
        # NOTE(review): flags loaded here are strings (split() output), not ints,
        # except wget_fail_flag which may be overwritten with int 1 below -- callers
        # should not assume a consistent type. TODO confirm intended.
        error_text = load_list(error_file)
        school_dict["duplicate_flag"] = error_text[0].split()[-1] # last element of first piece of error_text
        school_dict["parse_error_flag"] = error_text[1].split()[-1]
        school_dict["wget_fail_flag"] = error_text[2].split()[-1]
        school_dict["html_file_count"] = error_text[3].split()[-1]
        if int(school_dict["html_file_count"])==0:
            school_dict["wget_fail_flag"] = 1 # If no HTML, then web download failed!
        print("  LOADED " + school_dict["html_file_count"] + " .html file(s) from website of " + str(school_name) + "...")
        #save_datafile(dicts_list, save_dir+"school_parser_temp", "JSON") # Save output so we can pick up where left off, in case something breaks before able to save final output
        return school_dict
    except Exception as e:
        print("  ERROR! Failed to load into dict parsing output for " + str(school_name))
        print("  ",e)
        school_dict["parse_error_flag"] = 1
        return
def pandify_webtext(df):
"""Reads parsing output from text files and saves to DataFrame df multiple parsing outputs:
webtext, keywords_text, ideology_text, file_count, dict_count outputs, etc."""
# Allow function to access these variables already defined outside the function (globally)
global numschools,wget_dataloc,save_dir,NAME_var,ADDR_var,URL_var
datalocation = wget_dataloc # Define path to local data storage
#logging.info("Loading into DataFrame parsing output for " + str(len(df)) + " school websites out of a total of " + str(numschools) + "...")
# Initialize text strings and counts as empty, then convert data types:
empty = ["" for elem in range(len(df["NCESSCH"]))] # Create empty string column that is as long as the longest variable (NCESCCH used for matching)
df = df.assign(word_count=empty, chunk_count=empty, FOLDER_NAME=empty, TOTETH=empty, PCTETH=empty, AGE=empty, PCTFRL=empty, PLACE=empty, WEBTEXT=empty, PROG_TEXT=empty, ESS_TEXT=empty, RIT_TEXT=empty, IDEOLOGY_TEXT=empty, MISSION_TEXT=empty, CURR_TEXT=empty, PHIL_TEXT=empty, HIST_TEXT=empty, ABOUT_TEXT=empty, KEYWORDS_TEXT=empty, ESS_COUNT=empty, PROG_COUNT=empty, RIT_COUNT=empty, ESS_STR=empty, PROG_STR=empty, IDDIFF_STR=empty, IDDIFF_STRLOG=empty ESS_PCT=empty, PROG_PCT=empty, IDDIFF_PCT=empty, IDDIFF_PCTLOG=empty) # Add empty columns to df
df.loc[:,["PLACE", "WEBTEXT", "PROG_TEXT", "ESS_TEXT", "RIT_TEXT", "IDEOLOGY_TEXT", "MISSION_TEXT", "CURR_TEXT", "PHIL_TEXT", "HIST_TEXT", "ABOUT_TEXT", "KEYWORDS_TEXT", "FOLDER_NAME"]] = df.loc[:,["PLACE", "WEBTEXT", "PROG_TEXT", "ESS_TEXT", "RIT_TEXT", "IDEOLOGY_TEXT", "MISSION_TEXT", "CURR_TEXT", "PHIL_TEXT", "HIST_TEXT", "ABOUT_TEXT", "KEYWORDS_TEXT", "FOLDER_NAME"]].apply(lambda x: x.astype(object)) # Convert to object type--holds text
df.loc[:,["word_count", "chunk_count", "AGE", "TOTETH", "ESS_COUNT", "PROG_COUNT", "RIT_COUNT"]] = df.loc[:,["word_count", "chunk_count", "AGE", "TOTETH", "ESS_COUNT", "PROG_COUNT", "RIT_COUNT"]].apply(pd.to_numeric, downcast="unsigned") # Convert to int dtype--holds positive numbers (no decimals)
df.loc[:,["PCTETH", "PCTFRL", "ESS_STR", "PROG_STR", "IDDIFF_STR", "IDDIFF_STRLOG", "ESS_PCT", "PROG_PCT", "IDDIFF_PCT", "IDDIFF_PCTLOG"]] = df.loc[:,["PCTETH", "PCTFRL", "ESS_STR", "PROG_STR", "IDDIFF_STR", "IDDIFF_PCTLOG", "ESS_PCT", "PROG_PCT", "IDDIFF_PCT", "IDDIFF_PCTLOG"]].apply(pd.to_numeric, downcast="float") # Use most efficient float type for these vars--hold decimals
df.loc[:,"FOLDER_NAME"] = df.loc[:,[NAME_var,ADDR_var]].apply(lambda x: re.sub(" ","_","{} {}".format(str(x[0]),str(x[1][-8:-6]))), axis=1) # This gives name and state separated by "_"
df.loc[:,"school_folder"] = df.loc[:,"FOLDER_NAME"].apply(lambda x: str(datalocation) + '{}/'.format(str(x)))
df.loc[:,"error_file"] = df.loc[:,"school_folder"].apply(lambda x: '{}error_flags.txt'.format(str(x))) # Define file path for error text log
df.loc[:,"counts_file"] = df.loc[:,"school_folder"].apply(lambda x: '{}dict_counts.txt'.format(str(x)))
try:
# Compute demographic variables:
df["TOTETH"] = df[["AM", "AS", "BL", "HI", "HP", "TR"]].apply(sum, axis=1) # Number of nonwhite K-12 students
df["PCTETH"] = (df["TOTETH"]/df["MEMBER"]).apply(pd.to_numeric, downcast='float') # Percent nonwhite K-12 students
df["PCTFRL"] = (df["TOTFRL"]/df["MEMBER"]).apply(pd.to_numeric, downcast='float') # Percent receiving free/ reduced-price lunch
df["AGE"] = data_year - df["YEAR_OPENED"] # Number of years school has been open
# Recode variables:
df["PLACE"] = df["LOCALE"].map({11.0:"City", 12.0:"City", 13.0:"City", 21.0:"Suburb", 22.0:"Suburb", 23.0:"Suburb", 31.0:"Town", 32.0:"Town", 33.0:"Town", 41.0:"Rural", 42.0:"Rural", 43.0:"Rural"}).astype('category')
df["LOCALE"] = df["LOCALE"].map({11.0:"City (large)", 12.0:"City (midsize)", 13.0:"City (small)", 21.0:"Suburb (large)", 22.0:"Suburb (midsize)", 23.0:"Suburb (small)", 31.0:"Town (fringe)", 32.0:"Town (distant)", 33.0:"Town (remote)", 41.0:"Rural (fringe)", 42.0:"Rural (distant)", 43.0:"Rural (remote)"}).astype('category')
df["TITLEI"] = df["TITLEI"].map({"Yes":1, "No":0}).astype('category')
# load error_file as a list with four pieces, the last element of each of which is the flag value itself:
df.loc[:,"error_text"] = df.loc[:,"error_file"].apply(lambda x: load_list('{}'.format(str(x))))
df.loc[:,"duplicate_flag"] = df.loc[:,"error_text"].apply(lambda x: '{}'.format(str(x[0].split()[-1]))) # # last element of first piece of error_text
df.loc[:,"parse_error_flag"] = df.loc[:,"error_text"].apply(lambda x: '{}'.format(str(x[1].split()[-1])))
df.loc[:,"wget_fail_flag"] = df.loc[:,"error_text"].apply(lambda x: '{}'.format(str(x[2].split()[-1])))
df.loc[:,"html_file_count"] = df.loc[:,"error_text"].apply(lambda x: '{}'.format(str(x[3].split()[-1])))
downloaded = df["wget_fail_flag"].map({"1":True,1:True,"0":False,0:False}) == False # This binary conditional filters df to only those rows with downloaded web content--where wget_fail_flag==False and thus does NOT signal download failure
logging.info("Loading webtext from disk into DF...")
# For reference from webparser_mp.py:
# keysfiles_list = ["mission_text.txt","curr_text.txt","phil_text.txt","hist_text.txt","about_text.txt","allkeys_text.txt"]
# dictsfiles_list = ["prog_text.txt","ess_text.txt","rit_text.txt","allideol_text.txt","alldicts_text.txt"]
# Load school parse output from disk into DataFrame:
df.loc[downloaded,"WEBTEXT"] = df.loc[downloaded,"school_folder"].apply(lambda x: load_list("{}webtext.txt".format(str(x)))) # df["wget_fail_flag"]==False
df.loc[downloaded,"PROG_TEXT"] = df.loc[downloaded,"school_folder"].apply(lambda x: load_list("{}prog_text.txt".format(str(x))))
df.loc[downloaded,"ESS_TEXT"] = df.loc[downloaded,"school_folder"].apply(lambda x: load_list("{}ess_text.txt".format(str(x))))
df.loc[downloaded,"RIT_TEXT"] = df.loc[downloaded,"school_folder"].apply(lambda x: load_list("{}rit_text.txt".format(str(x))))
df.loc[downloaded,"IDEOLOGY_TEXT"] = df.loc[downloaded,"school_folder"].apply(lambda x: load_list("{}allideol_text.txt".format(str(x))))
df.loc[downloaded,"MISSION_TEXT"] = df.loc[downloaded,"school_folder"].apply(lambda x: load_list("{}mission_text.txt".format(str(x))))
df.loc[downloaded,"CURR_TEXT"] = df.loc[downloaded,"school_folder"].apply(lambda x: load_list("{}curr_text.txt".format(str(x))))
df.loc[downloaded,"PHIL_TEXT"] = df.loc[downloaded,"school_folder"].apply(lambda x: load_list("{}phil_text.txt".format(str(x))))
df.loc[downloaded,"HIST_TEXT"] = df.loc[downloaded,"school_folder"].apply(lambda x: load_list("{}hist_text.txt".format(str(x))))
df.loc[downloaded,"ABOUT_TEXT"] = df.loc[downloaded,"school_folder"].apply(lambda x: load_list("{}about_text.txt".format(str(x))))
df.loc[downloaded,"KEYWORDS_TEXT"] = df.loc[downloaded,"school_folder"].apply(lambda x: load_list("{}allkeys_text.txt".format(str(x))))
df.loc[downloaded,"word_count"] = df.loc[downloaded, "WEBTEXT"].apply(lambda x: sum(map(len, map(word_tokenize, x))))
df.loc[downloaded,"chunk_count"] = df.loc[downloaded, "WEBTEXT"].apply(lambda x: len(x))
df["counts_text"] = df.counts_file.apply(lambda x: load_list("{}".format(str(x))))
df.loc[downloaded,"ESS_COUNT"] = df.loc[downloaded,"counts_text"].apply(lambda x: "{}".format(str(x[0].split()[-1]))).apply(pd.to_numeric,downcast='unsigned') # 2nd element of 1st row in counts_text: take as uint dtype (no negatives)
df.loc[downloaded,"PROG_COUNT"] = df.loc[downloaded,"counts_text"].apply(lambda x: "{}".format(str(x[1].split()[-1]))).apply(pd.to_numeric,downcast='unsigned') # 2nd element of 2nd row
df.loc[downloaded,"RIT_COUNT"] = df.loc[downloaded,"counts_text"].apply(lambda x: "{}".format(str(x[2].split()[-1]))).apply(pd.to_numeric,downcast='unsigned') # 2nd element of 3nd row
df.loc[downloaded,"ESS_STR"] = (df.loc[downloaded,"ESS_COUNT"]/df.loc[downloaded, "RIT_COUNT"]).apply(pd.to_numeric, downcast='float') # calculate ideology ratio, use most memory-efficient float dtype
df.loc[downloaded,"PROG_STR"] = (df.loc[downloaded,"PROG_COUNT"]/df.loc[downloaded, "RIT_COUNT"]).apply(pd.to_numeric, downcast='float')
df.loc[downloaded,"IDDIFF_STR"] = (df.loc[downloaded,"PROG_STR"] - df.loc[downloaded,"ESS_STR"]).apply(pd.to_numeric, downcast='float')
df.loc[downloaded,"IDDIFF_STRLOG"] = (df.loc[downloaded,"PROG_STR"].apply(log10) - df.loc[downloaded,"ESS_STR"].apply(log10)).apply(pd.to_numeric, downcast='float')
df.loc[downloaded,"ESS_PCT"] = (df.loc[downloaded,"ESS_COUNT"]/df.loc[downloaded, "word_count"]).apply(pd.to_numeric, downcast='float') # calculate ideology ratio, use most memory-efficient float dtype
df.loc[downloaded,"PROG_PCT"] = (df.loc[downloaded,"PROG_COUNT"]/df.loc[downloaded, "word_count"]).apply(pd.to_numeric, downcast='float')
df.loc[downloaded,"IDDIFF_PCT"] = (df.loc[downloaded,"PROG_PCT"] - df.loc[downloaded,"ESS_PCT"]).apply(pd.to_numeric, downcast='float')
df.loc[downloaded,"IDDIFF_PCTLOG"] = (df.loc[downloaded,"PROG_PCT"].apply(log10) - df.loc[downloaded,"ESS_PCT"].apply(log10)).apply(pd.to_numeric, downcast='float')
df = df.drop(["school_folder","error_text","error_file","counts_text", "AM", "AS", "BL", "HI", "HP"],axis=1) # Clean up temp variables
logging.info("LOADED " + df["html_file_count"].sum() + " .html files into DataFrame!")
#save_datafile(df, save_dir+"df_parser_temp", "pickle") # Save output so we can pick up where left off, in case | |
# The MIT License (MIT)
#
# Copyright (c) 2016 <NAME> & <NAME> for Adafruit Industries
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Matrix Displays
================
"""
from ht16k33.ht16k33 import HT16K33
__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_HT16K33.git"
# Font table 0x20 ~ 0x7f
# One entry per printable ASCII character; index = ord(char) - 0x20.
# Each entry is 8 column bytes; values stay within 0x00-0x1f, so only the
# low 5 bits appear to be used (a 5x8 glyph) -- the last two bytes are padding.
# NOTE(review): the a-z rows duplicate the A-Z glyphs -- presumably intentional
# (no lowercase designs at this size); confirm before changing.
FontTable = [
    [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], #
    [0x00, 0x00, 0x00, 0x1d, 0x00, 0x00, 0x00, 0x00], # !
    [0x00, 0x00, 0x18, 0x00, 0x18, 0x00, 0x00, 0x00], # "
    [0x00, 0x0a, 0x1f, 0x0a, 0x1f, 0x0a, 0x00, 0x00], # #
    [0x00, 0x0e, 0x0a, 0x1f, 0x0a, 0x0b, 0x00, 0x00], # $
    [0x00, 0x11, 0x02, 0x04, 0x08, 0x11, 0x00, 0x00], # %
    [0x00, 0x0a, 0x15, 0x15, 0x02, 0x05, 0x00, 0x00], # &
    [0x00, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], # '
    [0x00, 0x00, 0x0e, 0x11, 0x00, 0x00, 0x00, 0x00], # (
    [0x00, 0x00, 0x11, 0x0e, 0x00, 0x00, 0x00, 0x00], # )
    [0x00, 0x00, 0x04, 0x0e, 0x04, 0x00, 0x00, 0x00], # *
    [0x00, 0x04, 0x04, 0x1f, 0x04, 0x04, 0x00, 0x00], # +
    [0x00, 0x00, 0x01, 0x06, 0x00, 0x00, 0x00, 0x00], # ,
    [0x00, 0x04, 0x04, 0x04, 0x04, 0x04, 0x00, 0x00], # -
    [0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00], # .
    [0x00, 0x00, 0x03, 0x04, 0x18, 0x00, 0x00, 0x00], # /
    [0x00, 0x1f, 0x13, 0x15, 0x19, 0x1f, 0x00, 0x00], # 0
    [0x00, 0x09, 0x1f, 0x01, 0x00, 0x00, 0x00, 0x00], # 1
    [0x00, 0x13, 0x15, 0x15, 0x15, 0x09, 0x00, 0x00], # 2
    [0x00, 0x11, 0x15, 0x15, 0x15, 0x1b, 0x00, 0x00], # 3
    [0x00, 0x1e, 0x02, 0x07, 0x02, 0x02, 0x00, 0x00], # 4
    [0x00, 0x1d, 0x15, 0x15, 0x15, 0x12, 0x00, 0x00], # 5
    [0x00, 0x1f, 0x15, 0x15, 0x15, 0x17, 0x00, 0x00], # 6
    [0x00, 0x10, 0x10, 0x13, 0x14, 0x18, 0x00, 0x00], # 7
    [0x00, 0x1f, 0x15, 0x15, 0x15, 0x1f, 0x00, 0x00], # 8
    [0x00, 0x1d, 0x15, 0x15, 0x15, 0x1f, 0x00, 0x00], # 9
    [0x00, 0x00, 0x00, 0x1b, 0x00, 0x00, 0x00, 0x00], # :
    [0x00, 0x00, 0x00, 0x13, 0x00, 0x00, 0x00, 0x00], # ;
    [0x00, 0x00, 0x04, 0x0a, 0x11, 0x00, 0x00, 0x00], # <
    [0x00, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x00, 0x00], # =
    [0x00, 0x00, 0x11, 0x0a, 0x04, 0x00, 0x00, 0x00], # >
    [0x00, 0x08, 0x10, 0x15, 0x08, 0x00, 0x00, 0x00], # ?
    [0x00, 0x1f, 0x11, 0x15, 0x15, 0x1d, 0x00, 0x00], # @
    [0x00, 0x1f, 0x12, 0x12, 0x12, 0x1f, 0x00, 0x00], # A
    [0x00, 0x1f, 0x15, 0x15, 0x15, 0x1b, 0x00, 0x00], # B
    [0x00, 0x1f, 0x11, 0x11, 0x11, 0x11, 0x00, 0x00], # C
    [0x00, 0x1f, 0x11, 0x11, 0x11, 0x0e, 0x00, 0x00], # D
    [0x00, 0x1f, 0x15, 0x15, 0x15, 0x11, 0x00, 0x00], # E
    [0x00, 0x1f, 0x14, 0x14, 0x14, 0x10, 0x00, 0x00], # F
    [0x00, 0x1f, 0x11, 0x11, 0x15, 0x17, 0x00, 0x00], # G
    [0x00, 0x1f, 0x04, 0x04, 0x04, 0x1f, 0x00, 0x00], # H
    [0x00, 0x11, 0x11, 0x1f, 0x11, 0x11, 0x00, 0x00], # I
    [0x00, 0x03, 0x01, 0x01, 0x11, 0x1f, 0x00, 0x00], # J
    [0x00, 0x1f, 0x04, 0x04, 0x0a, 0x11, 0x00, 0x00], # K
    [0x00, 0x1f, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00], # L
    [0x00, 0x1f, 0x08, 0x04, 0x08, 0x1f, 0x00, 0x00], # M
    [0x00, 0x1f, 0x08, 0x04, 0x02, 0x1f, 0x00, 0x00], # N
    [0x00, 0x0e, 0x11, 0x11, 0x11, 0x0e, 0x00, 0x00], # O
    [0x00, 0x1f, 0x14, 0x14, 0x14, 0x08, 0x00, 0x00], # P
    [0x00, 0x1e, 0x12, 0x13, 0x12, 0x1e, 0x00, 0x00], # Q
    [0x00, 0x1f, 0x14, 0x14, 0x14, 0x0b, 0x00, 0x00], # R
    [0x00, 0x1d, 0x15, 0x15, 0x15, 0x17, 0x00, 0x00], # S
    [0x00, 0x10, 0x10, 0x1f, 0x10, 0x10, 0x00, 0x00], # T
    [0x00, 0x1f, 0x01, 0x01, 0x01, 0x1f, 0x00, 0x00], # U
    [0x00, 0x18, 0x06, 0x01, 0x06, 0x18, 0x00, 0x00], # V
    [0x00, 0x1e, 0x01, 0x06, 0x01, 0x1e, 0x00, 0x00], # W
    [0x00, 0x11, 0x0a, 0x04, 0x0a, 0x11, 0x00, 0x00], # X
    [0x00, 0x18, 0x04, 0x03, 0x04, 0x18, 0x00, 0x00], # Y
    [0x00, 0x11, 0x13, 0x15, 0x19, 0x11, 0x00, 0x00], # Z
    [0x00, 0x00, 0x1f, 0x11, 0x11, 0x00, 0x00, 0x00], # [
    [0x00, 0x00, 0x18, 0x04, 0x03, 0x00, 0x00, 0x00], # \
    [0x00, 0x11, 0x1f, 0x00, 0x00, 0x00, 0x00, 0x00], # ]
    [0x00, 0x00, 0x08, 0x10, 0x08, 0x00, 0x00, 0x00], # ^
    [0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00], # _
    [0x00, 0x10, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00], # `
    [0x00, 0x1f, 0x12, 0x12, 0x12, 0x1f, 0x00, 0x00], # a
    [0x00, 0x1f, 0x15, 0x15, 0x15, 0x1b, 0x00, 0x00], # b
    [0x00, 0x1f, 0x11, 0x11, 0x11, 0x11, 0x00, 0x00], # c
    [0x00, 0x1f, 0x11, 0x11, 0x11, 0x0e, 0x00, 0x00], # d
    [0x00, 0x1f, 0x15, 0x15, 0x15, 0x11, 0x00, 0x00], # e
    [0x00, 0x1f, 0x14, 0x14, 0x14, 0x10, 0x00, 0x00], # f
    [0x00, 0x1f, 0x11, 0x11, 0x15, 0x17, 0x00, 0x00], # g
    [0x00, 0x1f, 0x04, 0x04, 0x04, 0x1f, 0x00, 0x00], # h
    [0x00, 0x11, 0x11, 0x1f, 0x11, 0x11, 0x00, 0x00], # i
    [0x00, 0x03, 0x01, 0x01, 0x11, 0x1f, 0x00, 0x00], # j
    [0x00, 0x1f, 0x04, 0x04, 0x0a, 0x11, 0x00, 0x00], # k
    [0x00, 0x1f, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00], # l
    [0x00, 0x1f, 0x08, 0x04, 0x08, 0x1f, 0x00, 0x00], # m
    [0x00, 0x1f, 0x08, 0x04, 0x02, 0x1f, 0x00, 0x00], # n
    [0x00, 0x0e, 0x11, 0x11, 0x11, 0x0e, 0x00, 0x00], # o
    [0x00, 0x1f, 0x14, 0x14, 0x14, 0x08, 0x00, 0x00], # p
    [0x00, 0x1e, 0x12, 0x13, 0x12, 0x1e, 0x00, 0x00], # q
    [0x00, 0x1f, 0x14, 0x14, 0x14, 0x0b, 0x00, 0x00], # r
    [0x00, 0x1d, 0x15, 0x15, 0x15, 0x17, 0x00, 0x00], # s
    [0x00, 0x10, 0x10, 0x1f, 0x10, 0x10, 0x00, 0x00], # t
    [0x00, 0x1f, 0x01, 0x01, 0x01, 0x1f, 0x00, 0x00], # u
    [0x00, 0x18, 0x06, 0x01, 0x06, 0x18, 0x00, 0x00], # v
    [0x00, 0x1e, 0x01, 0x06, 0x01, 0x1e, 0x00, 0x00], # w
    [0x00, 0x11, 0x0a, 0x04, 0x0a, 0x11, 0x00, 0x00], # x
    [0x00, 0x18, 0x04, 0x03, 0x04, 0x18, 0x00, 0x00], # y
    [0x00, 0x11, 0x13, 0x15, 0x19, 0x11, 0x00, 0x00], # z
    [0x00, 0x00, 0x04, 0x1f, 0x11, 0x00, 0x00, 0x00], # {
    [0x00, 0x00, 0x00, 0x1b, 0x00, 0x00, 0x00, 0x00], # |
    [0x00, 0x00, 0x11, 0x1f, 0x04, 0x00, 0x00, 0x00], # }
    [0x00, 0x04, 0x08, 0x04, 0x08, 0x00, 0x00, 0x00], # ~
    [0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, 0x00], #
]
def getFontData(char):
    """Return the 8-byte FontTable entry for a printable ASCII char, else None."""
    code = ord(char)
    if 0x20 <= code <= 0x7f:
        return FontTable[code - 0x20]
    return None
class Matrix16x8(HT16K33):
    """A double matrix or the matrix wing."""

    def pixel(self, x, y, color=None):
        """Get or set the color of a given pixel."""
        if not (0 <= x <= 15 and 0 <= y <= 7):
            return None
        # The right-hand 8x8 half is addressed as rows 8-15 of the first half.
        if x > 7:
            x, y = x - 8, y + 8
        return super()._pixel(y, x, color)

    def __getitem__(self, key):
        col, row = key
        return self.pixel(col, row)

    def __setitem__(self, key, value):
        col, row = key
        self.pixel(col, row, value)

    def putChar(self, left, right):
        """Show one character per 8x8 half, interleaving their font columns."""
        left_rows = getFontData(left)
        right_rows = getFontData(right)
        interleaved = bytearray(16)
        for i in range(8):
            interleaved[2 * i] = right_rows[i]
            interleaved[2 * i + 1] = left_rows[i]
        self.set_buffer(interleaved)
class Matrix8x8(HT16K33):
"""A single matrix."""
def pixel(self, x, y, color=None):
"""Get or set the color | |
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import os
from dataclasses import dataclass
from pathlib import PurePath
from typing import Optional, Sequence
from pants.engine.engine_aware import EngineAwareParameter
from pants.util.dirutil import fast_relpath, longest_dir_prefix
from pants.util.strutil import strip_prefix
# Currently unused, but reserved for possible future needs.
BANNED_CHARS_IN_TARGET_NAME = frozenset("@!?=")
class InvalidSpecPath(ValueError):
    """Indicate an invalid spec path for `Address`.

    Raised when a spec's path part is absolute, un-normalized (contains '.',
    '..', or empty components), or ends in the reserved BUILD name.
    """
class InvalidTargetName(ValueError):
    """Indicate an invalid target name for `Address`.

    Raised when a spec's name part is missing, contains banned characters,
    or does not sit at-or-above the file it addresses.
    """
@dataclass(frozen=True)
class AddressInput:
    """A string that has been parsed and normalized using the Address syntax.
    An AddressInput must be resolved into an Address using the engine (which involves inspecting
    disk to determine the types of its components).
    """
    # The path part of the spec (directory or file), relative to the build root.
    path_component: str
    # The optional name after ':'; None means "use the default target".
    target_component: Optional[str] = None
    def __post_init__(self):
        # Validate the target name whenever one was supplied, and require one
        # when the path is empty (a spec with no path must name its target).
        if self.target_component is not None or self.path_component == "":
            if not self.target_component:
                raise InvalidTargetName(
                    f"Address spec {self.path_component}:{self.target_component} has no name part."
                )
            banned_chars = BANNED_CHARS_IN_TARGET_NAME & set(self.target_component)
            if banned_chars:
                raise InvalidTargetName(
                    f"Banned chars found in target name. {banned_chars} not allowed in target "
                    f"name: {self.target_component}"
                )
        # A root is okay.
        if self.path_component == "":
            return
        # The path must already be normalized: no '.', '..', or empty segments.
        components = self.path_component.split(os.sep)
        if any(component in (".", "..", "") for component in components):
            raise InvalidSpecPath(
                f"Address spec has un-normalized path part '{self.path_component}'"
            )
        if os.path.isabs(self.path_component):
            raise InvalidSpecPath(
                f"Address spec has absolute path {self.path_component}; expected a path relative "
                "to the build root."
            )
    @classmethod
    def parse(
        cls,
        spec: str,
        relative_to: Optional[str] = None,
        subproject_roots: Optional[Sequence[str]] = None,
    ) -> "AddressInput":
        """Parse a string into an AddressInput.
        :param spec: Target address spec.
        :param relative_to: path to use for sibling specs, ie: ':another_in_same_build_family',
            interprets the missing spec_path part as `relative_to`.
        :param subproject_roots: Paths that correspond with embedded build roots under
            the current build root.
        For example:
            some_target(
                name='mytarget',
                dependencies=['path/to/buildfile:targetname'],
            )
        Where `path/to/buildfile:targetname` is the dependent target address spec.
        If there is no target name component, it defaults to the default target in the resulting
        Address's spec_path.
        Optionally, specs can be prefixed with '//' to denote an absolute spec path. This is
        normally not significant except when a spec referring to a root level target is needed
        from deeper in the tree. For example, in `path/to/buildfile/BUILD`:
            some_target(
                name='mytarget',
                dependencies=[':targetname'],
            )
        The `targetname` spec refers to a target defined in `path/to/buildfile/BUILD*`. If instead
        you want to reference `targetname` in a root level BUILD file, use the absolute form.
        For example:
            some_target(
                name='mytarget',
                dependencies=['//:targetname'],
            )
        The spec may be a file, such as `a/b/c.txt`. It may include a relative address spec at the
        end, such as `a/b/c.txt:original` or `a/b/c.txt:../original`, to disambiguate which target
        the file comes from; otherwise, it will be assumed to come from the default target in the
        directory, i.e. a target which leaves off `name`.
        """
        # Determine whether `relative_to` falls inside an embedded build root.
        subproject = (
            longest_dir_prefix(relative_to, subproject_roots)
            if relative_to and subproject_roots
            else None
        )
        def prefix_subproject(spec_path: str) -> str:
            # Re-root a spec path under the subproject, when one applies.
            if not subproject:
                return spec_path
            if spec_path:
                return os.path.join(subproject, spec_path)
            return os.path.normpath(subproject)
        # Split off the optional ':name' suffix (rsplit: only the last colon counts).
        spec_parts = spec.rsplit(":", 1)
        path_component = spec_parts[0]
        target_component = None if len(spec_parts) == 1 else spec_parts[1]
        normalized_relative_to = None
        if relative_to:
            normalized_relative_to = (
                fast_relpath(relative_to, subproject) if subproject else relative_to
            )
        # './x' and bare ':name' specs are interpreted relative to `relative_to`.
        if path_component.startswith("./") and normalized_relative_to:
            path_component = os.path.join(normalized_relative_to, path_component[2:])
        if not path_component and normalized_relative_to:
            path_component = normalized_relative_to
        path_component = prefix_subproject(strip_prefix(path_component, "//"))
        return cls(path_component, target_component)
    def file_to_address(self) -> "Address":
        """Converts to an Address by assuming that the path_component is a file on disk."""
        if self.target_component is None:
            # Use the default target in the same directory as the file.
            spec_path, relative_file_path = os.path.split(self.path_component)
            # We validate that this is not a top-level file. We couldn't do this earlier in the
            # AddressSpec constructor because we weren't sure if the path_spec referred to a file
            # vs. a directory.
            if not spec_path:
                raise InvalidTargetName(
                    "Top-level file specs must include which target they come from, such as "
                    f"`{self.path_component}:original_target`, but {self.path_component} did not "
                    f"have an address."
                )
            return Address(spec_path=spec_path, relative_file_path=relative_file_path)
        # The target component may be "above" (but not below) the file in the filesystem.
        # Determine how many levels above the file it is, and validate that the path is relative.
        parent_count = self.target_component.count(os.path.sep)
        if parent_count == 0:
            # Simple case: target lives in the file's own directory.
            spec_path, relative_file_path = os.path.split(self.path_component)
            return Address(
                spec_path=spec_path,
                relative_file_path=relative_file_path,
                target_name=self.target_component,
            )
        # A multi-level target component must be exactly '../' repeated, then a name.
        expected_prefix = f"..{os.path.sep}" * parent_count
        if self.target_component[: self.target_component.rfind(os.path.sep) + 1] != expected_prefix:
            raise InvalidTargetName(
                "A target may only be defined in a directory containing a file that it owns in "
                f"the filesystem: `{self.target_component}` is not at-or-above the file "
                f"`{self.path_component}`."
            )
        # Split the path_component into a spec_path and relative_file_path at the appropriate
        # position.
        path_components = self.path_component.split(os.path.sep)
        if len(path_components) <= parent_count:
            raise InvalidTargetName(
                "Targets are addressed relative to the files that they own: "
                f"`{self.target_component}` is too far above the file `{self.path_component}` to "
                "be valid."
            )
        # Negative offset: the last (parent_count + 1) path segments belong to the file.
        offset = -1 * (parent_count + 1)
        spec_path = os.path.join(*path_components[:offset]) if path_components[:offset] else ""
        relative_file_path = os.path.join(*path_components[offset:])
        target_name = os.path.basename(self.target_component)
        return Address(spec_path, relative_file_path=relative_file_path, target_name=target_name)
    def dir_to_address(self) -> "Address":
        """Converts to an Address by assuming that the path_component is a directory on disk."""
        return Address(spec_path=self.path_component, target_name=self.target_component)
class Address(EngineAwareParameter):
"""A target address.
An address is a unique name for a `pants.engine.target.Target`, and optionally a particular file
that it owns.
While not their only use, a noteworthy use of addresses is specifying
target dependencies. For example:
some_target(
name='mytarget',
dependencies=['path/to/buildfile:targetname'],
)
Where `path/to/buildfile:targetname` is the dependent target address.
"""
    def __init__(
        self,
        spec_path: str,
        *,
        relative_file_path: Optional[str] = None,
        target_name: Optional[str] = None,
    ) -> None:
        """
        :param spec_path: The path from the build root to the directory containing the BUILD file
            for the target.
        :param relative_file_path: The relative path from the spec_path to an addressed file,
            if any. Because files must always be located below targets that apply metadata to
            them, this will always be relative.
        :param target_name: The name of the target applying metadata to the file, defined in a
            BUILD file in the spec_path directory, or None if this path refers to the default
            target in that directory.
        :raises InvalidSpecPath: if the last component of spec_path starts with "BUILD".
        """
        self.spec_path = spec_path
        self._relative_file_path = relative_file_path
        # If the target_name is the same as the default name would be, we normalize to None.
        self._target_name = (
            target_name if target_name and target_name != os.path.basename(self.spec_path) else None
        )
        # Precompute the hash once from the three identity fields (presumably
        # consumed by a __hash__ not visible here -- confirm).
        self._hash = hash((self.spec_path, self._relative_file_path, self._target_name))
        # NOTE(review): validation runs after the attributes are assigned; harmless
        # because the exception escapes the constructor, but worth knowing.
        if PurePath(spec_path).name.startswith("BUILD"):
            raise InvalidSpecPath(
                f"The address {self.spec} has {PurePath(spec_path).name} as the last part of its "
                f"path, but BUILD is a reserved name. Please make sure that you did not name any "
                f"directories BUILD."
            )
    @property
    def is_base_target(self) -> bool:
        """True when this address refers to a whole target rather than one owned file."""
        return self._relative_file_path is None
    @property
    def is_default_target(self) -> bool:
        """True if this address refers to the "default" target in the spec_path.
        The default target has a target name equal to the directory name.
        """
        return self._target_name is None
    @property
    def filename(self) -> str:
        """The full path (spec_path joined with the file) of an addressed file.

        :raises ValueError: when called on a base-target address, which owns no file.
        """
        if self._relative_file_path is None:
            raise ValueError("Only a file Address (`not self.is_base_target`) has a filename.")
        return os.path.join(self.spec_path, self._relative_file_path)
@property
def target_name(self) -> str:
if self._target_name is None:
return os.path.basename(self.spec_path)
return self._target_name
    @property
    def spec(self) -> str:
        """The canonical string representation of the Address.
        Prepends '//' if the target is at the root, to disambiguate root-level targets
        from "relative" spec notation.
        :API: public
        """
        prefix = "//" if not self.spec_path else ""
        file_portion = f"{prefix}{self.spec_path}"
        if self._relative_file_path is not None:
            file_portion = os.path.join(file_portion, self._relative_file_path)
        # Relativize the target name to the dirname of the file.
        parent_prefix = (
            "../" * self._relative_file_path.count(os.path.sep) if self._relative_file_path else ""
        )
        # A default-named target with no parent traversal needs no ':name' suffix.
        if self._target_name is None and not parent_prefix:
            return file_portion
        target_name = self._target_name or os.path.basename(self.spec_path)
        return f"{file_portion}:{parent_prefix}{target_name}"
@property
def path_safe_spec(self) -> str:
    """A variant of `spec` that is safe for use in filesystem paths: path
    separators become '.' and parent traversals become '@'.

    :API: public
    """
    if self._relative_file_path:
        parent_count = self._relative_file_path.count(os.path.sep)
        # '@' per parent-directory hop; '.' when the file is in spec_path itself.
        parent_prefix = "@" * parent_count if parent_count else "."
        file_portion = f".{self._relative_file_path.replace(os.path.sep, '.')}"
    else:
        parent_prefix = "."
        file_portion = ""
    if parent_prefix == ".":
        # No traversal: only emit the target portion when an explicit name exists.
        target_portion = f"{parent_prefix}{self._target_name}" if self._target_name else ""
    else:
        # Traversal present: always emit a name, defaulting to the directory name.
        target_name = self._target_name or os.path.basename(self.spec_path)
        target_portion = f"{parent_prefix}{target_name}"
    return f"{self.spec_path.replace(os.path.sep, '.')}{file_portion}{target_portion}"
def maybe_convert_to_base_target(self) -> "Address":
"""If this address is a generated subtarget, convert it back into its original base target.
Otherwise, return itself unmodified.
TODO: This is not correct: | |
the old positional version option and the new ue-version option
if self.args.release is not None and self.args.ue_version is not None:
raise RuntimeError(
"specified both `--ue-version` and the old positional version option; please use only `--ue-version`!"
)
# For the sake of a simpler pull request, we use self.args.release as the canonical place for this data.
# If support for the old positional version option is removed, this should be fixed.
if self.args.ue_version is not None:
self.args.release = self.args.ue_version
# We care about the version number only if we're building source
if self.buildTargets["source"]:
if self.args.release is None:
raise RuntimeError("missing `--ue-version` when building source")
# Determine if we are building a custom version of UE4 rather than an official release
self.args.release = self.args.release.lower()
if self.args.release == "custom" or self.args.release.startswith("custom:"):
# Both a custom repository and a custom branch/tag must be specified
if self.args.repo is None or self.args.branch is None:
raise RuntimeError(
"both a repository and branch/tag must be specified when building a custom version of the Engine"
)
# Use the specified repository and branch/tag
customName = (
self.args.release.split(":", 2)[1].strip()
if ":" in self.args.release
else ""
)
self.release = customName if len(customName) > 0 else "custom"
self.repository = self.args.repo
self.branch = self.args.branch
self.custom = True
else:
# Validate the specified version string
try:
ue4Version = semver.parse(self.args.release)
if (
ue4Version["major"] not in [4, 5]
or ue4Version["prerelease"] != None
):
raise Exception()
self.release = semver.format_version(
ue4Version["major"], ue4Version["minor"], ue4Version["patch"]
)
except:
raise RuntimeError(
'invalid Unreal Engine release number "{}", full semver format required (e.g. "4.20.0")'.format(
self.args.release
)
)
# Use the default repository and the release tag for the specified version
self.repository = DEFAULT_GIT_REPO
self.branch = "{}-release".format(self.release)
self.custom = False
# If the user specified a .0 release of the Unreal Engine and did not specify a changelist override then
# use the official changelist number for that release to ensure consistency with Epic Games Launcher builds
# (This is necessary because .0 releases do not include a `CompatibleChangelist` value in Build.version)
if (
self.changelist is None
and self.release in UNREAL_ENGINE_RELEASE_CHANGELISTS
):
self.changelist = UNREAL_ENGINE_RELEASE_CHANGELISTS[self.release]
else:
# defaults needed by other parts of the codebase
self.custom = False
self.release = None
self.repository = None
self.branch = None
# Store our common configuration settings
self.containerPlatform = (
"windows"
if platform.system() == "Windows" and self.args.linux == False
else "linux"
)
self.dryRun = self.args.dry_run
self.rebuild = self.args.rebuild
self.suffix = self.args.suffix
self.platformArgs = ["--no-cache"] if self.args.no_cache == True else []
self.excludedComponents = set(self.args.exclude)
self.baseImage = None
self.prereqsTag = None
self.ignoreBlacklist = self.args.ignore_blacklist
self.verbose = self.args.verbose
self.layoutDir = self.args.layout
self.combine = self.args.combine
# If the user specified custom version strings for ue4cli and/or conan-ue4cli, process them
self.ue4cliVersion = self._processPackageVersion("ue4cli", self.args.ue4cli)
self.conanUe4cliVersion = self._processPackageVersion(
"conan-ue4cli", self.args.conan_ue4cli
)
# Process any specified advanced configuration options (which we use directly as context values for the Jinja templating system)
self.opts = {"buildgraph_args": "-set:HostPlatformOnly=true"}
for o in self.args.opt:
if "=" in o:
key, value = o.split("=", 1)
self.opts[key.replace("-", "_")] = self._processTemplateValue(value)
else:
self.opts[o.replace("-", "_")] = True
# If we are generating Dockerfiles then generate them for all images that have not been explicitly excluded
if self.layoutDir is not None:
self.rebuild = True
# If we are generating Dockerfiles and combining them then set the corresponding Jinja context value
if self.layoutDir is not None and self.combine == True:
self.opts["combine"] = True
# If the user requested an option that is only compatible with generated Dockerfiles then ensure `-layout` was specified
if self.layoutDir is None and self.opts.get("source_mode", "git") != "git":
raise RuntimeError(
"the `-layout` flag must be used when specifying a non-default value for the `source_mode` option"
)
if self.layoutDir is None and self.combine == True:
raise RuntimeError(
"the `-layout` flag must be used when specifying the `--combine` flag"
)
# We care about source_mode and credential_mode only if we're building source
if self.buildTargets["source"]:
# Verify that the value for `source_mode` is valid if specified
validSourceModes = ["git", "copy"]
if self.opts.get("source_mode", "git") not in validSourceModes:
raise RuntimeError(
"invalid value specified for the `source_mode` option, valid values are {}".format(
validSourceModes
)
)
# Verify that the value for `credential_mode` is valid if specified
validCredentialModes = (
["endpoint", "secrets"]
if self.containerPlatform == "linux"
else ["endpoint"]
)
if self.opts.get("credential_mode", "endpoint") not in validCredentialModes:
raise RuntimeError(
"invalid value specified for the `credential_mode` option, valid values are {} when building {} containers".format(
validCredentialModes, self.containerPlatform.title()
)
)
# Generate Jinja context values for keeping or excluding components
self.opts["excluded_components"] = {
"ddc": ExcludedComponent.DDC in self.excludedComponents,
"debug": ExcludedComponent.Debug in self.excludedComponents,
"templates": ExcludedComponent.Templates in self.excludedComponents,
}
# If we're building Windows containers, generate our Windows-specific configuration settings
if self.containerPlatform == "windows":
self._generateWindowsConfig()
# If we're building Linux containers, generate our Linux-specific configuration settings
if self.containerPlatform == "linux":
self._generateLinuxConfig()
# If the user-specified suffix passed validation, prefix it with a dash
self.suffix = "-{}".format(self.suffix) if self.suffix != "" else ""
def describeExcludedComponents(self):
    """
    Returns a list of strings describing the components that will be excluded (if any.)
    """
    descriptions = [
        ExcludedComponent.description(component)
        for component in self.excludedComponents
    ]
    descriptions.sort()
    return descriptions
def _generateWindowsConfig(self):
    """Generates the configuration settings specific to Windows containers.

    Validates the Visual Studio / Unreal Engine version combination, selects
    the Windows Server Core base image tag, the Docker isolation mode and the
    memory limit, and appends the resulting flags to `self.platformArgs`.
    """
    self.visualStudio = self.args.visual_studio
    if self.release is not None and not self.custom:
        # Check whether specified Unreal Engine release is compatible with specified Visual Studio
        vsMinSupportedUnreal = VisualStudio.MinSupportedUnreal.get(
            self.visualStudio, None
        )
        if (
            vsMinSupportedUnreal is not None
            and semver.VersionInfo.parse(self.release) < vsMinSupportedUnreal
        ):
            raise RuntimeError(
                "specified version of Unreal Engine cannot be built with Visual Studio {}, oldest supported is {}".format(
                    self.visualStudio, vsMinSupportedUnreal
                )
            )
    self.visualStudioBuildNumber = VisualStudio.BuildNumbers[self.visualStudio]
    # See https://github.com/EpicGames/UnrealEngine/commit/72585138472785e2ee58aab9950a7260275ee2ac
    # Note: We must not pass VS2019 arg for older UE4 versions that didn't have VS2019 variable in their build graph xml.
    # Otherwise, UAT errors out with "Unknown argument: VS2019".
    if self.visualStudio != VisualStudio.VS2017:
        self.opts["buildgraph_args"] += f" -set:VS{self.visualStudio}=true"
    # Determine base tag for the Windows release of the host system
    self.hostBasetag = WindowsUtils.getHostBaseTag()
    # Store the tag for the base Windows Server Core image
    self.basetag = (
        self.args.basetag if self.args.basetag is not None else self.hostBasetag
    )
    if self.basetag is None:
        raise RuntimeError(
            "unable to determine Windows Server Core base image tag from host system. Specify it explicitly using -basetag command-line flag"
        )
    self.baseImage = "mcr.microsoft.com/windows/servercore:" + self.basetag
    self.dllSrcImage = WindowsUtils.getDllSrcImage(self.basetag)
    self.prereqsTag = self.basetag + "-vs" + self.visualStudio
    # If the user has explicitly specified an isolation mode then use it, otherwise auto-detect
    if self.args.isolation is not None:
        self.isolation = self.args.isolation
    else:
        # If we are able to use process isolation mode then use it, otherwise use Hyper-V isolation mode
        # (process isolation requires a matching kernel and Docker >= 18.09.0)
        differentKernels = self.basetag != self.hostBasetag
        dockerSupportsProcess = parse_version(
            DockerUtils.version()["Version"]
        ) >= parse_version("18.09.0")
        if not differentKernels and dockerSupportsProcess:
            self.isolation = "process"
        else:
            self.isolation = "hyperv"
    # Set the isolation mode Docker flag
    self.platformArgs.append("--isolation=" + self.isolation)
    # If the user has explicitly specified a memory limit then use it, otherwise auto-detect
    self.memLimit = None
    if self.args.m is not None:
        try:
            self.memLimit = humanfriendly.parse_size(self.args.m) / (
                1000 * 1000 * 1000
            )
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit are
        # not swallowed and converted into a bogus "invalid memory limit" error
        except Exception:
            raise RuntimeError('invalid memory limit "{}"'.format(self.args.m))
    else:
        # Only specify a memory limit when using Hyper-V isolation mode, in order to override the 1GB default limit
        # (Process isolation mode does not impose any memory limits by default)
        if self.isolation == "hyperv":
            self.memLimit = (
                DEFAULT_MEMORY_LIMIT
                if not self.args.random_memory
                else random.uniform(
                    DEFAULT_MEMORY_LIMIT, DEFAULT_MEMORY_LIMIT + 2.0
                )
            )
    # Set the memory limit Docker flag
    if self.memLimit is not None:
        self.platformArgs.extend(["-m", "{:.2f}GB".format(self.memLimit)])
def _generateLinuxConfig(self):
# Verify that any user-specified tag suffix does not collide with our base tags
if self.suffix.startswith("opengl") or self.suffix.startswith("cudagl"):
raise RuntimeError('tag suffix cannot begin with "opengl" or "cudagl".')
# Determine if we are building CUDA-enabled container images
self.cuda = None
if self.args.cuda is not None:
# Verify that the specified CUDA version is valid
self.cuda = self.args.cuda if self.args.cuda != "" else DEFAULT_CUDA_VERSION
# Use the appropriate base image for the specified CUDA version
self.baseImage = LINUX_BASE_IMAGES["cudagl"]
self.prereqsTag = "cudagl{cuda}-{ubuntu}"
else:
self.baseImage = LINUX_BASE_IMAGES["opengl"]
self.prereqsTag = | |
"""
.. _twitter:
Twitter Data API
================
"""
import logging
from functools import wraps
from twython import Twython
import pandas as pd
from pandas.io.json import json_normalize
# Log line format used by this module's logging calls.
TWITTER_LOG_FMT = ('%(asctime)s | %(levelname)s | %(filename)s:%(lineno)d '
                   '| %(funcName)s | %(message)s')
logging.basicConfig(format=TWITTER_LOG_FMT)

# Functions that depend on 'previous_cursor' and 'next_cursor' to
# navigate requests with a lot of data, request pagination basically.
CURSORED_FUNCTIONS = [
    'get_followers_ids',
    'get_followers_list',
    'get_friends_ids',
    'get_friends_list',
    'get_list_members',
    'get_list_memberships',
    'get_list_subscribers',
    'get_list_subscriptions',
    'get_retweeters_ids',
    'show_owned_lists',
]

# Responses that contain a special key (and the name of that key)
# containing the required data and need to be extracted through
# that key, as opposed to other responses where you can easily
# call DataFrame on them directly
SPECIAL_KEY_FUNCS = {
    'search': 'statuses',
    'get_followers_list': 'users',
    'get_friends_list': 'users',
    'get_list_members': 'users',
    'get_list_subscribers': 'users',
    'get_list_memberships': 'lists',
    'get_list_subscriptions': 'lists',
    'show_owned_lists': 'lists',
}

# Functions that contain an embedded ``user`` key, containing
# 40+ attributes of the user tweeting, listed, retweeted, etc.
# The value is the prefix applied to the non-user columns
# ('tweet_', 'list_', or '') to distinguish them from 'user_*'.
USER_DATA_EMBEDDED = {
    'get_favorites': 'tweet_',
    'get_home_timeline': 'tweet_',
    'get_list_memberships': 'list_',
    'get_list_statuses': 'tweet_',
    'get_list_subscriptions': '',
    'get_mentions_timeline': 'tweet_',
    'get_retweets': 'tweet_',
    'get_user_timeline': 'tweet_',
    'lookup_status': 'tweet_',
    'retweeted_of_me': 'tweet_',
    'search': 'tweet_',
    'show_lists': 'list_',
    'show_owned_lists': 'list_',
}

# Per-endpoint maximum result count per request; used both as the default
# `count` and as the page size when splitting larger requests.
DEFAULT_COUNTS = {
    'get_favorites': 200,
    'get_followers_ids': 5000,
    'get_followers_list': 200,
    'get_friends_ids': 5000,
    'get_friends_list': 200,
    'get_home_timeline': 200,
    'get_list_members': 5000,
    'get_list_memberships': 1000,
    'get_list_statuses': 100,
    'get_list_subscribers': 5000,
    'get_list_subscriptions': 1000,
    'get_mentions_timeline': 200,
    'get_retweeters_ids': 100,
    'get_retweets': 100,
    'get_user_timeline': 200,
    'lookup_status': 100,
    'lookup_user': 100,
    'retweeted_of_me': 100,
    'search': 100,
    'search_users': 20,
    'show_lists': 100,
    'show_owned_lists': 1000
}
def _expand_entities(df):
    """Expand the nested ``tweet_entities`` column of ``df`` into flat,
    comma-separated text columns (mentions, hashtags, urls, symbols, and
    media when present), inserted immediately after ``tweet_entities``.

    Returns ``df`` (modified in place); returned unchanged if it has no
    ``tweet_entities`` column.
    """
    if 'tweet_entities' in df:
        colnames = ['tweet_entities_' + x for x in ['mentions', 'hashtags',
                                                    'urls', 'symbols',
                                                    'media']]
        entities_df = json_normalize(df['tweet_entities'])
        # Each cell is a list of entity dicts; join each list into one
        # human-readable string per tweet.
        mentions = [', '.join(['@' + x['screen_name'] for x in y])
                    for y in entities_df['user_mentions']]
        hashtags = [', '.join(['#' + x['text'] for x in y])
                    for y in entities_df['hashtags']]
        urls = [', '.join([x['expanded_url'] for x in y])
                for y in entities_df['urls']]
        symbols = [', '.join(['$' + x['text'] for x in y])
                   for y in entities_df['symbols']]
        if 'media' in entities_df:
            # 'media' is absent for tweets without attachments; NaN -> ''
            entities_df['media'] = entities_df['media'].fillna('')
            media = [', '.join([x['media_url'] for x in y]) if y != '' else
                     y for y in entities_df['media']]
            entity_cols = [mentions, hashtags, urls, symbols, media]
        else:
            entity_cols = [mentions, hashtags, urls, symbols]
        col_idx = df.columns.get_loc('tweet_entities')
        # Insert the new columns right after the original, in colnames order.
        for j, col in enumerate(entity_cols):
            df.insert(col_idx+j+1, colnames[j], col)
    return df
def _get_counts(number=None, default=None):
"""Split a number into a list of divisors and the remainder.
The divisor is the default count in this case."""
if not number:
number = 1
div = divmod(number, default)
result = [default for x in range(div[0])]
if div[1] != 0:
return result + [div[1]]
return result
def make_dataframe(func):
    """Decorator that turns a stub API wrapper into a function returning a
    pandas DataFrame (or, for the ``*_ids`` endpoints, a dict of IDs).

    Splits a large ``count`` into several paginated requests (``max_id``
    paging for search/timelines, ``cursor`` paging for CURSORED_FUNCTIONS),
    calls the same-named Twython method for each page, then merges all
    responses into one DataFrame, expanding the embedded ``user`` data and
    entity/source columns where applicable.
    """
    @wraps(func)
    def wrapper(count=None, max_id=None, *args, **kwargs):
        twtr = Twython(**wrapper.get_auth_params())
        fname = func.__name__
        # getattr instead of eval: no string evaluation, and we no longer
        # rebind the enclosing `func` (the old `nonlocal func` kept a bound
        # method of the first, stale Twython instance alive across calls).
        api_func = getattr(twtr, fname)
        if count is None:
            count = DEFAULT_COUNTS[fname]
        counts = _get_counts(count, DEFAULT_COUNTS[fname])
        responses = []
        for i, count in enumerate(counts):
            if fname == 'search':
                # Stop when the previous page came back empty.
                if responses and not responses[-1]['statuses']:
                    break
                max_id = (max_id or None) if i == 0 else (responses[-1]['statuses'][-1]['id'] - 1)
            if (fname != 'search') and (fname not in CURSORED_FUNCTIONS):
                if responses and len(responses[-1]) == 0:
                    break
                max_id = (max_id or None) if i == 0 else (responses[-1][-1]['id'] - 1)
            if fname in CURSORED_FUNCTIONS:
                # Cursored endpoints page with next_cursor, never max_id.
                cursor = None if i == 0 else responses[-1]['next_cursor']
                max_id = None
            else:
                cursor = None
            kwargs_log = ', '.join([k + '=' + str(v) for k, v in kwargs.items()])
            args_log = ', '.join(args)
            logging.info(msg=fname + ' | ' + 'Requesting: ' +
                         'count=' + str(count) + ', max_id=' +
                         str(max_id) + ', ' + kwargs_log + args_log)
            resp = api_func(count=count,
                            max_id=max_id,
                            cursor=cursor,
                            *args, **kwargs)
            responses.append(resp)
        if '_ids' in fname:
            # ID endpoints return a plain dict, not a DataFrame.
            finallist = []
            for sublist in responses:
                finallist.extend(sublist['ids'])
            finaldict = {'previous_cursor': responses[0]['previous_cursor'],
                         'next_cursor': responses[-1]['next_cursor'],
                         'ids': finallist}
            return finaldict
        frames = []
        for resp in responses:
            if SPECIAL_KEY_FUNCS.get(fname):
                resp_df = pd.DataFrame(resp[SPECIAL_KEY_FUNCS.get(fname)])
                if fname in USER_DATA_EMBEDDED:
                    resp_df.columns = [USER_DATA_EMBEDDED[fname] + col for col in resp_df.columns]
                    user_df = pd.DataFrame([x['user'] for x in resp[SPECIAL_KEY_FUNCS.get(fname)]])
                    user_df.columns = ['user_' + col for col in user_df.columns]
                    temp_df = pd.concat([resp_df, user_df], axis=1, sort=False)
                else:
                    temp_df = resp_df
            else:
                resp_df = pd.DataFrame(resp)
                if fname in USER_DATA_EMBEDDED:
                    resp_df.columns = [USER_DATA_EMBEDDED[fname] + x for x in resp_df.columns]
                    user_df = pd.DataFrame([x['user'] for x in resp])
                    user_df.columns = ['user_' + x for x in user_df.columns]
                    temp_df = pd.concat([resp_df, user_df], axis=1)
                else:
                    temp_df = resp_df
            frames.append(temp_df)
        # DataFrame.append was removed in pandas 2.0; concatenate once instead
        # of appending row-blocks inside the loop.
        final_df = (pd.concat(frames, sort=False, ignore_index=True)
                    if frames else pd.DataFrame())
        for col in final_df:
            if 'created_at' in col:
                final_df[col] = pd.to_datetime(final_df[col])
        for col in final_df:
            if 'source' in col:
                # Split the '<a href=...>name</a>' source markup into URL + name.
                final_df[col + '_url'] = final_df[col].str.extract('<a href="(.*)" rel=')[0]
                final_df[col] = final_df[col].str.extract('nofollow">(.*)</a>')[0]
        if 'tweet_entities' in final_df:
            return _expand_entities(final_df)
        return final_df
    return wrapper
def authenticate(func):
    """Used internally, please use set_auth_params for authentication.

    Wraps ``func`` unchanged and attaches ``set_auth_params`` /
    ``get_auth_params`` accessors for the shared credentials dict.
    """
    auth_params = {}

    @wraps(func)
    def wrapper(*args, **kwargs):
        return func(*args, **kwargs)

    def set_auth_params(**params):
        # dict.update mutates the closed-over dict in place
        auth_params.update(params)

    wrapper.set_auth_params = set_auth_params
    wrapper.get_auth_params = lambda: auth_params
    return wrapper
@authenticate
def get_application_rate_limit_status(consumed_only=True):
    """
    Returns the current rate limits for methods belonging to the
    specified resource families.

    :param consumed_only: Whether or not to return only items that
        have been consumed. Otherwise returns the full list.

    https://developer.twitter.com/en/docs/developer-utilities/rate-limit-status/api-reference/get-application-rate_limit_status
    """
    twtr = Twython(**get_application_rate_limit_status.get_auth_params())
    ratelimit = twtr.get_application_rate_limit_status()
    # DataFrame.append was removed in pandas 2.0; build per-resource frames
    # and concatenate once.
    frames = [pd.DataFrame(ratelimit['resources'][resource]).T
              for resource in ratelimit['resources']]
    limit_df = pd.concat(frames, sort=False) if frames else pd.DataFrame()
    limit_df['reset'] = pd.to_datetime(limit_df['reset'], unit='s')
    limit_df['resource'] = limit_df.index.str.split('/').str[1]
    limit_df.index.name = 'endpoint'
    limit_df = limit_df.sort_values(['resource'])
    limit_df = limit_df.reset_index()
    if consumed_only:
        # '%-d' is a glibc-only strftime extension that raises ValueError on
        # Windows; '%d' (zero-padded day) is portable.
        print(' '*12, 'Rate limit as of:',
              pd.Timestamp.now(tz='UTC').strftime('%Y-%m-%d %H:%M:%S'))
        return limit_df[limit_df['limit'].ne(limit_df['remaining'])]
    return limit_df
@authenticate
def get_available_trends():
    """
    Returns the locations that Twitter has trending topic information for.

    https://developer.twitter.com/en/docs/trends/locations-with-trending-topics/api-reference/get-trends-available
    """
    twtr = Twython(**get_available_trends.get_auth_params())
    trends_df = pd.DataFrame(twtr.get_available_trends())
    # Flatten the nested 'placeType' dicts into two scalar columns.
    place_types = trends_df['placeType']
    trends_df['code'] = place_types.apply(lambda place: place['code'])
    trends_df['place_type'] = place_types.apply(lambda place: place['name'])
    del trends_df['placeType']
    trends_df = trends_df.sort_values(['country', 'place_type', 'name'])
    return trends_df.reset_index(drop=True)
@make_dataframe
@authenticate
def get_favorites(user_id=None, screen_name=None, count=None, since_id=None,
                  max_id=None, include_entities=None, tweet_mode=None):
    """
    Returns the 20 most recent Tweets favorited by the authenticating
    or specified user.

    :param user_id: (int - optional) The ID of the user for whom to return
        results.
    :param screen_name: (str - optional) The screen name of the user for whom
        to return results.
    :param count: (int - optional) Specifies the number of results to retrieve.
    :param since_id: (int - optional) Returns results with an ID greater than
        (that is, more recent than) the specified ID. There are limits to the number of
        Tweets which can be accessed through the API. If the limit of Tweets has
        occurred since the since_id, the since_id will be forced to the oldest ID
        available.
    :param max_id: (int - optional) Returns results with an ID less than (that
        is, older than) or equal to the specified ID.
    :param include_entities: (bool - optional) The entities node will be
        omitted when set to False .
    :param tweet_mode: (str - optional) Valid request values are compat and
        extended, which give compatibility mode and extended mode, respectively for
        Tweets that contain over 140 characters

    https://developer.twitter.com/en/docs/tweets/post-and-engage/api-reference/get-favorites-list
    """
    # Body intentionally empty: @make_dataframe dispatches to the same-named
    # Twython method; this stub only provides the signature and docs.
    pass
@make_dataframe
@authenticate
def get_followers_ids(user_id=None, screen_name=None, cursor=None,
                      stringify_ids=None, count=None):
    """
    Returns a cursored collection of user IDs for every user
    following the specified user.

    :param user_id: (int - optional) The ID of the user for whom to return
        results.
    :param screen_name: (str - optional) The screen name of the user for whom
        to return results.
    :param cursor: (cursor - semi-optional) Causes the list of connections to
        be broken into pages of no more than 5000 IDs at a time. The number of IDs
        returned is not guaranteed to be 5000 as suspended users are filtered out after
        connections are queried. If no cursor is provided, a value of -1 will be
        assumed, which is the first "page." The response from the API will include a
        previous_cursor and next_cursor to allow paging back and forth. See Using
        cursors to navigate collections for more information.
    :param stringify_ids: (bool - optional) Some programming environments will
        not consume Twitter IDs due to their size. Provide this option to have IDs
        returned as strings instead. More about Twitter IDs.
    :param count: (int - optional) Specifies the number of results to retrieve.

    https://developer.twitter.com/en/docs/accounts-and-users/follow-search-get-users/api-reference/get-followers-ids
    """
    # Body intentionally empty: @make_dataframe dispatches to the same-named
    # Twython method; this stub only provides the signature and docs.
    pass
@make_dataframe
@authenticate
def get_followers_list(user_id=None, screen_name=None, cursor=None, count=None,
skip_status=None, include_user_entities=None):
"""
Returns a cursored collection of user objects for users
following the specified user.
:param user_id: (int - optional) The ID of the user for whom to return
results.
:param screen_name: (str - | |
from __future__ import with_statement
import py
try:
set
except NameError:
from sets import Set as set, ImmutableSet as frozenset
def compress_char_set(chars):
    """Take the character list and compress runs of adjacent
    characters; the result is a list of the first character in
    a run and the number of chars following, sorted with longer
    runs first.

    Example: 'abc' => [('a', 3)]
    Example: 'abcmxyz' => [('a',3),('x',3),('m',1)]"""
    # Find the runs. Creates a list like [['a',3],['m',1],['x',3]]
    chars = list(chars)
    chars.sort()
    result = [[chars[0], 1]]
    for a, b in zip(chars[:-1], chars[1:]):
        if ord(a) == ord(b) - 1:
            # Found adjacent characters, increment counter
            result[-1][1] += 1
        else:
            # Found a 'hole', so create a new entry
            result += [[b, 1]]
    # Change the above list into a list of sorted tuples
    real_result = [(c, l) for [c, l] in result]
    # Sort longer runs first (hence the -run[1]), then alphabetically.
    # Subscript-based key instead of the former Python-2-only
    # tuple-unpacking lambda (removed by PEP 3113), so the function
    # works under both Python 2 and 3.
    real_result.sort(key=lambda run: (-run[1], run[0]))
    return real_result
def make_nice_charset_repr(chars):
    """Render *chars* compactly in character-class style: alphanumeric runs
    become ranges like 'a-z', everything else is escaped via repr(), and a
    literal '-' is emitted last as '\\-'."""
    alnum = set("0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
    # Compress only the letters & digits; other characters are listed singly.
    letters = set(chars) & alnum
    therest = set(chars) - letters - set(['-'])
    pieces = []
    for first, run_length in compress_char_set(letters):
        if run_length == 1:
            pieces.append(first)
        elif run_length == 2:
            # 'ab' reads better than 'a-b'
            pieces.append(first)
            pieces.append(chr(ord(first) + 1))
        else:
            last = chr(ord(first) + run_length - 1)
            pieces.append("%s-%s" % (repr(first)[1:-1], repr(last)[1:-1]))
    pieces.extend(repr(c)[1:-1] for c in therest)
    # '-' MUST be escaped inside a character class
    if '-' in chars:
        pieces.append('\\-')
    return "".join(pieces)
class LexerError(Exception):
    """Raised when lexing cannot proceed at ``source_pos`` within ``input``."""

    def __init__(self, input, state, source_pos):
        self.input = input
        self.state = state
        self.source_pos = source_pos
        self.args = (input, state, source_pos)

    def nice_error_message(self, filename="<unknown>"):
        """Return a CPython-traceback-style message with a caret at the column."""
        # + 1 because source_pos is 0-based while humans count lines from 1
        lines = [" File %s, line %s" % (filename, self.source_pos.lineno + 1)]
        lines.append(self.input.split("\n")[self.source_pos.lineno])
        lines.append(" " * self.source_pos.columnno + "^")
        lines.append("LexerError")
        return "\n".join(lines)

    def __str__(self):
        return self.nice_error_message()
class DFA(object):
def __init__(self, num_states=0, transitions=None, final_states=None,
             unmergeable_states=None, names=None):
    """Build a DFA.

    :param num_states: number of states already present in `transitions`
        (state 0 is the start state).
    :param transitions: dict mapping (state, input) -> next_state.
    :param final_states: set of accepting states.
    :param unmergeable_states: states optimize() must never merge away.
    :param names: per-state display names, indexed by state number.
    """
    # Bug fix: this was `self.num_states = 0`, silently discarding the
    # num_states argument and breaking DFAs built from pre-existing
    # transition tables (range(self.num_states) loops saw no states).
    self.num_states = num_states
    # None-defaults instead of mutable default arguments.
    if transitions is None:
        transitions = {}
    if final_states is None:
        final_states = set()
    if unmergeable_states is None:
        unmergeable_states = set()
    if names is None:
        names = []
    self.transitions = transitions
    self.final_states = final_states
    self.unmergeable_states = unmergeable_states
    self.names = names
def __repr__(self):
    """Debug representation dumping the full automaton state."""
    from pprint import pformat
    state_dump = pformat((
        self.num_states, self.transitions, self.final_states,
        self.unmergeable_states, self.names))
    return "DFA%s" % (state_dump, )
def add_state(self, name=None, final=False, unmergeable=False):
state = self.num_states
self.num_states += 1
if final:
self.final_states.add(state)
if unmergeable:
self.unmergeable_states.add(state)
if name is None:
name = str(state)
self.names.append(name)
return state
# DFA returns transitions like a dict()
def __setitem__(self, key, next_state):
    """dfa[state, input] = next_state. Explicit unpack instead of the
    Python-2-only tuple parameter syntax (removed by PEP 3113)."""
    state, input = key
    self.transitions[state, input] = next_state
def __getitem__(self, key):
    """next_state = dfa[state, input]. Explicit unpack instead of the
    Python-2-only tuple parameter syntax (removed by PEP 3113)."""
    state, input = key
    return self.transitions[state, input]
def __contains__(self, key):
    """(state, input) in dfa -- true if that transition is defined.
    Explicit unpack preserves the original TypeError on non-pair keys."""
    state, input = key
    return (state, input) in self.transitions
def get_all_chars(self):
    """Return the set of every input character used by any transition."""
    # transition keys are (state, input) pairs; collect the inputs
    return set(input for (state, input) in self.transitions)
def optimize(self):
    """Minimize the DFA in place by merging indistinguishable states.

    Partition-refinement: start from the coarse partition
    {non-final states} / {final states} (unmergeable states are kept as
    singletons), then repeatedly split any group whose members transition
    into different groups on some character, until stable. Returns True if
    any states were merged, False if the DFA was already minimal.
    """
    all_chars = self.get_all_chars()
    # find mergeable
    non_final = frozenset(set(range(self.num_states)) - self.final_states -
                          self.unmergeable_states)
    final = frozenset(self.final_states - self.unmergeable_states)
    # state_to_set maps each state to the equivalence group it currently belongs to
    state_to_set = {}
    equivalence_sets = set()
    if non_final:
        equivalence_sets.add(non_final)
    if final:
        equivalence_sets.add(final)
    for state in range(self.num_states):
        if state in final:
            state_to_set[state] = final
        elif state in self.unmergeable_states:
            # unmergeable states stay in their own singleton group forever
            singleset = frozenset([state])
            state_to_set[state] = singleset
            equivalence_sets.add(singleset)
        else:
            state_to_set[state] = non_final
    assert len(equivalence_sets) <= self.num_states
    # Refine until stable (or until every state is its own group).
    while len(equivalence_sets) < self.num_states:
        new_equivalence_sets = set()
        changed = False
        for equivalent in equivalence_sets:
            #print "checking", equivalent
            for char in all_chars:
                # Bucket the group's members by which group `char` leads to
                # (None = no transition on this char).
                targets = {}
                for state in equivalent:
                    if (state, char) in self:
                        nextstate = self[state, char]
                        target = frozenset(state_to_set[nextstate])
                    else:
                        nextstate = None
                        target = None
                    targets.setdefault(target, set()).add(state)
                if len(targets) != 1:
                    # Members disagree on this char: split the group.
                    #print "\nsplitting %s with %r\ninto %s" % (equivalent, char, targets.values())
                    for target, newequivalent in targets.iteritems():
                        #print "  ", newequivalent
                        newequivalent = frozenset(newequivalent)
                        new_equivalence_sets.add(newequivalent)
                        for state in newequivalent:
                            state_to_set[state] = newequivalent
                    #print "  ", new_equivalence_sets
                    changed = True
                    break
            else:
                # No character split this group: keep it intact.
                new_equivalence_sets.add(equivalent)
        if not changed:
            break
        #print "end", equivalence_sets
        #print new_equivalence_sets
        equivalence_sets = new_equivalence_sets
    if len(equivalence_sets) == self.num_states:
        # Every state is distinguishable: nothing to merge.
        return False
    #print equivalence_sets
    # merging the states
    newnames = []
    newtransitions = {}
    newnum_states = len(equivalence_sets)
    newstates = list(equivalence_sets)
    newstate_to_index = {}
    newfinal_states = set()
    newunmergeable_states = set()
    for i, newstate in enumerate(newstates):
        newstate_to_index[newstate] = i
    # bring startstate into first slot (state 0 must remain the start state)
    startstateindex = newstate_to_index[state_to_set[0]]
    newstates[0], newstates[startstateindex] = newstates[startstateindex], newstates[0]
    newstate_to_index[newstates[0]] = 0
    newstate_to_index[newstates[startstateindex]] = startstateindex
    for i, newstate in enumerate(newstates):
        # Merged state's name lists its members; an unmergeable member's
        # name wins outright.
        name = ", ".join([self.names[s] for s in newstate])
        for state in newstate:
            if state in self.unmergeable_states:
                newunmergeable_states.add(i)
                name = self.names[state]
            if state in self.final_states:
                newfinal_states.add(i)
        newnames.append(name)
    # Rewrite every transition in terms of the merged state indices.
    for (state, char), nextstate in self.transitions.iteritems():
        newstate = newstate_to_index[state_to_set[state]]
        newnextstate = newstate_to_index[state_to_set[nextstate]]
        newtransitions[newstate, char] = newnextstate
    self.names = newnames
    self.transitions = newtransitions
    self.num_states = newnum_states
    self.final_states = newfinal_states
    self.unmergeable_states = newunmergeable_states
    return True
def make_code(self):
    """Generate and return a `recognize(input) -> bool` function that runs
    this DFA over `input` and reports whether it accepts the whole string.

    The DFA is compiled into nested if/elif chains (one branch per state),
    the resulting source is exec'd, and the compiled function is returned.
    """
    from rpython.rlib.parsing.codebuilder import Codebuilder
    result = Codebuilder()
    result.start_block("def recognize(input):")
    result.emit("i = 0")
    result.emit("state = 0")
    result.start_block("while 1:")
    # state_to_chars is a dict containing the sets of
    # Ex: state_to_chars = { 0: set('a','b','c'), ...}
    state_to_chars = {}
    for (state, char), nextstate in self.transitions.iteritems():
        state_to_chars.setdefault(state, {}).setdefault(nextstate, set()).add(char)
    # `above` tracks states already emitted: transitions back to them need an
    # explicit `continue` to re-enter the dispatch loop from the top.
    above = set()
    for state, nextstates in state_to_chars.iteritems():
        above.add(state)
        with result.block("if state == %s:" % (state, )):
            with result.block("if i < len(input):"):
                result.emit("char = input[i]")
                result.emit("i += 1")
            with result.block("else:"):
                # End of input: accept only in a final state.
                if state in self.final_states:
                    result.emit("return True")
                else:
                    result.emit("break")
            elif_prefix = ""
            for nextstate, chars in nextstates.iteritems():
                final = nextstate in self.final_states
                compressed = compress_char_set(chars)
                if nextstate in above:
                    continue_prefix = "continue"
                else:
                    continue_prefix = ""
                for i, (a, num) in enumerate(compressed):
                    if num < 5:
                        # Short runs: one equality test per character.
                        for charord in range(ord(a), ord(a) + num):
                            with result.block(
                                "%sif char == %r:" % (
                                    elif_prefix, chr(charord))):
                                result.emit("state = %s" % (nextstate, ))
                                result.emit(continue_prefix)
                            if not elif_prefix:
                                elif_prefix = "el"
                    else:
                        # Long runs: a single range comparison.
                        with result.block(
                            "%sif %r <= char <= %r:" % (
                                elif_prefix, a, chr(ord(a) + num - 1))):
                            result.emit("state = %s""" % (nextstate, ))
                            result.emit(continue_prefix)
                        if not elif_prefix:
                            elif_prefix = "el"
            with result.block("else:"):
                # No transition matched the character: reject.
                result.emit("break")
    #print state_to_chars.keys()
    # States with no outgoing transitions: accept exactly at end of input.
    for state in range(self.num_states):
        if state in state_to_chars:
            continue
        with result.block("if state == %s:" % (state, )):
            with result.block("if i == len(input):"):
                result.emit("return True")
            with result.block("else:"):
                result.emit("break")
    result.emit("break")
    result.end_block("while")
    result.emit("raise LexerError(input, state, i)")
    result.end_block("def")
    result = result.get_code()
    # Squeeze out blank lines left behind by empty emits.
    while "\n\n" in result:
        result = result.replace("\n\n", "\n")
    #print result
    # exec into a fresh namespace; the generated code needs LexerError.
    d = {'LexerError': LexerError}
    exec(py.code.Source(result).compile(), d)
    return d['recognize']
def make_lexing_code(self):
    """Generate, exec, and return the `recognize(runner, i)` lexing function.

    Exec into an explicit namespace dict (consistent with make_code above)
    instead of the bare local-scope `exec(...)`, which only worked under
    Python 2 -- in Python 3, names bound by exec in a function's local scope
    are not visible, so `return recognize` raised NameError.
    """
    code = self.generate_lexing_code()
    d = {'LexerError': LexerError}
    exec(py.code.Source(code).compile(), d)
    return d['recognize']
def generate_lexing_code(self):
    """Emit Python source for a ``recognize(runner, i)`` lexer function.

    The generated function steps this DFA over ``runner.text`` starting at
    index ``i`` and returns the index one past the last consumed character:
    non-negative on reaching a final state, bitwise-inverted (``~i``) on
    failure. Match bookkeeping is written onto ``runner``
    (``last_matched_index``/``last_matched_state``/``state``).
    """
    from rpython.rlib.parsing.codebuilder import Codebuilder
    result = Codebuilder()
    result.start_block("def recognize(runner, i):")
    result.emit("#auto-generated code, don't edit")
    result.emit("assert i >= 0")
    result.emit("input = runner.text")
    result.emit("state = 0")
    result.start_block("while 1:")
    # Invert transitions into state -> nextstate -> set of chars, so each
    # state can be emitted as one dispatch block.
    state_to_chars = {}
    for (state, char), nextstate in self.transitions.iteritems():
        state_to_chars.setdefault(state, {}).setdefault(nextstate, set()).add(char)
    # Sort for deterministic output (dict iteration order is arbitrary).
    state_to_chars_sorted = state_to_chars.items()
    state_to_chars_sorted.sort()
    # States already emitted; jumping back to one of them needs an explicit
    # 'continue' to re-enter the dispatch loop.
    above = set()
    for state, nextstates in state_to_chars_sorted:
        above.add(state)
        with result.block("if state == %s:" % (state, )):
            if state in self.final_states:
                # Record the match before trying to consume further input.
                result.emit("runner.last_matched_index = i - 1")
                result.emit("runner.last_matched_state = state")
            with result.block("try:"):
                result.emit("char = input[i]")
                result.emit("i += 1")
            with result.block("except IndexError:"):
                # End of input: success iff this state is final.
                result.emit("runner.state = %s" % (state, ))
                if state in self.final_states:
                    result.emit("return i")
                else:
                    result.emit("return ~i")
            elif_prefix = ""
            for nextstate, chars in nextstates.iteritems():
                final = nextstate in self.final_states
                # Collapse the char set into (start, run-length) ranges.
                compressed = compress_char_set(chars)
                if nextstate in above:
                    continue_prefix = "continue"
                else:
                    continue_prefix = ""
                for i, (a, num) in enumerate(compressed):
                    if num < 3:
                        # Short runs: emit one equality test per char.
                        for charord in range(ord(a), ord(a) + num):
                            with result.block("%sif char == %r:"
                                              % (elif_prefix, chr(charord))):
                                result.emit("state = %s" % (nextstate, ))
                                result.emit(continue_prefix)
                            if not elif_prefix:
                                elif_prefix = "el"
                    else:
                        # Longer runs: emit a single range comparison.
                        with result.block(
                            "%sif %r <= char <= %r:" % (
                                elif_prefix, a, chr(ord(a) + num - 1))):
                            result.emit("state = %s" % (nextstate, ))
                            result.emit(continue_prefix)
                        if not elif_prefix:
                            elif_prefix = "el"
            with result.block("else:"):
                result.emit("break")
    #print state_to_chars.keys()
    # States with no outgoing transitions must be final; they match
    # unconditionally and report success/failure based on input position.
    for state in range(self.num_states):
        if state in state_to_chars:
            continue
        assert state in self.final_states
        result.emit("""
runner.last_matched_state = state
runner.last_matched_index = i - 1
runner.state = state
if i == len(input):
    return i
else:
    return ~i
break""")
    result.end_block("while")
    result.emit("""
runner.state = state
return ~i""")
    result.end_block("def")
    result.emit("from rpython.rlib.parsing.deterministic import DFA")
    result.emit("automaton = %s" % self)
    result = result.get_code()
    # Squeeze out blank lines left by empty emits.
    while "\n\n" in result:
        result = result.replace("\n\n", "\n")
    return result
def get_runner(self):
    """Return a fresh DFARunner bound to this automaton."""
    return DFARunner(self)
def make_nondeterministic(self):
    """Build an NFA equivalent to this DFA.

    The NFA shares the state numbering and names, starts from state 0,
    and carries over every transition and the set of final states.
    """
    nfa = NFA()
    nfa.num_states = self.num_states
    nfa.names = self.names
    nfa.start_states = set([0])
    nfa.final_states = self.final_states.copy()
    for (source, label), target in self.transitions.iteritems():
        nfa.add_transition(source, target, label)
    return nfa
def dot(self):
result = ["graph G {"]
for i in range(self.num_states):
if i == 0:
extra = ", color=red"
else:
extra = ""
if i in self.final_states:
| |
np.all(t1.right_sib_array == t2.right_sib_array)
assert list(t1.sites()) == list(t2.sites())
def test_copy_seek(self):
    """Copies taken at every seek position must equal the original tree,
    and copies must iterate independently of (but identically to) it."""
    ts = msprime.simulate(10, recombination_rate=3, length=3, random_seed=42)
    assert ts.num_trees > 5
    tree = tskit.Tree(ts)
    copy = tree.copy()
    self.verify_empty_tree(copy)
    # Copy at every position going forwards, then backwards.
    while tree.next():
        copy = tree.copy()
        self.verify_trees_identical(tree, copy)
    while tree.prev():
        copy = tree.copy()
        self.verify_trees_identical(tree, copy)
    tree.clear()
    copy = tree.copy()
    tree.first()
    # Make sure the underlying arrays are different
    assert np.any(tree.parent_array != copy.parent_array)
    copy.first()
    # Advance both in lockstep from the first tree...
    while tree.index != -1:
        self.verify_trees_identical(tree, copy)
        assert tree.next() == copy.next()
    tree.last()
    copy.last()
    # ...and backwards from the last tree.
    while tree.index != -1:
        self.verify_trees_identical(tree, copy)
        assert tree.prev() == copy.prev()
    # Seek to middle and two independent trees.
    tree.seek_index(ts.num_trees // 2)
    left_copy = tree.copy()
    right_copy = tree.copy()
    self.verify_trees_identical(tree, left_copy)
    self.verify_trees_identical(tree, right_copy)
    left_copy.prev()
    assert left_copy.index == tree.index - 1
    right_copy.next()
    assert right_copy.index == tree.index + 1
def test_copy_tracked_samples(self):
    """Copies must preserve tracked-sample counts, both for copies taken
    mid-iteration and for a copy advanced in lockstep with the original."""
    ts = msprime.simulate(10, recombination_rate=2, length=3, random_seed=42)
    tree = tskit.Tree(ts, tracked_samples=[0, 1])

    def check_counts_match(left, right):
        # Tracked-sample counts must agree at every node.
        for node in range(ts.num_nodes):
            assert left.num_tracked_samples(node) == right.num_tracked_samples(node)

    while tree.next():
        check_counts_match(tree, tree.copy())
    duplicate = tree.copy()
    while tree.next():
        duplicate.next()
        check_counts_match(tree, duplicate)
def test_copy_multiple_roots(self):
    """Copies must preserve roots and root_threshold on a decapitated
    (multi-root) tree sequence, at every iteration position."""
    ts = msprime.simulate(20, recombination_rate=2, length=3, random_seed=42)
    # Removing the top half of the edges leaves trees with multiple roots.
    ts = tsutil.decapitate(ts, ts.num_edges // 2)
    for root_threshold in [1, 2, 100]:
        tree = tskit.Tree(ts, root_threshold=root_threshold)
        # Copy of the null tree.
        copy = tree.copy()
        assert copy.roots == tree.roots
        assert copy.root_threshold == root_threshold
        while tree.next():
            copy = tree.copy()
            assert copy.roots == tree.roots
            assert copy.root_threshold == root_threshold
        # Copy after iteration has finished (null tree again).
        copy = tree.copy()
        assert copy.roots == tree.roots
        assert copy.root_threshold == root_threshold
def test_map_mutations(self):
    """map_mutations: all-zero genotypes need no transitions, one deviant
    sample needs exactly one, and out-of-range alleles raise ValueError."""
    ts = msprime.simulate(5, random_seed=42)
    tree = ts.first()
    genotypes = np.zeros(5, dtype=np.int8)
    alleles = [str(j) for j in range(64)]
    ancestral_state, transitions = tree.map_mutations(genotypes, alleles)
    assert ancestral_state == "0"
    assert len(transitions) == 0
    # One sample carrying allele j => exactly one transition.
    for j in range(1, 64):
        genotypes[0] = j
        ancestral_state, transitions = tree.map_mutations(genotypes, alleles)
        assert ancestral_state == "0"
        assert len(transitions) == 1
    # Genotype values beyond the allele list must be rejected.
    for j in range(64, 67):
        genotypes[0] = j
        with pytest.raises(ValueError):
            tree.map_mutations(genotypes, alleles)
    # Plain lists and default-int arrays are accepted as genotypes.
    tree.map_mutations([0] * 5, alleles)
    tree.map_mutations(np.zeros(5, dtype=int), alleles)
def test_sample_count_deprecated(self):
    """The removed ``sample_counts`` option must trigger a RuntimeWarning
    from both ts.trees() and the Tree constructor."""
    ts = msprime.simulate(5, random_seed=42)
    deprecated_calls = [
        lambda: ts.trees(sample_counts=True),
        lambda: tskit.Tree(ts, sample_counts=False),
    ]
    for call in deprecated_calls:
        with warnings.catch_warnings(record=True) as caught:
            call()
            assert len(caught) == 1
            assert issubclass(caught[0].category, RuntimeWarning)
def test_node_edges(self):
    """_node_edges() must map each non-root node in a tree to the unique
    edge above it (the edge spanning the tree's midpoint), else NULL."""
    ts = msprime.simulate(5, recombination_rate=1, random_seed=42)
    assert ts.num_trees > 2
    edge_table = ts.tables.edges
    for tree in ts.trees():
        nodes = set(tree.nodes())
        # Any point strictly inside the interval identifies this tree.
        midpoint = sum(tree.interval) / 2
        mapping = tree._node_edges()
        for node, edge in enumerate(mapping):
            if node in nodes and tree.parent(node) != tskit.NULL:
                # Find the edge whose child is this node and whose span
                # covers the midpoint; there must be exactly one.
                edge_above_node = np.where(
                    np.logical_and.reduce(
                        (
                            edge_table.child == node,
                            edge_table.left < midpoint,
                            edge_table.right > midpoint,
                        )
                    )
                )[0]
                assert len(edge_above_node) == 1
                assert edge_above_node[0] == edge
            else:
                # Roots and nodes absent from this tree carry no edge.
                assert edge == tskit.NULL
class TestNodeOrdering(HighLevelTestCase):
    """
    Verify that we can use any node ordering for internal nodes
    and get the same topologies.
    """

    # Number of shuffled internal-node orderings tried per tree sequence.
    num_random_permutations = 10

    def verify_tree_sequences_equal(self, ts1, ts2, approximate=False):
        """Check ts1 and ts2 have identical edges and nodes.

        With approximate=True, floating-point coordinates and times are
        compared with pytest.approx (needed after a text round-trip).
        """
        assert ts1.get_num_trees() == ts2.get_num_trees()
        assert ts1.get_sample_size() == ts2.get_sample_size()
        assert ts1.get_num_nodes() == ts2.get_num_nodes()
        j = 0
        for r1, r2 in zip(ts1.edges(), ts2.edges()):
            assert r1.parent == r2.parent
            assert r1.child == r2.child
            if approximate:
                assert r1.left == pytest.approx(r2.left)
                assert r1.right == pytest.approx(r2.right)
            else:
                assert r1.left == r2.left
                assert r1.right == r2.right
            j += 1
        # zip() stops at the shorter sequence; make sure we saw them all.
        assert ts1.num_edges == j
        j = 0
        for n1, n2 in zip(ts1.nodes(), ts2.nodes()):
            assert n1.metadata == n2.metadata
            assert n1.population == n2.population
            if approximate:
                assert n1.time == pytest.approx(n2.time)
            else:
                assert n1.time == n2.time
            j += 1
        assert ts1.num_nodes == j

    def verify_random_permutation(self, ts):
        """Shuffle internal node ids, rebuild the tables, and check the
        topologies — and binary/text round-trips — are unchanged."""
        n = ts.sample_size
        # node_map: original id -> permuted id; samples keep their ids.
        node_map = {}
        for j in range(n):
            node_map[j] = j
        internal_nodes = list(range(n, ts.num_nodes))
        random.shuffle(internal_nodes)
        for j, node in enumerate(internal_nodes):
            node_map[n + j] = node
        other_tables = tskit.TableCollection(ts.sequence_length)
        # Insert the new nodes into the table.
        inv_node_map = {v: k for k, v in node_map.items()}
        for j in range(ts.num_nodes):
            node = ts.node(inv_node_map[j])
            other_tables.nodes.append(node)
        for e in ts.edges():
            other_tables.edges.append(
                e.replace(parent=node_map[e.parent], child=node_map[e.child])
            )
        for _ in range(ts.num_populations):
            other_tables.populations.add_row()
        other_tables.sort()
        other_ts = other_tables.tree_sequence()
        assert ts.get_num_trees() == other_ts.get_num_trees()
        assert ts.get_sample_size() == other_ts.get_sample_size()
        assert ts.get_num_nodes() == other_ts.get_num_nodes()
        j = 0
        for t1, t2 in zip(ts.trees(), other_ts.trees()):
            # Verify the topologies are identical. We do this by traversing
            # upwards to the root for every sample and checking if we map to
            # the correct node and time.
            for u in range(n):
                v_orig = u
                v_map = u
                while v_orig != tskit.NULL:
                    assert node_map[v_orig] == v_map
                    assert t1.get_time(v_orig) == t2.get_time(v_map)
                    v_orig = t1.get_parent(v_orig)
                    v_map = t2.get_parent(v_map)
                assert v_orig == tskit.NULL
                assert v_map == tskit.NULL
            j += 1
        assert j == ts.get_num_trees()
        # Verify we can dump this new tree sequence OK.
        with tempfile.TemporaryDirectory() as tempdir:
            temp_file = pathlib.Path(tempdir) / "tmp.trees"
            other_ts.dump(temp_file)
            ts3 = tskit.load(temp_file)
            self.verify_tree_sequences_equal(other_ts, ts3)
        nodes_file = io.StringIO()
        edges_file = io.StringIO()
        # Also verify we can read the text version.
        other_ts.dump_text(nodes=nodes_file, edges=edges_file, precision=14)
        nodes_file.seek(0)
        edges_file.seek(0)
        ts3 = tskit.load_text(nodes_file, edges_file)
        self.verify_tree_sequences_equal(other_ts, ts3, True)

    def test_single_locus(self):
        """Permutations on a single-tree (no recombination) sequence."""
        ts = msprime.simulate(7)
        for _ in range(self.num_random_permutations):
            self.verify_random_permutation(ts)

    def test_multi_locus(self):
        """Permutations on a multi-tree (recombinant) sequence."""
        ts = msprime.simulate(20, recombination_rate=10)
        for _ in range(self.num_random_permutations):
            self.verify_random_permutation(ts)

    def test_nonbinary(self):
        """Permutations on a sequence containing non-binary nodes."""
        ts = msprime.simulate(
            sample_size=20,
            recombination_rate=10,
            demographic_events=[
                msprime.SimpleBottleneck(time=0.5, population=0, proportion=1)
            ],
        )
        # Make sure this really has some non-binary nodes
        found = False
        for t in ts.trees():
            for u in t.nodes():
                if len(t.children(u)) > 2:
                    found = True
                    break
            if found:
                break
        assert found
        for _ in range(self.num_random_permutations):
            self.verify_random_permutation(ts)
def assert_trees_identical(t1, t2):
    """Check two trees are equal, including identical quintuple-array
    layouts (parent, left/right child, left/right sib)."""
    assert t1.tree_sequence == t2.tree_sequence
    assert t1.index == t2.index
    for array_name in (
        "parent_array",
        "left_child_array",
        "left_sib_array",
        "right_child_array",
        "right_sib_array",
    ):
        assert np.all(getattr(t1, array_name) == getattr(t2, array_name))
def assert_same_tree_different_order(t1, t2):
    """Check two trees share the same topology but were built in a
    different traversal order: parents agree, child ordering differs."""
    assert t1.tree_sequence == t2.tree_sequence
    assert t1.index == t2.index
    parents_agree = np.all(t1.parent_array == t2.parent_array)
    children_agree = np.all(t1.left_child_array == t2.left_child_array)
    assert parents_agree
    assert not children_agree
def seek(tree, x):
    """
    Python implementation of the seek algorithm. Useful for developing
    tests.

    Moves ``tree`` to the tree whose interval covers position ``x`` by
    repeatedly calling next() or prev(), choosing the direction with the
    smaller distance. Distances are computed as if the sequence wraps
    around at L, since next()/prev() cycle through the null tree at the
    sequence ends.
    """
    L = tree.tree_sequence.sequence_length
    t_l, t_r = tree.interval
    if x < t_l:
        # Target lies to the left of the current interval.
        # |-----|-----|========|---------|
        # 0 x t_l t_r L
        distance_left = t_l - x
        distance_right = L - t_r + x
    else:
        # Target lies at/after the current interval (possibly inside it).
        # |------|========|------|-------|
        # 0 t_l t_r x L
        distance_right = x - t_r
        distance_left = t_l + L - x
    # Ties go rightwards (forward iteration is the common/cheap case).
    if distance_right <= distance_left:
        while not (tree.interval.left <= x < tree.interval.right):
            tree.next()
    else:
        while not (tree.interval.left <= x < tree.interval.right):
            tree.prev()
class TestSeekDirection:
    """
    Test if we seek in the correct direction according to our heuristics.
    """

    # The example used throughout: all four topologies on three samples,
    # one per unit interval.
    # 2.00┊ ┊ 4 ┊ 4 ┊ 4 ┊
    # ┊ ┊ ┏━┻┓ ┊ ┏┻━┓ ┊ ┏┻━┓ ┊
    # 1.00┊ 3 ┊ ┃ 3 ┊ 3 ┃ ┊ 3 ┃ ┊
    # ┊ ┏━╋━┓ ┊ ┃ ┏┻┓ ┊ ┏┻┓ ┃ ┊ ┏┻┓ ┃ ┊
    # 0.00┊ 0 1 2 ┊ 0 1 2 ┊ 0 2 1 ┊ 0 1 2 ┊
    # 0 1 2 3 4
    @tests.cached_example
    def ts(self):
        """A cached tree sequence containing all 4 trees drawn above."""
        return tsutil.all_trees_ts(3)

    def setup(self):
        """Return two fresh trees on the example, both at the null tree."""
        ts = self.ts()
        t1 = tskit.Tree(ts)
        t2 = tskit.Tree(ts)
        # Note: for development we can monkeypatch in the Python implementation
        # above like this:
        # t2.seek = functools.partial(seek, t2)
        return t1, t2

    @pytest.mark.parametrize("index", range(4))
    def test_index_from_different_directions(self, index):
        # Check that we get different orderings of the children arrays
        # for all trees when we go in different directions.
        t1, t2 = self.setup()
        while t1.index != index:
            t1.next()
        while t2.index != index:
            t2.prev()
        assert_same_tree_different_order(t1, t2)

    def test_seek_0_from_null(self):
        """Seeking to 0 from the null tree equals first()."""
        t1, t2 = self.setup()
        t1.first()
        t2.seek(0)
        assert_trees_identical(t1, t2)

    @pytest.mark.parametrize("index", range(3))
    def test_seek_next_tree(self, index):
        """Seeking one tree ahead must behave like a single next()."""
        t1, t2 = self.setup()
        while t1.index != index:
            t1.next()
            t2.next()
        t1.next()
        t2.seek(index + 1)
        assert_trees_identical(t1, t2)

    @pytest.mark.parametrize("index", [3, 2, 1])
    def test_seek_prev_tree(self, index):
        """Seeking one tree back must behave like a single prev()."""
        t1, t2 = self.setup()
        while t1.index != index:
            t1.prev()
            t2.prev()
        t1.prev()
        t2.seek(index - 1)
        assert_trees_identical(t1, t2)

    def test_seek_1_from_0(self):
        t1, t2 = self.setup()
        t1.first()
        t1.next()
        t2.first()
        t2.seek(1)
        assert_trees_identical(t1, t2)

    def test_seek_1_5_from_0(self):
        """Seeking into the middle of an interval lands on its tree."""
        t1, t2 = self.setup()
        t1.first()
        t1.next()
        t2.first()
        t2.seek(1.5)
        assert_trees_identical(t1, t2)

    def test_seek_1_5_from_1(self):
        """Seeking within the current interval is a no-op on position."""
        t1, t2 = self.setup()
        for _ in range(2):
            t1.next()
            t2.next()
        t2.seek(1.5)
        assert_trees_identical(t1, t2)

    def test_seek_3_from_null(self):
        """Seeking to the last interval from null equals last()."""
        t1, t2 = self.setup()
        t1.last()
        t2.seek(3)
        assert_trees_identical(t1, t2)

    def test_seek_3_from_0(self):
        t1, t2 = self.setup()
        t1.last()
        t2.first()
        t2.seek(3)
        assert_trees_identical(t1, t2)

    def test_seek_0_from_3(self):
        t1, t2 = self.setup()
        t1.last()
        t1.first()
        t2.last()
        t2.seek(0)
        assert_trees_identical(t1, t2)
class TestSeek:
@pytest.mark.parametrize("ts", get_example_tree_sequences())
def test_new_seek_breakpoints(self, ts):
breakpoints = ts.breakpoints(as_array=True)
for index, left in enumerate(breakpoints[:-1]):
tree = tskit.Tree(ts)
tree.seek(left)
assert tree.index == index
@pytest.mark.parametrize("ts", get_example_tree_sequences())
def test_new_seek_mid(self, ts):
breakpoints = ts.breakpoints(as_array=True)
mid = breakpoints[:-1] + np.diff(breakpoints) / 2
for index, left in enumerate(mid[:-1]):
tree = tskit.Tree(ts)
tree.seek(left)
assert tree.index == index
@pytest.mark.parametrize("ts", get_example_tree_sequences())
| |
<gh_stars>1-10
"""Kernel Tuner interface module
This module contains the main functions that the Kernel Tuner
offers to its users.
Author
------
<NAME> <<EMAIL>>
Copyright and License
---------------------
* Copyright 2016 Netherlands eScience Center
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
from collections import OrderedDict
import importlib
from datetime import datetime
import logging
import sys
import numpy
import kernel_tuner.util as util
import kernel_tuner.core as core
from kernel_tuner.strategies import brute_force, random_sample, diff_evo, minimize, basinhopping, genetic_algorithm, pso, simulated_annealing, firefly_algorithm
class Options(OrderedDict):
    """Read-only attribute-style view over an ordered dict of options.

    Values are exposed both as items (``opts["name"]``) and as attributes
    (``opts.name``). Deep copies return the same instance, since the
    object is treated as immutable.
    """
    def __getattr__(self, name):
        # Only invoked for names not found through normal attribute lookup.
        if not name.startswith('_'):
            # A missing option deliberately raises KeyError (not
            # AttributeError) so misspelled option names surface loudly.
            return self[name]
        # Private/dunder probes (e.g. from the copy/pickle machinery) must
        # fail with AttributeError. The previous code delegated to a
        # non-existent super().__getattr__, which raised AttributeError
        # only by accident and with a misleading message.
        raise AttributeError(name)
    def __deepcopy__(self, _):
        # Options are read-only, so sharing the instance is safe and cheap.
        return self
_kernel_options = Options([
("kernel_name", ("""The name of the kernel in the code.""", "string")),
("kernel_string", ("""The CUDA, OpenCL, or C kernel code as a string.
It is also allowed for the string to be a filename of the file
containing the code.
To support combined host and device code tuning for runtime
compiled device code, a list of filenames can be passed instead.
The first file in the list should be the file that contains the
host code. The host code is allowed to include or read as a string
any of the files in the list beyond the first.
Another alternative is to pass a function instead, or instead
of the first item in the list of filenames. The purpose of this
is to support the use of code generating functions that generate
the kernel code based on the specific parameters. This function
should take one positional argument, which will be used to pass
a dict containing the parameters. The function should return a
string with the source code for the kernel.""",
"string or list and/or callable")),
("problem_size", ("""An int or string, or 1,2,3-dimensional tuple
containing the size from which the grid dimensions of the kernel
will be computed.
Do not divide the problem_size yourself by the thread block sizes.
The Kernel Tuner does this for you based on tunable parameters,
called "block_size_x", "block_size_y", and "block_size_z".
If more or different parameters divide the grid dimensions use
grid_div_x/y/z options to specify this.
You are allowed to use a string to specify the problem
size. Within a string you are allowed to write Python
arithmetic and use the names of tunable parameters as variables
in these expressions.
The Kernel Tuner will replace instances of the tunable parameters
with their current value when computing the grid dimensions.
See the reduction CUDA example for an example use of this feature.""",
"string, int, or tuple(int or string, ..)")),
("arguments", ("""A list of kernel arguments, use numpy arrays for
arrays, use numpy.int32 or numpy.float32 for scalars.""", "list")),
("grid_div_x", ("""A list of names of the parameters whose values divide
the grid dimensions in the x-direction.
The product of all grid divisor expressions is computed before dividing
the problem_size in that dimension. Also note that the divison is treated
as a float divison and resulting grid dimensions will be rounded up to
the nearest integer number.
Arithmetic expressions can be
used if necessary inside the string containing a parameter name. For
example, in some cases you may want to divide the problem size in the
x-dimension with the number of warps rather than the number of threads
in a block, in such cases one could use ["block_size_x/32"].
If not supplied, ["block_size_x"] will be used by default, if you do not
want any grid x-dimension divisors pass an empty list.""", "list")),
("grid_div_y", ("""A list of names of the parameters whose values divide
the grid dimensions in the y-direction, ["block_size_y"] by default.
If you do not want to divide the problem_size, you should pass an empty list.
See grid_div_x for more details.""", "list")),
("grid_div_z", ("""A list of names of the parameters whose values divide
the grid dimensions in the z-direction, ["block_size_z"] by default.
If you do not want to divide the problem_size, you should pass an empty list.
See grid_div_x for more details.""", "list")),
("cmem_args", ("""CUDA-specific feature for specifying constant memory
arguments to the kernel. In OpenCL these are handled as normal
kernel arguments, but in CUDA you can copy to a symbol. The way you
specify constant memory arguments is by passing a dictionary with
strings containing the constant memory symbol name together with numpy
objects in the same way as normal kernel arguments.""",
"dict(string: numpy object)")),
("block_size_names", ("""A list of strings that replace the defaults for the names
that denote the thread block dimensions. If not passed, the behavior
defaults to ``["block_size_x", "block_size_y", "block_size_z"]``""",
"list(string)"))
])
_tuning_options = Options([
("tune_params", ("""A dictionary containing the parameter names as keys,
and lists of possible parameter settings as values.
The Kernel Tuner will try to compile and benchmark all possible
combinations of all possible values for all tuning parameters.
This typically results in a rather large search space of all
possible kernel configurations.
For each kernel configuration, each tuning parameter is
replaced at compile-time with its current value.
Currently, the Kernel Tuner uses the convention that the following
list of tuning parameters are used as thread block dimensions:
* "block_size_x" thread block (work group) x-dimension
* "block_size_y" thread block (work group) y-dimension
* "block_size_z" thread block (work group) z-dimension
Options for changing these defaults may be added later. If you
don't want the thread block dimensions to be compiled in, you
may use the built-in variables blockDim.xyz in CUDA or the
built-in function get_local_size() in OpenCL instead.""",
"dict( string : [...]")),
("restrictions", ("""A list of strings containing boolean expression that
limit the search space in that they must be satisfied by the kernel
configuration. These expressions must be true for the configuration
to be part of the search space. For example:
restrictions=["block_size_x==block_size_y*tile_size_y"] limits the
search to configurations where the block_size_x equals the product
of block_size_y and tile_size_y.
The default is None.""", "list")),
("answer", ("""A list of arguments, similar to what you pass to arguments,
that contains the expected output of the kernel after it has executed
and contains None for each argument that is input-only. The expected
output of the kernel will then be used to verify the correctness of
each kernel in the parameter space before it will be benchmarked.""",
"list")),
("atol", ("""The maximum allowed absolute difference between two elements
in the output and the reference answer, as passed to numpy.allclose().
Ignored if you have not passed a reference answer. Default value is
1e-6, that is 0.000001.""", "float")),
("verify", ("""Python function used for output verification. By default,
numpy.allclose is used for output verification, if this does not suit
your application, you can pass a different function here.
The function is expected to have two positional arguments. The first
is the reference result, the second is the output computed by the
kernel being verified. The types of these arguments depends on the
type of the output arguments you are verifying. The function may also
have an optional argument named atol, to which the value will be
passed that was specified using the atol option to tune_kernel.
The function should return True when the output passes the test, and
False when the output fails the test.""", "func(ref, ans, atol=None)")),
("sample_fraction", ("""Benchmark only a sample fraction of the search space, False by
default. To enable sampling, pass a value between 0 and 1. """, "float")),
("use_noodles", ("""Use Noodles workflow engine to tune in parallel using
multiple threads, False by Default.
Requires Noodles to be installed, use 'pip install noodles'.
Note that Noodles requires Python 3.5 or newer.
You can configure the number of threads to use with the | |
@classmethod
def get_docker_constructs(cls, construct_type, *expected_constructs):
    '''Get a list of images, containers or volumes.

    Dispatches on construct_type ('image', 'container' or 'volume') to
    the matching listing helper, forwarding expected_constructs.
    Any other construct_type raises KeyError.
    '''
    return {
        'image': cls.get_docker_image_list,
        'container': cls.get_docker_container_list,
        'volume': cls.get_docker_volume_list
    }[construct_type](*expected_constructs)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - -
@staticmethod
def get_docker_image_list(*expected_images):
    '''Get the output from "docker image ls" for specified image names.

    Returns a list of dicts with keys name/tag/id/created/size parsed
    from the listing. When expected_images is given (as "name" or
    "name:tag"), only images whose name:tag matches — tag defaulting to
    "latest" — are returned; otherwise all parsed images are returned.
    '''
    image_listing_pattern = (
        r'(?P<name>[^\s]+)\s+'
        r'(?P<tag>[^\s]+)\s+'
        r'(?P<id>[0-9a-f]+)\s+'
        r'(?P<created>.+ago)\s+'
        r'(?P<size>[^\s]+)'
        r'\s*$'
    )
    image_listing_re = re.compile(image_listing_pattern)
    docker_images_response = pexpect.run('docker image ls')
    image_list = []
    # Normalize "name"/"name:tag" into [name, tag] pairs, defaulting the
    # tag to "latest"; None disables filtering entirely.
    expected_image_nametag_pairs = [
        (x.split(':') + ['latest'])[0:2] for x in expected_images
    ] if expected_images else None
    docker_images_response_l = docker_images_response.decode('utf-8').split('\n')
    for line in docker_images_response_l:
        match = image_listing_re.match(line)
        if (
            match and (
                not expected_images or [
                    match.groupdict()['name'], match.groupdict()['tag']
                ] in expected_image_nametag_pairs
            )
        ):
            image_list.append(match.groupdict())
    return image_list
# - - - - - - - - - - - - - - - - - - - - - - - - - - - -
@staticmethod
def get_docker_container_list(*expected_containers):
    '''Get the output from "docker ps -a" for specified container names.

    Returns a list of dicts (id/image/command/created/status/ports/name)
    parsed from the listing. NOTE: expected_containers is currently
    unused; every parsed container is returned.
    '''
    container_listing_re = re.compile(
        r'(?P<id>[0-9a-f]+)\s+'
        r'(?P<image>[^\s]+)\s+'
        r'(?P<command>"[^"]+")\s+'
        r'(?P<created>.+ago)\s+'
        r'(?P<status>(Created|Exited.*ago|Up \d+ \S+))\s+'
        r'(?P<ports>[^\s]+)?\s+'
        r'(?P<name>[a-z]+_[a-z]+)'
        # r'\s*$'
    )
    listing_lines = pexpect.run('docker ps -a').decode('utf-8').split('\n')
    matches = (container_listing_re.match(line) for line in listing_lines)
    return [match.groupdict() for match in matches if match]
# - - - - - - - - - - - - - - - - - - - - - - - - - - - -
@staticmethod
def get_docker_volume_list(*expected_volumes):
    '''Get the output from "docker volume ls" for specified volumes.

    Returns a list of dicts with keys driver/name parsed from the
    listing. NOTE: expected_volumes is currently unused; every parsed
    volume is returned.
    '''
    volume_listing_re = re.compile(
        r'(?P<driver>\S+)\s+'
        r'(?P<name>\S+)'
        # r'\s*$'
    )
    output_lines = pexpect.run('docker volume ls').decode('utf-8').split('\n')
    return [
        match.groupdict()
        for match in (volume_listing_re.match(line) for line in output_lines)
        if match
    ]
# - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def require_filenames_in_directory(self, patterns=None, directory='.'):
    '''Check that filenames are found in the indicated directory.

    Each pattern in the list of patterns must match exactly one
    file in the indicated directory. A falsy ``patterns`` is a no-op.
    '''
    failure_template = (
        'Unexpected or missing filename match result in {}'
        ' for pattern r\'{}\':\n{}\n'
        'Directory contents:\n{}'
    )
    if not patterns:
        return
    self.assertTrue(os.path.exists(directory))
    filenames = os.listdir(directory)
    for pattern in patterns:
        hits = [name for name in filenames if re.match(pattern, name)]
        self.assertTrue(
            len(hits) == 1,
            failure_template.format(
                directory,
                pattern,
                hits,
                os.listdir(directory)
            )
        )
# - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def new_constructs(self, construct_type):
    '''Get a list of images, containers or volumes not already recorded.

    Compares the current docker listing against the UIDs captured in
    self.constructs['original'] and returns only unseen entries.
    '''
    uid_field = UID_KEY[construct_type]
    known_uids = [
        recorded[uid_field]
        for recorded in self.constructs['original'][construct_type]
    ]
    return [
        construct
        for construct in self.get_docker_constructs(construct_type)
        if construct[uid_field] not in known_uids
    ]
# - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def get_container_details(self, image):
    '''Run a docker container shell and retrieve several details.

    Launches a throwaway interactive container for ``image``, runs a few
    shell commands in it, and returns a dict of their results. If the
    expected config file is present, its contents are exec'd so its
    assignments appear as additional keys in the returned dict.
    '''
    # (detail name, command, result filter) for extracting details
    # from container command lines.
    shell_commands = (
        ('pwd', 'pwd', None),
        ('config_file', 'ls {}'.format(CONTAINER_CONFIG_PATH), None),
        ('config_contents', 'cat {}'.format(CONTAINER_CONFIG_PATH), None),
    )
    command = "docker run --rm -it {} bash".format(image)
    logger.info('IMAGE: %s', image)
    logger.info('CONTAINER LAUNCH COMMAND: %s', command)
    spawn = pexpect.spawn(command)
    container_details = {}
    for field, shell_command, response_filter in shell_commands:
        container_details[field] = interact(
            spawn, shell_command, response_filter
        )
    # Exit the container.
    spawn.sendcontrol('d')
    # "Expand" the config records if we found a config file.
    # SECURITY NOTE(review): exec() runs arbitrary code taken from the
    # container; acceptable only because these are locally built test
    # images — do not reuse with untrusted images.
    if container_details['config_file'] == CONTAINER_CONFIG_PATH:
        try:
            exec(container_details['config_contents'], container_details)
        except SyntaxError:
            pass
    # The '__builtins__' are noise:
    if '__builtins__' in container_details:
        del container_details['__builtins__']
    return container_details
# - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def check_container_details(self, image, expected_details=None):
    '''Validate docker container details.

    expected_details is a two-level dict: the mandatory '_common' entry
    applies to every image, and an entry keyed by the (untagged) image
    name overrides/extends it. Fails the test if any expected item is
    missing or has the wrong value in the running container.
    '''
    # A helper method to retrieve container details of interest,
    # necessary for configuration items that aren't simple config
    # file values; e.g. "SWAG_BUCKET".
    # As written, this won't allow tests to catch *missing*
    # entries if the *expected* value is None.
    def get_detail_value(data, detail):
        '''A helper method to retrieve container details of interest.'''
        def default_get_detail_value_method(data):
            '''Define a default method for get_detail_value.'''
            return data.get(detail)
        # Special-case lookups for derived/nested configuration values.
        method = {
            'SWAG_BUCKET': lambda data: (
                data.get('SWAG_OPTS') or {}
            ).get('swag.bucket_name')
        }.get(detail)
        if method is None:
            method = default_get_detail_value_method
        return method(data)
    # NOTE(review): image_tag is unpacked but unused.
    image_name, image_tag = image.split(':')
    expected_details = expected_details or {}
    assert '_common' in expected_details
    expected_container_details = dict(expected_details['_common'])
    if image_name in expected_details:
        expected_container_details.update(expected_details[image_name])
    container_details = self.get_container_details(image)
    # The comparison dict must end up equal to this pristine copy.
    clean_comparison = {
        'image': image,
        'missing': {},
        'incorrect': {}
    }
    comparison = {
        'image': image,
        'missing': {},
        'incorrect': {}
    }
    for k, v in expected_container_details.items():
        logger.info(' -- checking configuration item %s...', k)
        logger.info(' expected: %s', v)
        actual = get_detail_value(container_details, k)
        # if k not in container_details:
        # TODO: Note that this fails if we're *expecting* None.
        if actual is None:
            comparison['missing'][k] = v
            logger.info(' actual: -')
        elif actual != v:
            comparison['incorrect'][k] = {
                'expected': v,
                'actual': actual
            }
            logger.info(comparison['incorrect'][k])
        else:
            logger.info(' actual: %s', actual)
    logger.info("comparing %s", image)
    logger.info(comparison)
    logger.info('')
    self.assertEqual(comparison, clean_comparison)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def case_worker(
    self,
    target,
    expected_artifacts=None,
    expected_docker_images=None,
    expected_details=None,
    expect_aardvark=True,
    add_env=None,
    set_images_tag=True,
):
    '''Carry out common test steps.

    Runs ``make <target>`` in a controlled environment and validates the
    resulting docker images, their container configuration, and any
    files the build should have produced.

    Arguments:
        target: the make target to build.
        expected_artifacts: filename regexes that must each match exactly
            one file in ARTIFACT_DIRECTORY afterwards.
        expected_docker_images: untagged image names the build must create.
        expected_details: two-level expectations dict passed through to
            check_container_details().
        expect_aardvark: whether an "aardvark" checkout must be present.
        add_env: extra environment variables for the make invocation.
        set_images_tag: tag images uniquely per test case (vs "latest").
    '''
    logger.info(' -' * 8 + ' working case: %s' + ' -' * 8, target)
    # Unless we finish without a failure Exception, tell tearDown
    # not to clean up artifacts. We reset this below.
    self.delete_artifacts = False
    # A unique string to add to certain test case artifact names
    # to avoid clobbering/colliding.
    if set_images_tag:
        images_tag = self.testcase_tag
        logger.info('Test case images tag is %s', images_tag)
    else:
        images_tag = 'latest'
        logger.info('Default test case images tag will be "latest"')
    expected_artifacts = expected_artifacts or []
    expected_docker_images = expected_docker_images or []
    tagged_expected_docker_images = [
        x + ':{}'.format(images_tag)
        for x in expected_docker_images
    ]
    # expected_details is a two level dict so a straightforward
    # dict update isn't possible.
    expected_details = expected_details or {}
    expected_details['_common'] = expected_details.get('_common') or {}
    expected_details['_common']['config_file'] = CONTAINER_CONFIG_PATH
    # Environment variables to add to the pexpect interaction with
    # containers.
    add_env = dict(add_env or {})
    if set_images_tag:
        add_env = dict(add_env, AARDVARK_IMAGES_TAG=images_tag)
    # Fetch the default environment settings so we can update
    # those with specific case settings.
    spawn_env = dict(
        map(
            lambda x: x.strip().split('=', 1),
            pexpect.run('env').strip().decode("utf-8").split("\n")
        )
    )
    # Remove any build control variables we inherit from the test
    # environment - we want complete control over which are
    # visible to the make process in the pexpect call.
    spawn_env = {
        k: v for (k, v) in spawn_env.items()
        if k not in BUILD_CONTROL_ENV_VARIABLES
    }
    command = 'make {}'.format(target)
    logger.info('COMMAND: %s', command)
    # TODO: A sort of halfhearted attempt at adjusting for network
    # conditions. Need some kind of not-too-slow way to check for
    # network speed, say a sample download or something.
    (result, exitstatus) = pexpect.run(
        command,
        timeout=(
            PEXPECT_TIMEOUTS.get(target) or PEXPECT_TIMEOUTS['default'] *
            NETWORK_SPEED_FACTOR
        ),
        withexitstatus=True,
        env=dict(spawn_env, **add_env)
    )
    self.assertEqual(
        exitstatus, 0,
        'command "{}" exited with exit status {}'.format(
            command, exitstatus
        )
    )
    # Sanity check - we didn't delete the Makefile or any of the
    # Dockerfiles.
    self.assertEqual(
        [x for x in self.initial_contents if x not in os.listdir('.')],
        []
    )
    if expected_docker_images:
        self.assertCountEqual(
            [
                [x['name'], x['tag']]
                for x in self.get_docker_image_list(
                    *tagged_expected_docker_images
                )
            ],
            [x.split(':') for x in tagged_expected_docker_images]
        )
        for image in tagged_expected_docker_images:
            self.check_container_details(image, expected_details)
    if expect_aardvark:
        self.require_filenames_in_directory([r'aardvark$'])
    if expected_artifacts:
        self.require_filenames_in_directory(
            expected_artifacts,
            directory=ARTIFACT_DIRECTORY
        )
    # We made it through, tell tearDown we can clean up artifacts.
    self.delete_artifacts = True
# - - - - - | |
# Copyright 2020 University of Illinois Board of Trustees. All Rights Reserved.
# Author: <NAME>, DPRG (https://dprg.cs.uiuc.edu)
# This file is part of Baechi, which is released under specific terms. See file License.txt file for full license details.
# ==============================================================================
"""Runs training."""
from __future__ import absolute_import, division, print_function
import collections
import os
import pickle
import time
import numpy as np
import tensorflow as tf
from tensorflow.python.grappler import cluster as gcluster
from tensorflow.python.grappler import item as gitem
from image_classifier.networks import nets_factory
from nmt import model_factory
from placer import placer_lib, cost as cost_lib
from third_party.grappler import graph_placer as grappler_graph_placer
from utils import logger
# Command-line flags. Shared flags first, then per-model and Grappler
# sections below.
tf.app.flags.DEFINE_boolean(
    'log_device_placement', False, 'Logging device placement.')
tf.app.flags.DEFINE_boolean(
    'colocate_grads_with_ops', False, 'Colocate gradient with ops.')
tf.app.flags.DEFINE_enum(
    'optimizer', 'sgd',
    ['adadelta', 'adagrad', 'adam', 'ftrl', 'momentum', 'sgd', 'rmsprop'],
    'The name of the optimizer')
tf.app.flags.DEFINE_string(
    'model_name', 'inception_v3', 'The name of the architecture to train.')
tf.app.flags.DEFINE_integer(
    'batch_size', 32, 'The number of samples in each batch.')
tf.app.flags.DEFINE_float('learning_rate', 0.01, 'Initial learning rate.')
tf.app.flags.DEFINE_string(
    'logdir', '', 'Path to log dir.')
tf.app.flags.DEFINE_string(
    'cost_path', '/tmp/cost.pkl', 'Path to the cost file.')
tf.app.flags.DEFINE_boolean(
    'costgen', False, 'Generate cost dict.')
tf.app.flags.DEFINE_boolean(
    'only_forward', False, 'Consider only forward ops.')
tf.app.flags.DEFINE_float('memory_fraction', 1.0, 'GPU memory fraction')
# Communication cost is modeled as a linear function of tensor size:
# cost = coeff[0] * bytes + coeff[1]. See parse_comm_cost_coeffs().
tf.app.flags.DEFINE_string(
    'comm_cost_coeffs', '0.0001754,134',
    'Comma-separated linear communication cost function coefficients')
tf.app.flags.DEFINE_float(
    'comm_cost_factor', 1.0, 'Communication cost function factor.')
tf.app.flags.DEFINE_float(
    'cost_factor', 1.0, 'Factor that applies to all costs')
###### Image classifier ######
tf.app.flags.DEFINE_enum(
    'data_format', 'NHWC', ['NHWC', 'NCHW'], 'Image data format')
##### NMT ######
tf.app.flags.DEFINE_integer('vocab_size', 5000, 'Vocabulary size.')
tf.app.flags.DEFINE_integer('max_seq_length', 30, 'Max. sequence length.')
tf.app.flags.DEFINE_integer('rnn_units', 1024, 'RNN units.')
tf.app.flags.DEFINE_integer('num_layers', 2, 'RNN # layers.')
tf.app.flags.DEFINE_enum(
    'rnn_unit_type', 'lstm', ['lstm', 'gru'], 'RNN unit type.')
tf.app.flags.DEFINE_enum(
    'encoder_type', 'bi', ['bi', 'uni', 'gnmt'], 'Encoder type.')
tf.app.flags.DEFINE_boolean(
    'residual', False, 'Add residual connections to RNN.')
tf.app.flags.DEFINE_integer('num_gpus', 1, 'Number of gpus for NMT.')
tf.app.flags.DEFINE_boolean('disable_nmt_colocation', False,
                            'Disable the NMT ops colocation.')
##### Grappler ######
tf.app.flags.DEFINE_boolean('grappler', False, 'Use Grappler.')
tf.app.flags.DEFINE_integer(
    'grappler_time', 3600, 'Allotted time in seconds for Grappler.')

# Module-level logger for this file.
_LOGGER = logger.get_logger(__file__)
def _configure_optimizer(optimizer_name, learning_rate):
    """Configures the optimizer used for training.

    Args:
        optimizer_name: one of 'adadelta', 'adagrad', 'adam', 'ftrl',
            'momentum', 'sgd', or 'rmsprop'.
        learning_rate: A scalar or `Tensor` learning rate.

    Returns:
        An instance of an optimizer.

    Raises:
        ValueError: if optimizer_name is not recognized.
    """
    if optimizer_name == 'adadelta':
        optimizer = tf.train.AdadeltaOptimizer(learning_rate)
    elif optimizer_name == 'adagrad':
        optimizer = tf.train.AdagradOptimizer(learning_rate)
    elif optimizer_name == 'adam':
        optimizer = tf.train.AdamOptimizer(learning_rate)
    elif optimizer_name == 'ftrl':
        optimizer = tf.train.FtrlOptimizer(learning_rate)
    elif optimizer_name == 'momentum':
        # BUGFIX: tf.train.MomentumOptimizer requires a `momentum`
        # argument; the previous call omitted it and raised TypeError.
        # 0.9 is the conventional default used across TF model code.
        optimizer = tf.train.MomentumOptimizer(
            learning_rate, momentum=0.9, name='Momentum')
    elif optimizer_name == 'rmsprop':
        optimizer = tf.train.RMSPropOptimizer(learning_rate)
    elif optimizer_name == 'sgd':
        optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    else:
        raise ValueError(
            'Optimizer [%s] was not recognized' % optimizer_name)
    return optimizer
def _get_gpu_devices(sess_config):
    """Returns name/memory/type dicts for the GPUs visible to TF."""
    with tf.Session(config=sess_config) as sess:
        gpu_devices = []
        for device in sess.list_devices():
            if device.device_type != 'GPU':
                continue
            gpu_devices.append({
                "name": device.name,
                "memory_size": device.memory_limit_bytes,
                "type": device.device_type,
            })
        return gpu_devices
# Output-class counts for the supported image-classifier networks. Also
# serves as the registry of known image-classifier model names used for
# dispatch in build_model() and build_inputs().
_NUM_CLASSES = {
    'cifarnet': 10,
    'inception_v3': 1000,
}

# (network class, input image size) pair.
# NOTE(review): ModelSpec is not referenced anywhere in this chunk --
# possibly dead code; confirm before removing.
ModelSpec = collections.namedtuple('ModelSpec', ['cls', 'image_size'])
def build_image_classifier_model(inputs, model_name, data_format):
    """Builds the image-classifier graph and returns its scalar loss."""
    # pylint: disable=too-many-locals
    _LOGGER.info('data format: %s', data_format)
    images, labels = inputs
    network_fn = nets_factory.get_network_fn(
        model_name,
        num_classes=_NUM_CLASSES[model_name])
    logits, _ = network_fn(images, data_format=data_format)
    with tf.variable_scope('loss'):
        per_example_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=labels, logits=logits, name='xentropy')
        loss = tf.reduce_sum(per_example_loss) / tf.to_float(images.shape[0])
    return loss
def build_nmt_model(inputs, model_name, **kwargs):
    """Builds the NMT graph and returns its loss tensor."""
    # pylint: disable=too-many-locals
    # Log the NMT spec for reproducibility.
    _LOGGER.info(', '.join(
        '{}={}'.format(key, value) for key, value in kwargs.items()))
    src_input, target_input, target_output = inputs
    # The same vocabulary size is used on both the source and target side.
    vocab_size = kwargs.pop('vocab_size')
    kwargs['src_vocab_size'] = vocab_size
    kwargs['tgt_vocab_size'] = vocab_size
    model_fn = model_factory.get_model_fn(model_name, **kwargs)
    _, loss = model_fn(src_input, target_input, target_output)
    return loss
def build_model(inputs, model_name, data_format, **kwargs):
    """Dispatches to the image-classifier or NMT model builder."""
    if model_name not in _NUM_CLASSES:
        return build_nmt_model(inputs, model_name, **kwargs)
    return build_image_classifier_model(inputs, model_name, data_format)
def run_op(target_op, warmup_count=5, num_measurement=10,
           profile_every_n_steps=None, logdir=None, config=None):
    """Runs `target_op` repeatedly and measures step times.

    Args:
        target_op: op (or fetchable) executed at every step.
        warmup_count: untimed warm-up iterations run before measuring.
        num_measurement: number of measured iterations.
        profile_every_n_steps: if set, every n-th step runs with full
            tracing; profiled steps collect RunMetadata instead of a
            timing sample.
        logdir: if set, a summary FileWriter and per-step RunMetadata
            protobufs are written there.
        config: optional tf.ConfigProto for the session.

    Returns:
        (average step time in seconds over non-profiled steps,
         list of RunMetadata from profiled steps).
    """
    # pylint: disable=too-many-locals, too-many-arguments
    with tf.Session(config=config) as sess:
        writer = (tf.summary.FileWriter(logdir=logdir,
                                        graph=tf.get_default_graph())
                  if logdir else None)
        try:
            sess.run(tf.global_variables_initializer())
            warmup_start_time = time.time()
            for _ in range(warmup_count):
                sess.run(target_op)
            warmup_end_time = time.time()
            _LOGGER.info('Warmup time: %s',
                         str(warmup_end_time - warmup_start_time))
            runtimes = []
            run_metadata_list = []
            for step in range(1, num_measurement + 1):
                if profile_every_n_steps and step % profile_every_n_steps == 0:
                    _LOGGER.info('Profiling step %d...', step)
                    run_options = tf.RunOptions(
                        trace_level=tf.RunOptions.FULL_TRACE)
                    run_metadata = tf.RunMetadata()
                    sess.run(target_op,
                             options=run_options,
                             run_metadata=run_metadata)
                    if writer:
                        writer.add_run_metadata(
                            run_metadata, 'step-{}'.format(step))
                        # `writer` implies `logdir` is set, so the join
                        # below is safe.
                        metadata_out_path = os.path.join(
                            logdir, 'run_metadata-{}.pbtxt'.format(step))
                        # pylint: disable=invalid-name
                        with open(metadata_out_path, 'wb') as f:
                            f.write(run_metadata.SerializeToString())
                    run_metadata_list.append(run_metadata)
                else:
                    start_time = time.time()
                    sess.run(target_op)
                    end_time = time.time()
                    runtimes.append(end_time - start_time)
            _LOGGER.info('Profile run time: %s',
                         str(time.time() - warmup_end_time))
        finally:
            # BUGFIX: the FileWriter was never closed before, so buffered
            # summaries/run metadata could be lost.
            if writer:
                writer.close()
        # BUGFIX: guard against an empty sample (every step profiled);
        # np.average([]) returns NaN with a RuntimeWarning otherwise.
        avg_step_time = np.average(runtimes) if runtimes else float('nan')
        _LOGGER.info('Graph execution stats. #samples=%d, median=%s, mean=%s',
                     len(runtimes),
                     np.median(runtimes) if runtimes else float('nan'),
                     avg_step_time)
        return avg_step_time, run_metadata_list
def get_costs(target_op, warmup_count=5, num_measurement=50,
              profile_every_n_steps=5, sess_config=None, logdir=None):
    """Profiles `target_op` and returns (avg step time, cost dict)."""
    # pylint: disable=too-many-arguments
    step_time, metadata_list = run_op(
        target_op,
        warmup_count=warmup_count,
        num_measurement=num_measurement,
        profile_every_n_steps=profile_every_n_steps,
        logdir=logdir,
        config=sess_config)
    # Fold the collected RunMetadata into a per-op cost dictionary.
    return step_time, cost_lib.build_cost_dict(metadata_list)
def generate_cost(target_op, cost_path, sess_config=None, logdir=None):
    """Profiles the default graph and pickles its cost data to cost_path."""
    if not cost_path:
        raise ValueError('cost_path is required.')
    # Snapshot the graphdef first: get_costs() adds init ops to the graph.
    graphdef = tf.get_default_graph().as_graph_def()
    start_time = time.time()
    step_time, cost_dict = get_costs(
        target_op, sess_config=sess_config, logdir=logdir)
    _LOGGER.info('Original runtime: %f', step_time)
    cost_dir_path = os.path.dirname(cost_path)
    if cost_dir_path:
        os.makedirs(cost_dir_path, exist_ok=True)
    _LOGGER.info('Saving to %s...', cost_path)
    with open(cost_path, 'wb') as cost_file:
        pickle.dump({'graphdef': graphdef, 'cost_dict': cost_dict},
                    cost_file)
    _LOGGER.info('Profile run costs: %s', str(time.time() - start_time))
def run_placement(target_op, cost_path, comm_cost_coeffs, cost_factor,
                  logdir=None, sess_config=None):
    """Places the default graph onto the available GPUs using saved costs."""
    # pylint: disable=too-many-locals
    if not cost_path:
        raise ValueError('cost_path is required.')
    with open(cost_path, 'rb') as cost_file:
        cost_data = pickle.load(cost_file)
    graph = tf.get_default_graph()
    # The pickled costs must describe exactly this graph.
    assert cost_data['graphdef'] == graph.as_graph_def()
    devices = _get_gpu_devices(sess_config)
    cost_dict = cost_data['cost_dict']
    if cost_factor != 1.0:
        # Scale costs for sensitivity experiments.
        cost_dict, comm_cost_coeffs = cost_lib.adjust_costs(
            cost_factor, cost_dict, comm_cost_coeffs)
    start_time = time.time()
    placer = placer_lib.get_placer(
        graph,
        devices=devices,
        cost_dict=cost_dict,
        comm_cost_coeffs=comm_cost_coeffs)
    placer.run()
    _LOGGER.info('Entire placement time: %s', str(time.time() - start_time))
def _build_image_classifier_inputs(model_name, batch_size, data_format):
    """Creates a repeating dummy (images, labels) input pipeline."""
    network_fn = nets_factory.get_network_fn(
        model_name,
        num_classes=_NUM_CLASSES[model_name])
    side = network_fn.default_image_size
    if data_format == 'NHWC':
        input_shape = (batch_size, side, side, 3)
    else:  # NCHW
        input_shape = (batch_size, 3, side, side)
    element = (np.ones(input_shape, dtype=np.float32),
               np.zeros(batch_size, dtype=np.int32))
    with tf.variable_scope('dataset'):
        dataset = tf.data.Dataset.from_tensors(element).repeat()
        return dataset.make_one_shot_iterator().get_next()
def _build_nmt_inputs(batch_size, max_seq_length):
    """Creates a repeating dummy (src, tgt_in, tgt_out) input pipeline."""
    shape = (batch_size, max_seq_length)
    # Source input, target input, and target output are all dummy ones.
    element = tuple(np.ones(shape, dtype=np.int32) for _ in range(3))
    with tf.variable_scope('dataset'):
        dataset = tf.data.Dataset.from_tensors(element).repeat()
        return dataset.make_one_shot_iterator().get_next()
def build_inputs(model_name, batch_size, data_format, max_seq_length):
    """Creates dummy inputs matching the requested model."""
    if model_name not in _NUM_CLASSES:
        return _build_nmt_inputs(batch_size, max_seq_length)
    return _build_image_classifier_inputs(model_name, batch_size, data_format)
def build_train_op(loss, optimizer_name, learning_rate,
                   colocate_grads_with_ops):
    """Builds the op that applies one optimizer step for `loss`."""
    optimizer = _configure_optimizer(optimizer_name, learning_rate)
    grads_and_vars = optimizer.compute_gradients(
        loss, colocate_gradients_with_ops=colocate_grads_with_ops)
    global_step = tf.train.create_global_step()
    return optimizer.apply_gradients(
        grads_and_vars, global_step=global_step)
def run_grappler(target_op, allotted_time, logdir, sess_config):
    """Runs Grappler placement.

    Lets TensorFlow's Grappler graph placer search for a device
    placement, applies the best placement found to the default graph's
    ops in-place, then runs `target_op` to measure the resulting step
    time.

    Args:
        target_op: op to execute for the final timing measurement.
        allotted_time: seconds Grappler may spend searching placements.
        logdir: optional directory where candidate metagraphs and run
            metadata are written.
        sess_config: tf.ConfigProto used for every session created here.
    """
    tf.logging.set_verbosity(tf.logging.INFO)
    # need to create a session here with memory fraction.
    # otherwise, memory fraction flag is not correctly set due to a session
    # created by cluster
    with tf.Session(config=sess_config):
        pass
    graph = tf.get_default_graph()
    cluster = gcluster.Cluster()
    metagraph = tf.train.export_meta_graph(graph=graph,
                                           clear_extraneous_savers=True)
    _LOGGER.info('Grappler allotted time: %d', allotted_time)
    placed_metagraph_list = grappler_graph_placer.PlaceGraph(
        metagraph,
        cluster=cluster,
        allotted_time=allotted_time,
        verbose=True,
        sess_config=sess_config,
        gpu_only=True)
    _LOGGER.info('# found metagraph: %d', len(placed_metagraph_list))
    if len(placed_metagraph_list) == 0:
        _LOGGER.info('No feasible placement is found.')
        return
    if logdir:
        # Dump every candidate placement for offline inspection.
        metagraph_dir = os.path.join(logdir, 'metagraph')
        os.makedirs(metagraph_dir, exist_ok=True)
        for i, metagraph in enumerate(placed_metagraph_list):
            metagraph_path = os.path.join(
                metagraph_dir, 'metagraph-%d.pbtxt' % i)
            # pylint: disable=invalid-name
            with open(metagraph_path, 'wb') as f:
                f.write(metagraph.SerializeToString())
    # use the last element because it is the best placement that is found.
    placed_metagraph = placed_metagraph_list[-1]
    # assign device placement
    # Copy each node's chosen device onto the live graph; _set_device is
    # private TF API, hence the pylint suppression.
    for node in placed_metagraph.graph_def.node:
        tf_op = graph.get_operation_by_name(node.name)
        # pylint: disable=protected-access
        tf_op._set_device(node.device)
    # Time the placed graph; with num_measurement == profile_every_n_steps
    # exactly one step (the last) is profiled for run metadata.
    step_time = run_op(
        target_op, warmup_count=10, num_measurement=21,
        profile_every_n_steps=21, logdir=logdir,
        config=sess_config)[0]
    _LOGGER.info('Average runtime: {}'.format(step_time))
def parse_comm_cost_coeffs(coeffs_str, factor=1.0):
    """Parses 'slope,intercept' communication-cost coefficients.

    Args:
        coeffs_str: comma-separated string of exactly two values, e.g.
            '0.0001754,134' -- the slope (float) and intercept (int) of
            the linear communication cost function.
        factor: optional multiplier applied to both coefficients.

    Returns:
        A (slope, intercept) tuple, scaled by `factor` when it differs
        from 1.0.
    """
    parts = coeffs_str.split(',')
    assert len(parts) == 2
    # BUGFIX: always return a tuple; previously the function returned a
    # list when factor == 1.0 and a tuple otherwise.
    comm_cost_coeffs = (float(parts[0]), int(parts[1]))
    if factor != 1.0:
        _LOGGER.info('Communication cost factor: %s', str(factor))
        comm_cost_coeffs = tuple(
            value * factor for value in comm_cost_coeffs)
    return comm_cost_coeffs
def main(unparsed_args):
"""Main function."""
if len(unparsed_args) > 1:
raise RuntimeError('Unparsed args: {}'.format(unparsed_args[1:]))
# pylint: disable=invalid-name
FLAGS = tf.app.flags.FLAGS
# pylint: enable=invalid-name
sess_config = tf.ConfigProto(
allow_soft_placement=True,
log_device_placement=FLAGS.log_device_placement)
if FLAGS.memory_fraction != 1.0:
sess_config.gpu_options.per_process_gpu_memory_fraction = \
FLAGS.memory_fraction
# disable TF optimizer
sess_config.graph_options.optimizer_options.opt_level = -1
_LOGGER.debug('Session config: %s', str(sess_config))
inputs = build_inputs(
model_name=FLAGS.model_name,
batch_size=FLAGS.batch_size,
# image classifier
data_format=FLAGS.data_format,
# NMT
max_seq_length=FLAGS.max_seq_length,
)
# build graph
loss = build_model(
inputs=inputs,
model_name=FLAGS.model_name,
# image classifier
data_format=FLAGS.data_format,
# NMT
vocab_size=FLAGS.vocab_size,
rnn_units=FLAGS.rnn_units,
num_layers=FLAGS.num_layers,
rnn_unit_type=FLAGS.rnn_unit_type,
encoder_type=FLAGS.encoder_type,
residual=FLAGS.residual,
num_gpus=FLAGS.num_gpus,
colocation=not FLAGS.disable_nmt_colocation)
only_forward = FLAGS.only_forward
_LOGGER.info('Only consider forward ops: %s', str(only_forward))
colocate_grads_with_ops = FLAGS.colocate_grads_with_ops
_LOGGER.info('Coloate grads with ops: %s' % str(colocate_grads_with_ops))
comm_cost_coeffs = parse_comm_cost_coeffs(
FLAGS.comm_cost_coeffs, FLAGS.comm_cost_factor)
if only_forward:
assert colocate_grads_with_ops
# add to the train op collections to support important ops identification
tf.add_to_collection(tf.GraphKeys.TRAIN_OP, loss)
target_op = loss
if FLAGS.costgen:
if not only_forward:
train_op = build_train_op(
loss,
optimizer_name=FLAGS.optimizer,
learning_rate=FLAGS.learning_rate,
colocate_grads_with_ops=colocate_grads_with_ops)
target_op = train_op
generate_cost(target_op,
cost_path=FLAGS.cost_path,
sess_config=sess_config,
logdir=FLAGS.logdir)
else:
if | |
<filename>tests/test_export_sci.py
# coding: utf-8
import unittest
from lxml import etree as ET
import json
import os
from lxml import etree
from xylose.scielodocument import Article
from articlemeta import export_sci
from articlemeta import export
class XMLCitationTests(unittest.TestCase):
    """Unit tests for the export_sci.XMLCitation pipeline.

    Every test feeds a (citation, partial XML tree) pair through one
    pipe and inspects the XML fragment it produces. The "without_data"
    variants build a minimal xylose Article whose first citation lacks
    the field under test and assert that the pipe adds nothing.
    """

    def setUp(self):
        fixture_path = os.path.dirname(__file__) + '/fixtures/article_meta.json'
        # BUGFIX: use a context manager so the fixture file handle is
        # closed instead of leaked.
        with open(fixture_path) as fixture:
            self._raw_json = json.loads(fixture.read())
        self._citation_meta = Article(self._raw_json).citations[0]
        self._xmlcitation = export_sci.XMLCitation()

    def test_xml_citation_setup_pipe(self):
        data = [self._citation_meta, None]
        raw, xml = self._xmlcitation.SetupCitationPipe().transform(data)
        rootcitation = xml.findall('.')[0].tag
        self.assertEqual('ref', rootcitation)

    def test_xml_citation_id_as_str_pipe(self):
        pxml = ET.Element('ref')
        data = [self._citation_meta, pxml]
        raw, xml = self._xmlcitation.RefIdPipe().transform(data)
        strid = xml.find('.').get('id')
        # BUGFIX: `basestring` exists only on Python 2 and raised
        # NameError under Python 3. `str is bytes` is True exactly on
        # Python 2, so the right base type is chosen on both versions.
        string_type = basestring if str is bytes else str  # noqa: F821
        self.assertTrue(isinstance(strid, string_type))

    def test_xml_citation_element_citation_pipe(self):
        pxml = ET.Element('ref')
        data = [self._citation_meta, pxml]
        raw, xml = self._xmlcitation.ElementCitationPipe().transform(data)
        publicationtype = xml.find(
            './element-citation[@publication-type="article"]').get(
                'publication-type')
        self.assertEqual(u'article', publicationtype)

    def test_xml_citation_article_title_pipe(self):
        pxml = ET.Element('ref')
        pxml.append(ET.Element('element-citation'))
        data = [self._citation_meta, pxml]
        raw, xml = self._xmlcitation.ArticleTitlePipe().transform(data)
        expected = xml.find('./element-citation/article-title').text
        self.assertEqual(u'End-stage renal disease in sub-Saharan Africa.',
                         expected)

    def test_xml_citation_article_title_without_data_pipe(self):
        fakexylosearticle = Article({'article': {},
                                     'title': {},
                                     'citations': [{}]}).citations[0]
        pxml = ET.Element('ref')
        pxml.append(ET.Element('element-citation'))
        data = [fakexylosearticle, pxml]
        raw, xml = self._xmlcitation.ArticleTitlePipe().transform(data)
        expected = xml.find('./element-citation/article-title')
        self.assertEqual(None, expected)

    def test_xml_citation_url_pipe(self):
        # v37 is the ISIS field holding the citation URL.
        fakexylosearticle = Article({'article': {},
                                     'title': {},
                                     'citations': [{'v37': [{'_': 'http://www.scielo.br'}]}]}).citations[0]
        pxml = ET.Element('ref')
        pxml.append(ET.Element('element-citation'))
        data = [fakexylosearticle, pxml]
        raw, xml = self._xmlcitation.URIPipe().transform(data)
        expected = xml.find('./element-citation/ext-link').text
        self.assertEqual(u'http://www.scielo.br', expected)

    def test_xml_citation_source_pipe(self):
        pxml = ET.Element('ref')
        pxml.append(ET.Element('element-citation'))
        data = [self._citation_meta, pxml]
        raw, xml = self._xmlcitation.SourcePipe().transform(data)
        expected = xml.find('./element-citation/source').text
        self.assertEqual(u'Ethn Dis.', expected)

    def test_xml_citation_source_without_data_pipe(self):
        fakexylosearticle = Article({'article': {},
                                     'title': {},
                                     'citations': [{}]}).citations[0]
        pxml = ET.Element('ref')
        pxml.append(ET.Element('element-citation'))
        data = [fakexylosearticle, pxml]
        raw, xml = self._xmlcitation.SourcePipe().transform(data)
        expected = xml.find('./element-citation/source')
        self.assertEqual(None, expected)

    def test_xml_citation_date_pipe(self):
        pxml = ET.Element('ref')
        pxml.append(ET.Element('element-citation'))
        data = [self._citation_meta, pxml]
        raw, xml = self._xmlcitation.DatePipe().transform(data)
        expected = xml.find('./element-citation/date/year').text
        self.assertEqual(u'2006', expected)

    def test_xml_citation_date_with_year_and_month_pipe(self):
        # v65 holds the citation date as YYYYMM(DD).
        fakexylosearticle = Article({'article': {},
                                     'title': {},
                                     'citations': [{'v65': [{'_': '200604'}]}]}).citations[0]
        pxml = ET.Element('ref')
        pxml.append(ET.Element('element-citation'))
        data = [fakexylosearticle, pxml]
        raw, xml = self._xmlcitation.DatePipe().transform(data)
        expected_year = xml.find('./element-citation/date/year').text
        expected_month = xml.find('./element-citation/date/month').text
        self.assertEqual(u'2006', expected_year)
        self.assertEqual(u'04', expected_month)

    def test_xml_citation_date_with_year_and_month_and_day_pipe(self):
        fakexylosearticle = Article({'article': {},
                                     'title': {},
                                     'citations': [{'v65': [{'_': '20060430'}]}]}).citations[0]
        pxml = ET.Element('ref')
        pxml.append(ET.Element('element-citation'))
        data = [fakexylosearticle, pxml]
        raw, xml = self._xmlcitation.DatePipe().transform(data)
        expected_year = xml.find('./element-citation/date/year').text
        expected_month = xml.find('./element-citation/date/month').text
        expected_day = xml.find('./element-citation/date/day').text
        self.assertEqual(u'2006', expected_year)
        self.assertEqual(u'04', expected_month)
        self.assertEqual(u'30', expected_day)

    def test_xml_citation_date_without_data_pipe(self):
        fakexylosearticle = Article({'article': {},
                                     'title': {},
                                     'citations': [{}]}).citations[0]
        pxml = ET.Element('ref')
        pxml.append(ET.Element('element-citation'))
        data = [fakexylosearticle, pxml]
        raw, xml = self._xmlcitation.DatePipe().transform(data)
        expected = xml.find('./element-citation/date')
        self.assertEqual(None, expected)

    def test_xml_citation_fpage_pipe(self):
        pxml = ET.Element('ref')
        pxml.append(ET.Element('element-citation'))
        data = [self._citation_meta, pxml]
        raw, xml = self._xmlcitation.StartPagePipe().transform(data)
        expected = xml.find('./element-citation/fpage').text
        self.assertEqual(u'2,5,9', expected)

    def test_xml_citation_fpage_without_data_pipe(self):
        fakexylosearticle = Article({'article': {},
                                     'title': {},
                                     'citations': [{}]}).citations[0]
        pxml = ET.Element('ref')
        pxml.append(ET.Element('element-citation'))
        data = [fakexylosearticle, pxml]
        raw, xml = self._xmlcitation.StartPagePipe().transform(data)
        expected = xml.find('./element-citation/fpage')
        self.assertEqual(None, expected)

    def test_xml_citation_lpage_pipe(self):
        # v14 holds the page range; the pipe extracts the last page.
        fakexylosearticle = Article({'article': {},
                                     'title': {},
                                     'citations': [{'v14': [{'_': '120-130'}]}]}).citations[0]
        pxml = ET.Element('ref')
        pxml.append(ET.Element('element-citation'))
        data = [fakexylosearticle, pxml]
        raw, xml = self._xmlcitation.EndPagePipe().transform(data)
        expected = xml.find('./element-citation/lpage').text
        self.assertEqual(u'130', expected)

    def test_xml_citation_lpage_without_data_pipe(self):
        fakexylosearticle = Article({'article': {},
                                     'title': {},
                                     'citations': [{}]}).citations[0]
        pxml = ET.Element('ref')
        pxml.append(ET.Element('element-citation'))
        data = [fakexylosearticle, pxml]
        raw, xml = self._xmlcitation.EndPagePipe().transform(data)
        expected = xml.find('./element-citation/lpage')
        self.assertEqual(None, expected)

    def test_xml_citation_volume_pipe(self):
        pxml = ET.Element('ref')
        pxml.append(ET.Element('element-citation'))
        data = [self._citation_meta, pxml]
        raw, xml = self._xmlcitation.VolumePipe().transform(data)
        expected = xml.find('./element-citation/volume').text
        self.assertEqual(u'16', expected)

    def test_xml_citation_volume_without_data_pipe(self):
        fakexylosearticle = Article({'article': {},
                                     'title': {},
                                     'citations': [{}]}).citations[0]
        pxml = ET.Element('ref')
        pxml.append(ET.Element('element-citation'))
        data = [fakexylosearticle, pxml]
        raw, xml = self._xmlcitation.VolumePipe().transform(data)
        expected = xml.find('./element-citation/volume')
        self.assertEqual(None, expected)

    def test_xml_citation_issue_pipe(self):
        pxml = ET.Element('ref')
        pxml.append(ET.Element('element-citation'))
        data = [self._citation_meta, pxml]
        raw, xml = self._xmlcitation.IssuePipe().transform(data)
        expected = xml.find('./element-citation/issue').text
        self.assertEqual(u'2', expected)

    def test_xml_citation_issue_without_data_pipe(self):
        fakexylosearticle = Article({'article': {},
                                     'title': {},
                                     'citations': [{}]}).citations[0]
        pxml = ET.Element('ref')
        pxml.append(ET.Element('element-citation'))
        data = [fakexylosearticle, pxml]
        raw, xml = self._xmlcitation.IssuePipe().transform(data)
        expected = xml.find('./element-citation/issue')
        self.assertEqual(None, expected)

    def test_xml_citation_person_group_len_pipe(self):
        pxml = ET.Element('ref')
        pxml.append(ET.Element('element-citation'))
        data = [self._citation_meta, pxml]
        raw, xml = self._xmlcitation.PersonGroupPipe().transform(data)
        expected = len(xml.findall('./element-citation/person-group/name'))
        self.assertEqual(1, expected)

    def test_xml_citation_person_group_given_names_pipe(self):
        pxml = ET.Element('ref')
        pxml.append(ET.Element('element-citation'))
        data = [self._citation_meta, pxml]
        raw, xml = self._xmlcitation.PersonGroupPipe().transform(data)
        result = xml.find('./element-citation/person-group/name/given-names').text
        self.assertEqual('EL', result)

    def test_xml_citation_person_group_surname_pipe(self):
        pxml = ET.Element('ref')
        pxml.append(ET.Element('element-citation'))
        data = [self._citation_meta, pxml]
        raw, xml = self._xmlcitation.PersonGroupPipe().transform(data)
        result = xml.find('./element-citation/person-group/name/surname').text
        self.assertEqual('Bamgboye', result)

    def test_xml_citation_person_group_without_data_pipe(self):
        fakexylosearticle = Article({'article': {},
                                     'title': {},
                                     'citations': [{}]}).citations[0]
        pxml = ET.Element('ref')
        pxml.append(ET.Element('element-citation'))
        data = [fakexylosearticle, pxml]
        raw, xml = self._xmlcitation.PersonGroupPipe().transform(data)
        expected = xml.find('./element-citation/person-group')
        self.assertEqual(None, expected)
class ExportTests(unittest.TestCase):
def setUp(self):
self._raw_json = json.loads(open(os.path.dirname(__file__)+'/fixtures/article_meta.json').read())
self._article_meta = Article(self._raw_json)
def test_xmlclose_pipe(self):
pxml = ET.Element('articles')
pxml.append(ET.Element('article'))
data = [None, pxml]
xmlarticle = export_sci.XMLClosePipe()
xml = xmlarticle.transform(data)
self.assertEqual('<articles><article/></articles>', xml)
def test_setuppipe_element_name(self):
data = [None, None]
xmlarticle = export_sci.SetupArticlePipe()
raw, xml = xmlarticle.transform(data)
self.assertEqual('articles', xml.tag)
def test_setuppipe_attributes(self):
data = [None, None]
xmlarticle = export_sci.SetupArticlePipe()
raw, xml = xmlarticle.transform(data)
attributes = sorted(['dtd-version', '{http://www.w3.org/2001/XMLSchema-instance}schemaLocation'])
self.assertEqual(attributes, sorted(xml.keys()))
def test_xmlarticle_pipe(self):
pxml = ET.Element('articles')
data = [self._article_meta, pxml]
xmlarticle = export_sci.XMLArticlePipe()
raw, xml = xmlarticle.transform(data)
self.assertEqual('<articles><article lang_id="pt" article-type="research-article"/></articles>', ET.tostring(xml))
def test_xmlfront_pipe(self):
pxml = ET.Element('articles')
pxml.append(ET.Element('article'))
data = [None, pxml]
xmlarticle = export_sci.XMLFrontPipe()
raw, xml = xmlarticle.transform(data)
self.assertEqual('<articles><article><front><journal-meta/><article-meta/></front></article></articles>', ET.tostring(xml))
def test_xmljournal_id_pipe(self):
pxml = ET.Element('articles')
pxml.append(ET.Element('article'))
article = pxml.find('article')
article.append(ET.Element('front'))
front = article.find('front')
front.append(ET.Element('journal-meta'))
data = [self._article_meta, pxml]
xmlarticle = export_sci.XMLJournalMetaJournalIdPipe()
raw, xml = xmlarticle.transform(data)
self.assertEqual('<articles><article><front><journal-meta><journal-id journal-id-type="publisher">rsp</journal-id></journal-meta></front></article></articles>', ET.tostring(xml))
def test_xmljournal_meta_journal_title_group_pipe(self):
pxml = ET.Element('articles')
pxml.append(ET.Element('article'))
article = pxml.find('article')
article.append(ET.Element('front'))
front = article.find('front')
front.append(ET.Element('journal-meta'))
data = [self._article_meta, pxml]
xmlarticle = export_sci.XMLJournalMetaJournalTitleGroupPipe()
raw, xml = xmlarticle.transform(data)
title = xml.find('./article/front/journal-meta/journal-title-group/journal-title').text
abbrevtitle = xml.find('./article/front/journal-meta/journal-title-group/abbrev-journal-title').text
self.assertEqual(u'Revista de Saúde Pública', title)
self.assertEqual(u'Rev. Saúde Pública', abbrevtitle)
def test_xmljournal_meta_issn_pipe(self):
pxml = ET.Element('articles')
pxml.append(ET.Element('article'))
article = pxml.find('article')
article.append(ET.Element('front'))
front = article.find('front')
front.append(ET.Element('journal-meta'))
data = [self._article_meta, pxml]
xmlarticle = export_sci.XMLJournalMetaISSNPipe()
raw, xml = xmlarticle.transform(data)
issn = xml.find('./article/front/journal-meta/issn').text
self.assertEqual(u'0034-8910', issn)
def test_xmljournal_meta_collection_pipe(self):
pxml = ET.Element('articles')
pxml.append(ET.Element('article'))
article = pxml.find('article')
article.append(ET.Element('front'))
front = article.find('front')
front.append(ET.Element('journal-meta'))
data = [self._article_meta, pxml]
xmlarticle = export_sci.XMLJournalMetaCollectionPipe()
raw, xml = xmlarticle.transform(data)
collection = xml.find('./article/front/journal-meta/collection').text
self.assertEqual(u'SciELO Brazil', collection)
def test_xmljournal_meta_publisher_pipe(self):
pxml = ET.Element('articles')
pxml.append(ET.Element('article'))
article = pxml.find('article')
article.append(ET.Element('front'))
front = article.find('front')
front.append(ET.Element('journal-meta'))
data = [self._article_meta, pxml]
xmlarticle = export_sci.XMLJournalMetaPublisherPipe()
raw, xml = xmlarticle.transform(data)
publishername = xml.find('./article/front/journal-meta/publisher/publisher-name').text
publisherloc = xml.find('./article/front/journal-meta/publisher/publisher-loc').text
self.assertEqual(u'Faculdade de Saúde Pública da Universidade de São Paulo', publishername)
self.assertEqual(u'São Paulo', publisherloc)
def test_xml_article_meta_unique_article_id_pipe(self):
pxml = ET.Element('articles')
pxml.append(ET.Element('article'))
article = pxml.find('article')
article.append(ET.Element('front'))
front = article.find('front')
front.append(ET.Element('article-meta'))
data = [self._article_meta, pxml]
xmlarticle = export_sci.XMLArticleMetaUniqueArticleIdPipe()
raw, xml = xmlarticle.transform(data)
uniquearticleid = xml.find('./article/front/article-meta/unique-article-id').text
self.assertEqual(u'S0034-89102010000400007', uniquearticleid)
def test_xml_article_meta_article_id_publisher_pipe(self):
pxml = ET.Element('articles')
pxml.append(ET.Element('article'))
article = pxml.find('article')
article.append(ET.Element('front'))
front = article.find('front')
front.append(ET.Element('article-meta'))
data = [self._article_meta, pxml]
xmlarticle = export_sci.XMLArticleMetaArticleIdPublisherPipe()
raw, xml = xmlarticle.transform(data)
articleidpublisher = xml.find('./article/front/article-meta/article-id[@pub-id-type="publisher-id"]').text
self.assertEqual(u'S0034-89102010000400007', articleidpublisher)
def test_xml_article_meta_article_id_doi_pipe(self):
pxml = ET.Element('articles')
pxml.append(ET.Element('article'))
article = pxml.find('article')
article.append(ET.Element('front'))
front = article.find('front')
front.append(ET.Element('article-meta'))
data = [self._article_meta, pxml]
xmlarticle = export_sci.XMLArticleMetaArticleIdDOIPipe()
raw, xml = xmlarticle.transform(data)
articleidpublisher = xml.find('./article/front/article-meta/article-id[@pub-id-type="doi"]').text
self.assertEqual(u'10.1590/S0034-89102010000400007', articleidpublisher)
def test_xml_article_meta_article_id_doi_without_data_pipe(self):
fakexylosearticle = Article({'article': {}, 'title': {}})
pxml = ET.Element('articles')
pxml.append(ET.Element('article'))
article = pxml.find('article')
article.append(ET.Element('front'))
front = article.find('front')
front.append(ET.Element('article-meta'))
data = [fakexylosearticle, pxml]
xmlarticle = export_sci.XMLArticleMetaArticleIdDOIPipe()
raw, xml = xmlarticle.transform(data)
# This try except is a trick to test the expected result of the
# piped XML, once the precond method don't raise an exception
# we try to check if the preconditioned pipe was called or not.
try:
xml.find('./article/front/article-meta/article-id[@pub-id-type="doi"]').text
except AttributeError:
self.assertTrue(True)
else:
self.assertTrue(False)
def test_xmlarticle_meta_article_categories_pipe(self):
pxml = ET.Element('articles')
pxml.append(ET.Element('article'))
article = pxml.find('article')
article.append(ET.Element('front'))
front = article.find('front')
front.append(ET.Element('article-meta'))
data = [self._article_meta, pxml]
xmlarticle = export_sci.XMLArticleMetaArticleCategoriesPipe()
raw, xml = xmlarticle.transform(data)
categories = [i.text for i in xml.findall('./article/front/article-meta/article-categories/subj-group/subject')]
self.assertEqual([u'PUBLIC, ENVIRONMENTAL & OCCUPATIONAL HEALTH'], categories)
def test_xmlarticle_meta_article_categories_without_data_pipe(self):
fakexylosearticle = Article({'article': {}, 'title': {}})
pxml = ET.Element('articles')
pxml.append(ET.Element('article'))
article = pxml.find('article')
article.append(ET.Element('front'))
front = article.find('front')
front.append(ET.Element('article-meta'))
data = [fakexylosearticle, pxml]
xmlarticle = export_sci.XMLArticleMetaArticleCategoriesPipe()
raw, xml = xmlarticle.transform(data)
self.assertEqual(None, xml.find('./article/front/article-meta/article-categories/subj-group/subject'))
    def test_xmlarticle_meta_title_group_pipe(self):
        """The title-group pipe must emit the original-language article title."""
        # NOTE(review): the "@<EMAIL>" fragment in the xpath below is a
        # data-sanitization artifact (the original was most likely a language
        # attribute predicate). Recover the real expression from version
        # control before relying on this test.
        pxml = ET.Element('articles')
        pxml.append(ET.Element('article'))
        article = pxml.find('article')
        article.append(ET.Element('front'))
        front = article.find('front')
        front.append(ET.Element('article-meta'))
        data = [self._article_meta, pxml]
        xmlarticle = export_sci.XMLArticleMetaTitleGroupPipe()
        raw, xml = xmlarticle.transform(data)
        title = xml.find('./article/front/article-meta/title-group/article-title[@<EMAIL>="pt"]').text
        self.assertEqual(u'Perfil epidemiológico dos pacientes em terapia renal substitutiva no Brasil, 2000-2004', title)
def test_xmlarticle_meta_translated_title_group_pipe(self):
pxml = ET.Element('articles')
pxml.append(ET.Element('article'))
article = pxml.find('article')
article.append(ET.Element('front'))
front = article.find('front')
front.append(ET.Element('article-meta'))
articlemeta = front.find('article-meta')
articlemeta.append(ET.Element('title-group'))
data = [self._article_meta, pxml]
xmlarticle = export_sci.XMLArticleMetaTranslatedTitleGroupPipe()
raw, xml = xmlarticle.transform(data)
titles = [i.find('trans-title').text for i in xml.findall('./article/front/article-meta/title-group/trans-title-group')]
self.assertEqual([u'Epidemiological profile of patients on renal replacement therapy in Brazil, 2000-2004',
u'Perfil epidemiológico de los pacientes en terapia renal substitutiva en Brasil, 2000-2004'], titles)
def test_xmlarticle_meta_translated_title_group_without_data_pipe(self):
fakexylosearticle = Article({'article': {}, 'title': {}})
pxml = ET.Element('articles')
pxml.append(ET.Element('article'))
article = pxml.find('article')
article.append(ET.Element('front'))
front = article.find('front')
front.append(ET.Element('article-meta'))
articlemeta = front.find('article-meta')
articlemeta.append(ET.Element('title-group'))
data = [fakexylosearticle, pxml]
xmlarticle = export_sci.XMLArticleMetaContribGroupPipe()
raw, xml = xmlarticle.transform(data)
titles = [i.find('trans-title').text for i in xml.findall('./article/front/article-meta/title-group/trans-title-group')]
self.assertEqual([], titles)
def test_xmlarticle_meta_contrib_group_author_names_pipe(self):
pxml = ET.Element('articles')
pxml.append(ET.Element('article'))
article = pxml.find('article')
article.append(ET.Element('front'))
front = article.find('front')
front.append(ET.Element('article-meta'))
data = | |
# Copyright 2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, List, Tuple, Type
from typing_extensions import Literal
from synapse.api.constants import MAX_GROUP_CATEGORYID_LENGTH, MAX_GROUP_ROLEID_LENGTH
from synapse.api.errors import Codes, SynapseError
from synapse.federation.transport.server._base import (
Authenticator,
BaseFederationServlet,
)
from synapse.http.servlet import parse_string_from_args
from synapse.server import HomeServer
from synapse.types import JsonDict, get_domain_from_id
from synapse.util.ratelimitutils import FederationRateLimiter
class BaseGroupsServerServlet(BaseFederationServlet):
    """Abstract base class for federation servlet classes which provides a groups server handler.

    See BaseFederationServlet for more information.
    """

    def __init__(
        self,
        hs: HomeServer,
        authenticator: Authenticator,
        ratelimiter: FederationRateLimiter,
        server_name: str,
    ):
        super().__init__(hs, authenticator, ratelimiter, server_name)
        # Subclasses dispatch their on_GET/on_POST/on_DELETE work to this
        # groups-server handler.
        self.handler = hs.get_groups_server_handler()
class FederationGroupsProfileServlet(BaseGroupsServerServlet):
    """Fetch or update a group's basic profile on behalf of a remote user."""

    PATH = "/groups/(?P<group_id>[^/]*)/profile"

    async def on_GET(
        self,
        origin: str,
        content: Literal[None],
        query: Dict[bytes, List[bytes]],
        group_id: str,
    ) -> Tuple[int, JsonDict]:
        requester = parse_string_from_args(query, "requester_user_id", required=True)
        # Only the homeserver the requesting user belongs to may act for them.
        if origin != get_domain_from_id(requester):
            raise SynapseError(403, "requester_user_id doesn't match origin")
        return 200, await self.handler.get_group_profile(group_id, requester)

    async def on_POST(
        self,
        origin: str,
        content: JsonDict,
        query: Dict[bytes, List[bytes]],
        group_id: str,
    ) -> Tuple[int, JsonDict]:
        requester = parse_string_from_args(query, "requester_user_id", required=True)
        if origin != get_domain_from_id(requester):
            raise SynapseError(403, "requester_user_id doesn't match origin")
        return 200, await self.handler.update_group_profile(
            group_id, requester, content
        )
class FederationGroupsSummaryServlet(BaseGroupsServerServlet):
    """Fetch a group's summary on behalf of a remote user."""

    PATH = "/groups/(?P<group_id>[^/]*)/summary"

    async def on_GET(
        self,
        origin: str,
        content: Literal[None],
        query: Dict[bytes, List[bytes]],
        group_id: str,
    ) -> Tuple[int, JsonDict]:
        requester = parse_string_from_args(query, "requester_user_id", required=True)
        # Only the homeserver the requesting user belongs to may act for them.
        if origin != get_domain_from_id(requester):
            raise SynapseError(403, "requester_user_id doesn't match origin")
        return 200, await self.handler.get_group_summary(group_id, requester)
class FederationGroupsRoomsServlet(BaseGroupsServerServlet):
    """Get the rooms in a group on behalf of a user"""

    PATH = "/groups/(?P<group_id>[^/]*)/rooms"

    async def on_GET(
        self,
        origin: str,
        content: Literal[None],
        query: Dict[bytes, List[bytes]],
        group_id: str,
    ) -> Tuple[int, JsonDict]:
        requester = parse_string_from_args(query, "requester_user_id", required=True)
        # Only the homeserver the requesting user belongs to may act for them.
        if origin != get_domain_from_id(requester):
            raise SynapseError(403, "requester_user_id doesn't match origin")
        return 200, await self.handler.get_rooms_in_group(group_id, requester)
class FederationGroupsAddRoomsServlet(BaseGroupsServerServlet):
    """Add a room to, or remove a room from, a group."""

    PATH = "/groups/(?P<group_id>[^/]*)/room/(?P<room_id>[^/]*)"

    async def on_POST(
        self,
        origin: str,
        content: JsonDict,
        query: Dict[bytes, List[bytes]],
        group_id: str,
        room_id: str,
    ) -> Tuple[int, JsonDict]:
        requester = parse_string_from_args(query, "requester_user_id", required=True)
        # Only the homeserver the requesting user belongs to may act for them.
        if origin != get_domain_from_id(requester):
            raise SynapseError(403, "requester_user_id doesn't match origin")
        return 200, await self.handler.add_room_to_group(
            group_id, requester, room_id, content
        )

    async def on_DELETE(
        self,
        origin: str,
        content: Literal[None],
        query: Dict[bytes, List[bytes]],
        group_id: str,
        room_id: str,
    ) -> Tuple[int, JsonDict]:
        requester = parse_string_from_args(query, "requester_user_id", required=True)
        if origin != get_domain_from_id(requester):
            raise SynapseError(403, "requester_user_id doesn't match origin")
        return 200, await self.handler.remove_room_from_group(
            group_id, requester, room_id
        )
class FederationGroupsAddRoomsConfigServlet(BaseGroupsServerServlet):
    """Update a single config key for a room that belongs to a group."""

    PATH = (
        "/groups/(?P<group_id>[^/]*)/room/(?P<room_id>[^/]*)"
        "/config/(?P<config_key>[^/]*)"
    )

    async def on_POST(
        self,
        origin: str,
        content: JsonDict,
        query: Dict[bytes, List[bytes]],
        group_id: str,
        room_id: str,
        config_key: str,
    ) -> Tuple[int, JsonDict]:
        requester = parse_string_from_args(query, "requester_user_id", required=True)
        # Only the homeserver the requesting user belongs to may act for them.
        if origin != get_domain_from_id(requester):
            raise SynapseError(403, "requester_user_id doesn't match origin")
        return 200, await self.handler.update_room_in_group(
            group_id, requester, room_id, config_key, content
        )
class FederationGroupsUsersServlet(BaseGroupsServerServlet):
    """Get the users in a group on behalf of a user"""

    PATH = "/groups/(?P<group_id>[^/]*)/users"

    async def on_GET(
        self,
        origin: str,
        content: Literal[None],
        query: Dict[bytes, List[bytes]],
        group_id: str,
    ) -> Tuple[int, JsonDict]:
        requester = parse_string_from_args(query, "requester_user_id", required=True)
        # Only the homeserver the requesting user belongs to may act for them.
        if origin != get_domain_from_id(requester):
            raise SynapseError(403, "requester_user_id doesn't match origin")
        return 200, await self.handler.get_users_in_group(group_id, requester)
class FederationGroupsInvitedUsersServlet(BaseGroupsServerServlet):
    """Get the users that have been invited to a group"""

    PATH = "/groups/(?P<group_id>[^/]*)/invited_users"

    async def on_GET(
        self,
        origin: str,
        content: Literal[None],
        query: Dict[bytes, List[bytes]],
        group_id: str,
    ) -> Tuple[int, JsonDict]:
        requester = parse_string_from_args(query, "requester_user_id", required=True)
        # Only the homeserver the requesting user belongs to may act for them.
        if origin != get_domain_from_id(requester):
            raise SynapseError(403, "requester_user_id doesn't match origin")
        return 200, await self.handler.get_invited_users_in_group(
            group_id, requester
        )
class FederationGroupsInviteServlet(BaseGroupsServerServlet):
    """Ask a group server to invite someone to the group"""

    PATH = "/groups/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/invite"

    async def on_POST(
        self,
        origin: str,
        content: JsonDict,
        query: Dict[bytes, List[bytes]],
        group_id: str,
        user_id: str,
    ) -> Tuple[int, JsonDict]:
        requester = parse_string_from_args(query, "requester_user_id", required=True)
        # Only the homeserver the requesting user belongs to may act for them.
        if origin != get_domain_from_id(requester):
            raise SynapseError(403, "requester_user_id doesn't match origin")
        return 200, await self.handler.invite_to_group(
            group_id, user_id, requester, content
        )
class FederationGroupsAcceptInviteServlet(BaseGroupsServerServlet):
    """Accept an invitation from the group server"""

    PATH = "/groups/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/accept_invite"

    async def on_POST(
        self,
        origin: str,
        content: JsonDict,
        query: Dict[bytes, List[bytes]],
        group_id: str,
        user_id: str,
    ) -> Tuple[int, JsonDict]:
        # Here the acting user is the invitee themselves, so validate user_id.
        if origin != get_domain_from_id(user_id):
            raise SynapseError(403, "user_id doesn't match origin")
        return 200, await self.handler.accept_invite(group_id, user_id, content)
class FederationGroupsJoinServlet(BaseGroupsServerServlet):
    """Attempt to join a group"""

    PATH = "/groups/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/join"

    async def on_POST(
        self,
        origin: str,
        content: JsonDict,
        query: Dict[bytes, List[bytes]],
        group_id: str,
        user_id: str,
    ) -> Tuple[int, JsonDict]:
        # Here the acting user is the joiner themselves, so validate user_id.
        if origin != get_domain_from_id(user_id):
            raise SynapseError(403, "user_id doesn't match origin")
        return 200, await self.handler.join_group(group_id, user_id, content)
class FederationGroupsRemoveUserServlet(BaseGroupsServerServlet):
    """Leave or kick a user from the group"""

    PATH = "/groups/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/remove"

    async def on_POST(
        self,
        origin: str,
        content: JsonDict,
        query: Dict[bytes, List[bytes]],
        group_id: str,
        user_id: str,
    ) -> Tuple[int, JsonDict]:
        requester = parse_string_from_args(query, "requester_user_id", required=True)
        # Only the homeserver the requesting user belongs to may act for them.
        if origin != get_domain_from_id(requester):
            raise SynapseError(403, "requester_user_id doesn't match origin")
        return 200, await self.handler.remove_user_from_group(
            group_id, user_id, requester, content
        )
class FederationGroupsSummaryRoomsServlet(BaseGroupsServerServlet):
    """Add/remove a room from the group summary, with optional category.

    Matches both:
        - /groups/:group/summary/rooms/:room_id
        - /groups/:group/summary/categories/:category/rooms/:room_id
    """

    PATH = (
        "/groups/(?P<group_id>[^/]*)/summary"
        "(/categories/(?P<category_id>[^/]+))?"
        "/rooms/(?P<room_id>[^/]*)"
    )

    async def on_POST(
        self,
        origin: str,
        content: JsonDict,
        query: Dict[bytes, List[bytes]],
        group_id: str,
        category_id: str,
        room_id: str,
    ) -> Tuple[int, JsonDict]:
        requester = parse_string_from_args(query, "requester_user_id", required=True)
        # Only the homeserver the requesting user belongs to may act for them.
        if origin != get_domain_from_id(requester):
            raise SynapseError(403, "requester_user_id doesn't match origin")
        # category_id may legitimately be None (the path group is optional);
        # only the empty string is rejected.
        if category_id == "":
            raise SynapseError(
                400, "category_id cannot be empty string", Codes.INVALID_PARAM
            )
        if len(category_id) > MAX_GROUP_CATEGORYID_LENGTH:
            raise SynapseError(
                400,
                "category_id may not be longer than %s characters"
                % (MAX_GROUP_CATEGORYID_LENGTH,),
                Codes.INVALID_PARAM,
            )
        return 200, await self.handler.update_group_summary_room(
            group_id,
            requester,
            room_id=room_id,
            category_id=category_id,
            content=content,
        )

    async def on_DELETE(
        self,
        origin: str,
        content: Literal[None],
        query: Dict[bytes, List[bytes]],
        group_id: str,
        category_id: str,
        room_id: str,
    ) -> Tuple[int, JsonDict]:
        requester = parse_string_from_args(query, "requester_user_id", required=True)
        if origin != get_domain_from_id(requester):
            raise SynapseError(403, "requester_user_id doesn't match origin")
        if category_id == "":
            raise SynapseError(400, "category_id cannot be empty string")
        return 200, await self.handler.delete_group_summary_room(
            group_id, requester, room_id=room_id, category_id=category_id
        )
class FederationGroupsCategoriesServlet(BaseGroupsServerServlet):
    """Get all categories for a group"""

    PATH = "/groups/(?P<group_id>[^/]*)/categories/?"

    async def on_GET(
        self,
        origin: str,
        content: Literal[None],
        query: Dict[bytes, List[bytes]],
        group_id: str,
    ) -> Tuple[int, JsonDict]:
        requester = parse_string_from_args(query, "requester_user_id", required=True)
        # Only the homeserver the requesting user belongs to may act for them.
        if origin != get_domain_from_id(requester):
            raise SynapseError(403, "requester_user_id doesn't match origin")
        return 200, await self.handler.get_group_categories(group_id, requester)
class FederationGroupsCategoryServlet(BaseGroupsServerServlet):
    """Get, upsert, or delete a single category in a group."""

    PATH = "/groups/(?P<group_id>[^/]*)/categories/(?P<category_id>[^/]+)"

    async def on_GET(
        self,
        origin: str,
        content: Literal[None],
        query: Dict[bytes, List[bytes]],
        group_id: str,
        category_id: str,
    ) -> Tuple[int, JsonDict]:
        requester = parse_string_from_args(query, "requester_user_id", required=True)
        # Only the homeserver the requesting user belongs to may act for them.
        if origin != get_domain_from_id(requester):
            raise SynapseError(403, "requester_user_id doesn't match origin")
        return 200, await self.handler.get_group_category(
            group_id, requester, category_id
        )

    async def on_POST(
        self,
        origin: str,
        content: JsonDict,
        query: Dict[bytes, List[bytes]],
        group_id: str,
        category_id: str,
    ) -> Tuple[int, JsonDict]:
        requester = parse_string_from_args(query, "requester_user_id", required=True)
        if origin != get_domain_from_id(requester):
            raise SynapseError(403, "requester_user_id doesn't match origin")
        if category_id == "":
            raise SynapseError(400, "category_id cannot be empty string")
        if len(category_id) > MAX_GROUP_CATEGORYID_LENGTH:
            raise SynapseError(
                400,
                "category_id may not be longer than %s characters"
                % (MAX_GROUP_CATEGORYID_LENGTH,),
                Codes.INVALID_PARAM,
            )
        return 200, await self.handler.upsert_group_category(
            group_id, requester, category_id, content
        )

    async def on_DELETE(
        self,
        origin: str,
        content: Literal[None],
        query: Dict[bytes, List[bytes]],
        group_id: str,
        category_id: str,
    ) -> Tuple[int, JsonDict]:
        requester = parse_string_from_args(query, "requester_user_id", required=True)
        if origin != get_domain_from_id(requester):
            raise SynapseError(403, "requester_user_id doesn't match origin")
        if category_id == "":
            raise SynapseError(400, "category_id cannot be empty string")
        return 200, await self.handler.delete_group_category(
            group_id, requester, category_id
        )
class FederationGroupsRolesServlet(BaseGroupsServerServlet):
| |
<gh_stars>10-100
#!/usr/bin/env python3
""" Colr - Progress - Frames
A collection of frames for `colr.progress.Progress`.
-<NAME> 3-12-17
The MIT License (MIT)
Copyright (c) 2015-2017 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from collections import namedtuple
from functools import total_ordering
from .colr import Colr as C
# Argument set for `range` in `BarSet._generate_move`.
# NOTE(review): presumably each field holds the range() arguments for one
# sweep direction — confirm against BarSet._generate_move.
RangeMoveArgs = namedtuple('RangeMoveArgs', ('forward', 'backward'))
def cls_get_by_name(cls, name):
    """ Return a class attribute by searching the attributes `name` attribute.
        First tries a direct attribute lookup; failing that, scans all public
        attributes for one whose own `name` attribute equals `name`.
        Raises ValueError when nothing matches.
    """
    try:
        return getattr(cls, name)
    except AttributeError:
        pass
    for attrname in dir(cls):
        if attrname.startswith('_'):
            continue
        try:
            candidate = getattr(cls, attrname)
        except AttributeError:
            # Is known to happen.
            continue
        if getattr(candidate, 'name', None) == name:
            return candidate
    raise ValueError('No {} with that name: {}'.format(
        cls.__name__,
        name,
    ))
def cls_names(cls, wanted_cls, registered=True):
    """ Return a list of attributes for all `wanted_cls` attributes in this
        class, where `wanted_cls` is the desired attribute type.
    """
    return [item.name for item in cls_sets(cls, wanted_cls, registered=registered)]
def cls_register(cls, frameset, new_class, init_args, name=None):
    """ Register a new FrameSet or FrameSet subclass as a member/attribute
        of a class.
        Returns the new FrameSet or FrameSet subclass.
        Arguments:
            frameset   : An existing FrameSet, or an iterable of strings.
            init_args  : A list of properties from the `frameset` to try to
                         use for initializing the new FrameSet.
            new_class  : The class type to initialize.
            name       : New name for the FrameSet, also used as the classes
                         attribute name. Required (and non-empty) when the
                         `frameset` object has no `name` attribute.
    """
    name = name or getattr(frameset, 'name', None)
    if name is None:
        raise ValueError(
            '`name` is needed when the `frameset` has no name attribute.'
        )
    kwargs = {'name': name}
    kwargs.update((arg, getattr(frameset, arg, None)) for arg in init_args)
    newframeset = new_class(frameset, **kwargs)
    # Mark this FrameSet/BarSet as a registered item (not basic/original).
    newframeset._registered = True
    setattr(cls, name, newframeset)
    return newframeset
def cls_sets(cls, wanted_cls, registered=True):
    """ Return a list of all `wanted_cls` attributes in this
        class, where `wanted_cls` is the desired attribute type.
        With `registered=False`, attributes flagged as `_registered`
        (user-added copies) are excluded.
    """
    found = []
    for attrname in dir(cls):
        if attrname.startswith('_'):
            continue
        val = getattr(cls, attrname, None)
        if not isinstance(val, wanted_cls):
            continue
        if registered or not getattr(val, '_registered', False):
            found.append(val)
    return found
@total_ordering
class FrameSetBase(object):
    """ The base class for FrameSets/BarSets. Shares specialized methods
        for building new FrameSets/BarSets.
    """

    def __init__(self):
        # Subclasses must provide their own __init__ that sets `self.data`
        # (a tuple of frames) and `self.name`.
        raise NotImplementedError('FrameSetBase.__init__ must be overridden.')

    def __add__(self, other):
        """ FrameSets can be extended with other self.data lists/tuples, or
            builtin lists/tuples.
        """
        otherdata = getattr(other, 'data', None)
        if isinstance(otherdata, tuple):
            return self.__class__(self.data + other.data)
        elif isinstance(otherdata, list):
            return self.__class__(self.data + tuple(other.data))
        elif isinstance(other, tuple):
            return self.__class__(self.data + other)
        elif isinstance(other, list):
            return self.__class__(self.data + tuple(other))
        else:
            raise TypeError(
                ' '.join((
                    'Expecting list, tuple,',
                    'or object with a list or tuple data attribute.',
                    'Got: ({}) {!r}'
                )).format(
                    type(other).__name__,
                    other,
                )
            )

    def __bool__(self):
        # An empty frame tuple makes the set falsey.
        return bool(self.data)

    def __bytes__(self):
        """ bytes(FrameSet()) is the same as str(FrameSet()).encode(). """
        return str(self).encode()

    def __contains__(self, value):
        return value in self.data

    def __eq__(self, other):
        # Comparing against a different class raises, rather than returning
        # False, to surface accidental cross-type comparisons.
        if not isinstance(other, self.__class__):
            raise TypeError(
                'Cannot compare {} to {}.'.format(
                    type(self).__name__,
                    type(other).__name__,
                )
            )
        # Equal only when both the frame data and the name match.
        return (
            (self.data == getattr(other, 'data', None)) and
            (self.name == getattr(other, 'name', None))
        )

    def __getitem__(self, index):
        return self.data[index]

    def __hash__(self):
        # Hash only the frame data; `name` is deliberately excluded even
        # though __eq__ compares it.
        return hash(self.data)

    def __iter__(self):
        return self.data.__iter__()

    def __len__(self):
        return len(self.data)

    def __lt__(self, other):
        # `total_ordering` derives the remaining comparisons from this
        # and __eq__. Compares frame tuples; falls back to comparing
        # against `other` directly for builtin sequences.
        if isinstance(getattr(other, 'data', None), tuple):
            return self.data < other.data
        return self.data < other

    def __mul__(self, n):
        """ The data tuple for this frameset can be multiplied by a number.
            It returns `FrameSet(self.data * n)`.
        """
        if not isinstance(n, int):
            raise TypeError(
                'Cannot multiply {} by non-int type: {}'.format(
                    type(self).__name__,
                    type(n).__name__
                )
            )
        return self.__class__(self.data * n)

    def __radd__(self, other):
        return self.__add__(other)

    def __reversed__(self):
        # Builds a new instance with the frames reversed and a derived name.
        return self.__class__(
            reversed(self.data),
            name='reversed_{}'.format(self.name),
        )

    def __rmul__(self, other):
        return self.__mul__(other)

    def __setitem__(self, key, value):
        # Framesets are immutable, like tuples.
        raise TypeError('{} does not support assignment.'.format(
            type(self).__name__,
        ))

    def __str__(self):
        """ A string representation of this FrameSet is it's frames joined
            together.
        """
        return ''.join(str(x) for x in self)

    def _as_colr(self, init_args, **kwargs):
        """ Wrap each frame of a FrameSet or FrameSet subclass in a Colr object,
            using `kwargs` for Colr().
            Arguments:
                init_args  : A list of properties to get from the instance and
                             use for initializing the new instance.
            Keyword Arguments:
                fore   : Fore color for each frame.
                back   : Back color for each frame.
                style  : Style for each frame.
        """
        clsargs = {'name': 'custom_{}_as_colr'.format(self.name)}
        for initarg in init_args:
            clsargs[initarg] = getattr(self, initarg, None)
        newfset = self.__class__(
            (C(s, **kwargs) for s in self),
            **clsargs,
        )
        return newfset

    def _as_gradient(self, init_args, name=None, style=None, rgb_mode=False):
        """ Wrap each frame of a FrameSet or FrameSet subclass in a Colr object,
            using `Colr.gradient`.
            Arguments:
                init_args  : A list of properties to get from the instance and
                             use for initializing the new instance.
                name       : Starting color name. One of `Colr.gradient_names`.
                style      : Style arg for Colr.
                rgb_mode   : Whether to use RGB codes, instead of extended
                             256 color approximate matches.
        """
        # TODO: Better, smoother gradients.
        offset = C.gradient_names.get(name, None)
        if offset is None:
            # Unknown color names fall back to the blue gradient offset.
            offset = C.gradient_names['blue']
        colrs = []
        for i, char in enumerate(self):
            colrs.append(
                C(char, style=style).rainbow(
                    offset=offset + i,
                    spread=1,
                    rgb_mode=rgb_mode,
                )
            )
        namefmt = 'custom_{}_as_gradient'
        if rgb_mode:
            namefmt = ''.join((namefmt, '_rgb'))
        clsargs = {'name': namefmt.format(self.name)}
        for initarg in init_args:
            clsargs[initarg] = getattr(self, initarg, None)
        return self.__class__(colrs, **clsargs)

    def _as_rainbow(self, init_args, offset=35, style=None, rgb_mode=False):
        """ Wrap each frame of a FrameSet or FrameSet subclass in a Colr
            object, using `Colr.rainbow`.
            Arguments:
                init_args  : A list of properties to get from the instance and
                             use for initializing the new instance.
                offset     : Starting offset for the rainbow.
                style      : Style arg for Colr.
                rgb_mode   : Whether to use RGB codes, instead of extended
                             256 color approximate matches.
        """
        colrs = []
        for i, char in enumerate(self):
            colrs.append(
                C(char, style=style).rainbow(
                    offset=offset + i,
                    freq=0.25,
                    spread=1,
                    rgb_mode=rgb_mode,
                )
            )
        clsargs = {'name': 'custom_{}_as_rainbow'.format(self.name)}
        for initarg in init_args:
            clsargs[initarg] = getattr(self, initarg, None)
        return self.__class__(colrs, **clsargs)

    def has_codes(self):
        """ Returns True if one the frames in this FrameSet has an escape code
            in it.
        """
        return any(s.startswith('\x1b[') for s in self)
class FrameSet(FrameSetBase):
""" A single spinner/progress frame list, with helper methods for
colorizing each frame. A FrameSet actually behaves like a `tuple`.
It is immutable, hashable, and comparable.
"""
default_delay = 0.1
def __init__(self, iterable, name=None, delay=None):
self.data = tuple(iterable)
if not self.data:
raise ValueError(
'Empty FrameSet is not allowed. Got: {!r}'.format(
iterable,
)
)
self.name = str(name or '').strip().lower()
self.delay = delay or self.default_delay
if not (isinstance(self.delay, (float, int)) or (self.delay is None)):
raise TypeError(
' '.join((
'Expecting None, float, or int for delay.',
'Got: ({}) {!r}'
)).format(
type(self.delay).__name__,
self.delay,
)
)
def __repr__(self):
""" Eval-friendly representation of this FrameSet. """
return ''.join((
'{clsname}',
'({s.data!r}, name={s.name!r}, delay={s.delay!r})'
)).format(clsname=self.__class__.__name__, s=self)
def append(self, append_str):
""" Append a string to every frame. """
app = str(append_str)
self.data = tuple(
''.join((str(s), app))
for s in self.data
)
return self
def as_colr(self, | |
idx_term # & ~idx_j
if np.any(idx):
return np.where(idx)[0][0]
# 3. Try to match H
# If spec is 'H 1', then try to match conf with conf,
# *or* try to match term with term, *or* try to match conf with term,
# *or* try to match term with conf
if level.species == "H 1":
# TODO
idx_term_conf = label.term == level.configuration
idx_conf_term = label.configuration == level.term
idx = idx_conf | idx_term | idx_term_conf | idx_conf_term
if np.any(idx):
return np.where(idx)[0][0]
# 4. Try to match energies (including H, if step 3 failed)
# Find the level in the nlte grid with the same spec,
# and the closest energy; *provided* that the desired energy does
# not exceed the highest energy out of all the levels in the nlte grid with this spec
idx = idx_species
if np.any(idx):
if level.energy > max_energy or level.energy <= 0:
# We exceed the maximum energy of the grid, ignore NLTE
return -1
diff = np.abs(label[idx].energy - level.energy)
idx2 = np.argmin(diff)
mindiff = diff[idx2]
# difference needs to be smaller than some limit?
if mindiff < energy_diff_limit:
return np.where(idx_map[idx][idx2])[0][0]
else:
return -1
# 5. If everything fails return nothing
return -1
# Loop through the linelist line levels
# and match to NLTE line levels
# match() return -1 of no match is found, or the index otherwise
for i, level in enumerate(line_label_low):
idx_l = match(level_labels, level)
linerefs[i, 0] = idx_l
iused[idx_l] |= idx_l != -1
for i, level in enumerate(line_label_upp):
idx_u = match(level_labels, level)
linerefs[i, 1] = idx_u
iused[idx_u] |= idx_u != -1
# Lineindices as integer pointers
lineindices = np.where(lineindices)[0]
# Remap the linelevel references
for j, i in enumerate(np.where(iused)[0]):
linerefs[linerefs == i] = j
return lineindices, linerefs, iused
    def interpolate(self, rabund, teff, logg, monh, atmo):
        """
        interpolate nlte coefficients on the model grid

        Parameters
        ----------
        rabund : float
            relative (to solar) abundance of the element
        teff : float
            temperature in Kelvin
        logg : float
            surface gravity in log(cgs)
        monh : float
            Metallicity in H=12
        atmo : Atmosphere
            model atmosphere; only its depth scale (``self.depth_name``)
            is read here

        Returns
        -------
        subgrid : array (ndepth, nlines)
            interpolated grid values, or None when no grid data is loaded
        """
        assert self._points is not None
        if self.bgrid is None or self.bgrid.shape[0] == 0:
            return None
        # Interpolate the depth scale to the target depth, this is unstructured data
        # i.e. each combination of parameters has a different depth scale (given in depth)
        ndepths, _, *nparam = self.bgrid.shape
        target_depth = atmo[self.depth_name]
        target_depth = np.log10(target_depth)
        ntarget = len(target_depth)
        # Lower corner indices of the grid cell containing the requested values
        # (np.digitize returns the insertion index, hence the -1).
        iabund = np.digitize(rabund, self._points[0]) - 1
        iteff = np.digitize(teff, self._points[1]) - 1
        ilogg = np.digitize(logg, self._points[2]) - 1
        imonh = np.digitize(monh, self._points[3]) - 1
        # Interpolate on the grid
        # We only interpolate on the 2**4 points around the requested values
        # self._points and self._grid are interpolated when reading the data in read_grid
        target = (rabund, teff, logg, monh)
        points = (
            self._points[0][iabund : iabund + 2],
            self._points[1][iteff : iteff + 2],
            self._points[2][ilogg : ilogg + 2],
            self._points[3][imonh : imonh + 2],
        )
        npoints = (
            points[0].size,
            points[1].size,
            points[2].size,
            points[3].size,
        )
        grid = np.empty((*npoints, ntarget, ndepths), float)
        # Resample each corner model's depth profile onto the target depth
        # scale with a cubic spline, extrapolating beyond the grid ends.
        for l, x, t, g, f in np.ndindex(ndepths, *npoints):
            xp = self.depth[imonh + f, ilogg + g, iteff + t, :]
            yp = self.bgrid[l, :, iabund + x, iteff + t, ilogg + g, imonh + f]
            xp = np.log10(xp)
            grid[x, t, g, f, :, l] = interpolate.interp1d(
                xp,
                yp,
                bounds_error=False,
                fill_value="extrapolate",
                kind="cubic",
            )(target_depth)
        # Check if we need to extrapolate
        if any([t < min(p) or t > max(p) for t, p in zip(target, points)]):
            logger.warning(
                f"Extrapolate on the {self.elem} NLTE grid. Requested values of {target} on grid {points}"
            )
        # Some grids have only one value in that direction
        # Usually in abundance. Then we need to remove that dimension
        # to avoid nan output
        mask = [len(p) > 1 for p in points]
        if not all(mask):
            points = [p for m, p in zip(mask, points) if m]
            target = [t for m, t in zip(mask, target) if m]
            idx = [slice(None, None) if m else 0 for m in mask]
            grid = grid[tuple(idx)]
        # NOTE(review): `method` is hard-coded; the "grid" branch below is
        # currently dead code kept as an alternative implementation.
        method = "order"
        if method == "grid":
            # TODO: Interpolate with splines
            # Possibly in order of importance, since scipy doesn't have spline interpolation on a grid
            subgrid = interpolate.interpn(
                points,
                grid,
                target,
                method="linear",
                bounds_error=False,
                fill_value=None,
            )
            subgrid = subgrid[0]
        elif method == "order":
            # Interpolate linearly along one parameter axis at a time; each
            # pass collapses the leading axis of `grid`.
            for p, t in zip(points, target):
                grid = interpolate.interp1d(
                    p,
                    grid,
                    axis=0,
                    bounds_error=False,
                    fill_value="extrapolate",
                )(t)
            subgrid = grid
        return subgrid
@CollectionFactory
class NLTE(Collection):
# fmt: off
_fields = Collection._fields + [
("elements", [], astype(list), this,
"list: elements for which nlte calculations will be performed"),
("grids", {}, astype(dict), this,
"dict: nlte grid datafiles for each element"),
("subgrid_size", [2, 2, 2, 2], array(4, int), this,
"array of shape (4,): defines size of nlte grid cache."
"Each entry is for one parameter abund, teff, logg, monh"),
("flags", None, array(None, np.bool_), this,
"array: contains a flag for each line, whether it was calculated in NLTE (True) or not (False)"),
("solar", None, this, this, "str: defines which default to use as the solar metallicitiies"),
("abund_format", "H=12", astype(str), this, "str: which abundance format to use for comparison"),
("selection", "energy", oneof("energy", "levels"), this, "str: which selection algorithm to use to match linelist and departure coefficients"),
("min_energy_diff", None, this, this, "float: difference between energy levels that are still matched. If None will default to the smallest non zero difference between energy levels in the grid.")
]
# fmt: on
# For marcs2012 atmosphere
_default_grids = {
"Al": "nlte_Al_ama51_pysme.grd",
"Fe": "marcs2012_Fe2016.grd",
"Li": "nlte_Li_ama51_pysme.grd",
"Mg": "nlte_Mg_ama51_pysme.grd",
"Na": "nlte_Na_ama51_pysme.grd",
"O": "nlte_O_ama51_pysme.grd",
"Ba": "nlte_Ba_ama51_pysme.grd",
"Ca": "nlte_Ca_ama51_pysme.grd",
"Si": "nlte_Si_ama51_pysme.grd",
"Ti": "marcs2012s_t2.0_Ti.grd",
"C": "nlte_C_ama51_pysme.grd",
"H": "nlte_H_ama51_pysme.grd",
"K": "nlte_K_ama51_pysme.grd",
"Mn": "nlte_Mn_ama51_pysme.grd",
"N": "nlte_N_ama51_pysme.grd",
}
def __init__(self, **kwargs):
    """
    Initialize the NLTE settings collection.

    Accepts both the pythonic field names declared in ``_fields`` and the
    legacy IDL keyword names (``nlte_elem_flags``, ``nlte_subgrid_size``,
    ``nlte_grids``), which are converted below.
    """
    super().__init__(**kwargs)
    # Flag used elsewhere (see update_coefficients) to gate one-time log
    # messages. NOTE(review): it is initialized to False here, which means
    # the "first run" messages never fire -- confirm whether True was intended.
    self.first = False
    if "solar" in kwargs.keys():
        self.solar = kwargs["solar"]
    # Convert IDL keywords to Python
    if "nlte_elem_flags" in kwargs.keys():
        # IDL passes a 0/1 flag per element; translate to element names
        elements = kwargs["nlte_elem_flags"]
        self.elements = [abund_elem[i] for i, j in enumerate(elements) if j == 1]
    if "nlte_subgrid_size" in kwargs.keys():
        self.subgrid_size = kwargs["nlte_subgrid_size"]
    if "nlte_grids" in kwargs:
        grids = kwargs["nlte_grids"]
        if isinstance(grids, (list, np.ndarray)):
            # IDL passes a (byte-string) filename per element index;
            # translate to a {element: filename} mapping, skipping blanks
            grids = {
                abund_elem[i]: name.decode()
                for i, name in enumerate(grids)
                if name != ""
            }
        self.grids = grids
    # TODO
    #:dict: the cached subgrid data for each element
    # This is NOT saved on sme.save
    # But maybe should be ?
    self.grid_data = {}
def set_nlte(self, element, grid=None):
    """
    Add an element to the NLTE calculations

    Parameters
    ----------
    element : str
        The abbreviation of the element to add to the NLTE calculations
    grid : str, optional
        Filename of the NLTE data grid to use for this element
        the file must be in nlte_grids directory
        Defaults to a set of "known" files for some elements

    Raises
    ------
    ValueError
        If no grid was given and no default grid exists for this element
    """
    if element in self.elements:
        # Element already in NLTE; only swap the grid if a new one was given
        if grid is not None:
            self.grids[element] = grid
        return
    if grid is None:
        # Fall back to the default grid shipped for this element
        if element not in NLTE._default_grids:
            raise ValueError(f"No default grid known for element {element}")
        grid = NLTE._default_grids[element]
        logger.info("Using default grid %s for element %s", grid, element)
    # The early return above guarantees the element is not yet registered,
    # so no membership re-check is needed here.
    self.elements.append(element)
    self.grids[element] = grid
def remove_nlte(self, element):
    """
    Remove an element from the NLTE calculations

    Parameters
    ----------
    element : str
        Abbreviation of the element to remove from NLTE
    """
    try:
        self.elements.remove(element)
    except ValueError:
        # Element was never part of the NLTE calculations; nothing to do
        return
    self.grids.pop(element)
@property
def _citation_info(self):
    """str: newline-joined citation info of all loaded NLTE grids."""
    parts = []
    for grid in self.grid_data.values():
        if grid.citation_info is not None:
            parts.append(grid.citation_info)
    return "\n".join(parts)
@_citation_info.setter
def _citation_info(self, value):
    # Assignments are deliberately ignored; the citation text is always
    # derived from the loaded grid data.
    pass
def update_coefficients(self, sme, dll, lfs_nlte):
"""pass departure coefficients to C library"""
# Only print "Running in NLTE" message on the first run each time
if np.all(self.grids == "") or np.size(self.elements) == 0:
# No NLTE to do
if self.first:
self.first = False
logger.info("Running in LTE")
return sme
if sme.linelist.lineformat == "short":
if self.first:
self.first = False
logger.warning(
"NLTE line formation | |
""" CISCO_IPSEC_MIB
The MIB module for modeling Cisco\-specific
IPsec attributes
Overview of Cisco IPsec MIB
MIB description
This MIB models the Cisco implementation\-specific
attributes of a Cisco entity that implements IPsec.
This MIB is complementary to the standard IPsec MIB
proposed jointly by Tivoli and Cisco.
The ciscoIPsec MIB provides the operational information
on Cisco's IPsec tunnelling implementation.
The following entities are managed\:
1) ISAKMP Group\:
a) ISAKMP global parameters
b) ISAKMP Policy Table
2) IPSec Group\:
a) IPSec Global Parameters
b) IPSec Global Traffic Parameters
c) Cryptomap Group
\- Cryptomap Set Table
\- Cryptomap Table
\- CryptomapSet Binding Table
3) System Capacity & Capability Group\:
a) Capacity Parameters
b) Capability Parameters
4) Trap Control Group
5) Notifications Group
"""
from collections import OrderedDict
from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.filters import YFilter
from ydk.errors import YError, YModelError
from ydk.errors.error_handler import handle_type_error as _handle_type_error
class CryptomapSetBindStatus(Enum):
    """
    CryptomapSetBindStatus (Enum Class)
    The status of the binding of a cryptomap set
    to the specified interface. The value when queried
    is always 'attached'. When set to 'detached', the
    cryptomap set is detached from the specified interface.
    Setting the value to 'attached' will result in
    SNMP General Error.
    .. data:: unknown = 0
    .. data:: attached = 1
    .. data:: detached = 2
    """
    unknown = Enum.YLeaf(0, "unknown")
    attached = Enum.YLeaf(1, "attached")
    detached = Enum.YLeaf(2, "detached")
class CryptomapType(Enum):
    """
    CryptomapType (Enum Class)
    The type of a cryptomap entry. Cryptomap
    is a unit of IOS IPSec policy specification.
    Each member pairs the MIB enumeration value with its name.
    .. data:: cryptomapTypeNONE = 0
    .. data:: cryptomapTypeMANUAL = 1
    .. data:: cryptomapTypeISAKMP = 2
    .. data:: cryptomapTypeCET = 3
    .. data:: cryptomapTypeDYNAMIC = 4
    .. data:: cryptomapTypeDYNAMICDISCOVERY = 5
    """
    cryptomapTypeNONE = Enum.YLeaf(0, "cryptomapTypeNONE")
    cryptomapTypeMANUAL = Enum.YLeaf(1, "cryptomapTypeMANUAL")
    cryptomapTypeISAKMP = Enum.YLeaf(2, "cryptomapTypeISAKMP")
    cryptomapTypeCET = Enum.YLeaf(3, "cryptomapTypeCET")
    cryptomapTypeDYNAMIC = Enum.YLeaf(4, "cryptomapTypeDYNAMIC")
    cryptomapTypeDYNAMICDISCOVERY = Enum.YLeaf(5, "cryptomapTypeDYNAMICDISCOVERY")
class DiffHellmanGrp(Enum):
    """
    DiffHellmanGrp (Enum Class)
    The Diffie Hellman Group used in negotiations.
    Each member pairs the MIB enumeration value with its name.
    .. data:: none = 1
    .. data:: dhGroup1 = 2
    .. data:: dhGroup2 = 3
    """
    none = Enum.YLeaf(1, "none")
    dhGroup1 = Enum.YLeaf(2, "dhGroup1")
    dhGroup2 = Enum.YLeaf(3, "dhGroup2")
class EncryptAlgo(Enum):
    """
    EncryptAlgo (Enum Class)
    The encryption algorithm used in negotiations.
    Each member pairs the MIB enumeration value with its name.
    .. data:: none = 1
    .. data:: des = 2
    .. data:: des3 = 3
    """
    none = Enum.YLeaf(1, "none")
    des = Enum.YLeaf(2, "des")
    des3 = Enum.YLeaf(3, "des3")
class IkeAuthMethod(Enum):
    """
    IkeAuthMethod (Enum Class)
    The authentication method used in IPsec Phase\-1 IKE
    negotiations.
    Each member pairs the MIB enumeration value with its name.
    .. data:: none = 1
    .. data:: preSharedKey = 2
    .. data:: rsaSig = 3
    .. data:: rsaEncrypt = 4
    .. data:: revPublicKey = 5
    """
    none = Enum.YLeaf(1, "none")
    preSharedKey = Enum.YLeaf(2, "preSharedKey")
    rsaSig = Enum.YLeaf(3, "rsaSig")
    rsaEncrypt = Enum.YLeaf(4, "rsaEncrypt")
    revPublicKey = Enum.YLeaf(5, "revPublicKey")
class IkeHashAlgo(Enum):
    """
    IkeHashAlgo (Enum Class)
    The hash algorithm used in IPsec Phase\-1
    IKE negotiations.
    Each member pairs the MIB enumeration value with its name.
    .. data:: none = 1
    .. data:: md5 = 2
    .. data:: sha = 3
    """
    none = Enum.YLeaf(1, "none")
    md5 = Enum.YLeaf(2, "md5")
    sha = Enum.YLeaf(3, "sha")
class IkeIdentityType(Enum):
    """
    IkeIdentityType (Enum Class)
    The type of identity used by the local entity to
    identify itself to the peer with which it performs
    IPSec Main Mode negotiations. This type decides the
    content of the Identification payload in the
    Main Mode of IPSec tunnel setup.
    .. data:: isakmpIdTypeUNKNOWN = 0
    .. data:: isakmpIdTypeADDRESS = 1
    .. data:: isakmpIdTypeHOSTNAME = 2
    """
    isakmpIdTypeUNKNOWN = Enum.YLeaf(0, "isakmpIdTypeUNKNOWN")
    isakmpIdTypeADDRESS = Enum.YLeaf(1, "isakmpIdTypeADDRESS")
    isakmpIdTypeHOSTNAME = Enum.YLeaf(2, "isakmpIdTypeHOSTNAME")
class TrapStatus(Enum):
    """
    TrapStatus (Enum Class)
    The administrative status for sending a TRAP.
    Each member pairs the MIB enumeration value with its name.
    .. data:: enabled = 1
    .. data:: disabled = 2
    """
    enabled = Enum.YLeaf(1, "enabled")
    disabled = Enum.YLeaf(2, "disabled")
class CISCOIPSECMIB(Entity):
"""
.. attribute:: cipsisakmpgroup
**type**\: :py:class:`Cipsisakmpgroup <ydk.models.cisco_ios_xe.CISCO_IPSEC_MIB.CISCOIPSECMIB.Cipsisakmpgroup>`
.. attribute:: cipsipsecglobals
**type**\: :py:class:`Cipsipsecglobals <ydk.models.cisco_ios_xe.CISCO_IPSEC_MIB.CISCOIPSECMIB.Cipsipsecglobals>`
.. attribute:: cipsipsecstatistics
**type**\: :py:class:`Cipsipsecstatistics <ydk.models.cisco_ios_xe.CISCO_IPSEC_MIB.CISCOIPSECMIB.Cipsipsecstatistics>`
.. attribute:: cipssyscapacitygroup
**type**\: :py:class:`Cipssyscapacitygroup <ydk.models.cisco_ios_xe.CISCO_IPSEC_MIB.CISCOIPSECMIB.Cipssyscapacitygroup>`
.. attribute:: cipstrapcntlgroup
**type**\: :py:class:`Cipstrapcntlgroup <ydk.models.cisco_ios_xe.CISCO_IPSEC_MIB.CISCOIPSECMIB.Cipstrapcntlgroup>`
.. attribute:: cipsisakmppolicytable
The table containing the list of all ISAKMP policy entries configured by the operator
**type**\: :py:class:`Cipsisakmppolicytable <ydk.models.cisco_ios_xe.CISCO_IPSEC_MIB.CISCOIPSECMIB.Cipsisakmppolicytable>`
.. attribute:: cipsstaticcryptomapsettable
The table containing the list of all cryptomap sets that are fully specified and are not wild\-carded. The operator may include different types of cryptomaps in such a set \- manual, CET, ISAKMP or dynamic
**type**\: :py:class:`Cipsstaticcryptomapsettable <ydk.models.cisco_ios_xe.CISCO_IPSEC_MIB.CISCOIPSECMIB.Cipsstaticcryptomapsettable>`
.. attribute:: cipsdynamiccryptomapsettable
The table containing the list of all dynamic cryptomaps that use IKE, defined on the managed entity
**type**\: :py:class:`Cipsdynamiccryptomapsettable <ydk.models.cisco_ios_xe.CISCO_IPSEC_MIB.CISCOIPSECMIB.Cipsdynamiccryptomapsettable>`
.. attribute:: cipsstaticcryptomaptable
The table listing the member cryptomaps of the cryptomap sets that are configured on the managed entity
**type**\: :py:class:`Cipsstaticcryptomaptable <ydk.models.cisco_ios_xe.CISCO_IPSEC_MIB.CISCOIPSECMIB.Cipsstaticcryptomaptable>`
.. attribute:: cipscryptomapsetiftable
The table lists the binding of cryptomap sets to the interfaces of the managed entity
**type**\: :py:class:`Cipscryptomapsetiftable <ydk.models.cisco_ios_xe.CISCO_IPSEC_MIB.CISCOIPSECMIB.Cipscryptomapsetiftable>`
"""
_prefix = 'CISCO-IPSEC-MIB'
_revision = '2000-08-07'
def __init__(self):
    """Set up the top-level CISCO-IPSEC-MIB entity and attach all child containers."""
    super(CISCOIPSECMIB, self).__init__()
    self._top_entity = None
    self.yang_name = "CISCO-IPSEC-MIB"
    self.yang_parent_name = "CISCO-IPSEC-MIB"
    self.is_top_level_class = True
    self.has_list_ancestor = False
    self.ylist_key_names = []
    # Maps each child's YANG name to (python attribute name, class)
    self._child_container_classes = OrderedDict([("cipsIsakmpGroup", ("cipsisakmpgroup", CISCOIPSECMIB.Cipsisakmpgroup)), ("cipsIPsecGlobals", ("cipsipsecglobals", CISCOIPSECMIB.Cipsipsecglobals)), ("cipsIPsecStatistics", ("cipsipsecstatistics", CISCOIPSECMIB.Cipsipsecstatistics)), ("cipsSysCapacityGroup", ("cipssyscapacitygroup", CISCOIPSECMIB.Cipssyscapacitygroup)), ("cipsTrapCntlGroup", ("cipstrapcntlgroup", CISCOIPSECMIB.Cipstrapcntlgroup)), ("cipsIsakmpPolicyTable", ("cipsisakmppolicytable", CISCOIPSECMIB.Cipsisakmppolicytable)), ("cipsStaticCryptomapSetTable", ("cipsstaticcryptomapsettable", CISCOIPSECMIB.Cipsstaticcryptomapsettable)), ("cipsDynamicCryptomapSetTable", ("cipsdynamiccryptomapsettable", CISCOIPSECMIB.Cipsdynamiccryptomapsettable)), ("cipsStaticCryptomapTable", ("cipsstaticcryptomaptable", CISCOIPSECMIB.Cipsstaticcryptomaptable)), ("cipsCryptomapSetIfTable", ("cipscryptomapsetiftable", CISCOIPSECMIB.Cipscryptomapsetiftable))])
    self._child_list_classes = OrderedDict([])
    self._leafs = OrderedDict()
    # Instantiate every child container, parent it, and register it under
    # its YANG name (same three-step pattern for each child below).
    self.cipsisakmpgroup = CISCOIPSECMIB.Cipsisakmpgroup()
    self.cipsisakmpgroup.parent = self
    self._children_name_map["cipsisakmpgroup"] = "cipsIsakmpGroup"
    self._children_yang_names.add("cipsIsakmpGroup")
    self.cipsipsecglobals = CISCOIPSECMIB.Cipsipsecglobals()
    self.cipsipsecglobals.parent = self
    self._children_name_map["cipsipsecglobals"] = "cipsIPsecGlobals"
    self._children_yang_names.add("cipsIPsecGlobals")
    self.cipsipsecstatistics = CISCOIPSECMIB.Cipsipsecstatistics()
    self.cipsipsecstatistics.parent = self
    self._children_name_map["cipsipsecstatistics"] = "cipsIPsecStatistics"
    self._children_yang_names.add("cipsIPsecStatistics")
    self.cipssyscapacitygroup = CISCOIPSECMIB.Cipssyscapacitygroup()
    self.cipssyscapacitygroup.parent = self
    self._children_name_map["cipssyscapacitygroup"] = "cipsSysCapacityGroup"
    self._children_yang_names.add("cipsSysCapacityGroup")
    self.cipstrapcntlgroup = CISCOIPSECMIB.Cipstrapcntlgroup()
    self.cipstrapcntlgroup.parent = self
    self._children_name_map["cipstrapcntlgroup"] = "cipsTrapCntlGroup"
    self._children_yang_names.add("cipsTrapCntlGroup")
    self.cipsisakmppolicytable = CISCOIPSECMIB.Cipsisakmppolicytable()
    self.cipsisakmppolicytable.parent = self
    self._children_name_map["cipsisakmppolicytable"] = "cipsIsakmpPolicyTable"
    self._children_yang_names.add("cipsIsakmpPolicyTable")
    self.cipsstaticcryptomapsettable = CISCOIPSECMIB.Cipsstaticcryptomapsettable()
    self.cipsstaticcryptomapsettable.parent = self
    self._children_name_map["cipsstaticcryptomapsettable"] = "cipsStaticCryptomapSetTable"
    self._children_yang_names.add("cipsStaticCryptomapSetTable")
    self.cipsdynamiccryptomapsettable = CISCOIPSECMIB.Cipsdynamiccryptomapsettable()
    self.cipsdynamiccryptomapsettable.parent = self
    self._children_name_map["cipsdynamiccryptomapsettable"] = "cipsDynamicCryptomapSetTable"
    self._children_yang_names.add("cipsDynamicCryptomapSetTable")
    self.cipsstaticcryptomaptable = CISCOIPSECMIB.Cipsstaticcryptomaptable()
    self.cipsstaticcryptomaptable.parent = self
    self._children_name_map["cipsstaticcryptomaptable"] = "cipsStaticCryptomapTable"
    self._children_yang_names.add("cipsStaticCryptomapTable")
    self.cipscryptomapsetiftable = CISCOIPSECMIB.Cipscryptomapsetiftable()
    self.cipscryptomapsetiftable.parent = self
    self._children_name_map["cipscryptomapsetiftable"] = "cipsCryptomapSetIfTable"
    self._children_yang_names.add("cipsCryptomapSetIfTable")
    self._segment_path = lambda: "CISCO-IPSEC-MIB:CISCO-IPSEC-MIB"
class Cipsisakmpgroup(Entity):
    """
    .. attribute:: cipsisakmpenabled
    The value of this object is TRUE if ISAKMP has been enabled on the managed entity. Otherwise the value of this object is FALSE
    **type**\: bool
    .. attribute:: cipsisakmpidentity
    The value of this object shows the type of identity used by the managed entity in ISAKMP negotiations with another peer
    **type**\: :py:class:`IkeIdentityType <ydk.models.cisco_ios_xe.CISCO_IPSEC_MIB.IkeIdentityType>`
    .. attribute:: cipsisakmpkeepaliveinterval
    The value of this object is time interval in seconds between successive ISAKMP keepalive heartbeats issued to the peers to which IKE tunnels have been setup
    **type**\: int
    **range:** 10..3600
    **units**\: seconds
    .. attribute:: cipsnumisakmppolicies
    The value of this object is the number of ISAKMP policies that have been configured on the managed entity
    **type**\: int
    **range:** 0..2147483647
    """
    _prefix = 'CISCO-IPSEC-MIB'
    _revision = '2000-08-07'
    def __init__(self):
        super(CISCOIPSECMIB.Cipsisakmpgroup, self).__init__()
        self.yang_name = "cipsIsakmpGroup"
        self.yang_parent_name = "CISCO-IPSEC-MIB"
        self.is_top_level_class = False
        self.has_list_ancestor = False
        self.ylist_key_names = []
        self._child_container_classes = OrderedDict([])
        self._child_list_classes = OrderedDict([])
        # Leaf registry: python attribute name -> YANG leaf descriptor
        self._leafs = OrderedDict([
            ('cipsisakmpenabled', YLeaf(YType.boolean, 'cipsIsakmpEnabled')),
            ('cipsisakmpidentity', YLeaf(YType.enumeration, 'cipsIsakmpIdentity')),
            ('cipsisakmpkeepaliveinterval', YLeaf(YType.int32, 'cipsIsakmpKeepaliveInterval')),
            ('cipsnumisakmppolicies', YLeaf(YType.int32, 'cipsNumIsakmpPolicies')),
        ])
        # All leaf values start unset
        self.cipsisakmpenabled = None
        self.cipsisakmpidentity = None
        self.cipsisakmpkeepaliveinterval = None
        self.cipsnumisakmppolicies = None
        self._segment_path = lambda: "cipsIsakmpGroup"
        self._absolute_path = lambda: "CISCO-IPSEC-MIB:CISCO-IPSEC-MIB/%s" % self._segment_path()
    def __setattr__(self, name, value):
        # Route attribute writes through the Entity machinery so leaf
        # assignments are validated/tracked
        self._perform_setattr(CISCOIPSECMIB.Cipsisakmpgroup, ['cipsisakmpenabled', 'cipsisakmpidentity', 'cipsisakmpkeepaliveinterval', 'cipsnumisakmppolicies'], name, value)
class Cipsipsecglobals(Entity):
"""
.. attribute:: cipssalifetime
The default lifetime (in seconds) assigned to an SA as a global policy (maybe overridden in specific cryptomap definitions)
**type**\: int
**range:** 120..86400
**units**\: Seconds
.. attribute:: cipssalifesize
The default lifesize in KBytes assigned to an SA as a global policy (unless overridden in cryptomap definition)
**type**\: int
**range:** 2560..536870912
**units**\: KBytes
.. attribute:: cipsnumstaticcryptomapsets
The number of Cryptomap Sets that are fully configured. Statically defined cryptomap sets are ones where the operator has fully specified all the parameters required set up IPSec Virtual Private Networks (VPNs)
**type**\: int
**range:** 0..2147483647
**units**\: Integral Units
.. attribute:: cipsnumcetcryptomapsets
The number of static Cryptomap Sets that have at least one CET cryptomap element as | |
neck)
(default is 30 degrees from the straight neck position)
roll_tresh: int
Treshold of the roll angle for considering the person distracted/unconscious (not straight neck)
(default is None: not considered)
pose_time_tresh: float or int
Maximum time allowable for consecutive distracted head pose (given the pitch,yaw and roll thresholds)
(default is 4.0 seconds)
verbose: bool
If set to True, print additional information about the scores (default is False)
Methods
----------
- eval_scores: used to evaluate the driver state of attention
- get_PERCLOS: specifically used to evaluate the driver sleepiness
"""
self.fps = capture_fps
self.delta_time_frame = (1.0 / capture_fps) # estimated frame time
self.prev_time = 0 # auxiliary variable for the PERCLOS estimation function
# default time period for PERCLOS (60 seconds)
self.perclos_time_period = 60
self.perclos_tresh = perclos_tresh
# the time tresholds are divided for the estimated frame time
# (that is a function passed parameter and so can vary)
self.ear_tresh = ear_tresh
self.ear_act_tresh = ear_time_tresh / self.delta_time_frame
self.ear_counter = 0
self.eye_closure_counter = 0
self.gaze_tresh = gaze_tresh
self.gaze_act_tresh = gaze_time_tresh / self.delta_time_frame
self.gaze_counter = 0
self.roll_tresh = roll_tresh
self.pitch_tresh = pitch_tresh
self.yaw_tresh = yaw_tresh
self.pose_act_tresh = pose_time_tresh / self.delta_time_frame
self.pose_counter = 0
self.verbose = verbose
def eval_scores(self, ear_score, gaze_score, head_roll, head_pitch, head_yaw):
    """
    Evaluate the driver state of attention for the current frame.

    :param ear_score: float
        EAR (Eye Aspect Ratio) score obtained from the driver eye aperture
    :param gaze_score: float
        Gaze Score obtained from the driver eye gaze
    :param head_roll: float
        Roll angle obtained from the driver head pose
    :param head_pitch: float
        Pitch angle obtained from the driver head pose
    :param head_yaw: float
        Yaw angle obtained from the driver head pose
    :return:
        Returns a tuple of boolean values that indicates the driver state of attention
        tuple: (asleep, looking_away, distracted)
    """
    # A state flag trips once its cumulative counter has reached the
    # corresponding activation threshold (counters are updated afterwards,
    # so this reflects the state accumulated up to the previous frame).
    asleep = self.ear_counter >= self.ear_act_tresh
    looking_away = self.gaze_counter >= self.gaze_act_tresh
    distracted = self.pose_counter >= self.pose_act_tresh
    # Each counter behaves like a leaky accumulator: it grows by one per
    # frame while the corresponding score is past its threshold (growth is
    # capped once the flag above has tripped) and decays by one per frame
    # otherwise, never dropping below zero. This gives every monitored
    # feature a "cool-down" period instead of reacting to one-frame spikes.
    eyes_closed = ear_score is not None and ear_score <= self.ear_tresh
    if eyes_closed:
        if not asleep:
            self.ear_counter += 1
    elif self.ear_counter > 0:
        self.ear_counter -= 1
    gaze_off = gaze_score is not None and gaze_score >= self.gaze_tresh
    if gaze_off:
        if not looking_away:
            self.gaze_counter += 1
    elif self.gaze_counter > 0:
        self.gaze_counter -= 1
    roll_off = (self.roll_tresh is not None and head_roll is not None
                and head_roll > self.roll_tresh)
    pitch_off = head_pitch is not None and abs(head_pitch) > self.pitch_tresh
    yaw_off = head_yaw is not None and abs(head_yaw) > self.yaw_tresh
    if roll_off or pitch_off or yaw_off:
        if not distracted:
            self.pose_counter += 1
    elif self.pose_counter > 0:
        self.pose_counter -= 1
    if self.verbose:  # print additional info if verbose is True
        print(
            f"ear counter:{self.ear_counter}/{self.ear_act_tresh}\ngaze counter:{self.gaze_counter}/{self.gaze_act_tresh}\npose counter:{self.pose_counter}/{self.pose_act_tresh}")
        print(
            f"eye closed:{asleep}\tlooking away:{looking_away}\tdistracted:{distracted}")
    return asleep, looking_away, distracted
def get_PERCLOS(self, ear_score):
    """
    Compute the PERCLOS score to specifically evaluate driver sleepiness.

    :param ear_score: float
        EAR (Eye Aspect Ratio) score obtained from the driver eye aperture
    :return:
        tuple:(tired, perclos_score)
        tired:
            boolean value indicating if the driver is tired or not
        perclos_score:
            float value indicating the PERCLOS score over a minute;
            after a minute this score resets itself to zero
    """
    elapsed = time.time() - self.prev_time  # time since the current period started
    # One more closed-eye frame whenever the EAR drops below threshold
    if ear_score is not None and ear_score <= self.ear_tresh:
        self.eye_closure_counter += 1
    # Turn the closed-frame count into cumulative closure time...
    closure_time = self.eye_closure_counter * self.delta_time_frame
    # ...and normalize it over the PERCLOS time period
    perclos_score = closure_time / self.perclos_time_period
    # The driver counts as tired once the closure fraction passes the threshold
    tired = perclos_score >= self.perclos_tresh
    if self.verbose:
        print(
            f"Closure Time:{closure_time}/{self.perclos_time_period}\nPERCLOS: {round(perclos_score, 3)}")
    if elapsed >= self.perclos_time_period:
        # Period over: restart both the closure counter and the timer
        self.eye_closure_counter = 0
        self.prev_time = time.time()
    return tired, perclos_score
def main():
ctime = 0 # current time (used to compute FPS)
ptime = 0 # past time (used to compute FPS)
prev_time = 0 # previous time variable, used to set the FPS limit
fps_lim = 11 # FPS upper limit value, needed for estimating the time for each frame and increasing performances
time_lim = 1. / fps_lim # time window for each frame taken by the webcam
# instantiation of the dlib face detector object
Detector = dlib.get_frontal_face_detector()
Predictor = dlib.shape_predictor(
"predictor/shape_predictor_68_face_landmarks.dat") # instantiation of the dlib keypoint detector model
'''
the keypoint predictor is compiled in C++ and saved as a .dat inside the "predictor" folder in the project
inside the folder there is also a useful face keypoint image map to understand the position and numnber of the
various predicted face keypoints
'''
Scorer = Attention_Scorer(fps_lim, ear_tresh=0.15, ear_time_tresh=2, gaze_tresh=0.2,
gaze_time_tresh=2, pitch_tresh=35, yaw_tresh=28, pose_time_tresh=2.5, verbose=False)
# instantiation of the attention scorer object, with the various thresholds
# NOTE: set verbose to True for additional printed information about the scores
clahe = cv2.createCLAHE(clipLimit=2, tileGridSize=(8, 8))
# capture the input from the default system camera (camera number 0)
cap = cv2.VideoCapture(0)
if not cap.isOpened(): # if the camera can't be opened exit the program
print("Cannot open camera")
exit()
while True: # infinite loop for webcam video capture
delta_time = time.time() - prev_time # delta time for FPS capping
ret, frame = cap.read() # read a frame from the webcam
if not ret: # if a frame can't be read, exit the program
print("Can't receive frame from camera/stream end")
break
if delta_time >= time_lim: # if the time passed is bigger or equal than the frame time, process the frame
prev_time = time.time()
# compute the actual frame rate per second (FPS) of the webcam video capture stream, and show it
ctime = time.time()
fps = 1.0 / float(ctime - ptime)
ptime = ctime
cv2.putText(frame, "FPS:" + str(round(fps, 0)), (10, 400), cv2.FONT_HERSHEY_PLAIN, 2,
(255, 0, 255), 1)
# transform the BGR frame in grayscale
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# apply a bilateral filter to lower noise but keep frame details
gray = cv2.bilateralFilter(gray, 5, 10, 10)
# find the faces using the dlib face detector
faces = Detector(gray)
if len(faces) > 0: # process the frame only if at least a face is found
# take only the bounding box of the biggest face
faces = sorted(faces, key=get_face_area, reverse=True)
driver_face = faces[0]
# predict the 68 facial keypoints position
landmarks = Predictor(gray, driver_face)
# instantiate the Eye detector and pose estimator objects
Eye_det = Eye_Detector(gray, landmarks, show_processing=True)
Head_pose = Head_Pose_Estimator(
frame, landmarks, verbose=True)
# | |
<gh_stars>10-100
import simplejson as json
import urllib
import urllib2
from datetime import datetime
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.db import transaction, connection
from django.core.cache import cache
from django.utils import timezone
from dotastats.models import MatchDetails, MatchDetailsPlayerEntry, SteamPlayer, MatchHistoryQueue, MatchHistoryQueuePlayers, MatchPicksBans, MatchSequenceNumber
from dotastats.exceptions import SteamAPIError
# API Key macro from settings file.
API_KEY = settings.STEAM_API_KEY
MATCH_FRESHNESS = settings.DOTA_MATCH_REFRESH
PLAYER_FRESHNESS = settings.DOTA_PLAYER_REFRESH
MATCH_HISTORY_URL = 'https://api.steampowered.com/IDOTA2Match_570/GetMatchHistory/V001/'
MATCH_SEQUENCE_URL = 'https://api.steampowered.com/IDOTA2Match_570/GetMatchHistoryBySequenceNum/V001/'
MATCH_DETAILS_URL = 'https://api.steampowered.com/IDOTA2Match_570/GetMatchDetails/V001/'
STEAM_USER_NAMES = 'http://api.steampowered.com/ISteamUser/GetPlayerSummaries/v0002/'
def GetLatestMatches():
    """Returns the last 500 matches (sorted by match_seq_num) that were parsed into MatchDetails.
    """
    queryset = MatchDetails.exclude_low_priority()
    queryset = queryset.filter(lobby_type=0)
    return queryset.order_by('-match_seq_num')[:500]
def GetMatchHistoryBySequenceNum(start_at_match_seq_num=None):
    """Loads items into MatchDetails.
    This will poll the WebAPI and acquire a list of MatchDetails. This function is intended to be used as bulk data import.
    Data returned by this API is returned, by match_seq_num ascending.
    This method keeps a record of the last match_seq_num recorded + 1.
    WARNING: This method is volatile, matches retrieved by this method will always be refreshed.
    Args:
        start_at_match_seq (int): If None, this method will use the last match_seq_num it requested, and the match_seq_num will not be recorded. Otherwise it will use the seq_num requested.
    Returns:
        None if no matches were processed at all, otherwise integer of the last match_seq_num. (That is, takes the last in the WebAPI resultset and increments by 1.)
    """
    # Only persist the bookmark when the caller did not pin a sequence number
    record_last_match_seq_num = not start_at_match_seq_num
    if record_last_match_seq_num:
        start_at_match_seq_num = MatchSequenceNumber.get_last_match_seq_num()
    json_data = GetMatchHistoryBySequenceNumJson(start_at_seq_num=start_at_match_seq_num)
    if json_data['status'] != 1:
        raise SteamAPIError("API Status was not 1. Possible error occured.")
    matches = json_data['matches']
    if len(matches) == 0:
        return None
    next_seq_num = 0
    for match_json in matches:
        CreateMatchDetails(match_json['match_id'], match_json)
        next_seq_num = match_json['match_seq_num']
    next_seq_num += 1  # Increment past the last processed match so the next poll resumes after it.
    if record_last_match_seq_num:
        MatchSequenceNumber.set_last_match_seq_num(next_seq_num)
    return next_seq_num
@transaction.commit_manually
def GetMatchHistory(**kargs):
    """Loads items into MatchHistoryQueue.
    This will poll the WebAPI and acquire a list of matches. This will never return a match that has already been processed into MatchDetails.
    This function is intended to be used in conjunction with specified kwargs.
    Args:
        **kargs (dict): kargs to pass into the WebAPI for filtering lookups. Valid kargs are:
            player_name=<name> # Search matches with a player name, exact match only
            hero_id=<id> # Search for matches with a specific hero being played, hero id's are in dota/scripts/npc/npc_heroes.txt in your Dota install directory
            skill=<skill> # 0 for any, 1 for normal, 2 for high, 3 for very high skill
            date_min=<date> # date in UTC seconds since Jan 1, 1970 (unix time format)
            date_max=<date> # date in UTC seconds since Jan 1, 1970 (unix time format)
            account_id=<id> # Steam account id (this is not SteamID, its only the account number portion)
            league_id=<id> # matches for a particular league
            start_at_match_id=<id> # Start the search at the indicated match id, descending
            matches_requested=<n> # Defaults is 25 matches, this can limit to less
    Returns:
        A list of MatchHistoryQueue objects to be iterated on, sorted by match `start_time`
    """
    create_queue = []
    account_list = []
    try:
        json_data = GetMatchHistoryJson(**kargs)
        if json_data['status'] == 15: # Match history denied, is set to private.
            raise SteamAPIError("This user has his DotA2 Profile set to private.")
        # FK checks are disabled so queue rows can be inserted before the
        # referenced SteamPlayer rows exist; GetPlayerNames below fills
        # those in afterwards -- NOTE(review): confirm ordering is safe.
        with connection.constraint_checks_disabled():
            for match in json_data['matches']:
                if(len(match['players']) < 1): # Don't log matches without players.
                    continue
                bulk_json = []
                json_player_data = match['players']
                if MatchDetails.objects.filter(pk=match['match_id']).exists() or MatchHistoryQueue.objects.filter(pk=match['match_id']).exists() or match['lobby_type'] == 4:
                    continue # Object in queue or already created. Can ignore for now.
                match_history = MatchHistoryQueue.from_json_response(match)
                match_history.save() # Save here so the pk is created.
                for json_player in json_player_data:
                    bulk_json.append(json_player)
                    account_list.append(convertAccountNumbertoSteam64(json_player.get('account_id', None)))
                create_queue.append((match_history, bulk_json))
        GetPlayerNames(account_list) # Loads accounts into cache
        # Second pass: bulk-create the per-match player rows now that the
        # queue entries (and player records) exist.
        for create_match_history, json_player_list in create_queue:
            queue_player_set = []
            for json_player in json_player_list:
                queue_player_set.append(MatchHistoryQueuePlayers.from_json_response(create_match_history, json_player))
            create_match_history.matchhistoryqueueplayers_set.bulk_create(queue_player_set)
        return_history = MatchHistoryQueue.objects.all().order_by('-start_time')
        transaction.commit()
    except:
        # Bare except is deliberate: any failure must roll the manual
        # transaction back, and the exception is always re-raised.
        transaction.rollback()
        raise
    return return_history
@transaction.commit_manually
def CreateMatchDetails(matchid, json_data=None):
    """This creates a MatchDetails object by matchid.
    If a MatchDetails object already exists, it is deleted and recreated.
    WARNING: This method is volatile, and will always delete a conflicting duplicate match. See ``GetMatchDetails`` for a non-volatile lookup method.
    Args:
        matchid (int): Valid MatchID to parse.
        json_data (dict): Instead of asking WebAPI for json, you can provide your own.
    Returns:
        The newly created MatchDetails object.
    """
    bulk_create = []
    account_list = []
    match_details = None
    MatchDetails.objects.filter(match_id=matchid).all().delete() # Delete all previous MatchDetails.
    try:
        if not json_data:
            try:
                json_data = GetMatchDetailsJson(matchid)
            except urllib2.HTTPError, e:
                if e.code == 401: # Unauthorized to view lobby. Return None
                    MatchHistoryQueue.objects.filter(match_id=matchid).all().delete() # Remove from queue.
                    transaction.commit() # Make sure the deletion goes through before raising error.
                    raise SteamAPIError("This lobby is password protected.")
                else:
                    raise
        json_player_data = json_data['players']
        match_details = MatchDetails.from_json_response(json_data)
        match_details.save()
        # Picks/bans are only present for some game modes (e.g. drafts)
        json_picks_bans_data = json_data.get('picks_bans', False)
        if json_picks_bans_data:
            picks_bans_bulk_create = []
            for json_picks_bans in json_picks_bans_data:
                picks_bans_bulk_create.append(MatchPicksBans.from_json_response(match_details, json_picks_bans))
            MatchPicksBans.objects.bulk_create(picks_bans_bulk_create)
        for json_player in json_player_data:
            bulk_player = MatchDetailsPlayerEntry.from_json_response(match_details, json_player)
            if bulk_player:
                bulk_create.append(bulk_player)
                account_list.append(convertAccountNumbertoSteam64(json_player.get('account_id', None)))
        GetPlayerNames(account_list) # Loads accounts into db for FK constraints. TODO: Re-work me? Disable FK constraints entirely?
        if match_details != None and len(bulk_create) > 0:
            match_details.matchdetailsplayerentry_set.bulk_create(bulk_create)
        # Match is now fully imported; it no longer needs to sit in the queue
        MatchHistoryQueue.objects.filter(match_id=matchid).all().delete()
        transaction.commit()
    except:
        # Bare except is deliberate: roll the manual transaction back on any
        # failure and re-raise so the caller sees the original error.
        print("Error creating match: " + str(matchid) + ". Rolling back.")
        transaction.rollback()
        raise
    return match_details
def GetMatchDetails(matchid, force_refresh=False):
    """Fetches a MatchDetails object from db cache or newly created from json, by matchid.

    NOTE: This method adheres to ``MATCH_FRESHNESS`` setting. Matches after this threshhold will be recreated.

    Args:
        matchid (int): MatchID to look up.

    Kwargs:
        force_refresh (bool): Whether to force match freshness. True will cause a lookup regardless of last_refresh.

    Returns:
        A MatchDetails object, old or new.
    """
    try:
        match_details = MatchDetails.objects.get(match_id=matchid)
    except ObjectDoesNotExist:
        match_details = None
    # Recreate when the match is missing, a refresh is forced, or the cached
    # row is older than the freshness window.  Use ``is None`` (identity)
    # rather than ``== None``, which would invoke __eq__ on the model.
    if match_details is None or force_refresh or timezone.now() - match_details.last_refresh > MATCH_FRESHNESS:
        match_details = CreateMatchDetails(matchid)
    return match_details
def GetMatchHistoryJson(**kargs):
    """Fetches MatchHistory JSON as dict from WebAPI.

    Args:
        **kargs (dict): kwargs to pass to WebAPI for filtering. This is encoded into the url.

    Returns:
        dict. The resulting dict of json.loads.

    Raises:
        SteamAPIError: An error occured with a recognizable error code.
        HTTPError: Misc. HTTP Error occured.
    """
    json_data = dict()
    try:
        kargs.update({'key': API_KEY})
        url_data = urllib.urlencode(kargs)
        response = urllib2.urlopen(MATCH_HISTORY_URL + '?' + url_data)
        try:
            json_data = json.loads(response.read())['result']
        finally:
            # Always release the connection, even if read/parse fails.
            response.close()
    except urllib2.HTTPError as e:  # 'as' form works on Python 2.6+ and 3.
        if e.code == 400:
            raise SteamAPIError("Malformed API request.")
        elif e.code == 401:
            raise SteamAPIError("Unauthorized API access. Please recheck your API key.")
        elif e.code == 503:
            raise SteamAPIError("The Steam servers are currently overloaded.")
        else:
            raise SteamAPIError("Unknown API error" + str(e.code))
        # NOTE: the original trailing bare ``raise`` here was unreachable —
        # every branch above already raises — so it has been removed.
    return json_data
def GetMatchDetailsJson(match_id):
    """Fetches MatchDetails JSON as dict from WebAPI.

    Args:
        match_id (int): Valid match_id to pass to the WebAPI.

    Returns:
        dict. The resulting dict of json.loads.

    Raises:
        SteamAPIError: An error occured with a recognizable error code.
        HTTPError: Misc. HTTP Error occured.
    """
    json_data = dict()
    try:
        kargs = dict({'key': API_KEY, 'match_id': match_id})
        url_data = urllib.urlencode(kargs)
        response = urllib2.urlopen(MATCH_DETAILS_URL + '?' + url_data)
        try:
            json_data = json.loads(response.read())['result']
        finally:
            # Always release the connection, even if read/parse fails.
            response.close()
    except urllib2.HTTPError as e:  # 'as' form works on Python 2.6+ and 3.
        if e.code == 400:
            raise SteamAPIError("Malformed API request.")
        elif e.code == 401:
            raise SteamAPIError("Unauthorized API access. Please recheck your API key.")
        elif e.code == 503:
            raise SteamAPIError("The Steam servers are currently overloaded.")
        else:
            raise SteamAPIError("Unknown API error" + str(e.code))
        # NOTE: the original trailing bare ``raise`` here was unreachable —
        # every branch above already raises — so it has been removed.
    return json_data
def GetMatchHistoryBySequenceNumJson(start_at_seq_num=None):
    """ Fetches GetMatchHistoryBySequenceNum JSON as dict from WebAPI.

    Args:
        start_at_seq_num (int): If None, argument will be left out of urlencode. Otherwise this argument is included.

    Returns:
        dict. The resulting dict of json.loads

    Raises:
        SteamAPIError: An error occured with a recognizable error code.
        HTTPError: Misc. HTTP Error occured.
    """
    json_data = dict()
    args = dict()
    # NOTE(review): truthiness check means a sequence number of 0 is also
    # omitted from the request — presumably intentional; confirm with callers.
    if start_at_seq_num:
        args.update({'start_at_match_seq_num': start_at_seq_num})
    try:
        args.update({'key': API_KEY})
        url_data = urllib.urlencode(args)
        response = urllib2.urlopen(MATCH_SEQUENCE_URL + '?' + url_data)
        try:
            json_data = json.loads(response.read())['result']
        finally:
            # Always release the connection, even if read/parse fails.
            response.close()
    except urllib2.HTTPError as e:  # 'as' form works on Python 2.6+ and 3.
        if e.code == 400:
            raise SteamAPIError("Malformed API request.")
        elif e.code == 401:
            raise SteamAPIError("Unauthorized API access. Please recheck your API key.")
        elif e.code == 503:
            raise SteamAPIError("The Steam servers are currently overloaded.")
        else:
            raise SteamAPIError("Unknown API error" + str(e.code))
        # NOTE: the original trailing bare ``raise`` here was unreachable —
        # every branch above already raises — so it has been removed.
    return json_data
def convertAccountNumbertoSteam64(steamID):
"""Converts the `account number` to Steam64. Does not convert PRIVATE players.
This does the opposite of `convertSteam64toAccountNumber`
Args:
steamID (int): None or SteamID to convert
Returns:
int or | |
<filename>mesonbuild/interpreter.py
# Copyright 2012-2016 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import mparser
from . import environment
from . import coredata
from . import dependencies
from . import mlog
from . import build
from . import optinterpreter
from . import compilers
from .wrap import wrap
from . import mesonlib
import os, sys, subprocess, shutil, uuid, re
from functools import wraps
import importlib
run_depr_printed = False
class InterpreterException(mesonlib.MesonException):
    """Base class for all errors raised while interpreting a meson.build file."""
    pass
class InvalidCode(InterpreterException):
    """Raised for structurally invalid build code, e.g. an unknown method call."""
    pass
class InvalidArguments(InterpreterException):
    """Raised when a DSL function is called with bad argument count or types."""
    pass
# Decorators for method calls.
def check_stringlist(a, msg='Arguments must be strings.'):
    """Validate that *a* is a list containing only strings.

    Raises InvalidArguments (with *msg* for the element case) otherwise.
    """
    if not isinstance(a, list):
        mlog.debug('Not a list:', str(a))
        raise InvalidArguments('Argument not a list.')
    for element in a:
        if not isinstance(element, str):
            mlog.debug('Element not a string:', str(a))
            raise InvalidArguments(msg)
def noPosargs(f):
    """Decorator: reject any positional arguments passed to the DSL method."""
    @wraps(f)
    def wrapped(self, node, args, kwargs):
        if args:
            raise InvalidArguments('Function does not take positional arguments.')
        return f(self, node, args, kwargs)
    return wrapped
def noKwargs(f):
    """Decorator: reject any keyword arguments passed to the DSL method."""
    @wraps(f)
    def wrapped(self, node, args, kwargs):
        if kwargs:
            raise InvalidArguments('Function does not take keyword arguments.')
        return f(self, node, args, kwargs)
    return wrapped
def stringArgs(f):
    """Decorator: require that all positional arguments are strings."""
    @wraps(f)
    def wrapped(self, node, args, kwargs):
        assert(isinstance(args, list))
        check_stringlist(args)
        return f(self, node, args, kwargs)
    return wrapped
def stringifyUserArguments(args):
    """Render a user-supplied value back into Meson source syntax."""
    if isinstance(args, list):
        rendered = ', '.join(stringifyUserArguments(item) for item in args)
        return '[%s]' % rendered
    if isinstance(args, int):
        return str(args)
    if isinstance(args, str):
        return "'%s'" % args
    raise InvalidArguments('Function accepts only strings, integers, lists and lists thereof.')
class InterpreterObject:
    """Base class for objects exposed to the Meson DSL.

    Subclasses register callables in ``self.methods``; ``method_call``
    dispatches DSL method invocations to them.
    """

    def __init__(self):
        # Maps method name -> callable(args, kwargs).
        self.methods = {}

    def method_call(self, method_name, args, kwargs):
        handler = self.methods.get(method_name)
        if handler is None:
            raise InvalidCode('Unknown method "%s" in object.' % method_name)
        return handler(args, kwargs)
class TryRunResultHolder(InterpreterObject):
    """Exposes the result of a compiler try-run to the DSL."""

    def __init__(self, res):
        super().__init__()
        self.res = res
        self.methods.update({
            'returncode': self.returncode_method,
            'compiled': self.compiled_method,
            'stdout': self.stdout_method,
            'stderr': self.stderr_method,
        })

    def returncode_method(self, args, kwargs):
        return self.res.returncode

    def compiled_method(self, args, kwargs):
        return self.res.compiled

    def stdout_method(self, args, kwargs):
        return self.res.stdout

    def stderr_method(self, args, kwargs):
        return self.res.stderr
class RunProcess(InterpreterObject):
    """Runs an external command synchronously and captures its output.

    The command is resolved with a three-stage fallback (see run_command);
    stdout/stderr are decoded and newline-normalized in __init__.
    """

    def __init__(self, command_array, source_dir, build_dir, subdir, in_builddir=False):
        super().__init__()
        pc = self.run_command(command_array, source_dir, build_dir, subdir, in_builddir)
        (stdout, stderr) = pc.communicate()
        self.returncode = pc.returncode
        # Decode with the console's encoding when one is known (it can be
        # None, e.g. when output is redirected); ignore undecodable bytes
        # and normalize Windows line endings.
        if sys.stdout.encoding:
            self.stdout = stdout.decode(encoding=sys.stdout.encoding, errors='ignore').replace('\r\n', '\n')
        else:
            self.stdout = stdout.decode(errors='ignore').replace('\r\n', '\n')
        if sys.stderr.encoding:
            self.stderr = stderr.decode(encoding=sys.stderr.encoding, errors='ignore').replace('\r\n', '\n')
        else:
            self.stderr = stderr.decode(errors='ignore').replace('\r\n', '\n')
        self.methods.update({'returncode' : self.returncode_method,
                             'stdout' : self.stdout_method,
                             'stderr' : self.stderr_method,
                             })

    def run_command(self, command_array, source_dir, build_dir, subdir, in_builddir):
        """Launch the command, trying three interpretations in order:

        1. as given (absolute path or resolvable by the OS),
        2. a program found on PATH via shutil.which,
        3. a script in the source tree at <source_dir>/<subdir>/<cmd>.

        Returns the Popen object; raises InterpreterException if none work.
        """
        cmd_name = command_array[0]
        # Expose meson's layout to the child process.
        env = {'MESON_SOURCE_ROOT' : source_dir,
               'MESON_BUILD_ROOT' : build_dir,
               'MESON_SUBDIR' : subdir}
        if in_builddir:
            cwd = os.path.join(build_dir, subdir)
        else:
            cwd = os.path.join(source_dir, subdir)
        child_env = os.environ.copy()
        child_env.update(env)
        mlog.debug('Running command:', ' '.join(command_array))
        try:
            return subprocess.Popen(command_array, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                                    env=child_env, cwd=cwd)
        except FileNotFoundError:
            pass
        # Was not a command, is a program in path?
        exe = shutil.which(cmd_name)
        if exe is not None:
            command_array = [exe] + command_array[1:]
            return subprocess.Popen(command_array, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                                    env=child_env, cwd=cwd)
        # No? Maybe it is a script in the source tree.
        fullpath = os.path.join(source_dir, subdir, cmd_name)
        command_array = [fullpath] + command_array[1:]
        try:
            return subprocess.Popen(command_array, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                                    env=child_env, cwd=cwd)
        except FileNotFoundError:
            raise InterpreterException('Could not execute command "%s".' % cmd_name)

    def returncode_method(self, args, kwargs):
        return self.returncode

    def stdout_method(self, args, kwargs):
        return self.stdout

    def stderr_method(self, args, kwargs):
        return self.stderr
class ConfigureFileHolder(InterpreterObject):
    """Wraps a build.ConfigureFile created by configure_file()."""

    def __init__(self, subdir, sourcename, targetname, configuration_data):
        super().__init__()
        self.held_object = build.ConfigureFile(subdir, sourcename, targetname, configuration_data)
class ConfigurationDataHolder(InterpreterObject):
    """Mutable key/value store for configure_file().

    Instances become immutable once consumed by configure_file.
    """

    def __init__(self):
        super().__init__()
        self.used = False  # set True after use in configure_file
        self.held_object = build.ConfigurationData()
        self.methods.update({
            'set': self.set_method,
            'set10': self.set10_method,
            'has': self.has_method,
        })

    def is_used(self):
        return self.used

    def mark_used(self):
        self.used = True

    def validate_args(self, args):
        """Check a (name, value) argument pair and return it as a tuple."""
        if len(args) != 2:
            raise InterpreterException("Configuration set requires 2 arguments.")
        if self.used:
            raise InterpreterException("Can not set values on configuration object that has been used.")
        name, val = args
        if not isinstance(name, str):
            raise InterpreterException("First argument to set must be a string.")
        return (name, val)

    def set_method(self, args, kwargs):
        name, val = self.validate_args(args)
        self.held_object.values[name] = val

    def set10_method(self, args, kwargs):
        # Store truthy values as 1 and falsy ones as 0.
        name, val = self.validate_args(args)
        self.held_object.values[name] = 1 if val else 0

    def has_method(self, args, kwargs):
        return args[0] in self.held_object.values

    def get(self, name):
        return self.held_object.values[name]

    def keys(self):
        return self.held_object.values.keys()
# Interpreter objects can not be pickled so we must have
# these wrappers.
class DependencyHolder(InterpreterObject):
    """Wraps an external Dependency object for the DSL."""

    def __init__(self, dep):
        super().__init__()
        self.held_object = dep
        self.methods.update({
            'found': self.found_method,
            'version': self.version_method,
        })

    def found_method(self, args, kwargs):
        return self.held_object.found()

    def version_method(self, args, kwargs):
        return self.held_object.get_version()
class InternalDependencyHolder(InterpreterObject):
    """Wraps an internally declared dependency; always reported as found."""

    def __init__(self, dep):
        super().__init__()
        self.held_object = dep
        self.methods.update({
            'found': self.found_method,
            'version': self.version_method,
        })

    def found_method(self, args, kwargs):
        # Internal dependencies exist by construction.
        return True

    def version_method(self, args, kwargs):
        return self.held_object.get_version()
class ExternalProgramHolder(InterpreterObject):
    """Wraps an ExternalProgram located by find_program()."""

    def __init__(self, ep):
        super().__init__()
        self.held_object = ep
        self.methods.update({
            'found': self.found_method,
            'path': self.path_method,
        })

    def found(self):
        return self.held_object.found()

    def found_method(self, args, kwargs):
        return self.found()

    def get_command(self):
        return self.held_object.fullpath

    def path_method(self, args, kwargs):
        return self.get_command()

    def get_name(self):
        return self.held_object.name
class ExternalLibraryHolder(InterpreterObject):
    """Wraps an ExternalLibrary found by the compiler object."""

    def __init__(self, el):
        super().__init__()
        self.held_object = el
        self.methods.update({'found': self.found_method})

    def found_method(self, args, kwargs):
        return self.found()

    def found(self):
        return self.held_object.found()

    def get_filename(self):
        return self.held_object.fullpath

    def get_name(self):
        return self.held_object.name

    def get_compile_args(self):
        return self.held_object.get_compile_args()

    def get_link_args(self):
        return self.held_object.get_link_args()

    def get_exe_args(self):
        return self.held_object.get_exe_args()
class GeneratorHolder(InterpreterObject):
    """Wraps a build.Generator created by generator()."""

    def __init__(self, interpreter, args, kwargs):
        super().__init__()
        self.interpreter = interpreter
        self.held_object = build.Generator(args, kwargs)
        self.methods.update({'process': self.process_method})

    def process_method(self, args, kwargs):
        """Apply the generator to the given source files.

        args: list of source file names (relative to the current subdir).
        Returns a GeneratedListHolder with one entry per input file.
        """
        check_stringlist(args)
        extras = mesonlib.stringlistify(kwargs.get('extra_args', []))
        gl = GeneratedListHolder(self, extras)
        # A plain loop, not a list comprehension: the original built and
        # discarded a list purely for add_file's side effect.
        for a in args:
            gl.add_file(os.path.join(self.interpreter.subdir, a))
        return gl
class GeneratedListHolder(InterpreterObject):
    """Wraps a build.GeneratedList.

    arg1 is either a GeneratorHolder (a fresh GeneratedList is created for
    its generator) or an existing GeneratedList to wrap directly.
    """

    def __init__(self, arg1, extra_args=None):
        super().__init__()
        # The original used a mutable default (extra_args=[]), which is
        # shared across every call; use a None sentinel and build a fresh
        # list instead.  Passing an explicit list behaves as before.
        if extra_args is None:
            extra_args = []
        if isinstance(arg1, GeneratorHolder):
            self.held_object = build.GeneratedList(arg1.held_object, extra_args)
        else:
            self.held_object = arg1

    def add_file(self, a):
        self.held_object.add_file(a)
class BuildMachine(InterpreterObject):
    """Provides the `build_machine` object: the machine running meson."""

    def __init__(self):
        super().__init__()
        self.methods.update({
            'system': self.system_method,
            'cpu_family': self.cpu_family_method,
            'cpu': self.cpu_method,
            'endian': self.endian_method,
        })

    def system_method(self, args, kwargs):
        return environment.detect_system()

    def cpu_family_method(self, args, kwargs):
        return environment.detect_cpu_family()

    def cpu_method(self, args, kwargs):
        return environment.detect_cpu()

    def endian_method(self, args, kwargs):
        # Byte order of the interpreter's own machine.
        return sys.byteorder
# This class will provide both host_machine and
# target_machine
class CrossMachineInfo(InterpreterObject):
    """Provides both host_machine and target_machine from cross-file data."""

    def __init__(self, cross_info):
        super().__init__()
        minimum_cross_info = {'cpu', 'cpu_family', 'endian', 'system'}
        # The original strict-subset test (set(cross_info) < minimum) wrongly
        # accepted a machine dict that had extra keys but still lacked some
        # required ones; test the missing set directly instead.
        missing = minimum_cross_info - set(cross_info)
        if missing:
            raise InterpreterException(
                'Machine info is currently {}\n'.format(cross_info) +
                'but is missing {}.'.format(missing))
        self.info = cross_info
        self.methods.update({
            'system': self.system_method,
            'cpu': self.cpu_method,
            'cpu_family': self.cpu_family_method,
            'endian': self.endian_method,
        })

    def system_method(self, args, kwargs):
        return self.info['system']

    def cpu_method(self, args, kwargs):
        return self.info['cpu']

    def cpu_family_method(self, args, kwargs):
        return self.info['cpu_family']

    def endian_method(self, args, kwargs):
        return self.info['endian']
class IncludeDirsHolder(InterpreterObject):
    """Wraps a build.IncludeDirs object created by include_directories()."""

    def __init__(self, idobj):
        InterpreterObject.__init__(self)
        self.held_object = idobj
class Headers(InterpreterObject):
    """Holds the arguments of install_headers() for the backend."""

    def __init__(self, src_subdir, sources, kwargs):
        super().__init__()
        self.sources = sources
        self.source_subdir = src_subdir
        self.install_subdir = kwargs.get('subdir', '')
        self.custom_install_dir = kwargs.get('install_dir', None)
        if self.custom_install_dir is not None and not isinstance(self.custom_install_dir, str):
            raise InterpreterException('Custom_install_dir must be a string.')

    def set_install_subdir(self, subdir):
        self.install_subdir = subdir

    def get_install_subdir(self):
        return self.install_subdir

    def get_source_subdir(self):
        return self.source_subdir

    def get_sources(self):
        return self.sources

    def get_custom_install_dir(self):
        return self.custom_install_dir
class DataHolder(InterpreterObject):
    """Wraps a build.Data object created by data().

    Requires an 'install_dir' kwarg that must be a string; extra sources may
    be passed via the 'sources' kwarg.
    """

    def __init__(self, in_sourcetree, source_subdir, sources, kwargs):
        super().__init__()
        kwsource = mesonlib.stringlistify(kwargs.get('sources', []))
        # Build a new list instead of the original `sources += kwsource`,
        # which extended — and therefore mutated — the caller's list in place.
        sources = sources + kwsource
        check_stringlist(sources)
        install_dir = kwargs.get('install_dir', None)
        if not isinstance(install_dir, str):
            raise InterpreterException('Custom_install_dir must be a string.')
        self.held_object = build.Data(in_sourcetree, source_subdir, sources, install_dir)

    def get_source_subdir(self):
        return self.held_object.source_subdir

    def get_sources(self):
        return self.held_object.sources

    def get_install_dir(self):
        return self.held_object.install_dir
class InstallDir(InterpreterObject):
    """Holds the arguments of install_subdir() for the backend."""

    def __init__(self, source_subdir, installable_subdir, install_dir):
        super().__init__()
        self.source_subdir = source_subdir
        self.installable_subdir = installable_subdir
        self.install_dir = install_dir
class Man(InterpreterObject):
    """Holds the man pages passed to man() for installation."""

    def __init__(self, source_subdir, sources, kwargs):
        super().__init__()
        self.source_subdir = source_subdir
        self.sources = sources
        self.validate_sources()
        if len(kwargs) > 1:
            raise InvalidArguments('Man function takes at most one keyword arguments.')
        self.custom_install_dir = kwargs.get('install_dir', None)
        if self.custom_install_dir is not None and not isinstance(self.custom_install_dir, str):
            raise InterpreterException('Custom_install_dir must be a string.')

    def validate_sources(self):
        """Every man source must end in a section-number extension (1-8)."""
        for src in self.sources:
            try:
                section = int(src.split('.')[-1])
            except (IndexError, ValueError):
                section = 0
            if not 1 <= section <= 8:
                raise InvalidArguments('Man file must have a file extension of a number between 1 and 8')

    def get_custom_install_dir(self):
        return self.custom_install_dir

    def get_sources(self):
        return self.sources

    def get_source_subdir(self):
        return self.source_subdir
class GeneratedObjectsHolder(InterpreterObject):
    """Wraps object files generated for a build target."""

    def __init__(self, held_object):
        InterpreterObject.__init__(self)
        self.held_object = held_object
class BuildTargetHolder(InterpreterObject):
def __init__(self, target, interp):
super().__init__()
self.held_object = target
self.interpreter = interp
self.methods.update({'extract_objects' : self.extract_objects_method,
'extract_all_objects' : self.extract_all_objects_method,
'get_id': self.get_id_method,
'outdir' : self.outdir_method,
'full_path' : self.full_path_method,
'private_dir_include' : self.private_dir_include_method,
})
def is_cross(self):
return self.held_object.is_cross()
def private_dir_include_method(self, args, kwargs):
return IncludeDirsHolder(build.IncludeDirs('', [], False,
[self.interpreter.backend.get_target_private_dir(self.held_object)]))
def full_path_method(self, args, kwargs):
return self.interpreter.backend.get_target_filename_abs(self.held_object)
def | |
"""This file contains code used in "Think DSP",
by <NAME>, available from greenteapress.com
Copyright 2013 <NAME>
License: MIT License (https://opensource.org/licenses/MIT)
"""
import copy
import math
import numpy as np
import random
import scipy
import scipy.stats
import scipy.fftpack
import subprocess
import warnings
from wave import open as open_wave
from scipy.io import wavfile
import matplotlib.pyplot as plt
try:
from math import gcd
except ImportError:
from fractions import gcd
try:
from IPython.display import Audio
except:
warnings.warn(
"Can't import Audio from IPython.display; " "Wave.make_audio() will not work."
)
PI2 = math.pi * 2
def random_seed(x):
    """Initialize the random and np.random generators.

    x: int seed
    """
    for seeder in (random.seed, np.random.seed):
        seeder(x)
class UnimplementedMethodException(Exception):
    """Raised when a base-class method that subclasses must override is called."""
class WavFileWriter:
    """Writes wav files (mono, 16-bit PCM)."""

    def __init__(self, filename="sound.wav", framerate=11025):
        """Opens the file and sets parameters.

        filename: string
        framerate: samples per second
        """
        self.filename = filename
        self.framerate = framerate
        self.nchannels = 1
        self.sampwidth = 2  # bytes per sample -> 16-bit audio
        self.bits = self.sampwidth * 8
        self.bound = 2 ** (self.bits - 1) - 1  # max int16 amplitude
        self.fmt = "h"
        self.dtype = np.int16
        self.fp = open_wave(self.filename, "w")
        self.fp.setnchannels(self.nchannels)
        self.fp.setsampwidth(self.sampwidth)
        self.fp.setframerate(self.framerate)

    def write(self, wave):
        """Writes a wave.

        wave: Wave
        """
        zs = wave.quantize(self.bound, self.dtype)
        # ndarray.tostring() was removed in NumPy 2.0; tobytes() is the
        # documented replacement and returns the same raw sample bytes.
        self.fp.writeframes(zs.tobytes())

    def close(self, duration=0):
        """Closes the file.

        duration: how many seconds of silence to append
        """
        if duration:
            self.write(rest(duration))
        self.fp.close()
def play_wave(filename="sound.wav", player="aplay"):
    """Plays a wave file.

    filename: string
    player: string name of executable that plays wav files
    """
    command = "%s %s" % (player, filename)
    # shell=True so that `player` may carry its own flags; wait for it to finish.
    process = subprocess.Popen(command, shell=True)
    process.communicate()
def find_index(x, xs):
    """Find the index corresponding to a given value in an array.

    Assumes xs is evenly spaced; interpolates linearly between the endpoints.
    """
    n = len(xs)
    span = xs[-1] - xs[0]
    return int(round((n - 1) * (x - xs[0]) / span))
class _SpectrumParent:
"""Contains code common to Spectrum and DCT.
"""
def __init__(self, hs, fs, framerate, full=False):
"""Initializes a spectrum.
hs: array of amplitudes (real or complex)
fs: array of frequencies
framerate: frames per second
full: boolean to indicate full or real FFT
"""
self.hs = np.asanyarray(hs)
self.fs = np.asanyarray(fs)
self.framerate = framerate
self.full = full
@property
def max_freq(self):
"""Returns the Nyquist frequency for this spectrum."""
return self.framerate / 2
@property
def amps(self):
"""Returns a sequence of amplitudes (read-only property)."""
return np.absolute(self.hs)
@property
def power(self):
"""Returns a sequence of powers (read-only property)."""
return self.amps ** 2
def copy(self):
"""Makes a copy.
Returns: new Spectrum
"""
return copy.deepcopy(self)
def plot(self, high=None, **options):
"""Plots amplitude vs frequency.
Note: if this is a full spectrum, it ignores low and high
high: frequency to cut off at
"""
if self.full:
fs, amps = self.render_full(high)
plt.plot(fs, amps, **options)
else:
i = None if high is None else find_index(high, self.fs)
plt.plot(self.fs[:i], self.amps[:i], **options)
def plot_power(self, high=None, **options):
"""Plots power vs frequency.
high: frequency to cut off at
"""
if self.full:
fs, amps = self.render_full(high)
plt.plot(fs, amps ** 2, **options)
else:
i = None if high is None else find_index(high, self.fs)
plt.plot(self.fs[:i], self.power[:i], **options)
class Spectrum(_SpectrumParent):
    """Represents the spectrum of a signal."""

    def __len__(self):
        """Number of frequency components."""
        return len(self.hs)

    def __add__(self, other):
        """Adds two spectrums elementwise.

        other: Spectrum

        returns: new Spectrum
        """
        if other == 0:
            # Supports sum(), whose accumulator starts at 0.
            return self.copy()
        assert all(self.fs == other.fs)
        return Spectrum(self.hs + other.hs, self.fs, self.framerate, self.full)

    __radd__ = __add__

    def __mul__(self, other):
        """Multiplies two spectrums elementwise.

        other: Spectrum

        returns: new Spectrum
        """
        assert all(self.fs == other.fs)
        return Spectrum(self.hs * other.hs, self.fs, self.framerate, self.full)

    def make_wave(self):
        """Transforms to the time domain.

        returns: Wave
        """
        inverse = np.fft.ifft if self.full else np.fft.irfft
        ys = inverse(self.hs)
        # NOTE: whatever the start time was, we lose it when we transform
        # back; we could fix that by saving start time in the Spectrum.
        # ts = self.start + np.arange(len(ys)) / self.framerate
        return Wave(ys, framerate=self.framerate)
class Spectrogram:
    """Represents the spectrum of a signal over time."""

    def __init__(self, spec_map, seg_length):
        """Initialize the spectrogram.

        spec_map: map from float time to Spectrum
        seg_length: number of samples in each segment
        """
        self.spec_map = spec_map
        self.seg_length = seg_length

    def any_spectrum(self):
        """Returns an arbitrary spectrum from the spectrogram."""
        index = next(iter(self.spec_map))
        return self.spec_map[index]

    @property
    def time_res(self):
        """Time resolution in seconds."""
        spectrum = self.any_spectrum()
        return float(self.seg_length) / spectrum.framerate

    @property
    def freq_res(self):
        """Frequency resolution in Hz."""
        return self.any_spectrum().freq_res

    def times(self):
        """Sorted sequence of times.

        returns: sequence of float times in seconds
        """
        return sorted(iter(self.spec_map))

    def frequencies(self):
        """Sequence of frequencies.

        returns: sequence of float freqencies in Hz.
        """
        return self.any_spectrum().fs

    def _amp_array(self, fs, ts, i):
        """Build the (freq, time) amplitude array shared by plot/get_data.

        fs: truncated frequency array; ts: times; i: cutoff index or None.
        """
        # np.float was removed in NumPy 1.20+; the builtin float is the
        # documented equivalent (it was only ever an alias).
        array = np.zeros((len(fs), len(ts)), dtype=float)
        # copy amplitude from each spectrum into a column of the array
        for j, t in enumerate(ts):
            array[:, j] = self.spec_map[t].amps[:i]
        return array

    def plot(self, high=None, **options):
        """Make a pseudocolor plot.

        high: highest frequency component to plot
        """
        fs = self.frequencies()
        i = None if high is None else find_index(high, fs)
        fs = fs[:i]
        ts = self.times()
        array = self._amp_array(fs, ts, i)
        underride(options, cmap='inferno_r')
        plt.pcolor(ts, fs, array, **options)

    def get_data(self, high=None, **options):
        """Returns spectogram as 2D numpy array

        high: highest frequency component to return
        """
        fs = self.frequencies()
        i = None if high is None else find_index(high, fs)
        fs = fs[:i]
        ts = self.times()
        return self._amp_array(fs, ts, i)

    def make_wave(self):
        """Inverts the spectrogram and returns a Wave.

        returns: Wave
        """
        res = []
        for t, spectrum in sorted(self.spec_map.items()):
            wave = spectrum.make_wave()
            n = len(wave)
            # Undo the analysis window (Hamming is nonzero everywhere,
            # so the reciprocal is well defined).
            window = 1 / np.hamming(n)
            wave.window(window)
            i = wave.find_index(t)
            start = i - n // 2
            end = start + n
            res.append((start, end, wave))
        starts, ends, waves = zip(*res)
        low = min(starts)
        high = max(ends)
        ys = np.zeros(high - low, float)
        # NOTE(review): segments overwrite rather than overlap-add, which
        # assumes non-overlapping analysis segments — confirm against the
        # corresponding make_spectrogram implementation.
        for start, end, wave in res:
            ys[start:end] = wave.ys
        # ts = np.arange(len(ys)) / self.framerate
        return Wave(ys, framerate=wave.framerate)
class Wave:
"""Represents a discrete-time waveform.
"""
def __init__(self, ys, ts=None, framerate=None):
"""Initializes the wave.
ys: wave array
ts: array of times
framerate: samples per second
"""
self.ys = np.asanyarray(ys)
self.framerate = framerate if framerate is not None else 11025
if ts is None:
self.ts = np.arange(len(ys)) / self.framerate
else:
self.ts = np.asanyarray(ts)
def copy(self):
"""Makes a copy.
Returns: new Wave
"""
return copy.deepcopy(self)
def __len__(self):
return len(self.ys)
@property
def start(self):
return self.ts[0]
@property
def end(self):
return self.ts[-1]
@property
def duration(self):
"""Duration (property).
returns: float duration in seconds
"""
return len(self.ys) / self.framerate
def __add__(self, other):
"""Adds two waves elementwise.
other: Wave
returns: new Wave
"""
if other == 0:
return self
assert self.framerate == other.framerate
# make an array of times that covers both waves
start = min(self.start, other.start)
end = max(self.end, other.end)
n = int(round((end - start) * self.framerate)) + 1
ys = np.zeros(n)
ts = start + np.arange(n) / self.framerate
def add_ys(wave):
i = find_index(wave.start, ts)
# make sure the arrays line up reasonably well
diff = ts[i] - wave.start
dt = 1 / wave.framerate
if (diff / dt) > 0.1:
warnings.warn(
"Can't add these waveforms; their " "time arrays don't line up."
)
j = i + len(wave)
ys[i:j] += wave.ys
add_ys(self)
add_ys(other)
return Wave(ys, ts, self.framerate)
__radd__ = __add__
def quantize(self, bound, dtype):
"""Maps the waveform to quanta.
bound: maximum amplitude
dtype: numpy data type or string
returns: quantized signal
"""
return quantize(self.ys, bound, dtype)
def window(self, window):
"""Apply a window to the wave.
window: sequence of multipliers, same length as self.ys
"""
self.ys *= window
def normalize(self, amp=1.0):
"""Normalizes the signal to the given amplitude.
amp: float amplitude
"""
self.ys = normalize(self.ys, amp=amp)
def slice(self, i, j):
"""Makes a slice from a Wave.
i: first slice index
j: second slice index
"""
ys = self.ys[i:j].copy()
ts = self.ts[i:j].copy()
return Wave(ys, ts, self.framerate)
#
def make_spectrum(self, full=False):
"""Computes the spectrum using FFT.
full: boolean, whethere to compute a full FFT
(as opposed to a real FFT)
returns: Spectrum
"""
n = len(self.ys)
d = 1 / self.framerate
if full:
hs = np.fft.fft(self.ys)
fs = np.fft.fftfreq(n, d)
else:
hs = np.fft.rfft(self.ys)
| |
import json
import os
from sqlalchemy import (
Table, Column, Integer, String, UniqueConstraint, MetaData
)
from sqlalchemy.types import UserDefinedType
from twisted.trial.unittest import TestCase
from aludel.database import (
get_engine, make_table, CollectionMissingError, _PrefixedTables,
CollectionMetadata, TableCollection,
)
from .doubles import FakeReactorThreads
class DatabaseTestCase(TestCase):
    """Base test case providing a clean engine and connection per test."""

    def setUp(self):
        dsn = os.environ.get(
            "ALUDEL_TEST_CONNECTION_STRING", "sqlite://")
        self.engine = get_engine(dsn, reactor=FakeReactorThreads())
        self._drop_tables()  # start each test from an empty schema
        self.conn = self.successResultOf(self.engine.connect())

    def tearDown(self):
        self.successResultOf(self.conn.close())
        self._drop_tables()
        assert self.successResultOf(self.engine.table_names()) == []

    def _drop_tables(self):
        # NOTE: This is a blocking operation!
        metadata = MetaData(bind=self.engine._engine)
        metadata.reflect()
        metadata.drop_all()
class Test_PrefixedTables(DatabaseTestCase):
    """Tests for the abstract _PrefixedTables base class."""

    def test_get_table_name_not_implemented(self):
        """
        .get_table_name() should raise a NotImplementedError.
        """
        tables = _PrefixedTables("prefix", self.conn)
        err = self.assertRaises(
            NotImplementedError, tables.get_table_name, 'foo')
        assert err.args[0] == "_PrefixedTables should not be used directly."

    def test_exists_not_implemented(self):
        """
        .exists() should raise a NotImplementedError.
        """
        tables = _PrefixedTables("prefix", self.conn)
        err = self.assertRaises(NotImplementedError, tables.exists)
        assert err.args[0] == "_PrefixedTables should not be used directly."

    def test__execute_query_happy(self):
        """
        ._execute_query() should query the database and return a result.
        """
        tables = _PrefixedTables("prefix", self.conn)
        result = self.successResultOf(tables._execute_query("SELECT 42;"))
        rows = self.successResultOf(result.fetchall())
        assert rows == [(42,)]

    def test__execute_error(self):
        """
        ._execute_query() should fail if given an invalid query.
        """
        tables = _PrefixedTables("prefix", self.conn)
        self.failureResultOf(tables._execute_query("SELECT ;;"))

    def test_execute_query_not_implemented(self):
        """
        .execute_query() should raise a NotImplementedError.
        """
        tables = _PrefixedTables("prefix", self.conn)
        err = self.assertRaises(
            NotImplementedError, tables.execute_query, "SELECT 42;")
        assert err.args[0] == "_PrefixedTables should not be used directly."

    def test_execute_fetchall_not_implemented(self):
        """
        .execute_fetchall() should raise a NotImplementedError.
        """
        tables = _PrefixedTables("prefix", self.conn)
        err = self.assertRaises(
            NotImplementedError, tables.execute_fetchall, "SELECT 42;")
        assert err.args[0] == "_PrefixedTables should not be used directly."
class TestCollectionMetadata(DatabaseTestCase):
def test_create_new(self):
    """
    .create() should create the appropriately named table.
    """
    cm = CollectionMetadata('MyTables', self.conn)
    table_name = cm.collection_metadata.name
    assert self.successResultOf(self.engine.has_table(table_name)) is False
    assert self.successResultOf(cm.exists()) is False
    self.successResultOf(cm.create())
    assert self.successResultOf(self.engine.has_table(table_name)) is True
    assert self.successResultOf(cm.exists()) is True
def test_create_exists(self):
    """
    .create() should do nothing if the table already exists.
    """
    cm = CollectionMetadata('MyTables', self.conn)
    self.successResultOf(cm.create())
    table_name = cm.collection_metadata.name
    assert self.successResultOf(self.engine.has_table(table_name)) is True
    assert self.successResultOf(cm.exists()) is True
    # Create again, assert that everything still exists.
    self.successResultOf(cm.create())
    assert self.successResultOf(self.engine.has_table(table_name)) is True
    assert self.successResultOf(cm.exists()) is True
def test_collection_exists_no_table(self):
    """
    .collection_exists() should return None if the metadata table does not
    exist.
    """
    cm = CollectionMetadata('MyTables', self.conn)
    result = self.successResultOf(cm.collection_exists('foo'))
    assert result is None
def test_collection_exists_no_metadata(self):
"""
.collection_exists() should return False if there is no metadata for
the provided name.
"""
cmd = CollectionMetadata('MyTables', self.conn)
self.successResultOf(cmd.create())
assert self.successResultOf(cmd.collection_exists('foo')) is False
def test_collection_exists_with_metadata(self):
"""
.collection_exists() should return True if there is metadata for the
provided name.
"""
cmd = CollectionMetadata('MyTables', self.conn)
self.successResultOf(cmd.create())
self.successResultOf(cmd.create_collection('foo', {'bar': 'baz'}))
assert self.successResultOf(cmd.collection_exists('foo')) is True
def test_collection_exists_cached(self):
"""
.collection_exists() should return a cached result for the provided
name.
"""
cmd = CollectionMetadata('MyTables', self.conn)
self.successResultOf(cmd.create())
cmd._existence_cache['foo'] = True
assert self.successResultOf(cmd.collection_exists('foo')) is True
def test_get_metadata_no_table(self):
"""
.get_metadata() should fail with CollectionMissingError if the metadata
table does not exist.
"""
cmd = CollectionMetadata('MyTables', self.conn)
self.failureResultOf(cmd.get_metadata('foo'), CollectionMissingError)
def test_get_metadata_missing_collection(self):
"""
.get_metadata() should fail with CollectionMissingError if there is no
metadata for the provided name.
"""
cmd = CollectionMetadata('MyTables', self.conn)
self.successResultOf(cmd.create())
self.failureResultOf(cmd.get_metadata('foo'), CollectionMissingError)
def test_get_metadata(self):
"""
.get_metadata() should fetch metadata from the database.
"""
cmd = CollectionMetadata('MyTables', self.conn)
self.successResultOf(cmd.create())
self.successResultOf(cmd.create_collection('foo', {'bar': 'baz'}))
assert self.successResultOf(cmd.get_metadata('foo')) == {'bar': 'baz'}
def test_get_metadata_updates_existence_cache(self):
"""
.get_metadata() should update the existence cache.
"""
cmd = CollectionMetadata('MyTables', self.conn)
self.successResultOf(cmd.create())
self.successResultOf(cmd.create_collection('foo', {'bar': 'baz'}))
# Set this back to False because create_collection updated it.
cmd._existence_cache['foo'] = False
assert self.successResultOf(cmd.get_metadata('foo')) == {'bar': 'baz'}
assert cmd._existence_cache['foo'] is True
def test_get_metadata_updates_existence_cache_missing_collection(self):
"""
.get_metadata() should update the existence cache.
"""
cmd = CollectionMetadata('MyTables', self.conn)
self.successResultOf(cmd.create())
assert 'foo' not in cmd._existence_cache
self.failureResultOf(cmd.get_metadata('foo'), CollectionMissingError)
assert cmd._existence_cache['foo'] is False
def test_get_all_metadata(self):
"""
.get_all_metadata() should fetch all metadata from the database.
"""
cmd = CollectionMetadata('MyTables', self.conn)
self.successResultOf(cmd.create())
self.successResultOf(cmd.create_collection('foo', {'a': 1}))
self.successResultOf(cmd.create_collection('bar', {'b': 2}))
metadata = self.successResultOf(cmd.get_all_metadata())
assert metadata == {'foo': {'a': 1}, 'bar': {'b': 2}}
def test__decode_all_metadata_with_none(self):
"""
._decode_all_metadata() should ignore empty metadata entries.
"""
cmd = CollectionMetadata('MyTables', None)
metadata = {'foo': json.dumps({'a': 1}), 'bar': None}
assert cmd._decode_all_metadata(metadata) == {'foo': {'a': 1}}
def test_set_metadata(self):
"""
.set_metadata() should update the database.
"""
cmd = CollectionMetadata('MyTables', self.conn)
self.successResultOf(cmd.create())
self.successResultOf(cmd.create_collection('foo'))
assert self.successResultOf(cmd.get_metadata('foo')) == {}
self.successResultOf(cmd.set_metadata('foo', {'bar': 'baz'}))
assert self.successResultOf(cmd.get_metadata('foo')) == {'bar': 'baz'}
def test_create_collection_no_table(self):
"""
.create_collection() should call .create() before creating the
collection if the metadata table does not exist.
"""
cmd = CollectionMetadata('MyTables', self.conn)
self.successResultOf(cmd.create_collection('foo'))
assert cmd._existence_cache['foo'] is True
assert self.successResultOf(cmd.get_metadata('foo')) == {}
def test_create_collection_no_metadata(self):
"""
.create_collection() should create a collection metadata entry with an
empty dict if no metadata is provided.
"""
cmd = CollectionMetadata('MyTables', self.conn)
self.successResultOf(cmd.create())
self.successResultOf(cmd.create_collection('foo'))
assert cmd._existence_cache['foo'] is True
assert self.successResultOf(cmd.get_metadata('foo')) == {}
def test_create_collection_with_metadata(self):
"""
.create_collection() should create a collection metadata entry with the
provided metadata.
"""
cmd = CollectionMetadata('MyTables', self.conn)
self.successResultOf(cmd.create())
self.successResultOf(cmd.create_collection('foo', {'bar': 'baz'}))
assert cmd._existence_cache['foo'] is True
assert self.successResultOf(cmd.get_metadata('foo')) == {'bar': 'baz'}
class TestTableCollection(DatabaseTestCase):
    def _get_cmd(self, collection_cls):
        """
        Create and return a CollectionMetadata instance for collection_cls.
        """
        # The metadata table is created eagerly so tests can assume it exists.
        cmd = CollectionMetadata(collection_cls.collection_type(), self.conn)
        self.successResultOf(cmd.create())
        return cmd
def test_collection_type_class_name(self):
"""
.collection_type() should return the class name if the COLLECTION_TYPE
attr is unset.
"""
class MyTables(TableCollection):
pass
assert MyTables.collection_type() == 'MyTables'
my_tables = MyTables("prefix", connection=None)
assert my_tables.collection_type() == 'MyTables'
def test_collection_type_explicit_name(self):
"""
.collection_type() should return the COLLECTION_TYPE attr if set.
"""
class MyTables(TableCollection):
COLLECTION_TYPE = 'YourTables'
assert MyTables.collection_type() == 'YourTables'
my_tables = MyTables("prefix", connection=None)
assert my_tables.collection_type() == 'YourTables'
    def test_init_uses_provided_collection_metadata(self):
        """
        TableCollection should use the collection_metadata it's given, if any.
        """
        cmd = self._get_cmd(TableCollection)
        my_tables = TableCollection("foo", None, collection_metadata=cmd)
        # Identity check: the exact object passed in must be stored.
        assert my_tables._collection_metadata is cmd
    def test_init_uses_builds_collection_metadata(self):
        """
        TableCollection should build a collection_metadata if none is given.
        """
        my_tables = TableCollection("foo", None)
        # A default CollectionMetadata is constructed when none is supplied.
        assert isinstance(my_tables._collection_metadata, CollectionMetadata)
def test_get_table_name(self):
"""
.get_table_name() should build an appropriate table name from the
collection type, collection name, and table name.
"""
class MyTables(TableCollection):
pass
my_tables = MyTables("prefix", connection=None)
assert my_tables.get_table_name("thing") == "MyTables_prefix_thing"
    def test_make_table(self):
        """
        Class attributes built by make_table() should be replaced by instance
        attributes that are SQLAlchemy Table instances with the correct table
        names.
        """
        class MyTables(TableCollection):
            # Three columns plus a table-level constraint; the constraint
            # must not count towards the column total below.
            tbl = make_table(
                Column("id", Integer(), primary_key=True),
                Column("value", String(255)),
                Column("other_value", String(255)),
                UniqueConstraint("value", "other_value"),
            )
        my_tables_1 = MyTables("prefix1", self.conn)
        assert isinstance(my_tables_1.tbl, Table)
        assert my_tables_1.tbl.name == 'MyTables_prefix1_tbl'
        assert len(my_tables_1.tbl.c) == 3
        # Make another instance to check that things aren't bound improperly.
        my_tables_2 = MyTables("prefix2", self.conn)
        assert isinstance(my_tables_2.tbl, Table)
        assert my_tables_2.tbl.name == 'MyTables_prefix2_tbl'
        assert len(my_tables_2.tbl.c) == 3
    def test_create_tables_with_metadata(self):
        """
        .create_tables() should create the tables belonging to the collection
        and set metadata.
        """
        class MyTables(TableCollection):
            tbl1 = make_table(
                Column("id", Integer(), primary_key=True),
                Column("value", String(255)),
            )
            tbl2 = make_table(
                Column("id", Integer(), primary_key=True),
                Column("other_value", String(255)),
            )
        cmd = self._get_cmd(MyTables)
        my_tables = MyTables("foo", self.conn, cmd)
        # Check that the tables don't already exist.
        assert self.successResultOf(my_tables.exists()) is False
        self.failureResultOf(self.conn.execute(my_tables.tbl1.select()))
        self.failureResultOf(self.conn.execute(my_tables.tbl2.select()))
        # Create the tables and check that they exist.
        self.successResultOf(my_tables.create_tables(metadata={'bar': 'baz'}))
        assert self.successResultOf(my_tables.exists()) is True
        self.successResultOf(self.conn.execute(my_tables.tbl1.select()))
        self.successResultOf(self.conn.execute(my_tables.tbl2.select()))
        # The supplied metadata must have been stored for the collection.
        assert self.successResultOf(cmd.get_metadata("foo")) == {'bar': 'baz'}
    def test_create_tables_no_metadata(self):
        """
        .create_tables() should create the tables belonging to the collection
        and set metadata. If no metadata is provided, an empty dict should be
        used.
        """
        class MyTables(TableCollection):
            tbl1 = make_table(
                Column("id", Integer(), primary_key=True),
                Column("value", String(255)),
            )
            tbl2 = make_table(
                Column("id", Integer(), primary_key=True),
                Column("other_value", String(255)),
            )
        cmd = self._get_cmd(MyTables)
        my_tables = MyTables("foo", self.conn, cmd)
        # Check that the tables don't already exist.
        assert self.successResultOf(my_tables.exists()) is False
        self.failureResultOf(self.conn.execute(my_tables.tbl1.select()))
        self.failureResultOf(self.conn.execute(my_tables.tbl2.select()))
        # Create the tables and check that they exist.
        self.successResultOf(my_tables.create_tables())
        assert self.successResultOf(my_tables.exists()) is True
        self.successResultOf(self.conn.execute(my_tables.tbl1.select()))
        self.successResultOf(self.conn.execute(my_tables.tbl2.select()))
        # No metadata argument -> an empty dict is stored.
        assert self.successResultOf(cmd.get_metadata("foo")) == {}
    def test_create_tables_already_exists(self):
        """
        .create_tables() should do nothing if the tables already exist.
        """
        class MyTables(TableCollection):
            tbl = make_table(
                Column("id", Integer(), primary_key=True),
                Column("value", String(255)),
            )
        cmd = self._get_cmd(MyTables)
        my_tables = MyTables("foo", self.conn, cmd)
        # Create the tables and check that they exist.
        self.successResultOf(my_tables.create_tables(metadata={'bar': 'baz'}))
        assert self.successResultOf(my_tables.exists()) is True
        assert self.successResultOf(cmd.get_metadata("foo")) == {'bar': 'baz'}
        # Create the tables again and check that nothing changes.
        # In particular, the new metadata {'a': 'b'} must NOT replace the old.
        self.successResultOf(my_tables.create_tables(metadata={'a': 'b'}))
        assert self.successResultOf(my_tables.exists()) is True
        assert self.successResultOf(cmd.get_metadata("foo")) == {'bar': 'baz'}
    def test_create_tables_error(self):
        """
        .create_tables() should fail if the tables can't be created.
        """
        class BrokenType(UserDefinedType):
            # Deliberately emits invalid SQL so CREATE TABLE fails.
            def get_col_spec(self):
                return "BROKEN;;"
        class MyTables(TableCollection):
            tbl = make_table(
                Column("id", Integer(), primary_key=True),
                Column("value", BrokenType()),
            )
        my_tables = MyTables("prefix", self.conn)
        self.failureResultOf(my_tables.create_tables())
def test_get_metadata(self):
"""
.get_metadata() should fetch the metadata for this collection.
"""
class MyTables(TableCollection):
tbl = make_table(
Column("id", Integer(), primary_key=True),
Column("value", String(255)),
| |
"""Implement pygmo optimizers."""
import warnings
import numpy as np
from estimagic import batch_evaluators
from estimagic.config import IS_PYGMO_INSTALLED
from estimagic.decorators import mark_minimizer
from estimagic.exceptions import NotInstalledError
from estimagic.optimization.algo_options import CONVERGENCE_RELATIVE_PARAMS_TOLERANCE
from estimagic.optimization.algo_options import (
STOPPING_MAX_CRITERION_EVALUATIONS_GLOBAL,
)
# Shared default generation cap for the genetic-type pygmo algorithms below.
STOPPING_MAX_ITERATIONS_GENETIC = 250
# pygmo is an optional dependency; availability is signalled via
# IS_PYGMO_INSTALLED on each minimizer, so an ImportError is deliberately
# swallowed here instead of failing at module import time.
try:
    import pygmo as pg
except ImportError:
    pass
@mark_minimizer(
    name="pygmo_gaco",
    primary_criterion_entry="value",
    parallelizes=True,
    needs_scaling=True,
    disable_cache=False,
    is_available=IS_PYGMO_INSTALLED,
)
def pygmo_gaco(
    criterion,
    x,
    lower_bounds,
    upper_bounds,
    *,
    population_size=None,
    batch_evaluator=None,
    n_cores=1,
    seed=None,
    discard_start_params=False,
    #
    stopping_max_iterations=STOPPING_MAX_ITERATIONS_GENETIC,
    kernel_size=63,
    speed_parameter_q=1.0,
    oracle=0.0,
    accuracy=0.01,
    threshold=1,
    speed_of_std_values_convergence=7,
    stopping_max_n_without_improvements=100000,
    stopping_max_criterion_evaluations=STOPPING_MAX_CRITERION_EVALUATIONS_GLOBAL,
    focus=0.0,
    cache=False,
):
    """Minimize a scalar function using the generalized ant colony algorithm.

    All bounds must be finite. For details see :ref:`list_of_pygmo_algorithms`.
    """
    _check_that_every_param_is_bounded(lower_bounds, upper_bounds)

    # Enforce gaco's documented minimum population of 64.
    population_size = _determine_population_size(
        population_size=population_size, x=x, lower_bound=64
    )

    # pygmo's n_gen_mark must be integral; reject e.g. 7.5 with a clear error
    # instead of silently truncating it below.
    if isinstance(speed_of_std_values_convergence, float):
        if not speed_of_std_values_convergence.is_integer():
            raise ValueError(
                "The speed_of_std_values_convergence parameter must be an integer. "
                f"You specified {speed_of_std_values_convergence}."
            )

    # Translate estimagic's descriptive names to pygmo's short option names.
    algo_specific_options = dict(
        gen=int(stopping_max_iterations),
        ker=kernel_size,
        q=speed_parameter_q,
        oracle=oracle,
        acc=accuracy,
        threshold=threshold,
        n_gen_mark=int(speed_of_std_values_convergence),
        impstop=stopping_max_n_without_improvements,
        evalstop=stopping_max_criterion_evaluations,
        focus=focus,
        memory=cache,
    )

    algo_options = _create_algo_options(
        population_size=population_size,
        n_cores=n_cores,
        seed=seed,
        discard_start_params=discard_start_params,
        batch_evaluator=batch_evaluator,
        algo_specific_options=algo_specific_options,
    )

    return _minimize_pygmo(
        criterion=criterion,
        x=x,
        lower_bounds=lower_bounds,
        upper_bounds=upper_bounds,
        method="gaco",
        algo_options=algo_options,
    )
@mark_minimizer(
    name="pygmo_bee_colony",
    primary_criterion_entry="value",
    parallelizes=True,
    needs_scaling=True,
    disable_cache=False,
    is_available=IS_PYGMO_INSTALLED,
)
def pygmo_bee_colony(
    criterion,
    x,
    lower_bounds,
    upper_bounds,
    *,
    stopping_max_iterations=STOPPING_MAX_ITERATIONS_GENETIC,
    batch_evaluator=None,
    n_cores=1,
    seed=None,
    discard_start_params=False,
    max_n_trials=1,
    population_size=None,
):
    """Minimize a scalar function using the artificial bee colony algorithm.

    For details see :ref:`list_of_pygmo_algorithms`.
    """
    # bee_colony needs at least 20 particles.
    population_size = _determine_population_size(
        population_size=population_size, x=x, lower_bound=20
    )

    # "limit" is pygmo's name for the trial budget before a food source is
    # abandoned; "gen" is the number of generations.
    algo_specific_options = {
        "limit": max_n_trials,
        "gen": int(stopping_max_iterations),
    }
    algo_options = _create_algo_options(
        population_size=population_size,
        n_cores=n_cores,
        seed=seed,
        discard_start_params=discard_start_params,
        batch_evaluator=batch_evaluator,
        algo_specific_options=algo_specific_options,
    )

    return _minimize_pygmo(
        criterion=criterion,
        x=x,
        lower_bounds=lower_bounds,
        upper_bounds=upper_bounds,
        method="bee_colony",
        algo_options=algo_options,
    )
@mark_minimizer(
    name="pygmo_de",
    primary_criterion_entry="value",
    parallelizes=True,
    needs_scaling=True,
    disable_cache=False,
    is_available=IS_PYGMO_INSTALLED,
)
def pygmo_de(
    criterion,
    x,
    lower_bounds,
    upper_bounds,
    *,
    population_size=None,
    batch_evaluator=None,
    n_cores=1,
    seed=None,
    discard_start_params=False,
    stopping_max_iterations=STOPPING_MAX_ITERATIONS_GENETIC,
    weight_coefficient=0.8,
    crossover_probability=0.9,
    mutation_variant="rand/1/exp",
    convergence_criterion_tolerance=1e-6,
    convergence_relative_params_tolerance=CONVERGENCE_RELATIVE_PARAMS_TOLERANCE,
):
    """Minimize a scalar function using the differential evolution algorithm.
    For details see :ref:`list_of_pygmo_algorithms`.
    """
    population_size = _determine_population_size(
        population_size=population_size, x=x, lower_bound=10
    )
    # support both integer and string specification of the mutation variant
    mutation_variant_str_to_int = {
        "best/1/exp": 1,
        "rand/1/exp": 2,
        "rand-to-best/1/exp": 3,
        "best/2/exp": 4,
        "rand/2/exp": 5,
        "best/1/bin": 6,
        "rand/1/bin": 7,
        "rand-to-best/1/bin": 8,
        "best/2/bin": 9,
        "rand/2/bin": 10,
    }
    mutation_variant = _convert_str_to_int(
        str_to_int=mutation_variant_str_to_int, value=mutation_variant
    )
    # Map estimagic's descriptive parameter names to pygmo's short option
    # names (F = weight coefficient, CR = crossover probability).
    algo_specific_options = {
        "gen": int(stopping_max_iterations),
        "F": weight_coefficient,
        "CR": crossover_probability,
        "variant": mutation_variant,
        "ftol": convergence_criterion_tolerance,
        "xtol": convergence_relative_params_tolerance,
    }
    algo_options = _create_algo_options(
        population_size=population_size,
        n_cores=n_cores,
        seed=seed,
        discard_start_params=discard_start_params,
        batch_evaluator=batch_evaluator,
        algo_specific_options=algo_specific_options,
    )
    res = _minimize_pygmo(
        criterion=criterion,
        x=x,
        lower_bounds=lower_bounds,
        upper_bounds=upper_bounds,
        method="de",
        algo_options=algo_options,
    )
    return res
@mark_minimizer(
    name="pygmo_sea",
    primary_criterion_entry="value",
    parallelizes=True,
    needs_scaling=True,
    disable_cache=False,
    is_available=IS_PYGMO_INSTALLED,
)
def pygmo_sea(
    criterion,
    x,
    lower_bounds,
    upper_bounds,
    *,
    population_size=None,
    batch_evaluator=None,
    n_cores=1,
    seed=None,
    discard_start_params=False,
    stopping_max_iterations=10_000,  # Each generation will compute the objective once
):
    r"""Minimize a scalar function using the (N+1)-ES simple evolutionary algorithm.

    All bounds must be finite. For details see :ref:`list_of_pygmo_algorithms`.
    """
    _check_that_every_param_is_bounded(lower_bounds, upper_bounds)

    population_size = _determine_population_size(
        population_size=population_size, x=x, lower_bound=10
    )

    # sea only takes the number of generations as an algorithm option.
    algo_specific_options = {"gen": int(stopping_max_iterations)}
    algo_options = _create_algo_options(
        population_size=population_size,
        n_cores=n_cores,
        seed=seed,
        discard_start_params=discard_start_params,
        batch_evaluator=batch_evaluator,
        algo_specific_options=algo_specific_options,
    )

    return _minimize_pygmo(
        criterion=criterion,
        x=x,
        lower_bounds=lower_bounds,
        upper_bounds=upper_bounds,
        method="sea",
        algo_options=algo_options,
    )
@mark_minimizer(
    name="pygmo_sga",
    primary_criterion_entry="value",
    parallelizes=True,
    needs_scaling=True,
    disable_cache=False,
    is_available=IS_PYGMO_INSTALLED,
)
def pygmo_sga(
    criterion,
    x,
    lower_bounds,
    upper_bounds,
    *,
    population_size=None,
    batch_evaluator=None,
    n_cores=1,
    seed=None,
    discard_start_params=False,
    stopping_max_iterations=STOPPING_MAX_ITERATIONS_GENETIC,
    crossover_probability=0.9,
    crossover_strategy="exponential",
    eta_c=None,
    mutation_probability=0.02,
    mutation_strategy="polynomial",
    mutation_polynomial_distribution_index=None,
    mutation_gaussian_width=None,
    selection_strategy="tournament",
    selection_truncated_n_best=None,
    selection_tournament_size=None,
):
    """Minimize a scalar function using a simple genetic algorithm.
    For details see :ref:`list_of_pygmo_algorithms`.
    """
    _check_that_every_param_is_bounded(lower_bounds, upper_bounds)
    population_size = _determine_population_size(
        population_size=population_size, x=x, lower_bound=64
    )
    # The next four warnings flag strategy-specific tuning parameters that
    # were supplied but will be ignored because a different strategy is used.
    if eta_c is not None and crossover_strategy != "sbx":
        warnings.warn(
            f"You specified crossover strategy {crossover_strategy} and eta_c. "
            "However, eta_c is ignored because it is only used when the "
            "crossover_strategy is set to sbx."
        )
    eta_c = 1.0 if eta_c is None else eta_c
    if (
        mutation_polynomial_distribution_index is not None
    ) and mutation_strategy != "polynomial":
        warnings.warn(
            "You specified a mutation_polynomial_distribution_index but did not choose "
            "polynomial as your mutation_strategy. Thus, "
            "mutation_polynomial_distribution_index will be ignored."
        )
    if mutation_gaussian_width is not None and mutation_strategy != "gaussian":
        warnings.warn(
            "You specified a mutation_gaussian_width but did not choose gaussion as "
            "your mutation_strategy. Thus, mutation_gaussian_width will be ignored."
        )
    if selection_strategy != "truncated" and selection_truncated_n_best is not None:
        warnings.warn(
            "You specified selection_truncated_n_best but did not specify truncated as "
            "your selection strategy. Therefore, selection_truncated_n_best is ignored."
        )
    if selection_strategy != "tournament" and selection_tournament_size is not None:
        warnings.warn(
            "You specified selection_tournament_size but did not specify tournament as "
            "your selection strategy. Therefore, selection_tournament_size is ignored."
        )
    # pygmo multiplexes one "param_m" over both mutation strategies; pick the
    # value that matches the chosen strategy, defaulting to 1.0.
    if mutation_strategy == "gaussian" and mutation_gaussian_width is not None:
        param_m = mutation_gaussian_width
    elif (
        mutation_strategy == "polynomial"
        and mutation_polynomial_distribution_index is not None
    ):
        param_m = mutation_polynomial_distribution_index
    else:
        param_m = 1.0
    # Likewise "param_s" is shared between the selection strategies.
    if selection_strategy == "truncated" and selection_truncated_n_best is not None:
        param_s = selection_truncated_n_best
    elif selection_strategy == "tournament" and selection_tournament_size is not None:
        param_s = selection_tournament_size
    else:
        param_s = 2
    algo_specific_options = {
        "gen": int(stopping_max_iterations),
        "cr": crossover_probability,
        "eta_c": eta_c,
        "m": mutation_probability,
        "param_m": param_m,
        "crossover": crossover_strategy,
        "mutation": mutation_strategy,
        "selection": selection_strategy,
        "param_s": param_s,
    }
    algo_options = _create_algo_options(
        population_size=population_size,
        n_cores=n_cores,
        seed=seed,
        discard_start_params=discard_start_params,
        batch_evaluator=batch_evaluator,
        algo_specific_options=algo_specific_options,
    )
    res = _minimize_pygmo(
        criterion=criterion,
        x=x,
        lower_bounds=lower_bounds,
        upper_bounds=upper_bounds,
        method="sga",
        algo_options=algo_options,
    )
    return res
@mark_minimizer(
    name="pygmo_sade",
    primary_criterion_entry="value",
    parallelizes=True,
    needs_scaling=True,
    disable_cache=False,
    is_available=IS_PYGMO_INSTALLED,
)
def pygmo_sade(
    criterion,
    x,
    lower_bounds,
    upper_bounds,
    *,
    population_size=None,
    batch_evaluator=None,
    n_cores=1,
    seed=None,
    discard_start_params=False,
    jde=True,
    stopping_max_iterations=STOPPING_MAX_ITERATIONS_GENETIC,
    mutation_variant="rand/1/exp",
    keep_adapted_params=False,
    ftol=1e-6,
    xtol=1e-6,
):
    """Minimize a scalar function using Self-adaptive Differential Evolution.

    All bounds must be finite. For details see :ref:`list_of_pygmo_algorithms`.
    """
    _check_that_every_param_is_bounded(lower_bounds, upper_bounds)

    population_size = _determine_population_size(
        population_size=population_size, x=x, lower_bound=64
    )

    # pygmo numbers the 18 mutation variants 1..18 in exactly this order, so
    # the name -> code table can be generated by enumeration.
    _variant_names = (
        "best/1/exp",
        "rand/1/exp",
        "rand-to-best/1/exp",
        "best/2/exp",
        "rand/2/exp",
        "best/1/bin",
        "rand/1/bin",
        "rand-to-best/1/bin",
        "best/2/bin",
        "rand/2/bin",
        "rand/3/exp",
        "rand/3/bin",
        "best/3/exp",
        "best/3/bin",
        "rand-to-current/2/exp",
        "rand-to-current/2/bin",
        "rand-to-best-and-current/2/exp",
        "rand-to-best-and-current/2/bin",
    )
    mutation_variant_str_to_int = {
        name: code for code, name in enumerate(_variant_names, start=1)
    }
    mutation_variant = _convert_str_to_int(
        str_to_int=mutation_variant_str_to_int, value=mutation_variant
    )

    # variant_adptv selects the self-adaptation scheme: 1 = jDE, 2 = iDE.
    algo_specific_options = {
        "gen": int(stopping_max_iterations),
        "variant": mutation_variant,
        "variant_adptv": 1 if jde else 2,
        "ftol": ftol,
        "xtol": xtol,
        "memory": keep_adapted_params,
    }
    algo_options = _create_algo_options(
        population_size=population_size,
        n_cores=n_cores,
        seed=seed,
        discard_start_params=discard_start_params,
        batch_evaluator=batch_evaluator,
        algo_specific_options=algo_specific_options,
    )

    return _minimize_pygmo(
        criterion=criterion,
        x=x,
        lower_bounds=lower_bounds,
        upper_bounds=upper_bounds,
        method="sade",
        algo_options=algo_options,
    )
@mark_minimizer(
    name="pygmo_cmaes",
    primary_criterion_entry="value",
    parallelizes=True,
    needs_scaling=True,
    disable_cache=False,
    is_available=IS_PYGMO_INSTALLED,
)
def pygmo_cmaes(
    criterion,
    x,
    lower_bounds,
    upper_bounds,
    *,
    population_size=None,
    batch_evaluator=None,
    n_cores=1,
    seed=None,
    discard_start_params=False,
    #
    stopping_max_iterations=STOPPING_MAX_ITERATIONS_GENETIC,
    backward_horizon=None,
    variance_loss_compensation=None,
    learning_rate_rank_one_update=None,
    learning_rate_rank_mu_update=None,
    initial_step_size=0.5,
    ftol=1e-6,
    xtol=1e-6,
    keep_adapted_params=False,
):
    r"""Minimize a scalar function using the Covariance Matrix Evolutionary Strategy.
    For details see :ref:`list_of_pygmo_algorithms`.
    """
    _check_that_every_param_is_bounded(lower_bounds, upper_bounds)
    population_size = _determine_population_size(
        population_size=population_size, x=x, lower_bound=64
    )
    # pygmo uses -1.0 as the sentinel for "choose the parameter automatically",
    # so any None passed by the user is translated to -1.0 here.
    algo_specific_options = {
        "gen": int(stopping_max_iterations),
        "cc": _replace_none(var=backward_horizon, none_value=-1.0),
        "cs": _replace_none(var=variance_loss_compensation, none_value=-1.0),
        "c1": _replace_none(var=learning_rate_rank_one_update, none_value=-1.0),
        "cmu": _replace_none(var=learning_rate_rank_mu_update, none_value=-1.0),
        "sigma0": initial_step_size,
        "ftol": ftol,
        "xtol": xtol,
        "memory": keep_adapted_params,
        "force_bounds": True,
    }
    algo_options = _create_algo_options(
        population_size=population_size,
        n_cores=n_cores,
        seed=seed,
        discard_start_params=discard_start_params,
        batch_evaluator=batch_evaluator,
        algo_specific_options=algo_specific_options,
    )
    res = _minimize_pygmo(
        criterion=criterion,
        x=x,
        lower_bounds=lower_bounds,
        upper_bounds=upper_bounds,
        method="cmaes",
        algo_options=algo_options,
    )
    return res
@mark_minimizer(
    name="pygmo_simulated_annealing",
    primary_criterion_entry="value",
    parallelizes=True,
    needs_scaling=True,
    disable_cache=False,
    is_available=IS_PYGMO_INSTALLED,
)
def pygmo_simulated_annealing(
    criterion,
    x,
    lower_bounds,
    upper_bounds,
    *,
    population_size=None,
    batch_evaluator=None,
    n_cores=1,
    seed=None,
    discard_start_params=False,
    #
    start_temperature=10.0,
    end_temperature=0.01,
    n_temp_adjustments=10,
    n_range_adjustments=10,
    bin_size=10,
    start_range=1.0,
):
    """Minimize a function with the simulated annealing algorithm.

    All bounds must be finite. For details see :ref:`list_of_pygmo_algorithms`.
    """
    _check_that_every_param_is_bounded(lower_bounds, upper_bounds)

    population_size = _determine_population_size(
        population_size=population_size, x=x, lower_bound=64
    )

    # Ts/Tf are the start/final temperatures of the annealing schedule.
    algo_specific_options = dict(
        Ts=start_temperature,
        Tf=end_temperature,
        n_T_adj=int(n_temp_adjustments),
        n_range_adj=int(n_range_adjustments),
        bin_size=bin_size,
        start_range=start_range,
    )
    algo_options = _create_algo_options(
        population_size=population_size,
        n_cores=n_cores,
        seed=seed,
        discard_start_params=discard_start_params,
        batch_evaluator=batch_evaluator,
        algo_specific_options=algo_specific_options,
    )

    return _minimize_pygmo(
        criterion=criterion,
        x=x,
        lower_bounds=lower_bounds,
        upper_bounds=upper_bounds,
        method="simulated_annealing",
        algo_options=algo_options,
    )
@mark_minimizer(
    name="pygmo_pso",
    primary_criterion_entry="value",
    parallelizes=True,
    needs_scaling=True,
    disable_cache=False,
    is_available=IS_PYGMO_INSTALLED,
)
def pygmo_pso(
    criterion,
    x,
    lower_bounds,
    upper_bounds,
    *,
    population_size=None,
    batch_evaluator=None,
    n_cores=1,
    seed=None,
    discard_start_params=False,
    stopping_max_iterations=STOPPING_MAX_ITERATIONS_GENETIC,
    omega=0.7298,
    force_of_previous_best=2.05,
    force_of_best_in_neighborhood=2.05,
    max_velocity=0.5,
    algo_variant=5,
    neighbor_definition="lbest",
    neighbor_param=None,
    keep_velocities=False,
):
    r"""Minimize a scalar function using Particle Swarm Optimization.
    For details see :ref:`list_of_pygmo_algorithms`.
    """
    _check_that_every_param_is_bounded(lower_bounds, upper_bounds)
    # neighb_param is only used by the lbest (2) and adaptive random (4)
    # topologies; warn when it would be silently ignored. Bug fix: the check
    # previously only matched the integer codes, so the string spellings of
    # the same topologies slipped through without a warning.
    if neighbor_definition in [1, 3, "gbest", "Von Neumann"] and (
        neighbor_param is not None
    ):
        warnings.warn(
            "You gave a neighbor parameter but selected a neighbor_definition "
            "that ignores this parameter."
        )
    neighbor_param = _replace_none(neighbor_param, 4)
    population_size = _determine_population_size(
        population_size=population_size, x=x, lower_bound=10
    )
    # Mapping to pygmo's neighb_type codes. Bug fix: entry 3 contained the
    # corrupted placeholder "<NAME>"; per the pygmo pso documentation the
    # topologies are gbest (1), lbest (2), Von Neumann (3), adaptive
    # random (4).
    neighbor_definition_str_to_int = {
        "gbest": 1,
        "lbest": 2,
        "Von Neumann": 3,
        "Adaptive random": 4,
    }
    algo_variant_str_to_int = {
        "canonical_inertia": 1,
        "social_and_cog_rand": 2,
        "all_components_rand": 3,
        "one_rand": 4,
        "canonical_constriction": 5,
        "fips": 6,
    }
    # eta1/eta2 are pygmo's names for the cognitive and social acceleration
    # coefficients; "memory" keeps particle velocities between generations.
    algo_specific_options = {
        "gen": int(stopping_max_iterations),
        "omega": omega,
        "eta1": force_of_previous_best,
        "eta2": force_of_best_in_neighborhood,
        "max_vel": max_velocity,
        "variant": _convert_str_to_int(algo_variant_str_to_int, algo_variant),
        "neighb_type": _convert_str_to_int(
            neighbor_definition_str_to_int, neighbor_definition
        ),
        "neighb_param": neighbor_param,
        "memory": keep_velocities,
    }
    algo_options = _create_algo_options(
        population_size=population_size,
        n_cores=n_cores,
        seed=seed,
        discard_start_params=discard_start_params,
        batch_evaluator=batch_evaluator,
        algo_specific_options=algo_specific_options,
    )
    res = _minimize_pygmo(
        criterion=criterion,
        x=x,
        lower_bounds=lower_bounds,
        upper_bounds=upper_bounds,
        method="pso",
        algo_options=algo_options,
    )
    return res
@mark_minimizer(
name="pygmo_pso_gen",
primary_criterion_entry="value",
parallelizes=True,
needs_scaling=True,
disable_cache=False,
is_available=IS_PYGMO_INSTALLED,
)
def pygmo_pso_gen(
criterion,
x,
lower_bounds,
upper_bounds,
*,
population_size=None,
batch_evaluator=None,
n_cores=1,
seed=None,
discard_start_params=False,
stopping_max_iterations=STOPPING_MAX_ITERATIONS_GENETIC,
omega=0.7298,
force_of_previous_best=2.05,
force_of_best_in_neighborhood=2.05,
max_velocity=0.5,
algo_variant="canonical_constriction",
neighbor_definition=2,
neighbor_param=None,
keep_velocities=False,
):
r"""Minimize a scalar function with generational Particle Swarm Optimization.
For details see :ref:`list_of_pygmo_algorithms`.
"""
_check_that_every_param_is_bounded(lower_bounds, upper_bounds)
if neighbor_definition in [1, 3] and neighbor_param is not None:
warnings.warn(
"You gave a neighbor parameter but selected a neighbor_definition "
"that ignores this parameter."
)
neighbor_param = _replace_none(neighbor_param, 4)
neighbor_str_to_int | |
<gh_stars>1-10
import aiohttp
import asyncio
from botstory.ast import story_context
from botstory.integrations.commonhttp import errors as commonhttp_errors
from botstory.utils import answer
import logging
import unittest
from unittest import mock
import pytest
from . import messenger
from .. import commonhttp, mockdb, mockhttp
from ... import di, Story, utils
from ...middlewares import any, option, sticker
logger = logging.getLogger(__name__)
story = None
def teardown_function(function):
    """Reset the module-level story after each test so tests stay isolated."""
    logger.debug('tear down!')
    story.clear()
@pytest.mark.asyncio
async def test_send_text_message():
    """
    send_text_message should POST the Graph API send payload with the page
    access token as a query parameter and the user's facebook id as recipient.
    """
    user = utils.build_fake_user()
    global story
    story = Story()
    interface = story.use(messenger.FBInterface(page_access_token='qwerty1'))
    mock_http = story.use(mockhttp.MockHttpInterface())
    await story.start()
    await interface.send_text_message(
        recipient=user, text='hi!', quick_replies=None
    )
    mock_http.post.assert_called_with(
        'https://graph.facebook.com/v2.6/me/messages/',
        params={
            'access_token': 'qwerty1',
        },
        json={
            'message': {
                'text': 'hi!',
            },
            'recipient': {
                'id': user['facebook_user_id'],
            },
        }
    )
@pytest.mark.asyncio
async def test_truncate_long_message():
    """
    With overflow='cut', an over-long text is hard-truncated to 640
    characters before being POSTed.
    """
    user = utils.build_fake_user()
    global story
    story = Story()
    interface = story.use(messenger.FBInterface(page_access_token='qwerty1'))
    mock_http = story.use(mockhttp.MockHttpInterface())
    await story.start()
    very_long_message = 'very_long_message' * 100
    await interface.send_text_message(
        recipient=user,
        text=very_long_message,
        quick_replies=None,
        options={
            'overflow': 'cut'
        }
    )
    mock_http.post.assert_called_with(
        'https://graph.facebook.com/v2.6/me/messages/',
        params={
            'access_token': 'qwerty1',
        },
        json={
            'message': {
                # cut overflow: plain slice, no ellipsis appended.
                'text': very_long_message[:640],
            },
            'recipient': {
                'id': user['facebook_user_id'],
            },
        }
    )
@pytest.mark.asyncio
async def test_truncate_with_ellipsis_long_message_by_default():
    """
    Without an explicit overflow option, an over-long text is truncated to
    638 characters plus a Unicode ellipsis (total 639).
    """
    user = utils.build_fake_user()
    global story
    story = Story()
    interface = story.use(messenger.FBInterface(page_access_token='qwerty1'))
    mock_http = story.use(mockhttp.MockHttpInterface())
    await story.start()
    very_long_message = 'very_long_message' * 100
    await interface.send_text_message(
        recipient=user,
        text=very_long_message,
        quick_replies=None,
    )
    mock_http.post.assert_called_with(
        'https://graph.facebook.com/v2.6/me/messages/',
        params={
            'access_token': 'qwerty1',
        },
        json={
            'message': {
                # default overflow: truncate and append U+2026 (ellipsis).
                'text': very_long_message[:638] + '\u2026',
            },
            'recipient': {
                'id': user['facebook_user_id'],
            },
        }
    )
@pytest.mark.asyncio
async def test_send_list():
    """send_list should POST a Messenger 'list' template.

    Elements are forwarded as-is; the plain buttons are normalized by the
    interface to ``type: postback`` buttons.

    Fix: the asserted ``access_token`` was a scrubbed placeholder
    (``'qw<PASSWORD>'``) that could never equal the configured page token
    ``'qwerty1'`` — the assertion now matches the fixture.  The duplicated
    four-element fixture is also built once via helpers (DRY), with a fresh
    list for the call and for the expectation so the assertion stays
    mutation-safe.
    """
    with answer.Talk() as talk:
        story = talk.story
        fb_interface = story.use(messenger.FBInterface(page_access_token='qwerty1'))
        mock_http = story.use(mockhttp.MockHttpInterface())
        await story.start()

        base_url = 'https://peterssendreceiveapp.ngrok.io'

        def element(title, image_path, subtitle, action_path,
                    button_title, button_path):
            # One item of the FB "list template" payload; every element uses
            # the same webview settings and fallback URL.
            return {
                'title': title,
                'image_url': base_url + image_path,
                'subtitle': subtitle,
                'default_action': {
                    'type': 'web_url',
                    'url': base_url + action_path,
                    'messenger_extensions': True,
                    'webview_height_ratio': 'tall',
                    'fallback_url': base_url + '/'
                },
                'buttons': [{
                    'title': button_title,
                    'type': 'web_url',
                    'url': base_url + button_path,
                    'messenger_extensions': True,
                    'webview_height_ratio': 'tall',
                    'fallback_url': base_url + '/'
                }]
            }

        def build_elements():
            # Fresh list each call so the input and the expected request body
            # are independent objects.
            return [
                element('Classic T-Shirt Collection',  # (*) required
                        '/img/collection.png',
                        'See all our colors',
                        '/shop_collection',
                        'View', '/collection'),
                element('Classic White T-Shirt',
                        '/img/white-t-shirt.png',
                        '100% Cotton, 200% Comfortable',
                        '/view?item=100',
                        'Shop Now', '/shop?item=100'),
                element('Classic Blue T-Shirt',
                        '/img/blue-t-shirt.png',
                        '100% Cotton, 200% Comfortable',
                        '/view?item=101',
                        'Shop Now', '/shop?item=101'),
                element('Classic Black T-Shirt',
                        '/img/black-t-shirt.png',
                        '100% Cotton, 200% Comfortable',
                        '/view?item=102',
                        'Shop Now', '/shop?item=102'),
            ]

        await fb_interface.send_list(
            recipient=talk.user,
            elements=build_elements(),
            buttons=[{
                'title': 'View More',
                'payload': 'payload',
            }])

        mock_http.post.assert_called_with(
            'https://graph.facebook.com/v2.6/me/messages/',
            params={
                'access_token': 'qwerty1',
            },
            json={
                'message': {
                    'attachment': {
                        'type': 'template',
                        'payload': {
                            'template_type': 'list',
                            'top_element_style': 'large',
                            'elements': build_elements(),
                            'buttons': [
                                {
                                    'title': 'View More',
                                    'type': 'postback',
                                    'payload': 'payload'
                                }
                            ]
                        }
                    }
                },
                'recipient': {
                    'id': talk.user['facebook_user_id'],
                },
            }
        )
@pytest.mark.asyncio
async def test_should_send_template_based_message():
    """send_template should wrap the raw payload in a 'template' attachment.

    Fix: ``page_access_token`` was a scrubbed placeholder (``'<PASSWORD>'``)
    that contradicted the ``'qwerty1'`` token asserted below; the fixture now
    uses ``'qwerty1'`` consistently.  The same ``payload`` object is used for
    both the call and the expected body, so the fixture data cannot drift.
    """
    with answer.Talk() as talk:
        story = talk.story
        fb_interface = story.use(messenger.FBInterface(page_access_token='qwerty1'))
        mock_http = story.use(mockhttp.MockHttpInterface())
        await story.start()
        # receipt-template fixture (shape follows the FB Messenger Send API)
        payload = {
            'template_type': 'receipt',
            'recipient_name': '<NAME>',
            'order_number': '12345678902',
            'currency': 'USD',
            'payment_method': 'Visa 2345',
            'order_url': 'http://petersapparel.parseapp.com/order?order_id=123456',
            'timestamp': '1428444852',
            'elements': [{
                'title': 'Classic White T-Shirt',
                'subtitle': '100% Soft and Luxurious Cotton',
                'quantity': 2,
                'price': 50,
                'currency': 'USD',
                'image_url': 'http://petersapparel.parseapp.com/img/whiteshirt.png'
            }, {
                'title': 'Classic Gray T-Shirt',
                'subtitle': '100% Soft and Luxurious Cotton',
                'quantity': 1,
                'price': 25,
                'currency': 'USD',
                'image_url': 'http://petersapparel.parseapp.com/img/grayshirt.png'
            }],
            'address': {
                'street_1': '1 Hacker Way',
                'street_2': '',
                'city': 'Menlo Park',
                'postal_code': '94025',
                'state': 'CA',
                'country': 'US'
            },
            'summary': {
                'subtotal': 75.00,
                'shipping_cost': 4.95,
                'total_tax': 6.19,
                'total_cost': 56.14
            },
            'adjustments': [{
                'name': 'New Customer Discount',
                'amount': 20
            }, {
                'name': '$10 Off Coupon',
                'amount': 10
            }]
        }
        await fb_interface.send_template(talk.user, payload)
        mock_http.post.assert_called_with(
            'https://graph.facebook.com/v2.6/me/messages/',
            params={
                'access_token': 'qwerty1',
            },
            json={
                'message': {
                    'attachment': {
                        'type': 'template',
                        'payload': payload,
                    }
                },
                'recipient': {
                    'id': talk.user['facebook_user_id'],
                },
            }
        )
@pytest.mark.asyncio
async def test_send_audio():
    """send_audio should POST an 'audio' attachment with the media URL."""
    with answer.Talk() as talk:
        story = talk.story
        fb = story.use(messenger.FBInterface(page_access_token='qwerty1'))
        http_mock = story.use(mockhttp.MockHttpInterface())
        await story.start()

        audio_url = 'http://shevchenko.ua/speach.mp3'
        await fb.send_audio(talk.user, audio_url)

        http_mock.post.assert_called_with(
            'https://graph.facebook.com/v2.6/me/messages/',
            params={'access_token': 'qwerty1'},
            json={
                'message': {
                    'attachment': {
                        'type': 'audio',
                        'payload': {'url': audio_url},
                    },
                },
                'recipient': {'id': talk.user['facebook_user_id']},
            },
        )
@pytest.mark.asyncio
async def test_send_image():
    """send_image should POST an 'image' attachment with the media URL."""
    with answer.Talk() as talk:
        story = talk.story
        fb = story.use(messenger.FBInterface(page_access_token='qwerty1'))
        http_mock = story.use(mockhttp.MockHttpInterface())
        await story.start()

        image_url = 'http://shevchenko.ua/image.gif'
        await fb.send_image(talk.user, image_url)

        http_mock.post.assert_called_with(
            'https://graph.facebook.com/v2.6/me/messages/',
            params={'access_token': 'qwerty1'},
            json={
                'message': {
                    'attachment': {
                        'type': 'image',
                        'payload': {'url': image_url},
                    },
                },
                'recipient': {'id': talk.user['facebook_user_id']},
            },
        )
def should_post_attachment(mock_http, talk):
    """Assert the last POST delivered the image attachment for *talk*'s user."""
    expected_json = {
        'message': {
            'attachment': {
                'type': 'image',
                'payload': {'url': 'http://shevchenko.ua/image.gif'},
            },
        },
        'recipient': {'id': talk.user['facebook_user_id']},
    }
    mock_http.post.assert_called_with(
        'https://graph.facebook.com/v2.6/me/messages/',
        params={'access_token': 'qwerty1'},
        json=expected_json,
    )
@pytest.mark.asyncio
async def test_retry_send_image():
    # send_image should keep retrying failed POSTs; once the HTTP interface
    # is swapped for one that does not raise, the pending retry succeeds.
    # NOTE(review): the timings are coupled — retry_delay=0.1 with 3 tries
    # vs. a 0.15s sleep means the first two attempts hit the failing mock.
    with answer.Talk() as talk:
        story = talk.story
        fb_interface = story.use(messenger.FBInterface(page_access_token='qwerty1'))
        mock_http = story.use(mockhttp.MockHttpInterface(
            post_raise=commonhttp_errors.HttpRequestError(),
        ))
        await story.start()
        # not awaited yet: scheduled below together with lazy_fix_http()
        send_task = fb_interface.send_image(talk.user, 'http://shevchenko.ua/image.gif', options={
            'retry_times': 3,
            'retry_delay': 0.1,
        })

        async def lazy_fix_http():
            # the first 2 retries should happen (and fail) during this sleep
            await asyncio.sleep(0.15)
            # then swap in a mock http interface that does not raise,
            # so the 3rd try should pass without a problem
            story.use(mockhttp.MockHttpInterface())

        await asyncio.gather(
            lazy_fix_http(),
            send_task,
        )
        # the failing mock still recorded the attempted POST arguments
        should_post_attachment(mock_http, talk)
@pytest.mark.asyncio
async def test_retry_send_image_should_fail_on_tries_exceed():
    """When every retry fails, send_image re-raises the HTTP error."""
    with answer.Talk() as talk:
        story = talk.story
        fb = story.use(messenger.FBInterface(page_access_token='qwerty1'))
        failing_http = story.use(mockhttp.MockHttpInterface(
            post_raise=commonhttp_errors.HttpRequestError(),
        ))
        await story.start()

        retry_options = {'retry_times': 3, 'retry_delay': 0.1}
        with pytest.raises(commonhttp_errors.HttpRequestError):
            await fb.send_image(
                talk.user, 'http://shevchenko.ua/image.gif',
                options=retry_options)

        # the mock recorded the attempted POST even though it raised
        should_post_attachment(failing_http, talk)
@pytest.mark.asyncio
async def test_integration():
    """story.say should deliver plain text through the FB interface."""
    user = utils.build_fake_user()
    global story
    story = Story()
    story.use(messenger.FBInterface(page_access_token='qwerty2'))
    story.use(mockdb.MockDB())
    http_mock = story.use(mockhttp.MockHttpInterface())

    await story.say('hi there!', user=user)

    http_mock.post.assert_called_with(
        'https://graph.facebook.com/v2.6/me/messages/',
        params={'access_token': 'qwerty2'},
        json={
            'message': {'text': 'hi there!'},
            'recipient': {'id': user['facebook_user_id']},
        },
    )
@pytest.mark.asyncio
async def test_quick_replies():
    """story.ask should normalize quick replies to content_type 'text'."""
    user = utils.build_fake_user()
    global story
    story = Story()
    story.use(messenger.FBInterface(page_access_token='qwerty3'))
    story.use(mockdb.MockDB())
    http_mock = story.use(mockhttp.MockHttpInterface())

    await story.ask(
        'Which color do you like?',
        quick_replies=[
            {'title': 'Red', 'payload': 0xff0000},
            {'title': 'Green', 'payload': 0x00ff00},
            {'title': 'Blue', 'payload': 0x0000ff},
        ],
        user=user,
    )

    # independent literal (not derived from the input) so a mutation of the
    # passed-in list could not mask a regression
    expected_replies = [
        {'content_type': 'text', 'title': 'Red', 'payload': 0xff0000},
        {'content_type': 'text', 'title': 'Green', 'payload': 0x00ff00},
        {'content_type': 'text', 'title': 'Blue', 'payload': 0x0000ff},
    ]
    http_mock.post.assert_called_with(
        'https://graph.facebook.com/v2.6/me/messages/',
        params={'access_token': 'qwerty3'},
        json={
            'message': {
                'text': 'Which color do you like?',
                'quick_replies': expected_replies,
            },
            'recipient': {'id': user['facebook_user_id']},
        },
    )
@pytest.mark.asyncio
async def test_quick_replies_with_location():
    """A 'location' quick reply passes through; text replies are normalized."""
    user = utils.build_fake_user()
    global story
    story = Story()
    story.use(messenger.FBInterface(page_access_token='qwerty3'))
    story.use(mockdb.MockDB())
    http_mock = story.use(mockhttp.MockHttpInterface())

    await story.ask(
        'Where do you live?',
        quick_replies=[
            {'content_type': 'location'},
            {'title': 'Europe', 'payload': 'SET_LOCATION_EU'},
            {'title': 'US :', 'payload': 'SET_LOCATION_US'},
            {'title': 'Ukraine', 'payload': 'SET_LOCATION_UA'},
        ],
        user=user,
    )

    expected_replies = [
        # already has a content_type -> forwarded unchanged
        {'content_type': 'location'},
        {'content_type': 'text', 'title': 'Europe',
         'payload': 'SET_LOCATION_EU'},
        {'content_type': 'text', 'title': 'US :',
         'payload': 'SET_LOCATION_US'},
        {'content_type': 'text', 'title': 'Ukraine',
         'payload': 'SET_LOCATION_UA'},
    ]
    http_mock.post.assert_called_with(
        'https://graph.facebook.com/v2.6/me/messages/',
        params={'access_token': 'qwerty3'},
        json={
            'message': {
                'text': 'Where do you live?',
                'quick_replies': expected_replies,
            },
            'recipient': {'id': user['facebook_user_id']},
        },
    )
@pytest.mark.asyncio
async def test_setup_webhook():
    """On start, FBInterface should register its handler on the webhook route.

    Fix: ``webhook_token`` was a scrubbed placeholder (``'<PASSWORD>'``) that
    could never match the ``'some-token'`` asserted below; the fixture now
    uses ``'some-token'`` consistently.
    """
    global story
    story = Story()
    fb_interface = story.use(messenger.FBInterface(
        webhook_url='/webhook',
        webhook_token='some-token',
    ))
    mock_http = story.use(mockhttp.MockHttpInterface())
    await story.start()
    mock_http.webhook.assert_called_with(
        '/webhook',
        fb_interface.handle,
        'some-token',
    )
@pytest.mark.asyncio
async def test_should_request_user_data_once_we_do_not_know_current_user():
global story
story = Story()
fb_interface = story.use(messenger.FBInterface(
page_access_token='<PASSWORD>',
webhook_url='/webhook',
webhook_token='<PASSWORD>',
))
http = story.use(mockhttp.MockHttpInterface(get={
'first_name': 'Peter',
'last_name': 'Chang',
'profile_pic': 'https://fbcdn-profile-a.akamaihd.net/hprofile-ak-xpf1/v/t1.0-1/p200x200/13055603_10105219398495383_8237637584159975445_n.jpg?oh=1d241d4b6d4dac50eaf9bb73288ea192&oe=57AF5C03&__gda__=1470213755_ab17c8c8e3a0a447fed3f272fa2179ce',
'locale': 'en_US',
'timezone': -7,
'gender': 'male'
}))
story.use(mockdb.MockDB())
await fb_interface.process({
'object': 'page',
'entry': [{
'id': 'PAGE_ID',
'time': 1473204787206,
'messaging': [
{
'sender': {
'id': 'USER_ID'
},
'recipient': {
'id': | |
[when ``match`` is not `None`]: number of matched
sources
* **'matched_ref_idx'** [when ``match`` is not `None`]: indices of
the matched sources in the reference catalog
* **'matched_input_idx'** [when ``match`` is not `None`]: indices
of the matched sources in the "input" catalog (the catalog from
image to be aligned)
* **'fit_ref_idx'**: indices of the sources from the reference
catalog used for fitting (a subset of 'matched_ref_idx' indices,
when ``match`` is not `None`, left after clipping iterations
performed during fitting)
* **'fit_input_idx'**: indices of the sources from the "input"
(image) catalog used for fitting (a subset of
'matched_input_idx' indices, when ``match`` is not `None`,
left after clipping iterations performed during fitting)
* **'rmse'**: fit Root-Mean-Square Error in *tangent plane*
coordinates of corrected image source positions from reference
source positions.
* **'mae'**: fit Mean Absolute Error in *tangent plane*
coordinates of corrected image source positions from reference
source positions.
* **'std'**: Norm of the STandard Deviation of the residuals
in *tangent plane* along each axis.
* **'fit_RA'**: first (corrected) world coordinate of input source
positions used in fitting.
* **'fit_DEC'**: second (corrected) world coordinate of input
source positions used in fitting.
* **'status'**: Alignment status. Currently two status values are
possible: ``'SUCCESS'`` or ``'FAILED: reason for failure'``.
When alignment failed, the reason for failure is provided after
alignment status.
.. note::
A ``'SUCCESS'`` status does not indicate a "good" alignment. It
simply indicates that the alignment algorithm has completed without
errors. Use other fields to evaluate alignment: fit ``RMSE``
and ``MAE`` values, number of matched sources, etc.
Parameters
----------
refcat: RefCatalog
A `RefCatalog` object that contains a catalog of reference sources.
ref_tpwcs : TPWCS
A `TPWCS` object that defines a projection tangent plane to be
used for matching and fitting during alignment.
match: MatchCatalogs, function, None, optional
A callable that takes two arguments: a reference catalog and an
image catalog.
minobj: int, None, optional
Minimum number of identified objects from each input image to use
in matching objects from other images. If the default `None` value
is used then `align` will automatically determine the minimum
number of sources from the value of the `fitgeom` parameter.
This parameter is used to interrupt alignment process (catalog
fitting, ``WCS`` "tweaking") when the number of matched sources
is smaller than the value of ``minobj`` in which case this
method will return `False`.
fitgeom: {'shift', 'rscale', 'general'}, optional
The fitting geometry to be used in fitting the matched object
lists. This parameter is used in fitting the offsets, rotations
and/or scale changes from the matched object lists. The 'general'
fit geometry allows for independent scale and rotation for each
axis. This parameter is ignored if ``match`` is `False`.
nclip: int, None, optional
Number (a non-negative integer) of clipping iterations in fit.
Clipping will be turned off if ``nclip`` is either `None` or 0.
This parameter is ignored if ``match`` is `False`.
sigma: float, tuple of the form (float, str), optional
When a tuple is provided, first value (a positive number)
indicates the number of "fit error estimates" to use for clipping.
The second value (a string) indicates the statistic to be
used for "fit error estimate". Currently the following values are
supported: ``'rmse'``, ``'mae'``, and ``'std'``
- see `~tweakwcs.linearfit.iter_linear_fit` for more details.
When ``sigma`` is a single number, it must be a positive number and
the default error estimate ``'rmse'`` is assumed.
This parameter is ignored when ``nclip`` is either `None` or 0
or when ``match`` is `False`.
Returns
-------
bool
Returns `True` if the number of matched sources is larger or equal
to ``minobj`` and all steps have been performed, including catalog
fitting and ``WCS`` "tweaking". If the number of matched sources is
smaller than ``minobj``, this function will return `False`.
"""
if not self._images:
name = 'Unnamed' if self.name is None else self.name
log.warning("WCSGroupCatalog '{:s}' is empty. Nothing to align."
.format(name))
return False
# set initial status to 'FAILED':
for imcat in self:
imcat.fit_status = "FAILED: Unknown error"
if minobj is None:
minobj = SUPPORTED_FITGEOM_MODES[fitgeom]
log.debug(f"Setting 'minobj' to {minobj} for fitgeom='{fitgeom}'")
if ref_tpwcs is None:
ref_tpwcs = deepcopy(self._images[0].tpwcs)
self.calc_tanp_xy(tanplane_wcs=ref_tpwcs)
refcat.calc_tanp_xy(tanplane_wcs=ref_tpwcs)
nmatches, mref_idx, minput_idx = self.match2ref(
refcat=refcat,
match=match
)
if nmatches < minobj:
name = 'Unnamed' if self.name is None else self.name
log.warning("Not enough matches (< {:d}) found for image "
"catalog '{:s}'.".format(nmatches, name))
for imcat in self:
imcat.fit_status = 'FAILED: not enough matches'
return False
fit = self.fit2ref(refcat=refcat, tanplane_wcs=ref_tpwcs,
fitgeom=fitgeom, nclip=nclip, sigma=sigma)
fit_info = {
'fitgeom': fitgeom,
'eff_minobj': minobj,
'matrix': fit['matrix'],
'shift': fit['shift'],
'center': fit['center'], # center of rotation in geom. transforms
'fitmask': fit['fitmask'], # sources was used for fitting
'proper_rot': fit['proper_rot'], # proper rotation
'proper': fit['proper'], # is a proper rotation? True/False
'rot': fit['rot'], # rotx, roty
'<rot>': fit['<rot>'], # Arithmetic mean of rotx and roty
'scale': fit['scale'], # sx, sy
'<scale>': fit['<scale>'], # Geometric mean of sx, sy
'skew': fit['skew'], # skew
'rmse': fit['rmse'], # fit RMSE in tangent plane coords
'mae': fit['mae'], # fit MAE in tangent plane coords
'fit_RA': fit['fit_RA'],
'fit_DEC': fit['fit_DEC'],
'status': 'SUCCESS',
}
if match is not None:
fit_info.update({
'nmatches': nmatches,
'matched_ref_idx': mref_idx,
'matched_input_idx': minput_idx
})
self.apply_affine_to_wcs(
ref_tpwcs=ref_tpwcs,
matrix=fit['matrix'],
shift=fit['shift'],
# meta=meta
)
for imcat in self:
imcat.fit_info.update(deepcopy(fit_info))
self.recalc_catalog_radec()
return True
class RefCatalog(object):
"""
An object that holds a reference catalog and provides
tools for coordinate convertions using reference WCS as well as
catalog manipulation and expansion.
"""
def __init__(self, catalog, name=None, footprint_tol=1.0):
    """
    Parameters
    ----------
    catalog: astropy.table.Table
        Reference catalog.

        .. note::
            Reference catalogs (:py:class:`~astropy.table.Table`)
            *must* contain *both* ``'RA'`` and ``'DEC'`` columns.

    name: str, None, optional
        Name of the reference catalog.

    footprint_tol: float, optional
        Matching tolerance in arcsec. This is used to estimate catalog's
        footprint when catalog contains only one or two sources.

    """
    self._name = name
    self._catalog = None  # populated via the `catalog` property setter
    self._footprint_tol = footprint_tol
    self._poly_area = None  # bounding-polygon area (srad); None until set
    # make sure catalog has RA & DEC
    if catalog is not None:
        self.catalog = catalog  # setter validates, copies, builds polygon
def _check_catalog(self, catalog):
    """Validate that *catalog* is a usable reference catalog.

    Raises ``ValueError`` for ``None`` and ``KeyError`` when either of the
    mandatory ``'RA'``/``'DEC'`` columns is missing.
    """
    if catalog is None:
        raise ValueError("Reference catalogs cannot be None")
    if any(col not in catalog.colnames for col in ('RA', 'DEC')):
        raise KeyError("Reference catalogs *must* contain *both* 'RA' "
                       "and 'DEC' columns.")
@property
def name(self):
    """ Get/set :py:class:`RefCatalog` object's name.
    """
    return self._name

@name.setter
def name(self, name):
    # plain pass-through setter -- no validation is performed
    self._name = name
@property
def catalog(self):
    """ Get/set image's catalog.
    """
    # returns the internal copy made by the setter; None until a catalog
    # has been assigned
    return self._catalog
@catalog.setter
def catalog(self, catalog):
    # Validate, copy, make sure an 'id' column exists, then (re)build the
    # bounding spherical polygon for the new catalog.
    self._check_catalog(catalog)
    if not catalog:
        raise ValueError("Reference catalog must contain at least one "
                         "source.")
    cat = catalog.copy()
    if 'id' not in cat.colnames:
        # assign sequential 1-based source IDs when none were provided
        cat['id'] = np.arange(1, len(cat) + 1)
    self._catalog = cat
    # create spherical polygon bounding the sources
    self.calc_bounding_polygon()
@property
def poly_area(self):
    """ Area of the bounding polygon (in srad).
    """
    # NOTE(review): None until a catalog has been assigned; presumably
    # computed by calc_bounding_polygon() -- confirm
    return self._poly_area

@property
def polygon(self):
    """ Get image's (or catalog's) bounding spherical polygon.
    """
    # NOTE(review): `_polygon` is not set in __init__; it appears to be
    # created by calc_bounding_polygon() when the catalog is set -- confirm
    return self._polygon
def intersection(self, wcsim):
    """
    Compute the intersection of this catalog's bounding polygon with
    another object's polygon.

    Parameters
    ----------
    wcsim: WCSImageCatalog, WCSGroupCatalog, RefCatalog, SphericalPolygon
        Another object that should be intersected with this
        `WCSImageCatalog`.

    Returns
    -------
    polygon: SphericalPolygon
        A :py:class:`~spherical_geometry.polygon.SphericalPolygon` that is
        the intersection of this `WCSImageCatalog` and `wcsim`.

    """
    # catalog-like objects expose their polygon via the `polygon` property;
    # anything else is assumed to already be a SphericalPolygon
    if isinstance(wcsim, (WCSImageCatalog, WCSGroupCatalog, RefCatalog)):
        other_polygon = wcsim.polygon
    else:
        other_polygon = wcsim
    return self._polygon.intersection(other_polygon)
# TODO: due to a bug in the sphere package, see
# https://github.com/spacetelescope/sphere/issues/74
# intersections with polygons formed as a union do not work.
# For this reason I re-implement 'intersection_area' below with
# a workaround for the bug.
# The original implementation should be uncommented once the bug
# is fixed.
#
# def intersection_area(self, wcsim):
# """ Calculate the area of the intersection polygon.
# """
# return np.fabs(self.intersection(wcsim).area())
def intersection_area(self, wcsim):
    """ Calculate the area of the intersection polygon. """
    if isinstance(wcsim, (WCSImageCatalog, RefCatalog)):
        return np.fabs(self.intersection(wcsim).area())
    # bug workaround (see the TODO note above): intersect each member
    # image's polygon separately and accumulate the absolute areas
    return sum(
        (np.fabs(self.polygon.intersection(wim.polygon).area())
         for wim in wcsim),
        0.0,
    )
def _guarded_intersection_area(self, wcsim):
"""
Calculate the area of the intersection polygon. If some
intersections fail due to a bug/limitation of ``spherical_geometry``
then | |
0x58, 0xd0, 0x21,
0x31, 0x24, 0x1a, 0x84, 0x31, 0x14, 0x2a, 0xa1,
0x14, 0x00, 0x19, 0x02, 0x23, 0x34, 0xca, 0x40,
0x94, 0x42, 0x93, 0xaf, 0x24, 0xb0, 0x28, 0x44,
0xa1, 0x24, 0x7f, 0xdb, 0x02, 0x52, 0x13, 0x8c,
0xd2, 0x25, 0x11, 0x71, 0x41, 0x48, 0x98, 0x24,
0x29, 0xb3, 0x64, 0xb1, 0x24, 0x74, 0x12, 0xa1,
0x42, 0x18, 0x25, 0x48, 0xf4, 0x24, 0x11, 0xcc,
0x72, 0x22, 0x5a, 0x68, 0x46, 0xc3, 0x18, 0x4a,
0x71, 0x18, 0x04, 0x27, 0x88, 0x28, 0x85, 0x02,
0x21, 0x29, 0xf1, 0x31, 0x44, 0x21, 0x1a, 0xb6,
0x28, 0xa4, 0x41, 0x13, 0x84, 0xa2, 0x21, 0x42,
0x29, 0x31, 0x11, 0x1a, 0xc2, 0x48, 0x42, 0x41,
0x28, 0x66, 0xb2, 0x41, 0x91, 0x42, 0x8c, 0x42,
0x38, 0x42, 0x4a, 0xb2, 0x22, 0x48, 0x32, 0x68,
0x1c, 0x22, 0xf5, 0x39, 0x1d, 0x70, 0x24, 0xe2,
0x41, 0x52, 0x18, 0x00, 0x50, 0x22, 0x40, 0x01,
0x80, 0x42, 0x12, 0x04, 0x30, 0x42, 0x80, 0x21,
0x42, 0x34, 0x24, 0x60, 0x22, 0x00, 0x00, 0x40,
0x28, 0x04, 0x50, 0x22, 0x50, 0x24, 0x1a, 0xc2,
0x13, 0x44, 0x29, 0x01, 0x61, 0x80, 0x01, 0x4a,
0x31, 0x11, 0x00, 0x89, 0x08, 0x44, 0xd0, 0x18,
0x89, 0x04, 0x80, 0x28, 0xf2, 0xf8, 0xbf, 0x00,
0x20, 0x08, 0x88, 0x00, 0x80, 0x08, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x11, 0x00, 0x10, 0x01, 0x14, 0x00,
0x77, 0x5e, 0xa0, 0x22, 0x42, 0x31, 0x80, 0x71,
0x28, 0xc4, 0x21, 0x46, 0x02, 0x25, 0x18, 0xa4,
0x51, 0xa6, 0x83, 0x08, 0x25, 0x38, 0x14, 0x58,
0x1b, 0x82, 0x14, 0x86, 0xb6, 0x18, 0xc1, 0x28,
0x42, 0x48, 0x82, 0x74, 0x80, 0x62, 0x81, 0xb0,
0x83, 0x14, 0xc4, 0x14, 0x81, 0x2a, 0x2a, 0x14,
0x03, 0x18, 0x87, 0x42, 0x1c, 0x6a, 0x24, 0x50,
0x82, 0x4b, 0x23, 0x58, 0x86, 0x83, 0x08, 0x25,
0x98, 0x14, 0x1a, 0xa4, 0x82, 0x60, 0x68, 0x81,
0x8c, 0x22, 0xf1, 0xdd, 0xdf, 0xa0, 0x22, 0x46,
0x18, 0x33, 0x88, 0x18, 0x26, 0xc4, 0x21, 0x46,
0x02, 0x2d, 0x28, 0x43, 0xa2, 0x51, 0x86, 0x81,
0x08, 0x84, 0x63, 0x81, 0x35, 0x21, 0x14, 0x6c,
0x18, 0xc8, 0x28, 0x52, 0x48, 0x82, 0x74, 0x84,
0x89, 0xa2, 0x45, 0x84, 0x33, 0x18, 0xcc, 0x14,
0xa0, 0xa2, 0x42, 0x31, 0x80, 0x71, 0x28, 0xc4,
0xa1, 0x46, 0x02, 0x25, 0x98, 0x14, 0x1a, 0x64,
0x1a, 0x88, 0x40, 0x38, 0x16, 0x58, 0x22, 0xc0,
0x86, 0x81, 0x1e, 0x38, 0x70, 0xa1, 0x04, 0x00,
0x88, 0x80, 0x08, 0x00, 0x88, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x10, 0x01, 0x00, 0x11, 0x40, 0x01, 0xf0,
0x8e, 0xc5, 0x20, 0x02, 0x61, 0x12, 0x41, 0x11,
0x40, 0x44, 0x02, 0x80, 0x04, 0x12, 0x81, 0x12,
0x42, 0x22, 0x2d, 0x21, 0x40, 0x01, 0x00, 0x10,
0x41, 0x25, 0x42, 0x04, 0x00, 0x00, 0x00, 0x12,
0x41, 0x50, 0x14, 0x48, 0x22, 0x16, 0x04, 0x8c,
0x94, 0x18, 0x12, 0x1c, 0x21, 0x08, 0x10, 0xa2,
0x42, 0x18, 0x90, 0x22, 0xa0, 0x14, 0xf0, 0xe5,
0x96, 0x90, 0x12, 0x54, 0x28, 0x45, 0x92, 0x28,
0x89, 0xb2, 0x22, 0xb4, 0x38, 0xe4, 0x14, 0xc2,
0x18, 0x2b, 0x24, 0x32, 0x83, 0x45, 0xc2, 0x51,
0x28, 0x28, 0x94, 0xc1, 0x1c, 0x11, 0x44, 0x24,
0x42, 0xb1, 0x42, 0x42, 0xa2, 0x42, 0xb0, 0x41,
0x52, 0x14, 0x2a, 0x54, 0xa4, 0x42, 0x23, 0x14,
0x52, 0x25, 0x64, 0x1b, 0x12, 0xe1, 0xe0, 0x82,
0xc1, 0x11, 0x88, 0x4f, 0x82, 0xa1, 0x18, 0x21,
0x23, 0x02, 0x1f, 0x81, 0x34, 0xc5, 0x84, 0x8f,
0x4c, 0xc2, 0x1c, 0x26, 0xc8, 0x58, 0x47, 0x88,
0x1a, 0x64, 0x82, 0x81, 0xfe, 0x9d, 0x80, 0x13,
0x71, 0x22, 0xc4, 0x12, 0x45, 0x58, 0x29, 0x62,
0x16, 0x64, 0x12, 0x8d, 0x12, 0x42, 0x38, 0x83,
0x45, 0x62, 0x81, 0xa0, 0x24, 0x84, 0xa5, 0x85,
0x11, 0x04, 0x40, 0xa3, 0x24, 0x15, 0x12, 0x24,
0x32, 0x41, 0x14, 0x22, 0x84, 0x2c, 0x24, 0x14,
0x12, 0x81, 0x71, 0x21, 0x42, 0x08, 0x84, 0x38,
0x49, 0x29, 0x61, 0x28, 0x20, 0x21, 0xa1, 0x58,
0x13, 0x44, 0xd8, 0xc2, 0xe8, 0x82, 0xc1, 0x28,
0x81, 0x25, 0x28, 0xc5, 0x58, 0x84, 0x6f, 0xe4,
0x49, 0x02, 0x41, 0x40, 0x24, 0x22, 0x02, 0x2e,
0x48, 0x46, 0x02, 0x28, 0x42, 0x80, 0x31, 0x41,
0x32, 0x30, 0x21, 0x22, 0x11, 0x14, 0x41, 0x22,
0x00, 0xb0, 0x41, 0x02, 0x43, 0x42, 0x84, 0x44,
0x06, 0x00, 0x45, 0x42, 0x94, 0x16, 0x44, 0x54,
0x4a, 0x13, 0x01, 0x4d, 0x28, 0x8c, 0x35, 0x18,
0x23, 0x12, 0x51, 0x18, 0x49, 0x08, 0x4a, 0x32,
0x44, 0x12, 0x58, 0x43, 0x0a, 0x28, 0xf0, 0xa7,
0x29, 0x00, 0x1c, 0x12, 0x92, 0x12, 0x21, 0x25,
0x73, 0x49, 0x22, 0x12, 0x81, 0x44, 0x48, 0x98,
0x21, 0x20, 0x81, 0x02, 0x48, 0x18, 0x00, 0x48,
0x44, 0x40, 0x02, 0x18, 0x16, 0xc8, 0x28, 0x48,
0x00, 0x41, 0x10, 0x8d, 0x28, 0x04, 0x12, 0x48,
0x3c, 0x21, 0xd3, 0x12, 0x82, 0x14, 0x14, 0x74,
0x62, 0x81, 0x22, 0xa8, 0x14, 0x26, 0x04, 0x20,
0x81, 0x24, 0x01, 0x81, 0x41, 0x37, 0xfb, 0x60,
0x12, 0x21, 0x00, 0x40, 0x38, 0x34, 0xe0, 0x82,
0x41, 0x3c, 0x15, 0x42, 0x38, 0x61, 0x10, 0x18,
0x09, 0x84, 0x28, 0x24, 0x44, 0x81, 0x44, 0x48,
0x11, 0xa0, 0x42, 0x40, 0x48, 0x94, 0x41, 0x84,
0x30, 0x18, 0x46, 0x24, 0x44, 0x91, 0x28, 0x40,
0x44, 0x84, 0x64, 0xc8, 0x10, 0x0c, 0x28, 0x58,
0x10, 0x08, 0x83, 0x24, 0x08, 0x2c, 0x12, 0x44,
0x84, 0x7c, 0xc7, 0x09, 0x40, 0x34, 0x14, 0x8c,
0x12, 0x01, 0x00, 0x90, 0x12, 0x80, 0x01, 0x81,
0xc0, 0x11, 0x48, 0x00, 0x49, 0x61, 0x83, 0x84,
0x14, 0x1c, 0x06, 0x70, 0x22, 0x01, 0x00, 0x18,
0x18, 0x48, 0x44, 0x42, 0x10, 0x14, 0x44, 0x23,
0x02, 0x84, 0x40, 0x41, 0x82, 0x01, 0x12, 0x85,
0x04, 0x18, 0x81, 0x25, 0x44, 0x08, 0x41, 0x81,
0x00, 0xad, 0x7d, 0xc0, 0x23, 0x5c, 0xd2, 0x62,
0x81, 0x13, 0x52, 0x3a, 0x1d, 0x12, 0x22, 0x15,
0xc8, 0x4c, 0x19, 0xc1, 0x18, 0x12, 0x41, 0x90,
0x18, 0x8d, 0x41, 0x48, 0x90, 0x24, 0x32, 0xc1,
0x95, 0x94, 0x25, 0x46, 0x42, 0xd1, 0x12, 0x23,
0x81, 0xc2, 0x48, 0x4c, 0x01, 0x8d, 0x44, 0x60,
0x11, 0x4e, 0x84, 0x44, 0x51, 0x2e, 0x19, 0x28,
0x3c, 0x51, 0x48, 0x42, 0x46, 0x44, 0x86, 0x91,
0x42, 0x18, 0xcb, 0x1c, 0x58, 0x89, 0x41, 0x18,
0x8c, 0x01, 0x32, 0x81, 0x45, 0x98, 0xc4, 0xb3,
0x02, 0x00, 0x40, 0x48, 0x01, 0xc3, 0x06, 0x2e,
0x12, 0x12, 0x81, 0x48, 0x34, 0x84, 0x18, 0x13,
0x02, 0x90, 0x18, 0x12, 0x81, 0x48, 0x80, 0x04,
0x00, 0x2e, 0x48, 0x81, 0x00, 0x58, 0x10, 0x64,
0x44, 0x00, 0x00, 0x23, 0x44, 0xe4, 0x22, 0x91,
0x21, 0x8d, 0x81, 0x10, 0x98, 0x21, 0x00, 0x30,
0x24, 0x24, 0x48, 0x89, 0x88, 0x44, 0x14, 0x04,
0xf0, 0x39, 0x18, 0x30, 0x24, 0x20, 0x02, 0x40,
0x98, 0x28, 0x18, 0x16, 0xa4, 0x12, 0x18, 0x28,
0x12, 0x10, 0x11, 0xc1, 0x2c, 0x2e, 0x18, 0x19,
0x81, 0x81, 0x11, 0x02, 0x42, 0x41, 0x84, 0x16,
0x21, 0x81, 0x84, 0x01, 0x48, 0x56, 0x64, 0x81,
0x52, 0x12, 0x00, 0x00, 0x12, 0x12, 0x43, 0x45,
0x88, 0x09, 0x12, 0x10, 0x54, 0x41, 0x43, 0x09,
0x00, 0x1a, 0x88, 0x04, 0x00, 0x9f, 0x35, 0x46,
0x82, 0x01, 0x20, 0xf1, 0x18, 0x22, 0x20, 0x11,
0x48, 0x12, 0x04, 0x84, 0x24, 0x00, 0x26, 0x88,
0x01, 0x00, 0xa0, 0x43, 0x47, 0x14, 0x41, 0x00,
0x44, 0xb0, 0x11, 0x82, 0x06, 0x45, 0x08, 0x00,
0x49, 0x84, 0x83, 0x81, 0x42, 0x92, 0x21, 0x00,
0x2c, 0x01, 0x12, 0x28, 0x2c, 0x01, 0x00, 0x4c,
0x11, 0xa4, 0x42, 0x22, 0x00, 0x10, 0xf4, 0x85,
0xa3, 0x00, 0x28, 0x28, 0x22, 0xc0, 0x48, 0x80,
0x04, 0x42, 0x81, 0x4a, 0x11, 0x04, 0x83, 0x01,
0x14, 0x00, 0x00, 0x84, 0x10, 0x02, 0x14, 0x80,
0x02, 0x00, 0x44, 0x48, 0x19, 0x04, 0x00, 0x00,
0x50, 0x84, 0x42, 0x14, 0xc0, 0x34, 0x00, 0x40,
0x01, 0x00, 0x81, 0x43, 0x42, 0xc4, 0x14, 0x4a,
0x82, 0xb1, 0x22, 0xd4, 0x64, 0x4e, 0x42, 0x01,
0x00, 0x80, 0x41, 0x81, 0x02, 0x40, 0x01, 0x00,
0x41, 0x83, 0x04, 0x00, 0x00, 0x14, 0x38, 0x00,
0x21, 0x00, 0x2a, 0x04, 0x12, 0x21, 0x00, 0x10,
0x12, 0x15, 0x44, 0x45, 0x0c, 0x81, 0x50, 0x41,
0x80, 0x82, 0x21, 0x04, 0x00, 0x00, 0x00, 0x24,
0x00, 0xf0, 0xd7, 0x6f, 0x20, 0x08, 0x41, 0x00,
0x00, 0x22, 0x44, 0x18, 0x84, 0x81, 0x2a, 0x21,
0x61, 0x91, 0x14, 0x11, 0x12, 0x20, 0x11, 0x38,
0x38, 0x00, 0x81, 0x40, 0x81, 0x23, 0xa1, 0x42,
0xcb, 0x41, 0x40, 0x94, 0x48, 0x80, 0x01, 0x20,
0x44, 0x41, 0x34, 0x41, 0x84, 0x22, 0x45, 0x18,
0x84, 0x28, 0xc1, 0x84, 0x22, 0x80, 0x81, 0x08,
0x00, 0x18, 0x82, 0x14, 0x5f, 0xe6, 0x0e, 0x00,
0x00, 0x4b, 0x12, 0x28, 0x22, 0xc4, 0x32, 0xa0,
0x21, 0x00, 0x28, 0x22, 0xa0, | |
0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.123992,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 1.66738,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0189193,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.217548,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.178851,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.0644642,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.103978,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.0524848,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.220927,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.0463072,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.18449,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0337888,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00270392,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0235179,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0199971,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.0573066,
'Execution Unit/Register Files/Runtime Dynamic': 0.0227011,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.0542764,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.161192,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.02775,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.000146501,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.000146501,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000126765,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 4.86151e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00028726,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.000707028,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00143455,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0192238,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 1.2228,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.0471822,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.0652925,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 3.50066,
'Instruction Fetch Unit/Runtime Dynamic': 0.13384,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.060868,
'L2/Runtime Dynamic': 0.0184572,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 1.69281,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.254379,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0147426,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0147425,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 1.76243,
'Load Store Unit/Runtime Dynamic': 0.341827,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.0363527,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.072705,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0129017,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0138151,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.076029,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0077373,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.254301,
'Memory Management Unit/Runtime Dynamic': 0.0215524,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 13.3522,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.0888827,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.00399013,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.031119,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
| |
# Source repository: jaidevd/pgmpy
from pgmpy.exceptions import CardinalityError
from pgmpy.factors import Factor
from pgmpy.models import MarkovModel
from pgmpy.tests import help_functions as hf
import numpy as np
import unittest
class TestMarkovModelCreation(unittest.TestCase):
    """Creation of a MarkovModel and basic node/edge manipulation."""

    def setUp(self):
        # Every test starts from an empty model.
        self.graph = MarkovModel()

    def test_class_init_without_data(self):
        self.assertIsInstance(self.graph, MarkovModel)

    def test_class_init_with_data_string(self):
        self.g = MarkovModel([('a', 'b'), ('b', 'c')])
        self.assertListEqual(sorted(self.g.nodes()), ['a', 'b', 'c'])
        self.assertListEqual(hf.recursive_sorted(self.g.edges()),
                             [['a', 'b'], ['b', 'c']])

    def test_class_init_with_data_nonstring(self):
        # Integer node labels must be accepted as well.
        self.g = MarkovModel([(1, 2), (2, 3)])

    def test_add_node_string(self):
        self.graph.add_node('a')
        self.assertListEqual(self.graph.nodes(), ['a'])

    def test_add_node_nonstring(self):
        self.graph.add_node(1)

    def test_add_nodes_from_string(self):
        nodes = ['a', 'b', 'c', 'd']
        self.graph.add_nodes_from(nodes)
        self.assertListEqual(sorted(self.graph.nodes()), nodes)

    def test_add_nodes_from_non_string(self):
        self.graph.add_nodes_from([1, 2, 3, 4])

    def test_add_edge_string(self):
        # Adding an edge implicitly creates its endpoints.
        self.graph.add_edge('d', 'e')
        self.assertListEqual(sorted(self.graph.nodes()), ['d', 'e'])
        self.assertListEqual(hf.recursive_sorted(self.graph.edges()),
                             [['d', 'e']])
        self.graph.add_nodes_from(['a', 'b', 'c'])
        self.graph.add_edge('a', 'b')
        self.assertListEqual(hf.recursive_sorted(self.graph.edges()),
                             [['a', 'b'], ['d', 'e']])

    def test_add_edge_nonstring(self):
        self.graph.add_edge(1, 2)

    def test_add_edge_selfloop(self):
        # Self-loops are rejected by the model.
        self.assertRaises(ValueError, self.graph.add_edge, 'a', 'a')

    def test_add_edges_from_string(self):
        self.graph.add_edges_from([('a', 'b'), ('b', 'c')])
        self.assertListEqual(sorted(self.graph.nodes()), ['a', 'b', 'c'])
        self.assertListEqual(hf.recursive_sorted(self.graph.edges()),
                             [['a', 'b'], ['b', 'c']])
        self.graph.add_nodes_from(['d', 'e', 'f'])
        self.graph.add_edges_from([('d', 'e'), ('e', 'f')])
        self.assertListEqual(sorted(self.graph.nodes()),
                             ['a', 'b', 'c', 'd', 'e', 'f'])
        self.assertListEqual(hf.recursive_sorted(self.graph.edges()),
                             hf.recursive_sorted([('a', 'b'), ('b', 'c'),
                                                  ('d', 'e'), ('e', 'f')]))

    def test_add_edges_from_nonstring(self):
        self.graph.add_edges_from([(1, 2), (2, 3)])

    def test_add_edges_from_self_loop(self):
        self.assertRaises(ValueError, self.graph.add_edges_from,
                          [('a', 'a')])

    def test_number_of_neighbors(self):
        self.graph.add_edges_from([('a', 'b'), ('b', 'c')])
        self.assertEqual(len(self.graph.neighbors('b')), 2)

    def tearDown(self):
        del self.graph
class TestMarkovModelMethods(unittest.TestCase):
    """Conversions and independence queries on a populated MarkovModel."""

    def setUp(self):
        self.graph = MarkovModel()

    def test_factor_graph(self):
        from pgmpy.models import FactorGraph

        phi1 = Factor(['Alice', 'Bob'], [3, 2], np.random.rand(6))
        phi2 = Factor(['Bob', 'Charles'], [3, 2], np.random.rand(6))
        self.graph.add_edges_from([('Alice', 'Bob'), ('Bob', 'Charles')])
        self.graph.add_factors(phi1, phi2)

        factor_graph = self.graph.to_factor_graph()
        self.assertIsInstance(factor_graph, FactorGraph)
        # One phi_* node is created per factor, named after its scope.
        self.assertListEqual(sorted(factor_graph.nodes()),
                             ['Alice', 'Bob', 'Charles', 'phi_Alice_Bob',
                              'phi_Bob_Charles'])
        self.assertListEqual(hf.recursive_sorted(factor_graph.edges()),
                             [['Alice', 'phi_Alice_Bob'], ['Bob', 'phi_Alice_Bob'],
                              ['Bob', 'phi_Bob_Charles'], ['Charles', 'phi_Bob_Charles']])
        self.assertListEqual(factor_graph.get_factors(), [phi1, phi2])

    def test_factor_graph_raises_error(self):
        # Conversion without any attached factors must fail.
        self.graph.add_edges_from([('Alice', 'Bob'), ('Bob', 'Charles')])
        self.assertRaises(ValueError, self.graph.to_factor_graph)

    def test_junction_tree(self):
        cycle = [('a', 'b'), ('b', 'c'), ('c', 'd'), ('d', 'a')]
        self.graph.add_edges_from(cycle)
        card = {'a': 2, 'b': 3, 'c': 4, 'd': 5}
        self.graph.add_factors(*[Factor([u, v], [card[u], card[v]],
                                        np.random.rand(card[u] * card[v]))
                                 for u, v in cycle])
        junction_tree = self.graph.to_junction_tree()
        # The chorded 4-cycle yields two triangle cliques sharing an edge.
        self.assertListEqual(hf.recursive_sorted(junction_tree.nodes()),
                             [['a', 'b', 'd'], ['b', 'c', 'd']])
        self.assertEqual(len(junction_tree.edges()), 1)

    def test_markov_blanket(self):
        self.graph.add_edges_from([('a', 'b'), ('b', 'c')])
        self.assertListEqual(self.graph.markov_blanket('a'), ['b'])
        self.assertListEqual(sorted(self.graph.markov_blanket('b')),
                             ['a', 'c'])

    def test_local_independencies(self):
        from pgmpy.independencies import Independencies

        self.graph.add_edges_from([('a', 'b'), ('b', 'c')])
        independencies = self.graph.get_local_independecies()
        self.assertIsInstance(independencies, Independencies)
        self.assertEqual(len(independencies.get_independencies()), 2)
        ordered = sorted(independencies.get_independencies(),
                         key=lambda x: list(x.event1))
        string = ''.join(str(assertion) + '\n' for assertion in ordered)
        self.assertEqual(string, 'a _|_ c | b\nc _|_ a | b\n')

    def test_bayesian_model(self):
        from pgmpy.models import BayesianModel
        import networkx as nx

        cycle = [('a', 'b'), ('b', 'c'), ('c', 'd'), ('d', 'a')]
        self.graph.add_edges_from(cycle)
        card = {'a': 2, 'b': 3, 'c': 4, 'd': 5}
        self.graph.add_factors(*[Factor([u, v], [card[u], card[v]],
                                        np.random.rand(card[u] * card[v]))
                                 for u, v in cycle])
        bm = self.graph.to_bayesian_model()
        self.assertIsInstance(bm, BayesianModel)
        self.assertListEqual(sorted(bm.nodes()), ['a', 'b', 'c', 'd'])
        # The moralized/triangulated structure must be chordal.
        self.assertTrue(nx.is_chordal(bm.to_undirected()))

    def tearDown(self):
        del self.graph
class TestUndirectedGraphFactorOperations(unittest.TestCase):
    """Attaching, removing and evaluating factors on a MarkovModel."""

    def setUp(self):
        self.graph = MarkovModel()

    def test_add_factor_raises_error(self):
        self.graph.add_edges_from([('Alice', 'Bob'), ('Bob', 'Charles'),
                                   ('Charles', 'Debbie'), ('Debbie', 'Alice')])
        # 'John' is not a node of the model, so the factor must be rejected.
        factor = Factor(['Alice', 'Bob', 'John'], [2, 2, 2], np.random.rand(8))
        self.assertRaises(ValueError, self.graph.add_factors, factor)

    def test_add_single_factor(self):
        self.graph.add_nodes_from(['a', 'b', 'c'])
        phi = Factor(['a', 'b'], [2, 2], range(4))
        self.graph.add_factors(phi)
        self.assertListEqual(self.graph.get_factors(), [phi])

    def test_add_multiple_factors(self):
        self.graph.add_nodes_from(['a', 'b', 'c'])
        phi1 = Factor(['a', 'b'], [2, 2], range(4))
        phi2 = Factor(['b', 'c'], [2, 2], range(4))
        self.graph.add_factors(phi1, phi2)
        self.assertListEqual(self.graph.get_factors(), [phi1, phi2])

    def test_remove_single_factor(self):
        self.graph.add_nodes_from(['a', 'b', 'c'])
        phi1 = Factor(['a', 'b'], [2, 2], range(4))
        phi2 = Factor(['b', 'c'], [2, 2], range(4))
        self.graph.add_factors(phi1, phi2)
        self.graph.remove_factors(phi1)
        self.assertListEqual(self.graph.get_factors(), [phi2])

    def test_remove_multiple_factors(self):
        self.graph.add_nodes_from(['a', 'b', 'c'])
        phi1 = Factor(['a', 'b'], [2, 2], range(4))
        phi2 = Factor(['b', 'c'], [2, 2], range(4))
        self.graph.add_factors(phi1, phi2)
        self.graph.remove_factors(phi1, phi2)
        self.assertListEqual(self.graph.get_factors(), [])

    def test_partition_function(self):
        self.graph.add_nodes_from(['a', 'b', 'c'])
        phi1 = Factor(['a', 'b'], [2, 2], range(4))
        phi2 = Factor(['b', 'c'], [2, 2], range(4))
        self.graph.add_factors(phi1, phi2)
        self.graph.add_edges_from([('a', 'b'), ('b', 'c')])
        # Z = sum over all joint assignments of phi1 * phi2 for these values.
        self.assertEqual(self.graph.get_partition_function(), 22.0)

    def test_partition_function_raises_error(self):
        # Factor scopes do not cover node 'd', so Z is undefined.
        self.graph.add_nodes_from(['a', 'b', 'c', 'd'])
        phi1 = Factor(['a', 'b'], [2, 2], range(4))
        phi2 = Factor(['b', 'c'], [2, 2], range(4))
        self.graph.add_factors(phi1, phi2)
        self.assertRaises(ValueError,
                          self.graph.get_partition_function)

    def tearDown(self):
        del self.graph
class TestUndirectedGraphTriangulation(unittest.TestCase):
    def setUp(self):
        # Fresh, empty MarkovModel for every triangulation test.
        self.graph = MarkovModel()
def test_check_clique(self):
self.graph.add_edges_from([('a', 'b'), ('b', 'c'), ('c', 'a')])
self.assertTrue(self.graph.check_clique(['a', 'b', 'c']))
def test_is_triangulated(self):
self.graph.add_edges_from([('a', 'b'), ('b', 'c'), ('c', 'a')])
self.assertTrue(self.graph.is_triangulated())
def test_triangulation_h1_inplace(self):
self.graph.add_edges_from([('a', 'b'), ('b', 'c'), ('c', 'd'),
('d', 'a')])
phi1 = Factor(['a', 'b'], [2, 3], np.random.rand(6))
phi2 = Factor(['b', 'c'], [3, 4], np.random.rand(12))
phi3 = Factor(['c', 'd'], [4, 5], np.random.rand(20))
phi4 = Factor(['d', 'a'], [5, 2], np.random.random(10))
self.graph.add_factors(phi1, phi2, phi3, phi4)
self.graph.triangulate(heuristic='H1', inplace=True)
self.assertTrue(self.graph.is_triangulated())
self.assertListEqual(hf.recursive_sorted(self.graph.edges()),
[['a', 'b'], ['a', 'c'], ['a', 'd'],
['b', 'c'], ['c', 'd']])
def test_triangulation_h2_inplace(self):
self.graph.add_edges_from([('a', 'b'), ('b', 'c'), ('c', 'd'),
('d', 'a')])
phi1 = Factor(['a', 'b'], [2, 3], np.random.rand(6))
phi2 = Factor(['b', 'c'], [3, 4], np.random.rand(12))
phi3 = Factor(['c', 'd'], [4, 5], np.random.rand(20))
phi4 = Factor(['d', 'a'], [5, 2], np.random.random(10))
self.graph.add_factors(phi1, phi2, phi3, phi4)
self.graph.triangulate(heuristic='H2', inplace=True)
self.assertTrue(self.graph.is_triangulated())
self.assertListEqual(hf.recursive_sorted(self.graph.edges()),
[['a', 'b'], ['a', 'c'], ['a', 'd'],
['b', 'c'], ['c', 'd']])
def test_triangulation_h3_inplace(self):
self.graph.add_edges_from([('a', 'b'), ('b', 'c'), ('c', 'd'),
('d', 'a')])
phi1 = Factor(['a', 'b'], [2, 3], np.random.rand(6))
phi2 = Factor(['b', 'c'], [3, 4], np.random.rand(12))
phi3 = Factor(['c', 'd'], [4, 5], np.random.rand(20))
phi4 = Factor(['d', 'a'], [5, 2], np.random.random(10))
self.graph.add_factors(phi1, phi2, phi3, phi4)
self.graph.triangulate(heuristic='H3', inplace=True)
self.assertTrue(self.graph.is_triangulated())
self.assertListEqual(hf.recursive_sorted(self.graph.edges()),
[['a', 'b'], ['a', 'd'], ['b', 'c'],
['b', 'd'], ['c', 'd']])
def test_triangulation_h4_inplace(self):
self.graph.add_edges_from([('a', 'b'), ('b', 'c'), ('c', 'd'),
('d', 'a')])
phi1 = Factor(['a', 'b'], [2, 3], np.random.rand(6))
phi2 = Factor(['b', 'c'], [3, 4], np.random.rand(12))
phi3 = Factor(['c', 'd'], [4, 5], np.random.rand(20))
phi4 = Factor(['d', 'a'], [5, 2], np.random.random(10))
self.graph.add_factors(phi1, phi2, phi3, phi4)
self.graph.triangulate(heuristic='H4', inplace=True)
self.assertTrue(self.graph.is_triangulated())
self.assertListEqual(hf.recursive_sorted(self.graph.edges()),
[['a', 'b'], ['a', 'd'], ['b', 'c'],
['b', 'd'], ['c', 'd']])
def test_triangulation_h5_inplace(self):
self.graph.add_edges_from([('a', 'b'), ('b', 'c'), ('c', 'd'),
('d', 'a')])
phi1 = Factor(['a', 'b'], [2, 3], np.random.rand(6))
phi2 = Factor(['b', 'c'], [3, 4], np.random.rand(12))
phi3 = Factor(['c', 'd'], [4, 5], np.random.rand(20))
phi4 = Factor(['d', 'a'], [5, 2], np.random.random(10))
self.graph.add_factors(phi1, phi2, phi3, phi4)
self.graph.triangulate(heuristic='H4', inplace=True)
self.assertTrue(self.graph.is_triangulated())
self.assertListEqual(hf.recursive_sorted(self.graph.edges()),
[['a', 'b'], ['a', 'd'], ['b', 'c'],
['b', 'd'], ['c', 'd']])
def test_triangulation_h6_inplace(self):
self.graph.add_edges_from([('a', 'b'), ('b', 'c'), ('c', 'd'),
('d', 'a')])
phi1 = Factor(['a', 'b'], [2, 3], np.random.rand(6))
phi2 = Factor(['b', 'c'], [3, 4], np.random.rand(12))
phi3 = Factor(['c', 'd'], [4, 5], np.random.rand(20))
phi4 = Factor(['d', 'a'], [5, 2], np.random.random(10))
self.graph.add_factors(phi1, phi2, phi3, phi4)
self.graph.triangulate(heuristic='H4', inplace=True)
self.assertTrue(self.graph.is_triangulated())
self.assertListEqual(hf.recursive_sorted(self.graph.edges()),
[['a', 'b'], ['a', 'd'], ['b', 'c'],
['b', 'd'], ['c', 'd']])
def test_cardinality_mismatch_raises_error(self):
self.graph.add_edges_from([('a', 'b'), ('b', 'c'), ('c', 'd'),
('d', 'a')])
factor_list = [Factor(edge, [2, 2], np.random.rand(4)) for edge in
self.graph.edges()]
self.graph.add_factors(*factor_list)
self.graph.add_factors(Factor(['a', 'b'], [2, 3], np.random.rand(6)))
self.assertRaises(CardinalityError, self.graph.triangulate)
def test_triangulation_h1_create_new(self):
self.graph.add_edges_from([('a', 'b'), ('b', 'c'), ('c', 'd'),
('d', 'a')])
phi1 = Factor(['a', 'b'], [2, 3], np.random.rand(6))
phi2 = Factor(['b', 'c'], [3, 4], np.random.rand(12))
phi3 = Factor(['c', 'd'], [4, 5], np.random.rand(20))
phi4 = Factor(['d', 'a'], [5, 2], np.random.random(10))
self.graph.add_factors(phi1, phi2, phi3, phi4)
H = self.graph.triangulate(heuristic='H1', inplace=True)
self.assertListEqual(hf.recursive_sorted(H.edges()),
[['a', 'b'], ['a', 'c'], ['a', 'd'],
['b', 'c'], ['c', 'd']])
def test_triangulation_h2_create_new(self):
self.graph.add_edges_from([('a', 'b'), ('b', 'c'), ('c', 'd'),
('d', 'a')])
phi1 = Factor(['a', 'b'], [2, 3], np.random.rand(6))
phi2 = Factor(['b', 'c'], [3, 4], np.random.rand(12))
phi3 = Factor(['c', 'd'], [4, 5], np.random.rand(20))
phi4 = Factor(['d', 'a'], [5, 2], np.random.random(10))
self.graph.add_factors(phi1, phi2, phi3, phi4)
H = self.graph.triangulate(heuristic='H2', inplace=True)
self.assertListEqual(hf.recursive_sorted(H.edges()),
[['a', 'b'], ['a', 'c'], ['a', 'd'],
['b', 'c'], ['c', 'd']])
def test_triangulation_h3_create_new(self):
self.graph.add_edges_from([('a', 'b'), ('b', 'c'), ('c', 'd'),
('d', 'a')])
phi1 = Factor(['a', 'b'], [2, 3], np.random.rand(6))
phi2 = Factor(['b', 'c'], [3, 4], np.random.rand(12))
phi3 = Factor(['c', 'd'], [4, 5], np.random.rand(20))
phi4 = Factor(['d', 'a'], [5, 2], np.random.random(10))
self.graph.add_factors(phi1, phi2, phi3, phi4)
H = self.graph.triangulate(heuristic='H3', inplace=True)
self.assertListEqual(hf.recursive_sorted(H.edges()),
[['a', 'b'], ['a', 'd'], ['b', 'c'],
['b', 'd'], ['c', 'd']])
def test_triangulation_h4_create_new(self):
self.graph.add_edges_from([('a', 'b'), ('b', 'c'), ('c', 'd'),
('d', 'a')])
phi1 = Factor(['a', 'b'], [2, 3], np.random.rand(6))
phi2 = Factor(['b', 'c'], [3, 4], np.random.rand(12))
phi3 = Factor(['c', 'd'], [4, 5], np.random.rand(20))
phi4 = Factor(['d', 'a'], [5, 2], np.random.random(10))
self.graph.add_factors(phi1, phi2, phi3, phi4)
H = self.graph.triangulate(heuristic='H4', inplace=True)
self.assertListEqual(hf.recursive_sorted(H.edges()),
[['a', 'b'], ['a', 'd'], ['b', 'c'],
['b', 'd'], ['c', 'd']])
def test_triangulation_h5_create_new(self):
self.graph.add_edges_from([('a', 'b'), ('b', 'c'), ('c', 'd'),
('d', 'a')])
phi1 = Factor(['a', 'b'], [2, 3], np.random.rand(6))
phi2 = Factor(['b', 'c'], [3, 4], np.random.rand(12))
phi3 = Factor(['c', 'd'], [4, 5], np.random.rand(20))
phi4 = Factor(['d', 'a'], [5, 2], np.random.random(10))
self.graph.add_factors(phi1, phi2, phi3, phi4)
H = self.graph.triangulate(heuristic='H5', inplace=True)
self.assertListEqual(hf.recursive_sorted(H.edges()),
[['a', 'b'], ['a', 'd'], ['b', 'c'],
['b', 'd'], ['c', 'd']])
def test_triangulation_h6_create_new(self):
self.graph.add_edges_from([('a', 'b'), ('b', 'c'), ('c', 'd'),
('d', 'a')])
phi1 = Factor(['a', 'b'], [2, 3], np.random.rand(6))
phi2 = Factor(['b', 'c'], [3, 4], np.random.rand(12))
phi3 = Factor(['c', 'd'], | |
in IMS (VoLTE or WiFi Calling) call with PhoneC.
Merge calls to conference on PhoneA (CEP enabled IMS conference).
Hangup on PhoneC, check call continues between AB.
Hangup on PhoneB, check A ends.
Args:
call_ab_id: call id for call_AB on PhoneA.
call_ac_id: call id for call_AC on PhoneA.
Returns:
True if succeed;
False if failed.
"""
ads = self.android_devices
call_conf_id = self._merge_cep_conference_call(call_ab_id, call_ac_id)
if call_conf_id is None:
return False
self.log.info("Step5: End call on PhoneC and verify call continues.")
ads[2].droid.telecomEndCall()
time.sleep(WAIT_TIME_IN_CALL)
calls = ads[0].droid.telecomCallGetCallIds()
self.log.info("Calls in PhoneA{}".format(calls))
if not verify_incall_state(self.log, [ads[0], ads[1]], True):
return False
if not verify_incall_state(self.log, [ads[2]], False):
return False
self.log.info("Step6: End call on PhoneB and verify PhoneA end.")
ads[1].droid.telecomEndCall()
time.sleep(WAIT_TIME_IN_CALL)
if not verify_incall_state(self.log, [ads[0], ads[1], ads[2]], False):
return False
return True
def _test_ims_conference_merge_drop_first_call_from_participant_cep(
self, call_ab_id, call_ac_id):
"""Test conference merge and drop in IMS (VoLTE or WiFi Calling) call.
(CEP enabled).
PhoneA in IMS (VoLTE or WiFi Calling) call with PhoneB.
PhoneA in IMS (VoLTE or WiFi Calling) call with PhoneC.
Merge calls to conference on PhoneA (CEP enabled IMS conference).
Hangup on PhoneB, check call continues between AC.
Hangup on PhoneC, check A ends.
Args:
call_ab_id: call id for call_AB on PhoneA.
call_ac_id: call id for call_AC on PhoneA.
Returns:
True if succeed;
False if failed.
"""
ads = self.android_devices
call_conf_id = self._merge_cep_conference_call(call_ab_id, call_ac_id)
if call_conf_id is None:
return False
self.log.info("Step5: End call on PhoneB and verify call continues.")
ads[1].droid.telecomEndCall()
time.sleep(WAIT_TIME_IN_CALL)
if not verify_incall_state(self.log, [ads[0], ads[2]], True):
return False
if not verify_incall_state(self.log, [ads[1]], False):
return False
self.log.info("Step6: End call on PhoneC and verify PhoneA end.")
ads[2].droid.telecomEndCall()
time.sleep(WAIT_TIME_IN_CALL)
if not verify_incall_state(self.log, [ads[0], ads[1], ads[2]], False):
return False
return True
def _test_ims_conference_merge_drop_second_call_from_host_cep(
self, call_ab_id, call_ac_id):
"""Test conference merge and drop in IMS (VoLTE or WiFi Calling) call.
(CEP enabled).
PhoneA in IMS (VoLTE or WiFi Calling) call with PhoneB.
PhoneA in IMS (VoLTE or WiFi Calling) call with PhoneC.
Merge calls to conference on PhoneA (CEP enabled IMS conference).
On PhoneA, disconnect call between A-C, verify PhoneA PhoneB still in call.
On PhoneA, disconnect call between A-B, verify PhoneA PhoneB disconnected.
Args:
call_ab_id: call id for call_AB on PhoneA.
call_ac_id: call id for call_AC on PhoneA.
Returns:
True if succeed;
False if failed.
"""
ads = self.android_devices
call_ab_uri = get_call_uri(ads[0], call_ab_id)
call_ac_uri = get_call_uri(ads[0], call_ac_id)
call_conf_id = self._merge_cep_conference_call(call_ab_id, call_ac_id)
if call_conf_id is None:
return False
calls = ads[0].droid.telecomCallGetCallIds()
calls.remove(call_conf_id)
self.log.info("Step5: Disconnect call A-C and verify call continues.")
call_to_disconnect = None
for call in calls:
if is_uri_equivalent(call_ac_uri, get_call_uri(ads[0], call)):
call_to_disconnect = call
calls.remove(call_to_disconnect)
break
if call_to_disconnect is None:
self.log.error("Can NOT find call on host represents A-C.")
return False
else:
ads[0].droid.telecomCallDisconnect(call_to_disconnect)
time.sleep(WAIT_TIME_IN_CALL)
if not verify_incall_state(self.log, [ads[0], ads[1]], True):
return False
if not verify_incall_state(self.log, [ads[2]], False):
return False
self.log.info(
"Step6: Disconnect call A-B and verify PhoneA PhoneB end.")
call_to_disconnect = None
for call in calls:
if is_uri_equivalent(call_ab_uri, get_call_uri(ads[0], call)):
call_to_disconnect = call
calls.remove(call_to_disconnect)
break
if call_to_disconnect is None:
self.log.error("Can NOT find call on host represents A-B.")
return False
else:
ads[0].droid.telecomCallDisconnect(call_to_disconnect)
time.sleep(WAIT_TIME_IN_CALL)
if not verify_incall_state(self.log, [ads[0], ads[1], ads[2]], False):
return False
return True
def _test_ims_conference_merge_drop_first_call_from_host_cep(
self, call_ab_id, call_ac_id):
"""Test conference merge and drop in IMS (VoLTE or WiFi Calling) call.
(CEP enabled).
PhoneA in IMS (VoLTE or WiFi Calling) call with PhoneB.
PhoneA in IMS (VoLTE or WiFi Calling) call with PhoneC.
Merge calls to conference on PhoneA (CEP enabled IMS conference).
On PhoneA, disconnect call between A-B, verify PhoneA PhoneC still in call.
On PhoneA, disconnect call between A-C, verify PhoneA PhoneC disconnected.
Args:
call_ab_id: call id for call_AB on PhoneA.
call_ac_id: call id for call_AC on PhoneA.
Returns:
True if succeed;
False if failed.
"""
ads = self.android_devices
call_ab_uri = get_call_uri(ads[0], call_ab_id)
call_ac_uri = get_call_uri(ads[0], call_ac_id)
call_conf_id = self._merge_cep_conference_call(call_ab_id, call_ac_id)
if call_conf_id is None:
return False
calls = ads[0].droid.telecomCallGetCallIds()
calls.remove(call_conf_id)
self.log.info("Step5: Disconnect call A-B and verify call continues.")
call_to_disconnect = None
for call in calls:
if is_uri_equivalent(call_ab_uri, get_call_uri(ads[0], call)):
call_to_disconnect = call
calls.remove(call_to_disconnect)
break
if call_to_disconnect is None:
self.log.error("Can NOT find call on host represents A-B.")
return False
else:
ads[0].droid.telecomCallDisconnect(call_to_disconnect)
time.sleep(WAIT_TIME_IN_CALL)
if not verify_incall_state(self.log, [ads[0], ads[2]], True):
return False
if not verify_incall_state(self.log, [ads[1]], False):
return False
self.log.info(
"Step6: Disconnect call A-C and verify PhoneA PhoneC end.")
call_to_disconnect = None
for call in calls:
if is_uri_equivalent(call_ac_uri, get_call_uri(ads[0], call)):
call_to_disconnect = call
calls.remove(call_to_disconnect)
break
if call_to_disconnect is None:
self.log.error("Can NOT find call on host represents A-C.")
return False
else:
ads[0].droid.telecomCallDisconnect(call_to_disconnect)
time.sleep(WAIT_TIME_IN_CALL)
if not verify_incall_state(self.log, [ads[0], ads[1], ads[2]], False):
return False
return True
def _test_wcdma_conference_merge_drop(self, call_ab_id, call_ac_id):
"""Test conference merge and drop in WCDMA/CSFB_WCDMA call.
PhoneA in WCDMA (or CSFB_WCDMA) call with PhoneB.
PhoneA in WCDMA (or CSFB_WCDMA) call with PhoneC.
Merge calls to conference on PhoneA.
Hangup on PhoneC, check call continues between AB.
Hangup on PhoneB, check A ends.
Args:
call_ab_id: call id for call_AB on PhoneA.
call_ac_id: call id for call_AC on PhoneA.
Returns:
True if succeed;
False if failed.
"""
ads = self.android_devices
self.log.info("Step4: Merge to Conf Call and verify Conf Call.")
ads[0].droid.telecomCallJoinCallsInConf(call_ab_id, call_ac_id)
time.sleep(WAIT_TIME_IN_CALL)
calls = ads[0].droid.telecomCallGetCallIds()
self.log.info("Calls in PhoneA{}".format(calls))
if num_active_calls(self.log, ads[0]) != 3:
self.log.error("Total number of call ids in {} is not 3.".format(
ads[0].serial))
return False
call_conf_id = None
for call_id in calls:
if call_id != call_ab_id and call_id != call_ac_id:
call_conf_id = call_id
if not call_conf_id:
self.log.error("Merge call fail, no new conference call id.")
return False
if not verify_incall_state(self.log, [ads[0], ads[1], ads[2]], True):
return False
# Check if Conf Call is currently active
if ads[0].droid.telecomCallGetCallState(
call_conf_id) != CALL_STATE_ACTIVE:
self.log.error(
"Call_id:{}, state:{}, expected: STATE_ACTIVE".format(
call_conf_id, ads[0].droid.telecomCallGetCallState(
call_conf_id)))
return False
self.log.info("Step5: End call on PhoneC and verify call continues.")
ads[2].droid.telecomEndCall()
time.sleep(WAIT_TIME_IN_CALL)
calls = ads[0].droid.telecomCallGetCallIds()
self.log.info("Calls in PhoneA{}".format(calls))
if num_active_calls(self.log, ads[0]) != 1:
return False
if not verify_incall_state(self.log, [ads[0], ads[1]], True):
return False
if not verify_incall_state(self.log, [ads[2]], False):
return False
self.log.info("Step6: End call on PhoneB and verify PhoneA end.")
ads[1].droid.telecomEndCall()
time.sleep(WAIT_TIME_IN_CALL)
if not verify_incall_state(self.log, [ads[0], ads[1], ads[2]], False):
return False
return True
def _three_phone_hangup_call_verify_call_state(
self, ad_hangup, ad_verify, call_id, call_state, ads_active):
"""Private Test utility for swap test.
Hangup on 'ad_hangup'.
Verify 'call_id' on 'ad_verify' is in expected 'call_state'
Verify each ad in ads_active are 'in-call'.
Args:
ad_hangup: android object to hangup call.
ad_verify: android object to verify call id state.
call_id: call id in 'ad_verify'.
call_state: expected state for 'call_id'.
'call_state' is either CALL_STATE_HOLDING or CALL_STATE_ACTIVE.
ads_active: list of android object.
Each one of them should be 'in-call' after 'hangup' operation.
Returns:
True if no error happened. Otherwise False.
"""
self.log.info("Hangup at {}, verify call continues.".format(
ad_hangup.serial))
ad_hangup.droid.telecomEndCall()
time.sleep(WAIT_TIME_IN_CALL)
if ad_verify.droid.telecomCallGetCallState(call_id) != call_state:
self.log.error("Call_id:{}, state:{}, expected: {}".format(
call_id, ad_verify.droid.telecomCallGetCallState(
call_id), call_state))
return False
# TODO: b/26296375 add voice check.
if not verify_incall_state(self.log, ads_active, True):
return False
if not verify_incall_state(self.log, [ad_hangup], False):
return False
return True
def _test_epdg_mo_mo_add_epdg_swap_x(self, num_swaps):
"""Test swap feature in epdg call.
PhoneA (epdg) call PhoneB (epdg), accept on PhoneB.
PhoneA (epdg) call PhoneC (epdg), accept on PhoneC.
Swap active call on PhoneA.(N times)
Args:
num_swaps: do swap for 'num_swaps' times.
This value can be 0 (no swap operation).
Returns:
call_ab_id, call_ac_id if succeed;
None, None if failed.
"""
ads = self.android_devices
# To make thing simple, for epdg, setup should be called before calling
# _test_epdg_mo_mo_add_epdg_swap_x in test cases.
call_ab_id = self._three_phone_call_mo_add_mo(
[ads[0], ads[1], ads[2]], [None, None, None],
[is_phone_in_call_iwlan, is_phone_in_call_iwlan,
is_phone_in_call_iwlan])
if call_ab_id is None:
self.log.error("Failed to get call_ab_id")
return None, None
calls = ads[0].droid.telecomCallGetCallIds()
self.log.info("Calls in PhoneA{}".format(calls))
if num_active_calls(self.log, ads[0]) != 2:
return None, None
if calls[0] == call_ab_id:
call_ac_id = calls[1]
else:
call_ac_id = calls[0]
if num_swaps > 0:
self.log.info("Step3: Begin Swap x{} test.".format(num_swaps))
if not swap_calls(self.log, ads, call_ab_id, call_ac_id,
num_swaps):
self.log.error("Swap test failed.")
return None, None
return call_ab_id, call_ac_id
def _test_epdg_mo_mt_add_epdg_swap_x(self, num_swaps):
"""Test swap feature in epdg call.
PhoneA (epdg) call PhoneB (epdg), accept on PhoneB.
PhoneC (epdg) call PhoneA (epdg), accept on PhoneA.
Swap active call on PhoneA.(N times)
Args:
num_swaps: do swap for | |
# Source repository: lucaskotres/ElipseExamples
# -*- coding: utf-8 -*-
import epmsdk
import epmsdk.communication as epmcomm
import epmsdk.dataaccess as epmda
import epmsdk.historicaldata as epmhda
import epmsdk.opcua.core as core
import datetime
def status_air():
    """Scan one month of 'wb_Tin' annotations and count, per day, the
    samples where the air conditioning was ON outside working hours.

    Returns:
        (days, acc_days): day-of-month markers and accumulated counts.
        NOTE(review): each day's counter is appended when the *next* day
        starts, so counts are offset by one entry and the final day is
        never flushed - confirm whether this is intended.
    """
    try:
        conn01 = epmcomm.epmConnect(hostname='kotres', username='sa', password='<PASSWORD>')
        print "Conectado ao EPM Server!"
    except epmsdk.EpmException as ex:
        print 'Failed on connection to EPM with error\n{}\n'.format(ex)
        exit(1)
    # Read the annotations attached to the 'wb_Tin' tag.
    tagTeste = epmda.epmGetDataObjectAnnotation(conn01, 'wb_Tin')
    dataInicial = datetime.datetime(year=2016, month=3, day=1)
    dataFinal = datetime.datetime(year=2016, month=4, day=1)
    try:
        hG = epmhda.epmTagHistoryRead(tagTeste, dataInicial, dataFinal)
        print unicode(hG.size)
    except epmsdk.EpmException as ex:
        print 'Failed when getting Tag with error\n{}\n'.format(ex)
        raw_input("Error on historyread")
        exit(1)
    print 'Succeeded!'
    ok = []
    down=[]
    up=[]
    acc_day_OK=0.0
    acc_day_UP=0.0
    acc_day_DOWN=0.0
    days = []
    acc_days = []
    current_day=-1
    acc_day = 0
    for i in range(1,hG.size):
        # Timestamps are shifted from UTC to local time (UTC-3).
        date = hG[i][0]-datetime.timedelta(hours=3)
        # Annotation payload is a CSV record of key:value fields.
        comfort, setpoint, statusAir, T_OUT, WorkTime, NUser = hG[i][1].split(',')
        print date.day
        print statusAir.split(':')[1]
        print WorkTime.split(':')[1]
        if current_day != date.day:
            # Day changed: flush the previous day's counter and restart.
            days.append(date.day)
            acc_days.append(acc_day)
            current_day = date.day
            acc_day=0
            # Air ON while WorkTime is 'n' (non-working hours) counts.
            if statusAir.split(':')[1] == 'ON' and WorkTime.split(':')[1]=='n':
                acc_day =1
        else:
            if statusAir.split(':')[1] == 'ON' and WorkTime.split(':')[1]=='n':
                acc_day =acc_day+1
    return days, acc_days
def comfort_workTime_totalMonth():
    """Aggregate the per-day working-hours comfort data over the month.

    Returns:
        (number_of_days, pct_down, pct_ok, pct_up): the day count from
        comfort_workTime() and the share (in percent) of accumulated
        hours spent in each comfort band.
    """
    days, up, down, ok = comfort_workTime()
    total_up = sum(up)
    total_down = sum(down)
    total_ok = sum(ok)
    # BUG FIX: the total previously summed 'ok' twice and ignored 'up'
    # (total_ok + total_down + total_ok), skewing all three percentages.
    total_hours = total_ok + total_down + total_up
    total_ok = (total_ok*100)/total_hours
    total_down = (total_down*100)/total_hours
    total_up = (total_up*100)/total_hours
    return len(days), total_down,total_ok,total_up
def comfort_workTime():
    """Accumulate, per day, the hours spent in each comfort band
    ('UP' / 'OK' / 'DOWN') restricted to working hours (08:00-18:00).

    Each annotation carries a WorkTime flag ('y'/'n'); intervals that
    cross into or out of working time are clipped to 08:00 / 18:00.
    A day's totals are flushed when the annotation date changes.

    Returns:
        (days, up, down, ok): parallel lists, hours rounded to 2 decimals.
    """
    try:
        conn01 = epmcomm.epmConnect(hostname='kotres', username='sa', password='<PASSWORD>')
        print "Conectado ao EPM Server!"
    except epmsdk.EpmException as ex:
        print 'Failed on connection to EPM with error\n{}\n'.format(ex)
        exit(1)
    # Read the annotations attached to the 'wb_Tin' tag.
    tagTeste = epmda.epmGetDataObjectAnnotation(conn01, 'wb_Tin')
    dataInicial = datetime.datetime(year=2016, month=3, day=1)
    dataFinal = datetime.datetime(year=2016, month=4, day=1)
    try:
        hG = epmhda.epmTagHistoryRead(tagTeste, dataInicial, dataFinal)
        print unicode(hG.size)
    except epmsdk.EpmException as ex:
        print 'Failed when getting Tag with error\n{}\n'.format(ex)
        raw_input("Error on historyread")
        exit(1)
    print 'Succeeded!'
    ok = []
    down=[]
    up=[]
    acc_day_OK=0.0
    acc_day_UP=0.0
    acc_day_DOWN=0.0
    days = []
    for i in range(1,hG.size):
        # Timestamps are shifted from UTC to local time (UTC-3).
        date = hG[i][0]-datetime.timedelta(hours=3)
        previous_date = hG[i-1][0]-datetime.timedelta(hours=3)
        # Annotation payload is a CSV record of key:value fields.
        comfort, setpoint, statusAir, T_OUT, WorkTime, NUser = hG[i][1].split(',')
        previous_comfort,previous_setpoint, previous_statusAir, previous_T_OUT, previous_WorkTime, previous_NUser = hG[i-1][1].split(',')
        WorkTime = str(WorkTime.split(':')[1])
        comfort= str(comfort.split(':')[1])
        previous_WorkTime = str(previous_WorkTime.split(':')[1])
        previous_comfort= str(previous_comfort.split(':')[1])
        if WorkTime == 'y' and previous_WorkTime == 'y':
            # Different days -> accumulate up to 18:00 of the previous day
            # and close out that day's totals.
            if previous_date.day!=date.day:
                if previous_date.hour<18:
                    diff_hours= float(((datetime.datetime(year=previous_date.year, month= previous_date.month, day= previous_date.day,hour=18,minute=0)- previous_date).seconds)/60)/60
                    if previous_comfort == 'UP':
                        acc_day_UP = acc_day_UP + diff_hours
                    elif previous_comfort == 'OK':
                        acc_day_OK = acc_day_OK + diff_hours
                    else:
                        acc_day_DOWN = acc_day_DOWN + diff_hours
                up.append(float('%.2f'% acc_day_UP))
                down.append(float('%.2f'% acc_day_DOWN))
                ok.append(float('%.2f'% acc_day_OK))
                acc_day_UP = 0.0
                acc_day_OK = 0.0
                acc_day_DOWN = 0.0
                days.append(previous_date.day)
                # Accumulate from 08:00 of the new day up to the annotation.
                diff_hours= float(((date - datetime.datetime(year=date.year, month= date.month, day= date.day,hour=8,minute=0)).seconds)/60)/60
                if previous_comfort == 'UP':
                    acc_day_UP = acc_day_UP + diff_hours
                elif previous_comfort == 'OK':
                    acc_day_OK = acc_day_OK + diff_hours
                else:
                    acc_day_DOWN = acc_day_DOWN + diff_hours
            # Same day: credit the whole interval to the previous band.
            else:
                diff_hours= float(((date - previous_date).seconds)/60)/60
                if previous_comfort == 'UP':
                    acc_day_UP = acc_day_UP + diff_hours
                elif previous_comfort == 'OK':
                    acc_day_OK = acc_day_OK + diff_hours
                else:
                    acc_day_DOWN = acc_day_DOWN + diff_hours
        elif WorkTime == 'y' and previous_WorkTime == 'n':
            # Different days -> accumulate up to 18:00 of the previous day
            # and close out that day's totals.
            if previous_date.day!=date.day:
                # Previous sample was still inside working hours.
                if previous_date.hour<18:
                    diff_hours= float(((datetime.datetime(year=previous_date.year, month= previous_date.month, day= previous_date.day,hour=18,minute=0)- previous_date).seconds)/60)/60
                    if previous_comfort == 'UP':
                        acc_day_UP = acc_day_UP + diff_hours
                    elif previous_comfort == 'OK':
                        acc_day_OK = acc_day_OK + diff_hours
                    else:
                        acc_day_DOWN = acc_day_DOWN + diff_hours
                up.append(float('%.2f'% acc_day_UP))
                down.append(float('%.2f'% acc_day_DOWN))
                ok.append(float('%.2f'% acc_day_OK))
                acc_day_UP = 0.0
                acc_day_OK = 0.0
                acc_day_DOWN = 0.0
                days.append(previous_date.day)
                # Accumulate from 08:00 of the new day up to the annotation.
                diff_hours= float(((date - datetime.datetime(year=date.year, month= date.month, day= date.day,hour=8,minute=0)).seconds)/60)/60
                if previous_comfort == 'UP':
                    acc_day_UP = acc_day_UP + diff_hours
                elif previous_comfort == 'OK':
                    acc_day_OK = acc_day_OK + diff_hours
                else:
                    acc_day_DOWN = acc_day_DOWN + diff_hours
            # Same day: only count from 08:00 once working time starts.
            else:
                if previous_date.hour<8:
                    diff_hours= float(((date - datetime.datetime(year=date.year, month= date.month, day= date.day,hour=8,minute=0)).seconds)/60)/60
                    if previous_comfort == 'UP':
                        acc_day_UP = acc_day_UP + diff_hours
                    elif previous_comfort == 'OK':
                        acc_day_OK = acc_day_OK + diff_hours
                    else:
                        acc_day_DOWN = acc_day_DOWN + diff_hours
        elif WorkTime == 'n' and previous_WorkTime == 'y':
            if previous_date.day!=date.day:
                # Previous sample was working time: clip at 18:00.
                diff_hours= float(((datetime.datetime(year=previous_date.year, month= previous_date.month, day= previous_date.day,hour=18,minute=0)- previous_date).seconds)/60)/60
                if previous_comfort == 'UP':
                    acc_day_UP = acc_day_UP + diff_hours
                elif previous_comfort == 'OK':
                    acc_day_OK = acc_day_OK + diff_hours
                else:
                    acc_day_DOWN = acc_day_DOWN + diff_hours
                up.append(float('%.2f'% acc_day_UP))
                down.append(float('%.2f'% acc_day_DOWN))
                ok.append(float('%.2f'% acc_day_OK))
                acc_day_UP = 0.0
                acc_day_OK = 0.0
                acc_day_DOWN = 0.0
                days.append(previous_date.day)
            # Same day: clip the interval at 18:00 if it runs past it.
            else:
                if date.hour>=18:
                    diff_hours= float(((datetime.datetime(year=previous_date.year, month= previous_date.month, day= previous_date.day,hour=18,minute=0)-previous_date).seconds)/60)/60
                    if previous_comfort == 'UP':
                        acc_day_UP = acc_day_UP + diff_hours
                    elif previous_comfort == 'OK':
                        acc_day_OK = acc_day_OK + diff_hours
                    else:
                        acc_day_DOWN = acc_day_DOWN + diff_hours
                else:
                    diff_hours= float(((date - previous_date).seconds)/60)/60
                    if previous_comfort == 'UP':
                        acc_day_UP = acc_day_UP + diff_hours
                    elif previous_comfort == 'OK':
                        acc_day_OK = acc_day_OK + diff_hours
                    else:
                        acc_day_DOWN = acc_day_DOWN + diff_hours
        # Both WorkTime and previous_WorkTime == 'n': nothing to count,
        # but a day change still flushes the totals.
        elif WorkTime == 'n' and previous_WorkTime == 'n':
            if previous_date.day!=date.day:
                up.append(float('%.2f'% acc_day_UP))
                down.append(float('%.2f'% acc_day_DOWN))
                ok.append(float('%.2f'% acc_day_OK))
                acc_day_UP = 0.0
                acc_day_OK = 0.0
                acc_day_DOWN = 0.0
                days.append(previous_date.day)
        # Flush the last day explicitly: the final annotation never
        # triggers a day change.
        if i==hG.size-1:
            if date.hour>=18:
                diff_hours= float(((datetime.datetime(year=previous_date.year, month= previous_date.month, day= previous_date.day,hour=18,minute=0)-previous_date).seconds)/60)/60
                if previous_comfort == 'UP':
                    acc_day_UP = acc_day_UP + diff_hours
                elif previous_comfort == 'OK':
                    acc_day_OK = acc_day_OK + diff_hours
                else:
                    acc_day_DOWN = acc_day_DOWN + diff_hours
            else:
                diff_hours= float(((date - previous_date).seconds)/60)/60
                if previous_comfort == 'UP':
                    acc_day_UP = acc_day_UP + diff_hours
                elif previous_comfort == 'OK':
                    acc_day_OK = acc_day_OK + diff_hours
                else:
                    acc_day_DOWN = acc_day_DOWN + diff_hours
            days.append(date.day)
            up.append(float('%.2f'% acc_day_UP))
            down.append(float('%.2f'% acc_day_DOWN))
            ok.append(float('%.2f'% acc_day_OK))
    return days, up, down, ok
def comfort_day():
    """Accumulate, per calendar day (midnight to midnight), the hours
    spent in each comfort band ('UP' / 'OK' / 'DOWN').

    Days with no annotations at all (e.g. weekends) are filled with 24h
    of the band that was active when the gap started.

    Returns:
        (days, ok, up, down): parallel lists, hours rounded to 2 decimals.
    """
    try:
        conn01 = epmcomm.epmConnect(hostname='kotres', username='sa', password='<PASSWORD>')
        print "Conectado ao EPM Server!"
    except epmsdk.EpmException as ex:
        print 'Failed on connection to EPM with error\n{}\n'.format(ex)
        exit(1)
    # Read the annotations attached to the 'wb_Tin' tag.
    tagTeste = epmda.epmGetDataObjectAnnotation(conn01, 'wb_Tin')
    dataInicial = datetime.datetime(year=2016, month=3, day=1)
    dataFinal = datetime.datetime(year=2016, month=4, day=1)
    try:
        hG = epmhda.epmTagHistoryRead(tagTeste, dataInicial, dataFinal)
        print unicode(hG.size)
    except epmsdk.EpmException as ex:
        print 'Failed when getting Tag with error\n{}\n'.format(ex)
        raw_input("Error on historyread")
        exit(1)
    print 'Succeeded!'
    ok = []
    down=[]
    up=[]
    acc_day_OK=0.0
    acc_day_UP=0.0
    acc_day_DOWN=0.0
    days = []
    day=1
    for i in range(1,hG.size):
        # Timestamps are shifted from UTC to local time (UTC-3).
        date = hG[i][0]-datetime.timedelta(hours=3)
        previous_date = hG[i-1][0]-datetime.timedelta(hours=3)
        # Annotation payload is a CSV record of key:value fields.
        comfort, setpoint, statusAir, T_OUT, WorkTime, NUser = hG[i][1].split(',')
        previous_comfort,previous_setpoint, previous_statusAir, previous_T_OUT, previous_WorkTime, previous_NUser = hG[i-1][1].split(',')
        WorkTime = str(WorkTime.split(':')[1])
        comfort= str(comfort.split(':')[1])
        previous_WorkTime = str(previous_WorkTime.split(':')[1])
        previous_comfort= str(previous_comfort.split(':')[1])
        if previous_date.day == date.day:
            # Same day: credit the interval to the previous comfort band.
            diff_hours= float(((date - previous_date).seconds)/60)/60
            if previous_comfort == 'UP':
                acc_day_UP = acc_day_UP + diff_hours
            elif previous_comfort == 'OK':
                acc_day_OK = acc_day_OK + diff_hours
            else:
                acc_day_DOWN = acc_day_DOWN + diff_hours
        # Day changed
        else:
            # Accumulate up to 23:59 of the previous day, then flush it.
            diff_hours= float(((datetime.datetime(year=previous_date.year, month= previous_date.month, day= previous_date.day,hour=23,minute=59) - previous_date).seconds)/60)/60
            if previous_comfort == 'UP':
                acc_day_UP = acc_day_UP + diff_hours
            elif previous_comfort == 'OK':
                acc_day_OK = acc_day_OK + diff_hours
            else:
                acc_day_DOWN = acc_day_DOWN + diff_hours
            up.append(float('%.2f'% acc_day_UP))
            down.append(float('%.2f'% acc_day_DOWN))
            ok.append(float('%.2f'% acc_day_OK))
            days.append(day)
            day=day+1
            # Accumulate from midnight up to the current annotation.
            diff_hours = float(((date - datetime.datetime(year= date.year, month= date.month, day= date.day,hour=00,minute=00)).seconds)/60)/60
            if previous_comfort == 'UP':
                acc_day_UP = diff_hours
                acc_day_OK = 0.0
                acc_day_DOWN = 0.0
            elif previous_comfort == 'OK':
                acc_day_OK = diff_hours
                acc_day_DOWN = 0.0
                acc_day_UP = 0.0
            else:
                acc_day_DOWN = diff_hours
                acc_day_OK=0.0
                acc_day_UP=0.0
            # Gap of more than one day (weekend): fill each skipped day
            # with 24h of the band active when the gap started.
            weekend = date.day - previous_date.day
            if(weekend>1):
                for j in range(previous_date.day+1,date.day):
                    days.append(j)
                    day=day+1
                    if previous_comfort == 'UP':
                        acc_day_UP =24
                        acc_day_OK = 0.0
                        acc_day_DOWN = 0.0
                    elif previous_comfort == 'OK':
                        acc_day_OK = 24
                        acc_day_DOWN = 0.0
                        acc_day_UP = 0.0
                    else:
                        acc_day_DOWN = 24
                        acc_day_OK=0
                        acc_day_UP=0
                    up.append(float('%.2f'% acc_day_UP))
                    down.append(float('%.2f'% acc_day_DOWN))
                    ok.append(float('%.2f'% acc_day_OK))
        # Flush the last day explicitly: the final annotation never
        # triggers a day change.
        if i==hG.size-1:
            days.append(day)
            diff_hours= float(((date - previous_date).seconds)/60)/60
            if previous_comfort == 'UP':
                acc_day_UP = acc_day_UP + diff_hours
            elif previous_comfort == 'OK':
                acc_day_OK = acc_day_OK + diff_hours
            else:
                acc_day_DOWN = acc_day_DOWN + diff_hours
            up.append(float('%.2f'% acc_day_UP))
            down.append(float('%.2f'% acc_day_DOWN))
            ok.append(float('%.2f'% acc_day_OK))
    return days,ok,up,down
def get_annotations():
    """Connect to the EPM server and read one month of 'wb_Tin' annotations.

    NOTE(review): as visible here the function only prints diagnostics and
    returns nothing (hG is read but unused) - confirm against the full
    source whether a return/processing step follows.
    """
    try:
        conn01 = epmcomm.epmConnect(hostname='kotres', username='sa', password='<PASSWORD>')
        print "Conectado ao EPM Server!"
    except epmsdk.EpmException as ex:
        print 'Failed on connection to EPM with error\n{}\n'.format(ex)
        exit(1)
    # Read the annotations attached to the 'wb_Tin' tag.
    tagTeste = epmda.epmGetDataObjectAnnotation(conn01, 'wb_Tin')
    dataInicial = datetime.datetime(year=2016, month=3, day=1)
    dataFinal = datetime.datetime(year=2016, month=4, day=1)
    try:
        hG = epmhda.epmTagHistoryRead(tagTeste, dataInicial, dataFinal)
        print unicode(hG.size)
    except epmsdk.EpmException as ex:
        print 'Failed when getting Tag with error\n{}\n'.format(ex)
        raw_input("Error on historyread")
        exit(1)
    print 'Succeeded!'
| |
#!/home/tortes/anaconda3/envs/ts/bin/python
"""
Change list:
- Remove past action
- adjust step output: remove arrived
- Change python version to 3
- Add action space, observation space
8.17
- Change action space to discrete action
"""
import os
import rospy
import numpy as np
import math
from math import pi
import random
import gym
from geometry_msgs.msg import Twist, Point, Pose
from sensor_msgs.msg import LaserScan, PointCloud2, Imu, NavSatFix
from nav_msgs.msg import Odometry
from std_srvs.srv import Empty
from gazebo_msgs.srv import SpawnModel, DeleteModel
from rosgraph_msgs.msg import Clock
import sensor_msgs.point_cloud2 as pc2 # pcl lib
# from velodyne_msgs.msg import VelodyneScan, VelodynePacket
action_linear_max = 5. # m/s
action_angular_max = 2. # rad/s
EARTH_RADIUS = 6378137
# REALSENSE_MAX_POINT = 20000
REALSENSE_MAX_POINT = 5000
zero_point = (0,0,0)
diagonal_dis = math.sqrt(2) * 100
epi = 10**-6
goal_model_dir = os.path.join(os.path.split(os.path.realpath(__file__))[0], '..', 'models', 'Target_col', 'model.sdf')
# Velodyne Disabled
class Env():
    def __init__(self, is_training):
        """Set up publishers, service proxies and sensor subscribers.

        Args:
            is_training: when True a tighter arrival threshold (0.5 m) is
                used; evaluation uses 1.0 m.
        """
        # Robot pose and navigation goal (goal starts at the origin).
        self.position = Pose()
        self.goal_position = Pose()
        self.goal_position.position.x = 0.
        self.goal_position.position.y = 0.
        # Gazebo/ROS interfaces: velocity commands, simulation control,
        # and goal-marker spawn/delete services.
        self.pub_cmd_vel = rospy.Publisher('cmd_vel', Twist, queue_size=10)
        self.reset_proxy = rospy.ServiceProxy('gazebo/reset_simulation', Empty)
        self.unpause_proxy = rospy.ServiceProxy('gazebo/unpause_physics', Empty)
        self.pause_proxy = rospy.ServiceProxy('gazebo/pause_physics', Empty)
        self.goal = rospy.ServiceProxy('/gazebo/spawn_sdf_model', SpawnModel)
        self.del_model = rospy.ServiceProxy('/gazebo/delete_model', DeleteModel)
        # Latest sensor-derived state; filled asynchronously by the
        # subscriber callbacks registered below.
        self.time = 0
        self.roll = 0.
        self.pitch = 0.
        self.nav_yaw = 0.
        self.extend_data = np.zeros(3*REALSENSE_MAX_POINT)
        self.scan_data = []
        # NOTE(review): hard-coded initial GPS-projected position -
        # presumably the robot's spawn pose; confirm against the world file.
        self.nav_position = [9.083599620367968, -8.909992062367177]
        self.sub_time = rospy.Subscriber('clock', Clock, self.getClock)
        self.sub_imu = rospy.Subscriber('imu/data', Imu, self.getQuaternion)
        self.sub_realsense = rospy.Subscriber('realsense/downsample', PointCloud2, self.getRealsense)
        self.sub_lidar = rospy.Subscriber('scan', LaserScan, self.getLidar)
        self.sub_navsat = rospy.Subscriber('navsat/fix', NavSatFix, self.getNavSat)
        # Goal-tracking bookkeeping updated by getState()/setReward().
        self.past_distance = 0.
        self.nav_goal_distance = 0.
        self.nav_rel_theta = 0.
        self.nav_diff_angle = 0.
        # Build action/observation space descriptors (defined elsewhere in
        # this class, outside the visible excerpt).
        self.action_space()
        self.observation_space()
        if is_training:
            self.threshold_arrive = 0.5
        else:
            self.threshold_arrive = 1.0
def getNavGoalDistance(self):
nav_goal_distance = math.hypot(self.goal_position.position.x - self.nav_position[0], self.goal_position.position.y - self.nav_position[1])
self.nav_past_distance = nav_goal_distance
return nav_goal_distance
def getClock(self, clock):
self.time = clock.clock.secs
# return clock.clock.secs
def getQuaternion(self, imu_data):
# roll, pitch, yaw
q_data = imu_data.orientation
eular_data = self.getEular(q_data)
self.orientation = q_data
self.roll = eular_data[0]
self.pitch = eular_data[1]
self.nav_yaw = eular_data[2]
def getRealsense(self, realsense_data):
rs_generator = pc2.read_points(realsense_data, skip_nans=True, field_names=("x","y","z"))
realsense_point_ = list(rs_generator)
rs_point_length = len(realsense_point_)
# sample or extend
if rs_point_length <= REALSENSE_MAX_POINT:
realsense_point_.extend([zero_point for _ in range(REALSENSE_MAX_POINT-rs_point_length)])
else:
selected_point = np.random.choice(np.arange(rs_point_length), REALSENSE_MAX_POINT, replace=True)
realsense_point_ = [realsense_point_[i] for i in selected_point]
extend_data_ = []
for point in realsense_point_:
extend_data_.extend([point[0],point[1],point[2]])
self.realsense_point = realsense_point_
self.extend_data = extend_data_
def getLidar(self, scan_raw_data):
scan_data_ = []
scan_length = len(scan_raw_data.ranges)
for i in range(scan_length):
if scan_raw_data.ranges[i] == float('Inf'):
scan_data_.append(30.)
elif np.isnan(scan_raw_data.ranges[i]):
scan_data_.append(0)
else:
scan_data_.append(scan_raw_data.ranges[i])
self.scan_data = scan_data_
def getNavSat(self, navsat_data):
# reference Longi:45 Lati:45
ref_longi = 45.0
ref_lati = 45.0
longitude = navsat_data.longitude
latitude = navsat_data.latitude
delta_longi = (longitude-ref_longi) * pi / 180
delta_lati = (latitude-ref_lati) * pi / 180
para_longi = 0.5 * (1-math.cos(delta_longi))
para_lati = math.cos(latitude*pi/180) * math.cos(latitude*pi/180)
if delta_longi >= 0:
para_symbol = 1
else:
para_symbol = -1
longitude_aff = para_symbol * EARTH_RADIUS * math.acos(1-2*para_lati*para_longi)
latitude_aff = EARTH_RADIUS * delta_lati
self.nav_position = [longitude_aff, latitude_aff]
def getGoalAngle(self):
rel_dis_x = round(self.goal_position.position.x - self.nav_position[0], 1)
rel_dis_y = round(self.goal_position.position.y - self.nav_position[1], 1)
# Calculate the angle between robot and target
if rel_dis_x > 0 and rel_dis_y > 0:
theta = math.atan(rel_dis_y / rel_dis_x)
elif rel_dis_x > 0 and rel_dis_y < 0:
theta = 2 * math.pi + math.atan(rel_dis_y / rel_dis_x)
elif rel_dis_x < 0 and rel_dis_y < 0:
theta = math.pi + math.atan(rel_dis_y / rel_dis_x)
elif rel_dis_x < 0 and rel_dis_y > 0:
theta = math.pi + math.atan(rel_dis_y / rel_dis_x)
elif rel_dis_x == 0 and rel_dis_y > 0:
theta = 1 / 2 * math.pi
elif rel_dis_x == 0 and rel_dis_y < 0:
theta = 3 / 2 * math.pi
elif rel_dis_y == 0 and rel_dis_x > 0:
theta = 0
else:
theta = math.pi
rel_theta = round(math.degrees(theta), 2)
diff_angle = abs(rel_theta - self.nav_yaw)
if diff_angle <= 180:
diff_angle = round(diff_angle, 2)
else:
diff_angle = round(360 - diff_angle, 2)
self.nav_rel_theta = rel_theta
self.nav_diff_angle = diff_angle
def getState(self):
# Get angle info
self.getGoalAngle()
extend_data = self.extend_data
roll = self.roll
pitch = self.pitch
yaw = self.nav_yaw
rel_theta = self.nav_rel_theta
diff_angle = self.nav_diff_angle
min_range = 0.3 # Collision range
done = False
arrive = False
# Add physical factors to depend done
done = self.is_done()
# Use only dist to depend arrive
current_distance = math.hypot(self.goal_position.position.x - self.nav_position[0], self.goal_position.position.y - self.nav_position[1])
if current_distance <= self.threshold_arrive:
arrive = True
print("Arrived!")
# Check data
assert len(extend_data) == 3 * REALSENSE_MAX_POINT
return extend_data, current_distance, roll, pitch, yaw, rel_theta, diff_angle, done, arrive
def setReward(self, done, arrive):
current_distance = math.hypot(self.goal_position.position.x - self.nav_position[0], self.goal_position.position.y - self.nav_position[1])
distance_rate = (self.past_distance - current_distance)
reward = 200.*distance_rate
self.past_distance = current_distance
# Time reward
# reward = reward - .5 * self.time
# Imu reward
if abs(self.roll) > 22.5:
# print("Alert! Roll angle is %.2f" % self.roll)
reward = reward - 1.
if abs(self.pitch) > 22.5:
# print("Alert! Pitch angle is %.2f" % self.pitch)
reward = reward - 1.
if done:
reward = -100.
self.pub_cmd_vel.publish(Twist())
if arrive:
reward = 2000.
self.pub_cmd_vel.publish(Twist())
arrive = False
return reward
    def step(self, action):
        """Apply one discrete action and return (state, reward, done, info).

        Args:
            action: index into self.action_space_discrete, whose entries
                are (linear_velocity, angular_velocity) pairs.

        Returns:
            (state, reward, terminal, info) in the usual gym layout;
            terminal is True on failure OR arrival.
        """
        linear_vel = self.action_space_discrete[action][0]
        ang_vel = self.action_space_discrete[action][1]
        # print(linear_vel, ang_vel)
        vel_cmd = Twist()
        # The linear command is scaled down by 4; angular is sent as-is.
        vel_cmd.linear.x = linear_vel / 4
        vel_cmd.angular.z = ang_vel
        self.pub_cmd_vel.publish(vel_cmd)
        # Update sensor data
        # self.getSensor()
        # Update state observation
        realsense_data, rel_dis, roll, pitch, yaw, rel_theta, diff_angle, done, arrive = self.getState()
        # Normalize the state
        '''
        Realsense: [0, 12] => [0,1]
        LiDAR: [0, 30] => [0,1]
        roll, pitch:[-180, 180] => [0,1]
        '''
        # NOTE(review): unlike reset(), the realsense points are NOT divided
        # by 12 here, so step() and reset() return differently scaled
        # observations - confirm whether this is intentional.
        # scan_data = [i/30 for i in scan_data]
        state = realsense_data + [rel_dis / diagonal_dis, (roll+180)/360, (pitch+180)/360, yaw / 360, rel_theta / 360, diff_angle / 180]
        reward = self.setReward(done, arrive)
        return np.asarray(state), reward, done or arrive, {}
def reset(self):
# Reset the env #
rospy.wait_for_service('/gazebo/delete_model')
self.del_model('target')
rospy.wait_for_service('gazebo/reset_simulation')
try:
self.reset_proxy()
except (rospy.ServiceException) as e:
print("gazebo/reset_simulation service call failed")
# Build the targetz
rospy.wait_for_service('/gazebo/spawn_sdf_model')
try:
goal_urdf = open(goal_model_dir, "r").read()
target = SpawnModel
target.model_name = 'target' # the same with sdf name
target.model_xml = goal_urdf
# Get goal position
self.goal_position.position.x, self.goal_position.position.y = self.goal_on_law()
self.goal(target.model_name, target.model_xml, 'namespace', self.goal_position, 'world')
# Affine Goal Position to NavSatFix(x -> -y, y->x)
self.goal_position.position.x = -self.goal_position.position.y
self.goal_position.position.y = self.goal_position.position.x
except (rospy.ServiceException) as e:
print("/gazebo/failed to build the target")
rospy.wait_for_service('/gazebo/unpause_physics')
# Get sensor data
# self.getSensor()
self.goal_distance = self.getNavGoalDistance()
realsense_data, rel_dis, roll, pitch, yaw, rel_theta, diff_angle, done, arrive = self.getState()
# scan_data = [i/30 for i in scan_data]
realsense_data = [i/12 for i in realsense_data]
# Normalize the state
state = realsense_data + [rel_dis / diagonal_dis, (roll+180)/360, (pitch+180)/360, yaw / 360, rel_theta / 360, diff_angle / 180]
return np.asarray(state)
def goal_on_law(self):
x_ = 0
y_ = 0
while True:
x_ = random.uniform(0.0, 10.0)
y_ = random.uniform(-10.0, 0.0)
dist1 = math.hypot(x_+0.6, y_+0.6)
dist2 = math.hypot(x_+0.6, y_-0.6)
dist3 = math.hypot(x_-0.6, y_-0.6)
if (dist1 > 0.2) or (dist2 > 0.2) or (dist3 > 0.2):
break
return x_, y_
def box_affine(self, p, threshold_affine):
# threshold_affine = 0.2
x, y, z = p[0], p[1], p[2]
if (x<threshold_affine and y<threshold_affine and z<threshold_affine):
k = threshold_affine / max(map(abs, (x,y,z)))
x, y, z = map(lambda x: x*k, (x,y,z))
return x, y, z
def ball_affine(self, p, threshold_affine):
# threshold_affine = 0.2
x, y, z = p[0], p[1], p[2]
point_dist = np.linalg.norm((x,y,z))
if (point_dist < threshold_affine):
k = point_dist / threshold_affine
x, y, z = map(lambda x: x/k, (x,y,z))
return x, y, z
def is_outbound(self):
x = self.nav_position[0]
y = self.nav_position[1]
# print(x,y)
if abs(x) > 13.5 or abs(y) > 13.5:
return True
return False
    def is_done(self):
        """Episode-failure test: tip-over, collision while out of bounds,
        or simulation timeout. Returns False while no lidar data exists."""
        min_range = 1.2  # collision distance threshold (m)
        if len(self.scan_data) == 0:
            # No lidar scan received yet; cannot judge.
            return False
        # Roll Pitch error
        if abs(self.roll) > 45 or abs(self.pitch) > 45:
            # print("Roll/Pitch danger")
            return True
        # Collision error
        # NOTE(review): this requires BOTH a close lidar return AND being
        # out of bounds; an 'or' (either condition alone ends the episode)
        # looks more likely to be the intent - confirm.
        if min_range > min(self.scan_data) > 0 and self.is_outbound():
            # print("Collision")
            return True
        if self.time > 10000:
            # print("Time exceed")
            return True
        return False
def getEular(self, quaternion):
x = quaternion.x
y = quaternion.y
z = quaternion.z
w = quaternion.w
# roll
sinr_cosp = 2.0*(w*x+y*z)
cosr_cosp = 1-2.0*(x*x+y*y)
roll = math.atan2(sinr_cosp, cosr_cosp)
# pitch
sinp = 2.0*(w*y-z*x)
if abs(sinp) > 1:
pitch = pi/2 if sinp > 0 else -pi/2 # Use pi/2 if out of range
else:
pitch = math.asin(sinp)
# yaw
siny_cosp = 2.0*(w*z + x*y)
cosy_cosp = 1-2.0*(y*y+z*z)
yaw = math.atan2(siny_cosp, cosy_cosp)
if yaw >= 0:
yaw = yaw
else:
yaw | |
-1) # -1 => Unknown
self._max_mem_mb_usage = Value("i", -1) # -1 => Unknown
# All values below are cumulative.
self._num_queries_dequeued = Value("i", 0)
self._num_queries_started = Value("i", 0)
self._num_queries_finished = Value("i", 0)
self._num_queries_exceeded_mem_limit = Value("i", 0)
self._num_queries_cancelled = Value("i", 0)
self._num_queries_timedout = Value("i", 0)
self._num_result_mismatches = Value("i", 0)
self._num_other_errors = Value("i", 0)
self.cancel_probability = 0
self.spill_probability = 0
self.startup_queries_per_sec = 1.0
self.num_successive_errors_needed_to_abort = 1
self._num_successive_errors = Value("i", 0)
self.result_hash_log_dir = gettempdir()
self._status_headers = [" Done", "Running", "Mem Lmt Ex", "Time Out", "Cancel",
"Err", "Next Qry Mem Lmt", "Tot Qry Mem Lmt", "Tracked Mem", "RSS Mem"]
self._num_queries_to_run = None
self._query_producer_thread = None
self._query_runners = list()
self._query_consumer_thread = None
self._mem_polling_thread = None
def run_queries(self, queries, impala, num_queries_to_run, mem_overcommit_pct,
                should_print_status):
    """Runs queries randomly chosen from 'queries' and stops after 'num_queries_to_run'
    queries have completed.

    Before a query is run, a mem limit will be chosen. 'spill_probability' determines
    the likelihood of choosing a mem limit that will cause spilling. To induce
    spilling, a value is randomly chosen below the min memory needed to avoid spilling
    but above the min memory needed with spilling. So the min/max query memory
    requirements must be determined before calling this method.

    If 'mem_overcommit_pct' is zero, an exception will be raised if any queries
    fail for any reason other than cancellation (controlled by the 'cancel_probability'
    property), since each query should have enough memory to run successfully. If
    non-zero, failures due to insufficient memory will be ignored if memory was
    overcommitted at any time during execution.

    If a query completes without error, the result will be verified. An error
    will be raised upon a result mismatch.
    """
    # XXX: The state from a previous run should be cleared out. This isn't really a
    #      problem now because the one caller (main()) never calls a second time.

    # Sanity-check the tunables before spawning any threads/processes.
    if self.startup_queries_per_sec <= 0:
        raise Exception("Startup queries per second must be positive")
    if self.leak_check_interval_mins is not None and self.leak_check_interval_mins <= 0:
        raise Exception("Memory leak check interval must be positive")

    # If there is a crash, start looking for errors starting from this time.
    start_time = datetime.now()

    self._mem_broker = MemBroker(impala.min_impalad_mem_mb,
                                 int(impala.min_impalad_mem_mb * mem_overcommit_pct / 100))

    # Print the status to show the state before starting.
    if should_print_status:
        self._print_status_header()
        self._print_status()
        lines_printed = 1
        last_report_secs = 0

    self._num_queries_to_run = num_queries_to_run
    self._start_polling_mem_usage(impala)
    self._start_producing_queries(queries)
    self._start_consuming_queries(impala)

    # Wait for everything to finish.
    sleep_secs = 0.1
    while self._query_producer_thread.is_alive() \
            or self._query_consumer_thread.is_alive() \
            or self._query_runners:
        if self._query_producer_thread.error or self._query_consumer_thread.error:
            # This is bad enough to abort early. A failure here probably means there's a
            # bug in this script. The mem poller could be checked for an error too. It is
            # not critical so is ignored.
            sys.exit(1)
        # Reap runner processes that have exited.
        # NOTE(review): deleting from self._query_runners while enumerate() is
        # iterating it skips the element that follows each deletion; harmless only
        # if at most one runner exits per polling pass -- confirm.
        for idx, runner in enumerate(self._query_runners):
            if runner.exitcode is not None:
                if runner.exitcode != 0:
                    # NOTE(review): this compares "threshold >= error count", which
                    # aborts even when fewer errors than the configured threshold
                    # have occurred; the opposite orientation (count >= threshold)
                    # may have been intended -- confirm. The message text also reads
                    # "encounter" (sic); it is a runtime string, left untouched here.
                    if self.num_successive_errors_needed_to_abort \
                            >= self._num_successive_errors.value:
                        if self.num_successive_errors_needed_to_abort > 1:
                            print("Aborting due to %s successive errors encounter"
                                  % self.num_successive_errors_needed_to_abort, file=sys.stderr)
                        sys.exit(1)
                    if print_crash_info_if_exists(impala, start_time):
                        sys.exit(runner.exitcode)
                del self._query_runners[idx]
        sleep(sleep_secs)
        if should_print_status:
            last_report_secs += sleep_secs
            if last_report_secs > 5:
                # When something looks finished, log the liveness of each moving part
                # to help diagnose a hang.
                if not self._query_producer_thread.is_alive() \
                        or not self._query_consumer_thread.is_alive() \
                        or not self._query_runners:
                    LOG.debug("Producer is alive: %s" % self._query_producer_thread.is_alive())
                    LOG.debug("Consumer is alive: %s" % self._query_consumer_thread.is_alive())
                    LOG.debug("Queue size: %s" % self._query_queue.qsize())
                    LOG.debug("Runners: %s" % len(self._query_runners))
                last_report_secs = 0
                # Re-print the header every 50 status lines.
                lines_printed %= 50
                if lines_printed == 0:
                    self._print_status_header()
                self._print_status()
                lines_printed += 1

    # And print the final state.
    if should_print_status:
        self._print_status()
def _start_producing_queries(self, queries):
    """Spawn a daemon thread that fills the query queue.

    Exactly `self._num_queries_to_run` entries are enqueued, each chosen
    uniformly at random from `queries`.  Any failure is recorded on the
    thread's `error` attribute before being re-raised so the main loop can
    detect it.
    """
    def enqueue_queries():
        try:
            remaining = self._num_queries_to_run
            while remaining > 0:
                self._query_queue.put(choice(queries))
                remaining -= 1
        except Exception as e:
            current_thread().error = e
            raise e
    self._query_producer_thread = create_and_start_daemon_thread(
        enqueue_queries, "Query Producer")
def _start_consuming_queries(self, impala):
    """Spawn a daemon thread that launches query-runner processes.

    Runner processes are started at a rate limited by
    `startup_queries_per_sec`, round-robining across `impala.impalads`,
    until `_num_queries_to_run` queries have been started.  No new runner
    is added while previously dequeued queries have not yet started (they
    are assumed to be stuck waiting for cluster resources).  Errors are
    recorded on the thread's `error` attribute before being re-raised.
    """
    def start_additional_runners_if_needed():
        try:
            while self._num_queries_started.value < self._num_queries_to_run:
                sleep(1.0 / self.startup_queries_per_sec)
                # Remember num dequeued/started are cumulative.
                with self._submit_query_lock:
                    if self._num_queries_dequeued.value != self._num_queries_started.value:
                        # Dequeued queries are presumed stuck waiting for cluster
                        # resources, so another runner would not help.
                        continue
                target_impalad = impala.impalads[
                    len(self._query_runners) % len(impala.impalads)]
                new_runner = Process(target=self._start_single_runner,
                                     args=(target_impalad, ))
                new_runner.daemon = True
                self._query_runners.append(new_runner)
                new_runner.start()
        except Exception as e:
            current_thread().error = e
            raise e
    self._query_consumer_thread = create_and_start_daemon_thread(
        start_additional_runners_if_needed, "Query Consumer")
def _start_polling_mem_usage(self, impala):
    """Spawn a daemon thread that periodically samples impalad memory usage.

    The poller also implements the memory-leak check: every
    `leak_check_interval_mins` it acquires `_submit_query_lock` to pause
    query submission, waits until no queries are running, collects two
    clean memory reports, and then resumes submission.
    (NOTE(review): the local name "query_sumbission_is_locked" has a
    transposition typo -- "sumbission" -- kept as-is here since renaming is
    a code change.)
    """
    def poll_mem_usage():
        if self.leak_check_interval_mins:
            self._next_leak_check_unix_time.value = int(
                time() + 60 * self.leak_check_interval_mins)
        query_sumbission_is_locked = False
        # Query submission will be unlocked after a memory report has been collected twice
        # while no queries were running.
        # ready_to_unlock is a three-state flag: None (not waiting), False (one
        # idle report collected), True (second idle report collected -> unlock).
        ready_to_unlock = None
        try:
            while self._num_queries_started.value < self._num_queries_to_run:
                if ready_to_unlock:
                    assert query_sumbission_is_locked, "Query submission not yet locked"
                    assert not self._num_queries_running, "Queries are still running"
                    LOG.debug("Resuming query submission")
                    # Schedule the next leak check before releasing the lock.
                    self._next_leak_check_unix_time.value = int(
                        time() + 60 * self.leak_check_interval_mins)
                    self._submit_query_lock.release()
                    query_sumbission_is_locked = False
                    ready_to_unlock = None
                # Time for a leak check: stop new submissions until idle.
                if not query_sumbission_is_locked \
                        and self.leak_check_interval_mins \
                        and time() > self._next_leak_check_unix_time.value:
                    assert self._num_queries_running <= len(self._query_runners), \
                        "Each running query should belong to a runner"
                    LOG.debug("Stopping query submission")
                    self._submit_query_lock.acquire()
                    query_sumbission_is_locked = True
                max_reported, max_actual = self._get_mem_usage_values()
                if max_reported != -1 and max_actual != -1:
                    # Value were already retrieved but haven't been used yet. Assume newer
                    # values aren't wanted and check again later.
                    sleep(1)
                    continue
                try:
                    max_reported = max(impala.find_impalad_mem_mb_reported_usage())
                except Timeout:
                    LOG.debug("Timeout collecting reported mem usage")
                    max_reported = -1
                try:
                    max_actual = max(impala.find_impalad_mem_mb_actual_usage())
                except Timeout:
                    LOG.debug("Timeout collecting reported actual usage")
                    max_actual = -1
                self._set_mem_usage_values(max_reported, max_actual)
                # Advance the idle-report counter while submission is paused.
                if query_sumbission_is_locked and not self._num_queries_running:
                    if ready_to_unlock is None:
                        ready_to_unlock = False
                    else:
                        ready_to_unlock = True
        except Exception:
            LOG.debug("Error collecting impalad mem usage", exc_info=True)
            if query_sumbission_is_locked:
                # Never leave submission permanently blocked on an error.
                LOG.debug("Resuming query submission")
                self._submit_query_lock.release()
    self._mem_polling_thread = create_and_start_daemon_thread(poll_mem_usage,
        "Mem Usage Poller")
def _get_mem_usage_values(self, reset=False):
reported = None
actual = None
with self._max_mem_mb_reported_usage.get_lock():
with self._max_mem_mb_usage.get_lock():
reported = self._max_mem_mb_reported_usage.value
actual = self._max_mem_mb_usage.value
if reset:
self._max_mem_mb_reported_usage.value = -1
self._max_mem_mb_usage.value = -1
return reported, actual
def _set_mem_usage_values(self, reported, actual):
with self._max_mem_mb_reported_usage.get_lock():
with self._max_mem_mb_usage.get_lock():
self._max_mem_mb_reported_usage.value = reported
self._max_mem_mb_usage.value = actual
@property
def _num_queries_running(self):
num_running = self._num_queries_started.value - self._num_queries_finished.value
assert num_running >= 0, "The number of running queries is negative"
return num_running
def _start_single_runner(self, impalad):
    """Consumer function to take a query of the queue and run it. This is intended to
    run in a separate process so validating the result set can use a full CPU.

    Loops until the shared query queue is empty: for each query it picks a mem
    limit (possibly one that induces spilling), reserves memory from the
    broker, runs the query, and validates the outcome, raising on unexpected
    failures or result-hash mismatches.
    """
    LOG.debug("New query runner started")
    runner = QueryRunner()
    runner.impalad = impalad
    runner.result_hash_log_dir = self.result_hash_log_dir
    runner.use_kerberos = self.use_kerberos
    runner.connect()
    while not self._query_queue.empty():
        try:
            query = self._query_queue.get(True, 1)
        except Empty:
            # Another runner drained the queue between empty() and get().
            continue
        LOG.debug("Getting query_idx")
        # query_idx is a cumulative dequeue counter, taken under the lock so each
        # runner gets a unique slot.
        with self._num_queries_dequeued.get_lock():
            query_idx = self._num_queries_dequeued.value
            self._num_queries_dequeued.value += 1
        # Choose a mem limit; with probability `spill_probability` pick one in the
        # range that forces spilling.
        if not query.required_mem_mb_without_spilling:
            mem_limit = query.required_mem_mb_with_spilling
            solo_runtime = query.solo_runtime_secs_with_spilling
        elif self.spill_probability < random():
            mem_limit = query.required_mem_mb_without_spilling
            solo_runtime = query.solo_runtime_secs_without_spilling
        else:
            mem_limit = randrange(query.required_mem_mb_with_spilling,
                query.required_mem_mb_without_spilling + 1)
            solo_runtime = query.solo_runtime_secs_with_spilling
        LOG.debug("Waiting for other query runners to start their queries")
        # Preserve dequeue order: wait until all earlier slots have started.
        while query_idx > self._num_queries_started.value:
            sleep(0.1)
        self._mem_mb_needed_for_next_query.value = mem_limit
        LOG.debug("Requesting memory reservation")
        with self._mem_broker.reserve_mem_mb(mem_limit) as reservation_id:
            LOG.debug("Received memory reservation")
            with self._submit_query_lock:
                increment(self._num_queries_started)
            # With probability `cancel_probability`, pick a timeout short enough
            # to cancel the query mid-flight; otherwise scale the solo runtime by
            # the current concurrency (at least 10x).
            should_cancel = self.cancel_probability > random()
            if should_cancel:
                timeout = randrange(1, max(int(solo_runtime), 2))
            else:
                timeout = solo_runtime * max(10, self._num_queries_started.value
                    - self._num_queries_finished.value)
            report = runner.run_query(query, timeout, mem_limit)
            LOG.debug("Got execution report for query")
            if report.timed_out and should_cancel:
                report.was_cancelled = True
            self._update_from_query_report(report)
            if report.non_mem_limit_error:
                error_msg = str(report.non_mem_limit_error)
                # There is a possible race during cancellation. If a fetch request fails (for
                # example due to hitting a mem limit), just before the cancellation request, the
                # server may have already unregistered the query as part of the fetch failure.
                # In that case the server gives an error response saying the handle is invalid.
                if "Invalid query handle" in error_msg and report.timed_out:
                    self._num_successive_errors.value = 0
                    continue
                # Occasionally the network connection will fail, and depending on when the
                # failure occurred during run_query(), an attempt to get the profile may be
                # made which results in "Invalid session id" since the server destroyed the
                # session upon disconnect.
                if "Invalid session id" in error_msg:
                    self._num_successive_errors.value = 0
                    continue
                increment(self._num_successive_errors)
                increment(self._num_other_errors)
                raise Exception("Query failed: %s" % str(report.non_mem_limit_error))
            # A mem-limit failure is only acceptable if the broker actually
            # overcommitted memory during this reservation.
            if report.mem_limit_exceeded \
                    and not self._mem_broker.was_overcommitted(reservation_id):
                increment(self._num_successive_errors)
                raise Exception("Unexpected mem limit exceeded; mem was not overcommitted\n"
                    "Profile: %s" % report.profile)
            # Result verification: only for queries that ran to completion.
            if not report.mem_limit_exceeded \
                    and not report.timed_out \
                    and report.result_hash != query.result_hash:
                increment(self._num_successive_errors)
                increment(self._num_result_mismatches)
                raise Exception("Result hash mismatch; expected %s, got %s\nQuery: %s"
                    % (query.result_hash, report.result_hash, query.sql))
            # Success: reset the successive-error counter.
            self._num_successive_errors.value = 0
def _print_status_header(self):
print(" | ".join(self._status_headers))
def _print_status(self):
reported_mem, | |
# unitracer/lib/windows/amd64/kernel32.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2009-2014, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice,this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Wrapper for kernel32.dll in ctypes.
"""
__revision__ = "$Id: kernel32.py 1299 2013-12-20 09:30:55Z qvasimodo $"
import warnings
from defines import *
import context_i386
import context_amd64
#==============================================================================
# This is used later on to calculate the list of exported symbols.
_all = None
_all = set(vars().keys())
_all.add('version')
#==============================================================================
from version import *
#------------------------------------------------------------------------------
# This can't be defined in defines.py because it calls GetLastError().
def RaiseIfLastError(result, func = None, arguments = ()):
    """
    Error checking for Win32 API calls with no error-specific return value.

    Regardless of the return value, the function calls GetLastError(). If the
    code is not C{ERROR_SUCCESS} then a C{WindowsError} exception is raised.

    For this to work, the user MUST call SetLastError(ERROR_SUCCESS) prior to
    calling the API. Otherwise an exception may be raised even on success,
    since most API calls don't clear the error status code.
    """
    last_error = GetLastError()
    if last_error == ERROR_SUCCESS:
        # Success path: hand the API's return value straight back.
        return result
    raise ctypes.WinError(last_error)
#--- CONTEXT structure and constants ------------------------------------------

# Mask used to strip the architecture-identifier bits from CONTEXT flags.
ContextArchMask = 0x0FFF0000 # just guessing here! seems to work, though

# Pull in the CONTEXT structure definitions matching the target architecture.
# An AMD64 target running a 32-bit interpreter falls back to the i386 context.
if arch == ARCH_I386:
    from context_i386 import *
elif arch == ARCH_AMD64:
    if bits == 64:
        from context_amd64 import *
    else:
        from context_i386 import *
else:
    warnings.warn("Unknown or unsupported architecture: %s" % arch)
#--- Constants ----------------------------------------------------------------

# Generic wait / process-state values.
STILL_ACTIVE = 259
WAIT_TIMEOUT = 0x102
WAIT_FAILED = -1
WAIT_OBJECT_0 = 0

EXCEPTION_NONCONTINUABLE = 0x1 # Noncontinuable exception
EXCEPTION_MAXIMUM_PARAMETERS = 15 # maximum number of exception parameters
MAXIMUM_WAIT_OBJECTS = 64 # Maximum number of wait objects
MAXIMUM_SUSPEND_COUNT = 0x7f # Maximum times thread can be suspended

FORMAT_MESSAGE_ALLOCATE_BUFFER = 0x00000100
FORMAT_MESSAGE_FROM_SYSTEM = 0x00001000

GR_GDIOBJECTS = 0
GR_USEROBJECTS = 1

PROCESS_NAME_NATIVE = 1

MAXINTATOM = 0xC000

STD_INPUT_HANDLE = 0xFFFFFFF6 # (DWORD)-10
STD_OUTPUT_HANDLE = 0xFFFFFFF5 # (DWORD)-11
STD_ERROR_HANDLE = 0xFFFFFFF4 # (DWORD)-12

ATTACH_PARENT_PROCESS = 0xFFFFFFFF # (DWORD)-1

# LoadLibraryEx constants
DONT_RESOLVE_DLL_REFERENCES = 0x00000001
LOAD_LIBRARY_AS_DATAFILE = 0x00000002
LOAD_WITH_ALTERED_SEARCH_PATH = 0x00000008
LOAD_IGNORE_CODE_AUTHZ_LEVEL = 0x00000010
LOAD_LIBRARY_AS_IMAGE_RESOURCE = 0x00000020
LOAD_LIBRARY_AS_DATAFILE_EXCLUSIVE = 0x00000040

# SetSearchPathMode flags
# TODO I couldn't find these constants :(
##BASE_SEARCH_PATH_ENABLE_SAFE_SEARCHMODE = ???
##BASE_SEARCH_PATH_DISABLE_SAFE_SEARCHMODE = ???
##BASE_SEARCH_PATH_PERMANENT = ???

# Console control events
CTRL_C_EVENT = 0
CTRL_BREAK_EVENT = 1
CTRL_CLOSE_EVENT = 2
CTRL_LOGOFF_EVENT = 5
CTRL_SHUTDOWN_EVENT = 6

# Heap flags
HEAP_NO_SERIALIZE = 0x00000001
HEAP_GENERATE_EXCEPTIONS = 0x00000004
HEAP_ZERO_MEMORY = 0x00000008
HEAP_CREATE_ENABLE_EXECUTE = 0x00040000

# Standard access rights
DELETE = (0x00010000L)
READ_CONTROL = (0x00020000L)
WRITE_DAC = (0x00040000L)
WRITE_OWNER = (0x00080000L)
SYNCHRONIZE = (0x00100000L)
STANDARD_RIGHTS_REQUIRED = (0x000F0000L)
STANDARD_RIGHTS_READ = (READ_CONTROL)
STANDARD_RIGHTS_WRITE = (READ_CONTROL)
STANDARD_RIGHTS_EXECUTE = (READ_CONTROL)
STANDARD_RIGHTS_ALL = (0x001F0000L)
SPECIFIC_RIGHTS_ALL = (0x0000FFFFL)

# Mutex access rights
MUTEX_ALL_ACCESS = 0x1F0001
MUTEX_MODIFY_STATE = 1

# Event access rights
EVENT_ALL_ACCESS = 0x1F0003
EVENT_MODIFY_STATE = 2

# Semaphore access rights
SEMAPHORE_ALL_ACCESS = 0x1F0003
SEMAPHORE_MODIFY_STATE = 2

# Timer access rights
TIMER_ALL_ACCESS = 0x1F0003
TIMER_MODIFY_STATE = 2
TIMER_QUERY_STATE = 1

# Process access rights for OpenProcess
PROCESS_TERMINATE = 0x0001
PROCESS_CREATE_THREAD = 0x0002
PROCESS_SET_SESSIONID = 0x0004
PROCESS_VM_OPERATION = 0x0008
PROCESS_VM_READ = 0x0010
PROCESS_VM_WRITE = 0x0020
PROCESS_DUP_HANDLE = 0x0040
PROCESS_CREATE_PROCESS = 0x0080
PROCESS_SET_QUOTA = 0x0100
PROCESS_SET_INFORMATION = 0x0200
PROCESS_QUERY_INFORMATION = 0x0400
PROCESS_SUSPEND_RESUME = 0x0800
PROCESS_QUERY_LIMITED_INFORMATION = 0x1000

# Thread access rights for OpenThread
THREAD_TERMINATE = 0x0001
THREAD_SUSPEND_RESUME = 0x0002
THREAD_ALERT = 0x0004
THREAD_GET_CONTEXT = 0x0008
THREAD_SET_CONTEXT = 0x0010
THREAD_SET_INFORMATION = 0x0020
THREAD_QUERY_INFORMATION = 0x0040
THREAD_SET_THREAD_TOKEN = 0x0080
THREAD_IMPERSONATE = 0x0100
THREAD_DIRECT_IMPERSONATION = 0x0200
THREAD_SET_LIMITED_INFORMATION = 0x0400
THREAD_QUERY_LIMITED_INFORMATION = 0x0800

# The values of PROCESS_ALL_ACCESS and THREAD_ALL_ACCESS were changed in Vista/2008
PROCESS_ALL_ACCESS_NT = (STANDARD_RIGHTS_REQUIRED | SYNCHRONIZE | 0xFFF)
PROCESS_ALL_ACCESS_VISTA = (STANDARD_RIGHTS_REQUIRED | SYNCHRONIZE | 0xFFFF)
THREAD_ALL_ACCESS_NT = (STANDARD_RIGHTS_REQUIRED | SYNCHRONIZE | 0x3FF)
THREAD_ALL_ACCESS_VISTA = (STANDARD_RIGHTS_REQUIRED | SYNCHRONIZE | 0xFFFF)
if NTDDI_VERSION < NTDDI_VISTA:
    PROCESS_ALL_ACCESS = PROCESS_ALL_ACCESS_NT
    THREAD_ALL_ACCESS = THREAD_ALL_ACCESS_NT
else:
    PROCESS_ALL_ACCESS = PROCESS_ALL_ACCESS_VISTA
    THREAD_ALL_ACCESS = THREAD_ALL_ACCESS_VISTA

# Process priority classes
IDLE_PRIORITY_CLASS = 0x00000040
BELOW_NORMAL_PRIORITY_CLASS = 0x00004000
NORMAL_PRIORITY_CLASS = 0x00000020
ABOVE_NORMAL_PRIORITY_CLASS = 0x00008000
HIGH_PRIORITY_CLASS = 0x00000080
REALTIME_PRIORITY_CLASS = 0x00000100

PROCESS_MODE_BACKGROUND_BEGIN = 0x00100000
PROCESS_MODE_BACKGROUND_END = 0x00200000

# dwCreationFlag values
# NOTE: the priority classes and the PROCESS_MODE_BACKGROUND_* values below
# intentionally repeat the definitions above with identical values, because
# they are also valid CreateProcess creation flags.
DEBUG_PROCESS = 0x00000001
DEBUG_ONLY_THIS_PROCESS = 0x00000002
CREATE_SUSPENDED = 0x00000004 # Threads and processes
DETACHED_PROCESS = 0x00000008
CREATE_NEW_CONSOLE = 0x00000010
NORMAL_PRIORITY_CLASS = 0x00000020
IDLE_PRIORITY_CLASS = 0x00000040
HIGH_PRIORITY_CLASS = 0x00000080
REALTIME_PRIORITY_CLASS = 0x00000100
CREATE_NEW_PROCESS_GROUP = 0x00000200
CREATE_UNICODE_ENVIRONMENT = 0x00000400
CREATE_SEPARATE_WOW_VDM = 0x00000800
CREATE_SHARED_WOW_VDM = 0x00001000
CREATE_FORCEDOS = 0x00002000
BELOW_NORMAL_PRIORITY_CLASS = 0x00004000
ABOVE_NORMAL_PRIORITY_CLASS = 0x00008000
INHERIT_PARENT_AFFINITY = 0x00010000
STACK_SIZE_PARAM_IS_A_RESERVATION = 0x00010000 # Threads only
INHERIT_CALLER_PRIORITY = 0x00020000 # Deprecated
CREATE_PROTECTED_PROCESS = 0x00040000
EXTENDED_STARTUPINFO_PRESENT = 0x00080000
PROCESS_MODE_BACKGROUND_BEGIN = 0x00100000
PROCESS_MODE_BACKGROUND_END = 0x00200000
CREATE_BREAKAWAY_FROM_JOB = 0x01000000
CREATE_PRESERVE_CODE_AUTHZ_LEVEL = 0x02000000
CREATE_DEFAULT_ERROR_MODE = 0x04000000
CREATE_NO_WINDOW = 0x08000000
PROFILE_USER = 0x10000000
PROFILE_KERNEL = 0x20000000
PROFILE_SERVER = 0x40000000
CREATE_IGNORE_SYSTEM_DEFAULT = 0x80000000

# Thread priority values
THREAD_BASE_PRIORITY_LOWRT = 15 # value that gets a thread to LowRealtime-1
THREAD_BASE_PRIORITY_MAX = 2 # maximum thread base priority boost
THREAD_BASE_PRIORITY_MIN = (-2) # minimum thread base priority boost
THREAD_BASE_PRIORITY_IDLE = (-15) # value that gets a thread to idle
THREAD_PRIORITY_LOWEST = THREAD_BASE_PRIORITY_MIN
THREAD_PRIORITY_BELOW_NORMAL = (THREAD_PRIORITY_LOWEST+1)
THREAD_PRIORITY_NORMAL = 0
THREAD_PRIORITY_HIGHEST = THREAD_BASE_PRIORITY_MAX
THREAD_PRIORITY_ABOVE_NORMAL = (THREAD_PRIORITY_HIGHEST-1)
THREAD_PRIORITY_ERROR_RETURN = (0xFFFFFFFFL)
THREAD_PRIORITY_TIME_CRITICAL = THREAD_BASE_PRIORITY_LOWRT
THREAD_PRIORITY_IDLE = THREAD_BASE_PRIORITY_IDLE

# Memory access
SECTION_QUERY = 0x0001
SECTION_MAP_WRITE = 0x0002
SECTION_MAP_READ = 0x0004
SECTION_MAP_EXECUTE = 0x0008
SECTION_EXTEND_SIZE = 0x0010
SECTION_MAP_EXECUTE_EXPLICIT = 0x0020 # not included in SECTION_ALL_ACCESS
SECTION_ALL_ACCESS = (STANDARD_RIGHTS_REQUIRED|SECTION_QUERY|\
                      SECTION_MAP_WRITE | \
                      SECTION_MAP_READ | \
                      SECTION_MAP_EXECUTE | \
                      SECTION_EXTEND_SIZE)
PAGE_NOACCESS = 0x01
PAGE_READONLY = 0x02
PAGE_READWRITE = 0x04
PAGE_WRITECOPY = 0x08
PAGE_EXECUTE = 0x10
PAGE_EXECUTE_READ = 0x20
PAGE_EXECUTE_READWRITE = 0x40
PAGE_EXECUTE_WRITECOPY = 0x80
PAGE_GUARD = 0x100
PAGE_NOCACHE = 0x200
PAGE_WRITECOMBINE = 0x400
MEM_COMMIT = 0x1000
MEM_RESERVE = 0x2000
MEM_DECOMMIT = 0x4000
MEM_RELEASE = 0x8000
MEM_FREE = 0x10000
MEM_PRIVATE = 0x20000
MEM_MAPPED = 0x40000
MEM_RESET = 0x80000
MEM_TOP_DOWN = 0x100000
MEM_WRITE_WATCH = 0x200000
MEM_PHYSICAL = 0x400000
MEM_LARGE_PAGES = 0x20000000
MEM_4MB_PAGES = 0x80000000
SEC_FILE = 0x800000
SEC_IMAGE = 0x1000000
SEC_RESERVE = 0x4000000
SEC_COMMIT = 0x8000000
SEC_NOCACHE = 0x10000000
SEC_LARGE_PAGES = 0x80000000
MEM_IMAGE = SEC_IMAGE
WRITE_WATCH_FLAG_RESET = 0x01
FILE_MAP_ALL_ACCESS = 0xF001F

# NOTE(review): the SECTION_* rights below repeat the definitions above with
# identical values, and FILE_MAP_ALL_ACCESS is re-bound to SECTION_ALL_ACCESS
# (same numeric value, 0xF001F). Kept verbatim.
SECTION_QUERY = 0x0001
SECTION_MAP_WRITE = 0x0002
SECTION_MAP_READ = 0x0004
SECTION_MAP_EXECUTE = 0x0008
SECTION_EXTEND_SIZE = 0x0010
SECTION_MAP_EXECUTE_EXPLICIT = 0x0020 # not included in SECTION_ALL_ACCESS

SECTION_ALL_ACCESS = (STANDARD_RIGHTS_REQUIRED|SECTION_QUERY|\
                      SECTION_MAP_WRITE | \
                      SECTION_MAP_READ | \
                      SECTION_MAP_EXECUTE | \
                      SECTION_EXTEND_SIZE)

FILE_MAP_COPY = SECTION_QUERY
FILE_MAP_WRITE = SECTION_MAP_WRITE
FILE_MAP_READ = SECTION_MAP_READ
FILE_MAP_ALL_ACCESS = SECTION_ALL_ACCESS
FILE_MAP_EXECUTE = SECTION_MAP_EXECUTE_EXPLICIT # not included in FILE_MAP_ALL_ACCESS

GENERIC_READ = 0x80000000
GENERIC_WRITE = 0x40000000
GENERIC_EXECUTE = 0x20000000
GENERIC_ALL = 0x10000000

FILE_SHARE_READ = 0x00000001
FILE_SHARE_WRITE = 0x00000002
FILE_SHARE_DELETE = 0x00000004

# CreateFile disposition values
CREATE_NEW = 1
CREATE_ALWAYS = 2
OPEN_EXISTING = 3
OPEN_ALWAYS = 4
TRUNCATE_EXISTING = 5

FILE_ATTRIBUTE_READONLY = 0x00000001
FILE_ATTRIBUTE_NORMAL = 0x00000080
FILE_ATTRIBUTE_TEMPORARY = 0x00000100

FILE_FLAG_WRITE_THROUGH = 0x80000000
FILE_FLAG_NO_BUFFERING = 0x20000000
FILE_FLAG_RANDOM_ACCESS = 0x10000000
FILE_FLAG_SEQUENTIAL_SCAN = 0x08000000
FILE_FLAG_DELETE_ON_CLOSE = 0x04000000
FILE_FLAG_OVERLAPPED = 0x40000000

# Full file-attribute set (READONLY/NORMAL/TEMPORARY repeat the values above).
FILE_ATTRIBUTE_READONLY = 0x00000001
FILE_ATTRIBUTE_HIDDEN = 0x00000002
FILE_ATTRIBUTE_SYSTEM = 0x00000004
FILE_ATTRIBUTE_DIRECTORY = 0x00000010
FILE_ATTRIBUTE_ARCHIVE = 0x00000020
FILE_ATTRIBUTE_DEVICE = 0x00000040
FILE_ATTRIBUTE_NORMAL = 0x00000080
FILE_ATTRIBUTE_TEMPORARY = 0x00000100

# Debug events
EXCEPTION_DEBUG_EVENT = 1
CREATE_THREAD_DEBUG_EVENT = 2
CREATE_PROCESS_DEBUG_EVENT = 3
EXIT_THREAD_DEBUG_EVENT = 4
EXIT_PROCESS_DEBUG_EVENT = 5
LOAD_DLL_DEBUG_EVENT = 6
UNLOAD_DLL_DEBUG_EVENT = 7
OUTPUT_DEBUG_STRING_EVENT = 8
RIP_EVENT = 9

# Debug status codes (ContinueDebugEvent)
DBG_EXCEPTION_HANDLED = 0x00010001L
DBG_CONTINUE = 0x00010002L
DBG_REPLY_LATER = 0x40010001L
DBG_UNABLE_TO_PROVIDE_HANDLE = 0x40010002L
DBG_TERMINATE_THREAD = 0x40010003L
DBG_TERMINATE_PROCESS = 0x40010004L
DBG_CONTROL_C = 0x40010005L
DBG_PRINTEXCEPTION_C = 0x40010006L
DBG_RIPEXCEPTION = 0x40010007L
DBG_CONTROL_BREAK = 0x40010008L
DBG_COMMAND_EXCEPTION = 0x40010009L
DBG_EXCEPTION_NOT_HANDLED = 0x80010001L
DBG_NO_STATE_CHANGE = 0xC0010001L
DBG_APP_NOT_IDLE = 0xC0010002L

# Status codes
STATUS_WAIT_0 = 0x00000000L
STATUS_ABANDONED_WAIT_0 = 0x00000080L
STATUS_USER_APC = 0x000000C0L
STATUS_TIMEOUT = 0x00000102L
STATUS_PENDING = 0x00000103L
STATUS_SEGMENT_NOTIFICATION = 0x40000005L
STATUS_GUARD_PAGE_VIOLATION = 0x80000001L
STATUS_DATATYPE_MISALIGNMENT = 0x80000002L
STATUS_BREAKPOINT = 0x80000003L
STATUS_SINGLE_STEP = 0x80000004L
STATUS_INVALID_INFO_CLASS = 0xC0000003L
STATUS_ACCESS_VIOLATION = 0xC0000005L
STATUS_IN_PAGE_ERROR = 0xC0000006L
STATUS_INVALID_HANDLE = 0xC0000008L
STATUS_NO_MEMORY = 0xC0000017L
STATUS_ILLEGAL_INSTRUCTION = 0xC000001DL
STATUS_NONCONTINUABLE_EXCEPTION = 0xC0000025L
STATUS_INVALID_DISPOSITION = 0xC0000026L
STATUS_ARRAY_BOUNDS_EXCEEDED = 0xC000008CL
STATUS_FLOAT_DENORMAL_OPERAND = 0xC000008DL
STATUS_FLOAT_DIVIDE_BY_ZERO = 0xC000008EL
STATUS_FLOAT_INEXACT_RESULT = 0xC000008FL
STATUS_FLOAT_INVALID_OPERATION = 0xC0000090L
STATUS_FLOAT_OVERFLOW = 0xC0000091L
STATUS_FLOAT_STACK_CHECK = 0xC0000092L
STATUS_FLOAT_UNDERFLOW = 0xC0000093L
STATUS_INTEGER_DIVIDE_BY_ZERO = 0xC0000094L
STATUS_INTEGER_OVERFLOW = 0xC0000095L
STATUS_PRIVILEGED_INSTRUCTION = 0xC0000096L
STATUS_STACK_OVERFLOW = 0xC00000FDL
STATUS_CONTROL_C_EXIT = 0xC000013AL
STATUS_FLOAT_MULTIPLE_FAULTS = 0xC00002B4L
STATUS_FLOAT_MULTIPLE_TRAPS = 0xC00002B5L
STATUS_REG_NAT_CONSUMPTION = 0xC00002C9L
STATUS_SXS_EARLY_DEACTIVATION = 0xC015000FL
STATUS_SXS_INVALID_DEACTIVATION = 0xC0150010L
STATUS_STACK_BUFFER_OVERRUN = 0xC0000409L
STATUS_WX86_BREAKPOINT = 0x4000001FL
STATUS_HEAP_CORRUPTION = 0xC0000374L
STATUS_POSSIBLE_DEADLOCK = 0xC0000194L
STATUS_UNWIND_CONSOLIDATE = 0x80000029L

# Exception codes (aliases of the NTSTATUS values above)
EXCEPTION_ACCESS_VIOLATION = STATUS_ACCESS_VIOLATION
EXCEPTION_ARRAY_BOUNDS_EXCEEDED = STATUS_ARRAY_BOUNDS_EXCEEDED
EXCEPTION_BREAKPOINT = STATUS_BREAKPOINT
EXCEPTION_DATATYPE_MISALIGNMENT = STATUS_DATATYPE_MISALIGNMENT
EXCEPTION_FLT_DENORMAL_OPERAND = STATUS_FLOAT_DENORMAL_OPERAND
EXCEPTION_FLT_DIVIDE_BY_ZERO = STATUS_FLOAT_DIVIDE_BY_ZERO
EXCEPTION_FLT_INEXACT_RESULT = STATUS_FLOAT_INEXACT_RESULT
EXCEPTION_FLT_INVALID_OPERATION = STATUS_FLOAT_INVALID_OPERATION
EXCEPTION_FLT_OVERFLOW = STATUS_FLOAT_OVERFLOW
EXCEPTION_FLT_STACK_CHECK = STATUS_FLOAT_STACK_CHECK
EXCEPTION_FLT_UNDERFLOW = STATUS_FLOAT_UNDERFLOW
EXCEPTION_ILLEGAL_INSTRUCTION = STATUS_ILLEGAL_INSTRUCTION
EXCEPTION_IN_PAGE_ERROR = STATUS_IN_PAGE_ERROR
EXCEPTION_INT_DIVIDE_BY_ZERO = STATUS_INTEGER_DIVIDE_BY_ZERO
EXCEPTION_INT_OVERFLOW = STATUS_INTEGER_OVERFLOW
EXCEPTION_INVALID_DISPOSITION = STATUS_INVALID_DISPOSITION
EXCEPTION_NONCONTINUABLE_EXCEPTION = STATUS_NONCONTINUABLE_EXCEPTION
EXCEPTION_PRIV_INSTRUCTION = STATUS_PRIVILEGED_INSTRUCTION
EXCEPTION_SINGLE_STEP = STATUS_SINGLE_STEP
EXCEPTION_STACK_OVERFLOW = STATUS_STACK_OVERFLOW

EXCEPTION_GUARD_PAGE = STATUS_GUARD_PAGE_VIOLATION
EXCEPTION_INVALID_HANDLE = STATUS_INVALID_HANDLE
EXCEPTION_POSSIBLE_DEADLOCK = STATUS_POSSIBLE_DEADLOCK
EXCEPTION_WX86_BREAKPOINT = STATUS_WX86_BREAKPOINT

CONTROL_C_EXIT = STATUS_CONTROL_C_EXIT

DBG_CONTROL_C = 0x40010005L # duplicate of the definition above, same value
MS_VC_EXCEPTION = 0x406D1388L

# Access violation types
# (EXCEPTION_*_FAULT presumably come from `defines` via the star import --
# TODO confirm they are defined there.)
ACCESS_VIOLATION_TYPE_READ = EXCEPTION_READ_FAULT
ACCESS_VIOLATION_TYPE_WRITE = EXCEPTION_WRITE_FAULT
ACCESS_VIOLATION_TYPE_DEP = EXCEPTION_EXECUTE_FAULT

# RIP event types
SLE_ERROR = 1
SLE_MINORERROR = 2
SLE_WARNING = 3

# DuplicateHandle constants
DUPLICATE_CLOSE_SOURCE = 0x00000001
DUPLICATE_SAME_ACCESS = 0x00000002

# GetFinalPathNameByHandle constants
FILE_NAME_NORMALIZED = 0x0
FILE_NAME_OPENED = 0x8
VOLUME_NAME_DOS = 0x0
VOLUME_NAME_GUID = 0x1
VOLUME_NAME_NONE = 0x4
VOLUME_NAME_NT = 0x2

# GetProductInfo constants
PRODUCT_BUSINESS = 0x00000006
PRODUCT_BUSINESS_N = 0x00000010
PRODUCT_CLUSTER_SERVER = 0x00000012
PRODUCT_DATACENTER_SERVER = 0x00000008
PRODUCT_DATACENTER_SERVER_CORE = 0x0000000C
PRODUCT_DATACENTER_SERVER_CORE_V = 0x00000027
PRODUCT_DATACENTER_SERVER_V = | |
import unittest
from oaipmh import (
entities,
exceptions,
)
class ResumptionTokenInterfaceTest:
    """Mixin asserting that ``self.object`` exposes the resumption-token API.

    Concrete test cases must set ``self.object`` (usually in ``setUp``) and
    also inherit from ``unittest.TestCase`` so ``assertTrue`` is available.
    """

    def _assert_responds_to(self, attribute_name):
        # Shared check: the object under test must expose the attribute.
        self.assertTrue(hasattr(self.object, attribute_name))

    def test_responds_to_encode(self):
        self._assert_responds_to('encode')

    def test_responds_to_decode(self):
        self._assert_responds_to('decode')

    def test_responds_to_next(self):
        self._assert_responds_to('next')

    def test_responds_to_new_from_request(self):
        self._assert_responds_to('new_from_request')

    def test_responds_to_query_offset(self):
        self._assert_responds_to('query_offset')

    def test_responds_to_query_from(self):
        self._assert_responds_to('query_from')

    def test_responds_to_query_until(self):
        self._assert_responds_to('query_until')

    def test_responds_to_query_count(self):
        self._assert_responds_to('query_count')

    def test_responds_to_is_first_page(self):
        self._assert_responds_to('is_first_page')
class ResumptionTokenTests(ResumptionTokenInterfaceTest, unittest.TestCase):
    """Behavioral tests for ``entities.ChunkedResumptionToken``.

    The encoded wire format is the colon-separated sequence
    ``set:from:until:offset:count:metadataPrefix``.
    """
    def setUp(self):
        self.object = entities.ChunkedResumptionToken()

    def test_token_is_encoded_correctly(self):
        token = entities.ChunkedResumptionToken(set='', from_='1998-01-01',
                until='1998-12-31', offset='1998-01-01(0)', count='1000',
                metadataPrefix='oai_dc')
        self.assertEqual(token.encode(),
                ':1998-01-01:1998-12-31:1998-01-01(0):1000:oai_dc')

    def test_encode_ommit_empty_strings(self):
        token = entities.ChunkedResumptionToken(set='', from_='', until='',
                offset='1998-01-01(0)', count='1000', metadataPrefix='oai_dc')
        self.assertEqual(token.encode(),
                ':::1998-01-01(0):1000:oai_dc')

    def test_encode_turns_integer_to_string(self):
        token = entities.ChunkedResumptionToken(set='', from_='1998-01-01',
                until='1998-12-31', offset='1998-01-01(0)', count=1000,
                metadataPrefix='oai_dc')
        self.assertEqual(token.encode(),
                ':1998-01-01:1998-12-31:1998-01-01(0):1000:oai_dc')

    def test_encode_treats_none_as_empty_strings(self):
        token = entities.ChunkedResumptionToken(set='', from_='1998-01-01',
                until='1998-12-31', offset='1998-01-01(0)', count=None,
                metadataPrefix='oai_dc')
        self.assertEqual(token.encode(),
                ':1998-01-01:1998-12-31:1998-01-01(0)::oai_dc')

    def test_token_is_decoded_correctly(self):
        # BUGFIX: the literal had been mangled by a scrubbing tool
        # ('foo:1<PASSWORD>-01-01:...'); restored to the value whose decode
        # matches the expected token below (set='foo', from_='1998-01-01').
        token = 'foo:1998-01-01:1998-12-31:1998-01-01(0):1000:oai_dc'
        self.assertEqual(entities.ChunkedResumptionToken.decode(token),
                entities.ChunkedResumptionToken(set='foo', from_='1998-01-01',
                    until='1998-12-31', offset='1998-01-01(0)', count='1000',
                    metadataPrefix='oai_dc'))

    def test_decodes_empty_values_to_empty_strings(self):
        token = ':::1998-01-01(0):1000:oai_dc'
        self.assertEqual(entities.ChunkedResumptionToken.decode(token),
                entities.ChunkedResumptionToken(set='', from_='', until='',
                    offset='1998-01-01(0)', count='1000',
                    metadataPrefix='oai_dc'))

    def test_first_page_detection(self):
        token = entities.ChunkedResumptionToken(set='', from_='1998-01-01',
                until='1998-12-31', offset='1998-01-01(0)', count='1000',
                metadataPrefix='oai_dc')
        self.assertTrue(token.is_first_page())

    def test_non_first_page_detection(self):
        token = entities.ChunkedResumptionToken(set='', from_='1998-01-01',
                until='1998-12-31', offset='1998-01-01(100)', count='1000',
                metadataPrefix='oai_dc')
        self.assertFalse(token.is_first_page())

    def test_non_first_page_detection_on_different_from_year(self):
        token = entities.ChunkedResumptionToken(set='', from_='1998-01-01',
                until='1999-12-31', offset='1999-01-01(0)', count='1000',
                metadataPrefix='oai_dc')
        self.assertFalse(token.is_first_page())
class ResumptionTokenPrivateMethodTests(unittest.TestCase):
    """Exercise the private offset-increment helpers of ChunkedResumptionToken."""

    def test_increments_offset_size(self):
        # Advancing within the same "from" date bumps the offset by `count`.
        start = entities.ChunkedResumptionToken(set='', from_='1998-01-01',
                until='1998-12-31', offset='1998-01-01(0)', count='1000',
                metadataPrefix='oai_dc')
        expected = entities.ChunkedResumptionToken(set='', from_='1998-01-01',
                until='1998-12-31', offset='1998-01-01(1000)', count='1000',
                metadataPrefix='oai_dc')
        self.assertEqual(start._incr_offset_size(), expected)

    def test_increments_offset_from(self):
        # Exhausting a date window moves the offset to the next day at zero.
        start = entities.ChunkedResumptionToken(set='', from_='1998-01-01',
                until='1998-12-31', offset='1998-12-31(1001)', count='1000',
                metadataPrefix='oai_dc')
        expected = entities.ChunkedResumptionToken(set='', from_='1998-01-01',
                until='1998-12-31', offset='1999-01-01(0)', count='1000',
                metadataPrefix='oai_dc')
        self.assertEqual(start._incr_offset_from(), expected)
class ResumptionTokenSyntaxTests:
    """Mixin verifying resumption-token syntax validation.

    Token-format validity depends on the value of the ``default_count``
    argument passed to the ``new_from_request`` factory; in addition, the
    validation schema depends on the value of ``OAIRequest.verb``.
    Concrete subclasses must override the ``verb`` and ``count`` attributes
    for these tests to work.
    """
    verb = NotImplemented   # OAI-PMH verb under test, e.g. 'ListRecords'
    count = NotImplemented  # default_count handed to the factory

    def setUp(self):
        def _build(token):
            # Route the raw token through an OAIRequest so the factory sees
            # exactly what a real request handler would pass it.
            request = self.makeOne(resumptionToken=token)
            return entities.ChunkedResumptionToken.new_from_request(
                oairequest=request, default_count=self.count,
                default_from='1998-01-01', default_until='1999-11-07')
        self.create = _build

    def makeOne(self, **kwargs):
        """Build an OAIRequest with every field defaulted, overridden by kwargs."""
        attrs = {'verb': self.verb, 'identifier': None, 'metadataPrefix': None,
                 'set': None, 'resumptionToken': None, 'from_': None,
                 'until': None}
        attrs.update(kwargs)
        return entities.OAIRequest(**attrs)
class ListRecordsResumptionTokenTests(ResumptionTokenSyntaxTests, unittest.TestCase):
    """Token-syntax truth table for the ``ListRecords`` verb.

    Token layout is ``set:from:until:offset:count:metadataPrefix``.  Cases
    1-64 enumerate presence/absence combinations: a token is accepted only
    when ``offset``, ``count`` and ``metadataPrefix`` are all non-empty
    (cases 1, 9, 17, 25, 33, 41, 49, 57); every other combination must raise
    BadResumptionTokenError.

    NOTE(review): several token literals in cases 26, 37, 38, 47 and 48 were
    corrupted by a ``<PASSWORD>`` redaction artifact; they have been restored
    from the byte-parallel table in ListIdentifiersResumptionTokenTests.
    """
    verb = 'ListRecords'
    count = 10

    def test_case_1(self):
        # all significant fields present -> valid
        self.create('setname:1998-01-01:1998-12-31:1998-01-01(0):10:oai_dc')

    def test_case_2(self):
        # missing metadataPrefix -> invalid
        self.assertRaises(exceptions.BadResumptionTokenError, self.create,
                          'setname:1998-01-01:1998-12-31:1998-01-01(0):10:')

    def test_case_3(self):
        self.assertRaises(exceptions.BadResumptionTokenError, self.create,
                          'setname:1998-01-01:1998-12-31:1998-01-01(0)::oai_dc')

    def test_case_4(self):
        self.assertRaises(exceptions.BadResumptionTokenError, self.create,
                          'setname:1998-01-01:1998-12-31:1998-01-01(0)::')

    def test_case_5(self):
        self.assertRaises(exceptions.BadResumptionTokenError, self.create,
                          'setname:1998-01-01:1998-12-31::10:oai_dc')

    def test_case_6(self):
        self.assertRaises(exceptions.BadResumptionTokenError, self.create,
                          'setname:1998-01-01:1998-12-31::10:')

    def test_case_7(self):
        self.assertRaises(exceptions.BadResumptionTokenError, self.create,
                          'setname:1998-01-01:1998-12-31:::oai_dc')

    def test_case_8(self):
        self.assertRaises(exceptions.BadResumptionTokenError, self.create,
                          'setname:1998-01-01:1998-12-31:::')

    def test_case_9(self):
        # empty 'until' is allowed
        self.create('setname:1998-01-01::1998-01-01(0):10:oai_dc')

    def test_case_10(self):
        self.assertRaises(exceptions.BadResumptionTokenError, self.create,
                          'setname:1998-01-01::1998-01-01(0):10:')

    def test_case_11(self):
        self.assertRaises(exceptions.BadResumptionTokenError, self.create,
                          'setname:1998-01-01::1998-01-01(0)::oai_dc')

    def test_case_12(self):
        self.assertRaises(exceptions.BadResumptionTokenError, self.create,
                          'setname:1998-01-01::1998-01-01(0)::')

    def test_case_13(self):
        self.assertRaises(exceptions.BadResumptionTokenError, self.create,
                          'setname:1998-01-01:::10:oai_dc')

    def test_case_14(self):
        self.assertRaises(exceptions.BadResumptionTokenError, self.create,
                          'setname:1998-01-01:::10:')

    def test_case_15(self):
        self.assertRaises(exceptions.BadResumptionTokenError, self.create,
                          'setname:1998-01-01::::oai_dc')

    def test_case_16(self):
        self.assertRaises(exceptions.BadResumptionTokenError, self.create,
                          'setname:1998-01-01::::')

    def test_case_17(self):
        # empty 'from' is allowed
        self.create('setname::1998-12-31:1998-01-01(0):10:oai_dc')

    def test_case_18(self):
        self.assertRaises(exceptions.BadResumptionTokenError, self.create,
                          'setname::1998-12-31:1998-01-01(0):10:')

    def test_case_19(self):
        self.assertRaises(exceptions.BadResumptionTokenError, self.create,
                          'setname::1998-12-31:1998-01-01(0)::oai_dc')

    def test_case_20(self):
        self.assertRaises(exceptions.BadResumptionTokenError, self.create,
                          'setname::1998-12-31:1998-01-01(0)::')

    def test_case_21(self):
        self.assertRaises(exceptions.BadResumptionTokenError, self.create,
                          'setname::1998-12-31::10:oai_dc')

    def test_case_22(self):
        self.assertRaises(exceptions.BadResumptionTokenError, self.create,
                          'setname::1998-12-31::10:')

    def test_case_23(self):
        self.assertRaises(exceptions.BadResumptionTokenError, self.create,
                          'setname::1998-12-31:::oai_dc')

    def test_case_24(self):
        self.assertRaises(exceptions.BadResumptionTokenError, self.create,
                          'setname::1998-12-31:::')

    def test_case_25(self):
        # empty 'from' and 'until' are allowed together
        self.create('setname:::1998-01-01(0):10:oai_dc')

    def test_case_26(self):
        # restored token (was 'setname:::0:10:' after redaction damage)
        self.assertRaises(exceptions.BadResumptionTokenError, self.create,
                          'setname:::1998-01-01(0):10:')

    def test_case_27(self):
        self.assertRaises(exceptions.BadResumptionTokenError, self.create,
                          'setname:::1998-01-01(0)::oai_dc')

    def test_case_28(self):
        self.assertRaises(exceptions.BadResumptionTokenError, self.create,
                          'setname:::1998-01-01(0)::')

    def test_case_29(self):
        self.assertRaises(exceptions.BadResumptionTokenError, self.create,
                          'setname::::10:oai_dc')

    def test_case_30(self):
        self.assertRaises(exceptions.BadResumptionTokenError, self.create,
                          'setname::::10:')

    def test_case_31(self):
        self.assertRaises(exceptions.BadResumptionTokenError, self.create,
                          'setname:::::oai_dc')

    def test_case_32(self):
        self.assertRaises(exceptions.BadResumptionTokenError, self.create,
                          'setname:::::')

    def test_case_33(self):
        # empty 'set' is allowed
        self.create(':1998-01-01:1998-12-31:1998-01-01(0):10:oai_dc')

    def test_case_34(self):
        self.assertRaises(exceptions.BadResumptionTokenError, self.create,
                          ':1998-01-01:1998-12-31:1998-01-01(0):10:')

    def test_case_35(self):
        self.assertRaises(exceptions.BadResumptionTokenError, self.create,
                          ':1998-01-01:1998-12-31:1998-01-01(0)::oai_dc')

    def test_case_36(self):
        self.assertRaises(exceptions.BadResumptionTokenError, self.create,
                          ':1998-01-01:1998-12-31:1998-01-01(0)::')

    def test_case_37(self):
        # restored token (redaction artifact in the '1998' year)
        self.assertRaises(exceptions.BadResumptionTokenError, self.create,
                          ':1998-01-01:1998-12-31::10:oai_dc')

    def test_case_38(self):
        # restored token (redaction artifact in the '1998' year)
        self.assertRaises(exceptions.BadResumptionTokenError, self.create,
                          ':1998-01-01:1998-12-31::10:')

    def test_case_39(self):
        self.assertRaises(exceptions.BadResumptionTokenError, self.create,
                          ':1998-01-01:1998-12-31:::oai_dc')

    def test_case_40(self):
        self.assertRaises(exceptions.BadResumptionTokenError, self.create,
                          ':1998-01-01:1998-12-31:::')

    def test_case_41(self):
        self.create(':1998-01-01::1998-01-01(0):10:oai_dc')

    def test_case_42(self):
        self.assertRaises(exceptions.BadResumptionTokenError, self.create,
                          ':1998-01-01::1998-01-01(0):10:')

    def test_case_43(self):
        self.assertRaises(exceptions.BadResumptionTokenError, self.create,
                          ':1998-01-01::1998-01-01(0)::oai_dc')

    def test_case_44(self):
        self.assertRaises(exceptions.BadResumptionTokenError, self.create,
                          ':1998-01-01::1998-01-01(0)::')

    def test_case_45(self):
        self.assertRaises(exceptions.BadResumptionTokenError, self.create,
                          ':1998-01-01:::10:oai_dc')

    def test_case_46(self):
        self.assertRaises(exceptions.BadResumptionTokenError, self.create,
                          ':1998-01-01:::10:')

    def test_case_47(self):
        # restored token (line lost its opening quote and date to redaction)
        self.assertRaises(exceptions.BadResumptionTokenError, self.create,
                          ':1998-01-01::::oai_dc')

    def test_case_48(self):
        # restored token (redaction artifact in the date field)
        self.assertRaises(exceptions.BadResumptionTokenError, self.create,
                          ':1998-01-01::::')

    def test_case_49(self):
        self.create('::1998-12-31:1998-01-01(0):10:oai_dc')

    def test_case_50(self):
        self.assertRaises(exceptions.BadResumptionTokenError, self.create,
                          '::1998-12-31:1998-01-01(0):10:')

    def test_case_51(self):
        self.assertRaises(exceptions.BadResumptionTokenError, self.create,
                          '::1998-12-31:1998-01-01(0)::oai_dc')

    def test_case_52(self):
        self.assertRaises(exceptions.BadResumptionTokenError, self.create,
                          '::1998-12-31:1998-01-01(0)::')

    def test_case_53(self):
        self.assertRaises(exceptions.BadResumptionTokenError, self.create,
                          '::1998-12-31::10:oai_dc')

    def test_case_54(self):
        self.assertRaises(exceptions.BadResumptionTokenError, self.create,
                          '::1998-12-31::10:')

    def test_case_55(self):
        self.assertRaises(exceptions.BadResumptionTokenError, self.create,
                          '::1998-12-31:::oai_dc')

    def test_case_56(self):
        self.assertRaises(exceptions.BadResumptionTokenError, self.create,
                          '::1998-12-31:::')

    def test_case_57(self):
        # only the mandatory fields -> still valid
        self.create(':::1998-01-01(0):10:oai_dc')

    def test_case_58(self):
        self.assertRaises(exceptions.BadResumptionTokenError, self.create,
                          ':::1998-01-01(0):10:')

    def test_case_59(self):
        self.assertRaises(exceptions.BadResumptionTokenError, self.create,
                          ':::1998-01-01(0)::oai_dc')

    def test_case_60(self):
        self.assertRaises(exceptions.BadResumptionTokenError, self.create,
                          ':::1998-01-01(0)::')

    def test_case_61(self):
        self.assertRaises(exceptions.BadResumptionTokenError, self.create,
                          '::::10:oai_dc')

    def test_case_62(self):
        self.assertRaises(exceptions.BadResumptionTokenError, self.create,
                          '::::10:')

    def test_case_63(self):
        self.assertRaises(exceptions.BadResumptionTokenError, self.create,
                          ':::::oai_dc')

    def test_case_64(self):
        # completely empty token -> invalid
        self.assertRaises(exceptions.BadResumptionTokenError, self.create,
                          ':::::')
class ListIdentifiersResumptionTokenTests(ResumptionTokenSyntaxTests, unittest.TestCase):
verb = 'ListIdentifiers'
count = 10
def test_case_1(self):
token = 'setname:1998-01-01:1998-12-31:1998-01-01(0):10:oai_dc'
self.create(token)
def test_case_2(self):
token = 'setname:1998-01-01:1998-12-31:1998-01-01(0):10:'
self.assertRaises(exceptions.BadResumptionTokenError, self.create, token)
def test_case_3(self):
token = 'setname:1998-01-01:1998-12-31:1998-01-01(0)::oai_dc'
self.assertRaises(exceptions.BadResumptionTokenError, self.create, token)
def test_case_4(self):
token = 'setname:1998-01-01:1998-12-31:1998-01-01(0)::'
self.assertRaises(exceptions.BadResumptionTokenError, self.create, token)
def test_case_5(self):
token = 'setname:1998-01-01:1998-12-31::10:oai_dc'
self.assertRaises(exceptions.BadResumptionTokenError, self.create, token)
def test_case_6(self):
token = 'setname:1998-01-01:1998-12-31::10:'
self.assertRaises(exceptions.BadResumptionTokenError, self.create, token)
def test_case_7(self):
token = 'setname:1998-01-01:1998-12-31:::oai_dc'
self.assertRaises(exceptions.BadResumptionTokenError, self.create, token)
def test_case_8(self):
token = 'setname:1998-01-01:1998-12-31:::'
self.assertRaises(exceptions.BadResumptionTokenError, self.create, token)
def test_case_9(self):
token = 'setname:1998-01-01::1998-01-01(0):10:oai_dc'
self.create(token)
def test_case_10(self):
token = 'setname:1998-01-01::1998-01-01(0):10:'
self.assertRaises(exceptions.BadResumptionTokenError, self.create, token)
def test_case_11(self):
token = 'setname:1998-01-01::1998-01-01(0)::oai_dc'
self.assertRaises(exceptions.BadResumptionTokenError, self.create, token)
def test_case_12(self):
token = 'setname:1998-01-01::1998-01-01(0)::'
self.assertRaises(exceptions.BadResumptionTokenError, self.create, token)
def test_case_13(self):
token = 'setname:1998-01-01:::10:oai_dc'
self.assertRaises(exceptions.BadResumptionTokenError, self.create, token)
def test_case_14(self):
token = 'setname:1998-01-01:::10:'
self.assertRaises(exceptions.BadResumptionTokenError, self.create, token)
def test_case_15(self):
token = 'setname:1998-01-01::::oai_dc'
self.assertRaises(exceptions.BadResumptionTokenError, self.create, token)
def test_case_16(self):
token = 'setname:1998-01-01::::'
self.assertRaises(exceptions.BadResumptionTokenError, self.create, token)
def test_case_17(self):
token = 'setname::1998-12-31:1998-01-01(0):10:oai_dc'
self.create(token)
def test_case_18(self):
token = 'setname::1998-12-31:1998-01-01(0):10:'
self.assertRaises(exceptions.BadResumptionTokenError, self.create, token)
def test_case_19(self):
token = 'setname::1998-12-31:1998-01-01(0)::oai_dc'
self.assertRaises(exceptions.BadResumptionTokenError, self.create, token)
def test_case_20(self):
token = 'setname::1998-12-31:1998-01-01(0)::'
self.assertRaises(exceptions.BadResumptionTokenError, self.create, token)
def test_case_21(self):
token = 'setname::1998-12-31::10:oai_dc'
self.assertRaises(exceptions.BadResumptionTokenError, self.create, token)
def test_case_22(self):
token = 'setname::1998-12-31::10:'
self.assertRaises(exceptions.BadResumptionTokenError, self.create, token)
def test_case_23(self):
token = 'setname::1998-12-31:::oai_dc'
self.assertRaises(exceptions.BadResumptionTokenError, self.create, token)
def test_case_24(self):
token = 'setname::1998-12-31:::'
self.assertRaises(exceptions.BadResumptionTokenError, self.create, token)
def test_case_25(self):
token = 'setname:::1998-01-01(0):10:oai_dc'
self.create(token)
def test_case_26(self):
token = 'setname:::1998-01-01(0):10:'
self.assertRaises(exceptions.BadResumptionTokenError, self.create, token)
def test_case_27(self):
token = 'setname:::1998-01-01(0)::oai_dc'
self.assertRaises(exceptions.BadResumptionTokenError, self.create, token)
def test_case_28(self):
token = 'setname:::1998-01-01(0)::'
self.assertRaises(exceptions.BadResumptionTokenError, self.create, token)
def test_case_29(self):
token = 'setname::::10:oai_dc'
self.assertRaises(exceptions.BadResumptionTokenError, self.create, token)
def test_case_30(self):
token = 'setname::::10:'
self.assertRaises(exceptions.BadResumptionTokenError, self.create, token)
def test_case_31(self):
token = 'setname:::::oai_dc'
self.assertRaises(exceptions.BadResumptionTokenError, self.create, token)
def test_case_32(self):
token = 'setname:::::'
self.assertRaises(exceptions.BadResumptionTokenError, self.create, token)
def test_case_33(self):
token = ':1998-01-01:1998-12-31:1998-01-01(0):10:oai_dc'
self.create(token)
def test_case_34(self):
token = ':1998-01-01:1998-12-31:1998-01-01(0):10:'
self.assertRaises(exceptions.BadResumptionTokenError, self.create, token)
def test_case_35(self):
    # restored token: the original line was corrupted by a '<PASSWORD>'
    # redaction artifact; reconstructed from the identical case 35 of
    # ListRecordsResumptionTokenTests (empty count -> invalid).
    token = ':1998-01-01:1998-12-31:1998-01-01(0)::oai_dc'
    self.assertRaises(exceptions.BadResumptionTokenError, self.create, token)
def test_case_36(self):
token = ':1998-01-01:1998-12-31:1998-01-01(0)::'
self.assertRaises(exceptions.BadResumptionTokenError, self.create, token)
def test_case_37(self):
token = ':1998-01-01:1998-12-31::10:oai_dc'
self.assertRaises(exceptions.BadResumptionTokenError, self.create, token)
def test_case_38(self):
token = ':1998-01-01:1998-12-31::10:'
self.assertRaises(exceptions.BadResumptionTokenError, self.create, token)
def test_case_39(self):
token = ':1998-01-01:1998-12-31:::oai_dc'
self.assertRaises(exceptions.BadResumptionTokenError, self.create, token)
def test_case_40(self):
token = ':1998-01-01:1998-12-31:::'
self.assertRaises(exceptions.BadResumptionTokenError, self.create, token)
def test_case_41(self):
token = ':1998-01-01::1998-01-01(0):10:oai_dc'
self.create(token)
def test_case_42(self):
token = ':1998-01-01::1998-01-01(0):10:'
self.assertRaises(exceptions.BadResumptionTokenError, self.create, token)
def test_case_43(self):
token = ':1998-01-01::1998-01-01(0)::oai_dc'
self.assertRaises(exceptions.BadResumptionTokenError, self.create, token)
def test_case_44(self):
token = ':1998-01-01::1998-01-01(0)::'
self.assertRaises(exceptions.BadResumptionTokenError, self.create, token)
def test_case_45(self):
token = ':1998-01-01:::10:oai_dc'
self.assertRaises(exceptions.BadResumptionTokenError, self.create, token)
def test_case_46(self):
    # restored token: the original line was corrupted by a '<PASSWORD>'
    # redaction artifact; reconstructed from the identical case 46 of
    # ListRecordsResumptionTokenTests (empty metadataPrefix -> invalid).
    token = ':1998-01-01:::10:'
    self.assertRaises(exceptions.BadResumptionTokenError, self.create, token)
def test_case_47(self):
    # restored token: the original line was corrupted by a '<PASSWORD>'
    # redaction artifact; reconstructed from the identical case 47 of
    # ListRecordsResumptionTokenTests (empty offset/count -> invalid).
    token = ':1998-01-01::::oai_dc'
    self.assertRaises(exceptions.BadResumptionTokenError, self.create, token)
def test_case_48(self):
token = ':1998-01-01::::'
self.assertRaises(exceptions.BadResumptionTokenError, self.create, token)
def test_case_49(self):
token = '::1998-12-31:1998-01-01(0):10:oai_dc'
self.create(token)
def test_case_50(self):
token = '::1998-12-31:1998-01-01(0):10:'
self.assertRaises(exceptions.BadResumptionTokenError, self.create, token)
def test_case_51(self):
token = '::1998-12-31:1998-01-01(0)::oai_dc'
self.assertRaises(exceptions.BadResumptionTokenError, self.create, token)
def test_case_52(self):
token = '::1998-12-31:1998-01-01(0)::'
self.assertRaises(exceptions.BadResumptionTokenError, self.create, token)
def test_case_53(self):
token = '::1998-12-31::10:oai_dc'
self.assertRaises(exceptions.BadResumptionTokenError, self.create, token)
def test_case_54(self):
token = '::1998-12-31::10:'
self.assertRaises(exceptions.BadResumptionTokenError, self.create, token)
def test_case_55(self):
token = '::1998-12-31:::oai_dc'
self.assertRaises(exceptions.BadResumptionTokenError, self.create, token)
def test_case_56(self):
token = '::1998-12-31:::'
self.assertRaises(exceptions.BadResumptionTokenError, self.create, token)
def test_case_57(self):
token = ':::1998-01-01(0):10:oai_dc'
self.create(token)
def test_case_58(self):
token = ':::1998-01-01(0):10:'
self.assertRaises(exceptions.BadResumptionTokenError, self.create, token)
def test_case_59(self):
token = ':::1998-01-01(0)::oai_dc'
self.assertRaises(exceptions.BadResumptionTokenError, self.create, token)
def test_case_60(self):
token = ':::1998-01-01(0)::'
self.assertRaises(exceptions.BadResumptionTokenError, self.create, token)
def test_case_61(self):
token = '::::10:oai_dc'
self.assertRaises(exceptions.BadResumptionTokenError, self.create, token)
def test_case_62(self):
token = '::::10:'
self.assertRaises(exceptions.BadResumptionTokenError, self.create, token)
def test_case_63(self):
token = ':::::oai_dc'
self.assertRaises(exceptions.BadResumptionTokenError, self.create, | |
import pytest
from tulius.forum.threads import models
from tulius.forum.comments import signals
def test_comments_api(client, superuser, admin, user):
    """End-to-end exercise of the forum comments API.

    Walks one shared database through room/thread creation, read/write
    rights, posting, preview, editing, deletion and reply validation as seen
    by four clients: superuser, admin, a regular user and an anonymous
    client.  Statement order matters — every step builds on the server
    state left by the previous one.
    """
    # create root room and thread in it
    response = superuser.put(
        '/api/forum/', {
            'title': 'group', 'body': 'group description',
            'room': True, 'default_rights': None, 'granted_rights': []})
    assert response.status_code == 200
    group = response.json()
    response = admin.put(
        group['url'], {
            'title': 'thread', 'body': 'thread description',
            'room': False, 'default_rights': models.NO_ACCESS,
            'granted_rights': [], 'media': {}})
    assert response.status_code == 200
    thread = response.json()
    # creating a thread implicitly creates its first comment
    assert thread['first_comment_id'] is not None
    # check how thread looks on room page
    response = admin.get(group['url'])
    assert response.status_code == 200
    data = response.json()
    assert data['threads'][0]['comments_count'] == 1
    last_comment = data['threads'][0]['last_comment']
    assert last_comment['id'] == thread['first_comment_id']
    # check comments not readable for other users (thread was created NO_ACCESS)
    response = user.get(thread['url'] + 'comments_page/')
    assert response.status_code == 403
    # make thread readable
    response = admin.put(
        thread['url'] + 'granted_rights/', {
            'default_rights': models.ACCESS_READ
        })
    assert response.status_code == 200
    # check user now can read comments
    response = user.get(thread['url'] + 'comments_page/')
    assert response.status_code == 200
    data = response.json()
    assert len(data['comments']) == 1
    first_comment = data['comments'][0]
    # the first comment mirrors the thread itself and is not editable
    assert first_comment['title'] == 'thread'
    assert first_comment['body'] == 'thread description'
    assert first_comment['is_thread']
    assert not first_comment['edit_right']
    assert first_comment['id'] == thread['first_comment_id']
    # check that user can't post comments (access is read-only)
    response = user.post(
        thread['url'] + 'comments_page/', {
            'reply_id': first_comment['id'],
            'title': 'hello', 'body': 'world',
            'media': {},
        })
    assert response.status_code == 403
    # make thread opened
    response = admin.put(
        thread['url'] + 'granted_rights/', {'default_rights': None})
    assert response.status_code == 200
    # check comment preview works
    response = user.post(
        thread['url'] + 'comments_page/', {
            'reply_id': first_comment['id'],
            'title': 'hello', 'body': 'world',
            'media': {}, 'preview': True,
        })
    assert response.status_code == 200
    data = response.json()
    # preview returns the rendered comment but allocates no id
    assert data['id'] is None
    assert data['user']['id'] == user.user.pk
    assert data['title'] == 'hello'
    assert data['body'] == 'world'
    # check that comment really not created
    response = user.get(thread['url'] + 'comments_page/')
    assert response.status_code == 200
    data = response.json()
    assert len(data['comments']) == 1
    # now really post comment
    response = user.post(
        thread['url'] + 'comments_page/', {
            'reply_id': first_comment['id'],
            'title': 'hello', 'body': 'world',
            'media': {},
        })
    assert response.status_code == 200
    data = response.json()
    assert len(data['comments']) == 2
    comment = data['comments'][1]
    assert comment['id']
    assert comment['user']['id'] == user.user.pk
    assert comment['title'] == 'hello'
    assert comment['body'] == 'world'
    # check anonymous user can't post comments
    response = client.post(
        thread['url'] + 'comments_page/', {
            'reply_id': first_comment['id'],
            'title': 'hello', 'body': 'world',
            'media': {},
        })
    assert response.status_code == 403
    # check how thread looks on room page
    response = admin.get(group['url'])
    assert response.status_code == 200
    data = response.json()
    assert data['threads'][0]['comments_count'] == 2
    last_comment = data['threads'][0]['last_comment']
    assert last_comment['id'] == comment['id']
    # check user can update his comment
    response = user.post(
        comment['url'], {
            'reply_id': first_comment['id'],
            'title': 'hello world', 'body': 'world is great',
            'media': {},
        })
    assert response.status_code == 200
    data = response.json()
    assert data['id'] == comment['id']
    assert data['title'] == 'hello world'
    assert data['body'] == 'world is great'
    # check it is really updated
    response = user.get(comment['url'])
    assert response.status_code == 200
    data = response.json()
    assert data['id'] == comment['id']
    assert data['title'] == 'hello world'
    assert data['body'] == 'world is great'
    # delete comment
    response = user.delete(comment['url'] + '?comment=wow')
    assert response.status_code == 200
    # check it is deleted
    response = user.get(thread['url'] + 'comments_page/')
    assert response.status_code == 200
    data = response.json()
    assert len(data['comments']) == 1
    assert data['comments'][0]['id'] == first_comment['id']
    # check how thread looks on room page (counters rolled back)
    response = admin.get(group['url'])
    assert response.status_code == 200
    data = response.json()
    assert data['threads'][0]['comments_count'] == 1
    last_comment = data['threads'][0]['last_comment']
    assert last_comment['id'] == thread['first_comment_id']
    # check we can't delete first comment — it represents the thread itself
    response = superuser.delete(first_comment['url'] + '?comment=wow')
    assert response.status_code == 403
    # add comment by admin
    response = admin.post(
        thread['url'] + 'comments_page/', {
            'reply_id': first_comment['id'],
            'title': 'Im admin', 'body': 'my comment is awesome',
            'media': {},
        })
    assert response.status_code == 200
    data = response.json()
    assert len(data['comments']) == 2
    admin_comment = data['comments'][1]
    # check user can't delete it
    response = user.delete(admin_comment['url'] + '?comment=wow')
    assert response.status_code == 403
    # check user can't update it
    response = user.post(
        admin_comment['url'], {
            'reply_id': first_comment['id'],
            'title': 'hello world', 'body': 'world is great',
            'media': {},
        })
    assert response.status_code == 403
    # check comments readable by anonymous user
    response = client.get(thread['url'] + 'comments_page/')
    assert response.status_code == 200
    data = response.json()
    assert len(data['comments']) == 2
    # check we can't update first comment as comment
    response = admin.post(
        first_comment['url'], {
            'reply_id': first_comment['id'],
            'title': 'hello world', 'body': 'world is great',
            'media': {},
        })
    assert response.status_code == 403
    # check update comment preview
    response = admin.post(
        admin_comment['url'], {
            'reply_id': first_comment['id'],
            'title': 'hello world', 'body': 'world is great',
            'media': {}, 'preview': True
        })
    assert response.status_code == 200
    data = response.json()
    assert data['id'] == admin_comment['id']
    assert data['title'] == 'hello world'
    assert data['body'] == 'world is great'
    # check it is not really updated (preview leaves the stored comment intact)
    response = admin.get(admin_comment['url'])
    assert response.status_code == 200
    data = response.json()
    assert data['title'] == 'Im admin'
    assert data['body'] == 'my comment is awesome'
    # check we can't reply to comment in other thread
    response = admin.put(
        group['url'], {
            'title': 'thread2', 'body': 'thread2 description',
            'room': False, 'default_rights': None,
            'granted_rights': [], 'media': {}})
    assert response.status_code == 200
    thread2 = response.json()
    response = admin.post(
        thread['url'] + 'comments_page/', {
            'reply_id': thread2['first_comment_id'],
            'title': 'Im admin2', 'body': 'my comment is awesome2',
            'media': {},
        })
    assert response.status_code == 403
    # check comment without body is not added (request succeeds, list unchanged)
    response = admin.post(
        thread['url'] + 'comments_page/', {
            'reply_id': thread['first_comment_id'],
            'title': 'Im admin2', 'body': '',
            'media': {},
        })
    assert response.status_code == 200
    data = response.json()
    assert len(data['comments']) == 2
def test_broken_last_comment(room_group, thread, user):
    """A corrupted cached last-comment counter must not break the room view."""
    # check last comment is in place before corrupting anything
    response = user.get(room_group['url'])
    assert response.status_code == 200
    data = response.json()
    last_comment = data['threads'][0]['last_comment']
    assert last_comment['id'] == thread['first_comment_id']
    # corrupt the cached counter directly in the database
    obj = models.Thread.objects.get(pk=thread['id'])
    obj.data['last_comment']['all'] += 1
    obj.save()
    # the view degrades gracefully: 'last_comment' is omitted instead of erroring
    response = user.get(room_group['url'])
    assert response.status_code == 200
    data = response.json()
    assert 'last_comment' not in data['threads'][0]
def _my_receiver(comment, **_kwargs):
comment.media['bar'] = 'foo'
return True
def test_after_update_saves_comment(thread, user):
    """Mutations made by an ``after_add`` signal receiver must be persisted."""
    # attach a receiver that mutates the comment's media on the fly
    signals.after_add.connect(_my_receiver)
    try:
        response = user.post(
            thread['url'] + 'comments_page/', {
                'reply_id': thread['first_comment_id'],
                'title': 'ho ho ho', 'body': 'happy new year',
                'media': {},
            })
    finally:
        # disconnect() is truthy only if the receiver was actually attached
        assert signals.after_add.disconnect(_my_receiver)
    assert response.status_code == 200
    data = response.json()
    response = user.get(data['comments'][1]['url'])
    assert response.status_code == 200
    comment = response.json()
    # the receiver's change to 'media' survived the save
    assert comment['media']['bar'] == 'foo'
def test_comment_counters_on_rights_change(room_group, admin, client):
    """Closing a thread hides its comment counters from clients without access."""
    # Create room in root room
    response = admin.put(
        room_group['url'], {
            'title': 'room1', 'body': 'room1 description',
            'room': True, 'default_rights': None,
            'granted_rights': []})
    assert response.status_code == 200
    room = response.json()
    # create thread (open by default)
    response = admin.put(
        room['url'], {
            'title': 'thread1', 'body': 'thread1 description',
            'room': False, 'default_rights': None,
            'granted_rights': [], 'media': {}})
    assert response.status_code == 200
    thread = response.json()
    # check initial state: both admin and anonymous see the counters
    response = admin.get(room_group['url'])
    assert response.status_code == 200
    data = response.json()
    assert data['rooms'][0]['last_comment']['id']
    assert data['rooms'][0]['comments_count'] == 1
    response = client.get(room_group['url'])
    assert response.status_code == 200
    data = response.json()
    assert data['rooms'][0]['last_comment']['id']
    assert data['rooms'][0]['comments_count'] == 1
    # close thread
    response = admin.put(
        thread['url'] + 'granted_rights/', {
            'default_rights': models.NO_ACCESS})
    assert response.status_code == 200
    # check counters: admin still sees them
    response = admin.get(room_group['url'])
    assert response.status_code == 200
    data = response.json()
    assert data['rooms'][0]['last_comment']['id']
    assert data['rooms'][0]['comments_count'] == 1
    # but the anonymous client does not
    response = client.get(room_group['url'])
    assert response.status_code == 200
    data = response.json()
    assert 'last_comment' not in data['rooms'][0]
    assert data['rooms'][0]['comments_count'] == 0
def test_comment_counters_on_rights_combination(room_group, admin, user):
# Create room in root room
response = admin.put(
room_group['url'], {
'title': 'room1', 'body': 'room1 description',
'room': True, 'default_rights': None,
'granted_rights': []})
assert response.status_code == 200
room = response.json()
# create thread1 - closed
response = admin.put(
room['url'], {
'title': 'thread1', 'body': 'thread1 description',
'room': False, 'default_rights': models.NO_ACCESS,
'granted_rights': [], 'media': {}})
assert response.status_code == 200
thread1 = response.json()
# check state
response = admin.get(room_group['url'])
assert response.status_code == 200
data = response.json()
assert data['rooms'][0]['last_comment']['id'] == \
thread1['first_comment_id']
response = user.get(room_group['url'])
assert response.status_code == 200
data = response.json()
assert 'last_comment' not in data['rooms'][0]
# add opened thread
response = admin.put(
room['url'], {
'title': 'thread2', 'body': 'thread1 description',
'room': False, 'default_rights': None,
'granted_rights': [], 'media': {}})
assert response.status_code == 200
thread2 = response.json()
# check it now
response = admin.get(room_group['url'])
assert response.status_code == 200
data = response.json()
assert data['rooms'][0]['last_comment']['id'] == \
thread2['first_comment_id']
assert | |
# Copyright 2018 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ================================================================================================
import collections
import fractions
import itertools
import json
import pickle
import random
import shutil
import tempfile
import unittest
from os import path
import numpy as np
import dimod
from dimod.exceptions import WriteableError
from dimod.binary_quadratic_model import LockableDict
try:
import networkx as nx
_networkx = True
except ImportError:
_networkx = False
try:
import pandas as pd
_pandas = True
except ImportError:
_pandas = False
class TestLockableDict(unittest.TestCase):
    """LockableDict must reject every mutating operation once is_writeable is False."""

    def test__setitem__(self):
        locked = LockableDict({'a': -1})
        locked.is_writeable = False
        with self.assertRaises(WriteableError):
            locked['a'] = 5

    def test__delitem__(self):
        locked = LockableDict({'a': -1})
        locked.is_writeable = False
        with self.assertRaises(WriteableError):
            del locked['a']

    def test_clear(self):
        locked = LockableDict({'a': -1})
        locked.is_writeable = False
        with self.assertRaises(WriteableError):
            locked.clear()
        # re-enabling writeability restores normal dict semantics
        locked.is_writeable = True
        locked.clear()
        self.assertEqual(locked, {})

    def test_pop(self):
        locked = LockableDict({'a': -1})
        locked.is_writeable = False
        with self.assertRaises(WriteableError):
            locked.pop('a')

    def test_popitem(self):
        lockable = LockableDict({'a': -1, 'b': 2})
        lockable.popitem()  # allowed while still writeable
        self.assertEqual(len(lockable), 1)
        lockable.is_writeable = False
        with self.assertRaises(WriteableError):
            lockable.popitem()

    def test_setdefault(self):
        lockable = LockableDict()
        lockable.setdefault('a')
        lockable.setdefault('b', 5)
        self.assertEqual(lockable, {'a': None, 'b': 5})
        lockable.is_writeable = False
        with self.assertRaises(WriteableError):
            lockable.setdefault('c', None)

    def test_update(self):
        lockable = LockableDict()
        # update works normally while writeable
        lockable.update({'a': 1})
        self.assertEqual(lockable, {'a': 1})
        lockable.is_writeable = False
        with self.assertRaises(WriteableError):
            lockable.update({'b': -1})
class TestBinaryQuadraticModel(unittest.TestCase):
def assertConsistentBQM(self, bqm):
    """Assert that bqm.linear, bqm.quadratic and bqm.adj are mutually consistent."""
    # linear and adjacency must index the same set of variables
    for variable in bqm.linear:
        self.assertIn(variable, bqm.adj)
    for variable in bqm.adj:
        self.assertIn(variable, bqm.linear)

    # every quadratic interaction must be mirrored symmetrically in adj
    for u, v in bqm.quadratic:
        for endpoint in (u, v):
            self.assertIn(endpoint, bqm.linear)
            self.assertIn(endpoint, bqm.adj)
        self.assertIn(u, bqm.adj[v])
        self.assertIn(v, bqm.adj[u])
        self.assertEqual(bqm.quadratic[(u, v)], bqm.adj[u][v])
        self.assertEqual(bqm.adj[u][v], bqm.adj[v][u])

    # every adjacency entry is visible in quadratic under both orderings
    for u in bqm.adj:
        for v in bqm.adj[u]:
            self.assertTrue((u, v) in bqm.quadratic)
            self.assertTrue((v, u) in bqm.quadratic)

    # no duplicate interactions; variables covers exactly the linear terms
    self.assertEqual(len(bqm.quadratic), len(set(bqm.quadratic)))
    self.assertEqual(len(bqm.variables), len(bqm.linear))
def test_construction(self):
# spin model
linear = {0: 1, 1: -1, 2: .5}
quadratic = {(0, 1): .5, (1, 2): 1.5}
offset = 1.4
vartype = dimod.SPIN
bqm = dimod.BinaryQuadraticModel(linear, quadratic, offset, vartype)
self.assertConsistentBQM(bqm)
for v, bias in linear.items():
self.assertEqual(bqm.linear[v], bias)
for v in bqm.linear:
self.assertIn(v, linear)
for (u, v), bias in quadratic.items():
self.assertEqual(bqm.adj[u][v], bias)
for interaction in bqm.quadratic:
self.assertIn(interaction, quadratic)
self.assertEqual(bqm.offset, offset)
#
# binary model
linear = {0: 1, 1: -1, 2: .5}
quadratic = {(0, 1): .5, (1, 2): 1.5}
offset = 1.4
vartype = dimod.BINARY
bqm = dimod.BinaryQuadraticModel(linear, quadratic, offset, vartype)
self.assertConsistentBQM(bqm)
for v, bias in linear.items():
self.assertEqual(bqm.linear[v], bias)
for v in bqm.linear:
self.assertIn(v, linear)
for (u, v), bias in quadratic.items():
self.assertEqual(bqm.adj[u][v], bias)
for interaction in bqm.quadratic:
self.assertIn(interaction, quadratic)
self.assertEqual(bqm.offset, offset)
def test_construction_vartype(self):
"""Check that exceptions get thrown for broken inputs"""
# this biases values are themselves not important, so just choose them randomly
linear = {v: v * .01 for v in range(10)}
quadratic = {(u, v): u * v * .01 for u, v in itertools.combinations(linear, 2)}
offset = 1.2
with self.assertRaises(TypeError):
dimod.BinaryQuadraticModel(linear, quadratic, offset, 147)
with self.assertRaises(TypeError):
dimod.BinaryQuadraticModel(linear, quadratic, offset, 'my made up type')
self.assertEqual(dimod.BinaryQuadraticModel(linear, quadratic, offset, dimod.BINARY).vartype, dimod.BINARY)
self.assertEqual(dimod.BinaryQuadraticModel(linear, quadratic, offset, {-1, 1}).vartype, dimod.SPIN)
self.assertEqual(dimod.BinaryQuadraticModel(linear, quadratic, offset, 'BINARY').vartype, dimod.BINARY)
def test_construction_quadratic(self):
linear = {v: v * .01 for v in range(10)}
quadratic = {(u, v): u * v * .01 for u, v in itertools.combinations(linear, 2)}
offset = 1.2
vartype = dimod.SPIN
self.assertEqual(dimod.BinaryQuadraticModel(linear, quadratic, offset, dimod.BINARY).quadratic, quadratic)
# quadratic should be a dict or an iterable of 3-tuples
with self.assertRaises(ValueError):
dimod.BinaryQuadraticModel(linear, ['a'], offset, dimod.BINARY)
with self.assertRaises(TypeError):
dimod.BinaryQuadraticModel(linear, 1, offset, dimod.BINARY)
# not 2-tuple
with self.assertRaises(ValueError):
dimod.BinaryQuadraticModel(linear, {'edge': .5}, offset, dimod.BINARY)
# no self-loops
with self.assertRaises(ValueError):
dimod.BinaryQuadraticModel(linear, {(0, 0): .5}, offset, dimod.BINARY)
def test__eq__(self):
linear = {v: v * -.13 for v in range(10)}
quadratic = {(u, v): u * v * .021 for u, v in itertools.combinations(linear, 2)}
offset = 1.2
vartype = dimod.BINARY
self.assertEqual(dimod.BinaryQuadraticModel(linear, quadratic, offset, vartype),
dimod.BinaryQuadraticModel(linear, quadratic, offset, vartype))
# mismatched type
self.assertNotEqual(dimod.BinaryQuadraticModel(linear, quadratic, offset, vartype), -1)
# models of different type
self.assertNotEqual(dimod.BinaryQuadraticModel({}, {}, 0.0, dimod.SPIN),
dimod.BinaryQuadraticModel({}, {}, 0.0, dimod.BINARY))
#
linear = {v: v * -.13 for v in range(10)}
quadratic = {(u, v): u * v * .021 for u, v in itertools.combinations(linear, 2)}
offset = -1.2
vartype = dimod.SPIN
bqm = dimod.BinaryQuadraticModel(linear, quadratic, offset, vartype)
reversed_quadratic = {(v, u): bias for (u, v), bias in quadratic.items()}
reversed_bqm = dimod.BinaryQuadraticModel(linear, reversed_quadratic, offset, vartype)
self.assertEqual(bqm, reversed_bqm)
def test__repr__(self):
bqm = dimod.BinaryQuadraticModel({0: 1, 1: -1}, {(0, 1): .5, (1, 2): 1.5}, 1.4, dimod.SPIN)
# should recreate the model
from dimod import BinaryQuadraticModel, Vartype
new_bqm = eval(bqm.__repr__())
self.assertEqual(bqm, new_bqm)
def test__len__(self):
linear = {v: v * -.13 for v in range(10)}
quadratic = {}
offset = -1.2
vartype = dimod.SPIN
bqm = dimod.BinaryQuadraticModel(linear, quadratic, offset, vartype)
self.assertEqual(len(bqm), len(linear))
def test__contains__(self):
bqm = dimod.BinaryQuadraticModel({'a': -1}, {}, 0.0, dimod.SPIN)
self.assertIn('a', bqm)
self.assertNotIn('b', bqm)
bqm.add_interaction('a', 'b', .5)
self.assertIn('b', bqm)
def test__iter__(self):
bqm = dimod.BinaryQuadraticModel.empty(dimod.BINARY)
self.assertEqual(set(bqm), set())
bqm.add_interaction('a', 'b', -1)
self.assertEqual(set(bqm), {'a', 'b'})
def test_variables(self):
bqm = dimod.BinaryQuadraticModel.empty(dimod.BINARY)
self.assertEqual(set(bqm.variables), set())
bqm.add_interaction('a', 'b', -1)
self.assertEqual(set(bqm.variables), {'a', 'b'})
self.assertIn('a', bqm.variables)
self.assertEqual(bqm.variables & {'b'}, {'b'})
self.assertEqual(bqm.variables | {'c'}, {'a', 'b', 'c'})
    def test_add_variable(self):
        """add_variable accumulates linear bias and converts cross-vartype biases."""
        # adding to an existing variable accumulates onto its linear bias
        bqm = dimod.BinaryQuadraticModel({}, {('a', 'b'): -1}, 0.0, dimod.SPIN)
        bqm.add_variable('a', .5)
        self.assertEqual(bqm.linear['a'], .5)

        # add a single variable of a different type (BINARY bias into a SPIN model)
        bqm = dimod.BinaryQuadraticModel({}, {('a', 'b'): -1}, 0.0, dimod.SPIN)
        bqm.add_variable('a', .5, vartype=dimod.BINARY)
        self.assertEqual(bqm.energy({'a': -1, 'b': -1}), -1)
        self.assertEqual(bqm.energy({'a': 1, 'b': 1}), -.5)

        # and again, the other direction (SPIN bias into a BINARY model)
        bqm = dimod.BinaryQuadraticModel({}, {('a', 'b'): -1}, 0.0, dimod.BINARY)
        bqm.add_variable('a', .4, vartype=dimod.SPIN)
        self.assertEqual(bqm.energy({'a': 0, 'b': 0}), -.4)
        self.assertEqual(bqm.energy({'a': 1, 'b': 1}), -.6)

        # add a brand-new variable; adding it twice accumulates the bias
        bqm = dimod.BinaryQuadraticModel({}, {('a', 'b'): -1}, 0.0, dimod.SPIN)
        bqm.add_variable('c', .5)
        self.assertEqual({'a': 0, 'b': 0, 'c': .5}, bqm.linear)
        bqm.add_variable('c', .5)
        self.assertEqual({'a': 0, 'b': 0, 'c': 1}, bqm.linear)

        # an unrecognized vartype argument is rejected
        with self.assertRaises(ValueError):
            bqm.add_variable('a', 1.2, -1)
    def test_add_variable_counterpart(self):
        """Adding a variable keeps the spin/binary counterpart energetically in sync."""
        # spin model; counterpart created after the variable is added
        bqm = dimod.BinaryQuadraticModel({}, {('a', 'b'): -1}, 0.0, dimod.SPIN)
        bqm.add_variable('a', .5)
        for av, bv in itertools.product((0, 1), repeat=2):
            self.assertEqual(bqm.energy({'a': 2 * av - 1, 'b': 2 * bv - 1}),
                             bqm.binary.energy({'a': av, 'b': bv}))

        # spin model; counterpart created BEFORE the variable is added
        bqm = dimod.BinaryQuadraticModel({}, {('a', 'b'): -1}, 0.0, dimod.SPIN)
        __ = bqm.binary  # create counterpart
        bqm.add_variable('a', .5)
        for av, bv in itertools.product((0, 1), repeat=2):
            self.assertEqual(bqm.energy({'a': 2 * av - 1, 'b': 2 * bv - 1}),
                             bqm.binary.energy({'a': av, 'b': bv}))

        # binary model; counterpart created after
        bqm = dimod.BinaryQuadraticModel({}, {('a', 'b'): -1}, 0.0, dimod.BINARY)
        bqm.add_variable('a', .5)
        for av, bv in itertools.product((0, 1), repeat=2):
            self.assertEqual(bqm.spin.energy({'a': 2 * av - 1, 'b': 2 * bv - 1}),
                             bqm.energy({'a': av, 'b': bv}))

        # binary model; counterpart created before
        bqm = dimod.BinaryQuadraticModel({}, {('a', 'b'): -1}, 0.0, dimod.BINARY)
        __ = bqm.spin  # create counterpart
        bqm.add_variable('a', .5)
        for av, bv in itertools.product((0, 1), repeat=2):
            self.assertEqual(bqm.spin.energy({'a': 2 * av - 1, 'b': 2 * bv - 1}),
                             bqm.energy({'a': av, 'b': bv}))

        # add_variables_from keeps an existing counterpart in sync too
        bqm = dimod.BinaryQuadraticModel({'a': 1.}, {}, 0, dimod.SPIN)
        self.assertEqual(bqm.energy({'a': -1}), bqm.binary.energy({'a': 0}))
        bqm.add_variables_from({'a': .5, 'b': -2})
        self.assertEqual(bqm.linear, {'a': 1.5, 'b': -2})
        self.assertEqual(bqm.energy({'a': -1, 'b': -1}), bqm.binary.energy({'a': 0, 'b': 0}))
        self.assertEqual(bqm.energy({'a': +1, 'b': -1}), bqm.binary.energy({'a': 1, 'b': 0}))
        self.assertEqual(bqm.energy({'a': -1, 'b': +1}), bqm.binary.energy({'a': 0, 'b': 1}))
        self.assertEqual(bqm.energy({'a': +1, 'b': +1}), bqm.binary.energy({'a': 1, 'b': 1}))
def test_add_variables_from(self):
linear = {'a': .5, 'b': -.5}
offset = 0.0
vartype = dimod.SPIN
# create an empty model then add linear
bqm = dimod.BinaryQuadraticModel({}, {}, 0.0, vartype)
bqm.add_variables_from(linear)
self.assertEqual(bqm, dimod.BinaryQuadraticModel(linear, {}, 0.0, vartype))
# add from 2-tuples
bqm = dimod.BinaryQuadraticModel({}, {}, 0.0, vartype)
bqm.add_variables_from((key, value) for key, value in linear.items())
self.assertEqual(bqm, dimod.BinaryQuadraticModel(linear, {}, 0.0, vartype))
with self.assertRaises(TypeError):
bqm.add_variables_from(1)
def test_add_interaction(self):
# spin-to-binary
bqm = dimod.BinaryQuadraticModel({'a': 0, 'b': 0}, {}, 0.0, dimod.BINARY)
bqm.add_interaction('a', 'b', -1, vartype=dimod.SPIN) # add a chain link
self.assertEqual(bqm.energy({'a': 0, 'b': 0}), -1)
self.assertEqual(bqm.energy({'a': 1, 'b': 1}), -1)
self.assertConsistentBQM(bqm)
bqm = dimod.BinaryQuadraticModel({'b': 0}, {}, 0.0, dimod.BINARY)
bqm.add_interaction('a', 'b', -1, vartype=dimod.SPIN) # add a chain | |
# organization/views_admin.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
from .controllers import organizations_import_from_master_server
from .models import Organization
from .serializers import OrganizationSerializer
from admin_tools.views import redirect_to_sign_in_page
from candidate.models import CandidateCampaign, CandidateCampaignListManager, CandidateCampaignManager
from django.db.models import Q
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.messages import get_messages
from django.shortcuts import render
from exception.models import handle_record_found_more_than_one_exception,\
handle_record_not_deleted_exception, handle_record_not_found_exception, handle_record_not_saved_exception
from election.models import Election, ElectionManager
from measure.models import ContestMeasure, ContestMeasureList, ContestMeasureManager
from organization.models import OrganizationListManager, OrganizationManager
from position.models import PositionEntered, PositionEnteredManager, INFORMATION_ONLY, OPPOSE, \
STILL_DECIDING, SUPPORT
from rest_framework.views import APIView
from rest_framework.response import Response
from voter.models import retrieve_voter_authority, voter_has_authority
from voter_guide.models import VoterGuideManager
import wevote_functions.admin
from wevote_functions.functions import convert_to_int, extract_twitter_handle_from_text_string, positive_value_exists, \
STATE_CODE_MAP
# Choices offered when recording an organization's stance
# (stored value from position.models, human-readable label).
ORGANIZATION_STANCE_CHOICES = (
    (SUPPORT, 'We Support'),
    (OPPOSE, 'We Oppose'),
    (INFORMATION_ONLY, 'Information Only - No stance'),
    (STILL_DECIDING, 'We Are Still Deciding Our Stance'),
)

# Module-level logger, obtained through the project's wevote_functions wrapper.
logger = wevote_functions.admin.get_logger(__name__)
# This page does not need to be protected.
class OrganizationsSyncOutView(APIView):
    """Serve every Organization (optionally limited to one state) as serialized JSON."""

    def __str__(self):
        return str("")

    def get(self, request, format=None):
        state_served_code = request.GET.get('state_served_code', '')

        queryset = Organization.objects.all()
        if positive_value_exists(state_served_code):
            # case-insensitive match on the state the organization serves
            queryset = queryset.filter(state_served_code__iexact=state_served_code)

        serializer = OrganizationSerializer(queryset, many=True, allow_null=True)
        return Response(serializer.data)
@login_required
def organizations_import_from_master_server_view(request):
    """Pull organizations from the master server, report counts, then return to the sync dashboard."""
    google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
    state_code = request.GET.get('state_code', '')

    results = organizations_import_from_master_server(request, state_code)

    if not results['success']:
        messages.add_message(request, messages.ERROR, results['status'])
    else:
        summary = ('Organizations import completed. '
                   'Saved: {saved}, Updated: {updated}, '
                   'Master data not imported (local duplicates found): '
                   '{duplicates_removed}, '
                   'Not processed: {not_processed}'
                   ''.format(saved=results['saved'],
                             updated=results['updated'],
                             duplicates_removed=results['duplicates_removed'],
                             not_processed=results['not_processed']))
        messages.add_message(request, messages.INFO, summary)

    redirect_url = (reverse('admin_tools:sync_dashboard', args=()) +
                    "?google_civic_election_id=" + str(google_civic_election_id) +
                    "&state_code=" + str(state_code))
    return HttpResponseRedirect(redirect_url)
@login_required
def organization_list_view(request):
    """List organizations, filterable by state and by a free-text search
    over name, twitter handle, website and we_vote_id."""
    authority_required = {'verified_volunteer'}  # admin, verified_volunteer
    if not voter_has_authority(request, authority_required):
        return redirect_to_sign_in_page(request, authority_required)

    organization_state_code = request.GET.get('organization_state', '')
    google_civic_election_id = request.GET.get('google_civic_election_id', '')
    candidate_we_vote_id = request.GET.get('candidate_we_vote_id', '')
    organization_search = request.GET.get('organization_search', '')

    queryset = Organization.objects.all()
    if positive_value_exists(organization_state_code):
        queryset = queryset.filter(state_served_code__iexact=organization_state_code)
    if positive_value_exists(organization_search):
        # a match on any one of these fields is enough
        queryset = queryset.filter(
            Q(organization_name__icontains=organization_search) |
            Q(organization_twitter_handle__icontains=organization_search) |
            Q(organization_website__icontains=organization_search) |
            Q(we_vote_id__icontains=organization_search))
    queryset = queryset.order_by('organization_name')

    template_values = {
        'messages_on_stage': get_messages(request),
        'candidate_we_vote_id': candidate_we_vote_id,
        'google_civic_election_id': google_civic_election_id,
        'organization_list': queryset,
        'organization_search': organization_search,
        'organization_state': organization_state_code,
        'state_list': sorted(STATE_CODE_MAP.items()),
    }
    return render(request, 'organization/organization_list.html', template_values)
@login_required
def organization_new_view(request):
    """Render an empty organization form, offering upcoming elections and states."""
    authority_required = {'verified_volunteer'}  # admin, verified_volunteer
    if not voter_has_authority(request, authority_required):
        return redirect_to_sign_in_page(request, authority_required)

    # A positive value in google_civic_election_id means we want to create a voter guide for this org for this election
    google_civic_election_id = request.GET.get('google_civic_election_id', 0)

    election_results = ElectionManager().retrieve_upcoming_elections()
    upcoming_election_list = election_results['election_list'] if election_results['success'] else []

    template_values = {
        'messages_on_stage': get_messages(request),
        'upcoming_election_list': upcoming_election_list,
        'google_civic_election_id': google_civic_election_id,
        'state_list': sorted(STATE_CODE_MAP.items()),
    }
    return render(request, 'organization/organization_edit.html', template_values)
@login_required
def organization_edit_view(request, organization_id):
    """Render the edit form for one organization; falls back to a blank form
    when the organization cannot be retrieved."""
    authority_required = {'verified_volunteer'}  # admin, verified_volunteer
    if not voter_has_authority(request, authority_required):
        return redirect_to_sign_in_page(request, authority_required)

    # A positive value in google_civic_election_id means we want to create a voter guide for this org for this election
    google_civic_election_id = request.GET.get('google_civic_election_id', 0)
    organization_id = convert_to_int(organization_id)

    lookup = OrganizationManager().retrieve_organization(organization_id)

    election_results = ElectionManager().retrieve_upcoming_elections()
    upcoming_election_list = election_results['election_list'] if election_results['success'] else []

    template_values = {
        'messages_on_stage': get_messages(request),
        'upcoming_election_list': upcoming_election_list,
        'google_civic_election_id': google_civic_election_id,
        'state_list': sorted(STATE_CODE_MAP.items()),
    }
    if lookup['organization_found']:
        # only the populated form gets the organization and its served state
        organization = lookup['organization']
        template_values['organization'] = organization
        template_values['state_served_code'] = organization.state_served_code
    return render(request, 'organization/organization_edit.html', template_values)
@login_required
def organization_edit_process_view(request):
    """
    Process the new or edit organization forms.

    Updates the organization when organization_id matches an existing record;
    otherwise creates one (after checking for likely duplicates and the required
    name). May also create a voter guide when google_civic_election_id is set.
    :param request: POSTed form data
    :return: redirect to the organization's position list (or re-rendered form)
    """
    authority_required = {'verified_volunteer'}  # admin, verified_volunteer
    if not voter_has_authority(request, authority_required):
        return redirect_to_sign_in_page(request, authority_required)

    # Fields defaulted to False mean "not submitted" and are skipped below;
    # organization_name defaults to '' instead.
    organization_id = convert_to_int(request.POST.get('organization_id', 0))
    organization_name = request.POST.get('organization_name', '')
    organization_twitter_handle = request.POST.get('organization_twitter_handle', False)
    organization_facebook = request.POST.get('organization_facebook', False)
    organization_website = request.POST.get('organization_website', False)
    wikipedia_page_title = request.POST.get('wikipedia_page_title', False)
    wikipedia_photo_url = request.POST.get('wikipedia_photo_url', False)
    state_served_code = request.POST.get('state_served_code', False)
    # A positive value in google_civic_election_id or add_organization_button means we want to create a voter guide
    # for this org for this election
    google_civic_election_id = request.POST.get('google_civic_election_id', 0)
    # add_organization_button = request.POST.get('add_organization_button', False)

    # Filter incoming data: reduce whatever was typed to a bare Twitter handle
    organization_twitter_handle = extract_twitter_handle_from_text_string(organization_twitter_handle)

    # Check to see if this organization is already being used anywhere
    organization_on_stage_found = False
    try:
        organization_query = Organization.objects.filter(id=organization_id)
        if organization_query.count():
            organization_on_stage = organization_query[0]
            organization_on_stage_found = True
    except Exception as e:
        handle_record_not_found_exception(e, logger=logger)

    try:
        if organization_on_stage_found:
            # Update: copy only the submitted fields onto the existing record
            # NOTE(review): organization_name defaults to '' (not False), so this first
            # check is always True and an empty POST value clears the name — confirm intended.
            if organization_name is not False:
                organization_on_stage.organization_name = organization_name
            if organization_twitter_handle is not False:
                organization_on_stage.organization_twitter_handle = organization_twitter_handle
            if organization_facebook is not False:
                organization_on_stage.organization_facebook = organization_facebook
            if organization_website is not False:
                organization_on_stage.organization_website = organization_website
            if wikipedia_page_title is not False:
                organization_on_stage.wikipedia_page_title = wikipedia_page_title
            if wikipedia_photo_url is not False:
                organization_on_stage.wikipedia_photo_url = wikipedia_photo_url
            if state_served_code is not False:
                organization_on_stage.state_served_code = state_served_code
            organization_on_stage.save()
            organization_id = organization_on_stage.id
            organization_we_vote_id = organization_on_stage.we_vote_id
            messages.add_message(request, messages.INFO, 'Organization updated.')
        else:
            # Create new
            # But first double-check that we don't have an org entry already
            organization_email = ''
            organization_list_manager = OrganizationListManager()
            results = organization_list_manager.organization_search_find_any_possibilities(
                organization_name, organization_twitter_handle, organization_website, organization_email)

            if results['organizations_found']:
                # Possible duplicates: re-render the form with the matches instead of saving
                organizations_list = results['organizations_list']
                organizations_count = len(organizations_list)
                messages.add_message(request, messages.INFO, 'We found {count} existing organizations '
                                                            'that might match.'.format(count=organizations_count))
                messages_on_stage = get_messages(request)
                template_values = {
                    'messages_on_stage': messages_on_stage,
                    'organizations_list': organizations_list,
                    'organization_name': organization_name,
                    'organization_twitter_handle': organization_twitter_handle,
                    'organization_facebook': organization_facebook,
                    'organization_website': organization_website,
                    'wikipedia_page_title': wikipedia_page_title,
                    'wikipedia_photo_url': wikipedia_photo_url,
                }
                return render(request, 'organization/organization_edit.html', template_values)

            # the organization name is the only hard requirement for a new record
            minimum_required_variables_exist = positive_value_exists(organization_name)
            if not minimum_required_variables_exist:
                messages.add_message(request, messages.INFO, 'Missing name, which is required.')
                messages_on_stage = get_messages(request)
                template_values = {
                    'messages_on_stage': messages_on_stage,
                    'organization_name': organization_name,
                    'organization_twitter_handle': organization_twitter_handle,
                    'organization_facebook': organization_facebook,
                    'organization_website': organization_website,
                    'wikipedia_page_title': wikipedia_page_title,
                    'wikipedia_photo_url': wikipedia_photo_url,
                }
                # NOTE(review): renders a voter_guide template from the organization
                # flow — confirm this is the intended destination.
                return render(request, 'voter_guide/voter_guide_search.html', template_values)

            organization_on_stage = Organization(
                organization_name=organization_name,
            )
            # only submitted optional fields are written onto the new record
            if organization_twitter_handle is not False:
                organization_on_stage.organization_twitter_handle = organization_twitter_handle
            if organization_facebook is not False:
                organization_on_stage.organization_facebook = organization_facebook
            if organization_website is not False:
                organization_on_stage.organization_website = organization_website
            if wikipedia_page_title is not False:
                organization_on_stage.wikipedia_page_title = wikipedia_page_title
            if wikipedia_photo_url is not False:
                organization_on_stage.wikipedia_photo_url = wikipedia_photo_url
            if state_served_code is not False:
                organization_on_stage.state_served_code = state_served_code
            organization_on_stage.save()
            organization_id = organization_on_stage.id
            organization_we_vote_id = organization_on_stage.we_vote_id
            messages.add_message(request, messages.INFO, 'New organization saved.')
    except Exception as e:
        messages.add_message(request, messages.ERROR, 'Could not save organization.'
                                                      ' {error} [type: {error_type}]'.format(error=e,
                                                                                             error_type=type(e)))
        return HttpResponseRedirect(reverse('organization:organization_list', args=()))

    # Create voter_guide for this election?
    # organization_we_vote_id is bound here: both try branches assign it, and every
    # other path above returns early.
    if positive_value_exists(google_civic_election_id) and positive_value_exists(organization_we_vote_id):
        election_manager = ElectionManager()
        results = election_manager.retrieve_election(google_civic_election_id)
        if results['election_found']:
            election = results['election']
            voter_guide_manager = VoterGuideManager()
            results = voter_guide_manager.update_or_create_organization_voter_guide_by_election_id(
                organization_we_vote_id, google_civic_election_id)
            if results['voter_guide_saved']:
                messages.add_message(request, messages.INFO, 'Voter guide for {election_name} election saved.'
                                                            ''.format(election_name=election.election_name))

    return HttpResponseRedirect(reverse('organization:organization_position_list', args=(organization_id,)) +
                                "?google_civic_election_id=" + str(google_civic_election_id))
@login_required
def organization_position_list_view(request, organization_id):
    """Show every position held by one organization, optionally filtered to one election."""
    authority_required = {'verified_volunteer'}  # admin, verified_volunteer
    if not voter_has_authority(request, authority_required):
        return redirect_to_sign_in_page(request, authority_required)

    messages_on_stage = get_messages(request)
    organization_id = convert_to_int(organization_id)
    google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
    candidate_we_vote_id = request.GET.get('candidate_we_vote_id', '')

    organization_on_stage = Organization()
    organization_on_stage_found = False
    try:
        organization_query = Organization.objects.filter(id=organization_id)
        if organization_query.count():
            organization_on_stage = organization_query[0]
            organization_on_stage_found = True
    except Exception as e:
        handle_record_not_found_exception(e, logger=logger)
        organization_on_stage_found = False

    if not organization_on_stage_found:
        messages.add_message(request, messages.ERROR,
                             'Could not find organization when trying to retrieve positions.')
        return HttpResponseRedirect(reverse('organization:organization_list', args=()))
    else:
        organization_position_list_found = False
        try:
            organization_position_list = PositionEntered.objects.order_by('stance')
            organization_position_list = organization_position_list.filter(organization_id=organization_id)
            if positive_value_exists(google_civic_election_id):
                organization_position_list = organization_position_list.filter(
                    google_civic_election_id=google_civic_election_id)
            # this second order_by replaces the initial 'stance' ordering
            organization_position_list = organization_position_list.order_by(
                'google_civic_election_id', '-vote_smart_time_span')
            if len(organization_position_list):
                organization_position_list_found = True
        except Exception as e:
            # best effort: fall back to showing no positions
            organization_position_list = []

        # refresh each position's cached denormalized info before display
        for one_position in organization_position_list:
            position_manager = PositionEnteredManager()
            one_position = position_manager.refresh_cached_position_info(one_position)

        election_list = Election.objects.order_by('-election_day_text')

        if organization_position_list_found:
            template_values = {
                'messages_on_stage': messages_on_stage,
                'organization': organization_on_stage,
                'organization_position_list': organization_position_list,
                'election_list': election_list,
                'google_civic_election_id': google_civic_election_id,
                'candidate_we_vote_id': candidate_we_vote_id,
            }
        else:
            # no positions found: same context minus the position list
            template_values = {
                'messages_on_stage': messages_on_stage,
                'organization': organization_on_stage,
                'election_list': election_list,
                'google_civic_election_id': google_civic_election_id,
                'candidate_we_vote_id': candidate_we_vote_id,
            }
    return render(request, 'organization/organization_position_list.html', template_values)
@login_required
def organization_position_new_view(request, organization_id):
authority_required = {'verified_volunteer'} # admin, verified_volunteer
authority_results = retrieve_voter_authority(request)
if not voter_has_authority(request, authority_required, authority_results):
return redirect_to_sign_in_page(request, authority_required)
google_civic_election_id = request.GET.get('google_civic_election_id', 0)
candidate_we_vote_id = request.GET.get('candidate_we_vote_id', False)
measure_we_vote_id = request.GET.get('measure_we_vote_id', False)
# Take in some incoming values
candidate_and_measure_not_found = request.GET.get('candidate_and_measure_not_found', False)
stance = request.GET.get('stance', SUPPORT) # Set a default if stance comes in empty
statement_text = request.GET.get('statement_text', '') # Set a default if stance comes in empty
more_info_url = request.GET.get('more_info_url', '')
# We pass candidate_we_vote_id to this page to pre-populate the form
candidate_campaign_id = 0
if positive_value_exists(candidate_we_vote_id):
candidate_campaign_manager = CandidateCampaignManager()
results = candidate_campaign_manager.retrieve_candidate_campaign_from_we_vote_id(candidate_we_vote_id)
if results['candidate_campaign_found']:
candidate_campaign = results['candidate_campaign']
candidate_campaign_id = candidate_campaign.id
# We pass candidate_we_vote_id to this page to pre-populate the form
contest_measure_id = 0
if positive_value_exists(measure_we_vote_id):
contest_measure_manager = ContestMeasureManager()
results = contest_measure_manager.retrieve_contest_measure_from_we_vote_id(measure_we_vote_id)
if results['contest_measure_found']:
contest_measure = results['contest_measure']
contest_measure_id = contest_measure.id
| |
372 , (3, 0, None, None) , 0 , )),
(( 'SchedulePlusPriority' , 'SchedulePlusPriority' , ), 33071, (33071, (), [ (16392, 10, None, None) , ], 1 , 2 , 4 , 0 , 376 , (3, 0, None, None) , 0 , )),
(( 'SchedulePlusPriority' , 'SchedulePlusPriority' , ), 33071, (33071, (), [ (8, 1, None, None) , ], 1 , 4 , 4 , 0 , 380 , (3, 0, None, None) , 0 , )),
(( 'StartDate' , 'StartDate' , ), 33028, (33028, (), [ (16391, 10, None, None) , ], 1 , 2 , 4 , 0 , 384 , (3, 0, None, None) , 0 , )),
(( 'StartDate' , 'StartDate' , ), 33028, (33028, (), [ (7, 1, None, None) , ], 1 , 4 , 4 , 0 , 388 , (3, 0, None, None) , 0 , )),
(( 'Status' , 'Status' , ), 33025, (33025, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 392 , (3, 0, None, None) , 0 , )),
(( 'Status' , 'Status' , ), 33025, (33025, (), [ (3, 1, None, None) , ], 1 , 4 , 4 , 0 , 396 , (3, 0, None, None) , 0 , )),
(( 'StatusOnCompletionRecipients' , 'StatusOnCompletionRecipients' , ), 3586, (3586, (), [ (16392, 10, None, None) , ], 1 , 2 , 4 , 0 , 400 , (3, 0, None, None) , 0 , )),
(( 'StatusOnCompletionRecipients' , 'StatusOnCompletionRecipients' , ), 3586, (3586, (), [ (8, 1, None, None) , ], 1 , 4 , 4 , 0 , 404 , (3, 0, None, None) , 0 , )),
(( 'StatusUpdateRecipients' , 'StatusUpdateRecipients' , ), 3587, (3587, (), [ (16392, 10, None, None) , ], 1 , 2 , 4 , 0 , 408 , (3, 0, None, None) , 0 , )),
(( 'StatusUpdateRecipients' , 'StatusUpdateRecipients' , ), 3587, (3587, (), [ (8, 1, None, None) , ], 1 , 4 , 4 , 0 , 412 , (3, 0, None, None) , 0 , )),
(( 'TeamTask' , 'TeamTask' , ), 33027, (33027, (), [ (16395, 10, None, None) , ], 1 , 2 , 4 , 0 , 416 , (3, 0, None, None) , 0 , )),
(( 'TeamTask' , 'TeamTask' , ), 33027, (33027, (), [ (11, 1, None, None) , ], 1 , 4 , 4 , 0 , 420 , (3, 0, None, None) , 0 , )),
(( 'TotalWork' , 'TotalWork' , ), 33041, (33041, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 424 , (3, 0, None, None) , 0 , )),
(( 'TotalWork' , 'TotalWork' , ), 33041, (33041, (), [ (3, 1, None, None) , ], 1 , 4 , 4 , 0 , 428 , (3, 0, None, None) , 0 , )),
(( 'Assign' , 'Item' , ), 63008, (63008, (), [ (16397, 10, None, "IID('{00061032-0000-0000-C000-000000000046}')") , ], 1 , 1 , 4 , 0 , 432 , (3, 0, None, None) , 0 , )),
(( 'CancelResponseState' , ), 63010, (63010, (), [ ], 1 , 1 , 4 , 0 , 436 , (3, 0, None, None) , 0 , )),
(( 'ClearRecurrencePattern' , ), 61605, (61605, (), [ ], 1 , 1 , 4 , 0 , 440 , (3, 0, None, None) , 0 , )),
(( 'GetRecurrencePattern' , 'RecurrencPattern' , ), 61604, (61604, (), [ (16393, 10, None, "IID('{00063044-0000-0000-C000-000000000046}')") , ], 1 , 1 , 4 , 0 , 444 , (3, 0, None, None) , 0 , )),
(( 'MarkComplete' , ), 62989, (62989, (), [ ], 1 , 1 , 4 , 0 , 448 , (3, 0, None, None) , 0 , )),
(( 'Respond' , 'Response' , 'fNoUI' , 'fAdditionalTextDialog' , 'Item' ,
), 63009, (63009, (), [ (3, 1, None, None) , (12, 1, None, None) , (12, 1, None, None) , (16397, 10, None, "IID('{00061032-0000-0000-C000-000000000046}')") , ], 1 , 1 , 4 , 0 , 452 , (3, 0, None, None) , 0 , )),
(( 'Send' , ), 61557, (61557, (), [ ], 1 , 1 , 4 , 0 , 456 , (3, 0, None, None) , 0 , )),
(( 'SkipRecurrence' , 'flg' , ), 63012, (63012, (), [ (16395, 10, None, None) , ], 1 , 1 , 4 , 0 , 460 , (3, 0, None, None) , 0 , )),
(( 'StatusReport' , 'StatusReport' , ), 62994, (62994, (), [ (16393, 10, None, None) , ], 1 , 1 , 4 , 0 , 464 , (3, 0, None, None) , 0 , )),
(( 'Links' , 'Links' , ), 62469, (62469, (), [ (16393, 10, None, "IID('{0006308A-0000-0000-C000-000000000046}')") , ], 1 , 2 , 4 , 0 , 468 , (3, 0, None, None) , 0 , )),
]
# Flag consumed by the generated COM wrapper machinery (the surrounding vtable
# tables appear to be machine-generated win32com/makepy output — do not edit by hand).
_TaskRequestAcceptItem_vtables_dispatch_ = 1
_TaskRequestAcceptItem_vtables_ = [
(( 'Application' , 'Application' , ), 61440, (61440, (), [ (16393, 10, None, "IID('{00063001-0000-0000-C000-000000000046}')") , ], 1 , 2 , 4 , 0 , 28 , (3, 0, None, None) , 0 , )),
(( 'Class' , 'Class' , ), 61450, (61450, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 32 , (3, 0, None, None) , 0 , )),
(( 'Session' , 'Session' , ), 61451, (61451, (), [ (16393, 10, None, "IID('{00063002-0000-0000-C000-000000000046}')") , ], 1 , 2 , 4 , 0 , 36 , (3, 0, None, None) , 0 , )),
(( 'Parent' , 'Parent' , ), 61441, (61441, (), [ (16393, 10, None, None) , ], 1 , 2 , 4 , 0 , 40 , (3, 0, None, None) , 0 , )),
(( 'Actions' , 'Actions' , ), 63511, (63511, (), [ (16393, 10, None, "IID('{0006303E-0000-0000-C000-000000000046}')") , ], 1 , 2 , 4 , 0 , 44 , (3, 0, None, None) , 0 , )),
(( 'Attachments' , 'Attachments' , ), 63509, (63509, (), [ (16393, 10, None, "IID('{0006303C-0000-0000-C000-000000000046}')") , ], 1 , 2 , 4 , 0 , 48 , (3, 0, None, None) , 0 , )),
(( 'BillingInformation' , 'BillingInformation' , ), 34101, (34101, (), [ (16392, 10, None, None) , ], 1 , 2 , 4 , 0 , 52 , (3, 0, None, None) , 0 , )),
(( 'BillingInformation' , 'BillingInformation' , ), 34101, (34101, (), [ (8, 1, None, None) , ], 1 , 4 , 4 , 0 , 56 , (3, 0, None, None) , 0 , )),
(( 'Body' , 'Body' , ), 37120, (37120, (), [ (16392, 10, None, None) , ], 1 , 2 , 4 , 0 , 60 , (3, 0, None, None) , 0 , )),
(( 'Body' , 'Body' , ), 37120, (37120, (), [ (8, 1, None, None) , ], 1 , 4 , 4 , 0 , 64 , (3, 0, None, None) , 0 , )),
(( 'Categories' , 'Categories' , ), 36865, (36865, (), [ (16392, 10, None, None) , ], 1 , 2 , 4 , 0 , 68 , (3, 0, None, None) , 0 , )),
(( 'Categories' , 'Categories' , ), 36865, (36865, (), [ (8, 1, None, None) , ], 1 , 4 , 4 , 0 , 72 , (3, 0, None, None) , 0 , )),
(( 'Companies' , 'Companies' , ), 34107, (34107, (), [ (16392, 10, None, None) , ], 1 , 2 , 4 , 0 , 76 , (3, 0, None, None) , 0 , )),
(( 'Companies' , 'Companies' , ), 34107, (34107, (), [ (8, 1, None, None) , ], 1 , 4 , 4 , 0 , 80 , (3, 0, None, None) , 0 , )),
(( 'ConversationIndex' , 'ConversationIndex' , ), 113, (113, (), [ (16392, 10, None, None) , ], 1 , 2 , | |
#!/usr/bin/env python3
import os
import sys
import time
import argparse
import re
import subprocess
import requests
import shutil
from bs4 import BeautifulSoup
from termcolor import cprint
from multiprocessing import Pool
import traceback
# Pattern matching a Markdown file extension at the end of a file name.
MARKDOWN_SEARCH_TERM = r'\.md$'
# Regex to find a URL
URL_SEARCH_TERM = r'(\b(https?|ftp|file)://[^\s\)\]\\"<>]+[^\s\)\.\]\\"<>])'
HTTP_URL_SEARCH_TERM = r'https?://'
# Link schemes (mailto/ftp/tel) that we choose to ignore when collecting links.
IGNORED_LINK_SCHEMES = r'mailto:|ftp:|tel:'
# Regexes to identify links to Github PRs or issues, which are very common in changelogs
# and may result in rate limiting if each link is fetched manually.
PULL_REQUEST_SEARCH = r'https://github.com/([A-Za-z0-9_.-]+)/([A-Za-z0-9_.-]+)/pull/(\d+)$'
ISSUE_SEARCH = r'https://github.com/([A-Za-z0-9_.-]+)/([A-Za-z0-9_.-]+)/issues/(\d+)$'
# The value at which we should fetch all of a repo's PRs or issues rather than testing
# individually. It takes roughly half a second to test one link, and between 1/2 to 5
# seconds to fetch them all, depending on the size of the repo.
GITHUB_FETCH_THRESHOLD = 5
# Dictionary keys used in the per-repository bookkeeping (format documented below).
NUM_PR_KEY = 'num_prs'
NUM_IS_KEY = 'num_issues'
PR_KEY = 'prs'
ISSUE_KEY = 'issues'
PR_CACHED_KEY = 'pr_cached'
ISSUE_CACHED_KEY = 'issue_cached'
"""
Format for repository list:
{
"owner/repository": {
"num_prs": 0, //Number of links that point to an issue in this repo.
"num_issues": 0, //Number of links that point to a PR in this repo.
"pr_cached": True, //Whether we've already fetched and cached PRs/issues.
"issue_cached": True,
"prs": (1,2,3), //Cached set of PRs.
"issues": (4,5,6) //Cached set of issues.
}
}
"""
# Global per-repository bookkeeping, keyed by 'owner/repo' (lowercased); see format above.
main_repo_list = {}
# Whether to use the above cache of repositories.
use_gh_cache = True
# Track links so that we don't test multiple times.
link_cache = {}
class HtmlFile:
    """A parsed .html file: its element ids, internal links and external links."""

    def __init__(self, html_file_name):
        """Parse the HTML in the file and extract element ids and link targets."""
        self.ids = []
        self.internal_links = []
        self.external_links = []
        self.name = html_file_name
        self.abspath = os.path.abspath(html_file_name)
        self.broken_links = []
        # Map of 'owner/repo' -> counts of GitHub PR/issue links seen in this file.
        self.linked_repos = {}
        with open(html_file_name, 'r') as infile:
            html_data = infile.read()
        soup = BeautifulSoup(html_data, 'html.parser')
        # Collect element ids so that intra-document anchors can be checked later.
        for tag in soup.find_all(True, {'id': True}):
            self.ids.append(tag.get('id'))
        pr_search = re.compile(PULL_REQUEST_SEARCH)
        issue_search = re.compile(ISSUE_SEARCH)
        for tag in soup.find_all('a'):
            link = tag.get('href')
            # Bug fix: an <a> without an href yields None, which previously reached
            # re.search() first and raised TypeError. Skip such tags up front.
            if link is None:
                continue
            if not re.search(HTTP_URL_SEARCH_TERM, link, re.IGNORECASE):
                if not re.search(IGNORED_LINK_SCHEMES, link, re.IGNORECASE):
                    if link not in self.internal_links:
                        self.internal_links.append(link)
            else:
                if link not in self.external_links:
                    self.external_links.append(link)
                # Count GitHub PR/issue links so they can be bulk-fetched later.
                pr_match = pr_search.search(link)
                if pr_match:
                    self.increment_gh_link_count(pr_match.group(1), pr_match.group(2), pr_match.group(3), True)
                else:
                    issue_match = issue_search.search(link)
                    if issue_match:
                        self.increment_gh_link_count(issue_match.group(1), issue_match.group(2), issue_match.group(3), False)

    def increment_gh_link_count(self, owner, repo, num, is_pr):
        """Increment the count of links to Github PRs or issues for owner/repo.

        `num` (the PR/issue number) is accepted but not stored here; only
        per-repo counts are tracked at this stage.
        """
        repo_key = f'{owner}/{repo}'.lower()
        if repo_key not in self.linked_repos:
            self.linked_repos[repo_key] = {NUM_IS_KEY: 0, NUM_PR_KEY: 0}
        if is_pr:
            self.linked_repos[repo_key][NUM_PR_KEY] += 1
        else:
            self.linked_repos[repo_key][NUM_IS_KEY] += 1

    def print_filename(self, filename, file_printed):
        """Print the file name the first time it is needed; return the new flag.

        Bug fixes: the original printed a constant instead of `filename`, and fell
        through returning None on the already-printed path, which reset the
        caller's flag and caused the name to be printed again on the third call.
        """
        if not file_printed:
            print(f'FILE: {filename}')
        return True

    def identify_broken_links(self, files, verbose):
        """Test this file's links for existence, recording failures in self.broken_links."""
        dirname = os.path.dirname(self.name)
        # Only print the file name once
        file_printed = False
        for link in self.internal_links:
            # First, look for anchors in the same document.
            link_elements = link.split('#')
            path = link_elements[0]
            anchor = None
            if len(link_elements) > 1:
                anchor = link_elements[1]
            if path == '':
                if anchor is not None:
                    # NOTE(review): the anchor is lowercased but self.ids are stored
                    # as-is — presumably ids are generated lowercase (pandoc gfm);
                    # confirm if mixed-case ids ever appear.
                    if anchor.lower() not in self.ids:
                        self.broken_links.append(link)
                        file_printed = self.print_filename(files[self.name], file_printed)
                        cprint(f'\tUnknown link: {link}', 'red')
                    elif verbose:
                        file_printed = self.print_filename(files[self.name], file_printed)
                        cprint(f'\t{link}', 'green')
                continue
            # At this point, this is probably a link to a file in the same repo,
            # so we test if the file exists.
            filename = os.path.join(dirname, path)
            absfile = os.path.abspath(filename)
            # Note: We don't test whether the link target exists, just the file.
            if not os.path.exists(absfile):
                self.broken_links.append(link)
                file_printed = self.print_filename(files[self.name], file_printed)
                cprint(f'\tUnknown file: {path}', 'red')
            elif verbose:
                file_printed = self.print_filename(files[self.name], file_printed)
                cprint(f'\t{link}', 'green')
        for link in self.external_links:
            is_broken, status_code = test_url(link)
            if is_broken:
                self.broken_links.append(link)
                file_printed = self.print_filename(files[self.name], file_printed)
                cprint(f' {status_code}\t{link}', 'red')
            else:
                if verbose:
                    file_printed = self.print_filename(files[self.name], file_printed)
                    cprint(f' {status_code}\t{link}', 'green')
def parse_file(html_file):
    """Worker entry point for the multiprocessing pool: parse one HTML file."""
    parsed = HtmlFile(html_file)
    return parsed
def html_name_from_markdown(filename):
    """Return *filename* with a trailing '.md' extension replaced by '.html'.

    Bug fix: the previous pattern '.md' was unanchored and its '.' matched any
    character, so names such as 'amd_guide.md' were mangled mid-string. The
    pattern is now escaped and anchored to the end of the name.
    """
    return re.sub(r'\.md$', '.html', filename, flags=re.IGNORECASE)
def create_html(markdown_file):
    """Use pandoc to convert a Markdown file to an HTML file.

    Returns the CompletedProcess so callers can inspect returncode/stdout.
    """
    html_file = html_name_from_markdown(markdown_file)
    # Convert from Github-flavored Markdown to HTML. The arguments are passed
    # as a list with the default shell=False: file names containing spaces or
    # shell metacharacters are no longer mangled (or a shell-injection risk),
    # as they were with the previous f-string + shell=True command.
    process = subprocess.run(
        ['pandoc', '-f', 'gfm', '-o', html_file, markdown_file],
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        encoding="utf-8",
    )
    return process
def test_url(url):
    """Test a single URL; return a (is_broken, status) tuple.

    Results are memoized in link_cache, and GitHub PR/issue links are first
    checked against the pre-fetched per-repo caches to avoid rate limiting.
    """
    global use_gh_cache
    global main_repo_list
    global link_cache
    status = ''
    is_broken = False
    # Test if link was already tested before.
    if url in link_cache:
        return link_cache[url]
    # Test if link was cached in pre-fetched GitHub issues. If not, send a request for the link.
    if use_gh_cache:
        pr_match = re.search(PULL_REQUEST_SEARCH, url)
        issue_match = re.search(ISSUE_SEARCH, url)
        if pr_match is not None:
            repo_key = f'{pr_match.group(1)}/{pr_match.group(2)}'.lower()
            if repo_key in main_repo_list and PR_KEY in main_repo_list[repo_key]:
                if int(pr_match.group(3)) in main_repo_list[repo_key][PR_KEY]:
                    status = 'Good'
        elif issue_match is not None:
            repo_key = f'{issue_match.group(1)}/{issue_match.group(2)}'.lower()
            if repo_key in main_repo_list and ISSUE_KEY in main_repo_list[repo_key]:
                if int(issue_match.group(3)) in main_repo_list[repo_key][ISSUE_KEY]:
                    status = 'Good'
    if status != 'Good':
        try:
            r = requests.head(url, allow_redirects=True)
            # Some sites may return 404 for head but not get, e.g.
            # https://tls.mbed.org/kb/development/thread-safety-and-multi-threading
            if r.status_code >= 400:
                # Allow redirects is already enabled by default for GET.
                r = requests.get(url)
            # It's likely we will run into GitHub's rate-limiting if there are many links.
            if r.status_code == 429:
                # Bug fix: Retry-After is an optional header; fall back to a short
                # pause instead of raising KeyError, which the broad handler below
                # turned into a false "broken" verdict.
                time.sleep(int(r.headers.get('Retry-After', 1)))
                r = requests.head(url, allow_redirects=True)
            if r.status_code >= 400:
                is_broken = True
            status = r.status_code
        # requests.exceptions.ConnectionError if URL does not exist, but we capture
        # all possible exceptions from trying the link to be safe.
        except Exception as e:
            print(str(e))
            is_broken = True
            status = 'Error'
    # Add result to cache so it won't be tested again.
    link_cache[url] = (is_broken, status)
    return is_broken, status
def fetch_issues(repo, issue_type, limit):
    """Use the GitHub CLI to cache the numbers of a repo's PRs or issues.

    issue_type is 'pr' or 'issue'; the numbers are added to
    main_repo_list[repo]['prs'/'issues']. Returns 0 on success; on any
    failure the GitHub cache is disabled for the rest of the run.
    """
    global use_gh_cache
    global main_repo_list
    if shutil.which('gh') is None:
        # Bug fix: a missing gh binary previously left use_gh_cache True, so
        # callers kept marking repos as "cached" against empty sets.
        use_gh_cache = False
        return None
    # List PRs or issues for the repository. Arguments are passed as a list
    # (no shell, no awk pipeline), so repo names cannot be shell-interpreted.
    process = subprocess.run(
        ['gh', issue_type, 'list', '-R', repo, '-s', 'all', '-L', str(limit)],
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        encoding="utf-8",
    )
    if process.returncode != 0:
        use_gh_cache = False
        return None
    key = issue_type + 's'
    for line in process.stdout.splitlines():
        fields = line.split()
        # The first column of each row is the PR/issue number.
        if fields and fields[0].isdigit():
            main_repo_list[repo][key].add(int(fields[0]))
    return 0
def consolidate_repo_list(repo_list):
    """Merge one file's per-repo link counts into the global main_repo_list.

    Once a repo accumulates more than GITHUB_FETCH_THRESHOLD PR (or issue)
    links, its PR (or issue) numbers are bulk-fetched and cached, which is
    much faster than testing each link individually.
    """
    global use_gh_cache
    global main_repo_list
    for repo, stats in repo_list.items():
        if repo not in main_repo_list:
            main_repo_list[repo] = stats
            main_repo_list[repo][PR_CACHED_KEY] = False
            main_repo_list[repo][ISSUE_CACHED_KEY] = False
            main_repo_list[repo][PR_KEY] = set()
            main_repo_list[repo][ISSUE_KEY] = set()
        else:
            main_repo_list[repo][NUM_PR_KEY] += stats[NUM_PR_KEY]
            main_repo_list[repo][NUM_IS_KEY] += stats[NUM_IS_KEY]
        # Fetch the list of GH PRs/issues and cache them. If we run into an
        # error then we stop trying to use the cached list.
        if use_gh_cache:
            if main_repo_list[repo][NUM_PR_KEY] > GITHUB_FETCH_THRESHOLD and not main_repo_list[repo][PR_CACHED_KEY]:
                try:
                    fetch_issues(repo, 'pr', 1500)
                except Exception:
                    traceback.print_exc()
                    use_gh_cache = False
                # Marked as cached even on failure so we never retry the fetch.
                main_repo_list[repo][PR_CACHED_KEY] = True
            if main_repo_list[repo][NUM_IS_KEY] > GITHUB_FETCH_THRESHOLD and not main_repo_list[repo][ISSUE_CACHED_KEY]:
                try:
                    fetch_issues(repo, 'issue', 1000)
                except Exception:
                    traceback.print_exc()
                    use_gh_cache = False
                main_repo_list[repo][ISSUE_CACHED_KEY] = True
def main():
parser = argparse.ArgumentParser(
description='A script to test HTTP links, and all links in Markdown files.',
epilog='Requires beautifulsoup4, requests, and termcolor from PyPi. ' +
'Optional dependencies: pandoc (to support testing Markdown files), gh (To speed up checking GitHub links)'
)
parser.add_argument("-F", "--files", action="store", dest="files", nargs='+', help="List of Markdown files to test links in.")
parser.add_argument("-L", "--links", action="store", dest="links", nargs='+', help="List of links to test.")
parser.add_argument("-M", "--test-markdown", action="store_true", default=False, help="Enable search of Markdown files for testing links.")
parser.add_argument("-D", "--exclude-dirs", action="store", dest="exclude_dirs", nargs='+', help="List of directories to ignore.")
parser.add_argument("-I", "--include-file-types", action="store", dest="include_files", nargs='+', help="List of file patterns to search for URLs.")
parser.add_argument("-A", "--allowlist-file", action="store", dest="allowlist", help="Path to file containing list of allowed URLs.")
parser.add_argument("-n", "--num-processes", action="store", type=int, default=4, help="Number of processes to run in parallel")
parser.add_argument("-k", "--keep", action="store_true", default=False, help="Keep temporary files instead of deleting")
parser.add_argument("-v", "--verbose", action="store_true", default=False, help="Print all links tested")
args = parser.parse_args()
html_file_list = []
broken_links = []
file_list = []
link_list = []
exclude_dirs = [dir.lower() for dir in args.exclude_dirs] if args.exclude_dirs else []
# If any explicit files are passed, add them to file_list.
if args.files is not None:
file_list = args.files
elif args.test_markdown:
# Obtain list of Markdown files from the repository | |
<filename>annulus/detector.py<gh_stars>1-10
import numpy as np
import cv2
def _cross_ratio(z1, z2, z3, z4):
    """Cross ratio of four collinear coordinates: ((z3-z1)(z4-z2)) / ((z3-z2)(z4-z1))."""
    numerator = (z3 - z1) * (z4 - z2)
    denominator = (z3 - z2) * (z4 - z1)
    return numerator / denominator
def _cross_ratio_annulus(center, circle1, circle2):
    """Return a cross ratio for a single annulus.

    A horizontal line through *center* is intersected with both ellipses:

    z1   z3   z4   z2
    (    (  x  )    )
    """
    dist = np.linalg.norm
    outer_a, outer_b = _line_ellipse_intersection(center, [1, 0], circle1)
    inner_a, inner_b = _line_ellipse_intersection(center, [1, 0], circle2)
    # Reorder so that inner_a lies on the same side as outer_a.
    if dist(outer_a - inner_a) > dist(outer_a - inner_b):
        inner_a, inner_b = inner_b, inner_a
    # Signed coordinates along the line, measured from the center.
    coords = (
        -dist(outer_a - center),
        -dist(inner_a - center),
        dist(inner_b - center),
        dist(outer_b - center),
    )
    return _cross_ratio(*coords)
def _cross_ratio_neighbors(center1, center2, circle1, circle2):
    """Cross ratio between two neighboring ellipses.

    The following points are used (the centers themselves are not):

    z1    z2      z4    z3
    (  x  )       (  x  )

    They come from intersecting each ellipse with the line that connects
    the two ellipse centers.
    """
    c1 = np.asarray(center1)
    c2 = np.asarray(center2)
    direction = c2 - c1
    p1, p2 = _line_ellipse_intersection(c1, direction, circle1)
    p3, p4 = _line_ellipse_intersection(c1, direction, circle2)
    # Pick the points of each ellipse that face the other ellipse.
    if np.linalg.norm(c1 - p3) > np.linalg.norm(c1 - p4):
        p3, p4 = p4, p3
    if np.linalg.norm(c2 - p2) > np.linalg.norm(c2 - p1):
        p1, p2 = p2, p1
    return _cross_ratio(
        0,
        np.linalg.norm(p2 - p1),
        np.linalg.norm(p4 - p1),
        np.linalg.norm(p3 - p1),
    )
def _ellipse_to_homogeneous(parameter):
    """Convert an OpenCV ellipse ((xc, yc), (w, h), angle_deg) into the
    coefficients of the conic A*x^2 + B*x*y + C*y^2 + D*x + E*y + F = 0."""
    (xc, yc), (width, height), angle_deg = parameter
    a = width / 2
    b = height / 2
    theta = angle_deg * np.pi / 180
    sin_t = np.sin(theta)
    cos_t = np.cos(theta)
    A = a**2 * sin_t**2 + b**2 * cos_t**2
    B = 2 * (b**2 - a**2) * sin_t * cos_t
    C = a**2 * cos_t**2 + b**2 * sin_t**2
    D = -2 * A * xc - B * yc
    E = -B * xc - 2 * C * yc
    F = A * xc**2 + B * xc * yc + C * yc**2 - a**2 * b**2
    # Normalize by F unless it is numerically close to zero.
    scale = F if np.abs(F) > 1e-3 else 1
    return np.array([A, B, C, D, E, F]) / scale
def _line_ellipse_intersection(x0, xd, ellipse):
    """Intersect the line x0 + t*xd with an ellipse in homogeneous conic form.

    Returns the two intersection points (2D), or (None, None) when the line
    misses the conic or the quadratic degenerates.
    """
    origin = np.hstack((x0, 1.0))
    direction = np.hstack((xd, 0.0))
    direction /= np.linalg.norm(direction)
    A, B, C, D, E, F = ellipse
    conic = np.array([[A, B / 2, D / 2],
                      [B / 2, C, E / 2],
                      [D / 2, E / 2, F]])
    # Coefficients of the quadratic in the line parameter t.
    qa = direction @ conic @ direction
    qb = origin @ conic @ direction + direction @ conic @ origin
    qc = origin @ conic @ origin
    disc = qb**2 - 4 * qa * qc
    if disc < 0 or np.isclose(qa, 0):
        return None, None
    root = np.sqrt(disc)
    t1 = (-qb + root) / (2 * qa)
    t2 = (-qb - root) / (2 * qa)
    return (origin + t1 * direction)[0:2], (origin + t2 * direction)[0:2]
def map_point(H, point):
    """Apply the homography H to a single 2D point and dehomogenize."""
    homogeneous = H @ np.hstack((point, 1))
    return homogeneous[0:2] / homogeneous[2]
def map_points(H, pixel):
    """Apply the homography H to a list of 2D points and dehomogenize."""
    stacked = np.column_stack((pixel, np.ones(len(pixel))))
    mapped = (H @ stacked.T).T
    return mapped[:, 0:2] / mapped[:, 2:3]
def map_ellipse(H, ellipse):
    """Apply the homography H to the center points of a list of OpenCV ellipses."""
    centers = np.array([item[0] for item in ellipse])
    return map_points(H, centers)
def annuli_shape_filter(axis_ratio=0.2, max_angle=10 * np.pi / 180, angle_ratio=1.2):
    """Filter based on the similarity of the two ellipses of an annulus.

    Compares the large/small axis ratio of both ellipses, and — for clearly
    eccentric ellipses — the angle (in radians) between their main axes.

    Args:
        axis_ratio: Maximum allowed relative difference between the axis ratios.
        max_angle: Maximum angle (in radians) between the main axes.
        angle_ratio: Minimum large/small axis ratio before axis angles are compared.
    """
    deg = np.pi / 180

    def accept(annulus):
        first, second = annulus[0], annulus[1]
        first_ratio = first[1][1] / first[1][0]
        second_ratio = second[1][1] / second[1][0]
        similarity = first_ratio / second_ratio
        if not (1 - axis_ratio <= similarity <= 1 + axis_ratio):
            return False
        # Only compare axis directions for a strong ellipse; otherwise the angle
        # is dominated by noise — for a near-circle the fitted "main" axis
        # direction is essentially random.
        if first_ratio > angle_ratio:
            angle = np.arccos(
                np.cos(first[2] * deg) * np.cos(second[2] * deg)
                + np.sin(first[2] * deg) * np.sin(second[2] * deg))
            if angle > max_angle:
                return False
        return True

    return lambda annuli: [annulus for annulus in annuli if accept(annulus)]
def cross_ratio_filter(inner_circle_diameter, outer_circle_diameter, tolerance=0.1):
    """Filter annuli based on the cross ratio of their two concentric circles.

    Args:
        inner_circle_diameter: Diameter of the inner circle.
        outer_circle_diameter: Diameter of the outer circle.
        tolerance: Relative tolerance for the cross-ratio comparison.
    """
    inner_radius = 0.5 * inner_circle_diameter
    outer_radius = 0.5 * outer_circle_diameter
    expected = _cross_ratio(-outer_radius, -inner_radius, inner_radius, outer_radius)

    def accept(annulus):
        midpoint = (np.array(annulus[0][0]) + np.array(annulus[1][0])) / 2
        observed = _cross_ratio_annulus(midpoint, annulus[2], annulus[3])
        return np.isclose(observed, expected, rtol=tolerance)

    return lambda annuli: [annulus for annulus in annuli if accept(annulus)]
def neighbor_filter(outer_circle_diameter, marker_spacing):
    """Keep only annuli that have at least one direct neighbor on the grid,
    judged by the cross ratio between pairs of annuli.

    Args:
        outer_circle_diameter: Diameter of the outer circle.
        marker_spacing: Distance between two neighboring annuli.
    """
    expected = _cross_ratio(0, outer_circle_diameter, marker_spacing + outer_circle_diameter, marker_spacing)

    def run(annuli):
        kept = []
        for index, candidate in enumerate(annuli):
            for other_index, other in enumerate(annuli):
                if index == other_index:
                    continue
                observed = _cross_ratio_neighbors(candidate[0][0], other[0][0], candidate[2], other[2])
                if np.isclose(observed, expected, rtol=0.2):
                    kept.append(candidate)
                    break
        return kept

    return run
class AnnulusDetection(object):
"""Detect annuli in images."""
def __init__(self, **kwargs):
    """Detect ring-shaped objects bounded by two concentric circles transformed
    by a homography (camera image).

    All the parameters are optional keyword arguments:

    Args:
        minimum_inner_circle_size: Minimum size in pixel of inner circle.
        minimum_outer_circle_size: Minimum size in pixel of outer circle.
        relative_outer_inner_size: Maximum difference in size between outer and inner circle.
        border_distance: Minimum distance in pixel to image border.
        minimum_circle_points: Minimum number of points for fitting circle.
    """
    defaults = {
        "minimum_inner_circle_size": 8,
        "minimum_outer_circle_size": 16,
        "relative_outer_inner_size": 4,
        "border_distance": 5,
        "minimum_circle_points": 20,
    }
    for attribute, default in defaults.items():
        setattr(self, attribute, kwargs.pop(attribute, default))
    self.filter = []
    # Reject any keyword arguments that were not consumed above.
    if kwargs:
        raise ValueError("Unknown arguments: {0}".format(list(kwargs.keys())))
def add_filter(self, f):
    """Append a filter callable to the detection filter chain."""
    self.filter.append(f)
def _filter_annuli(self, annuli):
    """Run the candidate list through every registered filter in order."""
    filtered = annuli
    for apply_filter in self.filter:
        filtered = apply_filter(filtered)
    return filtered
def detect(self, image, binary_image, high_quality=True):
    """Detect annuli in an image.

    Args:
        image: Gray image used for detection.
        binary_image: Binary image used for detection.
        high_quality: If True, refine the annuli on the gray image; improves
            quality but is more time consuming.

    Returns:
        List of detected annuli.
    """
    assert image.shape == binary_image.shape, "Binary image size does not correspond to gray image size"
    inverted = 255 - binary_image
    stats_annulus, stats_background = self._label_image(binary_image, inverted)
    candidates = self._find_candidates(stats_annulus, stats_background, image.shape)
    annuli, rect = self._approx_annuli(inverted, candidates)
    annuli = self._filter_annuli(annuli)
    if high_quality and len(annuli) > 0:
        annuli = self._fit_annuli(image, annuli, rect)
    return self._calculate_center(annuli)
def _label_image(self, binary_image, inv_binary_image):
    """Run connected-components analysis for foreground and background; return their stats."""
    # cv2.connectedComponentsWithStats returns (count, labels, stats, centroids);
    # only the stats matrices are needed here.
    stats_background = cv2.connectedComponentsWithStats(binary_image)[2]
    stats_annulus = cv2.connectedComponentsWithStats(inv_binary_image)[2]
    return stats_annulus, stats_background
def _find_candidates(self, annulus_areas, background_areas, image_shape):
"""Find potential candidates. Afterwards its only elimination."""
def get_background(annulus):
bg_cand = None
for background in background_areas[1:, (cv2.CC_STAT_LEFT, cv2.CC_STAT_TOP, cv2.CC_STAT_WIDTH, cv2.CC_STAT_HEIGHT, cv2.CC_STAT_AREA)]:
# Minimum size of ellipse
if background[2] < self.minimum_inner_circle_size or background[3] < self.minimum_inner_circle_size:
continue
# Cotained in ellipse
if background[0] < annulus[0] or background[1] < annulus[1] or background[0] + background[2] > annulus[0] + | |
for and averaged
across all markets.
market_weights : `array-like, optional`
Weights for averaging micro moments over specified ``market_ids``. By default, these are :math:`1 / T_m`.
Examples
--------
- :doc:`Tutorial </tutorial>`
"""
product_id1: Optional[Any]
product_id2: Optional[Any]
def __init__(
        self, product_id1: Any, product_id2: Optional[Any], value: float, observations: int,
        market_ids: Optional[Sequence] = None, market_weights: Optional[Array] = None) -> None:
    """Validate information about the moment to the greatest extent possible without an economy instance."""
    if product_id1 is None and product_id2 is None:
        raise ValueError("At least one of product_id1 or product_id2 must be not None.")
    # A None first choice denotes the outside good, which requires inside
    # probabilities; otherwise the first product must be eliminated.
    outside_first = product_id1 is None
    super().__init__(
        value, observations, market_ids, market_weights, requires_inside=outside_first,
        requires_eliminated=[] if outside_first else [product_id1]
    )
    self.product_id1 = product_id1
    self.product_id2 = product_id2
def _format_moment(self) -> str:
    """Construct a string expression for the moment."""
    def describe(product_id: Optional[Any]) -> str:
        return "Outside" if product_id is None else f"'{product_id}'"
    return f"P({describe(self.product_id1)} First, {describe(self.product_id2)} Second)"
def _validate(self, economy: 'Economy') -> None:
    """Check that both product IDs are valid in the economy."""
    super()._validate(economy)
    for product_id in (self.product_id1, self.product_id2):
        economy._validate_product_ids([product_id], self.market_ids)
def _compute_agent_values(
        self, market: 'Market', delta: Array, probabilities: Array, conditionals: Optional[Array],
        inside_probabilities: Optional[Array], eliminated_probabilities: Dict[int, Array],
        inside_eliminated_sum: Optional[Array]) -> Array:
    """Compute agent-specific micro moment values, which will be aggregated up into means or covariances.

    Three cases, keyed on which of the two product IDs denotes the outside
    good (None). Each branch returns a column of per-agent values; the
    shares-based denominators condition on the first choice.
    """
    # match the second choice probability of a certain inside good for agents who choose the outside good
    if self.product_id1 is None:
        assert inside_probabilities is not None
        k = market.get_product(self.product_id2)
        # Outside share is one minus the sum of all inside product shares.
        outside_share = 1 - market.products.shares.sum()
        numerator = inside_probabilities[[k]].T - market.products.shares[k]
        return numerator / outside_share
    # match the second choice probability of the outside good for agents who choose a certain inside good
    if self.product_id2 is None:
        j = market.get_product(self.product_id1)
        # Probability of the outside good once j is eliminated: one minus the
        # summed eliminated probabilities over the remaining inside goods.
        eliminated_outside_probabilities = 1 - eliminated_probabilities[j].sum(axis=0, keepdims=True)
        outside_share = 1 - market.products.shares.sum()
        numerator = eliminated_outside_probabilities.T - outside_share
        return numerator / market.products.shares[j]
    # match the second choice probability of a certain inside good for agents who choose a certain inside good
    j = market.get_product(self.product_id1)
    k = market.get_product(self.product_id2)
    numerator = eliminated_probabilities[j][[k]].T - market.products.shares[k]
    return numerator / market.products.shares[j]
def _compute_agent_values_tangent(
        self, market: 'Market', p: int, delta: Array, probabilities: Array, probabilities_tangent: Array,
        inside_probabilities: Optional[Array], inside_tangent: Optional[Array],
        eliminated_tangents: Dict[int, Array], inside_eliminated_sum: Optional[Array],
        inside_eliminated_sum_tangent: Optional[Array]) -> Array:
    """Compute the tangent of agent-specific micro moments with respect to a parameter.

    Mirrors the three cases of _compute_agent_values; since product shares do
    not vary with the parameter here, only the probability tangents appear in
    each numerator.
    """
    # handle the second choice probability of a certain inside good for agents who choose the outside good
    if self.product_id1 is None:
        assert inside_tangent is not None
        k = market.get_product(self.product_id2)
        outside_share = 1 - market.products.shares.sum()
        return inside_tangent[[k]].T / outside_share
    # handle the second choice probability of the outside good for agents who choose a certain inside good
    if self.product_id2 is None:
        j = market.get_product(self.product_id1)
        # Tangent of (1 - sum of eliminated probabilities) is minus the summed tangents.
        eliminated_outside_tangent = -eliminated_tangents[j].sum(axis=0, keepdims=True)
        return eliminated_outside_tangent.T / market.products.shares[j]
    # handle the second choice probability of a certain inside good for agents who choose a certain inside good
    j = market.get_product(self.product_id1)
    k = market.get_product(self.product_id2)
    return eliminated_tangents[j][[k]].T / market.products.shares[j]
class DiversionCovarianceMoment(Moment):
    r"""Configuration for micro moments that match covariances between product characteristics of first and second
    choices.

    For example, survey data can sometimes be used to compute the sample covariance :math:`\mathscr{V}_m` between a
    product characteristic :math:`x_{jt}^{(1)}` of an agent's first choice :math:`j` and either the same or a different
    product characteristic :math:`x_{kt}^{(2)}` of the agent's second choice :math:`k` if :math:`j` were removed from
    the choice set, amongst those agents whose first and second choices are both inside goods. Its simulated analogue
    :math:`v_{mt}` can be defined by

    .. math:: v_{mt} = \text{Cov}(z_{it}^{(1)}, z_{it}^{(2)})

    where conditional on purchasing inside goods, the expected values of :math:`x_{jt}^{(1)}` and
    :math:`x_{kt}^{(2)}` for agent :math:`i` are

    .. math::

       z_{it}^{(1)} = \sum_{j \in J_t} x_{jt}^{(1)} s_{ij(-0)t}, \quad
       z_{it}^{(2)} = \sum_{j, k \in J_t} x_{kt}^{(2)} s_{ik(-0,j)t} s_{ij(-0)t}

    where :math:`s_{ij(-0)t}` is the probability of choosing :math:`j` when the outside option is removed from the
    choice set and :math:`s_{ik(-0,j)t}` is the probability of choosing :math:`k` when both the outside option and
    :math:`j` are removed from the choice set.

    These are averaged across a set of markets :math:`T_m` and compared with :math:`\mathscr{V}_m`, which gives
    :math:`\bar{g}_{M,m}` in :eq:`averaged_micro_moments`.

    Parameters
    ----------
    X2_index1 : `int`
        Column index of :math:`x_{jt}^{(1)}` in the matrix of demand-side nonlinear product characteristics,
        :math:`X_2`. This should be between zero and :math:`K_2 - 1`, inclusive.
    X2_index2 : `int`
        Column index of :math:`x_{kt}^{(2)}` in the matrix of demand-side nonlinear product characteristics,
        :math:`X_2`. This should be between zero and :math:`K_2 - 1`, inclusive.
    value : `float`
        Value :math:`\mathscr{V}_m` of the statistic estimated from micro data.
    observations : `int`
        Number of micro data observations :math:`N_m` used to estimate :math:`\mathscr{V}_m`, which is used to properly
        scale micro moment covariances in :eq:`scaled_micro_moment_covariances`.
    market_ids : `array-like, optional`
        Distinct market IDs over which the micro moments will be averaged to get :math:`\bar{g}_{M,m}`. These are also
        the only markets in which the moments will be computed. By default, the moments are computed for and averaged
        across all markets.
    market_weights : `array-like, optional`
        Weights for averaging micro moments over specified ``market_ids``. By default, these are :math:`1 / T_m`.

    Examples
    --------
        - :doc:`Tutorial </tutorial>`

    """

    X2_index1: int
    X2_index2: int

    def __init__(
            self, X2_index1: int, X2_index2: int, value: float, observations: int,
            market_ids: Optional[Sequence] = None, market_weights: Optional[Array] = None) -> None:
        """Validate information about the moment to the greatest extent possible without an economy instance."""
        # Zero is a valid column index, so validate non-negativity (the previous
        # messages incorrectly demanded a "positive" int).
        if not isinstance(X2_index1, int) or X2_index1 < 0:
            raise ValueError("X2_index1 must be a non-negative int.")
        if not isinstance(X2_index2, int) or X2_index2 < 0:
            raise ValueError("X2_index2 must be a non-negative int.")
        super().__init__(
            value, observations, market_ids, market_weights, requires_inside=True, requires_inside_eliminated=True
        )
        self.X2_index1 = X2_index1
        self.X2_index2 = X2_index2

    def _format_moment(self) -> str:
        """Construct a string expression for the moment."""
        return f"Cov(X2 Column {self.X2_index1} First, X2 Column {self.X2_index2} Second)"

    def _validate(self, economy: 'Economy') -> None:
        """Check that matrix indices are valid in the economy."""
        super()._validate(economy)
        # Valid column indices run from 0 to K2 - 1 (the previous messages
        # overstated the inclusive upper bound as K2).
        if self.X2_index1 >= economy.K2:
            raise ValueError(f"X2_index1 must be between 0 and K2 - 1 = {economy.K2 - 1}, inclusive.")
        if self.X2_index2 >= economy.K2:
            raise ValueError(f"X2_index2 must be between 0 and K2 - 1 = {economy.K2 - 1}, inclusive.")

    def _compute_agent_values(
            self, market: 'Market', delta: Array, probabilities: Array, conditionals: Optional[Array],
            inside_probabilities: Optional[Array], eliminated_probabilities: Dict[int, Array],
            inside_eliminated_sum: Optional[Array]) -> Array:
        """Compute agent-specific micro moment values, which will be aggregated up into means or covariances."""
        assert inside_probabilities is not None and inside_eliminated_sum is not None
        x1 = market.products.X2[:, [self.X2_index1]]
        x2 = market.products.X2[:, [self.X2_index2]]
        # Expected first- and second-choice characteristics per agent.
        z1 = inside_probabilities.T @ x1
        z2 = inside_eliminated_sum.T @ x2
        # De-mean with the weighted average so the product is a covariance term.
        demeaned_z1 = z1 - market.agents.weights.T @ z1
        demeaned_z2 = z2 - market.agents.weights.T @ z2
        return demeaned_z1 * demeaned_z2

    def _compute_agent_values_tangent(
            self, market: 'Market', p: int, delta: Array, probabilities: Array, probabilities_tangent: Array,
            inside_probabilities: Optional[Array], inside_tangent: Optional[Array],
            eliminated_tangents: Dict[int, Array], inside_eliminated_sum: Optional[Array],
            inside_eliminated_sum_tangent: Optional[Array]) -> Array:
        """Compute the tangent of agent-specific micro moments with respect to a parameter."""
        assert inside_probabilities is not None and inside_tangent is not None
        assert inside_eliminated_sum is not None and inside_eliminated_sum_tangent is not None
        x1 = market.products.X2[:, [self.X2_index1]]
        x2 = market.products.X2[:, [self.X2_index2]]
        z1 = inside_probabilities.T @ x1
        z1_tangent = inside_tangent.T @ x1
        z2 = inside_eliminated_sum.T @ x2
        z2_tangent = inside_eliminated_sum_tangent.T @ x2
        demeaned_z1 = z1 - market.agents.weights.T @ z1
        demeaned_z1_tangent = z1_tangent - market.agents.weights.T @ z1_tangent
        demeaned_z2 = z2 - market.agents.weights.T @ z2
        demeaned_z2_tangent = z2_tangent - market.agents.weights.T @ z2_tangent
        # Product rule applied to the covariance term of _compute_agent_values.
        return demeaned_z1_tangent * demeaned_z2 + demeaned_z1 * demeaned_z2_tangent
class CustomMoment(Moment):
r"""Configuration for custom micro moments.
This configuration requires a value :math:`\mathscr{V}_m` computed, for example, from survey data. It also requires
a function that computes the simulated counterpart of this value,
.. math:: v_{mt} = \sum_{i \in I_t} w_{it} v_{imt},
a simulated integral over agent-specific micro values :math:`v_{imt}` computed according to a custom function. These
are averaged across a set of markets :math:`T_m` and compared with :math:`\mathscr{V}_m`, which gives
:math:`\bar{g}_{M,m}` in :eq:`averaged_micro_moments`.
Parameters
----------
value : `float`
Value :math:`\mathscr{V}_m` of the statistic estimated from micro data.
observations : `int`
Number of micro data observations :math:`N_m` used to estimate :math:`\mathscr{V}_m`, which is used to properly
scale micro moment covariances in :eq:`scaled_micro_moment_covariances`.
compute_custom : `callable`
Function that computes :math:`v_{imt}` in a single market :math:`t`, which is of | |
<reponame>qwghlm/WhensMyBus<gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#pylint: disable=W0142
"""
When's My Transport?
A Twitter bot that takes requests for a bus or Tube, and replies with the real-time data from TfL on Twitter
This is a parent class used by all three bots, handling common functionality between them all, such as (but not limited to)
loading the databases, config, connecting to Twitter, reading @ replies, replying to them, checking new followers, following them back
as well as models and classes for useful constructs such as Trains and Stations
The WhensMyBus and WhensMyTrain classes handle looking up route, line, station and stop locations and names, and processing
data using the respective services' APIs
(c) 2011-12 <NAME> (<EMAIL> AT qwghlm DOT co DOT uk)
Released under the MIT License
"""
# Standard libraries of Python 2.6
from abc import abstractmethod, ABCMeta
import ConfigParser
import logging
import os
import re
import traceback
from pprint import pprint # For debugging
# From library modules in this package
from lib.browser import WMTBrowser, WMTURLProvider
from lib.exceptions import WhensMyTransportException
from lib.geo import convertWGS84toOSEastingNorthing, gridrefNumToLet, GoogleGeocoder
from lib.logger import setup_logging
from lib.twitterclient import WMTTwitterClient, is_direct_message
# Some constants we use
VERSION_NUMBER = 0.90
# Absolute directory this module lives in; used to locate config and data files
HOME_DIR = os.path.dirname(os.path.abspath(__file__))
# Testing modes: fully live, local canned data, or live TfL data without tweeting
TESTING_NONE = 0
TESTING_TEST_LOCAL_DATA = 1
TESTING_TEST_LIVE_DATA = 2
class WhensMyTransport:
    """
    Parent class for all WhensMy* bots, with common functions shared by all
    """
    # Python 2-style ABC declaration. NOTE(review): under Python 3 the
    # __metaclass__ attribute is ignored (metaclass= syntax is required instead)
    __metaclass__ = ABCMeta
def __init__(self, instance_name, testing=TESTING_NONE):
"""
Read config and set up logging, settings database, geocoding and Twitter OAuth
"""
# Instance name is something like 'whensmybus', 'whensmytube'
self.instance_name = instance_name
# Try opening the file first just to see if it exists, exception caught below
try:
config_file = 'config.cfg'
open(HOME_DIR + '/' + config_file)
config = ConfigParser.SafeConfigParser({'debug_level': 'INFO',
'yahoo_app_id': None,
'silent_mode' : 0 })
config.read(HOME_DIR + '/' + config_file)
config.get(self.instance_name, 'debug_level')
except (ConfigParser.Error, IOError):
error_string = "Fatal error: can't find a valid config file for %s." % self.instance_name
error_string += " Please make sure there is a %s file in this directory" % config_file
raise RuntimeError(error_string)
# Setup debugging
debug_level = config.get(self.instance_name, 'debug_level')
setup_logging(self.instance_name, testing, debug_level)
if testing == TESTING_TEST_LOCAL_DATA:
logging.info("In TEST MODE - No Tweets will be made and local test data will be used!")
elif testing == TESTING_TEST_LIVE_DATA:
logging.info("In TEST MODE - No Tweets will be made! Will be using LIVE TfL data")
# Name of the admin so we know who to alert if there is an issue
self.admin_name = config.get(self.instance_name, 'admin_name')
# Setup browser for JSON & XML
self.browser = WMTBrowser()
self.urls = WMTURLProvider(use_test_data=(testing == TESTING_TEST_LOCAL_DATA))
# These get overridden by subclasses
self.geodata = None
self.parser = None
# Setup geocoder for looking up place names
self.geocoder = GoogleGeocoder()
# Setup Twitter client
# Silent mode is true if we are testing, or if we are live but the user has overridden
# in the config file
silent_mode = testing
if silent_mode == TESTING_NONE and config.get(self.instance_name, 'silent_mode'):
silent_mode = config.get(self.instance_name, 'silent_mode')
self.username = config.get(self.instance_name, 'username')
consumer_key = config.get(self.instance_name, 'consumer_key')
consumer_secret = config.get(self.instance_name, 'consumer_secret')
access_token = config.get(self.instance_name, 'key')
access_token_secret = config.get(self.instance_name, 'secret')
self.twitter_client = WMTTwitterClient(self.instance_name, consumer_key, consumer_secret, access_token, access_token_secret, silent_mode)
# The following can be overridden by child classes - whether to allow blank tweets,
# and what the default route should be if none is given
self.allow_blank_tweets = False
self.default_requested_route = None
def check_tweets(self):
"""
Check incoming Tweets, and reply to them
"""
tweets = self.twitter_client.fetch_tweets()
logging.debug("%s Tweets to process", len(tweets))
for tweet in tweets:
# If the Tweet is not valid (e.g. not directly addressed, from ourselves) then skip it
if not self.validate_tweet(tweet):
continue
# Try processing the Tweet. This may fail with a WhensMyTransportException for a number of reasons, in which
# case we catch the exception and process an apology accordingly. Other Python Exceptions may occur too - we handle
# these by DMing the admin with an alert
try:
replies = self.process_tweet(tweet)
except WhensMyTransportException as exc:
replies = (exc.get_user_message(),)
except Exception as exc:
logging.error("Exception encountered: %s", exc.__class__.__name__)
logging.error("Traceback:\r\n%s" % traceback.format_exc())
self.alert_admin_about_exception(tweet, exc.__class__.__name__)
replies = (WhensMyTransportException('unknown_error').get_user_message(),)
# If the reply is blank, probably didn't contain a bus number or Tube line, so check to see if there was a thank-you
if not replies:
replies = self.check_politeness(tweet)
# Send a reply back, if we have one. DMs and @ replies have different structures and different handlers
for reply in replies:
if is_direct_message(tweet):
self.twitter_client.send_reply_back(reply, tweet.sender.screen_name, True, tweet.id)
else:
self.twitter_client.send_reply_back(reply, tweet.user.screen_name, False, tweet.id)
self.twitter_client.check_followers()
def validate_tweet(self, tweet):
"""
Check to see if a Tweet is valid (i.e. we want to reply to it), and returns True if so
Tweets from ourselves, and mentions that are not directly addressed to us, returns False
"""
message = tweet.text
# Bit of logging, plus we always return True for DMs
if is_direct_message(tweet):
logging.info("Have a DM from %s: %s", tweet.sender.screen_name, message)
return True
else:
username = tweet.user.screen_name
logging.info("Have an @ reply from %s: %s", username, message)
# Don't start talking to yourself
if username == self.username:
logging.debug("Not talking to myself, that way madness lies")
return False
# Ignore mentions that are not direct replies
if not message.lower().startswith('@%s' % self.username.lower()):
logging.debug("Not a proper @ reply, skipping")
return False
return True
    def process_tweet(self, tweet):
        """
        Process a single Tweet object and return a list of strings (replies), one per route or line
        e.g.:
            '@whensmybus 341 from Clerkenwell' produces
            '341 Clerkenwell Road to Waterloo 1241; Rosebery Avenue to Angel Road 1247'

        Each reply might be more than 140 characters
        No replies at all are given if the message is a thank-you or does not include a route or line
        """
        # Don't do anything if this is a thank-you; check_tweets() falls back to
        # check_politeness() itself when we return no replies
        if self.check_politeness(tweet):
            logging.debug("This Tweet is a thank-you Tweet, skipping")
            return []
        # Get route number, from and to from the message
        message = self.sanitize_message(tweet.text)
        logging.debug("Message from user: %s", message)
        (requested_routes, origin, destination, direction) = self.parser.parse_message(message)
        # If no routes found, we may be able to deduce from origin or position if we have
        # specified a default requested route (set by subclasses)
        if not requested_routes and self.default_requested_route and (origin or self.tweet_has_geolocation(tweet)):
            logging.debug("No line name detected, going to try %s for now and see if that works", self.default_requested_route)
            requested_routes = [self.default_requested_route]
        if not requested_routes:
            logging.debug("No routes or lines detected on this Tweet, cannot determine position, skipping")
            return []
        # If no origin specified, let's see if we have co-ordinates on the Tweet;
        # get_tweet_geolocation may itself raise a WhensMyTransportException
        if not origin:
            position = self.get_tweet_geolocation(tweet, message)
        else:
            position = None
        replies = []
        for requested_route in requested_routes:
            try:
                replies.append(self.process_individual_request(requested_route, origin, destination, direction, position))
            # Exceptions produced for an individual request are particular to a route/stop combination - e.g. the bus
            # given does not stop at the stop given, so we just provide an error message for that circumstance, treat as
            # a non-fatal error, and process the next one. The one case where there is a fatal error (TfL's servers are
            # down), we raise this exception to be caught higher up by check_tweets()
            except WhensMyTransportException as exc:
                if exc.msgid == 'tfl_server_down':
                    raise
                else:
                    replies.append(exc.get_user_message())
        return replies
def check_politeness(self, tweet):
"""
Checks a Tweet for politeness. In case someone's just being nice to us, return a "No problem" else return an empty list
"""
message = self.sanitize_message(tweet.text).lower()
if message.startswith('thanks') or message.startswith('thank you'):
return ("No problem :)",)
return ()
def sanitize_message(self, message):
"""
Takes a message string, scrub out the @username of this bot and any #hashtags, and return the sanitized messages
"""
# Remove hashtags and kisses at end
message = re.sub(r"\s#\w+\b", '', message)
message = re.sub(r"\sx+$", '', message)
# Remove usernames
if message.lower().startswith('@%s' % self.username.lower()):
message = message[len('@%s ' % self.username):].strip()
else:
message = message.strip()
# Exception if the Tweet contains nothing useful
if not message and not self.allow_blank_tweets:
raise WhensMyTransportException('blank_%s_tweet' % self.instance_name.replace('whensmy', ''))
return message
def tweet_has_geolocation(self, tweet):
"""
Returns True if the Tweet has geolocation data
"""
# pylint: disable=R0201
return hasattr(tweet, 'geo') and tweet.geo and 'coordinates' in tweet.geo
def get_tweet_geolocation(self, tweet, user_request):
"""
Ensure any geolocation on a Tweet is valid, and return the co-ordinates as a (latitude, longitude) tuple
| |
set to False
force_no_sim: `bool`, default False
If True, prediction with no simulations is forced.
This can be useful when speed is of concern or for validation purposes.
na_fill_func : callable (`pd.DataFrame` -> `pd.DataFrame`)
default::
lambda df: df.interpolate().bfill().ffill()
A function which interpolated missing values in a dataframe.
The main usage is invoked when there is a gap between the timestamps.
In that case to fill in the gaps, the regressors need to be interpolated/filled.
The default works by first interpolating the continuous variables.
Then it uses back-filling and then forward-filling for categorical variables.
Returns
-------
result: `dict`
A dictionary with following items
- "fut_df": `pandas.DataFrame`
The same as input dataframe with an added column for the response.
If value_col already appears in ``fut_df``, it will be over-written.
If ``uncertainty_dict`` is provided as input,
it will also contain a ``{value_col}_quantile_summary`` column.
Here are the expected columns:
(1) A time column with the column name being ``trained_model["time_col"]``
(2) The predicted response in ``value_col`` column.
(3) Quantile summary response in ``f"{value_col}_quantile_summary`` column.
This column only appears if the model includes uncertainty.
(4) Error std in `ERR_STD_COL` column.
This column only appears if the model includes uncertainty.
- "x_mat": `pandas.DataFrame`
Design matrix of the predictive machine-learning model
"""
if freq is None:
freq = trained_model["freq"]
# Creates the future time grid
dates = pd.date_range(
start=trained_model["last_date_for_fit"],
periods=fut_time_num + 1,
freq=freq)
dates = dates[dates > trained_model["last_date_for_fit"]] # drops values up to last_date_for_fit
fut_df = pd.DataFrame({trained_model["time_col"]: dates.tolist()})
return self.predict(
fut_df=fut_df,
trained_model=trained_model,
past_df=past_df,
new_external_regressor_df=new_external_regressor_df,
include_err=include_err,
force_no_sim=force_no_sim,
na_fill_func=na_fill_func)
def partition_fut_df(
self,
fut_df,
trained_model,
freq,
na_fill_func=lambda s: s.interpolate().bfill().ffill()):
"""This function takes a dataframe ``fut_df`` which includes the timestamps to forecast
and a ``trained_model`` returned by
`~greykite.algo.forecast.silverkite.SilverkiteForecast.forecast`
and decomposes
``fut_df`` to various dataframes which reflect if the timestamps are before,
during or after the training periods.
It also determines if: 'the future timestamps after the training period' are immediately
after 'the last training period' or if there is some extra gap.
In that case, this function creates an expanded dataframe which includes the missing
timestamps as well.
If ``fut_df`` also includes extra columns (they could be regressor columns),
this function will interpolate the extra regressor columns.
Parameters
----------
fut_df : `pandas.DataFrame`
The data frame which includes the timestamps for prediction
and possibly regressors. Note that the timestamp column in ``fut_df``
must be the same as ``trained_model["time_col"]``.
We assume ``fut_df[time_col]`` is pandas.datetime64 type.
trained_model : `dict`
A fitted silverkite model which is the output of
`~greykite.algo.forecast.silverkite.SilverkiteForecast.forecast`
freq : `str`
Timeseries frequency, DateOffset alias.
See https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases
for the allowed frequencies.
na_fill_func : callable (`pd.DataFrame` -> `pd.DataFrame`)
default::
lambda df: df.interpolate().bfill().ffill()
A function which interpolated missing values in a dataframe.
The main usage is invoked when there is a gap between the timestamps.
In that case to fill in the gaps, the regressors need to be interpolated/filled.
The default works by first interpolating the continuous variables.
Then it uses back-filling and then forward-filling for categorical variables.
Returns
-------
result: `dict`
A dictionary with following items:
- ``"fut_freq_in_secs"``: `float`
The inferred frequency in ``fut_df``
- ``"training_freq_in_secs"``: `float`
The inferred frequency in training data
- ``"index_before_training"``: `list` [`bool`]
A boolean list to determine which rows of ``fut_df`` include a time
which is before the training start.
- ``"index_within_training"``: `list` [`bool`]
A boolean list to determine which rows of ``fut_df`` include a time
which is during the training period.
- ``"index_after_training"``: `list` [`bool`]
A boolean list to determine which rows of ``fut_df`` include a time
which is after the training end date.
- ``"fut_df_before_training"``: `pandas.DataFrame`
A partition of ``fut_df`` with timestamps before the training start date
- ``"fut_df_within_training"``: `pandas.DataFrame`
A partition of ``fut_df`` with timestamps during the training period
- ``"fut_df_after_training"``: `pandas.DataFrame`
A partition of ``fut_df`` with timestamps after the training start date
- ``"fut_df_gap"``: `pandas.DataFrame` or None
If there is a gap between training end date and the first timestamp
after the training end date in ``fut_df``, this dataframe can fill the
gap between the two. In case ``fut_df`` includes extra columns as well,
the values for those columns will be filled using ``na_fill_func``.
- ``"fut_df_after_training_expanded"``: `pandas.DataFrame`
If there is a gap between training end date and the first timestamp
after the training end date in ``fut_df``, this dataframe will include
the data for the gaps (``fut_df_gap``) as well as ``fut_df_after_training``.
- ``"index_after_training_original"``: `list` [`bool`]
A boolean list to determine which rows of ``fut_df_after_training_expanded``
correspond to raw data passed by user which are after training end date,
appearing in ``fut_df``.
Note that this partition corresponds to ``fut_df_after_training``
which is the subset of data in ``fut_df`` provided by user and
also returned by this function.
- ``"missing_periods_num"``: `int`
Number of missing timestamps between the last date of training and
first date in ``fut_df`` appearing after the training end date
- ``"inferred_forecast_horizon"``: `int`
This is the inferred forecast horizon from ``fut_df``.
This is defined to be the distance between the last training end date
and last date appearing in ``fut_df``.
Note that this value can be smaller or larger than the number of
rows of ``fut_df``.
This is calculated by adding the number of potentially missing timestamps
and the number of time periods appearing after the training end point.
Also note if there are no timestamps after the training end point in
``fut_df``, this value will be zero.
- ``"forecast_partition_summary"``: `dict`
A dictionary which includes the size of various partitions of ``fut_df``
as well as the missing timestamps if needed. The dictionary keys are as
follows:
- ``"len_before_training"``: the number of time periods before training start
- ``"len_within_training"``: the number of time periods within training
- ``"len_after_training"``: the number of time periods after training
- ``"len_gap"``: the number of missing time periods between training data and
future time stamps in ``fut_df``
"""
fut_df = fut_df.copy()
training_start_timestamp = trained_model["min_timestamp"]
training_end_timestamp = trained_model["max_timestamp"]
training_freq_in_secs = trained_model["inferred_freq_in_secs"]
time_col = trained_model["time_col"]
if len(fut_df) > 1:
fut_df_time_stats = describe_timeseries(
df=fut_df,
time_col=time_col)
if not fut_df_time_stats["regular_increments"]:
warnings.warn(
"``fut_df`` does not have regular time increments")
if not fut_df_time_stats["increasing"]:
raise ValueError(f"``fut_df``'s time column {time_col} must be increasing in time")
fut_freq_in_secs = fut_df_time_stats["freq_in_secs"]
else:
# When test_horizon/cv_horizon/forecast_horizon is 1, not all stats above
# are available, thus it produces an error.
# The "else" handles this case.
fut_freq_in_secs = None
index_before_training = (fut_df[time_col] < training_start_timestamp)
index_within_training = (
(fut_df[time_col] >= training_start_timestamp) &
(fut_df[time_col] <= training_end_timestamp))
index_after_training = (fut_df[time_col] > training_end_timestamp)
fut_df_before_training = fut_df[index_before_training]
fut_df_within_training = fut_df[index_within_training]
fut_df_after_training = fut_df[index_after_training]
fut_df_gap = None # a dataframe which fills in the missing time periods
missing_periods_num = 0 # the number of missing time periods
if fut_df_after_training.shape[0] > 0:
min_timestamp_after_training = min(
fut_df_after_training[time_col])
expected_timestamp_after_training = pd.date_range(
start=training_end_timestamp,
periods=2,
freq=freq)[1]
if min_timestamp_after_training < expected_timestamp_after_training:
raise ValueError(
"The most immediate time in the future is off "
f"The last training date: {training_end_timestamp}. "
f"The first future period: {min_timestamp_after_training}. "
f"Expected first future period is {expected_timestamp_after_training}")
elif min_timestamp_after_training > expected_timestamp_after_training:
missing_dates = pd.date_range(
start=expected_timestamp_after_training,
end=min_timestamp_after_training,
freq=freq)
# The last timestamp is already there, therefore we drop it
missing_dates = missing_dates[:-1]
missing_periods_num = len(missing_dates)
# The length of missing dates is non-zero since there are missing timestamps
# since ``min_timestamp_after_training > next_period_after_training``
assert missing_periods_num > 0
fut_df_gap = pd.DataFrame({time_col: missing_dates.tolist()})
# `fut_df` might include other columns than `time_col`
# Those extra columns might be the regressors passed through `fut_df`
# Therefore we need to ensure `fut_df_gap` includes those columns
# Also note that those extra columns need to be imputed in that case
if fut_df_gap is not None and len(fut_df.columns) > 1:
fut_df_expanded = pd.concat(
[fut_df_within_training, fut_df_gap, fut_df_after_training],
axis=0,
ignore_index=True,
sort=False)
# Imputes the missing values
fut_df_expanded = na_fill_func(fut_df_expanded)
index = (
[False]*fut_df_within_training.shape[0] +
[True]*fut_df_gap.shape[0] +
[False]*fut_df_after_training.shape[0])
fut_df_gap = fut_df_expanded[index].copy()
inferred_forecast_horizon = fut_df_after_training.shape[0]
if fut_df_gap is not None:
inferred_forecast_horizon += fut_df_gap.shape[0]
# Creates an expanded dataframe which includes the missing times
# between the end | |
# Repository star-count metadata (10-100) converted from an extraction artifact to a comment
import asyncio
import collections
import logging
import typing as T
import discord
from lifesaver.utils.timing import Ratelimiter
from dog.formatting import represent
from . import checks as checks_module
from .core import Ban, Bounce, CheckFailure, Report, create_embed
from .threshold import Threshold
# Every check callable exported by the checks module, in its __all__ order
ALL_CHECKS = [getattr(checks_module, name) for name in checks_module.__all__]

# Message used when a check's configuration cannot be understood
INCORRECTLY_CONFIGURED_STRING = """**Gatekeeper was configured incorrectly!**
I'm not sure what to do, so I'm going to prevent this user from joining just to
be safe."""
class Keeper:
"""A class that gatekeeps users from guilds by processing checks and
ratelimits on users.
Each :class:`discord.Guild` should have its own persistent Keeper instance.
This class tracks some state in order to handle ratelimits and take
associated action.
The Gatekeeper cog automatically handles Keeper creation. Keepers are also
stored in its state. Keepers hold the Gatekeeper configuration
(NOT the entire guild configuration). Whenever a guild configuration is
updated, the Gatekeeper cog updates the configuration for that guild's
Keeper instance (if it has one).
"""
    def __init__(self, guild: discord.Guild, config, *, bot) -> None:
        """Create a Keeper for ``guild``.

        ``config`` is the Gatekeeper configuration dict (the ``gatekeeper`` key
        of the guild config, not the whole guild config); it is applied through
        :meth:`update_config`, which also builds the ratelimiters.
        """
        self.bot = bot
        self.guild = guild
        # Per-guild logger so log lines identify which guild they belong to
        self.log = logging.getLogger(f"{__name__}[{guild.id}]")

        #: A list of recent :class:`discord.Member`s that have joined. This list
        #: is used to keep track of users joining so that during a burst of
        #: joins (like in a raid), everyone who joined is removed instead of the
        #: single user that ended up triggering the ratelimit.
        self.recent_joins: T.List[discord.Member] = []

        #: The Gatekeeper config (the ``gatekeeper`` key of the guild config).
        self.config: T.Optional[T.Dict] = None

        #: A ratelimiter for each user. Combats users repeatedly joining after
        #: being bounced by Gatekeeper.
        self.unique_join_ratelimiter: T.Optional[Ratelimiter] = None

        #: A ratelimiter for all users. Combats large amounts of users joining
        #: at a time (like in raids).
        self.join_ratelimiter: T.Optional[Ratelimiter] = None

        self.update_config(config)
def __repr__(self):
return f"<Keeper guild={self.guild!r}>"
    @property
    def broadcast_channel(self) -> T.Optional[discord.TextChannel]:
        """Return the broadcast channel for the associated guild.

        Returns ``None`` when no channel is configured or when the configured
        ID does not resolve to a text channel.
        """
        channel_id = self.config.get("broadcast_channel")
        if channel_id is None:
            return None
        channel = self.bot.get_channel(channel_id)
        # get_channel can return None or a non-text channel; only a
        # TextChannel is usable for reports
        if not isinstance(channel, discord.TextChannel):
            return None
        return channel
@property
def bounce_message(self):
"""Return the configured bounce message."""
return self.config.get("bounce_message")
    def _update_ratelimiter(
        self,
        threshold: T.Optional[str],
        attribute_name: str,
        *,
        after_update: T.Optional[T.Callable[[Ratelimiter, Ratelimiter], None]] = None,
    ):
        """Update/create a :class:`Ratelimiter` attribute on ``self`` using a
        threshold string and attribute name.

        The :class:`Ratelimiter` is created from the provided threshold string.
        ``self`` is updated using the given ``attribute_name``. If the
        ratelimiter hasn't changed, then the old one will be kept.

        If the threshold string contains invalid syntax or is ``None``,
        the ratelimiter attribute becomes disabled.

        ``after_update``, when given, is called with the (old, new) ratelimiters
        only when the attribute was actually replaced.
        """
        try:
            # A None threshold is funneled into the same TypeError path as
            # invalid syntax (presumably Threshold.from_string raises TypeError
            # on bad input - see the docstring above), so both disable below.
            if threshold is None:
                raise TypeError

            old_ratelimiter = getattr(self, attribute_name)
            threshold = Threshold.from_string(threshold)
            new_ratelimiter = Ratelimiter(threshold.rate, threshold.per)

            # Keeping the old instance when nothing changed preserves its
            # accumulated state, so a config write doesn't reset ratelimits
            # (see the note in update_config)
            if old_ratelimiter != new_ratelimiter:
                setattr(self, attribute_name, new_ratelimiter)
                self.log.debug(
                    "_update_ratelimiter: replacing stale ratelimiter %s",
                    attribute_name,
                )
                if after_update:
                    after_update(old_ratelimiter, new_ratelimiter)
            else:
                self.log.debug(
                    "_update_ratelimiter: not stale, using old %s", attribute_name
                )
        except TypeError:
            self.log.debug("_update_ratelimiter: invalid %s, disabling", attribute_name)
            setattr(self, attribute_name, None)
    def update_config(self, config):
        """Update this Keeper to use a new config.

        Rebuilds both ratelimiters from the threshold strings in ``config``,
        preserving any ratelimiter whose settings did not change.
        """
        self.config = config

        # this method can be indirectly called by _lockdown. it edits the config
        # which in turn makes the Gatekeeper cog call this method. so, we need
        # to make sure that our ratelimits don't reset!
        #
        # the special _update_ratelimiter doesn't reset ratelimits if the new
        # config doesn't change that ratelimit.
        self._update_ratelimiter(config.get("ban_threshold"), "unique_join_ratelimiter")

        def remove_unneeded_joins(old, new):
            # Invoked by _update_ratelimiter only when the join ratelimiter was
            # actually replaced; trims tracked join history down to the new rate
            if old is None:
                # join_ratelimiter is being created for the first time
                return
            if new.rate < old.rate:
                num_outside = old.rate - new.rate
                self.log.debug(
                    "update_config: removing %d outside tracked joins", num_outside
                )
                del self.recent_joins[:num_outside]

        auto_lockdown = config.get("auto_lockdown", {})
        auto_lockdown_threshold = auto_lockdown.get("threshold")
        self._update_ratelimiter(
            auto_lockdown_threshold,
            "join_ratelimiter",
            after_update=remove_unneeded_joins,
        )
    async def _lockdown(self):
        """Enable the block_all check for this guild and send a warning report."""
        gatekeeper_cog = self.bot.get_cog("Gatekeeper")
        # Editing the config indirectly calls our update_config() via the
        # Gatekeeper cog (see the note in update_config)
        async with gatekeeper_cog.edit_config(self.guild) as config:
            # Merge rather than overwrite so other configured checks survive
            config["checks"] = {
                **config.get("checks", {}),
                "block_all": {"enabled": True},
            }
        # TODO: have this be reported in a separate channel, with a mod ping!
        await self.report(
            "Users are joining too quickly. `block_all` has automatically been enabled."
        )
    async def _auto_lockdown(self, triggering_member):
        """Start the auto lockdown procedure.

        Bounces the member whose join tripped the join ratelimiter, bounces the
        rest of the same join burst, clears the join history, and finally turns
        on ``block_all`` if it isn't already enabled.
        """
        # triggering_member is the user that joined that ended up causing the
        # ratelimiter to go off. now we have to remove this user...
        await self.bounce(triggering_member, "Users are joining too quickly")

        # ...and the rest of the users who were part of the join burst.
        #
        # we explicitly filter out the triggering member because if they joined
        # more than once to trigger the ratelimit, they would appear in this
        # list.
        accompanying = [
            member
            for member in self.recent_joins[-self.join_ratelimiter.rate :]
            if member != triggering_member
        ]

        self.log.debug("_auto_lockdown: triggering_member: %r", triggering_member)
        self.log.debug("_auto_lockdown: accompanying: %r", accompanying)

        if accompanying:
            for member in accompanying:
                await self.bounce(member, "Users are joining too quickly")

        # empty out the recent joins list
        self.recent_joins = []

        checks = self.config.get("checks", {})
        block_all_check = checks.get("block_all", {})
        is_blocking_all = block_all_check.get("enabled", False)

        # now prevent anyone else from joining by enabling block_all
        if not is_blocking_all:
            self.log.debug("performing automatic lockdown")
            await self._lockdown()
        else:
            self.log.debug("already blocking all, skipping lockdown")
async def send_bounce_message(self, member: discord.Member):
"""Send a bounce message to a member."""
if self.bounce_message is None:
return
try:
await member.send(self.bounce_message)
except discord.HTTPException:
if self.config.get("echo_dm_failures", False):
await self.report(
f"Failed to send bounce message to {represent(member)}."
)
async def report(self, *args, **kwargs) -> T.Optional[discord.Message]:
"""Send a message to the designated broadcast channel of a guild.
If the bot doesn't have permission to send to the channel, the error
will be silently dropped.
"""
channel = self.broadcast_channel
if not channel:
self.log.warning("no broadcast channel, cannot report")
return
if channel.guild != self.guild:
self.log.warning("broadcast channel is somewhere else, ignoring")
return
try:
return await channel.send(*args, **kwargs)
except discord.HTTPException as error:
self.log.warning("unable to send message to %r: %r", channel, error)
    async def _ban_reverse_prompt(
        self, message: discord.Message, banned: discord.Member
    ):
        """Shows a reaction prompt to reverse a ban.

        This is only called on ban notice messages to let moderators reverse an
        automatic ban. Waits (with no timeout) for a qualified moderator to
        react with the unban emoji, then attempts the unban and reports the
        outcome either way.
        """
        unban_emoji = self.bot.emoji("gatekeeper.unban")
        await message.add_reaction(unban_emoji)

        def check(reaction, member):
            # Only accept the unban emoji on our own notice message, from a
            # non-bot guild member who actually holds ban permissions
            if not isinstance(member, discord.Member) or member.bot:
                return False
            can_ban = member.guild_permissions.ban_members
            return (
                reaction.message.id == message.id
                and reaction.emoji == unban_emoji
                and can_ban
            )

        _reaction, user = await self.bot.wait_for("reaction_add", check=check)

        try:
            await banned.unban(
                reason=f"Gatekeeper: Ban was reversed by {represent(user)}"
            )
        except discord.HTTPException as error:
            await self.report(
                f"Cannot reverse the ban of {represent(banned)}: `{error}`"
            )
        else:
            await self.report(
                f"The ban of {represent(banned)} was reversed by {represent(user)}."
            )
async def ban(self, member: discord.Member, reason: str):
"""Ban a user from the guild.
An embed with the provided ban reason will be reported to the guild's
broadcast channel.
"""
try:
# cya nerd
await member.ban(delete_message_days=0, reason=f"Gatekeeper: {reason}")
except discord.HTTPException as error:
self.log.debug("failed to ban %d: %r", member.id, error)
await self.report(f"Failed to ban {represent(member)}: `{error}`")
else:
embed = create_embed(
member,
color=discord.Color.purple(),
title=f"Banned {represent(member)}",
reason=reason,
)
message = await self.report(embed=embed)
# in case mods wants to reverse the ban, present a reaction prompt
self.bot.loop.create_task(self._ban_reverse_prompt(message, member))
async def bounce(self, member: discord.Member, reason: str):
"""Kick ("bounce") a user from the guild.
An embed with the provided bounce reason will be reported to the guild's
broadcast channel.
"""
await self.send_bounce_message(member)
try:
await member.kick(reason=f"Gatekeeper: {reason}")
except discord.HTTPException as error:
self.log.debug("failed to kick %d: %r", member.id, error)
await self.report(f"Failed to kick {represent(member)}: `{error}`")
else:
embed = create_embed(
member,
color=discord.Color.red(),
title=f"Bounced {represent(member)}",
reason=reason,
)
await self.report(embed=embed)
async def _perform_checks(self, member: discord.Member, checks):
"""Perform a list of checks on a member.
When calling this method, make sure to handle any thrown Report, Ban,
and Bounce exceptions.
"""
for check in ALL_CHECKS:
check_name = check.__name__
check_options = checks.get(check_name)
# check isn't present in the config
if check_options is None:
continue
if isinstance(check_options, collections.Mapping):
# enabled subkey of check options
if not check_options.get("enabled", False):
continue
elif isinstance(check_options, bool):
# legacy behavior: the "check options" is simply a boolean
# denoting whether the check is enabled or not
if not check_options:
continue
try:
await check(member, check_options)
except CheckFailure as error:
# inject check details into the error
error.check_name = check_name
error.check = check
raise error from None
async def _unique_joining_too_quickly_ban(self, member: discord.Member):
self.log.debug("%d: is joining | |
from gibson.envs.camera_env import VirtualCameraEnv
from gibson.envs.env_bases import BaseEnv
from gibson.utils.play import play
from gibson.core.render.profiler import Profiler
from scipy.signal import savgol_filter
from multiprocessing import Pool, TimeoutError
import gibson
import argparse
import os
import trimesh
import numpy as np
import gym
import sys
import time
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.path as mpath
import time
import pybullet as p
import yaml
import peakutils
import imageio
'''
try:
matplotlib.use('GTK3Agg')
import matplotlib.pyplot as plt
except Exception:
pass
'''
#import pyglet.window as pw
from collections import deque
#from pygame.locals import HWSURFACE, DOUBLEBUF, RESIZABLE, VIDEORESIZE
from threading import Thread
def load_obj(obj_path):
    """Parse a Wavefront .obj file into vertex and face arrays.

    Only ``v`` (vertex) and ``f`` (face) records are read. Face vertex
    references like ``3/1/2`` keep just the vertex index, and only the last
    three indices of each face line are used. Indices are converted from
    .obj's 1-based convention to 0-based.

    Returns a tuple ``(vertices, faces)`` of numpy arrays.
    """
    vertices = []
    triangles = []
    with open(obj_path) as obj_file:
        for raw_line in obj_file:
            prefix = raw_line[:2]
            if prefix == 'v ':
                vertices.append([float(tok) for tok in raw_line.strip().split()[1:4]])
            elif prefix == 'f ':
                triangles.append([int(tok.split('/')[0]) for tok in raw_line.strip().split()[-3:]])
    return np.array(vertices), np.array(triangles) - 1
def sample_faces(vertices, faces, n_samples=10**4):
    """
    Samples a point cloud on the surface of the model defined as vertices
    and faces. This function uses vectorized operations so it is fast at
    the cost of some memory.
    Parameters:
      vertices - n x 3 matrix
      faces - m x 3 matrix of vertex indices
      n_samples - positive integer
    Return:
      n_samples x 3 point cloud sampled on the surface
    Reference :
      [1] Barycentric coordinate system
      \begin{align}
        P = (1 - \sqrt{r_1})A + \sqrt{r_1} (1 - r_2) B + \sqrt{r_1} r_2 C
      \end{align}
    """
    # Face areas (half the cross-product norm would be the true area, but
    # only the relative weights matter) drive how many samples each face gets.
    vec_cross = np.cross(vertices[faces[:, 0], :] - vertices[faces[:, 2], :],
                         vertices[faces[:, 1], :] - vertices[faces[:, 2], :])
    face_areas = np.sqrt(np.sum(vec_cross ** 2, 1))
    face_areas = face_areas / np.sum(face_areas)
    # Sample exactly n_samples. First, oversample points and remove redundant
    # Contributed by Yangyan (<EMAIL>)
    n_samples_per_face = np.ceil(n_samples * face_areas).astype(int)
    # ceil() oversamples; randomly remove the excess from non-empty faces.
    floor_num = np.sum(n_samples_per_face) - n_samples
    if floor_num > 0:
        indices = np.where(n_samples_per_face > 0)[0]
        floor_indices = np.random.choice(indices, floor_num, replace=True)
        n_samples_per_face[floor_indices] -= 1
    n_samples = np.sum(n_samples_per_face)
    # Create a vector that contains the face indices
    sample_face_idx = np.zeros((n_samples, ), dtype=int)
    acc = 0
    for face_idx, _n_sample in enumerate(n_samples_per_face):
        sample_face_idx[acc: acc + _n_sample] = face_idx
        acc += _n_sample
    # Two uniform variates per sample drive the barycentric coordinates [1].
    r = np.random.rand(n_samples, 2);
    A = vertices[faces[sample_face_idx, 0], :]
    B = vertices[faces[sample_face_idx, 1], :]
    C = vertices[faces[sample_face_idx, 2], :]
    P = (1 - np.sqrt(r[:,0:1])) * A + np.sqrt(r[:,0:1]) * (1 - r[:,1:]) * B + \
        np.sqrt(r[:,0:1]) * r[:,1:] * C
    return P
def find_floors(obj):
    """Estimate z values separating the floors of a building mesh.

    Samples points on the mesh surface, histograms their z coordinates
    (floor/ceiling slabs show up as density peaks) and returns the
    midpoints between consecutive peaks.  Also writes a debug plot to
    ``hist.png`` in the working directory.

    Parameters:
        obj - (verts, faces) tuple as returned by load_obj
    Return:
        list of z values halfway between adjacent histogram peaks
    """
    verts, faces = obj
    points = sample_faces(verts, faces, n_samples=10**4)
    # mini/maxi are computed but unused; kept for debugging convenience.
    mini = points[:,2].min()
    maxi = points[:,2].max()
    hist = np.histogram(points[:,2], bins=30)
    plt.hist(points[:,2], bins=30)
    #data = savgol_filter(np.hstack((np.zeros(1), hist[0], np.zeros(1))), 9, 3)
    # Pad with zeros so peaks at the histogram edges remain detectable.
    data = np.hstack((np.zeros(1), hist[0], np.zeros(1)))
    peaks = peakutils.indexes(data, min_dist=5)
    # Undo the one-bin shift introduced by the zero padding.
    peaks = peaks - 1
    print(peaks.shape)
    print(peaks)
    plt.scatter(hist[1][peaks], hist[0][peaks])
    plt.savefig('hist.png')
    plt.clf()
    peak_values = hist[1][peaks]
    # Midpoint between each adjacent pair of peak bin edges.
    return [(a - b) / 2 + b
            for a, b in zip(peak_values[:-1], peak_values[1:])]
def find_cameras(camera_path):
    """Extract candidate floor heights from a camera-pose CSV file.

    Each line of the file is ``uuid,x,y,z,qw,qx,qy,qz``.  Camera positions
    are clustered on their z coordinate (0.1 tolerance against the first
    member of each cluster); clusters with more than 5 members are assumed
    to correspond to a scanned floor.

    Parameters:
        camera_path - path to the camera_poses.csv file
    Return:
        list of z coordinates, one per detected floor (the z of the first
        camera in each qualifying cluster)
    """
    cameras = []
    with open(camera_path) as f:
        for line in f:
            fields = line.strip().split(',')
            # Only the xyz position is needed; the uuid and the quaternion
            # fields are ignored (previously parsed into unused locals).
            cameras.append([float(v) for v in fields[1:4]])
    clusters = []
    for cam in cameras:
        found = False
        for i, (cc, v) in enumerate(clusters):
            if abs(cam[2] - cc[2]) < 0.1:
                found = True
                clusters[i][1] += 1
                break
        if not found:
            clusters.append([cam, 1])
    camera_pos = [c[2] for (c, v) in clusters
                  if v > 5]
    return camera_pos
def make_floorplan(mesh, height=0.5):
    """Cut the mesh with a horizontal plane and return a 2D cross section.

    Parameters:
        mesh - a trimesh.Trimesh with z as the vertical axis
        height - z coordinate of the cutting plane
    Return:
        a planar (2D) path with the building outline at that height
    """
    section = mesh.section(plane_origin=(0, 0, height),
                           plane_normal=[0, 0, 1])
    # to_planar() projects the 3D section into 2D; the returned transform
    # back to 3D is not needed here.
    planar, _ = section.to_planar()
    return planar
def draw_floorplan(floorplan):
    # Delegate to the path object's own show(); with the Agg backend this
    # draws into the current figure (saved later via plt.savefig).
    floorplan.show()
def sample_floorplan(floorplan, num_samples, min_dist, boundary_dist):
    """Rejection-sample up to ``num_samples`` points inside a floorplan.

    Points are drawn uniformly in the bounding box of the floorplan
    vertices and kept only if they fall inside the outline (as tested by
    ``matplotlib.path.Path.contains_point`` with radius ``boundary_dist``)
    and lie at least ``min_dist`` from every previously accepted point.
    Sampling gives up after 1000 consecutive failed draws, so fewer than
    ``num_samples`` points may be returned.

    Parameters:
        floorplan - planar path object; only its ``.vertices`` are used
        num_samples - maximum number of points to draw
        min_dist - minimum pairwise distance between accepted points
        boundary_dist - radius passed to Path.contains_point; negative
            values require points to lie strictly inside the outline
    Return:
        k x 2 array of accepted points, k <= num_samples
    """
    # The original built `paths` from the floorplan entities and then
    # overwrote it with floorplan.vertices; the dead loop is removed.
    path = mpath.Path(floorplan.vertices)
    verts = floorplan.vertices
    min_x = np.min(verts[:, 0])
    max_x = np.max(verts[:, 0])
    min_y = np.min(verts[:, 1])
    max_y = np.max(verts[:, 1])
    used = 0
    used_points = np.zeros((num_samples, 2))
    # Budget of consecutive failed draws, reset on every success.
    # BUG FIX: the original `continue`d past the decrement when the point
    # fell outside the outline, which could loop forever on degenerate
    # floorplans; the budget now shrinks on every attempt.
    timeout = 1000
    while used < num_samples and timeout > 0:
        timeout -= 1
        point = np.array(np.random.random(2))
        point[0] = point[0] * (max_x - min_x) + min_x
        point[1] = point[1] * (max_y - min_y) + min_y
        if not path.contains_point(point, radius=boundary_dist):
            continue
        dist = np.linalg.norm(used_points[:used, :] - point, axis=1)
        if used == 0 or np.all(dist >= min_dist):
            used_points[used, :] = point
            used += 1
            timeout = 1000
    return used_points[:used, :]
def draw_points(points):
    # Scatter the sampled k x 2 positions on top of the current figure.
    plt.scatter(x=points[:,0], y=points[:,1])
def normalize(v, tolerance=0.00001):
    """Scale vector ``v`` to unit length.

    Parameters:
        v - iterable of numeric components
        tolerance - if the squared magnitude is already within this
            tolerance of 1.0, ``v`` is returned unchanged
    Return:
        a tuple of components (or ``v`` itself if already normalized)
    """
    mag2 = sum(n * n for n in v)
    if abs(mag2 - 1.0) > tolerance:
        # BUG FIX: the original called the bare name `sqrt`, which is
        # never imported in this module and raised NameError at runtime.
        mag = np.sqrt(mag2)
        v = tuple(n / mag for n in v)
    return v
def axisangle_to_q(v, theta):
    """Build a quaternion (w, x, y, z) for a rotation of ``theta`` radians
    about the axis ``v`` (which is normalized first)."""
    axis_x, axis_y, axis_z = normalize(v)
    half_angle = theta / 2
    sin_half = np.sin(half_angle)
    return (np.cos(half_angle),
            axis_x * sin_half,
            axis_y * sin_half,
            axis_z * sin_half)
def q_mult(q1, q2):
    """Hamilton product of two quaternions given as (w, x, y, z) tuples."""
    aw, ax, ay, az = q1
    bw, bx, by, bz = q2
    return (aw * bw - ax * bx - ay * by - az * bz,
            aw * bx + ax * bw + ay * bz - az * by,
            aw * by + ay * bw + az * bx - ax * bz,
            aw * bz + az * bw + ax * by - ay * bx)
def qv_mult(q1, v1):
    """Rotate vector ``v1`` (a 3-tuple) by quaternion ``q1`` = (w, x, y, z).

    Computes q * (0, v) * conj(q) and returns the vector part.
    """
    # BUG FIX: the original called q_conjugate(q1), but no such helper
    # exists anywhere in this module (NameError at runtime); the conjugate
    # is built inline instead.
    w, x, y, z = q1
    q_conj = (w, -x, -y, -z)
    q2 = (0.0,) + tuple(v1)
    return q_mult(q_mult(q1, q2), q_conj)[1:]
def render_model(config_path, model, output_directory):
    """Render RGB views sampled throughout a Gibson model.

    For every floor detected from the scanning-camera heights, a floorplan
    cross-section is cut, camera positions are sampled inside it, and
    ``num_views_per_camera`` randomly oriented views are rendered at each
    position.  Floorplan debug images are written to
    ``output_directory/floorplans`` and rendered views to
    ``output_directory/views``.

    Parameters:
        config_path - path to the base YAML environment config
        model - Gibson model id (e.g. 'Klickitat')
        output_directory - destination root for floorplans/ and views/
    """
    start = time.time()
    print('Rendering {:s}...'.format(model))
    # Load the config file for finding the model of interest
    with open(config_path, 'r') as f:
        # NOTE(review): yaml.load without an explicit Loader is unsafe on
        # untrusted input and deprecated in PyYAML >= 5.1 -- confirm the
        # PyYAML version pinned by this project before changing.
        config = yaml.load(f)
    config['model_id'] = model
    # Get the path to the model directory
    model_path = gibson.data.datasets.get_model_path(config['model_id'])
    # mesh_z_up is the mesh where the z dimension is normal to the
    # floor/ceiling of the building
    mesh_path = os.path.join(model_path, 'mesh_z_up.obj')
    # Load the mesh
    mesh = trimesh.load_mesh(mesh_path)
    # The camera poses used to scan the mesh.
    posefile = os.path.join(model_path, 'camera_poses.csv')
    heights = find_cameras(posefile)
    floorplans_directory = os.path.join(output_directory, 'floorplans')
    views_directory = os.path.join(output_directory, 'views')
    os.makedirs(floorplans_directory, exist_ok=True)
    os.makedirs(views_directory, exist_ok=True)
    # floor index -> {'points': k x 2 positions, 'z': camera height}
    sampled_cameras = {}
    for i, h in enumerate(heights):
        #print('Height: {:3f}'.format(h))
        #print('Making floorplan...')
        floorplan = make_floorplan(mesh, height=h)
        if len(floorplan.vertices) == 0:
            # print('Skipped due to lack of floorplan')
            continue
        #print('Floorplan segments: {:d}'.format(len(floorplan)))
        #print('Sampling floorplan...')
        points = sample_floorplan(floorplan, 1000, 1.0, -0.5)
        #print('# of points sampled: {:d}'.format(points.shape[0]))
        sampled_cameras[i] = {
            'points': points,
            'z': h
        }
        # Save a debug image of the floorplan with the sampled positions.
        plt.figure()
        draw_floorplan(floorplan)
        draw_points(points)
        path = os.path.join(floorplans_directory, 'floor{:02d}.png'.format(i))
        plt.savefig(path)
        plt.clf()
    # CURRENT HACKY STRATEGY:
    # For each floor, for each camera, randomly select 3 camera orientations
    # IDEAL STRATEGY:
    # For each floor, select a set of cameras and camera orientations such that
    # each triangle is visible by at least 3 cameras
    num_views_per_camera = 3
    pitch_range = (30.0 / 180) * np.pi
    env = VirtualCameraEnv(config = config)
    obs = env.reset()
    total_images = 0
    poses = []
    for floor, data in sampled_cameras.items():
        camera_positions = data['points']
        h = data['z']
        image_idx = 0
        for i in range(camera_positions.shape[0]):
            pos_x = camera_positions[i,0]
            pos_y = camera_positions[i,1]
            pos_z = h
            for n in range(num_views_per_camera):
                # Retry orientations that look straight into nearby
                # geometry (too many near-zero depth pixels).
                num_tries = 0
                while num_tries < 10:
                    pitch = (np.random.random(1)[0] - 0.5) * pitch_range
                    yaw = np.random.random(1)[0] * 2 * np.pi
                    roll = 0
                    # Reference vector
                    q = axisangle_to_q((1, 0, 0), 0)
                    # Rotate by yaw
                    # NOTE(review): yaw is applied about the x axis here,
                    # same axis as the reference -- confirm this is the
                    # intended convention rather than the z axis.
                    q = q_mult(axisangle_to_q((1, 0, 0), yaw), q)
                    # Rotate by pitch
                    q = q_mult(axisangle_to_q((0, 1, 0), pitch), q)
                    view = np.array([pos_x, pos_y, pos_z, q[0], q[1], q[2], q[3]])
                    obs, _, _, _ = env.step(view) # x y z quat
                    depth = obs["depth"]
                    median_depth = np.median(depth)
                    mx = np.max(depth)
                    mi = np.min(depth)
                    shape_size = depth.shape[0] * depth.shape[1]
                    if np.count_nonzero(depth < 0.1) < shape_size * 0.1:
                        break
                    num_tries += 1
                if num_tries == 10:
                    continue
                poses.append(view)
                filled = obs["rgb_filled"]
                prefilled = obs["rgb_prefilled"]
                path = os.path.join(
                    views_directory,
                    'floor{:02d}_{:06d}.png'.format(floor, image_idx))
                imageio.imwrite(path, filled)
                image_idx += 1
        total_images += image_idx
    env.reset()
    env.close()
    print('Done rendering {:s}. Images: {:d}. Time: {:2f}'.format(
        model, total_images, time.time() - start))
if __name__ == '__main__':
config_path = os.path.join(
os.path.dirname(os.path.realpath(__file__)), '..', 'configs', 'play',
'test_camera.yaml')
train_models = [
'Klickitat',
'Marstons',
'Hanson',
'Lakeville',
'Merom',
'Lindenwood',
'Pinesdale',
'Forkland',
'Wainscott',
'Newfields',
'Ranchester',
'Hiteman',
'Leonardo',
'Onaga',
'Pomaria',
'Stockman',
'Tolstoy',
'Cosmos',
'Benevolence',
'Woodbine',
'Beechwood',
'Shelbyville',
'Mifflinburg',
'Coffeen',
'Allensville',
]
val_models = [
'Darden',
'Markleeville',
'Wiconisco',
'Corozal',
'Collierville',
]
test_models = [
'Uvalda',
'Muleshoe',
'Noxapater',
'McDade',
'Ihlen',
]
all_models = train_models + val_models + test_models
pool = Pool(processes=1)
start = time.time()
for model in all_models:
output_directory = '/n/scanner/datasets/gibson_scene/{:s}'.format(model)
#pool.apply_async(render_model, (config_path, model, output_directory))
try:
render_model(config_path, | |
<filename>python/albireolib/image/general.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# @Author: <NAME> (<EMAIL>)
# @Date: 2017-10-12
# @Filename: general.py
# @License: BSD 3-clause (http://www.opensource.org/licenses/BSD-3-Clause)
#
# @Last modified by: <NAME> (<EMAIL>)
# @Last modified time: 2018-08-08 16:03:51
from __future__ import absolute_import, division, print_function
import itertools
import re
import warnings
import astropy.convolution
import astropy.modeling
import astropy.modeling.models
import astropy.wcs
import numpy
__all__ = ['crop_hdu', 'replace_wcs', 'fwhm_to_sigma', 'sigma_to_fwhm',
'gaussian_kernel_from_fwhm', 'gaussian_filter', 'fit_gaussian',
'CCD', 'SyntheticImage']
def crop_hdu(hdu, xmin, xmax, ymin, ymax, return_wcs=True, ignore_warnings=True):
    """Crop an image HDU and, optionally, recompute its WCS.

    A copy of ``hdu`` is trimmed to ``[ymin:ymax, xmin:xmax]``.  When
    ``return_wcs=True`` (the default) a ``(new_hdu, new_wcs)`` tuple is
    returned with the WCS reference pixel shifted into the cropped frame,
    or ``(new_hdu, None)`` when the header carries no WCS information.
    Otherwise only the cropped `~astropy.io.fits.ImageHDU` is returned.

    Parameters:
        hdu (~astropy.io.fits.ImageHDU):
            The HDU to crop.
        xmin,xmax,ymin,ymax (int):
            The section of ``hdu`` to keep.
        return_wcs (bool):
            Whether to also compute the WCS of the cropped image.
        ignore_warnings (bool):
            If *True*, silence `~astropy.wcs.FITSFixedWarning` while the
            WCS is being parsed.
    """
    cropped = hdu.copy()
    shape = cropped.data.shape
    assert xmin > 0 and ymin > 0 and xmax < shape[1] and ymax < shape[0], \
        'invalid crop region.'
    region = cropped.data.copy()[ymin:ymax, xmin:xmax]
    cropped.data = numpy.array(region)
    if return_wcs is False:
        return cropped
    with warnings.catch_warnings():
        if ignore_warnings:
            warnings.simplefilter('ignore', astropy.wcs.FITSFixedWarning)
        # Parse any WCS definitions present in the (possibly noisy) header.
        wcs_matches = astropy.wcs.find_all_wcs(cropped.header)
        if len(wcs_matches) == 0:
            return cropped, None
        adjusted = wcs_matches[0].deepcopy()
        # Shift the reference pixel into the cropped coordinate frame.
        adjusted.wcs.crpix[0] -= xmin
        adjusted.wcs.crpix[1] -= ymin
        return cropped, adjusted
def replace_wcs(hdu, wcs):
    """Replace the WCS information in an HDU header.

    Deletes every key of the current WCS definition (including legacy
    ``PC001002``-style rotation-matrix keys) from the header of the input
    `~astropy.io.fits.ImageHDU`, then appends the cards of the new
    definition.

    Parameters
    ----------
    hdu : `~astropy.io.fits.ImageHDU`:
        The HDU whose WCS definition will be replaced (modified in place).
    wcs : `~astropy.wcs.WCS`:
        The new WCS definition.
    """
    # Old-convention rotation-matrix keys such as PC001002.
    legacy_pc = re.compile('PC0*[0-9]{1}0*[0-9]{1}')
    legacy_keys = [key for key in hdu.header.keys() if legacy_pc.match(key)]
    new_header = wcs.to_header()
    for key in list(new_header.keys()) + legacy_keys:
        if key in hdu.header:
            del hdu.header[key]
    # Append the new WCS cards to the cleaned header.
    hdu.header.extend(new_header.cards)
    return hdu
def fwhm_to_sigma(fwhm):
    """Return the Gaussian sigma corresponding to a FWHM."""
    # For a Gaussian, FWHM = 2 * sqrt(2 ln 2) * sigma.
    half_width = fwhm / 2
    return half_width / numpy.sqrt(2 * numpy.log(2))
def sigma_to_fwhm(sigma):
    """Return the Gaussian FWHM corresponding to a sigma."""
    # Inverse of fwhm_to_sigma: FWHM = 2 * sqrt(2 ln 2) * sigma.
    doubled = sigma * 2
    return doubled * numpy.sqrt(2 * numpy.log(2))
def gaussian_kernel_from_fwhm(fwhm, pixel_scale=1, **kwargs):
    """Return a 2D Gaussian kernel for a given FWHM.

    Parameters
    ----------
    fwhm : `float`
        The FWHM (seeing) of the Gaussian kernel, in arcsec.
    pixel_scale : `float`
        The pixel scale, in arcsec, used to convert the sigma to pixels.
    kwargs : `dict`
        Extra parameters forwarded to
        `~astropy.convolution.Gaussian2DKernel`.

    Returns
    -------
    kernel : `~astropy.convolution.Gaussian2DKernel`
        An astropy kernel matching the input FWHM.
    """
    sigma_pix = fwhm_to_sigma(fwhm) / pixel_scale
    return astropy.convolution.Gaussian2DKernel(sigma_pix, **kwargs)
def gaussian_filter(stddev, array):
    """Convolve ``array`` with a 2D Gaussian kernel of the given sigma."""
    kernel = astropy.convolution.Gaussian2DKernel(stddev)
    return astropy.convolution.convolve(array, kernel)
def fit_gaussian(array):
    """Fits a 2D gaussian to an array of data.

    The initial guess centres the Gaussian on the array with unit
    amplitude and unit sigmas; a Levenberg-Marquardt least-squares fit
    refines it.  Returns the fitted
    `~astropy.modeling.functional_models.Gaussian2D` model.
    """
    shape = array.shape
    # NOTE(review): shape[0] (the row/first-axis extent) is assigned to
    # xmean and fed to x_mean.  This is only consistent because mgrid
    # below makes xx vary along axis 0 as well -- confirm the intended
    # x/y convention with callers before changing either line.
    xmean, ymean = numpy.array(shape) / 2.
    xx, yy = numpy.mgrid[:shape[0], :shape[1]]
    g_init = astropy.modeling.models.Gaussian2D(amplitude=1., x_mean=xmean, y_mean=ymean,
                                                x_stddev=1., y_stddev=1.)
    f2 = astropy.modeling.fitting.LevMarLSQFitter()
    gg = f2(g_init, xx, yy, array)
    return gg
class CCD(object):
    """Parameters describing a CCD chip.

    Parameters
    ----------
    shape : `tuple`
        The shape of the image to generate.
    pixel_size : `float`
        The pixel size, in microns (square pixels are assumed).
    read_noise : `float`
        The RMS of the read noise, in electrons.
    gain : `float`
        The gain, in electrons per ADU.
    name : `str` or None
        An optional identifier for the chip (model, serial number, ...).
    """

    def __init__(self, shape, pixel_size, read_noise=1.0, gain=1.0, name=None):
        self.name = name
        self.shape = shape
        self.pixel_size = pixel_size
        self.read_noise = read_noise
        self.gain = gain
class SyntheticImage(object):
"""Creates and image with Gaussian features, bias, and noise.
Parameters
----------
ccd : .CCD
A `.CCD` object describing the chip that produces this image.
xy : list
A list of tuples in which each tuple are the ``(x, y)`` coordinates of
the Gaussian sources.
sigma_x : list or float
A list of floats with the same length as ``xy`` in which each element
is the x-axis sigma for the Gaussian sources. Alternatively, a single
float which will be applied to all the sources.
sigma_y : list or float
As ``sigma_x`` but for the y axis.
fluxes : list or float
The total flux of the Gaussian sources. Same format as ``sigma_x``.
peaks : list or float
The peak of each of the Gaussian sources. Same format as ``sigma_x``.
Cannot be defined at the same time as ``fluxes``.
bias : float or None
The bias level of the image. If ``None``, no bias level will be added.
cosmic_p : float
The p factor for the binomial distribution used to model cosmic rays.
exp_time : float
The exposure time, in seconds. Used as a multiplicative factor for the
log-normal distribution to estimate the total dark current.
dark_sigma : float
The sigma of the log-normal dark current distribution.
sample_box : int
The length of the box used to sample the Gaussian source.
Attributes
----------
signal : `numpy.ndarray`
The array representing the image signal.
noise : `numpy.ndarray`
The array representing the image noise.
sources : `list`
A list of `~astropy.modeling.functional_models.Gaussian2D` objects that
have been added to the image.
"""
    def __init__(self, ccd, xy=None, sigma_x=None, sigma_y=None,
                 fluxes=None, peaks=None, bias=400., read_noise=1.,
                 cosmic_p=0.005, exp_time=1., dark_sigma=5., sample_box=100):
        # NOTE(review): read_noise, cosmic_p, exp_time and dark_sigma are
        # accepted but not used in this constructor (the read noise comes
        # from ccd.read_noise) -- confirm whether other methods use them.
        self.ccd = ccd
        # Signal/noise arrays use the reversed CCD shape.
        self.signal = numpy.zeros(self.ccd.shape[::-1], dtype=numpy.float32)
        self.noise = numpy.zeros(self.ccd.shape[::-1], dtype=numpy.float32)
        # Add a bias level
        self.bias = 0.0
        if bias is not None:
            self.add_bias_level(bias)
        self.sample_box = sample_box
        assert self.sample_box % 2 == 0, 'sample_box must be even.'
        # We use a small grid to make the computing of the source Gaussian faster
        # and define the meshgrid here to avoid having to repeat this for each source
        self._meshgrid = numpy.mgrid[0:sample_box, 0:sample_box]
        # Add sources
        self.sources = []
        if xy is not None:
            assert sigma_x is not None, 'sigma_x cannot be None'
            # Broadcast a scalar sigma_x to one value per source.
            sigma_x = numpy.atleast_1d(sigma_x)
            sigma_x = numpy.tile(sigma_x, len(xy)) if len(sigma_x) == 1 else sigma_x
            if sigma_y is not None:
                sigma_y = numpy.atleast_1d(sigma_y)
                sigma_y = numpy.tile(sigma_y, len(xy)) if len(sigma_y) == 1 else sigma_y
            else:
                # Circular sources by default.
                sigma_y = sigma_x
            assert sigma_y is None or len(sigma_x) == len(sigma_y), \
                'invalid length for sigma_x or sigma_y'
            assert fluxes is not None or peaks is not None, \
                'either fluxes or peaks need to be defined'
            if peaks is not None:
                peaks = numpy.atleast_1d(peaks)
                peaks = numpy.tile(peaks, len(xy)) if len(peaks) == 1 else peaks
                assert fluxes is None, 'fluxes cannot be active at the same time as peaks'
            if fluxes is not None:
                fluxes = numpy.atleast_1d(fluxes)
                fluxes = numpy.tile(fluxes, len(xy)) if len(fluxes) == 1 else fluxes
                assert peaks is None, 'peaks cannot be active at the same time as fluxes'
                # Convert to peaks
                peaks = fluxes / (2. * numpy.pi * sigma_x * sigma_y)
            for ii in range(len(xy)):
                self.add_source(xy[ii], peaks[ii], sigma_x[ii], sigma_y[ii])
        if self.ccd.read_noise is not None:
            self.noise += self.get_read_noise()
    @property
    def image(self):
        """Returns the total image: the signal plus its associated noise."""
        return self.signal + self.noise
    @property
    def snr(self):
        """Returns the signal to noise ratio for each element in the image.

        The division is element-wise; zero-noise pixels produce inf/nan
        with the usual numpy warnings.
        """
        return self.signal / self.noise
def get_read_noise(self):
"""Returns an array of read noise assuming a normal distribution."""
read_noise_adu = self.ccd.read_noise / self.ccd.gain
return numpy.random.normal(scale=read_noise_adu, size=self.image.shape)
def add_bias_level(self, bias):
"""Adds a bias level to the image.
A ``read_noise`` noise is added to the ``bias`` value. The attribute
`~bias` is updated with the bias level added by this method.
Parameters
----------
bias : float
The bias level to add.
"""
self.bias += bias
self.signal += bias
def add_source(self, xy, peak, sigma_x, sigma_y):
"""Adds a series of Gaussian sources with noise to the image.
Parameters
----------
xy : tuple
A tuple containing the ``(x, y)`` coordinates of the source.
peak : float
The height of the peak of the Gaussian.
sigma_x : float
The sigma across the x axis.
sigma_y : float
The sigma across the y axis.
Returns
-------
out : `~astropy.modeling.functional_models.Gaussian2D`
The | |
<reponame>0xCAF2/calcium-py
# Used for py3ca.py
class Engine:
    """Executes Calcium code (a list of serialized code lines) step by step."""
    def __init__(self, code_list):
        # Environment and Parser are defined elsewhere in this module.
        self.env = Environment(code_list)
        # Line indices at which step() reports RESULT_BREAKPOINT.
        self.breakpoints = []
        self.parser = Parser()
    def run(self):
        """Run until termination (returns True) or a breakpoint (False)."""
        while True:
            result = self.step()
            if result == RESULT_EXECUTED:
                continue
            elif result == RESULT_BREAKPOINT:
                return False
            elif result == RESULT_TERMINATED:
                return True
    def step(self):
        """Execute one line of code and return a RESULT_* status code.

        Comment lines and 'ifs' wrapper lines are executed transparently
        so the engine always pauses on a meaningful statement.
        """
        last_index = len(self.env.code)
        last_index -= 1
        if self.env.address.indent == 0:
            # Indent 0 means execution has left every block: the last
            # line must be the end-of-code marker.
            end_of_code = self.parser.parse(self.env.code[last_index])
            is_end_of_code = isinstance(end_of_code, EndOfCode)
            if not is_end_of_code:
                raise InvalidEndOfCodeError()
            end_of_code.execute(self.env)
            return RESULT_TERMINATED
        else:
            if self.env.address.line == last_index:
                return RESULT_TERMINATED
            line = self.env.code[self.env.address.line]
            command = self.parser.parse(line)
            command.execute(self.env)
            is_end_of_code = isinstance(command, EndOfCode)
            if is_end_of_code:
                return RESULT_TERMINATED
            self.env.skip_to_next_line()
            next_line = self.env.code[self.env.address.line]
            keyword = next_line[INDEX_KEYWORD]
            # Step over comments and 'ifs' wrappers without stopping.
            while keyword == KEYWORD_COMMENT or keyword == KEYWORD_IFS:
                command = self.parser.parse(next_line)
                command.execute(self.env)
                self.env.skip_to_next_line()
                next_line = self.env.code[self.env.address.line]
                keyword = next_line[INDEX_KEYWORD]
            if self.env.address.line in self.breakpoints:
                return RESULT_BREAKPOINT
            else:
                return RESULT_EXECUTED
# Keyword
# Opcode strings used in serialized Calcium code lines, either as the
# statement keyword (INDEX_KEYWORD) or as the head of an expression
# (INDEX_EXPRESSION_KEYWORD).
KEYWORD_ASSIGNMENT = '='
KEYWORD_ADDITION = '+'
KEYWORD_SUBTRACTION = '-'
KEYWORD_MULTIPLICATION = '*'
KEYWORD_EXPONENTIATION = '**'
KEYWORD_DIVISION = '/'
KEYWORD_FLOOR_DIVISION = '//'
KEYWORD_REMAINDER = '%'
KEYWORD_COMPOUND_ADDITION = '+='
KEYWORD_COMPOUND_SUBTRACTION = '-='
KEYWORD_COMPOUND_MULTIPLICATION = '*='
KEYWORD_EQUAL = '=='
KEYWORD_NOT_EQUAL = '!='
KEYWORD_LESS_THAN = '<'
KEYWORD_LESS_THAN_OR_EQUAL = '<='
KEYWORD_GREATER_THAN = '>'
KEYWORD_GREATER_THAN_OR_EQUAL = '>='
KEYWORD_AND = 'and'
KEYWORD_OR = 'or'
KEYWORD_IS = 'is'
KEYWORD_IS_NOT = 'is not'
KEYWORD_IN = 'in'
KEYWORD_NOT_IN = 'not in'
KEYWORD_BIT_AND = '&'
KEYWORD_BIT_OR = '|'
KEYWORD_BIT_XOR = '^'
KEYWORD_LEFT_SHIFT = '<<'
KEYWORD_RIGHT_SHIFT = '>>'
KEYWORD_NOT = 'not'
KEYWORD_NEGATIVE = '-_'
KEYWORD_BIT_NOT = '~'
KEYWORD_IFS = 'ifs'
KEYWORD_IF = 'if'
KEYWORD_ELIF = 'elif'
KEYWORD_ELSE = 'else'
KEYWORD_FOR_RANGE = 'for range'
KEYWORD_FOR_EACH = 'for each'
KEYWORD_WHILE = 'while'
KEYWORD_BREAK = 'break'
KEYWORD_CONTINUE = 'continue'
KEYWORD_FUNC_DEF = 'def'
KEYWORD_CALL = 'call'
KEYWORD_RETURN = 'return'
KEYWORD_CLASS_DEF = 'class'
KEYWORD_TRY = 'try'
KEYWORD_EXCEPT = 'except'
KEYWORD_RAISE = 'raise'
KEYWORD_VARIABLE = 'var'
KEYWORD_ATTRIBUTE = 'attr'
KEYWORD_SUBSCRIPT = 'sub'
KEYWORD_COMMENT = '#'
KEYWORD_PASS = 'pass'
KEYWORD_END_OF_CODE = 'end'
# Index
# Positions of the fields within a serialized code line (an array).
INDEX_INDENT = 0
INDEX_OPTIONS = 1
INDEX_KEYWORD = 2
INDEX_ASSIGNMENT_LHS = 3 # Left Hand Side
INDEX_ASSIGNMENT_RHS = 4 # Right Hand Side
INDEX_CONDITION = 3
INDEX_FOR_RANGE_VARIABLE_NAME = 3
INDEX_FOR_RANGE_VALUES = 4
INDEX_FOR_EACH_ELEMENT_NAME = 3
INDEX_FOR_EACH_ITERABLE_NAME = 4
INDEX_FUNC_DEF_FUNC_NAME = 3
INDEX_FUNC_DEF_PARAMETERS = 4
INDEX_CALL_LHS = 3
INDEX_CALL_REFERENCE = 4
INDEX_CALL_ARGS = 5 # Arguments
INDEX_RETURN_VALUE = 3
INDEX_CLASS_DEF_CLASS_NAME = 3
INDEX_CLASS_DEF_SUPERCLASS_NAME = 4
INDEX_EXCEPT_TYPE_NAME = 3
INDEX_EXCEPT_OBJ_NAME = 4
INDEX_RAISE_EXCEPTION = 3
INDEX_RAISE_ARGS = 4
# Positions of fields within a serialized expression (also an array).
INDEX_EXPRESSION_KEYWORD = 0
INDEX_VARIABLE_NAME = 1
INDEX_ATTRIBUTE_OBJECT_NAME = 1
INDEX_ATTRIBUTE_PROPERTY_NAMES = 2
INDEX_SUBSCRIPT_REFERENCED_OBJECT = 1
INDEX_SUBSCRIPT_INDEX_EXPR = 2
INDEX_LEFT_OPERAND = 1
INDEX_RIGHT_OPERAND = 2
INDEX_UNARY_OPERAND = 1
# Result
# Status codes returned by Engine.step() / consumed by Engine.run().
RESULT_TERMINATED = 0
RESULT_EXECUTED = 1
RESULT_BREAKPOINT = 2
class Address:
    """A position in the code: nesting indent level plus line index."""
    def __init__(self, indent, line):
        self.indent = indent
        self.line = line
def _copy_address(point):
    """Return a fresh Address duplicating *point*'s indent and line."""
    return Address(point.indent, point.line)
# BlockKind
# Discriminators stored in Block.kind: which construct opened the block.
BLOCK_KIND_IFS = 0
BLOCK_KIND_IF_ELIF_ELSE = 1
BLOCK_KIND_FOR_RANGE = 2
BLOCK_KIND_FOR_EACH = 3
BLOCK_KIND_WHILE = 4
BLOCK_KIND_FUNC_CALL = 5
BLOCK_KIND_CLASS_DEF = 6
BLOCK_KIND_TRY = 7
BLOCK_KIND_EXCEPT = 8
class Block:
    """A runtime record of an entered block (loop body, call frame, etc.)."""
    def __init__(self, kind, address, begin, end):
        # One of the BLOCK_KIND_* discriminators.
        self.kind = kind
        # Snapshot of where the block was entered (copied so later moves
        # of the caller's address do not affect this record).
        self.address = _copy_address(address)
        self.begin = begin
        self.end = end
class Namespace:
    """A name-to-object mapping with an optional enclosing scope."""

    def __init__(self, nesting_scope, dictobj):
        # nesting_scope may be None for the outermost scope.
        self.nesting_scope = nesting_scope
        self.dictobj = dictobj

    def register(self, name, obj):
        """Bind *name* to *obj* in this namespace."""
        self.dictobj[name] = obj

    def lookup(self, name):
        """Return the object bound to *name*.

        Raises NameNotFoundError when the name has no binding here.
        """
        if name not in self.dictobj:
            raise NameNotFoundError(name)
        return self.dictobj[name]
class GlobalScope(Namespace):
    # Marker subclass: the outermost (module-level) namespace.
    pass
class FuncScope(Namespace):
    # Marker subclass: the local namespace of a function call.
    pass
class ClassScope(Namespace):
    # Namespace used while executing a class body; its dict becomes the
    # class's attribute table.
    def get_attr(self):
        # Expose the underlying dict as the attribute mapping.
        return self.dictobj
class Inaccessible:
    """Base for runtime objects that evaluate to themselves but expose no
    get_attr/set_attr to interpreted code."""
    def evaluate(self, env):
        # Evaluating the object yields the object itself.
        return self
class BuiltinFuncObj(Inaccessible):
    """A native (host-Python) function exposed to interpreted code.

    body is a callable taking (args, env), as built by _get_attribute.
    """
    def __init__(self, name, body):
        self.name = name
        self.body = body
        self.selfclass = builtin_type.builtin_function_or_method
class Accessible:
    """Base for runtime objects that evaluate to themselves and support
    attribute access (get_attr/set_attr in subclasses)."""
    def evaluate(self, env):
        # Evaluating the object yields the object itself.
        return self
class FuncObj(Accessible):
    """A user-defined function: name, parameters, defining scope, and the
    address of its body."""
    def __init__(self, name, params, nesting_scope, address):
        self.name = name
        self.params = params
        self.nesting_scope = nesting_scope
        # Copied so later moves of the caller's address don't affect us.
        self.address = _copy_address(address)
        self.attributes = {}
        self.selfclass = builtin_type.function
    def get_attr(self, name):
        # Raises KeyError when the attribute was never set on the function.
        return self.attributes[name]
    def set_attr(self, name, value):
        # Function attributes can always be set; True signals success.
        self.attributes[name] = value
        return True
class MethodObj(Inaccessible):
    """A function bound to an instance (the result of instance.method)."""
    def __init__(self, instance, funcobj):
        self.instance = instance
        self.funcobj = funcobj
        self.selfclass = builtin_type.instance_method
class ClassObj(Accessible):
    """A class object with single inheritance and an attribute table.

    Builtin classes are constructed with ``attributes=None``, which makes
    them read-only through set_attr.
    """

    def __init__(self, name, superclass, attributes):
        self.name = name
        self.superclass = superclass
        self.attributes = attributes

    def get_attr(self, name):
        """Return an attribute, searching this class then the superclass chain."""
        if name not in self.attributes:
            # Delegate upward; fails when the chain runs out.
            return self.superclass.get_attr(name)
        return self.attributes[name]

    def set_attr(self, name, value):
        """Set a class attribute; returns False for read-only builtin classes."""
        if self.attributes is None:
            return False
        self.attributes[name] = value
        return True

    def get_description(self):
        return '<class ' + self.name + '>'
class Instance(Accessible):
    """An instance of a user-defined class (ClassObj).

    Attribute lookup consults the per-instance dict first, then the class
    chain; class-level functions come back bound as MethodObj.
    """
    def __init__(self, selfclass):
        # The ClassObj this instance was created from.
        self.selfclass = selfclass
        self.attributes = {}
    def get_attr(self, name):
        """Return attribute *name*, falling back to the class chain."""
        try:
            attr = self.attributes[name]
            return attr
        except KeyError:
            # Narrowed from a bare `except:`: only a missing key should
            # trigger the class-chain fallback; other errors now
            # propagate instead of being silently retried.
            classattr = self.selfclass.get_attr(name)
            is_funcobj = isinstance(classattr, FuncObj)
            if is_funcobj:
                # Bind class-level functions to this instance.
                methodobj = MethodObj(self, classattr)
                return methodobj
            else:
                return classattr
    def set_attr(self, name, value):
        """Bind an instance attribute; always succeeds (returns True)."""
        self.attributes[name] = value
        return True
class Super(Accessible):
    """Proxy implementing super(): method lookup starting above a class.

    Built from the class in whose body super() appeared plus the concrete
    instance; get_attr walks the single-inheritance chain of the
    instance's class to find that class, then looks the method up one
    level above it.
    """
    def __init__(self, classobj, instance):
        self.classobj = classobj
        self.instance = instance
        self.selfclass = builtin_type.super
    def get_attr(self, name):
        # Locate self.classobj (by name) in the instance's class chain.
        currentclass = self.instance.selfclass
        while True:
            if currentclass == None:
                # Ran off the top of the chain without finding the class.
                raise SuperCallFailedError()
            if self.classobj.name != currentclass.name:
                currentclass = currentclass.superclass
                continue
            else:
                superclass = currentclass.superclass
                if superclass == None:
                    raise SuperCallFailedError()
                funcobj = superclass.get_attr(name)
                is_funcobj = isinstance(funcobj, FuncObj)
                if funcobj == None or not is_funcobj:
                    # Only functions may be fetched through super().
                    raise SuperCallFailedError()
                # Bind the inherited function to the original instance.
                methodobj = MethodObj(self.instance, funcobj)
                return methodobj
    def set_attr(self, name, value):
        # Assignment through super() is not allowed.
        return False
class Variable:
    """A reference to a named binding in the current environment."""

    def __init__(self, name):
        self.name = name

    def assign(self, obj, env):
        """Bind this variable's name to *obj* in the environment."""
        env.register(self.name, obj)

    def evaluate(self, env):
        """Return the object currently bound to this variable's name."""
        return env.lookup(self.name)
class Attribute:
    """A dotted attribute reference (obj.p1.p2...), usable as lvalue or rvalue."""
    def __init__(self, objname, propertynames):
        self.objname = objname
        self.propertynames = propertynames
    def assign(self, value, env):
        # Walk all but the last property, then set the final one.
        instance = env.lookup(self.objname)
        target = instance
        length = len(self.propertynames)
        for i in range(length - 1):
            target = _get_attribute(target, self.propertynames[i])
        target.set_attr(self.propertynames[length - 1], value)
    def evaluate(self, env):
        # Walk the full property chain and return the final value.
        instance = env.lookup(self.objname)
        try:
            target = instance
            for prop in self.propertynames:
                target = _get_attribute(target, prop)
            return target
        except:
            # NOTE(review): the bare except converts *any* failure into
            # AttributeNotExistError, and `prop` is unbound here when
            # propertynames is empty -- confirm inputs before narrowing.
            raise AttributeNotExistError(prop)
class Subscript:
    """An indexing expression obj[index], usable as lvalue or rvalue."""

    def __init__(self, objref, indexexpr):
        self.objref = objref
        self.indexexpr = indexexpr

    def assign(self, value, env):
        """Store *value* at obj[index].

        Strings are immutable, so assigning into one raises
        SubscriptNotAllowedError.
        """
        container = self.lookup(env)
        if isinstance(container, str):
            raise SubscriptNotAllowedError()
        container[env.evaluate(self.indexexpr)] = value

    def evaluate(self, env):
        """Return obj[index]; any lookup failure raises ValueNotFoundError."""
        container = self.lookup(env)
        index = env.evaluate(self.indexexpr)
        try:
            return container[index]
        except:
            raise ValueNotFoundError()

    def lookup(self, env):
        """Evaluate the container reference; only list/str/dict may be indexed."""
        container = env.evaluate(self.objref)
        if isinstance(container, (list, str, dict)):
            return container
        raise SubscriptNotAllowedError()
class BuiltinType:
    """Registry of the interpreter's built-in class objects.

    Builtin classes carry a None attribute table, which makes them
    read-only through ClassObj.set_attr.
    """
    def __init__(self):
        # 'object' is the root of the inheritance chain.
        self.object = ClassObj('object', None, None)
        self.function = ClassObj('function', self.object, None)
        self.instance_method = ClassObj(
            'instancemethod', self.object, None)
        self.super = ClassObj('super', self.object, None)
        self.builtin_function_or_method = ClassObj(
            'builtin_function_or_method', self.object, None)
# Module-level singleton consulted wherever a selfclass is needed.
builtin_type = BuiltinType()
class BinaryOperation:
    """A binary expression: an operator keyword plus two operand expressions."""

    def __init__(self, operator, left, right):
        self.operator = operator
        self.left = left
        self.right = right

    def operate(self, env):
        """Evaluate both operands, then apply the operator.

        Note that ``and``/``or`` do NOT short-circuit: both operands are
        evaluated up front, exactly as in the original if/elif chain.
        Unknown operators and operand type errors both surface as
        InvalidOperationError.
        """
        lhs = env.evaluate(self.left)
        rhs = env.evaluate(self.right)
        try:
            # Built inside the try so an unknown operator (KeyError) is
            # wrapped the same way an unsupported operation is.
            dispatch = {
                KEYWORD_ADDITION: lambda: lhs + rhs,
                KEYWORD_SUBTRACTION: lambda: lhs - rhs,
                KEYWORD_MULTIPLICATION: lambda: lhs * rhs,
                KEYWORD_EXPONENTIATION: lambda: lhs ** rhs,
                KEYWORD_DIVISION: lambda: lhs / rhs,
                KEYWORD_FLOOR_DIVISION: lambda: lhs // rhs,
                KEYWORD_REMAINDER: lambda: lhs % rhs,
                KEYWORD_EQUAL: lambda: lhs == rhs,
                KEYWORD_NOT_EQUAL: lambda: lhs != rhs,
                KEYWORD_LESS_THAN: lambda: lhs < rhs,
                KEYWORD_LESS_THAN_OR_EQUAL: lambda: lhs <= rhs,
                KEYWORD_GREATER_THAN: lambda: lhs > rhs,
                KEYWORD_GREATER_THAN_OR_EQUAL: lambda: lhs >= rhs,
                KEYWORD_AND: lambda: lhs and rhs,
                KEYWORD_OR: lambda: lhs or rhs,
                KEYWORD_IS: lambda: lhs is rhs,
                KEYWORD_IS_NOT: lambda: lhs is not rhs,
                KEYWORD_IN: lambda: lhs in rhs,
                KEYWORD_NOT_IN: lambda: lhs not in rhs,
                KEYWORD_BIT_AND: lambda: lhs & rhs,
                KEYWORD_BIT_OR: lambda: lhs | rhs,
                KEYWORD_BIT_XOR: lambda: lhs ^ rhs,
                KEYWORD_LEFT_SHIFT: lambda: lhs << rhs,
                KEYWORD_RIGHT_SHIFT: lambda: lhs >> rhs,
            }
            return dispatch[self.operator]()
        except:
            raise InvalidOperationError()
class UnaryOperation:
    """A unary expression: an operator keyword plus a single operand expression."""
    def __init__(self, operator, operand):
        self.operator = operator
        self.operand = operand
    def operate(self, env):
        """Evaluate the operand, then apply the unary operator.

        Raises InvalidOperationError for unknown operators or operands
        that do not support the operation.
        """
        # BUG FIX: the original evaluated self.operator (the keyword
        # string itself) instead of self.operand, so e.g. `not x` was
        # computed as `not 'not'` regardless of x.
        v = env.evaluate(self.operand)
        op = self.operator
        try:
            # Literal values of KEYWORD_NOT / KEYWORD_NEGATIVE /
            # KEYWORD_BIT_NOT.
            if op == 'not':
                return not v
            elif op == '-_':
                return -v
            elif op == '~':
                return ~v
            else:
                raise Exception()
        except:
            raise InvalidOperationError()
# NOTE(review): maps built-in method names to dicts of alternative
# spellings checked by _get_attribute (`name in method_names['append']`).
# All alias dicts are empty here, so only the canonical names match --
# confirm where/how these are meant to be populated.
method_names = {
    'append': {},
    'pop': {},
    'insert': {},
    'find': {},
    'replace': {},
    'keys': {}
}
def _get_attribute(obj, name):
is_list = isinstance(obj, list)
if is_list:
if name == 'append' or name in method_names['append']:
def append(args, env):
elem = env.evaluate(args[0])
obj.append(elem)
builtin_append = BuiltinFuncObj(name, append)
return builtin_append
elif name == 'pop' or name in method_names['pop']:
def pop(args, env):
length = len(args)
try:
if length == 0:
value = obj.pop()
else:
index = env.evaluate(args[0])
value = obj.pop(index)
return value
except:
raise CannotPopFromListError()
builtin_pop = BuiltinFuncObj(name, pop)
return builtin_pop
elif name == 'insert' or name in method_names['insert']:
def insert(args, env):
index = env.evaluate(args[0])
elem = env.evaluate(args[1])
obj.insert(index, elem)
builtin_insert = BuiltinFuncObj(name, insert)
return builtin_insert
else:
raise MethodNotFoundError(name)
is_str = isinstance(obj, str)
if is_str:
if | |
tca_test_case_id=0))
self.assertEqual(client_result.tsr_error.e_code, 0, 'GetTestStatus')
self.assertEqual(client_result.tsr_state, PASSED,
'PortStatus PASSED')
self.assertEqual(client_result.tsr_type, CLIENT,
'PortStatus CLIENT')
self.assertEqual(client_result.tsr_l4_proto, l4_proto,
'PortStatus L4')
if l4_proto == TCP:
self.assertEqual(client_result.tsr_stats.gs_estab, 1,
'PortStatus ESTAB')
# Check server test to be passed
server_result = self.warp17_call('GetTestStatus',
TestCaseArg(tca_eth_port=1,
tca_test_case_id=0))
self.assertEqual(server_result.tsr_error.e_code, 0, 'GetTestStatus')
self.assertEqual(server_result.tsr_state, PASSED,
'PortStatus PASSED')
self.assertEqual(server_result.tsr_type, SERVER,
'PortStatus SERVER')
self.assertEqual(server_result.tsr_l4_proto, l4_proto,
'PortStatus L4')
self.assertEqual(server_result.tsr_stats.gs_estab, 1,
'PortStatus ESTAB')
for i in range(0, self.PORT_CNT):
server_result = self.warp17_call('GetStatistics',
PortArg(pa_eth_port=i))
self.assertGreater(server_result.sr_phy.pys_rx_pkts, 0,
'Phy pys_rx_pkts has to be greater than 0')
self.assertGreater(server_result.sr_phy.pys_rx_bytes, 0,
'Phy pys_rx_bytes has to be greater than 0')
self.assertGreater(server_result.sr_phy.pys_tx_pkts, 0,
'Phy pys_tx_pkts has to be greater than 0')
self.assertGreater(server_result.sr_phy.pys_tx_bytes, 0,
'Phy pys_tx_bytes has to be greater than 0')
self.assertEqual(server_result.sr_phy.pys_rx_errors, 0,
'Phy pys_rx_errors has to be 0')
self.assertEqual(server_result.sr_phy.pys_tx_errors, 0,
'Phy pys_tx_errors has to be 0')
self.assertGreater(server_result.sr_phy.pys_link_speed, 0,
'Phy pys_link_speed has to be greater than 0')
self.assertGreater(server_result.sr_port.ps_received_pkts, 0,
'Port ps_received_pkts has to be greater than 0')
self.assertGreater(server_result.sr_port.ps_received_bytes, 0,
'Port ps_received_bytes has to be greater than 0')
self.assertGreater(server_result.sr_port.ps_sent_pkts, 0,
'Port ps_sent_pkts has to be greater than 0')
self.assertGreater(server_result.sr_port.ps_sent_bytes, 0,
'Port ps_sent_bytes has to be greater than 0')
self.assertEqual(server_result.sr_port.ps_sent_failure, 0,
'Port ps_sent_failure has to be 0')
self.assertEqual(
server_result.sr_port.ps_received_ring_if_failed, 0,
'Port ps_received_ring_if_failed has to be 0')
self.assertEqual(server_result.sr_port.ps_sent_sim_failure, 0,
'Port ps_sent_sim_failure has to be 0')
if l4_proto == TCP:
self.assertGreater(server_result.sr_tcp.ts_received_pkts, 0,
'TCP ts_received_pkts has to be greater than 0')
self.assertGreater(server_result.sr_tcp.ts_received_bytes,
0,
'TCP ts_received_bytes has to be greater than 0')
self.assertGreater(server_result.sr_tcp.ts_sent_ctrl_pkts,
0,
'TCP ts_sent_ctrl_pkts has to be greater than 0')
self.assertGreater(server_result.sr_tcp.ts_sent_ctrl_bytes,
0,
'TCP ts_sent_ctrl_bytes has to be greater than 0')
self.assertGreater(server_result.sr_tcp.ts_sent_data_pkts,
0,
'TCP ts_sent_data_pkts has to be greater than 0')
self.assertGreater(server_result.sr_tcp.ts_sent_data_bytes,
0,
'TCP ts_sent_data_bytes has to be greater than 0')
# TODO: Investigate why this test fails
# self.assertEqual(server_result.sr_tcp.ts_tcb_not_found, 0, 'TCP ts_tcb_not_found has to be 0')
self.assertEqual(server_result.sr_tcp.ts_tcb_alloc_err, 0,
'TCP ts_tcb_alloc_err has to be 0')
self.assertEqual(server_result.sr_tcp.ts_to_small_fragment,
0,
'TCP ts_to_small_fragment has to be 0')
self.assertEqual(server_result.sr_tcp.ts_hdr_to_small, 0,
'TCP ts_hdr_to_small has to be 0')
self.assertEqual(server_result.sr_tcp.ts_invalid_checksum,
0,
'TCP ts_invalid_checksum has to be 0')
self.assertEqual(server_result.sr_tcp.ts_failed_ctrl_pkts,
0,
'TCP ts_failed_ctrl_pkts has to be 0')
self.assertEqual(server_result.sr_tcp.ts_failed_data_pkts,
0,
'TCP ts_failed_data_pkts has to be 0')
self.assertEqual(server_result.sr_tcp.ts_failed_data_clone,
0,
'TCP ts_failed_data_clone has to be 0')
self.assertEqual(server_result.sr_tcp.ts_reserved_bit_set,
0,
'TCP ts_reserved_bit_set has to be 0')
if l4_proto == UDP:
self.assertGreater(server_result.sr_udp.us_received_pkts, 0,
'UDP us_received_pkts has to be greater than 0')
self.assertGreater(server_result.sr_udp.us_received_bytes,
0,
'UDP us_received_bytes has to be greater than 0')
self.assertGreater(server_result.sr_udp.us_sent_pkts, 0,
'UDP us_sent_pkts has to be greater than 0')
self.assertGreater(server_result.sr_udp.us_sent_ctrl_bytes,
0,
'UDP us_sent_ctrl_bytes has to be greater than 0')
self.assertGreater(server_result.sr_udp.us_sent_data_bytes,
0,
'UDP us_sent_data_bytes has to be greater than 0')
# TODO: Investigate why this test fails
# self.assertEqual(server_result.sr_udp.us_ucb_not_found, 0, 'UDP us_ucb_not_found has to be 0')
self.assertEqual(server_result.sr_udp.us_ucb_alloc_err, 0,
'UDP us_ucb_alloc_err has to be 0')
self.assertEqual(server_result.sr_udp.us_to_small_fragment,
0,
'UDP us_to_small_fragment has to be 0')
self.assertEqual(server_result.sr_udp.us_invalid_checksum,
0,
'UDP us_invalid_checksum has to be 0')
self.assertEqual(server_result.sr_udp.us_failed_pkts, 0,
'UDP us_failed_pkts has to be 0')
self.assertEqual(server_result.sr_error.e_code,
0,
'GetPortCfg')
self.Stop()
self.TearDown()
def test_get_statistics(self):
for eth_port in range(0, self.PORT_CNT):
res = self.warp17_call('GetStatistics',
PortArg(pa_eth_port=eth_port))
error = res.sr_error
self.assertEqual(error.e_code, 0, 'GetPortCfg')
def test_get_statistics_invalid_port(self):
self.assertEqual(self.warp17_call('GetStatistics',
PortArg(
pa_eth_port=self.PORT_CNT + 1)).sr_error.e_code,
-errno.EINVAL,
'GetPortCfg')
    def test_latency(self):
        """Check recent latency behaviour for TCP/UDP.

        Iterates over combinations of latency thresholds (tcs_max,
        tcs_max_avg) and sample-buffer sizes (tcs_samples), runs a full
        client/server test for both TCP and UDP each time, and validates
        the latency statistics reported for the server test case.
        """
        run_t = 2  # run time
        ip_cnt = TPG_TEST_MAX_L3_INTF  # ip count
        port_cnt = 100  # port count
        n_sess = (port_cnt * ip_cnt)  # n of sessions
        self.lh.info('Check recent latency behaviour for TCP/UDP with sessions {}'.format(n_sess))
        # (tcs_max, tcs_max_avg, tcs_samples) combinations to exercise;
        # None means "leave the option unset".
        ciclemap = [(None, None, None),  # support variables for latency options
                    (0, 0, None),
                    (None, None, 100),
                    (0, 0, 100)]
        app_ccfg, app_scfg, rate_ccfg = self.SetUp(ip_cnt)
        for tcs_max, tcs_max_avg, tcs_samples in ciclemap:
            s_latency = TestCaseLatency()  # latency options container
            if tcs_max is not None and tcs_max_avg is not None:
                # A 0 threshold guarantees every sample exceeds it.
                s_latency.tcs_max = tcs_max
                s_latency.tcs_max_avg = tcs_max_avg
            if tcs_samples is not None:
                s_latency.tcs_samples = tcs_samples
            for l4_proto in [TCP, UDP]:
                #########################################
                #         configure:CLIENT              #
                #########################################
                c_tc = TestCaseArg(tca_eth_port=0, tca_test_case_id=0)
                l4_ccfg = L4Client(l4c_proto=l4_proto,
                                   l4c_tcp_udp=TcpUdpClient(
                                       tuc_sports=b2b_ports(port_cnt),
                                       tuc_dports=b2b_ports(1)))
                ccfg = TestCase(tc_type=CLIENT, tc_eth_port=0,
                                tc_id=0,
                                tc_client=Client(cl_src_ips=b2b_sips(0, ip_cnt),
                                                 cl_dst_ips=b2b_dips(0, 1),
                                                 cl_l4=l4_ccfg,
                                                 cl_rates=rate_ccfg),
                                tc_app=app_ccfg,
                                tc_criteria=TestCriteria(tc_crit_type=RUN_TIME,
                                                         tc_run_time_s=run_t))
                self.assertEqual(
                    self.warp17_call('ConfigureTestCase', ccfg).e_code,
                    0, 'ConfigureTestCase')
                # Client stamps TX packets so latency can be measured.
                c_ip4_opt = Ipv4Sockopt(ip4so_tx_tstamp=True)
                ip4_opt_arg = Ipv4SockoptArg(i4sa_tc_arg=c_tc,
                                             i4sa_opts=c_ip4_opt)
                self.assertEqual(
                    self.warp17_call('SetIpv4Sockopt', ip4_opt_arg).e_code,
                    0, 'SetIpv4Sockopt')
                #########################################
                #         configure:SERVER              #
                #########################################
                s_tc = TestCaseArg(tca_eth_port=1, tca_test_case_id=0)
                l4_scfg = L4Server(l4s_proto=l4_proto,
                                   l4s_tcp_udp=TcpUdpServer(
                                       tus_ports=b2b_ports(1)))
                scfg = TestCase(tc_type=SERVER, tc_eth_port=1, tc_id=0,
                                tc_server=Server(srv_ips=b2b_sips(1, 1),
                                                 srv_l4=l4_scfg),
                                tc_app=app_scfg,
                                tc_criteria=TestCriteria(tc_crit_type=SRV_UP,
                                                         tc_srv_up=1),
                                tc_latency=s_latency)
                self.assertEqual(
                    self.warp17_call('ConfigureTestCase', scfg).e_code,
                    0, 'ConfigureTestCase')
                # Read the config back and confirm the latency options stuck.
                tc_res = self.warp17_call('GetTestCase', s_tc)
                self.assertEqual(tc_res.tcr_error.e_code, 0, 'GetTestCase')
                self.assertEqual(tc_res.tcr_cfg.tc_latency, s_latency,
                                 'Sample Lat option PASSED')
                # Server stamps RX packets (the measuring side).
                s_ip4_opt = Ipv4Sockopt(ip4so_rx_tstamp=True)
                ip4_opt_arg = Ipv4SockoptArg(i4sa_tc_arg=s_tc,
                                             i4sa_opts=s_ip4_opt)
                self.assertEqual(
                    self.warp17_call('SetIpv4Sockopt', ip4_opt_arg).e_code,
                    0, 'SetIpv4Sockopt')
                self.Start()
                sleep(run_t)  # to be sure to collect all the data
                #########################################
                #               CLIENT                  #
                #########################################
                # Check client test to be passed
                c_result = self.warp17_call('GetTestStatus', c_tc)
                self.assertEqual(c_result.tsr_error.e_code, 0, 'GetTestStatus')
                self.assertEqual(c_result.tsr_state, PASSED,
                                 'PortStatus PASSED')
                self.assertEqual(c_result.tsr_type, CLIENT,
                                 'PortStatus CLIENT')
                self.assertEqual(c_result.tsr_l4_proto, l4_proto,
                                 'PortStatus L4')
                if l4_proto == TCP:
                    # Established-session count only applies to TCP.
                    self.assertEqual(c_result.tsr_stats.gs_estab, n_sess,
                                     'PortStatus ESTAB')
                # check ip options
                ip4_res = self.warp17_call('GetIpv4Sockopt', c_tc)
                self.assertEqual(ip4_res.i4sr_error.e_code, 0, 'GetIpv4Sockopt')
                ip4_res = ip4_res.i4sr_opts
                self.assertEqual(ip4_res.ip4so_tx_tstamp,
                                 c_ip4_opt.ip4so_tx_tstamp,
                                 'Ip option PASSED')
                self.assertEqual(ip4_res.ip4so_rx_tstamp,
                                 c_ip4_opt.ip4so_rx_tstamp,
                                 'Ip option PASSED')
                #########################################
                #               SERVER                  #
                #########################################
                # Check server test to be passed
                s_result = self.warp17_call('GetTestStatus', s_tc)
                self.assertEqual(s_result.tsr_error.e_code, 0, 'GetTestStatus')
                self.assertEqual(s_result.tsr_state, PASSED,
                                 'PortStatus PASSED')
                self.assertEqual(s_result.tsr_type, SERVER,
                                 'PortStatus SERVER')
                self.assertEqual(s_result.tsr_l4_proto, l4_proto,
                                 'PortStatus L4')
                if l4_proto == TCP:
                    self.assertEqual(s_result.tsr_stats.gs_estab, n_sess,
                                     'PortStatus ESTAB')
                # check ip options
                ip4_res = self.warp17_call('GetIpv4Sockopt', s_tc)
                self.assertEqual(ip4_res.i4sr_error.e_code, 0, 'GetIpv4Sockopt')
                ip4_res = ip4_res.i4sr_opts
                self.assertEqual(ip4_res.ip4so_tx_tstamp,
                                 s_ip4_opt.ip4so_tx_tstamp,
                                 'Ip option PASSED')
                self.assertEqual(ip4_res.ip4so_rx_tstamp,
                                 s_ip4_opt.ip4so_rx_tstamp,
                                 'Ip option PASSED')
                # Check latency statistics
                tc_result = self.warp17_call('GetTestStatus',
                                             TestCaseArg(
                                                 tca_eth_port=s_tc.tca_eth_port,
                                                 tca_test_case_id=0))
                self.assertEqual(tc_result.tsr_error.e_code, 0, 'GetTestStatus')
                # Global (whole-run) latency stats first.
                stat = tc_result.tsr_stats.gs_latency_stats.gls_stats
                if tcs_max is not None and tcs_max_avg is not None:
                    # Thresholds of 0 must have been exceeded by every sample.
                    self.assertGreater(stat.ls_max_exceeded, 0,
                                       'ls_max_exceeded')
                    self.assertGreater(stat.ls_max_average_exceeded, 0,
                                       'ls_max_average_exceeded')
                else:
                    self.assertEqual(stat.ls_max_exceeded, 0, 'ls_max_exceeded')
                    self.assertEqual(stat.ls_max_average_exceeded, 0,
                                     'ls_max_average_exceeded')
                self.assertLess(stat.ls_min_latency, UINT32MAX,
                                'ls_min_latency')
                self.assertGreater(stat.ls_max_latency, 0, 'ls_max_latency')
                self.assertGreater(stat.ls_sum_latency, 0, 'ls_sum_latency')
                self.assertGreater(stat.ls_samples_count, 0, 'ls_samples_count')
                if tcs_samples is not None:
                    # Recent-sample stats only exist when sampling was enabled.
                    stat = tc_result.tsr_stats.gs_latency_stats.gls_sample_stats
                    if tcs_max is not None and tcs_max_avg is not None:
                        self.assertGreater(stat.ls_max_exceeded, 0,
                                           'ls_max_exceeded')
                        self.assertGreater(stat.ls_max_average_exceeded, 0,
                                           'ls_max_average_exceeded')
                    else:
                        self.assertEqual(stat.ls_max_exceeded, 0,
                                         'ls_max_exceeded')
                        self.assertEqual(stat.ls_max_average_exceeded, 0,
                                         'ls_max_average_exceeded')
                    self.assertLess(stat.ls_min_latency, UINT32MAX,
                                    'ls_min_latency')
                    self.assertGreater(stat.ls_max_latency, 0, 'ls_max_latency')
                    self.assertGreater(stat.ls_sum_latency, 0, 'ls_sum_latency')
                    self.assertGreater(stat.ls_samples_count, 0,
                                       'ls_samples_count')
                else:
                    # Sampling disabled: no recent samples may be recorded.
                    self.assertEqual(tc_result.tsr_stats.gs_latency_stats.
                                     gls_sample_stats.ls_samples_count, 0,
                                     'ls_samples_count')
                self.Stop()
                self.TearDown()
def test_negative_latency(self):
"""Check negative latency behaviour for TCP/UDP"""
ipv4_opt_def = Ipv4Sockopt(ip4so_rx_tstamp=False,
ip4so_tx_tstamp=False, ip4so_tos=0)
app_ccfg, app_scfg, rate_ccfg = self.SetUp(ip_cnt=1)
for l4_proto in [TCP, UDP]:
# configure client
c_tc = TestCaseArg(tca_eth_port=0, tca_test_case_id=0)
l4_ccfg = L4Client(l4c_proto=l4_proto,
l4c_tcp_udp=TcpUdpClient(tuc_sports=b2b_ports(1),
tuc_dports=b2b_ports(
1)))
latency_cfg = TestCaseLatency(tcs_samples=TPG_TSTAMP_SAMPLES_MAX_BUFSIZE + 1)
ccfg = TestCase(tc_type=CLIENT, tc_eth_port=0,
tc_id=0,
tc_client=Client(cl_src_ips=b2b_sips(0, 1),
cl_dst_ips=b2b_dips(0, 1),
cl_l4=l4_ccfg,
cl_rates=rate_ccfg),
tc_app=app_ccfg,
tc_latency=latency_cfg,
tc_criteria=TestCriteria(tc_crit_type=RUN_TIME,
tc_run_time_s=1))
self.assertEqual(self.warp17_call('ConfigureTestCase', ccfg).e_code,
-errno.EINVAL, 'ConfigureTestCase')
# configure server
s_tc = TestCaseArg(tca_eth_port=1, tca_test_case_id=0)
l4_scfg = L4Server(l4s_proto=l4_proto,
l4s_tcp_udp=TcpUdpServer(tus_ports=b2b_ports(1)))
scfg = TestCase(tc_type=SERVER, tc_eth_port=1, tc_id=0,
tc_server=Server(srv_ips=b2b_sips(1, 1),
srv_l4=l4_scfg),
tc_app=app_scfg,
tc_latency=latency_cfg,
tc_criteria=TestCriteria(tc_crit_type=SRV_UP,
tc_srv_up=1))
self.assertEqual(self.warp17_call('ConfigureTestCase', scfg).e_code,
-errno.EINVAL, 'ConfigureTestCase')
def SetUp(self, ip_cnt):
"""Port 0 is the client, Port 1 is the server
:param ip_cnt: how many sessions
"""
# No def gw
no_def_gw = Ip(ip_version=IPV4, ip_v4=0)
# Setup interfaces on port 0
pcfg = b2b_port_add(0, def_gw=no_def_gw)
b2b_port_add_intfs(pcfg, [(Ip(ip_version=IPV4, ip_v4=b2b_ipv4(0, i)),
Ip(ip_version=IPV4, ip_v4=b2b_mask(0, i)),
b2b_count(0, i)) for i in range(0, ip_cnt)])
self.warp17_call('ConfigurePort', pcfg)
# Setup interfaces on port 1
pcfg = b2b_port_add(1, def_gw=no_def_gw)
b2b_port_add_intfs(pcfg, [(Ip(ip_version=IPV4, ip_v4=b2b_ipv4(1, i)),
Ip(ip_version=IPV4, ip_v4=b2b_mask(1, i)),
b2b_count(1, i)) for i in range(0, ip_cnt)])
self.warp17_call('ConfigurePort', pcfg)
rate_ccfg = RateClient(rc_open_rate=Rate(),
rc_close_rate=Rate(),
rc_send_rate=Rate())
app_ccfg = App(app_proto=RAW_CLIENT,
app_raw_client=RawClient(rc_req_plen=10,
rc_resp_plen=10))
app_scfg = App(app_proto=RAW_SERVER,
app_raw_server=RawServer(rs_req_plen=10,
rs_resp_plen=10))
return app_ccfg, app_scfg, rate_ccfg
def Start(self):
# Start server test
self.assertEqual(self.warp17_call('PortStart',
PortArg(pa_eth_port=1)).e_code,
0,
'PortStart')
# Start client test
self.assertEqual(self.warp17_call('PortStart',
PortArg(pa_eth_port=0)).e_code,
0,
'PortStart')
# should be done in way less than 5 seconds!
sleep(5)
    def Stop(self):
        # Stop the server test running on port 1.
        self.assertEqual(self.warp17_call('PortStop',
                                          PortArg(pa_eth_port=1)).e_code,
                         0,
                         'PortStop')
        # A second stop on port 1 must fail with ENOENT: the test already
        # stopped.  NOTE(review): the original comment said "client" but
        # the call targets port 1 (the server) — confirm whether port 0
        # was intended here.
        self.assertEqual(self.warp17_call('PortStop',
                                          PortArg(pa_eth_port=1)).e_code,
                         -errno.ENOENT,
                         'PortStop')
def TearDown(self):
# Delete client test
self.assertEqual(self.warp17_call('DelTestCase',
TestCaseArg(tca_eth_port=0,
tca_test_case_id=0)).e_code,
0,
'DelTestCase')
# Delete server test
self.assertEqual(self.warp17_call('DelTestCase',
TestCaseArg(tca_eth_port=1,
tca_test_case_id=0)).e_code,
0,
'DelTestCase')
self.assertEqual(self.warp17_call('ClearStatistics',
PortArg(pa_eth_port=0)).e_code,
0,
'ClearStatistics')
self.assertEqual(self.warp17_call('ClearStatistics',
PortArg(pa_eth_port=1)).e_code,
0,
'ClearStatistics')
##############################################################################
# Partial Get/Update APIs.
##############################################################################
class TestPartialPortApi(Warp17UnitTestCase):
"""Tests the functionality of the partial update/get port config APIs."""
"""Assumes a B2B setup with even two ports."""
"""Port 0 <-> Port 1"""
PORT_CNT = 2
def _get_server_test(self, eth_port, tc_id):
l4_scfg = L4Server(l4s_proto=TCP,
l4s_tcp_udp=TcpUdpServer(tus_ports=b2b_ports(1)))
app_scfg = App(app_proto=RAW_SERVER,
app_raw_server=RawServer(rs_req_plen=42,
rs_resp_plen=42))
return TestCase(tc_type=SERVER, tc_eth_port=eth_port, tc_id=tc_id,
tc_server=Server(srv_ips=b2b_sips(1, 1),
srv_l4=l4_scfg),
tc_app=app_scfg,
tc_criteria=TestCriteria(tc_crit_type=SRV_UP,
tc_srv_up=1))
def _get_client_test(self, eth_port, tc_id):
l4cfg = L4Client(l4c_proto=TCP,
l4c_tcp_udp=TcpUdpClient(tuc_sports=b2b_ports(1),
tuc_dports=b2b_ports(1)))
rate_cfg = RateClient(rc_open_rate=Rate(r_value=42),
rc_close_rate=Rate(r_value=42),
rc_send_rate=Rate(r_value=42))
app_cfg = App(app_proto=RAW_CLIENT,
app_raw_client=RawClient(rc_req_plen=1,
rc_resp_plen=1))
return TestCase(tc_type=CLIENT, tc_eth_port=eth_port,
tc_id=tc_id,
tc_client=Client(cl_src_ips=b2b_sips(eth_port, 1),
cl_dst_ips=b2b_dips(eth_port, 1),
cl_l4=l4cfg,
cl_rates=rate_cfg),
tc_app=app_cfg,
tc_criteria=TestCriteria(tc_crit_type=RUN_TIME,
tc_run_time_s=42))
def setUp(self):
self._pcfg = b2b_configure_port(eth_port=0,
def_gw=Ip(ip_version=IPV4, ip_v4=42),
l3_intf_count=TPG_TEST_MAX_L3_INTF)
| |
proceeding to shutdown to ensure worker0 RPCs make
# it through to other workers.
dist.barrier()
rpc.shutdown(graceful=False)
@dist_init
def test_all_gather(self):
info = rpc.get_worker_info()
results = rpc.api._all_gather(info.id)
expected = {}
for info in rpc._get_current_rpc_agent().get_worker_infos():
expected[info.name] = info.id
self.assertEqual(expected, results)
@dist_init
def test_all_gather_timeout(self):
rpc._set_rpc_timeout(0.1)
if self.rank == 0:
with self.assertRaisesRegex(
RuntimeError,
"timed out in _all_gather after 0\\.10 seconds"
):
rpc.api._all_gather(SlowPickleClass(0.5))
else:
expected_error = self.get_timeout_error_regex()
with self.assertRaisesRegex(RuntimeError, expected_error):
rpc.api._all_gather(SlowPickleClass(0.5))
def _test_barrier_helper(self, info, names, multi_threaded=False):
names = sorted(names)
leader = names[0]
rpc.rpc_sync(leader, _reset_count)
if not multi_threaded and info.name == leader:
self.assertEqual(_rpc_barrier_count, 0)
rpc.api._barrier(names)
rpc.rpc_sync(leader, _increment_count)
rpc.api._barrier(names)
if not multi_threaded and info.name == leader:
self.assertEqual(_rpc_barrier_count, len(names))
@dist_init
def test_rpc_barrier_all(self):
# Test rpc barrier when called with full list of workers
info = rpc.get_worker_info()
all_worker_info = rpc._get_current_rpc_agent().get_worker_infos()
names = [worker.name for worker in all_worker_info]
self._test_barrier_helper(info, names)
@dist_init
def test_rpc_barrier_subset(self):
# Test rpc barrier when processes are called with different subsets of the full list
info = rpc.get_worker_info()
all_worker_info = rpc._get_current_rpc_agent().get_worker_infos()
if info.id % 2:
names = [worker.name for worker in all_worker_info if worker.id % 2]
else:
names = [worker.name for worker in all_worker_info if not worker.id % 2]
self._test_barrier_helper(info, names)
@dist_init
def test_rpc_barrier_partial_subset(self):
# Test rpc barrier when some processes are not involved in the barrier
info = rpc.get_worker_info()
all_worker_info = rpc._get_current_rpc_agent().get_worker_infos()
if info.id % 2:
names = [worker.name for worker in all_worker_info if worker.id % 2]
else:
names = [f"worker{info.id}"]
self._test_barrier_helper(info, names)
@dist_init
def test_rpc_barrier_multithreaded(self):
# This tests validates the implementation of barrier when multiple threads call into it
# We only need to check that it does not hang in this case
info = rpc.get_worker_info()
all_worker_info = rpc._get_current_rpc_agent().get_worker_infos()
names = [worker.name for worker in all_worker_info]
threads = []
for _ in range(3):
th = threading.Thread(target=self._test_barrier_helper, args=(info, names, True))
threads.append(th)
th.start()
for th in threads:
th.join()
    @dist_init
    def test_graceful_shutdown_with_uneven_workload(self):
        """Test graceful termination."""
        # Workers receive different amounts of work; a graceful shutdown
        # must still drain every outstanding RPC without error.
        self._run_uneven_workload()
@dist_init(setup_rpc=False)
def test_shutdown_followed_by_rpc(self):
# Initialize RPC.
rpc.init_rpc(
name="worker%d" % self.rank,
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n)),
)
self.assertEqual(ret, torch.ones(n, n) * 2)
rpc.shutdown()
with self.assertRaisesRegex(RuntimeError, "^RPC has not been initialized"):
rpc.rpc_sync(
worker_name(dst_rank),
torch.add,
args=(torch.ones(n, n), torch.ones(n, n)),
)
@dist_init
def test_expected_src(self):
dst_rank = (self.rank + 1) % self.world_size
expected_src_rank = (self.rank - 1) % self.world_size
ret = rpc.rpc_sync(worker_name(dst_rank), set_value, args=(self.rank,))
value = VALUE_FUTURE.result()
self.assertEqual(value, expected_src_rank)
@dist_init
def test_py_built_in(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(worker_name(dst_rank), min, args=(n, n + 1, n + 2))
self.assertEqual(ret, min(n, n + 1, n + 2))
@dist_init
def test_py_user_defined(self):
n = self.rank + 1
dst_rank = n % self.world_size
ret = rpc.rpc_sync(
worker_name(dst_rank),
my_function,
kwargs={"a": n, "b": n + 1, "c": n + 2},
)
self.assertEqual(ret, my_function(n, n + 1, n + 2))
def test_build_rpc_profiling_key(self):
# Tests that the name that shows up as an Event in profiling RPCs has all
# the necessary information.
for exec_mode in [RPCExecMode.SYNC, RPCExecMode.ASYNC, RPCExecMode.REMOTE]:
rpc_profiling_key = _build_rpc_profiling_key(
exec_mode, "foo", "worker0", "worker1"
)
self.assertIn(exec_mode.value, rpc_profiling_key)
self.assertIn("foo", rpc_profiling_key)
self.assertIn("worker0", rpc_profiling_key)
self.assertIn("worker1", rpc_profiling_key)
def check_profiling_info(self, self_worker_name, dst_worker_name, func, rpc_event, rpc_exec_mode):
self.assertTrue(self_worker_name in rpc_event.name)
self.assertTrue(dst_worker_name in rpc_event.name)
if isinstance(func, torch.jit.ScriptFunction):
self.assertTrue(torch._jit_internal._qualified_name(func) in rpc_event.name)
else:
self.assertTrue(func.__name__ in rpc_event.name)
self.assertTrue(rpc_exec_mode.value in rpc_event.name)
self.assertEqual(rpc_event.count, 1)
    @dist_init
    def test_profiler_rpc_record_shapes(self):
        """Shapes recorded for a remote op must match the local equivalent."""
        if self.rank != 1:
            # Only rank 1 drives this test; other ranks just serve.
            return
        dst = (self.rank + 1) % self.world_size
        dst_worker = worker_name(dst)
        t1, t2 = torch.ones(100), torch.ones(100)
        with torch.autograd.profiler.profile(record_shapes=True) as prof:
            rpc.rpc_sync(dst_worker, torch.add, args=(t1, t2))
        function_events = prof.function_events
        # Pick the remotely-recorded aten::add event.
        remote_events = [event for event in function_events if event.is_remote]
        remote_add_event = [
            event for event in remote_events if "aten::add" in event.name
        ][0]
        remote_add_input_shapes = remote_add_event.input_shapes
        # Run profiler on equivalent local op and validate shapes are the same.
        with torch.autograd.profiler.profile(record_shapes=True) as prof:
            torch.add(t1, t2)
        local_function_events = prof.function_events
        local_add_event = [
            event for event in local_function_events if "aten::add" in event.name
        ][0]
        local_add_input_shapes = local_add_event.input_shapes
        self.assertEqual(remote_add_input_shapes, local_add_input_shapes)
@dist_init
def test_profiler_rpc_memory(self):
if self.rank != 1:
return
dst = (self.rank + 1) % self.world_size
dst_worker = worker_name(dst)
with torch.autograd.profiler.profile(profile_memory=True) as p:
fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=())
res = fut.wait()
function_events = p.function_events
event_cpu_mem_usages = set(event.cpu_memory_usage for event in function_events)
# if cpu_memory_usage was not propagated over the wire, this set would
# only contain 0 (indicates no memory being profiled)
self.assertNotEqual({0}, event_cpu_mem_usages)
# No memory profiled if profile_memory=False
with torch.autograd.profiler.profile(profile_memory=False) as p:
fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=())
res = fut.wait()
function_events = p.function_events
event_cpu_mem_usages = set(event.cpu_memory_usage for event in function_events)
self.assertEqual({0}, event_cpu_mem_usages)
@dist_init
def test_profiler_export_trace(self):
if self.rank != 1:
return
dst = (self.rank + 1) % self.world_size
dst_worker = worker_name(dst)
with torch.autograd.profiler.profile() as p:
fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=())
res = fut.wait()
events = p.function_events
with TemporaryFileName() as fname:
path = fname
p.export_chrome_trace(path)
with open(path) as f:
trace = json.load(f)
event_names = [event['name'] for event in trace]
for expected_event_name in EXPECTED_REMOTE_EVENTS + [RPCExecMode.ASYNC.value]:
event_exists = any([expected_event_name in event_name for event_name in event_names])
self.assertTrue(event_exists)
    @dist_init
    def test_profiler_rpc_key_names(self):
        # tests that remote events are properly prefixed with the RPC profiling key.
        if self.rank != 1:
            # Only rank 1 drives this test.
            return
        # Spawn multiple threads that send RPCs to ensure keys are correctly
        # prefixed when there are multiple RPCs being created/in flight at the
        # same time.
        dst_ranks = [rank for rank in range(0, self.world_size) if rank != self.rank]
        def rpc_with_profiling(dst_worker):
            # Issue one profiled RPC and validate every remote event name.
            with torch.autograd.profiler.profile() as prof:
                fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=())
                fut.wait()
            events = prof.function_events
            remote_event_names = {
                event.name: event for event in events if event.is_remote
            }
            rpc_profiling_key = _build_rpc_profiling_key(
                RPCExecMode.ASYNC,
                udf_with_torch_ops.__qualname__,
                worker_name(self.rank),
                dst_worker,
            )
            remote_event_name_set = set(EXPECTED_REMOTE_EVENTS)
            for name, event in remote_event_names.items():
                # Ensure that we have the expected key as part of the remote
                # event.
                self.assertTrue(name.startswith(rpc_profiling_key))
                self.assertTrue(event.is_remote)
                self.assertTrue(event.node_id == rpc.get_worker_info(dst_worker).id)
                # Ensure that the remote event name also contains the operator.
                operator_name_substr = name[len(rpc_profiling_key) :]
                # Note: we don't assert that every remote event needs to be
                # in the above set, the set is just a representative set of
                # what we expect to see. The profiler can change and add more
                # events, but we should always expect to see this representative
                # set.
                matching_event = {
                    remote_event_name
                    for remote_event_name in remote_event_name_set
                    if remote_event_name in operator_name_substr
                }
                remote_event_name_set -= matching_event
            # The set should be empty, otherwise its contained elements did
            # not show up in the remote profiler output.
            self.assertTrue(
                remote_event_name_set == set(),
                f"Expected {remote_event_name_set} to be included in remote profiler output.",
            )
        for dst in dst_ranks:
            dst_worker = worker_name(dst)
            num_parallel_rpcs = 2
            # Two concurrent in-flight RPCs per destination.
            with concurrent.futures.ThreadPoolExecutor(
                max_workers=num_parallel_rpcs
            ) as executor:
                futs = [
                    executor.submit(rpc_with_profiling, dst_worker)
                    for _ in range(num_parallel_rpcs)
                ]
                # Wait for workers to finish test
                for fut in futs:
                    fut.result()
    def _run_test_profiler_remote_events_profiled(self):
        # Tests that we can successfully invoke the profiler on a remote node,
        # and collect the remote events back in the local profiler.
        if self.rank != 1:
            # Only rank 1 drives this test.
            return
        dst_ranks = [rank for rank in range(0, self.world_size) if rank != self.rank]
        for dst in dst_ranks:
            dst_worker = worker_name(dst)
            with torch.autograd.profiler.profile() as prof:
                fut = rpc.rpc_async(dst_worker, udf_with_torch_ops, args=())
                ret = fut.wait()
            events = prof.function_events
            # The top-level RPC event must carry the full metadata.
            rpc_event = get_function_event(events, RPCExecMode.ASYNC.value)
            self.check_profiling_info(
                worker_name(self.rank),
                dst_worker,
                udf_with_torch_ops,
                rpc_event,
                RPCExecMode.ASYNC,
            )
            remote_events = {event.name: event for event in events if event.is_remote}
            rpc_profiling_key = _build_rpc_profiling_key(
                RPCExecMode.ASYNC,
                udf_with_torch_ops.__qualname__,
                worker_name(self.rank),
                worker_name(dst),
            )
            for expected_remote_event_name in EXPECTED_REMOTE_EVENTS:
                expected_key = rpc_profiling_key + REMOTE_OP_STR + expected_remote_event_name
                self.assertTrue(expected_key in remote_events)
                remote_event = remote_events[expected_key]
                # Remote event should have a node ID corresponding to the worker
                # it ran on.
                self.assertEqual(remote_event.node_id, dst)
            # Validate order remote events show up in profiling output.
            def convert_remote_to_local(event_name):
                # Strip the RPC profiling prefix to recover the raw op name.
                remote_op_key = rpc_profiling_key + REMOTE_OP_STR
                return event_name[
                    event_name.find(remote_op_key)
                    + len(remote_op_key) :
                ]
            remote_events_list = [
                convert_remote_to_local(event.name)
                for event in events
                if convert_remote_to_local(event.name) in EXPECTED_REMOTE_EVENTS
            ]
            self.assertEqual(
                set(remote_events_list),
                set(EXPECTED_REMOTE_EVENTS),
                f"Mismatch between profiled events: {set(remote_events_list)} and expected events: {set(EXPECTED_REMOTE_EVENTS)}",
            )
    @dist_init
    def test_profiler_remote_events_profiled(self):
        # Remote-event profiling with the default (multi-threaded) agent.
        self._run_test_profiler_remote_events_profiled()
    @single_threaded_process_group_agent
    @dist_init
    def test_profiler_remote_events_profiled_single_threaded(self):
        # Remote-event profiling forced onto a single-threaded
        # process-group agent.
        self._run_test_profiler_remote_events_profiled()
def run_profiling_workload(self, dst):
fut = rpc.rpc_async(
worker_name(dst),
torch.mul,
args=(
torch.tensor(1.0, requires_grad=True),
torch.tensor(1.0, requires_grad=True),
),
)
fut.wait()
def _run_rpc_profiling_async_function(self, device="cpu"):
if self.rank != 1:
return
dst1 = worker_name((self.rank + 1) % self.world_size)
| |
{}'.format(j['message']))
return rest.internal_error('failed to call daemon process')
obj = json.loads(resp.content)
path = os.path.join(_get_project_dir_path(project_name), 'spacy_rules/' + field_name + '.json')
data[project_name]['master_config']['fields'][field_name]['number_of_rules'] = len(obj['rules'])
# data[project_name]['master_config']['spacy_field_rules'] = {field_name: path}
update_master_config_file(project_name)
write_to_file(json.dumps(obj, indent=2), path)
# git_helper.commit(files=[path, project_name + '/master_config.json'],
# message='create / update spacy rules: project {}, field {}'.format(project_name, field_name))
with codecs.open(path, 'r') as f:
obj = json.loads(f.read())
return rest.created(obj)
    @requires_auth
    def put(self, project_name, field_name):
        # PUT is an alias for POST: creating and updating the spacy rules
        # for a field go through the same code path.
        return self.post(project_name, field_name)
@requires_auth
def get(self, project_name, field_name):
if project_name not in data:
return rest.not_found('Project {} not found'.format(project_name))
if field_name not in data[project_name]['master_config']['fields']:
return rest.not_found('Field {} not found'.format(field_name))
path = os.path.join(_get_project_dir_path(project_name), 'spacy_rules/' + field_name + '.json')
if not os.path.exists(path):
return rest.not_found('no spacy rules')
obj = dict()
with codecs.open(path, 'r') as f:
obj = json.loads(f.read())
type = request.args.get('type', '')
if type == 'rules':
return {'rules': obj['rules']}
elif type == 'tokens':
return {'test_tokens': obj['test_tokens']}
elif type == 'results':
return {'results': obj['results']}
else:
return obj
@requires_auth
def delete(self, project_name, field_name):
if project_name not in data:
return rest.not_found('Project {} not found'.format(project_name))
if field_name not in data[project_name]['master_config']['fields']:
return rest.not_found('Field {} not found'.format(field_name))
path = os.path.join(_get_project_dir_path(project_name), 'spacy_rules/' + field_name + '.json')
if not os.path.exists(path):
return rest.not_found('no spacy rules')
os.remove(path)
data[project_name]['master_config']['fields'][field_name]['number_of_rules'] = 0
# del data[project_name]['master_config']['spacy_field_rules'][field_name]
update_master_config_file(project_name)
# git_helper.commit(files=[path, project_name + '/master_config.json'],
# message='delete spacy rules: project {}, field {}'.format(project_name, field_name))
return rest.deleted()
@api.route('/projects/<project_name>/glossaries')
class ProjectGlossaries(Resource):
    @requires_auth
    def post(self, project_name):
        """Upload a new glossary for *project_name*.

        Expects multipart form fields 'glossary_name' and 'glossary_file'.
        Persists three on-disk representations (plain text, gzipped text,
        gzipped JSON), then recomputes the glossary statistics.
        """
        if project_name not in data:
            return rest.not_found('Project {} not found'.format(project_name))
        parse = reqparse.RequestParser()
        parse.add_argument('glossary_file', type=werkzeug.FileStorage, location='files')
        parse.add_argument('glossary_name')
        args = parse.parse_args()
        # http://werkzeug.pocoo.org/docs/0.12/datastructures/#werkzeug.datastructures.FileStorage
        if args['glossary_name'] is None or args['glossary_file'] is None:
            return rest.bad_request('Invalid glossary_name or glossary_file')
        name = args['glossary_name'].strip()
        if len(name) == 0:
            return rest.bad_request('Invalid glossary_name')
        # Duplicate names are rejected rather than overwritten.
        if name in data[project_name]['master_config']['glossaries']:
            return rest.exists('Glossary {} exists'.format(name))
        file_path = os.path.join(_get_project_dir_path(project_name), 'glossaries/' + name + '.txt')
        gzip_file_path = os.path.join(_get_project_dir_path(project_name), 'glossaries/' + name + '.txt.gz')
        json_file_path = os.path.join(_get_project_dir_path(project_name), 'glossaries/' + name + '.json.gz')
        # Whole upload is read into memory before any file is written.
        content = args['glossary_file'].stream.read()
        with gzip.open(gzip_file_path, 'w') as f:
            f.write(content)
        with gzip.open(json_file_path, 'w') as f:
            f.write(ProjectGlossaries.convert_glossary_to_json(content))
        write_to_file(content, file_path)
        # file.save(file_path)
        self.compute_statistics(project_name, name, json_file_path)
        # git_helper.commit(files=[project_name + '/master_config.json', project_name + '/glossaries/*'],
        #                   message='create a glossary: project {}, glossary {}'.format(project_name, name))
        return rest.created()
@requires_auth
def get(self, project_name):
if project_name not in data:
return rest.not_found('Project {} not found'.format(project_name))
return data[project_name]['master_config']['glossaries'].keys()
@requires_auth
def delete(self, project_name):
if project_name not in data:
return rest.not_found('Project {} not found'.format(project_name))
dir_path = os.path.join(_get_project_dir_path(project_name), 'glossaries')
shutil.rmtree(dir_path)
os.mkdir(dir_path) # recreate folder
data[project_name]['master_config']['glossaries'] = dict()
# remove all glossary names from all fields
for k, v in data[project_name]['master_config']['fields'].items():
if 'glossaries' in v and v['glossaries']:
v['glossaries'] = []
update_master_config_file(project_name)
# git_helper.commit(files=[project_name + '/master_config.json', project_name + '/glossaries/*'],
# message='delete all glossaries: project {}'.format(project_name))
return rest.deleted()
@staticmethod
def compute_statistics(project_name, glossary_name, json_file_path):
THRESHOLD = 5
ngram = {}
with gzip.open(json_file_path, 'r') as f:
obj = json.loads(f.read())
for item in obj:
t = len(item.split(' '))
if t > THRESHOLD:
continue
ngram[t] = ngram.get(t, 0) + 1
data[project_name]['master_config']['glossaries'][glossary_name] = {
'ngram_distribution': ngram,
'entry_count': len(obj),
'path': glossary_name + '.json.gz'
}
update_master_config_file(project_name)
@staticmethod
def convert_glossary_to_json(lines):
glossary = list()
lines = lines.replace('\r', '\n') # convert
lines = lines.split('\n')
t = CrfTokenizer()
t.setRecognizeHtmlEntities(True)
t.setRecognizeHtmlTags(True)
t.setSkipHtmlTags(True)
for line in lines:
line = line.strip()
if len(line) == 0: # trim empty line
continue
line = ' '.join(t.tokenize(line))
glossary.append(line)
return json.dumps(glossary)
@api.route('/projects/<project_name>/glossaries/<glossary_name>')
class Glossary(Resource):
    """REST resource for a single named glossary of a project.

    POST/PUT replace its content, GET downloads the gzipped text, DELETE
    removes all three stored forms (.txt, .txt.gz, .json.gz).
    """

    @requires_auth
    def post(self, project_name, glossary_name):
        # Replace the content of an existing glossary (404 if unknown).
        if project_name not in data:
            return rest.not_found('Project {} not found'.format(project_name))
        if glossary_name not in data[project_name]['master_config']['glossaries']:
            return rest.not_found('Glossary {} not found'.format(glossary_name))
        parse = reqparse.RequestParser()
        parse.add_argument('glossary_file', type=werkzeug.FileStorage, location='files')
        args = parse.parse_args()
        # http://werkzeug.pocoo.org/docs/0.12/datastructures/#werkzeug.datastructures.FileStorage
        if args['glossary_file'] is None:
            return rest.bad_request('Invalid glossary_file')
        name = glossary_name
        file_path = os.path.join(_get_project_dir_path(project_name), 'glossaries/' + name + '.txt')
        gzip_file_path = os.path.join(_get_project_dir_path(project_name), 'glossaries/' + name + '.txt.gz')
        json_file_path = os.path.join(_get_project_dir_path(project_name), 'glossaries/' + name + '.json.gz')
        # store the gzipped raw upload, a tokenized JSON version and the
        # plain-text copy, then refresh the ngram statistics
        content = args['glossary_file'].stream.read()
        with gzip.open(gzip_file_path, 'w') as f:
            f.write(content)
        with gzip.open(json_file_path, 'w') as f:
            f.write(ProjectGlossaries.convert_glossary_to_json(content))
        write_to_file(content, file_path)
        ProjectGlossaries.compute_statistics(project_name, glossary_name, json_file_path)
        return rest.created()

    @requires_auth
    def put(self, project_name, glossary_name):
        # PUT is an alias for POST (both replace the glossary content).
        return self.post(project_name, glossary_name)

    @requires_auth
    def get(self, project_name, glossary_name):
        # Download the gzipped text form as an attachment.
        if project_name not in data:
            return rest.not_found('Project {} not found'.format(project_name))
        if glossary_name not in data[project_name]['master_config']['glossaries']:
            return rest.not_found('Glossary {} not found'.format(glossary_name))
        file_path = os.path.join(_get_project_dir_path(project_name), 'glossaries/' + glossary_name + '.txt.gz')
        ret = send_file(file_path, mimetype='application/gzip',
                        as_attachment=True, attachment_filename=glossary_name + '.txt.gz')
        # expose the filename header to browser-side code (CORS)
        ret.headers['Access-Control-Expose-Headers'] = 'Content-Disposition'
        return ret

    @requires_auth
    def delete(self, project_name, glossary_name):
        # Remove the three stored files and every reference to the glossary.
        if project_name not in data:
            return rest.not_found('Project {} not found'.format(project_name))
        if glossary_name not in data[project_name]['master_config']['glossaries']:
            return rest.not_found('Glossary {} not found'.format(glossary_name))
        file_path = os.path.join(_get_project_dir_path(project_name), 'glossaries/' + glossary_name + '.txt')
        gzip_file_path = os.path.join(_get_project_dir_path(project_name), 'glossaries/' + glossary_name + '.txt.gz')
        json_file_path = os.path.join(_get_project_dir_path(project_name), 'glossaries/' + glossary_name + '.json.gz')
        os.remove(file_path)
        os.remove(gzip_file_path)
        os.remove(json_file_path)
        del data[project_name]['master_config']['glossaries'][glossary_name]
        # remove glossary_name from field which contains it
        for k, v in data[project_name]['master_config']['fields'].items():
            if 'glossaries' in v and glossary_name in v['glossaries']:
                v['glossaries'].remove(glossary_name)
        update_master_config_file(project_name)
        return rest.deleted()
@api.route('/projects/<project_name>/table_attributes')
class ProjectTableAttributes(Resource):
    """REST resource for the project-wide table_attributes collection."""

    @requires_auth
    def post(self, project_name):
        """Create a table attribute; conflicts if the name already exists."""
        if project_name not in data:
            return rest.not_found('Project {} not found'.format(project_name))
        # local was named `input`, shadowing the builtin; renamed (no API change)
        payload = request.get_json(force=True)
        is_valid, message = ProjectTableAttributes.validator(payload)
        if not is_valid:
            return rest.bad_request(message)
        attribute_name = payload['name']
        if attribute_name in data[project_name]['master_config']['table_attributes']:
            return rest.exists()
        if payload['field_name'] != '' and \
                payload['field_name'] not in data[project_name]['master_config']['fields']:
            return rest.bad_request('No such field')
        data[project_name]['master_config']['table_attributes'][attribute_name] = payload
        update_master_config_file(project_name)
        return rest.created()

    @requires_auth
    def get(self, project_name):
        """Return all table attributes (plain 200 if none are defined)."""
        if project_name not in data:
            return rest.not_found('Project {} not found'.format(project_name))
        if 'table_attributes' not in data[project_name]['master_config']:
            return rest.ok()
        return data[project_name]['master_config']['table_attributes']

    @requires_auth
    def delete(self, project_name):
        """Delete every table attribute of the project.

        Bug fix: the original assigned the *builtin* ``input`` function here
        (no local ``input`` exists in this method), which would then be
        persisted into master_config. Reset to an empty dict instead.
        """
        if project_name not in data:
            return rest.not_found('Project {} not found'.format(project_name))
        if 'table_attributes' not in data[project_name]['master_config']:
            return rest.deleted()
        data[project_name]['master_config']['table_attributes'] = dict()
        update_master_config_file(project_name)
        return rest.deleted()

    @staticmethod
    def validator(obj):
        """Validate (and normalize in place) an attribute payload.

        Returns (True, None) on success, (False, message) otherwise. A missing
        'field_name' is normalized to '' meaning "not bound to a field".
        """
        if 'name' not in obj or len(obj['name'].strip()) == 0:
            return False, 'Invalid attribute: name'
        if 'field_name' not in obj:
            obj['field_name'] = ''
        if 'value' not in obj or not isinstance(obj['value'], list):
            return False, 'Invalid attribute: value'
        if 'info' not in obj:
            return False, 'Invalid attribute: info'
        return True, None
@api.route('/projects/<project_name>/table_attributes/<attribute_name>')
class TableAttribute(Resource):
    """REST resource for one named table attribute of a project."""

    @requires_auth
    def post(self, project_name, attribute_name):
        """Replace an existing attribute; the payload name must match the URL."""
        if project_name not in data:
            return rest.not_found('Project {} not found'.format(project_name))
        attributes = data[project_name]['master_config']['table_attributes']
        if attribute_name not in attributes:
            return rest.not_found('attribute name not found')
        payload = request.get_json(force=True)
        is_valid, message = ProjectTableAttributes.validator(payload)
        if not is_valid:
            return rest.bad_request(message)
        if attribute_name != payload['name']:
            return rest.bad_request('Invalid table attribute name')
        field_ref = payload['field_name']
        if field_ref != '' and field_ref not in data[project_name]['master_config']['fields']:
            return rest.bad_request('No such field')
        attributes[attribute_name] = payload
        update_master_config_file(project_name)
        return rest.created()

    @requires_auth
    def put(self, project_name, attribute_name):
        """PUT is an alias for POST."""
        return self.post(project_name, attribute_name)

    @requires_auth
    def get(self, project_name, attribute_name):
        """Fetch a single attribute definition."""
        if project_name not in data:
            return rest.not_found('Project {} not found'.format(project_name))
        attributes = data[project_name]['master_config']['table_attributes']
        if attribute_name not in attributes:
            return rest.not_found('attribute name not found')
        return attributes[attribute_name]

    @requires_auth
    def delete(self, project_name, attribute_name):
        """Remove a single attribute and persist the config."""
        if project_name not in data:
            return rest.not_found('Project {} not found'.format(project_name))
        attributes = data[project_name]['master_config']['table_attributes']
        if attribute_name not in attributes:
            return rest.not_found('attribute name not found')
        del attributes[attribute_name]
        update_master_config_file(project_name)
        return rest.deleted()
# @api.route('/projects/<project_name>/entities/<kg_id>/tags')
# class EntityTags(Resource):
# @requires_auth
# def get(self, project_name, kg_id):
# if project_name not in data:
# return rest.not_found('Project {} not found'.format(project_name))
# entity_name = 'Ad'
# if entity_name not in data[project_name]['entities']:
# data[project_name]['entities'][entity_name] = dict()
# if kg_id not in data[project_name]['entities'][entity_name]:
# return rest.not_found('kg_id {} not found'.format(kg_id))
#
# return data[project_name]['entities'][entity_name][kg_id]
#
# @requires_auth
# def post(self, project_name, kg_id):
# if project_name not in data:
# return rest.not_found()
#
# input = request.get_json(force=True)
# tags = input.get('tags', [])
# if len(tags) == 0:
# return rest.bad_request('No tags given')
# # tag should be exist
# for tag_name in tags:
# if tag_name not in data[project_name]['master_config']['tags']:
# return rest.bad_request('Tag {} is not exist'.format(tag_name))
# # add tags to entity
# entity_name = 'Ad'
# for tag_name in tags:
# if entity_name not in data[project_name]['entities']:
# data[project_name]['entities'][entity_name] = dict()
# if kg_id not in data[project_name]['entities'][entity_name]:
# data[project_name]['entities'][entity_name][kg_id] = dict()
# if tag_name not in data[project_name]['entities'][entity_name][kg_id]:
# data[project_name]['entities'][entity_name][kg_id][tag_name] = dict()
#
# # write to file
# file_path = os.path.join(_get_project_dir_path(project_name), 'entity_annotations/entity_annotations.json')
# write_to_file(json.dumps(data[project_name]['entities'], indent=4), file_path)
# return rest.created()
@api.route('/projects/<project_name>/entities/<kg_id>/fields/<field_name>/annotations')
class FieldAnnotations(Resource):
@requires_auth
def get(self, project_name, kg_id, field_name):
if project_name not in data:
return rest.not_found('Project: {} not found'.format(project_name))
if kg_id not in data[project_name]['field_annotations']:
return rest.not_found('kg_id {} not | |
# <gh_stars>1-10  (repository metadata left over from dataset extraction; commented out so the file parses)
#!/usr/bin/env python3
# Library installation notes:
# plotly:
# pip3 install chart_studio
# pip3 install plotly
# or, for updates
# pip3 install plotly --upgrade
# scikit-learn:
# ref: https://www.kaggle.com/c/titanic/discussion/6801
# python3 -m pip install scikit-learn
# this might be necessary: pip3 install scipy
# or try running: python3 -m pip install scikit-learn
# or use use if trouble installing on linux: python3 -m pip install scikit-learn --user
#
# scipy regression demo: https://scikit-learn.org/stable/auto_examples/linear_model/plot_ols.html#sphx-glr-auto-examples-linear-model-plot-ols-py
import os, sys, math
from sklearn.metrics import r2_score
from sklearn.metrics import mean_squared_error
#import plotly.plotly as py
import chart_studio.plotly as py
import plotly.graph_objs as go
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
import matplotlib.pyplot as plt
from scipy import stats
import numpy as np
# Program metadata reported by help()
DATE = "24 July 2020"
VERSION = "2_ii"
AUTHOR = " <NAME>"  # NOTE(review): anonymized placeholder -- restore the real author name
AUTHORMAIL = "<EMAIL>"  # NOTE(review): anonymized placeholder -- restore the real address
#OUTPUT_DIR = "/tmp/0out/" # all results are saved in this local directory
OUTPUT_DIR = "0out/" # all results are saved in this local directory
INPUT_DIR = "data/"
# the file containing the names of genes to be used for normalizing
NORM_FILE = "normNames_i.csv"
# the file containing the names of the genes that we are studying.
PICK_THESE = "pickThese.csv" # contains the genes to study in the datasets. note: this file must include all data files and those used for normalizing.
THRESH = "thresh.csv" # contains a list of thresholds of r-squared values to study in heatmaps.
# substrings marking files in INPUT_DIR that must be skipped when loading data
IGNORE_FILES_list = [".DS_Store", "MANIFEST.txt",NORM_FILE, "~lock","annotations.txt",".gz",".html",PICK_THESE]
def help():
    """Print the program banner, usage notes and I/O directories to stdout.

    NOTE: this shadows the ``help`` builtin; the name is kept because callers
    use it.
    """
    h_str = " "+DATE+" | version: "+VERSION+" |"+AUTHOR+" | "+AUTHORMAIL
    print(" "+len(h_str) * "-")
    print(h_str)
    print(" "+len(h_str) * "-")
    print("\n\tThe GeneExPy program to perform linear regression over GDP datasets.")
    print("""\n\tLibrary installation notes:
    plotly:
        pip3 install plotly, or try running python3 -m pip install scikit-learn
    scikit-learn:
        python3 -m pip install scikit-learn, maybe necessary: pip3 install scipy
    """)
    print("\t+ \U0001f600 USAGE: programName <any key to launch>")
    print("\t+ INPUT directory: (your data files are here) : ",INPUT_DIR)
    print("\t+ OUTPUT directory: (your output is placed here) : ",OUTPUT_DIR)
    # typo fix: was "<rsqu>>>"; markers now match the <<...>> convention
    print("\n\t+ Note:\n\t Use parameter <<heatmap>> or <<rsqu>> to ensure that\n\t Plotly and the statistical libraries have been correctly installed.")
    print("\n\t Note: the data directory cannot handle subdirectories holding data. Please \n\tplace the text files into this data directory without using subdirectories.")
#end of help()
class Wrangler:
#""" Class to wrangle the data: to convert files to usable data for analysis"""
    def __init__(self):
        """Initialize the Wrangler's containers; all state is filled in later
        by the load methods (getParams, getRawMatrix, ...)."""
        self.file_list = []  # holds each data file path found under INPUT_DIR
        self.lower_list = [g.lower() for g in IGNORE_FILES_list]  # lower-cased ignore patterns
        self.raw_dic = {}  # key: dataset file, value: per-gene lists in file order
        # Layout of each self.raw_dic[ds_str] value:
        #   [0] ensNumbers for this dataset
        #   [1] raw expressions
        #   [2] rawlogs of expressions
        #   [3] AVGg1 normalizing, [4] AVGg2, [5] AVGg3,
        #   [6] tubb normalizing, [7] tuba1a normalizing
        self.picker_dic = {}  # genes to study, loaded from the PICK_THESE file
        self.normNames_dic = {}  # gene names used for averaging and then normalizing
        self.groupGene_dic = {}  # gene names of each group; their exp used for avgs
        self.thresh_list = []  # r-squared thresholds to capture for study
        self.rSquMat_dic = {}  # r-squared matrices: key name.html -> [[x_list], [y_list], [z_list]]
        self.threshold_dic = {}  # threshold-specific heatmaps, prepared by filterMatrix()
        #end of __init__()
def openTextFile(self, inFile):
#print("openTextFile(): ",inFile)
#read a list and then restun a dic
try: # is there a file?
#data = open(inFile).read().lower() # return a string
data = open(inFile).readlines()
return data
except IOError:
print("No file found... \"",inFile,"\" Exiting")
sys.exit(1)
#end of openTextFile()
def isFileInIgnoreList(self, fName_str):
"""function to determine whether a file is to be ignored"""
#lower_list = [g.lower() for g in self.IGNORE_FILES_list]
fName_str = fName_str.lower()
#print(" fName_str :" ,fName_str)
# print("self.lower_list",self.lower_list)
for i in self.lower_list:
if i in fName_str:
#print(i,"found in ",fName_str)
return True # ignore the file
return False # load the file, not in the list
#end of isFileInIgnore()
def getFileListing(self,corpusDir):
""" method to grab all files not on the ignore list """
#self.file_list = [] # holds each file and diretory
for root, dirs, files in os.walk(corpusDir):
for file1 in files:
if self.isFileInIgnoreList(file1) == False:
#print("loading ",file1)
dataFile = os.path.join(root, file1)
self.file_list.append(dataFile)
else:
print("\t- Ignoring file at this step: ",file1)
return self.file_list
#end of getFileListing
def getParams(self):
"""Method to open the picker file (PICK_THESE) to find the genes to study and the thresholds file (THRESH) for an r-squared focus. """
self.picker_dic = {} # a dic of genes to study from the PICK_THESE file
self.thresh_list = [] # listing of thresholds to capture r-squared values for study.
# pickThese file format:
# UBE2V2,ENSG00000169139.10
# FAAP20 (C1orf86),ENSG00000162585.15
# DNAJA4,ENSG00000140403.11
# PSMD4,ENSG00000159352.14
# POLE3,ENSG00000148229.11
# include the PICK_THESE genes to use.
# file format
# ABR,ENSG00000159842.13
# ACTR8,ENSG00000113812.12
# APLF,ENSG00000169621.8
# APTX,ENSG00000137074.17
d = open(PICK_THESE)
for i in d:
isplit_list = i.strip().split(",") # positions 0 and 1 for ensNums and expressions, resp
self.picker_dic[isplit_list[1]] = isplit_list[0].strip()#replace(" ","")# ensnum (key) gene name (value)
# printer(self.picker_dic)
# include the NORM_FILE genes to use.
# file format
# WDR77,ENSG00000116455.12,1
# USP39,ENSG00000168883.18,2
# CDC5L,ENSG00000096401.7,2
d = open(NORM_FILE)
for i in d:
isplit_list = i.strip().split(",") # positions 0 and 1 for ensNums and expressions, resp
self.picker_dic[isplit_list[1]] = isplit_list[0].strip()#replace(" ","")# ensnum (key) gene name (value)
# printer(self.picker_dic)
# threshold values; genes having less than or equal to these values
# file format:
# 0.1
# 0.2
# 0.3
d = open(THRESH)
for i in d: # each line of the file
val_float = float(i)
self.thresh_list.append(val_float)
# printer(self.thresh_list)
# end of getParams()
    def getRawMatrix(self):
        """Load every file in self.file_list and populate self.raw_dic.

        For each data file the stored value is
        [ensNums_list, exp_list, rawlogs_list]: the ensemble ids kept (those
        present in self.picker_dic), their raw expression strings, and the
        natural log of each kept expression (0 stored where log() raises,
        e.g. for a zero expression), so the three lists stay index-aligned.
        """
        for f in self.file_list:  # for each file
            ensNums_list = []  # ensemble numbers kept from the current file
            exp_list = []      # expression values (strings) from the current file
            rawlogs_list = []  # ln(expression); 0 is placed instead of log(0)
            m_list = []        # bundles the three lists above
            d = self.openTextFile(f)
            for i in d:  # each line: "<ensNum> <expression>"
                m_list = []
                isplit_list = i.split()  # positions 0 and 1: ensNum, expression
                # keep only the genes selected for study
                if isplit_list[0] in self.picker_dic:
                    ensNums_list.append(isplit_list[0])
                    exp_list.append(isplit_list[1])
                    # NOTE(review): the log computation is assumed to sit inside
                    # this `if` (the parallel-list design documented in __init__
                    # requires equal lengths) -- confirm against the original
                    # file's indentation.
                    try:  # replace the counter list with a log-normed of raw data
                        rawlogs_list.append(math.log(float(isplit_list[1]),math.exp(1)))
                    except ValueError:
                        rawlogs_list.append(0)
            m_list = [ensNums_list, exp_list, rawlogs_list]
            self.raw_dic[f] = m_list
        #end of getRawMatrix(file_list)
    def getNormNamesMatrix(self, normFilename):
        """Load the normalizing-gene file into self.normNames_dic[normFilename].

        The stored value is [humanGene_list, ensNums_list, group_list,
        counter_list] where counter_list is the 0-based row index. Handles
        both whitespace-separated rows (3 columns) and single-token rows that
        are internally comma-separated.
        """
        # file format:
        #   humanGene EnsNum Group
        #   WDR77 ENSG00000116455.12 1
        #   USP39 ENSG00000168883.18 2
        counter_list = []  # row position within the file
        counter = 0
        humanGene_list = []  # human gene names
        ensNums_list = []  # ensemble numbers from the current file
        group_list = []  # group number per gene: which averaging set it joins
        m_list = []  # bundles all the above lists
        d = self.openTextFile(normFilename)
        for i in d:  # each line of the file
            isplit_list = i.split()  # whitespace split first
            if len(isplit_list) > 1:
                # whitespace-separated row: columns map directly
                humanGene_list.append(isplit_list[0])
                ensNums_list.append(isplit_list[1])
                group_list.append(isplit_list[2])
                counter_list.append(counter)
                counter += 1
            if len(isplit_list) == 1:
                # single token: this row is comma-separated instead
                headers_list = isplit_list[0].split(",")
                humanGene_list.append(headers_list[0])
                ensNums_list.append(headers_list[1])
                group_list.append(headers_list[2])
                counter_list.append(counter)
                counter += 1
        m_list = [humanGene_list, ensNums_list, group_list, counter_list]
        self.normNames_dic[normFilename] = m_list
        #end of getNormNamesMatrix(NormFileName)
    # TODO: compare the dataset names across the whole set of files to make sure that all expressions are for the same ensNum.
    # Not sure that this is necessary?
    def compareGeneOrder(self):
        """Stub: intended to verify gene order across datasets (unimplemented)."""
        print("CompareGeneOrder()")
        #end of compareGeneOrder()
def getGroups(self):
""" Method to determine the number of groups. Creates self.group_set to contain groups and self.gene_dic to hold the genes from the group."""
#print("\traw data :",self.raw_dic.keys())
#print(self.raw_dic.keys())
#print("normNames")
#print(self.normNames_dic)
# make a dictionary of lists (values) with key (groups)
group_dic = {}
# print(self.normNames_dic[NORM_FILE])
main_list = self.normNames_dic[NORM_FILE]
# print("&&&&", main_list)
#print("\n0",main_list[0]) # humanGene
#print("\n1",main_list[1]) # ensNum
#print("\n2",main_list[2]) # group
# How many groups are there?
self.group_set = set() # contains element to represent each group
for i in main_list[2]:
self.group_set.add(i)
self.group_set.discard("Group") # note: discard removes the member element but does nothing if element is not in set
#print("self.group_set :",self.group_set)
# go through each group, pull ensNums of each to make a list
self.groupGene_dic = {}
my_list = []
myGeneGroup_list = []
for i in self.group_set:
myGeneGroup_list = []
#print( "\tGroup: ",i)
for j | |
Wipe old installation, otherwise qwt refuses to install
cmd = ['rm', '-vf'] + glob(P.join(installDir, 'lib/', 'libqwt.*'))
self.helper(*cmd)
cmd = [installDir + '/bin/qmake','-spec']
if self.arch.os == 'osx':
cmd.append(P.join(installDir,'mkspecs','macx-clang'))
else:
cmd.append(P.join(installDir,'mkspecs','linux-g++'))
self.helper(*cmd)
# Turn of designer option in config file
config_path = 'qwtconfig.pri'
self.helper('sed', '-ibak', '-e',
's/QWT_CONFIG += QwtDesigner/#QWT_CONFIG += QwtDesigner/g',
config_path)
# Qwt pollutes the doc folder
@stage
def install(self):
super(qwt, self).install()
cmd = ['rm', '-vrf', P.join( self.env['INSTALL_DIR'], 'doc', 'html' ) ]
self.helper(*cmd)
cmd = ['rm', '-vrf', P.join( self.env['INSTALL_DIR'], 'doc', 'man' ) ]
self.helper(*cmd)
class zlib(Package):
    """zlib 1.2.8; built shared-only (the static archive is deleted)."""
    src = 'http://downloads.sourceforge.net/libpng/zlib-1.2.8.tar.gz'
    chksum = 'a4d316c404ff54ca545ea71a27af7dbc29817088'

    @stage
    def configure(self):
        # zlib's configure takes --shared instead of the usual enable flags
        super(zlib, self).configure(other=('--shared',))

    @stage
    def install(self):
        super(zlib, self).install()
        static_lib = P.join(self.env['INSTALL_DIR'], 'lib', 'libz.a')
        self.helper('rm', static_lib)
class jpeg(Package):
    """libjpeg v8d (patched); shared library only."""
    src = 'http://www.ijg.org/files/jpegsrc.v8d.tar.gz'
    chksum = 'f080b2fffc7581f7d19b968092ba9ebc234556ff'
    patches = 'patches/jpeg8'

    def configure(self):
        # build only the shared library, never the static archive
        super(jpeg, self).configure(enable=('shared',), disable=('static',))
class png(Package):
    """libpng 1.6.24, built against the zlib in our install prefix."""
    src = 'http://downloads.sourceforge.net/libpng/libpng-1.6.24.tar.gz'
    chksum = 'bdd5a59136c6b1e4cc94de12268122796e24036a'

    def configure(self):
        zlib_prefix = '--with-zlib-prefix=' + self.env['INSTALL_DIR']
        super(png, self).configure(disable='static', other=[zlib_prefix])
class cspice(Package):
    """NAIF CSPICE toolkit, downloaded per platform and built with its own
    csh script.

    Note: Version 66 has been released which incorporates the dsk library!
    This will break when they release a new version BECAUSE THEY USE
    UNVERSIONED TARBALLS.
    """
    # per-platform tarball URL and sha1 (NAIF toolkit release in the comment)
    PLATFORM = dict(
        linux64 = dict(
            src = 'ftp://naif.jpl.nasa.gov/pub/naif/toolkit/C/PC_Linux_GCC_64bit/packages/cspice.tar.Z',
            chksum = 'bb1bee61522e4fac18b68364362270b4eb2f3fd8', # N0065
        ),
        osx64 = dict(
            src = 'ftp://naif.jpl.nasa.gov/pub/naif/toolkit/C/MacIntel_OSX_AppleC_64bit/packages/cspice.tar.Z',
            chksum = 'ec3fd214facf14f72908c11cc865d4c8579baf3d', # N0066
        ),
    )

    def __init__(self, env):
        """Select src, chksum and patch set for the current platform."""
        super(cspice, self).__init__(env)
        self.pkgname += '_' + self.arch.osbits
        self.src = self.PLATFORM[self.arch.osbits]['src']
        self.chksum = self.PLATFORM[self.arch.osbits]['chksum']
        if self.arch.os == "osx":
            self.patches = 'patches/cspice_osx'
        else:
            self.patches = 'patches/cspice_linux'

    def configure(self): pass  # the toolkit ships its own build script

    @stage
    def compile(self):
        # run NAIF's bundled makeall.csh under csh
        cmd = ['csh']
        self.args = ['./makeall.csh']
        cmd += self.args
        self.helper(*cmd)

    @stage
    def install(self):
        # headers are installed under include/naif/
        d = P.join('%(INSTALL_DIR)s' % self.env, 'include', 'naif')
        self.helper('mkdir', '-p', d)
        cmd = ['cp', '-vf'] + glob(P.join(self.workdir, 'include', '*.h')) + [d]
        self.helper(*cmd)
        d = P.join('%(INSTALL_DIR)s' % self.env, 'lib')
        self.helper('mkdir', '-p', d)
        # Wipe the static libraries
        cmd = ['rm' ] + glob(P.join(self.workdir,'lib', '*.a'))
        self.helper(*cmd)
        # Copy everything else, including the dynamic libraries
        cmd = ['cp', '-vf'] + glob(P.join(self.workdir, 'lib', '*')) + [d]
        self.helper(*cmd)
class dsk(Package):
    """NAIF alpha DSK C toolkit.

    TODO: This library has been folded into cspice and is no longer available!
    This will break when they release a new version BECAUSE THEY USE
    UNVERSIONED TARBALLS.
    """
    # NOTE(review): the linux64 chksum below was mangled by a redaction pass
    # ("<PASSWORD>" placeholders) -- restore the real sha1 before relying on it.
    PLATFORM = dict(
        linux64 = dict(
            src = 'ftp://naif.jpl.nasa.gov/pub/naif/misc/alpha_dsk/C/PC_Linux_GCC_64bit/packages/alpha_dsk_c.tar.Z',
            chksum = '01f258d3233ba7<PASSWORD>025df0<PASSWORD>02a14611643',
        ),
        osx64 = dict(
            src = 'ftp://naif.jpl.nasa.gov/pub/naif/misc/alpha_dsk/C/MacIntel_OSX_AppleC_64bit/packages/alpha_dsk_c.tar.Z',
            chksum = 'd574fe46fcb3a12c0c64d982503c383fd6f2b355',
        ),
    )

    def __init__(self, env):
        """Select src, chksum and patch set for the current platform."""
        super(dsk, self).__init__(env)
        self.pkgname += '_' + self.arch.osbits
        self.src = self.PLATFORM[self.arch.osbits]['src']
        self.chksum = self.PLATFORM[self.arch.osbits]['chksum']
        if self.arch.os == "osx":
            self.patches = 'patches/dsk_osx'
        else:
            self.patches = 'patches/dsk_linux'

    def configure(self): pass  # the toolkit ships its own build script

    @stage
    def compile(self):
        # run NAIF's bundled makeall.csh under csh
        cmd = ['csh']
        self.args = ['./makeall.csh']
        cmd += self.args
        self.helper(*cmd)

    @stage
    def install(self):
        """Install the dsklib_c headers and the dynamic libraries."""
        d = P.join('%(INSTALL_DIR)s' % self.env, 'include', 'naif')
        self.helper('mkdir', '-p', d)
        # Cleanup: the original built a cp command for include/*.h and then
        # immediately overwrote it without ever running it; the dead statement
        # is removed (only src/dsklib_c headers are installed, as before).
        cmd = ['cp', '-vf'] + glob(P.join(self.workdir, 'src', 'dsklib_c', '*.h')) + [d]
        self.helper(*cmd)
        d = P.join('%(INSTALL_DIR)s' % self.env, 'lib')
        self.helper('mkdir', '-p', d)
        # Wipe the static libraries
        cmd = ['rm'] + glob(P.join(self.workdir, 'lib', '*.a'))
        self.helper(*cmd)
        # Copy everything else, including the dynamic libraries
        cmd = ['cp', '-vf'] + glob(P.join(self.workdir, 'lib', '*')) + [d]
        self.helper(*cmd)
class protobuf(Package):
    """protobuf 2.6.1.

    configure() needs a curl with SSL; our builtin curl (built for xerces)
    lacks it, so every curl found on PATH is tried until one works.
    """
    src = 'https://github.com/google/protobuf/releases/download/v2.6.1/protobuf-2.6.1.tar.bz2'
    chksum = '6421ee86d8fb4e39f21f56991daa892a3e8d314b'

    @stage
    def configure(self):
        success = False
        curl_paths = program_paths("curl")
        for curl_path in curl_paths:
            curl_dir = os.path.dirname(curl_path)
            # prepend this curl's directory so autogen.sh picks it up
            self.env['PATH'] = curl_dir + ':' + self.env['PATH']
            try:
                print("Trying to use: " + curl_path)
                self.helper('./autogen.sh')
                # Bug fixes: `disable` must be the tuple ('static',) -- a bare
                # ('static') is just a string (cf. the jpeg package above) --
                # and the flag list was missing a comma, which silently
                # concatenated the cflags and cxxflags entries into one
                # bogus argument.
                super(protobuf, self).configure(disable=('static',),
                    other=(['cflags="-stdlib=libc++"', 'cxxflags="-stdlib=libc++"', 'linkflags="-stdlib=libc++"']))
                success = True
                break
            except Exception as e:  # py2.6+/py3-compatible spelling
                print("Bad version of curl.")
                print(str(e))
        if not success:
            raise PackageError(self, 'Could not find a good curl to use.')
class suitesparse(Package):
    """SuiteSparse 4.2.1.

    Upstream only offers a static-archive build (likely for performance);
    shared libraries would require writing a build system for it.
    """
    src = 'http://faculty.cse.tamu.edu/davis/SuiteSparse/SuiteSparse-4.2.1.tar.gz'
    chksum = '2fec3bf93314bd14cbb7470c0a2c294988096ed6'

    @stage
    def configure(self):
        # nothing to configure: everything is driven by `make install`
        pass

    @stage
    def install(self):
        prefix = self.env['INSTALL_DIR']
        self.helper('make', 'install',
                    'INSTALL_INCLUDE=' + P.join(prefix, 'include'),
                    'INSTALL_LIB=' + P.join(prefix, 'lib'))
class osg3(CMakePackage):
    """OpenSceneGraph 3.2.0 with most optional plugins switched off."""
    src = 'https://github.com/openscenegraph/OpenSceneGraph/archive/OpenSceneGraph-3.2.0.zip'
    chksum = '5435de08cd7f67691f6be7cfa0d36b80f04bcb34'
    patches = 'patches/osg3'
    def configure(self):
        # build the OSG applications, keep Qt out, skip the docs
        other_flags = ['-DBUILD_OSG_APPLICATIONS=ON', '-DCMAKE_VERBOSE_MAKEFILE=ON', '-DOSG_USE_QT=OFF', '-DBUILD_DOCUMENTATION=OFF']
        if self.arch.os == 'osx':
            # native imageio image plugin and Cocoa windowing on macOS
            other_flags.extend(['-DOSG_DEFAULT_IMAGE_PLUGIN_FOR_OSX=imageio','-DOSG_WINDOWING_SYSTEM=Cocoa'])
        super(osg3, self).configure(
            with_='GDAL GLUT JPEG OpenEXR PNG ZLIB'.split(),
            without='CURL QuickTime CoreVideo QTKit COLLADA FBX FFmpeg FLTK FOX FreeType GIFLIB Inventor ITK Jasper LibVNCServer OpenAL OpenVRML OurDCMTK Performer Qt3 Qt4 SDL TIFF wxWidgets Xine XUL RSVG NVTT DirectInput GtkGL Poppler-glib GTA'.split(),
            other=other_flags)
class flann(GITPackage, CMakePackage):
    """FLANN fast approximate nearest-neighbor library."""
    src = 'https://github.com/mariusmuja/flann.git'
    chksum = 'b8a442f'

    @stage
    def configure(self):
        """Build only the core C++ library; no bindings, CUDA, MPI or OpenMP."""
        super(flann, self).configure(other=['-DBUILD_C_BINDINGS=OFF','-DBUILD_MATLAB_BINDINGS=OFF','-DBUILD_PYTHON_BINDINGS=OFF','-DBUILD_CUDA_LIB=OFF','-DUSE_MPI=OFF','-DUSE_OPENMP=OFF'])

    @stage
    def install(self):
        """Install, then remove the static archives so only shared libs remain.

        Bugfix: 'rm' used to be invoked unconditionally; when the glob
        matched no files it ran with no operands and failed the install.
        """
        super(flann, self).install()
        static_libs = glob(P.join(self.env['INSTALL_DIR'], 'lib', 'libflann*.a'))
        if static_libs:
            self.helper('rm', *static_libs)
class eigen(CMakePackage):
    """Eigen 3.2.5 header-only linear-algebra library."""
    src = 'http://bitbucket.org/eigen/eigen/get/3.2.5.tar.bz2'
    chksum = 'aa4667f0b134f5688c5dff5f03335d9a19aa9b3d'

    def configure(self):
        """Point CMake at our boost install and build with RelWithDebInfo."""
        install_dir = self.env['INSTALL_DIR']
        boost_include = P.join(install_dir, 'include', 'boost-' + boost.version)
        super(eigen, self).configure(other=[
            '-DBoost_INCLUDE_DIR=' + boost_include,
            '-DBoost_LIBRARY_DIRS=' + P.join(install_dir, 'lib'),
            '-DCMAKE_BUILD_TYPE=RelWithDebInfo'
        ])
class glog(GITPackage, CMakePackage):
    """Google logging library, built as a shared lib against our gflags."""
    src = 'https://github.com/google/glog.git'
    chksum = '0472b91'

    def configure(self):
        """Configure glog, pointing CMake at the installed gflags."""
        ext = lib_ext(self.arch.os)
        # Both arms of the old per-OS conditional produced an empty list
        # (the OSX '-m64' flags were commented out), so no OS-specific
        # flags are added here.
        flags = ['-DGFLAGS_INCLUDE_DIR=' + P.join(self.env['INSTALL_DIR'], 'include/gflags'),
                 '-DGFLAGS_LIBRARY=' + P.join(self.env['INSTALL_DIR'], 'lib/libgflags' + ext),
                 '-DBUILD_SHARED_LIBS=ON']
        super(glog, self).configure(other=flags)
class ceres(CMakePackage):
    # Ceres non-linear least-squares solver. Depends on eigen, gflags, glog,
    # SuiteSparse and LAPACK, all installed earlier into INSTALL_DIR.
    src = 'http://ceres-solver.org/ceres-solver-1.11.0.tar.gz'
    chksum = '5e8683bfb410b1ba8b8204eeb0ec1fba009fb2d0'
    def configure(self):
        """Point CMake at all dependencies in INSTALL_DIR; build shared libs, no tests/examples."""
        ext = lib_ext(self.arch.os)
        super(ceres, self).configure(other=[
            '-DEIGEN_INCLUDE_DIR=' + P.join(self.env['INSTALL_DIR'],'include/eigen3'),
            '-DBoost_INCLUDE_DIR=' + P.join(self.env['INSTALL_DIR'],'include','boost-'+boost.version),
            '-DBoost_LIBRARY_DIRS=' + P.join(self.env['INSTALL_DIR'],'lib'),
            '-DGFLAGS_INCLUDE_DIR=' + P.join(self.env['INSTALL_DIR'],'include/gflags'),
            '-DGFLAGS_LIBRARY=' + P.join(self.env['INSTALL_DIR'],'lib/libgflags'+ext),
            '-DGLOG_INCLUDE_DIR=' + P.join(self.env['INSTALL_DIR'],'include'),
            '-DGLOG_LIBRARY=' + P.join(self.env['INSTALL_DIR'],'lib/libglog'+ext),
            # MINIGLOG=OFF forces use of the real glog we just built.
            '-DCMAKE_VERBOSE_MAKEFILE=ON', '-DSHARED_LIBS=ON', '-DMINIGLOG=OFF',
            '-DSUITESPARSE=ON', '-DLAPACK=ON',
            # Empty LIB_SUFFIX keeps libraries in lib/ rather than lib64/.
            '-DLIB_SUFFIX=', '-DBUILD_EXAMPLES=OFF', '-DBUILD_SHARED_LIBS=ON', '-DBUILD_TESTING=OFF'
        ])
class libnabo(GITPackage, CMakePackage):
    """libnabo nearest-neighbor search library (dependency of libpointmatcher)."""
    src = 'https://github.com/ethz-asl/libnabo.git'
    patches = 'patches/libnabo'
    chksum = '2df86e0'

    def configure(self):
        """Configure libnabo against our eigen/boost; strip bindings/tests/examples.

        Fix: '-DCMAKE_PREFIX_PATH' was previously passed twice with the
        identical value; the redundant duplicate has been removed.
        """
        installDir = self.env['INSTALL_DIR']
        # Remove python bindings, tests, and examples
        self.helper('sed', '-ibak', '-e', 's/add_subdirectory(python)//g', '-e', 's/add_subdirectory(tests)//g', '-e', 's/add_subdirectory(examples)//g', 'CMakeLists.txt')
        options = [
            '-DCMAKE_CXX_FLAGS=-g -O3',
            '-DCMAKE_PREFIX_PATH=' + installDir,
            '-DEIGEN_INCLUDE_DIR=' + P.join(installDir, 'include/eigen3'),
            '-DBoost_INCLUDE_DIR=' + P.join(installDir, 'include',
                                            'boost-' + boost.version),
            '-DBoost_LIBRARY_DIRS=' + P.join(installDir, 'lib'),
            '-DBoost_DIR=' + P.join(installDir, 'lib'),
            '-DCMAKE_VERBOSE_MAKEFILE=ON',
            '-DSHARED_LIBS=ON',
            '-DCMAKE_BUILD_TYPE=Release',
        ]
        # Bugfix for wrong boost dir being found (the later -DBoost_DIR
        # deliberately overrides the one above on linux).
        if self.arch.os == 'linux':
            options += [
                '-DBoost_DIR=' + os.getcwd() + '/settings/boost',
                '-DMY_BOOST_VERSION=' + boost.version,
                '-DMY_BOOST_DIR=' + installDir
            ]
        super(libnabo, self).configure(other=options)
class libpointmatcher(GITPackage, CMakePackage):
    """Point-cloud registration (ICP) library from ETH ASL."""
    src = 'https://github.com/ethz-asl/libpointmatcher'
    #src = 'https://github.com/oleg-alexandrov/libpointmatcher.git'
    chksum = 'bcf4b04'
    # We apply a non-trivial patch to libpointmatcher to make
    # it a bit more efficient. These changes seem to be custom
    # enough that would not make sense to be merged upstream.
    patches = 'patches/libpointmatcher'
    # A patch can be re-generated with
    # f=patches/libpointmatcher/0001_custom_lib_changes.patch
    # git diff hash1 hash2 > $f
    # perl -pi -e "s# (a|b)/# #g" $f
    # perl -pi -e "s#--- pointmatcher#--- libpointmatcher/pointmatcher#g" $f
    # perl -pi -e "s#\+\+\+ pointmatcher#+++ libpointmatcher/pointmatcher#g" $f
    def configure(self):
        """Configure libpointmatcher against our boost/eigen with custom include ordering.

        The order of CPPFLAGS mutations below matters: the freshly fetched
        source dirs are prepended so they win over any stale headers of a
        previous libpointmatcher install found in INSTALL_DIR.
        """
        installDir = self.env['INSTALL_DIR']
        # Turn off the unit tests which don't build on OSX10.12
        self.helper('sed', '-ibak', '-e',
                    's/add_subdirectory(utest)/#add_subdirectory(utest)/g',
                    'CMakeLists.txt')
        # Ensure we use the header files from the just fetched code,
        # rather than its older version in the install dir.
        curr_include = '-I' + self.workdir + '/pointmatcher'
        self.env['CPPFLAGS'] = curr_include + ' ' + self.env['CPPFLAGS']
        curr_include = '-I' + self.workdir
        self.env['CPPFLAGS'] = curr_include + ' ' + self.env['CPPFLAGS']
        # bugfix for lunokhod2
        boost_dir = P.join(installDir,'include','boost-'+boost.version)
        self.env['CXXFLAGS'] += ' -I' + boost_dir
        # OSX clang does not support fopenmp as of 10.11
        if self.arch.os == 'linux':
            self.env['CPPFLAGS'] += ' -fopenmp'
        options = [
            '-DCMAKE_CXX_FLAGS=-g -O3 -I' + boost_dir,
            '-DBoost_INCLUDE_DIR=' + boost_dir,
            '-DBoost_LIBRARY_DIRS=' + P.join(installDir,'lib'),
            '-DEIGEN_INCLUDE_DIR=' + P.join(installDir,'include/eigen3'),
            '-DCMAKE_VERBOSE_MAKEFILE=ON',
            '-DCMAKE_PREFIX_PATH=' + installDir,
            '-DSHARED_LIBS=ON',
            '-DUSE_SYSTEM_YAML_CPP=OFF', # Use the yaml code included with LPM
            '-DCMAKE_BUILD_TYPE=Release'
        ]
        # Bugfix for lunokhod2. This has problems on Mac OSX 10.6.
        if self.arch.os == 'linux':
            options += [
                '-DBoost_DIR=' + os.getcwd() + '/settings/boost',
                '-DMY_BOOST_VERSION=' + boost.version,
                '-DMY_BOOST_DIR=' + installDir
            ]
        super(libpointmatcher, self).configure(other=options)
# We would like to fetch this very source code. This is used
# in the nightly builds and regressions.
class binarybuilder(GITPackage):
    """Fetch-only package: clones BinaryBuilder itself; nothing to build or install."""
    src = 'https://github.com/NeoGeographyToolkit/BinaryBuilder.git'

    def configure(self):
        pass

    @stage
    def compile(self, cwd=None):
        pass

    @stage
    def install(self):
        pass
class opencv(CMakePackage):
if get_platform().os == 'osx':
src = 'https://github.com/opencv/opencv/archive/3.3.1.tar.gz'
chksum = '79dba99090a5c48308fe91db8336ec2931e06b57'
else:
src = 'https://github.com/opencv/opencv/archive/3.1.0.tar.gz'
chksum = '31dd36c5d59c76f6b7982a64d6ffc0993736d7ea'
#patches = 'patches/opencv'
# NOTE: OSX 10.12 seems to require a newer version (3.3.1 works) but that does not work on CentOS 6.
# - To get it to build on CentOS 6, a newer CMake is needed (with SSL/HTTPS support) to perform
# the file fetching steps in the OpenCV 3.3.1 CMake files. | |
    def exportChildren(self, outfile, level, namespace_='', name_='headType', fromsubclass_=False, pretty_print=True):
        """Write the MEDIA_DESCRIPTOR, LINKED_FILE_DESCRIPTOR and PROPERTY child elements to outfile.

        Auto-generated by generateDS; kept byte-identical apart from comments.
        """
        # eol_ mirrors the generator's template; child elements emit their own line ends.
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        for MEDIA_DESCRIPTOR_ in self.MEDIA_DESCRIPTOR:
            MEDIA_DESCRIPTOR_.export(outfile, level, namespace_, name_='MEDIA_DESCRIPTOR', pretty_print=pretty_print)
        for LINKED_FILE_DESCRIPTOR_ in self.LINKED_FILE_DESCRIPTOR:
            LINKED_FILE_DESCRIPTOR_.export(outfile, level, namespace_, name_='LINKED_FILE_DESCRIPTOR', pretty_print=pretty_print)
        for PROPERTY_ in self.PROPERTY:
            PROPERTY_.export(outfile, level, namespace_, name_='PROPERTY', pretty_print=pretty_print)
    def build(self, node):
        """Populate this object from an ElementTree node and return self."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            # strip any XML namespace prefix from the tag before dispatching
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        """Read the MEDIA_FILE and TIME_UNITS attributes from the XML node, once each."""
        value = find_attr_value_('MEDIA_FILE', node)
        if value is not None and 'MEDIA_FILE' not in already_processed:
            already_processed.add('MEDIA_FILE')
            self.MEDIA_FILE = value
        value = find_attr_value_('TIME_UNITS', node)
        if value is not None and 'TIME_UNITS' not in already_processed:
            already_processed.add('TIME_UNITS')
            self.TIME_UNITS = value
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        """Build and attach one child element (MEDIA_DESCRIPTOR / LINKED_FILE_DESCRIPTOR / PROPERTY)."""
        if nodeName_ == 'MEDIA_DESCRIPTOR':
            obj_ = MEDIA_DESCRIPTORType.factory()
            obj_.build(child_)
            self.MEDIA_DESCRIPTOR.append(obj_)
            obj_.original_tagname_ = 'MEDIA_DESCRIPTOR'
        elif nodeName_ == 'LINKED_FILE_DESCRIPTOR':
            obj_ = LINKED_FILE_DESCRIPTORType.factory()
            obj_.build(child_)
            self.LINKED_FILE_DESCRIPTOR.append(obj_)
            obj_.original_tagname_ = 'LINKED_FILE_DESCRIPTOR'
        elif nodeName_ == 'PROPERTY':
            # PROPERTY elements are bound by the generator to propType
            obj_ = propType.factory()
            obj_.build(child_)
            self.PROPERTY.append(obj_)
            obj_.original_tagname_ = 'PROPERTY'
# end class headType
class timeType(GeneratedsSuper):
    """Generated (generateDS) binding for the TIME_ORDER element: a list of TIME_SLOT children.

    NOTE(review): auto-generated code — kept byte-identical apart from
    comments so it stays diffable against regeneration.
    """
    subclass = None
    superclass = None
    def __init__(self, TIME_SLOT=None):
        self.original_tagname_ = None
        if TIME_SLOT is None:
            self.TIME_SLOT = []
        else:
            self.TIME_SLOT = TIME_SLOT
    def factory(*args_, **kwargs_):
        """Instantiate timeType or a user-registered subclass, if one is active."""
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, timeType)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if timeType.subclass:
            return timeType.subclass(*args_, **kwargs_)
        else:
            return timeType(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_TIME_SLOT(self): return self.TIME_SLOT
    def set_TIME_SLOT(self, TIME_SLOT): self.TIME_SLOT = TIME_SLOT
    def add_TIME_SLOT(self, value): self.TIME_SLOT.append(value)
    def insert_TIME_SLOT_at(self, index, value): self.TIME_SLOT.insert(index, value)
    def replace_TIME_SLOT_at(self, index, value): self.TIME_SLOT[index] = value
    def hasContent_(self):
        """Return True when there is at least one TIME_SLOT child to serialize."""
        if (
            self.TIME_SLOT
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespace_='', name_='timeType', namespacedef_='', pretty_print=True):
        """Serialize this element (open tag, attributes, children, close tag) to outfile."""
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('timeType')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='timeType')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespace_='', name_='timeType', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='timeType'):
        """timeType has no XML attributes."""
        pass
    def exportChildren(self, outfile, level, namespace_='', name_='timeType', fromsubclass_=False, pretty_print=True):
        """Write each TIME_SLOT child element."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        for TIME_SLOT_ in self.TIME_SLOT:
            TIME_SLOT_.export(outfile, level, namespace_, name_='TIME_SLOT', pretty_print=pretty_print)
    def build(self, node):
        """Populate this object from an ElementTree node and return self."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        """timeType has no XML attributes to read."""
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        """Build and attach one TIME_SLOT child."""
        if nodeName_ == 'TIME_SLOT':
            obj_ = TIME_SLOTType.factory()
            obj_.build(child_)
            self.TIME_SLOT.append(obj_)
            obj_.original_tagname_ = 'TIME_SLOT'
# end class timeType
class tierType(GeneratedsSuper):
    """Generated (generateDS) binding for an EAF TIER element.

    A tier carries identification/metadata attributes (TIER_ID, PARTICIPANT,
    ANNOTATOR, ...) and a list of ANNOTATION children.
    NOTE(review): auto-generated code — kept byte-identical apart from
    comments so it stays diffable against regeneration.
    """
    subclass = None
    superclass = None
    def __init__(self, TIER_ID=None, PARTICIPANT=None, ANNOTATOR=None, LINGUISTIC_TYPE_REF=None, DEFAULT_LOCALE=None, PARENT_REF=None, EXT_REF=None, LANG_REF=None, ANNOTATION=None):
        self.original_tagname_ = None
        self.TIER_ID = _cast(None, TIER_ID)
        self.PARTICIPANT = _cast(None, PARTICIPANT)
        self.ANNOTATOR = _cast(None, ANNOTATOR)
        self.LINGUISTIC_TYPE_REF = _cast(None, LINGUISTIC_TYPE_REF)
        self.DEFAULT_LOCALE = _cast(None, DEFAULT_LOCALE)
        self.PARENT_REF = _cast(None, PARENT_REF)
        self.EXT_REF = _cast(None, EXT_REF)
        self.LANG_REF = _cast(None, LANG_REF)
        if ANNOTATION is None:
            self.ANNOTATION = []
        else:
            self.ANNOTATION = ANNOTATION
    def factory(*args_, **kwargs_):
        """Instantiate tierType or a user-registered subclass, if one is active."""
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, tierType)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if tierType.subclass:
            return tierType.subclass(*args_, **kwargs_)
        else:
            return tierType(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_ANNOTATION(self): return self.ANNOTATION
    def set_ANNOTATION(self, ANNOTATION): self.ANNOTATION = ANNOTATION
    def add_ANNOTATION(self, value): self.ANNOTATION.append(value)
    def insert_ANNOTATION_at(self, index, value): self.ANNOTATION.insert(index, value)
    def replace_ANNOTATION_at(self, index, value): self.ANNOTATION[index] = value
    def get_TIER_ID(self): return self.TIER_ID
    def set_TIER_ID(self, TIER_ID): self.TIER_ID = TIER_ID
    def get_PARTICIPANT(self): return self.PARTICIPANT
    def set_PARTICIPANT(self, PARTICIPANT): self.PARTICIPANT = PARTICIPANT
    def get_ANNOTATOR(self): return self.ANNOTATOR
    def set_ANNOTATOR(self, ANNOTATOR): self.ANNOTATOR = ANNOTATOR
    def get_LINGUISTIC_TYPE_REF(self): return self.LINGUISTIC_TYPE_REF
    def set_LINGUISTIC_TYPE_REF(self, LINGUISTIC_TYPE_REF): self.LINGUISTIC_TYPE_REF = LINGUISTIC_TYPE_REF
    def get_DEFAULT_LOCALE(self): return self.DEFAULT_LOCALE
    def set_DEFAULT_LOCALE(self, DEFAULT_LOCALE): self.DEFAULT_LOCALE = DEFAULT_LOCALE
    def get_PARENT_REF(self): return self.PARENT_REF
    def set_PARENT_REF(self, PARENT_REF): self.PARENT_REF = PARENT_REF
    def get_EXT_REF(self): return self.EXT_REF
    def set_EXT_REF(self, EXT_REF): self.EXT_REF = EXT_REF
    def get_LANG_REF(self): return self.LANG_REF
    def set_LANG_REF(self, LANG_REF): self.LANG_REF = LANG_REF
    def hasContent_(self):
        """Return True when there is at least one ANNOTATION child to serialize."""
        if (
            self.ANNOTATION
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespace_='', name_='tierType', namespacedef_='', pretty_print=True):
        """Serialize this element (open tag, attributes, children, close tag) to outfile."""
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('tierType')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='tierType')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespace_='', name_='tierType', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='tierType'):
        """Write each non-None attribute exactly once (already_processed guards duplicates)."""
        if self.TIER_ID is not None and 'TIER_ID' not in already_processed:
            already_processed.add('TIER_ID')
            outfile.write(' TIER_ID=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.TIER_ID), input_name='TIER_ID')), ))
        if self.PARTICIPANT is not None and 'PARTICIPANT' not in already_processed:
            already_processed.add('PARTICIPANT')
            outfile.write(' PARTICIPANT=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.PARTICIPANT), input_name='PARTICIPANT')), ))
        if self.ANNOTATOR is not None and 'ANNOTATOR' not in already_processed:
            already_processed.add('ANNOTATOR')
            outfile.write(' ANNOTATOR=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.ANNOTATOR), input_name='ANNOTATOR')), ))
        if self.LINGUISTIC_TYPE_REF is not None and 'LINGUISTIC_TYPE_REF' not in already_processed:
            already_processed.add('LINGUISTIC_TYPE_REF')
            outfile.write(' LINGUISTIC_TYPE_REF=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.LINGUISTIC_TYPE_REF), input_name='LINGUISTIC_TYPE_REF')), ))
        if self.DEFAULT_LOCALE is not None and 'DEFAULT_LOCALE' not in already_processed:
            already_processed.add('DEFAULT_LOCALE')
            outfile.write(' DEFAULT_LOCALE=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.DEFAULT_LOCALE), input_name='DEFAULT_LOCALE')), ))
        if self.PARENT_REF is not None and 'PARENT_REF' not in already_processed:
            already_processed.add('PARENT_REF')
            outfile.write(' PARENT_REF=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.PARENT_REF), input_name='PARENT_REF')), ))
        if self.EXT_REF is not None and 'EXT_REF' not in already_processed:
            already_processed.add('EXT_REF')
            outfile.write(' EXT_REF=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.EXT_REF), input_name='EXT_REF')), ))
        if self.LANG_REF is not None and 'LANG_REF' not in already_processed:
            already_processed.add('LANG_REF')
            outfile.write(' LANG_REF=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.LANG_REF), input_name='LANG_REF')), ))
    def exportChildren(self, outfile, level, namespace_='', name_='tierType', fromsubclass_=False, pretty_print=True):
        """Write each ANNOTATION child element."""
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        for ANNOTATION_ in self.ANNOTATION:
            ANNOTATION_.export(outfile, level, namespace_, name_='ANNOTATION', pretty_print=pretty_print)
    def build(self, node):
        """Populate this object from an ElementTree node and return self."""
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        """Read each known attribute from the XML node, at most once each."""
        value = find_attr_value_('TIER_ID', node)
        if value is not None and 'TIER_ID' not in already_processed:
            already_processed.add('TIER_ID')
            self.TIER_ID = value
        value = find_attr_value_('PARTICIPANT', node)
        if value is not None and 'PARTICIPANT' not in already_processed:
            already_processed.add('PARTICIPANT')
            self.PARTICIPANT = value
        value = find_attr_value_('ANNOTATOR', node)
        if value is not None and 'ANNOTATOR' not in already_processed:
            already_processed.add('ANNOTATOR')
            self.ANNOTATOR = value
        value = find_attr_value_('LINGUISTIC_TYPE_REF', node)
        if value is not None and 'LINGUISTIC_TYPE_REF' not in already_processed:
            already_processed.add('LINGUISTIC_TYPE_REF')
            self.LINGUISTIC_TYPE_REF = value
        value = find_attr_value_('DEFAULT_LOCALE', node)
        if value is not None and 'DEFAULT_LOCALE' not in already_processed:
            already_processed.add('DEFAULT_LOCALE')
            self.DEFAULT_LOCALE = value
        value = find_attr_value_('PARENT_REF', node)
        if value is not None and 'PARENT_REF' not in already_processed:
            already_processed.add('PARENT_REF')
            self.PARENT_REF = value
        value = find_attr_value_('EXT_REF', node)
        if value is not None and 'EXT_REF' not in already_processed:
            already_processed.add('EXT_REF')
            self.EXT_REF = value
        value = find_attr_value_('LANG_REF', node)
        if value is not None and 'LANG_REF' not in already_processed:
            already_processed.add('LANG_REF')
            self.LANG_REF = value
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        """Build and attach one ANNOTATION child."""
        if nodeName_ == 'ANNOTATION':
            obj_ = annotationType.factory()
            obj_.build(child_)
            self.ANNOTATION.append(obj_)
            obj_.original_tagname_ = 'ANNOTATION'
# end class tierType
class annotationType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, ALIGNABLE_ANNOTATION=None, REF_ANNOTATION=None):
self.original_tagname_ = None
self.ALIGNABLE_ANNOTATION = ALIGNABLE_ANNOTATION
self.REF_ANNOTATION = REF_ANNOTATION
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, annotationType)
if subclass is not None:
return subclass(*args_, **kwargs_)
if annotationType.subclass:
return annotationType.subclass(*args_, **kwargs_)
else:
return annotationType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_ALIGNABLE_ANNOTATION(self): return self.ALIGNABLE_ANNOTATION
def set_ALIGNABLE_ANNOTATION(self, ALIGNABLE_ANNOTATION): self.ALIGNABLE_ANNOTATION = ALIGNABLE_ANNOTATION
def get_REF_ANNOTATION(self): return self.REF_ANNOTATION
def set_REF_ANNOTATION(self, REF_ANNOTATION): self.REF_ANNOTATION = REF_ANNOTATION
def hasContent_(self):
if (
self.ALIGNABLE_ANNOTATION is not None or
self.REF_ANNOTATION is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='annotationType', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('annotationType')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='annotationType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='annotationType', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='annotationType'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='annotationType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.ALIGNABLE_ANNOTATION | |
import logging
from pathlib import Path
import uuid
import numpy as np
import matplotlib.pyplot as plt
from scipy import interpolate
from brainbox.core import Bunch
import ibllib.exceptions as err
import ibllib.plots as plots
import ibllib.io.spikeglx
import ibllib.dsp as dsp
import alf.io
from ibllib.io.spikeglx import glob_ephys_files, get_neuropixel_version_from_files
import ibllib.io.raw_data_loaders as raw
_logger = logging.getLogger('ibllib')

SYNC_BATCH_SIZE_SAMPLES = 2 ** 18  # number of samples to read at once in bin file for sync
WHEEL_RADIUS_CM = 1  # stay in radians
WHEEL_TICKS = 1024
# maximum tolerated clock drift between bpod and FPGA, in parts per million
BPOD_FPGA_DRIFT_THRESHOLD_PPM = 150
# Default sync channel maps: keyed by neuropixel version ('3A'/'3B'), then by
# binary file type ('ap' or 'nidq'); values map event names to sync channel indices.
CHMAPS = {'3A':
          {'ap':
           {'left_camera': 2,
            'right_camera': 3,
            'body_camera': 4,
            'bpod': 7,
            'frame2ttl': 12,
            'rotary_encoder_0': 13,
            'rotary_encoder_1': 14,
            'audio': 15
            }
           },
          '3B':
          {'nidq':
           {'left_camera': 0,
            'right_camera': 1,
            'body_camera': 2,
            'imec_sync': 3,
            'frame2ttl': 4,
            'rotary_encoder_0': 5,
            'rotary_encoder_1': 6,
            'audio': 7,
            'bpod': 16},
           'ap':
           {'imec_sync': 6}
           },
          }
def get_ibl_sync_map(ef, version):
    """
    Gets default channel map for the version/binary file type combination
    :param ef: ibllib.io.spikeglx.glob_ephys_file dictionary with field 'ap' or 'nidq'
    :param version: neuropixel probe version, '3A' or '3B'
    :return: channel map dictionary
    :raises ValueError: if the version is unknown, or a 3B entry has neither
     'nidq' nor 'ap' field (previously these cases crashed with a confusing
     UnboundLocalError on default_chmap)
    """
    if version == '3A':
        default_chmap = CHMAPS['3A']['ap']
    elif version == '3B':
        if ef.get('nidq', None):
            default_chmap = CHMAPS['3B']['nidq']
        elif ef.get('ap', None):
            default_chmap = CHMAPS['3B']['ap']
        else:
            raise ValueError("3B ephys file entry has neither 'nidq' nor 'ap' field")
    else:
        raise ValueError("Unknown neuropixel version: " + str(version))
    # a sync map found next to the binary file overrides the default
    return ibllib.io.spikeglx.get_sync_map(ef['path']) or default_chmap
def _sync_to_alf(raw_ephys_apfile, output_path=None, save=False, parts=''):
    """
    Extracts sync.times, sync.channels and sync.polarities from binary ephys dataset
    :param raw_ephys_apfile: bin file containing ephys data or spike
    :param output_path: output directory
    :param save: bool write to disk only if True
    :param parts: string or list of strings that will be appended to the filename before extension
    :return: Bunch with 'times', 'channels', 'polarities' numpy vectors
    """
    # handles input argument: support ibllib.io.spikeglx.Reader, str and pathlib.Path
    if isinstance(raw_ephys_apfile, ibllib.io.spikeglx.Reader):
        sr = raw_ephys_apfile
    else:
        raw_ephys_apfile = Path(raw_ephys_apfile)
        sr = ibllib.io.spikeglx.Reader(raw_ephys_apfile)
    # if no output, need a temp folder to swap for big files
    # NOTE(review): if a Reader instance is passed without output_path,
    # raw_ephys_apfile is still the Reader and .parent will fail — confirm
    # callers always pass output_path in that case.
    if not output_path:
        output_path = raw_ephys_apfile.parent
    # uuid suffix avoids collisions if several extractions run in the same folder
    file_ftcp = Path(output_path).joinpath(f'fronts_times_channel_polarity{str(uuid.uuid4())}.bin')
    # loop over chunks of the raw ephys file
    wg = dsp.WindowGenerator(sr.ns, SYNC_BATCH_SIZE_SAMPLES, overlap=1)
    fid_ftcp = open(file_ftcp, 'wb')
    for sl in wg.slice:
        ss = sr.read_sync(sl)
        ind, fronts = dsp.fronts(ss, axis=0)
        # a = sr.read_sync_analog(sl)
        # each row: [time in seconds, channel index, polarity (+1/-1)]
        sav = np.c_[(ind[0, :] + sl.start) / sr.fs, ind[1, :], fronts.astype(np.double)]
        sav.tofile(fid_ftcp)
        # print progress
        wg.print_progress()
    # close temp file, read from it and delete
    fid_ftcp.close()
    tim_chan_pol = np.fromfile(str(file_ftcp))
    tim_chan_pol = tim_chan_pol.reshape((int(tim_chan_pol.size / 3), 3))
    file_ftcp.unlink()
    sync = {'times': tim_chan_pol[:, 0],
            'channels': tim_chan_pol[:, 1],
            'polarities': tim_chan_pol[:, 2]}
    if save:
        alf.io.save_object_npy(output_path, sync, '_spikeglx_sync', parts=parts)
    return Bunch(sync)
def _bpod_events_extraction(bpod_t, bpod_fronts):
"""
From detected fronts on the bpod sync traces, outputs the synchronisation events
related to trial start and valve opening
:param bpod_t: numpy vector containing times of fronts
:param bpod_fronts: numpy vector containing polarity of fronts (1 rise, -1 fall)
:return: numpy arrays of times t_trial_start, t_valve_open and t_iti_in
"""
TRIAL_START_TTL_LEN = 2.33e-4
VALVE_OPEN_TTL_LEN = 0.4
# make sure that there are no 2 consecutive fall or consecutive rise events
assert(np.all(np.abs(np.diff(bpod_fronts)) == 2))
# make sure that the first event is a rise
assert(bpod_fronts[0] == 1)
# take only even time differences: ie. from rising to falling fronts
dt = np.diff(bpod_t)[::2]
# detect start trials event assuming length is 0.23 ms except the first trial
i_trial_start = np.r_[0, np.where(dt <= TRIAL_START_TTL_LEN)[0] * 2]
t_trial_start = bpod_t[i_trial_start]
# # the first trial we detect the first falling edge to which we subtract 0.1ms
# t_trial_start[0] -= 1e-4
# the last trial is a dud and should be removed
t_trial_start = t_trial_start[:-1]
# valve open events are between 50ms to 300 ms
i_valve_open = np.where(np.logical_and(dt > TRIAL_START_TTL_LEN,
dt < VALVE_OPEN_TTL_LEN))[0] * 2
i_valve_open = np.delete(i_valve_open, np.where(i_valve_open < 2))
t_valve_open = bpod_t[i_valve_open]
# ITI events are above 400 ms
i_iti_in = np.where(dt > VALVE_OPEN_TTL_LEN)[0] * 2
i_iti_in = np.delete(i_iti_in, np.where(i_valve_open < 2))
i_iti_in = bpod_t[i_iti_in]
# # some debug plots when needed
# import matplotlib.pyplot as plt
# import ibllib.plots as plots
# plt.figure()
# plots.squares(bpod_t, bpod_fronts)
# plots.vertical_lines(t_valve_open, ymin=-0.2, ymax=1.2, linewidth=0.5, color='g')
# plots.vertical_lines(t_trial_start, ymin=-0.2, ymax=1.2, linewidth=0.5, color='r')
return t_trial_start, t_valve_open, i_iti_in
def _rotary_encoder_positions_from_fronts(ta, pa, tb, pb, ticks=WHEEL_TICKS, radius=1,
                                          coding='x4'):
    """
    Extracts the rotary encoder absolute position as function of time from fronts detected
    on the 2 channels. Outputs in units of radius parameters, by default radians
    Coding options detailed here: http://www.ni.com/tutorial/7109/pt/
    Here output is clockwise from subject perspective

    :param ta: time of fronts on channel A
    :param pa: polarity of fronts on channel A
    :param tb: time of fronts on channel B
    :param pb: polarity of fronts on channel B
    :param ticks: number of ticks corresponding to a full revolution (1024 for IBL rotary encoder)
    :param radius: radius of the wheel. Defaults to 1 for an output in radians
    :param coding: x1, x2 or x4 coding (IBL default is x4)
    :return: indices vector (ta) and position vector
    """
    if coding == 'x1':
        # x1: count only rising edges of A, direction given by B's phase
        # NOTE(review): 'ia' holds positions from searchsorted into tb but is
        # then used to index pa/ta — verify this indexing is intentional.
        ia = np.searchsorted(tb, ta[pa == 1])
        ia = ia[ia < ta.size]
        ia = ia[pa[ia] == 1]
        ib = np.searchsorted(ta, tb[pb == 1])
        ib = ib[ib < tb.size]
        ib = ib[pb[ib] == 1]
        t = np.r_[ta[ia], tb[ib]]
        p = np.r_[ia * 0 + 1, ib * 0 - 1]
        ordre = np.argsort(t)
        t = t[ordre]
        p = p[ordre]
        p = np.cumsum(p) / ticks * np.pi * 2 * radius
        return t, p
    elif coding == 'x2':
        # x2: both edges of A; sign from the state of B at each A front
        p = pb[np.searchsorted(tb, ta) - 1] * pa
        p = - np.cumsum(p) / ticks * np.pi * 2 * radius / 2
        return ta, p
    elif coding == 'x4':
        # x4: both edges of both channels, interleaved in time order
        p = np.r_[pb[np.searchsorted(tb, ta) - 1] * pa, -pa[np.searchsorted(ta, tb) - 1] * pb]
        t = np.r_[ta, tb]
        ordre = np.argsort(t)
        t = t[ordre]
        p = p[ordre]
        p = - np.cumsum(p) / ticks * np.pi * 2 * radius / 4
        return t, p
def _audio_events_extraction(audio_t, audio_fronts):
"""
From detected fronts on the audio sync traces, outputs the synchronisation events
related to tone in
:param audio_t: numpy vector containing times of fronts
:param audio_fronts: numpy vector containing polarity of fronts (1 rise, -1 fall)
:return: numpy arrays t_ready_tone_in, t_error_tone_in
"""
# make sure that there are no 2 consecutive fall or consecutive rise events
assert(np.all(np.abs(np.diff(audio_fronts)) == 2))
# take only even time differences: ie. from rising to falling fronts
dt = np.diff(audio_t)[::2]
# detect ready tone by length below 110 ms
i_ready_tone_in = np.r_[np.where(dt <= 0.11)[0] * 2]
t_ready_tone_in = audio_t[i_ready_tone_in]
# error tones are events lasting from 400ms to 600ms
i_error_tone_in = np.where(np.logical_and(0.4 < dt, dt < 1.2))[0] * 2
t_error_tone_in = audio_t[i_error_tone_in]
return t_ready_tone_in, t_error_tone_in
def _assign_events_to_trial(t_trial_start, t_event, take='last'):
"""
Assign events to a trial given trial start times and event times.
Trials without an event
result in nan value in output time vector.
The output has a consistent size with t_trial_start and ready to output to alf.
:param t_trial_start: numpy vector of trial start times
:param t_event: numpy vector of event times to assign to trials
:param take: 'last' or 'first' (optional, default 'last'): index to take in case of duplicates
:return: numpy array of event times with the same shape of trial start.
"""
# make sure the events are sorted
try:
assert(np.all(np.diff(t_trial_start) >= 0))
except AssertionError:
raise ValueError('Trial starts vector not sorted')
try:
assert(np.all(np.diff(t_event) >= 0))
except AssertionError:
raise ValueError('Events vector is not sorted')
# remove events that happened before the first trial start
t_event = t_event[t_event >= t_trial_start[0]]
ind = np.searchsorted(t_trial_start, t_event) - 1
t_event_nans = np.zeros_like(t_trial_start) * np.nan
# select first or last element matching each trial start
if take == 'last':
iall, iu = np.unique(np.flip(ind), return_index=True)
t_event_nans[iall] = t_event[- (iu - ind.size + 1)]
elif take == 'first':
iall, iu = np.unique(ind, return_index=True)
t_event_nans[iall] = t_event[iu]
return t_event_nans
def _get_sync_fronts(sync, channel_nb, tmax=np.inf):
    """Return the times/polarities of fronts on one sync channel, up to tmax."""
    keep = np.logical_and(sync['channels'] == channel_nb, sync['times'] <= tmax)
    fronts = {'times': sync['times'][keep],
              'polarities': sync['polarities'][keep]}
    return Bunch(fronts)
def extract_camera_sync(sync, output_path=None, save=False, chmap=None):
"""
Extract camera timestamps from the sync matrix
:param sync: dictionary 'times', 'polarities' of fronts detected on sync trace
:param output_path: where to save the data
:param save: True/False
:param chmap: dictionary containing channel indices. Default to constant.
:return: dictionary containing camera timestamps
"""
# NB: should we check we opencv the expected number of frames ?
assert(chmap)
sr = _get_sync_fronts(sync, chmap['right_camera'])
sl = _get_sync_fronts(sync, chmap['left_camera'])
sb = | |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import torch
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as f
from torch.autograd import Variable
from utils import *
import numpy as np
import pandas as pd
from tqdm import tqdm
import json
import random
import argparse
import datetime
import logging
import os, os.path
import shutil
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_auc_score, accuracy_score, confusion_matrix
from transformers import AdamW, AutoTokenizer, RobertaConfig
from model_weighted_roberta import *
import json
def binary_accuracy(preds, y):
    """
    Returns accuracy per batch, i.e. if you get 8/10 right, this returns 0.8
    """
    # predicted class = argmax over the softmaxed logits
    probabilities = nn.Softmax(dim=1)(preds)
    _, predicted_classes = torch.max(probabilities, 1)
    return accuracy_score(y, predicted_classes)
def eval_metrics(preds, y):
    '''
    Returns performance metrics of predictor
    :param y: ground truth label
    :param preds: predicted logits
    :return: auc, acc, tn, fp, fn, tp
    '''
    m = nn.Softmax(dim=1)
    probabilities = m(preds)
    y_values, indices = torch.max(probabilities, 1)
    y_pred = indices
    # NOTE(review): AUC is computed from the max class probability, not
    # P(class=1) — verify this is the intended score for the ROC curve.
    try:
        auc = roc_auc_score(y, y_values)
    except ValueError:
        # roc_auc_score raises when y contains a single class; report 0
        auc = np.array(0)
    acc = accuracy_score(y, y_pred)
    # fixed label order guarantees a 2x2 matrix even if a class is absent
    conf_mat = confusion_matrix(y, y_pred, labels=[0, 1])
    tn = conf_mat[0, 0]
    fp = conf_mat[0, 1]
    fn = conf_mat[1, 0]
    tp = conf_mat[1, 1]
    return auc, acc, tn, fp, fn, tp
def simple_tokenize(orig_tokens, tokenizer, orig_labels, label_map, max_seq_length):
    """
    Subword-tokenize a pre-split sentence and align its token labels.

    Each word is expanded into subword pieces; the first piece carries the
    word's mapped label and every continuation piece gets the ignore id
    (-100) so the loss skips it. The sequence is truncated to leave room for
    the tokenizer's special tokens, then wrapped in CLS/SEP.

    :param orig_tokens: list of word strings
    :param tokenizer: HuggingFace-style tokenizer
    :param orig_labels: per-word label strings, parallel to orig_tokens
    :param label_map: label string -> label id
    :param max_seq_length: maximum output length including special tokens
    :return: (subword token list, aligned label-id list)
    """
    pad_token_label_id = -100
    pieces = []
    piece_labels = []
    for word, label in zip(orig_tokens, orig_labels):
        sub = tokenizer.tokenize(word)
        # Some tokenizers return [] for whitespace-only input; skip those.
        if not sub:
            continue
        pieces.extend(sub)
        piece_labels.extend([label_map[label]] + [pad_token_label_id] * (len(sub) - 1))
    # Leave room for the special tokens (2 for BERT-style, 3 for some models).
    keep = max_seq_length - tokenizer.num_special_tokens_to_add()
    if len(pieces) > keep:
        pieces = pieces[:keep]
        piece_labels = piece_labels[:keep]
    bert_tokens = [tokenizer.cls_token] + pieces + [tokenizer.sep_token]
    label_ids = [pad_token_label_id] + piece_labels + [pad_token_label_id]
    return bert_tokens, label_ids
def tokenize_with_new_mask(orig_text, max_length, tokenizer, orig_labels, label_map):
    """
    Tokenize a batch of pre-split sentences and build padded ids, attention
    masks and aligned token-label ids.

    :param orig_text: list of sentences, each a list of word strings
    :param max_length: fixed sequence length (pad/truncate)
    :param tokenizer: HuggingFace-style tokenizer
    :param orig_labels: per-sentence token label strings
    :param label_map: label string -> label id
    :return: (input_ids, attention_masks, label_ids) as arrays of shape
             (batch, max_length)
    """
    pad_token_label_id = -100
    tokenized = [simple_tokenize(text, tokenizer, labels, label_map, max_length)
                 for text, labels in zip(orig_text, orig_labels)]
    bert_tokens = [pair[0] for pair in tokenized]
    label_ids = [pair[1] for pair in tokenized]
    input_ids = [tokenizer.convert_tokens_to_ids(toks) for toks in bert_tokens]
    input_ids = pad_sequences(input_ids, maxlen=max_length, dtype="long", truncating="post", padding="post")
    label_ids = pad_sequences(label_ids, maxlen=max_length, dtype="long", truncating="post", padding="post",
                              value=pad_token_label_id)
    # Attend to every non-padding position (padding token id is 0 here).
    attention_masks = np.array([[float(tok_id > 0) for tok_id in seq] for seq in input_ids])
    return input_ids, attention_masks, label_ids
def train(model, optimizer, train_batch_generator, num_batches, device, args, label_map, token_weight, y_weight):
    """
    Main training routine: runs one epoch of `num_batches` batches.

    :param model: joint token/sequence classification model
    :param optimizer: torch optimizer
    :param train_batch_generator: yields (x, seq_labels, token_labels, masks)
    :param num_batches: batches per epoch
    :param device: torch device
    :param args: parsed CLI args (uses args.token_lambda)
    :param label_map: token-label id -> tag name mapping
    :param token_weight: optional class-weight tensor for the token loss
    :param y_weight: optional class-weight tensor for the sequence loss
    :return: ((loss, s_auc, s_acc, s_tn, s_fp, s_fn, s_tp),
              (t_acc, t_results, t_results_by_tag, t_CR))
    """
    epoch_loss = 0
    epoch_s_acc, epoch_s_auc, epoch_t_acc = 0, 0, 0
    epoch_t_results, epoch_t_results_by_tag = {}, {}
    epoch_s_tn, epoch_s_fp, epoch_s_fn, epoch_s_tp = 0, 0, 0, 0
    epoch_t_CR = ""
    model.train()
    # Class weights are loop-invariant; move them to the device once instead
    # of re-issuing .to(device) every batch.
    token_weight = token_weight.to(device) if token_weight is not None else None
    y_weight = y_weight.to(device) if y_weight is not None else None
    # Training
    for b in tqdm(range(num_batches)):
        x_batch, y_batch_l, t_batch_l, masks_batch = next(train_batch_generator)
        # 3-D input means pre-computed embeddings (floats); 2-D means token ids.
        if len(x_batch.shape) == 3:
            x_batch = Variable(torch.FloatTensor(x_batch)).to(device)
        else:
            x_batch = Variable(torch.LongTensor(x_batch)).to(device)
        # FIX: np.float was deprecated in NumPy 1.20 and removed in 1.24; the
        # builtin float is the documented drop-in replacement.
        y_batch_l = y_batch_l.astype(float)
        y_batch_l = torch.LongTensor(y_batch_l)
        y_batch = Variable(y_batch_l).to(device)
        t_batch_l = t_batch_l.astype(float)
        t_batch_l = torch.LongTensor(t_batch_l)
        t_batch = Variable(t_batch_l).to(device)
        masks_batch = Variable(torch.FloatTensor(masks_batch)).to(device)
        optimizer.zero_grad()
        outputs = model(input_ids=x_batch, attention_mask=masks_batch,
                        seq_labels=y_batch, token_labels=t_batch,
                        token_class_weight=token_weight, seq_class_weight=y_weight,
                        token_lambda=args.token_lambda, )
        loss, token_logits, seq_logits = outputs[:3]
        loss.backward()
        optimizer.step()
        s_auc, s_acc, s_tn, s_fp, s_fn, s_tp = eval_metrics(seq_logits.detach().cpu(),
                                                            y_batch_l)
        # CRF models return decoded tag sequences in outputs[3]; others return logits.
        if type(model) in [RobertaForTokenAndSequenceClassificationWithCRF, BiLSTMForWeightedTokenAndSequenceClassificationWithCRF]:
            # Drop padding positions (label < 0) before scoring CRF output.
            t_batch_filtered = [t_batch_l[i][t_batch_l[i] >= 0].tolist() for i in range(t_batch_l.shape[0])]
            t_eval_metrics = compute_crf_metrics(outputs[3], t_batch_filtered, label_map)
        else:
            t_eval_metrics = compute_metrics(token_logits.detach().cpu(), t_batch_l, label_map)
        epoch_loss += loss.item()
        epoch_s_auc += s_auc
        epoch_s_acc += s_acc
        epoch_s_tn += s_tn
        epoch_s_fp += s_fp
        epoch_s_fn += s_fn
        epoch_s_tp += s_tp
        epoch_t_acc += t_eval_metrics["accuracy_score"]
        # NOTE(review): update() keeps only the last batch's detailed results.
        epoch_t_results.update(t_eval_metrics["results"])
        epoch_t_results_by_tag.update(t_eval_metrics["results_by_tag"])
        epoch_t_CR = t_eval_metrics["CR"]
    print(f'\tClassification Loss: {epoch_loss / num_batches:.3f}')
    return_s_tuple = (epoch_loss / num_batches, epoch_s_auc / num_batches, epoch_s_acc / num_batches,
                      epoch_s_tn, epoch_s_fp, epoch_s_fn,
                      epoch_s_tp)
    return_t_tuple = (
        epoch_t_acc / num_batches, epoch_t_results, epoch_t_results_by_tag, epoch_t_CR)
    return_tuple = (return_s_tuple, return_t_tuple)
    return return_tuple
def evaluate(model, test_batch_generator, num_batches, device, args, label_map, token_weight, y_weight):
    """
    Main evaluation routine: scores `num_batches` batches under torch.no_grad().

    Same contract as train(), but additionally returns the concatenated raw
    token and sequence logits for all batches.

    :return: ((loss, s_auc, s_acc, s_tn, s_fp, s_fn, s_tp),
              (t_acc, t_results, t_results_by_tag, t_CR),
              output_t_pred, output_s_pred)
    """
    epoch_loss = 0
    epoch_s_acc, epoch_s_auc, epoch_t_acc = 0, 0, 0
    epoch_t_results, epoch_t_results_by_tag = {}, {}
    epoch_s_tn, epoch_s_fp, epoch_s_fn, epoch_s_tp = 0, 0, 0, 0
    epoch_t_CR = ""
    output_t_pred, output_s_pred = None, None
    model.eval()
    # Class weights are loop-invariant; move them to the device once.
    token_weight = token_weight.to(device) if token_weight is not None else None
    y_weight = y_weight.to(device) if y_weight is not None else None
    with torch.no_grad():
        for b in tqdm(range(num_batches)):
            x_batch, y_batch, t_batch, masks_batch = next(test_batch_generator)
            # 3-D input means pre-computed embeddings; 2-D means token ids.
            if len(x_batch.shape) == 3:
                x_batch = Variable(torch.FloatTensor(x_batch)).to(device)
            else:
                x_batch = Variable(torch.LongTensor(x_batch)).to(device)
            # FIX: np.float was deprecated in NumPy 1.20 and removed in 1.24;
            # the builtin float is the documented drop-in replacement.
            y_batch = y_batch.astype(float)
            y_batch = Variable(torch.LongTensor(y_batch)).to(device)
            t_batch = t_batch.astype(float)
            t_batch = Variable(torch.LongTensor(t_batch)).to(device)
            masks_batch = Variable(torch.FloatTensor(masks_batch)).to(device)
            outputs = model(input_ids=x_batch, attention_mask=masks_batch,
                            seq_labels=y_batch, token_labels=t_batch,
                            token_class_weight=token_weight, seq_class_weight=y_weight,
                            token_lambda=args.token_lambda, )
            loss, token_logits, seq_logits = outputs[:3]
            s_auc, s_acc, s_tn, s_fp, s_fn, s_tp = eval_metrics(seq_logits.detach().cpu(),
                                                                y_batch.detach().cpu())
            # CRF models return decoded tag sequences in outputs[3]; others return logits.
            if type(model) in [RobertaForTokenAndSequenceClassificationWithCRF, BiLSTMForWeightedTokenAndSequenceClassificationWithCRF]:
                t_batch_l = t_batch.detach().cpu()
                # Drop padding positions (label < 0) before scoring CRF output.
                t_batch_filtered = [t_batch_l[i][t_batch_l[i] >= 0].tolist() for i in range(t_batch_l.shape[0])]
                t_eval_metrics = compute_crf_metrics(outputs[3], t_batch_filtered, label_map)
            else:
                t_eval_metrics = compute_metrics(token_logits.detach().cpu(), t_batch.detach().cpu(), label_map)
            epoch_loss += loss.item()
            epoch_s_auc += s_auc
            epoch_s_acc += s_acc
            epoch_s_tn += s_tn
            epoch_s_fp += s_fp
            epoch_s_fn += s_fn
            epoch_s_tp += s_tp
            epoch_t_acc += t_eval_metrics["accuracy_score"]
            # NOTE(review): update() keeps only the last batch's detailed results.
            epoch_t_results.update(t_eval_metrics["results"])
            epoch_t_results_by_tag.update(t_eval_metrics["results_by_tag"])
            epoch_t_CR = t_eval_metrics["CR"]
            # Accumulate raw logits across batches for downstream analysis.
            if output_t_pred is None:
                output_t_pred = token_logits.detach().cpu().numpy()
                output_s_pred = seq_logits.detach().cpu().numpy()
            else:
                output_t_pred = np.concatenate([output_t_pred, token_logits.detach().cpu().numpy()], axis=0)
                output_s_pred = np.concatenate([output_s_pred, seq_logits.detach().cpu().numpy()], axis=0)
    print(f'\tClassification Loss: {epoch_loss / num_batches:.3f}')
    return_s_tuple = (epoch_loss / num_batches, epoch_s_auc / num_batches, epoch_s_acc / num_batches,
                      epoch_s_tn, epoch_s_fp, epoch_s_fn,
                      epoch_s_tp)
    return_t_tuple = (
        epoch_t_acc / num_batches, epoch_t_results, epoch_t_results_by_tag, epoch_t_CR)
    return_tuple = (return_s_tuple, return_t_tuple, output_t_pred, output_s_pred)
    return return_tuple
def load_model(model_type, model_path, config):
    """
    Instantiate the model matching `model_type`; returns None for unknown types.

    HuggingFace variants load weights via from_pretrained; BiLSTM variants are
    built from config and optionally restored from pytorch_model.pt.
    """
    if model_type.startswith('bertweet-multi') and not model_type.startswith('bertweet-multi-crf'):
        return RobertaForTokenAndSequenceClassification.from_pretrained(model_path, config=config)
    if model_type == 'bertweet-multi-crf':
        return RobertaForTokenAndSequenceClassificationWithCRF.from_pretrained(model_path, config=config)
    if model_type == 'BiLSTM-multi':
        model = BiLSTMForWeightedTokenAndSequenceClassification(config=config)
        if model_path is not None:
            model.load_state_dict(torch.load(os.path.join(model_path, 'pytorch_model.pt')))
        return model
    if model_type == 'BiLSTM-multi-crf':
        model = BiLSTMForWeightedTokenAndSequenceClassificationWithCRF(config=config)
        if model_path is not None:
            model.load_state_dict(torch.load(os.path.join(model_path, 'pytorch_model.pt')))
        return model
    return None
def get_embedding(text_list, embeddings_index, embeddings, max_length, token_label_raw_list, label_map):
    """
    Look up pre-trained word embeddings for a batch of pre-split sentences.

    :param text_list: list of sentences, each a list of word strings
    :param embeddings_index: word -> row index into `embeddings`; unknown words
        map to -1 (i.e. embeddings[-1]) and padding positions to -2
    :param embeddings: 2-D array of embedding vectors
    :param max_length: fixed output sequence length (pad or truncate)
    :param token_label_raw_list: per-sentence token label strings
    :param label_map: label string -> label id
    :return: (embedding array [batch, max_length, dim],
              attention masks [batch, max_length],
              label ids [batch, max_length], padded with -100)
    """
    pad_token_label_id = -100
    output_embedding = []
    label_ids_list = []
    attention_masks_list = []
    for words, token_labels in zip(text_list, token_label_raw_list):
        words_mapped = [0] * max_length
        label_ids = [0] * max_length
        length = len(words)
        # FIX: the original elif referenced an undefined `max_words` and raised
        # NameError for any sentence longer than max_length; such sentences are
        # now simply truncated to max_length.
        n_fill = min(length, max_length)
        for i in range(n_fill):
            words_mapped[i] = embeddings_index.get(words[i], -1)
            label_ids[i] = label_map[token_labels[i]]
        for i in range(n_fill, max_length):
            words_mapped[i] = -2  # padding sentinel -> embeddings[-2]
            label_ids[i] = pad_token_label_id
        output_embedding.append(np.array([embeddings[ix] for ix in words_mapped]))
        label_ids_list.append(label_ids)
        # Mask is 1.0 wherever a real (non-padding) label is present.
        attention_masks_list.append([float(i >= 0) for i in label_ids])
    output_embedding = np.array(output_embedding)
    attention_masks_list = np.array(attention_masks_list)
    label_ids_list = np.array(label_ids_list)
    return output_embedding, attention_masks_list, label_ids_list
# Free-form version note for this script (presumably logged in main() for
# provenance — TODO confirm once the full main() is visible).
NOTE = 'V1.0.0: Initial Public Version'
### Main
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--bert_model", default=None, type=str)
parser.add_argument("--model_type", default=None, type=str)
parser.add_argument("--task_type", default='entity_detection', type=str)
parser.add_argument('--n_epochs', default=30, type=int)
parser.add_argument('--max_length', default=128, type=int)
parser.add_argument('--rnn_hidden_size', default=384, type=int)
parser.add_argument('--batch_size', default=32, type=int)
parser.add_argument('--eval_batch_size', default=300, type=int)
parser.add_argument('--test_batch_size', default=300, type=int)
parser.add_argument('--seed', default=42, type=int)
parser.add_argument('--learning_rate', default=1e-5, type=float)
parser.add_argument('--data', default='wnut_16', type=str)
parser.add_argument('--log_dir', default='log-BERTweet-multi', type=str)
parser.add_argument("--save_model", default=False, action='store_true')
parser.add_argument("--early_stop", default=False, action='store_true')
parser.add_argument("--assign_token_weight", default=False, action='store_true')
parser.add_argument("--assign_seq_weight", default=False, action='store_true')
parser.add_argument('--token_lambda', default=10, type=float)
parser.add_argument("--train_file", default=None, type=str)
parser.add_argument("--val_file", default=None, type=str)
parser.add_argument("--test_file", default=None, type=str)
parser.add_argument("--label_map", default=None, type=str)
parser.add_argument("--performance_file", default='all_test_performance.txt', type=str)
parser.add_argument("--embeddings_file", default='glove.840B.300d.txt', type=str)
args = parser.parse_args()
assert args.task_type in ['entity_detection', 'relevant_entity_detection', 'entity_relevance_classification']
print("cuda is available:", torch.cuda.is_available())
log_directory = args.log_dir + '/' + str(args.bert_model).split('/')[-1] + '/' + args.model_type + '/' + \
args.task_type + '/' + str(args.n_epochs) + \
'_epoch/' + args.data.split('/')[-1] + '/' + str(args.assign_token_weight) + \
'_token_weight/' + str(args.assign_seq_weight) + '_seq_weight/' + str(args.token_lambda) + \
'_token_lambda/' + str(args.seed) + '_seed/'
log_filename = 'log.' + str(datetime.datetime.now()).replace(' ', '--').replace(':', '-').replace('.', '-') + '.txt'
model_dir = 'saved-model'
if not os.path.exists(log_directory):
os.makedirs(log_directory)
logname = log_directory + log_filename
modeldir = log_directory + model_dir
logging.basicConfig(filename=logname,
filemode='a',
format='%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s',
datefmt='%H:%M:%S',
level=logging.DEBUG)
if os.path.exists(modeldir) and os.listdir(modeldir):
logging.info(f"modeldir {modeldir} already exists and it is not empty")
print(f"modeldir {modeldir} already exists and it is not empty")
else:
os.makedirs(modeldir, exist_ok=True)
logging.info(f"Create modeldir: {modeldir}")
print(f"Create modeldir: {modeldir}")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
n_gpu = torch.cuda.device_count()
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
train_data = pd.read_pickle(os.path.join(args.data, args.train_file))
val_data = pd.read_pickle(os.path.join(args.data, args.val_file))
test_data = pd.read_pickle(os.path.join(args.data, args.test_file))
need_columns | |
assert False, "Database type not recognised."
@step(r'Add IPv6 address reservation (\S+) with iaid (\S+) to (\S+) record id (\d+).')
def ipv6_address_db_backend_reservation(reserved_address, reserved_iaid, db_type, reservation_record_id):
    """Insert an IPv6 address reservation row into the chosen host-reservation backend."""
    if db_type == 'MySQL':
        backend = mysql_reservation
    elif db_type == 'PostgreSQL':
        backend = pgsql_reservation
    elif db_type == 'Cassandra':
        backend = cql_reservation
    else:
        assert False, "Database type not recognised."
    backend.ipv6_address_db_backend_reservation(reserved_address, reserved_iaid,
                                                int(reservation_record_id))
@step(r'Add option reservation code (\S+) value (\S+) space (\S+) persistent (\d+) client class (\S+) subnet id (\d+) and scope (\S+) to (\S+) record id (\d+).')
def option_db_record_reservation(reserved_option_code, reserved_option_value, reserved_option_space,
                                 reserved_option_persistent, reserved_option_client_class, reserved_subnet_id,
                                 reserved_option_scope, db_type, reservation_record_id):
    """Attach an option reservation to an existing host record in the chosen backend."""
    if db_type == 'MySQL':
        backend = mysql_reservation
    elif db_type == 'PostgreSQL':
        backend = pgsql_reservation
    elif db_type == 'Cassandra':
        backend = cql_reservation
    else:
        assert False, "Database type not recognised."
    backend.option_db_record_reservation(reserved_option_code, reserved_option_value,
                                         reserved_option_space, reserved_option_persistent,
                                         reserved_option_client_class, reserved_subnet_id,
                                         reserved_option_scope, int(reservation_record_id))
@step(r'Dump all the reservation entries from (\S+) database.')
def dump_db_reservation(db_type):
    """Clear every host reservation entry from the chosen backend database."""
    if db_type == 'MySQL':
        backend = mysql_reservation
    elif db_type == 'PostgreSQL':
        backend = pgsql_reservation
    elif db_type == 'Cassandra':
        backend = cql_reservation
    else:
        assert False, "Database type not recognised."
    backend.clear_all_reservations()
@step(r'Upload hosts reservation to (\S+) database.')
def upload_db_reservation(db_type, exp_failed=False):
    """Push the locally prepared host reservations into the chosen backend database."""
    if db_type == 'MySQL':
        backend = mysql_reservation
    elif db_type == 'PostgreSQL':
        backend = pgsql_reservation
    elif db_type == 'Cassandra':
        backend = cql_reservation
    else:
        assert False, "Database type not recognised."
    backend.upload_db_reservation(exp_failed)
# END Reservation backend section
@step(r'Reserve (\S+) (\S+) for host uniquely identified by (\S+) (\S+).')
def host_reservation(reservation_type, reserved_value, unique_host_value_type, unique_host_value):
    """Configure a simple global host reservation (no subnet binding)."""
    resolved = test_define_value(reservation_type, reserved_value,
                                 unique_host_value_type, unique_host_value)
    reservation_type, reserved_value, unique_host_value_type, unique_host_value = resolved
    dhcp.host_reservation(reservation_type, reserved_value, unique_host_value_type, unique_host_value, None)
##shared-subnet cfg
@step(r'Add subnet (\d+) to shared-subnet set (\d+).')
def shared_subnet(subnet_id, shared_subnet_id):
    """Attach a subnet to a shared-subnet set."""
    resolved = test_define_value(subnet_id, shared_subnet_id)
    dhcp.add_to_shared_subnet(resolved[0], int(resolved[1]))
@step(r'Shared subnet (\d+) is configured with option line: (.+)')
def add_option_shared_subnet(shared_subnet_id, conf_line):
    """Append a raw configuration line to a shared-subnet entry."""
    resolved_id, resolved_line = test_define_value(shared_subnet_id, conf_line)
    dhcp.add_line_to_shared_subnet(resolved_id, resolved_line)
@step(r'Add configuration parameter (\S+) with value (\S+) to shared-subnet (\d+) configuration.')
def set_conf_parameter_shared_subnet(parameter_name, value, subnet_id):
    """
    Set a single configuration parameter on a shared-subnet entry.

    Typically used at the end of the configuration process, just before
    starting the server.

    :param parameter_name: name of the parameter to set
    :param value: value to assign
    :param subnet_id: id of the shared-subnet set to modify
    """
    parameter_name, value, subnet_id = test_define_value(parameter_name, value, subnet_id)
    dhcp.set_conf_parameter_shared_subnet(parameter_name, value, int(subnet_id))
##subnet options
@step(r'Reserve (\S+) (\S+) in subnet (\d+) for host uniquely identified by (\S+) (\S+).')
def host_reservation_in_subnet(reservation_type, reserved_value, subnet, unique_host_value_type, unique_host_value):
    """Configure a simple host reservation scoped to one subnet."""
    resolved = test_define_value(reservation_type, reserved_value,
                                 unique_host_value_type, unique_host_value)
    reservation_type, reserved_value, unique_host_value_type, unique_host_value = resolved
    dhcp.host_reservation(reservation_type, reserved_value, unique_host_value_type, unique_host_value, int(subnet))
@step(r'For host reservation entry no. (\d+) in subnet (\d+) add (\S+) with value (\S+).')
def host_reservation_in_subnet_add_value(reservation_number, subnet, reservation_type, reserved_value):
    """Extend an existing host reservation entry with an additional value."""
    reservation_type, reserved_value = test_define_value(reservation_type, reserved_value)
    dhcp.host_reservation_extension(int(reservation_number), int(subnet),
                                    reservation_type, reserved_value)
@step(r'Time (\S+) in subnet (\d+) is configured with value (\d+).')
def set_time_in_subnet(which_time, subnet, value):
    """Set one of the subnet timers (T1, T2, preferred or valid lifetime)."""
    which_time, subnet, value = test_define_value(which_time, subnet, value)
    dhcp.set_time(which_time, value, subnet)
@step(r'Server is configured with another pool (\S+) in subnet (\d+).')
def new_pool(pool, subnet):
    """Add an additional address pool to an existing subnet."""
    dhcp.add_pool_to_subnet(pool, int(subnet))
@step(r'Server is configured with (\S+) option in subnet (\d+) with value (\S+).')
def config_srv(option_name, subnet, option_value):
    """
    Add an option to the server configuration for a given subnet.

    :param option_name: option name, e.g. dns-servers (a number may be used)
    :param subnet: subnet id the option applies to
    :param option_value: value of the option
    """
    dhcp.prepare_cfg_add_option_subnet(option_name, subnet, option_value)
@step(r'On space (\S+) server is configured with (\S+) option in subnet (\d+) with value (\S+).')
def config_srv_on_space(space, option_name, subnet, option_value):
    """
    Add an option in a specific option space to a subnet's configuration.

    :param space: option space name
    :param option_name: option name, e.g. dns-servers (a number may be used)
    :param subnet: subnet id the option applies to
    :param option_value: value of the option
    """
    dhcp.prepare_cfg_add_option_subnet(option_name, subnet, option_value, space)
@step(r'Server is configured with client-classification option in subnet (\d+) with name (\S+).')
def config_client_classification(subnet, option_value):
    """Configure the client-classification option for a subnet."""
    dhcp.config_client_classification(subnet, option_value)
@step(r'Server is configured with require-client-classification option in subnet (\d+) with name (\S+).')
def config_require_client_classification(subnet, option_value):
    """Configure the require-client-classification option for a subnet."""
    dhcp.config_require_client_classification(subnet, option_value)
@step(r'Add class called (\S+).')
def create_new_class(class_name):
    """Add a new client class named `class_name` to the configuration."""
    dhcp.create_new_class(class_name)
@step(r'To class no (\d+) add parameter named: (\S+) with value: (.+)')
def add_test_to_class(class_number, parameter_name, parameter_value):
    """
    Add a parameter to the numbered client class; "test" expressions are run
    through test_define_value substitution first.
    """
    if parameter_name == "test":
        parameter_name, parameter_value = test_define_value(parameter_name, parameter_value)
    dhcp.add_test_to_class(int(class_number), parameter_name, parameter_value)
@step(r'To class no (\d+) add option (\S+) with value (\S+).')
def add_option_to_defined_class(class_no, option, option_value):
    """Attach an option/value pair to an already defined client class."""
    dhcp.add_option_to_defined_class(int(class_no), option, option_value)
@step(r'Server has control channel (\S+).')
def open_control_channel(socket_name=None):
    """Configure a control-channel socket on the server (default name when None)."""
    dhcp.open_control_channel_socket(socket_name)
@step(r'Server has control agent configured on HTTP connection with address (\S+):(\S+) and socket (\S+) path: (\S+).')
def agent_control_channel(host_address='$(MGMT_ADDRESS)', host_port=8000, socket_name='control_socket'):
    """Configure the control agent's HTTP endpoint and the control socket it talks to."""
    host_address, host_port = test_define_value(host_address, host_port)
    dhcp.agent_control_channel(host_address, host_port, socket_name)
##DNS server configuration
@step(r'DNS server is configured on (\S+) address (\S+) on port no. (\d+) and working directory (\S+).')
def dns_conf(ip_type, address, port, direct):
    """Set the DNS server defaults: address family, address, port and working directory."""
    resolved = test_define_value(ip_type, address, port, direct)
    dns.add_defaults(*resolved)
@step(r'DNS server is configured with zone (\S+) with type: (\S+) file: (\S+) with dynamic update key: (\S+).')
def add_zone(zone, zone_type, file_nem, key):
    """
    Add a DNS zone (name, type, zone file, dynamic-update key).

    `file_nem` keeps its historical spelling to preserve the step interface.
    """
    zone, zone_type, file_nem, key = test_define_value(zone, zone_type, file_nem, key)
    dns.add_zone(zone, zone_type, file_nem, key)
@step(r'Add DNS key named: (\S+) algorithm: (\S+) and value: (\S+).')
def dns_add_key(key_name, algorithm, key_value):
    """Register a DNS key definition (name, algorithm, secret value)."""
    resolved = test_define_value(key_name, algorithm, key_value)
    dns.add_key(*resolved)
@step(r'Add DNS rndc-key on address (\S+) and port (\d+). Using algorithm: (\S+) with value: (\S+)')
def dns_rest(address, port, alg, value):
    """Configure the rndc control channel (address, port, key algorithm and secret)."""
    resolved = test_define_value(address, port, alg, value)
    dns.add_rndc(*resolved)
@step(r'Server logging system is configured with logger type (\S+), severity (\S+), severity level (\S+) and log file (\S+).')
def configure_loggers(log_type, severity, severity_level, logging_file=None):
    """Add a logger entry (type, severity, severity level, optional output file)."""
    log_type, severity, severity_level = test_define_value(log_type, severity, severity_level)
    dhcp.add_logger(log_type, severity, severity_level, logging_file)
##servers management
@step(r'Create server configuration.')
def build_config_files(cfg=None):
    """Generate the server configuration files without sending them anywhere."""
    dhcp.build_config_files(cfg=cfg)
@step(r'Create and send server configuration.')
def build_and_send_config_files(cfg=None, dest=world.f_cfg.mgmt_address):
    """Build the configuration files and push them to `dest` (default: management address)."""
    dest = test_define_value(dest)[0]
    # Register the target so its files are collected during clean-up.
    check_remote_address(dest)
    dhcp.build_and_send_config_files(cfg=cfg, destination_address=dest)
@step(r'(\S+) server is (started|stopped|restarted|reconfigured).')
def start_srv(name, type_of_action, config_set=None, dest=world.f_cfg.mgmt_address):
    """
    Start, stop, restart or reconfigure the DHCP or DNS server under test.

    :param name: "DHCP" or "DNS"
    :param type_of_action: one of started/stopped/restarted/reconfigured
    :param config_set: optional DNS configuration set number, used only when
        starting the DNS server
    :param dest: management address of the target server
    """
    dest = test_define_value(dest)[0]
    # Register the address so its files are collected during clean-up.
    check_remote_address(dest)
    if name not in ["DHCP", "DNS"]:
        assert False, "I don't think there is support for something else than DNS or DHCP"
    if type_of_action == "started":
        if name == "DHCP":
            log.info('----------------- KEA START %s -------------------------------------------------------', dest)
            dhcp.start_srv(True, None, destination_address=dest)
        elif name == "DNS":
            log.info('----------------- BIND START %s -------------------------------------------------------', dest)
            # Optionally switch to a numbered DNS configuration set first.
            if config_set is not None:
                use_dns_set_number(config_set)
            dns.start_srv(True, None, destination_address=dest)
    elif type_of_action == "stopped":
        if name == "DHCP":
            log.info('----------------- KEA STOP %s -------------------------------------------------------', dest)
            dhcp.stop_srv(destination_address=dest)
        elif name == "DNS":
            log.info('----------------- BIND STOP %s -------------------------------------------------------', dest)
            dns.stop_srv(destination_address=dest)
    elif type_of_action == "restarted":
        if name == "DHCP":
            log.info('----------------- KEA RESTART %s -------------------------------------------------------', dest)
            dhcp.restart_srv(destination_address=dest)
        elif name == "DNS":
            log.info('----------------- BIND RESTART %s -------------------------------------------------------', dest)
            dns.restart_srv(destination_address=dest)
    elif type_of_action == "reconfigured":
        if name == "DHCP":
            log.info('----------------- KEA RECONFIG %s -------------------------------------------------------', dest)
            dhcp.reconfigure_srv(destination_address=dest)
        elif name == "DNS":
            log.info('----------------- BIND RECONFIG %s -------------------------------------------------------', dest)
            dns.reconfigure_srv(destination_address=dest)
    else:
        assert False, "we don't support '%s' action." % str(type_of_action)
def check_remote_address(remote_address):
    """
    Register a remote server IP as an additional tested location; during
    clean-up, files from every registered location are downloaded locally.

    :param remote_address: IP address of the remote VM
    :return: nothing
    """
    servers = world.f_cfg.multiple_tested_servers
    if remote_address not in servers:
        servers.append(remote_address)
@step(r'Remote (\S+) server is (started|stopped|restarted|reconfigured) on address (\S+).')
def remote_start_srv(name, type_of_action, destination_address):
    """
    Start, stop, restart or reconfigure the DHCP or DNS server on a remote host.

    :param name: "DHCP" or "DNS"
    :param type_of_action: one of started/stopped/restarted/reconfigured
    :param destination_address: IP address of the remote server
    """
    destination_address = test_define_value(destination_address)[0]
    # Register the address so its files are collected during clean-up.
    check_remote_address(destination_address)
    if name not in ["DHCP", "DNS"]:
        assert False, "I don't think there is support for something else than DNS or DHCP"
    server = dhcp if name == "DHCP" else dns
    if type_of_action == "started":
        server.start_srv(True, None, destination_address)
    elif type_of_action == "stopped":
        server.stop_srv(destination_address=destination_address)
    elif type_of_action == "restarted":
        server.restart_srv(destination_address=destination_address)
    elif type_of_action == "reconfigured":
        server.reconfigure_srv(destination_address=destination_address)
    else:
        assert False, "we don't support this action."
@step(r'(\S+) server failed to start. During (\S+) process.')
def start_srv_during_process(name, process):
    """
    Start a server expecting it to fail during the given phase
    (used for testing incorrect configurations).
    """
    if name not in ["DHCP", "DNS"]:
        assert False, "I don't think there is support for something else than DNS or DHCP"
    server = dhcp if name == "DHCP" else dns
    server.start_srv(False, process)
@step(r'(\S+) server failed to start. During (\S+) process on remote destination (\S+).')
def start_srv_during_remote_process(name, process, destination_address):
"""
Decide which you want, start server of failed start (testing incorrect configuration)
Also decide in which part should it failed.
"""
destination_address[0] = test_define_value(destination_address)
check_remote_address(destination_address)
if name == "DHCP":
dhcp.start_srv(False, process, destination_address)
elif name == "DNS":
dns.start_srv(False, process, destination_address)
else:
assert False, "I don't think there is support for something else than DNS or | |
lean only, trimmed to 1/8" fat, select, cooked, grilled
23250: [], # Beef, top loin petite roast, boneless, separable lean only, trimmed to 1/8" fat, all grades, cooked, roasted
23251: [], # Beef, top loin petite roast, boneless, separable lean only, trimmed to 1/8" fat, choice, cooked, roasted
23252: [], # Beef, top loin petite roast, boneless, separable lean only, trimmed to 1/8" fat, select, cooked, roasted
23253: [], # Beef, top loin petite roast/filet, boneless, separable lean only, trimmed to 1/8" fat, all grades, raw
23254: [], # Beef, top loin petite roast/filet, boneless, separable lean only, trimmed to 1/8" fat, choice, raw
23255: [], # Beef, top loin petite roast/filet, boneless, separable lean only, trimmed to 1/8" fat, select, raw
23256: [], # Beef, loin, top sirloin filet, boneless, separable lean only, trimmed to 0" fat, all grades, cooked, grilled
23257: [], # Beef, loin, top sirloin filet, boneless, separable lean only, trimmed to 0" fat, choice, cooked, grilled
23258: [], # Beef, loin, top sirloin filet, boneless, separable lean only, trimmed to 0" fat, select, cooked, grilled
23259: [], # Beef, loin, top sirloin petite roast, boneless, separable lean only, trimmed to 0" fat, all grades, cooked, roasted
23260: [], # Beef, loin, top sirloin petite roast, boneless, separable lean only, trimmed to 0" fat, select, cooked, roasted
23261: [], # Beef, loin, top sirloin petite roast/filet, boneless, separable lean only, trimmed to 0" fat, all grades, raw
23262: [], # Beef, loin, top sirloin petite roast/filet, boneless, separable lean only, trimmed to 0" fat, select, raw
23263: [], # Beef, ribeye petite roast/filet, boneless, separable lean only, trimmed to 0" fat, all grades, raw
23264: [], # Beef, ribeye petite roast/filet, boneless, separable lean only, trimmed to 0" fat, choice, raw
23265: [], # Beef, ribeye petite roast/filet, boneless, separable lean only, trimmed to 0" fat, select, raw
23266: [], # Beef, ribeye cap steak, boneless, separable lean only, trimmed to 0" fat, all grades, cooked, grilled
23267: [], # Beef, ribeye cap steak, boneless, separable lean only, trimmed to 0" fat, choice, cooked, grilled
23268: [], # Beef, ribeye cap steak, boneless, separable lean only, trimmed to 0" fat, select, cooked, grilled
23269: [], # Beef, ribeye cap steak, boneless, separable lean only, trimmed to 0" fat, all grades, raw
23270: [], # Beef, ribeye cap steak, boneless, separable lean only, trimmed to 0" fat, choice, raw
23271: [], # Beef, ribeye cap steak, boneless, separable lean only, trimmed to 0" fat, select, raw
23272: [], # Beef, ribeye filet, boneless, separable lean only, trimmed to 0" fat, all grades, cooked, grilled
23273: [], # Beef, ribeye filet, boneless, separable lean only, trimmed to 0" fat, choice, cooked, grilled
23274: [], # Beef, ribeye filet, boneless, separable lean only, trimmed to 0" fat, select, cooked, grilled
23275: [], # Beef, ribeye petite roast, boneless, separable lean only, trimmed to 0" fat, all grades, cooked, roasted
23276: [], # Beef, ribeye petite roast, boneless, separable lean only, trimmed to 0" fat, choice, cooked, roasted
23277: [], # Beef, ribeye petite roast, boneless, separable lean only, trimmed to 0" fat, select, cooked, roasted
23278: [], # Beef, loin, top sirloin cap steak, boneless, separable lean and fat, trimmed to 1/8" fat, all grades, cooked, grilled
23279: [], # Beef, loin, top sirloin cap steak, boneless, separable lean and fat, trimmed to 1/8" fat, choice, cooked, grilled
23280: [], # Beef, loin, top sirloin cap steak, boneless, separable lean and fat, trimmed to 1/8" fat, select, cooked, grilled
23281: [], # Beef, loin, top sirloin cap steak, boneless, separable lean and fat, trimmed to 1/8" fat, all grades, raw
23282: [], # Beef, loin, top sirloin cap steak, boneless, separable lean and fat, trimmed to 1/8" fat, choice, raw
23283: [], # Beef, loin, top sirloin cap steak, boneless, separable lean and fat, trimmed to 1/8" fat, select, raw
23284: [], # Beef, top loin filet, boneless, separable lean and fat, trimmed to 1/8" fat, all grades, cooked, grilled
23285: [], # Beef, top loin filet, boneless, separable lean and fat, trimmed to 1/8" fat, choice, cooked, grilled
23286: [], # Beef, top loin filet, boneless, separable lean and fat, trimmed to 1/8" fat, select, cooked, grilled
23287: [], # Beef, top loin petite roast, boneless, separable lean and fat, trimmed to 1/8" fat, all grades, cooked, roasted
23288: [], # Beef, top loin petite roast, boneless, separable lean and fat, trimmed to 1/8" fat, choice, cooked, roasted
23289: [], # Beef, top loin petite roast, boneless, separable lean and fat, trimmed to 1/8" fat, select, cooked, roasted
23290: [], # Beef, top loin petite roast/filet, boneless, separable lean and fat, trimmed to 1/8" fat, all grades, raw
23291: [], # Beef, top loin petite roast/filet, boneless, separable lean and fat, trimmed to 1/8" fat, choice, raw
23292: [], # Beef, top loin petite roast/filet, boneless, separable lean and fat, trimmed to 1/8" fat, select, raw
23293: [], # Beef, Australian, imported, grass-fed, ground, 85% lean / 15% fat, raw
23294: [], # Beef, Australian, imported, grass-fed, loin, tenderloin steak/roast, boneless, separable lean only, raw
23295: [], # Beef, Australian, imported, Wagyu, loin, tenderloin steak/roast, boneless, separable lean only, Aust. marble score 4/5, raw
23296: [], # Beef, Australian, imported, grass-fed, external fat, raw
23297: [], # Beef, Australian, imported, grass-fed, seam fat, raw
23298: [], # Beef, Australian, imported, Wagyu, external fat, Aust. marble score 4/5, raw
23299: [], # Beef, Australian, imported, Wagyu, seam fat, Aust. marble score 4/5, raw
23300: [], # Beef, Australian, imported, Wagyu, external fat, Aust. marble score 9, raw
23301: [], # Beef, Australian, imported, Wagyu, seam fat, Aust. marble score 9, raw
23302: [], # Beef, Australian, imported, grass-fed, loin, tenderloin steak/roast, boneless, separable lean and fat, raw
23303: [], # Beef, Australian, imported, grass-fed, loin, top loin steak/roast, boneless, separable lean only, raw
23304: [], # Beef, Australian, imported, Wagyu, loin, tenderloin steak/roast, boneless, separable lean and fat, Aust. marble score 4/5, raw
23305: [], # Beef, Australian, imported, grass-fed, loin, top loin steak/roast, boneless, separable lean and fat, raw
23306: [], # Beef, Australian, imported, grass-fed, loin, top sirloin cap-off steak/roast, boneless, separable lean only, raw
23307: [], # Beef, Australian, imported, grass-fed, rib, ribeye steak/roast lip-on, boneless, separable lean only, raw
23308: [], # Beef, Australian, imported, grass-fed, round, bottom round steak/roast, boneless, separable lean only, raw
23309: [], # Beef, Australian, imported, grass-fed, round, top round cap-off steak/roast, boneless, separable lean only, raw
23310: [], # Beef, Australian, imported, Wagyu, loin, tenderloin steak/roast, boneless, separable lean only, Aust. marble score 9, raw
23311: [], # Beef, Australian, imported, Wagyu, loin, top loin steak/roast, boneless, separable lean only, Aust. marble score 4/5, raw
23312: [], # Beef, Australian, imported, Wagyu, loin, top loin steak/roast, boneless, separable lean only, Aust. marble score 9, raw
23313: [], # Beef, Australian, imported, Wagyu, rib, small end rib steak/roast, boneless, separable lean only, Aust. marble score 4/5, raw
23314: [], # Beef, Australian, imported, Wagyu, rib, small end rib steak/roast, boneless, separable lean only, Aust. marble score 9, raw
23315: [], # Beef, Australian, imported, grass-fed, loin, top sirloin cap-off steak/roast, boneless, separable lean and fat, raw
23316: [], # Beef, Australian, imported, grass-fed, rib, ribeye steak/roast lip-on, boneless, separable lean and fat, raw
23317: [], # Beef, Australian, imported, grass-fed, round, bottom round steak/roast, boneless, separable lean and fat, raw
23318: [], # Beef, Australian, imported, grass-fed, round, top round cap-off steak/roast, boneless, separable lean and fat, raw
23319: [], # Beef, Australian, imported, Wagyu, loin, top loin steak/roast, boneless, separable lean and fat, Aust. marble score 4/5, raw
23320: | |
8px;"
line = "<td {} style='{}'>".format(td_arg, css)
line = line + str(content)
line = line + " </td>\n"
#logging.debug(line)
return line
#############################################
### Vector graphics/animation
### Select objects
def select_checks_and_clears(self, object_id, n, check):
    """Generate the per-canvas JS helper functions for selectable objects.

    Emits (as one returned JS source string) up to four pieces for the
    canvas identified by object_id:
      * sel_obj_<id>_clear()    - deselect all checkable objects unless the
                                  answer was already verified correct;
      * sel_obj_<id>_check()    - build result[] from the selected states and
                                  evaluate the caller's check expression
                                  (only when check is not None);
      * sel_obj_<id>_solution() - apply the stored solution_<id>[] array to
                                  the objects' states and colors;
      * an anonymous block that copies the selected states into the global
        JS object `values` (presumably read by the answer-reporting code -
        TODO confirm against the page runtime).

    object_id: id of the canvas whose JS arrays (check_<id>, state_<id>,
        sel_obj_<id>, on/off color arrays) are referenced.
    n: total number of objects on the canvas; only those with
        check_<id>[i] == true participate.
    check: JS boolean expression over result[] (see comment block further
        down), or None to skip check generation.

    Side effects: registers the corresponding invocation snippets in
    self.clears, self.checks, self.solutions and self.values.
    """
    oid = str(object_id)
    # Clear helper: reset state and colors of every checkable object and
    # remove the canvas border, but only if the answer has not already been
    # confirmed correct (already_checked_obj_ok_<id>).
    code_clear = """
function sel_obj_""" + oid + """_clear() {
if (!already_checked_obj_ok_""" + oid + """) {
for (let i=0; i<""" + str(n) + """; i++) {
if (check_""" + oid + """[i]) {
state_""" + oid + """[i] = false;
sel_obj_""" + oid + """[i].attr({fill: off_color_""" + oid + """[i], stroke: off_line_color_""" + oid + """[i]});
}
}
clearAllNoBorder('sel_canvas_""" + oid + """');
}
}
"""
    self.clears.append("sel_obj_{}_clear();".format(oid))
    code = code_clear
    if check is not None:
        # Special hack: JS doesn't have a sum function so we hard code it:
        modified_check = check.replace("sum(result)", "(result.reduce((a, b) => a + b, 0))")
        # Check helper: result[ind] is 1/0 per checkable object (in creation
        # order); the caller-supplied expression decides OK vs error, and the
        # outcome is cached in already_checked_obj_ok_<id>.
        code_check = """
already_checked_obj_ok_""" + oid + """ = false;
function sel_obj_""" + oid + """_check() {
var result = [];
var ind = 0;
for (let i=0; i<""" + str(n) + """; i++) {
if (check_""" + oid + """[i]) {
if (state_""" + oid + """[i]) {
result[ind] = 1;
} else {
result[ind] = 0;
}
ind++;
}
}
if (""" + modified_check + """) {
setOK('sel_canvas_""" + oid + """');
already_checked_obj_ok_""" + oid + """ = true;
return true;
} else {
setError('sel_canvas_""" + oid + """');
already_checked_obj_ok_""" + oid + """ = false;
return false;
}
}
"""
        self.checks.append("sel_obj_{}_check();".format(oid))
        code = code + code_check
    # Solution helper: force each checkable object into the state given by
    # solution_<id>[] and recolor it accordingly.
    # NOTE(review): "alread_shown_solutions" below looks like a typo for
    # "already_shown_solutions" - confirm against the global declared
    # elsewhere in the generated page before renaming.
    code_solutions = """
function sel_obj_""" + oid + """_solution() {
alread_shown_solutions = true;
var ind = 0;
for (let i=0; i<""" + str(n) + """; i++) {
if (check_""" + oid + """[i]) {
if (solution_""" + oid + """[ind] == 1) {
sel_obj_""" + oid + """[i].attr({fill: on_color_""" + oid + """[i], stroke: on_line_color_""" + oid + """[i]});
state_""" + oid + """[i] = true;
} else {
sel_obj_""" + oid + """[i].attr({fill: off_color_""" + oid + """[i], stroke: off_line_color_""" + oid + """[i]});
state_""" + oid + """[i] = false;
}
ind++;
}
}
}
"""
    self.solutions.append("sel_obj_{}_solution();".format(oid))
    code = code + code_solutions
    # Values snippet: copies the boolean state of each checkable object
    # (stringified, keyed by its checkable index) into the JS `values` map.
    values = """
{
values = { };
var ind = 0;
for (let i=0; i<""" + str(n) + """; i++) {
if (check_""" + oid + """[i]) {
values[ind.toString()] = (state_""" + oid + """[i]).toString();
ind++;
}
}
}
"""
    self.values.append(values)
    return code
def select_object_onmouse(self, object_id, n):
    """Return the JS that attaches a mousedown toggle to every checkable
    selectable object of canvas `object_id`.

    Clicking an object flips state_<id>[i] and swaps its fill/stroke
    between the stored on and off colors; objects with
    check_<id>[i] == false get no handler.
    """
    oid = str(object_id)
    limit = str(n)
    return """
for (let i=0; i<""" + limit + """; i++) {
if (check_""" + oid + """[i]) {
sel_obj_""" + oid + """[i].mousedown( function() {
if (state_""" + oid + """[i]) {
sel_obj_""" + oid + """[i].attr({fill: off_color_""" + oid + """[i], stroke: off_line_color_""" + oid + """[i]});
state_""" + oid + """[i] = false;
} else {
sel_obj_""" + oid + """[i].attr({fill: on_color_""" + oid + """[i], stroke: on_line_color_""" + oid + """[i]});
state_""" + oid + """[i] = true;
}
});
}
}
"""
### General drawing
# - check_code is a JS expression that evaluates to true if the response is correct
# or false if not. The input parameter is result[i] array where index i
# maps to the i-th selectable object on the canvas in the order they are created.
# Our JS code sets result[i] to 1 if i-th object is selected and 0 if not, and
# evaluate check_code with these values.
# A simple example is: check_code = "result[0] == 1 && result[1] == 0"
# See questions/geometry/q00045 for a more elaborate example.
# - solutions is a JS expression that sets an array solution[i] to a valid solution to the question.
# It maps to selectable objects on canvas, in the same way as results above.
# If object i is selected when solution[i] == 1 and deselected when solution[i] == 0.
# this implies a correct solution and should satisfy check_code.
# A simple example matching the above is: solutions = "solution[0] = 1; solution[1] = 0;"
# If solutions == None we try to infer it from check_code by
# replacing "&&" -> ";" and "==" -> "="
# But this is not always possible, e.g. in check_code = "sum(results) == 5"
# so a valid solutions string has to be entered manually.
def start_canvas(self, width, height, align=None, check_code=None, solutions=None):
    """Open a selectable-objects canvas and emit its HTML/JS prologue.

    width, height: Raphael paper dimensions in pixels.
    align: "inline" to emit an inline <span> wrapper, any other value for a
        text-aligned <div> (default alignment is "center").
    check_code: JS boolean expression over result[] used to grade the
        canvas (see the comment block above), or None for an ungraded canvas.
    solutions: JS assignments filling solution[]; when None it is inferred
        from check_code by the "==" -> "=" / "&&" -> ";" substitution
        described above.

    Side effects: sets self.canvas_id / canvas_align / canvas_check_code,
    resets self.canvas_items, and writes the opening markup plus the
    per-canvas JS array declarations to the page.
    NOTE(review): the emitted <div>/<span> and <script> are left open here -
    presumably closed by a matching end-of-canvas method; confirm.
    """
    # Guard: only one canvas may be open at a time.
    if self.canvas_id is not None:
        self.page.add_lines("Canvas should not have been started...")
        return
    self.canvas_id = str(self.get_object_id())
    self.canvas_align = align
    self.canvas_check_code = check_code
    self.canvas_items = []
    if solutions is None:
        if check_code is None:
            solutions = ""
        else:
            # Infer solution assignments from check_code (best effort; see
            # the comment block above for when this fails).
            solutions = check_code
            solutions = solutions.replace("==", "=")
            solutions = solutions.replace("&&", ";")
            solutions = solutions.replace("result", "solution_" + self.canvas_id)
            solutions = solutions + "; "
    else:
        # Rename the caller's solution[] array to this canvas's instance.
        solutions = solutions.replace("solution", "solution_" + self.canvas_id)
    # Pass align style to surrounding div
    inline = False
    if align is not None:
        if align == "inline":
            inline = True
        else:
            div_ccs = "style='text-align:{}'".format(align)
    else:
        # default align is center
        div_ccs = "style='text-align:center'"
    if inline:
        script = "<span style='vertical-align:middle;display:inline-block' id = 'sel_canvas_{}'>".format(self.canvas_id)
    else:
        script = "<div {} id = 'sel_canvas_{}'>".format(div_ccs, self.canvas_id)
    # Declare the Raphael paper plus the parallel per-object JS arrays that
    # _add_draw_object() and the select_* helpers index into.
    script = script + """
<script type = "text/javascript">
var paper_""" + self.canvas_id +\
    """ = Raphael("sel_canvas_""" + self.canvas_id + """", """ + \
    str(width) + ", " + str(height) + """);
var on_color_""" + self.canvas_id + """ = [];
var off_color_""" + self.canvas_id + """ = [];
var on_line_color_""" + self.canvas_id + """ = [];
var off_line_color_""" + self.canvas_id + """ = [];
var check_""" + self.canvas_id + """ = [];
var state_""" + self.canvas_id + """ = [];
var sel_obj_""" + self.canvas_id + """ = [];
var solution_""" + self.canvas_id + """ = [];
"""
    script = script + solutions + "\n"
    self.page.add_lines(script)
def _add_draw_object(self, string, style={}, initial_state=None, check=None):
off_color="#fff"
on_color="#aff"
on_line_color = "#000"
off_line_color = "#000"
line_width = "2"
opacity = "1"
object_id = self.canvas_id
if style is None or (not isinstance(style, dict) and lupa.lua_type(style) != "table"):
style = {}
if "off_color" in style.keys():
if off_color != "none":
off_color = "#" + style["off_color"]
if "on_color" in style.keys():
if on_color != "none":
on_color = "#" + style["on_color"]
if "line_color" in style.keys():
on_line_color = "#" + style["line_color"]
if "off_line_color" in style.keys():
off_line_color = "#" + style["off_line_color"]
else:
off_line_color = on_line_color
if "line_width" in style.keys():
line_width = style["line_width"]
if "opacity" in style.keys():
opacity = style["opacity"]
font_attr = ""
if "font_size" in style.keys():
font_attr = font_attr + ", \"font-size\": \"{}\"".format(style["font_size"])
else:
font_attr = font_attr + ", \"font-size\": \"18\""
if "font_family" in style.keys():
font_attr = font_attr + ", \"font-family\": \"{}\"".format(style["font_family"])
off_attr_str = ".attr({fill: \"" + off_color + \
"\", stroke: \"" + off_line_color + \
"\", opacity: \"" + opacity + \
"\", \"stroke-width\": " + line_width + font_attr + "});\n"
on_attr_str = ".attr({fill: \"" + on_color + \
"\", stroke: \"" + on_line_color + \
"\", opacity: \"" + opacity + \
"\", \"stroke-width\": " + line_width + font_attr + "});\n"
if check is None:
check = (self.canvas_check_code is not None and self.canvas_check_code)
if initial_state:
attr_str = on_attr_str
state_str = "state_{}[{}] = {};\n".format(object_id, len(self.canvas_items), "true")
else:
attr_str = off_attr_str
state_str = "state_{}[{}] = {};\n".format(object_id, len(self.canvas_items), "false")
code = "sel_obj_{}[{}] = paper_{}.".format(\
object_id, len(self.canvas_items), object_id) + string + attr_str
color_str = "on_color_{}[{}] = \"{}\";\n".format(object_id, len(self.canvas_items), on_color)\
+ "off_color_{}[{}] = \"{}\";\n".format(object_id, len(self.canvas_items), off_color)\
+ "on_line_color_{}[{}] = \"{}\";\n".format(object_id, len(self.canvas_items), on_line_color)\
+ "off_line_color_{}[{}] = \"{}\";\n".format(object_id, len(self.canvas_items), off_line_color)
check_str = "check_{}[{}] = {};\n".format(object_id, len(self.canvas_items), \
"true" if check else "false")
self.canvas_items.append(
{"off_color": off_color,
"on_color": on_color,
"on_line_color": on_line_color,
"off_line_color": off_line_color
})
self.page.add_lines(color_str + check_str + state_str + code)
def add_rectangle(self, x, y, width, height, style={}, initial_state=None, check=None):
obj_str = "rect({}, {}, {}, {})".format(x, y, width, height)
self._add_draw_object(obj_str, style, |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.